text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# slc_prj.py
import os
import os.path as osp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import astropy.units as au
import astropy.constants as ac
from matplotlib.colors import Normalize, LogNorm
from mpl_toolkits.axes_grid1 import ImageGrid
import xarray as xr
from ..load_sim import LoadSim
from ..io.read_starpar_vtk import read_starpar_vtk
from ..plt_tools.cmap_shift import cmap_shift
from ..plt_tools.plt_starpar import scatter_sp
from ..classic.utils import texteffect
cmap_def = dict(
Sigma_gas=plt.cm.pink_r,
Sigma_H2=plt.cm.pink_r,
EM=plt.cm.plasma,
nH=plt.cm.Spectral_r,
T=cmap_shift(mpl.cm.RdYlBu_r, midpoint=3./7.),
vz=plt.cm.bwr,
chi_FUV=plt.cm.viridis,
Erad_LyC=plt.cm.viridis,
xi_CR=plt.cm.viridis,
Bmag=plt.cm.cividis,
)
norm_def = dict(
Sigma_gas=LogNorm(1e-2,1e2),
Sigma_H2=LogNorm(1e-2,1e2),
EM=LogNorm(1e0,1e5),
nH=LogNorm(1e-4,1e3),
T=LogNorm(1e1,1e7),
vz=Normalize(-200,200),
chi_FUV=LogNorm(1e-2,1e2),
Erad_LyC=LogNorm(1e-16,5e-13),
xi_CR=LogNorm(5e-17,1e-15),
Bmag=LogNorm(1.e-2,1.e2)
)
class SliceProj:
@staticmethod
def _get_extent(domain):
r = dict()
r['x'] = (domain['le'][1], domain['re'][1],
domain['le'][2], domain['re'][2])
r['y'] = (domain['le'][0], domain['re'][0],
domain['le'][2], domain['re'][2])
r['z'] = (domain['le'][0], domain['re'][0],
domain['le'][1], domain['re'][1])
return r
@LoadSim.Decorators.check_pickle
def read_slc(self, num, axes=['x', 'y', 'z'], fields=None, prefix='slc',
savdir=None, force_override=False):
fields_def = ['nH', 'nH2', 'ne', 'vz', 'T', 'cs', 'vx', 'vy', 'vz', 'pok']
if self.par['configure']['radps'] == 'ON':
if (self.par['cooling']['iCR_attenuation']):
fields_def += ['xi_CR']
if self.par['radps']['iPhotIon'] == 1:
fields_def += ['Erad_LyC']
if self.par['cooling']['iPEheating'] == 1:
fields_def += ['chi_FUV']
if self.par['configure']['gas'] == 'mhd':
fields_def += ['Bx','By','Bz','Bmag']
fields = fields_def
axes = np.atleast_1d(axes)
ds = self.load_vtk(num=num)
res = dict()
res['extent'] = self._get_extent(ds.domain)
for ax in axes:
dat = ds.get_slice(ax, fields, pos='c', method='nearest')
res[ax] = dict()
for f in fields:
res[ax][f] = dat[f].data
for zpos,zlab in zip([-1000,-500,500,1000],['zn10','zn05','zp05','zp10']):
dat = ds.get_slice('z', fields, pos=zpos, method='nearest')
res[zlab] = dict()
for f in fields:
res[zlab][f] = dat[f].data
return res
@LoadSim.Decorators.check_pickle
def read_prj(self, num, axes=['x', 'y', 'z'], prefix='prj',
savdir=None, force_override=False):
axtoi = dict(x=0, y=1, z=2)
fields = ['nH', 'nH2', 'nesq']
axes = np.atleast_1d(axes)
ds = self.load_vtk(num=num)
dat = ds.get_field(fields, as_xarray=True)
res = dict()
res['extent'] = self._get_extent(ds.domain)
for ax in axes:
i = axtoi[ax]
dx = ds.domain['dx'][i]*self.u.length
conv_Sigma = (dx*self.u.muH*ac.u.cgs/au.cm**3).to('Msun/pc**2')
conv_EM = (dx*au.cm**-6).to('pc cm-6')
res[ax] = dict()
res[ax]['Sigma_gas'] = (np.sum(dat['nH'], axis=2-i)*conv_Sigma).data
res[ax]['Sigma_H2'] = (2.0*np.sum(dat['nH2'], axis=2-i)*conv_Sigma).data
res[ax]['Sigma_HI'] = res[ax]['Sigma_gas'] - res[ax]['Sigma_H2']
res[ax]['EM'] = (np.sum(dat['nesq'], axis=2-i)*conv_EM).data
return res
def read_slc_xarray(self, num, axis='zall', force_override=False):
slc = self.read_slc(num, force_override=force_override)
if axis == 'zall':
slc_dset = slc_get_all_z(slc)
else:
slc_dset = slc_to_xarray(slc, axis)
return slc_dset
@staticmethod
def plt_slice(ax, slc, axis='z', field='density', cmap=None, norm=None):
try:
if cmap is None:
cmap = cmap_def[field]
if norm is None:
norm = mpl.colors.LogNorm()
elif norm is 'linear':
norm = mpl.colors.Normalize()
ax.imshow(slc[axis][field], cmap=cmap,
extent=slc['extent'][axis], norm=norm, origin='lower', interpolation='none')
except KeyError:
pass
@staticmethod
def plt_proj(ax, prj, axis='z', field='Sigma_gas',
cmap=None, norm=None, vmin=None, vmax=None):
try:
vminmax = dict(Sigma_gas=(1e-2,1e2))
cmap_def = dict(Sigma_gas='pink_r')
if cmap is None:
try:
cmap = cmap_def[field]
except KeyError:
cmap = plt.cm.viridis
if vmin is None or vmax is None:
vmin = vminmax[field][0]
vmax = vminmax[field][1]
if norm is None or 'log':
norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax)
elif norm is 'linear':
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
ax.imshow(prj[axis][field], cmap=cmap, extent=prj['extent'][axis],
norm=norm, origin='lower', interpolation='none')
except KeyError:
pass
def plt_snapshot(self, num,
fields_xy=('Sigma_gas', 'Sigma_H2', 'EM', 'nH', 'T', 'chi_FUV'),
fields_xz=('Sigma_gas', 'Sigma_H2', 'EM', 'nH', 'T', 'vz', 'Bmag'),
#fields_xy=('Sigma_gas', 'EM', 'xi_CR', 'nH', 'chi_FUV', 'Erad_LyC'),
#fields_xz=('Sigma_gas', 'EM', 'nH', 'chi_FUV', 'Erad_LyC', 'xi_CR'),
norm_factor=5.0, agemax=20.0, agemax_sn=40.0, runaway=False,
suptitle=None, savdir_pkl=None, savdir=None, force_override=False,
figsize=(26,12),
savefig=True):
"""Plot 12-panel projection, slice plots in the z and y directions
Parameters
----------
num : int
vtk snapshot number
fields_xy: list of str
Field names for z projections and slices
fields_xz: list of str
Field names for y projections and slices
norm_factor : float
Normalization factor for starpar size. Smaller norm_factor for bigger size.
agemax : float
Maximum age of radiation source particles [Myr]
agemax_sn : float
Maximum age of sn particles [Myr]
runaway : bool
If True, show runaway star particles
suptitle : str
Suptitle for snapshot
savdir_pkl : str
Path to which save (from which load) projections and slices
savdir : str
Path to which save (from which load) projections and slices
"""
label = dict(Sigma_gas=r'$\Sigma$',
Sigma_H2=r'$\Sigma_{\rm H_2}$',
EM=r'${\rm EM}$',
nH=r'$n_{\rm H}$',
T=r'$T$',
vz=r'$v_z$',
chi_FUV=r'$\mathcal{E}_{\rm FUV}$',
Erad_LyC=r'$\mathcal{E}_{\rm LyC}$',
xi_CR=r'$\xi_{\rm CR}$',
Bmag=r'$|B|$'
)
kind = dict(Sigma_gas='prj', Sigma_H2='prj', EM='prj',
nH='slc', T='slc', vz='slc', chi_FUV='slc',
Erad_LyC='slc', xi_CR='slc', Bmag='slc')
nxy = len(fields_xy)
nxz = len(fields_xz)
ds = self.load_vtk(num=num)
LzoLx = ds.domain['Lx'][2]/ds.domain['Lx'][0]
xwidth = 3
ysize = LzoLx*xwidth
xsize = ysize/nxy*4 + nxz*xwidth
x1 = 0.90*(ysize*4/nxy/xsize)
x2 = 0.90*(nxz*xwidth/xsize)
fig = plt.figure(figsize=(xsize, ysize))#, constrained_layout=True)
g1 = ImageGrid(fig, [0.02, 0.05, x1, 0.94], (nxy//2, 2), axes_pad=0.1,
aspect=True, share_all=True, direction='column')
g2 = ImageGrid(fig, [x1+0.07, 0.05, x2, 0.94], (1, nxz), axes_pad=0.1,
aspect=True, share_all=True)
dat = dict()
dat['slc'] = self.read_slc(num, savdir=savdir_pkl, force_override=force_override)
dat['prj'] = self.read_prj(num, savdir=savdir_pkl, force_override=force_override)
sp = self.load_starpar_vtk(num)
extent = dat['prj']['extent']['z']
for i, (ax, f) in enumerate(zip(g1, fields_xy)):
ax.set_aspect(ds.domain['Lx'][1]/ds.domain['Lx'][0])
self.plt_slice(ax, dat[kind[f]], 'z', f, cmap=cmap_def[f], norm=norm_def[f])
if i == 0:
scatter_sp(sp, ax, 'z', kind='prj', kpc=False,
norm_factor=norm_factor, agemax=agemax, agemax_sn=agemax_sn,
runaway=runaway, cmap=plt.cm.cool_r)
ax.set(xlim=(extent[0], extent[1]), ylim=(extent[2], extent[3]))
ax.text(0.5, 0.92, label[f], **texteffect(fontsize='x-large'),
ha='center', transform=ax.transAxes)
if i == 2:
ax.set(xlabel='x [pc]', ylabel='y [pc]')
else:
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
extent = dat['prj']['extent']['y']
for i, (ax, f) in enumerate(zip(g2, fields_xz)):
ax.set_aspect(ds.domain['Lx'][2]/ds.domain['Lx'][0])
self.plt_slice(ax, dat[kind[f]], 'y', f, cmap=cmap_def[f], norm=norm_def[f])
if i == 0:
scatter_sp(sp, ax, 'y', kind='prj', kpc=False,
norm_factor=norm_factor, agemax=agemax,
cmap=plt.cm.cool_r)
ax.set(xlim=(extent[0], extent[1]), ylim=(extent[2], extent[3]))
ax.text(0.5, 0.97, label[f], **texteffect(fontsize='x-large'),
ha='center', transform=ax.transAxes)
if i == 0:
ax.set(xlabel='x [pc]', ylabel='z [pc]')
else:
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if suptitle is None:
suptitle = self.basename
# fig.suptitle(suptitle + ' t=' + str(int(ds.domain['time'])), x=0.4, y=1.02,
# va='center', ha='center', **texteffect(fontsize='xx-large'))
fig.suptitle('Model: {0:s} time='.format(suptitle) + str(int(ds.domain['time'])), x=0.4, y=1.02,
va='center', ha='center', **texteffect(fontsize='xx-large'))
# plt.subplots_adjust(top=0.95)
if savefig:
if savdir is None:
savdir = osp.join(self.savdir, 'snapshot')
if not osp.exists(savdir):
os.makedirs(savdir)
savname = osp.join(savdir, '{0:s}_{1:04d}.png'.format(self.basename, num))
plt.savefig(savname, dpi=200, bbox_inches='tight')
return fig
def slc_to_xarray(slc,axis='z'):
dset = xr.Dataset()
for f in slc[axis].keys():
x0,x1,y0,y1=slc['extent'][axis[0]]
Ny,Nx=slc[axis][f].shape
xfc = np.linspace(x0,x1,Nx+1)
yfc = np.linspace(y0,y1,Ny+1)
xcc = 0.5*(xfc[1:] + xfc[:-1])
ycc = 0.5*(yfc[1:] + yfc[:-1])
dims = dict(z=['y','x'],x=['z','y'],y=['z','x'])
dset[f] = xr.DataArray(slc[axis][f],coords=[ycc,xcc],dims=dims[axis[0]])
return dset
def slc_get_all_z(slc):
dlist = []
for k in slc.keys():
if k.startswith('z'):
slc_dset = slc_to_xarray(slc,k)
if len(k) == 1:
z0=0.
elif k[1] == 'n':
z0 = float(k[2:])*(-100)
elif k[1] == 'p':
z0 = float(k[2:])*(100)
else:
raise KeyError
slc_dset = slc_dset.assign_coords(z=z0)
dlist.append(slc_dset)
else:
pass
return xr.concat(dlist,dim='z').sortby('z')
|
{"hexsha": "c25ede03b630a0a1e586e7cfe54d386d5356ed20", "size": 12406, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyathena/tigress_ncr/slc_prj.py", "max_stars_repo_name": "jeonggyukim/pyathena", "max_stars_repo_head_hexsha": "f3c983d5c0a3f36e28134a4a6d3eb80ac26c2a8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-03T13:59:14.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-03T13:59:14.000Z", "max_issues_repo_path": "pyathena/tigress_ncr/slc_prj.py", "max_issues_repo_name": "jeonggyukim/pyathena", "max_issues_repo_head_hexsha": "f3c983d5c0a3f36e28134a4a6d3eb80ac26c2a8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-09-23T23:36:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-11T06:16:56.000Z", "max_forks_repo_path": "pyathena/tigress_ncr/slc_prj.py", "max_forks_repo_name": "jeonggyukim/pyathena", "max_forks_repo_head_hexsha": "f3c983d5c0a3f36e28134a4a6d3eb80ac26c2a8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-10T04:26:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-04T22:27:02.000Z", "avg_line_length": 36.8130563798, "max_line_length": 105, "alphanum_fraction": 0.5181363856, "include": true, "reason": "import numpy,import astropy", "num_tokens": 3585}
|
import numpy as np
from keras_pretrained_models.imagenet_utils import preprocess_input
from keras.models import Model
from keras.preprocessing import image
from keras_pretrained_models.vgg19 import VGG19
base_model = VGG19(weights='imagenet')
model = Model(input=base_model.input, output=base_model.get_layer('fc2').output)
img_path = '../test_image.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
print (x.shape)
print (np.max(x))
x = np.expand_dims(x, axis=0)
print (x.shape)
x = preprocess_input(x)
print (x.shape)
print (np.max(x))
fc2_features = model.predict(x)
print (fc2_features.shape)
for i in range(4096):
print (fc2_features[:,i])
|
{"hexsha": "f56f284092c849f808dfac76f220b00f503006bd", "size": 690, "ext": "py", "lang": "Python", "max_stars_repo_path": "extract_image_features/keras_pretrained_models/extract_VGG19.py", "max_stars_repo_name": "schen496/auditory-hallucinations", "max_stars_repo_head_hexsha": "31b89df838a9f3c4558c7c3b69dbcd43c7f9de19", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-05-05T10:10:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-20T22:27:05.000Z", "max_issues_repo_path": "extract_image_features/keras_pretrained_models/extract_VGG19.py", "max_issues_repo_name": "schen496/auditory-hallucinations", "max_issues_repo_head_hexsha": "31b89df838a9f3c4558c7c3b69dbcd43c7f9de19", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extract_image_features/keras_pretrained_models/extract_VGG19.py", "max_forks_repo_name": "schen496/auditory-hallucinations", "max_forks_repo_head_hexsha": "31b89df838a9f3c4558c7c3b69dbcd43c7f9de19", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-08-10T02:45:28.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-10T05:38:41.000Z", "avg_line_length": 27.6, "max_line_length": 80, "alphanum_fraction": 0.768115942, "include": true, "reason": "import numpy", "num_tokens": 183}
|
[STATEMENT]
lemma getFresh: "finite V \<Longrightarrow> getFresh V \<in> var \<and> getFresh V \<notin> V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite V \<Longrightarrow> getFresh V \<in> var \<and> getFresh V \<notin> V
[PROOF STEP]
by (metis (no_types, lifting) finite_subset getFresh_def infinite_var someI_ex subsetI)
|
{"llama_tokens": 124, "file": "Syntax_Independent_Logic_Syntax", "length": 1}
|
# -*- coding: utf-8 -*-
from numpy import pi
from ....Methods.Slot.Slot.check import SlotCheckError
def check(self):
"""Check that the HoleM54 object is correct
Parameters
----------
self : HoleM54
A HoleM54 object
Returns
-------
None
Raises
-------
H54_W0CheckError
You must have W0 < 2*pi/Zh
H54_R1CheckError
You must have H0 < R1
"""
if 2 * pi / self.Zh <= self.W0:
raise H54_W0CheckError("You must have W0 < 2*pi/Zh")
if self.R1 <= self.H0:
raise H54_R1CheckError("You must have H0 < R1")
class H54_W0CheckError(SlotCheckError):
""" """
pass
class H54_R1CheckError(SlotCheckError):
""" """
pass
|
{"hexsha": "a8542b53f1023fe504bca1879cd8f79f30c0a0ba", "size": 727, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyleecan/Methods/Slot/HoleM54/check.py", "max_stars_repo_name": "helene-t/pyleecan", "max_stars_repo_head_hexsha": "8362de9b0e32b346051b38192e07f3a6974ea9aa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-26T12:28:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-26T12:28:45.000Z", "max_issues_repo_path": "Methods/Slot/HoleM54/check.py", "max_issues_repo_name": "magnetron/pyleecan", "max_issues_repo_head_hexsha": "2a3338f4ab080ad6488b5ab8746c3fea1f36f177", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Methods/Slot/HoleM54/check.py", "max_forks_repo_name": "magnetron/pyleecan", "max_forks_repo_head_hexsha": "2a3338f4ab080ad6488b5ab8746c3fea1f36f177", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.1555555556, "max_line_length": 60, "alphanum_fraction": 0.5749656121, "include": true, "reason": "from numpy", "num_tokens": 215}
|
import json
import os
import subprocess
import sys
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from time import sleep
from typing import List, Union
import numpy as np
import torch
import torch.nn as nn
def create_logdir(root: Union[str, Path] = None):
if (root is None) or (root == ""):
root = Path.cwd()
else:
root = Path(root)
# When running multiple jobs in parallel (e.g. Slurm) we could get the same
# timestamp so let's allow ourselves to try a few times
for _ in range(10):
try:
timestamp = datetime.now().strftime("%Y-%m-%d-%A-%H-%M-%S")
log_dir = root / "runs" / timestamp
log_dir.mkdir(parents=True)
except FileExistsError:
sleep(1)
continue
else:
break
else:
raise SystemExit("Could not create logdir.")
return log_dir
def save_repo_status(path: Union[str, Path]):
path = Path(path)
with (path / "git_commit.txt").open("w") as f:
subprocess.run(["git", "rev-parse", "HEAD"], stdout=f)
with (path / "workspace_changes.diff").open("w") as f:
subprocess.run(["git", "diff"], stdout=f)
def save_command_line(path: Union[str, Path]):
path = Path(path)
with open(path / "command_line.txt", "w") as f:
f.write("python " + " ".join(sys.argv))
def set_seed(seed: int, allow_nondeterminism: bool):
torch.manual_seed(seed)
np.random.seed(seed)
if allow_nondeterminism is False:
# This can make the training slower
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def unconcatenate(x: torch.Tensor, orig_list: List[torch.Tensor]):
result = []
processed = 0
for ref in orig_list:
result.append(x[processed : processed + ref.numel()].reshape(ref.shape))
processed += ref.numel()
return result
def save_checkpoint(
logdir,
model: torch.nn.Module,
optimiser: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
epoch: int,
max_checkpoints=None,
):
state = {
"model": model.state_dict(),
"optimiser": optimiser.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
}
p = logdir / f"chkpt_epoch_{epoch}.pt"
torch.save(state, p)
if max_checkpoints:
chkpts = sorted(logdir.glob("chkpt_e[0-9]*.pt"), key=os.path.getmtime)
num_unwanted_chckpts = len(chkpts) - max_checkpoints
if num_unwanted_chckpts > 0:
for c in chkpts[0:num_unwanted_chckpts]:
c.unlink()
def load_checkpoint(
path: Union[Path, str],
model: torch.nn.Module,
optimiser: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
):
path = Path(path)
if not path.exists():
raise FileNotFoundError
print(f"🛻 Loading from checkpoint file {path}.")
chkpt = torch.load(path)
model.load_state_dict(chkpt["model"])
print("✅ Loaded the model.")
optimiser.load_state_dict(chkpt["optimiser"])
print("✅ Loaded the optimiser.")
lr_scheduler.load_state_dict(chkpt["lr_scheduler"])
print("✅ Loaded the LR scheduler.")
@contextmanager
def eval_mode(model: nn.Module):
"""
Sets training mode to False and restores it when exiting.
"""
is_training = model.training
try:
model.eval()
yield model
finally:
if is_training:
model.train()
class Hyperparameters:
def __init__(self, **kwargs):
self.from_dict(kwargs)
def from_argparse(self, args):
self.from_dict(args.__dict__)
def from_dict(self, d):
for k, v in d.items():
setattr(self, k, v)
def as_dict(self):
return {k: getattr(self, k) for k in self.__dict__}
def from_json(self, j):
d = json.loads(j)
return self.from_dict(d)
def to_json(self, path: Path):
j = json.dumps(self.as_dict(), indent=4, sort_keys=True)
path.write_text(j)
def __contains__(self, k):
return k in self.__dict__
def __str__(self):
s = [f"{k}={v}" for k, v in self.as_dict().items()]
return ",".join(s)
|
{"hexsha": "f7c570c8e0928247da08e94c9dce16712108981a", "size": 4273, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "mil-ad/prospr", "max_stars_repo_head_hexsha": "a92177989f4480f1f2b43a48b3e18a6597ebba6d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2022-03-25T17:17:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T16:57:48.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "mil-ad/prospr", "max_issues_repo_head_hexsha": "a92177989f4480f1f2b43a48b3e18a6597ebba6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "mil-ad/prospr", "max_forks_repo_head_hexsha": "a92177989f4480f1f2b43a48b3e18a6597ebba6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5574712644, "max_line_length": 80, "alphanum_fraction": 0.6248537327, "include": true, "reason": "import numpy", "num_tokens": 1070}
|
// Copyright Gavin Band 2008 - 2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef GENFILE_COHORTINDIVIDUALSOURCE_HPP
#define GENFILE_COHORTINDIVIDUALSOURCE_HPP
#include <string>
#include <memory>
#include <iosfwd>
#include <boost/variant/variant.hpp>
#include <boost/variant/get.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/function.hpp>
#include "genfile/Error.hpp"
#include "genfile/string_utils.hpp"
#include "genfile/MissingValue.hpp"
#include "genfile/VariantEntry.hpp"
namespace genfile {
// Base class for classes which provide random-access view
// to a set of samples,
class CohortIndividualSource
{
public:
typedef std::auto_ptr< CohortIndividualSource > UniquePtr ;
typedef std::auto_ptr< CohortIndividualSource const > ConstUniquePtr ;
typedef boost::shared_ptr< CohortIndividualSource > SharedPtr ;
static UniquePtr create(
std::string source_spec,
std::string const& missing_value = "NA",
std::string const& choice = "categorical"
) ;
struct ColumnSpec ;
public:
virtual ~CohortIndividualSource() {} ;
virtual std::size_t get_number_of_individuals() const = 0 ;
std::size_t size() const { return get_number_of_individuals() ; }
std::size_t get_number_of_covariates() const ;
std::size_t get_number_of_phenotypes() const ;
virtual ColumnSpec get_column_spec() const = 0 ;
virtual bool check_for_column( std::string const& column_name ) const ;
// method: get_entry()
// get_entry returns the entry for the sample whose index is given and the named column.
// is_missing(): return true if the value is missing, false otherwise
// as< type >(): return the value. type must either be int, double, or string, according to the type of the column.
typedef VariantEntry Entry ;
virtual Entry get_entry( std::size_t sample_i, std::string const& column_name ) const = 0 ;
virtual void get_column_values( std::string const& column_name, boost::function< void ( std::size_t, VariantEntry ) > callback ) const ;
// Source objects may live in hierarchies.
// This method return the eventual parent of the hierarchy.
virtual CohortIndividualSource const& get_base_source() const ;
// Find the parent of this source
virtual CohortIndividualSource const& get_parent_source() const ;
// method: get_source_spec()
// get_source_spec() returns a human-readable specification for this source.
virtual std::string get_source_spec() const ;
// method find_samples_by_value()
// find_sample_by_value returns the set of rows for which the given column equals the given entry.
std::vector< std::size_t > find_samples_by_value( std::string const& column_name, Entry const& entry ) const ;
public:
enum ColumnType { e_ID_COLUMN = 0, e_MISSINGNESS_COLUMN, e_DISCRETE_COVARIATE, e_CONTINUOUS_COVARIATE, e_BINARY_PHENOTYPE, e_CONTINUOUS_PHENOTYPE } ;
public:
struct SingleColumnSpec: private std::pair< std::string, ColumnType >
{
private:
typedef std::pair< std::string, ColumnType > Base ;
public:
SingleColumnSpec( std::string const& name = "", ColumnType const& type = e_ID_COLUMN ) ;
SingleColumnSpec( SingleColumnSpec const& other ) ;
SingleColumnSpec& operator=( SingleColumnSpec const& other ) ;
std::string const& name() const ;
ColumnType const type() const ;
bool is_discrete() const ;
bool is_continuous() const ;
bool is_phenotype() const ;
bool is_covariate() const ;
bool operator==( SingleColumnSpec const& right ) const ;
bool operator!=( SingleColumnSpec const& right ) const ;
} ;
struct ColumnSpec
{
public:
ColumnSpec() ;
ColumnSpec( std::vector< std::string > const& column_names, std::vector< ColumnType > const& column_types ) ;
ColumnSpec( ColumnSpec const& other ) ;
ColumnSpec& operator=( ColumnSpec const& other ) ;
std::size_t size() const ;
std::vector< std::string > get_names() const ;
std::vector< ColumnType > get_types() const ;
SingleColumnSpec get_spec( std::size_t i ) const ;
SingleColumnSpec operator[]( std::size_t i ) const ;
SingleColumnSpec operator[]( std::string const& column_name ) const ;
bool check_for_column( std::string const& name ) const ;
std::size_t find_column( std::string const& name ) const ;
std::size_t get_number_of_covariates() const ;
std::size_t get_number_of_phenotypes() const ;
bool operator==( ColumnSpec const& other ) ;
bool operator!=( ColumnSpec const& other ) ;
// Add a column
void add_column( std::string const& name, ColumnType const type ) ;
// Concatenate two ColumnSpecs.
ColumnSpec operator+( ColumnSpec const& other ) ;
// Remove an element
void remove( std::string const& name ) ;
private:
std::vector< std::string > m_column_names ;
std::vector< ColumnType > m_column_types ;
} ;
} ;
std::ostream& operator<< ( std::ostream& out, CohortIndividualSource::ColumnType const& type ) ;
std::ostream& operator<<( std::ostream& ostr, CohortIndividualSource::SingleColumnSpec const& spec ) ;
std::ostream& operator<<( std::ostream& ostr, CohortIndividualSource::ColumnSpec const& spec ) ;
}
#endif
|
{"hexsha": "4b485022dfe359225cb32dd9fcfd06061844cefd", "size": 5264, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "genfile/include/genfile/CohortIndividualSource.hpp", "max_stars_repo_name": "gavinband/bingwa", "max_stars_repo_head_hexsha": "d52e166b3bb6bc32cd32ba63bf8a4a147275eca1", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "genfile/include/genfile/CohortIndividualSource.hpp", "max_issues_repo_name": "gavinband/bingwa", "max_issues_repo_head_hexsha": "d52e166b3bb6bc32cd32ba63bf8a4a147275eca1", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "genfile/include/genfile/CohortIndividualSource.hpp", "max_forks_repo_name": "gavinband/bingwa", "max_forks_repo_head_hexsha": "d52e166b3bb6bc32cd32ba63bf8a4a147275eca1", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8705035971, "max_line_length": 151, "alphanum_fraction": 0.7247340426, "num_tokens": 1354}
|
import subprocess, operator, random, msgpack, nltk, math, sys, os
from prettytable import PrettyTable
from nltk.corpus import stopwords
from datetime import datetime
from tqdm import tqdm
from PIL import Image
from collections import Counter
import numpy as np
import dateutil.parser
from utils import START_TIME, settings, liwc_keys, open_for_write, read_people_file, tag_set
WIDTH_ITERATIONS = 3
def make_mention_graph(cutoff, num_ppl):
PTU, first_names = read_mention_names(num_ppl)
valid_people = read_people_file("tagged_people")
ptypes = set()
# get people types
for name in valid_people:
ptypes.add(to_group_string(valid_people[name]))
for ptype in ptypes:
print(ptype)
print("Number of types: " + str(len(ptypes)))
print("first names: " + str(first_names))
#init to set of list for negative edges
mentions = {name: {n2: 0 for n2 in PTU} for name in PTU}
#mentions = {ptype: {pt2: 0 for pt2 in ptypes} for ptype in ptypes}
print("Reading conversation files...")
for filename in os.listdir(settings['DATA_DIR']):
print("File: " + filename)
convo = None
with open(settings['DATA_DIR'] + "/" + filename, "rb") as handle:
convo = msgpack.unpackb(handle.read())
cname = convo[b"with"].decode()
if cname not in PTU:
print(cname + " is not in the list of top people...")
continue
for message in tqdm(convo[b"messages"]):
if str(message[b"date"]) < str(START_TIME):
continue
if b"text" not in message:
continue
msg_text = message[b"text"]
if type(msg_text) == bytes:
msg_text = msg_text.decode()
msg_text = msg_text.lower()
mdate = dateutil.parser.parse(message[b"date"])
# add up tokens and number of incoming and outgoing messages
tokens = [_t for _t in nltk.word_tokenize(msg_text)]
for token in tokens:
if token in first_names:
#g_cname = to_group_string(valid_people[cname])
#g_fname = to_group_string(valid_people[first_names[token]])
#if g_fname in mentions[g_cname]:
# mentions[g_cname].remove(g_fname)
#if first_names[token] in mentions[cname]:
# mentions[cname].remove(first_names[token])
# count for edge width
if first_names[token] != cname:
mentions[cname][first_names[token]] += 1
#g_cname = to_group_string(valid_people[cname])
#g_fname = to_group_string(valid_people[first_names[token]])
#if g_fname != g_cname:
# mentions[g_cname][g_fname] += 1
if message[b"user"].decode() in settings['my_name']:# if the person is me
pass
else:# count tokens from other person
pass
make_dotty(mentions, cutoff)
def to_group_string(person):
# only not using 'shared ethnicity'
gstr = "male" if person["same gender"] == "yes" else "female"
gstr += "_"
gstr += "family_" if person["family"] == "yes" else ""
#gstr += "school_" if person["school"] == "from school" else ""
gstr += "work_" if person["work"] == "yes" else ""
#gstr += "girlfriend_" if person["non-platonic relationship"] == "yes" else ""
#gstr += "USA" if person["same childhood country"] == "yes" else "non-USA"
#gstr += person["relative age"]
return gstr
def make_dotty(ppl_map, cutoff):
# delete people with mentions in/out not greater than cutoff
kdels = []
for key in ppl_map:
will_del = True
for key2 in ppl_map:
if ppl_map[key][key2] > cutoff or ppl_map[key2][key] > cutoff:
tweight = ppl_map[key2][key] if ppl_map[key2][key] > ppl_map[key][key2] else ppl_map[key][key2]
print(key + ' has an edge with ' + key2 + ' with weight ' + str(tweight))
will_del = False
break
if will_del:
kdels.append(key)
print('Nodes to remove: ' + str(kdels))
for key in kdels:
del ppl_map[key]
maxc, minc = [1, 999999]
# get max/min
for name in ppl_map:
for n2 in ppl_map[name]:
if ppl_map[name][n2] > maxc:
maxc = ppl_map[name][n2]
if ppl_map[name][n2] < minc and ppl_map[name][n2] > cutoff:
minc = ppl_map[name][n2]
# try a few times to make a wide image and not a tall one
widest, wbest = [0, 0]
for nth_try in range(WIDTH_ITERATIONS):
# write dot file
with open('stats/' + settings['prefix'] + '/mgraph' + str(nth_try) + '.dot', 'w') as handle:
handle.write('#cutoff\t' + str(cutoff) + '\n#nppl\t' + str(len(ppl_map)) + '\n')
handle.write('digraph G {\n\tgraph [pad="0.1", nodesep="0.1", ranksep="0.3"];\n\n\tsubgraph {\n')
node_name = {}
node_lines = []
for name in ppl_map:
node_name[name] = 'n' + str(len(node_name))
node_lines.append('\t\t' + node_name[name] + '[label="' + name.replace("_", "\\n") + '"];\n')#str(len(node_name))
random.shuffle(node_lines)
for nline in node_lines:
handle.write(nline)
for name in ppl_map:
for m in ppl_map[name]:
if m != name and ppl_map[name][m] > cutoff:
handle.write('\t\t' + node_name[name] + ' -> ' + node_name[m] + ' [penwidth=' + str((ppl_map[name][m]-minc)*5.0/(maxc-minc)+0.5) + ']\n')
handle.write('\t}\n}\n')
proc = subprocess.Popen(['dot', '-Tpng', 'mgraph' + str(nth_try) + '.dot', '-o', 'mgraph' + str(nth_try) + '.png'], cwd=r'./stats/' + settings['prefix'])
proc.communicate()
im = Image.open('stats/' + settings['prefix'] + '/mgraph' + str(nth_try) + '.png')
width, height = im.size
if width > widest:
widest = width
wbest = nth_try
for nth_try in range(WIDTH_ITERATIONS):
if nth_try != wbest:
os.remove('stats/' + settings['prefix'] + '/mgraph' + str(nth_try) + '.png')
os.remove('stats/' + settings['prefix'] + '/mgraph' + str(nth_try) + '.dot')
os.rename('stats/' + settings['prefix'] + '/mgraph' + str(wbest) + '.png', 'stats/' + settings['prefix'] + '/mgraph.png')
os.rename('stats/' + settings['prefix'] + '/mgraph' + str(wbest) + '.dot', 'stats/' + settings['prefix'] + '/mgraph.dot')
# Set num_ppl to a number greater than zero to cut off the number of people in the generated graph.
def read_mention_names(num_ppl=-1):
# read nicknames file
nicknames = []
with open('nicknames', 'r') as handle:
for line in handle.readlines():
lp = line.strip()
if lp.startswith('#') or lp == '':
continue
lp = lp.split(':')
if len(lp) != 2:
print('Error reading this line -- skipping: ' + str(lp))
else:
nicknames.append([lp[0].strip(), lp[1].strip()])
# read valid people
P_BY_M = []
total_msg_stats = None
#with open('stats/' + settings['prefix'] + '/total_messages_per_person.csv') as handle:
with open('stats/' + settings['prefix'] + '/people_msg_order_desc') as handle:
total_msg_stats = handle.readlines()
for line in total_msg_stats:#[1:]:
P_BY_M.append(line.split('\t')[0])
if num_ppl > 0:
P_USE = num_ppl if num_ppl < len(P_BY_M) else len(P_BY_M)
PTU = [P_BY_M[i] for i in range(0, P_USE)]
else:
PTU = P_BY_M
first_names = {name.split()[0].lower(): name for name in PTU}
print('Loaded ' + str(len(first_names)) + ' first names...')
if len(first_names) != len(PTU):
print('Warning: First name collision ' + str(len(first_names)) + ' < ' + str(len(PTU)) + '.')
#print(Counter([name.split()[0].lower() for name in PTU]))
fn_coll = [fnk for fnk, fnv in dict(Counter([name.split()[0].lower() for name in PTU])).items() if fnv > 1]
print('Collision elements: ' + str(fn_coll))
print('Loaded ' + str(len(nicknames)) + ' nicknames...')
for name in nicknames:
if name[1] in PTU:
first_names[name[0]] = name[1]
return PTU, first_names
if __name__ == "__main__":
make_mention_graph(25, 20)
|
{"hexsha": "b4b1827fdabeff37edae2dbc09f094cdd6b7f0e2", "size": 8571, "ext": "py", "lang": "Python", "max_stars_repo_path": "mention_graph.py", "max_stars_repo_name": "cfwelch/longitudinal_dialog", "max_stars_repo_head_hexsha": "9f2de780026565df6447301a134a3f2126b0e64b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-04-23T20:24:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-21T12:55:02.000Z", "max_issues_repo_path": "mention_graph.py", "max_issues_repo_name": "cfwelch/longitudinal_dialog", "max_issues_repo_head_hexsha": "9f2de780026565df6447301a134a3f2126b0e64b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mention_graph.py", "max_forks_repo_name": "cfwelch/longitudinal_dialog", "max_forks_repo_head_hexsha": "9f2de780026565df6447301a134a3f2126b0e64b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6067961165, "max_line_length": 161, "alphanum_fraction": 0.5672616964, "include": true, "reason": "import numpy", "num_tokens": 2237}
|
from unittest import TestCase
from esbo_etc.classes.optical_component.Mirror import Mirror
from esbo_etc.classes.SpectralQty import SpectralQty
from esbo_etc.classes.target.FileTarget import FileTarget
import astropy.units as u
import numpy as np
class TestMirror(TestCase):
wl = np.arange(201, 205, 1) << u.nm
def setUp(self):
self.target = FileTarget("tests/data/target/target_demo_1.csv", self.wl)
self.mirror = Mirror(self.target, "tests/data/mirror/mirror_reflectance.csv", 0.5, temp=300 * u.K)
def test___init__(self):
self.assertEqual(self.mirror.calcBackground(),
SpectralQty(self.wl, np.array([4.31413931e-96, 1.37122214e-95, 4.30844544e-95,
1.33846280e-94]) << u.W / (u.m ** 2 * u.nm * u.sr)))
self.assertEqual(self.mirror.calcSignal()[0],
SpectralQty(np.arange(201, 205, 1) << u.nm, np.array([1.20e-15, 1.30e-15, 1.40e-15,
1.35e-15]) << u.W /
(u.m ** 2 * u.nm)))
|
{"hexsha": "1069283408e7b63cf5fe4aa129a7940e1f4c3056", "size": 1144, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/optical_component/test_Mirror.py", "max_stars_repo_name": "LukasK13/ESBO-ETC", "max_stars_repo_head_hexsha": "d1db999f1670f2777c5227d79629d421f03e5393", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/optical_component/test_Mirror.py", "max_issues_repo_name": "LukasK13/ESBO-ETC", "max_issues_repo_head_hexsha": "d1db999f1670f2777c5227d79629d421f03e5393", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/optical_component/test_Mirror.py", "max_forks_repo_name": "LukasK13/ESBO-ETC", "max_forks_repo_head_hexsha": "d1db999f1670f2777c5227d79629d421f03e5393", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.6666666667, "max_line_length": 108, "alphanum_fraction": 0.5638111888, "include": true, "reason": "import numpy,import astropy", "num_tokens": 309}
|
import numpy as np
import data_loader
import decision_tree
###############
# Toy example #
###############
'''
Toy example
dim_1
┃
╋ ○
┃
╋ × ○
┃
╋ ×
┃
━╋━━━╋━━━╋━━━╋━ dim_0
Print the tree and check the result by yourself!
'''
# data
features, labels = data_loader.toy_data_3()
# build the tree
dTree = decision_tree.DecisionTree()
dTree.train(features, labels)
# print
dTree.print_tree()
|
{"hexsha": "a0cf573ab6c8913c32dd00e4ccd113ef80df81f1", "size": 434, "ext": "py", "lang": "Python", "max_stars_repo_path": "Assignment-3/decision_tree_check.py", "max_stars_repo_name": "ZhangShiqiu1993/CSCI-567-machine-learning", "max_stars_repo_head_hexsha": "07144b299aeb9f29c304798549ef2d44fe1f4083", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Assignment-3/decision_tree_check.py", "max_issues_repo_name": "ZhangShiqiu1993/CSCI-567-machine-learning", "max_issues_repo_head_hexsha": "07144b299aeb9f29c304798549ef2d44fe1f4083", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assignment-3/decision_tree_check.py", "max_forks_repo_name": "ZhangShiqiu1993/CSCI-567-machine-learning", "max_forks_repo_head_hexsha": "07144b299aeb9f29c304798549ef2d44fe1f4083", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.1515151515, "max_line_length": 48, "alphanum_fraction": 0.564516129, "include": true, "reason": "import numpy", "num_tokens": 136}
|
import numpy as np
import cv2
from .skeleton import _Skeleton
class Skeleton2D(_Skeleton):
"""
Class to visualise 2D skeletons on neutral background or original RGB.
"""
###########################################################################
# 2D drawing functions
###########################################################################
def draw(self,
keypoints,
img=None,
img_filename=None,
skeleton_sections=_Skeleton.Sections.MAIN
| _Skeleton.Sections.HEAD,
radius=None):
"""
Draw skeleton onto black background or given image.
Parameters
----------
keypoints : numpy array
The keypoints to be drawn
img : numpy array, optional (default is None)
Image to draw on, if None then the image specified by img_filename
is loaded or a black image is generated
img_filename : string, optional (default is None)
If given and img is None then loads the specified file to draw on
skeleton_sections : _Skeleton.Sections flags
Selection of shich sections of the skeleton to draw
radius : int, optional (default is class property radius)
Radius for points to be drawn
Returns
-------
img (h,w,c) with skeleton drawn on.
"""
rounded_keypoints = np.around(keypoints).astype(np.int32)
if img is None:
if img_filename is not None:
img = cv2.imread(img_filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
else:
rounded_keypoints -= np.amin(rounded_keypoints, axis=0) - 10
dims = (np.amax(rounded_keypoints, axis=0) // 10) * 10 + 20
img = np.zeros(tuple(dims[::-1]) + (3, ), dtype=np.uint8)
if radius is None:
radius = self._radius
colours = self.get_colour_dict(skeleton_sections)
for c1, c2 in self.get_bone_list(skeleton_sections):
cv2.line(img, tuple(rounded_keypoints[c1, 0:2]),
tuple(rounded_keypoints[c2, 0:2]), colours[c1],
max(radius // 2, 1))
for i, colour in colours.items():
cv2.circle(img, tuple(rounded_keypoints[i, 0:2]), radius, colour,
-1)
return img
def draw_frame(self,
keypoints,
video_filename,
frame_id,
skeleton_sections=_Skeleton.Sections.MAIN
| _Skeleton.Sections.HEAD,
radius=None):
"""
Shortcut to extract a single frame and draw skeleton onto it.
Extracts a frame from anywhere in the video and draws the skeleton onto
it. Returns image array. This is not efficient for actually showing the
full video but good for just randomly smapling the odd frame from
within the video.
Parameters
----------
keypoints : numpy array
The keypoints to be drawn
video_filename : string
Filename of the video file to extract the frame image from
frame_id : int
ID of the frame to draw
skeleton_sections : _Skeleton.Sections flags
Selection of shich sections of the skeleton to draw
radius : int, optional (default is class property radius)
Radius for points to be drawn
Returns
-------
Frame from the video (h,w,c) with skeleton drawn on.
"""
video_file = cv2.VideoCapture(video_filename)
video_file.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
__, frame = video_file.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.draw(keypoints=keypoints,
img=frame,
skeleton_sections=skeleton_sections,
radius=radius)
return frame
def animate(self,
keypoints,
video_filename=None,
skeleton_sections=_Skeleton.Sections.MAIN
| _Skeleton.Sections.HEAD,
radius=None):
"""
Return a numpy array of all frames with skeletons drawn on.
Parameters
----------
keypoints : numpy array
The keypoints to be drawn
video_filename : string
Filename of the video file
skeleton_sections : _Skeleton.Sections flags
Selection of shich sections of the skeleton to draw
radius : int, optional (default is class property radius)
Radius for points to be drawn
Returns
-------
Sequence of frames (h,w,c) with skeleton drawn on.
"""
if video_filename is not None:
video_file = cv2.VideoCapture(video_filename)
video = []
while (video_file.isOpened()):
ret, frame = video_file.read()
if not ret:
break
video.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
video_file.release()
video = np.array(video)
else:
rounded_keypoints = (np.around(keypoints) -
np.amin(keypoints, axis=0) + 5)
dims = (np.amax(rounded_keypoints, axis=0) // 10) * 10 + 20
video = np.zeros((len(rounded_keypoints), ) + tuple(dims))
for frame_id in range(len(keypoints)):
self.draw(keypoints=keypoints[frame_id],
img=video[frame_id],
skeleton_sections=skeleton_sections,
radius=radius)
return video
|
{"hexsha": "4b8c604adfb74884e4a8a467c291a7cfb3e6195b", "size": 5748, "ext": "py", "lang": "Python", "max_stars_repo_path": "humanpose/visualise/skeleton2d.py", "max_stars_repo_name": "kschlegel/HumanPose", "max_stars_repo_head_hexsha": "2976116bbc276d7c5aa75b3f7f5708284c70d30f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "humanpose/visualise/skeleton2d.py", "max_issues_repo_name": "kschlegel/HumanPose", "max_issues_repo_head_hexsha": "2976116bbc276d7c5aa75b3f7f5708284c70d30f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "humanpose/visualise/skeleton2d.py", "max_forks_repo_name": "kschlegel/HumanPose", "max_forks_repo_head_hexsha": "2976116bbc276d7c5aa75b3f7f5708284c70d30f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8157894737, "max_line_length": 79, "alphanum_fraction": 0.5471468337, "include": true, "reason": "import numpy", "num_tokens": 1152}
|
import os
import numpy as np
from demo_utils import plot_image
import svmbir
"""
This file demonstrates the generation of a 3D microscopy phantom followed by sinogram projection and reconstruction using MBIR.
The phantom, sinogram, and reconstruction are then displayed.
"""
# Simulated image parameters
num_rows = 256
num_cols = 64
num_slices = 33
display_slice = 16 # Display slice at z=-0.0
# Simulated sinogram parameters
num_views = 64
tilt_angle = np.pi/3 # Tilt range of +-60deg
# Reconstruction parameters
sharpness = 2.0
T = 0.25
snr_db = 30.0
p = 1.2
# Multi-resolution works much better for limited and sparse view reconstruction
max_resolutions=2 # Use 2 additional resolutions to do reconstruction
# Display parameters
vmin = 0.0
vmax = 1.1
# Generate phantom
phantom = svmbir.phantom.gen_microscopy_sample_3d(num_rows,num_cols,num_slices)
# Generate the array of view angles
angles = np.linspace(-tilt_angle, tilt_angle, num_views)
# Generate sinogram by projecting phantom
sino = svmbir.project(phantom, angles, max(num_rows, num_cols))
# Determine resulting number of views, slices, and channels
(num_views, num_slices, num_channels) = sino.shape
# Perform MBIR reconstruction
recon = svmbir.recon(sino, angles, num_rows=num_rows, num_cols=num_cols, max_resolutions=max_resolutions, T=T, p=p, sharpness=sharpness, snr_db=snr_db )
# Compute Normalized Root Mean Squared Error
nrmse = svmbir.phantom.nrmse(recon, phantom)
# create output folder
os.makedirs('output', exist_ok=True)
# display phantom
plot_image(phantom[display_slice], title='Shepp Logan Phantom', filename='output/3D_microscopy_phantom.png', vmin=vmin, vmax=vmax)
# display reconstruction
title = f'Slice {display_slice:d} of Reconstruction with NRMSE={nrmse:.3f}.'
plot_image(recon[display_slice], title=title, filename='output/3D_microscopy_recon.png', vmin=vmin, vmax=vmax)
input("press Enter")
|
{"hexsha": "6a0ed8bae09bd5c113982402898cf88e6f11ee2f", "size": 1897, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/demo_3D_microscopy.py", "max_stars_repo_name": "Mohammad-Chowdhury-31/svmbir", "max_stars_repo_head_hexsha": "05665eb2a65b7aa951e26dd3691955e737f16c06", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-09-12T03:14:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T15:34:46.000Z", "max_issues_repo_path": "demo/demo_3D_microscopy.py", "max_issues_repo_name": "Mohammad-Chowdhury-31/svmbir", "max_issues_repo_head_hexsha": "05665eb2a65b7aa951e26dd3691955e737f16c06", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 117, "max_issues_repo_issues_event_min_datetime": "2020-07-24T20:13:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T16:10:05.000Z", "max_forks_repo_path": "demo/demo_3D_microscopy.py", "max_forks_repo_name": "Mohammad-Chowdhury-31/svmbir", "max_forks_repo_head_hexsha": "05665eb2a65b7aa951e26dd3691955e737f16c06", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-07-24T19:38:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-11T13:44:40.000Z", "avg_line_length": 30.1111111111, "max_line_length": 152, "alphanum_fraction": 0.7838692673, "include": true, "reason": "import numpy", "num_tokens": 515}
|
#!/usr/bin/env python
import _init_paths
import os, sys, cv2, json
import math, PIL, cairo
import numpy as np
import pickle, random
import os.path as osp
from time import time
from copy import deepcopy
from glob import glob
import matplotlib.pyplot as plt
from collections import OrderedDict
import torch, torchtext
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from config import get_config
from utils import *
from vocab import Vocabulary
from datasets.vg import vg
from modules.region_grounding_trainer import RegionGroundingTrainer
def test_model(config):
testdb = vg(config, 'test')
trainer = RegionGroundingTrainer(config)
trainer.test(testdb)
if __name__ == '__main__':
cv2.setNumThreads(0)
config, unparsed = get_config()
np.random.seed(config.seed)
random.seed(config.seed)
torch.manual_seed(config.seed)
if(config.cuda):
torch.cuda.manual_seed_all(config.seed)
prepare_directories(config)
test_model(config)
|
{"hexsha": "948e73bf1da640592066b5ecd9a65e2d2c76afe9", "size": 1010, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/eval_region.py", "max_stars_repo_name": "uvavision/DrillDown", "max_stars_repo_head_hexsha": "58fb4f382afda8460a7d0971c45a76d3d0bbe22d", "max_stars_repo_licenses": ["Unlicense", "MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-11-12T13:12:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T07:27:00.000Z", "max_issues_repo_path": "tools/eval_region.py", "max_issues_repo_name": "uvavision/DrillDown", "max_issues_repo_head_hexsha": "58fb4f382afda8460a7d0971c45a76d3d0bbe22d", "max_issues_repo_licenses": ["Unlicense", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/eval_region.py", "max_forks_repo_name": "uvavision/DrillDown", "max_forks_repo_head_hexsha": "58fb4f382afda8460a7d0971c45a76d3d0bbe22d", "max_forks_repo_licenses": ["Unlicense", "MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-11-27T06:43:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T07:27:03.000Z", "avg_line_length": 22.9545454545, "max_line_length": 67, "alphanum_fraction": 0.7663366337, "include": true, "reason": "import numpy", "num_tokens": 228}
|
using IterTools
using ProgressMeter
function find_first_invalid_number(input_numbers, window_length)
@showprogress for (j, i) in enumerate(window_length + 1:length(input_numbers))
input_subset = input_numbers[j:j + window_length - 1]
valid_sums = Set([sum(subset) for subset in subsets(input_subset, 2)])
if !(input_numbers[i] in valid_sums)
return (input_numbers[i], i)
end
end
end
# Invalid number is 507622668
function find_consecutives_that_sum_to_total(input_numbers, bad_number, bad_number_idx)
@showprogress for i in 1:bad_number_idx
j = i + 1
running_sum = 0
sequence = []
while running_sum < bad_number
running_sum += input_numbers[j]
push!(sequence, input_numbers[j])
j += 1
if running_sum == bad_number
println("Sequence: $sequence")
min_seq = minimum(sequence)
max_seq = maximum(sequence)
println("sum of min and max: $(min_seq + max_seq)")
end
end
end
end
function parse_file_to_input(input_file)
input_numbers = open(input_file) do file
file_string = read(file, String)
[parse(Int64, line) for line in split(file_string, "\r\n")]
end
input_numbers
end
function main(input_file, window_length)
input_numbers = parse_file_to_input(input_file)
bad_numbers_and_idx = find_first_invalid_number(input_numbers, window_length)
println("$bad_numbers_and_idx")
end
function part2(input_file)
input_numbers = parse_file_to_input(input_file)
find_consecutives_that_sum_to_total(input_numbers, 507622668, 634)
end
# main("day9_input.txt", 25)
part2("day9_input.txt")
|
{"hexsha": "821529b9d201e3c8a033b80e838c0646da4e12fa", "size": 1751, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Day9/day9_solution.jl", "max_stars_repo_name": "FedericoV/JuliaAdventOfCode2020", "max_stars_repo_head_hexsha": "30426fdee9f32fa15f4dd219462efff3de2d0fc9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Day9/day9_solution.jl", "max_issues_repo_name": "FedericoV/JuliaAdventOfCode2020", "max_issues_repo_head_hexsha": "30426fdee9f32fa15f4dd219462efff3de2d0fc9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Day9/day9_solution.jl", "max_forks_repo_name": "FedericoV/JuliaAdventOfCode2020", "max_forks_repo_head_hexsha": "30426fdee9f32fa15f4dd219462efff3de2d0fc9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1833333333, "max_line_length": 87, "alphanum_fraction": 0.6716162193, "num_tokens": 415}
|
-- ---------------------------------------------------------------- [ Core.idr ]
-- Module : Lightyear.Core
-- Description : Central Definitions and Instances
--
-- This code is distributed under the BSD 2-clause license.
-- See the file LICENSE in the root directory for its full text.
-- --------------------------------------------------------------------- [ EOH ]
module Lightyear.Core
import Data.Fin
import Control.Monad.Trans
import Control.Monad.State
%access export
%default total
||| Parse results
public export
data Result str a =
||| Sucess, returning the remaining string and the parser result
Success str a |
||| Failure, returning a stack trace of errors based on `<?>`
Failure (List (str, String)) -- a stacktrace of errors based on <?> and friends
implementation Functor (Result str) where
map f (Success s x ) = Success s (f x)
map f (Failure es) = Failure es
record ParserT str (m : Type -> Type) a where
constructor PT
runParserT :
(r : Type) ->
(a -> str -> m r) -> -- uncommitted success
(a -> str -> m r) -> -- committed success
(List (str, String) -> m r) -> -- uncommitted error
(List (str, String) -> m r) -> -- committed error
str ->
m r
||| Run a parser monad on some input
execParserT : Monad m => ParserT str m a
-> (input : str)
-> m (Result str a)
execParserT {str} {m} {a} (PT p) input = p (Result str a) success success failure failure input
where success x i = pure $ Success i x
failure = pure . Failure
implementation Monad m => Functor (ParserT str m) where
map {a} {b} f (PT p) = PT $ \r, us, cs => p r (us . f) (cs . f)
implementation Monad m => Applicative (ParserT str m) where
pure x = PT (\r, us, cs, ue, ce => us x)
(<*>) (PT f) (PT g) = PT $ \r, us, cs, ue, ce =>
f r (\f' => g r (us . f') (cs . f') ue ce)
(\f' => g r (cs . f') (cs . f') ce ce)
ue ce
infixl 2 <*>|
||| A variant of <$>, lazy in its second argument, which must NOT be
||| pattern-matched right away because we want to keep it lazy in case
||| it's not used.
(<*>|) : Monad m => ParserT str m (a -> b)
-> Lazy (ParserT str m a)
-> ParserT str m b
(<*>|) (PT f) x = PT $ \r, us, cs, ue, ce =>
f r (\f' => let PT g = x in g r (us . f') (cs . f') ue ce)
(\f' => let PT g = x in g r (cs . f') (cs . f') ce ce)
ue ce
implementation Monad m => Monad (ParserT str m) where
(>>=) (PT x) f = PT $ \r, us, cs, ue, ce =>
x r (\x' => let PT y = f x' in y r us cs ue ce)
(\x' => let PT y = f x' in y r cs cs ce ce)
ue ce
implementation Monad m => MonadTrans (ParserT str) where
lift x = PT $ \r, us, cs, ue, ce, s => (x >>= flip us s)
-- HACK
-- for some reason the MonadState instance does not work with plain lift :(
private
lift' : Monad m => m a -> ParserT str m a
lift' = lift
implementation MonadState s m => MonadState s (ParserT str m) where
get = lift' get
put = lift' . put
||| Fail with some error message
fail : String -> ParserT str m a
fail msg = PT $ \r, us, cs, ue, ce, i => ue [(i, msg)]
implementation Monad m => Alternative (ParserT str m) where
empty = fail "non-empty alternative"
(<|>) (PT x) (PT y) = PT $ \r, us, cs, ue, ce, i =>
x r us cs (\err => y r us cs (ue . (err ++))
(ce . (err ++)) i) ce i
infixl 3 <|>|
||| A variant of <|>, lazy in its second argument, which must NOT be
||| pattern-matched right away because we want to keep it lazy in case
||| it's not used.
(<|>|) : Monad m => ParserT str m a
-> Lazy (ParserT str m a)
-> ParserT str m a
(<|>|) (PT x) y = PT $ \r, us, cs, ue, ce, i =>
x r us cs (\err => let PT y' = y in y' r us cs (ue . (err ++))
(ce . (err ++)) i) ce i
infixl 0 <?>
||| Associate an error with parse failure
(<?>) : Monad m => ParserT str m a -> String -> ParserT str m a
(PT f) <?> msg = PT $ \r, us, cs, ue, ce, i =>
f r us cs (ue . ((i, msg) ::)) (ce . ((i, msg) ::)) i
||| Commit to a parse alternative and prevent backtracking
commitTo : Monad m => ParserT str m a -> ParserT str m a
commitTo (PT f) = PT $ \r, us, cs, ue, ce => f r cs cs ce ce
-- There is no reason that we mark "str" as the determining type
-- other than to aid typeinterface resolution.
--
-- I feel that having this restriction (which is probably okay
-- given that the only streams so far are String and Text anyway)
-- is more acceptable than failing surprisingly
-- any time the unsuspecting user calls "satisfy" without {tok=Char}
-- in an odd context.
--
-- We make "str" the determining type because it's usually fixed
-- by the parser monad you're working in, which helps resolution.
interface Stream tok str | str where
uncons : str -> Maybe (tok, str)
||| Matches a single element that satisfies some condition, accepting
||| a transformation of successes.
satisfyMaybe : (Monad m, Stream tok str)
=> (tok -> Maybe out)
-> ParserT str m out
satisfyMaybe {tok=tok} {str=str} f =
PT $ \r, us, cs, ue, ce, i =>
case uncons {tok=tok} {str=str} i of
Nothing => ue [(i, "a token, not EOF")]
Just (t, i') => case f t of
Nothing => ue [(i, "a different token")]
Just res => us res i'
||| Matches a single element that satisfies some condition.
satisfy : (Monad m, Stream tok str)
=> (tok -> Bool)
-> ParserT str m tok
satisfy p = satisfyMaybe (\t => if p t then Just t else Nothing)
||| Succeeds if and only if the argument parser fails.
|||
||| In Parsec, this combinator is called `notFollowedBy`.
requireFailure : ParserT str m tok -> ParserT str m ()
requireFailure (PT f) = PT $ \r, us, cs, ue, ce, i =>
f r
(\t, s => ue [(i, "argument parser to fail")])
(\t, s => ce [(i, "argument parser to fail")])
(\errs => us () i)
(\errs => cs () i)
i
-- --------------------------------------------------------------------- [ EOF ]
|
{"hexsha": "b6db21ca41f9d47bcf7719e10ff04a5eeb8e2608", "size": 6252, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Lightyear/Core.idr", "max_stars_repo_name": "david-christiansen/lightyear", "max_stars_repo_head_hexsha": "3d4f025a77159af3a2d12c803d783405c9b0a04b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Lightyear/Core.idr", "max_issues_repo_name": "david-christiansen/lightyear", "max_issues_repo_head_hexsha": "3d4f025a77159af3a2d12c803d783405c9b0a04b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Lightyear/Core.idr", "max_forks_repo_name": "david-christiansen/lightyear", "max_forks_repo_head_hexsha": "3d4f025a77159af3a2d12c803d783405c9b0a04b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9940828402, "max_line_length": 95, "alphanum_fraction": 0.5393474088, "num_tokens": 1778}
|
import unittest
import tensorflow as tf
import numpy as np
from DeepQNetwork import DeepQnetwork
from ExperienceReplay import ExperienceReplay
from PreProcessor import PreProcessor
from ResultsRecorder import ResultsRecorder
# Test the functionality of the Deep Q Network
class TestDQN(unittest.TestCase):
# Ensure that only inputs of the correct dimsonality and type are accepted
def test_invalidInput(self):
testNetwork = DeepQnetwork(6, 0.0001)
init = tf.global_variables_initializer()
# Start a tensorflow session so the test network can be examined
with tf.Session() as sess:
# Init the network
sess.run(init)
# Define a three inputs, one valid, two invalid
validInput = np.zeros((105, 80, 4), dtype=float)
wrongSizeInput = np.zeros((210, 105, 3), dtype=float)
wrongTypeInput = np.zeros((105, 80, 4), dtype=str)
# Make assertions on the network output for each of the inputs
self.assertIsNotNone(sess.run(testNetwork.greedyOutput,
feed_dict={testNetwork.stateInput:[validInput]}))
# Should raise ValueError, placeholder is 105 x 80 x 4
with self.assertRaises(ValueError):
sess.run(testNetwork.greedyOutput,
feed_dict={testNetwork.stateInput:[wrongSizeInput]})
# Should raise ValueError, string can not explicitly be converted to float
with self.assertRaises(ValueError):
sess.run(testNetwork.greedyOutput,
feed_dict={testNetwork.stateInput:[wrongTypeInput]})
# Ensure the network has correct dimensonality at all points
def test_networkShape(self):
testNetwork = DeepQnetwork(6, 0.0001)
init = tf.global_variables_initializer()
# Start a tensorflow session so the test network can be examined
with tf.Session() as sess:
# Init the network
sess.run(init)
# Define a valid input
validInput = np.zeros((105, 80, 4), dtype=float)
# Feed the input and gather the output at each layer
networkOutputs = sess.run([testNetwork.conv1, testNetwork.conv2, testNetwork.conv3, testNetwork.dense],
feed_dict={testNetwork.stateInput:[validInput]})
# Check the shape of every layer is as intended
self.assertEqual(networkOutputs[0].shape, (1, 25, 19, 32))
self.assertEqual(networkOutputs[1].shape, (1, 11, 8, 64))
self.assertEqual(networkOutputs[2].shape, (1, 9, 6, 64))
self.assertEqual(networkOutputs[3].shape, (1, 512))
# Ensure that batches of n-size can be passed to the network
def test_inputBatches(self):
testNetwork = DeepQnetwork(6, 0.0001)
init = tf.global_variables_initializer()
# Start a tensorflow session so the test network can be examined
with tf.Session() as sess:
# Init the network
sess.run(init)
# Define some valid batch inputs
smallBatch = np.zeros((3, 105, 80, 4), dtype=float)
bigBatch = np.zeros((32, 105, 80, 4), dtype=float)
# Check the first conv layer to ensure the batch is being carried through
smallOutput = sess.run(testNetwork.conv1,
feed_dict={testNetwork.stateInput:smallBatch})
bigOutput = sess.run(testNetwork.conv1,
feed_dict={testNetwork.stateInput:bigBatch})
# First dimension should be equal to the number of states in a batch
self.assertEqual(smallOutput.shape, (3, 25, 19, 32))
self.assertEqual(bigOutput.shape, (32, 25, 19, 32))
# Ensure the network behaviour is consistent
def test_networkRecognition(self):
testNetwork = DeepQnetwork(50, 0.0001)
init = tf.global_variables_initializer()
# Start a tensorflow session so the test network can be examined
with tf.Session() as sess:
# Init the network
sess.run(init)
# Define a two contrasting inputs
zerosInput = np.zeros((105, 80, 4), dtype=float)
onesInput = np.ones((105, 80, 4), dtype=float)
# Retrieve an estimate from the network for both inputs
zerosOutput = sess.run(testNetwork.greedyOutput,
feed_dict={testNetwork.stateInput:[zerosInput]})
onesOutput = sess.run(testNetwork.greedyOutput,
feed_dict={testNetwork.stateInput:[onesInput]})
# Assert that these two outputs should not be the same
self.assertNotEqual(zerosOutput, onesOutput)
# Test the functionality of the Experience Repaly
class TestExperienceReplay(unittest.TestCase):
# Test whether the replay can initalised to a range of sizes
def test_variableSize(self):
# Declare a range of experience replay buffers
tinyER = ExperienceReplay(1)
midER = ExperienceReplay(30000)
bigER = ExperienceReplay(1000000)
# Check the size of the deques
self.assertNotEqual(len(tinyER.experienceBuffer), 1)
self.assertNotEqual(len(midER.experienceBuffer), 30000)
self.assertNotEqual(len(bigER.experienceBuffer), 1000000)
# Ensure that the deque will not grow above the defined maximum
def test_maxSize(self):
# Declare a couple of experience replay buffers
oneER = ExperienceReplay(1)
fiveER = ExperienceReplay(5)
# Overfill both buffers
dummyData = np.zeros((1,4), dtype=float)
oneER.add(dummyData)
oneER.add(dummyData)
for i in range(0, 8):
fiveER.add(dummyData)
# Compare the actual buffer size to expected max
self.assertEqual(len(oneER.experienceBuffer), 1)
self.assertEqual(len(fiveER.experienceBuffer), 5)
# Ensure that the oldest experiences are overwritten when a new one is added
def test_firstInFirstOut(self):
# Declare an experience replay buffer
testER = ExperienceReplay(4)
# Add incrementing numbers to fill the array
for i in range(0, 4):
testER.add(i)
# Add a 10 to the array and check that the deque shuffles as expected
testER.add(10)
# First position in the deque should be lost, last position should be the new addition
self.assertEqual(testER.experienceBuffer[0], 1)
self.assertEqual(testER.experienceBuffer[1], 2)
self.assertEqual(testER.experienceBuffer[2], 3)
self.assertEqual(testER.experienceBuffer[3], 10)
# Add a 20 to the array and ensure the shuffling continues
testER.add(20)
# First position in the deque should be lost, last position should be the new addition
self.assertEqual(testER.experienceBuffer[0], 2)
self.assertEqual(testER.experienceBuffer[1], 3)
self.assertEqual(testER.experienceBuffer[2], 10)
self.assertEqual(testER.experienceBuffer[3], 20)
# Test that a correct sized batch is returned when requested
def test_sampleBatch(self):
# Declare an experience replay buffer
testER = ExperienceReplay(4)
# Create a sample test experience
s = np.ones((105, 80, 4), float)
a = 1
r = 1
ns = np.zeros((105, 80, 4), float)
testExperience = np.reshape(np.array([s, a, r, ns]), [1,4])
# Fill the buffer with test inputs
for i in range(0, 4):
testER.add(testExperience)
# Request batches of varying sizes
oneBatch = testER.sample(1)
twoBatch = testER.sample(2)
fourBatch = testER.sample(4)
# Assert the shape of the returned batch
self.assertEqual(oneBatch.shape, (1, 4))
self.assertEqual(twoBatch.shape, (2, 4))
self.assertEqual(fourBatch.shape, (4, 4))
# Test that frames are being correctly modified in the preprocessing stage
class TestPreProcessor(unittest.TestCase):
# Greyscale tests
def test_greyscale(self):
# Create a preprocessor object and define a test RGB array (8-bit)
preprocess = PreProcessor()
testObservation = np.random.randint(255, size=(210, 160, 3))
# Use the PP to convert to greyscale
greyTest = preprocess.toGreyScale(testObservation)
# Ensure the dimsonality has reduced
self.assertEqual(greyTest.shape, (210, 160))
# Ensure the greyscale has been applied correctly
expectedGreyValue = int((testObservation[0, 0, 0] + testObservation[0, 0, 1] + testObservation[0, 0, 2]) / 3)
self.assertEqual(expectedGreyValue, greyTest[0, 0])
# Downsample tests
def test_downsample(self):
# Create a preprocessor object and define a test greyscale array (8-bit)
preprocess = PreProcessor()
testGreyFrame = np.random.randint(255, size=(210, 160))
# Use the PP to half the size of the input
halfFrame = preprocess.halfDownsample(testGreyFrame)
# Ensure the frame has reduced in size by half
self.assertEqual(testGreyFrame.shape[0]/2, halfFrame.shape[0])
self.assertEqual(testGreyFrame.shape[1]/2, halfFrame.shape[1])
# Run the DQN Tests
suite = unittest.TestLoader().loadTestsFromTestCase(TestDQN)
unittest.TextTestRunner(verbosity=2).run(suite)
# Run the Experience Replay Tests
suite = unittest.TestLoader().loadTestsFromTestCase(TestExperienceReplay)
unittest.TextTestRunner(verbosity=2).run(suite)
# Run the Preprocessor Tests
suite = unittest.TestLoader().loadTestsFromTestCase(TestPreProcessor)
unittest.TextTestRunner(verbosity=2).run(suite)
print('\nTesting Complete\n')
|
{"hexsha": "081e77281223e7f27adca0cce1afa4be58cb76df", "size": 9943, "ext": "py", "lang": "Python", "max_stars_repo_path": "UnitTests.py", "max_stars_repo_name": "ChristopherHaynes/Atari-2600-Deep-Learning-Agent", "max_stars_repo_head_hexsha": "02ccd83701a4eeb7af160b0ff2cdb258a2338048", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "UnitTests.py", "max_issues_repo_name": "ChristopherHaynes/Atari-2600-Deep-Learning-Agent", "max_issues_repo_head_hexsha": "02ccd83701a4eeb7af160b0ff2cdb258a2338048", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "UnitTests.py", "max_forks_repo_name": "ChristopherHaynes/Atari-2600-Deep-Learning-Agent", "max_forks_repo_head_hexsha": "02ccd83701a4eeb7af160b0ff2cdb258a2338048", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-15T10:47:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T10:47:06.000Z", "avg_line_length": 39.9317269076, "max_line_length": 117, "alphanum_fraction": 0.6445740722, "include": true, "reason": "import numpy", "num_tokens": 2272}
|
module TestFirstOrder2
using ModiaLang
using DifferentialEquations
@usingModiaPlot
using Test
# using RuntimeGeneratedFunctions
# RuntimeGeneratedFunctions.init(@__MODULE__)
inputSignal(t) = sin(t)
FirstOrder1 = Model(
T = 0.2,
x = Var(init=0.3),
equations = :[u = inputSignal(time/u"s"),
T * der(x) + x = u,
y = 2*x]
)
FirstOrder2 = FirstOrder1 | Map(T = 0.3, x = Var(init=0.6))
firstOrder = @instantiateModel(FirstOrder2, logCode=false)
simulate!(firstOrder, Tsit5(), stopTime = 10, merge = Map(T = 0.4, x = 0.9),
log=false, logParameters=true, logStates=true,
requiredFinalStates = [-0.17964872595554535])
# Test get_result(instantiatedModel)
println()
result1 = get_result(firstOrder)
@show(result1[1:10,:])
println()
@show(result1[1:10, ["time", "u", "y"]])
println()
result2 = get_result(firstOrder, onlyStates=true, extraNames=["y"])
@show(result2[1:10,:])
println()
result3 = get_result(firstOrder, extraNames=["y"])
@show(result3[1:10,:])
# Linearize
println("\n... Linearize at stopTime = 0 and 10:")
(A_0 , x_0) = linearize!(firstOrder, analytic = true)
(A_10, x_10) = linearize!(firstOrder, stopTime=10, analytic = true)
(A_10_numeric, x_10_numeric) = linearize!(firstOrder, stopTime=10, analytic=false)
xNames = get_xNames(firstOrder)
@show xNames
@show A_0 , x_0
@show A_10, x_10
@show A_10_numeric, x_10_numeric
@test isapprox(A_0,[-1/0.4])
@test isapprox(A_0, A_10)
plot(result1, [("u", "x"), "der(x)", "y"])
FirstOrder3 = Model(
T = 2u"hr",
x = Var(init=1.0),
equations = :[u = if after(1.5u"hr"); 1.0 else 0.0 end,
T * der(x) + x = u]
)
firstOrder3 = @instantiateModel(FirstOrder3, logCode=false)
simulate!(firstOrder3, Tsit5(), stopTime = 10u"hr")
plot(firstOrder3, [("u", "x"), "der(x)"], figure=2)
end
|
{"hexsha": "3516cc44ce42884b385466805e723a78c0ef4761", "size": 1841, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/TestFirstOrder2.jl", "max_stars_repo_name": "ModiaSim/ModiaLang", "max_stars_repo_head_hexsha": "6f8fc420f86f9af51eb897cfd9d7069c6ccc9659", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-04-19T01:53:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T12:00:12.000Z", "max_issues_repo_path": "test/TestFirstOrder2.jl", "max_issues_repo_name": "ModiaSim/ModiaLang", "max_issues_repo_head_hexsha": "6f8fc420f86f9af51eb897cfd9d7069c6ccc9659", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-07-27T13:47:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T17:48:07.000Z", "max_forks_repo_path": "test/TestFirstOrder2.jl", "max_forks_repo_name": "ModiaSim/ModiaLang", "max_forks_repo_head_hexsha": "6f8fc420f86f9af51eb897cfd9d7069c6ccc9659", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-03T11:39:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T17:20:31.000Z", "avg_line_length": 25.9295774648, "max_line_length": 83, "alphanum_fraction": 0.6550787615, "num_tokens": 604}
|
@testset "benchmark_normals" begin
p, q = synthetic_gradient(SynthSphere(50))
p2, q2 = synthetic_gradient(SynthSphere(51))
error = benchmark_normals(p, q, p2, q2)
@test error ≈ 1.4866545112360603
end
|
{"hexsha": "2a5f9f0b725a9a2f6a9b6147bd388fccc00daddf", "size": 216, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/benchmark_normals.jl", "max_stars_repo_name": "betttris13/ShapeFromShading.jl", "max_stars_repo_head_hexsha": "c486ad60d1675a65aacfe61dc1ef4d308bd534e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/benchmark_normals.jl", "max_issues_repo_name": "betttris13/ShapeFromShading.jl", "max_issues_repo_head_hexsha": "c486ad60d1675a65aacfe61dc1ef4d308bd534e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/benchmark_normals.jl", "max_forks_repo_name": "betttris13/ShapeFromShading.jl", "max_forks_repo_head_hexsha": "c486ad60d1675a65aacfe61dc1ef4d308bd534e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8571428571, "max_line_length": 48, "alphanum_fraction": 0.712962963, "num_tokens": 69}
|
/*******************************************************************************
* ARICPP - ARI interface for C++
* Copyright (C) 2017-2021 Daniele Pallastrelli
*
* This file is part of aricpp.
* For more information, see http://github.com/daniele77/aricpp
*
* Boost Software License - Version 1.0 - August 17th, 2003
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (the "Software") to use, reproduce, display, distribute,
* execute, and transmit the Software, and to prepare derivative works of the
* Software, and to permit third-parties to whom the Software is furnished to
* do so, all subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
******************************************************************************/
#include <iostream>
#include <string>
#include <vector>
#include <boost/program_options.hpp>
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <boost/uuid/uuid_io.hpp>
#include "../include/aricpp/arimodel.h"
#include "../include/aricpp/bridge.h"
#include "../include/aricpp/channel.h"
#include "../include/aricpp/client.h"
using namespace aricpp;
using namespace std;
inline std::string to_string(bool b)
{
return (b ? "true" : "false");
}
int main(int argc, char* argv[])
{
try
{
string host = "localhost";
string port = "8088";
string username = "asterisk";
string password = "asterisk";
string application = "attendant";
bool sipCh = false; // default = pjsip channel
namespace po = boost::program_options;
po::options_description desc("Allowed options");
desc.add_options()
("help,h", "produce help message")
("version,V", "print version string")
("host,H", po::value(&host), ("ip address of the ARI server ["s + host + ']').c_str())
("port,P", po::value(&port), ("port of the ARI server ["s + port + "]").c_str())
("username,u", po::value(&username), ("username of the ARI account on the server ["s + username + "]").c_str())
("password,p", po::value(&password), ("password of the ARI account on the server ["s + password + "]").c_str())
("application,a", po::value(&application), ("stasis application to use ["s + application + "]").c_str())
("sip-channel,S", po::bool_switch(&sipCh), ("use old sip channel instead of pjsip channel ["s + to_string(sipCh) + "]").c_str())
;
po::variables_map vm;
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
if (vm.count("help"))
{
cout << desc << "\n";
return 0;
}
if (vm.count("version"))
{
cout << "This is play_and_record v. 1.0, part of aricpp library\n";
return 0;
}
#if BOOST_VERSION < 106600
using IoContext = boost::asio::io_service;
#else
using IoContext = boost::asio::io_context;
#endif
IoContext ios;
// Register to handle the signals that indicate when the server should exit.
// It is safe to register for the same signal multiple times in a program,
// provided all registration for the specified signal is made through Asio.
boost::asio::signal_set signals(ios);
signals.add(SIGINT);
signals.add(SIGTERM);
#if defined(SIGQUIT)
signals.add(SIGQUIT);
#endif // defined(SIGQUIT)
signals.async_wait(
[&ios](boost::system::error_code /*ec*/, int /*signo*/)
{
cout << "Cleanup and exit application...\n";
ios.stop();
});
Client client(ios, host, port, username, password, application);
AriModel model(client);
shared_ptr<Bridge> bridge;
aricpp::Recording recording;
aricpp::Playback playback;
model.CreateBridge(
[&bridge](unique_ptr<Bridge> newBridge)
{
if (!newBridge) return;
bridge = move(newBridge);
cout << "Bridge created" << endl;
},
Bridge::Type::mixing
);
model.OnStasisStarted(
[&bridge](shared_ptr<Channel> ch, bool external)
{
if (external)
{
cout << "Call answered. Press a digit:\n";
cout << "1 - start play\n";
cout << "2 - stop play\n";
cout << "3 - start recording\n";
cout << "4 - stop recording\n";
cout << "5 - pause recording\n";
cout << "6 - resume recording\n";
ch->Answer();
bridge->Add(*ch, false /* mute */, Bridge::Role::participant);
}
else
{
cerr << "WARNING: should not reach this line" << endl;
}
});
model.OnChannelDtmfReceived(
[&](std::shared_ptr<aricpp::Channel> /*channel*/, const std::string& digit)
{
std::cout << "Received digit " << digit << std::endl;
if (digit == "1")
{
std::cout << "Start playing..." << std::endl;
bridge->Play("sound:tt-monkeys")
.After([&playback](aricpp::Playback p) { playback = p; })
.OnError(
[](aricpp::Error, const string& msg)
{
std::cerr << "Error starting playback of audio file."
<< " Msg: " << msg << std::endl;
});
}
else if (digit == "2")
{
std::cout << "stop playing..." << std::endl;
playback.Stop();
}
else if (digit == "3")
{
std::cout << "start recording..." << std::endl;
auto recordingId = boost::uuids::to_string(boost::uuids::random_generator()());
bridge->Record(recordingId, "wav")
.After([&recording](aricpp::Recording rec) { recording = rec; })
.OnError(
[](aricpp::Error, const std::string& msg)
{
std::cerr << "Error starting recording of audio file."
<< " Msg: " << msg << std::endl;
});
}
else if (digit == "4")
{
std::cout << "Stop recording..." << std::endl;
recording.Stop();
}
else if (digit == "5")
{
std::cout << "Pause recording..." << std::endl;
recording.Pause();
}
else if (digit == "6")
{
std::cout << "Resume recording..." << std::endl;
recording.Resume();
}
else
{
std::cout << "DTMF not allowed: " << digit << std::endl;
}
});
client.Connect(
[](boost::system::error_code e)
{
if (e)
{
cerr << "Connection error: " << e.message() << endl;
}
else
cout << "Connected" << endl;
},
10s /* reconnection seconds */);
ios.run();
}
catch (const exception& e)
{
cerr << "Exception in app: " << e.what() << ". Aborting\n";
return -1;
}
return 0;
}
|
{"hexsha": "33e7c485f55ac6a5d4d1ce9f0d1d2a4144f274b9", "size": 8881, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/play_and_record.cpp", "max_stars_repo_name": "daniele77/aricpp", "max_stars_repo_head_hexsha": "5af798197ff8bf81619c00cb1c21e9576b853dab", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 23.0, "max_stars_repo_stars_event_min_datetime": "2017-11-03T06:36:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:16:31.000Z", "max_issues_repo_path": "examples/play_and_record.cpp", "max_issues_repo_name": "daniele77/aricpp", "max_issues_repo_head_hexsha": "5af798197ff8bf81619c00cb1c21e9576b853dab", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 40.0, "max_issues_repo_issues_event_min_datetime": "2017-03-02T15:25:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T19:32:43.000Z", "max_forks_repo_path": "examples/play_and_record.cpp", "max_forks_repo_name": "daniele77/aricpp", "max_forks_repo_head_hexsha": "5af798197ff8bf81619c00cb1c21e9576b853dab", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2018-11-21T16:05:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-22T05:45:17.000Z", "avg_line_length": 38.2801724138, "max_line_length": 140, "alphanum_fraction": 0.4991555005, "num_tokens": 1847}
|
import os
import sys
import argparse
import torch
import numpy as np
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import models
import utils
from utils import alignment, data, attack
import definitions
parser = argparse.ArgumentParser(description='Aligns two GoogLeNets using cross-correlation of activations.')
parser.add_argument('--dir', type=str, default='model_dicts/paired_models/', metavar='DIR',
help='directory for saving model dicts (default: model_dicts/paired_models/)')
parser.add_argument('--dir2', type=str, default='model_data/paired_models/', metavar='DIR',
help='directory for saving paired model data (default: model_data/paired_models/)')
parser.add_argument('--data_path', type=str, default='data/', metavar='PATH',
help='path to datasets location (default: data/)')
parser.add_argument('--model_path', type=str, default='model_dicts/basic_models/', metavar='PATH',
help='path to models for pairing (default: model_dicts/basic_models/)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true', default=True,
help='switches between validation and test set (default: True)')
parser.add_argument('--transform', type=str, default='TinyTen', metavar='TRANSFORM',
help='transform name (default: TinyTen)')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='input batch size (default: 512)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--epochs', type=int, default=200, metavar='EPOCHS',
help='Number of epochs the models were trained for')
parser.add_argument('--model', type=str, default='GoogLeNet', metavar='MODEL',
help='model name (default: GoogLeNet)')
parser.add_argument('--align_name', type=str, default='corr', metavar='ALIGN',
help='name for alignment type (default: corr)')
parser.add_argument('--adv_flag', type=bool, default=False, metavar='ALIGN',
help='adversarial flag (default:False)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--seed_a', type=int, default=None, metavar='S', help='seed init of model 0 (default: None)')
parser.add_argument('--seed_b', type=int, default=None, metavar='S', help='seed init of model 1 (default: None)')
args = parser.parse_args()
args.dir = ('%s%s/%s/' % (args.dir, args.model, args.dataset))
args.dir2 = ('%s%s/%s/' % (args.dir2, args.model, args.dataset))
args.model_path = ('%s%s/%s/' % (args.model_path, args.model, args.dataset))
project_root = definitions.get_project_root()
os.chdir(project_root)
os.makedirs(args.dir, exist_ok=True)
os.makedirs(args.dir2, exist_ok=True)
print('Arguments')
for arg in vars(args):
print('%s: %s' % (arg, str(getattr(args, arg))))
model_paths = ['%scheckpoint_seed_%02d-%d.pt' % (args.model_path, args.seed_a, args.epochs),
'%scheckpoint_seed_%02d-%d.pt' % (args.model_path, args.seed_b, args.epochs)]
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
shuffle_train=True,
test_batch_size=args.batch_size
)
# Load the curve model
architecture = getattr(models, args.model)
checkpoint = [None] * 2
model = [None] * 2
for i in range(2):
checkpoint[i] = torch.load(model_paths[i], map_location=device)
model[i] = architecture.base(num_classes=num_classes, device=device, **architecture.kwargs)
model[i].load_state_dict(checkpoint[i]['model_state'])
model[i].to(device)
if args.adv_flag:
if args.dataset == 'TINY-IMAGENET-200':
eps = 4.0 / 255
eps_stp_sz = 1.0 / 255
else:
eps = 8.0 / 255
eps_stp_sz = 2.0 / 255
config = {
'epsilon': eps,
'num_steps': 5,
'step_size': eps_stp_sz,
'random_start': True,
'loss_func': 'xent',
}
net0 = attack.AttackPGD(model[0], config, loss_func=utils.googlenet_criterion)
net1 = attack.AttackPGD(model[1], config, loss_func=utils.googlenet_criterion)
net0.to(device)
net1.to(device)
else:
net0 = None
net1 = None
# Align the models
if args.model == 'GoogLeNet':
align_obj = alignment.AlignedModelPairs(model[0], model[1], loaders['align'], adv_flag=args.adv_flag,
net0=net0, net1=net1)
print('Alignment object created.')
align_obj.compute_moments()
print('Moments computed')
align_obj.compute_crosscorr()
print('Cross-correlation matrix computed')
align_obj.compute_match()
print('Match computed')
np.save('%smatch_%s_seeds_%02d_%02d.npy' % (args.dir, args.align_name, args.seed_a, args.seed_b), align_obj.matches)
print('Done')
|
{"hexsha": "e8d8c94d0074de805bdf0e92ba7e8f68caa8a24b", "size": 5394, "ext": "py", "lang": "Python", "max_stars_repo_path": "alignment/align_googlenet.py", "max_stars_repo_name": "IBM/NeuronAlignment", "max_stars_repo_head_hexsha": "5b82b60666db1fac72e53db07529a3328ee549c4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-09T01:23:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T16:56:00.000Z", "max_issues_repo_path": "alignment/align_googlenet.py", "max_issues_repo_name": "IBM/NeuronAlignment", "max_issues_repo_head_hexsha": "5b82b60666db1fac72e53db07529a3328ee549c4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "alignment/align_googlenet.py", "max_forks_repo_name": "IBM/NeuronAlignment", "max_forks_repo_head_hexsha": "5b82b60666db1fac72e53db07529a3328ee549c4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9555555556, "max_line_length": 120, "alphanum_fraction": 0.6713014461, "include": true, "reason": "import numpy", "num_tokens": 1335}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import numpy
from neodroid.utilities.unity_specifications import (
Configuration,
Motion,
Reaction,
ReactionParameters,
) # Motion,; EnvironmentDescription,
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 9/4/19
"""
def verify_configuration_reactions(
*,
input_reactions,
environment_descriptions
#: Mapping[str, EnvironmentDescription]
):
"""
:param input_reactions:
:param environment_descriptions:
:return:
"""
"""
if environment_descriptions:
configurables = next(iter(environment_descriptions.items()))[1].configurables.values()
if configurables:
if isinstance(reaction_input, Reaction):
if reaction_input.configurations:
is_valid_configurations = all(
isinstance(m, Configuration)
for m in reaction_input.configurations
)
if is_valid_configurations:
return reaction_input
else:
reaction_input.motions(
construct_configurations_from_known_observables(
reaction_input.configurations, configurables
)
)
return reaction_input
elif isinstance(reaction_input, list):
is_valid_configurations = all(
isinstance(c, Configuration) for c in reaction_input
)
if is_valid_configurations:
return Reaction(
parameters=parameters, configurations=reaction_input, motions=[]
)
else:
return construct_configuration_reaction_from_list(
reaction_input, configurables
)
elif isinstance(reaction_input, int):
return construct_configuration_reaction_from_list(
[reaction_input], configurables
)
elif isinstance(reaction_input, float):
return construct_configuration_reaction_from_list(
[reaction_input], configurables
)
elif isinstance(reaction_input, (numpy.ndarray, numpy.generic)):
a = construct_configuration_reaction_from_list(
reaction_input.astype(float).tolist(), configurables
)
return a
if isinstance(reaction_input, Reaction):
return reaction_input
return Reaction(parameters=parameters)
"""
if isinstance(input_reactions, Reaction):
return input_reactions
parameters = ReactionParameters(
terminable=False,
step=False,
reset=True,
configure=True,
describe=True,
episode_count=False,
)
outs = []
if environment_descriptions and input_reactions:
if len(input_reactions) is not len(environment_descriptions):
logging.warning(
f"Inputs({len(input_reactions)}) and"
f" environment descriptions({len(environment_descriptions)}) are not the "
f"same length"
)
for input, (env_name, env_desc) in zip(
input_reactions, environment_descriptions.items()
):
configurables = env_desc.configurables.values()
if configurables:
if isinstance(input, Reaction):
is_valid_motions = all(isinstance(m, Motion) for m in input.motions)
if is_valid_motions:
return input
else:
input.motions = construct_configuration_reaction_from_list(
input.motions, configurables
)
return input
elif isinstance(input, list):
is_valid_motions = all(isinstance(m, Motion) for m in input)
if is_valid_motions:
outs.append(
Reaction(
parameters=parameters,
configurations=[],
motions=input,
environment_name=env_name,
)
)
else:
outs.append(
construct_configuration_reaction_from_list(
input, configurables, env_name=env_name
)
)
elif isinstance(input, (int, float)):
outs.append(
construct_configuration_reaction_from_list(
[input], configurables, env_name=env_name
)
)
elif isinstance(input, (numpy.ndarray, numpy.generic)):
a = construct_configuration_reaction_from_list(
input.astype(float).tolist(), configurables, env_name=env_name
)
outs.append(a)
else:
outs.append(Reaction(parameters=parameters, environment_name=env_name))
else:
outs.append(Reaction(parameters=parameters, environment_name="all"))
return outs
def construct_configuration_reaction_from_list(
configuration_list, configurables, env_name="all"
):
"""
@param configuration_list:
@type configuration_list:
@param configurables:
@type configurables:
@param env_name:
@type env_name:
@return:
@rtype:
"""
configurations = construct_configurations_from_known_observables(
configuration_list, configurables
)
parameters = ReactionParameters(
terminable=False,
step=False,
reset=True,
configure=True,
describe=True,
episode_count=False,
)
return Reaction(
parameters=parameters,
configurations=configurations,
motions=[],
environment_name=env_name,
)
def construct_configurations_from_known_observables(input_list, configurables):
"""
@param input_list:
@type input_list:
@param configurables:
@type configurables:
@return:
@rtype:
"""
new_configurations = [
Configuration(configurable.configurable_name, list_val)
for (list_val, configurable) in zip(input_list, configurables)
]
return new_configurations
|
{"hexsha": "f3befada4dd50fb61251b6188608a7c0d25ac730", "size": 6029, "ext": "py", "lang": "Python", "max_stars_repo_path": "neodroid/factories/configuration_reactions.py", "max_stars_repo_name": "sintefneodroid/neo", "max_stars_repo_head_hexsha": "0999f1dff95c4a8c5880a9b3add532d74f38586a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2017-09-13T08:28:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T15:59:14.000Z", "max_issues_repo_path": "neodroid/factories/configuration_reactions.py", "max_issues_repo_name": "sintefneodroid/neo", "max_issues_repo_head_hexsha": "0999f1dff95c4a8c5880a9b3add532d74f38586a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2019-03-25T13:49:43.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-02T13:58:13.000Z", "max_forks_repo_path": "neodroid/factories/configuration_reactions.py", "max_forks_repo_name": "sintefneodroid/neo", "max_forks_repo_head_hexsha": "0999f1dff95c4a8c5880a9b3add532d74f38586a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-09-21T10:14:39.000Z", "max_forks_repo_forks_event_max_datetime": "2017-10-21T09:57:04.000Z", "avg_line_length": 30.2964824121, "max_line_length": 90, "alphanum_fraction": 0.6123735279, "include": true, "reason": "import numpy", "num_tokens": 1099}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 15:59:11 2017
@author: af5u13
"""
# Usage for debugging from raw Python console
#exec(open("/Users/af5u13/dev/visr/src/python/scripts/rsao/reverbObjectBinauralisation.py").read())
#exec(open("/home/andi/dev/visr/src/python/scripts/rsao/reverbObjectBinauralisation.py").read())
import visr
import signalflows
import panning
import pml
import rbbl
import rcl
import rrl
#import objectmodel as om
import audiointerfaces as ai
import h5py
import numpy as np;
import matplotlib.pyplot as plt
import os
import scipy.io.wavfile as wavio
import time
class ReverbToBinaural( visr.CompositeComponent ):
def __init__( self, context, name, parent,
loudspeakerConfig,
numberOfInputs,
rendererOutputs,
interpolationPeriod,
diffusionFilters,
trackingConfiguration,
brirRouting,
brirFilters,
scenePort = 4242,
reverbConfiguration=''):
super(ReverbToBinaural,self).__init__( context, name, parent )
self.coreRenderer = signalflows.BaselineRenderer( ctxt, 'renderer', self,
loudspeakerConfig=loudspeakerConfig,
numberOfInputs=numberOfInputs,
numberOfOutputs=rendererOutputs,
interpolationPeriod=interpolationPeriod,
diffusionFilters=diffusionFilters,
reverbConfig=reverbConfiguration,
sceneReceiverPort=scenePort,
trackingConfiguration=trackingConfiguration
)
numFilters = brirFilters.numberOfRows
firLength = brirFilters.numberOfColumns
numRoutings = brirRouting.size
self.convolver = rcl.FirFilterMatrix( ctxt, 'convolver', self,
numberOfInputs=rendererOutputs,
numberOfOutputs=2,
maxFilters=numFilters,
filterLength=firLength,
maxRoutings=numRoutings,
filters=brirFilters,
routings=brirRouting,
controlInputs=rcl.FirFilterMatrix.ControlPortConfig.NoInputs,
fftImplementation='ffts'
)
self.audioIn = visr.AudioInputFloat( "audioIn", self, numberOfInputs )
self.audioOut = visr.AudioOutputFloat( "audioOut", self, 2 )
self.audioConnection( self.audioIn, self.coreRenderer.audioPort("input"))
self.audioConnection( self.coreRenderer.audioPort("output"),
self.convolver.audioPort("in"))
self.audioConnection( self.convolver.audioPort("out"), self.audioOut )
if len(trackingConfiguration) > 0:
self.posIn = visr.ParameterInput( "posIn", self,
pml.ListenerPosition.staticType,
pml.DoubleBufferingProtocol.staticType,
pml.EmptyParameterConfig() )
self.parameterConnection( self.posIn, self.coreRenderer.parameterPort("trackingPositionInput") )
# Get VISR base directory from rsao subdirectory.
visrBaseDirectory = os.path.normpath(os.path.join( os.getcwd(), '../../../..' )).replace('\\','/')
blockSize = 4096
samplingFrequency = 48000
parameterUpdatePeriod = 4096
numBlocks = 32
signalLength = blockSize * numBlocks
t = 1.0/samplingFrequency * np.arange(0,signalLength)
numObjects = 2;
ctxt = visr.SignalFlowContext( blockSize, samplingFrequency)
lspConfigFile = os.path.join( os.getcwd(), 'bs2051-4+5+0_nosub.xml').replace('\\','/')
# lspConfigFile = os.path.join( visrBaseDirectory, 'config/isvr/audiolab_39speakers_1subwoofer.xml' )
lc = panning.LoudspeakerArray( lspConfigFile )
numOutputChannels = np.max( lc.channelIndices() + lc.subwooferChannelIndices() ) +1
numLoudspeakers = lc.numberOfRegularLoudspeakers
diffFilterFile = os.path.join( visrBaseDirectory, 'config/filters/random_phase_allpass_64ch_512taps.wav')
diffFiltersRaw = np.array(pml.MatrixParameterFloat.fromAudioFile( diffFilterFile ),
dtype = np.float32 )
diffFilters = pml.MatrixParameterFloat( diffFiltersRaw[ np.array(lc.channelIndices() )-1,: ] )
reverbConfigStr = '{ "numReverbObjects": %i, "discreteReflectionsPerObject": 20, "lateReverbFilterLength": 2, "lateReverbDecorrelationFilters": "%s/config/filters/random_phase_allpass_64ch_1024taps.wav" }' % (numObjects, visrBaseDirectory )
#reverbConfigStr = ''
## Load the BBC BRIR dataset
#brirFile = os.path.join( os.getcwd(), 'BBC_BRIR.mat' )
#brirMat = h5py.File( brirFile )
#brirFull = np.array( brirMat['h_sweetspot'], dtype=np.float32 ).copy('C')
## Scalefactor to compensate for the very low amplitudes of the BBC BRIRs
#brirScaleFactor = 500;
#brirFlat = brirScaleFactor * np.concatenate( (brirFull[:,0,0:16384], brirFull[:,1,0:16384] ) )
#brirFilterParam = pml.MatrixParameterFloat( brirFlat, 16 )
#numBrirSpeakers = brirFull.shape[0]
## Define the routing for the binaural convolver such that it matches the organisation of the
## flat BRIR matrix.
#filterRouting = pml.FilterRoutingList()
##for idx in range(0, numBrirSpeakers ):
## filterRouting.addRouting( idx, 0, idx, 1.0 )
## filterRouting.addRouting( idx, 1, idx+numBrirSpeakers, 1.0 )
#activeChannels = lc.channelIndices() # These are zero-offset
#for idx in activeChannels:
# filterRouting.addRouting( idx, 0, idx, 1.0 )
# filterRouting.addRouting( idx, 1, idx+numBrirSpeakers, 1.0 )
brirFile = os.path.join( os.getcwd(), 'bbcrdlr9ch_brirs.wav' )
wavfs, brirRaw = wavio.read( brirFile )
brirFlat = np.asarray(1/32768.0 * brirRaw.T, dtype=np.float32 )
brirFilterParam = pml.MatrixParameterFloat( brirFlat, 16 )
numBrirSpeakers = brirFlat.shape[0]//2
# Define the routing for the binaural convolver such that it matches the organisation of the
# flat BRIR matrix.
filterRouting = rbbl.FilterRoutingList()
for idx in range(0, numBrirSpeakers ):
filterRouting.addRouting( idx, 0, idx, 1.0 )
filterRouting.addRouting( idx, 1, idx+numBrirSpeakers, 1.0 )
renderer = ReverbToBinaural( ctxt, 'top', None,
loudspeakerConfig=lc,
numberOfInputs=numObjects,
rendererOutputs=numOutputChannels,
interpolationPeriod=parameterUpdatePeriod,
diffusionFilters=diffFilters,
trackingConfiguration='',
brirFilters = brirFilterParam,
brirRouting = filterRouting,
reverbConfiguration=reverbConfigStr,
scenePort = 4242
)
print( 'Created renderer.' )
flow = rrl.AudioSignalFlow( renderer )
aiConfig = ai.AudioInterface.Configuration( flow.numberOfCaptureChannels,
flow.numberOfPlaybackChannels,
samplingFrequency,
blockSize )
jackCfg = """{ "clientname": "Reverb2Binaural",
"autoconnect" : "true",
"portconfig":
{
"capture": [{ "basename":"in_", "externalport" : {} }],
"playback": [{ "basename":"out_", "externalport" : {} }]
}
}"""
aIfc = ai.AudioInterfaceFactory.create("Jack", aiConfig, jackCfg)
aIfc.registerCallback( flow )
aIfc.start()
print( "Rendering started." )
## Non-realtime code
#inputSignal = np.zeros( (numObjects, signalLength ), dtype=np.float32 )
## inputSignal[0,:] = 0.75*np.sin( 2.0*np.pi*440 * t )
#inputSignal[ 0, 100 ] = 1
#
#outputSignal = np.zeros( (2, signalLength ), dtype=np.float32 )
#
#start = time.time()
#
#for blockIdx in range(0,numBlocks):
#
# inputBlock = inputSignal[:, blockIdx*blockSize:(blockIdx+1)*blockSize]
# outputBlock = flow.process( inputBlock )
# outputSignal[:, blockIdx*blockSize:(blockIdx+1)*blockSize] = outputBlock
#
#end = time.time()
#print("Time for calculation %f" % ((end-start)/numBlocks) )
#
#
#plt.figure(1)
#plt.plot( t, outputSignal[0,:], 'bo-', t, outputSignal[1,:], 'rx-' )
#plt.show( block = False )
|
{"hexsha": "abc151aa54dd326a2a86e59e32f75d39178dd47e", "size": 8637, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python/scripts/rsao/reverbObjectBinauralisation_simple.py", "max_stars_repo_name": "s3a-spatialaudio/VISR", "max_stars_repo_head_hexsha": "55f6289bc5058d4898106f3520e1a60644ffb3ab", "max_stars_repo_licenses": ["ISC"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2019-03-12T14:52:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T01:16:23.000Z", "max_issues_repo_path": "src/python/scripts/rsao/reverbObjectBinauralisation_simple.py", "max_issues_repo_name": "s3a-spatialaudio/VISR", "max_issues_repo_head_hexsha": "55f6289bc5058d4898106f3520e1a60644ffb3ab", "max_issues_repo_licenses": ["ISC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/scripts/rsao/reverbObjectBinauralisation_simple.py", "max_forks_repo_name": "s3a-spatialaudio/VISR", "max_forks_repo_head_hexsha": "55f6289bc5058d4898106f3520e1a60644ffb3ab", "max_forks_repo_licenses": ["ISC"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-08-11T12:53:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-22T10:08:08.000Z", "avg_line_length": 42.3382352941, "max_line_length": 240, "alphanum_fraction": 0.6143336807, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2011}
|
// Copyright (c) 2016
// Author: Chrono Law
#include <stack>
#include <std.hpp>
using namespace std;
#include <boost/array.hpp>
#include <boost/range.hpp>
using namespace boost;
///////////////////////////////////////
void case1()
{
assert(has_range_iterator<vector<int>>::value);
assert(has_range_iterator<string>::value);
assert(!has_range_iterator<stack<int>>::value);
typedef boost::array<char, 5> array_t;
assert(has_range_iterator<array_t>::value);
typedef pair<int*,int*> pair_t;
assert(has_range_iterator<pair_t>::value);
char a[] = "range";
assert(has_range_iterator<decltype(a)>::value);
assert(!has_range_iterator<char*>::value);
}
///////////////////////////////////////
int main()
{
std::cout << "hello range traits" << std::endl;
case1();
}
|
{"hexsha": "bf6abff173a28bd3124b2d5481661307e4b984c7", "size": 816, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "range/traits.cpp", "max_stars_repo_name": "MaxHonggg/professional_boost", "max_stars_repo_head_hexsha": "6fff73d3b9832644068dc8fe0443be813c7237b4", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 47.0, "max_stars_repo_stars_event_min_datetime": "2016-05-20T08:49:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T01:17:07.000Z", "max_issues_repo_path": "range/traits.cpp", "max_issues_repo_name": "MaxHonggg/professional_boost", "max_issues_repo_head_hexsha": "6fff73d3b9832644068dc8fe0443be813c7237b4", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "range/traits.cpp", "max_forks_repo_name": "MaxHonggg/professional_boost", "max_forks_repo_head_hexsha": "6fff73d3b9832644068dc8fe0443be813c7237b4", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 37.0, "max_forks_repo_forks_event_min_datetime": "2016-07-25T04:52:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T03:55:08.000Z", "avg_line_length": 19.4285714286, "max_line_length": 51, "alphanum_fraction": 0.6066176471, "num_tokens": 186}
|
using AbstractPlotting.PlotUtils, AbstractPlotting.Colors
################################################################################
# Colormap reference #
################################################################################
function colors_svg(cs, w, h)
n = length(cs)
ws = min(w / n, h)
html = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" version="1.1"
width="$(n * ws)mm" height="$(h)mm"
viewBox="0 0 $n 1" preserveAspectRatio="none"
shape-rendering="crispEdges" stroke="none">
"""
for (i, c) in enumerate(cs)
html *= """
<rect width="$(ws)mm" height="$(h)mm" x="$(i-1)" y="0" fill="#$(hex(convert(RGB, c)))" />
"""
end
html *= "</svg>"
return html
end
function generate_colorschemes_table(ks)
extra_dir = get(ENV, "CI", "false") == "true" ? "../" : ""
html = "<head><link type=\"text/css\" rel=\"stylesheet\" href=\"$(extra_dir)../assets/tables.css\" /></head><body><table><tr class=\"headerrow\">"
for header in ["NAME", "Categorical variant", "Continuous variant"]
html *= "<th>$header</th>"
end
html *= "</tr>"
w, h = 70, 5
for k in ks
grad = cgrad(k)
p = color_list(grad)
cg = grad[range(0, 1, length = 100)]
cp = length(p) <= 100 ? p : cg
# cp7 = color_list(palette(k, 7))
html *= "<tr><td class=\"attr\">:$k</td><td>"
html *= colors_svg(cp, w, h)
html *= "</td><td>"
html *= colors_svg(cg, w, h)
# html *= "</td><td>"
# html *= colors_svg(cp7, 35, h)
html *= "</td></tr>"
end
html *= "</table></body>"
return html
end
function generate_colorschemes_markdown(; GENDIR = joinpath(dirname(@__DIR__), "docs", "src", "generated"))
md = open(joinpath(GENDIR, "colors.md"), "w")
for line in readlines(normpath(@__DIR__, "..", "docs", "src", "colors.md"))
write(md, line)
write(md, "\n")
end
write(md, """
## misc
These colorschemes are not defined or provide different colors in ColorSchemes.jl
They are kept for compatibility with the old behaviour of Makie, before v0.10.
""")
write(md, "```@raw html\n")
write(
md,
generate_colorschemes_table(
[:default; sort(collect(keys(PlotUtils.MISC_COLORSCHEMES)))]
)
)
write(md, "\n```\n\nThe following colorschemes are defined by ColorSchemes.jl.\n\n")
for cs in ["cmocean", "scientific", "matplotlib", "colorbrewer", "gnuplot", "colorcet", "seaborn", "general"]
ks = sort([k for (k, v) in PlotUtils.ColorSchemes.colorschemes if occursin(cs, v.category)])
write(md, "\n\n## $cs\n\n```@raw html\n")
write(md, generate_colorschemes_table(ks))
write(md, "\n```\n\n")
end
close(md)
end
|
{"hexsha": "e26849356e74718c5a6b5a627d04aed4016dc097", "size": 3044, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/documentation.jl", "max_stars_repo_name": "pauljurczak/MakieGallery.jl", "max_stars_repo_head_hexsha": "3ba5534180d2314729aeb5a7d2d6a7afa1ffcdff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2018-11-14T10:02:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-25T13:03:17.000Z", "max_issues_repo_path": "src/documentation.jl", "max_issues_repo_name": "pauljurczak/MakieGallery.jl", "max_issues_repo_head_hexsha": "3ba5534180d2314729aeb5a7d2d6a7afa1ffcdff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 186, "max_issues_repo_issues_event_min_datetime": "2018-12-02T13:55:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-12T19:26:12.000Z", "max_forks_repo_path": "src/documentation.jl", "max_forks_repo_name": "pauljurczak/MakieGallery.jl", "max_forks_repo_head_hexsha": "3ba5534180d2314729aeb5a7d2d6a7afa1ffcdff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2018-12-02T10:01:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-12T15:15:18.000Z", "avg_line_length": 34.9885057471, "max_line_length": 150, "alphanum_fraction": 0.5180683311, "num_tokens": 846}
|
#!/usr/bin/env python3
PKG = 'tfg'
import roslib; roslib.load_manifest(PKG)
#import rosbag
import numpy as np
import rospy
from rospy.numpy_msg import numpy_msg
from sensor_msgs.msg import Image
from sensor_msgs.msg import CompressedImage
import os
import cv2
from cv_bridge import CvBridge, CvBridgeError
from utilities import show_images
def get_images():
vidcap = cv2.VideoCapture('2018-03-08-14-30-07_Dataset_year_-A0.h264')
success, image = vidcap.read()
while success:
images.append(image)
success, image = vidcap.read()
# TODO: maybe saving GIGABYTES OF FRAMES IN RAM is not a great idea
# images = []
# bag = rosbag.Bag('2018-03-08-14-30-07_Dataset_year_.bag', "r")
# for topic, msg, t in bag.read_messages(topics=[args.image_topic]):
# img = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
# images.append(img)
# bag.close()
print('Done!')
# return images
def talker():
#bridge = CvBridge()
pub = rospy.Publisher('stream', CompressedImage, queue_size=10)
rospy.init_node('talker',anonymous=True)
r = rospy.Rate(30) # 30hz
vidcap = cv2.VideoCapture('2018-03-08-14-30-07_Dataset_year_-A0.h264')
success, image = vidcap.read()
msg = CompressedImage()
msg.format = "jpeg"
image_index = 0
while not rospy.is_shutdown() and success:
try:
#pub.publish(bridge.cv2_to_imgmsg(image))
image_index += 1
if image_index % 10 == 0:
msg.header.stamp = rospy.Time.now()
msg.data = np.array(cv2.imencode('.jpg', image)[1]).tostring()
pub.publish(msg)
image_index = 0
success, image = vidcap.read()
except CvBridgeError as e:
print(e)
r.sleep()
if __name__ == '__main__':
talker()
|
{"hexsha": "7a04cfb2a2fa45404c66706747d8a78cf8810091", "size": 1858, "ext": "py", "lang": "Python", "max_stars_repo_path": "tfg/src/camera_publisher.py", "max_stars_repo_name": "lccatala/tfg_ros", "max_stars_repo_head_hexsha": "d8da2bc6b1e0036e34460d174e708764a3c6f4ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-30T15:28:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-30T15:28:23.000Z", "max_issues_repo_path": "tfg/src/camera_publisher.py", "max_issues_repo_name": "lccatala/tfg_ros", "max_issues_repo_head_hexsha": "d8da2bc6b1e0036e34460d174e708764a3c6f4ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tfg/src/camera_publisher.py", "max_forks_repo_name": "lccatala/tfg_ros", "max_forks_repo_head_hexsha": "d8da2bc6b1e0036e34460d174e708764a3c6f4ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9677419355, "max_line_length": 78, "alphanum_fraction": 0.638320775, "include": true, "reason": "import numpy", "num_tokens": 499}
|
@testset "Covering" begin
@testset "Rectangle" begin
r = cover(RegularGrid{Float64}(100, 200), RectangleCoverer())
@test r == RectangleRegion((0.,0.), (99.,199.))
r = cover(PointSet([0. 1. 2.; 0. 2. 1.]), RectangleCoverer())
@test r == RectangleRegion((0.,0.), (2.,2.))
end
end
|
{"hexsha": "a4be7a0de6bddcfddb3e61351f358ad148e7a96d", "size": 299, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/covering.jl", "max_stars_repo_name": "briochemc/GeoStatsBase.jl", "max_stars_repo_head_hexsha": "59ce064df9bcdc5c022edd80bd72125c2ca7819d", "max_stars_repo_licenses": ["ISC"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/covering.jl", "max_issues_repo_name": "briochemc/GeoStatsBase.jl", "max_issues_repo_head_hexsha": "59ce064df9bcdc5c022edd80bd72125c2ca7819d", "max_issues_repo_licenses": ["ISC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/covering.jl", "max_forks_repo_name": "briochemc/GeoStatsBase.jl", "max_forks_repo_head_hexsha": "59ce064df9bcdc5c022edd80bd72125c2ca7819d", "max_forks_repo_licenses": ["ISC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9, "max_line_length": 65, "alphanum_fraction": 0.602006689, "num_tokens": 107}
|
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
import numpy
import string, random
import os
import SkinDetector
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", help="path to the video file")
ap.add_argument("-o", "--output", help="path to the video file")
args = vars(ap.parse_args())
print 'setting up camera'
#camera = cv2.VideoCapture(args["input"])
print 'making frames folder'
#frames_folder = 'working_space'
frames_folder = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(20))
os.system('mkdir %s/' % (frames_folder))
##os.system('ffmpeg -i %s -r 1 -f image2 %s/%%08d.jpg' % (args['input'], frames_folder))
os.system('ffmpeg -i %s -f image2 %s/%%08d.jpg' % (args['input'], frames_folder))
os.system('ffmpeg -i %s -f image2 %s/%%08d.jpg' % (args['input'], frames_folder))
grays = []
# initialize the first frame in the video stream
firstFrame = None
avg = None
i = 0
for f in sorted(os.listdir(frames_folder)):
#print i
i += 1
# grab the current frame and initialize the occupied/unoccupied
# text
#(grabbed, frame) = camera.read()
frame = cv2.imread(frames_folder+'/'+f)
blurred = cv2.GaussianBlur(frame, (35, 35), 0)
converted = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)
converted[:,:,0] = cv2.equalizeHist(converted[:,:,0])
converted = cv2.cvtColor(converted, cv2.COLOR_YCR_CB2BGR)
converted = cv2.cvtColor(converted, cv2.COLOR_BGR2HSV)
#skinMask = SkinDetector.process(frame)
lower_thresh = numpy.array([0,5,5], dtype=numpy.uint8)
upper_thresh = numpy.array([180,255,255], dtype=numpy.uint8)
#lower_thresh = numpy.array([0, 50, 0], dtype=numpy.uint8)
#upper_thresh = numpy.array([120, 150, 255], dtype=numpy.uint8)
skinMask = cv2.inRange(converted, lower_thresh, upper_thresh)
# apply a series of erosions and dilations to the mask
# using an elliptical kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
skinMask = cv2.erode(skinMask, kernel, iterations = 2)
skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
(cnts, _) = cv2.findContours(skinMask, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
if len(cnts) > 0:
print 'HAS Contours', i, len(cnts)
temp = np.zeros(frame.shape,np.uint8)
#d = cv2.drawContours(temp,cnts,0,255,-1)
#areas = []
for c in cnts:
#(x, y, w, h) = cv2.boundingRect(c)
#areas.append((w*h, w, h))
#detections.append(((x, y, w, h), i))
#temp = np.zeros(frame.shape,np.uint8)
d = cv2.drawContours(temp,[c],0,255,-1)
x = np.where(temp != 0)
frame[x[:2]] = blurred[x[:2]]
filename = '%08d.jpg' % (i)
cv2.imwrite('%s/%s' % (frames_folder, filename), frame)
ffmpeg_cmd = '%sffmpeg -i %s/%%08d.jpg -y -r 24 -vcodec libx264 -crf 22 -preset ultrafast -b:a 32k -strict -2 %s' % ('./', frames_folder, args["output"])
print ffmpeg_cmd
os.system(ffmpeg_cmd)
#os.system('rm -rf %s' % (frames_folder))
|
{"hexsha": "37c4b52b890f0dd1689107f14e1f5c24995350f7", "size": 3093, "ext": "py", "lang": "Python", "max_stars_repo_path": "redact.py", "max_stars_repo_name": "timothyclemansinsea/bodycamredaction", "max_stars_repo_head_hexsha": "e9917021059aa819be173975076614a2932ca555", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "redact.py", "max_issues_repo_name": "timothyclemansinsea/bodycamredaction", "max_issues_repo_head_hexsha": "e9917021059aa819be173975076614a2932ca555", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "redact.py", "max_forks_repo_name": "timothyclemansinsea/bodycamredaction", "max_forks_repo_head_hexsha": "e9917021059aa819be173975076614a2932ca555", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1688311688, "max_line_length": 153, "alphanum_fraction": 0.666343356, "include": true, "reason": "import numpy", "num_tokens": 928}
|
# Implementation of an elementary cellular automata
# according to https://mathworld.wolfram.com/ElementaryCellularAutomaton.html
# uses random initialization and uses wolfram codes to specify the rule
# Asynchronous update of the 1D lattice
using Agents, Random
using CairoMakie
using InteractiveDynamics
using CSV
"""
The automaton living in a 1D space
"""
mutable struct Cell <: AbstractAgent
id::Int
pos::Dims{1}
status::Int # either 0 or 1, where 1 is 'alive'
end
"""
Returns an array with the rule for the next status configurations, 1-indexed.
Thus, the first position corresponds to the rule for the cell neighborhood status (0,0,0)
and the last to (1,1,1)
"""
function rule_from_code(wolfram_code)
return digits(wolfram_code, base=2, pad=8)
end
"""
Takes the status of a neighborhood and returns the corresponding
index in the model rule (0,0,0) -> 1 (0,1,0) -> 3
"""
function configuration_index(cell_statuses)
# takes the tuples and forms a string (0,1,1) -> "011"
binary_code = string(cell_statuses...)
# turns the string binary code into the integer
index = parse(Int, binary_code, base=2) + 1
return index
end
"""
Given a cell checks its neighbors and
decides the next status of the cell based on the model rule
"""
function next_status(cell, model)
neighbors = collect(nearby_agents(cell, model))
cell_statuses = (c.status for c in [neighbors[1], cell, neighbors[2]])
index = configuration_index(cell_statuses)
return model.rule[index]
end
"""
Initializes the ABM
# Arguments
- `n_cells::Int`: total of cell automaton
- `wolfram_code::Int`: wolfram code for the rule
- `seed::Int`: random seed
"""
function build_model(; n_cells = 100, wolfram_code=30, seed = 30)
space = GridSpace((n_cells,); metric=:chebyshev)
properties = Dict(:rule => rule_from_code(wolfram_code),)
model = ABM(
Cell,
space; properties,
scheduler=Schedulers.randomly,
rng = MersenneTwister(seed))
for x in 1:n_cells
cell = Cell(nextid(model), (x,), rand([0,1]))
add_agent_pos!(cell, model)
end
return model
end
"""
Asynchronous update of the cells
"""
function cell_step!(cell, model)
cell.status = next_status(cell, model)
end
# Initialize model
model = build_model(n_cells=100, wolfram_code=30)
# Runs the model and collects data
data, _ = run!(model, cell_step!, 100; adata=[:status]);
# The data contains the step, id/position and status of the cell (1/0)
data
CSV.write("ca_data_async.csv", data);
# Lets plot the time evolution in the y axis
heatmap(data.id, data.step, data.status, colormap=:Blues_3)
|
{"hexsha": "5a2011b46d71dc764508d48835ceb6fa343e23d7", "size": 2645, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "elementary_ca_async.jl", "max_stars_repo_name": "astenuz/cellular_automata", "max_stars_repo_head_hexsha": "f98815b9678d308a5bd31e70efa92e2fb5309bf3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "elementary_ca_async.jl", "max_issues_repo_name": "astenuz/cellular_automata", "max_issues_repo_head_hexsha": "f98815b9678d308a5bd31e70efa92e2fb5309bf3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "elementary_ca_async.jl", "max_forks_repo_name": "astenuz/cellular_automata", "max_forks_repo_head_hexsha": "f98815b9678d308a5bd31e70efa92e2fb5309bf3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.45, "max_line_length": 90, "alphanum_fraction": 0.7092627599, "num_tokens": 708}
|
import numpy as num
from direct.showbase import DirectObject
from direct.task.Task import Task
from panda3d.core import LVector3f, NodePath, WindowProperties
# from Hardware import HardwareHandler
# from Meshes import Arrow
from Engine.Utils.utils import get_hpr, get_distance
TO_RAD = 0.017453293
TO_DEG = 57.295779513
# class ShuttleFrame(HardwareHandler):
class ShuttleFrame(DirectObject.DirectObject):
"""
The frame linked to the shuttle (where the players are).
"""
def __init__(self, gameEngine):
DirectObject.DirectObject.__init__(self)
# HardwareHandler.__init__(self, gameEngine)
self.gameEngine = gameEngine
self.frame = NodePath("shuttle_frame")
self.frame.reparentTo(self.gameEngine.render)
# u follows the line of sight
self._u = LVector3f(-1.0, 0.0, 0.0)
# n is orthogonal to the camera
self._n = LVector3f(0.0, 0.0, 0.0)
self.reset()
self.mwn = gameEngine.mouseWatcherNode
self.main_task = self.gameEngine.taskMgr.add(self.cam_move_task, 'cam_move_task')
# dynamics
self.boost_time = 0.2
self.update_freq = 60
self.dt = 1 / self.update_freq
self._last_update = 0
self._is_boost = False
self.a = self.gameEngine.params("shuttle_velocity") * self.dt / self.boost_time
self.a_spin = self.gameEngine.params("shuttle_spin_velocity") * self.dt / self.boost_time
self.velocity_mean = self.gameEngine.params("shuttle_velocity")
self.velocity = LVector3f(0, 0, 0)
self.acceleration = LVector3f(0, 0, 0)
self.spinning_velocity = LVector3f(0, 0, 0)
self.spinning_acceleration = LVector3f(0, 0, 0)
self._mvt_tasks = None
def main_window_focus(self):
wp = WindowProperties()
wp.setForeground(True)
self.gameEngine.win.requestProperties(wp)
def reset(self):
self.gameEngine.space_craft.connect_to_shuttle(self.frame)
self.velocity = LVector3f(0, 0, 0)
self.acceleration = LVector3f(0, 0, 0)
self.spinning_velocity = LVector3f(0, 0, 0)
self.spinning_acceleration = LVector3f(0, 0, 0)
self.frame.set_hpr(self.frame.get_hpr())
self.compute_unit_vectors()
def impact(self, t=None):
def shake_task(task, p0, h0):
# if task.time == 0:
## self.boost('tm', play_sound=False, power=50)
# self.boost('dm', play_sound=False, power=100)
if task.time > 5:
# self.boost('tp', play_sound=False, power=5)
# self.boost('pm', play_sound=False, power=5)
# self.boost('tp', play_sound=False)
# self.boost('pm', play_sound=False)
return task.done
else:
# self.frame.set_p(p0 - TO_DEG * num.sin(task.time * 15)/(1 + 5*task.time)**2*0.1)
self.frame.set_p(self.frame.get_p() - TO_DEG * num.sin(task.time * 15) / (1 + 5 * task.time) ** 2 * 0.1)
# self.frame.set_h(h0 + TO_DEG * num.sin(task.time * 10)/(1 + 5*task.time)**2*0.05)
self.frame.set_h(
self.frame.get_h() + TO_DEG * num.sin(task.time * 10) / (1 + 5 * task.time) ** 2 * 0.05)
return task.cont
self.stop(play_sound=False)
task = Task(shake_task)
self.gameEngine.taskMgr.add(task, "shaking", extraArgs=[task, self.frame.get_p(), self.frame.get_h()])
self.gameEngine.sound_manager.play("impact", volume=1.5)
def align_along(self, axis):
d = {'x': LVector3f(1, 0, 0),
"-x": LVector3f(-1, 0, 0),
"y": LVector3f(0, 1, 0),
"-y": LVector3f(0, -1, 0),
"z": LVector3f(0, 0, 1),
"-z": LVector3f(0, 0, -1),
}
if axis in d:
self.stop()
self.look_at(self.frame.get_pos() + d[axis])
def dynamic_goto_hpr(self, hpr, time=5, update_is_moving=True, end_func=None):
self.stop()
if update_is_moving:
self.gameEngine.update_soft_state("is_moving", True)
self._mvt_tasks = self.frame.hprInterval(time, hpr, self.frame.get_hpr(), blendType='easeInOut')
self._mvt_tasks.start()
def end(_):
self.stop(play_sound=False)
if update_is_moving:
self.gameEngine.update_soft_state("is_moving", False)
if hasattr(end_func, '__call__'):
end_func.__call__()
self._boost_sound(max(0.1, time - 1.5))
self.gameEngine.taskMgr.doMethodLater(time, end, name="stabilization_end")
def fake_movement(self, time=0.1):
self.stop(play_sound=False)
# self._mvt_tasks = self.frame.hprInterval(time, self.frame.get_hpr() * 0.999, self.frame.get_hpr(), blendType='easeInOut')
# self._mvt_tasks.start()
def dynamic_look_at(self, target=None, time=5, update_is_moving=True, end_func=NodePath):
self.stop()
v = LVector3f(target if target is not None else (0, 0, 0))
if update_is_moving:
self.gameEngine.update_soft_state("is_moving", True)
self._mvt_tasks = self.frame.hprInterval(time, get_hpr(v - self.frame.get_pos()), self.frame.get_hpr(),
blendType='easeInOut')
self._mvt_tasks.start()
def end(_):
self.stop(play_sound=False)
if update_is_moving:
self.gameEngine.update_soft_state("is_moving", False)
if hasattr(end_func, '__call__'):
end_func.__call__()
self._boost_sound(max(0.1, time - 1.5))
self.gameEngine.taskMgr.doMethodLater(time, end, name="stabilization_end")
def show_shuttle(self):
model = self.gameEngine.loader.load_model("data/models/shuttle.egg")
model.set_bin("fixed", 10)
model.reparentTo(self.frame)
def dynamic_goto(self, target, power=1, t_spin=5.0, end_func=None):
self.gameEngine.update_soft_state("is_moving", True)
self.stop()
v = LVector3f(target if target is not None else (0, 0, 0))
self._mvt_tasks = self.frame.hprInterval(t_spin, get_hpr(v - self.frame.get_pos()), self.frame.get_hpr(),
blendType='easeInOut')
self._boost_sound(max(0.1, t_spin - 1.5))
move_time = get_distance(self.frame.get_pos(), target) / (power * self.velocity_mean)
print("new move. \n\ttime : ", move_time, '\n\tpos :', self.frame.get_pos(), "\n\ttarget :", target,
"\n\tpower :", power, "\n\tmean_v :", self.velocity_mean)
def start(_):
self._mvt_tasks = None
self.compute_unit_vectors()
self.boost("f", power)
def end(_):
self.stop(play_sound=False)
self.gameEngine.update_soft_state("is_moving", False)
if hasattr(end_func, '__call__'):
end_func.__call__()
self._mvt_tasks.start()
self.gameEngine.taskMgr.doMethodLater(t_spin, start, name="goto_start")
self._boost_sound(t_spin + move_time - 1.5)
self.gameEngine.taskMgr.doMethodLater(t_spin + move_time, end, name="goto_end")
def _boost_sound(self, t=0.0):
if t > 0.0:
self.gameEngine.taskMgr.doMethodLater(t, self.gameEngine.sound_manager.play, name="boost_sound",
extraArgs=["boost_new"])
else:
self.gameEngine.sound_manager.play("boost_new")
def boost(self, direction, power=1, play_sound=True):
if not self._is_boost:
self.compute_unit_vectors()
if direction == 'f':
self.acceleration += self._u * self.a * power
elif direction == 'b':
self.acceleration -= self._u * self.a * power
elif direction == 'r':
self.acceleration += self._n * self.a * power
elif direction == 'l':
self.acceleration -= self._n * self.a * power
elif direction == 'tp':
self.spinning_acceleration[0] += self.a_spin * power
elif direction == 'tm':
self.spinning_acceleration[0] -= self.a_spin * power
elif direction == 'pp':
self.spinning_acceleration[1] += self.a_spin * power
elif direction == 'pm':
self.spinning_acceleration[1] -= self.a_spin * power
elif direction == 'dp':
self.spinning_acceleration[2] += self.a_spin * power
elif direction == 'dm':
self.spinning_acceleration[2] -= self.a_spin * power
else:
return
self._is_boost = True
self.gameEngine.taskMgr.doMethodLater(self.boost_time, self._reset_acc, name="end_boost_" + direction)
if play_sound:
self._boost_sound()
def _reset_acc(self, t=None):
self._is_boost = False
self.acceleration = LVector3f(0., 0., 0.)
self.spinning_acceleration = LVector3f(0., 0., 0.)
def set_pos(self, pos):
self.frame.set_pos(pos)
def look_at(self, target):
self.frame.set_hpr(get_hpr(LVector3f(target if target is not None else (0, 0, 0)) - self.frame.get_pos()))
self.compute_unit_vectors()
def stop(self, play_sound=True):
if self._mvt_tasks is not None:
# eventually stops the ongoing tasks
self._mvt_tasks.finish()
self._mvt_tasks = None
self.gameEngine.taskMgr.remove("goto_start")
self.gameEngine.taskMgr.remove("goto_end")
self.velocity = LVector3f(0, 0, 0)
self.spinning_velocity = LVector3f(0, 0, 0)
self.compute_unit_vectors()
if play_sound:
self._boost_sound()
def compute_unit_vectors(self):
h = TO_RAD * self.frame.get_h()
p = TO_RAD * self.frame.get_p()
r = TO_RAD * self.frame.get_r()
self._u = LVector3f(- num.sin(h) * num.cos(p),
num.cos(h) * num.cos(p),
num.sin(p))
self._n = LVector3f(num.cos(h) * num.cos(r),
num.sin(h) * num.cos(r),
- num.sin(r))
def cam_move_task(self, task):
"""
The main task.
"""
# this limits the udpate to 60 fps.
if task.time - self._last_update > self.dt:
# print("update !")
self._last_update = task.time
if self._is_boost:
self.velocity += self.acceleration
self.spinning_velocity += self.spinning_acceleration
self.frame.set_pos(self.frame.get_pos() + self.velocity * self.dt)
if self.spinning_velocity[0] != 0.0 or self.spinning_velocity[1] != 0.0 or self.spinning_velocity[2] != 0.0:
self.frame.set_r(self.frame.get_r() + self.spinning_velocity[2] * self.dt)
self.frame.set_p(self.frame.get_p() + self.spinning_velocity[0] * self.dt)
self.frame.set_h(self.frame.get_h() + self.spinning_velocity[1] * self.dt)
# self.compute_unit_vectors()
return task.cont
|
{"hexsha": "89938a944f2367df1f3b227b4c823a78b53ff160", "size": 11314, "ext": "py", "lang": "Python", "max_stars_repo_path": "Engine/GraphicEngine/Shuttle.py", "max_stars_repo_name": "pdefromont/SpaceBusGame", "max_stars_repo_head_hexsha": "629f6aa58a11756edeb85735a98504d1aadff586", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Engine/GraphicEngine/Shuttle.py", "max_issues_repo_name": "pdefromont/SpaceBusGame", "max_issues_repo_head_hexsha": "629f6aa58a11756edeb85735a98504d1aadff586", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Engine/GraphicEngine/Shuttle.py", "max_forks_repo_name": "pdefromont/SpaceBusGame", "max_forks_repo_head_hexsha": "629f6aa58a11756edeb85735a98504d1aadff586", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.838028169, "max_line_length": 131, "alphanum_fraction": 0.5837016086, "include": true, "reason": "import numpy", "num_tokens": 2853}
|
import numpy as np
import scipy.sparse as sparse
from typing import Any
from torch.utils.checkpoint import checkpoint
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_max
from .. import register_model, BaseModel
from cogdl.utils import mul_edge_softmax, spmm, get_activation
from cogdl.trainers.deepergcn_trainer import DeeperGCNTrainer
class GENConv(nn.Module):
def __init__(self,
in_feat,
out_feat,
aggr="softmax_sg",
beta=1.0,
p=1.0,
learn_beta=False,
learn_p=False,
use_msg_norm=False,
learn_msg_scale=True,
):
super(GENConv, self).__init__()
self.use_msg_norm = use_msg_norm
self.mlp = nn.Linear(in_feat, out_feat)
self.message_encoder = torch.nn.ReLU()
self.aggr = aggr
if aggr == "softmax_sg":
self.beta = torch.nn.Parameter(torch.Tensor([beta, ]), requires_grad=learn_beta)
else:
self.register_buffer("beta", None)
if aggr == "powermean":
self.p = torch.nn.Parameter(torch.Tensor([p, ]), requires_grad=learn_p)
else:
self.register_buffer("p", None)
self.eps = 1e-7
self.s = torch.nn.Parameter(torch.Tensor([1.]), requires_grad=learn_msg_scale)
self.act = nn.ReLU()
def message_norm(self, x, msg):
x_norm = torch.norm(x, dim=1, p=2)
msg_norm = F.normalize(msg, p=2, dim=1)
msg_norm = msg_norm * x_norm.unsqueeze(-1)
return x + self.s * msg_norm
def forward(self, x, edge_index, edge_attr=None):
device = x.device
dim = x.shape[1]
num_nodes = x.shape[0]
edge_msg = x[edge_index[1]] # if edge_attr is None else x[edge_index[1]] + edge_attr
edge_msg = self.act(edge_msg) + self.eps
if self.aggr == "softmax_sg":
h = mul_edge_softmax(
edge_index,
self.beta * edge_msg,
shape=(num_nodes, num_nodes)
)
h = edge_msg * h
elif self.aggr == "softmax":
h = mul_edge_softmax(
edge_index,
edge_msg,
shape=(num_nodes, num_nodes)
)
h = edge_msg * h
elif self.aggr == "powermean":
deg = spmm(
indices=edge_index,
values=torch.ones(edge_index.shape[1]),
b=torch.ones(num_nodes).unsqueeze(-1).to(device)
).view(-1)
h = edge_msg.pow(self.t) / deg[edge_index[0]].unsqueeze(-1)
elif self.aggr == "max":
h, _ = scatter_max(edge_msg, edge_index[0].view(-1, 1).repeat(1, edge_msg.size(1)), dim=0)
else:
raise NotImplementedError
h = torch.zeros_like(x).scatter_add_(
dim=0,
index=edge_index[0].unsqueeze(-1).repeat(1, dim),
src=h
)
if self.aggr == "powermean":
h = h.pow(1. / self.p)
if self.use_msg_norm:
h = self.message_norm(x, h)
h = self.mlp(h)
return h
class DeepGCNLayer(nn.Module):
"""
Implementation of DeeperGCN in paper `"DeeperGCN: All You Need to Train Deeper GCNs"` <https://arxiv.org/abs/2006.07739>
Parameters
-----------
in_feat : int
Size of each input sample
out_feat : int
Size of each output sample
conv : class
Base convolution layer.
connection : str
Residual connection type, `res` or `res+`.
activation : str
dropout : float
checkpoint_grad : bool
"""
def __init__(
self,
in_feat,
out_feat,
conv,
connection="res",
activation="relu",
dropout=0.0,
checkpoint_grad=False,
):
super(DeepGCNLayer, self).__init__()
self.conv = conv
self.activation = get_activation(activation)
self.dropout = dropout
self.connection = connection
self.norm = nn.BatchNorm1d(out_feat, affine=True)
self.checkpoint_grad = checkpoint_grad
def forward(self, x, edge_index):
if self.connection == "res+":
h = self.norm(x)
h = self.activation(h)
h = F.dropout(h, p=self.dropout, training=self.training)
if self.checkpoint_grad:
h = checkpoint(self.conv, h, edge_index)
else:
h = self.conv(h, edge_index)
elif self.connection == "res":
h = self.conv(x, edge_index)
h = self.norm(h)
h = self.activation(h)
else:
raise NotImplementedError
return x + h
@register_model("deepergcn")
class DeeperGCN(BaseModel):
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument("--num-features", type=int)
parser.add_argument("--num-classes", type=int)
parser.add_argument("--num-layers", type=int, default=14)
parser.add_argument("--hidden-size", type=int, default=128)
parser.add_argument("--dropout", type=float, default=0.5)
parser.add_argument("--connection", type=str, default="res+")
parser.add_argument("--activation", type=str, default="relu")
parser.add_argument("--batch-size", type=int, default=1)
parser.add_argument("--cluster-number", type=int, default=10)
parser.add_argument("--aggr", type=str, default="softmax_sg")
parser.add_argument("--beta", type=float, default=1.0)
parser.add_argument("--p", type=float, default=1.0)
parser.add_argument("--learn-beta", action="store_true")
parser.add_argument("--learn-p", action="store_true")
parser.add_argument("--learn-msg-scale", action="store_true")
parser.add_argument("--use-msg-norm", action="store_true")
# fmt: on
"""
ogbn-products:
num_layers: 14
self_loop:
aggr: softmax_sg
beta: 0.1
"""
@classmethod
def build_model_from_args(cls, args):
return cls(
in_feat=args.num_features,
hidden_size=args.hidden_size,
out_feat=args.num_classes,
num_layers=args.num_layers,
connection=args.connection,
activation=args.connection,
dropout=args.dropout,
aggr=args.aggr,
beta=args.beta,
p=args.p,
learn_beta=args.learn_beta,
learn_p=args.learn_p,
learn_msg_scale=args.learn_msg_scale,
use_msg_norm=args.use_msg_norm
)
def __init__(
self,
in_feat,
hidden_size,
out_feat,
num_layers,
connection="res+",
activation="relu",
dropout=.0,
aggr="max",
beta=1.0,
p=1.0,
learn_beta=False,
learn_p=False,
learn_msg_scale=True,
use_msg_norm=False
):
super(DeeperGCN, self).__init__()
self.dropout = dropout
self.feat_encoder = nn.Linear(in_feat, hidden_size)
self.layers = nn.ModuleList()
self.layers.append(GENConv(hidden_size, hidden_size))
for i in range(num_layers - 1):
self.layers.append(
DeepGCNLayer(
in_feat=hidden_size,
out_feat=hidden_size,
conv=GENConv(
in_feat=hidden_size,
out_feat=hidden_size,
aggr=aggr,
beta=beta,
p=p,
learn_beta=learn_beta,
learn_p=learn_p,
use_msg_norm=use_msg_norm,
learn_msg_scale=learn_msg_scale
),
connection=connection,
activation=activation,
dropout=dropout,
checkpoint_grad=(num_layers > 3) and ((i + 1) == num_layers // 2),
)
)
self.norm = nn.BatchNorm1d(hidden_size, affine=True)
self.activation = get_activation(activation)
self.fc = nn.Linear(hidden_size, out_feat)
def forward(self, x, edge_index, edge_attr=None):
h = self.feat_encoder(x)
for layer in self.layers:
h = layer(h, edge_index)
h = self.activation(self.norm(h))
h = F.dropout(h, p=self.dropout, training=self.training)
h = self.fc(h)
return F.log_softmax(h, dim=-1)
def loss(self, x, edge_index, y, x_mask):
pred = self.forward(x, edge_index)[x_mask]
return F.nll_loss(pred, y)
def predict(self, x, edge_index):
return self.forward(x, edge_index)
@staticmethod
def get_trainer(taskType: Any, args):
return DeeperGCNTrainer
|
{"hexsha": "4af3d4bb1190be6fbe80860108d9b52cb30ba440", "size": 9165, "ext": "py", "lang": "Python", "max_stars_repo_path": "cogdl/models/nn/pyg_deepergcn.py", "max_stars_repo_name": "xssstory/cogdl", "max_stars_repo_head_hexsha": "ae8de495c365993f19f04774f083960fd282c2a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-24T10:37:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-24T10:37:20.000Z", "max_issues_repo_path": "cogdl/models/nn/pyg_deepergcn.py", "max_issues_repo_name": "xssstory/cogdl", "max_issues_repo_head_hexsha": "ae8de495c365993f19f04774f083960fd282c2a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cogdl/models/nn/pyg_deepergcn.py", "max_forks_repo_name": "xssstory/cogdl", "max_forks_repo_head_hexsha": "ae8de495c365993f19f04774f083960fd282c2a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5714285714, "max_line_length": 128, "alphanum_fraction": 0.5430441899, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2032}
|
'''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
######
CODE_SIZE = 16
SLICE_SHAPE = [16,16,1,1]
#########
class ShuffleBlock(nn.Module):
def __init__(self, groups=2):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
'''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''
N, C, H, W = x.size()
g = self.groups
return x.view(N, g, C//g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)
class SplitBlock(nn.Module):
def __init__(self, ratio):
super(SplitBlock, self).__init__()
self.ratio = ratio
def forward(self, x):
c = int(x.size(1) * self.ratio)
return x[:, :c, :, :], x[:, c:, :, :]
class BasicBlock(nn.Module):
def __init__(self, in_channels, split_ratio=0.5):
super(BasicBlock, self).__init__()
self.split = SplitBlock(split_ratio)
in_channels = int(in_channels * split_ratio)
self.conv1 = nn.Conv2d(in_channels, in_channels,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv2 = nn.Conv2d(in_channels, in_channels,
kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
self.bn2 = nn.BatchNorm2d(in_channels)
self.conv3 = nn.Conv2d(in_channels, in_channels,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(in_channels)
self.shuffle = ShuffleBlock()
def forward(self, x):
x1, x2 = self.split(x)
out = F.relu(self.bn1(self.conv1(x2)))
out = self.bn2(self.conv2(out))
out = F.relu(self.bn3(self.conv3(out)))
out = torch.cat([x1, out], 1)
out = self.shuffle(out)
return out
class BasicBlockCSG(nn.Module):
def __init__(self, in_channels, split_ratio=0.5, CSG=None):
super(BasicBlockCSG, self).__init__()
self.CSG = CSG
self.split = SplitBlock(split_ratio)
in_channels = int(in_channels * split_ratio)
# self.conv1 = nn.Conv2d(in_channels, in_channels,
# kernel_size=1, bias=False)
self.filter_size1 = 1
self.in_filters1 = in_channels
self.out_filters1 =in_channels
self.num_slices1 = int(np.ceil(self.in_filters1/SLICE_SHAPE[0])*np.ceil(self.out_filters1/SLICE_SHAPE[1]))
self.code1 = torch.nn.Parameter(torch.randn([self.num_slices1]+[CODE_SIZE]))
self.kernel1 = None
self.kernel1_defined = False
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv2 = nn.Conv2d(in_channels, in_channels,
kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
self.bn2 = nn.BatchNorm2d(in_channels)
self.conv3 = nn.Conv2d(in_channels, in_channels,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(in_channels)
self.shuffle = ShuffleBlock()
def forward(self, x):
x1, x2 = self.split(x)
#########################################
## Updating the kernel
self.kernel1 = self.CSG(self.code1)
self.kernel1 = self.kernel1.view(int(np.ceil(self.out_filters1/SLICE_SHAPE[0])*SLICE_SHAPE[0]), int(np.ceil(self.in_filters1/SLICE_SHAPE[1])*SLICE_SHAPE[1]), 1,1)
self.kernel1 = self.kernel1[:self.out_filters1, :self.in_filters1, :self.filter_size1, :self.filter_size1]
self.kernel1_defined = True
# out = F.relu(self.bn1(self.conv1(x2)))
out = F.relu(self.bn1(F.conv2d(x2,self.kernel1,padding=0)))
out = self.bn2(self.conv2(out))
out = F.relu(self.bn3(self.conv3(out)))
out = torch.cat([x1, out], 1)
out = self.shuffle(out)
return out
class DownBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(DownBlock, self).__init__()
mid_channels = out_channels // 2
# left
self.conv1 = nn.Conv2d(in_channels, in_channels,
kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv2 = nn.Conv2d(in_channels, mid_channels,
kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(mid_channels)
# right
self.conv3 = nn.Conv2d(in_channels, mid_channels,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(mid_channels)
self.conv4 = nn.Conv2d(mid_channels, mid_channels,
kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
self.bn4 = nn.BatchNorm2d(mid_channels)
self.conv5 = nn.Conv2d(mid_channels, mid_channels,
kernel_size=1, bias=False)
self.bn5 = nn.BatchNorm2d(mid_channels)
self.shuffle = ShuffleBlock()
def forward(self, x):
# left
out1 = self.bn1(self.conv1(x))
out1 = F.relu(self.bn2(self.conv2(out1)))
# right
out2 = F.relu(self.bn3(self.conv3(x)))
out2 = self.bn4(self.conv4(out2))
out2 = F.relu(self.bn5(self.conv5(out2)))
# concat
out = torch.cat([out1, out2], 1)
out = self.shuffle(out)
return out
class DownBlockCSG(nn.Module):
def __init__(self, in_channels, out_channels, CSG):
super(DownBlockCSG, self).__init__()
mid_channels = out_channels // 2
# left
self.CSG = CSG
self.conv1 = nn.Conv2d(in_channels, in_channels,
kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
# self.conv2 = nn.Conv2d(in_channels, mid_channels,
# kernel_size=1, bias=False)
self.filter_size2 = 1
self.in_filters2 = in_channels
self.out_filters2 =mid_channels
self.num_slices2 = int(np.ceil(self.in_filters2/SLICE_SHAPE[0])*np.ceil(self.out_filters2/SLICE_SHAPE[1]))
self.code2 = torch.nn.Parameter(torch.randn([self.num_slices2]+[CODE_SIZE]))
self.kernel2 = None
self.kernel2_defined = False
self.bn2 = nn.BatchNorm2d(mid_channels)
# right
# self.conv3 = nn.Conv2d(in_channels, mid_channels,
# kernel_size=1, bias=False)
self.filter_size3 = 1
self.in_filters3 = in_channels
self.out_filters3 =mid_channels
self.num_slices3 = int(np.ceil(self.in_filters3/SLICE_SHAPE[0])*np.ceil(self.out_filters3/SLICE_SHAPE[1]))
self.code3 = torch.nn.Parameter(torch.randn([self.num_slices3]+[CODE_SIZE]))
self.kernel3 = None
self.kernel3_defined = False
self.bn3 = nn.BatchNorm2d(mid_channels)
self.conv4 = nn.Conv2d(mid_channels, mid_channels,
kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
self.bn4 = nn.BatchNorm2d(mid_channels)
self.conv5 = nn.Conv2d(mid_channels, mid_channels,
kernel_size=1, bias=False)
self.bn5 = nn.BatchNorm2d(mid_channels)
self.shuffle = ShuffleBlock()
def forward(self, x):
# left
out1 = self.bn1(self.conv1(x))
#########################################
## Updating the kernel
self.kernel2 = self.CSG(self.code2)
self.kernel2 = self.kernel2.view(int(np.ceil(self.out_filters2/SLICE_SHAPE[0])*SLICE_SHAPE[0]), int(np.ceil(self.in_filters2/SLICE_SHAPE[1])*SLICE_SHAPE[1]), 1,1)
self.kernel2 = self.kernel2[:self.out_filters2, :self.in_filters2, :self.filter_size2, :self.filter_size2]
self.kernel2_defined = True
# out1 = F.relu(self.bn2(self.conv2(out1)))
out1 = F.relu(self.bn2(F.conv2d(out1,self.kernel2,padding=0)))
# right
# out2 = F.relu(self.bn3(self.conv3(x)))
#########################################
## Updating the kernel
self.kernel3 = self.CSG(self.code3)
self.kernel3 = self.kernel3.view(int(np.ceil(self.out_filters3/SLICE_SHAPE[0])*SLICE_SHAPE[0]), int(np.ceil(self.in_filters3/SLICE_SHAPE[1])*SLICE_SHAPE[1]), 1,1)
self.kernel3 = self.kernel3[:self.out_filters3, :self.in_filters3, :self.filter_size3, :self.filter_size3]
self.kernel3_defined = True
out2 = F.relu(self.bn3(F.conv2d(x,self.kernel3,padding=0)))
out2 = self.bn4(self.conv4(out2))
out2 = F.relu(self.bn5(self.conv5(out2)))
# concat
out = torch.cat([out1, out2], 1)
out = self.shuffle(out)
return out
class ShuffleNetV2(nn.Module):
def __init__(self, net_size, org =False):
super(ShuffleNetV2, self).__init__()
out_channels = configs[net_size]['out_channels']
num_blocks = configs[net_size]['num_blocks']
self.org = org
if not org:
#############################################
## Here is where the CSG is defined.
self.CSG = torch.nn.Linear(CODE_SIZE,np.prod(SLICE_SHAPE), bias=False)
else:
self.CSG = None
self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(24)
self.in_channels = 24
self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
# self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
# kernel_size=1, stride=1, padding=0, bias=False)
self.filter_size2 = 1
self.in_filters2 = out_channels[2]
self.out_filters2 =out_channels[3]
self.num_slices2 = int(np.ceil(self.in_filters2/SLICE_SHAPE[0])*np.ceil(self.out_filters2/SLICE_SHAPE[1]))
self.code2 = torch.nn.Parameter(torch.randn([self.num_slices2]+[CODE_SIZE]))
self.kernel2 = None
self.kernel2_defined = False
self.bn2 = nn.BatchNorm2d(out_channels[3])
self.linear = nn.Linear(out_channels[3], 10)
def _make_layer(self, out_channels, num_blocks):
layers = [DownBlock(self.in_channels, out_channels)]
for i in range(num_blocks):
layers.append(BasicBlock(out_channels))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
# out = F.max_pool2d(out, 3, stride=2, padding=1)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
#########################################
## Updating the kernel
self.kernel2 = self.CSG(self.code2)
self.kernel2 = self.kernel2.view(int(np.ceil(self.out_filters2/SLICE_SHAPE[0])*SLICE_SHAPE[0]), int(np.ceil(self.in_filters2/SLICE_SHAPE[1])*SLICE_SHAPE[1]), 1,1)
self.kernel2 = self.kernel2[:self.out_filters2, :self.in_filters2, :self.filter_size2, :self.filter_size2]
self.kernel2_defined = True
# out = F.relu(self.bn2(self.conv2(out)))
out = F.relu(self.bn2(F.conv2d(out,self.kernel2,padding=0)))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
configs = {
0.5: {
'out_channels': (48, 96, 192, 1024),
'num_blocks': (3, 7, 3)
},
1: {
'out_channels': (116, 232, 464, 1024),
'num_blocks': (3, 7, 3)
},
1.5: {
'out_channels': (176, 352, 704, 1024),
'num_blocks': (3, 7, 3)
},
2: {
'out_channels': (224, 488, 976, 2048),
'num_blocks': (3, 7, 3)
}
}
def test():
net = ShuffleNetV2(net_size=0.5)
x = torch.randn(3, 3, 32, 32)
y = net(x)
print(y.shape)
# test()
|
{"hexsha": "5262fddd6ae4bbc3185a8daf72b3983dd9726734", "size": 12529, "ext": "py", "lang": "Python", "max_stars_repo_path": "Implementations/CIFAR10/models/shufflenetv2.py", "max_stars_repo_name": "hamedomidvar/associativeconv", "max_stars_repo_head_hexsha": "9930915abd3625871354df676865fc44eb92abf3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-13T01:42:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T01:42:00.000Z", "max_issues_repo_path": "Implementations/CIFAR10/models/shufflenetv2.py", "max_issues_repo_name": "hamedomidvar/associativeconv", "max_issues_repo_head_hexsha": "9930915abd3625871354df676865fc44eb92abf3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Implementations/CIFAR10/models/shufflenetv2.py", "max_forks_repo_name": "hamedomidvar/associativeconv", "max_forks_repo_head_hexsha": "9930915abd3625871354df676865fc44eb92abf3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.85, "max_line_length": 170, "alphanum_fraction": 0.5735493655, "include": true, "reason": "import numpy", "num_tokens": 3228}
|
[STATEMENT]
lemma ucast_s2: "(AND) w 0b00000000000000000000000010000000 = 0
\<Longrightarrow> (((get_S w))::word1) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w AND 128 = 0 \<Longrightarrow> get_S w = 0
[PROOF STEP]
by (simp add: get_S_def)
|
{"llama_tokens": 137, "file": "SPARCv8_SparcModel_MMU_Sparc_Properties", "length": 1}
|
[STATEMENT]
lemma LLs_LLq:
"t1 \<in> atrm \<Longrightarrow> t2 \<in> atrm \<Longrightarrow>
LLs t1 t2 = cnj (LLq t1 t2) (neg (eql t1 t2))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>t1 \<in> atrm; t2 \<in> atrm\<rbrakk> \<Longrightarrow> LLs t1 t2 = cnj (LLq t1 t2) (neg (eql t1 t2))
[PROOF STEP]
by (simp add: LLs_def Ls_def LLq_def)
|
{"llama_tokens": 171, "file": "Syntax_Independent_Logic_Syntax_Arith", "length": 1}
|
"""
compute partial correlation
"""
import numpy
def pcor_from_precision(P,zero_diagonal=1):
# given a precision matrix, compute the partial correlation matrix
# based on wikipedia page: http://en.wikipedia.org/wiki/Partial_correlat
#Using_matrix_inversion
pcor=numpy.zeros(P.shape)
for i in range(P.shape[0]):
for j in range(P.shape[1]):
pcor[i,j]=P[i,j]/numpy.sqrt(P[i,i]*P[j,j])
if zero_diagonal==1 and i==j:
pcor[i,j]=0
return pcor
|
{"hexsha": "7c25f566ee1fd8f0f0909c72c892c0a2fb679839", "size": 512, "ext": "py", "lang": "Python", "max_stars_repo_path": "statistics/pcor_from_precision.py", "max_stars_repo_name": "poldrack/poldracklab-base", "max_stars_repo_head_hexsha": "d4c573aca032b67362fc25252779997dacb4a166", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 52, "max_stars_repo_stars_event_min_datetime": "2015-07-08T12:31:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T17:25:33.000Z", "max_issues_repo_path": "statistics/pcor_from_precision.py", "max_issues_repo_name": "poldracklab/poldracklab-base", "max_issues_repo_head_hexsha": "ec1e18e0f748bc2100aa1494eab256b2061850ed", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2015-07-11T00:57:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-26T21:35:42.000Z", "max_forks_repo_path": "analysis/utils/pcor_from_precision.py", "max_forks_repo_name": "poldrack/fmri-analysis-vm", "max_forks_repo_head_hexsha": "f8949ce7c7e716493f78db3c1a69ee7ba5c7301e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 47, "max_forks_repo_forks_event_min_datetime": "2015-07-08T07:07:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-02T18:47:49.000Z", "avg_line_length": 25.6, "max_line_length": 76, "alphanum_fraction": 0.63671875, "include": true, "reason": "import numpy", "num_tokens": 135}
|
[STATEMENT]
lemma zero_vector_1:
"zero_vector x \<longleftrightarrow> (\<forall>y . x * y = x * bot)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. zero_vector x = (\<forall>y. x * y = x * bot)
[PROOF STEP]
by (metis top_right_mult_increasing zero_vector_def zero_vector_left_zero)
|
{"llama_tokens": 114, "file": "Correctness_Algebras_Boolean_Semirings", "length": 1}
|
using Test
# using Revise
using PolynomialBasis
PB = PolynomialBasis
function allequal(v1,v2)
return all(v1 .≈ v2)
end
function allequal(v1,v2,tol)
np = length(v1)
f = length(v2) == np
return f && all([isapprox(v1[i],v2[i],atol=tol) for i = 1:np])
end
p = [-1.0 1.0]
@test_throws AssertionError PB.tensor_product_points(0,p)
@test_throws AssertionError PB.tensor_product_points(4,p)
tp = PB.tensor_product_points(1,p)
@test allequal(p,tp)
tp = PB.tensor_product_points(2,p)
testtp = [-1.0 -1.0 1.0 1.0
-1.0 1.0 -1.0 1.0]
@test allequal(tp,testtp)
tp = PB.tensor_product_points(3,p)
testtp = [-1.0 -1.0 -1.0 -1.0 1.0 1.0 1.0 1.0
-1.0 -1.0 1.0 1.0 -1.0 -1.0 1.0 1.0
-1.0 1.0 -1.0 1.0 -1.0 1.0 -1.0 1.0]
@test allequal(tp,testtp)
p = [1.0]
tp = PB.tensor_product_points(1,p')
@test allequal(p,tp)
tp = PB.tensor_product_points(2,p')
testtp = [1.0,1.0]
@test allequal(tp,testtp)
tp = PB.tensor_product_points(3,p')
testtp = [1.0;1.0;1.0]
@test allequal(tp,testtp)
basis = PB.LagrangeBasis(0)
@test_throws AssertionError PB.LagrangeTensorProductBasis(0,0)
@test_throws AssertionError PB.LagrangeTensorProductBasis(4,0)
tpb = PB.LagrangeTensorProductBasis(1,0)
@test allequal(tpb(0.0),[1.0])
@test allequal(PB.gradient(tpb,0.0),[0.0])
@test allequal(PB.gradient(tpb,[0.0]),[0.0])
@test_throws AssertionError PB.gradient(tpb,[0.0,0.0])
tpb = PB.LagrangeTensorProductBasis(1,0)
@test allequal(tpb(0.0),[1.0])
@test allequal(PB.gradient(tpb,0.0),[0.0])
@test allequal(PB.gradient(tpb,[0.0]),[0.0])
@test_throws AssertionError PB.gradient(tpb,[0.0,0.0])
tpb = PB.LagrangeTensorProductBasis(1,1)
@test allequal(tpb(-1.0),[1.0,0.0])
@test allequal(tpb(1.0),[0.0,1.0])
@test_throws AssertionError tpb([1.0,2.0])
@test allequal(tpb([1.0]),[0.0,1.0])
@test allequal(PB.gradient(tpb,-1.0),[-0.5,0.5])
@test allequal(PB.gradient(tpb,1.0),[-0.5,0.5])
@test_throws AssertionError PB.gradient(tpb,[1.0,2.0])
@test allequal(PB.gradient(tpb,[1.0]),[-0.5,0.5])
tpb = PB.LagrangeTensorProductBasis(1,2)
@test allequal(tpb(-1.0),[1.0,0.0,0.0])
@test allequal(tpb(0.0),[0.0,1.0,0.0])
@test allequal(tpb(1.0),[0.0,0.0,1.0])
@test_throws AssertionError tpb([1.0,2.0])
@test allequal(tpb([1.0]),[0.0,0.0,1.0])
@test allequal(PB.gradient(tpb,-1.0),[-1.5,2,-0.5])
@test allequal(PB.gradient(tpb,0.0),[-0.5,0,0.5])
@test allequal(PB.gradient(tpb,1.0),[0.5,-2,1.5])
@test_throws AssertionError PB.gradient(tpb,[1.0,2.0])
@test allequal(PB.gradient(tpb,[1.0]),[0.5,-2,1.5])
@test_throws AssertionError PB.LagrangeTensorProductBasis(1,-1)
@test_throws AssertionError PB.LagrangeTensorProductBasis(1,2,start=2)
@test_throws AssertionError PB.LagrangeTensorProductBasis(1,2,stop=-2)
tpb = PB.LagrangeTensorProductBasis(1,2,stop=0.0)
@test allequal(tpb.points,[-1.0 -0.5 0.0])
@test allequal(tpb(-1.0),[1.0,0.0,0.0])
@test allequal(tpb(-0.5),[0.0,1.0,0.0])
@test allequal(tpb(0.0),[0.0,0.0,1.0])
@test_throws AssertionError tpb([1.0,2.0])
@test allequal(tpb([0.0]),[0.0,0.0,1.0])
@test allequal(PB.gradient(tpb,-1.0),[-3.,4,-1])
@test allequal(PB.gradient(tpb,-0.5),[-1.0,0,1])
@test allequal(PB.gradient(tpb,0.0),[1.0,-4,3])
@test_throws AssertionError PB.gradient(tpb,[1.0,2.0])
@test allequal(PB.gradient(tpb,[0.0]),[1.0,-4,3])
function test_basis_on_points(basis::PB.LagrangeTensorProductBasis{2,T,N}) where {T,N}
flag = true
for i in 1:N
vals = zeros(N)
vals[i] = 1.0
p = basis.points[:,i]
flag = flag && basis(p[1],p[2]) ≈ vals
flag = flag && basis(p...) ≈ vals
end
return flag
end
tp2 = PB.LagrangeTensorProductBasis(2,0)
@test allequal(tp2.points,[0.0,0.0])
@test test_basis_on_points(tp2)
@test_throws MethodError PB.gradient(tp2,1.5,1.0,2.0)
@test allequal(PB.gradient(tp2,1,0.0,0.0),[0.0])
@test allequal(PB.gradient(tp2,2,0.0,0.0),[0.0])
@test allequal(PB.gradient(tp2,2,[0.0,0.0]),[0.0])
@test allequal(PB.gradient(tp2,0.0,0.0),[0.0 0.0])
@test allequal(PB.gradient(tp2,[0.0,0.0]),[0.0 0.0])
@test_throws AssertionError PB.gradient(tp2,[0.0])
@test_throws AssertionError PB.gradient(tp2,[0.0,0.0,0.0])
v1 = [1.0,0.0,0.0]
v2 = [0.0,1.0,0.0]
v3 = [0.0,0.0,1.0]
d1 = [-1.5,2.0,-0.5]
d2 = [-0.5,0.0,0.5]
d3 = [0.5,-2.0,1.5]
tp2 = PB.LagrangeTensorProductBasis(2,2)
@test_throws AssertionError PB.gradient(tp2,0,-1,1)
@test_throws AssertionError PB.gradient(tp2,3,-1,1)
@test allequal(PB.gradient(tp2, 1, -1.0, -1.0), kron(d1,v1))
@test allequal(PB.gradient(tp2, 1, -1.0, +0.0), kron(d1,v2))
@test allequal(PB.gradient(tp2, 1, -1.0, +1.0), kron(d1,v3))
@test allequal(PB.gradient(tp2, 1, +0.0, -1.0), kron(d2,v1))
@test allequal(PB.gradient(tp2, 1, +0.0, +0.0), kron(d2,v2))
@test allequal(PB.gradient(tp2, 1, +0.0, +1.0), kron(d2,v3))
@test allequal(PB.gradient(tp2, 1, +1.0, -1.0), kron(d3,v1))
@test allequal(PB.gradient(tp2, 1, +1.0, +0.0), kron(d3,v2))
@test allequal(PB.gradient(tp2, 1, +1.0, +1.0), kron(d3,v3))
@test allequal(PB.gradient(tp2, 1, [+1.0, +1.0]), kron(d3,v3))
@test allequal(PB.gradient(tp2, 2, -1.0, -1.0), kron(v1,d1))
@test allequal(PB.gradient(tp2, 2, -1.0, +0.0), kron(v1,d2))
@test allequal(PB.gradient(tp2, 2, -1.0, +1.0), kron(v1,d3))
@test allequal(PB.gradient(tp2, 2, +0.0, -1.0), kron(v2,d1))
@test allequal(PB.gradient(tp2, 2, +0.0, +0.0), kron(v2,d2))
@test allequal(PB.gradient(tp2, 2, +0.0, +1.0), kron(v2,d3))
@test allequal(PB.gradient(tp2, 2, +1.0, -1.0), kron(v3,d1))
@test allequal(PB.gradient(tp2, 2, +1.0, +0.0), kron(v3,d2))
@test allequal(PB.gradient(tp2, 2, +1.0, +1.0), kron(v3,d3))
@test allequal(PB.gradient(tp2, 2, [+1.0, +1.0]), kron(v3,d3))
@test allequal(PB.gradient(tp2,1.0,1.0),hcat(kron(d3,v3),kron(v3,d3)))
@test_throws AssertionError PB.gradient(tp2,[1.0,1.0,1.0])
@test allequal(PB.gradient(tp2,[1.0,1.0]),hcat(kron(d3,v3),kron(v3,d3)))
function test_basis_on_points(basis::PB.LagrangeTensorProductBasis{3,N}) where {T,N}
flag = true
for i in 1:N
vals = zeros(N)
vals[i] = 1.0
p = basis.points[:,i]
flag = flag && basis(p[1],p[2],p[3]) ≈ vals
flag = flag && basis(p) ≈ vals
end
return flag
end
tp3 = PB.LagrangeTensorProductBasis(3,2)
@test test_basis_on_points(tp3)
@test_throws AssertionError PB.gradient(tp3,0,-1.0,-1.0,-1.0)
@test_throws AssertionError PB.gradient(tp3,4,-1.0,-1.0,-1.0)
@test allequal(PB.gradient(tp3,1,-1.0,-1.0,-1.0),kron(d1,v1,v1))
@test allequal(PB.gradient(tp3,2,-1.0,0.0,-1.0),kron(v1,d2,v1))
@test allequal(PB.gradient(tp3,3,-1.0,0.0,+1.0),kron(v1,v2,d3))
@test_throws AssertionError PB.gradient(tp3,1,[1.0])
@test_throws AssertionError PB.gradient(tp3,1,[1.0,1.0])
@test_throws AssertionError PB.gradient(tp3,1,[1.0,1.0,1.0,1.0])
@test allequal(PB.gradient(tp3,3,[-1.0,-1.0,-1.0]),kron(v1,v1,d1))
@test allequal(PB.gradient(tp3,1.0,-1.0,0.0),hcat(kron(d3,v1,v2),kron(v3,d1,v2),kron(v3,v1,d2)))
@test_throws AssertionError PB.gradient(tp3,[1.0])
@test_throws AssertionError PB.gradient(tp3,[1.0,2.0,1.0,3.0])
@test allequal(PB.gradient(tp3,[1.0,-1.0,0.0]),hcat(kron(d3,v1,v2),kron(v3,d1,v2),kron(v3,v1,d2)))
tp1 = PB.LagrangeTensorProductBasis(1,2)
@test allequal(PB.hessian(tp1,-1),[1,-2,1])
@test allequal(PB.hessian(tp1,0),[1,-2,1])
@test allequal(PB.hessian(tp1,1),[1,-2,1])
@test allequal(PB.hessian(tp1,[-1]),[1,-2,1])
@test allequal(PB.hessian(tp1,[0]),[1,-2,1])
@test allequal(PB.hessian(tp1,[1]),[1,-2,1])
tp2 = PB.LagrangeTensorProductBasis(2,2)
f(x) = x[1]^2 + 2x[1]*x[2] + x[2]^2
coeffs = mapslices(f,Array(tp2.points),dims=1)
@test all([allequal(coeffs*PB.hessian(tp2,tp2.points[:,i]),[2,2,2]) for i = 1:9])
tp3 = PB.LagrangeTensorProductBasis(2,3)
f(x) = x[1]^3 + 2x[1]^2*x[2] + 18.0
tp3h(x) = [6x[1]+4x[2],4x[1],0.0]
coeffs = mapslices(f,Array(tp3.points),dims=1)
p = tp3.points
@test all([allequal(coeffs*PB.hessian(tp3,p[:,i]),tp3h(p[:,i]),1e3eps()) for i = 1:16])
|
{"hexsha": "c7136be3a1ec61eaf634db0e85cd38ce7aa1c979", "size": 7891, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_lagrange_tensor_product_basis.jl", "max_stars_repo_name": "ArjunNarayanan/PolynomialBasis.jl", "max_stars_repo_head_hexsha": "09a3479154639c45a285d559508aa0092dfedad6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_lagrange_tensor_product_basis.jl", "max_issues_repo_name": "ArjunNarayanan/PolynomialBasis.jl", "max_issues_repo_head_hexsha": "09a3479154639c45a285d559508aa0092dfedad6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_lagrange_tensor_product_basis.jl", "max_forks_repo_name": "ArjunNarayanan/PolynomialBasis.jl", "max_forks_repo_head_hexsha": "09a3479154639c45a285d559508aa0092dfedad6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5324074074, "max_line_length": 98, "alphanum_fraction": 0.6532758839, "num_tokens": 3328}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 10:46:09 2019
@author: lwg
"""
# http://www.numpy.org/
import numpy as np
import matplotlib.pyplot as plt
def relu(x):
return np.maximum(0, x)
x = np.arange(-5.0, 5.0, 0.1)
y = relu(x)
plt.plot(x, y)
plt.ylim(-1, 6) # y轴范围
plt.show()
|
{"hexsha": "69cd96e2424ab4db8f1a4ad57ba0a97f796aef76", "size": 296, "ext": "py", "lang": "Python", "max_stars_repo_path": "deeplearning_python/chapter3/ReLU.py", "max_stars_repo_name": "lwg82/DeepLearningPython", "max_stars_repo_head_hexsha": "a36d80a84ff05ea2e7e3cbd5cc868aa2929ebb99", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deeplearning_python/chapter3/ReLU.py", "max_issues_repo_name": "lwg82/DeepLearningPython", "max_issues_repo_head_hexsha": "a36d80a84ff05ea2e7e3cbd5cc868aa2929ebb99", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deeplearning_python/chapter3/ReLU.py", "max_forks_repo_name": "lwg82/DeepLearningPython", "max_forks_repo_head_hexsha": "a36d80a84ff05ea2e7e3cbd5cc868aa2929ebb99", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.3333333333, "max_line_length": 35, "alphanum_fraction": 0.6013513514, "include": true, "reason": "import numpy", "num_tokens": 112}
|
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import cv2
import imageio
import numpy as np
import pathlib
from tensorflow_docs.vis import embed
# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def crop_center_square(frame):
"""Crops a square from the center of a rectangular array."""
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
def pad_to_square(frame):
"""Pads a rectangular array with zeros, so as to make it squared."""
y, x = frame.shape[0:2]
if y > x:
add_x_left = (y - x) // 2
add_x_right = y - x - add_x_left
frame = cv2.copyMakeBorder(
frame, 0, 0, add_x_left, add_x_right, cv2.BORDER_CONSTANT, value=0
)
else:
add_y_up = (x - y) // 2
add_y_down = x - y - add_y_up
frame = cv2.copyMakeBorder(
frame, add_y_down, add_y_up, 0, 0, cv2.BORDER_CONSTANT, value=0
)
return frame
# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def load_and_resize_video(path, resize=(224, 224), resize_type="crop"):
"""Convert video to Numpy array of shape and type expected by i3d model.
The function resizes them to shape
[max_frames, 224, 224, 3], in RGB format, with floating point values in
range [0, 1], as expected by i3d.
"""
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read() # frame is in BGR format
if not ret:
break
if resize_type == "crop":
frame = crop_center_square(frame)
elif resize_type == "pad":
frame = pad_to_square(frame)
else:
return ValueError("Invalid resize_type: " + resize_type)
frame = cv2.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]] # Convert from BGR to RGB
frames.append(frame)
finally:
cap.release()
return np.array(frames).astype("float32") / 255.0
def resample_video(video: np.array, num_frames: int) -> np.array:
""" Resample a video to have num_frames number of frames.
Video must have shape (1, current_num_frames, :, :, :)
if num_frames < current_num_frames, video is downsampled by removing frames
more or less evenly spaced throughout the duration of the video.
if num_frames > current_num_frames, video is upsampled by duplicating frames
more or less evenly spaced throughout the duration of the video.
"""
current_num_frames = video.shape[1]
indices = [(current_num_frames * i) // num_frames for i in range(num_frames)]
return video[:, indices, :, :, :]
def video_acceptable(video_np, min_num_frames_acceptable: int = 128) -> bool:
"""Checks if video has minimum acceptable temporal length"""
num_frames = video_np.shape[1]
if num_frames < min_num_frames_acceptable:
video_path_no_dir = pathlib.Path(video_path).name
print(f"Skipping video {video_path_no_dir}, too few frames: {num_frames}")
return False
return True
# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def to_gif(images):
"""Converts an array of images to gif."""
converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)
imageio.mimsave("./animation.gif", converted_images, fps=25)
return embed.embed_file("./animation.gif")
|
{"hexsha": "1c491ddc1083a7330a21f38ee5180e48205db0d8", "size": 5606, "ext": "py", "lang": "Python", "max_stars_repo_path": "vidbench/data/process.py", "max_stars_repo_name": "melaniebeck/video-classification", "max_stars_repo_head_hexsha": "eeb879605f8265ce28a007d5239f0e85aeed0719", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-11T20:49:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T14:52:42.000Z", "max_issues_repo_path": "vidbench/data/process.py", "max_issues_repo_name": "melaniebeck/video-classification", "max_issues_repo_head_hexsha": "eeb879605f8265ce28a007d5239f0e85aeed0719", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-05T22:59:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-24T19:39:49.000Z", "max_forks_repo_path": "vidbench/data/process.py", "max_forks_repo_name": "isabella232/CML_AMP_Video_Classification", "max_forks_repo_head_hexsha": "145eb44ac70e7669a706d5f67914a7d28fd931fe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-07T18:23:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T18:23:59.000Z", "avg_line_length": 39.7588652482, "max_line_length": 86, "alphanum_fraction": 0.665715305, "include": true, "reason": "import numpy", "num_tokens": 1431}
|
@testset "ModelParameters" begin
@test mP_1.U ≈ 1.1
@test mP_1.μ ≈ 1.2
@test mP_1.β ≈ 1.3
@test mP_1.n ≈ 1.4
end
@testset "SimulationParameters" begin
@test sP_1.n_iω == 1
@test sP_1.n_iν == 2
@test sP_1.shift == false
@test sP_1.tc_type_f == :nothing
@test sP_1.tc_type_b == :nothing
@test sP_1.λc_type == :nothing
@test sP_1.ωsum_type == :common
@test sP_1.λ_rhs == :native
@test sP_1.fullChi == false
@test sP_1.χFillType == LadderDGA.zero_χ_fill
@test sP_1.bosonic_tail_coeffs == [0,1,2,3]
@test sP_1.fermionic_tail_coeffs == [0,1,2,3]
@test sP_1.usable_prct_reduction == 0.1
end
|
{"hexsha": "e3b5e4be18fbdfd2b7bd0cf589fafb3f57cb0387", "size": 653, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Config.jl", "max_stars_repo_name": "Atomtomate/LadderDGA.jl", "max_stars_repo_head_hexsha": "8cd39fe2ae2aa1130bff706171266d3cf2d4c8e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-05-04T12:31:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T10:00:25.000Z", "max_issues_repo_path": "test/Config.jl", "max_issues_repo_name": "Atomtomate/LadderDGA.jl", "max_issues_repo_head_hexsha": "8cd39fe2ae2aa1130bff706171266d3cf2d4c8e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/Config.jl", "max_forks_repo_name": "Atomtomate/LadderDGA.jl", "max_forks_repo_head_hexsha": "8cd39fe2ae2aa1130bff706171266d3cf2d4c8e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-04T12:34:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T12:34:34.000Z", "avg_line_length": 28.3913043478, "max_line_length": 49, "alphanum_fraction": 0.6385911179, "num_tokens": 289}
|
section {* Backwards Compatibility for Version 1 *}
theory CollectionsV1
imports Collections
begin
text {*
This theory defines some stuff to establish (partial) backwards
compatibility with ICF Version 1.
*}
(*
TODO: Dirty hack to workaround a problem that occurs with sublocale here:
When declaring
sublocale poly_map_iteratei < v1_iteratei: map_iteratei \<alpha> invar iteratei
by (rule v1_iteratei_impl)
Any further
interpretation StdMap hm_ops
will fail with
*** exception TYPE raised (line 414 of "type.ML"):
*** Type variable "?'a" has two distinct sorts
*** ?'a::type
*** ?'a::hashable
The problem seems difficult to track down, as it, e.g., does not iccur for
sets.
*)
attribute_setup locale_witness_add = {*
Scan.succeed (Locale.witness_add)
*} "Add witness for locale instantiation. HACK, use
sublocale or interpretation whereever possible!"
subsection {* Iterators *}
text {* We define all the monomorphic iterator locales *}
subsubsection "Set"
locale set_iteratei = finite_set \<alpha> invar for \<alpha> :: "'s \<Rightarrow> 'x set" and invar +
fixes iteratei :: "'s \<Rightarrow> ('x, '\<sigma>) set_iterator"
assumes iteratei_rule: "invar S \<Longrightarrow> set_iterator (iteratei S) (\<alpha> S)"
begin
lemma iteratei_rule_P:
"\<lbrakk>
invar S;
I (\<alpha> S) \<sigma>0;
!!x it \<sigma>. \<lbrakk> c \<sigma>; x \<in> it; it \<subseteq> \<alpha> S; I it \<sigma> \<rbrakk> \<Longrightarrow> I (it - {x}) (f x \<sigma>);
!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>;
!!\<sigma> it. \<lbrakk> it \<subseteq> \<alpha> S; it \<noteq> {}; \<not> c \<sigma>; I it \<sigma> \<rbrakk> \<Longrightarrow> P \<sigma>
\<rbrakk> \<Longrightarrow> P (iteratei S c f \<sigma>0)"
apply (rule set_iterator_rule_P [OF iteratei_rule, of S I \<sigma>0 c f P])
apply simp_all
done
lemma iteratei_rule_insert_P:
"\<lbrakk>
invar S;
I {} \<sigma>0;
!!x it \<sigma>. \<lbrakk> c \<sigma>; x \<in> \<alpha> S - it; it \<subseteq> \<alpha> S; I it \<sigma> \<rbrakk> \<Longrightarrow> I (insert x it) (f x \<sigma>);
!!\<sigma>. I (\<alpha> S) \<sigma> \<Longrightarrow> P \<sigma>;
!!\<sigma> it. \<lbrakk> it \<subseteq> \<alpha> S; it \<noteq> \<alpha> S; \<not> c \<sigma>; I it \<sigma> \<rbrakk> \<Longrightarrow> P \<sigma>
\<rbrakk> \<Longrightarrow> P (iteratei S c f \<sigma>0)"
apply (rule set_iterator_rule_insert_P [OF iteratei_rule, of S I \<sigma>0 c f P])
apply simp_all
done
text {* Versions without break condition. *}
lemma iterate_rule_P:
"\<lbrakk>
invar S;
I (\<alpha> S) \<sigma>0;
!!x it \<sigma>. \<lbrakk> x \<in> it; it \<subseteq> \<alpha> S; I it \<sigma> \<rbrakk> \<Longrightarrow> I (it - {x}) (f x \<sigma>);
!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>
\<rbrakk> \<Longrightarrow> P (iteratei S (\<lambda>_. True) f \<sigma>0)"
apply (rule set_iterator_no_cond_rule_P [OF iteratei_rule, of S I \<sigma>0 f P])
apply simp_all
done
lemma iterate_rule_insert_P:
"\<lbrakk>
invar S;
I {} \<sigma>0;
!!x it \<sigma>. \<lbrakk> x \<in> \<alpha> S - it; it \<subseteq> \<alpha> S; I it \<sigma> \<rbrakk> \<Longrightarrow> I (insert x it) (f x \<sigma>);
!!\<sigma>. I (\<alpha> S) \<sigma> \<Longrightarrow> P \<sigma>
\<rbrakk> \<Longrightarrow> P (iteratei S (\<lambda>_. True) f \<sigma>0)"
apply (rule set_iterator_no_cond_rule_insert_P [OF iteratei_rule, of S I \<sigma>0 f P])
apply simp_all
done
end
lemma set_iteratei_I :
assumes "\<And>s. invar s \<Longrightarrow> set_iterator (iti s) (\<alpha> s)"
shows "set_iteratei \<alpha> invar iti"
proof
fix s
assume invar_s: "invar s"
from assms(1)[OF invar_s] show it_OK: "set_iterator (iti s) (\<alpha> s)" .
from set_iterator_genord.finite_S0 [OF it_OK[unfolded set_iterator_def]]
show "finite (\<alpha> s)" .
qed
locale set_iterateoi = ordered_finite_set \<alpha> invar
for \<alpha> :: "'s \<Rightarrow> ('u::linorder) set" and invar
+
fixes iterateoi :: "'s \<Rightarrow> ('u,'\<sigma>) set_iterator"
assumes iterateoi_rule:
"invar s \<Longrightarrow> set_iterator_linord (iterateoi s) (\<alpha> s)"
begin
lemma iterateoi_rule_P[case_names minv inv0 inv_pres i_complete i_inter]:
assumes MINV: "invar m"
assumes I0: "I (\<alpha> m) \<sigma>0"
assumes IP: "!!k it \<sigma>. \<lbrakk>
c \<sigma>;
k \<in> it;
\<forall>j\<in>it. k\<le>j;
\<forall>j\<in>\<alpha> m - it. j\<le>k;
it \<subseteq> \<alpha> m;
I it \<sigma>
\<rbrakk> \<Longrightarrow> I (it - {k}) (f k \<sigma>)"
assumes IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
assumes II: "!!\<sigma> it. \<lbrakk>
it \<subseteq> \<alpha> m;
it \<noteq> {};
\<not> c \<sigma>;
I it \<sigma>;
\<forall>k\<in>it. \<forall>j\<in>\<alpha> m - it. j\<le>k
\<rbrakk> \<Longrightarrow> P \<sigma>"
shows "P (iterateoi m c f \<sigma>0)"
using set_iterator_linord_rule_P [OF iterateoi_rule, OF MINV, of I \<sigma>0 c f P,
OF I0 _ IF] IP II
by simp
lemma iterateo_rule_P[case_names minv inv0 inv_pres i_complete]:
assumes MINV: "invar m"
assumes I0: "I ((\<alpha> m)) \<sigma>0"
assumes IP: "!!k it \<sigma>. \<lbrakk> k \<in> it; \<forall>j\<in>it. k\<le>j; \<forall>j\<in>(\<alpha> m) - it. j\<le>k; it \<subseteq> (\<alpha> m); I it \<sigma> \<rbrakk>
\<Longrightarrow> I (it - {k}) (f k \<sigma>)"
assumes IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
shows "P (iterateoi m (\<lambda>_. True) f \<sigma>0)"
apply (rule iterateoi_rule_P [where I = I])
apply (simp_all add: assms)
done
end
lemma set_iterateoi_I :
assumes "\<And>s. invar s \<Longrightarrow> set_iterator_linord (itoi s) (\<alpha> s)"
shows "set_iterateoi \<alpha> invar itoi"
proof
fix s
assume invar_s: "invar s"
from assms(1)[OF invar_s] show it_OK: "set_iterator_linord (itoi s) (\<alpha> s)" .
from set_iterator_genord.finite_S0 [OF it_OK[unfolded set_iterator_linord_def]]
show "finite (\<alpha> s)" by simp
qed
(* Deprecated *)
locale set_reverse_iterateoi = ordered_finite_set \<alpha> invar
for \<alpha> :: "'s \<Rightarrow> ('u::linorder) set" and invar
+
fixes reverse_iterateoi :: "'s \<Rightarrow> ('u,'\<sigma>) set_iterator"
assumes reverse_iterateoi_rule: "
invar m \<Longrightarrow> set_iterator_rev_linord (reverse_iterateoi m) (\<alpha> m)"
begin
lemma reverse_iterateoi_rule_P[case_names minv inv0 inv_pres i_complete i_inter]:
assumes MINV: "invar m"
assumes I0: "I ((\<alpha> m)) \<sigma>0"
assumes IP: "!!k it \<sigma>. \<lbrakk>
c \<sigma>;
k \<in> it;
\<forall>j\<in>it. k\<ge>j;
\<forall>j\<in>(\<alpha> m) - it. j\<ge>k;
it \<subseteq> (\<alpha> m);
I it \<sigma>
\<rbrakk> \<Longrightarrow> I (it - {k}) (f k \<sigma>)"
assumes IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
assumes II: "!!\<sigma> it. \<lbrakk>
it \<subseteq> (\<alpha> m);
it \<noteq> {};
\<not> c \<sigma>;
I it \<sigma>;
\<forall>k\<in>it. \<forall>j\<in>(\<alpha> m) - it. j\<ge>k
\<rbrakk> \<Longrightarrow> P \<sigma>"
shows "P (reverse_iterateoi m c f \<sigma>0)"
using set_iterator_rev_linord_rule_P [OF reverse_iterateoi_rule, OF MINV, of I \<sigma>0 c f P,
OF I0 _ IF] IP II
by simp
lemma reverse_iterateo_rule_P[case_names minv inv0 inv_pres i_complete]:
assumes MINV: "invar m"
assumes I0: "I ((\<alpha> m)) \<sigma>0"
assumes IP: "!!k it \<sigma>. \<lbrakk>
k \<in> it;
\<forall>j\<in>it. k\<ge>j;
\<forall>j\<in> (\<alpha> m) - it. j\<ge>k;
it \<subseteq> (\<alpha> m);
I it \<sigma>
\<rbrakk> \<Longrightarrow> I (it - {k}) (f k \<sigma>)"
assumes IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
shows "P (reverse_iterateoi m (\<lambda>_. True) f \<sigma>0)"
apply (rule reverse_iterateoi_rule_P [where I = I])
apply (simp_all add: assms)
done
end
lemma set_reverse_iterateoi_I :
assumes "\<And>s. invar s \<Longrightarrow> set_iterator_rev_linord (itoi s) (\<alpha> s)"
shows "set_reverse_iterateoi \<alpha> invar itoi"
proof
fix s
assume invar_s: "invar s"
from assms(1)[OF invar_s] show it_OK: "set_iterator_rev_linord (itoi s) (\<alpha> s)" .
from set_iterator_genord.finite_S0 [OF it_OK[unfolded set_iterator_rev_linord_def]]
show "finite (\<alpha> s)" by simp
qed
lemma (in poly_set_iteratei) v1_iteratei_impl:
"set_iteratei \<alpha> invar iteratei"
by unfold_locales (rule iteratei_correct)
lemma (in poly_set_iterateoi) v1_iterateoi_impl:
"set_iterateoi \<alpha> invar iterateoi"
by unfold_locales (rule iterateoi_correct)
lemma (in poly_set_rev_iterateoi) v1_reverse_iterateoi_impl:
"set_reverse_iterateoi \<alpha> invar rev_iterateoi"
by unfold_locales (rule rev_iterateoi_correct)
declare (in poly_set_iteratei) v1_iteratei_impl[locale_witness_add]
declare (in poly_set_iterateoi) v1_iterateoi_impl[locale_witness_add]
declare (in poly_set_rev_iterateoi)
v1_reverse_iterateoi_impl[locale_witness_add]
(* Commented out, as it causes strange errors of the kind:
Type variable "?'a" has two distinct sorts
sublocale poly_set_iteratei < v1_iteratei: set_iteratei \<alpha> invar iteratei
by (rule v1_iteratei_impl)
sublocale poly_set_iterateoi < v1_iteratei: set_iterateoi \<alpha> invar iterateoi
by (rule v1_iterateoi_impl)
sublocale poly_set_rev_iterateoi
< v1_iteratei!: set_reverse_iterateoi \<alpha> invar rev_iterateoi
by (rule v1_reverse_iterateoi_impl)
*)
subsubsection "Map"
locale map_iteratei = finite_map \<alpha> invar for \<alpha> :: "'s \<Rightarrow> 'u \<rightharpoonup> 'v" and invar +
fixes iteratei :: "'s \<Rightarrow> ('u \<times> 'v,'\<sigma>) set_iterator"
assumes iteratei_rule: "invar m \<Longrightarrow> map_iterator (iteratei m) (\<alpha> m)"
begin
lemma iteratei_rule_P:
assumes "invar m"
and I0: "I (dom (\<alpha> m)) \<sigma>0"
and IP: "!!k v it \<sigma>. \<lbrakk> c \<sigma>; k \<in> it; \<alpha> m k = Some v; it \<subseteq> dom (\<alpha> m); I it \<sigma> \<rbrakk>
\<Longrightarrow> I (it - {k}) (f (k, v) \<sigma>)"
and IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
and II: "!!\<sigma> it. \<lbrakk> it \<subseteq> dom (\<alpha> m); it \<noteq> {}; \<not> c \<sigma>; I it \<sigma> \<rbrakk> \<Longrightarrow> P \<sigma>"
shows "P (iteratei m c f \<sigma>0)"
using map_iterator_rule_P [OF iteratei_rule, of m I \<sigma>0 c f P]
by (simp_all add: assms)
lemma iteratei_rule_insert_P:
assumes
"invar m"
"I {} \<sigma>0"
"!!k v it \<sigma>. \<lbrakk> c \<sigma>; k \<in> (dom (\<alpha> m) - it); \<alpha> m k = Some v; it \<subseteq> dom (\<alpha> m); I it \<sigma> \<rbrakk>
\<Longrightarrow> I (insert k it) (f (k, v) \<sigma>)"
"!!\<sigma>. I (dom (\<alpha> m)) \<sigma> \<Longrightarrow> P \<sigma>"
"!!\<sigma> it. \<lbrakk> it \<subseteq> dom (\<alpha> m); it \<noteq> dom (\<alpha> m);
\<not> (c \<sigma>);
I it \<sigma> \<rbrakk> \<Longrightarrow> P \<sigma>"
shows "P (iteratei m c f \<sigma>0)"
using map_iterator_rule_insert_P [OF iteratei_rule, of m I \<sigma>0 c f P]
by (simp_all add: assms)
lemma iterate_rule_P:
"\<lbrakk>
invar m;
I (dom (\<alpha> m)) \<sigma>0;
!!k v it \<sigma>. \<lbrakk> k \<in> it; \<alpha> m k = Some v; it \<subseteq> dom (\<alpha> m); I it \<sigma> \<rbrakk>
\<Longrightarrow> I (it - {k}) (f (k, v) \<sigma>);
!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>
\<rbrakk> \<Longrightarrow> P (iteratei m (\<lambda>_. True) f \<sigma>0)"
using iteratei_rule_P [of m I \<sigma>0 "\<lambda>_. True" f P]
by fast
lemma iterate_rule_insert_P:
"\<lbrakk>
invar m;
I {} \<sigma>0;
!!k v it \<sigma>. \<lbrakk> k \<in> (dom (\<alpha> m) - it); \<alpha> m k = Some v; it \<subseteq> dom (\<alpha> m); I it \<sigma> \<rbrakk>
\<Longrightarrow> I (insert k it) (f (k, v) \<sigma>);
!!\<sigma>. I (dom (\<alpha> m)) \<sigma> \<Longrightarrow> P \<sigma>
\<rbrakk> \<Longrightarrow> P (iteratei m (\<lambda>_. True) f \<sigma>0)"
using iteratei_rule_insert_P [of m I \<sigma>0 "\<lambda>_. True" f P]
by fast
end
lemma map_iteratei_I :
assumes "\<And>m. invar m \<Longrightarrow> map_iterator (iti m) (\<alpha> m)"
shows "map_iteratei \<alpha> invar iti"
proof
fix m
assume invar_m: "invar m"
from assms(1)[OF invar_m] show it_OK: "map_iterator (iti m) (\<alpha> m)" .
from set_iterator_genord.finite_S0 [OF it_OK[unfolded set_iterator_def]]
show "finite (dom (\<alpha> m))" by (simp add: finite_map_to_set)
qed
locale map_iterateoi = ordered_finite_map \<alpha> invar
for \<alpha> :: "'s \<Rightarrow> ('u::linorder) \<rightharpoonup> 'v" and invar
+
fixes iterateoi :: "'s \<Rightarrow> ('u \<times> 'v,'\<sigma>) set_iterator"
assumes iterateoi_rule: "
invar m \<Longrightarrow> map_iterator_linord (iterateoi m) (\<alpha> m)"
begin
lemma iterateoi_rule_P[case_names minv inv0 inv_pres i_complete i_inter]:
assumes MINV: "invar m"
assumes I0: "I (dom (\<alpha> m)) \<sigma>0"
assumes IP: "!!k v it \<sigma>. \<lbrakk>
c \<sigma>;
k \<in> it;
\<forall>j\<in>it. k\<le>j;
\<forall>j\<in>dom (\<alpha> m) - it. j\<le>k;
\<alpha> m k = Some v;
it \<subseteq> dom (\<alpha> m);
I it \<sigma>
\<rbrakk> \<Longrightarrow> I (it - {k}) (f (k, v) \<sigma>)"
assumes IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
assumes II: "!!\<sigma> it. \<lbrakk>
it \<subseteq> dom (\<alpha> m);
it \<noteq> {};
\<not> c \<sigma>;
I it \<sigma>;
\<forall>k\<in>it. \<forall>j\<in>dom (\<alpha> m) - it. j\<le>k
\<rbrakk> \<Longrightarrow> P \<sigma>"
shows "P (iterateoi m c f \<sigma>0)"
using map_iterator_linord_rule_P [OF iterateoi_rule, of m I \<sigma>0 c f P] assms
by simp
lemma iterateo_rule_P[case_names minv inv0 inv_pres i_complete]:
assumes MINV: "invar m"
assumes I0: "I (dom (\<alpha> m)) \<sigma>0"
assumes IP: "!!k v it \<sigma>. \<lbrakk> k \<in> it; \<forall>j\<in>it. k\<le>j; \<forall>j\<in>dom (\<alpha> m) - it. j\<le>k; \<alpha> m k = Some v; it \<subseteq> dom (\<alpha> m); I it \<sigma> \<rbrakk>
\<Longrightarrow> I (it - {k}) (f (k, v) \<sigma>)"
assumes IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
shows "P (iterateoi m (\<lambda>_. True) f \<sigma>0)"
using map_iterator_linord_rule_P [OF iterateoi_rule, of m I \<sigma>0 "\<lambda>_. True" f P] assms
by simp
end
lemma map_iterateoi_I :
assumes "\<And>m. invar m \<Longrightarrow> map_iterator_linord (itoi m) (\<alpha> m)"
shows "map_iterateoi \<alpha> invar itoi"
proof
fix m
assume invar_m: "invar m"
from assms(1)[OF invar_m] show it_OK: "map_iterator_linord (itoi m) (\<alpha> m)" .
from set_iterator_genord.finite_S0 [OF it_OK[unfolded set_iterator_map_linord_def]]
show "finite (dom (\<alpha> m))" by (simp add: finite_map_to_set)
qed
locale map_reverse_iterateoi = ordered_finite_map \<alpha> invar
for \<alpha> :: "'s \<Rightarrow> ('u::linorder) \<rightharpoonup> 'v" and invar
+
fixes reverse_iterateoi :: "'s \<Rightarrow> ('u \<times> 'v,'\<sigma>) set_iterator"
assumes reverse_iterateoi_rule: "
invar m \<Longrightarrow> map_iterator_rev_linord (reverse_iterateoi m) (\<alpha> m)"
begin
lemma reverse_iterateoi_rule_P[case_names minv inv0 inv_pres i_complete i_inter]:
assumes MINV: "invar m"
assumes I0: "I (dom (\<alpha> m)) \<sigma>0"
assumes IP: "!!k v it \<sigma>. \<lbrakk>
c \<sigma>;
k \<in> it;
\<forall>j\<in>it. k\<ge>j;
\<forall>j\<in>dom (\<alpha> m) - it. j\<ge>k;
\<alpha> m k = Some v;
it \<subseteq> dom (\<alpha> m);
I it \<sigma>
\<rbrakk> \<Longrightarrow> I (it - {k}) (f (k, v) \<sigma>)"
assumes IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
assumes II: "!!\<sigma> it. \<lbrakk>
it \<subseteq> dom (\<alpha> m);
it \<noteq> {};
\<not> c \<sigma>;
I it \<sigma>;
\<forall>k\<in>it. \<forall>j\<in>dom (\<alpha> m) - it. j\<ge>k
\<rbrakk> \<Longrightarrow> P \<sigma>"
shows "P (reverse_iterateoi m c f \<sigma>0)"
using map_iterator_rev_linord_rule_P [OF reverse_iterateoi_rule, of m I \<sigma>0 c f P] assms
by simp
lemma reverse_iterateo_rule_P[case_names minv inv0 inv_pres i_complete]:
assumes MINV: "invar m"
assumes I0: "I (dom (\<alpha> m)) \<sigma>0"
assumes IP: "!!k v it \<sigma>. \<lbrakk>
k \<in> it;
\<forall>j\<in>it. k\<ge>j;
\<forall>j\<in>dom (\<alpha> m) - it. j\<ge>k;
\<alpha> m k = Some v;
it \<subseteq> dom (\<alpha> m);
I it \<sigma>
\<rbrakk> \<Longrightarrow> I (it - {k}) (f (k, v) \<sigma>)"
assumes IF: "!!\<sigma>. I {} \<sigma> \<Longrightarrow> P \<sigma>"
shows "P (reverse_iterateoi m (\<lambda>_. True) f \<sigma>0)"
using map_iterator_rev_linord_rule_P[OF reverse_iterateoi_rule, of m I \<sigma>0 "\<lambda>_. True" f P] assms
by simp
end
lemma map_reverse_iterateoi_I :
assumes "\<And>m. invar m \<Longrightarrow> map_iterator_rev_linord (ritoi m) (\<alpha> m)"
shows "map_reverse_iterateoi \<alpha> invar ritoi"
proof
fix m
assume invar_m: "invar m"
from assms(1)[OF invar_m] show it_OK: "map_iterator_rev_linord (ritoi m) (\<alpha> m)" .
from set_iterator_genord.finite_S0 [OF it_OK[unfolded set_iterator_map_rev_linord_def]]
show "finite (dom (\<alpha> m))" by (simp add: finite_map_to_set)
qed
lemma (in poly_map_iteratei) v1_iteratei_impl:
"map_iteratei \<alpha> invar iteratei"
by unfold_locales (rule iteratei_correct)
lemma (in poly_map_iterateoi) v1_iterateoi_impl:
"map_iterateoi \<alpha> invar iterateoi"
by unfold_locales (rule iterateoi_correct)
lemma (in poly_map_rev_iterateoi) v1_reverse_iterateoi_impl:
"map_reverse_iterateoi \<alpha> invar rev_iterateoi"
by unfold_locales (rule rev_iterateoi_correct)
declare (in poly_map_iteratei) v1_iteratei_impl[locale_witness_add]
declare (in poly_map_iterateoi) v1_iterateoi_impl[locale_witness_add]
declare (in poly_map_rev_iterateoi)
v1_reverse_iterateoi_impl[locale_witness_add]
(*
sublocale poly_map_iteratei < v1_iteratei: map_iteratei \<alpha> invar iteratei
by (rule v1_iteratei_impl)
sublocale poly_map_iterateoi < v1_iteratei: map_iterateoi \<alpha> invar iterateoi
by (rule v1_iterateoi_impl)
sublocale poly_map_rev_iterateoi
< v1_iteratei!: map_reverse_iterateoi \<alpha> invar rev_iterateoi
by (rule v1_reverse_iterateoi_impl)
*)
subsection {* Concrete Operation Names *}
text {* We define abbreviations to recover the @{text "xx_op"}-names *}
(* TODO: This may take long, as Local_Theory.abbrev seems to be really slow *)
local_setup {* let
val thy = @{theory}
val ctxt = Proof_Context.init_global thy;
val pats = [
"hs","hm",
"rs","rm",
"ls","lm","lsi","lmi","lsnd","lss",
"ts","tm",
"ias","iam",
"ahs","ahm",
"bino",
"fifo",
"ft",
"alprioi",
"aluprioi",
"skew"
];
val {const_space, constants, ...} = Sign.consts_of thy |> Consts.dest
val clist = Name_Space.extern_entries true ctxt const_space constants |> map (apfst #1)
fun abbrevs_for pat = clist
|> map_filter (fn (n,_) => case Long_Name.explode n of
[_,prefix,opname] =>
if prefix = pat then let
val aname = prefix ^ "_" ^ opname
val rhs = Proof_Context.read_term_abbrev ctxt n
in SOME (aname,rhs) end
else NONE
| _ => NONE);
fun do_abbrevs pat lthy = let
val abbrevs = abbrevs_for pat;
in
case abbrevs of [] => (warning ("No stuff found for "^pat); lthy)
| _ => let
(*val _ = tracing ("Defining " ^ pat ^ "_xxx ...");*)
val lthy = fold (fn (name,rhs) =>
Local_Theory.abbrev
Syntax.mode_input
((Binding.name name,Mixfix.NoSyn),rhs) #> #2
) abbrevs lthy
(*val _ = tracing "Done";*)
in lthy end
end
in
fold do_abbrevs pats
end
*}
lemmas hs_correct = hs.correct
lemmas hm_correct = hm.correct
lemmas rs_correct = rs.correct
lemmas rm_correct = rm.correct
lemmas ls_correct = ls.correct
lemmas lm_correct = lm.correct
lemmas lsi_correct = lsi.correct
lemmas lmi_correct = lmi.correct
lemmas lsnd_correct = lsnd.correct
lemmas lss_correct = lss.correct
lemmas ts_correct = ts.correct
lemmas tm_correct = tm.correct
lemmas ias_correct = ias.correct
lemmas iam_correct = iam.correct
lemmas ahs_correct = ahs.correct
lemmas ahm_correct = ahm.correct
lemmas bino_correct = bino.correct
lemmas fifo_correct = fifo.correct
lemmas ft_correct = ft.correct
lemmas alprioi_correct = alprioi.correct
lemmas aluprioi_correct = aluprioi.correct
lemmas skew_correct = skew.correct
locale list_enqueue = list_appendr
locale list_dequeue = list_removel
locale list_push = list_appendl
locale list_pop = list_remover
locale list_top = list_leftmost
locale list_bot = list_rightmost
instantiation rbt :: ("{equal,linorder}",equal) equal
begin
(*definition equal_rbt :: "('a,'b) RBT.rbt \<Rightarrow> _" where "equal_rbt \<equiv> op ="*)
definition "equal_class.equal (r :: ('a, 'b) rbt) r'
== RBT.impl_of r = RBT.impl_of r'"
instance
apply intro_classes
apply (simp add: equal_rbt_def RBT.impl_of_inject)
done
end
end
|
{"author": "andredidier", "repo": "phd", "sha": "113f7c8b360a3914a571db13d9513e313954f4b2", "save_path": "github-repos/isabelle/andredidier-phd", "path": "github-repos/isabelle/andredidier-phd/phd-113f7c8b360a3914a571db13d9513e313954f4b2/thesis/Collections/ICF/CollectionsV1.thy"}
|
# -*- coding:utf-8 -*-
"""
A local image scale tool
Licensed under The MIT License
Writen by Shaowu Wu, 20190926
"""
import cv2.cv2 as cv
import numpy as np
import os
LINE_COLOR = (0, 255, 0) # 获取在原图上画的线的颜色
LINE_WIDTH = 2 # 在原图上线的宽度
SCALE = 2 # 对选取区域的放大倍数
ADD_BBOX = True # 是否对要保存的图像增加边框
BBOX_WIDTH = 4 # 增加的边框的宽度
BBOX_COLOR = (255, 255, 255) # 默认为白色
INTER_METHOD = cv.INTER_LINEAR # 默认使用最近邻, 双三次INTER_CUBIC, INTER_LANCZOS4, INTER_LINEAR
point1 = (-1, -1)
point2 = (-1, -1)
G_RECT = []
global img, g_rect # g_rect是选择的感兴趣的区域[min_x, min_y, width, height]
def read_image(path):
img = cv.imread(path)
return img
def draw_circle(event, x, y, flags, param):
global point1, img, G_RECT
img2 = img.copy() # 这里必须要拷贝一个新的
if event == cv.EVENT_LBUTTONDOWN: # 获取左上角的坐标
point1 = x, y
print("point1: ",point1)
# 画图的时候是先在img2数据上进行处理,之后再将其显示出来
cv.circle(img2, point1, 10, LINE_COLOR, LINE_WIDTH)
cv.imshow("ori_image", img2)
elif event == cv.EVENT_MOUSEMOVE and (flags & cv.EVENT_FLAG_LBUTTON):
cv.rectangle(img2, point1, (x, y), LINE_COLOR, LINE_WIDTH)
cv.imshow("ori_image", img2)
elif event == cv.EVENT_LBUTTONUP: # 鼠标左键按钮松开的时候画图
point2 = x, y
print("point2", point2)
if point1 != point2:
min_x = min(point1[0], point2[0])
min_y = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] - point2[1])
G_RECT = [min_x, min_y, width, height]
print("g_rect: ", G_RECT)
cv.rectangle(img2, point1, point2, LINE_COLOR, LINE_WIDTH)
cv.imshow('ori_image', img2)
def get_ROI():
global img
scaled_image = []
while True:
cv.namedWindow('ori_image')
cv.setMouseCallback('ori_image', draw_circle)
cv.imshow('ori_image', img)
k = cv.waitKey(0)
if k == 32: # 空格键化出对比图
scaled_image = get_compair_imgs(imgs)
plot_compair_imgs(scaled_image, img_names)
if k == 13: # 回车键退出
break
return scaled_image
def scale_image(img, scale, inter_mathod):
width, height, chl = img.shape
dst = cv.resize(img, (height*scale, width*scale), interpolation=inter_mathod)
return dst
def add_borders(img):
borderType = cv.BORDER_CONSTANT
dst = cv.copyMakeBorder(img, BBOX_WIDTH, BBOX_WIDTH, BBOX_WIDTH, BBOX_WIDTH, borderType, value=BBOX_COLOR)
return dst
def get_scale_image(img):
roi_img = img[G_RECT[1]:G_RECT[1] + G_RECT[3], G_RECT[0]:G_RECT[0] + G_RECT[2]] # 提取选择的区域
scaled_image = scale_image(roi_img, SCALE, INTER_METHOD) # 对选择的区域放大
add_borders_img = add_borders(scaled_image) # 增加边框
return add_borders_img
def read_imgs(path):
global img
g = os.walk(path)
imgs = []
img_names = []
for path, dir_list, file_list in g:
for file_name in file_list:
if "_ROI" in file_name:
img = read_image(os.path.join(path, file_name))
else:
imgs.append(read_image(os.path.join(path, file_name)))
img_names.append(file_name.split(".")[0])
return imgs, img_names
def get_compair_imgs(imgs):
compair_imgs = []
for img in imgs:
if ADD_BBOX:
compair_imgs.append(get_scale_image(img))
else:
compair_imgs.append(img)
return compair_imgs
def plot_compair_imgs(compair_imgs, imgs_name):
n = len(compair_imgs)
ori_img = get_scale_image(img)
h1 = compair_imgs[0]
h2 = ori_img
for i in range(1, n):
h1 = np.hstack((h1, compair_imgs[i]))
h2 = np.hstack((h2, ori_img))
h1 = np.vstack((h1, h2))
cv.imshow("C", h1)
print(imgs_name)
def save_scale_image(imgs, img_names):
if not os.path.exists(".\\result\\scale_image\\"):
os.makedirs(".\\result\\scale_image\\")
for i in range(len(imgs)):
cv.imwrite(".\\result\\scale_image\\" + img_names[i] + ".bmp", imgs[i])
cv.imwrite(".\\result\\scale_image\\" + "ori_image.bmp", get_scale_image(img))
def save_big_image(images, image_names):
if not os.path.exists(".\\result\\big_image\\"):
os.makedirs(".\\result\\big_image\\")
for i in range(len(images)):
cv.rectangle(images[i], (G_RECT[0], G_RECT[1]), (G_RECT[0] + G_RECT[2], G_RECT[1] + G_RECT[3]), LINE_COLOR, LINE_WIDTH)
cv.imwrite(".\\result\\big_image\\" + image_names[i] + ".bmp", imgs[i])
cv.rectangle(img, (G_RECT[0], G_RECT[1]), (G_RECT[0] + G_RECT[2], G_RECT[1] + G_RECT[3]), LINE_COLOR,
LINE_WIDTH)
cv.imwrite(".\\result\\big_image\\" + "ori.bmp", img)
if __name__ == '__main__':
# 单幅图像的放大
# global img
# path = "j20.PNG"
# img = read_image(path)
# img = cv.cvtColor(img, cv.COLOR_RGB2BGR)
# get_ROI()
# add_borders_img = get_scale_image(img)
# cv.imshow("roi", add_borders_img)
# 多幅图像的放大的比对
path = ".\\imgs"
imgs, img_names = read_imgs(path)
scaled_image = get_ROI()
save_scale_image(scaled_image, img_names)
save_big_image(imgs, img_names)
cv.destroyAllWindows()
|
{"hexsha": "a2c3253428f5e21102f3ecb0d061d4057e0bbc7d", "size": 5157, "ext": "py", "lang": "Python", "max_stars_repo_path": "scale_tool/enlarge_image_local.py", "max_stars_repo_name": "wshaow/enlarge_local_image_tool", "max_stars_repo_head_hexsha": "11b6eacf69dde5e0c6e06d57401a8c8d7c0dd4b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-12-13T03:22:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-11T11:23:33.000Z", "max_issues_repo_path": "scale_tool/enlarge_image_local.py", "max_issues_repo_name": "wshaow/tools", "max_issues_repo_head_hexsha": "11b6eacf69dde5e0c6e06d57401a8c8d7c0dd4b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scale_tool/enlarge_image_local.py", "max_forks_repo_name": "wshaow/tools", "max_forks_repo_head_hexsha": "11b6eacf69dde5e0c6e06d57401a8c8d7c0dd4b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8092485549, "max_line_length": 127, "alphanum_fraction": 0.6226488268, "include": true, "reason": "import numpy", "num_tokens": 1644}
|
/*!
@file
Forward declares `boost::hana::Pair`.
@copyright Louis Dionne 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_FWD_PAIR_HPP
#define BOOST_HANA_FWD_PAIR_HPP
#include <boost/hana/fwd/core/make.hpp>
namespace boost { namespace hana {
//! @ingroup group-datatypes
//! Generic container of two elements.
//!
//! A `Pair` is essentially equivalent to a `std::pair`, which can also
//! be seen as a 2-element tuple. `Pair`s are useful in some contexts
//! where a tuple would be too much, like returning two elements from a
//! function.
//!
//!
//! Modeled concepts
//! ----------------
//! 1. `Comparable` (operators provided)\n
//! Two pairs `(x, y)` and `(x', y')` are equal if and only if both
//! `x == x'` and `y == y'`.
//! @snippet example/pair.cpp comparable
//!
//! 2. `Orderable` (operators provided)\n
//! Pairs are ordered as-if they were 2-element tuples, using a
//! lexicographical ordering.
//! @snippet example/pair.cpp orderable
//!
//! 3. `Foldable`\n
//! Folding a `Pair` is equivalent to folding a 2-element tuple. In other
//! words:
//! @code
//! foldl(make_pair(x, y), s, f) == f(f(s, x), y)
//! foldr(make_pair(x, y), s, f) == f(x, f(y, s))
//! @endcode
//! Example:
//! @snippet example/pair.cpp foldable
//!
//! 4. `Product`\n
//! The model of `Product` is the simplest one possible; the first element
//! of a pair `(x, y)` is `x`, and its second element is `y`.
//! @snippet example/pair.cpp product
struct Pair { };
template <typename First, typename Second>
struct _pair;
#ifdef BOOST_HANA_DOXYGEN_INVOKED
//! Creates a `Pair` with the given elements.
//! @relates Pair
//!
//!
//! Example
//! -------
//! @snippet example/pair.cpp make<Pair>
template <>
constexpr auto make<Pair> = [](auto&& first, auto&& second) {
return _pair<decayed(decltype(first)), decayed(decltype(second))>{
forwarded(first), forwarded(second)
};
};
#endif
//! Alias to `make<Pair>`; provided for convenience.
//! @relates Pair
//!
//! Example
//! -------
//! @snippet example/pair.cpp make_pair
constexpr auto make_pair = make<Pair>;
//! @todo Provided for backward compatibility. What to do with it?
constexpr auto pair = make<Pair>;
}} // end namespace boost::hana
#endif // !BOOST_HANA_FWD_PAIR_HPP
|
{"hexsha": "b2d7451fedf7a9a3697d7b8132ab2190974789b0", "size": 2606, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/hana/fwd/pair.hpp", "max_stars_repo_name": "josephwinston/hana", "max_stars_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/hana/fwd/pair.hpp", "max_issues_repo_name": "josephwinston/hana", "max_issues_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/hana/fwd/pair.hpp", "max_forks_repo_name": "josephwinston/hana", "max_forks_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3023255814, "max_line_length": 78, "alphanum_fraction": 0.6020721412, "num_tokens": 685}
|
REAL FUNCTION URAND(IY)
INTEGER IY
C
C URAND IS A UNIFORM RANDOM NUMBER GENERATOR BASED ON THEORY AND
C SUGGESTIONS GIVEN IN D.E. KNUTH (1969), VOL 2. THE INTEGER IY
C SHOULD BE INITIALIZED TO AN ARBITRARY INTEGER PRIOR TO THE FIRST CALL
C TO URAND. THE CALLING PROGRAM SHOULD NOT ALTER THE VALUE OF IY
C BETWEEN SUBSEQUENT CALLS TO URAND. VALUES OF URAND WILL BE RETURNED
C IN THE INTERVAL (0,1).
C
INTEGER IA,IC,ITWO,M2,M,MIC
DOUBLE PRECISION HALFM
REAL S
DOUBLE PRECISION DATAN,DSQRT
DATA M2/0/,ITWO/2/
IF (M2 .NE. 0) GO TO 20
C
C IF FIRST ENTRY, COMPUTE MACHINE INTEGER WORD LENGTH
C
M = 1
10 M2 = M
M = ITWO*M2
IF (M .GT. M2) GO TO 10
HALFM = M2
C
C COMPUTE MULTIPLIER AND INCREMENT FOR LINEAR CONGRUENTIAL METHOD
C
IA = 8*IDINT(HALFM*DATAN(1.D0)/8.D0) + 5
IC = 2*IDINT(HALFM*(0.5D0-DSQRT(3.D0)/6.D0)) + 1
MIC = (M2 - IC) + M2
C
C S IS THE SCALE FACTOR FOR CONVERTING TO FLOATING POINT
C
S = 0.5/HALFM
C
C COMPUTE NEXT RANDOM NUMBER
C
20 IY = IY*IA
C
C THE FOLLOWING STATEMENT IS FOR COMPUTERS WHICH DO NOT ALLOW
C INTEGER OVERFLOW ON ADDITION
C
IF (IY .GT. MIC) IY = (IY - M2) - M2
C
IY = IY + IC
C
C THE FOLLOWING STATEMENT IS FOR COMPUTERS WHERE THE
C WORD LENGTH FOR ADDITION IS GREATER THAN FOR MULTIPLICATION
C
IF (IY/2 .GT. M2) IY = (IY - M2) - M2
C
C THE FOLLOWING STATEMENT IS FOR COMPUTERS WHERE INTEGER
C OVERFLOW AFFECTS THE SIGN BIT
C
IF (IY .LT. 0) IY = (IY + M2) + M2
URAND = FLOAT(IY)*S
RETURN
END
|
{"hexsha": "acd7361d30cd121d30def414abd2159f88111776", "size": 1666, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Modules/ThirdParty/VNL/src/vxl/v3p/netlib/laso/urand.f", "max_stars_repo_name": "nalinimsingh/ITK_4D", "max_stars_repo_head_hexsha": "95a2eacaeaffe572889832ef0894239f89e3f303", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-10-01T20:46:17.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-17T19:39:50.000Z", "max_issues_repo_path": "Modules/ThirdParty/VNL/src/vxl/v3p/netlib/laso/urand.f", "max_issues_repo_name": "nalinimsingh/ITK_4D", "max_issues_repo_head_hexsha": "95a2eacaeaffe572889832ef0894239f89e3f303", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Modules/ThirdParty/VNL/src/vxl/v3p/netlib/laso/urand.f", "max_forks_repo_name": "nalinimsingh/ITK_4D", "max_forks_repo_head_hexsha": "95a2eacaeaffe572889832ef0894239f89e3f303", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-05-17T16:34:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-24T02:12:40.000Z", "avg_line_length": 28.2372881356, "max_line_length": 73, "alphanum_fraction": 0.618847539, "num_tokens": 623}
|
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
# pylint: disable=exec-used
import cv2
import logging as log
import numpy as np
import os.path as osp
import shutil
from openvino.inference_engine import IECore
from datumaro.components.cli_plugin import CliPlugin
from datumaro.components.launcher import Launcher
class _OpenvinoImporter(CliPlugin):
@staticmethod
def _parse_output_layers(s):
return [s.strip() for s in s.split(',')]
@classmethod
def build_cmdline_parser(cls, **kwargs):
parser = super().build_cmdline_parser(**kwargs)
parser.add_argument('-d', '--description', required=True,
help="Path to the model description file (.xml)")
parser.add_argument('-w', '--weights', required=True,
help="Path to the model weights file (.bin)")
parser.add_argument('-i', '--interpreter', required=True,
help="Path to the network output interprter script (.py)")
parser.add_argument('--device', default='CPU',
help="Target device (default: %(default)s)")
parser.add_argument('--output-layers', type=cls._parse_output_layers,
help="A comma-separated list of extra output layers")
return parser
@staticmethod
def copy_model(model_dir, model):
shutil.copy(model['description'],
osp.join(model_dir, osp.basename(model['description'])))
model['description'] = osp.basename(model['description'])
shutil.copy(model['weights'],
osp.join(model_dir, osp.basename(model['weights'])))
model['weights'] = osp.basename(model['weights'])
shutil.copy(model['interpreter'],
osp.join(model_dir, osp.basename(model['interpreter'])))
model['interpreter'] = osp.basename(model['interpreter'])
class InterpreterScript:
def __init__(self, path):
with open(path, 'r') as f:
script = f.read()
context = {}
exec(script, context, context)
process_outputs = context.get('process_outputs')
if not callable(process_outputs):
raise Exception("Can't find 'process_outputs' function in "
"the interpreter script")
self.__dict__['process_outputs'] = process_outputs
get_categories = context.get('get_categories')
assert get_categories is None or callable(get_categories)
if get_categories:
self.__dict__['get_categories'] = get_categories
@staticmethod
def get_categories():
return None
@staticmethod
def process_outputs(inputs, outputs):
raise NotImplementedError(
"Function should be implemented in the interpreter script")
class OpenvinoLauncher(Launcher):
cli_plugin = _OpenvinoImporter
def __init__(self, description, weights, interpreter,
device=None, model_dir=None, output_layers=None):
if not model_dir:
model_dir = ''
if not osp.isfile(description):
description = osp.join(model_dir, description)
if not osp.isfile(description):
raise Exception('Failed to open model description file "%s"' % \
(description))
if not osp.isfile(weights):
weights = osp.join(model_dir, weights)
if not osp.isfile(weights):
raise Exception('Failed to open model weights file "%s"' % \
(weights))
if not osp.isfile(interpreter):
interpreter = osp.join(model_dir, interpreter)
if not osp.isfile(interpreter):
raise Exception('Failed to open model interpreter script file "%s"' % \
(interpreter))
self._interpreter = InterpreterScript(interpreter)
self._device = device or 'CPU'
self._output_blobs = output_layers
self._ie = IECore()
self._network = self._ie.read_network(description, weights)
self._check_model_support(self._network, self._device)
self._load_executable_net()
def _check_model_support(self, net, device):
not_supported_layers = set(name
for name, dev in self._ie.query_network(net, device).items()
if not dev)
if len(not_supported_layers) != 0:
log.error("The following layers are not supported " \
"by the plugin for device '%s': %s." % \
(device, ', '.join(not_supported_layers)))
raise NotImplementedError(
"Some layers are not supported on the device")
def _load_executable_net(self, batch_size=1):
network = self._network
if self._output_blobs:
network.add_outputs(self._output_blobs)
iter_inputs = iter(network.input_info)
self._input_blob = next(iter_inputs)
# NOTE: handling for the inclusion of `image_info` in OpenVino2019
self._require_image_info = 'image_info' in network.input_info
if self._input_blob == 'image_info':
self._input_blob = next(iter_inputs)
self._input_layout = network.input_info[self._input_blob].input_data.shape
self._input_layout[0] = batch_size
network.reshape({self._input_blob: self._input_layout})
self._batch_size = batch_size
self._net = self._ie.load_network(network=network, num_requests=1,
device_name=self._device)
def infer(self, inputs):
assert len(inputs.shape) == 4, \
"Expected an input image in (N, H, W, C) format, got %s" % \
(inputs.shape, )
if inputs.shape[3] == 1: # A batch of single-channel images
inputs = np.repeat(inputs, 3, axis=3)
assert inputs.shape[3] == 3, \
"Expected BGR input, got %s" % (inputs.shape, )
n, c, h, w = self._input_layout
if inputs.shape[1:3] != (h, w):
resized_inputs = np.empty((n, h, w, c), dtype=inputs.dtype)
for inp, resized_input in zip(inputs, resized_inputs):
cv2.resize(inp, (w, h), resized_input)
inputs = resized_inputs
inputs = inputs.transpose((0, 3, 1, 2)) # NHWC to NCHW
inputs = {self._input_blob: inputs}
if self._require_image_info:
info = np.zeros([1, 3])
info[0, 0] = h
info[0, 1] = w
info[0, 2] = 1.0 # scale
inputs['image_info'] = info
results = self._net.infer(inputs)
if len(results) == 1:
return next(iter(results.values()))
else:
return results
def launch(self, inputs):
batch_size = len(inputs)
if self._batch_size < batch_size:
self._load_executable_net(batch_size)
outputs = self.infer(inputs)
results = self.process_outputs(inputs, outputs)
return results
def categories(self):
return self._interpreter.get_categories()
def process_outputs(self, inputs, outputs):
return self._interpreter.process_outputs(inputs, outputs)
|
{"hexsha": "7c64d6fa44995f7b6976125c0e7161c529ea5950", "size": 7048, "ext": "py", "lang": "Python", "max_stars_repo_path": "datumaro/plugins/openvino_plugin/launcher.py", "max_stars_repo_name": "einstonlabs/datumaro", "max_stars_repo_head_hexsha": "9eb5246febb4b4ae10c321fae80413bb87fb1a7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datumaro/plugins/openvino_plugin/launcher.py", "max_issues_repo_name": "einstonlabs/datumaro", "max_issues_repo_head_hexsha": "9eb5246febb4b4ae10c321fae80413bb87fb1a7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datumaro/plugins/openvino_plugin/launcher.py", "max_forks_repo_name": "einstonlabs/datumaro", "max_forks_repo_head_hexsha": "9eb5246febb4b4ae10c321fae80413bb87fb1a7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-05-17T07:00:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-26T02:15:10.000Z", "avg_line_length": 35.4170854271, "max_line_length": 83, "alphanum_fraction": 0.6232973893, "include": true, "reason": "import numpy", "num_tokens": 1538}
|
import numpy as np
MAX_ROUNDS = 18
RC = np.array([
0x01, 0x82, 0x8a, 0x00, 0x8b, 0x01, 0x81, 0x09, 0x8a, 0x88, 0x09, 0x0a,
0x8b, 0x8b, 0x89, 0x03, 0x02, 0x80
],
dtype=np.uint8)
RHO_OFFSETS = np.array([[0, 1, 6, 4, 3], [4, 4, 6, 7, 4], [3, 2, 3, 1, 7],
[1, 5, 7, 5, 0], [2, 2, 5, 0, 6]],
dtype=np.uint8)
def ROL8(states, offset):
'''
Elementwise 8 bit roll of given offset.
:param states: (N, 5, 5) Keccak states.
:param offset: left rolling offset.
:return: The updated states
'''
return (states << offset) ^ (states >> 8 - offset)
def theta(states: np.ndarray):
'''
Numpy implementation of keccak Theta fuction.
:param states: (N, 25) Keccak state.
:return: The updated states.
'''
states = states.reshape(states.shape[0], 5, 5)
C = np.bitwise_xor.reduce(states, axis=-2)
C1 = np.roll(C, shift=-1, axis=-1)
C4 = np.roll(C, shift=-4, axis=-1)
D = ROL8(C1, 1) ^ C4
D = np.repeat(D, 5, axis=-2).reshape(states.shape[0], 5, 5)
states = states ^ D
return states.reshape(states.shape[0], 25)
def rho(states: np.ndarray):
'''
Numpy implementation of keccak Rho fuction.
:param states: (N, 25) Keccak state.
:return: The updated states.
'''
states = states.reshape(states.shape[0], 5, 5)
states = ROL8(states, RHO_OFFSETS)
return states.reshape(states.shape[0], 25)
def pi(states: np.ndarray):
'''
Numpy implementation of keccak Pi fuction.
Uses a precomputed permutation matrix.
:param states: (N, 25) Keccak state.
:return: The updated states.
'''
p_mat = np.array([
0, 6, 12, 18, 24, 3, 9, 10, 16, 22, 1, 7, 13, 19, 20, 4, 5, 11, 17, 23,
2, 8, 14, 15, 21
],
dtype=np.uint8)
return states[:, p_mat]
def chi(states: np.ndarray):
'''
Numpy implementation of keccak Chi fuction.
:param states: (N, 25) Keccak state.
:return: The updated states.
'''
states = states.reshape(states.shape[0], 5, 5)
A1 = np.roll(states, -1, axis=-1)
A2 = np.roll(states, -2, axis=-1)
states = states ^ (np.invert(A1) & A2)
return states.reshape(states.shape[0], 25)
def iota(states: np.ndarray, round_index: int):
'''
Numpy implementation of keccak Iota fuction.
:param states: (N, 25) Keccak state.
:param round_index: the round index.
:return: The updated states.
'''
states[:, 0] ^= RC[round_index]
return states
def keccak_round(states: np.ndarray, round_index: int):
'''
Numpy implementation of one keccak round.
:param states: (N, 25) Keccak state.
:param round_index: the round index.
:return: The updated states.
'''
states = theta(states)
states = rho(states)
states = pi(states)
states = chi(states)
states = iota(states, round_index)
return states
def permutation(states: np.ndarray):
'''
Perform Keccak cryptographic permutation over
an numpy array.
:param states: (N, 25) Keccak state.
:return: The updated states.
- Example::
>>> import numpy as np
>>> from npcrypto.keccak import permutation
>>> states = np.random.randint(0, 256, size=(2, 25), dtype=np.uint8)
>>> states
array([[244, 237, 146, 136, 194, 119, 230, 20, 38, 153, 174, 61, 167,
242, 195, 179, 8, 8, 136, 17, 205, 246, 3, 170, 138],
[ 58, 11, 187, 245, 222, 188, 53, 201, 253, 243, 189, 249, 92,
101, 85, 40, 249, 90, 163, 52, 6, 12, 171, 222, 127]],
dtype=uint8)
>>> permutation(states)
array([[ 41, 65, 240, 43, 90, 191, 154, 77, 96, 226, 90, 29, 231,
175, 191, 227, 209, 75, 126, 230, 237, 185, 198, 91, 166],
[ 63, 140, 202, 213, 82, 102, 207, 20, 201, 81, 243, 22, 107,
233, 116, 81, 64, 106, 110, 44, 177, 10, 56, 49, 220]],
dtype=uint8)
'''
for i in range(MAX_ROUNDS):
states = keccak_round(states, i)
return states
|
{"hexsha": "3e1cd198f3d286dcba86c0e9c341a540b9de83b8", "size": 4147, "ext": "py", "lang": "Python", "max_stars_repo_path": "npcrypto/keccak.py", "max_stars_repo_name": "timoi-Lucypher/npCrypto", "max_stars_repo_head_hexsha": "10156482d70503fa01880421aba4a4a3d171bd98", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "npcrypto/keccak.py", "max_issues_repo_name": "timoi-Lucypher/npCrypto", "max_issues_repo_head_hexsha": "10156482d70503fa01880421aba4a4a3d171bd98", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "npcrypto/keccak.py", "max_forks_repo_name": "timoi-Lucypher/npCrypto", "max_forks_repo_head_hexsha": "10156482d70503fa01880421aba4a4a3d171bd98", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1045751634, "max_line_length": 80, "alphanum_fraction": 0.5606462503, "include": true, "reason": "import numpy", "num_tokens": 1473}
|
# Measures of anisotropy
"""
Au(C) -> au
Return the Universal Elastic Anisotropy Index, `au`, of the tensor `C`.
See: Ranganathan & Ostoja-Starzewksi, Universal elastic anisotropy index,
Phys Rev Lett (2008) vol. 101 (5) pp. 055504
"""
function Au(C)
Kv, Gv, Kr, Gr = VoigtK(C), VoigtG(C), ReussK(C), ReussG(C)
5*(Gv/Gr) + Kv/Kr - 6
end
|
{"hexsha": "df207394fa98547d7a9c7406ad0f5106ef19458a", "size": 357, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/measures.jl", "max_stars_repo_name": "anowacki/CIJ.jl", "max_stars_repo_head_hexsha": "f6e7b0fac22048f9c20652c6d36c3e4d48d31d39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-21T14:43:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-13T11:31:34.000Z", "max_issues_repo_path": "src/measures.jl", "max_issues_repo_name": "anowacki/CIJ.jl", "max_issues_repo_head_hexsha": "f6e7b0fac22048f9c20652c6d36c3e4d48d31d39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/measures.jl", "max_forks_repo_name": "anowacki/CIJ.jl", "max_forks_repo_head_hexsha": "f6e7b0fac22048f9c20652c6d36c3e4d48d31d39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T10:44:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T10:44:11.000Z", "avg_line_length": 23.8, "max_line_length": 73, "alphanum_fraction": 0.6442577031, "num_tokens": 132}
|
/- Author: E.W.Ayers
This should be in mathlib. Some simp and extensionality lemmas for comma and over. -/
import category_theory.comma
namespace category_theory
section
universes v₁ v₂ v₃ u₁ u₂ u₃ -- declare the `v`'s first; see `category_theory.category` for an explanation
variables {A : Type u₁} [𝒜 : category.{v₁} A]
variables {B : Type u₂} [ℬ : category.{v₂} B]
variables {T : Type u₃} [𝒯 : category.{v₃} T]
include 𝒜 ℬ 𝒯
variables {L : A ⥤ T} {R : B ⥤ T}
lemma comma.ext : Π {l₁ l₂ : comma L R} (pl : l₁.left = l₂.left) (pr : l₁.right = l₂.right) (pf : l₁.hom == l₂.hom), l₁ = l₂ :=
begin
rintros ⟨_,_,_⟩ ⟨_,_,_⟩ pl pr pf, cases pl, cases pr, cases pf, refl,
end
end
section
open over
universes u v
variables {C : Type u} [𝒞 : category.{v} C] {X : C}
include 𝒞
@[ext] lemma over.ext : Π {o₁ o₂ : over X} (px : o₁.left = o₂.left) (p : o₁.hom == o₂.hom), o₁ = o₂ :=
begin
intros _ _ _ _,
apply comma.ext,
assumption,
rw over.over_right, rw over.over_right,
assumption
end
@[simp] lemma over.mk_hom_id {f : over X} : over.mk(f.hom) = f :=
begin ext, refl, refl, end
end
end category_theory
|
{"author": "Or7ando", "repo": "lean", "sha": "d41169cf4e416a0d42092fb6bdc14131cee9dd15", "save_path": "github-repos/lean/Or7ando-lean", "path": "github-repos/lean/Or7ando-lean/lean-d41169cf4e416a0d42092fb6bdc14131cee9dd15/.github/workflows/geo/src/comma.lean"}
|
# ---
# title: 424. Longest Repeating Character Replacement
# id: problem424
# author: Tian Jun
# date: 2020-10-31
# difficulty: Medium
# categories: Two Pointers, Sliding Window
# link: <https://leetcode.com/problems/longest-repeating-character-replacement/description/>
# hidden: true
# ---
#
# Given a string `s` that consists of only uppercase English letters, you can
# perform at most `k` operations on that string.
#
# In one operation, you can choose **any** character of the string and change it
# to any other uppercase English character.
#
# Find the length of the longest sub-string containing all repeating letters you
# can get after performing the above operations.
#
# **Note:**
# Both the string's length and _k_ will not exceed 104.
#
# **Example 1:**
#
#
#
# Input:
# s = "ABAB", k = 2
#
# Output:
# 4
#
# Explanation:
# Replace the two 'A's with two 'B's or vice versa.
#
#
#
#
# **Example 2:**
#
#
#
# Input:
# s = "AABABBA", k = 1
#
# Output:
# 4
#
# Explanation:
# Replace the one 'A' in the middle with 'B' and form "AABBBBA".
# The substring "BBBB" has the longest repeating letters, which is 4.
#
#
#
#
#
## @lc code=start
using LeetCode
## add your code here:
## @lc code=end
|
{"hexsha": "d9346880f583cbb5f28721635285b92f721d2eb2", "size": 1322, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/unresolved/424.longest-repeating-character-replacement.jl", "max_stars_repo_name": "noob-data-analaysis/LeetCode.jl", "max_stars_repo_head_hexsha": "94d91b295e988948e77e737c10d2f0e3ecb7c2b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/unresolved/424.longest-repeating-character-replacement.jl", "max_issues_repo_name": "noob-data-analaysis/LeetCode.jl", "max_issues_repo_head_hexsha": "94d91b295e988948e77e737c10d2f0e3ecb7c2b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/unresolved/424.longest-repeating-character-replacement.jl", "max_forks_repo_name": "noob-data-analaysis/LeetCode.jl", "max_forks_repo_head_hexsha": "94d91b295e988948e77e737c10d2f0e3ecb7c2b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.9841269841, "max_line_length": 92, "alphanum_fraction": 0.6180030257, "num_tokens": 387}
|
[STATEMENT]
lemma sign_r_pos_sgnx_iff:
"sign_r_pos p a \<longleftrightarrow> sgnx (poly p) (at_right a) > 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sign_r_pos p a = (0 < sgnx (poly p) (at_right a))
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
2. 0 < sgnx (poly p) (at_right a) \<Longrightarrow> sign_r_pos p a
[PROOF STEP]
assume asm:"0 < sgnx (poly p) (at_right a)"
[PROOF STATE]
proof (state)
this:
0 < sgnx (poly p) (at_right a)
goal (2 subgoals):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
2. 0 < sgnx (poly p) (at_right a) \<Longrightarrow> sign_r_pos p a
[PROOF STEP]
obtain c where c_def:"(poly p has_sgnx c) (at_right a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>c. (poly p has_sgnx c) (at_right a) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using sgnx_able_poly(1) sgnx_able_sgnx
[PROOF STATE]
proof (prove)
using this:
poly ?p sgnx_able at_right ?a
?f sgnx_able ?F \<Longrightarrow> (?f has_sgnx sgnx ?f ?F) ?F
goal (1 subgoal):
1. (\<And>c. (poly p has_sgnx c) (at_right a) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
(poly p has_sgnx c) (at_right a)
goal (2 subgoals):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
2. 0 < sgnx (poly p) (at_right a) \<Longrightarrow> sign_r_pos p a
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(poly p has_sgnx c) (at_right a)
[PROOF STEP]
have "c>0"
[PROOF STATE]
proof (prove)
using this:
(poly p has_sgnx c) (at_right a)
goal (1 subgoal):
1. 0 < c
[PROOF STEP]
using asm
[PROOF STATE]
proof (prove)
using this:
(poly p has_sgnx c) (at_right a)
0 < sgnx (poly p) (at_right a)
goal (1 subgoal):
1. 0 < c
[PROOF STEP]
using has_sgnx_imp_sgnx trivial_limit_at_right_real
[PROOF STATE]
proof (prove)
using this:
(poly p has_sgnx c) (at_right a)
0 < sgnx (poly p) (at_right a)
\<lbrakk>(?f has_sgnx ?c) ?F; ?F \<noteq> bot\<rbrakk> \<Longrightarrow> sgnx ?f ?F = ?c
at_right ?x \<noteq> bot
goal (1 subgoal):
1. 0 < c
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
0 < c
goal (2 subgoals):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
2. 0 < sgnx (poly p) (at_right a) \<Longrightarrow> sign_r_pos p a
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < c
[PROOF STEP]
show "sign_r_pos p a"
[PROOF STATE]
proof (prove)
using this:
0 < c
goal (1 subgoal):
1. sign_r_pos p a
[PROOF STEP]
using c_def
[PROOF STATE]
proof (prove)
using this:
0 < c
(poly p has_sgnx c) (at_right a)
goal (1 subgoal):
1. sign_r_pos p a
[PROOF STEP]
unfolding sign_r_pos_def has_sgnx_def
[PROOF STATE]
proof (prove)
using this:
0 < c
\<forall>\<^sub>F x in at_right a. sgn (poly p x) = c
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_right a. 0 < poly p x
[PROOF STEP]
apply (elim eventually_mono)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>0 < c; sgn (poly p x) = c\<rbrakk> \<Longrightarrow> 0 < poly p x
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
sign_r_pos p a
goal (1 subgoal):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
[PROOF STEP]
assume asm:"sign_r_pos p a"
[PROOF STATE]
proof (state)
this:
sign_r_pos p a
goal (1 subgoal):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
[PROOF STEP]
define c where "c = sgnx (poly p) (at_right a)"
[PROOF STATE]
proof (state)
this:
c = sgnx (poly p) (at_right a)
goal (1 subgoal):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
c = sgnx (poly p) (at_right a)
[PROOF STEP]
have "(poly p has_sgnx c) (at_right a)"
[PROOF STATE]
proof (prove)
using this:
c = sgnx (poly p) (at_right a)
goal (1 subgoal):
1. (poly p has_sgnx c) (at_right a)
[PROOF STEP]
by (simp add: sgnx_able_sgnx)
[PROOF STATE]
proof (state)
this:
(poly p has_sgnx c) (at_right a)
goal (1 subgoal):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(poly p has_sgnx c) (at_right a)
[PROOF STEP]
have "(\<forall>\<^sub>F x in (at_right a). poly p x>0 \<and> sgn (poly p x) = c)"
[PROOF STATE]
proof (prove)
using this:
(poly p has_sgnx c) (at_right a)
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_right a. 0 < poly p x \<and> sgn (poly p x) = c
[PROOF STEP]
using asm
[PROOF STATE]
proof (prove)
using this:
(poly p has_sgnx c) (at_right a)
sign_r_pos p a
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_right a. 0 < poly p x \<and> sgn (poly p x) = c
[PROOF STEP]
unfolding has_sgnx_def sign_r_pos_def
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in at_right a. sgn (poly p x) = c
\<forall>\<^sub>F x in at_right a. 0 < poly p x
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_right a. 0 < poly p x \<and> sgn (poly p x) = c
[PROOF STEP]
by (simp add:eventually_conj_iff)
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in at_right a. 0 < poly p x \<and> sgn (poly p x) = c
goal (1 subgoal):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>\<^sub>F x in at_right a. 0 < poly p x \<and> sgn (poly p x) = c
[PROOF STEP]
have "\<forall>\<^sub>F x in (at_right a). c > 0"
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in at_right a. 0 < poly p x \<and> sgn (poly p x) = c
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_right a. 0 < c
[PROOF STEP]
apply (elim eventually_mono)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. 0 < poly p x \<and> sgn (poly p x) = c \<Longrightarrow> 0 < c
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in at_right a. 0 < c
goal (1 subgoal):
1. sign_r_pos p a \<Longrightarrow> 0 < sgnx (poly p) (at_right a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>\<^sub>F x in at_right a. 0 < c
[PROOF STEP]
show "c>0"
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in at_right a. 0 < c
goal (1 subgoal):
1. 0 < c
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < c
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2947, "file": "Winding_Number_Eval_Cauchy_Index_Theorem", "length": 35}
|
#!/usr/bin/env python
"""
Copyright 2019 Daryl Gohl
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
from scipy.interpolate import interp1d
import numpy as np
import pysam
import os
import argparse
__version__ = "1.2"
def get_args(x):
x.add_argument("-i", "--input_file",
type = str,
default = None,
metavar = '',
help="Input path for sam file [required].")
x.add_argument("-r", "--reference_length",
type = int,
default = None,
metavar = '',
help="Input reference length [required].")
x.add_argument("-m", "--min_length",
type = int,
default = 100,
metavar = '',
help="Minimum mapped read length to plot (default: 100)")
x.add_argument("-o", "--output_dir",
type = str,
default = '',
metavar = '',
help = "Output directory for plot (default: same folder as input file)")
x.add_argument("-l", "--line_spacing",
type = float,
default = 0.02,
metavar = '',
help = "Radial spacing of each read on plot (default 0.2)")
x.add_argument("-w", "--line_width",
type = float,
default = 0.75,
metavar = '',
help = "Line width of each read on plot (default 0.75)")
x.add_argument("-c", "--circle_size",
type = float,
default = 0.45,
metavar = '',
help = "Size of central circle (default 0.45)")
x.add_argument("-s", "--fig_size",
type = float,
default = 10,
metavar = '',
help = "Size of figure (default 10)")
x.add_argument("-x", "--clip",
type = str,
default = False,
metavar = '',
help = "Plot clipped portion of reads (default False)")
x.add_argument("-f", "--figure_format",
type = str,
default = "pdf",
metavar = '',
help = "Format of saved figure, supported formats: eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff.")
#####################
args = x.parse_args()
return args
# Parses command line arguments
argparser = argparse.ArgumentParser(description = "Map and plot reads against a circular reference (v" + __version__ +")\n" + \
"by Daryl Gohl\n" + \
"This program takes in SAM files of sequencing reads mapped to a concatenated reference sequence (two copies of the reference sequence repeated in tandem) and the reference sequence length (length of the original presumed circular reference sequence) and outputs a plot of the mapped reads against a circularized reference.",
add_help = True,
epilog ='')
args = get_args(argparser)
args = vars(args)
# possible input args
filename = args['input_file']
fname = os.path.split(filename)[1]
ref_length = args['reference_length']
incrementor = args['line_spacing']
width = args['line_width']
out_folder = args['output_dir']
Min_length = int(args['min_length'])
Fig_size = args['fig_size']
start_coord = args['circle_size']
Fig_format = args['figure_format']
Include_clipped = args['clip']
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
Plot_clipped = str2bool(Include_clipped)
start_coord_orig = start_coord
if out_folder == '':
out_dir = os.path.dirname(filename)
else:
out_dir = out_folder
#Set output file name
file = fname[:-4] + "." + Fig_format
fig_name = os.path.join(out_dir,file)
#SAM File
samfile = pysam.AlignmentFile(filename, "r")
#reads
reference_start = []
reference_end = []
clipped_start = []
clipped_end = []
clipped_start_len = []
clipped_end_len = []
for r in samfile.fetch(until_eof=True):
x = r.query_alignment_start #position of start of alignment in read
y = r.query_alignment_end #position of end of alignment in read
z = r.query_alignment_length #length of alignment in read
q = r.infer_read_length() #length of read including soft-clipped bases
x1 = r.reference_start #position of start of alignment in reference
y1 = r.reference_end #position of end of alignment in reference
z1 = r.reference_length #length alignment to reference
qs0 = x1-x #start position of clipped bases relative to the reference upstream
qs1 = x #end position of clipped bases relative to the reference upstream
cs_len = x #length of sequence clipped off the upstream end
ce_len = q-y #length of sequence clipped off the downstream end
qe0 = y #start position of clipped bases relative to the reference downstream
qe1 = y1+(q-y) #end position of clipped bases relative to the reference downstream
if z1 > Min_length:
reference_start.append(x1)
reference_end.append(y1)
clipped_start.append(qs0) #clipped sequence start relative to reference
clipped_end.append(qe1) #clipped sequence end relative to reference
clipped_start_len.append(cs_len) #length of upstream clipped sequence
clipped_end_len.append(ce_len) #length of downstream clipped sequence
#print str(r.infer_read_length())
#pr = "clip start,end = " + str(qs0) + "," + str(qe1) + " mapped start, end = " + str(x1) + "," + str(y1)
#print pr
###MAPPED READS
#Dedup reference
break_span = []
ref_start_collapsed = []
ref_end_collapsed = []
for i, item in enumerate(reference_start):
if item < ref_length and reference_end[i] > ref_length:
break_span.append(True)
else:
break_span.append(False)
if item > ref_length:
ref_start_collapsed.append(item-ref_length)
else:
ref_start_collapsed.append(item)
if reference_end[i] > ref_length:
ref_end_collapsed.append(reference_end[i]-ref_length)
else:
ref_end_collapsed.append(reference_end[i])
#Convert to polar coordinated (0-360)
deg_min = 0
deg_max = 360
deg_per_base = 360.0/ref_length
deg_start = []
deg_end = []
for i, item in enumerate(ref_start_collapsed):
deg_end.append(360-item*deg_per_base) #Note: Subtracting from 360 makes degrees 0 to 360 go in more intuitive clockwise orientation rather than default counterclockwise.
deg_start.append(360-ref_end_collapsed[i]*deg_per_base) #Note: Subtracting from 360 makes degrees 0 to 360 go in more intuitive clockwise orientation rather than default counterclockwise.
###FULL READS (with clipped relative to reference)
#First, clean up <0 or >ref_length cases
#If <0, add ref_length
#If >ref_length, subtract ref_length
#This will turn these cases into break spanning reads which will be handled normally below
clipped_start_clean = []
clipped_end_clean = []
mod = []
for i, item in enumerate(clipped_start):
if item < 0:
clipped_start_clean.append(item + ref_length)
clipped_end_clean.append(clipped_end[i] + ref_length)
mod.append(True)
elif clipped_end[i] > 2*ref_length:
clipped_start_clean.append(item - ref_length)
clipped_end_clean.append(clipped_end[i] - ref_length)
mod.append(False)
else:
clipped_start_clean.append(item)
clipped_end_clean.append(clipped_end[i])
mod.append(False)
clipped_break_span = []
clipped_start_collapsed = []
clipped_end_collapsed = []
for i, item in enumerate(clipped_start_clean):
if item < ref_length and clipped_end_clean[i] > ref_length:
clipped_break_span.append(True)
else:
clipped_break_span.append(False)
if item > ref_length:
clipped_start_collapsed.append(item-ref_length)
else:
clipped_start_collapsed.append(item)
if clipped_end[i] > ref_length:
clipped_end_collapsed.append(clipped_end_clean[i]-ref_length)
else:
clipped_end_collapsed.append(clipped_end_clean[i])
#Convert to polar coordinated (0-360)
deg_min = 0
deg_max = 360
deg_per_base = 360.0/ref_length
clipped_deg_start = []
clipped_deg_end = []
for i, item in enumerate(clipped_start_collapsed):
clipped_deg_end.append(360-item*deg_per_base)
clipped_deg_start.append(360-clipped_end_collapsed[i]*deg_per_base)
#Plotting data
with plt.style.context("seaborn-white"):
fig = plt.figure(figsize=(Fig_size,Fig_size))
#my_dpi = 300
#fig = plt.figure(figsize=(2400/my_dpi, 2400/my_dpi), dpi=my_dpi)
ax = fig.add_subplot(111, projection="polar")
ax.grid(False)
ax.set_rticks([])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_theta_zero_location('N')
ax.set_facecolor('white')
ax.axis('off')
# Connect two points with a curve
for curve in [[[0, 360], [(start_coord-2*incrementor), (start_coord-2*incrementor)]]]:
curve[0] = np.deg2rad(curve[0])
x = np.linspace( curve[0][0], curve[0][1], 500)
y = interp1d( curve[0], curve[1])( x)
ax.plot(x, y, linewidth = 5)
for curve in [[[0, 0], [0.0, 0.0]]]:
curve[0] = np.deg2rad(curve[0])
x = np.linspace( curve[0][0], curve[0][1], 500)
y = interp1d( curve[0], curve[1])( x)
ax.plot(x, y)
#Draw clipped lines
if Plot_clipped != False:
for i, item in enumerate(clipped_break_span):
if mod[i] == True:
item = break_span[i]
if item == False:
for curve in [[[clipped_deg_start[i], clipped_deg_end[i]], [start_coord, start_coord]]]:
curve[0] = np.deg2rad(curve[0])
x = np.linspace( curve[0][0], curve[0][1], 500)
y = interp1d( curve[0], curve[1])( x)
ax.plot(x, y, color='red', linewidth = width)
else:
for curve in [[[360, clipped_deg_start[i]], [start_coord, start_coord]]]:
curve[0] = np.deg2rad(curve[0])
x = np.linspace( curve[0][0], curve[0][1], 500)
y = interp1d( curve[0], curve[1])( x)
ax.plot(x, y, color='red', linewidth = width)
for curve in [[[clipped_deg_end[i], 0], [start_coord, start_coord]]]:
curve[0] = np.deg2rad(curve[0])
x = np.linspace( curve[0][0], curve[0][1], 500)
y = interp1d( curve[0], curve[1])( x)
ax.plot(x, y, color='red', linewidth = width)
start_coord = start_coord + incrementor
#Mapped reads
start_coord = start_coord_orig
for i, item in enumerate(break_span):
if item == False:
for curve in [[[deg_start[i], deg_end[i]], [start_coord, start_coord]]]:
curve[0] = np.deg2rad(curve[0])
x = np.linspace( curve[0][0], curve[0][1], 500)
y = interp1d( curve[0], curve[1])( x)
ax.plot(x, y, color='grey', linewidth = width)
else:
for curve in [[[360, deg_start[i]], [start_coord, start_coord]]]:
curve[0] = np.deg2rad(curve[0])
x = np.linspace( curve[0][0], curve[0][1], 500)
y = interp1d( curve[0], curve[1])( x)
ax.plot(x, y, color='grey', linewidth = width)
for curve in [[[deg_end[i], 0], [start_coord, start_coord]]]:
curve[0] = np.deg2rad(curve[0])
x = np.linspace( curve[0][0], curve[0][1], 500)
y = interp1d( curve[0], curve[1])( x)
ax.plot(x, y, color='grey', linewidth = width)
start_coord = start_coord + incrementor
plt.savefig(fig_name,bbox_inches='tight')
|
{"hexsha": "5444a7d4b8bc49f0495a378e72a0db33d045dfd6", "size": 13108, "ext": "py", "lang": "Python", "max_stars_repo_path": "ConcatMap_v1.2.py", "max_stars_repo_name": "darylgohl/ConcatMap", "max_stars_repo_head_hexsha": "b00377f772c39f794606d4d1b4455fe5e0437552", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ConcatMap_v1.2.py", "max_issues_repo_name": "darylgohl/ConcatMap", "max_issues_repo_head_hexsha": "b00377f772c39f794606d4d1b4455fe5e0437552", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ConcatMap_v1.2.py", "max_forks_repo_name": "darylgohl/ConcatMap", "max_forks_repo_head_hexsha": "b00377f772c39f794606d4d1b4455fe5e0437552", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0128205128, "max_line_length": 460, "alphanum_fraction": 0.6200793409, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3143}
|
import glob
import json
import logging
import matplotlib.patheffects as path_effects
import numpy as np
import os
import pandas as pd
import re
import matplotlib as mpl
mpl.use('Agg')
from os.path import basename
from matplotlib import pyplot as plt
from shutil import copyfile
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
INPUT_PATH = 'output'
OUTPUT_PATH = os.path.join('output', 'images')
REPORT_PATH = os.path.join('output', 'report')
if not os.path.exists(REPORT_PATH):
os.makedirs(REPORT_PATH)
TO_PROCESS = {
'PI': {
'path': 'PI',
'file_regex': re.compile('(.*)_grid\.csv')
},
'VI': {
'path': 'VI',
'file_regex': re.compile('(.*)_grid\.csv')
},
'QL': {
'path': 'QL',
'file_regex': re.compile('(.*)_grid\.csv')
}
}
the_best = {}
WATERMARK = False
GATECH_USERNAME = 'DO NOT STEAL'
TERM = 'Spring 2019'
def watermark(p):
if not WATERMARK:
return p
ax = plt.gca()
for i in range(1, 11):
p.text(0.95, 0.95 - (i * (1.0/10)), '{} {}'.format(GATECH_USERNAME, TERM), transform=ax.transAxes,
fontsize=32, color='gray',
ha='right', va='bottom', alpha=0.2)
return p
def plot_episode_stats(title_base, stats, smoothing_window=50):
# Trim the DF down based on the episode lengths
stats = stats[stats['length'] > 0]
# Plot the episode length over time, both as a line and histogram
fig1 = plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.grid()
plt.tight_layout()
plt.plot(stats['length'])
plt.xlabel("Episode")
plt.ylabel("Episode Length")
plt.title("Episode Length over Time")
plt.subplot(122)
plt.hist(stats['length'], zorder=3)
plt.grid(zorder=0)
plt.xlabel("Episode Length")
plt.ylabel("Count")
plt.title(title_base.format("Episode Length (Histogram)"))
fig1 = watermark(fig1)
plt.tight_layout()
# Plot the episode reward over time
fig2 = plt.figure(figsize=(10, 5))
rewards_smoothed = pd.Series(stats['reward']).rolling(
smoothing_window, min_periods=smoothing_window
).mean()
plt.subplot(121)
plt.grid()
plt.tight_layout()
plt.plot(rewards_smoothed)
plt.xlabel("Episode")
plt.ylabel("Episode Reward (Smoothed)")
plt.title("Episode Reward over Time ({})".format(smoothing_window))
plt.subplot(122)
plt.hist(stats['reward'], zorder=3)
plt.grid(zorder=0)
plt.xlabel("Episode Reward")
plt.ylabel("Count")
plt.title(title_base.format("Episode Reward (Histogram)"))
fig2 = watermark(fig2)
plt.tight_layout()
# Plot time steps and episode number
fig3 = plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.grid()
plt.tight_layout()
time_steps = np.cumsum(stats['time'])
plt.plot(time_steps, np.arange(len(stats['time'])))
plt.xlabel("Time Steps")
plt.ylabel("Episode")
plt.title("Episode per time step")
plt.subplot(122)
plt.hist(time_steps, zorder=3)
plt.grid(zorder=0)
plt.xlabel("Time Step")
plt.ylabel("Count")
plt.title(title_base.format("Episode Time (Histogram)"))
fig3 = watermark(fig3)
plt.tight_layout()
return fig1, fig2, fig3
def plot_policy_map(title, policy, map_desc, color_map, direction_map):
fig = plt.figure()
ax = fig.add_subplot(111, xlim=(0, policy.shape[1]), ylim=(0, policy.shape[0]))
font_size = 'x-large'
if policy.shape[1] > 16:
font_size = 'small'
plt.title(title)
for i in range(policy.shape[0]):
for j in range(policy.shape[1]):
y = policy.shape[0] - i - 1
x = j
p = plt.Rectangle([x, y], 1, 1, edgecolor='k', linewidth=0.1)
p.set_facecolor(color_map[map_desc[i, j]])
ax.add_patch(p)
if map_desc[i, j] in b'CHG':
continue
text = ax.text(x+0.5, y+0.5, direction_map[policy[i, j]], weight='bold', size=font_size,
horizontalalignment='center', verticalalignment='center', color='w')
text.set_path_effects([path_effects.Stroke(linewidth=2, foreground='black'),
path_effects.Normal()])
plt.axis('off')
plt.xlim((0, policy.shape[1]))
plt.ylim((0, policy.shape[0]))
plt.tight_layout()
return watermark(plt)
def plot_value_map(title, v, map_desc, color_map):
fig = plt.figure()
ax = fig.add_subplot(111, xlim=(0, v.shape[1]), ylim=(0, v.shape[0]))
font_size = 'x-large'
if v.shape[1] > 16:
font_size = 'small'
v_min = np.min(v)
v_max = np.max(v)
bins = np.linspace(v_min, v_max, 100)
v_red = np.digitize(v, bins)/100.0
for i in range(v.shape[0]):
for j in range(v.shape[1]):
value = np.round(v[i, j], 1)
if len(str(value)) > 3:
font_size = 'small'
plt.title(title)
for i in range(v.shape[0]):
for j in range(v.shape[1]):
y = v.shape[0] - i - 1
x = j
p = plt.Rectangle([x, y], 1, 1, edgecolor='k', linewidth=0.1)
p.set_facecolor(color_map[map_desc[i, j]])
ax.add_patch(p)
value = np.round(v[i, j], 1)
red = v_red[i, j]
if map_desc[i, j] in b'HG':
continue
text2 = ax.text(x+0.5, y+0.5, value, size=font_size,
horizontalalignment='center', verticalalignment='center', color=(1.0, 1.0-red, 1.0-red))
text2.set_path_effects([path_effects.Stroke(linewidth=1, foreground='black'),
path_effects.Normal()])
plt.axis('off')
plt.xlim((0, v.shape[1]))
plt.ylim((0, v.shape[0]))
plt.tight_layout()
return watermark(plt)
def plot_time_vs_steps(title, df, xlabel="Steps", ylabel="Time (s)"):
plt.close()
plt.figure()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid()
plt.plot(df.index.values, df['time'], '-', linewidth=1)
plt.legend(loc="best")
plt.tight_layout()
return watermark(plt)
def plot_reward_and_delta_vs_steps(title, df, xlabel="Steps", ylabel="Reward"):
plt.close()
plt.figure()
f, (ax) = plt.subplots(1, 1)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
lns1 = ax.plot(df.index.values, df['reward'], color='green', linewidth=1, label=ylabel)
ex_ax = ax.twinx()
lns2 = ex_ax.plot(df.index.values, df['delta'], color='blue', linewidth=1, label='Delta')
ex_ax.set_ylabel('Delta')
ex_ax.tick_params('y')
ax.grid()
ax.axis('tight')
lns = lns1 + lns2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=0)
f.tight_layout()
return watermark(plt)
# Adapted from http://code.activestate.com/recipes/578293-unicode-command-line-histograms/
def cli_hist(data, bins=10):
bars = u' ▁▂▃▄▅▆▇█'
n, bin_edges = np.histogram(data, bins=bins)
n2 = map(int, np.floor(n*(len(bars)-1)/(max(n))))
res = u' '.join(bars[i] for i in n2)
return res
# Adapted from https://gist.github.com/joezuntz/2f3bdc2ab0ea59229907
def ascii_hist(data, bins=10):
N, X = np.histogram(data, bins=bins)
total = 1.0 * len(data)
width = 50
nmax = N.max()
lines = []
for (xi, n) in zip(X, N):
bar = '#' * int(n * 1.0 * width / nmax)
xi = '{0: <8.4g}'.format(xi).ljust(10)
lines.append('{0}| {1}'.format(xi, bar))
return lines
def fetch_mdp_name(file, regexp):
search_result = regexp.search(basename(file))
if search_result is None:
return False, False
mdp_name = search_result.groups()[0]
return mdp_name, ' '.join(map(lambda x: x.capitalize(), mdp_name.split('_')))
def process_params(problem_name, params):
param_str = '{}'.format(params['discount_factor'])
if problem_name == 'QL':
param_str = '{}_{}_{}_{}_{}'.format(params['alpha'], params['q_init'], params['epsilon'],
params['epsilon_decay'], params['discount_factor'])
return param_str
def find_optimal_params(problem_name, base_dir, file_regex):
grid_files = glob.glob(os.path.join(base_dir, '*_grid*.csv'))
logger.info("Grid files {}".format(grid_files))
best_params = {}
for f in grid_files:
mdp, readable_mdp = fetch_mdp_name(f, file_regex)
logger.info("MDP: {}, Readable MDP: {}".format(mdp, readable_mdp))
df = pd.read_csv(f)
best = df.copy()
# Attempt to find the best params. First look at the reward mean, then median, then max. If at any point we
# have more than one result as "best", try the next criterion
for criterion in ['reward_median', 'reward_max', 'reward_mean']:
best_value = np.max(best[criterion])
best = best[best[criterion] == best_value]
if best.shape[0] == 1:
break
# If we have more than one best, take the highest index.
if best.shape[0] > 1:
best = best.iloc[-1:]
params = best.iloc[-1]['params']
params = json.loads(params)
best_index = best.iloc[-1].name
best_params[mdp] = {
'name': mdp,
'readable_name': readable_mdp,
'index': best_index,
'params': params,
'param_str': process_params(problem_name, params)
}
return best_params
def find_policy_images(base_dir, params):
policy_images = {}
for mdp in params:
mdp_params = params[mdp]
fileStart = os.path.join(base_dir, '{}_{}'.format(mdp_params['name'], mdp_params['param_str']))
image_files = glob.glob(fileStart + '*.png')
if len(image_files) == 2:
policy_file = None
value_file = None
for image_file in image_files:
if 'Value' in image_file:
value_file = image_file
else:
policy_file = image_file
logger.info("Value file {}, Policy File: {}".format(value_file, policy_file))
policy_images[mdp] = {
'value': value_file,
'policy': policy_file
}
else:
logger.error("Unable to find image file for {} with params {}".format(mdp, mdp_params))
return policy_images
def find_data_files(base_dir, params):
data_files = {}
for mdp in params:
mdp_params = params[mdp]
files = glob.glob(os.path.join(base_dir, '{}_{}.csv'.format(mdp_params['name'], mdp_params['param_str'])))
optimal_files = glob.glob(
os.path.join(base_dir, '{}_{}_optimal.csv'.format(mdp_params['name'], mdp_params['param_str'])))
episode_files = glob.glob(
os.path.join(base_dir, '{}_{}_episode.csv'.format(mdp_params['name'], mdp_params['param_str'])))
logger.info("files {}".format(files))
logger.info("optimal_files {}".format(optimal_files))
logger.info("episode_files {}".format(episode_files))
data_files[mdp] = {
'file': files[0],
'optimal_file': optimal_files[0]
}
if len(episode_files) > 0:
data_files[mdp]['episode_file'] = episode_files[0]
return data_files
def copy_best_images(best_images, base_dir):
for problem_name in best_images:
for mdp in best_images[problem_name]:
mdp_files = best_images[problem_name][mdp]
dest_dir = os.path.join(base_dir, problem_name)
policy_image = mdp_files['policy']
value_image = mdp_files['value']
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
policy_dest = os.path.join(dest_dir, basename(policy_image))
value_dest = os.path.join(dest_dir, basename(value_image))
logger.info("Copying {} to {}".format(policy_image, policy_dest))
logger.info("Copying {} to {}".format(value_image, value_dest))
copyfile(policy_image, policy_dest)
copyfile(value_image, value_dest)
def copy_data_files(data_files, base_dir):
for problem_name in data_files:
for mdp in data_files[problem_name]:
mdp_files = data_files[problem_name][mdp]
dest_dir = os.path.join(base_dir, problem_name)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for file_type in mdp_files:
file_name = mdp_files[file_type]
file_dest = os.path.join(dest_dir, basename(file_name))
logger.info("Copying {} file from {} to {}".format(file_type, file_name, file_dest))
copyfile(file_name, file_dest)
def plot_data(data_files, envs, base_dir):
for problem_name in data_files:
for mdp in data_files[problem_name]:
env = lookup_env_from_mdp(envs, mdp)
if env is None:
logger.error("Unable to find env for MDP {}".format(mdp))
return
mdp_files = data_files[problem_name][mdp]
step_term = 'Steps'
if problem_name == 'QL':
step_term = 'Episodes'
df = pd.read_csv(mdp_files['file'])
title = '{}: {} - Time vs {}'.format(env['readable_name'],
problem_name_to_descriptive_name(problem_name), step_term)
file_name = os.path.join(os.path.join(base_dir, problem_name), '{}_time.png'.format(mdp))
p = plot_time_vs_steps(title, df, xlabel=step_term)
p = watermark(p)
p.savefig(file_name, format='png', dpi=150)
p.close()
reward_term = 'Reward'
if problem_name in ['VI', 'PI']:
reward_term = 'Value'
title = '{}: {} - {} and Delta vs {}'.format(env['readable_name'],
problem_name_to_descriptive_name(problem_name),
reward_term, step_term)
file_name = os.path.join(os.path.join(base_dir, problem_name), '{}_reward_delta.png'.format(mdp))
p = plot_reward_and_delta_vs_steps(title, df, ylabel=reward_term, xlabel=step_term)
p = watermark(p)
p.savefig(file_name, format='png', dpi=150)
p.close()
if problem_name == 'QL' and 'episode_file' in mdp_files:
title = '{}: {} - {}'.format(env['readable_name'], problem_name_to_descriptive_name(problem_name),
'{}')
episode_df = pd.read_csv(mdp_files['episode_file'])
q_length, q_reward, q_time = plot_episode_stats(title, episode_df)
file_base = os.path.join(os.path.join(base_dir, problem_name), '{}_{}.png'.format(mdp, '{}'))
logger.info("Plotting episode stats with file base {}".format(file_base))
q_length.savefig(file_base.format('episode_length'), format='png', dpi=150)
q_reward.savefig(file_base.format('episode_reward'), format='png', dpi=150)
q_time.savefig(file_base.format('episode_time'), format='png', dpi=150)
plt.close()
def lookup_env_from_mdp(envs, mdp):
for env in envs:
if env['name'] == mdp:
return env
return None
def problem_name_to_descriptive_name(problem_name):
if problem_name == 'VI':
return 'Value Iteration'
if problem_name == 'PI':
return 'Policy Iteration'
if problem_name == 'QL':
return "Q-Learner"
return 'Unknown'
def plot_results(envs):
best_params = {}
best_images = {}
data_files = {}
for problem_name in TO_PROCESS:
logger.info("Processing {}".format(problem_name))
problem = TO_PROCESS[problem_name]
problem_path = os.path.join(INPUT_PATH, problem['path'])
problem_image_path = os.path.join(os.path.join(INPUT_PATH, 'images'), problem['path'])
best_params[problem_name] = find_optimal_params(problem_name, problem_path, problem['file_regex'])
best_images[problem_name] = find_policy_images(problem_image_path, best_params[problem_name])
data_files[problem_name] = find_data_files(problem_path, best_params[problem_name])
copy_best_images(best_images, REPORT_PATH)
copy_data_files(data_files, REPORT_PATH)
plot_data(data_files, envs, REPORT_PATH)
params_df = pd.DataFrame(best_params)
params_df.to_csv(os.path.join(REPORT_PATH, 'params.csv'))
|
{"hexsha": "71fc4001d1fa6b1c569bc8963806797374516cdf", "size": 16747, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignment4/experiments/plotting.py", "max_stars_repo_name": "manishmalik/CS-7641-assignments", "max_stars_repo_head_hexsha": "f8a7de0aac0e53931ef7c364b9a752dbb4664d40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignment4/experiments/plotting.py", "max_issues_repo_name": "manishmalik/CS-7641-assignments", "max_issues_repo_head_hexsha": "f8a7de0aac0e53931ef7c364b9a752dbb4664d40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignment4/experiments/plotting.py", "max_forks_repo_name": "manishmalik/CS-7641-assignments", "max_forks_repo_head_hexsha": "f8a7de0aac0e53931ef7c364b9a752dbb4664d40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6452241715, "max_line_length": 116, "alphanum_fraction": 0.5955693557, "include": true, "reason": "import numpy", "num_tokens": 4131}
|
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# --------
# overhead
# --------
rootdir = 'my/path/somewhere/'
subs = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
ROI_list = ['ROI1', 'ROI2', 'ROI3', 'ROI4']
condition_list = ['pre', 'post']
hemi_list = ["L", "R"]
# --------
# read data into DataFrame
# --------
df = pd.DataFrame(columns=["subj", "ROI", "hemi", "condition", "my_value"])
my_row = 0
for sub in subs:
# location of subject's derived data according to BIDS format
OD = os.path.join('rootdir', 'derivatives', 'sub-' + sub)
for ind_r, ROI in enumerate(ROI_list):
for hemi in hemi_list:
for ind_c, cond in enumerate(condition_list):
# generate random value here as example
my_val = np.random.uniform(0, 10) + ind_r + ind_c
df.loc[my_row] = [sub, ROI, hemi, cond, my_val]
my_row = my_row + 1
# --------
# plotting using seaborn
# --------
# boxes show quartiles
sns.catplot(x="ROI", y="my_value", data=df, dodge=True, hue='condition', col='hemi', kind='boxen', aspect=3)
plt.show()
|
{"hexsha": "e212213c0c1d1898b24531b944507d6ef63cae12", "size": 1163, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_visualization/example_catplot.py", "max_stars_repo_name": "NicoleEic/projects", "max_stars_repo_head_hexsha": "028a4bb4b49539fc98b442f0a2f9434e95c94561", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-09-10T19:41:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T21:29:11.000Z", "max_issues_repo_path": "data_visualization/example_catplot.py", "max_issues_repo_name": "NicoleEic/projects", "max_issues_repo_head_hexsha": "028a4bb4b49539fc98b442f0a2f9434e95c94561", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_visualization/example_catplot.py", "max_forks_repo_name": "NicoleEic/projects", "max_forks_repo_head_hexsha": "028a4bb4b49539fc98b442f0a2f9434e95c94561", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-09-10T19:41:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-26T13:51:55.000Z", "avg_line_length": 30.6052631579, "max_line_length": 108, "alphanum_fraction": 0.5855546002, "include": true, "reason": "import numpy", "num_tokens": 334}
|
"""
Simple way to control the torso through a ui
Author: Patrick Gmerek
"""
import sys
sys.path.append("../robot_drivers/")
import Adafruit_PCA9685
import numpy as np
import cv2 as cv
import time
from hex_walker_driver import *
def main():
torso = initialize_torso()
slider_names = ["Waist",
"Right Tip Joint", "Right Mid Joint", "Right Rotary Joint",
"Left Tip Joint", "Left Mid Joint", "Left Rotary Joint"]
slider_limits = [[90, 135],
[90, 180], [90, 180], [90, 180],
[90, 180], [90, 180], [90, 180]]
window_name = "Hexapod Torso Control"
cv.namedWindow(window_name)
for slider, limits in zip(slider_names, slider_limits):
cv.createTrackbar(slider, window_name, limits[0], limits[1], dummy)
user_inputs = fetch_trackbar_pos(window_name, slider_names)
torso[0].set_leg_position(Leg_Position(user_inputs[1], user_inputs[2], user_inputs[3]))
torso[1].set_leg_position(Leg_Position(user_inputs[4], user_inputs[5], user_inputs[6]))
torso[2].set_angle(user_inputs[0])
while True:
previous_user_inputs = user_inputs
user_inputs = fetch_trackbar_pos(window_name, slider_names)
key = cv.waitKey(1) & 0xFF
if key == ord("q"): # Quit if the user presses "q"
break
if not compare_lists(user_inputs, previous_user_inputs):
print("Values changed")
torso[0].set_leg_position(Leg_Position(user_inputs[1], user_inputs[2], user_inputs[3]))
torso[1].set_leg_position(Leg_Position(user_inputs[4], user_inputs[5], user_inputs[6]))
torso[2].set_angle(user_inputs[0])
cv.destroyAllWindows()
def compare_lists(list1, list2):
if not len(list1) == len(list1):
return -1
for i in range(0, len(list1)):
if not list1[i] == list2[i]:
return 0
return 1
def fetch_trackbar_pos(window_name, slider_names):
waist = cv.getTrackbarPos(slider_names[0], window_name)
rr = cv.getTrackbarPos(slider_names[1], window_name)
rm = cv.getTrackbarPos(slider_names[2], window_name)
rt = cv.getTrackbarPos(slider_names[3], window_name)
lr = cv.getTrackbarPos(slider_names[4], window_name)
lm = cv.getTrackbarPos(slider_names[5], window_name)
lt = cv.getTrackbarPos(slider_names[6], window_name)
return [waist, rr, rm, rt, lr, lm, lt]
def dummy(x):
return
def initialize_torso():
pwm_40= Adafruit_PCA9685.PCA9685(address=0x40)
pwm_41= Adafruit_PCA9685.PCA9685(address=0x41)
pwm_40.set_pwm_freq(60)
pwm_41.set_pwm_freq(60)
sleep_time = 2
# create the torso
r = Leg(0, pwm_41, 12, 11, 10, ARM_R)
l = Leg(0, pwm_40, 12, 11, 10, ARM_L)
rot = Rotator(0, pwm_40, 9)
return [r, l, rot]
if __name__ == '__main__':
main()
|
{"hexsha": "afa48d64731d3ca827ae46838c9d7ca53f1beda8", "size": 2846, "ext": "py", "lang": "Python", "max_stars_repo_path": "resources/robot/project_files/testing/interactive_control_torso.py", "max_stars_repo_name": "ramk94/Thief_Policemen", "max_stars_repo_head_hexsha": "557701909a20f9a50c9bebed8532873a1910e599", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-11-25T02:45:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-13T04:27:40.000Z", "max_issues_repo_path": "resources/robot/project_files/testing/interactive_control_torso.py", "max_issues_repo_name": "ramk94/Thief_Policemen", "max_issues_repo_head_hexsha": "557701909a20f9a50c9bebed8532873a1910e599", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "resources/robot/project_files/testing/interactive_control_torso.py", "max_forks_repo_name": "ramk94/Thief_Policemen", "max_forks_repo_head_hexsha": "557701909a20f9a50c9bebed8532873a1910e599", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9347826087, "max_line_length": 99, "alphanum_fraction": 0.6539002108, "include": true, "reason": "import numpy", "num_tokens": 814}
|
{-# OPTIONS --safe #-}
module Cubical.Algebra.CommRing.QuotientRing where
open import Cubical.Foundations.Prelude
open import Cubical.Data.Nat
open import Cubical.Data.FinData
open import Cubical.HITs.SetQuotients as SQ renaming (_/_ to _/sq_)
open import Cubical.HITs.PropositionalTruncation as PT
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.CommRing.Ideal
open import Cubical.Algebra.CommRing.FGIdeal
open import Cubical.Algebra.Ring
open import Cubical.Algebra.Ring.QuotientRing renaming (_/_ to _/Ring_) hiding (asRing)
private
variable
ℓ ℓ' : Level
_/_ : (R : CommRing ℓ) → (I : IdealsIn R) → CommRing ℓ
R / I =
fst asRing , commringstr _ _ _ _ _
(iscommring (RingStr.isRing (snd asRing))
(elimProp2 (λ _ _ → squash/ _ _)
commEq))
where
asRing = (CommRing→Ring R) /Ring (CommIdeal→Ideal I)
_·/_ : fst asRing → fst asRing → fst asRing
_·/_ = RingStr._·_ (snd asRing)
commEq : (x y : fst R) → ([ x ] ·/ [ y ]) ≡ ([ y ] ·/ [ x ])
commEq x y i = [ CommRingStr.·Comm (snd R) x y i ]
[_]/ : {R : CommRing ℓ} {I : IdealsIn R} → (a : fst R) → fst (R / I)
[ a ]/ = [ a ]
--
module Quotient-FGideal-CommRing-Ring
(A'@(A , Ar) : CommRing ℓ)
(B'@(B , Br) : Ring ℓ')
(g'@(g , gr) : RingHom (CommRing→Ring A') B')
where
open CommRingStr Ar using ()
renaming
( 0r to 0A
; 1r to 1A
; _+_ to _+A_
; -_ to -A_
; _·_ to _·A_ )
open RingStr Br using ()
renaming
( 0r to 0B
; 1r to 1B
; _+_ to _+B_
; -_ to -B_
; _·_ to _·B_
; +Lid to +BIdL
; is-set to isSetB)
open CommRingStr
open IsRingHom
module _
{n : ℕ}
(v : FinVec A n)
(gnull : (k : Fin n) → g ( v k) ≡ 0B)
where
f : RingHom (CommRing→Ring (A' / (generatedIdeal _ v))) B'
fst f = SQ.rec (isSetB)
g
λ a b → PT.rec (isSetB _ _)
λ x → g a ≡⟨ cong g (sym (+Rid Ar a)) ⟩
g (a +A 0A) ≡⟨ cong (λ X → g (a +A X)) (sym (snd (+Inv Ar b))) ⟩
g (a +A ((-A b) +A b)) ≡⟨ cong g (+Assoc Ar a (-A b) b) ⟩
g ((a +A -A b) +A b) ≡⟨ pres+ gr (a +A -A b) b ⟩
(g(a +A -A b) +B g b) ≡⟨ cong (λ X → g X +B g b) (snd x) ⟩
(g (linearCombination A' (fst x) v) +B g b) ≡⟨ cong (λ X → X +B g b) (cancelLinearCombination A' B' g' n (fst x) v gnull) ⟩
0B +B g b ≡⟨ +BIdL (g b) ⟩
g b ∎
snd f = makeIsRingHom
(pres1 gr)
(elimProp (λ x p q i y j → isSetB _ _ (p y) (q y) i j)
λ a → elimProp (λ _ → isSetB _ _)
λ a' → pres+ gr a a')
(elimProp (λ x p q i y j → isSetB _ _ (p y) (q y) i j)
λ a → elimProp (λ _ → isSetB _ _)
λ a' → pres· gr a a')
module Quotient-FGideal-CommRing-CommRing
(A'@(A , Ar) : CommRing ℓ)
(B'@(B , Br) : CommRing ℓ')
(g'@(g , gr) : CommRingHom A' B')
{n : ℕ}
(v : FinVec A n)
(gnull : (k : Fin n) → g ( v k) ≡ CommRingStr.0r (snd B'))
where
f : CommRingHom (A' / (generatedIdeal _ v)) B'
f = Quotient-FGideal-CommRing-Ring.f A' (CommRing→Ring B') g' v gnull
|
{"hexsha": "057300ee3915a28511c57482b620746d9ea9cc0b", "size": 3555, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Cubical/Algebra/CommRing/QuotientRing.agda", "max_stars_repo_name": "xekoukou/cubical", "max_stars_repo_head_hexsha": "b6fbca9e83e553c5c2e4a16a2df7f9e9039034dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Cubical/Algebra/CommRing/QuotientRing.agda", "max_issues_repo_name": "xekoukou/cubical", "max_issues_repo_head_hexsha": "b6fbca9e83e553c5c2e4a16a2df7f9e9039034dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Cubical/Algebra/CommRing/QuotientRing.agda", "max_forks_repo_name": "xekoukou/cubical", "max_forks_repo_head_hexsha": "b6fbca9e83e553c5c2e4a16a2df7f9e9039034dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2242990654, "max_line_length": 145, "alphanum_fraction": 0.4683544304, "num_tokens": 1251}
|
[STATEMENT]
lemma asEnv_pickE:
assumes "goodEnv rho" shows "asEnv (pickE rho) xs x = rho xs x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. asEnv (pickE rho) xs x = rho xs x
[PROOF STEP]
using assms asTerm_pick
[PROOF STATE]
proof (prove)
using this:
goodEnv rho
good ?X \<Longrightarrow> asTerm (pick ?X) = ?X
goal (1 subgoal):
1. asEnv (pickE rho) xs x = rho xs x
[PROOF STEP]
by (cases "rho xs x") (auto simp: goodEnv_def liftAll_def asEnv_def pickE_def lift_comp lift_def)
|
{"llama_tokens": 205, "file": "Binding_Syntax_Theory_Transition_QuasiTerms_Terms", "length": 2}
|
module TestDefComposite
using Test
using Mimi
using MacroTools
import Mimi: ComponentPath, build, @defmodel
@defcomp Comp1 begin
par_1_1 = Parameter(index=[time]) # external input
var_1_1 = Variable(index=[time]) # computed
foo = Parameter()
function run_timestep(p, v, d, t)
v.var_1_1[t] = p.par_1_1[t]
end
end
@defcomp Comp2 begin
par_2_1 = Parameter(index=[time]) # connected to Comp1.var_1_1
par_2_2 = Parameter(index=[time]) # external input
var_2_1 = Variable(index=[time]) # computed
foo = Parameter()
function run_timestep(p, v, d, t)
v.var_2_1[t] = p.par_2_1[t] + p.foo * p.par_2_2[t]
end
end
@defcomposite A begin
component(Comp1)
component(Comp2)
# imports
bar = Comp1.par_1_1
foo2 = Comp2.foo
# linked imports
# foo = Comp1.foo, Comp2.foo
foo1 = Comp1.foo
foo2 = Comp2.foo
# connections
Comp2.par_2_1 = Comp1.var_1_1
Comp2.par_2_2 = Comp1.var_1_1
end
# doesn't work currently
# @defmodel m begin
# index[time] = 2005:2020
# component(A)
# A.foo1 = 10
# A.foo2 = 4
# end
m = Model()
years = 2005:2020
set_dimension!(m, :time, years)
add_comp!(m, A)
set_param!(m, "/A/Comp1", :par_1_1, 2:2:2*length(years))
a = m.md[:A]
set_param!(a, :Comp1, :foo, 10)
set_param!(a, :Comp2, :foo, 4) # TBD: why does this overwrite the 10 above??
build(m)
run(m)
end # module
m = TestDefComposite.m
A = TestDefComposite.A
md = m.md
nothing
|
{"hexsha": "7d9ec9ab0876207c26b62b1ba895009159679754", "size": 1512, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_defcomposite.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Mimi.jl-e4e893b0-ee5e-52ea-8111-44b3bdec128c", "max_stars_repo_head_hexsha": "c9336f1076996dca728c30befd561280dfc18318", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_defcomposite.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Mimi.jl-e4e893b0-ee5e-52ea-8111-44b3bdec128c", "max_issues_repo_head_hexsha": "c9336f1076996dca728c30befd561280dfc18318", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_defcomposite.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Mimi.jl-e4e893b0-ee5e-52ea-8111-44b3bdec128c", "max_forks_repo_head_hexsha": "c9336f1076996dca728c30befd561280dfc18318", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.9, "max_line_length": 81, "alphanum_fraction": 0.6302910053, "num_tokens": 534}
|
# Help Document
# -------------
#
# By convention, variable `i` is used to represent the index (or position) of a
# bit vector and `j` is used to represent the count (or cardinality) of a bit
# vector.
"""
rank0(rb, i)
Count the number of 0s (`false`s) within `bv[1:i]`.
"""
rank0
"""
rank1(bv, i)
Count the number of 1s (`true`s) within `bv[1:i]`.
"""
rank1
"""
rank(x, bv, i)
Count the number of `x`s within `bv[1:i]`.
"""
rank
"""
select0(bv, j)
Return the position of the `j`-th occurrence of 0 in `bv`.
"""
select0
"""
select1(bv, j)
Return the position of the `j`-th occurrence of 1 in `bv`.
"""
select1
"""
select(x, bv, j)
Return the position of the `j`-th occurrence of `x` in `bv`.
"""
select
"""
search(x, bv, i)
Search the position of the next `x` in `bv` starting from `i`.
"""
search
"""
search0(bv, i)
Search the position of the next 0 in `bv` starting from `i`.
"""
search0
"""
search1(bv, i)
Search the position of the next 1 in `bv` starting from `i`.
"""
search1
"""
rsearch(x, bv, i)
Search the position of the previous `x` in `bv` starting from `i`.
"""
rsearch
"""
rsearch0(bv, i)
Search the position of the previous 0 in `bv` starting from `i`.
"""
rsearch0
"""
rsearch1(bv, i)
Search the position of the previous 1 in `bv` starting from `i`.
"""
rsearch1
|
{"hexsha": "208ecb4dea810160c31a29864eae1af930fe3b69", "size": 1353, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/help.jl", "max_stars_repo_name": "bicycle1885/IndexedBitVectors.jl", "max_stars_repo_head_hexsha": "5f3a69a85bf9db2e274ca0a470341547711fbdc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-09-04T13:19:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T17:35:15.000Z", "max_issues_repo_path": "src/help.jl", "max_issues_repo_name": "bicycle1885/IndexedBitVectors.jl", "max_issues_repo_head_hexsha": "5f3a69a85bf9db2e274ca0a470341547711fbdc6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2015-09-04T17:04:53.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-17T03:54:17.000Z", "max_forks_repo_path": "src/help.jl", "max_forks_repo_name": "bicycle1885/IndexedBitVectors.jl", "max_forks_repo_head_hexsha": "5f3a69a85bf9db2e274ca0a470341547711fbdc6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2016-05-23T05:19:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-13T04:32:22.000Z", "avg_line_length": 14.5483870968, "max_line_length": 79, "alphanum_fraction": 0.6097560976, "num_tokens": 423}
|
Han is a badass.
Users/HelenWang i love my boyfriend because he is a badass. <3h
|
{"hexsha": "d05c560bbb8126fc5507b2837360b58a7a92ebc3", "size": 82, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/HanLwin.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/HanLwin.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/HanLwin.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.5, "max_line_length": 63, "alphanum_fraction": 0.756097561, "num_tokens": 24}
|
%% LyX 2.2.3 created this file. For more info, see http://www.lyx.org/.
%% Do not edit unless you really know what you are doing.
\documentclass{article}
\usepackage[latin9]{inputenc}
\usepackage{listings}
\renewcommand{\lstlistingname}{Listing}
\begin{document}
\part{Introduction}
CSCN files are easily editable text files (since the syntax is understood).
\part{Data types}
The data types used in scene files are these ones :
\emph{boolean} : a boolean (\emph{true} or \emph{false}).
\emph{int} : a simple integer.
\emph{real} : a floating point number, using dot ( . ) as the decimals
separator.
\emph{2, 3, 4 ints} : 2, 3 or 4 integers, separated by commas ( ,
) or spaces ( ).
\emph{2, 3, 4 reals} : 2, 3 or 4 floating point numbers, separated
by commas ( , ) or spaces ( ).
\emph{size} : 2 integers greater than or equal to 0.
\emph{2x2, 3x3, 4x4 reals matrix} : 2, 3 or 4 groups separated by
semi colons ( ; ) of 2, 3 or 4 floating point numbers separated by
commas ( , ) or spaces ( ).
\emph{rgb\_colour} : the RGB components of colour, expressed in floating
point numbers between 0.0 and 1.0.
\emph{rgba\_colour} : the RGBA components of colour, expressed in
floating point numbers between 0.0 and 1.0.
\emph{rgb\_hdr\_colour} : the RGB components of colour, expressed
in floating point numbers greater than or equal to 0.0.
\emph{rgba\_hdr\_colour} : the RGBA components of colour, expressed
in floating point numbers greater than or equal to 0.0.
\emph{value} : a string corresponding to a predefined value.
\emph{name} : a string, wrapped into double quotes ( \char`\"{} ).
\emph{file} : a string, wrapped into double quotes ( \char`\"{} ),
containing a file name and path.
\emph{folder} : a string, wrapped into double quotes ( \char`\"{}
), containing a folder path.
\part{Sections}
\section{Description}
The file is split into sections, defined as follows :
\begin{lstlisting}
[section_type] "[section_name]"
{
// Section description
}
\end{lstlisting}
Example:
\begin{lstlisting}
light "Light0"
{
type directional
colour 1.0 1.0 1.0
intensity 0.8 1.0
}
\end{lstlisting}
Some sections can have child subsections :
\begin{lstlisting}
material "Bronze"
{
pass
{
ambient 0.2125 0.1275 0.054
diffuse 0.714 0.4284 0.12144
emissive 0.0
specular 0.393548 0.271906 0.166721
shininess 25.6
}
}
\end{lstlisting}
\subsection{Sections list}
The possible sections are the following:
\begin{enumerate}
\item 'sampler'
Defines a texture sampler object.
\item 'material'
Defines a material.
\item 'mesh'
Defines a mesh.
\item 'font'
Defines a font used in text overlays.
\item 'window'
Defines a render window.
\item 'panel\_overlay'
Defines a simple panel overlay.
\item 'border\_panel\_overlay'
Defines a panel overlay with a border.
\item 'text\_overlay'
Defines a panel overlay with a text.
\item 'scene'
Defines a whole scene.
\end{enumerate}
\section{'sampler' section}
\begin{enumerate}
\item 'min\_filter' : \emph{value}
Value used for minification function. The possible values are :
\begin{itemize}
\item \emph{linear} : linear interpolation.
\item \emph{nearest} : no interpolation.
\end{itemize}
\item 'mag\_filter' : \emph{value}
Value used for magnification function. The possible values are :
\begin{itemize}
\item \emph{linear} : linear interpolation.
\item \emph{nearest} : no interpolation.
\end{itemize}
\item 'mip\_filter' : \emph{value}
Value used for mipmapping function. The possible values are :
\begin{itemize}
\item \emph{linear} : linear interpolation.
\item \emph{nearest} : no interpolation.
\end{itemize}
\item 'min\_lod' : \emph{real}
Defines minimum level of detail value.
\item 'max\_lod' : \emph{real}
Defines maximum level of detail value.
\item 'lod\_bias' : \emph{real}
Defines the MIP-Level.
\item 'u\_wrap\_mode' : \emph{value}
Defines the wrapping mode of the texture, for the U component. The
possible values are :
\begin{itemize}
\item \emph{repeat} : The texture is repeated.
\item \emph{mirrored\_repeat} : The texture is repeated, each instance of
2 being the mirror of the other one.
\item \emph{clamp\_to\_border} : The texture is stretched, the object edge
colour is defined as the texture edge colour.
\item \emph{clamp\_to\_edge} : The texture is stretched, the object edge
colour is defined as the average of the texture edge colour and the
border colour.
\end{itemize}
\item 'v\_wrap\_mode' : \emph{value}
Defines the wrapping mode of the texture, for the V component. The
possible values are :
\begin{itemize}
\item \emph{repeat} : The texture is repeated.
\item \emph{mirrored\_repeat} : The texture is repeated, each instance of
2 being the mirror of the other one.
\item \emph{clamp\_to\_border} : The texture is stretched, the object edge
colour is defined as the texture edge colour.
\item \emph{clamp\_to\_edge} : The texture is stretched, the object edge
colour is defined as the average of the texture edge colour and the
border colour.
\end{itemize}
\item 'w\_wrap\_mode' : \emph{value}
Defines the wrapping mode of the texture, for the W component. The
possible values are :
\begin{itemize}
\item \emph{repeat} : The texture is repeated.
\item \emph{mirrored\_repeat} : The texture is repeated, each instance of
2 being the mirror of the other one.
\item \emph{clamp\_to\_border} : The texture is stretched, the object edge
colour is defined as the texture edge colour.
\item \emph{clamp\_to\_edge} : The texture is stretched, the object edge
colour is defined as the average of the texture edge colour and the
border colour.
\end{itemize}
\item 'border\_colour' : \emph{rgba\_colour}
Defines the non textured border colour.
\item 'max\_anisotropy' : \emph{real}
Defines the maximum degree of anisotropy.
\end{enumerate}
\section{'material' section}
Materials can be multi-pass, so you can declare more than one pass
subsection.
\begin{enumerate}
\item 'pass' : \emph{new section}
Defines a new section describing a texture.
\end{enumerate}
\subsection{'pass' section}
\begin{enumerate}
\item 'diffuse' : \emph{rgb\_colour}
Defines diffuse colour of the pass (legacy materials only).
\item 'albedo' : \emph{rgb\_colour}
Defines albedo colour of the pass (not available on legacy materials).
\item 'specular' : \emph{rgb\_colour}
Defines specular colour of the pass (not available with metallic/roughness
materials).
\item 'metallic' : \emph{real (between 0 and 1)}
Defines the metallness of the pass (metallic/roughness materials only).
\item 'shininess' : \emph{real (between 0 and 128)}
Defines the specular exponent of the pass (legacy materials only).
\item 'glossiness' : \emph{real (between 0 and 1)}
Defines the glossiness of the pass (specular/glossiness materials
only).
\item 'roughness' : \emph{real (between 0 and 1)}
Defines the roughness of the pass (metallic/roughness materials only).
\item 'ambient' : \emph{real (between 0 and 1)}
Defines ambient factor of the pass (legacy materials only).
\item 'emissive' : \emph{real (between 0 and 1)}
Defines emissive factor of the pass.
\item 'alpha' : \emph{real (between 0 and 1)}
Defines the global alpha value for the pass.
\item 'two\_sided' : \emph{boolean}
Tells the pass is two sided (true) or not (false).
\item 'blend\_func' : src : \emph{value}, dst : \emph{value}
The two functions (source and destination) used during alpha blending
:
\begin{itemize}
\item \emph{zero} : The target (src or dst) won't be used during alpha blending.
\item \emph{one} : The target (src or dst) will be the only one used.
\item \emph{src\_colour} : The target colour will be the source colour (dst
only).
\item \emph{inv\_src\_colour} : The target colour will be one minus the
source colour (dst only).
\item \emph{dst\_colour} : The target colour will be the destination colour
(src only).
\item \emph{inv\_dst\_colour} : The target colour will be one minus the
destination colour (src only).
\item \emph{src\_alpha} : The target alpha will be the source alpha (dst
only).
\item \emph{inv\_src\_alpha} : The target alpha will be one minus the source
alpha (dst only).
\item \emph{dst\_alpha} : The target alpha will be the destination alpha
(src only).
\item \emph{inv\_dst\_alpha} : The target alpha will be one minus the destination
alpha (src only).
\item \emph{src\_alpha\_sat} : Sets source alpha to 1.
\end{itemize}
\item 'texture\_unit' : \emph{new section}
Defines a new section describing a texture unit.
\item 'alpha\_blend\_mode' : \emph{value}
Alpha blending mode name, can be one of:
\begin{itemize}
\item \emph{none} : No alpha blending.
\item \emph{additive} : Source and destination alpha values are added.
\item \emph{multiplicative} : Source and destination alpha values are multiplied.
\end{itemize}
\item 'colour\_blend\_mode' : \emph{value}
Colour blending mode name, can be one of:
\begin{itemize}
\item \emph{none} : No colour blending.
\item \emph{additive} : Source and destination colour values are added.
\item \emph{multiplicative} : Source and destination colour values are multiplied.
\end{itemize}
\item 'alpha\_func' : func : \emph{value} ref-val: \emph{real}
Defines the way alpha rejection is applied to the texture. The second
parameter is the reference value used in alpha rejection function.
The first parameter values can be :
\begin{itemize}
\item \emph{always} : The sample colour is always applied.
\item \emph{less} : The sample colour is applied if its alpha component
is less than the second parameter.
\item \emph{less\_or\_equal} : The sample colour is applied if its alpha
component is less than or equal to the second parameter.
\item \emph{equal} : The sample colour is applied if its alpha component
is equal to the second parameter.
\item \emph{not\_equal} : The sample colour is applied if its alpha component
is different from the second parameter.
\item \emph{greater\_or\_equal} : The sample colour is applied if its alpha
component is greater than or equal to the second parameter.
\item \emph{greater} : The sample colour is applied if its alpha component
is greater than the second parameter.
\item \emph{never} : The sample colour is never applied.
\end{itemize}
\item 'refraction\_ratio' : \emph{real}
Defines the refraction ratio of the pass. Note that if there is no
refraction map, the refraction is still applied, using only the skybox.
\item 'subsurface\_scattering' : \emph{new section}
Defines a new section describing subsurface scattering for the pass.
\item 'parallax\_occlusion' : \emph{boolean}
Enables or disables parallax occlusion mapping (needs a normal map
and a height map).
\end{enumerate}
\subsubsection{'texture\_unit' section}
\begin{enumerate}
\item 'image' : \emph{file}
Defines the image file name.
\item 'render\_target' : \emph{new section}
Defines a new section describing a render target.
\item 'colour' : \emph{colour}
Defines the base blending colour.
\item 'map\_type' : \emph{value}
Defines the way the texture is mapped to the object :
\begin{itemize}
\item \emph{none} : No effect.
\item \emph{reflection} : reflection mapping.
\item \emph{sphere} : sphere mapping.
\end{itemize}
\item 'rgb\_blend' : func : \emph{value} Arg0 : \emph{value} Arg1 : \emph{value}
Defines the texture behaviour during colour blending. The first parameter
is the blending function, the two other ones are the operands (Arg0
and Arg1) of this function. The firs parameter can take one of the
following values :
\begin{itemize}
\item \emph{none} : None of the two operands is used.
\item \emph{first\_arg} : Returns Arg0.
\item \emph{add} : Returns Arg0 + Arg1.
\item \emph{add\_signed} : Returns Arg0 + Arg1 - 0.5.
\item \emph{modulate} : Returns Arg0 x Arg1.
\item \emph{subtract} : Returns Arg0 - Arg1.
\item \emph{dot3\_rgb} : Returns 4 x {[}((Arg0r - 0.5) x (Arg1r - 0.5))
+ ((Arg0g - 0.5) x (Arg1g - 0.5)) + ((Arg0b - 0.5) x (Arg1b - 0.5)){]}.
\item \emph{dot3\_rgba} : Returns 4 x {[}((Arg0r - 0.5) x (Arg1r - 0.5))
+ ((Arg0g - 0.5) x (Arg1g - 0.5)) + ((Arg0b - 0.5) x (Arg1b - 0.5)){]}.
\end{itemize}
The two operands can be one of the following values :
\begin{itemize}
\item \emph{texture} : The current texture colour
\item \emph{texture0} : The first texture colour
\item \emph{texture1} : The second texture colour
\item \emph{texture2} : The third texture colour
\item \emph{texture3} : The fourth texture colour
\item \emph{constant} :
\item \emph{diffuse} :
\item \emph{previous} :
\end{itemize}
\item 'alpha\_blend' : func : \emph{value} Arg0 : \emph{value} Arg1 : \emph{value}
Defines the texture behaviour during alpha blending. The first parameter
is the blending function, the two other ones are the operands (Arg0
and Arg1) of this function. The first parameter can take one of the
following values :
\begin{itemize}
\item \emph{none} : None of the two operands is used.
\item \emph{first\_arg} : Returns Arg0.
\item \emph{add} : Returns Arg0 + Arg1.
\item \emph{add\_signed} : Returns Arg0 + Arg1 - 0.5.
\item \emph{modulate} : Returns Arg0 x Arg1.
\item \emph{subtract} : Returns Arg0 - Arg1.
\end{itemize}
The two operands can be one of the following values :
\begin{itemize}
\item \emph{texture} : The current texture colour
\item \emph{texture0} : The first texture colour
\item \emph{texture1} : The second texture colour
\item \emph{texture2} : The third texture colour
\item \emph{texture3} : The fourth texture colour
\item \emph{constant} :
\item \emph{diffuse} :
\item \emph{previous} :
\end{itemize}
\item 'channel' : \emph{value}
The channel at which the texture is bound. Can be one of the following
:
\begin{itemize}
\item \emph{colour} : Base colour.
\item \emph{ambient} : Ambient lighting colour.
\item \emph{diffuse} : Diffuse lighting colour.
\item \emph{normal} : Normals.
\item \emph{specular} : Specular lighting colour.
\item \emph{opacity} : Opacity.
\item \emph{gloss} : Specular exponent.
\end{itemize}
\item 'sampler' : \emph{name}
Defines the sampler object used by the texture.
\end{enumerate}
\subsubsection{'shader\_program' section}
\begin{enumerate}
\item 'vertex\_program' : \emph{new section}
Defines a new section describing the vertex program.
\item 'pixel\_program' : \emph{new section}
Defines a new section describing the pixel program.
\item 'geometry\_program' : \emph{new section}
Defines a new section describing the geometry program.
\item 'hull\_program' : \emph{new section}
Defines a new section describing the hull (tessellation control) program.
\item 'domain\_program' : \emph{new section}
Defines a new section describing the domain (tessellation evaluation)
program.
\item 'constants\_buffer' : \emph{new section}
Defines a new section dexcribing a constants buffer (uniform buffer).
\end{enumerate}
\subsubsection{'vertex/pixel/geometry/hull/domain\_program' section}
\begin{enumerate}
\item 'file' : \emph{file}
Shader file name
\item 'sampler' : \emph{name}
Creates a new variable of sample (1D, 2D, \ldots{}) type, for the
pixel shader.
\item 'input\_type' : \emph{value}
Defines the input faces data type, for geometry shader. Can be one
of the following :
\begin{itemize}
\item \emph{points} : Points.
\item \emph{lines} : Disjoint lines.
\item \emph{line\_loop} : Joint lines loop.
\item \emph{line\_strip} : Joint lines.
\item \emph{triangles} : Disjoint triangles.
\item \emph{triangle\_strip} : Joint triangles.
\item \emph{triangle\_fan} : Triangles joint using the first point.
\item \emph{quads} : Disjoint quads.
\item \emph{quad\_strip} : Joint quads.
\item \emph{polygon} : Polygons.
\end{itemize}
\item 'output\_type' : \emph{value}
Defines the geometry chader output data type. Can be one of the following
:
\begin{itemize}
\item \emph{points} : Points.
\item \emph{line\_strip} : Joint lines.
\item \emph{triangle\_strip} : Joint triangles.
\item \emph{quad\_strip} : Joint quads.
\end{itemize}
\item 'output\_vtx\_count' : \emph{int}
Defines the geometry shader output vertices.
\item 'variable' : \emph{new section}
Defines a new section describing a uniform variable.
\end{enumerate}
\subsubsection{'constants\_buffer' section}
\begin{enumerate}
\item 'shaders' : \emph{bitwise ORed values}
Shader types to which this buffer applies, can be one of:
\begin{itemize}
\item \emph{vertex}
\item \emph{hull}
\item \emph{domain}
\item \emph{geometry}
\item \emph{pixel}
\item \emph{compute}
\end{itemize}
\item 'variable' : \emph{name}, \emph{new section}
Defines a new section describing a variable for this buffer.
\end{enumerate}
\subsubsection{'variable' section}
\begin{enumerate}
\item 'type' : \emph{value}
Variable type name, can be :
\begin{itemize}
\item \emph{int} : 1 signed integer.
\item \emph{uint} : 1 unsigned integer.
\item \emph{float} : 1 simple precision floating point number.
\item \emph{double} : 1 double precision floating point number.
\item \emph{vec2i} : 2 signed integers.
\item \emph{vec3i} : 3 signed integers.
\item \emph{vec4i} : 4 signed integers.
\item \emph{vec2f} : 2 simple precision floating point numbers.
\item \emph{vec3f} : 3 simple precision floating point numbers.
\item \emph{vec4f} : 4 simple precision floating point numbers.
\item \emph{vec2d} : 2 double precision floating point numbers.
\item \emph{vec3d} : 3 double precision floating point numbers.
\item \emph{vec4d} : 4 double precision floating point numbers.
\item \emph{mat2x2i} : 2x2 signed integers matrix.
\item \emph{mat2x3i} : 2x3 signed integers matrix.
\item \emph{mat2x4i} : 2x4 signed integers matrix.
\item \emph{mat3x2i} : 3x2 signed integers matrix.
\item \emph{mat3x3i} : 3x3 signed integers matrix.
\item \emph{mat3x4i} : 3x4 signed integers matrix.
\item \emph{mat4x2i} : 4x2 signed integers matrix.
\item \emph{mat4x3i} : 4x3 signed integers matrix.
\item \emph{mat4x4i} : 4x4 signed integers matrix.
\item \emph{mat2x2f} : 2x2 simple precision floating point numbers matrix.
\item \emph{mat2x3f} : 2x3 simple precision floating point numbers matrix.
\item \emph{mat2x4f} : 2x4 simple precision floating point numbers matrix.
\item \emph{mat3x2f} : 3x2 simple precision floating point numbers matrix.
\item \emph{mat3x3f} : 3x3 simple precision floating point numbers matrix.
\item \emph{mat3x4f} : 3x4 simple precision floating point numbers matrix.
\item \emph{mat4x2f} : 4x2 simple precision floating point numbers matrix.
\item \emph{mat4x3f} : 4x3 simple precision floating point numbers matrix.
\item \emph{mat4x4f} : 4x4 simple precision floating point numbers matrix.
\item \emph{mat2x2d} : 2x2 double precision floating point numbers matrix.
\item \emph{mat2x3d} : 2x3 double precision floating point numbers matrix.
\item \emph{mat2x4d} : 2x4 double precision floating point numbers matrix.
\item \emph{mat3x2d} : 3x2 double precision floating point numbers matrix.
\item \emph{mat3x3d} : 3x3 double precision floating point numbers matrix.
\item \emph{mat3x4d} : 3x4 double precision floating point numbers matrix.
\item \emph{mat4x2d} : 4x2 double precision floating point numbers matrix.
\item \emph{mat4x3d} : 4x3 double precision floating point numbers matrix.
\item \emph{mat4x4d} : 4x4 double precision floating point numbers matrix.
\end{itemize}
\item 'count' : \emph{int}
Variable occurences count (array size).
\item 'value' :
Variable value, depends on the chosen type.
\end{enumerate}
\subsubsection{'subsurface\_scattering' section}
\begin{enumerate}
\item 'strength' : \emph{real}
Defines the strength of the effect.
\item 'gaussian\_width' : \emph{real}
Defines the width of the Gaussian blur.
\item 'transmittance\_profile' : \emph{new section}
Defines a new section describing the transmittance profile.
\end{enumerate}
\subsubsection{'transmittance\_profile' section}
\begin{enumerate}
\item 'factor' : \emph{vec4f}
Defines the three RGB components of the colour, and the fourth component
is used for the exponent of that colour.
\end{enumerate}
\section{'font' section}
\begin{enumerate}
\item 'file' : \emph{file}
Defines the file holding the font.
\item 'height' : \emph{int}
Defines the height (precision) of the font.
\end{enumerate}
\section{'scene' section}
\begin{enumerate}
\item 'ambient\_light' : \emph{colour}
Defines the ambient lighting colour. For PBR materials, defines the
influence of the IBL on the scene.
\item 'background\_colour' : \emph{colour}
Defines the background colour.
\item 'background\_image' : \emph{file}
Defines the background image.
\item 'import' : \emph{file}
Allows scene import from a CSCN file or another file format supported
by Castor3D importer plug-ins.
\item 'scene\_node' : \emph{new section}
Defines a new section describing a scene node for objects, lights
or billboards.
\item 'camera\_node' : \emph{new section}
Defines a new section describing a scene node for cameras.
\item 'light' : \emph{new section}
Defines a new section describing a light source.
\item 'object' : \emph{new section}
Defines a new section describing an object.
\item 'billboard' : \emph{new section}
Defines a new section describing a billboards list.
\item 'camera' : \emph{new section}
Defines a new section describing a camera.
\item 'panel\_overlay' : \emph{new section}
Defines a new section describing a simple panel overlay.
\item 'border\_panel\_overlay' : \emph{new section}
Defines a new section describing a simple panel overlay with a border.
\item 'text\_overlay' : \emph{new section}
Defines a new section describing a simple panel overlay with text.
\item 'animated\_object\_group' : \emph{new section}
Defines a new section describing an animated object group, with common
animations.
\item 'mesh' : \emph{new section}
Defines a new section describing a mesh, that can be used with one
or more objects.
\item 'particle\_system' : \emph{new section}
Defines a new section describing a particle system.
\item 'skybox' : \emph{new section}
Defines a new section describing the skybox.
\item 'include' : \emph{file}
Includes a scene file, allowing you to split your scene in multiple
files.
\item 'sampler' : \emph{new section}
Defines a new section describing a sampler.
\item 'fog\_type' : \emph{value}
Defines the fog type for the scene. Possible values are:
\begin{itemize}
\item \emph{linear}: Fog intensity increases linearly with distance to camera.
\item \emph{exponential}: Fog intensity increases exponentially with distance
to camera.
\item \emph{squared\_exponential}: Fog intensity increases even more with
distance to camera.
\end{itemize}
\item 'fog\_density' : \emph{real}
Defines the fog density, which is multiplied by the distance, according
to chosen fog type.
\item 'hdr\_config' : \emph{real}
Defines a new section describing the HDR configuration.
\end{enumerate}
\subsection{'hdr\_config' section}
\begin{enumerate}
\item 'exposure' : \emph{real}
Defines the scene's exposure.
\item 'gamma' : \emph{real}
Defines the gamma correction.
\end{enumerate}
\subsection{'scene\_node' and 'camera\_node' sections}
\begin{enumerate}
\item 'parent' : \emph{name}
Defines this node's parent. The default parent node is the root node.
\item 'position' : \emph{3 reals}
Node position relative to its parent.
\item 'orientation' : \emph{4 reals}
A quaternion holding node orientation relative to its parent.
\item 'scale' : \emph{3 reals}
Node scale relative to its parent.
\end{enumerate}
\subsection{'light' section}
\begin{enumerate}
\item 'type' : \emph{value}
Three light source types exist in Castor3D :
\begin{itemize}
\item \emph{directional} : directional light (like the sun).
\item \emph{point\_light} : a positioned source which emits in all directions.
\item \emph{spot\_light} : a positioned source which emits in an oriented
cone.
\end{itemize}
\item 'colour' : \emph{rgb\_colour}
Defines the colour for this source.
\item 'intensity' : \emph{2 reals}
Defines the diffuse and specular intensities for this source.
\item 'attenuation' : \emph{3 reals}
Defines the three attenuation components : constant, linear and quadratic.
This attenuation is computed from the distance to the light source.
Only for spot\_light and point\_light.
\item 'cut\_off' : \emph{real}
Defines the angle of the emission cone.
Only for spot\_light.
\item 'exponent' : \emph{real}
Defines the attenuation computed with the distance from the emission
cone centre.
Only for spot\_light.
\item 'parent' : \emph{name}
Defines the node which this light source is attached to.
\item 'shadow\_producer' : \emph{boolean}
Defines if the light produces shadows (\emph{true}) or not (\emph{false},
default value).
\end{enumerate}
\subsection{'object' section}
\begin{enumerate}
\item 'parent' : \emph{name}
Defines the node which this object is attached to.
\item 'mesh' : \emph{name}
Defines the mesh this object uses.
\item 'mesh' : \emph{name} \emph{new section}
Defines a new section describing a mesh with the given name.
\item 'material' : \emph{name}
Name of a material, defined in a .cmtl file or in this file. Applies
this material too all the submeshes.
\item 'materials' : \emph{new section}
New section used to specify each submesh's material.
\item 'cast\_shadows' : \emph{boolean}
Defines if the object casts shadows (\emph{true}, default value) or
not (\emph{false}).
\item 'receive\_shadows' : \emph{boolean}
Defines if the object receives shadows (\emph{true}, default value)
or not (\emph{false}).
\end{enumerate}
\subsubsection{'materials' section}
\begin{enumerate}
\item 'material' : \emph{int}, \emph{name}
Submesh index, and material name for this submesh.
\end{enumerate}
\subsection{'billboard' section}
Allows the definition of billboards that share the same material and
dimensions.
\begin{enumerate}
\item 'parent' : \emph{name}
Defines the parent scene node.
\item 'positions' : \emph{new section}
Defines a new section describing each billboard position.
\item 'material' : \emph{name}
Defines the material used by every billboard of this list.
\item 'dimensions' : \emph{size}
Defines billboards dimensions.
\item 'type' : \emph{value}
Defines the type of billboard. Possible values are:
\begin{itemize}
\item \emph{cylindrical}: The billboard faces the camera, except for their
Y axis, which remains still.
\item \emph{spherical}: The billboard faces the camera on all axes.
\end{itemize}
\item 'size' : \emph{value}
Defines the billboards sizing. Possible values are:
\begin{itemize}
\item \emph{dynamic}: The size varies, depending on the distance to camera.
\item \emph{fixed}: The size is fixed, where the camera is.
\end{itemize}
\end{enumerate}
\subsubsection{'positions' section}
\begin{enumerate}
\item 'pos' : \emph{3 reals}
Defines the relative position of a billboard.
\end{enumerate}
\subsection{'camera' section}
\begin{enumerate}
\item 'parent' : \emph{name}
Defines the parent CameraNode.
\item 'primitive' : \emph{value}
Defines the display type. Can be one of :
\begin{itemize}
\item \emph{points} : Points.
\item \emph{lines} : Disjointed lines.
\item \emph{line\_loop} : Jointed lines loop.
\item \emph{line\_strip} : Joined lines.
\item \emph{triangles} : Disjointed triangles.
\item \emph{triangle\_strip} : Jointed triangles.
\item \emph{triangle\_fan} : Triangles jointed using the first point.
\item \emph{quads} : Disjointed quads.
\item \emph{quad\_strip} : Jointed quads.
\item \emph{polygon} : Polygons.
\end{itemize}
\item 'viewport' : \emph{new section}
Defines the camera view port.
\end{enumerate}
\subsubsection{'viewport' section}
\begin{enumerate}
\item 'type' : \emph{value}
Window display type, 2d or 3d.
\item 'left' : \emph{real}
Defines the minimum displayed X coordinate.
\item 'right' : \emph{real}
Defines the maximum displayed X coordinate.
\item 'top' : \emph{real}
Defines the minimum displayed Y coordinate.
\item 'bottom' : \emph{real}
Defines the maximum displayed Y coordinate.
\item 'near' : \emph{real}
Defines the minimum displayed Z coordinate.
\item 'far' : \emph{real}
Defines the maximum displayed Z coordinate.
\item 'size' : \emph{size}
Defines the window display size (in pixels).
\item 'fov\_y' : \emph{real}
Defines the vertical field of view angle, in radians.
\item 'aspect\_ratio' : \emph{real}
Defines the global window aspect ratio (1.33333 for 4/3, 1.77777 for
16/9 \ldots{} ).
\end{enumerate}
\subsection{'animated\_object\_group' section}
\begin{enumerate}
\item 'animated\_object' : \emph{name}
Adds the object with the given name to the group.
\item 'animation' : \emph{name}
Adds the animation with the given name to the group's common animations
list.
\item 'start\_animation' : \emph{name}
Starts the given animation.
\item 'pause\_animation' : \emph{name}
Pauses the given animation (which must have been started first).
\end{enumerate}
\subsubsection{'animation' section}
\begin{enumerate}
\item 'looped' : \emph{boolean}
Defines if the animation is looped (\emph{true}) or not (\emph{false},
default value).
\item 'scale' : \emph{real}
Defines the time scale of the animation (can be negative, the animation
will then be played backwards).
\end{enumerate}
\section{'mesh' section}
\begin{enumerate}
\item 'type' : \emph{name}
Mesh type name, one of :
\begin{itemize}
\item \emph{custom} : manually defined mesh or imported mesh.
\item \emph{cube} : a cube, user must then define its three dimensions.
\item \emph{cone} : a cone, user must then define its radius and height.
\item \emph{cylinder} : a cylinder, user must then define its radius and
height.
\item \emph{sphere} : a sphere with quad faces, user must then define the
subdivisions count and the radius.
\item \emph{icosahedron} : a sphere with triangular faces, user must then
define the subdivisions count and the radius.
\item \emph{torus} : a torus, user must then define the internal and external
subdivisions count and the internal and external radius.
\item \emph{plane} : a plane, user must then define the width and depth
subdivisions count and the width and depth.
\end{itemize}
\item 'submesh' : \emph{new section}
Defines a new section describing a submesh. Only if the mesh type
is custom.
\item 'import' : \emph{file} \emph{\textless{}options\textgreater{}}
Allows import of mesh data from a file, in CMSH file format or any
format supported by Castor3D import plug-ins. Only if the mesh type
is custom. This directive can accept few optional parameters :
\begin{itemize}
\item \emph{smooth\_normals} : Computes normals per vertex during import.
\item \emph{flat\_normals} : Computes normals per face during the import.
\item \emph{tangent\_space} : Computes tangent space informations (tangent
and bi-tangent) during import.
\item \emph{rescale}=\emph{real} : Rescales the resulting mesh by given
factor, on three axes.
\end{itemize}
\item 'morph\_import' : \emph{file} \emph{\textless{}options}\textgreater{}
Allows import of mesh data from a file, to add mophing animation.
This directive must happen after a first import directive. Only if
the mesh type is custom. This directive can accept few optional parameters
:
\begin{itemize}
\item \emph{smooth\_normals} : Computes normals per vertex during import.
\item \emph{flat\_normals} : Computes normals per face during the import.
\item \emph{tangent\_space} : Computes tangent space informations (tangent
and bi-tangent) during import.
\item \emph{rescale}=\emph{real} : Rescales the resulting mesh by given
factor, on three axes.
\end{itemize}
\item 'division' : \emph{name} \emph{int}
Allows the mesh subdivision, using a supported Castor3D divider plug-in
algorithm. The second parameter is the application count of the algorithm
(its applied recursively).
\end{enumerate}
\subsection{'submesh' section}
\begin{enumerate}
\item 'vertex' : \emph{3 reals}
Defines a vertex position.
\item 'uv' : \emph{2 reals}
Defines the UV texture coordinates for the previously declared vertex.
\item 'uvw' : \emph{3 reals}
Defines the UVW texture coordinates for the previously declared vertex.
\item 'normal' : \emph{3 reals}
Defines the normal coordinates for the previously declared vertex.
\item 'tangent' : \emph{3 reals}
Defines the tangent coordinates for the previously declared vertex.
\item 'face' : \emph{3 or 4 integers}
Defines a face using the three or four vertices whose indices are
given. If more than three indices are given, creates the appropriate
count of triangular faces.
\item 'face\_uv' : \emph{as much uv as the face indices}
Defines the UV coordinates for each vertex of the previously declared
face.
\item 'face\_uvw' : \emph{as much uvw as the face indices}
Defines the UVW coordinates for each vertex of the previously declared
face.
\item 'face\_normals' : \emph{as much 3 reals groups as the face indices}
Defines the normals coordinates for each vertex of the previously
declared face.
\item 'face\_tangents' : \emph{as much 3 reals groups as the face indices}
Defines the tangents coordinates for each vertex of the previously
declared face.
\end{enumerate}
\section{'panel\_overlay' section}
\begin{enumerate}
\item 'material' : \emph{name}
Defines the material used by the panel.
\item 'position' : \emph{2 reals}
Defines the overlay position, relative to its parent (or to screen,
if no parent).
\item 'size' : \emph{2 reals}
Defines the overlay size, relative to its parent (or to screen, if
no parent).
\item 'pxl\_position' : \emph{2 ints}
Defines the absolute position for the overlay, in pixels.
\item 'pxl\_size' : \emph{2 ints}
Defines the absolute size for the overlay, in pixels.
\item 'uv' : \emph{4 reals}
Defines the UV for the overlay (left, top, right, bottom).
\item 'panel\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a simple panel children overlay.
\item 'border\_panel\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a border panel children overlay.
\item 'text\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a text panel children overlay.
\end{enumerate}
\section{'border\_panel\_overlay' section}
\begin{enumerate}
\item 'material' : \emph{name}
Defines the material used by the panel.
\item 'position' : \emph{2 reals}
Defines the overlay position, relative to its parent (or to screen,
if no parent).
\item 'size' : \emph{2 reals}
Defines the overlay size, relative to its parent (or to screen, if
no parent).
\item 'pxl\_position' : \emph{2 ints}
Defines the absolute position for the overlay, in pixels.
\item 'pxl\_size' : \emph{2 ints}
Defines the absolute size for the overlay, in pixels.
\item 'center\_uv' : \emph{4 reals}
Defines the UV for the center of the overlay (left, top, right, bottom).
\item 'border\_material' : \emph{name}
Defines the material used for the panel's border.
\item 'border\_position' : \emph{value}
Defines the border's position, can be one of:
\begin{itemize}
\item \emph{internal} : The border is inside the overlay.
\item \emph{middle} : The border is half inside, half outside the overlay.
\item \emph{external} : The border is outside the overlay.
\end{itemize}
\item 'border\_size' : \emph{4 reals}
Defines the borders sizes (left right, top, bottom).
\item 'pxl\_border\_size' : \emph{2 ints}
Defines the absolute border size, in pixels.
\item 'border\_inner\_uv' : \emph{4 reals}
Defines the UV for the border of the overlay, inner side (left, top,
right, bottom).
\item 'border\_outer\_uv' : \emph{4 reals}
Defines the UV for the border of the overlay, outer side (left, top,
right, bottom).
\item 'panel\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a simple panel children overlay.
\item 'border\_panel\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a border panel children overlay.
\item 'text\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a text panel children overlay.
\end{enumerate}
\section{'text\_overlay' section}
\begin{enumerate}
\item 'material' : \emph{name}
Defines the material used by the panel.
\item 'position' : \emph{2 reals}
Defines the overlay position, relative to its parent (or to screen,
if no parent).
\item 'size' : \emph{2 reals}
Defines the overlay size, relative to its parent (or to screen, if
no parent).
\item 'pxl\_position' : \emph{2 ints}
Defines the absolute position for the overlay, in pixels.
\item 'pxl\_size' : \emph{2 ints}
Defines the absolute size for the overlay, in pixels.
\item 'font' : \emph{name}
Defines the font used to display the text.
\item 'text' : \emph{texte}
Defines the displayed text.
\item 'text\_wrapping' : \emph{value}
Defines the way the text is cut when a line overflows the overlay
dimensions. Can be one of:
\begin{itemize}
\item \emph{none} : The text is not cut (the part that overflows won't be
displayed, though).
\item \emph{break} : The text is cut at the letter (the words will be cut).
\item \emph{break\_words} : The text is cut at the word (the words remain
uncut).
\end{itemize}
\item 'vertical\_align' : \emph{value}
Defines the text vertical alignment:
\begin{itemize}
\item \emph{top} : Align on top.
\item \emph{center} : Vertically center.
\item \emph{bottom} : Align on bottom.
\end{itemize}
\item 'horizontal\_align' : \emph{value}
Defines the text horizontal alignment:
\begin{itemize}
\item \emph{left} : Align on left.
\item \emph{center} : Horizontally center.
\item \emph{right} : Align on right.
\end{itemize}
\item 'texturing\_mode' : \emph{value}
Defines the way the texture is applied:
\begin{itemize}
\item \emph{letter} : The texture is applied on each letter.
\item \emph{text} : The texture is applied on the whole text.
\end{itemize}
\item 'line\_spacing\_mode' : \emph{value}
Defines the height of the lines:
\begin{itemize}
\item \emph{own\_height} : Each line has its own height.
\item \emph{max\_lines\_height} : Each line has the height of the line with
the maximum height.
\item \emph{max\_font\_height} : Each line has the height of the character
with the maximum height in the font.
\end{itemize}
\item 'panel\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a simple panel children overlay.
\item 'border\_panel\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a border panel children overlay.
\item 'text\_overlay' : \emph{name} \emph{new section}
Defines a new section describing a text panel children overlay.
\end{enumerate}
\section{Section 'window'}
\begin{enumerate}
\item 'render\_target' : \emph{new section}
Defines a new section describing the render target.
\item 'vsync' : \emph{boolean}
Defines the activation or deactivation of vertical synchronisation.
\item 'fullscreen' : \emph{boolean}
Defines the activation or deactivation of full-screen display.
\end{enumerate}
\section{Section 'render\_target'}
\begin{enumerate}
\item 'scene' : \emph{nom}
Defines the scene rendered in this target.
\item 'camera' : \emph{nom}
Defines the camera used to render the scene.
\item 'size' : \emph{size}
Defines the internal buffer dimensions.
\item 'format' : \emph{value}
Defines the colour buffer pixel format. Can be one of :
\begin{itemize}
\item \emph{l8} : Luminance 8 bits, one 8 bits integral number.
\item \emph{l16f} : Luminance 16 bits, one 16 bits floating point number
(half float).
\item \emph{l32f} : Luminance 32 bits, one 32 bits floating point number(float).
\item \emph{al16} : Alpha + Luminance, two 8 bits integral number.
\item \emph{al16f} : Alpha + Luminance, two 16 bits floating point number
(half float).
\item \emph{al32f} : Alpha + Luminance, two 32 bits floating point number
(float).
\item \emph{argb1555} : ARGB 16 bits, 1 bit for alpha and each other component
on a 5 bits integer.
\item \emph{rgb565} : RGB 16 bits, R and B on a 5 bits integer, G on a 6
bits integer.
\item \emph{argb16} : ARGB 16 bits, each component on a 4 bits integer.
\item \emph{rgb24} : RGB 24 bits, each component on a 8 bits integer.
\item \emph{argb32} : ARGB 32 bits, each component on a 8 bits integer.
\item \emph{argb16f} : ARGB 64 bits, each component on a 16 bits floating
point number (half float).
\item \emph{rgb32f} : RGB 96 bits, each component on a 32 bits floating
point number (half float).
\item \emph{argb32f} : ARGB 128 bits, each component on a 32 bits floating
point number (half float).
\end{itemize}
\item 'depth' : \emph{value}
Defines the depth/stencil buffer pixel format. Can be one of :
\begin{itemize}
\item \emph{depth16} : Depth on a 16 bits integer.
\item \emph{depth24} : Depth on a 24 bits integer.
\item \emph{depth24s8} : Depth on a 24 bits integer, Stencil on a 8 bits
integer.
\item \emph{depth32fs8} : Depth on a 32 bits floating point, Stencil on
a 8 bits integer.
\item \emph{depth32} : Depth on a 32 bits integer.
\item \emph{depth32f} : Depth on a 32 bits floating point.
\item \emph{stencil1} : Stencil on 1 bit.
\item \emph{stencil8} : Stencil on a 8 bits integer.
\end{itemize}
\item 'postfx' : \emph{value}
Defines a post render effect to use. The parameters depend on the
chosen effect.
\item 'stereo' : \emph{boolean}
Tells if we use stereoscopic display mode.
\item 'tone\_mapping' : \emph{name}
Defines the tone mapping operator to use with the render target.
\item 'ssao' : \emph{new section}
Defines a new section describing the Screen Space Ambient Occlusion.
\end{enumerate}
\subsection{'ssao' section}
\begin{enumerate}
\item 'enabled' : \emph{boolean}
Defines the activation status of SSAO.
\item 'high\_quality' : \emph{boolean}
Defines the quality of the effect.
\item 'radius' : \emph{real}
Defines the radius of the effect (expressesd in meters).
\item 'bias' : \emph{real}
Defines the bias of the effect.
\item 'intensity' : \emph{real}
Defines the intensity of the effect.
\item 'num\_samples' : \emph{int}
Defines the number of samples per pixel.
\item 'edge\_sharpness' : \emph{real}
Defines the edge sharpness, in the blur pass.
\item 'blur\_step\_size' : \emph{int}
Defines the size of a step in the blur pass.
\item 'blur\_radius' : \emph{int}
Defines the blur radius.
\end{enumerate}
\end{document}
|
{"hexsha": "0271de3178182e296c60961de64141c3a4f0c483", "size": 42354, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/Castor3D/wiki_scene_files-en.tex", "max_stars_repo_name": "Mu-L/Castor3D", "max_stars_repo_head_hexsha": "7b9c6e7be6f7373ad60c0811d136c0004e50e76b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 245, "max_stars_repo_stars_event_min_datetime": "2015-10-29T14:31:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:04:45.000Z", "max_issues_repo_path": "doc/Castor3D/wiki_scene_files-en.tex", "max_issues_repo_name": "Mu-L/Castor3D", "max_issues_repo_head_hexsha": "7b9c6e7be6f7373ad60c0811d136c0004e50e76b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2016-03-11T19:45:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:58:33.000Z", "max_forks_repo_path": "doc/Castor3D/wiki_scene_files-en.tex", "max_forks_repo_name": "Mu-L/Castor3D", "max_forks_repo_head_hexsha": "7b9c6e7be6f7373ad60c0811d136c0004e50e76b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-05-24T09:07:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T21:05:20.000Z", "avg_line_length": 32.58, "max_line_length": 83, "alphanum_fraction": 0.7353496718, "num_tokens": 12583}
|
Require Export Coq.NArith.NArith.
Require Export Bedrock.Memory Bedrock.Word.
Require Export
Fiat.Narcissus.Automation.SolverOpt
Fiat.Narcissus.BinLib.Bool
Fiat.Narcissus.BinLib.Core
Fiat.Narcissus.BinLib.Enum
Fiat.Narcissus.BinLib.FixInt
Fiat.Narcissus.Common.Compose
Fiat.Narcissus.Common.ComposeCheckSum
Fiat.Narcissus.Common.ComposeIf
Fiat.Narcissus.Common.Specs
Fiat.Narcissus.Common.WordFacts
Fiat.Narcissus.Lib.FixList
Fiat.Narcissus.Lib.IList
Fiat.Narcissus.Lib2.Bool
Fiat.Narcissus.Lib2.EnumOpt
Fiat.Narcissus.Lib2.FixListOpt
Fiat.Narcissus.Lib2.NatOpt
Fiat.Narcissus.Lib2.NoCache
Fiat.Narcissus.Lib2.SumTypeOpt
Fiat.Narcissus.Lib2.Vector
Fiat.Narcissus.Lib2.WordOpt
Fiat.Narcissus.Lib2.IPChecksum.
Unset Implicit Arguments.
Open Scope nat_scope.
Notation BoundedList A size := { ls: list A | List.length ls < size }.
Notation BoundedNat size := { n: nat | (n < pow2 size)%nat }.
Notation BoundedN size := { n: N | (n < FixInt.exp2 size)%N }.
Definition BoundedListLength {A size} (ls : BoundedList A (pow2 size)) : BoundedNat size :=
exist _ (length (` ls)) (proj2_sig ls).
Section Nat.
Context {B : Type}.
Context {cache : Cache}.
Context {cacheAddNat : CacheAdd cache nat}.
Context {transformer : Transformer B}.
Context {transformerUnit : TransformerUnitOpt transformer bool}.
(* TODO move *)
Definition EncodeBoundedNat {k} (n : BoundedNat k) (ce : CacheEncode) : B * CacheEncode :=
(* NToWord + N.of_nat needed for performance (otherwise [apply] doesn't terminate) *)
encode_word_Impl (@NToWord k (N.of_nat (`n))) ce.
End Nat.
(* TODO move *)
Definition BtoW (b: B) : W :=
(zext b 24).
|
{"author": "mit-plv", "repo": "fiat", "sha": "4c78284c3a88db32051bdba79202f40c645ffb7f", "save_path": "github-repos/coq/mit-plv-fiat", "path": "github-repos/coq/mit-plv-fiat/fiat-4c78284c3a88db32051bdba79202f40c645ffb7f/src/CertifiedExtraction/Extraction/BinEncoders/Basics.v"}
|
"""
This example of the double integrator demonstrates how to pass constraints to PyTrajectory.
"""
# imports
from pytrajectory import TransitionProblem
import numpy as np
def f(xx, uu, uuref, t, pp):
""" Right hand side of the vectorfield defining the system dynamics
:param xx: state
:param uu: input
:param uuref: reference input (not used)
:param t: time (not used)
:param pp: additionial free parameters (not used)
:return: xdot
"""
x1, x2 = xx
u1, = uu
ff = [x2,
u1]
return ff
# system state boundary values for a = 0.0 [s] and b = 2.0 [s]
xa = [0.0, 0.0]
xb = [1.0, 0.0]
# constraints dictionary
con = {'x2': [-0.1, 0.65]}
# create the trajectory object
S = TransitionProblem(f, a=0.0, b=2.0, xa=xa, xb=xb, constraints=con, use_chains=False)
# start
x, u = S.solve()
# the following code provides an animation of the system above
# for a more detailed explanation have a look at the 'Visualisation' section in the documentation
import sys
import matplotlib as mpl
from pytrajectory.visualisation import Animation
def draw(xt, image):
x = xt[0]
car_width = 0.05
car_heigth = 0.02
x_car = x
y_car = 0
car = mpl.patches.Rectangle((x_car-0.5*car_width, y_car-car_heigth), car_width, car_heigth,
fill=True, facecolor='grey', linewidth=2.0)
image.patches.append(car)
return image
if not 'no-pickle' in sys.argv:
# here we save the simulation results so we don't have to run
# the iteration again in case the following fails
S.save(fname='ex6_ConstrainedDoubleIntegrator.pcl')
if 'plot' in sys.argv or 'animate' in sys.argv:
A = Animation(drawfnc=draw, simdata=S.sim_data,
plotsys=[(0,'x'), (1,'dx')],
plotinputs=[(0,'u')])
xmin = np.min(S.sim_data[1][:,0])
xmax = np.max(S.sim_data[1][:,0])
A.set_limits(xlim=(xmin - 0.1, xmax + 0.1), ylim=(-0.1,0.1))
if 'plot' in sys.argv:
A.show(t=S.b)
if 'animate' in sys.argv:
A.animate()
A.save('ex6_ConstrainedDoubleIntegrator.gif')
|
{"hexsha": "f1f06b246afbf25ebd7c21903de9f68209aef724", "size": 2170, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/ex6_ConstrainedDoubleIntegrator.py", "max_stars_repo_name": "TUD-RST/pytrajectory", "max_stars_repo_head_hexsha": "fa3c7e89450748d1d75800f89a831e608cec1d8f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2015-01-15T20:01:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-08T02:25:04.000Z", "max_issues_repo_path": "examples/ex6_ConstrainedDoubleIntegrator.py", "max_issues_repo_name": "TUD-RST/pytrajectory", "max_issues_repo_head_hexsha": "fa3c7e89450748d1d75800f89a831e608cec1d8f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-10-31T18:09:18.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-03T16:18:12.000Z", "max_forks_repo_path": "examples/ex6_ConstrainedDoubleIntegrator.py", "max_forks_repo_name": "TUD-RST/pytrajectory", "max_forks_repo_head_hexsha": "fa3c7e89450748d1d75800f89a831e608cec1d8f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-10-27T06:52:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-25T09:26:13.000Z", "avg_line_length": 26.1445783133, "max_line_length": 97, "alphanum_fraction": 0.6179723502, "include": true, "reason": "import numpy", "num_tokens": 641}
|
for (study in c("SDY212", "SDY400", "SDY404")) {
fn.ge = file.path(PROJECT_DIR, "generated_data", "HIPC",
paste0(study, "_GE_matrix_gene.txt"))
dat = fread(fn.ge, data.table = F)
fn.si = file.path(PROJECT_DIR, "generated_data", "HIPC",
paste0(study, "_sample_info.txt"))
info = fread(fn.si)
si.d0 = info$time == "d0"
dat.d0 = dat[,c(1,which(si.d0)+1)]
info.d0 = info[si.d0,]
fn.ge.d0 = sub(".txt", "_day0.txt", fn.ge)
fn.si.d0 = sub(".txt", "_day0.txt", fn.si)
fwrite(dat.d0, fn.ge.d0, sep="\t", quote=T)
fwrite(info.d0, fn.si.d0, sep="\t", quote=T)
si.hl = info.d0$Response %in% c("low","high")
dat.hl = dat.d0[,c(1,which(si.hl)+1)]
info.hl = info.d0[si.hl,]
fn.ge.hl = sub(".txt", "_ResponseLoHi.txt", fn.ge.d0)
fn.si.hl = sub(".txt", "_ResponseLoHi.txt", fn.si.d0)
fwrite(dat.hl, fn.ge.hl, sep="\t", quote=T)
fwrite(info.hl, fn.si.hl, sep="\t", quote=T)
}
|
{"hexsha": "116d686e353ec87d92a9b9ded5f5b0aabdef1361", "size": 963, "ext": "r", "lang": "R", "max_stars_repo_path": "R/hipc_dataprep/hipc_sample_filtering.r", "max_stars_repo_name": "niaid/wl-test", "max_stars_repo_head_hexsha": "9ac8aa781ed73b509e1410f147f6799e9a77da86", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-04-10T05:08:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T18:41:28.000Z", "max_issues_repo_path": "R/hipc_dataprep/hipc_sample_filtering.r", "max_issues_repo_name": "niaid/wl-test", "max_issues_repo_head_hexsha": "9ac8aa781ed73b509e1410f147f6799e9a77da86", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-01T13:24:18.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-06T17:39:19.000Z", "max_forks_repo_path": "R/hipc_dataprep/hipc_sample_filtering.r", "max_forks_repo_name": "niaid/wl-test", "max_forks_repo_head_hexsha": "9ac8aa781ed73b509e1410f147f6799e9a77da86", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-02-25T18:33:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-03T02:45:05.000Z", "avg_line_length": 30.09375, "max_line_length": 59, "alphanum_fraction": 0.5617860852, "num_tokens": 344}
|
"""Training routine for models."""
from os.path import join
import json
from itertools import chain
import numpy as np
import tensorflow as tf
from typing import Callable
from tensorflow.keras.callbacks import (
CSVLogger, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard)
from .callbacks import (
EvaluationCallback, HParamsCallback, TimeHistory, WarmUpScheduler)
from .normalization import Normalizer
from .training_utils import (
build_training_iterator,
build_validation_iterator,
build_test_iterator,
build_hyperparameter_metrics,
init_hyperparam_space,
make_one_shot_iterator
)
from .logging_utils import NumpyEncoder
class TrainingLoop(Callable):
def __init__(self, model, dataset, task, epochs, hparams, early_stopping,
rundir, balance_dataset=True, additional_callbacks=None,
debug=False):
self.model = model
self.dataset = dataset
self.task = task
self.normalizer = Normalizer(dataset)
self.n_epochs = epochs
self.early_stopping = early_stopping
self.hparams = hparams
self.rundir = rundir
self.balance_dataset = balance_dataset
self.debug = debug
self.callbacks = self._build_callbacks(additional_callbacks)
def _build_callbacks(self, additional_callbacks):
callbacks = []
# Time per epoch callback
time_cb = TimeHistory()
callbacks.append(time_cb)
# Evaluation callback
# Repeat epochs + 1 times as we run an additional validation step at
# the end of training afer recovering the model.
val_iterator_cb, _ = build_validation_iterator(
self.dataset,
self.hparams['batch_size'],
self._normalize_and_preprocess()
)
val_cb = EvaluationCallback(
val_iterator_cb,
'val',
metrics=self.task.metrics
)
callbacks.append(val_cb)
# Early stopping callback
early_stopping_cb = EarlyStopping(
'val_' + self.task.monitor_quantity,
mode=self.task.direction_of_improvement,
patience=self.early_stopping,
min_delta=0.0001
)
callbacks.append(early_stopping_cb)
# LR scheduling
reduce_on_plateau_cb = ReduceLROnPlateau(
monitor='val_' + self.task.monitor_quantity,
factor=0.5,
patience=self.early_stopping // 2,
verbose=1,
mode=self.task.direction_of_improvement,
min_delta=0.0001,
cooldown=self.early_stopping // 2,
min_lr=1e-5
)
callbacks.append(reduce_on_plateau_cb)
callbacks.append(WarmUpScheduler(
self.hparams['learning_rate'],
warmup_steps=self.hparams['warmup_steps'],
verbose=1
))
# Logging callbacks
metrics = build_hyperparameter_metrics(self.task.metrics)
if self.rundir:
with open(join(self.rundir, 'model.json'), 'w') as f:
json.dump(self.model.get_config(), f)
init_hyperparam_space(
join(self.rundir, 'tb'),
self.hparams.get_hyperparameter_mapping().keys(),
metrics
)
callbacks.append(CSVLogger(join(self.rundir, 'metrics.csv')))
callbacks.append(ModelCheckpoint(
join(self.rundir, 'model_weights.hdf5'),
save_best_only=True,
save_weights_only=True,
monitor='val_' + self.task.monitor_quantity,
mode=self.task.direction_of_improvement
))
callbacks.append(
TensorBoard(
join(self.rundir, 'tb'),
update_freq=5,
histogram_freq=5
))
callbacks.append(
HParamsCallback(
join(self.rundir, 'tb'),
dict(chain(
{
'dataset': self.dataset,
'model': self.model.__class__.__name__
}.items(),
self.hparams.get_hyperparameter_mapping().items()
))
)
)
if additional_callbacks is not None:
callbacks.expand(additional_callbacks)
return callbacks
def _normalize_and_preprocess(self, with_weights=False):
"""Normalize input data and apply model specific preprocessing fn."""
if self.model.data_preprocessing_fn() is None:
return self.normalizer.get_normalization_fn()
def combined_fn(ts, labels):
normalized_ts, labels = \
self.normalizer.get_normalization_fn()(ts, labels)
preprocessed_ts, labels = \
self.model.data_preprocessing_fn()(normalized_ts, labels)
# No class weights
if self.task.class_weights is None or with_weights is False:
return preprocessed_ts, labels
# Evtl. use class weights (until now only relevant for
# physionet2019).
class_weights = self.task.class_weights
weights = \
tf.constant([class_weights[i] for i in range(len(class_weights))])
sample_weights = tf.gather(weights, tf.reshape(labels, (-1, )), axis=0)
sample_weights = tf.reshape(sample_weights, tf.shape(labels)[:-1])
return preprocessed_ts, labels, sample_weights
return combined_fn
def _prepare_dataset_for_training(self):
if self.balance_dataset:
class_balance = [
self.normalizer._class_balance[str(i)] for i in range(2)]
else:
class_balance = None
train_iterator, train_steps = build_training_iterator(
self.dataset,
self.n_epochs,
self.hparams['batch_size'],
self._normalize_and_preprocess(with_weights=True),
balance=self.balance_dataset,
class_balance=class_balance
)
# Repeat epochs + 1 times as we run an additional validation step at
# the end of training afer recovering the model.
val_iterator, val_steps = build_validation_iterator(
self.dataset,
self.hparams['batch_size'],
self._normalize_and_preprocess()
)
return train_iterator, train_steps, val_iterator, val_steps
def _get_train_metrics(self, history):
train_metrics = {
f'train_final_{metric}': history.history[metric][-1]
for metric in history.history.keys()
if not metric.startswith('val') and metric != 'lr'
}
if self.task.direction_of_improvement == 'min':
best_index = np.argmin(history.history['val_' + self.task.monitor_quantity])
elif self.task.direction_of_improvement == 'max':
best_index = np.argmax(history.history['val_' + self.task.monitor_quantity])
else:
raise ValueError()
train_metrics.update({
f'train_restored_{metric}': history.history[metric][best_index]
for metric in history.history.keys()
if not metric.startswith('val') and metric != 'lr'
})
return train_metrics
def _restore_and_evaluate_model(self, val_iter):
if self.rundir:
print(f'Loading model with {self.task.direction_of_improvement} '
f'validation {self.task.monitor_quantity}.')
self.model.load_weights(join(self.rundir, 'model_weights.hdf5'))
else:
print(
'Unable to load best model. No rundir provided.'
)
# Evaluate model on validation
validation_metrics = {}
self.callbacks[1].on_epoch_end(-1, validation_metrics)
val_eval = self.model.evaluate(val_iter)
val_eval = {
f'val_{name}': value
for name, value in zip(self.model.metrics_names, val_eval)
}
validation_metrics.update(val_eval)
# Evaluate model on testing
# Load data and build fake callback
test_iterator, _ = build_test_iterator(
self.dataset,
self.hparams['batch_size'],
self._normalize_and_preprocess()
)
test_cb = EvaluationCallback(
test_iterator,
'test',
metrics=self.task.metrics,
print_evaluations=False
)
test_cb.model = self.model
test_metrics = {}
test_cb.on_epoch_end(-1, test_metrics)
test_eval = self.model.evaluate(test_iterator)
test_eval = {
f'test_{name}': value
for name, value in zip(self.model.metrics_names, test_eval)
}
test_metrics.update(test_eval)
return validation_metrics, test_metrics
def _add_metrics_to_tensorboard(self, train_metrics, val_metrics,
test_metrics):
if self.rundir:
tf_dir = join(self.rundir, 'tb')
sess = tf.compat.v1.keras.backend.get_session()
with tf.compat.v2.summary.create_file_writer(tf_dir).as_default() as w:
sess.run(w.init())
for key, value in train_metrics.items():
sess.run(
tf.compat.v2.summary.scalar(key, data=value, step=0))
sess.run(w.flush())
for key, value in val_metrics.items():
sess.run(
tf.compat.v2.summary.scalar(
'best_' + key, data=value, step=0))
sess.run(w.flush())
for key, value in test_metrics.items():
sess.run(
tf.compat.v2.summary.scalar(key, data=value, step=0))
sess.run(w.flush())
def _build_results_summary(self, history, train_metrics, val_metrics,
test_metrics):
result = {}
result['history'] = history.history
result['mean_epoch_time'] = self.callbacks[0].get_average_epoch_time()
result.update(train_metrics)
result.update(val_metrics)
result.update(test_metrics)
result['hyperparameters'] = {
h.name: value
for h, value in self.hparams.get_hyperparameter_mapping().items()
}
result['max_epochs'] = self.n_epochs
result['early_stopping'] = self.early_stopping
return result
def _save_result(self, result):
print(result)
if self.rundir:
with open(join(self.rundir, 'results.json'), 'w') as f:
json.dump(result, f, cls=NumpyEncoder, indent=2)
def __call__(self):
if self.debug:
from tensorflow.python import debug as tf_debug
sess = tf.keras.backend.get_session()
sess = tf_debug.LocalCLIDebugWrapperSession(
sess, ui_type="readline")
tf.keras.backend.set_session(sess)
train_iter, steps_per_epoch, val_iter, val_steps = \
self._prepare_dataset_for_training()
optim = tf.keras.optimizers.Adam(
learning_rate=self.hparams['learning_rate'])
self.model.compile(
optimizer=optim,
loss=self.task.loss,
metrics=['accuracy'],
# TODO: Continue here
sample_weight_mode=(
None if self.task.class_weights is None else "temporal")
)
history = self.model.fit(
train_iter,
epochs=self.n_epochs,
callbacks=self.callbacks,
steps_per_epoch=steps_per_epoch,
# Pass a iterator over dataset with repeat, otherwise the cache is
# reset after each epoch. This has the disadvantage that we need to
# also pass validation_steps.
validation_data=val_iter,
validation_steps=val_steps,
verbose=1
)
# Eval and summary
train_metrics = self._get_train_metrics(history)
val_metrics, test_metrics = \
self._restore_and_evaluate_model(val_iter)
self._add_metrics_to_tensorboard(
train_metrics, val_metrics, test_metrics)
result = self._build_results_summary(
history, train_metrics, val_metrics, test_metrics)
self._save_result(result)
print('Successfully completed.')
|
{"hexsha": "b9fc820460bcc13fd667795f3cb43cb1bf9154ab", "size": 12606, "ext": "py", "lang": "Python", "max_stars_repo_path": "seft/training_routine.py", "max_stars_repo_name": "daniel-trejobanos/seft-hypoxia", "max_stars_repo_head_hexsha": "77f46086f44fde696bb18885549d559056a49714", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2020-07-14T08:20:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T06:17:51.000Z", "max_issues_repo_path": "seft/training_routine.py", "max_issues_repo_name": "daniel-trejobanos/seft-hypoxia", "max_issues_repo_head_hexsha": "77f46086f44fde696bb18885549d559056a49714", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-08-26T00:03:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-10T09:46:28.000Z", "max_forks_repo_path": "seft/training_routine.py", "max_forks_repo_name": "daniel-trejobanos/seft-hypoxia", "max_forks_repo_head_hexsha": "77f46086f44fde696bb18885549d559056a49714", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2020-07-14T18:41:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T15:07:49.000Z", "avg_line_length": 37.2958579882, "max_line_length": 88, "alphanum_fraction": 0.5905917817, "include": true, "reason": "import numpy", "num_tokens": 2483}
|
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2011 Bryce Lelbach
// Copyright (c) 2007-2013 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
////////////////////////////////////////////////////////////////////////////////
#if !defined(HPX_0C9D09E0_725D_4FA6_A879_8226DE97C6B9)
#define HPX_0C9D09E0_725D_4FA6_A879_8226DE97C6B9
#include <hpx/config.hpp>
#include <hpx/compat/condition_variable.hpp>
#include <hpx/compat/mutex.hpp>
#include <hpx/lcos/local/spinlock.hpp>
#include <hpx/runtime.hpp>
#include <hpx/runtime/naming/address.hpp>
#include <hpx/runtime/parcelset/parcelhandler.hpp>
#include <hpx/runtime/parcelset/parcelport.hpp>
#include <hpx/runtime/parcelset/put_parcel.hpp>
#include <hpx/runtime/parcelset/detail/parcel_await.hpp>
#include <hpx/util/connection_cache.hpp>
#include <hpx/util/io_service_pool.hpp>
#include <hpx/util_fwd.hpp>
#include <boost/lockfree/queue.hpp>
#include <cstddef>
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <hpx/config/warnings_prefix.hpp>
namespace hpx { namespace agas
{
struct notification_header;
struct HPX_EXPORT big_boot_barrier
{
public:
HPX_NON_COPYABLE(big_boot_barrier);
private:
parcelset::parcelport* pp;
parcelset::endpoints_type const& endpoints;
service_mode const service_type;
parcelset::locality const bootstrap_agas;
compat::condition_variable cond;
compat::mutex mtx;
std::size_t connected;
boost::lockfree::queue<util::unique_function_nonser<void()>* > thunks;
std::vector<parcelset::endpoints_type> localities;
void spin();
void notify();
public:
struct scoped_lock
{
private:
big_boot_barrier& bbb;
public:
scoped_lock(
big_boot_barrier& bbb_
)
: bbb(bbb_)
{
bbb.mtx.lock();
}
~scoped_lock()
{
bbb.notify();
}
};
big_boot_barrier(
parcelset::parcelport* pp_
, parcelset::endpoints_type const& endpoints_
, util::runtime_configuration const& ini_
);
~big_boot_barrier()
{
util::unique_function_nonser<void()>* f;
while (thunks.pop(f))
delete f;
}
parcelset::locality here() { return bootstrap_agas; }
parcelset::endpoints_type const &get_endpoints() { return endpoints; }
template <typename Action, typename... Args>
void apply(
std::uint32_t source_locality_id
, std::uint32_t target_locality_id
, parcelset::locality dest
, Action act
, Args &&... args)
{ // {{{
HPX_ASSERT(pp);
naming::address addr(naming::get_gid_from_locality_id(target_locality_id));
parcelset::parcel p(
parcelset::detail::create_parcel::call(std::false_type(),
naming::get_gid_from_locality_id(target_locality_id),
std::move(addr), act, std::forward<Args>(args)...));
#if defined(HPX_HAVE_PARCEL_PROFILING)
if (!p.parcel_id())
{
p.parcel_id() = parcelset::parcel::generate_unique_id(source_locality_id);
}
#endif
parcelset::detail::parcel_await(std::move(p), parcelset::write_handler_type(), 0,
[this, dest](parcelset::parcel&& p, parcelset::write_handler_type&&)
{
pp->send_early_parcel(dest, std::move(p));
}).apply();
} // }}}
template <typename Action, typename... Args>
void apply_late(
std::uint32_t source_locality_id
, std::uint32_t target_locality_id
, parcelset::locality const & dest
, Action act
, Args &&... args)
{ // {{{
naming::address addr(naming::get_gid_from_locality_id(target_locality_id));
parcelset::put_parcel(
naming::id_type(
naming::get_gid_from_locality_id(target_locality_id),
naming::id_type::unmanaged),
std::move(addr), act, std::forward<Args>(args)...);
} // }}}
void apply_notification(
std::uint32_t source_locality_id
, std::uint32_t target_locality_id
, parcelset::locality const& dest
, notification_header&& hdr);
void wait_bootstrap();
void wait_hosted(std::string const& locality_name,
naming::address::address_type primary_ns_ptr,
naming::address::address_type symbol_ns_ptr);
// no-op on non-bootstrap localities
void trigger();
void add_thunk(util::unique_function_nonser<void()>* f)
{
std::size_t k = 0;
while(!thunks.push(f))
{
// Wait until succesfully pushed ...
hpx::lcos::local::spinlock::yield(k);
++k;
}
}
void add_locality_endpoints(std::uint32_t locality_id,
parcelset::endpoints_type const& endpoints);
};
HPX_EXPORT void create_big_boot_barrier(
parcelset::parcelport* pp_
, parcelset::endpoints_type const& endpoints_
, util::runtime_configuration const& ini_
);
HPX_EXPORT void destroy_big_boot_barrier();
HPX_EXPORT big_boot_barrier& get_big_boot_barrier();
}}
#include <hpx/config/warnings_suffix.hpp>
#endif // HPX_0C9D09E0_725D_4FA6_A879_8226DE97C6B9
|
{"hexsha": "f89071bad7a47dee46c5d9602bb9c704b844e060", "size": 5389, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "hpx/runtime/agas/big_boot_barrier.hpp", "max_stars_repo_name": "atrantan/hpx", "max_stars_repo_head_hexsha": "6c214b2f3e3fc58648513c9f1cfef37fde59333c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-09-26T09:10:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-26T09:10:13.000Z", "max_issues_repo_path": "hpx/runtime/agas/big_boot_barrier.hpp", "max_issues_repo_name": "atrantan/hpx", "max_issues_repo_head_hexsha": "6c214b2f3e3fc58648513c9f1cfef37fde59333c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hpx/runtime/agas/big_boot_barrier.hpp", "max_forks_repo_name": "atrantan/hpx", "max_forks_repo_head_hexsha": "6c214b2f3e3fc58648513c9f1cfef37fde59333c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0677083333, "max_line_length": 89, "alphanum_fraction": 0.6366672852, "num_tokens": 1341}
|
import time
import subprocess
from io import BytesIO
import numpy as np
from PIL import Image
def cmd(command):
subp = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8")
subp.wait(100)
if subp.poll() == 0:
print(subp.communicate()[0])
else:
print("..")
def read_imagefile(file) -> Image.Image:
image = Image.open(BytesIO(file))
return image
def predict(mycmd):
result = cmd(mycmd)
return result
|
{"hexsha": "2bbd45af3aa0a354e2b631b660ba0e4fa0ffdce8", "size": 503, "ext": "py", "lang": "Python", "max_stars_repo_path": "serve_model_yolov5.py", "max_stars_repo_name": "4nuragk/Yolov5-Facemask", "max_stars_repo_head_hexsha": "dce9c34d5edd092e8082fd00cf9d97748387a602", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-01-20T13:31:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T07:00:50.000Z", "max_issues_repo_path": "serve_model_yolov5.py", "max_issues_repo_name": "4nuragk/Yolov5-Facemask", "max_issues_repo_head_hexsha": "dce9c34d5edd092e8082fd00cf9d97748387a602", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "serve_model_yolov5.py", "max_forks_repo_name": "4nuragk/Yolov5-Facemask", "max_forks_repo_head_hexsha": "dce9c34d5edd092e8082fd00cf9d97748387a602", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-13T06:20:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-13T06:20:05.000Z", "avg_line_length": 18.6296296296, "max_line_length": 110, "alphanum_fraction": 0.6719681909, "include": true, "reason": "import numpy", "num_tokens": 126}
|
''' new Network definitions using the functional API from keras.
first part: model settings like input variables, outputs and transformations
second part: model definition, name must be def model(input_shape):
'''
import numpy as np
import keras
import keras.layers
from keras import backend as K
from keras import regularizers
from keras.utils import to_categorical
import sys
from collections import OrderedDict
import os
sys.path.append(os.path.abspath(".."))
sys.path.append(os.path.join(os.path.abspath(".."),'lib'))
import transformations as tr
import numpy as np
import block_units as bunit
import keras.layers
# *Settings*
# define inputs for each branch
def mask_definition(reco_vals):
mask = (reco_vals["mu_e_on_entry"] < 50)
return mask
inputs = OrderedDict()
inputs["Branch_IC_time"] = {"variables": ["IC_charge",'IC_time_first', 'IC_charge_10ns',
'IC_charge_50ns', 'IC_charge_100ns',
'IC_time_spread', 'IC_time_std', 'IC_time_weighted_median',
'IC_pulse_0_01_pct_charge_quantile',
'IC_pulse_0_03_pct_charge_quantile',
'IC_pulse_0_05_pct_charge_quantile',
'IC_pulse_0_11_pct_charge_quantile',
'IC_pulse_0_15_pct_charge_quantile',
'IC_pulse_0_2_pct_charge_quantile',
'IC_pulse_0_5_pct_charge_quantile',
'IC_pulse_0_8_pct_charge_quantile'],
"transformations": [tr.IC_divide_100, tr.IC_divide_10000, tr.IC_divide_100, tr.IC_divide_100,
tr.IC_divide_100, tr.IC_divide_10000,
tr.IC_divide_10000, tr.IC_divide_10000, tr.IC_divide_10000,tr.IC_divide_10000,
tr.IC_divide_10000, tr.IC_divide_10000, tr.IC_divide_10000, tr.IC_divide_10000,
tr.IC_divide_10000, tr.IC_divide_10000]}
# define outputs for each branch
outputs = OrderedDict()
outputs["Out1"] = {"variables": ["e_on_entry"],
"transformations": [tr.log10]}
loss_weights = {'Target1': 1.}
loss_functions = ["mse"]
metrics = ['mae']
# Step 4: Define the model using Keras functional API
output_names = {0: 'mu_E_on_entry'}
def inception_block4(input_tensor, n, t0=2, t1=4, t2=5, n_pool=3, scale=0.1):
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
tower_0 = bunit.conv3d_bn(input_tensor, n, (t0,1,1), padding='same')
tower_0 = bunit.conv3d_bn(tower_0, n, (1,t0,1), padding='same')
tower_0 = bunit.conv3d_bn(tower_0, n, (1,1,t0), padding='same')
tower_1 = bunit.conv3d_bn(input_tensor, n, (t1,1,1), padding='same')
tower_1 = bunit.conv3d_bn(tower_1, n, (1,t1,1), padding='same')
tower_1 = bunit.conv3d_bn(tower_1, n, (1,1,t1), padding='same')
tower_4 = bunit.conv3d_bn(input_tensor, n, (1,1,t2), padding='same')
tower_3 = keras.layers.MaxPooling3D((n_pool, n_pool, n_pool),
strides=(1,1,1), padding='same')(input_tensor)
tower_3 = bunit.conv3d_bn(tower_3, n, (1,1,1), padding='same')
up = keras.layers.concatenate(
[tower_0, tower_1, tower_3, tower_4], axis = channel_axis)
return up
def inception_resnet(input_tensor, n, t1=2, t2=3, n_pool=3, scale=0.1):
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
tower_1 = bunit.conv3d_bn(input_tensor, n, (1,1,1), padding='same')
tower_1 = bunit.conv3d_bn(tower_1, n, (t1,1,1), padding='same')
tower_1 = bunit.conv3d_bn(tower_1, n, (1,t1,1), padding='same')
tower_1 = bunit.conv3d_bn(tower_1, n, (1,1,t1), padding='same')
tower_2 = bunit.conv3d_bn(input_tensor, n, (1,1,1), padding='same')
tower_2 = bunit.conv3d_bn(tower_2, n, (t2,1,1), padding='same')
tower_2 = bunit.conv3d_bn(tower_2, n, (1,t2,1), padding='same')
tower_2 = bunit.conv3d_bn(tower_2, n, (1,1,t2), padding='same')
tower_3 = keras.layers.MaxPooling3D((n_pool, n_pool, n_pool),
strides=(1,1,1), padding='same')(input_tensor)
tower_3 = bunit.conv3d_bn(tower_3, n, (1,1,1), padding='same')
up = keras.layers.concatenate(
[tower_1, tower_2, tower_3], axis = channel_axis)
x = keras.layers.Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
output_shape=K.int_shape(input_tensor)[1:],
arguments={'scale': scale},)([input_tensor, up])
return x
def model(input_shapes, output_shapes):
### Most important: Define your model using the functional API of Keras
# https://keras.io/getting-started/functional-api-guide/
# The Input
input_b1 = keras.layers.Input(
shape=input_shapes['Branch_IC_time']['general'],
name = "Input-Branch1")
# Hidden Layers
z1 = inception_block4(input_b1, 24, t0=2, t1=5, t2=7)
z1 = inception_block4(input_b1, 24, t0=3, t1=7, t2=10)
z1 = inception_block4(z1, 24, t0=2, t1=3, t2=7)
z1 = inception_block4(z1, 24, t0=2, t1=4, t2=8)
z1 = inception_block4(z1, 24, t0=3, t1=5, t2=9)
z1 = inception_block4(z1, 24, t0=3, t1=8, t2=9)
z1 = keras.layers.AveragePooling3D(pool_size=(2, 2, 3))(z1)
z1 = keras.layers.BatchNormalization()(z1)
for i in range(8):
z1 = inception_resnet(z1, 32, t2=3)
z1 = inception_resnet(z1, 32, t2=4)
z1 = inception_resnet(z1, 32, t2=5)
z1 = keras.layers.AveragePooling3D(pool_size=(1, 1, 2))(z1)
z1 = keras.layers.BatchNormalization()(z1)
for i in range(8):
z1 = inception_resnet(z1, 32, t2=3)
z1 = inception_resnet(z1, 32, t2=4)
z1 = inception_resnet(z1, 32, t2=5)
z1 = keras.layers.Conv3D(1024, (1, 1, 1), activation='relu',
padding="same", name='conv1x1x1')(z1)
z1 = keras.layers.Conv3D(4, (5, 5, 10), activation='relu',
padding="valid", name='conv5x5x10')(z1)
z1 = keras.layers.Conv3D(64, (1, 1, 1), activation='relu',
padding="valid", name='conv_final')(z1)
z1 = keras.layers.Flatten(data_format=K.image_data_format())(z1)
print('out shape {}'.format(output_shapes["Out1"]["general"][0]))
output_b1 = keras.layers.Dense(output_shapes["Out1"]["general"][0],
activation="linear",
name="Target1")(z1)
# The Output
model= keras.models.Model(inputs=[input_b1],
outputs=[output_b1])
return model
|
{"hexsha": "fe23a0440fa60b191aca23030628611228f6a020", "size": 6764, "ext": "py", "lang": "Python", "max_stars_repo_path": "i3deepice/models/mu_energy_reco_full_range/model.py", "max_stars_repo_name": "tglauch/DeepIceLearning_Module", "max_stars_repo_head_hexsha": "8c05929ec97226f07ab9e13a1dfc539d0e47a2b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-10-10T22:02:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-17T08:39:35.000Z", "max_issues_repo_path": "i3deepice/models/mu_energy_reco_full_range/model.py", "max_issues_repo_name": "tglauch/DeepIceLearning_Module", "max_issues_repo_head_hexsha": "8c05929ec97226f07ab9e13a1dfc539d0e47a2b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "i3deepice/models/mu_energy_reco_full_range/model.py", "max_forks_repo_name": "tglauch/DeepIceLearning_Module", "max_forks_repo_head_hexsha": "8c05929ec97226f07ab9e13a1dfc539d0e47a2b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-11T15:49:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-11T15:49:46.000Z", "avg_line_length": 43.358974359, "max_line_length": 120, "alphanum_fraction": 0.6074807806, "include": true, "reason": "import numpy", "num_tokens": 2004}
|
# Copyright (C) 2020 Argonne National Laboratory
# Written by Alinson Santos Xavier <axavier@anl.gov>
using RELOG, Cbc, JuMP, Printf, JSON, MathOptInterface.FileFormats
@testset "build" begin
basedir = dirname(@__FILE__)
instance = RELOG.parsefile("$basedir/../../instances/s1.json")
graph = RELOG.build_graph(instance)
model = RELOG.build_model(instance, graph, Cbc.Optimizer)
set_optimizer_attribute(model, "logLevel", 0)
process_node_by_location_name =
Dict(n.location.location_name => n for n in graph.process_nodes)
shipping_node_by_loc_and_prod_names = Dict(
(n.location.location_name, n.product.name) => n for n in graph.plant_shipping_nodes
)
@test length(model[:flow]) == 76
@test length(model[:dispose]) == 16
@test length(model[:open_plant]) == 12
@test length(model[:capacity]) == 12
@test length(model[:expansion]) == 12
l1 = process_node_by_location_name["L1"]
v = model[:capacity][l1, 1]
@test lower_bound(v) == 0.0
@test upper_bound(v) == 1000.0
v = model[:expansion][l1, 1]
@test lower_bound(v) == 0.0
@test upper_bound(v) == 750.0
v = model[:dispose][shipping_node_by_loc_and_prod_names["L1", "P2"], 1]
@test lower_bound(v) == 0.0
@test upper_bound(v) == 1.0
end
|
{"hexsha": "27d0e7f92c86141bd27adbdec7a4b2db3fa0e804", "size": 1297, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/model/build_test.jl", "max_stars_repo_name": "ANL-CEEESA/RELOG", "max_stars_repo_head_hexsha": "92d30460b9f2c227770dd426415c1ee34dac5300", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-13T18:11:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-15T16:20:13.000Z", "max_issues_repo_path": "test/model/build_test.jl", "max_issues_repo_name": "ANL-CEEESA/RELOG", "max_issues_repo_head_hexsha": "92d30460b9f2c227770dd426415c1ee34dac5300", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-23T23:03:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-23T23:03:01.000Z", "max_forks_repo_path": "test/model/build_test.jl", "max_forks_repo_name": "ANL-CEEESA/RELOG", "max_forks_repo_head_hexsha": "92d30460b9f2c227770dd426415c1ee34dac5300", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2564102564, "max_line_length": 91, "alphanum_fraction": 0.6738627602, "num_tokens": 386}
|
import numpy as np
from numpy import sqrt, real, conj
from glob import glob
from apertools.utils import take_looks
import apertools.sario as sario
from apertools.log import get_log
logger = get_log()
EPS = np.finfo(np.float32).eps
def abs2(x):
# Weird, but it seems to be faster...
# %timeit np.abs(b)**2
# 13 ms ± 3.31 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
# %timeit b.real**2 + b.imag**2
# 1.53 ms ± 2.04 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
return x.real ** 2 + x.imag ** 2
def make_igam(slc1, slc2, rowlooks, collooks):
return take_looks(slc1 * conj(slc2), rowlooks, collooks)
def powlooks(image, rowlooks, collooks):
return take_looks(abs2(image), rowlooks, collooks)
def make_int_cor(
slc1,
slc2,
rowlooks,
collooks,
):
igram = make_igam(slc1, slc2, rowlooks, collooks)
return _make_cor(igram, slc1, slc2, rowlooks, collooks)
def _make_cor(
igram,
slc1,
slc2,
rowlooks,
collooks,
):
ampslc1 = sqrt(powlooks(slc1, rowlooks, collooks))
ampslc2 = sqrt(powlooks(slc2, rowlooks, collooks))
amp = np.abs(igram)
# @show extrema(ampslc1), extrema(ampslc2), extrema(amp)
cor = real(amp / (EPS + (ampslc1 * ampslc2)))
return cor, amp, igram
# julia> lines = readlines("sbas_list")
# 4-element Array{String,1}:
# "./S1A_20141104.ge ./S1A_2014128.ge 24.0 29539676548307892 " ...
def form_igram_names(igram_ext=".int"):
with open("sbas_list") as f:
sbas_lines = f.read().splitlines()
# TODO: use the parsers to get the dates...
out = []
for line in sbas_lines:
# early_file, late_file, temp, spatial = line.split()
early_file, late_file, _, _ = line.split()
# "./S1A_20141104.ge
igram_name = "_".join(map(_get_date, [early_file, late_file])) + igram_ext
out.append((igram_name, str(early_file), str(late_file)))
# Note: orting so that ALL igrams with `early_file` are formed in a row
return sorted(out)
def _get_date(geo_name):
# "./S1A_20141128.geo" -> "20141128"
return geo_name.split("_")[1].split(".")[0]
def _load_gdal(fname):
import rasterio as rio
with rio.open(fname) as src:
return src.read(1)
def create_igrams(rowlooks=1, collooks=1, igram_ext=".int"):
current_ints = glob("*" + igram_ext)
current_cors = glob("*.cc")
fulldemrsc = sario.load("../elevation.dem.rsc")
demrsc = take_looks(fulldemrsc, rowlooks, collooks)
sario.save("dem.rsc", demrsc)
cur_early_file = ""
for (igram_name, early_file, late_file) in form_igram_names():
cor_name = igram_name.replace(igram_ext, ".cc")
if (igram_name in current_ints) and (cor_name in current_cors):
logger.debug(f"Skipping {igram_name} and {cor_name}: exists")
continue
else:
logger.debug(f"Forming {igram_name} and {cor_name}")
# Keep early in memory for all pairs: only load for new set
if cur_early_file != early_file:
logger.debug(f"Loading {early_file}")
# early = sario.load(early_file)
early = sario.load(early_file)
cur_early_file = early_file
# But we load late every time
logger.debug(f"Loading {late_file}")
late = sario.load(late_file)
# TODO: check if window_read with rasterio is faster than loading huge files?
logger.debug("Forming amps")
ampslc1 = sqrt(powlooks(early, rowlooks, collooks))
ampslc2 = sqrt(powlooks(late, rowlooks, collooks))
logger.debug("Forming igram")
igram = make_igam(early, late, rowlooks, collooks)
logger.debug("Forming cor")
amp = real(np.abs(igram))
cor = real(amp / (EPS + (ampslc1 * ampslc2)))
logger.info(f"Saving {cor_name}, {igram_name}")
sario.save(cor_name, np.stack([amp, cor], axis=0))
sario.save(igram_name, igram)
def _get_weights(wsize):
assert wsize % 2 == 1
return 1 - np.abs(2 * (np.arange(wsize) - wsize // 2)) / (wsize + 1)
def _get_weights_square(wsize):
w = _get_weights(wsize)
return w.reshape((-1, 1)) * w.reshape((1, -1))
try:
import numba
import cupy as cp
from cupyx.scipy.ndimage import correlate as correlate_gpu
from scipy.ndimage import correlate
except ImportError:
print("cupy/numba not installed, no gpu")
from apertools.utils import read_blocks, block_iterator
def make_igram_gpu(
early_filename,
late_filename,
block_size=(500, 500),
overlaps=None,
wsize=5,
out_ifg="out.int",
out_cor="out.cor",
):
from rasterio.windows import Window
import rasterio as rio
if overlaps is None:
overlaps = (wsize // 2, wsize // 2)
out_cor = "testcor.tif"
with rio.open(early_filename) as src:
full_shape = src.shape
blks1 = read_blocks(early_filename, window_shape=block_size, overlaps=overlaps)
blks2 = read_blocks(late_filename, window_shape=block_size, overlaps=overlaps)
blk_slices = block_iterator(src.shape, block_size, overlaps=overlaps)
# Write empty file
_write(out_ifg, None, early_filename, "ROI_PAC", dtype=np.complex64)
_write(out_cor, None, early_filename, "GTiff", dtype=np.float32)
w_cpu = _get_weights_square(wsize)
w = cp.asarray(w_cpu)
# w = w_cpu
for slc1_cpu, slc2_cpu, win_slice in zip(blks1, blks2, blk_slices):
print(f"Forming {win_slice = }")
# slc1 = cp.asarray(slc1_cpu)
# slc2 = cp.asarray(slc2_cpu)
slc1 = slc1_cpu
slc2 = slc2_cpu
ifg = slc1 * slc2.conj()
# Correlation
amp1 = slc1.real ** 2 + slc1.imag ** 2
amp2 = slc2.real ** 2 + slc2.imag ** 2
denom = correlate_gpu(cp.sqrt(amp1 * amp2), w)
numer = correlate_gpu(cp.abs(ifg), w)
# denom = correlate(np.sqrt(amp1 * amp2), w)
# numer = correlate(np.abs(ifg), w)
cor = numer / (EPS + denom)
ifg_cpu = cp.asnumpy(ifg)
cor_cpu = cp.asnumpy(cor)
# ifg_cpu = ifg
# cor_cpu = cor
_write(
out_ifg,
ifg_cpu,
early_filename,
"ROI_PAC",
window=Window.from_slices(*win_slice),
mode="r+",
)
_write(
out_cor,
cor_cpu,
early_filename,
"GTiff",
window=Window.from_slices(*win_slice),
mode="r+",
)
def _write(outname, img, in_name, driver, mode="w", window=None, dtype=None):
import rasterio as rio
if dtype is None:
dtype = img.dtype
with rio.open(in_name) as src:
full_height, full_width = src.shape
transform, crs, nodata = src.transform, src.crs, src.nodata
with rio.open(
outname,
mode,
driver=driver,
width=full_width,
height=full_height,
count=1,
dtype=dtype,
transform=transform,
crs=crs,
nodata=nodata,
) as dst:
if img is not None:
dst.write(img, window=window, indexes=1)
from apertools.utils import memmap_blocks
def make_igram_blocks(
early_filename,
late_filename,
full_shape,
looks=(1, 1),
block_rows=1000,
out_ifg="out.int",
out_cor="out.cor",
wsize=5,
):
blks1 = memmap_blocks(early_filename, full_shape, block_rows, "complex64")
blks2 = memmap_blocks(late_filename, full_shape, block_rows, "complex64")
w = _get_weights_square(wsize)
with open("testifg.int", "wb") as f, open("testcor.cor", "wb") as g:
for idx, (slc1, slc2) in enumerate(zip(blks1, blks2)):
print(f"Forming {idx = }")
ifg = slc1 * slc2.conj()
take_looks(ifg, *looks).tofile(f)
# Correlation
amp1 = slc1.real ** 2 + slc1.imag ** 2
amp2 = slc2.real ** 2 + slc2.imag ** 2
denom = correlate_gpu(cp.sqrt(amp1 * amp2), w)
numer = correlate_gpu(cp.abs(ifg), w)
# denom = correlate(np.sqrt(amp1 * amp2), w)
# numer = correlate(np.abs(ifg), w)
cor = numer / (EPS + denom)
take_looks(cor, *looks).tofile(g)
|
{"hexsha": "f3c1dbdc9b8a2d7b2ce3fb10a7d32691d66e0777", "size": 8234, "ext": "py", "lang": "Python", "max_stars_repo_path": "insar/form_igrams.py", "max_stars_repo_name": "scottstanie/insar", "max_stars_repo_head_hexsha": "61724be3cef7faf1e977e1b0ffad89dcae342761", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "insar/form_igrams.py", "max_issues_repo_name": "scottstanie/insar", "max_issues_repo_head_hexsha": "61724be3cef7faf1e977e1b0ffad89dcae342761", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-07-28T20:55:30.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-28T20:55:30.000Z", "max_forks_repo_path": "insar/form_igrams.py", "max_forks_repo_name": "scottstanie/insar", "max_forks_repo_head_hexsha": "61724be3cef7faf1e977e1b0ffad89dcae342761", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3024911032, "max_line_length": 85, "alphanum_fraction": 0.6133106631, "include": true, "reason": "import numpy,from numpy,from scipy,import numba,import cupy,from cupy", "num_tokens": 2413}
|
export searchvtx
# recursive function for applying search criteria
function keycheck(data::Dict{<:Any,<:Any},str::Array{String,1},mode::Array{Symbol,1})
found = false
for key in keys(data)
if :deps in mode
for s in str
(key == s) && (found = true)
end
end
if ((:search in mode) || (Symbol(key) in mode)) && (typeof(data[key]) == String)
for s in lowercase.(str)
occursin(s,lowercase(data[key])) && (found = true)
end
end
if (((key == "label") && (:label in mode)) ||
((key == "ref") && (:ref in mode)))
for s in str
for g in data[key]
(g == s) && (found = true)
end
end
end
if (key ≠ "ids" ) && (typeof(data[key]) <: Dict{<:Any,<:Any})
keycheck(data[key],str,mode) && (found = true)
end
end
return found
end
# directory search for VerTeX toml
function searchvtx(mode::Array{Symbol,1},str::Array{String,1})
list = Dict[]
depos = getdepot()
for depot in keys(depos)
for (root, dirs, files) in walkdir(checkhome(depos[depot]))
for dir in dirs
for file in readdir(joinpath(root,dir))
found = false
data = nothing
if endswith(file, ".vtx")
data = TOML.parsefile(joinpath(root,dir,file))
if keycheck(data,str,mode)
found = true
end
end
found && push!(list,data)
end
end
end
end
return list
end
searchvtx(mode::Symbol,str::String...) = searchvtx([mode],collect(str))
searchvtx(str::String,mode::Symbol...) = searchvtx(collect(mode),[str])
|
{"hexsha": "b605be815a270c04241f6537fa843729762f945f", "size": 1886, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/search.jl", "max_stars_repo_name": "UnofficialJuliaMirror/VerTeX.jl-cc48e778-429c-5593-b60f-2bcf41d5649c", "max_stars_repo_head_hexsha": "41ddb48918c789767511ceeaf135790eeab86d68", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-08-12T20:38:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T19:20:42.000Z", "max_issues_repo_path": "src/search.jl", "max_issues_repo_name": "UnofficialJuliaMirror/VerTeX.jl-cc48e778-429c-5593-b60f-2bcf41d5649c", "max_issues_repo_head_hexsha": "41ddb48918c789767511ceeaf135790eeab86d68", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-01-03T10:56:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-07T19:22:09.000Z", "max_forks_repo_path": "src/search.jl", "max_forks_repo_name": "UnofficialJuliaMirror/VerTeX.jl-cc48e778-429c-5593-b60f-2bcf41d5649c", "max_forks_repo_head_hexsha": "41ddb48918c789767511ceeaf135790eeab86d68", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-01T19:31:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:31:02.000Z", "avg_line_length": 33.0877192982, "max_line_length": 88, "alphanum_fraction": 0.480911983, "num_tokens": 464}
|
import argparse
import time
import numpy as np
import os
def main():
parser = argparse.ArgumentParser(description = "WGAN-GP")
# Saving parameters
parser.add_argument("--name", "-n", "-id", type = str, default = str(int(time.time())),
help = "Name/ID of the current training model")
parser.add_argument("--resume_from", "-rf", type = int, default = 0,
help = "Number of epoch to resume from (if existing)")
parser.add_argument("--checkpoint_interval", "-ci", type = int, default = 20,
help = "Number of epoch before saving a checkpoint (0 to disable checkpoints) (default = 20)")
# Model hyper parameters
parser.add_argument("--learning_rate_d", "-lrd", type = float, default = 2e-4,
help = "Learning rate of the critic (default = 2e-4)")
parser.add_argument("--learning_rate_g", "-lrg", type = float, default = 2e-4,
help = "Learning rate of the generator (default = 2e-4)")
parser.add_argument("--beta_1", "-b1", type = float, default = 0.5,
help = "BETA 1 of the optimizer (default = 0.5)")
parser.add_argument("--beta_2", "-b2", type = float, default = 0.9,
help = "BETA 2 of the optimizer (default = 0.9)")
parser.add_argument("--training_ratio", "-tr", type = int, default = 5,
help = "Training ratio of the critic (default = 5)")
parser.add_argument("--gradient_penalty_weight", "-gpd", type = int, default = 10,
help = "Gradient penalty weight applied to the critic (default = 10)")
parser.add_argument("--z_size", type = int, default = 128,
help = "Size of the noise vector of the generator (default = 128)")
# General hyper parameters
parser.add_argument("--epoch", type = int, default = 10000,
help = "Number of epoch to train (default = 10000)")
parser.add_argument("--batch_size", "-bs", type = int, default = 512,
help = "Size of the dataset mini-batch (default = 512)")
parser.add_argument("--buffer_size", "-bus", type = int, default = 2048,
help = "Size of the buffer of the dataset iterator (default = 2048)")
parser.add_argument("--prefetch_size", "-ps", type = int, default = 10,
help = "Size of prefetching of the dataset iterator (default = 10)")
# Layers hyper parameters
parser.add_argument("--bn_momentum", "-bm", type = float, default = 0.8,
help = "Momentum of the batch normalization layer (default = 0.8)")
parser.add_argument("--lr_alpha", "-la", type = float, default = 0.2,
help = "Alpha of the LeakyReLU layer (default = 0.2)")
parser.add_argument("--kernel_size", "-ks", type = int, default = 5,
help = "Size of the kernel of the convolutional layer (best if odd) (default = 5)")
parser.add_argument("--rn_stddev", "-rs", type = float, default = 0.02,
help = "Standard deviation of the initialization of the weights of each layers (default = 0.02)")
parser.add_argument("--min_weight", "-mw", type = int, default = 5,
help = "Minimum size pow(2, mw) of the first layer of convolutional layer (doubles each times) (default = 5)")
# Dataset parameters
parser.add_argument("--type", "-t", type = str, default = "digits",
choices = ["custom", "digits", "fashion", "cifar10",
"cifar100", "celebA_128", "LAG48", "LAG128",
"cars64"],
help = "Type of dataset to use (default = 'digits')")
args = parser.parse_args()
print(args)
from wgan_gp import WGAN_GP
from toolbox import extract_mnist
from tensorflow.keras.datasets import mnist, fashion_mnist
from tensorflow.keras.datasets import cifar10, cifar100
if args.type == "custom":
print("Custom type is not yet implemented !")
return
elif args.type in ["digits", "fashion"]:
sample_shape = (7, 7)
output_shape = (28, 28, 1)
min_wh = 7
tensor_to_img = False
nb_layers = 3
data_dir = "keras"
X_train = extract_mnist((mnist, fashion_mnist)[args.type == "fashion"])
elif args.type in ["cifar10", "cifar100"]:
sample_shape = (7, 7)
output_shape = (32, 32, 3)
min_wh = 4
tensor_to_img = False
nb_layers = 4
data_dir = "keras"
X_train = (cifar10, cifar100)[args.type == "cifar100"]
X_train = extract_mnist(X_train, img_shape = output_shape) # , label = 1)
elif args.type == "celebA_128":
sample_shape = (5, 5)
output_shape = (128, 128, 3)
min_wh = 4
data_dir = './datasets/celebA_128'
X_train = np.array(os.listdir(data_dir))
tensor_to_img = True
nb_layers = 6
elif args.type == "LAG48":
sample_shape = (5, 5)
output_shape = (48, 48, 3)
min_wh = 3
data_dir = './datasets/_binarynumpy/normalized_LAGdataset_48.npy'
X_train = np.load(data_dir)
tensor_to_img = False
nb_layers = 5
elif args.type == "LAG128":
sample_shape = (5, 5)
output_shape = (128, 128, 3)
min_wh = 4
data_dir = './datasets/_binarynumpy/normalized_LAGdataset_128.npy'
X_train = np.load(data_dir)
tensor_to_img = False
nb_layers = 6
elif args.type == "cars64":
sample_shape = (5, 5)
output_shape = (64, 64, 3)
min_wh = 4
data_dir = './datasets/_binarynumpy/normalized_cars.npy'
X_train = np.load(data_dir)
tensor_to_img = False
nb_layers = 5
name = f"wgan-gp_{args.type}_{args.name}"
weights = [pow(2, i) for i in range(args.min_weight, args.min_weight+nb_layers)]
model = WGAN_GP(name, args.learning_rate_d, args.learning_rate_g,
args.beta_1, args.beta_2, args.training_ratio,
args.gradient_penalty_weight, args.z_size,
args.bn_momentum, args.lr_alpha, args.kernel_size,
args.rn_stddev)
model.feed_data(X_train, data_dir, tensor_to_img, args.batch_size,
args.buffer_size, args.prefetch_size)
model.set_output(sample_shape, output_shape)
model.create_model(args.min_weight, min_wh, weights, nb_layers)
model.print_desc(args.resume_from)
model.train(args.epoch, args.checkpoint_interval, args.resume_from)
if __name__ == "__main__":
main()
else:
print("Launcher.py is to be used on its own")
|
{"hexsha": "d7497318d119a6f029e99d49c39b8ccdedf38f31", "size": 5845, "ext": "py", "lang": "Python", "max_stars_repo_path": "launcher.py", "max_stars_repo_name": "saundersp/wgan-gp", "max_stars_repo_head_hexsha": "27f1afbee348a71edb0275e2dbb7c57f29b74adf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-15T11:07:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-24T22:42:49.000Z", "max_issues_repo_path": "launcher.py", "max_issues_repo_name": "saundersp/wgan-gp", "max_issues_repo_head_hexsha": "27f1afbee348a71edb0275e2dbb7c57f29b74adf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "launcher.py", "max_forks_repo_name": "saundersp/wgan-gp", "max_forks_repo_head_hexsha": "27f1afbee348a71edb0275e2dbb7c57f29b74adf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-14T15:16:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-14T15:16:28.000Z", "avg_line_length": 41.75, "max_line_length": 116, "alphanum_fraction": 0.6800684346, "include": true, "reason": "import numpy", "num_tokens": 1680}
|
#!/usr/bin/env python3
"""
Simple exercise to construct a controller that controls the simulated Duckiebot using pose.
"""
import time
import sys
import argparse
import math
import numpy as np
import gym
from gym_duckietown.envs import DuckietownEnv
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default=None)
parser.add_argument('--map-name', default='udem1')
parser.add_argument('--no-pause', action='store_true', help="don't pause on failure")
args = parser.parse_args()
if args.env_name is None:
env = DuckietownEnv(
map_name = args.map_name,
domain_rand = False,
draw_bbox = False
)
else:
env = gym.make(args.env_name)
obs = env.reset()
env.render()
total_reward = 0
while True:
lane_pose = env.get_lane_pos2(env.cur_pos, env.cur_angle)
distance_to_road_center = lane_pose.dist
angle_from_straight_in_rads = lane_pose.angle_rad
###### Start changing the code here.
# TODO: Decide how to calculate the speed and direction.
k_p = 10
k_d = 1
# The speed is a value between [0, 1] (which corresponds to a real speed between 0m/s and 1.2m/s)
speed = 0.2 # TODO: You should overwrite this value
# angle of the steering wheel, which corresponds to the angular velocity in rad/s
steering = k_p*distance_to_road_center + k_d*angle_from_straight_in_rads # TODO: You should overwrite this value
###### No need to edit code below.
obs, reward, done, info = env.step([speed, steering])
total_reward += reward
print('Steps = %s, Timestep Reward=%.3f, Total Reward=%.3f' % (env.step_count, reward, total_reward))
env.render()
if done:
if reward < 0:
print('*** CRASHED ***')
print ('Final Reward = %.3f' % total_reward)
break
|
{"hexsha": "0ca4bbfd36593fb28354dec59a8d08498c71b6ab", "size": 1818, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym-duckietown/exercises/basic_control.py", "max_stars_repo_name": "lyf44/CS4278-5478-Project-Materials", "max_stars_repo_head_hexsha": "685419c65847e72450e99586e9e0f3794369b4a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-04-28T15:48:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-03T14:03:01.000Z", "max_issues_repo_path": "gym-duckietown/exercises/basic_control.py", "max_issues_repo_name": "lyf44/CS4278-5478-Project-Materials", "max_issues_repo_head_hexsha": "685419c65847e72450e99586e9e0f3794369b4a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym-duckietown/exercises/basic_control.py", "max_forks_repo_name": "lyf44/CS4278-5478-Project-Materials", "max_forks_repo_head_hexsha": "685419c65847e72450e99586e9e0f3794369b4a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2020-04-28T16:38:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-16T14:21:08.000Z", "avg_line_length": 26.7352941176, "max_line_length": 116, "alphanum_fraction": 0.6771177118, "include": true, "reason": "import numpy", "num_tokens": 463}
|
import itertools
import operator
import numpy as np
from sklearn import cross_validation
from sklearn import neighbors
train = np.load('train.npy')
# Remove the labels
test = np.load('test_distribute.npy')[:,1:]
data = train[:,1:]
target = train[:,0]
np.set_printoptions(threshold='nan')
print target
#print neighbors.KNeighborsClassifier(n_neighbors=1).fit(data, target).predict(data)
# I use the following code to find good hyperparameter values
#scores = cross_validation.cross_val_score(
#clf, data, target, cv=5)
#print("Accuracy: %f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
|
{"hexsha": "62c5106f638f3e87763773985d405ff121f5d9bc", "size": 599, "ext": "py", "lang": "Python", "max_stars_repo_path": "Black-Box/opt.py", "max_stars_repo_name": "bcspragu/Machine-Learning-Projects", "max_stars_repo_head_hexsha": "b6832cbb9bb27d7e8253300f97a3ab84b1a555dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Black-Box/opt.py", "max_issues_repo_name": "bcspragu/Machine-Learning-Projects", "max_issues_repo_head_hexsha": "b6832cbb9bb27d7e8253300f97a3ab84b1a555dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Black-Box/opt.py", "max_forks_repo_name": "bcspragu/Machine-Learning-Projects", "max_forks_repo_head_hexsha": "b6832cbb9bb27d7e8253300f97a3ab84b1a555dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-26T13:13:03.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-26T13:13:03.000Z", "avg_line_length": 24.9583333333, "max_line_length": 84, "alphanum_fraction": 0.7328881469, "include": true, "reason": "import numpy", "num_tokens": 150}
|
"""
Licensed under the Unlicense License;
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://unlicense.org
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import threading
import numpy as np
import cv2
import sys
from PyQt5 import QtWidgets
from PyQt5.QtGui import QPixmap
import qimage2ndarray
from keras_preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
import random
from tkinter import filedialog
import tensorflow as tf
import keras
import gui_5
INPUT_SHAPE = (64, 64, 3)
PREDICT_SHAPE = (-1, 64, 64, 3)
KERNEL_ACTIVATION = 'relu'
MLP_ACTIVATION = 'relu'
OPTIMIZER = 'adam'
class LR5(QtWidgets.QMainWindow, gui_5.Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
self.btn_train_browse.clicked.connect(self.train_browse)
self.btn_train_load.clicked.connect(self.train_load)
self.btn_test_browse.clicked.connect(self.test_browse)
self.btn_test_load.clicked.connect(self.test_load)
self.btn_tfn_train.clicked.connect(self.tfn_train)
self.btn_tfn_save.clicked.connect(self.tfn_save)
self.btn_tfn_load.clicked.connect(self.tfn_load)
self.btn_tfn_predict.clicked.connect(self.tfn_predict)
self.btn_tfn_predict_camera.clicked.connect(self.predict_camera)
self.btn_tfn_predict_camera_stop.clicked.connect(self.predict_camera_stop)
self.train_data = None
self.test_data = None
self.model = None
self.camera_running = False
self.cv_cap = None
def train_browse(self):
file_path = filedialog.askdirectory()
if file_path is not None:
self.line_train_folder.setText(file_path)
def train_load(self):
datagen = ImageDataGenerator(rescale=1 / 255.0)
self.train_data = datagen.flow_from_directory(
self.line_train_folder.text(),
target_size=INPUT_SHAPE[:2],
batch_size=self.spin_train_n.value(),
class_mode='categorical',
shuffle=True
)
def test_browse(self):
file_path = filedialog.askdirectory()
if file_path is not None:
self.line_test_folder.setText(file_path)
def test_load(self):
datagen = ImageDataGenerator(rescale=1 / 255.0)
self.test_data = datagen.flow_from_directory(
self.line_test_folder.text(),
target_size=INPUT_SHAPE[:2],
batch_size=self.spin_test_n.value(),
class_mode='categorical'
)
def tfn_train(self):
# Create model
self.model = tf.keras.models.Sequential([
layers.Conv2D(64, (6, 6), padding='same', activation=KERNEL_ACTIVATION, input_shape=INPUT_SHAPE),
layers.Conv2D(64, (6, 6), padding='same', activation=KERNEL_ACTIVATION),
layers.MaxPooling2D((2, 2)),
#
layers.Conv2D(32, (2, 2), padding='same', activation=KERNEL_ACTIVATION),
layers.Conv2D(32, (2, 2), padding='same', activation=KERNEL_ACTIVATION),
layers.MaxPooling2D((2, 2)),
#
layers.Flatten(),
layers.Dense(128, activation=MLP_ACTIVATION),
layers.Dense(self.train_data.num_classes, activation='softmax')
])
# Compile model
self.model.compile(loss='categorical_crossentropy',
optimizer=OPTIMIZER,
metrics=['accuracy'])
# Train model
self.model.fit(self.train_data,
epochs=self.spin_tfn_epochs.value(),
validation_data=self.test_data)
print('Training done.')
loss, accuracy = self.model.evaluate(self.test_data, verbose=2)
print('Accuracy: ' + str(accuracy))
def tfn_save(self):
self.model.save('LR5_data/model.h5')
def tfn_load(self):
self.model = keras.models.load_model('LR5_data/model.h5')
def tfn_predict(self):
class_names = []
for folder, dirs, files in os.walk(self.line_test_folder.text()):
for directory in dirs:
class_names.append(directory)
random_class = random.randrange(0, self.test_data.num_classes)
test_path = self.line_test_folder.text() + '/' + class_names[random_class]
random_image = random.randrange(0, len(os.listdir(test_path)))
img_array = cv2.cvtColor(cv2.imread(
os.path.join(test_path, os.listdir(test_path)[random_image])), cv2.COLOR_BGR2RGB)
new_array = cv2.resize(img_array, INPUT_SHAPE[:2])
self.cvl_image.setPixmap(QPixmap.fromImage(qimage2ndarray.array2qimage(
cv2.resize(img_array, (320, 240), interpolation=cv2.INTER_NEAREST))))
new_array = np.expand_dims(new_array, axis=0)
new_array = np.array(new_array).reshape(PREDICT_SHAPE)
new_array = new_array / 255.0
result_1 = self.model.predict([new_array])
result = int(np.argmax(result_1[0]))
print(class_names[result] + ' / ' + class_names[random_class])
self.label_tfn_result.setText(class_names[result] + ' on photo. ' + class_names[random_class] + ' in reality.')
def predict_camera(self):
self.camera_running = True
self.cv_cap = cv2.VideoCapture(self.camera_id.value(), cv2.CAP_DSHOW)
thread = threading.Thread(target=self.camera_prediction)
thread.start()
def predict_camera_stop(self):
self.camera_running = False
def camera_prediction(self):
class_names = []
for folder, dirs, files in os.walk(self.line_test_folder.text()):
for directory in dirs:
class_names.append(directory)
while self.camera_running:
ret, img = self.cv_cap.read()
img_array = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
new_array = cv2.resize(img_array, INPUT_SHAPE[:2])
self.cvl_image.setPixmap(QPixmap.fromImage(qimage2ndarray.array2qimage(
cv2.resize(img_array, (320, 240), interpolation=cv2.INTER_NEAREST))))
new_array = np.expand_dims(new_array, axis=0)
new_array = np.array(new_array).reshape(PREDICT_SHAPE)
new_array = new_array / 255.0
result_1 = self.model.predict([new_array])
result = int(np.argmax(result_1[0]))
self.label_tfn_result.setText(class_names[result] + ' on photo.')
self.cv_cap.release()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
app.setStyle('fusion')
window = LR5()
window.show()
app.exec_()
|
{"hexsha": "9124e3de78258a3a2126a65e27a8a30b612a0e04", "size": 7160, "ext": "py", "lang": "Python", "max_stars_repo_path": "LR5/LR5.py", "max_stars_repo_name": "XxOinvizioNxX/NSvZTZiU", "max_stars_repo_head_hexsha": "6eeab20503cddf299c1258969fba6e94915112fb", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LR5/LR5.py", "max_issues_repo_name": "XxOinvizioNxX/NSvZTZiU", "max_issues_repo_head_hexsha": "6eeab20503cddf299c1258969fba6e94915112fb", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LR5/LR5.py", "max_forks_repo_name": "XxOinvizioNxX/NSvZTZiU", "max_forks_repo_head_hexsha": "6eeab20503cddf299c1258969fba6e94915112fb", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.345177665, "max_line_length": 120, "alphanum_fraction": 0.637849162, "include": true, "reason": "import numpy", "num_tokens": 1550}
|
import numpy as np
from time import time
import datetime
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import h5py
import sys
from os import listdir, remove
from os.path import isfile, join, exists, basename, splitext
#from laspy.file import File
from random import randint
from enum import Enum
from math import *
import re
from sys import path
#from notify_run import Notify
#notifyDevice = Notify()
# import firebase_admin
# from firebase_admin import credentials
# from firebase_admin import firestore
# cred = credentials.Certificate('./online-app-600-firebase-adminsdk-zx0e7-bb28b4d148.json')
# firebase_admin.initialize_app(cred)
# firestoreDB = firestore.client()
class FireStroreCollection:
train = "trainMesages"
dataProc = "dataProcMesages"
if(os.path.exists("C:/Users/Jonas")):
mainPath = "G:/PointCloud DataSets/"
elif(os.path.exists("C:/Users/JStanke")):
mainPath = "E:/PointClouds/"
else:
mainPath = "/content/drive/My Drive/object detection/"
class Paths:
datasets = mainPath
class Semantic3D:
pointCloudPath = mainPath + "semantic3d/"
rawTrain = pointCloudPath+"rawTrain/"
rawTest = pointCloudPath+"rawTest/"
rawTestReduced = pointCloudPath+"rawTestReduced/"
processedTrain = pointCloudPath+"processedTrain/"
processedTest = pointCloudPath+"processedTest/"
processedTestReduced = pointCloudPath+"processedTestReduced/"
generatedTest = pointCloudPath+"generatedTest/"
rawSmallPc = rawTrain + "bildstein_station3_xyz_intensity_rgb.hdf5"
procSmallPc = processedTrain + "bildstein_station3_xyz_intensity_rgb.npy"
class Curbs:
pointCloudPath = mainPath + "curbs/"
processedTrain = pointCloudPath+"processedTrain/forSegmentation(10cmVoxels)/"
processedTest = pointCloudPath+"processedTest/forSegmentation(10cmVoxels)/"
class NPM3D:
pointCloudPath = mainPath + "NPM3D/"
rawTrain = pointCloudPath+"training_10_classes/"
rawTest = pointCloudPath+"test_10_classes/"
# processedTrain = pointCloudPath+"processedTrain/"
processedTrain = pointCloudPath+"torch_generated_data/train_pointclouds/"
processedTrainVoxels = pointCloudPath+"processedTrainVoxels/"
processedTest = pointCloudPath+"processedTest/"
generatedTest = pointCloudPath+"generatedTest/"
class VGTU:
pointCloudPath = mainPath + "VGTU/"
dataPath = "./data/"
checkPointFilePath = "check.point"
pausePointFilePath = "pause.point"
trainLogPath = "./training.log"
dataProcPath = "./dataProc.log"
if(not os.path.exists(dataPath)):
os.mkdir(dataPath)
@staticmethod
def GetFiles(folder, excludeFiles = None, onlyNames = False, withoutExtension = False, findExtesions = ('.hdf5', '.npy', '.las', '.ply')):
if(isinstance(findExtesions, list)):
findExtesions = tuple(findExtesions)
if(excludeFiles is None):
excludeFiles = []
if(not isinstance(excludeFiles, list)):
excludeFiles = [excludeFiles]
excludeNames = [splitext(basename(name))[0] for name in excludeFiles]
path = folder + "/"
if(onlyNames):
path = ""
pcFiles = [splitext(basename(path+f))[0] if withoutExtension else path+f
for f in listdir(folder)
if isfile(join(folder, f))
and f.endswith(findExtesions)
and not f.startswith('_')
and not (splitext(basename(f))[0] in excludeNames)
and splitext(basename(f))[0] != "small"]
return pcFiles
@staticmethod
def JoinPaths(basePath, paths):
assert(isinstance(paths, list))
return list(map(lambda path: os.path.join(basePath, path), paths))
@staticmethod
def GetBestModel(withPrefix = "0"):
if(not exists(Paths.dataPath)):
return None
modelFiles = [[Paths.dataPath+"/"+f, float(re.findall("\d+\.\d+", splitext(basename(f))[0])[0])] for f in listdir(Paths.dataPath) if
isfile(join(Paths.dataPath, f))
and f.endswith('.h5')
and basename(f).startswith(withPrefix)]
if(len(modelFiles) == 0):
return None
scores = np.array([modelFiles[i][1] for i in range(len(modelFiles))])
index = np.argmax(scores)
return modelFiles[index][0]
@staticmethod
def FileName(path, withoutExt = True):
name = os.path.basename(path)
if(withoutExt):
name = os.path.splitext(name)[0]
return name
class Label:
class Semantic3D:
unlabeled = int(0)
manMadeTerrain = int(1)
naturalTerrain = int(2)
highVegetation = int(3)
lowVegetation = int(4)
buildings = int(5)
hardScape = int(6)
scanningArtefacts = int(7)
cars = int(8)
Count = int(9)
Names = ["unlabeled", "manMadeTerrain", "naturalTerrain", "highVegetation", "lowVegetation", "buildings", "hardScape", "scanningArtefacts", "cars"]
class Curbs:
other = int(0)
curbs = int(1)
Names = ["other", "curbs"]
class NPM3D:
unclassified = int(0)
ground = int(1)
building = int(2)
pole_roadSign_trafficLight = int(3)
bollard_smallPole = int(4)
trash_can = int(5)
barrier = int(6)
pedestrian = int(7)
car = int(8)
natural_vegetation = int(9)
Count = int(10)
Names = ["unclassified", "ground", "building", "pole", "smallPole", "trash_can", "barrier", "pedestrian", "car", "vegetation"]
class Colors:
grey = np.array([128, 128, 128])/255
red = np.array([136, 0, 1])/255
mint = np.array([170, 255, 195])/255
teal = np.array([0, 128, 128])/255
green = np.array([60, 180, 75])/255
verygreen = np.array([0, 255, 0])/255
brown = np.array([170, 110, 40])/255
# white = np.array([255, 255, 255])/255
black = np.array([0, 0, 0])/255
blue = np.array([0, 0, 255])/255
pink = np.array([255, 56, 152])/255
Npm3D = [grey, red, blue, teal, mint, brown, pink, black, green]
Sema3D = [grey, verygreen, green, mint, red, blue, brown, black]
|
{"hexsha": "89d058df08112781643cac812829df70e275950d", "size": 6617, "ext": "py", "lang": "Python", "max_stars_repo_path": "imports.py", "max_stars_repo_name": "eglrp/ConvPoint_Keras", "max_stars_repo_head_hexsha": "66c94479ff8dc8ad174ed4da8e6bb1d641a8a8c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "imports.py", "max_issues_repo_name": "eglrp/ConvPoint_Keras", "max_issues_repo_head_hexsha": "66c94479ff8dc8ad174ed4da8e6bb1d641a8a8c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "imports.py", "max_forks_repo_name": "eglrp/ConvPoint_Keras", "max_forks_repo_head_hexsha": "66c94479ff8dc8ad174ed4da8e6bb1d641a8a8c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-15T13:52:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-15T13:52:13.000Z", "avg_line_length": 35.5752688172, "max_line_length": 155, "alphanum_fraction": 0.5998186489, "include": true, "reason": "import numpy", "num_tokens": 1666}
|
[STATEMENT]
lemma inorder_eq_mset: "mset (inorder t) = relations_mset t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mset (inorder t) = relations_mset t
[PROOF STEP]
by(induction t) (auto)
|
{"llama_tokens": 84, "file": "Query_Optimization_JoinTree", "length": 1}
|
# -*- coding: utf-8 -*-
"""
@Time:Created on 2019/5/20 19:40
@author: LiFan Chen
@Filename: model_glu.py
@Software: PyCharm
"""
# -*- coding: utf-8 -*-
"""
@Time:Created on 2019/5/7 13:40
@author: LiFan Chen
@Filename: model.py
@Software: PyCharm
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import math
import numpy as np
from sklearn.metrics import roc_auc_score, precision_score, recall_score
class SelfAttention(nn.Module):
def __init__(self, hid_dim, n_heads, dropout, device):
super().__init__()
self.hid_dim = hid_dim
self.n_heads = n_heads
assert hid_dim % n_heads == 0
self.w_q = nn.Linear(hid_dim, hid_dim)
self.w_k = nn.Linear(hid_dim, hid_dim)
self.w_v = nn.Linear(hid_dim, hid_dim)
self.fc = nn.Linear(hid_dim, hid_dim)
self.do = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([hid_dim // n_heads])).to(device)
def forward(self, query, key, value, mask=None):
bsz = query.shape[0]
# query = key = value [batch size, sent len, hid dim]
Q = self.w_q(query)
K = self.w_k(key)
V = self.w_v(value)
# Q, K, V = [batch size, sent len, hid dim]
Q = Q.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)
K = K.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)
V = V.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads).permute(0, 2, 1, 3)
# K, V = [batch size, n heads, sent len_K, hid dim // n heads]
# Q = [batch size, n heads, sent len_q, hid dim // n heads]
energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
# energy = [batch size, n heads, sent len_Q, sent len_K]
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e10)
attention = self.do(F.softmax(energy, dim=-1))
# attention = [batch size, n heads, sent len_Q, sent len_K]
x = torch.matmul(attention, V)
# x = [batch size, n heads, sent len_Q, hid dim // n heads]
x = x.permute(0, 2, 1, 3).contiguous()
# x = [batch size, sent len_Q, n heads, hid dim // n heads]
x = x.view(bsz, -1, self.n_heads * (self.hid_dim // self.n_heads))
# x = [batch size, src sent len_Q, hid dim]
x = self.fc(x)
# x = [batch size, sent len_Q, hid dim]
return x
class Encoder(nn.Module):
"""protein feature extraction."""
def __init__(self, protein_dim, hid_dim, n_layers,kernel_size , dropout, device):
super().__init__()
assert kernel_size % 2 == 1, "Kernel size must be odd (for now)"
self.input_dim = protein_dim
self.hid_dim = hid_dim
self.kernel_size = kernel_size
self.dropout = dropout
self.n_layers = n_layers
self.device = device
#self.pos_embedding = nn.Embedding(1000, hid_dim)
self.scale = torch.sqrt(torch.FloatTensor([0.5])).to(device)
self.convs = nn.ModuleList([nn.Conv1d(hid_dim, 2*hid_dim, kernel_size, padding=(kernel_size-1)//2) for _ in range(self.n_layers)]) # convolutional layers
self.dropout = nn.Dropout(dropout)
self.fc = nn.Linear(self.input_dim,self.hid_dim)
def forward(self, protein):
#pos = torch.arange(0, protein.shape[1]).unsqueeze(0).repeat(protein.shape[0], 1).to(self.device)
#protein = protein + self.pos_embedding(pos)
#protein = [batch size, protein len,protein_dim]
conv_input = self.fc(protein)
# conv_input=[batch size,protein len,hid dim]
#permute for convolutional layer
conv_input = conv_input.permute(0, 2, 1)
#conv_input = [batch size, hid dim, protein len]
for i, conv in enumerate(self.convs):
#pass through convolutional layer
conved = conv(self.dropout(conv_input))
#conved = [batch size, 2*hid dim, protein len]
#pass through GLU activation function
conved = F.glu(conved, dim=1)
#conved = [batch size, hid dim, protein len]
#apply residual connection / high way
conved = (conved + conv_input) * self.scale
#conved = [batch size, hid dim, protein len]
#set conv_input to conved for next loop iteration
conv_input = conved
conved = conved.permute(0,2,1)
# conved = [batch size,protein len,hid dim]
return conved
class PositionwiseFeedforward(nn.Module):
def __init__(self, hid_dim, pf_dim, dropout):
super().__init__()
self.hid_dim = hid_dim
self.pf_dim = pf_dim
self.fc_1 = nn.Conv1d(hid_dim, pf_dim, 1) # convolution neural units
self.fc_2 = nn.Conv1d(pf_dim, hid_dim, 1) # convolution neural units
self.do = nn.Dropout(dropout)
def forward(self, x):
# x = [batch size, sent len, hid dim]
x = x.permute(0, 2, 1)
# x = [batch size, hid dim, sent len]
x = self.do(F.relu(self.fc_1(x)))
# x = [batch size, pf dim, sent len]
x = self.fc_2(x)
# x = [batch size, hid dim, sent len]
x = x.permute(0, 2, 1)
# x = [batch size, sent len, hid dim]
return x
class DecoderLayer(nn.Module):
def __init__(self, hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout, device):
super().__init__()
self.ln = nn.LayerNorm(hid_dim)
self.sa = self_attention(hid_dim, n_heads, dropout, device)
self.ea = self_attention(hid_dim, n_heads, dropout, device)
self.pf = positionwise_feedforward(hid_dim, pf_dim, dropout)
self.do = nn.Dropout(dropout)
def forward(self, trg, src, trg_mask=None, src_mask=None):
# trg = [batch_size, compound len, atom_dim]
# src = [batch_size, protein len, hid_dim] # encoder output
# trg_mask = [batch size, compound sent len]
# src_mask = [batch size, protein len]
trg = self.ln(trg + self.do(self.sa(trg, trg, trg, trg_mask)))
trg = self.ln(trg + self.do(self.ea(trg, src, src, src_mask)))
trg = self.ln(trg + self.do(self.pf(trg)))
return trg
class Decoder(nn.Module):
""" compound feature extraction."""
def __init__(self, atom_dim, hid_dim, n_layers, n_heads, pf_dim, decoder_layer, self_attention,
positionwise_feedforward, dropout, device):
super().__init__()
self.ln = nn.LayerNorm(hid_dim)
self.output_dim = atom_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.n_heads = n_heads
self.pf_dim = pf_dim
self.decoder_layer = decoder_layer
self.self_attention = self_attention
self.positionwise_feedforward = positionwise_feedforward
self.dropout = dropout
self.device = device
self.sa = self_attention(hid_dim, n_heads, dropout, device)
self.layers = nn.ModuleList(
[decoder_layer(hid_dim, n_heads, pf_dim, self_attention, positionwise_feedforward, dropout, device)
for _ in range(n_layers)])
self.ft = nn.Linear(atom_dim, hid_dim)
self.do = nn.Dropout(dropout)
self.fc_1 = nn.Linear(hid_dim, 256)
self.fc_2 = nn.Linear(256, 2)
def forward(self, trg, src, trg_mask=None,src_mask=None):
# trg = [batch_size, compound len, atom_dim]
# src = [batch_size, protein len, hid_dim] # encoder output
trg = self.ft(trg)
# trg = [batch size, compound len, hid dim]
for layer in self.layers:
trg = layer(trg, src)
# trg = [batch size, compound len, hid dim]
"""Use norm to determine which atom is significant. """
norm = torch.norm(trg,dim=2)
# norm = [batch size,compound len]
norm = F.softmax(norm,dim=1)
# norm = [batch size,compound len]
trg = torch.squeeze(trg,dim=0)
norm = torch.squeeze(norm,dim=0)
sum = torch.zeros((self.hid_dim)).to(self.device)
for i in range(norm.shape[0]):
v = trg[i,]
v = v * norm[i]
sum += v
sum = sum.unsqueeze(dim=0)
# trg = [batch size,hid_dim]
label = F.relu(self.fc_1(sum))
label = self.fc_2(label)
return label
class Predictor(nn.Module):
def __init__(self, encoder, decoder, device, atom_dim=34):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
self.weight = nn.Parameter(torch.FloatTensor(atom_dim, atom_dim))
self.init_weight()
def init_weight(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def gcn(self, input, adj):
# input =[num_node, atom_dim]
# adj = [num_node, num_node]
support = torch.mm(input, self.weight)
# support =[num_node,atom_dim]
output = torch.mm(adj, support)
# output = [num_node,atom_dim]
return output
def forward(self, compound, adj, protein):
# compound = [atom_num, atom_dim]
# adj = [atom_num, atom_num]
# protein = [protein len, 100]
compound = self.gcn(compound, adj)
compound = torch.unsqueeze(compound, dim=0)
# compound = [batch size=1 ,atom_num, atom_dim]
protein = torch.unsqueeze(protein, dim=0)
# protein =[ batch size=1,protein len, protein_dim]
enc_src = self.encoder(protein)
# enc_src = [batch size, protein len, hid dim]
out = self.decoder(compound, enc_src)
# out = [batch size, 2]
#out = torch.squeeze(out, dim=0)
return out
def __call__(self, data, train=True):
inputs, correct_interaction = data[:-1], data[-1]
compound, adj, protein = inputs
Loss = nn.CrossEntropyLoss()
if train:
predicted_interaction = self.forward(compound, adj, protein)
loss = Loss(predicted_interaction, correct_interaction)
return loss
else:
predicted_interaction = self.forward(compound, adj, protein)
correct_labels = correct_interaction.to('cpu').data.numpy().item()
ys = F.softmax(predicted_interaction,1).to('cpu').data.numpy()
predicted_labels = np.argmax(ys)
predicted_scores = ys[0,1]
return correct_labels, predicted_labels, predicted_scores
class Trainer(object):
def __init__(self, model, lr, weight_decay, batch):
self.model = model
self.optimizer = optim.Adam(self.model.parameters(),
lr=lr, weight_decay=weight_decay)
self.batch = batch
def train(self, dataset, device):
self.model.train()
np.random.shuffle(dataset)
N = len(dataset)
loss_total = 0
i = 0
self.optimizer.zero_grad()
for data in dataset:
i = i+1
loss = self.model(data)
loss = loss / self.batch
loss.backward()
if i % self.batch == 0 or i == N:
self.optimizer.step()
self.optimizer.zero_grad()
loss_total += loss.item()
return loss_total
class Tester(object):
def __init__(self, model):
self.model = model
def test(self, dataset):
self.model.eval()
N = len(dataset)
T, Y, S = [], [], []
with torch.no_grad():
for data in dataset:
correct_labels, predicted_labels, predicted_scores = self.model(data, train=False)
T.append(correct_labels)
Y.append(predicted_labels)
S.append(predicted_scores)
AUC = roc_auc_score(T, S)
precision = precision_score(T, Y)
recall = recall_score(T, Y)
return AUC, precision, recall
def save_AUCs(self, AUCs, filename):
with open(filename, 'a') as f:
f.write('\t'.join(map(str, AUCs)) + '\n')
def save_model(self, model, filename):
torch.save(model.state_dict(), filename)
|
{"hexsha": "fe80781ff85830f912f1f4b2b50d08a871664187", "size": 12216, "ext": "py", "lang": "Python", "max_stars_repo_path": "Human,C.elegans/model_glu.py", "max_stars_repo_name": "nepp1d0/transformerCPI", "max_stars_repo_head_hexsha": "a84c1e9b23b35ba3f02ad13621a1413f0ae7c62a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 68, "max_stars_repo_stars_event_min_datetime": "2020-02-27T03:01:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T13:20:00.000Z", "max_issues_repo_path": "Human,C.elegans/model_glu.py", "max_issues_repo_name": "nepp1d0/transformerCPI", "max_issues_repo_head_hexsha": "a84c1e9b23b35ba3f02ad13621a1413f0ae7c62a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-08-06T05:28:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-17T08:21:50.000Z", "max_forks_repo_path": "Human,C.elegans/model_glu.py", "max_forks_repo_name": "nepp1d0/transformerCPI", "max_forks_repo_head_hexsha": "a84c1e9b23b35ba3f02ad13621a1413f0ae7c62a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2020-05-30T08:45:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-19T16:20:44.000Z", "avg_line_length": 33.5604395604, "max_line_length": 163, "alphanum_fraction": 0.5951211526, "include": true, "reason": "import numpy", "num_tokens": 3157}
|
from collections import deque
from copy import deepcopy
from typing import Any, Deque, Dict, List, Optional, Tuple
import numpy as np
from abides_core import Message, NanosecondTime
from abides_core.generators import ConstantTimeGenerator, InterArrivalTimeGenerator
from abides_core.utils import str_to_ns
from abides_markets.agents.trading_agent import TradingAgent
from abides_markets.messages.marketdata import (
MarketDataMsg,
L2SubReqMsg,
TransactedVolDataMsg,
)
from abides_markets.messages.marketdata import (
L2DataMsg,
L2SubReqMsg,
TransactedVolSubReqMsg,
)
from abides_markets.orders import Order, Side
class CoreBackgroundAgent(TradingAgent):
def __init__(
self,
id: int,
symbol: str,
starting_cash: int,
subscribe_freq: int = int(1e8),
lookback_period: Optional[int] = None, # for volume subscription
subscribe: bool = True,
subscribe_num_levels: Optional[int] = None,
wakeup_interval_generator: InterArrivalTimeGenerator = ConstantTimeGenerator(
step_duration=str_to_ns("1min")
),
order_size_generator=None, # TODO: not sure about this one
state_buffer_length: int = 2,
market_data_buffer_length: int = 5,
first_interval: Optional[NanosecondTime] = None,
log_orders: bool = False,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
) -> None:
super().__init__(
id,
starting_cash=starting_cash,
log_orders=log_orders,
name=name,
type=type,
random_state=random_state,
)
self.symbol: str = symbol
# Frequency of agent data subscription up in ns-1
self.subscribe_freq: int = subscribe_freq
self.subscribe: bool = subscribe
self.subscribe_num_levels: int = subscribe_num_levels
self.first_interval: Optional[NanosecondTime] = first_interval
self.wakeup_interval_generator: InterArrivalTimeGenerator = (
wakeup_interval_generator
)
self.order_size_generator = (
order_size_generator # TODO: no diea here for typing
)
if hasattr(self.wakeup_interval_generator, "random_generator"):
self.wakeup_interval_generator.random_generator = self.random_state
self.state_buffer_length: int = state_buffer_length
self.market_data_buffer_length: int = market_data_buffer_length
self.first_interval: Optional[NanosecondTime] = first_interval
if self.order_size_generator != None: # TODO: check this one
self.order_size_generator.random_generator = self.random_state
self.lookback_period: NanosecondTime = self.wakeup_interval_generator.mean()
# internal variables
self.has_subscribed: bool = False
self.episode_executed_orders: List[
Order
] = [] # list of executed orders during full episode
# list of executed orders between steps - is reset at every step
self.inter_wakeup_executed_orders: List[
Order
] = [] # list of executed orders between steps - is reset at every step
self.parsed_episode_executed_orders: List[Tuple[int, int]] = [] # (price, qty)
self.parsed_inter_wakeup_executed_orders: List[
Tuple[int, int]
] = [] # (price, qty)
self.parsed_mkt_data: Dict[str, Any] = {}
self.parsed_mkt_data_buffer: Deque[Dict[str, Any]] = deque(
maxlen=self.market_data_buffer_length
)
self.parsed_volume_data = {}
self.parsed_volume_data_buffer: Deque[Dict[str, Any]] = deque(
maxlen=self.market_data_buffer_length
)
self.raw_state: Deque[Dict[str, Any]] = deque(maxlen=self.state_buffer_length)
# dictionary to track order status:
# - keys = order_id
# - value = dictionary {'active'|'cancelled'|'executed', Order, 'active_qty','executed_qty', 'cancelled_qty }
self.order_status: Dict[int, Dict[str, Any]] = {}
def kernel_starting(self, start_time: NanosecondTime) -> None:
super().kernel_starting(start_time)
def wakeup(self, current_time: NanosecondTime) -> bool:
# TODO: parent class (TradingAgent) returns bool of "ready to trade"
"""Agent interarrival wake up times are determined by wakeup_interval_generator"""
super().wakeup(current_time)
if not self.has_subscribed:
super().request_data_subscription(
L2SubReqMsg(
symbol=self.symbol,
freq=self.subscribe_freq,
depth=self.subscribe_num_levels,
)
)
super().request_data_subscription(
TransactedVolSubReqMsg(
symbol=self.symbol,
freq=self.subscribe_freq,
lookback=self.lookback_period,
)
)
self.has_subscribed = True
# compute the following wake up
if (self.mkt_open != None) and (
current_time >= self.mkt_open
): # compute the state (returned to the Gym Env)
raw_state = self.act_on_wakeup()
# TODO: wakeup function should return bool
return raw_state
##return non None value so the kernel catches it and stops
# return raw_state
def act_on_wakeup(self):
# Needs type signature
raise NotImplementedError
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
"""Processes message from exchange. Main function is to update orders in orderbook relative to mid-price.
:param simulation current time
:param message received by self from ExchangeAgent
:type current_time: pd.Timestamp
:type message: str
:return:
"""
# TODO: will prob need to see for transacted volume if we enrich the state
super().receive_message(current_time, sender_id, message)
if self.subscribe:
if isinstance(message, MarketDataMsg):
if isinstance(message, L2DataMsg):
self.parsed_mkt_data = self.get_parsed_mkt_data(message)
self.parsed_mkt_data_buffer.append(self.parsed_mkt_data)
elif isinstance(message, TransactedVolDataMsg):
self.parsed_volume_data = self.get_parsed_volume_data(message)
self.parsed_volume_data_buffer.append(self.parsed_volume_data)
def get_wake_frequency(self) -> NanosecondTime:
# first wakeup interval from open
time_first_wakeup = (
self.first_interval
if self.first_interval != None
else self.wakeup_interval_generator.next()
)
return time_first_wakeup
def apply_actions(self, actions: List[Dict[str, Any]]) -> None:
# take action from kernel in general representation
# convert in ABIDES-SIMULATOR API
# print(actions)
# TODO Add cancel in actions
# print(actions)
for action in actions:
if action["type"] == "MKT":
side = Side.BID if action["direction"] == "BUY" else Side.ASK
# print(action['direction'])
# print(side)
self.place_market_order(self.symbol, action["size"], side)
elif action["type"] == "LMT":
side = Side.BID if action["direction"] == "BUY" else Side.ASK
self.place_limit_order(
self.symbol, action["size"], side, action["limit_price"]
)
# TODO: test the cancel based on the id
elif action["type"] == "CCL_ALL":
# order = self.order_status[action['order_id']]
self.cancel_all_orders()
else:
raise ValueError(f"Action Type {action['type']} is not supported")
def update_raw_state(self) -> None:
# mkt data
parsed_mkt_data_buffer = deepcopy(self.parsed_mkt_data_buffer)
# internal data
internal_data = self.get_internal_data()
# volume data
parsed_volume_data_buffer = deepcopy(self.parsed_volume_data_buffer)
new = {
"parsed_mkt_data": parsed_mkt_data_buffer,
"internal_data": internal_data,
"parsed_volume_data": parsed_volume_data_buffer,
}
self.raw_state.append(new)
def get_raw_state(self) -> Dict:
# TODO: Incompatible return value type (got "deque[Any]", expected "Dict[Any, Any]")
return self.raw_state
def get_parsed_mkt_data(self, message: L2DataMsg) -> Dict[str, Any]:
# TODO: probaly will need to include what type of subscription in parameters here
bids = message.bids
asks = message.asks
last_transaction = message.last_transaction
exchange_ts = message.exchange_ts
mkt_data = {
"bids": bids,
"asks": asks,
"last_transaction": last_transaction,
"exchange_ts": exchange_ts,
}
return mkt_data
def get_parsed_volume_data(self, message: TransactedVolDataMsg) -> Dict[str, Any]:
last_transaction = message.last_transaction
exchange_ts = message.exchange_ts
bid_volume = message.bid_volume
ask_volume = message.ask_volume
total_volume = bid_volume + ask_volume
volume_data = {
"last_transaction": last_transaction,
"exchange_ts": exchange_ts,
"bid_volume": bid_volume,
"ask_volume": ask_volume,
"total_volume": total_volume,
}
return volume_data
def get_internal_data(self) -> Dict[str, Any]:
holdings = self.get_holdings(self.symbol)
cash = self.get_holdings("CASH")
inter_wakeup_executed_orders = self.inter_wakeup_executed_orders
episode_executed_orders = self.episode_executed_orders
parsed_episode_executed_orders = self.parsed_episode_executed_orders
parsed_inter_wakeup_executed_orders = self.parsed_inter_wakeup_executed_orders
current_time = self.current_time
order_status = self.order_status
mkt_open = self.mkt_open
mkt_close = self.mkt_close
internal_data = {
"holdings": holdings,
"cash": cash,
"inter_wakeup_executed_orders": inter_wakeup_executed_orders,
"episode_executed_orders": episode_executed_orders,
"parsed_episode_executed_orders": parsed_episode_executed_orders,
"parsed_inter_wakeup_executed_orders": parsed_inter_wakeup_executed_orders,
"starting_cash": self.starting_cash,
"current_time": current_time,
"order_status": order_status,
"mkt_open": mkt_open,
"mkt_close": mkt_close,
}
return internal_data
def order_executed(self, order: Order) -> None:
super().order_executed(order)
# parsing of the order message
executed_qty = order.quantity
executed_price = order.fill_price
assert executed_price is not None
order_id = order.order_id
# step lists
self.inter_wakeup_executed_orders.append(order)
self.parsed_inter_wakeup_executed_orders.append((executed_qty, executed_price))
# episode lists
self.episode_executed_orders.append(order)
self.parsed_episode_executed_orders.append((executed_qty, executed_price))
# update order status dictionnary
# test if it was mkt order and first execution received from it
try:
self.order_status[order_id]
flag = True
except KeyError:
flag = False
if flag:
self.order_status[order_id]["executed_qty"] += executed_qty
self.order_status[order_id]["active_qty"] -= executed_qty
if self.order_status[order_id]["active_qty"] <= 0:
self.order_status[order_id]["status"] = "executed"
else:
self.order_status[order_id] = {
"status": "mkt_immediately_filled",
"order": order,
"active_qty": 0,
"executed_qty": executed_qty,
"cancelled_qty": 0,
}
def order_accepted(self, order: Order) -> None:
super().order_accepted(order)
# update order status dictionnary
self.order_status[order.order_id] = {
"status": "active",
"order": order,
"active_qty": order.quantity,
"executed_qty": 0,
"cancelled_qty": 0,
}
def order_cancelled(self, order: Order) -> None:
super().order_cancelled(order)
order_id = order.order_id
quantity = order.quantity
self.order_status[order_id] = {
"status": "cancelled",
"order": order,
"cancelled_qty": quantity,
}
def new_inter_wakeup_reset(self) -> None:
self.inter_wakeup_executed_orders = (
[]
) # list of executed orders between steps - is reset at every step
self.parsed_inter_wakeup_executed_orders = [] # just tuple (price, qty)
def act(self, raw_state):
# used by the background agent
raise NotImplementedError
def new_step_reset(self) -> None:
self.inter_wakeup_executed_orders = (
[]
) # list of executed orders between steps - is reset at every step
self.parsed_inter_wakeup_executed_orders = [] # just tuple (price, qty)
|
{"hexsha": "cb2e032a5085c9671aba01fecb5c65998077669c", "size": 13819, "ext": "py", "lang": "Python", "max_stars_repo_path": "abides-markets/abides_markets/agents/background_v2/core_background_agent.py", "max_stars_repo_name": "jpmorganchase/ABIDES-jpmc-gym", "max_stars_repo_head_hexsha": "198736a1b1316190072356c980412569579f15a6", "max_stars_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-23T13:17:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T13:17:26.000Z", "max_issues_repo_path": "abides-markets/abides_markets/agents/background_v2/core_background_agent.py", "max_issues_repo_name": "jpmorganchase/ABIDES-gym", "max_issues_repo_head_hexsha": "198736a1b1316190072356c980412569579f15a6", "max_issues_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "abides-markets/abides_markets/agents/background_v2/core_background_agent.py", "max_forks_repo_name": "jpmorganchase/ABIDES-gym", "max_forks_repo_head_hexsha": "198736a1b1316190072356c980412569579f15a6", "max_forks_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8242074928, "max_line_length": 117, "alphanum_fraction": 0.6281930675, "include": true, "reason": "import numpy", "num_tokens": 2960}
|
import numpy as np
from scipy.optimize import curve_fit
def exp_func(x, a, b, c):
"""
An exponential function.
Inputs:
x : (1D array) x-values to be input into the exponential function.
a : (float) multiplicative factor for the exponential.
b : (float) multiplicative factor for the exponentiated x.
c : (float) additive factor for the exponential function.
Outputs:
y : (1D array) The exponentiated function. Same length as x.
"""
y = a * np.exp(-b * x) + c
return y
def fit_line(xdata, ydata, func):
"""
Fits a line given data.
Inputs:
xdata : (1D array) x-values of observed / provided data.
ydata : (1D array) y-values of observed / provided data.
func : (function) functional form of curve to be fit.
Outputs:
fit_y : (1D array) y-values computed from applying fit
function to xdata. Same length as
xdata and ydata.
popt : (1D array) best-fit parameters for func.
"""
popt, pcov = curve_fit(func, xdata, ydata)
fit_y = func(xdata, *popt)
return fit_y, popt
|
{"hexsha": "59cbcd0dca5b8ea30b6c7e4da7bee40aabe4916b", "size": 1044, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/template/fit_line.py", "max_stars_repo_name": "arjunsavel/template-repo", "max_stars_repo_head_hexsha": "4dcffb2fe0cd748c76304e872a2fced5ea2b597d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/template/fit_line.py", "max_issues_repo_name": "arjunsavel/template-repo", "max_issues_repo_head_hexsha": "4dcffb2fe0cd748c76304e872a2fced5ea2b597d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/template/fit_line.py", "max_forks_repo_name": "arjunsavel/template-repo", "max_forks_repo_head_hexsha": "4dcffb2fe0cd748c76304e872a2fced5ea2b597d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-02-07T15:21:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T21:14:43.000Z", "avg_line_length": 23.2, "max_line_length": 68, "alphanum_fraction": 0.6714559387, "include": true, "reason": "import numpy,from scipy", "num_tokens": 297}
|
import re
import string
from math import pi
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, NumeralTickFormatter
from bokeh.plotting import figure
from bokeh.transform import cumsum
from bokeh.palettes import Category10
from numpy import histogram
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import LocallyLinearEmbedding
from twitter import Status
from wordcloud import WordCloud
from user_timeline_statistics import UserTimelineStatistics
from guess_language import guess_language
account = UserTimelineStatistics()
def create_replies_graph(t):
# tworzy wykres kołowy przedstawiający ilość odpowiedzi
replies = account.replies_count(t)
retweets = account.retweets_count(t)
x = {
"tweets": len(t) - replies - retweets,
"replies": replies,
"retweets": retweets
}
data = pd.Series(x).reset_index(name="value").rename(columns={"index": "tweet_type"})
data["angle"] = data["value"] / data["value"].sum() * 2 * pi
data["color"] = ["#00c4a6", "#007fc4", "#49b6f9"]
p = figure(plot_height=300, plot_width=450, title="Replies Count", toolbar_location=None,
tools="hover", tooltips="@tweet_type: @value", x_range=(-0.5, 1.0))
p.wedge(x=0, y=1, radius=0.4, start_angle=cumsum("angle", include_zero=True),
end_angle=cumsum("angle"), line_color="white", fill_color='color',
legend="tweet_type", source=data)
p.axis.axis_label = None
p.axis.visible = False
p.grid.grid_line_color = None
return p
def create_favorites_graph(t, bins):
# tworzy histogram polubień
hist, bin_edges = histogram([post.favorite_count for post in t], bins=bins)
bin_edges = [round(i) for i in bin_edges]
m = max(hist)
colors = ["#00c4a6" if v != m else "#007fc4" for v in hist]
source = ColumnDataSource(data=dict(
left=bin_edges[:-1],
right=bin_edges[1:],
top=hist,
bottom=[0 for x in range(len(hist))],
colors=colors
))
tooltips = [
("Favorites range", "<@left{0,0}; @right{0,0})"),
("Tweets", "@top")
]
p = figure(plot_height=300, plot_width=450, title="Favorites", toolbar_location="right",
x_axis_label="Favorites count", y_axis_label="Number of tweets", tooltips=tooltips)
p.quad("left", "right", "top", "bottom", fill_color="colors", source=source)
p.xaxis.ticker = bin_edges
if bin_edges[-1] >= 10000:
p.xaxis.major_label_orientation = pi/4
p.xaxis.formatter = NumeralTickFormatter(format="0,0")
return p
def create_posts_in_days_graph(t):
"""Tworzenie wykresu przedstawiającego ilości postów opublikowanych w określonych dniach tygodnia."""
# tworzy wykres słupkowy pokazujący ilość opublikowanych postów w poszczególnych dniach tygodnia
days = account.day_counter(t)
# ta lista pełni rolę pojemnika - każda cyfra odpowiada kolejnemu dniowi tygodnia zaczynając od poniedziałku
lista = [0,0,0,0,0,0,0]
# ta lista wykorzystywana jest zarówno przez tooltips jak i pętlę przygotowującą etykiety
week = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
# pseudosegregująca pętla - zamienia rekord tabeli 'lista' na liczbę postów w zależności,
# jaki dzień tygodnia zawiera klucz słownika 'Days'
for d, posts in days.items():
if d == 'Mon':
lista[0] = posts
elif d == 'Tue':
lista[1] = posts
elif d == 'Wed':
lista[2] = posts
elif d == 'Thu':
lista[3] = posts
elif d == 'Fri':
lista[4] = posts
elif d == 'Sat':
lista[5] = posts
elif d == 'Sun':
lista[6] = posts
# zmienna przechowująca element listy zawierający największą wartość
m = max(lista)
# kolory, jakimi zostaną wypełnione poszczególne "słupki" wykresu
colors = ["#00c4a6" if v != m else "#007fc4" for v in lista]
source = ColumnDataSource(data=dict(
x = [i for i in range(1, 8)],
width = [0.5,0.5,0.5,0.5,0.5,0.5,0.5],
bottom =[0,0,0,0,0,0,0],
top=lista,
fill_color=colors
))
tooltips = [
("Number of posts", "@top")
]
p = figure(plot_height=300, plot_width=450, title="Published posts in the days of week", toolbar_location="right",
x_axis_label="Day of week", y_axis_label="Number of tweets", tooltips=tooltips)
p.vbar(x="x", width="width", bottom="bottom", top="top", fill_color="fill_color", source=source)
labels = {}
# pętla przygotowywuje słownik wykorzystywany do etykiety wykresu
# - co prawda zawsze będzie 7 punktów wykresu, ale nie ma problemu z tickami
n = 1
for d in week:
labels[n] = d
n += 1
p.xaxis.major_label_overrides = labels
return p
def create_length_graph(t, bins):
# tworzy histogram dlugosci postow
hist, bin_edges = histogram([len(post.full_text) for post in t], bins=bins)
bin_edges = [round(i) for i in bin_edges]
m = max(hist)
colors = ["#00c4a6" if v != m else "#007fc4" for v in hist]
source = ColumnDataSource(data=dict(
left=bin_edges[:-1],
right=bin_edges[1:],
top=hist,
bottom=[0 for x in range(len(hist))],
colors=colors
))
tooltips = [
("Tweet length", "<@left{0,0}; @right{0,0})"),
("Tweets", "@top")
]
p = figure(plot_height=300, plot_width=450, title="Tweets Length",
toolbar_location="right", x_axis_label="Tweet length", y_axis_label="Number of tweets",
tooltips=tooltips)
p.quad("left", "right", "top", "bottom", fill_color="colors", source=source)
p.xaxis.ticker = bin_edges
if bin_edges[-1] >= 10000:
p.xaxis.major_label_orientation = pi/4
p.xaxis.formatter = NumeralTickFormatter(format="0,0")
return p
def create_posts_in_hours_graph(t):
"""Tworzy wykres słupkowy pokazujący ilość opublikowanych postów w poszczególnych godzinach."""
hours = account.hours_count(t)
top = []
x = []
for hour, posts in hours.items():
top.append(posts)
x.append(hour)
tooltips = [
("Hour", "@x"),
("Tweets", "@top")
]
m = max(top)
colors = ["#00c4a6" if v != m else "#007fc4" for v in top]
p = figure(plot_height=300, plot_width=450, title="Published posts in hours of a day (UTC +0)",
toolbar_location="right", x_axis_label="Hours", y_axis_label="Number of tweets", tooltips=tooltips)
p.vbar(x=x, width=0.5, bottom=0, top=top, color="#007fc4", fill_color=colors)
return p
def preprocessing(line):
"""Preprocessing and tokenizing"""
line = line.lower()
line = re.sub(r"[{}]".format(string.punctuation), " ", line)
return line
def KMeans_clusters(X, n=10):
"""
Optymalizacja liczby klastrów
Wyznacza liczbę klastrów z podanego przedziału, dla której użycie algorytmu KMeans daje największy współczynnik
Silhouette Score.
Parameters
----------
X : tablica lub macierz rzadka, kształt = [n_samples, n_features]
Zbiór, który ma być sklasteryzowany
n : int
Maksymalna liczba klastrów, jaka ma być testowana. Domyślnie 10.
Musi być mniejsza niż liczba elementów w zbiorze X. Nie może być mniejsza niż 2.
Returns
-------
int
Liczba klastrów z największym Silhouette Score
"""
max_score = 0
max = 1
for k in range(2, n + 1):
model = KMeans(n_clusters=k).fit(X)
clusters = model.predict(X)
score = metrics.silhouette_score(X, clusters)
if score > max_score:
max_score = score
max = k
return max
def create_tfidf_graph(t):
"""Tworzy wykres przedstawiający posty sklasteryzowane wg TF-IDF."""
if len(t) < 3: # manifold nie działa, kiedy postów jest mniej niż 3
p = figure(title="Too small number of tweets")
return p
post_list = [] # lista zawierająca treści tweetów
for post in t:
if not isinstance(post, Status):
raise TypeError("Expected Status class instance, got %s" % type(post))
text = post.full_text
post_list.append(text)
file = open('stopwords.txt','r').read()
rows = file.split('\n')
stopwords = list(rows)
tfidf_vectorizer = TfidfVectorizer(preprocessor=preprocessing, stop_words=stopwords)
tfidf = tfidf_vectorizer.fit_transform(post_list)
mf = LocallyLinearEmbedding(n_neighbors=min(5, len(t)-1))
df = pd.DataFrame(mf.fit_transform(tfidf.toarray()))
# Jeśli postów jest mniej niż cztery, maksymlna liczba klastrów wynosi len(t)-1
n = KMeans_clusters(tfidf, min(4, len(t)-1))
# Klasteryzacja
kmeans = KMeans(n_clusters=n).fit(tfidf)
clusters = kmeans.predict(tfidf)
df["class"] = clusters
colors = np.hstack([Category10[10]] * 20)
source = ColumnDataSource(data=dict(
x=df[0],
y=df[1],
color=colors[clusters].tolist(),
desc=post_list,
))
tooltips = """
<div style="width:250px;">
@desc
</div>
"""
p = figure(title="Tweets grouped by content", tooltips=tooltips)
p.scatter(x='x', y='y', color='color', source=source)
return p
def wordcloud(timeline, pid):
file = open('stopwords.txt','r').read()
rows = file.split('\n')
stopwords = set(rows)
text = ""
for tweet in timeline:
text = text + " " + tweet.full_text
wc = WordCloud(stopwords=stopwords, width=1920, height=1080, max_words=50, background_color="white").generate(text)
wc.to_file("static/temp/WC" + pid + ".png")
def create_languages_graph(timeline):
# tworzy wykres kołowy jezykow (narazie robi mape {jezyk: ilosc postow})
languages = {}
for tweet in timeline:
language=guess_language(tweet.full_text)
if language in languages:
languages[language]=languages[language]+1
else:
languages[language]=1
return languages
|
{"hexsha": "c8632628f84c05459260defc31ee0e3b632d136e", "size": 10140, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/graphs.py", "max_stars_repo_name": "profilator/profilator", "max_stars_repo_head_hexsha": "6161efffee00b95216c8d60fcbd637171fae6980", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-04-19T17:56:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-25T06:54:47.000Z", "max_issues_repo_path": "source/graphs.py", "max_issues_repo_name": "profilator/profilator", "max_issues_repo_head_hexsha": "6161efffee00b95216c8d60fcbd637171fae6980", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-23T11:47:01.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-23T11:47:01.000Z", "max_forks_repo_path": "source/graphs.py", "max_forks_repo_name": "profilator/profilator", "max_forks_repo_head_hexsha": "6161efffee00b95216c8d60fcbd637171fae6980", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-03-12T18:18:06.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-11T12:08:12.000Z", "avg_line_length": 32.2929936306, "max_line_length": 119, "alphanum_fraction": 0.6379684418, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2921}
|
import cv2
import tensorflow as tf
import numpy as np
OUTPUT_PATH = "../events/"
NUM_FILTERS = 10
FILTER_SIZE = (3, 3)
STRIDES = (1, 1)
def nn(input_node):
with tf.variable_scope('nn'):
w = tf.get_variable(
name='weight',
shape=[FILTER_SIZE[0], FILTER_SIZE[1], 3, NUM_FILTERS],
dtype=tf.float32)
b = tf.get_variable(
name='bias',
shape=[NUM_FILTERS],
dtype=tf.float32)
out = tf.nn.conv2d(input_node, filter=w, strides=(1, 1),
padding='SAME')
out = out + b
return out
def layer(input_node):
out = tf.layers.conv2d(input_node, NUM_FILTERS, FILTER_SIZE, strides=STRIDES, padding='same', name='layer')
return out
def slim(input_node):
out = tf.contrib.slim.conv2d(input_node, NUM_FILTERS, FILTER_SIZE, stride=STRIDES, padding='SAME',
activation_fn=None, scope='slim')
return out
def keras(input_node):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(NUM_FILTERS, FILTER_SIZE, strides=STRIDES, padding='same')
], name='keras')
return model(input_node)
if __name__ == '__main__':
node = tf.placeholder(shape=[None, 100, 100, 3], dtype=tf.float32)
nn_out = nn(node)
layer_out = layer(node)
slim_out = slim(node)
keras_out = keras(node)
tf.summary.FileWriter(OUTPUT_PATH, graph=tf.get_default_graph())
image = cv2.imread('ithome.jpg')
image = np.expand_dims(image, 0)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
nn_result, layer_result, slim_result, keras_result = \
sess.run([nn_out, layer_out, slim_out, keras_out], feed_dict={node: image})
print(f'nn shape: {nn_result.shape}')
print(f'layer shape: {layer_result.shape}')
print(f'slim shape: {slim_result.shape}')
print(f'keras shape: {keras_result.shape}')
|
{"hexsha": "81dc4f11e5576ffd637d347eccc3eb3c8593cb6c", "size": 2010, "ext": "py", "lang": "Python", "max_stars_repo_path": "05/conv2d.py", "max_stars_repo_name": "jason9075/ithome_tensorflow_series", "max_stars_repo_head_hexsha": "e8f92de2a73a88e7b03a9ac58ece4c4a604f066e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2019-10-06T17:11:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T08:52:22.000Z", "max_issues_repo_path": "05/conv2d.py", "max_issues_repo_name": "jason9075/ithome_tensorflow_series", "max_issues_repo_head_hexsha": "e8f92de2a73a88e7b03a9ac58ece4c4a604f066e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-03T10:13:10.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-08T06:55:12.000Z", "max_forks_repo_path": "05/conv2d.py", "max_forks_repo_name": "jason9075/ithome_tensorflow_series", "max_forks_repo_head_hexsha": "e8f92de2a73a88e7b03a9ac58ece4c4a604f066e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-10-06T17:11:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T08:52:26.000Z", "avg_line_length": 27.9166666667, "max_line_length": 111, "alphanum_fraction": 0.6243781095, "include": true, "reason": "import numpy", "num_tokens": 500}
|
[STATEMENT]
lemma is_strict_if:
assumes "\<And>f. ide f \<Longrightarrow> f \<star> src f = f"
and "\<And>f. ide f \<Longrightarrow> trg f \<star> f = f"
and "\<And>a. obj a \<Longrightarrow> ide \<i>[a]"
and "\<And>f g h. \<lbrakk>ide f; ide g; ide h; src f = trg g; src g = trg h\<rbrakk> \<Longrightarrow> ide \<a>[f, g, h]"
shows "strict_bicategory V H \<a> \<i> src trg"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. strict_bicategory (\<cdot>) (\<star>) \<a> \<i> src trg
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>f. ide f \<Longrightarrow> \<l>[f] = f
2. \<And>f. ide f \<Longrightarrow> \<r>[f] = f
3. \<And>f g h. \<lbrakk>ide f; ide g; ide h; src f = trg g; src g = trg h\<rbrakk> \<Longrightarrow> ide \<a>[f, g, h]
[PROOF STEP]
show "\<And>f g h. \<lbrakk>ide f; ide g; ide h; src f = trg g; src g = trg h\<rbrakk> \<Longrightarrow> ide \<a>[f, g, h]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>f g h. \<lbrakk>ide f; ide g; ide h; src f = trg g; src g = trg h\<rbrakk> \<Longrightarrow> ide \<a>[f, g, h]
[PROOF STEP]
by fact
[PROOF STATE]
proof (state)
this:
\<lbrakk>ide ?f; ide ?g; ide ?h; src ?f = trg ?g; src ?g = trg ?h\<rbrakk> \<Longrightarrow> ide \<a>[?f, ?g, ?h]
goal (2 subgoals):
1. \<And>f. ide f \<Longrightarrow> \<l>[f] = f
2. \<And>f. ide f \<Longrightarrow> \<r>[f] = f
[PROOF STEP]
fix f
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>f. ide f \<Longrightarrow> \<l>[f] = f
2. \<And>f. ide f \<Longrightarrow> \<r>[f] = f
[PROOF STEP]
assume f: "ide f"
[PROOF STATE]
proof (state)
this:
ide f
goal (2 subgoals):
1. \<And>f. ide f \<Longrightarrow> \<l>[f] = f
2. \<And>f. ide f \<Longrightarrow> \<r>[f] = f
[PROOF STEP]
show "\<l>[f] = f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<l>[f] = f
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<l>[f] = f
[PROOF STEP]
have "f = \<l>[f]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f = \<l>[f]
[PROOF STEP]
using assms f unit_simps(5)
[PROOF STATE]
proof (prove)
using this:
ide ?f \<Longrightarrow> ?f \<star> src ?f = ?f
ide ?f \<Longrightarrow> trg ?f \<star> ?f = ?f
obj ?a \<Longrightarrow> ide \<i>[?a]
\<lbrakk>ide ?f; ide ?g; ide ?h; src ?f = trg ?g; src ?g = trg ?h\<rbrakk> \<Longrightarrow> ide \<a>[?f, ?g, ?h]
ide f
obj ?a \<Longrightarrow> cod \<i>[?a] = ?a
goal (1 subgoal):
1. f = \<l>[f]
[PROOF STEP]
by (intro lunit_eqI) (auto simp add: comp_arr_ide)
[PROOF STATE]
proof (state)
this:
f = \<l>[f]
goal (1 subgoal):
1. \<l>[f] = f
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
f = \<l>[f]
goal (1 subgoal):
1. \<l>[f] = f
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<l>[f] = f
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<l>[f] = f
goal (1 subgoal):
1. \<And>f. ide f \<Longrightarrow> \<r>[f] = f
[PROOF STEP]
show "\<r>[f] = f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<r>[f] = f
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<r>[f] = f
[PROOF STEP]
have "f = \<r>[f]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f = \<r>[f]
[PROOF STEP]
proof (intro runit_eqI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. ide f
2. \<guillemotleft>f : f \<star> src f \<Rightarrow> f\<guillemotright>
3. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
show "ide f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ide f
[PROOF STEP]
by fact
[PROOF STATE]
proof (state)
this:
ide f
goal (2 subgoals):
1. \<guillemotleft>f : f \<star> src f \<Rightarrow> f\<guillemotright>
2. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
show "\<guillemotleft>f : f \<star> src f \<Rightarrow> f\<guillemotright>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<guillemotleft>f : f \<star> src f \<Rightarrow> f\<guillemotright>
[PROOF STEP]
using f assms(1)
[PROOF STATE]
proof (prove)
using this:
ide f
ide ?f \<Longrightarrow> ?f \<star> src ?f = ?f
goal (1 subgoal):
1. \<guillemotleft>f : f \<star> src f \<Rightarrow> f\<guillemotright>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<guillemotleft>f : f \<star> src f \<Rightarrow> f\<guillemotright>
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
show "f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
have "(f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f] = (f \<star> src f) \<cdot> \<a>[f, src f, src f]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f] = (f \<star> src f) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
using f assms(2-3) unit_simps(5)
[PROOF STATE]
proof (prove)
using this:
ide f
ide ?f \<Longrightarrow> trg ?f \<star> ?f = ?f
obj ?a \<Longrightarrow> ide \<i>[?a]
obj ?a \<Longrightarrow> cod \<i>[?a] = ?a
goal (1 subgoal):
1. (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f] = (f \<star> src f) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f] = (f \<star> src f) \<cdot> \<a>[f, src f, src f]
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f] = (f \<star> src f) \<cdot> \<a>[f, src f, src f]
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
have "... = (f \<star> src f \<star> src f) \<cdot> \<a>[f, src f, src f]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f \<star> src f) \<cdot> \<a>[f, src f, src f] = (f \<star> src f \<star> src f) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
using f assms(1-2) ideD(1) trg_src src.preserves_ide
[PROOF STATE]
proof (prove)
using this:
ide f
ide ?f \<Longrightarrow> ?f \<star> src ?f = ?f
ide ?f \<Longrightarrow> trg ?f \<star> ?f = ?f
ide ?a \<Longrightarrow> arr ?a
arr ?\<mu> \<Longrightarrow> trg (src ?\<mu>) = src ?\<mu>
ide ?a \<Longrightarrow> ide (src ?a)
goal (1 subgoal):
1. (f \<star> src f) \<cdot> \<a>[f, src f, src f] = (f \<star> src f \<star> src f) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
(f \<star> src f) \<cdot> \<a>[f, src f, src f] = (f \<star> src f \<star> src f) \<cdot> \<a>[f, src f, src f]
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(f \<star> src f) \<cdot> \<a>[f, src f, src f] = (f \<star> src f \<star> src f) \<cdot> \<a>[f, src f, src f]
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
have "... = f \<star> src f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f \<star> src f \<star> src f) \<cdot> \<a>[f, src f, src f] = f \<star> src f
[PROOF STEP]
using f comp_arr_ide assms(1,4) assoc_in_hom [of f "src f" "src f"]
[PROOF STATE]
proof (prove)
using this:
ide f
\<lbrakk>ide ?a; seq ?f ?a\<rbrakk> \<Longrightarrow> ?f \<cdot> ?a = ?f
ide ?f \<Longrightarrow> ?f \<star> src ?f = ?f
\<lbrakk>ide ?f; ide ?g; ide ?h; src ?f = trg ?g; src ?g = trg ?h\<rbrakk> \<Longrightarrow> ide \<a>[?f, ?g, ?h]
\<lbrakk>ide f; ide (src f); ide (src f); src f = trg (src f); src (src f) = trg (src f)\<rbrakk> \<Longrightarrow> \<guillemotleft>\<a>[f, src f, src f] : src (src f) \<rightarrow> trg f\<guillemotright>
\<lbrakk>ide f; ide (src f); ide (src f); src f = trg (src f); src (src f) = trg (src f)\<rbrakk> \<Longrightarrow> \<guillemotleft>\<a>[f, src f, src f] : (local.dom f \<star> local.dom (src f)) \<star> local.dom (src f) \<Rightarrow> cod f \<star> cod (src f) \<star> cod (src f)\<guillemotright>
goal (1 subgoal):
1. (f \<star> src f \<star> src f) \<cdot> \<a>[f, src f, src f] = f \<star> src f
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(f \<star> src f \<star> src f) \<cdot> \<a>[f, src f, src f] = f \<star> src f
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f] = f \<star> src f
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f] = f \<star> src f
goal (1 subgoal):
1. f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
f \<star> src f = (f \<star> \<i>[src f]) \<cdot> \<a>[f, src f, src f]
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
f = \<r>[f]
goal (1 subgoal):
1. \<r>[f] = f
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
f = \<r>[f]
goal (1 subgoal):
1. \<r>[f] = f
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<r>[f] = f
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<r>[f] = f
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4317, "file": "Bicategory_Strictness", "length": 44}
|
import os
os.environ['OMP_NUM_THREADS'] = '1'
import dgl
import sys
import numpy as np
import time
from scipy import sparse as spsp
from numpy.testing import assert_array_equal
from multiprocessing import Process, Manager, Condition, Value
import multiprocessing as mp
from dgl.graph_index import create_graph_index
from dgl.data.utils import load_graphs, save_graphs
from dgl.distributed import DistGraphServer, DistGraph
from dgl.distributed import partition_graph
import backend as F
import unittest
import pickle
server_namebook = {0: [0, '127.0.0.1', 30000, 1]}
def create_random_graph(n):
arr = (spsp.random(n, n, density=0.001, format='coo') != 0).astype(np.int64)
ig = create_graph_index(arr, readonly=True)
return dgl.DGLGraph(ig)
def run_server(graph_name, server_id, num_clients, barrier):
g = DistGraphServer(server_id, server_namebook, num_clients, graph_name,
'/tmp/{}.json'.format(graph_name))
barrier.wait()
print('start server', server_id)
g.start()
def run_client(graph_name, barrier, num_nodes, num_edges):
barrier.wait()
g = DistGraph(server_namebook, graph_name)
# Test API
assert g.number_of_nodes() == num_nodes
assert g.number_of_edges() == num_edges
# Test reading node data
nids = F.arange(0, int(g.number_of_nodes() / 2))
feats1 = g.ndata['features'][nids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == nids))
# Test reading edge data
eids = F.arange(0, int(g.number_of_edges() / 2))
feats1 = g.edata['features'][eids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == eids))
# Test init node data
new_shape = (g.number_of_nodes(), 2)
g.init_ndata('test1', new_shape, F.int32)
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
# Test init edge data
new_shape = (g.number_of_edges(), 2)
g.init_edata('test1', new_shape, F.int32)
feats = g.edata['test1'][eids]
assert np.all(F.asnumpy(feats) == 0)
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.ndata['test1'][nids] = new_feats
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert len(g.ndata['features']) == g.number_of_nodes()
assert g.ndata['features'].shape == (g.number_of_nodes(), 1)
assert g.ndata['features'].dtype == F.int64
assert g.node_attr_schemes()['features'].dtype == F.int64
assert g.node_attr_schemes()['test1'].dtype == F.int32
assert g.node_attr_schemes()['features'].shape == (1,)
g.shut_down()
print('end')
def run_server_client():
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'test'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
barrier = mp.Barrier(2)
serv_ps = []
for serv_id in range(1):
p = Process(target=run_server, args=(graph_name, serv_id, 1, barrier))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(1):
print('start client', cli_id)
p = Process(target=run_client, args=(graph_name, barrier, g.number_of_nodes(),
g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
print('clients have terminated')
if __name__ == '__main__':
run_server_client()
|
{"hexsha": "62c3f69b117167cdebc522346d1837d857cfcf6c", "size": 3704, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/distributed/test_dist_graph_store.py", "max_stars_repo_name": "m30m/dgl", "max_stars_repo_head_hexsha": "2190c39d674f76c65db9ee8da7b43d3021f19c29", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-09T12:36:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T21:22:36.000Z", "max_issues_repo_path": "tests/distributed/test_dist_graph_store.py", "max_issues_repo_name": "m30m/dgl", "max_issues_repo_head_hexsha": "2190c39d674f76c65db9ee8da7b43d3021f19c29", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/distributed/test_dist_graph_store.py", "max_forks_repo_name": "m30m/dgl", "max_forks_repo_head_hexsha": "2190c39d674f76c65db9ee8da7b43d3021f19c29", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-07T09:34:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-13T06:18:58.000Z", "avg_line_length": 32.2086956522, "max_line_length": 86, "alphanum_fraction": 0.6584773218, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1026}
|
# -*- coding: utf-8 -*-
from zaifapi import ZaifPublicApi, ZaifTradeApi
from decimal import Decimal, ROUND_DOWN
from TickChanger import Tick_int
import numpy
import time
import traceback
import re
import datetime
class EXCaccess:
def __init__(self):
self.investment = 10000 # 投資制限額
# 予想利益額の閾値(閾値以上で注文)========================
self.threshold_jpy_btc_mona = 300
self.threshold_jpy_btc_bch = 150
self.threshold_jpy_btc_xem = 150
self.threshold_jpy_btc_eth = 150
# 手数料 ( % )
self.fee_BTC_JPY = 0
self.fee_MONA_BTC = 0.1
self.fee_MONA_JPY = 0.1
self.fee_BCH_BTC = 0.3
self.fee_BCH_JPY = 0.3
self.fee_XEM_BTC = 0.1
self.fee_XEM_JPY = 0.1
self.fee_ETH_BTC = 0.1
self.fee_ETH_JPY = 0.1
# 注文確認の手がかりとして注文に付けるコメント
self.order_comment = 'AutoTA'
# API用キーの読み込み
f = open('key_secret.txt', 'r') # 本番用
# f = open('test_keys.txt', 'r') # テスト用
txt = f.read()
f.close()
data = txt.split('\n')
self.key = data[0]
self.secret = data[1]
def createErrorLog(self, locate: str):
now = datetime.datetime.now()
Path = 'error.log'
with open(Path, 'a', newline='', encoding='shift-jis') as f:
f.write('=====================================\n')
f.write(str(now))
f.write(locate)
traceback.print_exc(file=f)
# 公開情報に接続
def getPublicAPI(self):
while (1):
try:
self.zaif = ZaifPublicApi()
break
except:
time.sleep(1)
# 個人の情報に接続
def getPrivateAPI(self):
while (1):
try:
self.zaifp = ZaifTradeApi(self.key, self.secret)
break
except:
time.sleep(1)
# 余力額取得
def getFunds(self):
self.getPrivateAPI()
while (1):
try:
result = self.zaifp.get_info2()['funds']
break
except:
traceback.print_exc()
self.createErrorLog('getFunds')
time.sleep(1)
return result
# 各通貨ペアの相場から買い気配値と売り気配値の中間値を返す
# 公開情報の.currency_pairsで取得できるaux_unit_step(取引単価?)
# BTC/JPY:5.0
# MONA/BTC:0.00000001
# MONA/JPY:0.1
# BCH/BTC:0.0001
# BCH/JPY:5.0
# XEM/BTC:0.00000001
# XEM/JPY:0.0001
# ETH/BTC:0.0001
# ETH/JPY:5.0
# ====================================================
# fund type: JPY BTC MONA ETH XEM BCH
# ask:sell, bid:buy
# [0]:value [1]:quantity
def getBTC_JPY(self):
while (1):
try:
Btc_Jpy = self.zaif.depth('btc_jpy')
break
except:
traceback.print_exc()
self.createErrorLog('getBTC_JPY')
time.sleep(1)
askBtcJpy = Btc_Jpy['asks'][0][0]
bidBtcJpy = Btc_Jpy['bids'][0][0]
aveBtcJpy = (askBtcJpy + bidBtcJpy) / 2.0
T_aveBtcJpy = Tick_int(int(aveBtcJpy), 5)
return T_aveBtcJpy
def getMONA_JPY(self):
while (1):
try:
Mona_Jpy = self.zaif.depth('mona_jpy')
break
except:
traceback.print_exc()
self.createErrorLog('getMONA_JPY')
time.sleep(1)
askMonaJpy = Mona_Jpy['asks'][0][0]
bidMonaJpy = Mona_Jpy['bids'][0][0]
aveMonaJpy = (askMonaJpy + bidMonaJpy) / 2.0
T_aveMonaJpy = Decimal(aveMonaJpy).quantize(Decimal('0.0'),
rounding=ROUND_DOWN)
return T_aveMonaJpy
def getMONA_BTC(self):
while (1):
try:
Mona_Btc = self.zaif.depth('mona_btc')
break
except:
traceback.print_exc()
self.createErrorLog('getMONA_BTC')
time.sleep(1)
askMonaBtc = Mona_Btc['asks'][0][0]
bidMonaBtc = Mona_Btc['bids'][0][0]
aveMonaBtc = (askMonaBtc + bidMonaBtc) / 2.0
T_aveMonaBtc = Decimal(aveMonaBtc).quantize(Decimal('0.00000000'),
rounding=ROUND_DOWN)
return T_aveMonaBtc
def getBCH_JPY(self):
while (1):
try:
Bch_Jpy = self.zaif.depth('bch_jpy')
break
except:
traceback.print_exc()
self.createErrorLog('getBCH_JPY')
time.sleep(1)
askBchJpy = Bch_Jpy['asks'][0][0]
bidBchJpy = Bch_Jpy['bids'][0][0]
aveBchJpy = (askBchJpy + bidBchJpy) / 2.0
T_aveBchJpy = Tick_int(int(aveBchJpy), 5)
return T_aveBchJpy
def getBCH_BTC(self):
while (1):
try:
Bch_Btc = self.zaif.depth('bch_btc')
break
except:
traceback.print_exc()
self.createErrorLog('getBCH_BTC')
time.sleep(1)
askBchBtc = Bch_Btc['asks'][0][0]
bidBchBtc = Bch_Btc['bids'][0][0]
aveBchBtc = (askBchBtc + bidBchBtc) / 2.0
T_aveBchBtc = Decimal(aveBchBtc).quantize(Decimal('0.0000'),
rounding=ROUND_DOWN)
return T_aveBchBtc
def getXEM_JPY(self):
while (1):
try:
Xem_Jpy = self.zaif.depth('xem_jpy')
break
except:
traceback.print_exc()
self.createErrorLog('getXEM_JPY')
time.sleep(1)
askXemJpy = Xem_Jpy['asks'][0][0]
bidXemJpy = Xem_Jpy['bids'][0][0]
aveXemJpy = (askXemJpy + bidXemJpy) / 2.0
T_aveXemJpy = Decimal(aveXemJpy).quantize(Decimal('0.0000'),
rounding=ROUND_DOWN)
return T_aveXemJpy
def getXEM_BTC(self):
while (1):
try:
Xem_Btc = self.zaif.depth('xem_btc')
break
except:
traceback.print_exc()
self.createErrorLog('getXEM_BTC')
time.sleep(1)
askXemBtc = Xem_Btc['asks'][0][0]
bidXemBtc = Xem_Btc['bids'][0][0]
aveXemBtc = (askXemBtc + bidXemBtc) / 2.0
T_aveXemBtc = Decimal(aveXemBtc).quantize(Decimal('0.00000000'),
rounding=ROUND_DOWN)
return T_aveXemBtc
def getETH_JPY(self):
while (1):
try:
Eth_Jpy = self.zaif.depth('eth_jpy')
break
except:
traceback.print_exc()
self.createErrorLog('getETH_JPY')
time.sleep(1)
askEthJpy = Eth_Jpy['asks'][0][0]
bidEthJpy = Eth_Jpy['bids'][0][0]
aveEthJpy = (askEthJpy + bidEthJpy) / 2.0
T_aveEthJpy = Tick_int(int(aveEthJpy), 5)
return T_aveEthJpy
def getETH_BTC(self):
while (1):
try:
Eth_Btc = self.zaif.depth('eth_btc')
break
except:
traceback.print_exc()
self.createErrorLog('getETH_BTC')
time.sleep(1)
askEthBtc = Eth_Btc['asks'][0][0]
bidEthBtc = Eth_Btc['bids'][0][0]
aveEthBtc = (askEthBtc + bidEthBtc) / 2.0
T_aveEthBtc = Decimal(aveEthBtc).quantize(Decimal('0.0000'),
rounding=ROUND_DOWN)
return T_aveEthBtc
# 三角裁定で利益が出るか計算
def Monitoring(self):
fj = self.getFunds()['jpy']
if fj > self.investment:
fj = self.investment
self.getPublicAPI()
aveBtcJpy = self.getBTC_JPY()
aveMonaJpy = self.getMONA_JPY()
aveMonaBtc = self.getMONA_BTC()
aveBchJpy = self.getBCH_JPY()
aveBchBtc = self.getBCH_BTC()
aveXemJpy = self.getXEM_JPY()
aveXemBtc = self.getXEM_BTC()
aveEthJpy = self.getETH_JPY()
aveEthBtc = self.getETH_BTC()
# 買いと売りの中間点
aves = [aveBtcJpy,
aveMonaBtc,
aveMonaJpy,
aveBchBtc,
aveBchJpy,
aveXemBtc,
aveXemJpy,
aveEthBtc,
aveEthJpy]
# ===========================================================
# JPY->BTC, BTC->MONA, MONA->JPY
# BTC-ask, MONA-ask, MONA-bid
# JPY->MONA, MONA->BTC, BTC->JPY
# MONA-ask, MONA-bid, BTC-bid
# 手数料抜き============================================
# Jpy_Btc_Mona = ((aveMonaJpy * fj) / (aveBtcJpy * aveMonaBtc))
# Jpy_Mona_Btc = ((aveBtcJpy * aveMonaBtc * fj) / aveMonaJpy)
# Jpy_Btc_Bch = ((aveBchJpy * fj) / (aveBtcJpy * aveBchBtc))
# Jpy_Bch_Btc = ((aveBtcJpy * aveBchBtc * fj) / aveBchJpy)
# Jpy_Btc_Xem = ((aveXemJpy * fj) / (aveBtcJpy * aveXemBtc))
# Jpy_Xem_Btc = ((aveBtcJpy * aveXemBtc * fj) / aveXemJpy)
# Jpy_Btc_Eth = ((aveEthJpy * fj) / (aveBtcJpy * aveEthBtc))
# Jpy_Eth_Btc = ((aveBtcJpy * aveEthBtc * fj) / aveEthJpy)
# 手数料あり============================================
# ※decimal * floatができない.同じ型にキャストする必要あり
Jpy_Btc_Mona = ((100 * float(aveMonaJpy))
/ (float(aveMonaBtc) * (100 + self.fee_MONA_JPY))) * \
(100 / (float(aveBtcJpy) * (100 + self.fee_MONA_BTC))) * \
((100 * fj) / (100 + self.fee_BTC_JPY))
Jpy_Mona_Btc = ((100 * float(aveBtcJpy) * float(aveMonaBtc))
/ (100 + self.fee_BTC_JPY)) * \
(100 / (float(aveMonaJpy) * (100 + self.fee_MONA_BTC))) * \
((100 * fj) / (100 + self.fee_MONA_JPY))
Jpy_Btc_Bch = ((100 * float(aveBchJpy))
/ (float(aveBchBtc) * (100 + self.fee_BCH_JPY))) * \
(100 / (float(aveBtcJpy) * (100 + self.fee_BCH_BTC))) * \
((100 * fj) / (100 + self.fee_BTC_JPY))
Jpy_Bch_Btc = ((100 * float(aveBtcJpy) * float(aveBchBtc))
/ (100 + self.fee_BTC_JPY)) * \
(100 / (float(aveBchJpy) * (100 + self.fee_BCH_BTC))) * \
((100 * fj) / (100 + self.fee_BCH_JPY))
Jpy_Btc_Xem = ((100 * float(aveXemJpy))
/ (float(aveXemBtc) * (100 + self.fee_XEM_JPY))) * \
(100 / (float(aveBtcJpy) * (100 + self.fee_XEM_BTC))) * \
((100 * fj) / (100 + self.fee_BTC_JPY))
Jpy_Xem_Btc = ((100 * float(aveBtcJpy) * float(aveXemBtc))
/ (100 + self.fee_BTC_JPY)) * \
(100 / (float(aveXemJpy) * (100 + self.fee_XEM_BTC))) * \
((100 * fj) / (100 + self.fee_XEM_JPY))
Jpy_Btc_Eth = ((100 * float(aveEthJpy))
/ (float(aveEthBtc) * (100 + self.fee_ETH_JPY))) * \
(100 / (float(aveBtcJpy) * (100 + self.fee_ETH_BTC))) * \
((100 * fj) / (100 + self.fee_BTC_JPY))
Jpy_Eth_Btc = ((100 * float(aveBtcJpy) * float(aveEthBtc))
/ (100 + self.fee_BTC_JPY)) * \
(100 / (float(aveEthJpy) * (100 + self.fee_ETH_BTC))) * \
((100 * fj) / (100 + self.fee_ETH_JPY))
# 各予想結果額
estimates = [Jpy_Btc_Mona,
Jpy_Mona_Btc,
Jpy_Btc_Bch,
Jpy_Bch_Btc,
Jpy_Btc_Xem,
Jpy_Xem_Btc,
Jpy_Btc_Eth,
Jpy_Eth_Btc]
# 最も高い予想結果額
maxIndex = numpy.argmax(estimates)
if maxIndex == 0 \
and estimates[maxIndex] > (fj + self.threshold_jpy_btc_mona):
judge = 'Jpy_Btc_Mona'
elif maxIndex == 1 \
and estimates[maxIndex] > (fj + self.threshold_jpy_btc_mona):
judge = 'Jpy_Mona_Btc'
elif maxIndex == 2 \
and estimates[maxIndex] > (fj + self.threshold_jpy_btc_bch):
judge = 'Jpy_Btc_Bch'
elif maxIndex == 3 \
and estimates[maxIndex] > (fj + self.threshold_jpy_btc_bch):
judge = 'Jpy_Bch_Btc'
elif maxIndex == 4 \
and estimates[maxIndex] > (fj + self.threshold_jpy_btc_xem):
judge = 'Jpy_Btc_Xem'
elif maxIndex == 5 \
and estimates[maxIndex] > (fj + self.threshold_jpy_btc_xem):
judge = 'Jpy_Xem_Btc'
elif maxIndex == 6 \
and estimates[maxIndex] > (fj + self.threshold_jpy_btc_eth):
judge = 'Jpy_Btc_Eth'
elif maxIndex == 7 \
and estimates[maxIndex] > (fj + self.threshold_jpy_btc_eth):
judge = 'Jpy_Eth_Btc'
else:
judge = 'no routes'
# 各予想利益額
diffs = [int(Jpy_Btc_Mona - fj),
int(Jpy_Mona_Btc - fj),
int(Jpy_Btc_Bch - fj),
int(Jpy_Bch_Btc - fj),
int(Jpy_Btc_Xem - fj),
int(Jpy_Xem_Btc - fj),
int(Jpy_Btc_Eth - fj),
int(Jpy_Eth_Btc - fj)]
resultList = [judge, fj, diffs[maxIndex], diffs, aves]
return resultList
# コメント'AutoTA'の注文が残っているか確認
def checkActiveOrders(self, pair: str):
# active_ordersの形式:
# {'270906448': {
# 'currency_pair': 'mona_btc',
# 'action': 'ask',
# 'amount': 33.0,
# 'price': 0.00035692,
# 'timestamp': '1510122394',
# 'comment': ''}}
time.sleep(0.3)
self.getPrivateAPI()
orderID = 0 # 注文が無ければこのまま0を返す
price = 0
amount = 0
orders = []
while 1:
try:
if pair == 'btc_jpy':
orders.append(
self.zaifp.active_orders(currency_pair='btc_jpy'))
elif pair == 'mona_btc':
orders.append(
self.zaifp.active_orders(currency_pair='mona_btc'))
elif pair == 'mona_jpy':
orders.append(
self.zaifp.active_orders(currency_pair='mona_jpy'))
elif pair == 'xem_btc':
orders.append(
self.zaifp.active_orders(currency_pair='xem_btc'))
elif pair == 'xem_jpy':
orders.append(
self.zaifp.active_orders(currency_pair='xem_jpy'))
elif pair == 'bch_btc':
orders.append(
self.zaifp.active_orders(currency_pair='bch_btc'))
elif pair == 'bch_jpy':
orders.append(
self.zaifp.active_orders(currency_pair='bch_jpy'))
elif pair == 'eth_btc':
orders.append(
self.zaifp.active_orders(currency_pair='eth_btc'))
elif pair == 'eth_jpy':
orders.append(
self.zaifp.active_orders(currency_pair='eth_jpy'))
else:
pass
break
except:
traceback.print_exc()
time.sleep(1)
# コメントが'AutoTA'のオーダーがあればID,価格,数量を返す
for i in range(len(orders)):
if orders[i] != []:
for j in orders[i].keys():
if re.search(self.order_comment,
str(orders[i][j]['comment'])):
orderID = int(j)
price = orders[i][j]['price']
amount = orders[i][j]['amount']
resultList = [orderID, price, amount]
return resultList
# 注文IDと通貨ペアを指定して注文を取り消し
def cancelOrder(self, orderID: int, pair: str):
cancelFlag = False
time.sleep(1)
try:
self.zaifp.cancel_order(order_id=orderID, currency_pair=pair)
print('order canceled')
cancelFlag = True
except:
traceback.print_exc()
self.createErrorLog('cancelOrder')
time.sleep(1)
return cancelFlag
# コメント'AutoTA'を付けて注文実行
def tradePairs(self, pair, act, price, amount):
while 1:
try:
self.getPrivateAPI()
self.zaifp.trade(currency_pair=pair, action=act, price=price,
amount=amount, comment=self.order_comment)
break
except:
traceback.print_exc()
self.createErrorLog('tradePairs')
time.sleep(1)
# 各取引======================================
# JPY->BTC->MONA
def order_JPY_BTC(self, prevFund, T_aveBtcJpy):
print('JPY->BTC: ', end='', flush=True)
# 手数料分を残して取引
tradeJPY = (100 * prevFund) / (100 + self.fee_BTC_JPY)
pair = 'btc_jpy' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveBtcJpy
amount = Decimal(tradeJPY / float(price)).quantize(Decimal('0.0000'),
rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_BTC_MONA(self, T_aveMonaBtc):
print('BTC->MONA: ', end='', flush=True)
while 1:
try:
myBTC = self.getFunds()['btc']
# 手数料分を残して取引
tradeBTC = (100 * myBTC) / (100 + self.fee_MONA_BTC)
pair = 'mona_btc' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveMonaBtc
amount = Decimal(tradeBTC / float(price)).quantize(
Decimal('0'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('order_BTC_MONA')
time.sleep(1)
time.sleep(1)
if amount >= 1:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_MONA_JPY(self, T_aveMonaJpy):
print('MONA->JPY: ', end='', flush=True)
while 1:
try:
myMONA = self.getFunds()['mona']
# 手数料分を残して取引
tradeMONA = (100 * myMONA) / (100 + self.fee_MONA_JPY)
pair = 'mona_jpy' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveMonaJpy
amount = int(tradeMONA)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('order_MONA_JPY')
time.sleep(1)
time.sleep(1)
if amount >= 1:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
return price * amount
# JPY->MONA->BTC
def order_JPY_MONA(self, prevFund, T_aveMonaJpy):
print('JPY->MONA: ', end='', flush=True)
while 1:
try:
# 手数料分を残して取引
tradeJPY = (100 * prevFund) / (100 + self.fee_MONA_JPY)
pair = 'mona_jpy' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveMonaJpy
amount = Decimal(tradeJPY / float(price)).quantize(
Decimal('0'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('JPY->MONA')
time.sleep(1)
time.sleep(1)
if amount >= 1:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_MONA_BTC(self, T_aveMonaBtc):
print('MONA->BTC: ', end='', flush=True)
while 1:
try:
myMONA = self.getFunds()['mona']
# 手数料分を残して取引
tradeMONA = (100 * myMONA) / (100 + self.fee_MONA_BTC)
pair = 'mona_btc' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveMonaBtc
amount = int(tradeMONA)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('MONA->BTC')
time.sleep(1)
time.sleep(1)
if amount >= 1:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_BTC_JPY(self, T_aveBtcJpy):
print('BTC->JPY: ', end='', flush=True)
while 1:
try:
myBTC = self.getFunds()['btc']
# 手数料分を残して取引
tradeBTC = (100 * myBTC) / (100 + self.fee_BTC_JPY)
pair = 'btc_jpy' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveBtcJpy
amount = Decimal(tradeBTC).quantize(Decimal('0.0000'),
rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('BTC->JPY')
time.sleep(1)
time.sleep(1)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
return price * amount
# JPY->BTC->BCH
def order_BTC_BCH(self, T_aveBchBtc):
print('BTC->BCH: ', end='', flush=True)
while 1:
try:
myBTC = self.getFunds()['btc']
# 手数料分を残して取引
tradeBTC = (100 * myBTC) / (100 + self.fee_BCH_BTC)
pair = 'bch_btc' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveBchBtc
amount = Decimal(tradeBTC / float(price)).quantize(
Decimal('0.0000'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('BTC->BCH')
time.sleep(1)
time.sleep(1)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_BCH_JPY(self, T_aveBchJpy):
print('BCH->JPY: ', end='', flush=True)
while 1:
try:
myBCH = self.getFunds()['BCH']
# 手数料分を残して取引
tradeBCH = (100 * myBCH) / (100 + self.fee_BCH_JPY)
pair = 'bch_jpy' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveBchJpy
amount = Decimal(tradeBCH).quantize(
Decimal('0.0000'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('BCH->JPY')
time.sleep(1)
time.sleep(1)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
return price * amount
# JPY->BCH->BTC
def order_JPY_BCH(self, prevFund, T_aveBchJpy):
print('JPY->BCH: ', end='', flush=True)
# 手数料分を残して取引
tradeJPY = (100 * prevFund) / (100 + self.fee_BCH_JPY)
pair = 'bch_jpy' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveBchJpy
amount = Decimal(tradeJPY / float(price)).quantize(
Decimal('0.0000'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_BCH_BTC(self, T_aveBchBtc):
print('BCH->BTC: ', end='', flush=True)
while 1:
try:
myBCH = self.getFunds()['BCH']
# 手数料分を残して取引
tradeBCH = (100 * myBCH) / (100 + self.fee_BCH_BTC)
pair = 'bch_btc' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveBchBtc
amount = Decimal(tradeBCH).quantize(
Decimal('0.0000'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('BCH->BTC')
time.sleep(1)
time.sleep(1)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
# JPY->BTC->XEM
def order_BTC_XEM(self, T_aveXemBtc):
print('BTC->XEM: ', end='', flush=True)
while 1:
try:
myBTC = self.getFunds()['btc']
# 手数料分を残して取引
tradeBTC = (100 * myBTC) / (100 + self.fee_XEM_BTC)
pair = 'xem_btc' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveXemBtc
amount = Decimal(tradeBTC / float(price)).quantize(
Decimal('0'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('BTC->XEM')
time.sleep(1)
time.sleep(1)
if amount >= 1:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_XEM_JPY(self, T_aveXemJpy):
print('XEM->JPY: ', end='', flush=True)
while 1:
try:
myXEM = self.getFunds()['xem']
# 手数料分を残して取引
tradeXEM = (100 * myXEM) / (100 + self.fee_XEM_JPY)
pair = 'xem_jpy' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveXemJpy
amount = Decimal(tradeXEM).quantize(
Decimal('0.0'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('XEM->JPY')
time.sleep(1)
time.sleep(1)
if amount >= 0.1:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
return price * amount
# JPY->XEM->BTC
def order_JPY_XEM(self, prevFund, T_aveXemJpy):
print('JPY->XEM: ', end='', flush=True)
# 手数料分を残して取引
tradeJPY = (100 * prevFund) / (100 + self.fee_XEM_JPY)
pair = 'xem_jpy' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveXemJpy
amount = Decimal(tradeJPY / float(price)).quantize(
Decimal('0.0'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
if amount >= 0.1:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_XEM_BTC(self, T_aveXemBtc):
print('XEM->BTC: ', end='', flush=True)
while 1:
try:
myXEM = self.getFunds()['xem']
# 手数料分を残して取引
tradeXEM = (100 * myXEM) / (100 + self.fee_XEM_BTC)
pair = 'xem_btc' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveXemBtc
amount = Decimal(tradeXEM).quantize(
Decimal('0'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('XEM->BTC')
time.sleep(1)
time.sleep(1)
if amount >= 1:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
# JPY->BTC->ETH
def order_BTC_ETH(self, T_aveEthBtc):
print('BTC->ETH: ', end='', flush=True)
while 1:
try:
myBTC = self.getFunds()['btc']
# 手数料分を残して取引
tradeBTC = (100 * myBTC) / (100 + self.fee_ETH_BTC)
pair = 'eth_btc' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveEthBtc
amount = Decimal(tradeBTC / float(price)).quantize(
Decimal('0.0000'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('BTC->ETH')
time.sleep(1)
time.sleep(1)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_ETH_JPY(self, T_aveEthJpy):
print('ETH->JPY: ', end='', flush=True)
while 1:
try:
myETH = self.getFunds()['ETH']
# 手数料分を残して取引
tradeETH = (100 * myETH) / (100 + self.fee_ETH_JPY)
pair = 'eth_jpy' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveEthJpy
amount = Decimal(tradeETH).quantize(
Decimal('0.0000'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('ETH->JPY')
time.sleep(1)
time.sleep(1)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
return price * amount
# JPY->ETH->BTC
def order_JPY_ETH(self, prevFund, T_aveEthJpy):
print('JPY->ETH: ', end='', flush=True)
# 手数料分を残して取引
tradeJPY = (100 * prevFund) / (100 + self.fee_ETH_JPY)
pair = 'eth_jpy' # trade pair
act = 'bid' # ask ( sell order ) or bid ( buy order )
price = T_aveEthJpy
amount = Decimal(tradeJPY / float(price)).quantize(
Decimal('0.0000'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
def order_ETH_BTC(self, T_aveEthBtc):
print('ETH->BTC: ', end='', flush=True)
while 1:
try:
myETH = self.getFunds()['ETH']
# 手数料分を残して取引
tradeETH = (100 * myETH) / (100 + self.fee_ETH_BTC)
pair = 'eth_btc' # trade pair
act = 'ask' # ask ( sell order ) or bid ( buy order )
price = T_aveEthBtc
amount = Decimal(tradeETH).quantize(
Decimal('0.0000'), rounding=ROUND_DOWN)
print(price, end=' amount: ', flush=True)
print(amount)
break
except:
traceback.print_exc()
self.createErrorLog('ETH->BTC')
time.sleep(1)
time.sleep(1)
if amount >= 0.0001:
self.tradePairs(pair, act, price, amount)
else:
print('Do not Trade. Checking active orders...')
|
{"hexsha": "59d9fc3c02acfcd8dd496358c5283fde3d44b387", "size": 33487, "ext": "py", "lang": "Python", "max_stars_repo_path": "EXCaccess.py", "max_stars_repo_name": "v2okimochi/AutoTA-TriangularArbitrage", "max_stars_repo_head_hexsha": "1b00cc672ed688d833a37611c934da2bb29154ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-19T08:16:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T13:20:41.000Z", "max_issues_repo_path": "EXCaccess.py", "max_issues_repo_name": "v2okimochi/AutoTA-TriangularArbitrage", "max_issues_repo_head_hexsha": "1b00cc672ed688d833a37611c934da2bb29154ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EXCaccess.py", "max_forks_repo_name": "v2okimochi/AutoTA-TriangularArbitrage", "max_forks_repo_head_hexsha": "1b00cc672ed688d833a37611c934da2bb29154ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-16T08:58:13.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-16T08:58:13.000Z", "avg_line_length": 37.5836139169, "max_line_length": 82, "alphanum_fraction": 0.4785140502, "include": true, "reason": "import numpy", "num_tokens": 9214}
|
"""
This file constructs the functions to find lower and upper bound of optimal
set of sourcing countries according to Jia's algorithm.
"""
## Define module and things to be exported
module JiaAlgorithm
export lowerbound_setup, lowerbound, upperbound_setup, upperbound, optimalset
## Load packages
using LinearAlgebra, Random, Distributions, Statistics, DataFrames, StatsBase
using Combinatorics
## Initialize lower bound and source potential matrix
function lowerbound_setup(N, ξ, my_exp)
J_lb = zeros(1, N) # the lower bound is empty set
source_start_lb = (J_lb * ξ).^my_exp
temp = repeat(J_lb, N, 1) + I
temp2 = ones(size(temp,1), size(temp,2))
check_matrix_lb = min.(temp, temp2)
source_check_lb = (check_matrix_lb * ξ).^my_exp
return source_start_lb, source_check_lb
end
## Jia's lower bound algorithm
function lowerbound(source_start, source_check, ϕ_σ_B, fc, N, ξ, my_exp, firm)
# Start iteration for Marginal Benefit (MB)
k = 1
Z_start = zeros(1, N)
while k <= N
# compute MB
if k > 1
source_start = (Z_start * ξ).^my_exp
source_check = (Z_start * ξ .+ ξ .* (1 .- Z_start')).^my_exp
end
source_potential_start = ϕ_σ_B[firm] .* source_start
source_potential_new_vec = ϕ_σ_B[firm] .* source_check
# generate matrix with 1 if MB positive and update set of sourcing countries
MB = (source_potential_new_vec' - fc[firm, :]' .- source_potential_start .> 0)
Z_new = min.(Z_start + MB, ones(size(MB,1), size(MB,2)))
if Z_start == Z_new
return Z_new
@goto end_lb_algorithm
end
k += 1
Z_start = Z_new
return Z_new
end
@label end_lb_algorithm
return Z_new, k
end
## Initialize upper bound and source potential matrix
function upperbound_setup(N, ξ, my_exp)
J_ub = ones(1, N) # the upper bound is full set
source_start_ub = (J_ub * ξ).^my_exp
temp = repeat(J_ub, N, 1) - I
temp2 = zeros(size(temp,1), size(temp,2))
check_matrix_ub = max.(temp, temp2)
source_check_ub = (check_matrix_ub * ξ).^my_exp
return source_start_ub, source_check_ub
end
## Jia's upper bound algorithm
function upperbound(source_start, source_check, ϕ_σ_B, fc, N, ξ, my_exp, firm)
# Start iteration for Marginal Benefit (MB)
k = 1
Z_start = ones(1, N)
while k <= N
# compute MB
if k > 1
source_start = (Z_start * ξ).^my_exp
source_check = (Z_start * ξ .- ξ .* (Z_start')).^my_exp
end
source_potential_start = ϕ_σ_B[firm] .* source_start
source_potential_new_vec = ϕ_σ_B[firm] .* source_check
# generate matrix with 1 if MB positive and update set of sourcing countries
MB = (source_potential_start .- source_potential_new_vec' - fc[firm, :]' .< 0)
Z_new = max.(Z_start - MB, zeros(size(MB,1), size(MB,2)))
if Z_start == Z_new
return Z_new
@goto end_ub_algorithm
end
k += 1
Z_start = Z_new
return Z_new
end
@label end_ub_algorithm
return Z_new, k
end
## Check if Jia's algorithm produce the same lower and upper bound
function optimalset(Z, gap_bounds, firm, Z_lb, Z_ub, S, N, num_rand_checks, rand_check_matrix, fc, ξ, my_exp, ϕ_σ_B)
if Z_lb == Z_ub
print("")
Z[firm,:] = Z_lb
# I don't know how to do lines 64-88 of gmm_objective.m: the case when lower and
# upper bounds are not too different
else
#print("WARNING! The sourcing strategy may not be solved correctly")
#print("for firm number")
#println("$firm")
Z_check = repeat(Z_lb, num_rand_checks, 1)
ind_diffZ = zeros(1,N)
for i in 1:N
if Z_ub[i] == Z_lb[i]
ind_diffZ[i] = 0.0
else
ind_diffZ[i] = 1.0
end
end
gap_bounds[firm] = sum(ind_diffZ)
K_top = Int(sum(ind_diffZ))
K_diff = [i[2] for i in findall(x->x!=0.0, ind_diffZ)]
for K in 1:K_top
Z_check[:,K_diff[K]] = (rand_check_matrix[:,K_diff[K]] .> 0.5)
end
# Use the check and both bounds to find new set of sourcing countries
Z_check = [Z_check; Z_lb; Z_ub]
fc_payments = sum(Z_check .* fc[firm,:]', dims=2)
source_potential_shock_mat_check = (Z_check * ξ).^my_exp
source_potential_vec = ϕ_σ_B[firm] .* source_potential_shock_mat_check
tot_profit = source_potential_vec - fc_payments
loc_firm_best = findmax(tot_profit, dims = 1)[2]
# replace Z as the set of locations from Z_check that maximize profits
Z[firm,:] = Z_check[loc_firm_best[1][1],:]
end
return Z
end
end # end module
|
{"hexsha": "0ab26d09aed979ae49e35062298d2dfeac85bf21", "size": 4807, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Code/JiaAlgorithm.jl", "max_stars_repo_name": "loforteg/AFT2017-Replication", "max_stars_repo_head_hexsha": "4b2abdc4584550c8c31d210d4cfa8cb7c3d20400", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Code/JiaAlgorithm.jl", "max_issues_repo_name": "loforteg/AFT2017-Replication", "max_issues_repo_head_hexsha": "4b2abdc4584550c8c31d210d4cfa8cb7c3d20400", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/JiaAlgorithm.jl", "max_forks_repo_name": "loforteg/AFT2017-Replication", "max_forks_repo_head_hexsha": "4b2abdc4584550c8c31d210d4cfa8cb7c3d20400", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7844311377, "max_line_length": 116, "alphanum_fraction": 0.6259621385, "num_tokens": 1392}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torch.autograd import Function
try:
import expansion_penalty
except:
pass
import math
import sys
from numbers import Number
from collections import Set, Mapping, deque
def square_distance(src, dst):
"""
code borrowed from: http://www.programmersought.com/article/8737853003/#14_query_ball_point_93
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1)) # 2*(xn * xm + yn * ym + zn * zm)
dist += torch.sum(src ** 2, -1).view(B, N, 1) # xn*xn + yn*yn + zn*zn
dist += torch.sum(dst ** 2, -1).view(B, 1, M) # xm*xm + ym*ym + zm*zm
return dist
def farthest_point_sample(xyz, npoint):
"""
code borrowed from: http://www.programmersought.com/article/8737853003/#14_query_ball_point_93
Input:
xyz: pointcloud data, [B, N, C]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
# Update the i-th farthest point
centroids[:, i] = farthest
# Take the xyz coordinate of the farthest point
centroid = xyz[batch_indices, farthest, :].view(B, 1, C)
# Calculate the Euclidean distance from all points in the point set to this farthest point
dist = torch.sum((xyz - centroid) ** 2, -1)
# Update distances to record the minimum distance of each point in the sample from all existing sample points
mask = dist < distance
distance[mask] = dist[mask]
# Find the farthest point from the updated distances matrix, and use it as the farthest point for the next iteration
farthest = torch.max(distance, -1)[1]
return centroids
def query_ball_point(radius, xyz, new_xyz, nsample=500, density_only=True):
"""
code borrowed from: http://www.programmersought.com/article/8737853003/#14_query_ball_point_93
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, C]
new_xyz: query points, [B, S, C]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
# sqrdists: [B, S, N] Record the Euclidean distance between the center point and all points
sqrdists = square_distance(new_xyz, xyz) # shape (B, S, N)
# Find all distances greater than radius^2, its group_idx is directly set to N; the rest retain the original value
if not density_only:
group_idx[sqrdists > radius ** 2] = N
# Do ascending order, the front is greater than radius^2 are N, will be the maximum, so will take the first nsample points directly in the remaining points
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
# Considering that there may be points in the previous nsample points that are assigned N (ie, less than nsample points in the spherical area), this point needs to be discarded, and the first point can be used instead.
# group_first: [B, S, k], actually copy the value of the first point in group_idx to the dimension of [B, S, K], which is convenient for subsequent replacement.
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
# Find the point where group_idx is equal to N
mask = group_idx == N
# Replace the value of these points with the value of the first point
group_idx[mask] = group_first[mask]
return group_idx
else:
raw_mat = torch.zeros(B,S,N)
density_mat = torch.zeros(B,S)
raw_mat[sqrdists <= radius ** 2] = 1
density_mat = torch.sum(raw_mat,2)
# print(torch.max(sqrdists))
return density_mat
class kNNRepulsionLoss(nn.Module):
"""
adapted PU-Net's uniform loss
"""
def __init__(self, k=10, n_seeds=20, h=0.01):
super(kNNRepulsionLoss,self).__init__()
self.k = k
self.n_seeds = n_seeds
self.h = h
def forward(self, pcs):
tic = time.time()
n_seeds = self.n_seeds
k = self.k
seeds = farthest_point_sample(pcs,n_seeds) # which gives index
temp = time.time()
seeds_value = torch.stack([pc[seed] for pc, seed in zip(pcs,seeds)]) # grad
pcs_new = pcs.unsqueeze(2).repeat(1,1,n_seeds,1)
seeds_new = seeds_value.unsqueeze(1).repeat(1,2048,1,1)
dist = pcs_new.add(-seeds_new)
dist_value = torch.norm(dist,dim=3)
toc = time.time()
dist_new = dist_value.transpose(1,2)
tac = time.time()
top_dist, idx = torch.topk(dist_new, k+1, dim=2, largest=False)
top_dist_net = top_dist[:,:,1:]
weights = torch.exp(-torch.pow(top_dist_net,2)*(1/(self.h**2)))
repulsion = torch.mul(-top_dist_net,weights)
return repulsion.sum(2).sum(1).mean()
class kNNLoss(nn.Module):
"""
Proposed PatchVariance component
"""
def __init__(self, k=10, n_seeds=20):
super(kNNLoss,self).__init__()
self.k = k
self.n_seeds = n_seeds
def forward(self, pcs):
n_seeds = self.n_seeds
k = self.k
seeds = farthest_point_sample(pcs,n_seeds) # which gives index
seeds_value = torch.stack([pc[seed] for pc, seed in zip(pcs,seeds)])
pcs_new = pcs.unsqueeze(2).repeat(1,1,n_seeds,1)
seeds_new = seeds_value.unsqueeze(1).repeat(1,2048,1,1)
dist = pcs_new.add(-seeds_new)
dist_value = torch.norm(dist,dim=3)
dist_new = dist_value.transpose(1,2)
top_dist, idx = torch.topk(dist_new, k+1, dim=2, largest=False)
overall_mean = top_dist[:,:,1:].mean()
top_dist = top_dist/overall_mean
var = torch.var(top_dist.mean(dim=2)).mean()
return var
class expansionPenaltyFunction(Function):
@staticmethod
def forward(ctx, xyz, primitive_size, alpha):
assert(primitive_size <= 512)
batchsize, n, _ = xyz.size()
assert(n % primitive_size == 0)
xyz = xyz.contiguous().float().cuda()
dist = torch.zeros(batchsize, n, device='cuda').contiguous()
assignment = torch.zeros(batchsize, n, device='cuda', dtype=torch.int32).contiguous() - 1
neighbor = torch.zeros(batchsize, n * 512, device='cuda', dtype=torch.int32).contiguous()
cost = torch.zeros(batchsize, n * 512, device='cuda').contiguous()
mean_mst_length = torch.zeros(batchsize, device='cuda').contiguous()
expansion_penalty.forward(xyz, primitive_size, assignment, dist, alpha, neighbor, cost, mean_mst_length)
ctx.save_for_backward(xyz, assignment)
return dist, assignment, mean_mst_length / (n / primitive_size)
@staticmethod
def backward(ctx, grad_dist, grad_idx, grad_mml):
xyz, assignment = ctx.saved_tensors
grad_dist = grad_dist.contiguous()
grad_xyz = torch.zeros(xyz.size(), device='cuda').contiguous()
expansion_penalty.backward(xyz, grad_xyz, grad_dist, assignment)
return grad_xyz, None, None
class expansionPenaltyModule(nn.Module):
"""
MSN's expansion penalty
"""
def __init__(self):
super(expansionPenaltyModule, self).__init__()
def forward(self, input, primitive_size, alpha):
return expansionPenaltyFunction.apply(input, primitive_size, alpha)
class DiscriminatorLoss(object):
"""
feature distance from discriminator
"""
def __init__(self, data_parallel=False):
self.l2 = nn.MSELoss()
self.data_parallel = data_parallel
def __call__(self, D, fake_pcd, real_pcd):
if self.data_parallel:
with torch.no_grad():
d, real_feature = nn.parallel.data_parallel(
D, real_pcd.detach())
d, fake_feature = nn.parallel.data_parallel(D, fake_pcd)
else:
with torch.no_grad():
d, real_feature = D(real_pcd.detach())
d, fake_feature = D(fake_pcd)
D_penalty = F.l1_loss(fake_feature, real_feature)
return D_penalty
class DirectedHausdorff(object):
"""
Hausdorf distance
"""
def __init__(self, reduce_mean=True):
# super(DirectedHausdorff,self).__init__()
self.reduce_mean = reduce_mean
def __call__(self, point_cloud1, point_cloud2):
"""
:param point_cloud1: (B, 3, N) partial
:param point_cloud2: (B, 3, M) output
:return: directed hausdorff distance, A -> B
"""
n_pts1 = point_cloud1.shape[2]
n_pts2 = point_cloud2.shape[2]
pc1 = point_cloud1.unsqueeze(3)
pc1 = pc1.repeat((1, 1, 1, n_pts2)) # (B, 3, N, M)
pc2 = point_cloud2.unsqueeze(2)
pc2 = pc2.repeat((1, 1, n_pts1, 1)) # (B, 3, N, M)
l2_dist = torch.sqrt(torch.sum((pc1 - pc2) ** 2, dim=1)) # (B, N, M)
shortest_dist, _ = torch.min(l2_dist, dim=2)
hausdorff_dist, _ = torch.max(shortest_dist, dim=1) # (B, )
if self.reduce_mean:
hausdorff_dist = torch.mean(hausdorff_dist)
return hausdorff_dist
|
{"hexsha": "f058d4131e3dc9aa303ac5c79dd01b9243dfd880", "size": 9837, "ext": "py", "lang": "Python", "max_stars_repo_path": "loss.py", "max_stars_repo_name": "benedictlee21/FYP_SCSE21_0204", "max_stars_repo_head_hexsha": "b5fdefac0fbec1291def5d47c780e8e7dced3b50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-07T04:25:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T04:25:38.000Z", "max_issues_repo_path": "loss.py", "max_issues_repo_name": "benedictlee21/FYP_SCSE21_0204", "max_issues_repo_head_hexsha": "b5fdefac0fbec1291def5d47c780e8e7dced3b50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "loss.py", "max_forks_repo_name": "benedictlee21/FYP_SCSE21_0204", "max_forks_repo_head_hexsha": "b5fdefac0fbec1291def5d47c780e8e7dced3b50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1510204082, "max_line_length": 226, "alphanum_fraction": 0.6312900274, "include": true, "reason": "import numpy", "num_tokens": 2679}
|
from __future__ import division
import argparse
import os
import glob
import time
from datetime import datetime
import torch.distributed as dist
import torch
import utils
import logging
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data.distributed import DistributedSampler
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import numpy as np
import yaml
import sys
from tensorboardX import SummaryWriter
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from devkit.core import (init_dist, broadcast_params, average_gradients, load_state_ckpt, load_state, save_checkpoint, LRScheduler, CrossEntropyLoss)
from devkit.dataset.imagenet_dataset import ImagenetDataset
from network_eval import ShuffleNetV2_OneShot
parser = argparse.ArgumentParser(
description='Pytorch Imagenet Training')
parser.add_argument('--SinglePath', action='store_true', default=False, help='true if using SinglePath')
parser.add_argument('--config', default='configs/shufflenet_v2_bn.yaml')
parser.add_argument("--local_rank", type=int)
parser.add_argument(
'--port', default=29500, type=int, help='port of server')
parser.add_argument('--world-size', default=1, type=int)
parser.add_argument('--rank', default=0, type=int)
parser.add_argument('--model_dir', type=str)
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--off-ms', action='store_true')
parser.add_argument('--epochs', type=int, default=240, help='num of training epochs')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--loc_mean', type=float, default=1, help='initial mean value to generate the location')
parser.add_argument('--loc_std', type=float, default=0.01, help='initial std to generate the location')
parser.add_argument('--bn_affine', action='store_true', default=False, help='bn affine flag')
parser.add_argument('--bn_eps', type=float, default=1e-2, help='initial mean value to generate the location')
parser.add_argument('--remark', type=str, default='none', help='experiment details')
args = parser.parse_args()
def main():
global args, best_prec1
args = parser.parse_args()
with open(args.config) as f:
config = yaml.load(f)
for key in config:
for k, v in config[key].items():
setattr(args, k, v)
print('Enabled distributed training.')
rank, world_size = init_dist(
backend='nccl', port=args.port)
args.rank = rank
args.world_size = world_size
np.random.seed(args.seed*args.rank)
torch.manual_seed(args.seed*args.rank)
torch.cuda.manual_seed(args.seed*args.rank)
torch.cuda.manual_seed_all(args.seed*args.rank)
# create model
print("=> creating model '{}'".format(args.model))
if args.SinglePath:
architecture = args.arch
scale_list = 8*[1.0]
scale_ids = [6, 5, 3, 5, 2, 6, 3, 4, 2, 5, 7, 5, 4, 6, 7, 4, 4, 5, 4, 3]
channels_scales = []
for i in range(len(scale_ids)):
channels_scales.append(scale_list[scale_ids[i]])
model = ShuffleNetV2_OneShot(args=args, architecture=architecture, channels_scales=channels_scales)
model.cuda()
broadcast_params(model)
# auto resume from a checkpoint
remark = 'imagenet_'
if args.remark != 'none':
remark += args.remark
args.save = 'search-{}-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"), remark)
args.save_log = 'nas-{}-{}'.format(time.strftime("%Y%m%d-%H%M%S"), remark)
generate_date = str(datetime.now().date())
path = os.path.join(generate_date, args.save)
if args.rank == 0:
log_format = '%(asctime)s %(message)s'
utils.create_exp_dir(generate_date, path, scripts_to_save=glob.glob('*.py'))
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(path, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info("args = %s", args)
writer = SummaryWriter('./runs/' + generate_date + '/' + args.save_log)
else:
writer = None
#model_dir = args.model_dir
model_dir = path
start_epoch = 0
if args.evaluate:
load_state_ckpt(args.checkpoint_path, model)
cudnn.benchmark = True
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_dataset = ImagenetDataset(
args.val_root,
args.val_source,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
val_sampler = DistributedSampler(val_dataset)
val_loader = DataLoader(
val_dataset, batch_size=50, shuffle=False,
num_workers=args.workers, pin_memory=False, sampler=val_sampler)
if args.evaluate:
validate(val_loader, model, 0, writer, logging)
return
def validate(val_loader, model, epoch, writer, logging):
batch_time = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
world_size = args.world_size
rank = args.rank
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input.cuda())
target_var = torch.autograd.Variable(target)
output = model(input_var)
# measure accuracy
prec1, prec5 = accuracy(output, target, topk=(1, 5))
reduced_prec1 = prec1.clone() / world_size
reduced_prec5 = prec5.clone() / world_size
dist.all_reduce_multigpu([reduced_prec1])
dist.all_reduce_multigpu([reduced_prec5])
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and rank == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time,
top1=top1, top5=top5))
if rank == 0:
niter = (epoch + 1)
logging.info('valid %f %f', top1.avg, top5.avg)
return top1.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
{"hexsha": "54f42456ef2101d14b97370affe2b111d506642b", "size": 7967, "ext": "py", "lang": "Python", "max_stars_repo_path": "DSNAS/eval_imagenet.py", "max_stars_repo_name": "cwlacewe/SNAS-Series", "max_stars_repo_head_hexsha": "92ac8031f718235aecaefb9967851f8f355dbca0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 133, "max_stars_repo_stars_event_min_datetime": "2020-03-23T02:36:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T03:33:44.000Z", "max_issues_repo_path": "DSNAS/eval_imagenet.py", "max_issues_repo_name": "cwlacewe/SNAS-Series", "max_issues_repo_head_hexsha": "92ac8031f718235aecaefb9967851f8f355dbca0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-04-05T16:47:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-03T09:08:24.000Z", "max_forks_repo_path": "DSNAS/eval_imagenet.py", "max_forks_repo_name": "cwlacewe/SNAS-Series", "max_forks_repo_head_hexsha": "92ac8031f718235aecaefb9967851f8f355dbca0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2020-03-29T05:35:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T03:24:24.000Z", "avg_line_length": 34.047008547, "max_line_length": 149, "alphanum_fraction": 0.6395129911, "include": true, "reason": "import numpy", "num_tokens": 1959}
|
!#################################################################################################!
!BSD 3-Clause License
!
!Copyright (c) 2017, Ricardo Torres
!All rights reserved.
!
!Redistribution and use in source and binary forms, with or without
!modification, are permitted provided that the following conditions are met:
!
!* Redistributions of source code must retain the above copyright notice, this
! list of conditions and the following disclaimer.
!* Redistributions in binary form must reproduce the above copyright notice,
! this list of conditions and the following disclaimer in the documentation
! and/or other materials provided with the distribution.
!* Neither the name of the copyright holder nor the names of its
! contributors may be used to endorse or promote products derived from
! this software without specific prior written permission.
!
!THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
!AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
!IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
!DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
!FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
!DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
!SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
!CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
!OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
!OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
!#################################################################################################!
module H5_OO_mod
use H5_Func_mod
use Types_mod
implicit none
!#################################################################################################!
type :: H5Attributable
integer(I32) :: id
contains
procedure, public :: Attr_exists
procedure, public :: getNumberAttrs
procedure, public :: getAttTypeSize
procedure, public :: getAttNameByIdx
procedure, public :: getAttDims
procedure, private :: set_Int16_Attr0
procedure, private :: set_Int16_Attr1
procedure, private :: set_Int32_Attr0
procedure, private :: set_Int32_Attr1
procedure, private :: set_Real32_Attr0
procedure, private :: set_Real32_Attr1
procedure, private :: set_Real64_Attr0
procedure, private :: set_Real64_Attr1
procedure, private :: set_Char_Attr0
procedure, private :: set_Char_Attr1
procedure, private :: get_Char_Attr0
procedure, private :: get_Char_Attr1
procedure, private :: get_Int_Attr0
procedure, private :: get_Int_Attr1
procedure, private :: get_Real32_Attr0
procedure, private :: get_Real32_Attr1
procedure, private :: get_Real64_Attr0
procedure, private :: get_Real64_Attr1
generic, public :: setAttribute => &
set_Int16_Attr0, &
set_Int16_Attr1, &
set_Int32_Attr0, &
set_Int32_Attr1, &
set_Real32_Attr0, &
set_Real32_Attr1, &
set_Real64_Attr0, &
set_Real64_Attr1, &
set_Char_Attr0, &
set_Char_Attr1
generic, public :: getAttribute => &
get_Char_Attr0, &
get_Char_Attr1, &
get_Int_Attr0, &
get_Int_Attr1, &
get_Real32_Attr0, &
get_Real32_Attr1, &
get_Real64_Attr0, &
get_Real64_Attr1
end type H5Attributable
!#################################################################################################!
type, extends(H5Attributable) :: H5Group
contains
procedure, public :: setGroup
procedure, public :: closeGroup
procedure, public :: openGroup
procedure, public :: getNumObj
procedure, public :: getObjNameByIdx
procedure, public :: isDset
procedure, public :: isGrp
end type H5Group
!#################################################################################################!
type, extends(H5Group) :: H5File
contains
procedure, public :: closeFile
end type H5File
interface H5File
procedure newH5File
end interface H5File
!#################################################################################################!
type, extends(H5Attributable) :: H5Dataset
character(len=255) :: d_name
integer(kind=I32), private :: parent_id
integer(kind=I32), private :: compression_level
integer(kind=I32), private :: chunk_size
integer(kind=I32), private :: fill_value
integer(kind=I32), private :: extendable
contains
procedure, public :: showstatus
procedure, public :: setChunkSize
procedure, public :: setCompressionLevel
procedure, public :: setFillValue
procedure, public :: setExtendable
procedure, public :: getRank
procedure, public :: getDims
procedure, public :: getDTypeSize ! H5T_NO_CLASS_F -1
! H5T_INTEGER_F 0
! H5T_FLOAT_F 1
! H5T_STRING_F 2
! H5T_BITFIELD_F 3
! H5T_OPAQUE_F 4
! H5T_COMPOUND_F 5
! H5T_REFERENCE_F 6
! H5T_ENUM_F 7
! H5T_VLEN_F 8
! H5T_ARRAY_F 9
procedure, public :: setEmpty
procedure, public :: defScale
procedure, public :: setScale
procedure, private :: set_Int8_1d
procedure, private :: set_Int16_1d
procedure, private :: set_Int32_1d
procedure, private :: set_Real32_1d
procedure, private :: set_Real64_1d
procedure, private :: set_Int8_2d
procedure, private :: set_Int16_2d
procedure, private :: set_Int32_2d
procedure, private :: set_Real32_2d
procedure, private :: set_Real64_2d
procedure, private :: set_Int8_3d
procedure, private :: set_Int16_3d
procedure, private :: set_Int32_3d
procedure, private :: set_Real32_3d
procedure, private :: set_Real64_3d
procedure, private :: set_Int8_4d
procedure, private :: set_Int16_4d
procedure, private :: set_Int32_4d
procedure, private :: set_Real32_4d
procedure, private :: set_Real64_4d
procedure, private :: set_Int8_5d
procedure, private :: set_Int16_5d
procedure, private :: set_Int32_5d
procedure, private :: set_Real32_5d
procedure, private :: set_Real64_5d
procedure, private :: set_Int8_6d
procedure, private :: set_Int16_6d
procedure, private :: set_Int32_6d
procedure, private :: set_Real32_6d
procedure, private :: set_Real64_6d
procedure, private :: get_Int_1d
procedure, private :: get_Int_2d
procedure, private :: get_Int_3d
procedure, private :: get_Int_4d
procedure, private :: get_Int_5d
procedure, private :: get_Int_6d
procedure, private :: get_Real32_1d
procedure, private :: get_Real32_2d
procedure, private :: get_Real32_3d
procedure, private :: get_Real32_4d
procedure, private :: get_Real32_5d
procedure, private :: get_Real32_6d
procedure, private :: get_Real64_1d
procedure, private :: get_Real64_2d
procedure, private :: get_Real64_3d
procedure, private :: get_Real64_4d
procedure, private :: get_Real64_5d
procedure, private :: get_Real64_6d
procedure, private :: get_Int_Slab1d
procedure, private :: get_Int_Slab2d
procedure, private :: get_Int_Slab3d
procedure, private :: get_Int_Slab4d
procedure, private :: get_Int_Slab5d
procedure, private :: get_Real_Slab1d
procedure, private :: get_Real_Slab2d
procedure, private :: get_Real_Slab3d
procedure, private :: get_Real_Slab4d
procedure, private :: get_Real_Slab5d
procedure, private :: Extend_Int8_1d
procedure, private :: Extend_Int16_1d
procedure, private :: Extend_Int32_1d
procedure, private :: Extend_Real32_1d
procedure, private :: Extend_Real64_1d
procedure, private :: Extend_Int8_2d
procedure, private :: Extend_Int16_2d
procedure, private :: Extend_Int32_2d
procedure, private :: Extend_Real32_2d
procedure, private :: Extend_Real64_2d
procedure, private :: Extend_Int8_3d
procedure, private :: Extend_Int16_3d
procedure, private :: Extend_Int32_3d
procedure, private :: Extend_Real32_3d
procedure, private :: Extend_Real64_3d
procedure, private :: Extend_Int8_4d
procedure, private :: Extend_Int16_4d
procedure, private :: Extend_Int32_4d
procedure, private :: Extend_Real32_4d
procedure, private :: Extend_Real64_4d
procedure, private :: Extend_Int8_5d
procedure, private :: Extend_Int16_5d
procedure, private :: Extend_Int32_5d
procedure, private :: Extend_Real32_5d
procedure, private :: Extend_Real64_5d
procedure, private :: Extend_Int8_6d
procedure, private :: Extend_Int16_6d
procedure, private :: Extend_Int32_6d
procedure, private :: Extend_Real32_6d
procedure, private :: Extend_Real64_6d
generic, public :: setDataset => &
set_Int8_1d, &
set_Int16_1d, &
set_Int32_1d, &
set_Real32_1d, &
set_Real64_1d, &
set_Int8_2d, &
set_Int16_2d, &
set_Int32_2d, &
set_Real32_2d, &
set_Real64_2d, &
set_Int8_3d, &
set_Int16_3d, &
set_Int32_3d, &
set_Real32_3d, &
set_Real64_3d, &
set_Int8_4d, &
set_Int16_4d, &
set_Int32_4d, &
set_Real32_4d, &
set_Real64_4d, &
set_Int8_5d, &
set_Int16_5d, &
set_Int32_5d, &
set_Real32_5d, &
set_Real64_5d, &
set_Int8_6d, &
set_Int16_6d, &
set_Int32_6d, &
set_Real32_6d, &
set_Real64_6d
generic, public :: getDataset => &
get_Int_1d, &
get_Int_2d, &
get_Int_3d, &
get_Int_4d, &
get_Int_5d, &
get_Int_6d, &
get_Real32_1d, &
get_Real32_2d, &
get_Real32_3d, &
get_Real32_4d, &
get_Real32_5d, &
get_Real32_6d, &
get_Real64_1d, &
get_Real64_2d, &
get_Real64_3d, &
get_Real64_4d, &
get_Real64_5d, &
get_Real64_6d
generic, public :: getBlock => &
get_Int_Slab1d, &
get_Int_Slab2d, &
get_Int_Slab3d, &
get_Int_Slab4d, &
get_Int_Slab5d, &
get_Real_Slab1d, &
get_Real_Slab2d, &
get_Real_Slab3d, &
get_Real_Slab4d, &
get_Real_Slab5d
generic, public :: extendDataset => &
Extend_Int8_1d, &
Extend_Int16_1d, &
Extend_Int32_1d, &
Extend_Real32_1d, &
Extend_Real64_1d, &
Extend_Int8_2d, &
Extend_Int16_2d, &
Extend_Int32_2d, &
Extend_Real32_2d, &
Extend_Real64_2d, &
Extend_Int8_3d, &
Extend_Int16_3d, &
Extend_Int32_3d, &
Extend_Real32_3d, &
Extend_Real64_3d, &
Extend_Int8_4d, &
Extend_Int16_4d, &
Extend_Int32_4d, &
Extend_Real32_4d, &
Extend_Real64_4d, &
Extend_Int8_5d, &
Extend_Int16_5d, &
Extend_Int32_5d, &
Extend_Real32_5d, &
Extend_Real64_5d, &
Extend_Int8_6d, &
Extend_Int16_6d, &
Extend_Int32_6d, &
Extend_Real32_6d, &
Extend_Real64_6d
end type H5Dataset
interface H5Dataset
procedure newH5Dataset
end interface H5Dataset
!#################################################################################################!
contains
!#################################################################################################!
!###################################### Attribute Methods ########################################!
!#################################################################################################!
function Attr_exists(self, a_name)
class(H5Attributable), intent(in) :: self
character(len=*), intent (in):: a_name
logical :: Attr_exists
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
Attr_exists = Ch_Attr_exist(dset_id,a_name)
error = close_dset(dset_id)
class default
Attr_exists = Ch_Attr_exist(self%id,a_name)
end select
end function Attr_exists
subroutine getAttNameByIdx(self, idx, a_name)
class(H5Attributable), intent(in) :: self
integer(kind=I32), intent(in) :: idx
character(len=*), intent(out) :: a_name
character(len=80) :: obj_name
integer :: dset_id
integer :: hdferr
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
hdferr = get_att_name_idx(dset_id, self%d_name, idx, a_name)
hdferr = close_dset(dset_id)
class default
hdferr = get_obj_name(self%id, obj_name)
hdferr = get_att_name_idx(self%id, trim(obj_name), idx, a_name)
end select
end subroutine getAttNameByIdx
subroutine getNumberAttrs(self, n_attrs)
class(H5Attributable), intent(in) :: self
integer(kind=I32), intent(out) :: n_attrs
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
n_attrs = number_attrs(dset_id)
error = close_dset(dset_id)
class default
n_attrs = number_attrs(self%id)
end select
end subroutine getNumberAttrs
subroutine getAttTypeSize(self, a_name, att_type, att_type_size)
class(H5Attributable), intent(in) :: self
character(len=*), intent (in):: a_name
integer(kind=I32), intent(out) :: att_type
integer(kind=I64), intent(out) :: att_type_size
integer(kind=I32) :: hdferr
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
call attr_type_size(dset_id, a_name, att_type, att_type_size, hdferr)
hdferr = close_dset(dset_id)
class default
call attr_type_size(self%id, a_name, att_type, att_type_size, hdferr)
end select
end subroutine getAttTypeSize
subroutine getAttDims(self, a_name, dims)
class(H5Attributable), intent(in) :: self
character(len=*), intent (in):: a_name
integer, intent(out) :: dims(:)
integer(kind=I32) :: hdferr
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
hdferr = get_att_dims(dset_id, a_name, dims)
hdferr = close_dset(dset_id)
class default
hdferr = get_att_dims(self%id, a_name, dims)
end select
end subroutine getAttDims
subroutine set_Int16_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
integer(kind=I16), intent (in):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Int16_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Int16_Attr0(self%id, a_name, val)
end select
end subroutine set_Int16_Attr0
subroutine set_Int16_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
integer(kind=I16), intent (in):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Int16_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Int16_Attr1(self%id, a_name, val)
end select
end subroutine set_Int16_Attr1
subroutine set_Int32_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
integer(kind=I32), intent (in):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Int32_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Int32_Attr0(self%id, a_name, val)
end select
end subroutine set_Int32_Attr0
subroutine set_Int32_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
integer(kind=I32), intent (in):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Int32_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Int32_Attr1(self%id, a_name, val)
end select
end subroutine set_Int32_Attr1
subroutine set_Real32_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
real(kind=SP), intent (in):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Real32_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Real32_Attr0(self%id, a_name, val)
end select
end subroutine set_Real32_Attr0
subroutine set_Real32_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
real(kind=SP), intent (in):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Real32_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Real32_Attr1(self%id, a_name, val)
end select
end subroutine set_Real32_Attr1
subroutine set_Real64_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
real(kind=DP), intent (in):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Real64_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Real64_Attr0(self%id, a_name, val)
end select
end subroutine set_Real64_Attr0
subroutine set_Real64_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
real(kind=DP), intent (in):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Real64_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Real64_Attr1(self%id, a_name, val)
end select
end subroutine set_Real64_Attr1
subroutine set_Char_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
character(len=*), intent (in):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Char_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Char_Attr0(self%id, a_name, val)
end select
end subroutine set_Char_Attr0
subroutine set_Char_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
character(len=*), intent (in):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Create_Char_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Create_Char_Attr1(self%id, a_name, val)
end select
end subroutine set_Char_Attr1
subroutine get_Char_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
character(len=*), intent (out):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Read_Char_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Read_Char_Attr0(self%id, a_name, val)
end select
end subroutine get_Char_Attr0
subroutine get_Char_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
character(len=*), intent (out):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Read_Char_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Read_Char_Attr1(self%id, a_name, val)
end select
end subroutine get_Char_Attr1
subroutine get_Int_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
integer(kind=I32), intent (out):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Read_Int_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Read_Int_Attr0(self%id, a_name, val)
end select
end subroutine get_Int_Attr0
subroutine get_Int_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
integer(kind=I32), intent (out):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Read_Int_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Read_Int_Attr1(self%id, a_name, val)
end select
end subroutine get_Int_Attr1
subroutine get_Real32_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
real(kind=SP), intent (out):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Read_Real32_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Read_Real32_Attr0(self%id, a_name, val)
end select
end subroutine get_Real32_Attr0
subroutine get_Real32_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
real(kind=SP), intent (out):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Read_Real32_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Read_Real32_Attr1(self%id, a_name, val)
end select
end subroutine get_Real32_Attr1
subroutine get_Real64_Attr0(self, a_name, val)
class(H5Attributable), intent(in) :: self
real(kind=DP), intent (out):: val
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Read_Real64_Attr0(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Read_Real64_Attr0(self%id, a_name, val)
end select
end subroutine get_Real64_Attr0
subroutine get_Real64_Attr1(self, a_name, val)
class(H5Attributable), intent(in) :: self
real(kind=DP), intent (out):: val(:)
character(len=*), intent (in):: a_name
integer(kind=I32) :: error
integer(kind=I32) :: dset_id
select type (self)
class is (H5Dataset)
dset_id = open_dset(self%parent_id,self%d_name)
error = Read_Real64_Attr1(dset_id, a_name, val)
error = close_dset(dset_id)
class default
error = Read_Real64_Attr1(self%id, a_name, val)
end select
end subroutine get_Real64_Attr1
!#################################################################################################!
!######################################## Group Methods ##########################################!
!#################################################################################################!
subroutine openGroup(self, g_name, newGroup)
class(H5Group) :: self
type(H5Group), intent(out) :: newGroup
character(len=*), intent(in) :: g_name
newGroup%id = hdf_open_group(self%id, g_name)
end subroutine openGroup
subroutine setGroup(self, g_name, newGroup)
class(H5Group) :: self
character(len=*), intent(in) :: g_name
type(H5Group), intent(out) :: newGroup
newGroup%id=hdf_create_group(self%id, g_name)
end subroutine setGroup
subroutine closeGroup(self)
class(H5Group) :: self
integer(kind=I32) :: error
error=hdf_close_group(self%id)
end subroutine closeGroup
subroutine getNumObj(self, nlinks)
class(H5Group) :: self
integer(kind=I32), intent(out) :: nlinks
integer(kind=I32) :: error
error=grp_num_of_obj(self%id, nlinks)
end subroutine getNumObj
subroutine getObjNameByIdx(self, idx, obj_name)
class(H5Group) :: self
integer(kind=I32), intent(in) :: idx
character(len=*), intent(out) :: obj_name
integer(kind=I32) :: hdferr
hdferr = grp_obj_name_idx(self%id, idx, obj_name)
end subroutine getObjNameByIdx
function isDset(self, obj_name)
class(H5Group) :: self
character(len=*), intent(in) :: obj_name
logical :: isDset
integer(kind=I32) :: hdferr
hdferr = obj_is_dset(self%id, obj_name, isDset)
end function isDset
function isGrp(self, obj_name)
class(H5Group) :: self
character(len=*), intent(in) :: obj_name
logical :: isGrp
integer(kind=I32) :: hdferr
hdferr = obj_is_grp(self%id, obj_name, isGrp)
end function isGrp
!#################################################################################################!
!######################################### File Methods ##########################################!
!#################################################################################################!
function newH5File( filename, state, mode )
type(h5file) :: newH5File
character(len=*), intent(in) :: filename !< the HDF5 filename
character(len=*), optional, intent(in) :: state !< file state (OLD, NEW, REPLACE)
character(len=*), optional, intent(in) :: mode !< file mode (READ, WRITE, READWRITE)
integer(kind=I32) :: error
newH5File%id = hdf_open_file(filename, state, mode)
end function newH5File
subroutine closeFile( self )
class(H5File), intent(in) :: self
integer(kind=I32) :: error
error = hdf_close_file(self%id)
end subroutine closeFile
!#################################################################################################!
!######################################## Dataset Methods ########################################!
!#################################################################################################!
function newH5Dataset(dset_name, parent_Group)
type(H5Dataset) :: newH5Dataset
character(len=*), intent(in) :: dset_name
class(H5Group), intent(in) :: parent_Group
newH5Dataset%d_name = dset_name
newH5Dataset%parent_id = parent_Group%id
newH5Dataset%compression_level = 9
newH5Dataset%chunk_size = 100
newH5Dataset%extendable = 0
newH5Dataset%fill_value = 0
end function newH5Dataset
subroutine setEmpty(self)
class(H5Dataset), intent(inout) :: self
self%id=Create_Empty_Dataset(self%parent_id,self%d_name)
end subroutine setEmpty
subroutine get_Int_1d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(out) :: val(:)
self%id = Read_Int_1d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Int_1d
subroutine get_Int_2d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(out) :: val(:,:)
self%id = Read_Int_2d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Int_2d
subroutine get_Int_3d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(out) :: val(:,:,:)
self%id = Read_Int_3d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Int_3d
subroutine get_Int_4d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(out) :: val(:,:,:,:)
self%id = Read_Int_4d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Int_4d
subroutine get_Int_5d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(out) :: val(:,:,:,:,:)
self%id = Read_Int_5d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Int_5d
subroutine get_Int_6d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(out) :: val(:,:,:,:,:,:)
self%id = Read_Int_6d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Int_6d
subroutine get_Real32_1d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(out) :: val(:)
self%id = Read_Real32_1d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real32_1d
subroutine get_Real32_2d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(out) :: val(:,:)
self%id = Read_Real32_2d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real32_2d
subroutine get_Real32_3d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(out) :: val(:,:,:)
self%id = Read_Real32_3d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real32_3d
subroutine get_Real32_4d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(out) :: val(:,:,:,:)
self%id = Read_Real32_4d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real32_4d
subroutine get_Real32_5d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(out) :: val(:,:,:,:,:)
self%id = Read_Real32_5d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real32_5d
subroutine get_Real32_6d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(out) :: val(:,:,:,:,:,:)
self%id = Read_Real32_6d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real32_6d
subroutine get_Real64_1d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(out) :: val(:)
self%id = Read_Real64_1d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real64_1d
subroutine get_Real64_2d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(out) :: val(:,:)
self%id = Read_Real64_2d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real64_2d
subroutine get_Real64_3d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(out) :: val(:,:,:)
self%id = Read_Real64_3d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real64_3d
subroutine get_Real64_4d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(out) :: val(:,:,:,:)
self%id = Read_Real64_4d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real64_4d
subroutine get_Real64_5d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(out) :: val(:,:,:,:,:)
self%id = Read_Real64_5d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real64_5d
subroutine get_Real64_6d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(out) :: val(:,:,:,:,:,:)
self%id = Read_Real64_6d_dataset(self%parent_id, self%d_name, val)
end subroutine get_Real64_6d
subroutine get_Int_Slab1d(self, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(out) :: val(:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Int_1dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Int_Slab1d
subroutine get_Int_Slab2d(self, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(out) :: val(:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Int_2dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Int_Slab2d
subroutine get_Int_Slab3d(self, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(out) :: val(:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Int_3dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Int_Slab3d
subroutine get_Int_Slab4d(self, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(out) :: val(:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Int_4dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Int_Slab4d
subroutine get_Int_Slab5d(self, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(out) :: val(:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Int_5dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Int_Slab5d
subroutine get_Real_Slab1d(self, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(out) :: val(:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Real_1dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Real_Slab1d
subroutine get_Real_Slab2d(self, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(out) :: val(:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Real_2dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Real_Slab2d
subroutine get_Real_Slab3d(self, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(out) :: val(:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Real_3dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Real_Slab3d
subroutine get_Real_Slab4d(self, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(out) :: val(:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Real_4dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Real_Slab4d
subroutine get_Real_Slab5d(self, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(out) :: val(:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
self%id = Read_Real_5dSlab(self%parent_id, self%d_name, offset, dshape, val)
end subroutine get_Real_Slab5d
subroutine showstatus(self)
class(H5Dataset) :: self
print*,self%parent_id
print*,self%compression_level
print*,self%chunk_size
print*,trim(self%d_name)
print*,self%id
end subroutine showstatus
subroutine setChunkSize(self, chunksize)
class(H5Dataset) :: self
integer(kind=I32) :: chunksize
self%chunk_size=chunksize
end subroutine setChunkSize
subroutine setCompressionLevel(self, comp_level)
class(H5Dataset) :: self
integer(kind=I32) :: comp_level
self%compression_level=comp_level
end subroutine setCompressionLevel
subroutine setFillValue(self, fillvalue)
class(H5Dataset) :: self
integer(kind=I32) :: fillvalue
self%fill_value=fillvalue
end subroutine setFillValue
subroutine setExtendable(self,extdims)
class(H5Dataset) :: self
integer(kind=I32) :: extdims
self%extendable=extdims
end subroutine setExtendable
subroutine getRank(self, d_rank)
class(H5Dataset) :: self
integer, intent(out) :: d_rank
integer(kind=I32) :: error
error = hdf_get_rank(self%parent_id,self%d_name,d_rank)
end subroutine getRank
subroutine getDims(self, dims)
class(H5Dataset) :: self
integer, intent(out) :: dims(:)
integer(kind=I32) :: error
error = hdf_get_dims(self%parent_id,self%d_name,dims)
end subroutine getDims
subroutine getDTypeSize(self,dset_type, dset_type_size)
class(H5Dataset) :: self
integer(kind=I32), intent(out) :: dset_type
integer(kind=I64), intent(out) :: dset_type_size
integer(kind=I32) :: error
call get_dset_type(self%parent_id, self%d_name, dset_type, dset_type_size, error)
end subroutine getDTypeSize
subroutine set_Int8_1d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I8), intent(in) :: val(:)
self%id = Create_Int8_1d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int8_1d
subroutine set_Int16_1d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I16), intent(in) :: val(:)
self%id = Create_Int16_1d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int16_1d
subroutine set_Int32_1d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(in) :: val(:)
self%id = Create_Int32_1d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int32_1d
subroutine set_Real32_1d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(in) :: val(:)
self%id = Create_Real32_1d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real32_1d
subroutine set_Real64_1d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(in) :: val(:)
self%id = Create_Real64_1d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real64_1d
subroutine set_Int8_2d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I8), intent(in) :: val(:,:)
self%id = Create_Int8_2d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int8_2d
subroutine set_Int16_2d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I16), intent(in) :: val(:,:)
self%id = Create_Int16_2d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int16_2d
subroutine set_Int32_2d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(in) :: val(:,:)
self%id = Create_Int32_2d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int32_2d
subroutine set_Real32_2d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(in) :: val(:,:)
self%id = Create_Real32_2d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real32_2d
subroutine set_Real64_2d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(in) :: val(:,:)
self%id = Create_Real64_2d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real64_2d
subroutine set_Int8_3d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I8), intent(in) :: val(:,:,:)
self%id = Create_Int8_3d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int8_3d
subroutine set_Int16_3d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I16), intent(in) :: val(:,:,:)
self%id = Create_Int16_3d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int16_3d
subroutine set_Int32_3d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(in) :: val(:,:,:)
self%id = Create_Int32_3d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int32_3d
subroutine set_Real32_3d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(in) :: val(:,:,:)
self%id = Create_Real32_3d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real32_3d
subroutine set_Real64_3d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(in) :: val(:,:,:)
self%id = Create_Real64_3d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real64_3d
subroutine set_Int8_4d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I8), intent(in) :: val(:,:,:,:)
self%id = Create_Int8_4d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int8_4d
subroutine set_Int16_4d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I16), intent(in) :: val(:,:,:,:)
self%id = Create_Int16_4d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int16_4d
subroutine set_Int32_4d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(in) :: val(:,:,:,:)
self%id = Create_Int32_4d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int32_4d
subroutine set_Real32_4d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(in) :: val(:,:,:,:)
self%id = Create_Real32_4d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real32_4d
subroutine set_Real64_4d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(in) :: val(:,:,:,:)
self%id = Create_Real64_4d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real64_4d
subroutine set_Int8_5d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I8), intent(in) :: val(:,:,:,:,:)
self%id = Create_Int8_5d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int8_5d
subroutine set_Int16_5d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I16), intent(in) :: val(:,:,:,:,:)
self%id = Create_Int16_5d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int16_5d
subroutine set_Int32_5d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(in) :: val(:,:,:,:,:)
self%id = Create_Int32_5d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int32_5d
subroutine set_Real32_5d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(in) :: val(:,:,:,:,:)
self%id = Create_Real32_5d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real32_5d
subroutine set_Real64_5d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(in) :: val(:,:,:,:,:)
self%id = Create_Real64_5d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real64_5d
subroutine set_Int8_6d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I8), intent(in) :: val(:,:,:,:,:,:)
self%id = Create_Int8_6d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int8_6d
subroutine set_Int16_6d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I16), intent(in) :: val(:,:,:,:,:,:)
self%id = Create_Int16_6d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int16_6d
subroutine set_Int32_6d(self, val)
class(H5Dataset), intent(inout) :: self
integer(kind=I32), intent(in) :: val(:,:,:,:,:,:)
self%id = Create_Int32_6d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Int32_6d
subroutine set_Real32_6d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=SP), intent(in) :: val(:,:,:,:,:,:)
self%id = Create_Real32_6d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real32_6d
subroutine set_Real64_6d(self, val)
class(H5Dataset), intent(inout) :: self
real(kind=DP), intent(in) :: val(:,:,:,:,:,:)
self%id = Create_Real64_6d_Dataset(self%parent_id, self%d_name, val, self%fill_value, &
self%chunk_size, self%compression_level, self%extendable)
end subroutine set_Real64_6d
subroutine Extend_Int8_1d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I8), intent(in) :: val(:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int8_1d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int8_1d
subroutine Extend_Int16_1d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I16), intent(in) :: val(:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int16_1d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int16_1d
subroutine Extend_Int32_1d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(in) :: val(:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int32_1d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int32_1d
subroutine Extend_Real32_1d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(in) :: val(:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real32_1d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real32_1d
subroutine Extend_Real64_1d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=DP), intent(in) :: val(:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real64_1d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real64_1d
subroutine Extend_Int8_2d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I8), intent(in) :: val(:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int8_2d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int8_2d
subroutine Extend_Int16_2d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I16), intent(in) :: val(:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int16_2d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int16_2d
subroutine Extend_Int32_2d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(in) :: val(:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int32_2d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int32_2d
subroutine Extend_Real32_2d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(in) :: val(:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real32_2d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real32_2d
subroutine Extend_Real64_2d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=DP), intent(in) :: val(:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real64_2d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real64_2d
subroutine Extend_Int8_3d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I8), intent(in) :: val(:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int8_3d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int8_3d
subroutine Extend_Int16_3d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I16), intent(in) :: val(:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int16_3d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int16_3d
subroutine Extend_Int32_3d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(in) :: val(:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int32_3d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int32_3d
subroutine Extend_Real32_3d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(in) :: val(:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real32_3d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real32_3d
subroutine Extend_Real64_3d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=DP), intent(in) :: val(:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real64_3d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real64_3d
subroutine Extend_Int8_4d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I8), intent(in) :: val(:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int8_4d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int8_4d
subroutine Extend_Int16_4d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I16), intent(in) :: val(:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int16_4d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int16_4d
subroutine Extend_Int32_4d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(in) :: val(:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int32_4d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int32_4d
subroutine Extend_Real32_4d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(in) :: val(:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real32_4d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real32_4d
subroutine Extend_Real64_4d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=DP), intent(in) :: val(:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real64_4d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real64_4d
subroutine Extend_Int8_5d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I8), intent(in) :: val(:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int8_5d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int8_5d
subroutine Extend_Int16_5d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I16), intent(in) :: val(:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int16_5d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int16_5d
subroutine Extend_Int32_5d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(in) :: val(:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int32_5d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int32_5d
subroutine Extend_Real32_5d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(in) :: val(:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real32_5d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real32_5d
subroutine Extend_Real64_5d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=DP), intent(in) :: val(:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real64_5d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real64_5d
subroutine Extend_Int8_6d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I8), intent(in) :: val(:,:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int8_6d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int8_6d
subroutine Extend_Int16_6d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I16), intent(in) :: val(:,:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int16_6d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int16_6d
subroutine Extend_Int32_6d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
integer(kind=I32), intent(in) :: val(:,:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Int32_6d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Int32_6d
subroutine Extend_Real32_6d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=SP), intent(in) :: val(:,:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real32_6d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real32_6d
subroutine Extend_Real64_6d(self, new_size, offset, dshape, val)
class(H5Dataset) :: self
real(kind=DP), intent(in) :: val(:,:,:,:,:,:)
integer(kind=I32), parameter :: D_RANK=rank(val)
integer(kind=I64), intent(in) :: new_size(D_RANK)
integer(kind=I64), intent(in) :: offset(D_RANK)
integer(kind=I64), intent(in) :: dshape(D_RANK)
integer(kind=I32) :: error
error = Extend_Real64_6d_Dataset(self%parent_id, self%d_name, new_size, offset, dshape, val)
end subroutine Extend_Real64_6d
subroutine defScale(self,dim_name)
class(H5Dataset) :: self
character(len=*), intent(in), optional :: dim_name
integer(kind=I32) :: ierr
integer(kind=I32) :: dim_id, dset_id
dim_id = open_dset(self%parent_id,self%d_name)
if (present(dim_name)) then
ierr = def_scale(dim_id,dim_name)
else
ierr = def_scale(dim_id)
end if
end subroutine defScale
subroutine setScale(self,dim_dset,idx_dim)
class(H5Dataset) :: self
class(H5Dataset), intent(in) :: dim_dset
integer(kind=I32), intent(in) :: idx_dim
integer(kind=I32) :: ierr
integer(kind=I32) :: dim_id, dset_id
dset_id = open_dset(self%parent_id,self%d_name)
dim_id = open_dset(dim_dset%parent_id,dim_dset%d_name)
ierr = set_scale(dset_id,dim_id,idx_dim)
ierr = close_dset(dim_id)
ierr = close_dset(dset_id)
end subroutine setScale
end module H5_OO_mod
|
{"hexsha": "d48e9c3df391bb6a07945df3b7860221976d062f", "size": 65616, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/H5_OO_mod.f90", "max_stars_repo_name": "rjgtorres/oo_hdf", "max_stars_repo_head_hexsha": "486f7cb0ad6581ed134a9a514da88e58c2f8adf7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2017-03-23T00:38:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T21:21:06.000Z", "max_issues_repo_path": "src/H5_OO_mod.f90", "max_issues_repo_name": "rjgtorres/oo_hdf", "max_issues_repo_head_hexsha": "486f7cb0ad6581ed134a9a514da88e58c2f8adf7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-03-19T22:03:00.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-03T16:59:10.000Z", "max_forks_repo_path": "src/H5_OO_mod.f90", "max_forks_repo_name": "rjgtorres/oo_hdf", "max_forks_repo_head_hexsha": "486f7cb0ad6581ed134a9a514da88e58c2f8adf7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-03-19T14:43:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-16T07:59:37.000Z", "avg_line_length": 37.0711864407, "max_line_length": 99, "alphanum_fraction": 0.6524628139, "num_tokens": 18704}
|
#include <iostream>
#include <armadillo>
#include <cmath>
#include <cstdlib>
#include <time.h>
#include <fstream>
using namespace std;
using namespace arma;
void RHO_A_FILL(vec &rho, mat &A, int N,double rhoN); //rho, kind of like a linespace
//A, Tridiagonal matrix
void Maxoff(mat &A, int N,int &k, int &l, double &max); //Finds the max element of a matrix
void Maxoff_test(double &max,int &k,int &l);
void Ortho_test(mat &S,int N);
int main(){
int N = 400; //matrix size; N x N
double rhoN = 60;
double max;
int k,l;
Maxoff_test(max,k,l);
mat A = mat(N,N,fill::zeros); //indexes go from (0) to (N-1)
mat S = mat(N,N,fill::eye);
vec rho = vec(N,fill::zeros);
vec eigen = vec(N);
RHO_A_FILL(rho,A,N,rhoN);
Maxoff(A,N,k,l,max);
int iterations = 0;
double eps = 1E-10;
double tau, t, s, c, il, ik, kk, ll, s_ik, s_il;
double start, finish;
start = clock(); //clock value before eigen solve
while(max > eps){
tau = (A(l,l)-A(k,k))/(double(2)*A(k,l));
if(tau>0){
t = (-tau - sqrt(1.0 + tau*tau));}
else{t = ( -tau + sqrt(1.0 + tau*tau));}
//cosine and sine
c = double(1)/sqrt(1.+t*t);
s = t*c;
//Jacobi rotating A round theta in N-dim space
for(int i = 0; i<N; i++){
if ((i != k) && (i !=l)){
ik = A(i,k)*c - A(i,l)*s;
il = A(i,l)*c + A(i,k)*s;
A(i,k) = ik;
A(k,i) = ik;
A(i,l) = il;
A(l,i) = il;
}
s_ik = S(i,k);
s_il = S(i,l);
S(i,k) = c*s_ik - s*s_il;
S(i,l) = c*s_il + s*s_ik;
}
kk = A(k,k)*c*c - 2.*A(k,l)*c*s + A(l,l)*s*s;
ll = A(l,l)*c*c + 2.*A(k,l)*c*s + A(k,k)*s*s;
A(k,k) = kk;
A(l,l) = ll;
A(k,l) = 0;
A(l,k) = 0;
iterations++;
Maxoff(A,N,k,l,max);
//Ortho test
if (iterations%N*2==0){
Ortho_test(S,N);
}
} //end of while
finish = clock(); //clock value after eigen solve
//[eigen] is now eigenvalues
for(int i = 0; i<N;i++){
eigen(i) = A(i,i);
}
eigen.print();
double time = (finish -start)/CLOCKS_PER_SEC/iterations;
fstream outfile;
outfile.open("eigenvectors5.dat",ios::out);
for (int i = 0; i<N; i++){
for (int j = 0; j<N; j++){
if(j%N == 0){outfile<<endl;}
outfile << S(i,j)<<" ";
}
}
outfile.close();
fstream utfile;
utfile.open("eigenvalues5.dat",ios::out);
for (int i = 0; i<N; i++){
utfile << eigen(i)<<endl;
}
utfile.close();
//fstream Outfile;
//Outfile.open("steps.dat",ios::out);
//Outfile << "N epsilon steps step_time"<<endl;
//Outfile <<N <<" "<<eps<<" "<<iterations<<" "<<time;
//Outfile<<endl;
//Outfile.close();
} //end of main
void RHO_A_FILL(vec &rho, mat &A, int N,double rhoN){
double h = rhoN/(N);
rho(0) = 0.000001;
for(int i = 0; i < N;i++){
if(i > 0) rho(i) = i*h;
A(i,i) = 2./(h*h)+ double(25)*rho(i)*rho(i)+(double(1)/rho(i));
if(i<N-1){
A(i+1,i) = -1./(h*h);
A(i,i+1) = -1./(h*h);
}
}
}
void Maxoff(mat &A, int N,int &k, int &l, double &max){
N = A.n_rows;
max = 0;
for(int i = 0; i < N ; i++){
for(int j = i+1; j < N ; j++){
if ( A(i,j)*A(i,j) > max){
max =A(i,j)*A(i,j);
k = i;
l = j;
}
}
}
}
////////////Unit tests
void Maxoff_test(double &max,int &k,int &l){
mat matrix = mat(5,5,fill::zeros);
matrix(0,3) = -9;
matrix(3,0) = -9;
matrix(4,4) = -3;
matrix(3,4) = -1;
matrix(4,3) = -1;
Maxoff(matrix,5,k,l,max);
if(abs(max-81) < 1E-13){cout << "Maxoff function passes the test"<<endl;}
else{cout<<"Maxoff might not be working properly"<<endl;}
}
void Ortho_test(mat &S,int N){
double error = 0;
error = dot(S.col(int(double(rand())/RAND_MAX*N)),S.col(int(double(rand())/RAND_MAX*N)));
if (abs(error) < 1e-10){
cout << "ortho pass" << endl;}
else{cout<<"ortho fail" << endl;}
}
|
{"hexsha": "de0292302ec5175b48f8a9c3707eb398a3dc22ce", "size": 4506, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "project2/code-joseph/jacobi_interact.cpp", "max_stars_repo_name": "frxstrem/fys3150", "max_stars_repo_head_hexsha": "35c0310f48fca07444ec5924267bf646d121b147", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project2/code-joseph/jacobi_interact.cpp", "max_issues_repo_name": "frxstrem/fys3150", "max_issues_repo_head_hexsha": "35c0310f48fca07444ec5924267bf646d121b147", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project2/code-joseph/jacobi_interact.cpp", "max_forks_repo_name": "frxstrem/fys3150", "max_forks_repo_head_hexsha": "35c0310f48fca07444ec5924267bf646d121b147", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8965517241, "max_line_length": 98, "alphanum_fraction": 0.4454061252, "num_tokens": 1431}
|
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import numpy as np
import pandas as pd
import datetime
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
import sys
""" TODO:
- Add possibility to color different segments of the time series
"""
class TimeSeriesVisualizer():
def __init__(self, fig, rect, repo, callbacks):
register_matplotlib_converters()
plt.ion()
# the preprocessing window slide step
self.step = 10
# show all entities or just selected ones
self.show_all = True
self.fig = fig
# add 10% margin to rect [left, bottom, width, height]
self.rect = [rect[0]+rect[2]*0.065, rect[1]+rect[3]*0.05, rect[2]*0.9, rect[3]*0.9]
self.repo = repo
self.callbacks = callbacks
if 'select_entities' not in callbacks:
callbacks['select_entities'] = []
if 'select_attributes' not in callbacks:
callbacks['select_attributes'] = []
self.callbacks['select_entities'].append(self.set_selected_entities)
self.callbacks['select_attributes'].append(self.set_selected_attributes)
self.draginfo = False
self.indexattr = False
self.timeattr = False
self.entityattr = False
self.classattr = False
self.anomalyattr = False
self.classcolfunc = None
self.anomalycolfunc = None
self.anomaly_threshold = 0.5
self.all_entities = []
self.selectedEntities = []
self.selectedFeatures = []
self.zoompropmin = 0.0
self.zoompropmax = 1.0
#self.timeWindowInHours = self.repo.get_time_window_seconds()/60/60
# Create a list with rectangles for the plots
rectangleList = self._getRectangleList(self.rect, len(self.selectedFeatures))
# Create dict to hold all the feature information
self.features = {}
# Create anomalies dict to hold all the deviation plot info
self.anomalies = { 'axes': fig.add_axes(rectangleList[0]),
'xMin': np.inf,#datetime.datetime.now(),
'xMax': -np.inf,#datetime.datetime(1900, 1, 1),
'yMin': sys.maxsize,
'yMax': -sys.maxsize - 1,
'entities': [] }
self.anomalies['axes'].set_ylabel('')
self.anomalies['axes'].set_title(self.anomalyattr if self.anomalyattr else "", fontdict={'fontSize': 10}, loc='left')
self.anomalies['axes'].tick_params(labelsize=10)
def add_entity(self, entity):
# Only add entity when its not already in list
if not entity in self.selectedEntities:
# Add entity to list
self.selectedEntities.append(entity)
# Add entity to anomaly plot
plotLine = self.anomalies['axes'].plot([], [])[0] #.scatter([], [])
plotLabel = 'Entity - ' + str(entity)
# Append entity anomaly plot info to list
self.anomalies['entities'].append({'entity': entity,
'label': plotLabel,
'line': plotLine})
#
# # Add the entity to the legend (somehow the get_legend_handles_labels() did not return anything,
# # thats why the get_legend() workaround is used)
# currentLegend = self.anomalies['axes'].get_legend()
# legendLabels = [] if currentLegend is None else [str(x._text) for x in currentLegend.texts]
# legendPlotLines = [] if currentLegend is None else currentLegend.legendHandles
#
# # Append the label and the plotline to the legend
# legendPlotLines = np.append(legendPlotLines, plotLine)
# legendLabels = np.append(legendLabels, plotLabel)
#
# # Set the legend for the entities / features
# self.anomalies['axes'].legend(legendPlotLines, legendLabels)
# Add entity to all feature plots
for feature in self.features:
plotInfo = { 'entity': entity,
'line': self.features[feature]['axes'].plot([], [])[0] } #.scatter([], [])}
# Match the entity color in the anomaly plot
# plotInfo['line'].set_color(plotLine.get_color())
# Add entity plot info to feature entity list
self.features[feature]['entities'].append(plotInfo)
def remove_entity(self, entity):
# Only if entity is in list
if entity in self.selectedEntities:
# Remove entity from list
self.selectedEntities.remove(entity)
# Remove entity entry from from all the feature plots
for feature in self.features:
for index, entityPlotInfo in enumerate(self.features[feature]['entities']):
if entityPlotInfo['entity'] == entity:
# Remove line from plot
#self.features[feature]['axes'].lines.remove(entityPlotInfo['line'])
# Remove list entry
self.features[feature]['entities'].pop(index)
break
# # Prepare to remove the entity from the legend (somehow the get_legend_handles_labels() did not return anything,
# # thats why the get_legend() workaround is used)
# currentLegend = self.anomalies['axes'].get_legend()
# legendLabels = [] if currentLegend is None else [str(x._text) for x in currentLegend.texts]
# legendPlotLines = [] if currentLegend is None else currentLegend.legendHandles
#
# Remove entity from anomaly plot
for index, entityPlotInfo in enumerate(self.anomalies['entities']):
if entityPlotInfo['entity'] == entity:
# Remove line from legend and plot (with workaround to remove plotline)
# legendPlotLines.pop(legendLabels.index(entityPlotInfo['label']))
# legendLabels.remove(entityPlotInfo['label'])
#self.anomalies['axes'].lines.remove(entityPlotInfo['line'])
# Remove list entry
self.anomalies['entities'].pop(index)
break
# Set the legend for the entities / features
# self.anomalies['axes'].legend(legendPlotLines, legendLabels)
def add_feature(self, feature):
# Only if feature is not already show
if not feature in self.features:
# Add the feature to the selected features
self.selectedFeatures.append(feature)
# Add a new feature dict in self.features, the None values will be replaced later
self.features[feature] = { 'axes': None,
'feature': feature,
'showTicks': None,
'xMin': np.inf,#datetime.datetime.now(),
'xMax': -np.inf,#datetime.datetime(1900, 1, 1),
'yMin': sys.maxsize,
'yMax': -sys.maxsize - 1,
'entities': [] }
# Add all the selected entities to the new feature, the None value will be replaced later
for entity in self.selectedEntities:
plotInfo = { 'entity': entity,
'line': None }
self.features[feature]['entities'].append(plotInfo)
# Get new rectangles for the feature plots
rectangleList = self._getRectangleList(self.rect, len(self.selectedFeatures))
# Replace axes for the features based on new rectangle layout
for index, feature in enumerate(self.selectedFeatures):
if self.features[feature]['axes'] != None:
self.features[feature]['axes'].remove()
self.features[feature]['axes'] = self.fig.add_axes(rectangleList[index + 1])
self.features[feature]['showTicks'] = False #(index == 0)
self.features[feature]['axes'].set_ylabel('')
if not self.features[feature]['showTicks']:
self.features[feature]['axes'].set_xticklabels([])
self.features[feature]['axes'].set_title(feature, fontdict={'fontSize': 10}, loc='left')
# Rebuild the entity list of the feature plot
for entityIndex, entity in enumerate(self.selectedEntities):
plotInfo = { 'entity': entity,
'line': self.features[feature]['axes'].plot([], [])[0] } # .scatter([], [])}
# Match the entity color in the anomalies plot
anomalyEntityPlotInfo = next(item for item in self.anomalies['entities'] if item['entity'] == entity)
# plotInfo['line'].set_color(anomalyEntityPlotInfo['line'].get_color())
self.features[feature]['entities'][entityIndex] = plotInfo
def remove_feature(self, feature):
# Only if feature is shown
if feature in self.features:
# Remove the feature
self.features[feature]['axes'].remove()
del self.features[feature]
self.selectedFeatures.remove(feature)
# Get new rectangles for the features left
rectangleList = self._getRectangleList(self.rect, len(self.selectedFeatures))
# Replace axes for the still visible features based on new rectangle layout
for index, feature in enumerate(self.selectedFeatures):
self.features[feature]['axes'].remove()
self.features[feature]['axes'] = self.fig.add_axes(rectangleList[index + 1])
self.features[feature]['showTicks'] = False #(index == 0)
self.features[feature]['axes'].set_ylabel('')
if not self.features[feature]['showTicks']:
self.features[feature]['axes'].set_xticklabels([])
self.features[feature]['axes'].set_title(feature, fontdict={'fontSize': 10}, loc='left')
# Rebuild the entity list of the feature plot
for entityIndex, entity in enumerate(self.selectedEntities):
plotInfo = { 'entity': entity,
'line': self.features[feature]['axes'].plot([], [])[0] } #.scatter([], [])}
# Match the entity color in the anomalies plot
anomalyEntityPlotInfo = next(item for item in self.anomalies['entities'] if item['entity'] == entity)
# plotInfo['line'].set_color(anomalyEntityPlotInfo['line'].get_color())
self.features[feature]['entities'][entityIndex] = plotInfo
def _get_cols(self, ano, cl):
cols = [None]*len(cl)
for i in range(len(cl)):
if ano[i] is not None and ano[i] > self.anomaly_threshold and self.anomalycolfunc is not None:
cols[i] = self.anomalycolfunc(ano[i])
elif cl[i] is not None and self.classcolfunc is not None:
cols[i] = self.classcolfunc(cl[i])
else:
cols[i] = hsl_color(0.5, 0.5, -0.5)
return cols
def _to_lines(self, points, colors):
if len(points) != len(colors):
print('Error!! Points and colors are not the same length')
return
x0, y0 = points[0]
lines=[]
new_colors=[]
for i in range(1, len(points)):
x1, y1 = points[i]
#skip line if missing data in between
if x1-x0 > self.step:
x0,y0 = x1,y1
continue
lines.append( ((x0,y0) , (x1,y1)) )
new_colors.append(colors[i])
x0,y0 = x1,y1
return lines, new_colors
def redraw_features(self):
# this will be a lits of lists in the form [[time, entity, anomaly, class, sel_feat_1, sel_feat_2, ...],...]
#all_data = self.repo.aget_values([self.anomalyattr, self.classattr]+self.selectedFeatures, True)
all_data = self.repo.current_data()
ent_dat = {}
for d in all_data:
t = mdates.epoch2num(d[self.timeattr]) if self.timeattr is not False else d[self.indexattr]
e = d[self.entityattr] if self.entityattr is not False else True
if e not in self.all_entities:
self.all_entities.append(e)
v = [t, e] + [d[f] for f in [self.anomalyattr, self.classattr]+self.selectedFeatures]
if e not in ent_dat.keys():
ent_dat[e] = [v]
else:
ent_dat[e].append(v)
# transpose for easy access to features
for k in ent_dat:
ent_dat[k] = list(zip(*ent_dat[k]))
if self.show_all:
if len(self.selectedEntities) < len(self.all_entities):
self.set_selected_entities([])
#print(all_data)
#print(ent_dat)
#all_data[0] = [ datetime.fromtimestamp(t) for t in all_data[0]]
# for all selected features
for f in enumerate(self.selectedFeatures,start=4): # start=4 because 0-time, 1-entity, 2-anomaly, 3-class, 4-features
# clear old data
self.features[f[1]]['axes'].clear()
self.features[f[1]]['axes'].set_title(f[1], fontdict={'fontSize': 10}, loc='left')
self.features[f[1]]['axes'].set_ylabel('')
self.features[f[1]]['axes'].set_xticklabels([])
# and each entity in feature
for entityPlotInfo in self.features[f[1]]['entities']:
entity = entityPlotInfo['entity']
dat = ent_dat[entity]
if(len(dat) == 0):
continue
p = list(zip(dat[0], dat[f[0]]))
c = self._get_cols(dat[2],dat[3])
l,cc = self._to_lines(p,c)
colored_lines = LineCollection(l, colors=cc, linewidths=(3,))
self.features[f[1]]['axes'].add_collection(colored_lines)
#entityPlotInfo['line'].set_offsets( p )
#entityPlotInfo['line'].set_color(c)
self._reAdjustMultiPlotLimits(self.features[f[1]], min(dat[0]), min(dat[f[0]]))
self._reAdjustMultiPlotLimits(self.features[f[1]], max(dat[0]), max(dat[f[0]]))
#Update the deviation plot
self.anomalies['axes'].clear()
self.anomalies['axes'].set_ylabel('')
self.anomalies['axes'].set_title(self.anomalyattr if self.anomalyattr else "", fontdict={'fontSize': 10}, loc='left')
for entityPlotInfo in self.anomalies['entities']:
entity = entityPlotInfo['entity']
dat = ent_dat[entity]
if len(dat) > 0:
p = list(zip(dat[0], dat[2]))
c = self._get_cols(dat[2], dat[3])
l,cc = self._to_lines(p,c)
colored_lines = LineCollection(l, colors=cc, linewidths=(3,))
self.anomalies['axes'].add_collection(colored_lines)
#entityPlotInfo['line'].set_offsets( list(zip(dat[0], dat[2])) )
#entityPlotInfo['line'].set_color(self._get_cols(dat[3]))
dd = [d for d in dat[2] if d is not None]
if dd:
self._reAdjustMultiPlotLimits(self.anomalies, min(dat[0]), min(dd))
self._reAdjustMultiPlotLimits(self.anomalies, max(dat[0]), max(dd))
if len(dat)>0 and self.timeattr is not False:
ax = self.anomalies['axes']
#ax.xaxis.set_major_locator(mdates.MinuteLocator(byminute=[0,15,30,45]))
#ax.xaxis.set_minor_locator(mdates.MinuteLocator())
monthFmt = mdates.DateFormatter("%H:%M")
ax.xaxis_date()
ax.xaxis.set_major_formatter(monthFmt)
def handle_data(self, dict_msg):
entity = (dict_msg[self.entityattr] if self.entityattr is not False else True)
if not entity in self.all_entities:
self.all_entities.append(entity)
# Only act if the entity is in the entity list
if entity in self.selectedEntities:
if self.timeattr is not False:
newXvalue = mdates.epoch2num(dict_msg[self.timeattr])
#newXvalue = datetime.datetime.fromtimestamp(dict_msg[self.timeattr])
else:
newXvalue = dict_msg[self.indexattr]
# For every selected feature
for feature in self.features:
newYvalue = dict_msg[feature]
featurePlotInfo = self.features[feature]
# Get the entity information from the feature plot
entityPlotInfo = next(item for item in featurePlotInfo['entities'] if item["entity"] == entity)
newXvalues = np.append(entityPlotInfo['line'].get_xdata(), newXvalue)
newYvalues = np.append(entityPlotInfo['line'].get_ydata(), newYvalue)
self._set_data(entityPlotInfo['line'], newXvalues, newYvalues)
self._reAdjustMultiPlotLimits(featurePlotInfo, newXvalue, newYvalue)
self._removeDataNotVisible(featurePlotInfo['xMin'], entityPlotInfo['line'])
# Update the deviation plot of the entity
if self.anomalyattr is not False and dict_msg[self.anomalyattr] is not None:
anomalyPlotInfo = next(item for item in self.anomalies['entities'] if item['entity'] == entity)
xx = np.append(anomalyPlotInfo['line'].get_xdata(), newXvalue)
yy = np.append(anomalyPlotInfo['line'].get_ydata(), dict_msg[self.anomalyattr])
self._set_data(anomalyPlotInfo['line'], xx, yy)
self._reAdjustMultiPlotLimits(self.anomalies,
newXvalue,
dict_msg[self.anomalyattr])
self._removeDataNotVisible(self.anomalies['xMin'], anomalyPlotInfo['line'])
self.anomalies['axes'].set_ylim(0, 1)
def _set_data(self, line, xx, yy):
line.set_xdata(xx)
line.set_ydata(yy)
def _removeDataNotVisible(self, xMin, line):
pass
# Remove the data that is not shown to improve memory use
#line.set_xdata([x for x in line.get_xdata() if x >= xMin])
#line.set_ydata(line.get_ydata()[-len(line.get_xdata()):])
def _reAdjustMultiPlotLimits(self, plotInfo, x, y):
if x is not None and y is not None:
# Determine the new min and max values
if x < plotInfo['xMin']:
plotInfo['xMin'] = x
if x > plotInfo['xMax']:
plotInfo['xMax'] = x
if y < plotInfo['yMin']:
plotInfo['yMin'] = y
if y > plotInfo['yMax']:
plotInfo['yMax'] = y
# Readjusting the x axis according the requested time window
#xMax = datetime.fromtimestamp(self.repo.get_time_now())#plotInfo['xMax']
#xMin = xMax - timedelta(hours = self.timeWindowInHours)
xMax = plotInfo['xMax']
xMin = plotInfo['xMin']
# Set the x axis limits
if xMin != xMax:
plotInfo['axes'].set_xlim(xMin + (xMax-xMin)*self.zoompropmin, xMin + (xMax-xMin)*self.zoompropmax)
# Adjust the y min and y max
ydiff = plotInfo['yMax'] - plotInfo['yMin']
if (ydiff > 0.0):
yMin = plotInfo['yMin'] - 0.05 * ydiff
yMax = plotInfo['yMax'] + 0.05 * ydiff
else:
yMin = plotInfo['yMin'] - 0.1
yMax = plotInfo['yMax'] + 0.1
plotInfo['axes'].set_ylim(yMin, yMax)
def _getRectangleList(self, rect, numberOfFeatures):
# Holds the list with rectangles to be returned
rectangles = []
# Just for easier reading
margin = 0.01
areaLeft = rect[0]
areaBottom = rect[1]
areaWidth = rect[2]
areaHeight = rect[3]
# The deviation rectangle takes half the space on the bottom
rectangles.append([areaLeft, areaBottom, areaWidth, areaHeight / 4 - margin])
if numberOfFeatures > 0:
# The top half of the space is split among the feature rectangles
featureRectangleHeight = (areaHeight*3 / 4 / numberOfFeatures) - margin
previousBottom = 0
for featureOrder in range(0, numberOfFeatures):
if featureOrder == 0:
featureBottom = (areaBottom + (areaHeight / 4)) + (1.5 * margin)
else:
featureBottom = previousBottom + featureRectangleHeight + (2.5 * margin)
rectangles.append([areaLeft, featureBottom, areaWidth, featureRectangleHeight])
previousBottom = featureBottom
return rectangles
def set_selected_entities(self,entity_list):
if not entity_list:
#the list is empty which means all entities
entity_list = self.all_entities
self.show_all = True
else:
self.show_all = False
toadd = [x for x in entity_list if x not in self.selectedEntities]
toremove = [x for x in self.selectedEntities if x not in entity_list]
for i in toadd:
self.add_entity(i)
for i in toremove:
self.remove_entity(i)
self.redraw_features()
def set_selected_attributes(self,attribute_list):
features_to_show = 3
if attribute_list:
toadd = [x for x in attribute_list if x not in self.selectedFeatures]
toremove = [x for x in self.selectedFeatures if x not in attribute_list]
n_remove = len(toadd)+len(self.selectedFeatures)-features_to_show
for i in toadd:
self.add_feature(i)
for i in range(0,n_remove):
self.remove_feature(toremove[i])
self.redraw_features()
def default_params(self):
return { 'index_attribute': False,
'time_attribute': False,
'entity_attribute': False,
'class_attribute': False,
'class_color': None,
'anomaly_attribute': False,
'anomaly_color': None,
'anomaly_threshold': 0.5 }
def set_params(self, dic):
if 'time_attribute' in dic:
self.timeattr = dic['time_attribute']
if 'index_attribute' in dic:
self.indexattr = dic['index_attribute']
if 'entity_attribute' in dic:
self.entityattr = dic['entity_attribute']
if 'class_attribute' in dic:
self.classattr = dic['class_attribute']
if 'class_color' in dic:
self.classcolfunc = dic['class_color']
if 'anomaly_attribute' in dic:
self.anomalyattr = dic['anomaly_attribute']
if 'anomaly_color' in dic:
self.anomalycolfunc = dic['anomaly_color']
if 'anomaly_threshold' in dic:
self.anomaly_threshold = dic['anomaly_threshold']
def redraw_model(self, moddict):
pass
def scroll_event(self, event):
# p, pmin, pmax, fact -> qmin, qmax
# (qmx-qmin) = (pmax-pmin)*fact
# qmin + p*(qmax-qmin) = pmin + p*(pmax-pmin)
ax = self.anomalies['axes']
tr = ax.transAxes.inverted().transform((event.x,event.y))
pp = tr[0]
if event.button == "up":
fact = 0.8
else:
fact = 1.25
pdiff = (self.zoompropmax - self.zoompropmin)
qdiff = min(1.0, (self.zoompropmax - self.zoompropmin)*fact)
self.zoompropmin = min(1.0, max(0.0, self.zoompropmin + pp*(pdiff-qdiff)))
self.zoompropmax = max(0.0, min(1.0, self.zoompropmin + qdiff))
for feature in self.features:
self._reAdjustMultiPlotLimits(self.features[feature], None, None)
self._reAdjustMultiPlotLimits(self.anomalies, None, None)
plt.draw()
def button_press_event(self, event):
if event.button == 1 and event.key == None:
ax = self.anomalies['axes']
trans = ax.transAxes.inverted()
pp = trans.transform((event.x,event.y))[0]
if pp >= 0.0 and pp <= 1.0:
self.draginfo = (trans, pp, self.zoompropmin, self.zoompropmax)
else:
self.draginfo = False
def motion_notify_event(self, event):
if self.draginfo is not False:
(trans, oldpp, oldzmin, oldzmax) = self.draginfo
pp = max(0.0, min(1.0, trans.transform((event.x,event.y))[0]))
diff = -(pp-oldpp)*(oldzmax-oldzmin)
self.zoompropmin = min(1.0-(oldzmax-oldzmin), max(0.0, oldzmin + diff))
self.zoompropmax = max(0.0+(oldzmax-oldzmin), min(1.0, oldzmax + diff))
for feature in self.features:
self._reAdjustMultiPlotLimits(self.features[feature], None, None)
self._reAdjustMultiPlotLimits(self.anomalies, None, None)
plt.draw()
def button_release_event(self, event):
if event.button == 1 and self.draginfo is not False:
self.draginfo = False
|
{"hexsha": "11f274b04f96d2de39efa6c13a439af1ff1d52cc", "size": 26826, "ext": "py", "lang": "Python", "max_stars_repo_path": "bidaf/TimeSeriesVisualizer.py", "max_stars_repo_name": "RI-SE/BIDAF", "max_stars_repo_head_hexsha": "ebb7c2f96ab65cb9cec0859f49d9dd951b93874c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-27T10:45:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-30T07:46:16.000Z", "max_issues_repo_path": "bidaf/TimeSeriesVisualizer.py", "max_issues_repo_name": "RI-SE/BIDAF", "max_issues_repo_head_hexsha": "ebb7c2f96ab65cb9cec0859f49d9dd951b93874c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bidaf/TimeSeriesVisualizer.py", "max_forks_repo_name": "RI-SE/BIDAF", "max_forks_repo_head_hexsha": "ebb7c2f96ab65cb9cec0859f49d9dd951b93874c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4462025316, "max_line_length": 126, "alphanum_fraction": 0.5430179676, "include": true, "reason": "import numpy", "num_tokens": 5819}
|
#include <boost/metaparse/get_col.hpp>
|
{"hexsha": "067a541dd8775c1f7f1c9be9643f048e753fa10d", "size": 39, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_metaparse_get_col.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_metaparse_get_col.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_metaparse_get_col.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 19.5, "max_line_length": 38, "alphanum_fraction": 0.7948717949, "num_tokens": 10}
|
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import GPT2TokenizerFast, GPT2Model
from sklearn.preprocessing import MultiLabelBinarizer
from mitnewsclassify2 import tfidf, tfidf_bi, download
import os
import gc
import gzip
import pickle
import csv
def print_f(*args):
print(*args, flush=True)
print_f('All imports seem good!')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print_f('Using device:', device)
# tokenizing
train_size = None # None for full dataset
MODEL = 'gpt2'
NR = 'first'
class EmbeddedDataset(Dataset):
def __init__(self, X):
self.X = X
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.X[idx], idx
print_f('Loading NYT dataset...')
# open the train data given to us by Max
with gzip.open('../data/NYTcorpus_train.p.gz', mode='r') as f:
train_data = pickle.load(f)
print_f('Data loaded.')
# train and test data labels are coded in numbers,
# but the models predict human-readable labels,
# so we need to re-map these.
# Let's use one of the files downloaded by the mitnewsclassify package
with open('../data/nyt-theme-tags.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
tags_dict = {row['tags_id']: row['tag'] for row in reader}
# extract actual article texts from data samples
train_articles = [d[2] for d in train_data]
# map the number-coded labels to human-readable labels
train_labels_lists = [list(map(tags_dict.get, d[3:])) for d in train_data]
X_train, y_train = train_articles[:train_size], train_labels_lists[:train_size]
print_f('X_train', len(X_train))
print_f('y_train', len(y_train))
# start actual vectorization
dataset, output_path = X_train, y_train, f'/gpfs/space/projects/stud_nlp_share/kristjan/ensemble/embedded_train_FULL_ensemble'
####
chunk_size = 50_000
total_chunks = len(dataset) // chunk_size + 1
print_f('total chunks', total_chunks)
iterator = DataLoader(dataset, batch_size=chunk_size)
print_f(f'Vectorizing dataset for ', output_path)
X_train = []
chunk_id = 1
print_f('Starting at chunk id', chunk_id)
for i, batch in enumerate(iterator):
inputs, attention_mask = batch
real_batch_size = inputs.shape[0]
inputs = inputs.to(device)
attention_mask = attention_mask.to(device)
with torch.no_grad():
output = model(input_ids=inputs, attention_mask=attention_mask)
output = output[0]
# indices of last non-padded elements in each sequence
# adopted from https://github.com/huggingface/transformers/blob/master/src/transformers/models/gpt2/modeling_gpt2.py#L1290-L1302
last_non_padded_ids = torch.ne(inputs, tokenizer.pad_token_id).sum(-1) - 1
embeddings = output[range(real_batch_size), last_non_padded_ids, :]
X_train += embeddings.detach().cpu()
if len(X_train) >= chunk_size:
saved_dataset = EmbeddedDataset(torch.stack(X_train))
torch.save(saved_dataset, f'{output_path}_chunk{chunk_id}of{total_chunks}.pt', pickle_protocol=4)
X_train = []
chunk_id += 1
# take care of what's left after loop
if len(X_train) > 0:
saved_dataset = EmbeddedDataset(torch.stack(X_train))
torch.save(saved_dataset, f'{output_path}_chunk{chunk_id}of{total_chunks}.pt', pickle_protocol=4)
print_f('All done!')
|
{"hexsha": "22aa5463592d13a85eb06dca77cc5fb3424e7d64", "size": 3395, "ext": "py", "lang": "Python", "max_stars_repo_path": "NYT/ensemble/vectorize-first.py", "max_stars_repo_name": "kristjanr/ut-mit-news-classify", "max_stars_repo_head_hexsha": "d85e32256f36d4a22d727e678adfaa7e0a4a3108", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NYT/ensemble/vectorize-first.py", "max_issues_repo_name": "kristjanr/ut-mit-news-classify", "max_issues_repo_head_hexsha": "d85e32256f36d4a22d727e678adfaa7e0a4a3108", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NYT/ensemble/vectorize-first.py", "max_forks_repo_name": "kristjanr/ut-mit-news-classify", "max_forks_repo_head_hexsha": "d85e32256f36d4a22d727e678adfaa7e0a4a3108", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-21T09:18:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-21T09:18:59.000Z", "avg_line_length": 28.7711864407, "max_line_length": 132, "alphanum_fraction": 0.7310751105, "include": true, "reason": "import numpy", "num_tokens": 861}
|
export Closed, Partial, Open, closure
"""
Trait to indicate that a binary operation • is closed over set S. Only methods
of • with the signature •(x::S, y::S) are to be considered.
The definition of closed is that •(x::S, y::S) shall not throw an error, and
•(x::S, y::S) shall return a result of type S.
"""
abstract Closed
"""
Trait to indicate that a binary operation • is a partial function over set S.
Only methods of • with the signature •(x::S, y::S) are to be considered.
The definition of a partial function in this context is that •(x::S, y::S) shall
either throw an error or return a result of type S.
"""
abstract Partial
"""
Trait to indicate that a binary operation • is not known to be closed or to be a
partial function over set S. Note that this trait does not imply the existence
of a counterexample to closure.
"""
abstract Open
"""
Return the closure trait of the set S under the binary operation •.
"""
closure(::Type, ::Function) = Open
# numbers should be closed under + and *
closure{N<:Number}(::Type{N}, ::typeof(+)) = Closed
closure{N<:Number}(::Type{N}, ::typeof(*)) = Closed
# Strings are closed under *
closure(::Type{String}, ::typeof(*)) = Closed
|
{"hexsha": "207d5c551dc5ef92b756e5baa180471ea6b08d1b", "size": 1187, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/closure.jl", "max_stars_repo_name": "TotalVerb/AlgebraicTraits.jl", "max_stars_repo_head_hexsha": "7dc81229d31d2c9afc11003398c6c1fdd3468cde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-07-19T02:33:47.000Z", "max_stars_repo_stars_event_max_datetime": "2016-07-19T02:33:47.000Z", "max_issues_repo_path": "src/closure.jl", "max_issues_repo_name": "TotalVerb/AlgebraicTraits.jl", "max_issues_repo_head_hexsha": "7dc81229d31d2c9afc11003398c6c1fdd3468cde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/closure.jl", "max_forks_repo_name": "TotalVerb/AlgebraicTraits.jl", "max_forks_repo_head_hexsha": "7dc81229d31d2c9afc11003398c6c1fdd3468cde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4358974359, "max_line_length": 80, "alphanum_fraction": 0.7026116259, "num_tokens": 310}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scipy import stats
from matplotlib import pyplot as plt
from pandas import DataFrame
import numpy as np
from abra.utils import dict_to_object
from abra.stats import Samples
NPTS = 100
LABEL_Y_OFFSET_FACTOR = 30.
COLORS = dict_to_object(
{
"blue": "#4257B2",
"light_blue": "#A1C4FD",
"cyan": "#3CCFCF",
"green": "#388E34",
"light_green": "#28CC7D",
"dark_green": "#006060",
"yellow": "#FFCD1F",
"salmon": "#FF725B",
"red": "#FB3640",
"dark_red": "#AE2024",
"purple": "#8842C0",
"gray": "#687174",
"dark_gray": "#455357",
"light_gray": "#C0CACE",
"brown": "#665000"
}
)
CONTROL_COLOR = COLORS.blue
VARIATION_COLOR = COLORS.green
DIFF_COLOR = COLORS.dark_gray
RESULTS_FIGSIZE = (15, 10)
class Plottable(object):
def __init__(self, label=None, color=None):
self.label = label
self.color = color
class Pdf(Plottable):
"""
Base class for plotting probability density functions.
"""
def __init__(self, fill=True, *args, **kwargs):
super(Pdf, self).__init__(*args, **kwargs)
self.fill = fill
def density(self, xs):
"""
Evaluate
"""
raise NotImplementedError("Implement Me")
def xgrid(self):
"""
Return the default x-values for plotting
"""
raise NotImplementedError("Implement Me")
def ppf(self, x):
return self.dist.ppf(x)
def cdf(self, x):
return self.dist.cdf(x)
def get_series(self):
xs = self.xgrid().flatten()
ys = self.density(xs)
return xs, ys
def plot(self, **plot_args):
xs, ys, = self.get_series()
plt.plot(xs, ys, label=self.label, color=self.color, **plot_args)
if self.fill:
self.plot_area(xs, ys)
def plot_area(self, xs=None, ys=None, color=None, alpha=.25, label=None):
xs = self.xgrid().flatten() if xs is None else xs
ys = self.density(xs) if ys is None else ys
color = self.color if color is None else color
plt.fill_between(xs, ys, color=color, alpha=alpha, label=label)
def sample(self, size):
return self.dist.rvs(size=size)
class KdePdf(Pdf):
"""
Estimate the shape of a PDF using a kernel density estimate.
"""
def __init__(self, samples, *args, **kwargs):
super(KdePdf, self).__init__(*args, **kwargs)
self.kde = stats.gaussian_kde(samples)
low = min(samples)
high = max(samples)
self._xgrid = np.linspace(low, high, NPTS + 1)
def density(self, xs):
return self.kde.evaluate(xs)
def xgrid(self):
return self._xgrid
class Pdfs(object):
"""
Plot a sequence of Pdf instances.
"""
def __init__(self, pdfs):
self.pdfs = pdfs
def plot(self):
# labels = []
for p in self.pdfs:
p.plot()
plt.legend()
class Gaussian(Pdf):
"""
Plot a Gaussian PDF
"""
def __init__(self, mean=0., std=1., *args, **kwargs):
super(Gaussian, self).__init__(*args, **kwargs)
self.mean = mean
self.std = std
self.dist = stats.norm(loc=mean, scale=std)
def density(self, xs):
return self.dist.pdf(xs)
def xgrid(self):
_min = self.mean - 4 * self.std,
_max = self.mean + 4 * self.std
return np.linspace(_min, _max, NPTS + 1)
class Pmf(Plottable):
"""
Base class for plotting probability mass functions.
"""
def density(self, xs):
raise NotImplementedError("Implement Me")
def xgrid(self):
"""
Return the default x-values for plotting
"""
raise NotImplementedError("Implement Me")
def get_series(self):
xs = self.xgrid()
ys = self.density(xs)
return xs, ys
def plot(self, plot_type='step', **plot_args):
"""
Parameters
---------
plot_type: str
The type of plot mode to use, can one of matplotlib's default plot
types (e.g. 'bar', 'plot', 'scatter')
"""
xs, ys, = self.get_series()
plotfun = getattr(plt, plot_type)
plotfun(xs, ys, label=self.label, color=self.color, **plot_args)
def sample(self, size):
return self.dist.rvs(size=size)
class Binomial(Pmf):
"""
Plot a Binomial PMF
"""
def __init__(self, n=20, p=.5, *args, **kwargs):
super(Binomial, self).__init__(*args, **kwargs)
self.n = n
self.p = p
self.dist = stats.binom(n, p)
def density(self, xs):
return self.dist.pmf(xs)
def xgrid(self):
return np.arange(0, self.n)
class Bernoulli(Pmf):
"""
Plot a Bernoulli PDF
"""
def __init__(self, plot_type='bar', p=0.5, *args, **kwargs):
super(Bernoulli, self).__init__(*args, **kwargs)
self.plot_type = plot_type
self.p = p
self.dist = stats.bernoulli(p)
def density(self, xs):
return self.dist.pmf(xs)
def xgrid(self):
return np.linspace(0., 1., 2)
class Poisson(Pmf):
"""
Plot a Binomial PMF
"""
def __init__(self, mu=1, *args, **kwargs):
super(Poisson, self).__init__(*args, **kwargs)
self.mu = mu
self.dist = stats.poisson(mu)
def density(self, xs):
return self.dist.pmf(xs)
def xgrid(self):
return np.arange(0, max([1 + self.mu * 2., 11]))
def plot_interval(
left, right, middle,
color=None, display_text=False,
label=None, y=0., offset=.005, fontsize=14
):
color = color if color else 'k'
text_y = y + offset
if middle in (-np.inf, np.inf) and (left in (np.inf, -np.inf) or right in (np.inf, -np.inf)):
raise ValueError('too many interval values are inf')
_left = middle - 4 * np.abs(right) if left in (np.inf, -np.inf) else left
_right = middle + 4 * np.abs(left) if right in (np.inf, -np.inf) else right
plt.plot((_left, _right), (y, y), color=color, linewidth=3, label=label)
plt.plot(middle, y, 'o', color=color, markersize=10)
if display_text:
label = "{}\n({}, {})".format(round(middle, 2), round(left, 2), round(right, 2))
plt.text(middle, text_y, label, ha='center', fontsize=fontsize, color=color)
def raise_y(ax, baseline=0):
ylims = ax.get_ylim()
ax.set_ylim(baseline, ylims[1])
return ax
def lower_y(ax, baseline=None):
ylims = ax.get_ylim()
baseline = baseline if baseline else ylims[0] - np.abs(ylims[1]) * .05
ax.set_ylim(baseline, ylims[1])
return ax
def visualize_gaussian_results(results, figsize=(15, 10), outfile=None, *args, **kwargs):
"""
Visualize the results that use Gaussian approximation.
"""
pdf_control = Gaussian(
mean=results.control.mean,
std=results.control.std,
label=results.control.name,
color=CONTROL_COLOR
)
pdf_variation = Gaussian(
mean=results.variation.mean,
std=results.variation.std,
label=results.variation.name,
color=VARIATION_COLOR
)
pdfs = Pdfs([pdf_control, pdf_variation])
mean_diff = results.variation.mean - results.control.mean
std_diff = ((results.control.var / results.control.nobs) + \
(results.variation.var / results.control.nobs)) ** .5
pdf_diff = Gaussian(mean_diff, std_diff, label='Difference', color=DIFF_COLOR)
fig, axs = plt.subplots(3, 1, figsize=figsize)
plt.sca(axs[0])
pdfs.plot()
raise_y(axs[0])
plt.gca().get_yaxis().set_ticks([])
plt.title("Sample Comparison")
x_min, x_max = plt.xlim()
plt.sca(axs[1])
y_min, y_max = plt.ylim()
y_dist = (y_max - y_min) / LABEL_Y_OFFSET_FACTOR
plot_interval(
*results.control.std_err(),
middle=results.control.mean,
y=y_dist,
offset=-.015,
color=CONTROL_COLOR,
display_text=True,
label=results.control.name
)
plot_interval(
*results.variation.std_err(),
middle=results.variation.mean,
y=-y_dist,
offset=0.005,
color=VARIATION_COLOR,
display_text=True,
label=results.variation.name
)
plt.legend()
plt.xlim(x_min, x_max)
plt.gca().get_yaxis().set_ticks([])
plt.title("Mean +/- Standard Error")
# plot differences distribution
plt.sca(axs[2])
plt.axvline(0., color=DIFF_COLOR, linestyle='--', linewidth=1.5)
if results.inference_procedure.hypothesis == 'larger':
left_bound = results.ci[0][0]
right_bound = np.inf
elif results.inference_procedure.hypothesis == 'smaller':
right_bound = results.ci[0][1]
left_bound = np.inf
else:
left_bound = results.ci[0][0]
right_bound = results.ci[0][1]
plot_interval(
left_bound,
right_bound,
mean_diff,
color=DIFF_COLOR,
display_text=True
)
plt.gca().get_yaxis().set_ticks([])
plt.title(results.comparison_type)
if outfile:
plt.savefig(
outfile,
bbox_inches='tight',
dpi=300
)
def visualize_binomial_results(results, figsize=(15, 10), outfile=None, *args, **kwargs):
"""
Visualize the results that use Gaussian approximation.
"""
tol = 1e-4
pmf_control = Binomial(
p=results.control.mean,
n=results.control.nobs,
label=results.control.name,
color=CONTROL_COLOR
)
pmf_variation = Binomial(
p=results.variation.mean,
n=results.variation.nobs,
label=results.variation.name,
color=VARIATION_COLOR
)
xy_control = zip(pmf_control.xgrid(), pmf_control.density(pmf_control.xgrid()))
xy_variation = zip(pmf_variation.xgrid(), pmf_variation.density(pmf_variation.xgrid()))
valid_xy_control = sorted([x for x in xy_control if x[1] >= tol], key=lambda x: x[0])
valid_xy_variation = sorted([x for x in xy_variation if x[1] >= tol], key=lambda x: x[0])
x_min = int(min(valid_xy_control[0][0], valid_xy_variation[0][0]))
x_max = int(max(valid_xy_control[-1][0], valid_xy_variation[-1][0]))
mean_diff = results.variation.mean - results.control.mean
std_diff = (results.control.var / results.control.nobs + \
results.variation.var / results.control.nobs) ** .5
pdf_diff = Gaussian(mean_diff, std_diff, label='Difference', color=DIFF_COLOR)
fig, axs = plt.subplots(3, 1, figsize=figsize)
plt.sca(axs[0])
# make plotting more scalable
if pmf_control.n > 1000 or pmf_variation.n > 1000:
plot_type = 'step'
else:
plot_type = 'bar'
pmf_control.plot(plot_type=plot_type, alpha=.5)
pmf_variation.plot(plot_type=plot_type, alpha=.5)
raise_y(axs[0])
plt.xlim(x_min, x_max)
# plt.gca().get_xaxis().set_ticks([])
# plt.gca().get_yaxis().set_ticks([])
plt.legend()
plt.title("Sample Comparison")
plt.sca(axs[1])
y_min, y_max = plt.ylim()
y_dist = (y_max - y_min) / LABEL_Y_OFFSET_FACTOR
plot_interval(
*results.control.std_err(),
middle=results.control.mean,
y=y_dist,
offset=-0.015,
color=CONTROL_COLOR,
display_text=True,
label=results.control.name
)
plot_interval(
*results.variation.std_err(),
middle=results.variation.mean,
y=-y_dist,
offset=0.005,
color=VARIATION_COLOR,
display_text=True,
label=results.variation.name
)
plt.legend()
plt.gca().get_yaxis().set_ticks([])
plt.title("Proportions +/- Standard Error")
# Differences plot
plt.sca(axs[2])
plt.axvline(0., color=DIFF_COLOR, linestyle='--', linewidth=1.5)
# xs = pdf_diff.xgrid()
if results.inference_procedure.hypothesis == 'larger':
left_bound = results.ci[0][0]
right_bound = np.inf
elif results.inference_procedure.hypothesis == 'smaller':
right_bound = results.ci[0][1]
left_bound = np.inf
else:
left_bound = results.ci[0][0]
right_bound = results.ci[0][1]
plot_interval(
left_bound,
right_bound,
mean_diff,
color=DIFF_COLOR,
display_text=True
)
plt.gca().get_yaxis().set_ticks([])
plt.title(results.comparison_type)
if outfile:
plt.savefig(
outfile,
bbox_inches='tight',
dpi=300
)
def visualize_rates_results(results, figsize=(15, 10), outfile=None, *args, **kwargs):
fig, axs = plt.subplots(3, 1, figsize=figsize)
# Sample Comparison plot
plt.sca(axs[0])
control_pmf = Poisson(
results.control.mean,
color=CONTROL_COLOR,
label=results.control.name
)
variation_pmf = Poisson(
results.variation.mean,
color=VARIATION_COLOR,
label=results.variation.name
)
control_pmf.plot(plot_type='bar', alpha=.5)
variation_pmf.plot(plot_type='bar', alpha=.5)
plt.legend()
plt.title("Sample Comparison")
# Rates +/- standard error plot
plt.sca(axs[1])
y_min, y_max = plt.ylim()
y_dist = (y_max - y_min) / LABEL_Y_OFFSET_FACTOR
plot_interval(
*results.control.std_err(),
middle=results.control.mean,
y=y_dist,
offset=-0.015,
color=CONTROL_COLOR,
display_text=True,
label=results.control.name
)
plot_interval(
*results.variation.std_err(),
middle=results.variation.mean,
y=-y_dist,
offset=0.005,
color=VARIATION_COLOR,
display_text=True,
label=results.variation.name
)
plt.legend()
plt.gca().get_yaxis().set_ticks([])
plt.title("Rates +/- Standard Error")
# Differences plot
plt.sca(axs[2])
plot_interval(
*results.ci[0],
middle=results.delta,
color=DIFF_COLOR,
display_text=True
)
plt.axvline(1., color=DIFF_COLOR, linestyle='--', linewidth=1.5)
plt.gca().get_yaxis().set_ticks([])
plt.title(results.comparison_type)
if outfile:
plt.savefig(
outfile,
bbox_inches='tight',
dpi=300
)
def visualize_bootstrap_results(results, figsize=(15, 10), outfile=None, plot_type='bar', *args, **kwargs):
fig, axs = plt.subplots(3, 1, figsize=figsize)
# Sample Comparison plot
plt.sca(axs[0])
if plot_type == 'bar':
bins = 50 if results.control.nobs >= 100 or results.variation.nobs >= 100 else 20
results.control.hist(bins=bins, color=CONTROL_COLOR, alpha=.5, label=results.control.name)
results.variation.hist(bins=bins, color=VARIATION_COLOR, alpha=.5, label=results.variation.name)
else:
control_pmf = KdePdf(
samples=results.control.data,
color=CONTROL_COLOR,
label=results.control.name
)
variation_pmf = KdePdf(
samples=results.variation.data,
color=VARIATION_COLOR,
label=results.variation.name
)
control_pmf.plot(alpha=.5)
variation_pmf.plot(alpha=.5)
plt.legend()
plt.title("Sample Comparison")
# Bootstrapped statistic +/- HDI
plt.sca(axs[1])
y_min, y_max = plt.ylim()
y_dist = (y_max - y_min) / LABEL_Y_OFFSET_FACTOR
plot_interval(
*results.aux['control'].hdi(),
middle=results.aux['control'].mean,
y=y_dist,
offset=-0.015,
color=CONTROL_COLOR,
display_text=True,
label=results.control.name
)
plot_interval(
*results.aux['variation'].hdi(),
middle=results.aux['variation'].mean,
y=-y_dist,
offset=0.005,
color=VARIATION_COLOR,
display_text=True,
label=results.variation.name
)
plt.legend()
plt.gca().get_yaxis().set_ticks([])
plt.title(f"Bootstrap({results.test_statistic}) +/- 95% HDI")
# Differences plot
plt.sca(axs[2])
plot_interval(
*results.ci[0],
middle=results.delta,
color=DIFF_COLOR,
display_text=True
)
plt.axvline(0., color=DIFF_COLOR, linestyle='--', linewidth=1.5)
plt.gca().get_yaxis().set_ticks([])
plt.title(f"{results.comparison_type}({results.test_statistic})")
if outfile:
plt.savefig(
outfile,
bbox_inches='tight',
dpi=300
)
def visualize_bayesian_results(results, figsize=RESULTS_FIGSIZE, outfile=None, *args, **kwargs):
fig, axs = plt.subplots(3, 1, figsize=figsize)
def get_central_tendency_params(results):
if 'p_control' in results.traces.variables:
return 'p_control', 'p_variation', '$p$ (proportion)'
elif 'mu_control' in results.traces.variables:
return 'mu_control', 'mu_variation', '$\\mu$ (mean)'
elif 'lambda_control' in results.traces.variables:
return 'lambda_control', 'lambda_variation', '$\\lambda$ (rate)'
HDI = 0.95
HDI_PRCT = round(HDI * 100)
plt.sca(axs[0])
ctps = get_central_tendency_params(results)
results.traces.plot(
ctps[0],
label=results.control.name,
color=COLORS.blue,
alpha=.4
)
results.traces.plot(
ctps[1],
label=results.variation.name,
color=COLORS.green,
alpha=.4,
title='Comparison of {}'.format(ctps[2])
)
plt.legend()
lower_y(axs[0])
x_min, x_max = plt.xlim()
plt.sca(axs[1])
y_min, y_max = plt.ylim()
y_dist = (y_max - y_min) / LABEL_Y_OFFSET_FACTOR
results.traces.plot(
ctps[0],
label=results.control.name,
color=COLORS.blue,
hdi=HDI,
include_hist=False,
y=y_dist,
offset=-0.015
)
results.traces.plot(
ctps[1],
label=results.variation.name,
color=COLORS.green,
hdi=HDI,
include_hist=False,
y=-y_dist,
offset=0.005
)
lower_y(axs[1])
plt.xlim([x_min, x_max])
plt.title(f"{ctps[2]} +/- {HDI_PRCT}% HDI")
plt.sca(axs[2])
results.traces.plot(
'delta',
hdi=1 - results.alpha,
ref_val=0.0,
color=COLORS.dark_gray,
title='Differences in {}'.format(ctps[2])
)
lower_y(axs[2])
if outfile:
plt.savefig(
outfile,
bbox_inches='tight',
dpi=300
)
class Traces(object):
"""
Container class for analyzing the results of Bayesian inference procedure.
Parameters
----------
traces: dict
Key-value pairs of parameters:samples, extracted from a Bayesian inference
procedure.
"""
def __init__(self, traces):
self.variables = []
for k, v in list(traces.items()):
if k != "lp__":
self.variables.append(k)
setattr(self, k, Samples(v))
self.summarize()
def summarize(self):
prct = [2.5, 25, 50, 75, 97.5]
values = []
columns = []
for v in self.variables:
trace = getattr(self, v)
_mean = trace.mean
_hdi = trace.hdi()
_std = trace.std
_percentiles = trace.percentiles(prct)
values.append(np.r_[_mean, _hdi, _std, _percentiles])
columns = ['mean', 'hdi_lower', 'hdi_upper', 'std'] + ["{}%".format(p) for p in prct]
self._summary = DataFrame(values, columns=columns, index=self.variables)
@property
def summary(self):
return self._summary
def plot(
self, variable, label=None,
color=None, ref_val=None, alpha=.25,
bins=None, title=None,
hdi=None, outfile=None,
include_hist=True,
offset=5,
y=0.
):
"""
Plot the histogram of a variable trace
Parameters
----------
variable : str
The name of one of self.variables to plot
label : str
Alternative label for the legend
ref_val : float
A reference value location at which to draw a vertical line
alpha : float in [0 1)
The transparency of the histogram. Ignored if `include_hist=False`
bins : int
The number of histogram bins. Ignored if `include_hist=False`
title : str
The title of the plot
hdi : float in [0, 1]
The amount of probability mass within the Highest Density Interval
to display on the histogram.
y : float
The y offset for interval plots. Ignored if `hdi is None`
offset : float
The text offset for interval plots. Ignored if `hdi is None`
outfile : str
The name of an output file to save the figure to.
"""
from matplotlib import pyplot as plt # lazy import
from abra.vis import plot_interval
if (include_hist is False) and (hdi is None):
raise ValueError('include_hist must be True if hdi is None')
if variable not in self.variables:
raise ValueError('Variable `{}` not available'.format(variable))
label = label if label else variable
trace = getattr(self, variable)
if include_hist:
if bins is None:
bins = int(len(trace.data) / 50.)
trace.hist(
color=color,
alpha=alpha,
bins=bins,
ref_val=ref_val,
label=label
)
if hdi is not None: # highest density interval
median = round(trace.percentiles(50), 3)
_hdi = [round(h, 3) for h in trace.hdi(1 - hdi)]
plot_interval(
*_hdi,
middle=median,
display_text=True,
offset=offset,
y=y,
color=color
)
if title is None:
if ref_val is not None:
gt = round(100 * trace.prob_greater_than(ref_val))
title = " {}% < {} = {} < {}%".format(100 - gt, variable, ref_val, gt)
else:
title = ''
plt.title(title, fontsize=14)
if outfile:
plt.savefig(
outfile,
bbox_inches='tight',
dpi=300
)
|
{"hexsha": "8936da7c769ae6d29e3659a964aed726bf10e759", "size": 22475, "ext": "py", "lang": "Python", "max_stars_repo_path": "abra/vis.py", "max_stars_repo_name": "quizlet/abracadabra", "max_stars_repo_head_hexsha": "eda599bd02f14b96efdc521f53132d93c9100ede", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-06-12T16:12:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-01T12:25:38.000Z", "max_issues_repo_path": "abra/vis.py", "max_issues_repo_name": "quizlet/abracadabra", "max_issues_repo_head_hexsha": "eda599bd02f14b96efdc521f53132d93c9100ede", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-06-12T06:26:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:57:51.000Z", "max_forks_repo_path": "abra/vis.py", "max_forks_repo_name": "quizlet/abracadabra", "max_forks_repo_head_hexsha": "eda599bd02f14b96efdc521f53132d93c9100ede", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-06-14T12:14:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T15:36:44.000Z", "avg_line_length": 28.1994981179, "max_line_length": 107, "alphanum_fraction": 0.5816240267, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5727}
|
from sympy import Eq, expand, Function, solve, symbols
from devito import t, time, x, y, z, Dimension
from devito.interfaces import DenseData, TimeData, Forward, Backward
from devito.foreign import Operator
from numpy.random import randint
def acoustic_laplacian(v, rho):
# Derive stencil from symbolic equation
if rho is not None:
if isinstance(rho, DenseData):
if len(v.shape[:-1]) == 3:
Lap = (1/rho * v.dx2 - (1/rho)**2 * rho.dx * v.dx +
1/rho * v.dy2 - (1/rho)**2 * rho.dy * v.dy +
1/rho * v.dz2 - (1/rho)**2 * rho.dz * v.dz)
else:
Lap = (1/rho * v.dx2 - (1/rho)**2 * rho.dx * v.dx +
1/rho * v.dy2 - (1/rho)**2 * rho.dy * v.dy)
else:
if len(v.shape[:-1]) == 3:
Lap = (1/rho * v.dx2 +
1/rho * v.dy2 +
1/rho * v.dz2)
else:
Lap = (1/rho * v.dx2 +
1/rho * v.dy2)
else:
Lap = v.laplace
rho = 1
return Lap, rho
def ForwardOperator(model, u, src, rec, data, q, time_order=2, spc_order=6,
save=False, tsave=4.0, free_surface=False, **kwargs):
nt = data.shape[0]
dt = model.critical_dt
s = t.spacing
m, damp, rho = model.m, model.damp, model.rho
Lap, rho = acoustic_laplacian(u, rho)
# Derive stencil from symbolic equation
eqn = m / rho * u.dt2 - Lap + damp * u.dt + q
# stencil = solve(eqn, u.forward)[0]
stencil = solve(eqn, u.forward, rational=False)[0]
# Add substitutions for spacing (temporal and spatial)
subs = dict([(s, dt)] + [(i.spacing, model.get_spacing()[j]) for i, j
in zip(u.indices[1:], range(len(model.shape)))])
stencils = [Eq(u.forward,stencil)]
# Create stencil expressions for operator, source and receivers
ti = u.indices[0]
src_term = src.inject(field=u.forward, offset=model.nbpml,
expr=rho * src * dt**2 / m)
# Create interpolation expression for receivers
rec_term = rec.interpolate(expr=u, offset=model.nbpml)
stencils = stencils + src_term + rec_term
if save:
nsave = int(nt/(tsave/dt) +1)
rate = int(nt/nsave)+1
usave = TimeData(name="usave", shape=model.shape_domain, time_dim=nt,
time_order=2, space_order=spc_order, save=True,
dtype=model.dtype)
stencils += [Eq(usave.subs(usave.indices[0], Function('INT')(time/rate)), u)]
if free_surface:
fs = Dimension(name="fs", size = model.nbpml)
stencils+= [Eq(u.forward.subs({u.indices[-1]: fs}), -u.forward.subs({u.indices[-1] :2*model.nbpml - fs}))]
dse = kwargs.get('dse', 'advanced')
dle = kwargs.get('dle', 'advanced')
op = Operator(stencils, subs=subs, dse=dse, dle=dle,
time_axis=Forward, name="Forward%s" % randint(1e5),
profiler=False, external=True)
return op
def AdjointOperator(model, v, srca, rec, data, time_order=2, spc_order=6,
save=False, free_surface=False, **kwargs):
nt = data.shape[0]
dt = model.critical_dt
s = t.spacing
m, damp, rho = model.m, model.damp, model.rho
# Derive stencil from symbolic equation
Lap, rho = acoustic_laplacian(v, rho)
# Create the stencil by hand instead of calling numpy solve for speed purposes
# Simple linear solve of a u(t+dt) + b u(t) + c u(t-dt) = L for u(t+dt)
eqn = m / rho * v.dt2 - Lap - damp * v.dt
stencil = solve(eqn, v.backward, rational=False)[0]
# Add substitutions for spacing (temporal and spatial)
subs = dict([(s, dt)] + [(i.spacing, model.get_spacing()[j]) for i, j
in zip(v.indices[1:], range(len(model.shape)))])
dse = kwargs.get('dse', 'advanced')
dle = kwargs.get('dle', 'advanced')
# Create stencil expressions for operator, source and receivers
eqn = Eq(v.backward, stencil)
# Construct expression to inject receiver values
ti = v.indices[0]
receivers = rec.inject(field=v.backward, offset=model.nbpml,
expr=rho * rec * dt**2 / m)
# Create interpolation expression for the adjoint-source
source_a = srca.interpolate(expr=v, offset=model.nbpml)
stencils = [eqn] + source_a + receivers
if free_surface:
fs = Dimension(name="fs", size = model.nbpml)
stencils+= [Eq(v.backward.subs({v.indices[-1]: fs}), -v.backward.subs({v.indices[-1]:2*model.nbpml - fs}))]
op = Operator(stencils, subs=subs, dse=dse, dle=dle,
time_axis=Backward, name="Adjoint%s" % randint(1e5),
profiler=False, external=True)
return op
def GradientOperator(model, v, grad, rec, u, data,
time_order=2, spc_order=6, tsave=4.0,
free_surface=False, **kwargs):
"""
Class to setup the gradient operator in an acoustic media
:param model: :class:`Model` object containing the physical parameters
:param src: None ot IShot() (not currently supported properly)
:param data: IShot() object containing the acquisition geometry and field data
:param: recin : receiver data for the adjoint source
:param: time_order: Time discretization order
:param: spc_order: Space discretization order
"""
nt = data.shape[0]
s = t.spacing
dt = model.critical_dt
m, damp, rho = model.m, model.damp, model.rho
Lap, rho = acoustic_laplacian(v, rho)
# Derive stencil from symbolic equation
eqn = m / rho * v.dt2 - Lap - damp * v.dt
stencil = solve(eqn, v.backward, rational=False)[0]
nsave = int(nt/(tsave/dt) +1)
rate = int(nt/nsave)+1
gradient_update = Eq(grad, grad - ((time%(Function('INT')(rate)))<1) * u.subs(u.indices[0], Function('INT')(time/rate))* v.dt2 / rho)
# Add substitutions for spacing (temporal and spatial)
subs = dict([(s, dt)] + [(i.spacing, model.get_spacing()[j]) for i, j
in zip(v.indices[1:], range(len(model.shape)))])
dse = kwargs.get('dse', 'advanced')
dle = kwargs.get('dle', 'advanced')
# Create stencil expressions for operator, source and receivers
eqn = Eq(v.backward, stencil)
# Add expression for receiver injection
ti = v.indices[0]
receivers = rec.inject(field=v.backward, offset=model.nbpml,
expr=rho * rec * dt * dt / m)
stencils = [eqn] + receivers + [gradient_update]
if free_surface:
fs = Dimension(name="fs", size = model.nbpml)
stencils+= [Eq(v.backward.subs({v.indices[-1]: fs}), -v.backward.subs({v.indices[-1]:2*model.nbpml - fs}))]
op = Operator(stencils, subs=subs, dse=dse, dle=dle,
time_axis=Backward, name="Gradient%s" % randint(1e5),
profiler=False, external=True)
return op
def BornOperator(model, u, du, src, Linrec, dm, data,
time_order=2, spc_order=6, save=False,
free_surface=False, **kwargs):
"""
Class to setup the linearized modelling operator in an acoustic media
:param model: :class:`Model` object containing the physical parameters
:param src: None ot IShot() (not currently supported properly)
:param data: IShot() object containing the acquisition geometry and field data
:param: dmin : square slowness perturbation
:param: recin : receiver data for the adjoint source
:param: time_order: Time discretization order
:param: spc_order: Space discretization order
"""
nt = data.shape[0]
s = t.spacing
dt = model.critical_dt
m, damp, rho = model.m, model.damp, model.rho
Lap, rho = acoustic_laplacian(u, rho)
LapU, _ = acoustic_laplacian(du, rho)
# Derive stencils from symbolic equation
first_eqn = m / rho * u.dt2 - Lap + damp * u.dt
first_stencil = solve(first_eqn, u.forward, rational=False)[0]
second_eqn = m / rho * du.dt2 - LapU + damp * du.dt + dm / rho * u.dt2
second_stencil = solve(second_eqn, du.forward, rational=False)[0]
# Add substitutions for spacing (temporal and spatial)
subs = dict([(s, dt)] + [(i.spacing, model.get_spacing()[j]) for i, j
in zip(u.indices[1:], range(len(model.shape)))])
# Add Born-specific updates and resets
dse = kwargs.get('dse', 'advanced')
dle = kwargs.get('dle', 'advanced')
# Create stencil expressions for operator, source and receivers
eqn1 = [Eq(u.forward, first_stencil)]
eqn2 = [Eq(du.forward, second_stencil)]
# Add source term expression for u
ti = u.indices[0]
source = src.inject(field=u.forward, offset=model.nbpml,
expr=rho * src * dt * dt / m)
# Create receiver interpolation expression from U
receivers = Linrec.interpolate(expr=du, offset=model.nbpml)
stencils = eqn1 + source + eqn2 + receivers
if free_surface:
fs = Dimension(name="fs", size = model.nbpml)
stencils+= [Eq(u.forward.subs({u.indices[-1]: fs}), -u.forward.subs({u.indices[-1]:2*model.nbpml - fs}))]
stencils+= [Eq(du.forward.subs({du.indices[-1]: fs}), -du.forward.subs({du.indices[-1]:2*model.nbpml - fs}))]
op = Operator(stencils, subs=subs, dse=dse, dle=dle,
time_axis=Forward, name="Born%s" % randint(1e5),
profiler=False, external=True)
return op
|
{"hexsha": "295cd8f8a7368e6a39761eab40c90b3a0d4c3dd2", "size": 9570, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/operators/acoustic/Jfwi_operators.py", "max_stars_repo_name": "SINBADconsortium/opesciSLIM", "max_stars_repo_head_hexsha": "8c3af2d8e9809fcd4d53fa160c01dbd624b6bb25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-03T04:46:21.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-09T06:36:49.000Z", "max_issues_repo_path": "Python/operators/acoustic/Jfwi_operators.py", "max_issues_repo_name": "SINBADconsortium/opesciSLIM", "max_issues_repo_head_hexsha": "8c3af2d8e9809fcd4d53fa160c01dbd624b6bb25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/operators/acoustic/Jfwi_operators.py", "max_forks_repo_name": "SINBADconsortium/opesciSLIM", "max_forks_repo_head_hexsha": "8c3af2d8e9809fcd4d53fa160c01dbd624b6bb25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.698630137, "max_line_length": 137, "alphanum_fraction": 0.5947753396, "include": true, "reason": "from numpy,from sympy", "num_tokens": 2639}
|
# Working with Sampling Distributions
Most statistical analysis involves working with distributions - usually of sample data.
## Sampling and Sampling Distributions
As we discussed earlier, when working with statistics, we usually base our calculations on a sample and not the full population of data. This means we need to allow for some variation between the sample statistics and the true parameters of the full population.
In the previous example, we knew the probability that a security search would be triggered was 25%, so it's pretty easy to calculate that the expected value for a random variable indicating the number of searches per 100 passengers is 25. What if we hadn't known the probability of a search? How could we estimate the expected mean number of searches for a given number of passengers based purely on sample data collected by observing passengers go through security?
### Creating a Proportion Distribution from a Sample
We know that the each passenger will either be searched or not searched, and we can assign the values ***0*** (for not searched) and ***1*** (for searched) to these outcomes. We can conduct a Bernoulli trial in which we sample 16 passengers and calculate the fraction (or *proportion*) of passengers that were searched (which we'll call ***p***), and the remaining proportion of passengers (which are the ones who weren't searched, and can be calculated as ***1-p***).
Let's say we record the following values for our 16-person sample:
0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0
In this sample, there were 3 searches out of 16 passengers; which as a proportion is <sup>3</sup>/<sub>16</sub> or 0.1875. This is our proportion (or **p**); but because we know that this is based on a sample, we call it **p̂** (or p-hat). The remaining proportion of passengers is 1-p; in this case 1 - 0.1875, which is 0.8125.
The data itself is *qualitative* (categorical) - we're indicating "no search" or "search"; but because we're using numeric values (0 and 1), we can treat these values as numeric and create a binomial distribution from them - it's the simplest form of a binomial distribution - a Bernoulli distribution with two values.
Because we're treating the results as a numberic distribution, we can also calculate statistics like *mean* and *standard deviation*:
To calculate these, you can use the following formulae:
\begin{equation}\mu_{\hat{p}} = \hat{p}\end{equation}
\begin{equation}\sigma_{\hat{p}} = \sqrt{\hat{p}(1-\hat{p})}\end{equation}
The mean is just the value of **p̂**, so in the case of the passenger search sample it is 0.1875.
The standard deviation is calculated as:
\begin{equation}\sigma_{\hat{p}} = \sqrt{0.1875 \times 0.8125} \approx 0.39\end{equation}
We can use Python to plot the sample distribution and calculate the mean and standard deviation of our sample like this:
```python
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
searches = np.array([0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0])
# Set up the graph
plt.xlabel('Search Results')
plt.ylabel('Frequency')
plt.hist(searches)
plt.show()
print('Mean: ' + str(np.mean(searches)))
print('StDev: ' + str(np.std(searches)))
```
When talking about probability, the *mean* is also known as the *expected value*; so based on our single sample of 16 passengers, should we expect the proportion of searched passengers to be 0.1875 (18.75%)?
Well, using a single sample like this can be misleading because the number of searches can vary with each sample. Another person observing 100 passengers may get a (very) different result from you. One way to address this problem is to take multiple samples and combine the resulting means to form a *sampling* distribution. This will help us ensure that the distribution and statistics of our sample data is closer to the true values; even if we can't measure the full population.
### Creating a Sampling Distribution of a Sample Proportion
So, let's collect mulitple 16-passenger samples - here are the resulting sample proportions for 12 samples:
| Sample | Result |
|--------|--------|
| p̂<sub>1</sub>| 0.1875 |
| p̂<sub>2</sub>| 0.2500 |
| p̂<sub>3</sub>| 0.3125 |
| p̂<sub>4</sub>| 0.1875 |
| p̂<sub>5</sub>| 0.1250 |
| p̂<sub>6</sub>| 0.3750 |
| p̂<sub>7</sub>| 0.2500 |
| p̂<sub>8</sub>| 0.1875 |
| p̂<sub>9</sub>| 0.3125 |
| p̂<sub>10</sub>| 0.2500 |
| p̂<sub>11</sub>| 0.2500 |
| p̂<sub>12</sub>| 0.3125 |
We can plot these as a sampling distribution like this:
```python
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
searches = np.array([0.1875,0.25,0.3125,0.1875,0.125,0.375,0.25,0.1875,0.3125,0.25,0.25,0.3125])
# Set up the graph
plt.xlabel('Search Results')
plt.ylabel('Frequency')
plt.hist(searches)
plt.show()
```
#### The Central Limit Theorem
You saw previously with the binomial probability distribution, with a large enough sample size (the *n* value indicating the number of binomial experiments), the distribution of values for a random variable started to form an approximately *normal* curve. This is the effect of the *central limit theorem*, and it applies to any distribution of sample data if the size of the sample is large enough. For our airport passenger data, if we collect a large enough number of samples, each based on a large enough number of passenger observations, the sampling distribution will be approximately normal. The larger the sample size, the closer to a perfect *normal* distribution the data will be, and the less variance around the mean there will be.
Run the cell below to see a simulated distribution created by 10,000 random 100-passenger samples:
```python
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
n, p, s = 100, 0.25, 10000
df = pd.DataFrame(np.random.binomial(n,p,s)/n, columns=['p-hat'])
# Plot the distribution as a histogram
means = df['p-hat']
means.plot.hist(title='Simulated Sampling Distribution')
plt.show()
print ('Mean: ' + str(means.mean()))
print ('Std: ' + str(means.std()))
```
### Mean and Standard Error of a Sampling Distribution of Proportion
The sampling distribution is created from the means of multiple samples, and its mean is therefore the mean of all the sample means. For a distribution of proportion means, this is considered to be the same as **p** (the population mean). In the case of our passenger search samples, this is 0.25.
Because the sampling distribution is based on means, and not totals, its standard deviation is referred to as its *standard error*, and its formula is:
\begin{equation}\sigma_{\hat{p}} = \sqrt{\frac{p(1-p)}{n}}\end{equation}
In this formula, *n* is the size of each sample; and we divide by this to correct for the error introduced by the average values used in the sampling distribution. In this case, our samples were based on observing 16-passengers, so:
\begin{equation}\sigma_{\hat{p}} = \sqrt{\frac{0.25 \times 0.75}{16}} \approx 0.11\end{equation}
In our simulation of 100-passenger samples, the mean remains 0.25. The standard error is:
\begin{equation}\sigma_{\hat{p}} = \sqrt{\frac{0.25 \times 0.75}{100}} \approx 0.043\end{equation}
Note that the effect of the central limit theorem is that as you increase the number and/or size of samples, the mean remains constant but the amount of variance around it is reduced.
Being able to calculate the mean (or *expected value*) and standard error is useful, because we can apply these to what we know about an approximately normal distribution to estimate probabilities for particular values. For example, we know that in a normal distribution, around 95.4% of the values are within two standard deviations of the mean. If we apply that to our sampling distribution of ten thousand 100-passenger samples, we can determine that the proportion of searched passengers in 95.4% of the samples was between 0.164 (16.4%) and 0.336 (36.6%).
How do we know this?
We know that the mean is ***0.25*** and the standard error (which is the same thing as the standard deviation for our sampling distribution) is ***0.043***. We also know that because this is a *normal* distribution, ***95.4%*** of the data lies within two standard deviations (so 2 x 0.043) of the mean, so the value for 95.4% of our samples is 0.25 ± (*plus or minus*) 0.086.
The *plus or minus* value is known as the *margin of error*, and the range of values within it is known as a *confidence interval* - we'll look at these in more detail later. For now, run the following cell to see a visualization of this interval:
```python
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
n, p, s = 100, 0.25, 10000
df = pd.DataFrame(np.random.binomial(n,p,s)/n, columns=['p-hat'])
# Plot the distribution as a histogram
means = df['p-hat']
m = means.mean()
sd = means.std()
moe1 = m - (sd * 2)
moe2 = m + (sd * 2)
means.plot.hist(title='Simulated Sampling Distribution')
plt.axvline(m, color='red', linestyle='dashed', linewidth=2)
plt.axvline(moe1, color='magenta', linestyle='dashed', linewidth=2)
plt.axvline(moe2, color='magenta', linestyle='dashed', linewidth=2)
plt.show()
```
### Creating a Sampling Distribution of Sample Means
In the previous example, we created a sampling distribution of proportions; which is a suitable way to handle discrete values, like the number of passengers searched or not searched. When you need to work with continuous data, you use slightly different formulae to work with the sampling distribution.
For example, suppose we want to examine the weight of the hand luggage carried by each passenger. It's impractical to weigh every bag that is carried through security, but we could weigh one or more samples, for say, 5 passengers at a time, on twelve occassions. We might end up with some data like this:
| Sample | Weights |
|--------|---------|
| 1 | [4.020992,2.143457,2.260409,2.339641,4.699211] |
| 2 | [3.38532,4.438345,3.170228,3.499913,4.489557] |
| 3 | [3.338228,1.825221,3.53633,3.507952,2.698669] |
| 4 | [2.992756,3.292431,3.38148,3.479455,3.051273] |
| 5 | [2.969977,3.869029,4.149342,2.785682,3.03557] |
| 6 | [3.138055,2.535442,3.530052,3.029846,2.881217] |
| 7 | [1.596558,1.486385,3.122378,3.684084,3.501813] |
| 8 | [2.997384,3.818661,3.118434,3.455269,3.026508] |
| 9 | [4.078268,2.283018,3.606384,4.555053,3.344701] |
| 10 | [2.532509,3.064274,3.32908,2.981303,3.915995] |
| 11 | [4.078268,2.283018,3.606384,4.555053,3.344701] |
| 12 | [2.532509,3.064274,3.32908,2.981303,3.915995] |
Just as we did before, we could take the mean of each of these samples and combine them to form a sampling distribution of the sample means (which we'll call **<span style="text-decoration: overline;">X</span>**, and which will contain a mean for each sample, which we'll label x̄<sub>n</sub>):
| Sample | Mean Weight |
|--------|---------|
| x̄<sub>1</sub> | 3.092742 |
| x̄<sub>2</sub> | 3.7966726 |
| x̄<sub>3</sub> | 2.98128 |
| x̄<sub>4</sub> | 3.239479 |
| x̄<sub>5</sub> | 3.36192 |
| x̄<sub>6</sub> | 3.0229224 |
| x̄<sub>7</sub> | 2.6782436 |
| x̄<sub>8</sub> | 3.2832512 |
| x̄<sub>9</sub> | 3.5734848 |
| x̄<sub>10</sub> | 3.1646322 |
| x̄<sub>11</sub> | 3.5734848 |
| x̄<sub>12</sub> | 3.1646322 |
We can plot the distribution for the sampling distribution like this:
```python
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
meanweights = np.array([3.092742,
3.7966726,
2.98128,
3.239479,
3.36192,
3.0229224,
2.6782436,
3.2832512,
3.5734848,
3.1646322,
3.5734848,
3.1646322])
# Set up the graph
plt.xlabel('Mean Weights')
plt.ylabel('Frequency')
plt.hist(meanweights, bins=6)
plt.show()
print('Mean: ' + str(meanweights.mean()))
print('Std: ' + str(meanweights.std()))
```
Just as before, as we increase the sample size, the central limit theorem ensures that our sampling distribution starts to approximate a normal distribution. Our current distribution is based on the means generated from twelve samples, each containing 5 weight observations. Run the following code to see a distribution created from a simulation of 10,000 samples each containing weights for 500 passengers:
>This may take a few minutes to run. The code is not the most efficient way to generate a sample distribution, but it reflects the principle that our sampling distribution is made up of the means from multiple samples. In reality, you could simulate the sampling by just creating a single sample from the ***random.normal*** function with a larger ***n*** value.
```python
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
mu, sigma, n = 3.2, 1.2, 500
samples = list(range(0, 10000))
# data will hold all of the sample data
data = np.array([])
# sampling will hold the means of the samples
sampling = np.array([])
# Perform 10,000 samples
for s in samples:
# In each sample, get 500 data points from a normal distribution
sample = np.random.normal(mu, sigma, n)
data = np.append(data,sample)
sampling = np.append(sampling,sample.mean())
# Create a dataframe with the sampling of means
df = pd.DataFrame(sampling, columns=['mean'])
# Plot the distribution as a histogram
means = df['mean']
means.plot.hist(title='Simulated Sampling Distribution', bins=100)
plt.show()
# Print the Mean and StdDev for the full sample and for the sampling distribution
print('Sample Mean: ' + str(data.mean()))
print('Sample StdDev: ' + str(data.std()))
print ('Sampling Mean: ' + str(means.mean()))
print ('Sampling StdErr: ' + str(means.std()))
```
### Mean and Variance of the Sampling Distribution
The following variables are printed beneath the histogram:
- **Sample Mean**: This is the mean for the complete set of sample data - all 10,000 x 500 bag weights.
- **Sample StdDev**: This is the standard deviation for the complete set of sample data - all 10,000 x 500 bag weights.
- **Sampling Mean**: This is the mean for the sampling distribution - the means of the means!
- **Sampling StdErr**: This is the standard deviation (or *standard error*) for the sampling distribution
If we assume that **X** is a random variable representing every possible bag weight, then its mean (indicated as **μ<sub>x</sub>**) is the population mean (**μ**). The mean of the **<span style="text-decoration: overline;">X</span>** sampling distribution (which is indicated as **μ<sub>x̄</sub>**) is considered to have the same value. Or, as an equation:
\begin{equation}\mu_{x} = \mu_{\bar{x}}\end{equation}
In this case, the full population mean is unknown (unless we weigh every bag in the world!), but we do have the mean of the full set of sample observations we collected (**x̄**), and if we check the values generated by Python for the sample mean and the sampling mean, they're more or less the same: around 3.2.
To find the standard deviation of the sample mean, which is technically the *standard error*, we can use this formula:
\begin{equation}\sigma_{\bar{x}} = \frac{\sigma}{\sqrt{n}}\end{equation}
In this formula, ***σ*** is the population standard deviation and ***n*** is the size of each sample.
Since our the population standard deviation is unknown, we can use the full sample standard deviation instead:
\begin{equation}SE_{\bar{x}} \approx \frac{s}{\sqrt{n}}\end{equation}
In this case, the standard deviation of our set of sample data is around 1.2, and we have used 500 variables in each sample to calculate our sample means, so:
\begin{equation}SE_{\bar{x}} \approx \frac{1.2}{\sqrt{500}} = \frac{1.2}{22.36} \approx 0.053\end{equation}
## Confidence Intervals
A confidence interval is a range of values around a sample statistic within which we are confident that the true parameter lies. For example, our bag weight sampling distribution is based on samples of the weights of bags carried by passengers through our airport security line. We know that the mean weight (the *expected value* for the weight of a bag) in our sampling distribution is 3.2, and we assume this is also the population mean for all bags; but how confident can we be that the true mean weight of all carry-on bags is close to the value?
Let's start to put some precision onto these terms. We could state the question another way. What's the range of weights within which are confident that the mean weight of a carry-on bag will be 95% of the time? To calculate this, we need to determine the range of values within which the population mean weight is likely to be in 95% of samples. This is known as a *confidence interval*; and it's based on the Z-scores inherent in a normal distribution.
Confidence intervals are expressed as a sample statistic ± (*plus or minus*) a margin of error. To calculate the margin of error, you need to determine the confidence level you want to find (for example, 95%), and determine the Z score that marks the threshold above or below which the values that are *not* within the chosen interval reside. For example, to calculate a 95% confidence interval, you need the critical Z scores that exclude 5% of the values under the curve; with 2.5% of them being lower than the values in the confidence interval range, and 2.5% being higher. In a normal distribution, 95% of the area under the curve is between a Z score of ± 1.96. The following table shows the critical Z values for some other popular confidence interval ranges:
| Confidence | Z Score |
|-------------|---------|
| 90% | 1.645 |
| 95% | 1.96 |
| 99% | 2.576 |
To calculate a confidence interval around a sample statistic, we simply calculate the *standard error* for that statistic as described previously, and multiply this by the approriate Z score for the confidence interval we want.
To calculate the 95% confidence interval margin of error for our bag weights, we multiply our standard error of 0.053 by the Z score for a 95% confidence level, which is 1.96:
\begin{equation}MoE = 0.053 \times 1.96 = 0.10388 \end{equation}
So we can say that we're confident that the population mean weight is in the range of the sample mean ± 0.10388 with 95% confidence. Thanks to the central limit theorem, if we used an even bigger sample size, the confidence interval would become smaller as the amount of variance in the distribution is reduced. If the number of samples were infinite, the standard error would be 0 and the confidence interval would become a certain value that reflects the true mean weight for all carry-on bags:
\begin{equation}\lim_{n \to \infty} \frac{\sigma}{\sqrt{n}} = 0\end{equation}
In Python, you can use the *scipy.stats.**norm.interval*** function to calculate a confidence interval for a normal distribution. Run the following code to recreate the sampling distribution for bag searches with the same parameters, and display the 95% confidence interval for the mean (again, this may take some time to run):
```python
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
mu, sigma, n = 3.2, 1.2, 500
samples = list(range(0, 10000))
# data will hold all of the sample data
data = np.array([])
# sampling will hold the means of the samples
sampling = np.array([])
# Perform 10,000 samples
for s in samples:
# In each sample, get 500 data points from a normal distribution
sample = np.random.normal(mu, sigma, n)
data = np.append(data,sample)
sampling = np.append(sampling,sample.mean())
# Create a dataframe with the sampling of means
df = pd.DataFrame(sampling, columns=['mean'])
# Get the Mean, StdDev, and 95% CI of the means
means = df['mean']
m = means.mean()
sd = means.std()
ci = stats.norm.interval(0.95, m, sd)
# Plot the distribution, mean, and CI
means.plot.hist(title='Simulated Sampling Distribution', bins=100)
plt.axvline(m, color='red', linestyle='dashed', linewidth=2)
plt.axvline(ci[0], color='magenta', linestyle='dashed', linewidth=2)
plt.axvline(ci[1], color='magenta', linestyle='dashed', linewidth=2)
plt.show()
# Print the Mean, StdDev and 95% CI
print ('Sampling Mean: ' + str(m))
print ('Sampling StdErr: ' + str(sd))
print ('95% Confidence Interval: ' + str(ci))
```
```python
```
|
{"hexsha": "23e6479da4671c3e54019de692ca2505d18378d2", "size": 91264, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Statistics and Probability by Hiren/04-05-Sampling Distributions.ipynb", "max_stars_repo_name": "serkin/Basic-Mathematics-for-Machine-Learning", "max_stars_repo_head_hexsha": "ac0ae9fad82a9f0429c93e3da744af6e6d63e5ab", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Statistics and Probability by Hiren/04-05-Sampling Distributions.ipynb", "max_issues_repo_name": "serkin/Basic-Mathematics-for-Machine-Learning", "max_issues_repo_head_hexsha": "ac0ae9fad82a9f0429c93e3da744af6e6d63e5ab", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Statistics and Probability by Hiren/04-05-Sampling Distributions.ipynb", "max_forks_repo_name": "serkin/Basic-Mathematics-for-Machine-Learning", "max_forks_repo_head_hexsha": "ac0ae9fad82a9f0429c93e3da744af6e6d63e5ab", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 148.88091354, "max_line_length": 11508, "alphanum_fraction": 0.8596818022, "converted": true, "num_tokens": 5524}
|
#!/usr/bin/env python
# /***************************************************************************
#
# @package: panda_siimulator_examples
# @metapackage: panda_simulator
# @author: Saif Sidhik <sxs1412@bham.ac.uk>
#
# **************************************************************************/
# /***************************************************************************
# Copyright (c) 2019-2021, Saif Sidhik
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **************************************************************************/
"""
This is a demo showing task-space control on the
simulator robot using the ROS topics and messages directly
from panda_simulator. The task-space force for the desired
pose is computed using a simple PD law, and the corresponding
joint torques are computed and sent to the robot.
By using this file you can set a equilibrium pose by using interactive marker. You can also set the target
By publishing the topic "panda_simulator/equili_pose" .
"""
import copy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point, TransformStamped,PoseStamped
from visualization_msgs.msg import *
from interactive_markers.interactive_marker_server import *
from franka_core_msgs.msg import EndPointState, JointCommand, RobotState
# -- add to pythonpath for finding rviz_markers.py
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# -------------------------------------------------
from multi_rviz_markers import RvizMarkers
# --------- Modify as required ------------
# Task-space controller parameters
# stiffness gains
P_pos = 50
P_ori = 0
# damping gains
D_pos = 1
D_ori = 0
# -----------------------------------------
publish_rate = 100
JACOBIAN = None
CARTESIAN_POSE = None
CARTESIAN_VEL = None
destination_marker = RvizMarkers()
def _on_robot_state(msg):
"""
Callback function for updating jacobian and EE velocity from robot state
"""
global JACOBIAN, CARTESIAN_VEL
JACOBIAN = np.asarray(msg.O_Jac_EE).reshape(6,7,order = 'F')
CARTESIAN_VEL = {
'linear': np.asarray([msg.O_dP_EE[0], msg.O_dP_EE[1], msg.O_dP_EE[2]]),
'angular': np.asarray([msg.O_dP_EE[3], msg.O_dP_EE[4], msg.O_dP_EE[5]]) }
def _on_endpoint_state(msg):
"""
Callback function to get current end-point state
"""
# pose message received is a vectorised column major transformation matrix
global CARTESIAN_POSE
cart_pose_trans_mat = np.asarray(msg.O_T_EE).reshape(4,4,order='F')
CARTESIAN_POSE = {
'position': cart_pose_trans_mat[:3,3],
'orientation': quaternion.from_rotation_matrix(cart_pose_trans_mat[:3,:3]) }
def quatdiff_in_euler(quat_curr, quat_des):
"""
Compute difference between quaternions and return
Euler angles as difference
"""
curr_mat = quaternion.as_rotation_matrix(quat_curr)
des_mat = quaternion.as_rotation_matrix(quat_des)
rel_mat = des_mat.T.dot(curr_mat)
rel_quat = quaternion.from_rotation_matrix(rel_mat)
vec = quaternion.as_float_array(rel_quat)[1:]
if rel_quat.w < 0.0:
vec = -vec
return -des_mat.dot(vec)
def control_thread(rate):
"""
Actual control loop. Uses goal pose from the feedback thread
and current robot states from the subscribed messages to compute
task-space force, and then the corresponding joint torques.
"""
while not rospy.is_shutdown():
error = 100.
while error > 0.005:
curr_pose = copy.deepcopy(CARTESIAN_POSE)
curr_pos, curr_ori = curr_pose['position'],curr_pose['orientation']
curr_vel = (CARTESIAN_VEL['linear']).reshape([3,1])
curr_omg = CARTESIAN_VEL['angular'].reshape([3,1])
delta_pos = (goal_pos - curr_pos).reshape([3,1])
delta_ori = quatdiff_in_euler(curr_ori, goal_ori).reshape([3,1])
# Desired task-space force using PD law
F = np.vstack([P_pos*(delta_pos), P_ori*(delta_ori)]) + \
np.vstack([D_pos*(curr_vel), D_ori*(curr_omg)])
error = np.linalg.norm(delta_pos) + np.linalg.norm(delta_ori)
J = copy.deepcopy(JACOBIAN)
# joint torques to be commanded
tau = np.dot(J.T,F)
# publish joint commands
command_msg.effort = tau.flatten()
joint_command_publisher.publish(command_msg)
rate.sleep()
def process_feedback(feedback):
"""
InteractiveMarker callback function. Update target pose.
"""
global goal_pos, goal_ori
'''
if feedback.event_type == InteractiveMarkerFeedback.MOUSE_UP:
'''
p = feedback.pose.position
q = feedback.pose.orientation
goal_pos = np.array([p.x,p.y,p.z])
goal_ori = np.quaternion(q.w, q.x,q.y,q.z)
def _on_shutdown():
"""
Clean shutdown controller thread when rosnode dies.
"""
global ctrl_thread, cartesian_state_sub, \
robot_state_sub, joint_command_publisher
if ctrl_thread.is_alive():
ctrl_thread.join()
robot_state_sub.unregister()
cartesian_state_sub.unregister()
joint_command_publisher.unregister()
if __name__ == "__main__":
# global goal_pos, goal_ori, ctrl_thread
rospy.init_node("ts_control_sim_only")
# if not using franka_ros_interface, you have to subscribe to the right topics
# to obtain the current end-effector state and robot jacobian for computing
# commands
cartesian_state_sub = rospy.Subscriber(
'panda_simulator/custom_franka_state_controller/tip_state',
EndPointState,
_on_endpoint_state,
queue_size=1,
tcp_nodelay=True)
robot_state_sub = rospy.Subscriber(
'panda_simulator/custom_franka_state_controller/robot_state',
RobotState,
_on_robot_state,
queue_size=1,
tcp_nodelay=True)
# create joint command message and fix its type to joint torque mode
command_msg = JointCommand()
command_msg.names = ['panda_joint1','panda_joint2','panda_joint3',\
'panda_joint4','panda_joint5','panda_joint6','panda_joint7']
command_msg.mode = JointCommand.TORQUE_MODE
# Also create a publisher to publish joint commands
joint_command_publisher = rospy.Publisher(
'panda_simulator/motion_controller/arm/joint_commands',
JointCommand,
tcp_nodelay=True,
queue_size=1)
# wait for messages to be populated before proceeding
rospy.loginfo("Subscribing to robot state topics...")
while (True):
if not (JACOBIAN is None or CARTESIAN_POSE is None):
break
rospy.loginfo("Recieved messages; Starting Demo.")
pose = copy.deepcopy(CARTESIAN_POSE)
start_pos, start_ori = pose['position'],pose['orientation']
goal_pos, goal_ori = start_pos, start_ori # set goal pose a starting pose in the beginning
# start controller thread
rospy.on_shutdown(_on_shutdown)
rate = rospy.Rate(publish_rate)
ctrl_thread = threading.Thread(target=control_thread, args = [rate])
ctrl_thread.start()
# ------------------------------------------------------------------------------------
end_target_sub = rospy.Subscriber("panda_simulator/equili_pose",PoseStamped,process_feedback,queue_size=1)
server = InteractiveMarkerServer("basic_control")
position = Point( start_pos[0], start_pos[1], start_pos[2])
marker = destination_marker.makeMarker( False, InteractiveMarkerControl.MOVE_ROTATE_3D, \
position, quaternion.as_float_array(start_ori), True)
server.insert(marker, process_feedback)
server.applyChanges()
rospy.spin()
# ------------------------------------------------------------------------------------
|
{"hexsha": "2981bd8ba691abca1967085c830d265aca3f97de", "size": 8415, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/simu/scripts/task_space_control.py", "max_stars_repo_name": "Grossbier/simulation_multirobots", "max_stars_repo_head_hexsha": "1fe00bf81932ad6de20709ad85f677f4cf196333", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/simu/scripts/task_space_control.py", "max_issues_repo_name": "Grossbier/simulation_multirobots", "max_issues_repo_head_hexsha": "1fe00bf81932ad6de20709ad85f677f4cf196333", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/simu/scripts/task_space_control.py", "max_forks_repo_name": "Grossbier/simulation_multirobots", "max_forks_repo_head_hexsha": "1fe00bf81932ad6de20709ad85f677f4cf196333", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-04T09:16:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T09:16:28.000Z", "avg_line_length": 35.8085106383, "max_line_length": 111, "alphanum_fraction": 0.6411170529, "include": true, "reason": "import numpy", "num_tokens": 1897}
|
import numpy as np
"""
Implementation of the non-separable blending modes as described in
https://www.w3.org/TR/compositing-1/#blendingnonseparable
"""
"""
four non-separable utility functions as described on the aforementioned page
Lum(C) = 0.3 x Cred + 0.59 x Cgreen + 0.11 x Cblue
ClipColor(C)
L = Lum(C)
n = min(Cred, Cgreen, Cblue)
x = max(Cred, Cgreen, Cblue)
if(n < 0)
C = L + (((C - L) * L) / (L - n))
if(x > 1)
C = L + (((C - L) * (1 - L)) / (x - L))
return C
SetLum(C, l)
d = l - Lum(C)
Cred = Cred + d
Cgreen = Cgreen + d
Cblue = Cblue + d
return ClipColor(C)
Sat(C) = max(Cred, Cgreen, Cblue) - min(Cred, Cgreen, Cblue)
"""
def _lum(_c):
"""
:param c: x by x by 3 matrix of rgb color components of pixels
:return: x by x by 3 matrix of luminosity of pixels
"""
return (_c[:, :, 0] * 0.299) + (_c[:, :, 1] * 0.587) + (_c[:, :, 2] * 0.114)
def _setLum(c_orig, l):
_c = c_orig.copy()
_l = _lum(_c)
d = l - _l
_c[:, :, 0] += d
_c[:, :, 1] += d
_c[:, :, 2] += d
_l = _lum(_c)
_n = np.min(_c, axis=2)
_x = np.max(_c, axis=2)
for i in range(_c.shape[0]):
for j in range(_c.shape[1]):
c = _c[i][j]
l = _l[i, j]
n = _n[i, j]
x = _x[i, j]
if n < 0:
_c[i][j] = l + (((c - l) * l) / (l - n))
if x > 1:
_c[i][j] = l + (((c - l) * (1 - l)) / (x - l))
return _c
def _sat(_c):
"""
:param c: x by x by 3 matrix of rgb color components of pixels
:return: int of saturation of pixels
"""
return np.max(_c, axis=2) - np.min(_c, axis=2)
# def _setSatKern(c):
# max_i = np.argmax(c)
# min_i = np.argmin(c)
# if max_i != 2 and min_i != 2:
# mid_i = 2
# elif max_i != 1 and min_i != 1:
# mid_i = 1
# else:
# mid_i = 0
#
# if c[max_i] > c[min_i]:
# c[mid_i] = (((c[mid_i] - c[min_i]) * s) / (c[max_i] - c[min_i]))
# c[max_i] = s
# else:
# c[mid_i] = 0
# c[max_i] = 0
# c[min_i] = 0
# return c
#setSatKern = np.vectorize(_setSatKern)
def _setSat(c_orig, s):
"""
Set a new saturation value for the matrix of color
The current implementation cannot be vectorized in an efficient manner, so it is very slow,
O(m*n) at least. This might be able to be improved with openCL if that is the direction that the lib takes.
:param c: x by x by 3 matrix of rgb color components of pixels
:param s: int of the new saturation value for the matrix
:return: x by x by 3 matrix of luminosity of pixels
"""
_c = c_orig.copy()
for i in range(_c.shape[0]):
for j in range(_c.shape[1]):
c = _c[i][j]
min_i = 0
mid_i = 1
max_i = 2
if c[mid_i] < c[min_i]:
min_i, mid_i = mid_i, min_i
if c[max_i] < c[mid_i]:
mid_i, max_i = max_i, mid_i
if c[mid_i] < c[min_i]:
min_i, mid_i = mid_i, min_i
if c[max_i] - c[min_i] > 0.0:
_c[i][j][mid_i] = (((c[mid_i] - c[min_i]) * s[i, j]) / (c[max_i] - c[min_i]))
_c[i][j][max_i] = s[i, j]
else:
_c[i][j][mid_i] = 0
_c[i][j][max_i] = 0
_c[i][j][min_i] = 0
return _c
import math
#
# def _general_blend(source, destination, offsets, blend_func):
# """
# This function is slightly different than the one in the blend module, because the inside functions do not use the
# alpha channel.
# """
#
# source = reshape_dest(source, destination, offsets)
#
# destination_norm = destination / 255.0
# source_norm = source / 255.0
#
# Cb = destination_norm[:, :, :3]
# Cs = source_norm[:, :, :3]
#
# comp = blend_func(Cs, Cb)
#
# # new algo, we apply the blend_func everywhere, except where dest does not exist, at all
# # (where it does not exist, we just put src)
# idxs = destination_norm[:, :, 3] == 0
#
# ratio_rs = np.reshape(np.repeat(idxs, 3), [comp.shape[0], comp.shape[1], comp.shape[2]])
# img_out = comp + (source_norm[:, :, :3] * ratio_rs)
# img_out = np.nan_to_num(np.dstack((img_out, source_norm[:, :, 3]))) # add alpha channel and replace nans
#
# return img_out * 255.0
# def _colourCompositingFormula(_as, ab, ar, Cs, Cb, Bbs):
# return (1 - (_as / ar)) * Cb + (_as / ar) * math.floor((1 - ab) * Cs + ab * Bbs)
def hue(lower_rgb, upper_rgb):
"""
Creates a color with the hue of the lower_rgb color and the saturation and luminosity of the backdrop color.
"""
return _setLum(_setSat(upper_rgb, _sat(lower_rgb)), _lum(lower_rgb))
def saturation(lower_rgb, upper_rgb):
"""
Creates a color with the saturation of the lower_rgb color and the hue and luminosity of the backdrop color.
"""
return _setLum(_setSat(lower_rgb, _sat(upper_rgb)), _lum(lower_rgb))
def color(lower_rgb, upper_rgb):
"""
Creates a color with the hue and saturation of the lower_rgb color and the luminosity of the backdrop color.
"""
return _setLum(upper_rgb, _lum(lower_rgb))
def luminosity(lower_rgb, upper_rgb):
"""
Creates a color with the luminosity of the lower_rgb color and the hue and saturation of the backdrop color.
"""
return _setLum(lower_rgb, _lum(upper_rgb))
|
{"hexsha": "7e5dc5bf042ce7414afd9f8cfd32e63a6f7a4d16", "size": 5574, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyora/BlendNonSep.py", "max_stars_repo_name": "FredHappyface/pyora-mirror", "max_stars_repo_head_hexsha": "23d90239183f18be40d1bc47ac89fa4259996cee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyora/BlendNonSep.py", "max_issues_repo_name": "FredHappyface/pyora-mirror", "max_issues_repo_head_hexsha": "23d90239183f18be40d1bc47ac89fa4259996cee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyora/BlendNonSep.py", "max_forks_repo_name": "FredHappyface/pyora-mirror", "max_forks_repo_head_hexsha": "23d90239183f18be40d1bc47ac89fa4259996cee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9275362319, "max_line_length": 119, "alphanum_fraction": 0.5398277718, "include": true, "reason": "import numpy", "num_tokens": 1766}
|
import os
import matplotlib.pyplot as plt
import numpy as np
import visdom
from tensorboardX import SummaryWriter
TENSORBOARD_DIR = 'tensorboard/runs/'
class Plotter:
def on_new_point(self, label, x, y):
pass
def on_finish(self):
pass
class MatplotlibPlotter(Plotter):
def __init__(self, title):
super(MatplotlibPlotter, self).__init__()
self.title = title
self.plots = {}
def on_new_point(self, label, x, y):
if label not in self.plots:
self.plots[label] = PlotData()
self.plots[label].x.append(x)
self.plots[label].y.append(y)
def on_finish(self):
for label in self.plots:
plt.plot(self.plots[label].x, self.plots[label].y, label=label)
plt.title(self.title)
plt.legend()
plt.show()
class VisdomPlotter(Plotter):
def __init__(self, title, plots):
super(VisdomPlotter, self).__init__()
self.title = title
self.vis = visdom.Visdom()
self.plots = set(plots)
self.vis.line(
X=np.zeros((1, len(plots))),
Y=np.zeros((1, len(plots))),
win=self.title,
opts=dict(legend=plots)
)
def on_new_point(self, label, x, y):
if label not in self.plots:
raise Exception('Plot should be in plots set!')
self.vis.line(
X=np.array([x]),
Y=np.array([y]),
win=self.title,
name=label,
update='append'
)
class TensorboardPlotter(Plotter):
def __init__(self, title):
path = os.path.join(os.getcwd(), TENSORBOARD_DIR + title)
self.writer = SummaryWriter(path)
def on_new_point(self, label, x, y):
self.writer.add_scalar(
tag=label,
scalar_value=y,
global_step=x
)
class TensorboardPlotterCombined(Plotter):
"""x is step, y is two values: one for for non terminals, one for terminals."""
def __init__(self, title):
path = os.path.join(os.getcwd(), TENSORBOARD_DIR + title)
self.writer = SummaryWriter(path)
def on_new_point(self, label, x, y):
self.writer.add_scalar(
tag=label + ' non-terminals',
scalar_value=y[0],
global_step=x
)
self.writer.add_scalar(
tag=label + ' terminals',
scalar_value=y[1],
global_step=x
)
class PlotData:
def __init__(self):
self.x = []
self.y = []
def add(self, x, y):
self.x.append(x)
self.y.append(y)
if __name__ == '__main__':
plotter = VisdomPlotter(title='x', plots=['y', 'z'])
|
{"hexsha": "7acccb65d0dca05bbb16fdcd03d18899e66d73dc", "size": 2706, "ext": "py", "lang": "Python", "max_stars_repo_path": "zerogercrnn/lib/visualization/plotter.py", "max_stars_repo_name": "zerogerc/rnn-autocomplete", "max_stars_repo_head_hexsha": "39dc8dd7c431cb8ac9e15016388ec823771388e4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-02-27T09:48:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T19:01:01.000Z", "max_issues_repo_path": "zerogercrnn/lib/visualization/plotter.py", "max_issues_repo_name": "ZeRoGerc/rnn-autocomplete", "max_issues_repo_head_hexsha": "39dc8dd7c431cb8ac9e15016388ec823771388e4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zerogercrnn/lib/visualization/plotter.py", "max_forks_repo_name": "ZeRoGerc/rnn-autocomplete", "max_forks_repo_head_hexsha": "39dc8dd7c431cb8ac9e15016388ec823771388e4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.9469026549, "max_line_length": 83, "alphanum_fraction": 0.5668883962, "include": true, "reason": "import numpy", "num_tokens": 654}
|
import numpy as np
try:
import keplertools.Cyeccanom
haveCyeccanom = True
except ImportError:
haveCyeccanom = False
pass
def eccanom(M, e, epsmult=4.01, maxIter=100, returnIter=False, noc=False):
"""Finds eccentric anomaly from mean anomaly and eccentricity
This method uses Newton-Raphson iteration to find the eccentric
anomaly from mean anomaly and eccentricity, assuming a closed (0<e<1)
orbit.
Args:
M (float or ndarray):
mean anomaly (rad)
e (float or ndarray):
eccentricity (eccentricity may be a scalar if M is given as
an array, but otherwise must match the size of M.)
epsmult (float):
Precision of convergence (multiplied by precision of floating data type).
Optional, defaults to 4.01.
maxiter (int):
Maximum numbr of iterations. Optional, defaults to 100.
returnIter (bool):
Return number of iterations (defaults false, only available in python version)
noc (bool):
Don't use C version even if it can be loaded.
Returns:
tuple:
E (float or ndarray):
eccentric anomaly (rad)
numIter (int):
Number of iterations (returned only if returnIter=True)
Notes:
If either M or e are scalar, and the other input is an array, the scalar input
will be expanded to the same size array as the other input. So, a scalar M
and array e will result in the calculation of the eccentric anomaly for one
mean anomaly at a variety of eccentricities, and a scalar e and array M input
will result in the calculation of eccentric anomalies for one eccentricity at
a variety of mean anomalies. If both inputs are arrays then they are matched
element by element.
"""
noc = noc and haveCyeccanom
# make sure M and e are of the correct format.
# if either is scalar, expand to match sizes
M = np.array(M, ndmin=1).astype(float).flatten()
e = np.array(e, ndmin=1).astype(float).flatten()
if e.size != M.size:
if e.size == 1:
e = np.array([e[0]] * len(M))
if M.size == 1:
M = np.array([M[0]] * len(e))
assert e.shape == M.shape, "Incompatible inputs."
assert np.all((e >= 0) & (e < 1)), "e defined outside [0,1)"
# force M into [0, 2*pi)
M = np.mod(M, 2 * np.pi)
if noc:
# initial values for E
E = M / (1 - e)
mask = e * E ** 2 > 6 * (1 - e)
E[mask] = (6 * M[mask] / e[mask]) ** (1.0 / 3.0)
# Newton-Raphson setup
tolerance = np.finfo(float).eps * epsmult
numIter = 0
err = 1.0
while err > tolerance and numIter < maxIter:
E = E - (M - E + e * np.sin(E)) / (e * np.cos(E) - 1)
err = np.max(abs(M - (E - e * np.sin(E))))
numIter += 1
if numIter == maxIter:
raise Exception("eccanom failed to converge. Final error of %e" % err)
else:
E = keplertools.Cyeccanom.Cyeccanom(M, e, epsmult, maxIter)
returnIter = False
if returnIter:
return E, numIter
else:
return E
def trueanom(E, e):
"""Finds true anomaly from eccentric anomaly and eccentricity
The implemented method corresponds to Eq. 6.28 in Green assuming a closed
(0<e<1) orbit.
Args:
E (float or ndarray):
eccentric anomaly (rad)
e (float or ndarray):
eccentricity (eccentricity may be a scalar if M is given as
an array, but otherwise must match the size of M.)
Returns:
ndarray:
true anomaly (rad)
Notes:
If either E or e are scalar, and the other input is an array, the scalar
input will be expanded to the same size array as the other input.
"""
E = np.array(E, ndmin=1).astype(float).flatten()
e = np.array(e, ndmin=1).astype(float).flatten()
if e.size != E.size:
if e.size == 1:
e = np.array([e[0]] * len(E))
if E.size == 1:
E = np.array([E[0]] * len(e))
assert e.shape == E.shape, "Incompatible inputs."
assert np.all((e >= 0) & (e < 1)), "e defined outside [0,1)"
nu = 2.0 * np.arctan(np.sqrt((1.0 + e) / (1.0 - e)) * np.tan(E / 2.0))
nu[nu < 0] += 2 * np.pi
return nu
def vec2orbElem2(rs, vs, mus):
"""Convert position and velocity vectors to Keplerian orbital elements
Implements the algorithm from Vallado
Args:
rs (ndarray):
3n x 1 stacked initial position vectors:
[r1(1);r1(2);r1(3);r2(1);r2(2)r2(3);...;rn(1);rn(2);rn(3)]
or 3 x n or n x 3 matrix of position vecotrs.
vs (ndarray):
3n x 1 stacked initial velocity vectors or 3 x n or n x3 matrix
mus (ndarray or float)
nx1 array of gravitational parameters (G*m_i) where G is the
gravitational constant and m_i is the mass of the ith body.
if all vectors represent the same body, mus may be a scalar.
Returns:
tuple:
a (ndarray):
Semi-major axes
e (ndarray):
eccentricities
E (ndarray):
eccentric anomalies
O (ndarray):
longitudes of ascending nodes (rad)
I (ndarray):
inclinations (rad)
w (ndarray):
arguments of pericenter (rad)
P (ndarray):
orbital periods
tau (ndarray):
time of periapsis crossing
Notes:
All units must be complementary, i.e., if positions are in AU, and time is in
days, vs must be in AU/day, mus must be in AU^3/day^2
"""
assert (np.mod(rs.size, 3) == 0) and (
vs.size == rs.size
), "rs and vs must be of the same size and contain 3n elements."
nplanets = rs.size / 3.0
assert np.isscalar(mus) or mus.size == nplanets, "mus must be scalar or of size n"
assert rs.ndim < 3, "rs cannot have more than two dimensions"
if rs.ndim == 1:
rs = np.reshape(rs, (nplanets, 3)).T
else:
assert 3 in rs.shape, "rs must be 3xn or nx3"
if rs.shape[0] != 3:
rs = rs.T
assert vs.ndim < 3, "vs cannot have more than two dimensions"
if vs.ndim == 1:
vs = np.reshape(vs, (nplanets, 3)).T
else:
assert 3 in vs.shape, "vs must be 3xn or nx3"
if vs.shape[0] != 3:
vs = vs.T
v2s = np.sum(vs ** 2.0, axis=0) # orbital velocity squared
rmag = np.sqrt(np.sum(rs ** 2.0, axis=0)) # orbital radius
hvec = np.vstack(
(
rs[1] * vs[2] - rs[2] * vs[1],
rs[2] * vs[0] - rs[0] * vs[2],
rs[0] * vs[1] - rs[1] * vs[0],
)
) # angular momentum vector
nvec = np.vstack(
(-hvec[1], hvec[0], np.zeros(len(hvec[2])))
) # node-pointing vector
evec = (
np.tile((v2s - mus / rmag) / mus, (3, 1)) * rs
- np.tile(np.sum(rs * vs, axis=0) / mus, (3, 1)) * vs
) # eccentricity vector
nmag = np.sqrt(np.sum(nvec ** 2.0, axis=0))
e = np.sqrt(np.sum(evec ** 2.0, axis=0))
En = v2s / 2 - mus / rmag
a = -mus / 2 / En
ell = a * (1 - e ** 2)
if np.any(e == 1):
tmp = np.sum(hvec ** 2.0, axis=0) / mus
ell[e == 1] = tmp[e == 1]
# angles
I = np.arccos(hvec[2] / np.sqrt(np.sum(hvec ** 2.0, axis=0)))
O = np.arccos(nvec[0] / nmag)
O[nvec[2] < 0] = 2 * np.pi - O[nvec[2] < 0]
w = np.arccos(np.sum(nvec * evec, axis=0) / e / nmag)
w[evec[2] < 0] = 2 * np.pi - w[evec[2] < 0]
# ecentric anomaly
cosE = (1.0 - rmag / a) / e
sinE = np.sum(rs * vs, axis=0) / (e * np.sqrt(mus * a))
E = np.mod(np.arctan2(sinE, cosE), 2 * np.pi)
# orbital periods
P = 2 * np.pi * np.sqrt(a ** 3.0 / mus)
# time of periapsis crossing
tau = -(E - e * np.sin(E)) / np.sqrt(mus * a ** -3.0)
return a, e, E, O, I, w, P, tau
def vec2orbElem(rs, vs, mus):
"""Convert position and velocity vectors to Keplerian orbital elements
Implements the (corrected) algorithm from Vinti
Args:
rs (ndarray):
3n x 1 stacked initial position vectors:
[r1(1);r1(2);r1(3);r2(1);r2(2)r2(3);...;rn(1);rn(2);rn(3)]
or 3 x n or n x 3 matrix of position vecotrs.
vs (ndarray):
3n x 1 stacked initial velocity vectors or 3 x n or n x3 matrix
mus (ndarray or float)
nx1 array of gravitational parameters (G*m_i) where G is the
gravitational constant and m_i is the mass of the ith body.
if all vectors represent the same body, mus may be a scalar.
Returns:
tuple:
a (ndarray):
Semi-major axes
e (ndarray):
eccentricities
E (ndarray):
eccentric anomalies
O (ndarray):
longitudes of ascending nodes (rad)
I (ndarray):
inclinations (rad)
w (ndarray):
arguments of pericenter (rad)
P (ndarray):
orbital periods
tau (ndarray):
time of periapsis crossing
Notes:
All units must be complementary, i.e., if positions are in AU, and time is in
days, vs must be in AU/day, mus must be in AU^3/day^2
"""
assert (np.mod(rs.size, 3) == 0) and (
vs.size == rs.size
), "rs and vs must be of the same size and contain 3n elements."
nplanets = rs.size / 3.0
assert np.isscalar(mus) or mus.size == nplanets, "mus must be scalar or of size n"
assert rs.ndim < 3, "rs cannot have more than two dimensions"
if rs.ndim == 1:
rs = np.reshape(rs, (nplanets, 3)).T
else:
assert 3 in rs.shape, "rs must be 3xn or nx3"
if rs.shape[0] != 3:
rs = rs.T
assert vs.ndim < 3, "vs cannot have more than two dimensions"
if vs.ndim == 1:
vs = np.reshape(vs, (nplanets, 3)).T
else:
assert 3 in vs.shape, "vs must be 3xn or nx3"
if vs.shape[0] != 3:
vs = vs.T
v2s = np.sum(vs ** 2.0, axis=0) # orbital velocity squared
r = np.sqrt(np.sum(rs ** 2.0, axis=0)) # orbital radius
Ws = 0.5 * v2s - mus / r # Keplerian orbital energy
a = -mus / 2.0 / Ws
# semi-major axis
L = np.vstack(
(
rs[1] * vs[2] - rs[2] * vs[1],
rs[2] * vs[0] - rs[0] * vs[2],
rs[0] * vs[1] - rs[1] * vs[0],
)
) # angular momentum vector
L2s = np.sum(L ** 2.0, axis=0) # angular momentum squared
Ls = np.sqrt(L2s) # angular momentum
p = L2s / mus # semi-parameter
e = np.sqrt(1.0 - p / a) # eccentricity
# ecentric anomaly
cosE = (1.0 - r / a) / e
sinE = np.sum(rs * vs, axis=0) / (e * np.sqrt(mus * a))
E = np.mod(np.arctan2(sinE, cosE), 2 * np.pi)
# inclination (strictly in (0,pi))
I = np.arccos(L[2] / Ls)
sinI = np.sqrt(L[0] ** 2 + L[1] ** 2.0) / Ls
# argument of pericenter
esinwsinI = (vs[0] * L[1] - vs[1] * L[0]) / mus - rs[2] / r
ecoswsinI = (Ls * vs[2, :]) / mus - (L[0] * rs[1] - L[1] * rs[0]) / (Ls * r)
w = np.mod(np.arctan2(esinwsinI, ecoswsinI), 2 * np.pi)
# longitude of ascending node
cosO = -L[1] / (Ls * sinI)
sinO = L[0] / (np.sqrt(L2s) * sinI)
O = np.mod(np.arctan2(sinO, cosO), 2 * np.pi)
# orbital periods
P = 2 * np.pi * np.sqrt(a ** 3.0 / mus)
# time of periapsis crossing
tau = -(E - e * np.sin(E)) / np.sqrt(mus * a ** -3.0)
return a, e, E, O, I, w, P, tau
def calcAB(a, e, O, I, w):
"""Calculate inertial frame components of perifocal frame unit vectors scaled
by orbit semi-major and semi-minor axes.
Note that these quantities are closely related to the Thiele-Innes constants
Args:
a (ndarray):
Semi-major axes
e (ndarray):
eccentricities
O (ndarray):
longitudes of ascending nodes (rad)
I (ndarray):
inclinations (rad)
w (ndarray):
arguments of pericenter (rad)
Returns:
tuple:
A (ndarray):
Components of eccentricity vector scaled by a
B (ndarray):
Components of q vector (orthogonal to e and h) scaled by b (=a\sqrt{1-e^2})
Notes:
All inputs must be of same size. Outputs are 3xn for n input points.
See Vinti (1998) for details on element/coord sys defintions.
"""
assert a.size == e.size == O.size == I.size == w.size
A = np.vstack(
(
a * (np.cos(O) * np.cos(w) - np.sin(O) * np.cos(I) * np.sin(w)),
a * (np.sin(O) * np.cos(w) + np.cos(O) * np.cos(I) * np.sin(w)),
a * np.sin(I) * np.sin(w),
)
)
B = np.vstack(
(
-a
* np.sqrt(1 - e ** 2)
* (np.cos(O) * np.sin(w) + np.sin(O) * np.cos(I) * np.cos(w)),
a
* np.sqrt(1 - e ** 2)
* (-np.sin(O) * np.sin(w) + np.cos(O) * np.cos(I) * np.cos(w)),
a * np.sqrt(1 - e ** 2) * np.sin(I) * np.cos(w),
)
)
return A, B
def orbElem2vec(E, mus, orbElem=None, AB=None, returnAB=False):
"""Convert Keplerian orbital elements to position and velocity vectors
Args:
E (ndarray)
nx1 array of eccentric anomalies (rad)
mus (ndarray or float)
nx1 array of gravitational parameters (G*m_i) where G is the
gravitational constant and m_i is the mass of the ith body.
if all vectors represent the same body, mus may be a scalar.
orbElem (tuple):
(a,e,O,I,w) Exact inputs to calcAB. Either this or AB input must be set
AB (tuple):
(A,B) Exact outpus from calcAB
returnAB (bool):
Default False. If True, returns (A,B) as thrid output.
Returns:
tuple:
rs (ndarray):
3 x n stacked position vectors
vs (ndarray):
3 x n stacked velocity vectors
AB (tuple):
(A,B)
Notes:
All units are complementary, i.e., if mus are in AU^3/day^2 then
positions will be in AU, and velocities will be AU/day.
Possible combinations or inputs are:
1. E scalar, mu scalar - single body, single position.
A, B should be 3x1 (or orbElem should be all scalars).
2. E vector, mu scalar - single body, many orbital positions.
A, B should be 3x1 (or orbElem should be all scalars).
3. E vector, mu vector - multiple bodies at varying orbital positions.
A, B should be 3xn where E.size==n (or all orbElem should be size n)
and mus.size must equal E.size.
"""
assert (orbElem is not None) or (
AB is not None
), "You must supply either orbElem or AB inputs."
if np.isscalar(E):
assert np.isscalar(mus), "Scalar E input requires scalar mus input (one body)."
E = np.array(E, ndmin=1)
else:
assert np.isscalar(mus) or (
mus.size == E.size
), "mus must be of the same size as E or scalar."
if orbElem is not None:
assert AB is None, "You can only set orbElem or AB."
A, B = calcAB(orbElem[0], orbElem[1], orbElem[2], orbElem[3], orbElem[4])
a = orbElem[0]
e = orbElem[1]
if AB is not None:
assert orbElem is None, "You can only set orbElem or AB."
A = AB[0]
B = AB[1]
a = np.linalg.norm(A, axis=0)
e = np.sqrt(1 - (np.linalg.norm(B, axis=0) / a) ** 2.0)
if np.isscalar(E) or np.isscalar(mus):
assert (A.size == 3) and (
B.size == 3
), "A and B must be 3x1 for scalar E or mu (one body)."
if not (np.isscalar(E)) and not (np.isscalar(mus)):
assert (A.size == 3 * E.size) and (
B.size == 3 * E.size
), "A and B must be 3xn for vector E (multiple bodies)."
if np.isscalar(mus) and not (np.isscalar(E)):
r = np.matmul(A, np.array((np.cos(E) - e), ndmin=2)) + np.matmul(
B, np.array(np.sin(E), ndmin=2)
)
v = (
np.matmul(-A, np.array(np.sin(E), ndmin=2))
+ np.matmul(B, np.array(np.cos(E), ndmin=2))
) * np.tile(np.sqrt(mus * a ** (-3.0)) / (1 - e * np.cos(E)), (3, 1))
else:
r = np.matmul(A, np.diag(np.cos(E) - e)) + np.matmul(B, np.diag(np.sin(E)))
v = np.matmul(
np.matmul(-A, np.diag(np.sin(E))) + np.matmul(B, np.diag(np.cos(E))),
np.diag(np.sqrt(mus * a ** (-3.0)) / (1 - e * np.cos(E))),
)
if returnAB:
return r, v, (A, B)
else:
return r, v
|
{"hexsha": "02baff70ee2a7c272b70f5a9e764c254a02bae38", "size": 16926, "ext": "py", "lang": "Python", "max_stars_repo_path": "keplertools/fun.py", "max_stars_repo_name": "dsavransky/keplertools", "max_stars_repo_head_hexsha": "52de5f7ec6cb57a6363a6fac1925e39c10391b49", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "keplertools/fun.py", "max_issues_repo_name": "dsavransky/keplertools", "max_issues_repo_head_hexsha": "52de5f7ec6cb57a6363a6fac1925e39c10391b49", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "keplertools/fun.py", "max_forks_repo_name": "dsavransky/keplertools", "max_forks_repo_head_hexsha": "52de5f7ec6cb57a6363a6fac1925e39c10391b49", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-11T15:59:02.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-11T15:59:02.000Z", "avg_line_length": 32.9941520468, "max_line_length": 91, "alphanum_fraction": 0.5353302611, "include": true, "reason": "import numpy", "num_tokens": 5091}
|
'''
intra_blob recursively evaluates each blob for three forks of extended internal cross-comparison and sub-clustering:
- comp_r: incremental range cross-comp in low-variation flat areas of +v--vg: the trigger is positive deviation of negated -vg,
- comp_a: angle cross-comp in high-variation edge areas of positive deviation of gradient, forming gradient of angle,
- slice_blob forms roughly edge-orthogonal Ps, their stacks evaluated for rotation, comp_d, and comp_slice
Please see diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/intra_blob_scheme.png
Blob structure, for all layers of blob hierarchy:
root_dert__,
Dert = A, Ly, I, Dy, Dx, G, M, Day, Dax, Ga, Ma
# A: area, Ly: vertical dimension, I: input; Dy, Dx: renamed Gy, Gx; G: gradient; M: match; Day, Dax, Ga, Ma: angle Dy, Dx, G, M
sign,
box, # y0, yn, x0, xn
dert__, # box of derts, each = i, dy, dx, g, m, day, dax, ga, ma
# next fork:
f_root_a, # flag: input is from comp angle
f_comp_a, # flag: current fork is comp angle
rdn, # redundancy to higher layers
rng, # comparison range
sub_layers # [sub_blobs ]: list of layers across sub_blob derivation tree
# deeper layers are nested, multiple forks: no single set of fork params?
'''
import numpy as np
from frame_blobs import assign_adjacents, flood_fill, CBlob
from intra_comp import comp_r, comp_a
from draw_frame_blobs import visualize_blobs
from itertools import zip_longest
from comp_slice_ import *
from slice_utils import *
# filters, All *= rdn:
ave = 50 # fixed cost per dert, from average m, reflects blob definition cost, may be different for comp_a?
aveB = 50 # fixed cost per intra_blob comp and clustering
# --------------------------------------------------------------------------------------------------------------
# functions:
def intra_blob(blob, **kwargs): # slice_blob or recursive input rng+ | angle cross-comp within input blob
Ave = int(ave * blob.rdn)
AveB = int(aveB * blob.rdn)
verbose = kwargs.get('verbose')
if kwargs.get('render') is not None: # don't render small blobs
if blob.A < 100: kwargs['render'] = False
spliced_layers = [] # to extend root_blob sub_layers
if blob.f_root_a:
# root fork is comp_a -> slice_blob
if blob.mask__.shape[0] > 2 and blob.mask__.shape[1] > 2 and False in blob.mask__: # min size in y and x, at least one dert in dert__
if (-blob.M * blob.Ma - AveB > 0) and blob.Dx: # vs. G reduced by Ga: * (1 - Ga / (4.45 * A)), max_ga=4.45
blob.f_comp_a = 0
blob.prior_forks.extend('p')
if kwargs.get('verbose'): print('\nslice_blob fork\n')
derP_ = slice_blob(blob, []) # cross-comp of vertically consecutive Ps in selected stacks
blob.PP_ = derP_2_PP_(derP_, blob.PP_) # form vertically contiguous patterns of patterns
else:
# root fork is frame_blobs or comp_r
ext_dert__, ext_mask__ = extend_dert(blob) # dert__ boundaries += 1, for cross-comp in larger kernels
if blob.G > AveB: # comp_a fork, replace G with borrow_M when known
adert__, mask__ = comp_a(ext_dert__, Ave, blob.prior_forks, ext_mask__) # compute ma and ga
blob.f_comp_a = 1
if kwargs.get('verbose'): print('\na fork\n')
blob.prior_forks.extend('a')
if mask__.shape[0] > 2 and mask__.shape[1] > 2 and False in mask__: # min size in y and x, least one dert in dert__
sign__ = adert__[3] * adert__[8] > 0 # g * (ma / ave: deviation rate, no independent value, not co-measurable with g)
dert__ = tuple([adert__[0], adert__[1], adert__[2], adert__[3], adert__[4],
adert__[5][0], adert__[5][1], adert__[6][0], adert__[6][1],
adert__[7], adert__[8]]) # flatten adert
cluster_sub_eval(blob, dert__, sign__, mask__, **kwargs) # forms sub_blobs of sign in unmasked area
spliced_layers = [spliced_layers + sub_layers for spliced_layers, sub_layers in
zip_longest(spliced_layers, blob.sub_layers, fillvalue=[])]
elif blob.M > AveB * 1.2: # comp_r fork, ave M = ave G * 1.2
dert__, mask__ = comp_r(ext_dert__, Ave, blob.f_root_a, ext_mask__)
blob.f_comp_a = 0
if kwargs.get('verbose'): print('\na fork\n')
blob.prior_forks.extend('r')
if mask__.shape[0] > 2 and mask__.shape[1] > 2 and False in mask__: # min size in y and x, at least one dert in dert__
sign__ = dert__[4] > 0 # m__ is inverse deviation of SAD
cluster_sub_eval(blob, dert__, sign__, mask__, **kwargs) # forms sub_blobs of sign in unmasked area
spliced_layers = [spliced_layers + sub_layers for spliced_layers, sub_layers in
zip_longest(spliced_layers, blob.sub_layers, fillvalue=[])]
return spliced_layers
def cluster_sub_eval(blob, dert__, sign__, mask__, **kwargs): # comp_r or comp_a eval per sub_blob:
AveB = aveB * blob.rdn
sub_blobs, idmap, adj_pairs = flood_fill(dert__, sign__, verbose=False, mask__=mask__, blob_cls=CBlob, accum_func=accum_blob_Dert)
assign_adjacents(adj_pairs, CBlob)
if kwargs.get('render', False):
visualize_blobs(idmap, sub_blobs, winname=f"Deep blobs (f_comp_a = {blob.f_comp_a}, f_root_a = {blob.f_root_a})")
blob.Ls = len(sub_blobs) # for visibility and next-fork rdn
blob.sub_layers = [sub_blobs] # 1st layer of sub_blobs
for sub_blob in sub_blobs: # evaluate sub_blob
G = blob.G # Gr, Grr...
adj_M = blob.adj_blobs[3] # adj_M is incomplete, computed within current dert_only, use root blobs instead:
# adjacent valuable blobs of any sign are tracked from frame_blobs to form borrow_M?
# track adjacency of sub_blobs: wrong sub-type but right macro-type: flat blobs of greater range?
# G indicates or dert__ extend per blob G?
borrow_M = min(G, adj_M / 2)
sub_blob.prior_forks = blob.prior_forks.copy() # increments forking sequence: g->a, g->a->p, etc.
if sub_blob.G > AveB: # replace with borrow_M when known
# comp_a:
sub_blob.f_root_a = 1
sub_blob.a_depth += blob.a_depth # accumulate a depth from blob to sub_blob, currently not used
sub_blob.rdn = sub_blob.rdn + 1 + 1 / blob.Ls
blob.sub_layers += intra_blob(sub_blob, **kwargs)
elif sub_blob.M - borrow_M > AveB:
# comp_r:
sub_blob.rng = blob.rng * 2
sub_blob.rdn = sub_blob.rdn + 1 + 1 / blob.Ls
blob.sub_layers += intra_blob(sub_blob, **kwargs)
def extend_dert(blob): # extend dert borders (+1 dert to boundaries)
y0, yn, x0, xn = blob.box # extend dert box:
rY, rX = blob.root_dert__[0].shape # higher dert size
# determine pad size
y0e = max(0, y0 - 1)
yne = min(rY, yn + 1)
x0e = max(0, x0 - 1)
xne = min(rX, xn + 1) # e is for extended
# take ext_dert__ from part of root_dert__
ext_dert__ = []
for dert in blob.root_dert__:
if type(dert) == list: # tuple of 2 for day, dax - (Dyy, Dyx) or (Dxy, Dxx)
ext_dert__.append(dert[0][y0e:yne, x0e:xne])
ext_dert__.append(dert[1][y0e:yne, x0e:xne])
else:
ext_dert__.append(dert[y0e:yne, x0e:xne])
ext_dert__ = tuple(ext_dert__) # change list to tuple
# extended mask__
ext_mask__ = np.pad(blob.mask__,
((y0 - y0e, yne - yn),
(x0 - x0e, xne - xn)),
constant_values=True, mode='constant')
return ext_dert__, ext_mask__
def accum_blob_Dert(blob, dert__, y, x):
blob.I += dert__[0][y, x]
blob.Dy += dert__[1][y, x]
blob.Dx += dert__[2][y, x]
blob.G += dert__[3][y, x]
blob.M += dert__[4][y, x]
if len(dert__) > 5: # past comp_a fork
blob.Dyy += dert__[5][y, x]
blob.Dyx += dert__[6][y, x]
blob.Dxy += dert__[7][y, x]
blob.Dxx += dert__[8][y, x]
blob.Ga += dert__[9][y, x]
blob.Ma += dert__[10][y, x]
|
{"hexsha": "81dc72276b2041b06ce127471003d48bfcf86148", "size": 8351, "ext": "py", "lang": "Python", "max_stars_repo_path": "frame_2D_alg/intra_blob.py", "max_stars_repo_name": "aqibmumtaz/CogAlg", "max_stars_repo_head_hexsha": "36009f456be93833dd44038c8adbd99b6037383c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-30T00:09:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-31T02:31:41.000Z", "max_issues_repo_path": "frame_2D_alg/intra_blob.py", "max_issues_repo_name": "aqibmumtaz/CogAlg", "max_issues_repo_head_hexsha": "36009f456be93833dd44038c8adbd99b6037383c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "frame_2D_alg/intra_blob.py", "max_forks_repo_name": "aqibmumtaz/CogAlg", "max_forks_repo_head_hexsha": "36009f456be93833dd44038c8adbd99b6037383c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3858695652, "max_line_length": 142, "alphanum_fraction": 0.615255658, "include": true, "reason": "import numpy", "num_tokens": 2402}
|
#include "converter.h"
#include <string>
#include <iostream>
#include <sstream>
#include <boost/gil/image.hpp>
#include <boost/gil/typedefs.hpp>
#include <boost/gil/io/io.hpp>
#include <boost/gil/extension/io/jpeg.hpp>
#include <boost/gil/extension/io/png.hpp>
#include "utils.h"
using namespace boost::gil;
using namespace std;
ImageType resolveImageType(const string ext){
if (ext == ".jpg" | ext==".jpeg"){
return JpegImage;
} else if (ext == ".png"){
return PngImage;
} else {
return UnknownImage;
}
}
void convert_image(char** inp, int& insize, ImageType intype, char** outp,
int& outsize, ImageType outtype){
cout << "Decoding image..." << endl;
cout << "First 10 bytes: ";
utils::print_first_bytes(*inp, 10);
rgb8_image_t imageData;
char_array_to_image(inp, insize, imageData, intype);
cout << "Image decoded! Encoding..." << endl;
image_to_char_array(imageData, outtype, outp, outsize);
cout << "Image encoded to desired format!" << endl;
}
void char_array_to_image(char **arr, int arr_size, rgb8_image_t &img, ImageType type) {
stringstream arr_stream(ios_base::in | ios_base::out | ios_base::binary);
arr_stream.write(*arr, arr_size);
arr_stream.seekg(0);
switch (type) {
case JpegImage:
read_image( arr_stream, img, jpeg_tag() );
break;
case PngImage:
read_image( arr_stream, img, png_tag() );
break;
default:
cout << "Cannot convert input image type! Exiting." << endl;
break;
}
}
void image_to_char_array(rgb8_image_t &img, ImageType type, char **arr, int &size){
stringstream arr_stream(ios_base::in | ios_base::out | ios_base::binary);
switch (type) {
case JpegImage:
write_view(arr_stream, view(img), jpeg_tag() );
break;
case PngImage:
write_view(arr_stream, view(img), png_tag() );
break;
default:
cout << "Cannot write to output image type! Exiting." << endl;
break;
}
arr_stream.seekg(0, ios::end);
size = arr_stream.tellg();
cout << "Allocating " << to_string(size/1024) << "KB for the output image."<< endl;
// allocate memory for file
*arr = new char [size];
arr_stream.seekg (0, ios::beg);
arr_stream.read (*arr, size);
}
|
{"hexsha": "03f910cdd3321cdb7cb81c2a3ed7dc27e5435daa", "size": 2244, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/converter.cpp", "max_stars_repo_name": "kopytjuk/wasm-image-converter", "max_stars_repo_head_hexsha": "5f20492a9ce60e942b6432ce251618908afbc4f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-09-21T16:43:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-21T16:52:34.000Z", "max_issues_repo_path": "src/converter.cpp", "max_issues_repo_name": "kopytjuk/wasm-image-converter", "max_issues_repo_head_hexsha": "5f20492a9ce60e942b6432ce251618908afbc4f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/converter.cpp", "max_forks_repo_name": "kopytjuk/wasm-image-converter", "max_forks_repo_head_hexsha": "5f20492a9ce60e942b6432ce251618908afbc4f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.375, "max_line_length": 87, "alphanum_fraction": 0.6573083779, "num_tokens": 612}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.