text stringlengths 0 27.1M | meta dict |
|---|---|
#!/usr/bin/env Rscript
# Volcano-style plots of -log10(p.Value) versus fold_change for three result
# sheets, written to large-format PDFs in every combination the original
# script produced (1+2, 3, 1+2+3, 2, 1).

res1 <- read.csv("./sheet1.csv", sep = ",", header = TRUE)
res2 <- read.csv("./sheet2.csv", sep = ",", header = TRUE)
res3 <- read.csv("./sheet3.csv", sep = ",", header = TRUE)

img_xlab <- "log2(fold_change)"
img_ylab <- "-log10(p.Value)"

# Draw one PDF with the given data frames overlaid in the given colours.
#   path     : output PDF file path
#   datasets : list of data frames, each with fold_change and p.Value columns
#   colors   : one plotting colour per data frame
#   title    : caption text placed above the plot
plot_sheets <- function(path, datasets, colors, title) {
  pdf(path, width = 60, height = 40)
  par(mai = c(5, 5, 5, 5), mgp = c(3, 3, 0))
  for (i in seq_along(datasets)) {
    d <- datasets[[i]]
    if (i == 1) {
      # First data frame establishes the axes.
      with(d, plot(fold_change, -log10(p.Value), pch = 19, cex = 2,
                   cex.axis = 4, main = "", xlab = "", ylab = "",
                   col = colors[i]))
    } else {
      with(d, points(fold_change, -log10(p.Value), pch = 19, cex = 2,
                     col = colors[i]))
    }
  }
  mtext(title, side = 3, line = 8, cex = 6)
  mtext(img_xlab, side = 1, line = 8, cex = 6)
  mtext(img_ylab, side = 2, line = 8, cex = 6)
  dev.off()
}

plot_sheets("./image1.pdf", list(res1, res2), c("black", "red"),
            "black dots of sheet1 red dots of sheet2")
plot_sheets("./image2.pdf", list(res3), c("green"),
            "green dots of sheet3")
plot_sheets("./image3.pdf", list(res1, res2, res3), c("black", "red", "green"),
            "black dots of sheet1,red dots of sheet2 ,green dots of sheet3")
plot_sheets("./image4.pdf", list(res2), c("red"),
            "red dots of sheet2")
plot_sheets("./image5.pdf", list(res1), c("black"),
            "black dots of sheet1")
| {
"alphanum_fraction": 0.6591111111,
"author": null,
"avg_line_length": 32.1428571429,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "97704df09ff2eb2462e6c5a291a8533501d1827e",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f03cb266556a5de5cb86dfb1dbeb9ee98215833a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hongshunyang/biotools.plot",
"max_forks_repo_path": "tools/custom/plotCustom.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f03cb266556a5de5cb86dfb1dbeb9ee98215833a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hongshunyang/biotools.plot",
"max_issues_repo_path": "tools/custom/plotCustom.r",
"max_line_length": 107,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f03cb266556a5de5cb86dfb1dbeb9ee98215833a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hongshunyang/biotools.plot",
"max_stars_repo_path": "tools/custom/plotCustom.r",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 916,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2250
} |
import numpy as np
import matplotlib.pyplot as plt
#import katfile
from matplotlib.backends.backend_pdf import PdfPages
import optparse
import katholog
import os
def radial_data(data,annulus_width=1,working_mask=None,x=None,y=None,rmax=None):
    """
    r = radial_data(data,annulus_width,working_mask,x,y)
    A function to reduce an image to a radial cross-section.
    INPUT:
    ------
    data - whatever data you are radially averaging.  Data is
        binned into a series of annuli of width 'annulus_width'
        pixels.
    annulus_width - width of each annulus.  Default is 1.
    working_mask - array of same size as 'data', with zeros at
        whichever 'data' points you don't want included
        in the radial data computations.
    x,y - coordinate system in which the data exists (used to set
        the center of the data).  By default, these are set to
        integer meshgrids
    rmax -- maximum radial value over which to compute statistics
    OUTPUT:
    -------
    r - a data structure containing the following
        statistics, computed across each annulus:
        .r      - the radial coordinate used (outer edge of annulus)
        .mean   - mean of the data in the annulus
        .std    - standard deviation of the data in the annulus
        .median - median value in the annulus
        .max    - maximum value in the annulus
        .min    - minimum value in the annulus
        .per10  - 10% percentile value in the annulus
        .per25  - 25% percentile value in the annulus
        .numel  - number of elements in the annulus
    """
    # 2010-03-10 19:22 IJC: Ported to python from Matlab
    # 2005/12/19 Added 'working_region' option (IJC)
    # 2005/12/15 Switched order of outputs (IJC)
    # 2005/12/12 IJC: Removed decifact, changed name, wrote comments.
    # 2005/11/04 by Ian Crossfield at the Jet Propulsion Laboratory
    import numpy as ny
    class radialDat:
        """Empty object container for the per-annulus statistics."""
        def __init__(self):
            self.mean = None
            self.std = None
            self.median = None
            self.numel = None
            self.max = None
            self.min = None
            self.r = None
    #---------------------
    # Set up input parameters
    #---------------------
    # Use `is None`: `== None` on a numpy array compares element-wise and is
    # not a validity test (and is deprecated for this purpose).
    data = ny.array(data)
    if working_mask is None:
        working_mask = ny.ones(data.shape,bool)
    npix, npiy = data.shape
    if x is None or y is None:
        x1 = ny.arange(-npix/2.,npix/2.)
        y1 = ny.arange(-npiy/2.,npiy/2.)
        x,y = ny.meshgrid(y1,x1)
    r = abs(x+1j*y)   # radial distance of every pixel from the grid centre
    if rmax is None:
        rmax = r[working_mask].max()
    #---------------------
    # Prepare the data container
    #---------------------
    # dr is a scalar bin width; the original wrapped it in a length-1 array,
    # which relied on deprecated array-to-scalar coercion inside ny.arange.
    dr = ny.abs(x[0,0] - x[0,1]) * annulus_width
    radial = ny.arange(rmax/dr)*dr + dr/2.   # annulus mid-radii
    nrad = len(radial)
    radialdata = radialDat()
    radialdata.mean = ny.zeros(nrad)
    radialdata.std = ny.zeros(nrad)
    radialdata.median = ny.zeros(nrad)
    radialdata.numel = ny.zeros(nrad)
    radialdata.max = ny.zeros(nrad)
    radialdata.per10 = ny.zeros(nrad)
    radialdata.per25 = ny.zeros(nrad)
    radialdata.min = ny.zeros(nrad)
    radialdata.r = radial
    #---------------------
    # Loop through the bins
    #---------------------
    for irad in range(nrad): #= 1:numel(radial)
        minrad = irad*dr
        maxrad = minrad + dr
        thisindex = (r>=minrad) * (r<maxrad) * working_mask
        if not thisindex.ravel().any():
            # Empty annulus: every statistic is NaN.
            radialdata.mean[irad] = ny.nan
            radialdata.std[irad] = ny.nan
            radialdata.median[irad] = ny.nan
            radialdata.numel[irad] = ny.nan
            radialdata.per10[irad] = ny.nan
            # BUG FIX: per10 was assigned twice and per25 was left at 0.
            radialdata.per25[irad] = ny.nan
            radialdata.max[irad] = ny.nan
            radialdata.min[irad] = ny.nan
        else:
            radialdata.mean[irad] = data[thisindex].mean()
            radialdata.std[irad] = data[thisindex].std()
            radialdata.median[irad] = ny.median(data[thisindex])
            radialdata.numel[irad] = data[thisindex].size
            radialdata.max[irad] = data[thisindex].max()
            radialdata.min[irad] = data[thisindex].min()
            radialdata.per10[irad] = ny.percentile(data[thisindex],10)
            radialdata.per25[irad] = ny.percentile(data[thisindex],25)
    return radialdata
# Parse command-line options and arguments
parser = optparse.OptionParser(usage='%prog [options] <data file>',
                               description='This script reduces a data file to produce a tipping curve plot in a pdf file.')
parser.add_option("-f", '--freq', default='1650',
                  help="Comma-delimited list of frequencies in MHz to calculate maps for, default %default")
# NOTE(review): optparse stores this value under dest 'in', a Python keyword,
# so it cannot be read as `opts.in`; it is also never used below.
parser.add_option("--in",
                  help="Name of directory containing spillover models")
parser.add_option("--emss-pattern", default='Dish_2000.pat',
                  help="Name of the EMSS pattern file, default = '%default'")
(opts, args) = parser.parse_args()
if len(args) < 1:
    raise RuntimeError('Please specify the data file to reduce')

interactive = True
dataset = katholog.Dataset(args[0], 'kat7')
if interactive: dataset.flagplot()  # TODO add automated rfi flagging
plt.show()

# Produce one holography report PDF per requested frequency.
for freq in np.array(opts.freq.split(',')).astype(float).tolist():
    nice_filename = os.path.splitext(os.path.basename(args[0]))[0]+'_'+str(freq)+'_holography'
    pp = PdfPages(nice_filename+'.pdf')
    emssdataset = katholog.Dataset(opts.emss_pattern, 'kat7emss', freq_MHz=freq, clipextent=40)
    emssbeam = katholog.BeamCube(emssdataset, extent=40)
    fullbeam = katholog.BeamCube(dataset, freqMHz=freq, applypointing='center')
    # Total beam power = |Gx|^2 + |Gy|^2 + |Dx|^2 + |Dy|^2, normalised to its
    # peak; same for the EMSS model beam.
    power = np.abs(fullbeam.Gx)**2+np.abs(fullbeam.Gy)**2+np.abs(fullbeam.Dx)**2+np.abs(fullbeam.Dy)**2
    power /= np.max(power)
    emsspower = np.abs(emssbeam.Gx)**2+np.abs(emssbeam.Gy)**2+np.abs(emssbeam.Dx)**2+np.abs(emssbeam.Dy)**2
    emsspower /= np.max(emsspower)
    aperturemap = katholog.ApertureMap(dataset, freqMHz=freq)
    text = []
    if True:  # R.RS.P.40 Req
        radial = radial_data(power[0, :, :], annulus_width=5)
        # TODO Fix the line below
        ind_2nd_sidelobe = (np.diff(radial.mean)>0).nonzero()[0][0:-1][np.diff((np.diff(radial.mean)>0).nonzero()[0])>1][1] + 1
        text.append("R.RS.P.40 Max 2nd Sidelevel %f dB at %f Degrees"%(10*np.log10(radial.max[ind_2nd_sidelobe]),np.abs(fullbeam.margin[256+ind_2nd_sidelobe*5])))
        pixel_per_deg = 512./(fullbeam.margin[-1]-fullbeam.margin[0])
        y, x = np.ogrid[-256:256, -256:256]
        mask = x*x + y*y > (10.*pixel_per_deg)**2   # pixels further than 10 deg from boresight
        pix_sr = ((fullbeam.margin[-1]-fullbeam.margin[0])**2/(512.0**2))/(180/np.pi)**2  # pixel to sr
        text.append("R.RS.P.40 Total amount of sky > 10 degrees for boresight with sidelobes > -40 dB is %f sr."%((10*np.log10(power[0,...]*mask)>-40).sum()*pix_sr))
    if True:  # R.RC.P.4
        mask_3dB = 10.0*np.log10(power) > -3
        text.append("R.RC.P.4 Mean varation between model and data within 3 dB area is %f percent"%(((emsspower[mask_3dB]-power[mask_3dB])/emsspower[mask_3dB]).mean()*100))
    # Gx/Gy amplitude and phase pages (same order as the original script).
    for term, quantity in (('Gx', 'amp'), ('Gy', 'amp'), ('Gx', 'phase'), ('Gy', 'phase')):
        fig = plt.figure()
        fullbeam.plot(term, quantity)
        fig.savefig(pp, format='pdf')
        plt.close(fig)
    innerbeam = katholog.BeamCube(dataset, freqMHz=freq, extent=4, applypointing='center')  # scanantennaname='ant5'
    centralbeam = innerbeam
    mainlobe = np.abs(centralbeam.Gx)**2+np.abs(centralbeam.Gy)**2+np.abs(centralbeam.Dx)**2+np.abs(centralbeam.Dy)**2
    scalefunc = np.max(mainlobe)
    mainlobe /= scalefunc
    if True:  # R.RC.P.2 & R.RC.P.3
        innerextents = [innerbeam.margin[0], innerbeam.margin[-1], innerbeam.margin[0], innerbeam.margin[-1]]
        mask_3dB = 10.0*np.log10(mainlobe) > -3  # 3dB Mask for R.RC.P.3
        mask_1dB = 10.0*np.log10(mainlobe) > -1  # 1dB Mask for R.RC.P.2
        dx = (20*np.log10(np.abs(innerbeam.Dx[mask_3dB])/scalefunc**2) > -20).sum()
        dy = (20*np.log10(np.abs(innerbeam.Dy[mask_3dB])/scalefunc**2) > -20).sum()
        text.append("R.RC.P.3 Number of points in Jones Dx^2 matrix in 3dB contour > -20 dB is %i"%(dx))
        text.append("R.RC.P.3 Number of points in Jones Dy^2 matrix in 3dB contour > -20 dB is %i"%(dy))
        dx = (20*np.log10(np.abs(innerbeam.Dx[mask_1dB])/scalefunc) > -26).sum()
        dy = (20*np.log10(np.abs(innerbeam.Dy[mask_1dB])/scalefunc) > -26).sum()
        text.append("R.RC.P.2 Number of points in Jones Dx^2 matrix in 1dB contour > -26 dB is %i"%(dx))
        text.append("R.RC.P.2 Number of points in Jones Dy^2 matrix in 1dB contour > -26 dB is %i"%(dy))
        fig = plt.figure()
        plt.imshow(20*np.log10(np.abs(innerbeam.Dx[0,...]))*mask_3dB[0,...], extent=innerextents)
        plt.colorbar()
        plt.title('Jones Dx Matrix (amplitude)')
        plt.xlabel('Degrees')
        plt.ylabel('Degrees')
        fig.savefig(pp, format='pdf')
        plt.close(fig)
        fig = plt.figure()
        plt.imshow(20*np.log10(np.abs(innerbeam.Dy[0,...]))*mask_3dB[0,...], extent=innerextents)
        plt.colorbar()
        plt.xlabel('Degrees')
        plt.ylabel('Degrees')
        plt.title('Jones Dy Matrix (amplitude)')
        fig.savefig(pp, format='pdf')
        plt.close(fig)
    if True:  # R.A.P.3 & R.T.P.98
        # BUG FIX: the original called plt.figure() here without rebinding
        # `fig`, so fig.savefig saved a stale, already-closed figure for all
        # six aperture-map pages.
        for quantity in ('amp', 'phase', 'unwrap', 'model', 'dev', 'flat'):
            fig = plt.figure()
            aperturemap.plot(quantity)
            fig.savefig(pp, format='pdf')
            plt.close(fig)
        text.append("R.T.P.98 Surface roughness is %f mm RMS unwraped or %f mm RMS flaterned "%(aperturemap.rms0_mm,aperturemap.rms_mm))
        text.append('Measured gain with observed illumination: %.2f dB'%(aperturemap.gainmeasured_dB))
        text.append('Theorectical gain with uniform illumination: %.2f dB'%(aperturemap.gainuniform_dB))
        text.append('Gain with no panel errors: %.2f dB'%(aperturemap.gainnopanelerr_dB))
        text.append('Gain with only feed offset: %.2f dB'%(aperturemap.gainmodel_dB))
        text.append('Illumination efficiency: %f'%(aperturemap.eff_illumination))
        text.append('Taper efficiency: %.3f'%(aperturemap.eff_taper))
        text.append('Phase efficiency: %.3f'%(aperturemap.eff_phase))
        text.append('Spillover efficiency: %.3f'%(aperturemap.eff_spillover))
        text.append('Surface-error efficiency: %.3f'%(aperturemap.eff_surface))
    # Final page: the collected requirement-verification text.
    fig = plt.figure(None, figsize=(10, 16))
    plt.figtext(0.1, 0.1, '\n'.join(text), fontsize=10)
    fig.savefig(pp, format='pdf')
    pp.close()
    plt.close(fig)
| {
"alphanum_fraction": 0.6214563187,
"author": null,
"avg_line_length": 38.167192429,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0a24e5035746bf0b114a8d24719d42ff7b3b8e41",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-11-11T11:47:54.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-11T11:47:54.000Z",
"max_forks_repo_head_hexsha": "f9eaa867aad8b94c715f7286953124df00b5781c",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "ska-sa/katsdpscripts",
"max_forks_repo_path": "RTS/3.5-Error_Beam/holography_reduction.py",
"max_issues_count": 21,
"max_issues_repo_head_hexsha": "f9eaa867aad8b94c715f7286953124df00b5781c",
"max_issues_repo_issues_event_max_datetime": "2022-01-11T09:14:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-09-16T15:26:53.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "ska-sa/katsdpscripts",
"max_issues_repo_path": "RTS/3.7-Polarization/holography_reduction.py",
"max_line_length": 172,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f9eaa867aad8b94c715f7286953124df00b5781c",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "ska-sa/katsdpscripts",
"max_stars_repo_path": "RTS/3.7-Polarization/holography_reduction.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3445,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12099
} |
import torch
from torch.nn import functional as F
from mmdet3d.ops import Voxelization
from mmdet.models import DETECTORS
from .. import builder
from .two_stage import TwoStage3DDetector
from .voxelnet import Fusion
import numpy as np
@DETECTORS.register_module()
class PartA2(TwoStage3DDetector):
    r"""Part-A2 detector.
    Please refer to the `paper <https://arxiv.org/abs/1907.03670>`_
    This variant additionally projects each input point cloud to a range
    image, fuses it with image data via ``Fusion``, and back-projects the
    fused result to points before the usual voxelize/RPN/RoI pipeline.
    """
    def __init__(self,
                 voxel_layer,
                 voxel_encoder,
                 middle_encoder,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(PartA2, self).__init__(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
        )
        self.voxel_layer = Voxelization(**voxel_layer)
        self.voxel_encoder = builder.build_voxel_encoder(voxel_encoder)
        self.middle_encoder = builder.build_middle_encoder(middle_encoder)
        # Range-image conversion setup (comments translated from Chinese).
        # H x W is the range-image resolution; fov_up / fov_down bound the
        # vertical field of view in degrees.
        self.H = 48
        self.W = 512
        self.fov_up = 3
        self.fov_down = -15.0
        self.pi = torch.tensor(np.pi)
        fov_up = self.fov_up * self.pi / 180.0
        fov_down = self.fov_down * self.pi / 180.0
        fov = abs(fov_up) + abs(fov_down)
        # self.uv caches per-pixel (vertical, horizontal) angles used by
        # range_to_lidar_gpu to back-project pixels to 3D points.
        self.uv = torch.zeros((2, self.H, self.W))
        self.uv[1] = torch.arange(0, self.W)
        # permute() returns a view, so this fills self.uv[0] with row indices.
        self.uv.permute((0, 2, 1))[0] = torch.arange(0, self.H)
        self.uv[0] = ((self.H - self.uv[0]) * fov - abs(fov_down) * self.H) / self.H
        self.uv[1] = (self.uv[1] * 2.0 - self.W) * self.pi / (self.W * 4)  # the final 4 limits the horizontal extent
        # self.range_encoder = RangeEncoder(5, 64, use_img=True)
        self.fusion = Fusion(5, 3)
    def extract_feat(self, points, img_metas):
        """Extract features from points.
        Voxelizes ``points``, runs the voxel and middle encoders, then the
        backbone (plus the neck when configured).  Returns the feature dict
        and the voxel dict produced by :meth:`voxelize`.
        """
        voxel_dict = self.voxelize(points)
        voxel_features = self.voxel_encoder(voxel_dict['voxels'],
                                            voxel_dict['num_points'],
                                            voxel_dict['coors'])
        # Batch size = highest batch index in the coordinate column + 1.
        batch_size = voxel_dict['coors'][-1, 0].item() + 1
        feats_dict = self.middle_encoder(voxel_features, voxel_dict['coors'],
                                         batch_size)
        x = self.backbone(feats_dict['spatial_features'])
        if self.with_neck:
            neck_feats = self.neck(x)
            feats_dict.update({'neck_feats': neck_feats})
        return feats_dict, voxel_dict
    @torch.no_grad()
    def voxelize(self, points):
        """Apply hard voxelization to points.
        Returns a dict with stacked voxels, per-voxel point counts,
        batch-indexed coordinates and voxel centres.
        """
        voxels, coors, num_points, voxel_centers = [], [], [], []
        for res in points:
            res_voxels, res_coors, res_num_points = self.voxel_layer(res)
            # Voxel centre: flip zyx coords to xyz, move to the cell midpoint,
            # scale by voxel size and offset by the point-cloud-range origin.
            res_voxel_centers = (
                res_coors[:, [2, 1, 0]] + 0.5) * res_voxels.new_tensor(
                    self.voxel_layer.voxel_size) + res_voxels.new_tensor(
                        self.voxel_layer.point_cloud_range[0:3])
            voxels.append(res_voxels)
            coors.append(res_coors)
            num_points.append(res_num_points)
            voxel_centers.append(res_voxel_centers)
        voxels = torch.cat(voxels, dim=0)
        num_points = torch.cat(num_points, dim=0)
        voxel_centers = torch.cat(voxel_centers, dim=0)
        coors_batch = []
        for i, coor in enumerate(coors):
            # Prepend the sample's batch index as an extra coordinate column.
            coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)
            coors_batch.append(coor_pad)
        coors_batch = torch.cat(coors_batch, dim=0)
        voxel_dict = dict(
            voxels=voxels,
            num_points=num_points,
            coors=coors_batch,
            voxel_centers=voxel_centers)
        return voxel_dict
    def forward_train(self,
                      points,
                      img_metas,
                      gt_bboxes_3d,
                      gt_labels_3d,
                      img=None,
                      gt_bboxes_ignore=None,
                      proposals=None):
        """Training forward function.
        Args:
            points (list[torch.Tensor]): Point cloud of each sample.
            img_metas (list[dict]): Meta information of each sample
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth
                boxes for each sample.
            gt_labels_3d (list[torch.Tensor]): Ground truth labels for
                boxes of each sample
            img (optional): Image data fused with the range images via
                ``self.fusion``. Defaults to None.
            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
                boxes to be ignored. Defaults to None.
            proposals (optional): Precomputed proposals, used only when no
                RPN head is configured. Defaults to None.
        Returns:
            dict: Losses of each branch.
        """
        # Convert each sample to a range image, extract fused features, then
        # convert back to lidar points (translated from Chinese).
        batchsize = len(points)
        rangeImage = []
        for i in range(batchsize):
            rangeImage.append(self.lidar_to_range_gpu(points[i]).unsqueeze(0))
        rangeImage = torch.cat(rangeImage, dim=0)
        # Whether to fuse in image information.
        # range_feat = self.range_encoder(rangeImage, img) # autoencoder variant
        range_feat = self.fusion(rangeImage, img)
        # Keep the first two raw channels and append the fused features.
        range_ori = torch.cat((rangeImage[:, 0:2], range_feat), dim=1)
        pts_with_range = []
        for i in range(batchsize):
            pts_with_range.append(self.range_to_lidar_gpu(range_ori[i].squeeze(0)))
        feats_dict, voxels_dict = self.extract_feat(pts_with_range, img_metas) # points replaced by pts_with_range
        losses = dict()
        if self.with_rpn:
            rpn_outs = self.rpn_head(feats_dict['neck_feats'])
            rpn_loss_inputs = rpn_outs + (gt_bboxes_3d, gt_labels_3d,
                                          img_metas)
            rpn_losses = self.rpn_head.loss(
                *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
            losses.update(rpn_losses)
            proposal_cfg = self.train_cfg.get('rpn_proposal',
                                             self.test_cfg.rpn)
            proposal_inputs = rpn_outs + (img_metas, proposal_cfg)
            proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
        else:
            proposal_list = proposals
        roi_losses = self.roi_head.forward_train(feats_dict, voxels_dict,
                                                 img_metas, proposal_list,
                                                 gt_bboxes_3d, gt_labels_3d)
        losses.update(roi_losses)
        return losses
    def simple_test(self, points, img_metas, imgs=None, proposals=None, rescale=False):
        """Test function without augmentation.
        Mirrors forward_train's range-image fusion preprocessing, then runs
        the RPN (or uses the supplied proposals) and the RoI head.
        """
        # Convert each sample to a range image, extract fused features, then
        # convert back to lidar points (translated from Chinese).
        batchsize = len(points)
        rangeImage = []
        for i in range(batchsize):
            rangeImage.append(self.lidar_to_range_gpu(points[i]).unsqueeze(0))
        rangeImage = torch.cat(rangeImage, dim=0)
        # Whether to fuse in image information.
        # range_feat = self.range_encoder(rangeImage, img) # autoencoder variant
        range_feat = self.fusion(rangeImage, imgs)
        range_ori = torch.cat((rangeImage[:, 0:2], range_feat), dim=1)
        pts_with_range = []
        for i in range(batchsize):
            pts_with_range.append(self.range_to_lidar_gpu(range_ori[i].squeeze(0)))
        feats_dict, voxels_dict = self.extract_feat(pts_with_range, img_metas)
        if self.with_rpn:
            rpn_outs = self.rpn_head(feats_dict['neck_feats'])
            proposal_cfg = self.test_cfg.rpn
            bbox_inputs = rpn_outs + (img_metas, proposal_cfg)
            proposal_list = self.rpn_head.get_bboxes(*bbox_inputs)
        else:
            proposal_list = proposals
        return self.roi_head.simple_test(feats_dict, voxels_dict, img_metas,
                                         proposal_list)
    def lidar_to_range_gpu(self, points):
        """Project an (N, >=4) point tensor onto a (5, H, W) range image.
        Channels: range, points[:, 3] (presumably intensity -- TODO confirm),
        then the raw x, y, z coordinates.  Pixels with no point stay zero;
        multiple points landing on the same pixel overwrite one another.
        """
        device = points.device
        pi = torch.tensor(np.pi).to(device)
        fov_up = self.fov_up * pi / 180.0
        fov_down = self.fov_down * pi / 180.0
        fov = abs(fov_up) + abs(fov_down)
        # NOTE(review): the norm is taken over ALL columns (including channel
        # 3), not just xyz -- confirm this is intentional.
        depth = torch.norm(points, 2, dim=1)
        x = points[:, 0]
        y = points[:, 1]
        z = points[:, 2]
        yaw = torch.atan2(y, x)
        pitch = torch.asin(z / depth)
        u = 0.5 * (1 - 4 * yaw / pi) * self.W  # the final 4 limits the horizontal extent
        v = (1 - (pitch + abs(fov_down)) / fov) * self.H
        # Clamp pixel coordinates into [0, W-1] x [0, H-1].
        zero_tensor = torch.zeros_like(u)
        W_tensor = torch.ones_like(u) * (self.W - 1)
        H_tensor = torch.ones_like(v) * (self.H - 1)
        u = torch.floor(u)
        u = torch.min(u, W_tensor)
        u = torch.max(u, zero_tensor).long()
        v = torch.floor(v)
        v = torch.min(v, H_tensor)
        v = torch.max(v, zero_tensor).long()
        range_image = torch.full((5, self.H, self.W), 0, dtype=torch.float32).to(device)
        range_image[0][v, u] = depth
        range_image[1][v, u] = points[:, 3]
        range_image[2][v, u] = points[:, 0]
        range_image[3][v, u] = points[:, 1]
        range_image[4][v, u] = points[:, 2]
        return range_image
    def range_to_lidar_gpu(self, range_img):
        """Back-project a range image to an (M, 12) point tensor.
        Reconstructs x, y, z from the range channel and the cached per-pixel
        angles in self.uv, carries the remaining channels through, and drops
        rows whose reconstructed x coordinate is exactly 0.
        """
        device = range_img.device
        self.uv = self.uv.to(device)
        lidar_out = torch.zeros((12, self.H, self.W)).to(device)
        lidar_out[0] = range_img[0] * torch.cos(self.uv[0]) * torch.cos(self.uv[1])
        # NOTE(review): the y coordinate is negated here -- confirm this
        # matches the yaw convention of lidar_to_range_gpu.
        lidar_out[1] = range_img[0] * torch.cos(self.uv[0]) * torch.sin(self.uv[1]) * (-1)
        lidar_out[2] = range_img[0] * torch.sin(self.uv[0])
        lidar_out[3:] = range_img[1:]
        lidar_out = lidar_out.permute((2, 1, 0)).reshape([-1, 12])
        lidar_out = lidar_out[torch.where(lidar_out[:, 0] != 0)]
        return lidar_out
"alphanum_fraction": 0.5683386549,
"author": null,
"avg_line_length": 38.6812749004,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "997b5b29c353c02bcd7aadc927f950d1b642b9f6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-09-25T04:25:33.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-04-17T10:30:57.000Z",
"max_forks_repo_head_hexsha": "45c65bceb4bd0ffc7d59da279b6c053c059533f0",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "adept-thu/RI-Fusion",
"max_forks_repo_path": "mmdet3d/models/detectors/parta2.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "45c65bceb4bd0ffc7d59da279b6c053c059533f0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "adept-thu/RI-Fusion",
"max_issues_repo_path": "mmdet3d/models/detectors/parta2.py",
"max_line_length": 107,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "45c65bceb4bd0ffc7d59da279b6c053c059533f0",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "adept-thu/RI-Fusion",
"max_stars_repo_path": "mmdet3d/models/detectors/parta2.py",
"max_stars_repo_stars_event_max_datetime": "2021-06-16T01:43:48.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-18T12:18:08.000Z",
"num_tokens": 2511,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9709
} |
From Coqprime Require Import PocklingtonRefl.
Local Open Scope positive_scope.
(* Coqprime elliptic-curve (Pocklington-style) primality certificate:
   39488365236169 is prime, conditional on the primality of the
   certificate factor 201471189889 (hypothesis H).  The certificate is
   checked by native computation via native_cast_no_check. *)
Lemma primo76:
prime 201471189889->
prime 39488365236169.
Proof.
intro H.
apply (Pocklington_refl
(Ell_certif
39488365236169
196
((201471189889,1)::nil)
0
3584
8
64)
((Proof_certif _ H) :: nil)).
native_cast_no_check (refl_equal true).
Time Qed.
| {
"alphanum_fraction": null,
"author": "mukeshtiwari",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/mukeshtiwari-Formally_Verified_Verifiable_Group_Generator/Formally_Verified_Verifiable_Group_Generator-e80e8d43e81b5201d6ab82a8ebc07a5cef03476b/primality/p7_76.v",
"reason": null,
"repo": "Formally_Verified_Verifiable_Group_Generator",
"save_path": "github-repos/coq/mukeshtiwari-Formally_Verified_Verifiable_Group_Generator",
"sha": "e80e8d43e81b5201d6ab82a8ebc07a5cef03476b",
"size": null
} |
"""
Export function for the JEOL field-emission gun electron probe microanalyser (EPMA)
using "probe for EPMA".
"""
import csv
import numpy as np
import pandas as pd
from pathlib import Path
def write_pos(
    df, filepath=Path("./exportedpoints.pos"), encoding="cp1252", z=10.7, **kwargs
):
    """
    Export a dataframe of coordinates to a .pos file.

    Parameters
    ------------
    df : :class:`pandas.DataFrame`
        Dataframe containing points to serialise; requires 'name', 'x' and
        'y' columns.
    filepath : :class:`str` | :class:`pathlib.Path`
        Filepath for export.
    encoding : :class:`str`
        Encoding for the output file.
    z : :class:`float`
        Optional specification of default focus value to use.

    Notes
    ------
    The file starts with three literal "0,0,0,0" rows, quotes each spot
    name, and is written without a trailing newline.
    """
    # Row layout expected by "probe for EPMA": a literal 2, a 1-based index,
    # the quoted spot name, X/Y/Z (Z presumably being the focus value), four
    # type flags and a trailing empty quoted field.
    pos = pd.DataFrame(
        index=df.index,
        columns=[
            "two",
            "index",
            "label",
            "X",
            "Y",
            "Z",
            "type1",
            "type2",
            "type3",
            "type4",
            "blank",
        ],
    )
    pos["two"] = 2
    pos["index"] = df.index + 1
    pos["label"] = df["name"].apply(lambda x: '"' + str(x) + '"')
    pos["X"] = df["x"]
    pos["Y"] = df["y"]
    pos["Z"] = z
    pos["type1"] = 0
    pos["type2"] = 1
    pos["type3"] = 0
    pos["type4"] = 0
    pos["blank"] = '""'
    # export to text file with no blank row at end
    s = pos.to_csv(None, quoting=csv.QUOTE_NONE, index=False, header=False)
    s = s.replace("\r\n", "\n")
    s = "0,0,0,0\n0,0,0,0\n0,0,0,0\n" + s
    # BUG FIX: `encoding` was accepted but never passed to open(), so files
    # were always written in the platform default encoding.  newline=""
    # disables newline translation so the "\n" normalisation above sticks on
    # every platform.
    with open(str(filepath), "w", encoding=encoding, newline="") as f:
        f.write(s[:-1])  # s[:-1] drops the trailing newline from to_csv
| {
"alphanum_fraction": 0.5262857143,
"author": null,
"avg_line_length": 25,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "edc7292ba5088326a1778d02ffedfa73dd05497d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2020-05-23T04:11:16.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-10-15T16:02:54.000Z",
"max_forks_repo_head_hexsha": "131666dcbb72fc982714a19ee8a6d5fd31bb733d",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "morganjwilliams/autopew",
"max_forks_repo_path": "autopew/io/EPMA/JEOL.py",
"max_issues_count": 23,
"max_issues_repo_head_hexsha": "131666dcbb72fc982714a19ee8a6d5fd31bb733d",
"max_issues_repo_issues_event_max_datetime": "2021-11-11T08:01:47.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-10-10T04:27:49.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "morganjwilliams/autopew",
"max_issues_repo_path": "autopew/io/EPMA/JEOL.py",
"max_line_length": 83,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "131666dcbb72fc982714a19ee8a6d5fd31bb733d",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "morganjwilliams/autopew",
"max_stars_repo_path": "autopew/io/EPMA/JEOL.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-16T19:47:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-01-11T20:59:55.000Z",
"num_tokens": 505,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1750
} |
"""
Script to show how the COVID-19 cases in the US are changing over time.
Plot shows the new daily confirmed cases and is sorted by number of new cases.
To run: `python us_cases.py`
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from statsmodels.nonparametric.smoothers_lowess import lowess
# Style some of the plot parameters
mpl.rcParams.update({
    'font.sans-serif': 'Clear Sans',
    'lines.solid_capstyle': 'round',
})
def local_fit(y):
    """
    LOWESS fit of the data (set to 1 week fraction). Gives better view than rolling avg
    """
    smoothed = lowess(y, np.arange(len(y)), frac=1/7.)
    return smoothed[:, 1]
def clean_plot(ax):
    """
    Cleans up the axes: removes ticks and every spine except the bottom one.
    """
    ax.set_xticks([])
    ax.set_yticks([])
    for side in ('top', 'left', 'right'):
        ax.spines[side].set_visible(False)
# Path to JHU data on Github
path = ('https://github.com/CSSEGISandData/COVID-19/raw/master/'
        'csse_covid_19_data/csse_covid_19_time_series/'
        'time_series_covid19_confirmed_US.csv')
# Aggregate the county-level rows up to one row per state
df = pd.read_csv(path).groupby('Province_State').sum()
# Rank states by the latest day-over-day increase, then drop the helper column
df['delta'] = df.iloc[:, -1] - df.iloc[:, -2]
df = df.sort_values(by='delta', ascending=False)
# ignore the cruise lines and some of the outlying territories (e.g., Guam)
exclude = ['American Samoa', 'Northern Mariana Islands', 'Diamond Princess',
           'Grand Princess', 'Guam', 'Virgin Islands']
df = df[~df.index.isin(exclude)].drop(columns='delta')
# One thin horizontal panel per state, stacked vertically
states = list(df.index)
fig, ax = plt.subplots(nrows=len(states), ncols=1,
                       figsize=(5, len(states) * .5),
                       tight_layout=True)
fig.subplots_adjust(hspace=0)
for row, state in enumerate(states):
    cumulative = df.loc[state]
    daily = cumulative.diff()[1:]  # day-over-day new confirmed cases
    panel = ax[row]
    panel.fill_between(daily.index, local_fit(daily.values), color='r', label=state)
    panel.annotate(f'{state} (yesterday: {int(daily.values[-1])})',
                   (0, .5), xycoords='axes fraction')
    panel.set_xlim(left=daily.index[0], right=daily.index[-1])
    panel.set_ylim(bottom=0)
    clean_plot(panel)
plt.savefig('us_cases.png', dpi=300, transparent=False)
| {
"alphanum_fraction": 0.6571651699,
"author": null,
"avg_line_length": 31.2317073171,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "02c3d182be71663e74df0dcc7babe11b701cc935",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5c33bc047f9ebc22f4f3ae1f659aa7cb486b02db",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "michlkallen/covid_us_cases",
"max_forks_repo_path": "us_cases.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5c33bc047f9ebc22f4f3ae1f659aa7cb486b02db",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "michlkallen/covid_us_cases",
"max_issues_repo_path": "us_cases.py",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5c33bc047f9ebc22f4f3ae1f659aa7cb486b02db",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "michlkallen/covid_us_cases",
"max_stars_repo_path": "us_cases.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 658,
"path": null,
"reason": "import numpy,from statsmodels",
"repo": null,
"save_path": null,
"sha": null,
"size": 2561
} |
import numpy as np
import sympy as sp
import vorpy.linalg
def test_scalar_cross_product_tensor_symbolically ():
    """Check the 2d scalar cross product tensor against np.cross on symbolic vectors."""
    tensor = vorpy.linalg.scalar_cross_product_tensor(dtype=sp.Integer)
    u = np.array(sp.var('u_0,u_1'))
    v = np.array(sp.var('v_0,v_1'))
    # np.cross on 2d vector arguments returns a 0-tensor; `[()]` extracts the
    # scalar.  The nested np.dot form is used because np.einsum does not
    # support dtype=object.
    expected = np.cross(u, v)[()]
    actual = np.dot(np.dot(tensor, v), u)
    assert np.all(expected == actual)
    print('test_scalar_cross_product_tensor_symbolically passed.')
def test_scalar_cross_product_tensor_float ():
    """Check the 2d scalar cross product tensor against np.cross on random floats."""
    tensor = vorpy.linalg.scalar_cross_product_tensor(dtype=float)
    rng = np.random.RandomState(666)
    for _ in range(100):
        u = rng.randn(2)
        for _ in range(100):
            v = rng.randn(2)
            # `[()]` extracts the scalar from np.cross's 0-tensor result.
            error = np.abs(np.cross(u,v)[()] - np.einsum('jk,j,k', tensor, u, v))
            assert np.max(error) < 1.0e-10
    print('test_scalar_cross_product_tensor_float passed.')
def test_cross_product_tensor_symbolically ():
    """Check the 3d cross product tensor against np.cross on symbolic vectors."""
    tensor = vorpy.linalg.cross_product_tensor(dtype=sp.Integer)
    u = np.array(sp.var('u_0,u_1,u_2'))
    v = np.array(sp.var('v_0,v_1,v_2'))
    # The nested np.dot form is used because np.einsum does not support
    # dtype=object.
    expected = np.cross(u, v)
    actual = np.dot(np.dot(tensor, v), u)
    assert np.all(expected == actual)
    print('test_cross_product_tensor_symbolically passed.')
def test_cross_product_tensor_float ():
    """Check the 3d cross product tensor against np.cross on random floats."""
    tensor = vorpy.linalg.cross_product_tensor(dtype=float)
    rng = np.random.RandomState(666)
    for _ in range(100):
        u = rng.randn(3)
        for _ in range(100):
            v = rng.randn(3)
            error = np.abs(np.cross(u,v) - np.einsum('ijk,j,k', tensor, u, v))
            assert np.max(error) < 1.0e-10
    print('test_cross_product_tensor_float passed.')
if __name__ == '__main__':
    # Run the full test suite when executed as a script.
    test_scalar_cross_product_tensor_symbolically()
    test_scalar_cross_product_tensor_float()
    test_cross_product_tensor_symbolically()
    test_cross_product_tensor_float()
| {
"alphanum_fraction": 0.7137203166,
"author": null,
"avg_line_length": 47.375,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4e0fe310a991b0a6a04a18e60900eedbee423528",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "68b6525ae43d99f451cf85ce254ffb0311521320",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "vdods/vorpy",
"max_forks_repo_path": "tests/test_linalg.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "68b6525ae43d99f451cf85ce254ffb0311521320",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "vdods/vorpy",
"max_issues_repo_path": "tests/test_linalg.py",
"max_line_length": 119,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "68b6525ae43d99f451cf85ce254ffb0311521320",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "vdods/vorpy",
"max_stars_repo_path": "tests/test_linalg.py",
"max_stars_repo_stars_event_max_datetime": "2020-02-11T17:33:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-07-08T14:41:46.000Z",
"num_tokens": 578,
"path": null,
"reason": "import numpy,import sympy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2274
} |
# _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++, Cuda
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>
from collections.abc import Iterable
import numpy as np
def _extend_mode_to_code(mode):
"""Convert an extension mode to the corresponding integer code.
"""
if mode == 'nearest':
return 0
elif mode == 'wrap':
return 1
elif mode == 'reflect':
return 2
elif mode == 'mirror':
return 3
elif mode == 'constant':
return 4
else:
raise RuntimeError('boundary mode not supported')
def _normalize_sequence(inp, rank):
"""If inp is a scalar, create a sequence of length equal to the
rank by duplicating the inp. If inp is a sequence,
check if its length is equal to the length of array.
"""
is_str = isinstance(inp, str)
if not is_str and isinstance(inp, Iterable):
normalized = list(inp)
if len(normalized) != rank:
err = "sequence argument must have length equal to inp rank"
raise RuntimeError(err)
else:
normalized = [inp] * rank
return normalized
def _get_output(output, inp, shape=None, complex_output=False):
if shape is None:
shape = inp.shape
if output is None:
if not complex_output:
output = np.zeros(shape, dtype=input.dtype.name)
else:
complex_type = np.promote_types(input.dtype, np.complex64)
output = np.zeros(shape, dtype=complex_type)
elif isinstance(output, (type, np.dtype)):
# Classes (like `np.float32`) and dtypes are interpreted as dtype
if complex_output and np.dtype(output).kind != 'c':
raise RuntimeError("output must have complex dtype")
output = np.zeros(shape, dtype=output)
elif isinstance(output, str):
output = np.typeDict[output]
if complex_output and np.dtype(output).kind != 'c':
raise RuntimeError("output must have complex dtype")
output = np.zeros(shape, dtype=output)
elif output.shape != shape:
raise RuntimeError("output shape not correct")
elif complex_output and output.dtype.kind != 'c':
raise RuntimeError("output must have complex dtype")
return output
| {
"alphanum_fraction": 0.6109640832,
"author": null,
"avg_line_length": 31.1176470588,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8a5f583c4f7fcd68f942be3d9d01258b292898cc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-01-01T10:37:55.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-01-01T10:37:55.000Z",
"max_forks_repo_head_hexsha": "e077a81e8d5bb3d38039ff9289a93996b1133411",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lucasace/caer",
"max_forks_repo_path": "caer/ndi/cndsupport.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e077a81e8d5bb3d38039ff9289a93996b1133411",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lucasace/caer",
"max_issues_repo_path": "caer/ndi/cndsupport.py",
"max_line_length": 73,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e077a81e8d5bb3d38039ff9289a93996b1133411",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lucasace/caer",
"max_stars_repo_path": "caer/ndi/cndsupport.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 634,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2645
} |
ITensors.itensor(A::ITensor) = A
# Insert missing diagonal blocks
function insert_diag_blocks!(T::Tensor)
  for b in eachdiagblock(T)
    blockT = blockview(T, b)
    if isnothing(blockT)
      # Block was not found in the list, insert it
      insertblock!(T, b)
    end
  end
end

# Convenience overload: operate on the tensor storage backing an ITensor.
insert_diag_blocks!(T::ITensor) = insert_diag_blocks!(tensor(T))
# Reshape into an order-2 ITensor
matricize(T::ITensor, inds::Index...) = matricize(T, inds)

function matricize(T::ITensor, inds)
  # Split T's indices: the listed ones go left, the remaining ones go right.
  left_inds = commoninds(T, inds)
  right_inds = uniqueinds(T, inds)
  return matricize(T, left_inds, right_inds)
end

function matricize(T::ITensor, left_inds, right_inds)
  # Combine each index group into one combined index, yielding an order-2
  # (matrix-like) ITensor plus the two combiners needed to undo the reshape.
  CL = combiner(left_inds; dir=ITensors.Out, tags="CL")
  CR = combiner(right_inds; dir=ITensors.In, tags="CR")
  M = (T * CL) * CR
  return M, CL, CR
end
# Replace the dimensions in a tuple of `QN => dim` pairs, keeping the QNs.
function setdims(t::NTuple{N,Pair{QN,Int}}, dims::NTuple{N,Int}) where {N}
  return first.(t) .=> dims
end
# XXX: generalize this function
# Slice columns I2 from a dense order-N tensor and rebuild a tensor whose
# index dimensions match the sliced array.
function _getindex(T::DenseTensor{ElT,N}, I1::Colon, I2::UnitRange{Int64}) where {ElT,N}
  A = array(T)[I1, I2]
  return tensor(Dense(vec(A)), setdims(inds(T), size(A)))
end
# Return the n-th `QN => dim` entry of the index's QN space.
function getblock(i::Index, n::Integer)
  return ITensors.space(i)[n]
end

# Make `Pair{QN,Int}` act like a regular `dim`
NDTensors.dim(qnv::Pair{QN,Int}) = last(qnv)

# Apply an arrow direction to the QN while preserving the block dimension.
Base.:*(qnv::Pair{QN,Int}, d::ITensors.Arrow) = qn(qnv) * d => dim(qnv)
# Extract block `b` of `T`, re-attaching the QN-carrying indices (with their
# arrow directions) that plain `T[b]` indexing drops.
function getblock_preserve_qns(T::Tensor, b::Block)
  # TODO: make `T[b]` preserve QNs
  Tb = T[b]
  indsTb = getblock.(inds(T), Tuple(b)) .* dir.(inds(T))
  return ITensors.setinds(Tb, indsTb)
end
# Assemble a BlockSparseTensor from a Dict mapping Block => dense block tensor.
function blocksparsetensor(blocks::Dict{B,TB}) where {B,TB}
  b1, Tb1 = first(pairs(blocks))
  N = length(b1)
  indstypes = typeof.(inds(Tb1))
  blocktype = eltype(Tb1)
  # One empty index vector per dimension (`T[]` via getindex on each type);
  # filled in from the per-block indices below.
  indsT = getindex.(indstypes)
  # Determine the indices from the blocks
  for (b, Tb) in pairs(blocks)
    indsTb = inds(Tb)
    for n in 1:N
      bn = b[n]
      indsTn = indsT[n]
      # Grow the per-dimension index list on demand.
      if bn > length(indsTn)
        resize!(indsTn, bn)
      end
      indsTn[bn] = indsTb[n]
    end
  end
  T = BlockSparseTensor(blocktype, indsT)
  for (b, Tb) in pairs(blocks)
    # Skip empty blocks (e.g. zero-column blocks from a trivial nullspace).
    if !isempty(Tb)
      T[b] = Tb
    end
  end
  return T
end
# Nullspace of a Hermitian block-sparse tensor via its eigendecomposition:
# the null vectors are the eigenvectors whose eigenvalues lie within `atol`
# of zero.
function _nullspace_hermitian(M::Tensor; atol::Real=0.0)
  tol = atol
  # Insert any missing diagonal blocks
  insert_diag_blocks!(M)
  #D, U = eigen(Hermitian(M))
  Dᵢₜ, Uᵢₜ = eigen(itensor(M); ishermitian=true)
  D = tensor(Dᵢₜ)
  U = tensor(Uᵢₜ)
  nullspace_blocks = Dict()
  for bU in nzblocks(U)
    bM = Block(bU[1], bU[1])
    bD = Block(bU[2], bU[2])
    # Assume sorted from largest to smallest
    # First eigenvector column whose |eigenvalue| is within tolerance of zero.
    indstart = sum(d -> abs(d) .> tol, storage(D[bD])) + 1
    Ub = getblock_preserve_qns(U, bU)
    indstop = lastindex(Ub, 2)
    # Columns spanning the (near-)zero eigenspace of this block.
    Nb = _getindex(Ub, :, indstart:indstop)
    nullspace_blocks[bU] = Nb
  end
  return blocksparsetensor(nullspace_blocks)
end
# Dispatch nullspace of a Hermitian-wrapped block-sparse tensor to the
# dedicated Hermitian implementation.
function LinearAlgebra.nullspace(M::Hermitian{<:Number,<:Tensor}; kwargs...)
  return _nullspace_hermitian(parent(M); kwargs...)
end
# Nullspace of an order-2 ITensor M, computed from the Hermitian normal
# matrix M† M (its zero eigenspace equals the right nullspace of M).
function LinearAlgebra.nullspace(::Order{2}, M::ITensor, left_inds, right_inds; kwargs...)
  @assert order(M) == 2
  M² = prime(dag(M), right_inds) * M
  M² = permute(M², right_inds'..., right_inds...)
  M²ₜ = tensor(M²)
  Nₜ = nullspace(Hermitian(M²ₜ); kwargs...)
  # NOTE(review): both indices are set to In before the dag below,
  # presumably so the final arrows match the input convention — confirm.
  indsN = (Index(ind(Nₜ, 1); dir=ITensors.In), Index(ind(Nₜ, 2); dir=ITensors.In))
  N = dag(itensor(ITensors.setinds(Nₜ, indsN)))
  # Make the index match the input index
  Ñ = replaceinds(N, (ind(N, 1),) => right_inds)
  return Ñ
end
# Nullspace of a general ITensor: matricize over the given indices, take the
# order-2 nullspace, then uncombine back to the original index structure.
function LinearAlgebra.nullspace(T::ITensor, is...; kwargs...)
  M, CL, CR = matricize(T, is...)
  @assert order(M) == 2
  cL = commoninds(M, CL)
  cR = commoninds(M, CR)
  N₂ = nullspace(Order(2), M, cL, cR; kwargs...)
  # Uncombine the right combined index back into the original indices.
  return N₂ * CR
end
| {
"alphanum_fraction": 0.6590234691,
"author": null,
"avg_line_length": 28.0833333333,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "b7bbd0b32278f9837911a256495d201dd87b9945",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1489817f7960903e84331b324d810631074d0f35",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "LHerviou/ITensorInfiniteMPS.jl",
"max_forks_repo_path": "src/nullspace.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1489817f7960903e84331b324d810631074d0f35",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "LHerviou/ITensorInfiniteMPS.jl",
"max_issues_repo_path": "src/nullspace.jl",
"max_line_length": 90,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "1489817f7960903e84331b324d810631074d0f35",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "LHerviou/ITensorInfiniteMPS.jl",
"max_stars_repo_path": "src/nullspace.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1288,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3707
} |
[STATEMENT]
lemma PO_l2_inv8 [iff]: "reach l2 \<subseteq> l2_inv8"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. reach l2 \<subseteq> l2_inv8
[PROOF STEP]
by (rule_tac J="l2_inv1 \<inter> l2_inv3" in inv_rule_incr) (auto) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Key_Agreement_Strong_Adversaries_pfslvl2",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 106,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
using Documenter, SparseRegression

# Build the HTML documentation site for SparseRegression.jl.
makedocs(
    format = Documenter.HTML(),
    sitename = "SparseRegression.jl",
    authors = "Josh Day",
    clean = true,
    pages = [
        "index.md",
        "usage.md",
        "algorithms.md"
    ]
)

# Push the generated site to the repository's gh-pages branch.
deploydocs(
    repo = "github.com/joshday/SparseRegression.jl.git",
)
| {
"alphanum_fraction": 0.59375,
"author": null,
"avg_line_length": 17.7777777778,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "9b8f74cca7243d54d8a8838577fbbb30f17292e0",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2021-12-13T10:40:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-09-30T20:53:44.000Z",
"max_forks_repo_head_hexsha": "86fd83243061899f4c2830a7a9f060b4f3d94725",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/SparseRegression.jl-ca6142a6-a6a7-57f5-b674-4a8484b22e92",
"max_forks_repo_path": "docs/make.jl",
"max_issues_count": 17,
"max_issues_repo_head_hexsha": "86fd83243061899f4c2830a7a9f060b4f3d94725",
"max_issues_repo_issues_event_max_datetime": "2019-01-16T00:38:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-03-05T15:05:16.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/SparseRegression.jl-ca6142a6-a6a7-57f5-b674-4a8484b22e92",
"max_issues_repo_path": "docs/make.jl",
"max_line_length": 58,
"max_stars_count": 37,
"max_stars_repo_head_hexsha": "86fd83243061899f4c2830a7a9f060b4f3d94725",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/SparseRegression.jl-ca6142a6-a6a7-57f5-b674-4a8484b22e92",
"max_stars_repo_path": "docs/make.jl",
"max_stars_repo_stars_event_max_datetime": "2022-01-21T03:29:59.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-04-29T16:28:53.000Z",
"num_tokens": 84,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 320
} |
from data import common
from data.sr import dataclass
import numpy as np
from PIL import Image
_parent_class = dataclass.SRData
class Demo(_parent_class):
    """Demo dataset: serves images from a single directory and synthesizes
    the HR counterpart by bicubic upscaling of the stored image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @staticmethod
    def get_kwargs(cfg, train=False):
        # Demo data is never binary-packed and is always loaded in eval mode.
        kwargs = _parent_class.get_kwargs(cfg, train=False)
        kwargs['is_binary'] = False
        return kwargs

    def get_path(self, degradation, scale):
        # LR and HR both point at the demo directory; HR is built on the fly.
        return {'lr': self.dpath, 'hr': self.dpath}

    def get_patch(self, **kwargs):
        # Upscale the stored image bicubically by the configured scale factor
        # to act as the HR target.
        image = Image.fromarray(kwargs['hr'])
        width, height = image.size
        new_size = (int(self.scale[0] * width), int(self.scale[0] * height))
        image = image.resize(new_size, resample=Image.BICUBIC)
        return {'lr': kwargs['lr'], 'hr': np.array(image)}
| {
"alphanum_fraction": 0.6064301552,
"author": null,
"avg_line_length": 25.7714285714,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ff6e1332653f4847d17165988282c8c135420897",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 11,
"max_forks_repo_forks_event_max_datetime": "2022-02-18T08:25:37.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-16T14:54:33.000Z",
"max_forks_repo_head_hexsha": "d7cc08db5ba5ec9103f1813f76d1da825afe1a5b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sanghyun-son/srwarp",
"max_forks_repo_path": "src/data/sr/demo.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "d7cc08db5ba5ec9103f1813f76d1da825afe1a5b",
"max_issues_repo_issues_event_max_datetime": "2022-02-21T10:10:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-08-04T15:40:52.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sanghyun-son/srwarp",
"max_issues_repo_path": "src/data/sr/demo.py",
"max_line_length": 59,
"max_stars_count": 82,
"max_stars_repo_head_hexsha": "d7cc08db5ba5ec9103f1813f76d1da825afe1a5b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sanghyun-son/srwarp",
"max_stars_repo_path": "src/data/sr/demo.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T03:06:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-22T09:22:46.000Z",
"num_tokens": 238,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 902
} |
import streamlit as st
import pandas as pd
import numpy as np
import json
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import RendererAgg
import logging
import sys
logging.basicConfig(level=logging.DEBUG)
_lock = RendererAgg.lock
sys.path.append('../src/')
sys.path.append('..')
sys.path.append('.')
import src.data_retrieval as dr
# st.beta_set_page_config(
#     page_title="Analysis of Glioblastoma papers from PubMed",
#     layout="wide",
#     initial_sidebar_state="expanded")

st.title('Analysis of Glioblastoma papers from PubMed')

# Open the existing abstracts database (no rebuild).
get_data = dr.GetPubmedData(create_db=False)
logging.info("Make DB connection")
cnx = get_data.db_con

st.subheader("Newest papers")
# Number of most recent papers to display (bounded 1-50 by the widget).
n_articles = st.number_input(label="Newest N papers",
                             value=10,
                             min_value=1,
                             max_value=50)

# Optional refresh: pull the latest day of papers from PubMed into the DB.
if st.button('Refresh latest data (WARNING: this can be slow)'):
    st.text('Fetching latest day of data, please wait a moment...')
    get_data.get_recent_data()
    st.write("Done")

st.text('Creating summary table')
# Most recently retrieved abstracts, newest first.  n_articles is a bounded
# integer widget value, so the f-string interpolation is safe here.
sub_df = pd.read_sql_query(f"""SELECT * FROM abstracts
                               ORDER BY retrieval_date DESC
                               LIMIT {n_articles}""", cnx)
def title_with_link(x):
    """Render a (title, doi) pair as an HTML anchor linking to the DOI.

    Args:
        x: Sequence of two strings: the paper title and its DOI.

    Returns:
        An ``<a>`` snippet opening the DOI resolver page in a new tab.
    """
    import html
    link = "https://doi.org/" + x[1]
    # BUG FIX: escape the interpolated text so titles containing '&', '<'
    # or quotes cannot break the generated markup (the table is rendered
    # with escape=False / unsafe_allow_html).
    title = html.escape(x[0])
    href = html.escape(link, quote=True)
    return f'<a target="_blank" href="{href}">{title}</a>'
@st.cache(allow_output_mutation=True, max_entries=5, ttl=3000)
def get_abstract_table(sub_df, n_articles):
    """Build the display table for the newest `n_articles` abstracts.

    Adds derived columns (top entities, year, clickable title link) and
    returns the selected columns newest-first.  Cached by streamlit.
    NOTE(review): mutates `sub_df` in place; allow_output_mutation=True
    makes that acceptable to the cache — confirm callers don't reuse it.
    """
    logging.info("Get top entities")
    # Ten most frequent named entities per abstract (stored as JSON strings).
    sub_df['top_entities'] = [pd.Series(json.loads(x)).value_counts()[:10].
                              index.tolist() for x in sub_df['entities']]
    logging.info("Iterate over year")
    # Publication year parsed from the leading 'YYYY' of the date string.
    sub_df["year"] = sub_df.date.apply(lambda x: int(x[:4]))
    logging.info("Return subbed df")
    sub_df["title_link"] = sub_df[["title", "doi"]].apply(title_with_link,
                                                          axis=1)
    # Keep display columns; last n rows reversed -> newest first.
    sub_df = sub_df[["date",
                     "title_link",
                     "full_journal_name",
                     "last_author",
                     "top_entities",
                     "genes",
                     "retrieval_date"]].tail(n_articles)[::-1]
    return sub_df
sub_df_new = get_abstract_table(sub_df, n_articles)
# Render as raw HTML so the DOI title links remain clickable.
df = sub_df_new.to_html(escape=False)
st.write(df, unsafe_allow_html=True)
#st.table(sub_df_new)

logging.info("Run query over full table")
# Load every abstract for the aggregate plots below.
df = pd.read_sql_query("SELECT * FROM abstracts", cnx)
logging.info("Load entities from json")
df.entities = df.entities.apply(lambda x: json.loads(x))
logging.info("Apply substrining to years")
# Publication year from the leading 'YYYY' of the date string.
df["year"] = df.date.apply(lambda x: int(x[:4]))
logging.info("Sort values by date")
df.sort_values(by="retrieval_date",
               ascending=True,
               inplace=True)

# Restrict the analysis to papers published since the chosen year.
year_since = st.slider("Display data since",
                       min_value=int(df.year.min()),
                       max_value=int(df.year.max()),
                       value=2020,
                       step=1)
logging.info("Get year index")
year_index = df.year >= year_since
logging.info("Subset df by year")
sub_year1 = df.genes[year_index]
st.write(f"Computed on {len(sub_year1)} abstracts")
logging.info("Get gene value counts")
value_counts = get_data.preprocessor.get_gene_value_counts(sub_year1)
# logging(f"df: {sub_year1}")
# logging(f"Value counts: {value_counts}")

TOP = 20
st.subheader(f'Plot top {TOP} Gene counts accross all abstracts')
logging.info("Plot word freqs")
# The Agg renderer is not thread-safe; serialize drawing with its lock.
with _lock:
    fig, ax = plt.subplots()
    ax.bar(value_counts.index[:TOP],
           value_counts.values[:TOP])
    plt.xticks(rotation=45)
    plt.tick_params(axis='x', which='major', labelsize=5)
    plt.tight_layout()
    st.pyplot(fig)

st.subheader('Plot heatmap of TOP N co-occurences of terms in abstracts')
# year_since2 = st.slider("Display data since year",
#                         min_value=int(df.year.min()),
#                         max_value=int(df.year.max()),
#                         value=2020,
#                         step=1)
# Heatmap controls: number of entities and abstract sample size.
max_features = st.number_input(label="Number of top co-occurences",
                               value=20,
                               min_value=2,
                               max_value=50)
max_sample_size = st.number_input(label="Number of abstracts to sample",
                                  value=500,
                                  min_value=2,
                                  max_value=10000)
# year_index2 = df.year >= year_since2
#
# sub_year2 = df.genes[year_index2]
@st.cache(allow_output_mutation=True, max_entries=5, ttl=3000)
def plot_heatmap(year_df, max_features, sample_size=500):
    """Build an entity co-occurrence heatmap from a sample of abstracts.

    Args:
        year_df: Series of per-abstract gene/entity lists (year-filtered).
        max_features: Number of top co-occurring entities to include.
        sample_size: Upper bound on how many abstracts to sample.

    Returns:
        Tuple of (heatmap figure, actual number of abstracts sampled).
    """
    logging.info("Sample dataframe")
    # Never try to sample more rows than exist.
    sample_df = year_df.sample(np.min([sample_size, len(year_df)]))
    logging.info("Run heatmap function")
    fig_map = get_data.preprocessor.plot_entity_heatmap(sample_df,
                                                        font_scale=.9,
                                                        max_entities=max_features)
    return fig_map, len(sample_df)
# Draw the clustered heatmap under the Agg lock (renderer is not thread-safe).
with _lock:
    heatmp_plot, n_samples = plot_heatmap(sub_year1,
                                          max_features,
                                          sample_size=max_sample_size)
    st.write(f"Computed on {n_samples} abstracts")
    logging.info("Set axis")
    plt.setp(heatmp_plot.ax_heatmap.get_xticklabels(), rotation=45)
    logging.info("Plot heatmap")
    st.pyplot(heatmp_plot)

# logging.info("Close DB connection")
# cnx.close()
| {
"alphanum_fraction": 0.6073873874,
"author": null,
"avg_line_length": 32.4561403509,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d50f007b29126007a51b185be002176f7052f133",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d5e474d477d8c624917ccfa4f73edea17cf18ae0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "omacshane/glioblastoma-research-dashboard",
"max_forks_repo_path": "src/streamlit_app.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "d5e474d477d8c624917ccfa4f73edea17cf18ae0",
"max_issues_repo_issues_event_max_datetime": "2021-06-17T16:02:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-17T16:01:22.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "omacshane/glioblastoma-research-dashboard",
"max_issues_repo_path": "src/streamlit_app.py",
"max_line_length": 82,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d5e474d477d8c624917ccfa4f73edea17cf18ae0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "omacshane/glioblastoma-research-dashboard",
"max_stars_repo_path": "src/streamlit_app.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1242,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5550
} |
"""
Test cases for the regi0.geographic.outliers.find_value_outliers function.
"""
import numpy as np
import pandas as pd
from regi0.geographic.outliers import find_value_outliers
def test_iqr(records):
    """IQR outlier flags for minimumElevationInMeters, grouped by species."""
    result = find_value_outliers(
        records, "scientificName", "minimumElevationInMeters", method="iqr"
    )
    # Rows 8 and 17 are outliers; rows 18, 20 and 21 have no elevation value.
    values = [False] * 22
    for i in (8, 17):
        values[i] = True
    for i in (18, 20, 21):
        values[i] = np.nan
    pd.testing.assert_series_equal(result, pd.Series(values))
def test_std(records):
    """Standard-deviation outlier flags, grouped by species."""
    result = find_value_outliers(
        records, "scientificName", "minimumElevationInMeters", method="std"
    )
    # Rows 8 and 17 are outliers; rows 18, 20 and 21 have no elevation value.
    values = [False] * 22
    for i in (8, 17):
        values[i] = True
    for i in (18, 20, 21):
        values[i] = np.nan
    pd.testing.assert_series_equal(result, pd.Series(values))
def test_zscore(records):
    """Z-score outlier flags, grouped by species."""
    result = find_value_outliers(
        records, "scientificName", "minimumElevationInMeters", method="zscore"
    )
    # Rows 8 and 17 are outliers; rows 18, 20 and 21 have no elevation value.
    values = [False] * 22
    for i in (8, 17):
        values[i] = True
    for i in (18, 20, 21):
        values[i] = np.nan
    pd.testing.assert_series_equal(result, pd.Series(values))
def test_std_higher_threshold(records):
    """At threshold=3.0 only the most extreme value (row 8) is flagged."""
    result = find_value_outliers(
        records,
        "scientificName",
        "minimumElevationInMeters",
        method="std",
        threshold=3.0,
    )
    # Only row 8 remains an outlier; rows 18, 20 and 21 have no value.
    values = [False] * 22
    values[8] = True
    for i in (18, 20, 21):
        values[i] = np.nan
    pd.testing.assert_series_equal(result, pd.Series(values))
def test_zscore_higher_threshold(records):
    """At threshold=3.0 only the most extreme value (row 8) is flagged."""
    result = find_value_outliers(
        records,
        "scientificName",
        "minimumElevationInMeters",
        method="zscore",
        threshold=3.0,
    )
    # Only row 8 remains an outlier; rows 18, 20 and 21 have no value.
    values = [False] * 22
    values[8] = True
    for i in (18, 20, 21):
        values[i] = np.nan
    pd.testing.assert_series_equal(result, pd.Series(values))
| {
"alphanum_fraction": 0.42,
"author": null,
"avg_line_length": 20.1657458564,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "760c1b671279934e12c0ec04df37f9e6485603ce",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0d64587d5d87f57cddfc7a67bb8baf74cd70adf2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "PEM-Humboldt/regi0",
"max_forks_repo_path": "tests/geographic/outliers/test_find_value_outliers.py",
"max_issues_count": 15,
"max_issues_repo_head_hexsha": "0d64587d5d87f57cddfc7a67bb8baf74cd70adf2",
"max_issues_repo_issues_event_max_datetime": "2022-03-09T23:23:04.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-02-03T11:38:37.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "PEM-Humboldt/regi0",
"max_issues_repo_path": "tests/geographic/outliers/test_find_value_outliers.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0d64587d5d87f57cddfc7a67bb8baf74cd70adf2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "PEM-Humboldt/regi0",
"max_stars_repo_path": "tests/geographic/outliers/test_find_value_outliers.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 700,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3650
} |
import math
import os
import textwrap
from functools import partial
from multiprocessing import Pool, Manager
from os.path import join
import matplotlib
matplotlib.use('Agg')
import numpy as np
import seaborn as sns
sns.set()
from tqdm import tqdm
import argparse
from config import MovieQAPath
from data.data_loader import Subtitle
import matplotlib.pyplot as plt
import utils.data_utils as du
import utils.func_utils as fu
_mp = MovieQAPath()
def l2_norm(x, axis=1, eps=1e-6):
    """Normalize `x` to unit L2 norm along `axis`, clamping near-zero norms
    to `eps` to avoid division by zero."""
    norms = np.linalg.norm(x, axis=axis, keepdims=True)
    return x / np.maximum(norms, eps)
def do_attn(index, subtitle, counter, qa):
    """Score one QA item by subtitle attention and tally correct predictions.

    Args:
        index: Mapping imdb_key -> subtitle line indices (Manager-shared).
        subtitle: Mapping imdb_key -> subtitle data with a 'lines' list.
        counter: Manager-shared value accumulating correct predictions.
        qa: One QA record (qid, imdb_key, correct_index, question, answers).
    """
    # Encoded question (row 0) and candidate answers (rows 1+), L2-normalized.
    qa_embed = np.load(join(_mp.encode_dir, qa['qid'] + '.npy'))
    ques_embed, ans_embed = l2_norm(qa_embed[[0]]), l2_norm(qa_embed[1:])
    subt_embed = np.load(join(_mp.encode_dir, qa['imdb_key'] + '.npy'))
    # `spec` selects which subtitle lines are relevant to this question.
    spec = np.load(join(_mp.encode_dir, qa['qid'] + '_spec' + '.npy'))
    subt_embed = l2_norm(subt_embed[spec == 1])
    # Similarity of each subtitle line to the question and to each answer.
    sq = np.matmul(subt_embed, ques_embed.transpose())
    sa = np.matmul(subt_embed, ans_embed.transpose())
    # Attention-weighted pooling of subtitle vectors, one pooled vector per
    # candidate answer.
    sqa = np.expand_dims(sq + sa, axis=0)
    sqa = np.transpose(sqa, [2, 1, 0])
    sqas = np.expand_dims(subt_embed, axis=0) * sqa
    sqas = np.sum(sqas, axis=1)
    sqas = l2_norm(sqas)
    # Final score: similarity between pooled subtitle vector and each answer.
    output = np.sum(sqas * ans_embed, axis=1)
    choice = np.argmax(output)
    counter.value += int(int(choice) == qa['correct_index'])
    if args.img:
        # Dump per-question attention heatmaps, 30 subtitle lines per image.
        iid = [idx for i, idx in enumerate(index[qa['imdb_key']]) if spec[i] == 1]
        sentences = [textwrap.fill(subtitle[qa['imdb_key']]['lines'][idx], 40) for idx in iid]
        attn = np.concatenate([
            np.matmul(subt_embed, ques_embed.transpose()),
            np.matmul(subt_embed, ans_embed.transpose())
        ], axis=1)
        # Direct question-answer similarity, shown in the column labels.
        qa_cor = np.matmul(ques_embed, ans_embed.transpose()).squeeze()
        h = attn.shape[0]
        num = int(math.ceil(h / 30))
        for i in range(num):
            s, e = i * 30, min((i + 1) * 30, h)
            fig = plt.figure(figsize=(12, int(math.ceil((e - s) / 2))))
            ax = sns.heatmap(attn[s:e])
            ax.set_xticklabels([textwrap.fill(qa['question'], 40)] +
                               [textwrap.fill(ans + ' %.4f' % qa_cor[idx], 40) for idx, ans in
                                enumerate(qa['answers'])], rotation=45, ha='right')
            temp_sentences = sentences[s:e]
            # Heatmap y labels run bottom-up; reverse to match row order.
            temp_sentences.reverse()
            ax.set_yticklabels(temp_sentences, rotation=0)
            # Title shows ground-truth index vs predicted index.
            ax.set_title('%d %d' % (qa['correct_index'], int(choice)))
            fig.savefig(os.path.join(stat_dir, '%s_%d.jpg') % (qa['qid'].replace(':', ''), i), bbox_inches='tight')
            plt.close()
def main():
    """Evaluate the attention baseline on the train and val QA splits."""
    video_qa = [qa for qa in du.json_load(_mp.qa_file) if qa['video_clips']]
    train = [qa for qa in video_qa if 'train' in qa['qid']]
    val = [qa for qa in video_qa if 'val' in qa['qid']]
    # NOTE(review): 'tests' looks like a typo for 'test' — as written this
    # list is likely always empty, and it is never used below. Confirm.
    test = [qa for qa in video_qa if 'tests' in qa['qid']]
    with Pool(4) as pool, Manager() as manager:
        # Share the large lookup tables across the worker processes.
        index = manager.dict(du.json_load(_mp.sample_index_file))
        subtitle = manager.dict(Subtitle().get())
        counter = manager.Value(int, 0)
        func = partial(do_attn, index, subtitle, counter)
        for _ in pool.imap_unordered(func, tqdm(train)):
            pass
        print('train acc: %.4f' % (counter.value / len(train)))
        # Fresh counter for the validation split.
        counter = manager.Value(int, 0)
        func = partial(do_attn, index, subtitle, counter)
        for _ in pool.imap_unordered(func, tqdm(val)):
            pass
        print('val acc: %.4f' % (counter.value / len(val)))
def args_parse():
    """Parse command-line options.

    Returns:
        argparse.Namespace with a boolean ``img`` attribute that enables
        writing attention heatmap images.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--img', action='store_true', help='Create attention images.')
    return arg_parser.parse_args()
if __name__ == '__main__':
    args = args_parse()
    if args.img:
        # Recreate the attention-image output directory from scratch.
        stat_dir = os.path.join('stat')
        if os.path.exists(stat_dir):
            os.system('rm -rf %s' % stat_dir)
        fu.make_dirs(stat_dir)
    main()
| {
"alphanum_fraction": 0.6110133266,
"author": null,
"avg_line_length": 34.2844827586,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c8fc30a47fc53e09a0e20ce970b00303cfd85092",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4281bf4a731aa14a0d19f18adda31d59a4a297cb",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Tommy-Liu/MovieQA_Contest",
"max_forks_repo_path": "statistics.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4281bf4a731aa14a0d19f18adda31d59a4a297cb",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Tommy-Liu/MovieQA_Contest",
"max_issues_repo_path": "statistics.py",
"max_line_length": 115,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "4281bf4a731aa14a0d19f18adda31d59a4a297cb",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Tommy-Liu/MovieQA_Contest",
"max_stars_repo_path": "statistics.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1071,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3977
} |
/**
* Copyright (c) 2018, University Osnabrück
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the University Osnabrück nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL University Osnabrück BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* LVRPickingInteractor.hpp
*
* @date Feb 19, 2014
* @author Thomas Wiemann
*/
#ifndef LVRPICKINGINTERACTOR_HPP_
#define LVRPICKINGINTERACTOR_HPP_
#include <QObject>
#include <QMessageBox>
#include <vtkIdTypeArray.h>
#include <vtkTextActor.h>
#include <QTreeWidgetItem>
#include <vtkInteractorStyleTrackballCamera.h>
#include <vtkInteractorStyleRubberBandPick.h>
#include <vtkSmartPointer.h>
#include <vtkRenderer.h>
#include <vtkMath.h>
#include <vtkDataSetMapper.h>
#include <boost/shared_array.hpp>
#include <memory>
#include "LVRInteractorStylePolygonPick.hpp"
#include <map>
namespace lvr2
{
/**
 * @brief Qt-aware VTK interactor style for the LVR viewer.
 *
 * Extends LVRInteractorStylePolygonPick with point picking, point labeling
 * and correspondence search. Mouse/keyboard events are dispatched to one of
 * three camera modes (trackball, terrain, first-person "shooter"); picking
 * and labeling results are reported to the GUI via Qt signals.
 */
class LVRPickingInteractor : public QObject, public LVRInteractorStylePolygonPick
{
Q_OBJECT
public:
// Standard VTK object-factory constructor.
static LVRPickingInteractor* New();
LVRPickingInteractor();
vtkTypeMacro(LVRPickingInteractor, LVRInteractorStylePolygonPick);
// Sets the renderer this interactor operates on.
void setRenderer(vtkSmartPointer<vtkRenderer> renderer);
// Removes the label with the given id.
void removeLabel(const int&);
//LVRPickingInteractor(vtkSmartPointer<vtkRenderer> renderer);
virtual ~LVRPickingInteractor();
/**
 * @brief Overloaded mouse event handling; each handler forwards to the
 *        variant matching the current InteractorMode.
 */
virtual void OnLeftButtonDown();
virtual void OnLeftButtonUp();
virtual void OnMouseMove();
virtual void OnMiddleButtonUp();
virtual void OnMiddleButtonDown();
virtual void OnRightButtonUp();
virtual void OnRightButtonDown();
virtual void OnMouseWheelBackward();
virtual void OnMouseWheelForward();
virtual void OnChar();
virtual void OnTimer();
/**
 * @brief Overloaded keyboard press event handling
 */
virtual void OnKeyPress();
/**
 * @brief Overloaded keyboard release event handling
 */
virtual void OnKeyRelease();
virtual void OnKeyDown();
// Camera motion primitives (overridden from the base interactor style).
virtual void Dolly();
virtual void Dolly(double factor);
virtual void Pan();
virtual void Spin();
virtual void Zoom();
virtual void Rotate();
/**
 * @brief Returns the text actor; needed to re-add it after clearing the
 *        render window.
 */
vtkSmartPointer<vtkTextActor> getTextActor(){ return m_textActor; }
void updateFocalPoint();
// Point cloud accessors used by picking and labeling.
void setPoints(vtkSmartPointer<vtkPolyData> points);
vtkSmartPointer<vtkPolyData> getPoints();
// NOTE(review): name is misspelled ("Labeles"); kept for API compatibility.
std::vector<uint16_t>& getLabeles();
// Slots invoked by the GUI to toggle modes and configure the interactor.
public Q_SLOTS:
void correspondenceSearchOn();
void correspondenceSearchOff();
void labelingOn();
void labelingOff();
void setLabeledPointVisibility(int, bool);
void setLabel(int, std::vector<int>);
void requestLabels();
void labelModeChanged(bool);
void newLabel(QTreeWidgetItem*);
void setLassoTool(bool);
void labelSelected(uint16_t);
void setMotionFactor(double factor);
void setRotationFactor(double factor);
void setFocalPointRendering(int state);
void setStereoMode(int state);
void pickFocalPoint();
// Switch between the three camera interaction modes.
void modeTerrain();
void modeTrackball();
void modeShooter();
void resetCamera();
// Signals emitted towards the GUI with picking/labeling results.
Q_SIGNALS:
void clusterSelected(double*);
void firstPointPicked(double*);
void secondPointPicked(double*);
void pointSelected(vtkActor*, int);
void pointsLabeled(uint16_t, int);
void responseLabels(std::vector<uint16_t>);
void labelingStarted(bool);
private:
// Camera interaction mode currently active.
enum InteractorMode {TRACKBALL, SHOOTER, TERRAIN};
// Sub-mode of the first-person ("shooter") camera.
enum ShooterMode {LOOK, HOVER};
enum PickMode {None, PickPoint, PickFirst, PickSecond, PickFocal, PickLabel};
void handlePicking();
// ------------------------- TRACKBALL
void dollyTrackball();
void dollyTrackball(double factor);
void panTrackball();
void spinTrackball();
void zoomTrackball();
void rotateTrackball();
void onLeftButtonDownTrackball();
void onLeftButtonUpTrackball();
void onMouseMoveTrackball();
void onMiddleButtonUpTrackball();
void onMiddleButtonDownTrackball();
void onRightButtonUpTrackball();
void onRightButtonDownTrackball();
void onMouseWheelBackwardTrackball();
void onMouseWheelForwardTrackball();
// ------------------------ TERRAIN
void dollyTerrain();
void dollyTerrain(double factor);
void panTerrain();
void spinTerrain();
void zoomTerrain();
void rotateTerrain();
void onLeftButtonDownTerrain();
void onLeftButtonUpTerrain();
void onMouseMoveTerrain();
void onMiddleButtonUpTerrain();
void onMiddleButtonDownTerrain();
void onRightButtonUpTerrain();
void onRightButtonDownTerrain();
void onMouseWheelBackwardTerrain();
void onMouseWheelForwardTerrain();
// ------------------------ SHOOTER
void dollyShooter();
void dollyShooter(double factor);
void panShooter();
void spinShooter();
void zoomShooter();
void rotateShooter();
void hoverShooter();
void resetViewUpShooter();
void strafeShooter(double factor);
void onLeftButtonDownShooter();
void onLeftButtonUpShooter();
void onMouseMoveShooter();
void onMiddleButtonUpShooter();
void onMiddleButtonDownShooter();
void onRightButtonUpShooter();
void onRightButtonDownShooter();
void onMouseWheelBackwardShooter();
void onMouseWheelForwardShooter();
//Labeling
// Point-in-polygon test used by lasso/polygon selection.
bool isInside(std::vector<vtkVector2i>* polygon, int& pX, int& pY);
void calculateSelection(bool select);
void saveCurrentLabelSelection();
void discardChanges();
void updateActor(int);
/// Indicates picking mode
PickMode m_pickMode;
/// Text actor to display info if in picking mode
vtkSmartPointer<vtkTextActor> m_textActor;
// Marker actors shown during picking.
vtkSmartPointer<vtkActor> m_sphereActor;
vtkSmartPointer<vtkActor> m_cubeActor;
vtkSmartPointer<vtkActor> m_polyActor;
// Per-point selection flags for the current labeling session.
std::vector<bool> m_selectedPoints;
// One actor per label id, used to render labeled points.
std::map<uint16_t, vtkSmartPointer<vtkActor>> m_labelActors;
vtkSmartPointer<vtkActor> m_selectedActor;
// Label id assigned to each point of m_points.
std::vector<uint16_t> m_pointLabels;
vtkSmartPointer<vtkPolyData> m_points;
vtkSmartPointer<vtkDataSetMapper> m_selectedMapper;
vtkSmartPointer<vtkIdTypeArray> m_selectedIds;
vtkSmartPointer<vtkRenderer> m_renderer;
// Mode flags toggled by the Q_SLOTS above.
bool m_correspondenceMode;
bool m_labelingMode;
// True if the current label selection has unsaved changes.
bool m_modified;
unsigned int m_numberOfClicks;
int m_previousPosition[2];
int m_startCameraMovePosition[2];
int m_selectedLabel;
double m_viewUp[3];
// Scale factors for camera motion/rotation speed.
float m_motionFactor;
float m_rotationFactor;
InteractorMode m_interactorMode;
ShooterMode m_shooterMode;
// Display color per label id.
std::map<uint16_t, QColor> m_labelColors;
};
} /* namespace lvr2 */
#endif /* LVRPICKINGINTERACTOR_HPP_ */
| {
"alphanum_fraction": 0.6934453384,
"author": null,
"avg_line_length": 30.7345454545,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "3df103d5ec723f7d269e5b21f5c1c945a7b31979",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 13,
"max_forks_repo_forks_event_max_datetime": "2020-11-26T07:47:44.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-16T11:50:32.000Z",
"max_forks_repo_head_hexsha": "9bb03a30441b027c39db967318877e03725112d5",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "uos/lvr",
"max_forks_repo_path": "src/tools/lvr2_viewer/vtkBridge/LVRPickingInteractor.hpp",
"max_issues_count": 9,
"max_issues_repo_head_hexsha": "9bb03a30441b027c39db967318877e03725112d5",
"max_issues_repo_issues_event_max_datetime": "2021-09-17T08:31:25.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-06-19T16:19:51.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "uos/lvr",
"max_issues_repo_path": "src/tools/lvr2_viewer/vtkBridge/LVRPickingInteractor.hpp",
"max_line_length": 91,
"max_stars_count": 38,
"max_stars_repo_head_hexsha": "9bb03a30441b027c39db967318877e03725112d5",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "uos/lvr",
"max_stars_repo_path": "src/tools/lvr2_viewer/vtkBridge/LVRPickingInteractor.hpp",
"max_stars_repo_stars_event_max_datetime": "2022-02-16T03:08:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-06-19T15:10:35.000Z",
"num_tokens": 1884,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 8452
} |
# -*- coding: utf-8 -*-
import sys
import os
import numpy as np
import imgfileutils as imf
from aicsimageio import AICSImage, imread
import progressbar
import shutil
from apeer_ometiff_library import io, omexmlClass
import tifffile
import itertools as it
"""
def update5dstack(image5d, image2d,
dimstring5d='TCZYX',
t=0,
z=0,
c=0):
# remove XY
dimstring5d = dimstring5d.replace('X', '').replace('Y', '')
if dimstring5d == 'TZC':
image5d[t, z, c, :, :] = image2d
if dimstring5d == 'TCZ':
image5d[t, c, z, :, :] = image2d
if dimstring5d == 'ZTC':
image5d[z, t, c, :, :] = image2d
if dimstring5d == 'ZCT':
image5d[z, c, t, :, :] = image2d
if dimstring5d == 'CTZ':
image5d[c, t, z, :, :] = image2d
if dimstring5d == 'CZT':
image5d[c, z, t, :, :] = image2d
return image5d
"""
###################################################################
#filename = r"testdata/WP96_4Pos_B4-10_DAPI.czi"
#filename = r'testdata\WP96_2Pos_B2+B4_S=2_T=2_Z=4_C=3_X=512_Y=256.czi'
#filename = r"C:\Users\m1srh\OneDrive - Carl Zeiss AG\Testdata_Zeiss\CZI_Testfiles\W96_B2+B4_S=2_T=2=Z=4_C=3_Tile=4x8.czi"
filename = r'/datadisk1/tuxedo/testpictures/Testdata_Zeiss/CZI_Testfiles/W96_B2+B4_S=2_T=2=Z=4_C=3_Tile=4x8.czi'
savename = filename.split('.')[0] + '.ome.tiff'
# get the metadata
md, additional_mdczi = imf.get_metadata(filename)
# get AICSImageIO object using the python wrapper for libCZI
img = AICSImage(filename)
dims_dict, dimindex_list, numvalid_dims = imf.get_dimorder(md['Axes_aics'])
shape5d = list(md['Shape_aics'])
shape5d.pop(dims_dict['S'])
# create image5d for the current scene
image5d = np.zeros(shape5d, dtype=md['NumPy.dtype'])
# remove the S dimension from the dimstring
dimstring5d = md['Axes_aics'].replace('S', '')
# open the TiffWriter in order to save as Multi-Series OME-TIFF
with tifffile.TiffWriter(savename, append=False) as tif:
for s in progressbar.progressbar(range(md['SizeS']), redirect_stdout=True):
for t in range(md['SizeT']):
for z in range(md['SizeZ']):
for c in range(md['SizeC']):
image2d = img.get_image_data("YX", S=s, T=t, Z=z, C=c)
print('Image2D Shape : ', image2d.shape)
# do some processing with the image2d
# ....
# update the 5d stack
image5d = imf.update5dstack(image5d, image2d,
dimstring5d=dimstring5d,
t=t,
z=z,
c=c)
# write scene as OME-TIFF series
tif.save(image5d,
photometric='minisblack',
metadata={'axes': dimstring5d,
'PhysicalSizeX': np.round(md['XScale'], 3),
'PhysicalSizeXUnit': md['XScaleUnit'],
'PhysicalSizeY': np.round(md['YScale'], 3),
'PhysicalSizeYUnit': md['YScaleUnit'],
'PhysicalSizeZ': np.round(md['ZScale'], 3),
'PhysicalSizeZUnit': md['ZScaleUnit'] # ,
# 'Channel': {'Name': ['DAPI']}
}
)
# close the AICSImage object at the end
img.close()
print('Done')
| {
"alphanum_fraction": 0.5362195812,
"author": null,
"avg_line_length": 34.6470588235,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c5309214badfbfb1fea8ee6df9e7a6cd0920b88e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b3f7801f46de0138a8a1ac245e9c80787e0a3f17",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sebi06/czi_demos",
"max_forks_repo_path": "test_write_OME-TIFF_series.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b3f7801f46de0138a8a1ac245e9c80787e0a3f17",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sebi06/czi_demos",
"max_issues_repo_path": "test_write_OME-TIFF_series.py",
"max_line_length": 122,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "b3f7801f46de0138a8a1ac245e9c80787e0a3f17",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sebi06/czi_demos",
"max_stars_repo_path": "test_write_OME-TIFF_series.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-09T13:44:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-31T10:06:42.000Z",
"num_tokens": 988,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3534
} |
[STATEMENT]
lemma doctor_optimal_match_unique:
assumes "doctor_optimal_match ds X"
assumes "doctor_optimal_match ds Y"
shows "X = Y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. X = Y
[PROOF STEP]
proof(rule iffD2[OF set_eq_iff, rule_format])
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. (x \<in> X) = (x \<in> Y)
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. (x \<in> X) = (x \<in> Y)
[PROOF STEP]
from Pd_linear'[where d="Xd x"] Pd_Xd[where d="Xd x"]
stable_on_allocation[OF doctor_optimal_match_stable_on[OF assms(1)]]
stable_on_allocation[OF doctor_optimal_match_stable_on[OF assms(2)]]
assms
[PROOF STATE]
proof (chain)
picking this:
Linear_order (Pd (Xd x))
(?x, ?y) \<in> Pd (Xd x) \<Longrightarrow> Xd ?x = Xd x \<and> Xd ?y = Xd x
inj_on Xd X
inj_on Xd Y
doctor_optimal_match ds X
doctor_optimal_match ds Y
[PROOF STEP]
show "x \<in> X \<longleftrightarrow> x \<in> Y"
[PROOF STATE]
proof (prove)
using this:
Linear_order (Pd (Xd x))
(?x, ?y) \<in> Pd (Xd x) \<Longrightarrow> Xd ?x = Xd x \<and> Xd ?y = Xd x
inj_on Xd X
inj_on Xd Y
doctor_optimal_match ds X
doctor_optimal_match ds Y
goal (1 subgoal):
1. (x \<in> X) = (x \<in> Y)
[PROOF STEP]
unfolding doctor_optimal_match_def order_on_defs
[PROOF STATE]
proof (prove)
using this:
((Refl (Pd (Xd x)) \<and> trans (Pd (Xd x))) \<and> antisym (Pd (Xd x))) \<and> Total (Pd (Xd x))
(?x, ?y) \<in> Pd (Xd x) \<Longrightarrow> Xd ?x = Xd x \<and> Xd ?y = Xd x
inj_on Xd X
inj_on Xd Y
stable_on ds X \<and> (\<forall>Xa. \<forall>x\<in>Xa. stable_on ds Xa \<longrightarrow> (\<exists>y\<in>X. (x, y) \<in> Pd (Xd x)))
stable_on ds Y \<and> (\<forall>X. \<forall>x\<in>X. stable_on ds X \<longrightarrow> (\<exists>y\<in>Y. (x, y) \<in> Pd (Xd x)))
goal (1 subgoal):
1. (x \<in> X) = (x \<in> Y)
[PROOF STEP]
by - (rule iffI; metis antisymD inj_on_eq_iff)
[PROOF STATE]
proof (state)
this:
(x \<in> X) = (x \<in> Y)
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Stable_Matching_Contracts",
"hexsha": null,
"include": null,
"lang": null,
"length": 7,
"llama_tokens": 932,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
/-
Copyright (c) 2020 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author(s): Simon Hudon
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.testing.slim_check.sampleable
import Mathlib.PostPort
universes l v u_1 u u_2
namespace Mathlib
/-!
# `testable` Class
Testable propositions have a procedure that can generate counter-examples
together with a proof that they invalidate the proposition.
This is a port of the Haskell QuickCheck library.
## Creating Customized Instances
The type classes `testable` and `sampleable` are the means by which
`slim_check` creates samples and tests them. For instance, the proposition
`∀ i j : ℕ, i ≤ j` has a `testable` instance because `ℕ` is sampleable
and `i ≤ j` is decidable. Once `slim_check` finds the `testable`
instance, it can start using the instance to repeatedly create samples
and check whether they satisfy the property. This allows the
user to create new instances and apply `slim_check` to new situations.
### Polymorphism
The property `testable.check (∀ (α : Type) (xs ys : list α), xs ++ ys
= ys ++ xs)` shows us that type-polymorphic properties can be
tested. `α` is instantiated with `ℤ` first and then tested as normal
monomorphic properties.
The monomorphisation limits the applicability of `slim_check` to
polymorphic properties that can be stated about integers. The
limitation may be lifted in the future but, for now, if
one wishes to use a different type than `ℤ`, one has to refer to
the desired type.
### What do I do if I'm testing a property about my newly defined type?
Let us consider a type made for a new formalization:
```lean
structure my_type :=
(x y : ℕ)
(h : x ≤ y)
```
How do we test a property about `my_type`? For instance, let us consider
`testable.check $ ∀ a b : my_type, a.y ≤ b.x → a.x ≤ b.y`. Writing this
property as is will give us an error because we do not have an instance
of `sampleable my_type`. We can define one as follows:
```lean
instance : sampleable my_type :=
{ sample := do
x ← sample ℕ,
xy_diff ← sample ℕ,
return { x := x, y := x + xy_diff, h := /- some proof -/ } }
```
We can see that the instance is very simple because our type is built
up from other types that have `sampleable` instances. `sampleable` also
has a `shrink` method but it is optional. We may want to implement one
for ease of testing as:
```lean
/- ... -/
/- no specialized sampling -/
-- discard
-- x := 1
-- discard
-- x := 41
-- discard
-- x := 3
-- discard
-- x := 5
-- discard
-- x := 5
-- discard
-- x := 197
-- discard
-- x := 469
-- discard
-- x := 9
-- discard
-- ===================
-- Found problems!
-- x := 552
-- -------------------
/- let us define a specialized sampling instance -/
-- ===================
-- Found problems!
-- x := 358
-- -------------------
namespace slim_check
/-- Result of trying to disprove `p`
The constructors are:
* `success : (psum unit p) → test_result`
succeed when we find another example satisfying `p`
In `success h`, `h` is an optional proof of the proposition.
Without the proof, all we know is that we found one example
where `p` holds. With a proof, the one test was sufficient to
prove that `p` holds and we do not need to keep finding examples.
* `gave_up {} : ℕ → test_result`
give up when a well-formed example cannot be generated.
`gave_up n` tells us that `n` invalid examples were tried.
Above 100, we give up on the proposition and report that we
did not find a way to properly test it.
* `failure : ¬ p → (list string) → ℕ → test_result`
a counter-example to `p`; the strings specify values for the relevant variables.
`failure h vs n` also carries a proof that `p` does not hold. This way, we can
guarantee that there will be no false positive. The last component, `n`,
is the number of times that the counter-example was shrunk.
-/
inductive test_result (p : Prop)
where
| success : psum Unit p → test_result p
| gave_up : ℕ → test_result p
| failure : ¬p → List string → ℕ → test_result p
/-- format a `test_result` as a string. -/
protected def test_result.to_string {p : Prop} : test_result p → string :=
sorry
/-- configuration for testing a property -/
structure slim_check_cfg
where
num_inst : ℕ
max_size : ℕ
trace_discarded : Bool
trace_success : Bool
trace_shrink : Bool
trace_shrink_candidates : Bool
random_seed : Option ℕ
quiet : Bool
protected instance test_result.has_to_string {p : Prop} : has_to_string (test_result p) :=
has_to_string.mk test_result.to_string
/--
`printable_prop p` allows one to print a proposition so that
`slim_check` can indicate how values relate to each other.
-/
class printable_prop (p : Prop)
where
print_prop : Option string
protected instance default_printable_prop {p : Prop} : printable_prop p :=
printable_prop.mk none
/-- `testable p` uses random examples to try to disprove `p`. -/
class testable (p : Prop)
where
run : slim_check_cfg → Bool → gen (test_result p)
/-- applicative combinator proof carrying test results -/
def combine {p : Prop} {q : Prop} : psum Unit (p → q) → psum Unit p → psum Unit q :=
sorry
/-- Combine the test result for properties `p` and `q` to create a test for their conjunction. -/
def and_counter_example {p : Prop} {q : Prop} : test_result p → test_result q → test_result (p ∧ q) :=
sorry
/-- Combine the test result for properties `p` and `q` to create a test for their disjunction -/
def or_counter_example {p : Prop} {q : Prop} : test_result p → test_result q → test_result (p ∨ q) :=
sorry
/-- If `q → p`, then `¬ p → ¬ q` which means that testing `p` can allow us
to find counter-examples to `q`. -/
def convert_counter_example {p : Prop} {q : Prop} (h : q → p) : test_result p → optParam (psum Unit (p → q)) (psum.inl Unit.unit) → test_result q :=
sorry
/-- Test `q` by testing `p` and proving the equivalence between the two. -/
def convert_counter_example' {p : Prop} {q : Prop} (h : p ↔ q) (r : test_result p) : test_result q :=
convert_counter_example (iff.mpr h) r (psum.inr (iff.mp h))
/-- When we assign a value to a universally quantified variable,
we record that value using this function so that our counter-examples
can be informative. -/
def add_to_counter_example (x : string) {p : Prop} {q : Prop} (h : q → p) : test_result p → optParam (psum Unit (p → q)) (psum.inl Unit.unit) → test_result q :=
sorry
/-- Add some formatting to the information recorded by `add_to_counter_example`. -/
def add_var_to_counter_example {γ : Type v} [has_repr γ] (var : string) (x : γ) {p : Prop} {q : Prop} (h : q → p) : test_result p → optParam (psum Unit (p → q)) (psum.inl Unit.unit) → test_result q :=
add_to_counter_example
(var ++
string.str
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat (bit0 (bit1 (bit0 (bit1 (bit1 1)))))))
(char.of_nat (bit1 (bit0 (bit1 (bit1 (bit1 1)))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
repr x)
h
/-- Gadget used to introspect the name of bound variables.
It is used with the `testable` typeclass so that
`testable (named_binder "x" (∀ x, p x))` can use the variable name
of `x` in error messages displayed to the user. If we find that instantiating
the above quantifier with 3 falsifies it, we can print:
```
==============
Problem found!
==============
x := 3
```
-/
@[simp] def named_binder (n : string) (p : Prop) :=
p
/-- Is the given test result a failure? -/
def is_failure {p : Prop} : test_result p → Bool :=
sorry
/-- Test a conjunction `p ∧ q` by running the tests for `p` and `q` and
merging the two results with `and_counter_example`. -/
protected instance and_testable (p : Prop) (q : Prop) [testable p] [testable q] : testable (p ∧ q) :=
testable.mk
  fun (cfg : slim_check_cfg) (min : Bool) =>
    do
    -- run both component tests with the same configuration
    let xp ← testable.run p cfg min
    let xq ← testable.run q cfg min
    pure (and_counter_example xp xq)
/-- Test a disjunction `p ∨ q`; a counter-example must falsify both sides.
The second half of the mathport-generated body is left as `sorry`. -/
protected instance or_testable (p : Prop) (q : Prop) [testable p] [testable q] : testable (p ∨ q) :=
testable.mk
  fun (cfg : slim_check_cfg) (min : Bool) =>
    do
    testable.run p cfg min
    sorry
/-- Test `p ↔ q` by testing the propositionally equivalent
`p ∧ q ∨ ¬p ∧ ¬q` and converting the resulting counter-example. -/
protected instance iff_testable (p : Prop) (q : Prop) [testable (p ∧ q ∨ ¬p ∧ ¬q)] : testable (p ↔ q) :=
testable.mk
  fun (cfg : slim_check_cfg) (min : Bool) =>
    do
    let xp ← testable.run (p ∧ q ∨ ¬p ∧ ¬q) cfg min
    return (convert_counter_example' sorry xp)
protected instance dec_guard_testable (var : string) (p : Prop) [printable_prop p] [Decidable p] (β : p → Prop) [(h : p) → testable (β h)] : testable (named_binder var (∀ (h : p), β h)) :=
testable.mk
fun (cfg : slim_check_cfg) (min : Bool) =>
dite p (fun (h : p) => sorry)
fun (h : ¬p) =>
ite (↥(slim_check_cfg.trace_discarded cfg) ∨ ↥(slim_check_cfg.trace_success cfg)) sorry
(return (test_result.gave_up 1))
/-- Type tag that replaces a type's `has_repr` instance with its `has_to_string` instance. -/
def use_has_to_string (α : Type u_1) :=
α
protected instance use_has_to_string.inhabited (α : Type u) [I : Inhabited α] : Inhabited (use_has_to_string α) :=
I
/-- Add the type tag `use_has_to_string` to an expression's type. -/
def use_has_to_string.mk {α : Type u_1} (x : α) : use_has_to_string α :=
x
protected instance use_has_to_string.has_repr (α : Type u) [has_to_string α] : has_repr (use_has_to_string α) :=
has_repr.mk to_string
protected instance all_types_testable (var : string) (f : Type → Prop) [testable (f ℤ)] : testable (named_binder var (∀ (x : Type), f x)) :=
testable.mk
fun (cfg : slim_check_cfg) (min : Bool) =>
do
let r ← testable.run (f ℤ) cfg min
return
(add_var_to_counter_example var
(use_has_to_string.mk
(string.str string.empty
(char.of_nat
(bit0 (bit0 (bit1 (bit0 (bit0 (bit1 (bit0 (bit0 (bit1 (bit0 (bit0 (bit0 (bit0 1))))))))))))))))
sorry r (psum.inl Unit.unit))
/-- Trace the value of sampled variables if the sample is discarded. -/
def trace_if_giveup {p : Prop} {α : Type u_1} {β : Type u_2} [has_repr α] (tracing_enabled : Bool) (var : string) (val : α) : test_result p → thunk β → β :=
sorry
/-- testable instance for a property iterating over the element of a list -/
protected instance test_forall_in_list (var : string) (var' : string) (α : Type u) (β : α → Prop) [(x : α) → testable (β x)] [has_repr α] (xs : List α) : testable (named_binder var (∀ (x : α), named_binder var' (x ∈ xs → β x))) :=
sorry
/-- Test proposition `p` by randomly selecting one of the provided
testable instances. -/
def combine_testable (p : Prop) (t : List (testable p)) (h : 0 < list.length t) : testable p :=
testable.mk
fun (cfg : slim_check_cfg) (min : Bool) =>
(fun (this : 0 < list.length (list.map (fun (t : testable p) => testable.run p cfg min) t)) =>
gen.one_of (list.map (fun (t : testable p) => testable.run p cfg min) t) this)
sorry
/--
Format the counter-examples found in a test failure.
-/
def format_failure (s : string) (xs : List string) (n : ℕ) : string := sorry
-------------------
/--
Format the counter-examples found in a test failure.
-/
def format_failure' (s : string) {p : Prop} : test_result p → string :=
sorry
/--
Increase the number of shrinking steps in a test result.
-/
def add_shrinks {p : Prop} (n : ℕ) : test_result p → test_result p :=
sorry
/-- Shrink a counter-example `x` by using `shrink x`, picking the first
candidate that falsifies a property and recursively shrinking that one.
The process is guaranteed to terminate because `shrink x` produces
a proof that all the values it produces are smaller (according to `sizeof`)
than `x`. -/
def minimize_aux (α : Type u) (β : α → Prop) [sampleable_ext α] [(x : α) → testable (β x)] (cfg : slim_check_cfg) (var : string) : sampleable_ext.proxy_repr α →
ℕ → option_t gen (sigma fun (x : sampleable_ext.proxy_repr α) => test_result (β (sampleable_ext.interp α x))) := sorry
/-- Once a property fails to hold on an example, look for smaller counter-examples
to show the user. -/
def minimize (α : Type u) (β : α → Prop) [sampleable_ext α] [(x : α) → testable (β x)] (cfg : slim_check_cfg) (var : string) (x : sampleable_ext.proxy_repr α) (r : test_result (β (sampleable_ext.interp α x))) : gen (sigma fun (x : sampleable_ext.proxy_repr α) => test_result (β (sampleable_ext.interp α x))) := sorry
protected instance exists_testable (var : string) (var' : string) (α : Type u) (β : α → Prop) (p : Prop) [testable (named_binder var (∀ (x : α), named_binder var' (β x → p)))] : testable (named_binder var' (named_binder var (∃ (x : α), β x) → p)) :=
testable.mk
fun (cfg : slim_check_cfg) (min : Bool) =>
do
let x ← testable.run (named_binder var (∀ (x : α), named_binder var' (β x → p))) cfg min
pure (convert_counter_example' sorry x)
/-- Test a universal property by creating a sample of the right type and instantiating the
bound variable with it -/
protected instance var_testable (var : string) (α : Type u) (β : α → Prop) [sampleable_ext α] [(x : α) → testable (β x)] : testable (named_binder var (∀ (x : α), β x)) := sorry
/-- Test a universal property about propositions -/
protected instance prop_var_testable (var : string) (β : Prop → Prop) [I : (b : Bool) → testable (β ↥b)] : testable (named_binder var (∀ (p : Prop), β p)) :=
testable.mk
fun (cfg : slim_check_cfg) (min : Bool) =>
(fun (ᾰ : test_result (∀ (b : Bool), β ↥b)) => convert_counter_example sorry ᾰ (psum.inl Unit.unit)) <$>
testable.run (named_binder var (∀ (b : Bool), β ↥b)) cfg min
protected instance unused_var_testable (var : string) (α : Type u) (β : Prop) [Inhabited α] [testable β] : testable (named_binder var (α → β)) :=
testable.mk
fun (cfg : slim_check_cfg) (min : Bool) =>
do
let r ← testable.run β cfg min
pure (convert_counter_example sorry r (psum.inr sorry))
protected instance subtype_var_testable (var : string) (var' : string) (α : Type u) (β : α → Prop) {p : α → Prop} [(x : α) → printable_prop (p x)] [(x : α) → testable (β x)] [I : sampleable_ext (Subtype p)] : testable (named_binder var (∀ (x : α), named_binder var' (p x → β x))) :=
testable.mk
fun (cfg : slim_check_cfg) (min : Bool) =>
let test : (x : Subtype p) → testable (β ↑x) :=
fun (x : Subtype p) =>
testable.mk
fun (cfg : slim_check_cfg) (min : Bool) =>
do
testable.run (β (subtype.val x)) cfg min
sorry;
do
let r ← testable.run (∀ (x : Subtype p), β (subtype.val x)) cfg min
pure (convert_counter_example' sorry r)
/-- A decidable proposition needs no sampling: decide it once and return
`success` carrying the proof, or a failure (ported body uses `sorry`). -/
protected instance decidable_testable (p : Prop) [printable_prop p] [Decidable p] : testable p :=
testable.mk
  fun (cfg : slim_check_cfg) (min : Bool) =>
    return (dite p (fun (h : p) => test_result.success (psum.inr h)) fun (h : ¬p) => sorry)
protected instance eq.printable_prop {α : Type u_1} [has_repr α] (x : α) (y : α) : printable_prop (x = y) :=
printable_prop.mk
(some
(string.empty ++ to_string (repr x) ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat (bit1 (bit0 (bit1 (bit1 (bit1 1)))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string (repr y) ++
string.empty)))
protected instance ne.printable_prop {α : Type u_1} [has_repr α] (x : α) (y : α) : printable_prop (x ≠ y) :=
printable_prop.mk
(some
(string.empty ++ to_string (repr x) ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat
(bit0 (bit0 (bit0 (bit0 (bit0 (bit1 (bit1 (bit0 (bit0 (bit1 (bit0 (bit0 (bit0 1)))))))))))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string (repr y) ++
string.empty)))
protected instance le.printable_prop {α : Type u_1} [HasLessEq α] [has_repr α] (x : α) (y : α) : printable_prop (x ≤ y) :=
printable_prop.mk
(some
(string.empty ++ to_string (repr x) ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat
(bit0 (bit0 (bit1 (bit0 (bit0 (bit1 (bit1 (bit0 (bit0 (bit1 (bit0 (bit0 (bit0 1)))))))))))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string (repr y) ++
string.empty)))
protected instance lt.printable_prop {α : Type u_1} [HasLess α] [has_repr α] (x : α) (y : α) : printable_prop (x < y) :=
printable_prop.mk
(some
(string.empty ++ to_string (repr x) ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat (bit0 (bit0 (bit1 (bit1 (bit1 1)))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string (repr y) ++
string.empty)))
protected instance perm.printable_prop {α : Type u_1} [has_repr α] (xs : List α) (ys : List α) : printable_prop (xs ~ ys) :=
printable_prop.mk
(some
(string.empty ++ to_string (repr xs) ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat (bit0 (bit1 (bit1 (bit1 (bit1 (bit1 1))))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string (repr ys) ++
string.empty)))
-- Renders `x ∧ y` as `"(" ++ x' ++ " ∧ " ++ y' ++ ")"` where `x'`/`y'` are the
-- printed sub-propositions; `none` propagates if either side is unprintable.
-- Character numerals: '(' = 40, ' ' = 32, '∧' = 8743, ')' = 41.
protected instance and.printable_prop (x : Prop) (y : Prop) [printable_prop x] [printable_prop y] : printable_prop (x ∧ y) :=
printable_prop.mk
(do
let x' ← printable_prop.print_prop x
let y' ← printable_prop.print_prop y
some
(string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit1 (bit0 1)))))) ++ to_string x' ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat
(bit1 (bit1 (bit1 (bit0 (bit0 (bit1 (bit0 (bit0 (bit0 (bit1 (bit0 (bit0 (bit0 1)))))))))))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string y' ++
string.str string.empty (char.of_nat (bit1 (bit0 (bit0 (bit1 (bit0 1)))))))))
-- Same shape for disjunction: `"(" ++ x' ++ " ∨ " ++ y' ++ ")"` ('∨' = 8744).
protected instance or.printable_prop (x : Prop) (y : Prop) [printable_prop x] [printable_prop y] : printable_prop (x ∨ y) :=
printable_prop.mk
(do
let x' ← printable_prop.print_prop x
let y' ← printable_prop.print_prop y
some
(string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit1 (bit0 1)))))) ++ to_string x' ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat
(bit0 (bit0 (bit0 (bit1 (bit0 (bit1 (bit0 (bit0 (bit0 (bit1 (bit0 (bit0 (bit0 1)))))))))))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string y' ++
string.str string.empty (char.of_nat (bit1 (bit0 (bit0 (bit1 (bit0 1)))))))))
-- Bi-implication: `"(" ++ x' ++ " ↔ " ++ y' ++ ")"` ('↔' = 8596).
protected instance iff.printable_prop (x : Prop) (y : Prop) [printable_prop x] [printable_prop y] : printable_prop (x ↔ y) :=
printable_prop.mk
(do
let x' ← printable_prop.print_prop x
let y' ← printable_prop.print_prop y
some
(string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit1 (bit0 1)))))) ++ to_string x' ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat
(bit0 (bit0 (bit1 (bit0 (bit1 (bit0 (bit0 (bit1 (bit1 (bit0 (bit0 (bit0 (bit0 1)))))))))))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string y' ++
string.str string.empty (char.of_nat (bit1 (bit0 (bit0 (bit1 (bit0 1)))))))))
-- Implication: `"(" ++ x' ++ " → " ++ y' ++ ")"` ('→' = 8594).
protected instance imp.printable_prop (x : Prop) (y : Prop) [printable_prop x] [printable_prop y] : printable_prop (x → y) :=
printable_prop.mk
(do
let x' ← printable_prop.print_prop x
let y' ← printable_prop.print_prop y
some
(string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit1 (bit0 1)))))) ++ to_string x' ++
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(char.of_nat
(bit0 (bit1 (bit0 (bit0 (bit1 (bit0 (bit0 (bit1 (bit1 (bit0 (bit0 (bit0 (bit0 1)))))))))))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string y' ++
string.str string.empty (char.of_nat (bit1 (bit0 (bit0 (bit1 (bit0 1)))))))))
-- Negation: prefixes the printed sub-proposition with `"¬ "` ('¬' = 172).
protected instance not.printable_prop (x : Prop) [printable_prop x] : printable_prop (¬x) :=
printable_prop.mk
(do
let x' ← printable_prop.print_prop x
some
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit1 (bit1 (bit0 (bit1 (bit0 1)))))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))) ++
to_string x' ++
string.empty))
-- `True` always prints as the literal string "true"
-- (character codes 116, 114, 117, 101).
protected instance true.printable_prop : printable_prop True :=
printable_prop.mk
(some
(string.str
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit1 (bit0 (bit1 (bit1 1))))))))
(char.of_nat (bit0 (bit1 (bit0 (bit0 (bit1 (bit1 1))))))))
(char.of_nat (bit1 (bit0 (bit1 (bit0 (bit1 (bit1 1))))))))
(char.of_nat (bit1 (bit0 (bit1 (bit0 (bit0 (bit1 1)))))))))
-- `False` always prints as the literal string "false"
-- (character codes 102, 97, 108, 115, 101).
protected instance false.printable_prop : printable_prop False :=
printable_prop.mk
(some
(string.str
(string.str
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit1 (bit1 (bit0 (bit0 (bit1 1))))))))
(char.of_nat (bit1 (bit0 (bit0 (bit0 (bit0 (bit1 1))))))))
(char.of_nat (bit0 (bit0 (bit1 (bit1 (bit0 (bit1 1))))))))
(char.of_nat (bit1 (bit1 (bit0 (bit0 (bit1 (bit1 1))))))))
(char.of_nat (bit1 (bit0 (bit1 (bit0 (bit0 (bit1 1)))))))))
-- A coerced boolean `↥b` prints as "true" or "false" depending on `b`,
-- selected with `ite`; the two branches are the same encoded literals as above.
protected instance bool.printable_prop (b : Bool) : printable_prop ↥b :=
printable_prop.mk
(some
(ite (↥b)
(string.str
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit1 (bit0 (bit1 (bit1 1))))))))
(char.of_nat (bit0 (bit1 (bit0 (bit0 (bit1 (bit1 1))))))))
(char.of_nat (bit1 (bit0 (bit1 (bit0 (bit1 (bit1 1))))))))
(char.of_nat (bit1 (bit0 (bit1 (bit0 (bit0 (bit1 1))))))))
(string.str
(string.str
(string.str
(string.str (string.str string.empty (char.of_nat (bit0 (bit1 (bit1 (bit0 (bit0 (bit1 1))))))))
(char.of_nat (bit1 (bit0 (bit0 (bit0 (bit0 (bit1 1))))))))
(char.of_nat (bit0 (bit0 (bit1 (bit1 (bit0 (bit1 1))))))))
(char.of_nat (bit1 (bit1 (bit0 (bit0 (bit1 (bit1 1))))))))
(char.of_nat (bit1 (bit0 (bit1 (bit0 (bit0 (bit1 1))))))))))
/-- Execute `cmd` and repeat every time the result is `gave_up` (at most
`n` times). -/
def retry {p : Prop} (cmd : rand (test_result p)) : ℕ → rand (test_result p) :=
sorry
/-- Count the number of times the test procedure gave up. -/
def give_up {p : Prop} (x : ℕ) : test_result p → test_result p :=
sorry
/-- Try `n` times to find a counter-example for `p`. -/
def testable.run_suite_aux (p : Prop) [testable p] (cfg : slim_check_cfg) : test_result p → ℕ → rand (test_result p) :=
sorry
/-- Try to find a counter-example of `p`. -/
-- Each `bit*` numeral in the default `slim_check_cfg` below encodes the
-- number 100 (binary 1100100, read innermost-to-outermost = MSB-to-LSB).
def testable.run_suite (p : Prop) [testable p] (cfg : optParam slim_check_cfg
(slim_check_cfg.mk (bit0 (bit0 (bit1 (bit0 (bit0 (bit1 1)))))) (bit0 (bit0 (bit1 (bit0 (bit0 (bit1 1)))))) false false
false false none false)) : rand (test_result p) :=
testable.run_suite_aux p cfg (test_result.success (psum.inl Unit.unit)) (slim_check_cfg.num_inst cfg)
/-- Run a test suite for `p` in `io`. -/
def testable.check' (p : Prop) [testable p] (cfg : optParam slim_check_cfg
(slim_check_cfg.mk (bit0 (bit0 (bit1 (bit0 (bit0 (bit1 1)))))) (bit0 (bit0 (bit1 (bit0 (bit0 (bit1 1)))))) false false
false false none false)) : io (test_result p) :=
sorry
namespace tactic
/-!
## Decorations
Instances of `testable` use `named_binder` as a decoration on
propositions in order to access the name of bound variables, as in
`named_binder "x" (forall x, x < y)`. This helps the
`testable` instances create useful error messages where variables
are matched with values that falsify a given proposition.
The following functions help support the gadget so that the user does
not have to put them in themselves.
-/
/-- `add_existential_decorations p` adds a `named_binder` annotation at the
root of `p` if `p` is an existential quantification. -/
/-- Traverse the syntax of a proposition to find universal quantifiers
and existential quantifiers and add `named_binder` annotations next to
them. -/
/-- `decorations_of p` is used as a hint to `mk_decorations` to specify
that the goal should be satisfied with a proposition equivalent to `p`
with added annotations. -/
-- NOTE: the body is just `Prop`; the actual decorated proposition is produced
-- by the `mk_decorations` tactic, this definition only marks the goal shape.
def decorations_of (p : Prop) :=
Prop
/-- In a goal of the shape `⊢ tactic.decorations_of p`, `mk_decoration` examines
the syntax of `p` and add `named_binder` around universal quantifications and
existential quantifications to improve error messages.
This tool can be used in the declaration of a function as follows:
```lean
def foo (p : Prop) (p' : tactic.decorations_of p . mk_decorations) [testable p'] : ...
```
`p` is the parameter given by the user, `p'` is an equivalent proposition where
the quantifiers are annotated with `named_binder`.
-/
end tactic
/-- Run a test suite for `p` and return true or false: should we believe that `p` holds? -/
-- `p'` is the decorated version of `p`, synthesized through `autoParam` by the
-- `Mathlib.slim_check.tactic.mk_decorations` tactic referenced below; both
-- `bit*` numerals in the default configuration encode 100.
def testable.check (p : Prop) (cfg : optParam slim_check_cfg
(slim_check_cfg.mk (bit0 (bit0 (bit1 (bit0 (bit0 (bit1 1)))))) (bit0 (bit0 (bit1 (bit0 (bit0 (bit1 1)))))) false false
false false none false)) (p' : autoParam (tactic.decorations_of p)
(Lean.Syntax.ident Lean.SourceInfo.none (String.toSubstring "Mathlib.slim_check.tactic.mk_decorations")
(Lean.Name.mkStr
(Lean.Name.mkStr (Lean.Name.mkStr (Lean.Name.mkStr Lean.Name.anonymous "Mathlib") "slim_check") "tactic")
"mk_decorations")
[])) [testable p'] : io PUnit :=
do
sorry
sorry
| {
"alphanum_fraction": null,
"author": "AurelienSaue",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/AurelienSaue-Mathlib4_auto/Mathlib4_auto-590df64109b08190abe22358fabc3eae000943f2/Mathlib/testing/slim_check/testable.lean",
"reason": null,
"repo": "Mathlib4_auto",
"save_path": "github-repos/lean/AurelienSaue-Mathlib4_auto",
"sha": "590df64109b08190abe22358fabc3eae000943f2",
"size": null
} |
# Build a neural network for approximate Q learning
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
import keras.layers as L
def get_action(state, epsilon=0):
    """
    Sample an action with an epsilon-greedy policy.

    With probability ``epsilon`` a uniformly random action index is drawn;
    otherwise the action with the highest predicted Q(s, a) is returned.
    Relies on the module-level ``network`` and ``n_actions``.
    """
    # Predict Q-values for a batch of one state.
    q_values = network.predict(state[None])[0]
    if np.random.random() < epsilon:
        # Explore: pick a uniformly random action.
        return np.random.choice(n_actions, 1)[0]
    # Exploit: pick the greedy action.
    return np.argmax(q_values)
def generate_session(t_max=1000, epsilon=0, train=False):
    """Play one episode with the approximate Q-learning agent; optionally
    run a TD training step after every transition. Returns total reward."""
    episode_reward = 0
    state = env.reset()
    for _ in range(t_max):
        action = get_action(state, epsilon=epsilon)
        new_state, reward, done, _ = env.step(action)
        if train:
            # One SGD step on the TD loss for this single transition.
            feed = {
                states_ph: [state],
                actions_ph: [action],
                rewards_ph: [reward],
                next_states_ph: [new_state],
                is_done_ph: [done],
            }
            sess.run(train_step, feed)
        episode_reward += reward
        state = new_state
        if done:
            break
    return episode_reward
if __name__ == "__main__":
    # OpenAI gym CartPole-v0 example
    env = gym.make("CartPole-v0").env
    env.reset()
    n_actions = env.action_space.n
    state_dim = env.observation_space.shape
    # NOTE(review): graph-mode API (reset_default_graph / InteractiveSession /
    # placeholders) requires TensorFlow 1.x with standalone keras — confirm env.
    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    keras.backend.set_session(sess)
    network = keras.models.Sequential()
    network.add(L.InputLayer(state_dim))
    # create a network for approximate q-learning following guidelines above
    network.add(L.Dense(100, activation='relu'))
    network.add(L.Dense(100, activation='relu'))
    network.add(L.Dense(n_actions, activation='linear'))
    # Q-learning via gradient descent - train the agent's Q-function by minimizing the TD loss
    # Create placeholders for the <s, a, r, s'> tuple and a special indicator for game end (is_done = True)
    states_ph = keras.backend.placeholder(dtype='float32', shape=(None,) + state_dim)
    actions_ph = keras.backend.placeholder(dtype='int32', shape=[None])
    rewards_ph = keras.backend.placeholder(dtype='float32', shape=[None])
    next_states_ph = keras.backend.placeholder(dtype='float32', shape=(None,) + state_dim)
    is_done_ph = keras.backend.placeholder(dtype='bool', shape=[None])
    # get q-values for all actions in current states
    predicted_qvalues = network(states_ph)
    # select q-values for chosen actions
    predicted_qvalues_for_actions = tf.reduce_sum(predicted_qvalues * tf.one_hot(actions_ph, n_actions), axis=1)
    # discount factor for future rewards
    gamma = 0.99
    # compute q-values for all actions in next states
    predicted_next_qvalues = network(next_states_ph)
    # compute V * (next_states) using predicted next q-values
    next_state_values = tf.reduce_max(predicted_next_qvalues, axis=1)
    # compute "target q-values" for loss - it's what's inside square parentheses in the above formula.
    target_qvalues_for_actions = rewards_ph + gamma * next_state_values
    # at the last state use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist
    target_qvalues_for_actions = tf.where(is_done_ph, rewards_ph, target_qvalues_for_actions)
    # mean squared error loss to minimize; stop_gradient keeps the TD target fixed
    loss = (predicted_qvalues_for_actions - tf.stop_gradient(target_qvalues_for_actions)) ** 2
    loss = tf.reduce_mean(loss)
    # training function that resembles agent.update(state, action, reward, next_state) from tabular agent
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
    # epsilon-greedy schedule: start at 0.5, decay by 1% per epoch
    epsilon = 0.5
    for i in range(1000):
        session_rewards = [generate_session(epsilon=epsilon, train=True) for _ in range(100)]
        print("epoch #{}\tmean reward = {:.3f}\tepsilon = {:.3f}".format(i, np.mean(session_rewards), epsilon))
        epsilon *= 0.99
        assert epsilon >= 1e-4, "Make sure epsilon is always nonzero during training"
        if np.mean(session_rewards) > 300:
            print("Iteration ended, session mean reward > 300")
            break
| {
"alphanum_fraction": 0.6874391431,
"author": null,
"avg_line_length": 35.1111111111,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a3107c6d9c9903dce7f4a7bdb21b64d5db20c380",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "02a93e4cc32a6707c018386f2f745f9937f94adc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "gesuwen/Machine-Learning",
"max_forks_repo_path": "Reinforcement learning/approx_q.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "02a93e4cc32a6707c018386f2f745f9937f94adc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "gesuwen/Machine-Learning",
"max_issues_repo_path": "Reinforcement learning/approx_q.py",
"max_line_length": 112,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "02a93e4cc32a6707c018386f2f745f9937f94adc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "gesuwen/Machine-Learning",
"max_stars_repo_path": "Reinforcement learning/approx_q.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 985,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4108
} |
import os
import shutil
import glob
import json
import argparse
import numpy as np
from transformations_np import TransformationSpherical, Transformation3D
from tqdm import tqdm
if __name__ == '__main__':
    # Extract per-pano layout labels from a ZInD dataset.
    # Fix: removed dead locals (`single_dir`, `pair_dir`, `self_simple_list`,
    # `official_simple_list`, `excepts`) that were assigned but never used.
    parser = argparse.ArgumentParser()
    parser.add_argument('--src', required=True,
                        help='Path to original ZInD')
    args = parser.parse_args()

    # One `zind_data.json` per scene, plus the official simplicity labels.
    label_fnames = glob.glob(os.path.join(args.src, '*', 'zind_data.json'))
    simple_label_file = os.path.join(args.src, 'room_shape_simplicity_labels.json')
    with open(simple_label_file, 'r') as f:
        simple_data = json.load(f)

    num = 0  # number of panos exported
    for path in tqdm(label_fnames):
        with open(path, 'r') as f:
            data = json.load(f)
        dirname = os.path.dirname(path)
        label_dir = os.path.join(dirname, 'label')
        # Rebuild the label directory from scratch for every scene.
        if os.path.isdir(label_dir):
            shutil.rmtree(label_dir)
        os.mkdir(label_dir)
        scene_id = os.path.basename(dirname)
        merger = data['merger']
        for floor_id, floor_data in merger.items():
            for complete_room_id, complete_room_data in floor_data.items():
                for partial_room_id, partial_room_data in complete_room_data.items():
                    for pano_id, pano_data in partial_room_data.items():
                        subject = f'{scene_id}_{floor_id}_{complete_room_id}_{partial_room_id}_{pano_id}'
                        # Keep only panos labelled "simple" that have a visible layout.
                        if simple_data[subject] and 'layout_visible' in pano_data:
                            vertices = np.array(pano_data['layout_visible']['vertices'])
                            ceiling_height = pano_data['ceiling_height']
                            camera_height = pano_data['camera_height']
                            # Lift the 2D layout polygon to 3D floor/ceiling rings.
                            xyz_floor, xyz_ceil = Transformation3D(ceiling_height, camera_height).to_3d(vertices)
                            xyz_floor = xyz_floor[::-1]
                            xyz_ceil = xyz_ceil[::-1]
                            # Interleave: even rows = ceiling, odd rows = floor.
                            xyzs = np.empty((xyz_floor.shape[0] * 2, xyz_floor.shape[1]), dtype=xyz_floor.dtype)
                            xyzs[0::2] = xyz_ceil
                            xyzs[1::2] = xyz_floor
                            cor = TransformationSpherical.cartesian_to_pixel(xyzs, 1024).astype(int)
                            # Rotate so the leftmost ceiling corner comes first.
                            cor = np.roll(cor[:, :2], -2 * np.argmin(cor[::2, 0]), 0)
                            file_prefix = f'{floor_id}_{partial_room_id}_{pano_id}'
                            if pano_data['is_primary']:
                                file_prefix = f'{file_prefix}_primary'
                            txt_filename = os.path.join(label_dir, f'{file_prefix}.txt')
                            np.savetxt(txt_filename, cor, delimiter=' ', fmt='%d')
                            json_filename = os.path.join(label_dir, f'{file_prefix}.json')
                            with open(json_filename, 'w') as f:
                                json.dump(pano_data, f)
                            num += 1
    print(f'Number of Data: {num}')
| {
"alphanum_fraction": 0.5311004785,
"author": null,
"avg_line_length": 45.1891891892,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "52b1636679522a0b92297ec8c19feced5ca40527",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8a1dcffb231128c144507b186b925c1e3f313db9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "joshua049/Stereo-360-Layout",
"max_forks_repo_path": "preprocess_zind.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8a1dcffb231128c144507b186b925c1e3f313db9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "joshua049/Stereo-360-Layout",
"max_issues_repo_path": "preprocess_zind.py",
"max_line_length": 113,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8a1dcffb231128c144507b186b925c1e3f313db9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "joshua049/Stereo-360-Layout",
"max_stars_repo_path": "preprocess_zind.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 692,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3344
} |
// Luaponte library
// Copyright (c) 2012 Peter Colberg
// Luaponte is based on Luabind, a library, inspired by and similar to
// Boost.Python, that helps you create bindings between C++ and Lua,
// Copyright (c) 2003-2010 Daniel Wallin and Arvid Norberg.
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef LUAPONTE_ADOPT_POLICY_HPP
#define LUAPONTE_ADOPT_POLICY_HPP
#include <luaponte/config.hpp>
#include <luaponte/wrapper_base.hpp>
#include <luaponte/detail/policy.hpp>
#include <luaponte/back_reference_fwd.hpp>
#include <luaponte/wrapper_base.hpp>
#include <boost/type_traits/is_polymorphic.hpp>
#include <memory>
namespace luaponte {
namespace detail {
// When a wrapped (polymorphic) object is adopted, turn its Lua back reference
// into a strong reference so the Lua part stays alive while C++ owns the
// object. Selected by tag dispatch when boost::is_polymorphic<T> is true.
template <class T>
void adjust_backref_ownership(T* ptr, mpl::true_)
{
    if (wrap_base* p = dynamic_cast<wrap_base*>(ptr))
    {
        wrapped_self_t& wrapper = wrap_access::ref(*p);
        wrapper.get(wrapper.state());
        wrapper.m_strong_ref.set(wrapper.state());
    }
}

// Non-polymorphic overload: no back reference to adjust.
inline void adjust_backref_ownership(void*, mpl::false_)
{}
// Converter for adopting a pointer argument from Lua into C++: fetches the
// pointer, releases Lua's ownership of the userdata, and fixes up back
// reference ownership for polymorphic types.
template <class Pointer, class Direction = lua_to_cpp>
struct adopt_pointer : pointer_converter
{
    typedef adopt_pointer type;

    // An adopted pointer consumes exactly one Lua stack slot.
    int consumed_args(...) const
    {
        return 1;
    }

    template<class T>
    T* apply(lua_State* L, by_pointer<T>, int index)
    {
        T* ptr = pointer_converter::apply(
            L, LUAPONTE_DECORATE_TYPE(T*), index);
        object_rep* obj = static_cast<object_rep*>(
            lua_touserdata(L, index));
        // Lua gives up ownership: the C++ side is now responsible for ptr.
        obj->release();
        adjust_backref_ownership(ptr, boost::is_polymorphic<T>());
        return ptr;
    }

    // Matching delegates to the plain pointer converter.
    template<class T>
    int match(lua_State* L, by_pointer<T>, int index)
    {
        return pointer_converter::match(
            L, LUAPONTE_DECORATE_TYPE(T*), index);
    }

    template<class T>
    void converter_postcall(lua_State*, T, int) {}
};
// Chooses the smart pointer type used to hold an adopted object: the
// user-supplied Pointer, or std::unique_ptr<T> when none was given
// (Pointer = void).
template <class Pointer, class T>
struct pointer_or_default
{
    typedef Pointer type;
};

template <class T>
struct pointer_or_default<void, T>
{
    typedef std::unique_ptr<T> type;
};
// Converter for adopting a pointer result from C++ into Lua: Lua takes
// ownership of the object, holding it through the chosen smart pointer type.
template <class Pointer>
struct adopt_pointer<Pointer, cpp_to_lua>
{
    typedef adopt_pointer type;

    template<class T>
    void apply(lua_State* L, T* ptr)
    {
        // A null result becomes nil on the Lua stack.
        if (ptr == 0)
        {
            lua_pushnil(L);
            return;
        }
        // if there is a back_reference, then the
        // ownership will be removed from the
        // back reference and put on the lua stack.
        if (luaponte::move_back_reference(L, ptr))
            return;
        typedef typename pointer_or_default<Pointer, T>::type
            pointer_type;
        // Wrap the raw pointer in the owning smart pointer and push it.
        make_instance(L, pointer_type(ptr));
    }
};
// Conversion policy transferring ownership of argument/result N between
// Lua and C++. Only non-const pointers can be adopted; any other type
// selects the deliberately empty marker type, producing a compile error.
template <int N, class Pointer = void>
struct adopt_policy : conversion_policy<N>
{
    static void precall(lua_State*, const index_map&) {}
    static void postcall(lua_State*, const index_map&) {}

    struct only_accepts_nonconst_pointers {};

    template<class T, class Direction>
    struct apply
    {
        typedef luaponte::detail::is_nonconst_pointer<T> is_nonconst_p;
        typedef typename boost::mpl::if_<
            is_nonconst_p
          , adopt_pointer<Pointer, Direction>
          , only_accepts_nonconst_pointers
        >::type type;
    };
};
} // namespace detail
} // namespace luaponte

namespace luaponte {

// Builds an adopt policy for placeholder N (e.g. adopt(_1), adopt(result)):
// ownership of the pointer at that position is transferred.
template<int N>
detail::policy_cons<detail::adopt_policy<N>, detail::null_type>
adopt(LUAPONTE_PLACEHOLDER_ARG(N))
{
    return detail::policy_cons<detail::adopt_policy<N>, detail::null_type>();
}

// Overload letting the caller pick the smart pointer type that holds an
// object adopted from C++ into Lua (defaults to std::unique_ptr otherwise).
template <class Pointer, int N>
detail::policy_cons<detail::adopt_policy<N, Pointer>, detail::null_type>
adopt(LUAPONTE_PLACEHOLDER_ARG(N))
{
    return detail::policy_cons<detail::adopt_policy<N, Pointer>, detail::null_type>();
}

} // namespace luaponte

#endif // LUAPONTE_ADOPT_POLICY_HPP
| {
"alphanum_fraction": 0.6778370145,
"author": null,
"avg_line_length": 24.7735849057,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "ad977b39ea8dff6964951549355517e421b07896",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "165328485954a51524a0b1aec27518861c6be719",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "halmd-org/luaponte",
"max_forks_repo_path": "luaponte/adopt_policy.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "165328485954a51524a0b1aec27518861c6be719",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "halmd-org/luaponte",
"max_issues_repo_path": "luaponte/adopt_policy.hpp",
"max_line_length": 86,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "06bfe551bce23e411e75895797b8bb84bb662ed2",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "jb--/luaponte",
"max_stars_repo_path": "luaponte/adopt_policy.hpp",
"max_stars_repo_stars_event_max_datetime": "2018-08-27T06:54:44.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-08-30T10:02:10.000Z",
"num_tokens": 956,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3939
} |
! External subroutine that prints a fixed message; takes no arguments.
subroutine sub ()
print *, "output string ..."
end subroutine sub
! NOTE(review): this bare END appears to terminate an implicit (empty) main
! program unit following the subroutine — confirm against the full file.
end
| {
"alphanum_fraction": 0.6712328767,
"author": null,
"avg_line_length": 14.6,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "1afb1e0dac3f7e507891055390206bea59c91631",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 146,
"max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z",
"max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "sujankh/rose-matlab",
"max_forks_repo_path": "tests/CompileTests/Fortran_tests/test2007_119.f90",
"max_issues_count": 174,
"max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "sujankh/rose-matlab",
"max_issues_repo_path": "tests/CompileTests/Fortran_tests/test2007_119.f90",
"max_line_length": 31,
"max_stars_count": 488,
"max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "maurizioabba/rose",
"max_stars_repo_path": "tests/CompileTests/Fortran_tests/test2007_119.f90",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z",
"num_tokens": 17,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 73
} |
import cupy as cp
from model.utils.nms import non_maximum_suppression
from model.utils.loc2bbox_gpu import loc2bbox
from torch.utils.dlpack import to_dlpack
from torch.utils.dlpack import from_dlpack
class ProposalCreator:
    """Turn RPN regression deltas plus anchors into a ranked, NMS-filtered
    set of region-of-interest boxes, computed on the GPU with CuPy."""

    def __init__(self,
                 parent_model,
                 nms_thresh=0.7,
                 n_train_pre_nms=12000,
                 n_train_post_nms=1000,
                 n_test_pre_nms=6000,
                 n_test_post_nms=300,  # NOTE(review): an earlier comment claimed this was reduced to 50, but the default is 300 — confirm intended value
                 min_size=16
                 ):
        # parent_model.training selects the train vs. test proposal budgets.
        self.parent_model=parent_model
        # IoU threshold used by non-maximum suppression.
        self.nms_thresh=nms_thresh
        self.n_train_pre_nms=n_train_pre_nms
        self.n_train_post_nms=n_train_post_nms
        self.n_test_pre_nms=n_test_pre_nms
        self.n_test_post_nms=n_test_post_nms
        # Minimum box side (in input-image pixels, before `scale`).
        self.min_size=min_size

    def __call__(self, loc, fg_score, anchor, img_size, scale=1.):
        """
        Arg:
            - loc: (N,4)
            - fg_score: (N,)
            - anchor: (9, 4)
            - img_size: (2)
        """
        # Pick the pre/post-NMS proposal budgets for the current mode.
        if self.parent_model.training:
            n_pre_nms = self.n_train_pre_nms
            n_post_nms = self.n_train_post_nms
        else:
            n_pre_nms = self.n_test_pre_nms
            n_post_nms = self.n_test_post_nms
        # Zero-copy handoff from torch tensors to CuPy via DLPack.
        # NOTE(review): cp.fromDlpack is the legacy spelling; recent CuPy uses
        # cp.from_dlpack — confirm the installed CuPy version.
        loc = cp.fromDlpack(to_dlpack(loc))
        fg_score = cp.fromDlpack(to_dlpack(fg_score))
        anchor = cp.asarray(anchor)
        # Decode the regression deltas into absolute boxes.
        roi=loc2bbox(anchor, loc)
        # clip — assumes img_size is (H, W): columns 0/2 clipped to width,
        # columns 1/3 to height (TODO confirm with caller).
        roi[:,slice(0,4,2)]=cp.clip(roi[:,slice(0,4,2)], 0, img_size[1])
        roi[:,slice(1,4,2)]=cp.clip(roi[:,slice(1,4,2)], 0, img_size[0])
        # remove small box less than threshold
        min_size=self.min_size * scale
        hs = roi[:,3]-roi[:,1]
        ws = roi[:,2]-roi[:,0]
        keep=cp.where((hs>min_size) & (ws>min_size))[0]
        roi=roi[keep,:]
        fg_score=fg_score[keep]
        # sort the score (descending) and keep the top n_pre_nms proposals
        order= cp.argsort(fg_score.ravel())[::-1]
        if n_pre_nms>0:
            order= order[0:n_pre_nms]
        roi=roi[order,:]
        # Suppress overlapping boxes, then truncate to the post-NMS budget.
        keep = non_maximum_suppression(cp.ascontiguousarray(cp.asarray(roi)), thresh = self.nms_thresh)
        if n_post_nms>0:
            keep = keep[:n_post_nms]
        roi=roi[keep]
        return roi
"alphanum_fraction": 0.5826771654,
"author": null,
"avg_line_length": 32.1971830986,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "df05e1dad811f42e31eaf4dc99f968da363f6f48",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "68ecc3f32fc3be797cdf74757b3e57d328662e57",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "xufangda/Faster_RCNN_without_pain",
"max_forks_repo_path": "model/utils/proposal_creator_gpu.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "68ecc3f32fc3be797cdf74757b3e57d328662e57",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "xufangda/Faster_RCNN_without_pain",
"max_issues_repo_path": "model/utils/proposal_creator_gpu.py",
"max_line_length": 103,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "68ecc3f32fc3be797cdf74757b3e57d328662e57",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "xufangda/Faster_RCNN_without_pain",
"max_stars_repo_path": "model/utils/proposal_creator_gpu.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 639,
"path": null,
"reason": "import cupy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2286
} |
import logging
import yaml
from pathlib import Path
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
from regions import CircleSkyRegion
from gammapy.analysis import Analysis, AnalysisConfig
from gammapy.maps import MapAxis
from gammapy.modeling import Fit
from gammapy.data import DataStore
from gammapy.modeling.models import PowerLawSpectralModel
from gammapy.cube import SafeMaskMaker
from gammapy.spectrum import (
SpectrumDatasetMaker,
FluxPointsEstimator,
ReflectedRegionsBackgroundMaker,
)
log = logging.getLogger(__name__)

# Target definitions shared by all analyses; loaded once at import time.
with open("targets.yaml", "r") as stream:
    targets = yaml.safe_load(stream)
# If DEBUG is True, analyzes only 1 run, computes only 1 flux point and does
# not re-optimize the bkg during flux points computation
DEBUG = True
# Reconstructed-energy bin edges: 72 logarithmic bins from 0.1 to 100 TeV.
E_RECO = MapAxis.from_bounds(
    0.1, 100, nbin=72, unit="TeV", name="energy", interp="log"
).edges
NBIN = 24 if DEBUG is False else 1
# Flux-point energy bin edges (collapses to a single bin in DEBUG mode).
FLUXP_EDGES = MapAxis.from_bounds(
    0.1, 100, nbin=NBIN, unit="TeV", name="energy", interp="log"
).edges
def main(analyse1d=True, analyse3d=True, plot_fluxp=False):
    """Run the requested validation analyses for each known source."""
    # TODO: add the rxj1713 validation
    for source in ["crab", "msh1552", "pks2155"]:
        # Pick the configuration entry whose tag matches this source.
        target_dict = [t for t in targets if t["tag"] == source][0]
        if analyse1d:
            run_analysis_1d(target_dict)
        if analyse3d:
            run_analysis_3d(target_dict)
        if plot_fluxp:
            plot_flux_points()
def write_fit_summary(parameters, outfile):
    """Store fit results with uncertainties"""
    summary = {}
    for par in parameters:
        err = parameters.error(par)
        # One "value +- error unit" line per fitted parameter.
        summary[par.name] = "{0:.2e} +- {1:.2e} {2}".format(par.value, err, par.unit)
    with open(str(outfile), "w") as f:
        yaml.dump(summary, f)
def plot_flux_points():
    """Plotting of the flux points is not implemented yet."""
    raise NotImplementedError
def run_analysis_1d(target_dict):
    """Run the 1D (spectral) validation analysis for one target.

    ``target_dict`` must provide the keys ``tag``, ``name``, ``ra``, ``dec``,
    ``on_size`` and ``e_decorr``.  Fit summary and flux points are written
    under ``<tag>/results/``.
    """
    tag = target_dict["tag"]
    name = target_dict["name"]
    log.info(f"running 1d analysis, {tag}")
    path_res = Path(tag + "/results/")
    ra = target_dict["ra"]
    dec = target_dict["dec"]
    on_size = target_dict["on_size"]
    e_decorr = target_dict["e_decorr"]
    target_pos = SkyCoord(ra, dec, unit="deg", frame="icrs")
    on_radius = Angle(on_size * u.deg)
    containment_corr = True
    # Observations selection: all runs whose TARGET_NAME matches this source.
    data_store = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1/")
    mask = data_store.obs_table["TARGET_NAME"] == name
    obs_table = data_store.obs_table[mask]
    observations = data_store.get_observations(obs_table["OBS_ID"])
    if DEBUG is True:
        observations = [observations[0]]
    # Reflected regions background estimation
    on_region = CircleSkyRegion(center=target_pos, radius=on_radius)
    dataset_maker = SpectrumDatasetMaker(
        region=on_region,
        e_reco=E_RECO,
        e_true=E_RECO,
        containment_correction=containment_corr,
    )
    bkg_maker = ReflectedRegionsBackgroundMaker()
    # Discard energies where the energy-dispersion bias exceeds 10%.
    safe_mask_masker = SafeMaskMaker(methods=["edisp-bias"], bias_percent=10)
    datasets = []
    for observation in observations:
        dataset = dataset_maker.run(observation, selection=["counts", "aeff", "edisp"])
        dataset_on_off = bkg_maker.run(dataset, observation)
        dataset_on_off = safe_mask_masker.run(dataset_on_off, observation)
        datasets.append(dataset_on_off)
    # Fit spectrum: one power-law model fitted jointly across all runs.
    model = PowerLawSpectralModel(
        index=2, amplitude=2e-11 * u.Unit("cm-2 s-1 TeV-1"), reference=e_decorr * u.TeV
    )
    for dataset in datasets:
        dataset.model = model
    fit_joint = Fit(datasets)
    result_joint = fit_joint.run()
    # Propagate the fitted covariance back onto the model parameters so the
    # summary file contains proper uncertainties.
    parameters = model.parameters
    parameters.covariance = result_joint.parameters.covariance
    write_fit_summary(parameters, str(path_res / "results-summary-fit-1d.yaml"))
    # Flux points; points with TS < 4 are flagged as upper limits.
    fpe = FluxPointsEstimator(datasets=datasets, e_edges=FLUXP_EDGES)
    flux_points = fpe.run()
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = ["e_ref", "e_min", "e_max", "dnde", "dnde_errp", "dnde_errn", "is_ul"]
    flux_points.table_formatted[keys].write(
        path_res / "flux-points-1d.ecsv", format="ascii.ecsv"
    )
def run_analysis_3d(target_dict):
    """Run 3D (cube) analysis for the selected target.

    Builds a gammapy ``Analysis`` from an inline YAML config, applies a
    runwise safe-energy threshold, fits a sky model, and writes the fit
    summary and flux points under ``<tag>/results/``.

    Parameters
    ----------
    target_dict : dict
        Target definition; keys read here: "tag", "name", "ra", "dec",
        "e_decorr", "spatial_model".
    """
    tag = target_dict["tag"]
    name = target_dict["name"]
    log.info(f"running 3d analysis, {tag}")
    path_res = Path(tag + "/results/")
    ra = target_dict["ra"]
    dec = target_dict["dec"]
    e_decorr = target_dict["e_decorr"]
    # Inline YAML analysis configuration: observations are selected by
    # TARGET_NAME, maps are 5x5 deg around the target with 0.02 deg pixels,
    # 24 reco / 72 true energy bins from 0.1 to 100 TeV.
    config_str = f"""
    general:
        logging:
            level: INFO
        outdir: .
    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1/
        filters:
            - filter_type: par_value
              value_param: {name}
              variable: TARGET_NAME
    datasets:
        dataset-type: MapDataset
        stack-datasets: true
        offset-max: 2.5 deg
        geom:
            skydir: [{ra}, {dec}]
            width: [5, 5]
            binsz: 0.02
            coordsys: CEL
            proj: TAN
            axes:
                - name: energy
                  hi_bnd: 100
                  lo_bnd: 0.1
                  nbin: 24
                  interp: log
                  node_type: edges
                  unit: TeV
        energy-axis-true:
            name: energy
            hi_bnd: 100
            lo_bnd: 0.1
            nbin: 72
            interp: log
            node_type: edges
            unit: TeV
    """
    print(config_str)
    config = AnalysisConfig(config_str)
    # Observation selection
    analysis = Analysis(config)
    analysis.get_observations()
    if DEBUG is True:
        # keep a single run to make debugging iterations fast
        analysis.observations.list = [analysis.observations.list[0]]
    # Data reduction
    analysis.get_datasets()
    # Set runwise energy threshold. See reference paper, section 5.1.1.
    for dataset in analysis.datasets:
        # energy threshold given by the 10% edisp criterium
        e_thr_bias = dataset.edisp.get_bias_energy(0.1)
        # energy at which the background peaks
        background_model = dataset.background_model
        bkg_spectrum = background_model.map.get_spectrum()
        peak = bkg_spectrum.data.max()
        idx = list(bkg_spectrum.data).index(peak)
        e_thr_bkg = bkg_spectrum.energy.center[idx]
        # the safe threshold is the stricter (higher) of the two criteria
        esafe = max(e_thr_bias, e_thr_bkg)
        dataset.mask_fit = dataset.counts.geom.energy_mask(emin=esafe)
    # Model fitting
    spatial_model = target_dict["spatial_model"]
    # Sky model as YAML: point/disk spatial component at the target position
    # plus a power law with the reference energy frozen at e_decorr.
    model_config = f"""
    components:
        - name: {tag}
          type: SkyModel
          spatial:
              type: {spatial_model}
              frame: icrs
              parameters:
                  - name: lon_0
                    value: {ra}
                    unit: deg
                  - name: lat_0
                    value: {dec}
                    unit: deg
          spectral:
              type: PowerLawSpectralModel
              parameters:
                  - name: amplitude
                    value: 1.0e-12
                    unit: cm-2 s-1 TeV-1
                  - name: index
                    value: 2.0
                    unit: ''
                  - name: reference
                    value: {e_decorr}
                    unit: TeV
                    frozen: true
    """
    # number of free-ish model parameters used to slice the covariance below
    model_npars = 5
    if spatial_model == "DiskSpatialModel":
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # in PyYAML >= 5.1 — confirm the pinned PyYAML version tolerates it.
        model_config = yaml.load(model_config)
        parameters = model_config["components"][0]["spatial"]["parameters"]
        # disk shape parameters: radius, eccentricity, orientation, edge width
        parameters.append(
            {
                "name": "r_0",
                "value": 0.2,
                "unit": "deg",
                "frozen": False
            }
        )
        parameters.append(
            {
                "name": "e",
                "value": 0.8,
                "unit": "",
                "frozen": False
            }
        )
        parameters.append(
            {
                "name": "phi",
                "value": 150,
                "unit": "deg",
                "frozen": False
            }
        )
        parameters.append(
            {
                "name": "edge",
                "value": 0.01,
                "unit": "deg",
                "frozen": True
            }
        )
        model_npars += 4
    analysis.set_model(model=model_config)
    # let the background normalisation float in the fit
    for dataset in analysis.datasets:
        dataset.background_model.norm.frozen = False
    analysis.run_fit()
    parameters = analysis.model.parameters
    # keep only the covariance sub-matrix of the source-model parameters
    parameters.covariance = analysis.fit_result.parameters.covariance[0:model_npars, 0:model_npars]
    write_fit_summary(parameters, str(path_res / "results-summary-fit-3d.yaml"))
    # Flux points
    # TODO: This is a workaround to re-optimize the bkg in each energy bin. Add has to be added to the Analysis class
    datasets = analysis.datasets.copy()
    for dataset in datasets:
        # freeze everything except the background norm for the per-bin fits
        for par in dataset.parameters:
            if par is not dataset.background_model.norm:
                par.frozen = True
    reoptimize = True if DEBUG is False else False
    fpe = FluxPointsEstimator(
        datasets=datasets, e_edges=FLUXP_EDGES, source=tag, reoptimize=reoptimize
    )
    flux_points = fpe.run()
    # mark bins with TS < 4 as upper limits
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    # NOTE(review): unlike the 1D analysis, "is_ul" is computed but not
    # included in the written columns — confirm whether that is intended.
    keys = ["e_ref", "e_min", "e_max", "dnde", "dnde_errp", "dnde_errn"]
    flux_points.table_formatted[keys].write(
        path_res / "flux-points-3d.ecsv", format="ascii.ecsv"
    )
if __name__ == "__main__":
    # Configure the root logger before dispatching to the CLI entry point.
    logging.basicConfig(level=logging.INFO)
    main()
| {
"alphanum_fraction": 0.6041838417,
"author": null,
"avg_line_length": 30.1366459627,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ded9909b954f87930f7fdd0eef9ffd314069414b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7f6170e88284958056fbdf468fb890787a13f153",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "QRemy/gammapy-benchmarks",
"max_forks_repo_path": "validation/hess-dl3-dr1/run.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7f6170e88284958056fbdf468fb890787a13f153",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "QRemy/gammapy-benchmarks",
"max_issues_repo_path": "validation/hess-dl3-dr1/run.py",
"max_line_length": 117,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7f6170e88284958056fbdf468fb890787a13f153",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "QRemy/gammapy-benchmarks",
"max_stars_repo_path": "validation/hess-dl3-dr1/run.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2394,
"path": null,
"reason": "import astropy,from astropy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9704
} |
"""need to refactor with common backend interface"""
from __future__ import annotations
import ast
import functools
import os
import typing
from typing_extensions import TypedDict
import ctc.config
from ctc import binary
from ctc import directory
from ctc import spec
from ctc import rpc
from ctc.toolbox import backend_utils
from ctc.toolbox import filesystem_utils
from ... import abi_utils
from ... import block_utils
from ... import evm_spec
#
# # paths
#
def get_events_root(
    network: typing.Optional[spec.NetworkReference] = None,
) -> str:
    """Return the root directory that holds cached event data for *network*."""
    data_dir = ctc.config.get_data_dir()
    network_name = directory.get_network_name(network)
    return os.path.join(data_dir, network_name, 'events')
def get_events_contract_dir(
    contract_address: spec.Address,
    network: typing.Optional[spec.NetworkReference] = None,
) -> str:
    """Return the per-contract events directory (address is lowercased)."""
    root = get_events_root(network=network)
    return os.path.join(root, 'contract__' + contract_address.lower())
def get_events_event_dir(
    contract_address: spec.Address,
    event_hash: typing.Optional[str] = None,
    event_abi: typing.Optional[spec.EventABI] = None,
    network: typing.Optional[spec.NetworkReference] = None,
) -> str:
    """Return the directory for one event type of one contract.

    One of *event_hash* or *event_abi* must be given; the hash is derived
    from the ABI when absent.
    """
    if event_hash is None:
        if event_abi is None:
            raise Exception('must specify more event data')
        event_hash = binary.get_event_hash(event_abi)
    parent = get_events_contract_dir(contract_address.lower(), network=network)
    return os.path.join(parent, 'event__' + event_hash)
def get_events_filepath(
    contract_address: spec.Address,
    start_block: int,
    end_block: int,
    event_hash: typing.Optional[str] = None,
    event_abi: typing.Optional[spec.EventABI] = None,
    network: typing.Optional[spec.NetworkReference] = None,
) -> str:
    """Return the chunk-file path for one event type over a block range.

    The relative layout comes from ``evm_spec.filesystem_layout``; the file
    lives under ``<data_dir>/<network_name>/``. Address and hash are
    normalized to lowercase. One of *event_hash* / *event_abi* is required.
    """
    if event_hash is None:
        if event_abi is None:
            raise Exception('must specify more event data')
        event_hash = binary.get_event_hash(event_abi)
    # assemble the relative subpath from the configured layout template
    subpath = evm_spec.filesystem_layout['evm_events_path'].format(
        contract_address=contract_address.lower(),
        event_hash=event_hash.lower(),
        start_block=start_block,
        end_block=end_block,
    )
    # anchor it under the network-specific data directory
    return os.path.join(
        ctc.config.get_data_dir(),
        directory.get_network_name(network),
        subpath,
    )
#
# # list saved data
#
def list_events_contracts(
    network: typing.Optional[spec.NetworkReference] = None,
) -> list[str]:
    """Return the addresses of all contracts with locally cached events."""
    events_root = get_events_root(network=network)
    if not os.path.isdir(events_root):
        return []
    # directory names look like 'contract__<address>'; keep the address part
    return [dirname.split('__')[-1] for dirname in os.listdir(events_root)]
# Maps a chunk filepath -> (start_block, end_block) covered by that file.
_PathEventsResult = typing.Dict[str, typing.Tuple[int, int]]
class _ListEventsResult(TypedDict):
    """Summary of the on-disk chunk files stored for a single event type."""
    paths: _PathEventsResult  # chunk file -> (start_block, end_block)
    block_range: spec.NumpyArray  # contiguous block numbers spanned by the chunks
    block_mask: spec.NumpyArray  # True where a block is covered by some chunk
    missing_blocks: spec.NumpyArray  # NOTE(review): in practice a numpy bool scalar — confirm
def list_contract_events(
    contract_address: spec.Address,
    event_hash: typing.Optional[str] = None,
    event_abi: typing.Optional[spec.EventABI] = None,
    allow_missing_blocks: bool = False,
    network: typing.Optional[spec.NetworkReference] = None,
) -> dict[str, _ListEventsResult]:
    """Scan the filesystem cache and summarize stored chunks per event type.

    Returns a mapping ``event_hash -> _ListEventsResult``. When *event_hash*
    or *event_abi* is given, only that event type is considered. Raises if
    chunk files overlap, or if blocks are missing and *allow_missing_blocks*
    is False.
    """
    # resolve the optional event filter to a hash (None means "all events")
    if event_hash is not None:
        query_event_hash = event_hash
    elif event_abi is not None:
        query_event_hash = binary.get_event_hash(event_abi)
    else:
        query_event_hash = None
    # compile path data
    contract_address = contract_address.lower()
    contract_dir = get_events_contract_dir(contract_address, network=network)
    paths: dict[str, _PathEventsResult] = {}
    if not os.path.isdir(contract_dir):
        return {}
    for event_dirname in os.listdir(contract_dir):
        event_dir = os.path.join(contract_dir, event_dirname)
        # directory names look like 'event__<hash>'
        _, event_hash = event_dirname.split('__')
        if query_event_hash is not None and event_hash != query_event_hash:
            continue
        for filename in os.listdir(event_dir):
            path = os.path.join(event_dir, filename)
            # filename stem encodes the covered range as
            # '<start>__<middle>__<end>'; the middle token is ignored
            start_block_str, _, end_block_str = os.path.splitext(filename)[
                0
            ].split('__')
            paths.setdefault(event_hash, {})
            paths[event_hash][path] = (int(start_block_str), int(end_block_str))
    # numpy imported lazily to keep module import cheap
    import numpy as np
    # create block_range and block_mask
    events: dict[str, _ListEventsResult] = {}
    for event_hash in paths.keys():
        # gather start and end blocks
        start_blocks = []
        end_blocks = []
        for path, (start_block, end_block) in paths[event_hash].items():
            start_blocks.append(start_block)
            end_blocks.append(end_block)
        # create block_range (end blocks are inclusive, hence the +1)
        min_block = min(start_blocks)
        max_block = max(end_blocks) + 1
        block_range = np.arange(min_block, max_block)
        # create block_mask: count how many chunks cover each block
        n_blocks = block_range.size
        block_mask = np.zeros(n_blocks)
        for path, (start_block, end_block) in paths[event_hash].items():
            start_index = start_block - min_block
            # slice end for an inclusive chunk end: (end_block - min_block) + 1
            end_index = n_blocks - (max_block - end_block) + 1
            block_mask[start_index:end_index] += 1
        # any block covered more than once means chunk files overlap
        if (block_mask > 1).sum() > 0:
            raise Exception('overlapping chunks')
        block_mask = block_mask.astype(bool)
        # check if blocks missing
        missing_blocks = block_mask.sum() != n_blocks
        if missing_blocks and not allow_missing_blocks:
            raise Exception('missing blocks')
        events[event_hash] = {
            'paths': paths[event_hash],
            'block_range': block_range,
            'block_mask': block_mask,
            'missing_blocks': missing_blocks,
        }
    return events
def list_events(
    contract_address: str,
    event_hash: typing.Optional[str] = None,
    event_abi: typing.Optional[spec.EventABI] = None,
    allow_missing_blocks: bool = False,
    network: typing.Optional[spec.NetworkReference] = None,
) -> typing.Optional[_ListEventsResult]:
    """Return the summary of a single matching event type, or None.

    None is returned whenever the query does not resolve to exactly one
    event type for the contract.
    """
    matches = list_contract_events(
        contract_address=contract_address,
        event_hash=event_hash,
        event_abi=event_abi,
        allow_missing_blocks=allow_missing_blocks,
        network=network,
    )
    if len(matches) != 1:
        return None
    (only_result,) = matches.values()
    return only_result
def list_contracts_events(
    network: typing.Optional[spec.NetworkReference] = None,
    **kwargs: typing.Any,
) -> dict[str, dict[str, _ListEventsResult]]:
    """Map every locally known contract address to its per-event summaries."""
    return {
        address: list_contract_events(
            contract_address=address, network=network, **kwargs
        )
        for address in list_events_contracts(network=network)
    }
#
# # disk
#
def print_events_summary() -> None:
    """Print a summary of all locally stored events (filesystem backend only)."""
    print_events_summary_filesystem()
def print_events_summary_filesystem() -> None:
    """Print a per-contract, per-event summary of events cached on disk.

    For each contract, lists every event type with an abbreviated hash,
    the covered block range, file count, and total size on disk.
    """
    contracts_events = list_contracts_events()
    print('## Contracts (' + str(len(contracts_events)) + ')')
    for contract_address in sorted(contracts_events.keys()):
        contract_events = contracts_events[contract_address]
        print('-', contract_address, '(' + str(len(contract_events)) + ' events)')
        for event_hash, event_data in contract_events.items():
            first_block = event_data['block_range'][0]
            last_block = event_data['block_range'][-1]
            dirpath = get_events_event_dir(
                contract_address=contract_address, event_hash=event_hash
            )
            size_human = filesystem_utils.get_directory_nbytes_human(dirpath)
            abbreviated = event_hash[:6] + '...' + event_hash[-6:]
            file_count = str(len(event_data['paths']))
            print(
                ' -',
                abbreviated,
                [first_block, last_block],
                '(' + size_human + 'B in ' + file_count + ' files)',
            )
async def async_save_events_to_filesystem(
    events: spec.DataFrame,
    contract_address: spec.Address,
    start_block: int,
    end_block: int,
    event_abi: typing.Optional[spec.EventABI] = None,
    event_hash: typing.Optional[str] = None,
    event_name: typing.Optional[str] = None,
    overwrite: bool = False,
    verbose: bool = True,
    provider: spec.ProviderSpec = None,
    network: typing.Optional[spec.NetworkReference] = None,
) -> spec.DataFrame:
    """Write an events DataFrame to its canonical chunk file and return it.

    The target path encodes contract, event hash, and block range. Raises
    if the file already exists and *overwrite* is False.
    """
    # resolve the network: from the provider when not given explicitly
    if network is None:
        provider = rpc.get_provider(provider)
        network = provider['network']
        if network is None:
            raise Exception('could not determine network')
    else:
        # NOTE(review): only this branch normalizes via get_network_name;
        # the provider branch uses provider['network'] as-is — confirm both
        # yield the same kind of value.
        network = directory.get_network_name(network)
    contract_address = contract_address.lower()
    # fetch the ABI when absent so the event hash can be derived for the path
    if event_abi is None:
        event_abi = await abi_utils.async_get_event_abi(
            contract_address=contract_address,
            event_name=event_name,
            event_hash=event_hash,
            network=network,
        )
    # compute path
    path = get_events_filepath(
        contract_address=contract_address,
        event_hash=event_hash,
        event_abi=event_abi,
        start_block=start_block,
        end_block=end_block,
        network=network,
    )
    if os.path.exists(path) and not overwrite:
        raise Exception('path already exists, use overwrite=True')
    if verbose:
        print('saving events to file:', path)
    # save
    os.makedirs(os.path.dirname(path), exist_ok=True)
    events.to_csv(path)
    return events
async def async_get_events_from_filesystem(
    contract_address: spec.ContractAddress,
    event_hash: typing.Optional[str] = None,
    event_name: typing.Optional[str] = None,
    event_abi: typing.Optional[spec.EventABI] = None,
    verbose: bool = True,
    start_block: typing.Optional[spec.BlockNumberReference] = None,
    end_block: typing.Optional[spec.BlockNumberReference] = None,
    provider: spec.ProviderSpec = None,
    network: spec.NetworkReference = None,
) -> spec.DataFrame:
    """Load cached events from disk into a single indexed DataFrame.

    Chunk CSVs overlapping [start_block, end_block] are concatenated,
    indexed by (block_number, transaction_index, log_index), trimmed to the
    requested range, and bytes32 columns are decoded to prefixed hex.
    Raises ``backend_utils.DataNotFound`` when no suitable files exist or
    the requested range falls outside the cached contents.
    """
    # get network
    if network is None:
        provider = rpc.get_provider(provider)
        network = provider['network']
        if network is None:
            raise Exception('could not determine network')
    else:
        network = directory.get_network_name(network)
    # resolve start_block and end_block (names/labels -> concrete ints)
    if start_block is not None:
        start_block = await block_utils.async_block_number_to_int(
            start_block,
            provider=provider,
        )
    if end_block is not None:
        end_block = await block_utils.async_block_number_to_int(
            end_block,
            provider=provider,
        )
    # get event hash, deriving it from the ABI (fetched by name if needed)
    if event_hash is None:
        if event_abi is None:
            if event_name is None:
                raise Exception('must specify more event information')
            event_abi = await abi_utils.async_get_event_abi(
                contract_address=contract_address,
                event_name=event_name,
                network=network,
            )
        event_hash = binary.get_event_hash(event_abi)
    events = list_contract_events(
        contract_address=contract_address,
        event_abi=event_abi,
        event_hash=event_hash,
        network=network,
    )
    if event_hash not in events or len(events[event_hash]['paths']) == 0:
        raise backend_utils.DataNotFound('no files for event')
    # get paths to load: keep only chunks that intersect the requested range
    paths_to_load = []
    for path, (path_start, path_end) in events[event_hash]['paths'].items():
        if start_block is not None:
            if path_end < start_block:
                continue
        if end_block is not None:
            if end_block < path_start:
                continue
        paths_to_load.append(path)
    if len(paths_to_load) == 0:
        raise backend_utils.DataNotFound('no files for event')
    # print summary
    if verbose:
        if len(paths_to_load) > 0:
            import toolstr
            n_files = len(paths_to_load)
            n_bytes_int = sum(os.path.getsize(path) for path in paths_to_load)
            n_bytes = toolstr.format(n_bytes_int / 1024 / 1024) + 'M'
        else:
            n_bytes = '0'
            n_files = 0
        print('loading events (' + n_bytes + 'B', 'across', n_files, 'files)')
        # verbose acts as a level: >= 2 also lists every file
        if verbose >= 2:
            for path in paths_to_load:
                print('-', path)
    # pandas imported lazily to keep module import cheap
    import pandas as pd
    # load paths
    dfs = []
    for path in paths_to_load:
        df = pd.read_csv(path)
        df = df.set_index(['block_number', 'transaction_index', 'log_index'])
        dfs.append(df)
    df = pd.concat(dfs, axis=0)
    df = df.sort_index()
    # trim unwanted rows outside [start_block, end_block]
    if start_block is not None:
        if start_block < events[event_hash]['block_range'][0]:
            raise backend_utils.DataNotFound(
                'start_block outside of filesystem contents'
            )
        mask = df.index.get_level_values(level='block_number') >= start_block
        df = df[mask]
    if end_block is not None:
        if end_block > events[event_hash]['block_range'][-1]:
            raise backend_utils.DataNotFound(
                'end_block outside of filesystem contents'
            )
        mask = df.index.get_level_values(level='block_number') <= end_block
        df = df[mask]
    # convert any bytes columns (stored as python literals in the CSV)
    prefix = 'arg__'
    if event_abi is None:
        event_abi = await abi_utils.async_get_event_abi(
            contract_address=contract_address,
            event_name=event_name,
            event_hash=event_hash,
            network=network,
        )
    for arg in event_abi['inputs']:
        if arg['type'] in ['bytes32']:
            column = prefix + arg['name']
            lam = functools.partial(
                binary.convert,
                output_format='prefix_hex',
            )
            df[column] = df[column].map(ast.literal_eval).map(lam)
    return df
| {
"alphanum_fraction": 0.6492625987,
"author": null,
"avg_line_length": 31.7228381375,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a5562bc3c22191a61d0f153535538ac3875acc39",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2022-03-17T19:14:17.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-15T17:53:07.000Z",
"max_forks_repo_head_hexsha": "ec838f3d0d44af228f45394d9ba8d8eb7f677520",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "fei-protocol/checkthechain",
"max_forks_repo_path": "src/ctc/evm/event_utils/event_backends/filesystem_events.py",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "ec838f3d0d44af228f45394d9ba8d8eb7f677520",
"max_issues_repo_issues_event_max_datetime": "2022-03-11T18:41:05.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-03-03T02:58:47.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "fei-protocol/checkthechain",
"max_issues_repo_path": "src/ctc/evm/event_utils/event_backends/filesystem_events.py",
"max_line_length": 80,
"max_stars_count": 94,
"max_stars_repo_head_hexsha": "ec838f3d0d44af228f45394d9ba8d8eb7f677520",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fei-protocol/checkthechain",
"max_stars_repo_path": "src/ctc/evm/event_utils/event_backends/filesystem_events.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-26T19:26:22.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-15T19:34:49.000Z",
"num_tokens": 3113,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 14307
} |
#!/usr/bin/python
# DQN implementation of https://github.com/matthiasplappert/keras-rl for Keras
# was used with epsilon-greedy per-episode decay policy.
import numpy as np
import gym
from gym import wrappers
from tfinterface.utils import get_run
from tfinterface.reinforcement import DQN, ExpandedStateEnv
import random
import tensorflow as tf
ENV_NAME = 'LunarLander-v2'
run = get_run()
# env: wrap the gym environment with a monitor (records whenever the
# callable's argument is a multiple of 50) and stack 3 consecutive states
env = gym.make(ENV_NAME)
env = wrappers.Monitor(env, "monitor/{run}".format(run = run), video_callable=lambda step: step % 50 == 0)
env = ExpandedStateEnv(env, 3)
# To get repeatable results: seed numpy, python random, and the env alike.
sd = 16
np.random.seed(sd)
random.seed(sd)
env.seed(sd)
# parameters
# NOTE: state_temporal_augmentation must match the ExpandedStateEnv factor (3) above
state_temporal_augmentation = 3
nb_actions = env.action_space.n
nb_states = env.observation_space.shape[0] * state_temporal_augmentation
class Network(object):
    """Q-network: one ELU hidden layer with dropout, then a linear layer
    producing one Q-value per action.

    Parameters
    ----------
    inputs : object
        Must expose ``.s`` (state batch tensor) and ``.keep_prob``
        (dropout keep probability) — assumed from usage; confirm against
        the DQN input spec.
    nb_actions : int
        Number of discrete actions, i.e. the output width of ``self.Qs``.
    """

    def __init__(self, inputs, nb_actions):
        # NOTE(review): the original built a dict of uniform(0, 0.01)
        # kernel/bias initializers here but never passed it to any layer,
        # so the layers always used their defaults. The dead dict is
        # removed; if custom initialization was intended, pass
        # kernel_initializer/bias_initializer explicitly to tf.layers.dense.
        net = inputs.s
        net = tf.layers.dense(net, 64, activation=tf.nn.elu, name="elu_layer")
        # dropout keep probability is fed at runtime via the inputs object
        net = tf.nn.dropout(net, inputs.keep_prob)
        # final linear layer: one Q-value per action
        self.Qs = tf.layers.dense(net, nb_actions)
with tf.device("cpu:0"):
    # Build the DQN agent: network factory, state size, RNG seed,
    # epsilon for exploration, and soft target-network update rate.
    dqn = DQN(
        lambda inputs: Network(inputs, nb_actions),
        nb_states,
        seed = sd,
        eps = 0.1,
        target_update = 0.001
    )
    # train on the monitored environment
    dqn.fit(env)
# NOTE(review): the bare attribute access below is a no-op — it looks like
# a leftover reminder to use an exponentially decaying schedule; safe to delete.
tf.train.exponential_decay
| {
"alphanum_fraction": 0.6990033223,
"author": null,
"avg_line_length": 23.8888888889,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7a0446bd1f46369e61a80f9012348734f7488ae2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2018-10-25T20:46:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-10-25T20:46:15.000Z",
"max_forks_repo_head_hexsha": "bc4036c9f4d19e042ca7801c51fbafd8220afc76",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cgarciae/tf-interface",
"max_forks_repo_path": "examples/lunar-lander-dqn.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "bc4036c9f4d19e042ca7801c51fbafd8220afc76",
"max_issues_repo_issues_event_max_datetime": "2018-04-27T21:01:00.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-09-01T19:35:19.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cgarciae/tf-interface",
"max_issues_repo_path": "examples/lunar-lander-dqn.py",
"max_line_length": 106,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "bc4036c9f4d19e042ca7801c51fbafd8220afc76",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cgarciae/tf-interface",
"max_stars_repo_path": "examples/lunar-lander-dqn.py",
"max_stars_repo_stars_event_max_datetime": "2017-07-06T19:50:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-07-06T19:50:49.000Z",
"num_tokens": 398,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1505
} |
[STATEMENT]
lemma heap_upds_ok_upd:
"heap_upds_ok (\<Gamma>, Upd x # S) \<Longrightarrow> x \<notin> domA \<Gamma> \<and> x \<notin> upds S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. heap_upds_ok (\<Gamma>, Upd x # S) \<Longrightarrow> x \<notin> domA \<Gamma> \<and> x \<notin> upds S
[PROOF STEP]
by auto | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Call_Arity_SestoftConf",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 129,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import numpy as np
from .sysEngVals import SysEngVals
__all__ = ['m5_flat_sed', 'm5_scale']
def m5_scale(expTime, nexp, airmass, FWHMeff, musky, darkSkyMag, Cm, dCm_infinity, kAtm,
             tauCloud=0, baseExpTime=15):
    """Return the scaled m5 (five-sigma limiting depth) for all filters.

    Parameters
    ----------
    expTime : float
        Exposure time (seconds) of each individual exposure.
    nexp : int
        Number of exposures in the visit; >1 triggers the coadd formula.
    airmass : float
        Airmass of the observation.
    FWHMeff : np.ndarray or pd.DataFrame
        Effective seeing FWHM (arcseconds), per filter.
    musky : np.ndarray or pd.DataFrame
        Sky brightness (mag/sq arcsec), per filter.
    darkSkyMag : np.ndarray or pd.DataFrame
        Dark-sky zenith brightness (mag/sq arcsec) used to scale musky.
    Cm, dCm_infinity : np.ndarray or pd.DataFrame
        Throughput normalization constants, per filter.
    kAtm : np.ndarray or pd.DataFrame
        Atmospheric extinction coefficients, per filter.
    tauCloud : float, optional
        Cloud extinction (magnitudes).
    baseExpTime : float, optional
        Exposure time assumed when Cm / dCm_infinity were derived; used to
        rescale the read-noise correction for this visit's expTime.

    Returns
    -------
    np.ndarray or pd.DataFrame
        m5 values scaled to the visit conditions.

    Note: the required inputs can be produced with the makeM5 function in
    lsst.syseng.throughputs.
    """
    # Read-noise correction scaling (overview paper, equation 7):
    # brighter sky or longer exposures make read noise less important.
    Tscale = expTime / baseExpTime * np.power(10.0, -0.4 * (musky - darkSkyMag))
    readnoise_term = dCm_infinity - 1.25 * np.log10(1 + (10**(0.8 * dCm_infinity) - 1)/Tscale)
    # Single-exposure m5; constants follow the definition of Cm/dCm_infinity.
    m5_single = (Cm + readnoise_term + 0.50 * (musky - 21.0) + 2.5 * np.log10(0.7 / FWHMeff) +
                 1.25 * np.log10(expTime / 30.0) - kAtm * (airmass - 1.0) - 1.1 * tauCloud)
    if nexp > 1:
        # Coadd nexp identical exposures.
        return 1.25 * np.log10(nexp * 10**(0.8 * m5_single))
    return m5_single
def m5_flat_sed(visitFilter, musky, FWHMeff, expTime, airmass, nexp=1, tauCloud=0):
    """Calculate the m5 value, using photometric scaling. Note, does not include shape of the object SED.
    Parameters
    ----------
    visitFilter : str
        One of u,g,r,i,z,y
    musky : float
        Surface brightness of the sky in mag/sq arcsec
    FWHMeff : float
        The seeing effective FWHM (arcsec)
    expTime : float
        Exposure time for each exposure in the visit.
    airmass : float
        Airmass of the observation (unitless)
    nexp : int, optional
        The number of exposures. Default 1. (total on-sky time = expTime * nexp)
    tauCloud : float (0.)
        Any extinction from clouds in magnitudes (positive values = more extinction)
    Returns
    -------
    m5 : float
        The five-sigma limiting depth of a point source observed in the given conditions.
    """
    # Set up expected extinction (kAtm) and m5 normalization values (Cm) for each filter.
    # The Cm values must be changed when telescope and site parameters are updated.
    #
    # These values are calculated using $SYSENG_THROUGHPUTS/python/calcM5.py.
    # This set of values are calculated using v1.2 of the SYSENG_THROUGHPUTS repo.
    # The exposure time scaling depends on knowing the value of the exposure time used to calculate Cm/etc.
    # Only define the dicts once on initial call
    # (function attributes act as a lazy, process-lifetime cache of the
    # system-engineering constants loaded from SysEngVals)
    if not hasattr(m5_flat_sed, 'Cm'):
        # Using Cm / dCm_infinity values calculated for a 1x30s visit.
        # This results in an error of about 0.01 mag in u band for 2x15s visits (< in other bands)
        # See https://github.com/lsst-pst/survey_strategy/blob/master/fbs_1.3/m5FlatSed%20update.ipynb
        # for a more in-depth evaluation.
        sev = SysEngVals()
        m5_flat_sed.baseExpTime = sev.exptime
        m5_flat_sed.Cm = sev.Cm
        m5_flat_sed.dCm_infinity = sev.dCm_infinity
        m5_flat_sed.kAtm = sev.kAtm
        # msky holds the per-filter dark-sky magnitudes used to scale musky
        m5_flat_sed.msky = sev.skyMag
    # Calculate adjustment if readnoise is significant for exposure time
    # (see overview paper, equation 7)
    Tscale = expTime / m5_flat_sed.baseExpTime * np.power(10.0, -0.4 * (musky - m5_flat_sed.msky[visitFilter]))
    dCm = 0.
    dCm += m5_flat_sed.dCm_infinity[visitFilter]
    dCm -= 1.25 * np.log10(1 + (10**(0.8 * m5_flat_sed.dCm_infinity[visitFilter]) - 1) / Tscale)
    # Calculate m5 for 1 exp - 30s and other constants here come from definition of Cm/dCm_infinity
    m5 = (m5_flat_sed.Cm[visitFilter] + dCm + 0.50 * (musky - 21.0) + 2.5 * np.log10(0.7 / FWHMeff) +
          1.25 * np.log10(expTime / 30.0) - m5_flat_sed.kAtm[visitFilter] * (airmass - 1.0) - 1.1 * tauCloud)
    # Then combine with coadd if >1 exposure
    if nexp > 1:
        m5 = 1.25 * np.log10(nexp * 10**(0.8 * m5))
    return m5
| {
"alphanum_fraction": 0.661341853,
"author": null,
"avg_line_length": 42.4406779661,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "624e00add82e70b7b646f00bc28ea37d83f2c31b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "RileyWClarke/flarubin",
"max_forks_repo_path": "rubin_sim/utils/m5_flat_sed.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "RileyWClarke/flarubin",
"max_issues_repo_path": "rubin_sim/utils/m5_flat_sed.py",
"max_line_length": 111,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "RileyWClarke/flarubin",
"max_stars_repo_path": "rubin_sim/utils/m5_flat_sed.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1541,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5008
} |
from fastapi import FastAPI, WebSocket
import asyncio
import numpy as np
app = FastAPI()


@app.websocket("/pressureTaps")
async def websocket_endpoint(websocket: WebSocket):
    """Stream mock pressure-tap readings to the Streamlit app.

    This is where the serial sensor data integrates to the Streamlit app
    using websockets; currently emits 4 uniform-random values every 0.1 s.
    """
    await websocket.accept()
    while True:
        payload = {"data": list(np.random.uniform(0.5, 0.95, size=4))}
        await websocket.send_json(payload)
        await asyncio.sleep(0.1)
| {
"alphanum_fraction": 0.652173913,
"author": null,
"avg_line_length": 25.4210526316,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "fc34532bbd5eefee0ea5d24f7c551a95f8fc9ce2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-03-13T16:07:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-03-13T16:07:57.000Z",
"max_forks_repo_head_hexsha": "c80b084570676882f4f98bb1aaed62b022dd7116",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "raihaan123/ConDiNozzle",
"max_forks_repo_path": "serial-data/daq.py",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "c80b084570676882f4f98bb1aaed62b022dd7116",
"max_issues_repo_issues_event_max_datetime": "2021-09-10T20:39:23.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-08-17T16:14:52.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "raihaan123/ConDiNozzle",
"max_issues_repo_path": "serial-data/daq.py",
"max_line_length": 88,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "c80b084570676882f4f98bb1aaed62b022dd7116",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "raihaan123/ConDiNozzle",
"max_stars_repo_path": "serial-data/daq.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-09T21:42:34.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-09T21:42:34.000Z",
"num_tokens": 105,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 483
} |
# Tests for the artificial-data generators: exact values for the small,
# analytically known cases, then shape/eltype checks for the rest.
@testset "Data" begin
    @test artificialIn_SAR_image(2) == 2 * π * ones(2, 2)
    @test artificial_S1_slope_signal(20, 0.0) == repeat([-π / 2], 20)
    # out-of-range arguments yield `missing`
    @test ismissing(artificial_S1_signal(-1.0))
    @test ismissing(artificial_S1_signal(2.0))
    @test artificial_S1_signal(2) == [-3 * π / 4, -3 * π / 4]
    # for the remainder check data types only
    @test length(artificial_S1_signal(20)) == 20
    @test size(artificial_S2_whirl_image(64)) == (64, 64)
    @test length(artificial_S2_whirl_image(64)[1, 1]) == 3
    @test size(artificial_S2_rotation_image(64)) == (64, 64)
    @test length(artificial_S2_rotation_image(64)[1, 1]) == 3
    @test size(artificial_S2_whirl_patch(8)) == (8, 8)
    @test length(artificial_S2_whirl_patch(8)[1, 1]) == 3
    @test size(artificial_SPD_image(8)) == (8, 8)
    @test size(artificial_SPD_image(8)[1, 1]) == (3, 3)
    @test size(artificial_SPD_image2(8)) == (8, 8)
    @test size(artificial_SPD_image2(8)[1, 1]) == (3, 3)
    @test eltype(artificial_SPD_image2(8)) == Array{Float64,2}
    @test length(artificial_S2_lemniscate([0.0, 0.0, 1.0], 20)) == 20
    @test length(artificial_S2_lemniscate([0.0, 0.0, 1.0], 20)[1]) == 3
end
| {
"alphanum_fraction": 0.6470092671,
"author": null,
"avg_line_length": 37.09375,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "815de23bdc3509dc3ec0261ff97bd166c2fec1b7",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 26,
"max_forks_repo_forks_event_max_datetime": "2022-03-16T21:36:59.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-04-14T11:49:01.000Z",
"max_forks_repo_head_hexsha": "89c60404c7cf756102bcf45dd58dc443ef2b2d4e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "fkastner/Manopt.jl",
"max_forks_repo_path": "test/helpers/test_data.jl",
"max_issues_count": 90,
"max_issues_repo_head_hexsha": "89c60404c7cf756102bcf45dd58dc443ef2b2d4e",
"max_issues_repo_issues_event_max_datetime": "2022-03-27T08:55:37.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-03-30T08:00:02.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "fkastner/Manopt.jl",
"max_issues_repo_path": "test/helpers/test_data.jl",
"max_line_length": 71,
"max_stars_count": 141,
"max_stars_repo_head_hexsha": "89c60404c7cf756102bcf45dd58dc443ef2b2d4e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fkastner/Manopt.jl",
"max_stars_repo_path": "test/helpers/test_data.jl",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T09:37:54.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-30T08:00:06.000Z",
"num_tokens": 469,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1187
} |
import csv
import numpy as np
import torch
import matplotlib.pyplot as plt
color_bar = ['black', 'red', 'blue', 'green', 'brown', 'yellow', 'cyan', 'magenta']
def TrainHistoryPlot(his, his_label, save_name, title, axis_name, save = True):
    """Plot training-history curves (acc/loss versus iteration or epoch).

    Args:
        his: list of sequences; his[0] is the x-axis (iteration or epoch)
            and every other entry is one model's acc/loss curve.
        his_label: legend labels, one per curve in his[1:].
        save_name: output file name without extension; written as
            '<save_name>.png'.
        title: figure title.
        axis_name: two-element sequence (x-axis label, y-axis label).
        save: if True, write the figure to disk; otherwise show it.
    """
    plt.figure(figsize = (10, 4))
    for i in range(1, len(his)):
        plt.plot(his[0], his[i])
    plt.title(title)
    plt.xlabel(axis_name[0])
    plt.ylabel(axis_name[1])
    plt.legend(his_label, loc = 'upper right')
    if save:
        plt.savefig(save_name + '.png')
        # Close the figure so repeated calls do not accumulate open figures
        # (matplotlib warns and leaks memory after ~20 unclosed figures).
        plt.close()
        print('Picture: ' + save_name + '.png done.')
    else:
        plt.show()
def ModelWeightPlot(reduced_weight, model_name, save_name, title, save = True):
    """Scatter-plot dimensionality-reduced (2-D) weight vectors per model.

    Args:
        reduced_weight: list of arrays, one per model; each array has shape
            (num_states, 2) holding PC1/PC2 coordinates of saved weights.
        model_name: legend label for each model; must match reduced_weight.
        save_name: output file name without extension ('<save_name>.png').
        title: figure title.
        save: if True, write the figure to disk; otherwise show it.

    Raises:
        IndexError: more models than available default colors.
        RuntimeError: reduced_weight and model_name lengths differ.
    """
    # Derive the limit from the palette instead of hard-coding 8, so
    # extending color_bar automatically extends the supported model count.
    if len(reduced_weight) > len(color_bar):
        raise IndexError('Default colors are up to eight colors.')
    if len(reduced_weight) != len(model_name):
        raise RuntimeError('Please check the list of model and model_name.')
    plt.figure(figsize = (10, 8))
    for weight in range(len(reduced_weight)):
        for state in range(reduced_weight[weight].shape[0]):
            # Attach the legend label only to the first state so each model
            # appears once in the legend.
            if state == 0:
                plt.scatter(reduced_weight[weight][state, 0], reduced_weight[weight][state, 1], c = color_bar[weight], label = model_name[weight])
            else:
                plt.scatter(reduced_weight[weight][state, 0], reduced_weight[weight][state, 1], c = color_bar[weight])
    plt.title(title)
    plt.xlabel('PC1')
    plt.ylabel('PC2')
    plt.legend(loc = 'upper right')
    if save:
        plt.savefig(save_name + '.png')
        # Release the figure to avoid leaking open figures across calls.
        plt.close()
        print('Picture: ' + save_name + '.png done.')
    else:
        plt.show()
def MinimumRatioPlot(minimum_ratio, loss, save_name, save = True, ylim = (28.38825, 28.38925)):
    """Scatter-plot minimum ratio versus loss.

    Args:
        minimum_ratio: 1-D array of minimum-ratio values.
        loss: 1-D array of loss values; same length as minimum_ratio.
        save_name: output file name without extension ('<save_name>.png').
        save: if True, write the figure to disk; otherwise show it.
        ylim: (low, high) y-axis limits. The default reproduces the original
            hard-coded, experiment-specific window; pass None to let
            matplotlib autoscale.

    Raises:
        RuntimeError: the two arrays have different lengths.
    """
    if minimum_ratio.shape[0] != loss.shape[0]:
        raise RuntimeError('Please check the loss and minimum ratio array.')
    plt.figure(figsize = (10, 8))
    plt.scatter(minimum_ratio, loss)
    plt.xlabel('minimum_ratio')
    plt.ylabel('loss')
    if ylim is not None:
        plt.ylim(ylim[0], ylim[1])
    if save:
        plt.savefig(save_name + '.png')
        # Release the figure to avoid leaking open figures across calls.
        plt.close()
        print('Picture: ' + save_name + '.png done.')
    else:
        plt.show()
| {
"alphanum_fraction": 0.6355460385,
"author": null,
"avg_line_length": 33.8405797101,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3ed243b281562ca4905d4db58f20f5f678fd8b2f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0591a1a6f461da0a02b9e1b83f37ad3579f36f4d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "b05611038/MLDS_2019SPRING",
"max_forks_repo_path": "HW1-2/lib/visualize.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0591a1a6f461da0a02b9e1b83f37ad3579f36f4d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "b05611038/MLDS_2019SPRING",
"max_issues_repo_path": "HW1-2/lib/visualize.py",
"max_line_length": 146,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "0591a1a6f461da0a02b9e1b83f37ad3579f36f4d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "b05611038/MLDS_2019SPRING",
"max_stars_repo_path": "HW1-2/lib/visualize.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-05T03:16:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-06-20T06:47:30.000Z",
"num_tokens": 600,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2335
} |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Casual Volumetric Capture datasets.
Note: Please benchmark before submitted changes to this module. It's very easy
to introduce data loading bottlenecks!
"""
import json
from typing import List, Tuple
from absl import logging
import cv2
import numpy as np
from nerfies import gpath
from nerfies import types
from nerfies import utils
from nerfies.datasets import core
def load_scene_info(data_dir: types.PathType) -> Tuple[np.ndarray, float, float, float]:
    """Loads the scene center/scale and near/far planes from scene.json.

    Args:
        data_dir: the path to the dataset.

    Returns:
        scene_center: the center of the scene (unscaled coordinates).
        scene_scale: the scale of the scene.
        near: the near plane of the scene (scaled coordinates).
        far: the far plane of the scene (scaled coordinates).

    Raises:
        An error if scene.json cannot be opened — exact exception type depends
        on the gpath backend (typically FileNotFoundError), plus KeyError if
        any of the required keys is missing.
    """
    scene_json_path = gpath.GPath(data_dir, "scene.json")
    with scene_json_path.open("r") as f:
        scene_json = json.load(f)
    scene_center = np.array(scene_json["center"])
    scene_scale = scene_json["scale"]
    near = scene_json["near"]
    far = scene_json["far"]
    return scene_center, scene_scale, near, far
def _load_image(path: types.PathType) -> np.ndarray:
    """Decode the image at `path` into a float32 RGB array scaled to [0, 1]."""
    image_path = gpath.GPath(path)
    with image_path.open("rb") as f:
        encoded = np.asarray(bytearray(f.read()), dtype=np.uint8)
    # OpenCV decodes to BGR; flip the channel axis to get RGB.
    decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)[:, :, ::-1]
    return np.asarray(decoded).astype(np.float32) / 255.0
def _load_dataset_ids(data_dir: types.PathType) -> Tuple[List[str], List[str]]:
    """Loads dataset IDs."""
    ids_path = gpath.GPath(data_dir, "dataset.json")
    logging.info("*** Loading dataset IDs from %s", ids_path)
    with ids_path.open("r") as f:
        payload = json.load(f)
    # IDs may be stored as ints in the JSON; normalize everything to str.
    train_ids = [str(item) for item in payload["train_ids"]]
    val_ids = [str(item) for item in payload["val_ids"]]
    return train_ids, val_ids
class NerfiesDataSource(core.DataSource):
    """Data loader for videos.

    Reads a processed capture laid out as:
        scene.json            - scene center/scale and near/far planes
        dataset.json          - train/val item IDs
        rgb/<scale>x/*.png    - images downsampled by `image_scale`
        camera/*.json         - per-item cameras
        camera-paths/...      - test-trajectory cameras
        metadata.json         - optional per-item appearance/camera/warp IDs
    """

    def __init__(
        self,
        data_dir,
        image_scale: int,
        shuffle_pixels=False,
        camera_type="json",
        test_camera_trajectory="orbit-extreme",
        **kwargs,
    ):
        # data_dir: root directory of the capture.
        # image_scale: downsampling factor; images are read from rgb/<scale>x.
        # camera_type: only "json" is supported (see camera_ext).
        self.data_dir = gpath.GPath(data_dir)
        super().__init__(**kwargs)
        # scene.json supplies the normalization applied to cameras and points.
        self.scene_center, self.scene_scale, self._near, self._far = load_scene_info(
            self.data_dir
        )
        self.test_camera_trajectory = test_camera_trajectory
        self.image_scale = image_scale
        self.shuffle_pixels = shuffle_pixels
        # Load IDs from JSON if it exists. This is useful since COLMAP fails on
        # some images so this gives us the ability to skip invalid images.
        self._train_ids, self._val_ids = _load_dataset_ids(self.data_dir)
        self.rgb_dir = gpath.GPath(data_dir, "rgb", f"{image_scale}x")
        self.depth_dir = gpath.GPath(data_dir, "depth", f"{image_scale}x")
        self.camera_type = camera_type
        self.camera_dir = gpath.GPath(data_dir, "camera")
        # metadata.json is optional; when absent, metadata_dict stays None and
        # the get_*_id accessors below will fail.
        metadata_path = self.data_dir / "metadata.json"
        self.metadata_dict = None
        if metadata_path.exists():
            with metadata_path.open("r") as f:
                self.metadata_dict = json.load(f)

    @property
    def near(self):
        # Near plane (scaled scene coordinates), from scene.json.
        return self._near

    @property
    def far(self):
        # Far plane (scaled scene coordinates), from scene.json.
        return self._far

    @property
    def camera_ext(self):
        # File extension of serialized cameras for the configured camera_type.
        if self.camera_type == "json":
            return ".json"
        raise ValueError(f"Unknown camera_type {self.camera_type}")

    @property
    def train_ids(self):
        return self._train_ids

    @property
    def val_ids(self):
        return self._val_ids

    def get_rgb_path(self, item_id):
        # Path to the (downscaled) RGB image for `item_id`.
        return self.rgb_dir / f"{item_id}.png"

    def load_rgb(self, item_id):
        # Decoded float32 RGB image in [0, 1] for `item_id`.
        return _load_image(self.rgb_dir / f"{item_id}.png")

    def load_camera(self, item_id, scale_factor=1.0):
        """Load the camera for `item_id` (or for an explicit GPath).

        The camera is rescaled by scale_factor / image_scale so that its
        intrinsics match the downsampled images, and normalized with the
        scene center/scale.
        """
        if isinstance(item_id, gpath.GPath):
            camera_path = item_id
        else:
            if self.camera_type == "json":
                camera_path = self.camera_dir / f"{item_id}{self.camera_ext}"
            else:
                raise ValueError(f"Unknown camera type {self.camera_type!r}.")
        return core.load_camera(
            camera_path,
            scale_factor=scale_factor / self.image_scale,
            scene_center=self.scene_center,
            scene_scale=self.scene_scale,
        )

    def glob_cameras(self, path):
        # Sorted list of every camera file under `path`.
        path = gpath.GPath(path)
        return sorted(path.glob(f"*{self.camera_ext}"))

    def load_test_cameras(self, count=None):
        """Load the test-trajectory cameras; subsample to ~`count` if given.

        Returns [] (with a warning) when the trajectory directory is missing.
        """
        camera_dir = self.data_dir / "camera-paths" / self.test_camera_trajectory
        if not camera_dir.exists():
            logging.warning("test camera path does not exist: %s", str(camera_dir))
            return []
        camera_paths = sorted(camera_dir.glob(f"*{self.camera_ext}"))
        if count is not None:
            # Even stride subsampling; stride >= 1 keeps all paths when
            # count exceeds the number of cameras.
            stride = max(1, len(camera_paths) // count)
            camera_paths = camera_paths[::stride]
        cameras = utils.parallel_map(self.load_camera, camera_paths)
        return cameras

    def load_points(self):
        # Load the sparse point cloud and map it into scaled scene coordinates.
        with (self.data_dir / "points.npy").open("rb") as f:
            points = np.load(f)
        points = (points - self.scene_center) * self.scene_scale
        return points.astype(np.float32)

    def get_appearance_id(self, item_id):
        # NOTE(review): assumes metadata.json existed at init; otherwise
        # metadata_dict is None and this raises TypeError.
        return self.metadata_dict[item_id]["appearance_id"]

    def get_camera_id(self, item_id):
        return self.metadata_dict[item_id]["camera_id"]

    def get_warp_id(self, item_id):
        return self.metadata_dict[item_id]["warp_id"]
| {
"alphanum_fraction": 0.6558248632,
"author": null,
"avg_line_length": 32.6275510204,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9c69cf57cb3582ca7ee3c6d1e960b7cbae6a0da3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b30fe19edb6435e770b35dc07aab44ae62c96278",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "dukebw/nerfies",
"max_forks_repo_path": "nerfies/datasets/nerfies.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b30fe19edb6435e770b35dc07aab44ae62c96278",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "dukebw/nerfies",
"max_issues_repo_path": "nerfies/datasets/nerfies.py",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b30fe19edb6435e770b35dc07aab44ae62c96278",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "dukebw/nerfies",
"max_stars_repo_path": "nerfies/datasets/nerfies.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1486,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6395
} |
[STATEMENT]
lemma len_space_left:
"left (space ts v c) \<le> right (ext v) \<longrightarrow> left (len v ts c) \<ge> left (space ts v c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. left (space ts v c) \<le> right (ext v) \<longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. left (space ts v c) \<le> right (ext v) \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
assume assm:"left (space ts v c) \<le> right (ext v)"
[PROOF STATE]
proof (state)
this:
left (space ts v c) \<le> right (ext v)
goal (1 subgoal):
1. left (space ts v c) \<le> right (ext v) \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
left (space ts v c) \<le> right (ext v)
[PROOF STEP]
show "left (len v ts c) \<ge> left (space ts v c)"
[PROOF STATE]
proof (prove)
using this:
left (space ts v c) \<le> right (ext v)
goal (1 subgoal):
1. left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
proof (cases "right ((space ts v) c) < left (ext v)" )
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>left (space ts v c) \<le> right (ext v); right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
2. \<lbrakk>left (space ts v c) \<le> right (ext v); \<not> right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
right (space ts v c) < left (ext v)
goal (2 subgoals):
1. \<lbrakk>left (space ts v c) \<le> right (ext v); right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
2. \<lbrakk>left (space ts v c) \<le> right (ext v); \<not> right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
right (space ts v c) < left (ext v)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
right (space ts v c) < left (ext v)
goal (1 subgoal):
1. left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
using len_def len_left real_int.left_leq_right
[PROOF STATE]
proof (prove)
using this:
right (space ts v c) < left (ext v)
len ?v ?ts ?c \<equiv> if right (ext ?v) < left (space ?ts ?v ?c) then Abs_real_int (right (ext ?v), right (ext ?v)) else if right (space ?ts ?v ?c) < left (ext ?v) then Abs_real_int (left (ext ?v), left (ext ?v)) else Abs_real_int (max (left (ext ?v)) (left (space ?ts ?v ?c)), min (right (ext ?v)) (right (space ?ts ?v ?c)))
left (ext ?v) \<le> left (len ?v ?ts ?c)
left ?r \<le> right ?r
goal (1 subgoal):
1. left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
by (meson le_less_trans not_less order.asym)
[PROOF STATE]
proof (state)
this:
left (space ts v c) \<le> left (len v ts c)
goal (1 subgoal):
1. \<lbrakk>left (space ts v c) \<le> right (ext v); \<not> right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>left (space ts v c) \<le> right (ext v); \<not> right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> right (space ts v c) < left (ext v)
goal (1 subgoal):
1. \<lbrakk>left (space ts v c) \<le> right (ext v); \<not> right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> right (space ts v c) < left (ext v)
[PROOF STEP]
have "len v ts c =
Abs_real_int ((max (left (ext v)) (left ((space ts v) c))),
min (right (ext v)) (right ((space ts v) c)))"
[PROOF STATE]
proof (prove)
using this:
\<not> right (space ts v c) < left (ext v)
goal (1 subgoal):
1. len v ts c = Abs_real_int (max (left (ext v)) (left (space ts v c)), min (right (ext v)) (right (space ts v c)))
[PROOF STEP]
using len_def assm
[PROOF STATE]
proof (prove)
using this:
\<not> right (space ts v c) < left (ext v)
len ?v ?ts ?c \<equiv> if right (ext ?v) < left (space ?ts ?v ?c) then Abs_real_int (right (ext ?v), right (ext ?v)) else if right (space ?ts ?v ?c) < left (ext ?v) then Abs_real_int (left (ext ?v), left (ext ?v)) else Abs_real_int (max (left (ext ?v)) (left (space ?ts ?v ?c)), min (right (ext ?v)) (right (space ?ts ?v ?c)))
left (space ts v c) \<le> right (ext v)
goal (1 subgoal):
1. len v ts c = Abs_real_int (max (left (ext v)) (left (space ts v c)), min (right (ext v)) (right (space ts v c)))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
len v ts c = Abs_real_int (max (left (ext v)) (left (space ts v c)), min (right (ext v)) (right (space ts v c)))
goal (1 subgoal):
1. \<lbrakk>left (space ts v c) \<le> right (ext v); \<not> right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
len v ts c = Abs_real_int (max (left (ext v)) (left (space ts v c)), min (right (ext v)) (right (space ts v c)))
[PROOF STEP]
have "left (len v ts c) = max (left (ext v)) (left ((space ts v) c))"
[PROOF STATE]
proof (prove)
using this:
len v ts c = Abs_real_int (max (left (ext v)) (left (space ts v c)), min (right (ext v)) (right (space ts v c)))
goal (1 subgoal):
1. left (len v ts c) = max (left (ext v)) (left (space ts v c))
[PROOF STEP]
using Abs_real_int_inverse False assm real_int.left_leq_right
[PROOF STATE]
proof (prove)
using this:
len v ts c = Abs_real_int (max (left (ext v)) (left (space ts v c)), min (right (ext v)) (right (space ts v c)))
?y \<in> {r. fst r \<le> snd r} \<Longrightarrow> Rep_real_int (Abs_real_int ?y) = ?y
\<not> right (space ts v c) < left (ext v)
left (space ts v c) \<le> right (ext v)
left ?r \<le> right ?r
goal (1 subgoal):
1. left (len v ts c) = max (left (ext v)) (left (space ts v c))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
left (len v ts c) = max (left (ext v)) (left (space ts v c))
goal (1 subgoal):
1. \<lbrakk>left (space ts v c) \<le> right (ext v); \<not> right (space ts v c) < left (ext v)\<rbrakk> \<Longrightarrow> left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
left (len v ts c) = max (left (ext v)) (left (space ts v c))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
left (len v ts c) = max (left (ext v)) (left (space ts v c))
goal (1 subgoal):
1. left (space ts v c) \<le> left (len v ts c)
[PROOF STEP]
by linarith
[PROOF STATE]
proof (state)
this:
left (space ts v c) \<le> left (len v ts c)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
left (space ts v c) \<le> left (len v ts c)
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Hybrid_Multi_Lane_Spatial_Logic_Length",
"hexsha": null,
"include": null,
"lang": null,
"length": 25,
"llama_tokens": 2836,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
function [dx,dy]= centered_gradient(input,dx,dy, nx,ny)
% CENTERED_GRADIENT  Centered finite-difference gradient of a flattened image.
%   input : image stored as a 1-D vector with row stride nx.
%   dx,dy : incoming buffers are ignored — both are reallocated immediately
%           below, so these arguments only fix the output arity.
%           NOTE(review): overwriting the output parameters looks unintended.
%   nx,ny : image width/height (may arrive non-integer; rounded below).
% NOTE(review): "nx*ny*+nx" parses as nx*ny*(+nx) = nx^2*ny, allocating far
% more than the likely intended nx*ny+nx — presumably a typo; confirm.
dx = zeros(1,round(nx*ny*+nx));
dy = zeros(1,round(nx*ny*+nx));
nx = round(nx);
ny = round(ny);
size(input); % result discarded; likely leftover debugging
% Interior pixels: centered differences in x (stride 1) and y (stride nx).
for i = 1:ny-1
    for j = 1: nx-1
        k = round(i * nx + j);
        if(nx+k < length(input)) % guard against reading past the vector end
            dx(k) = 0.5*(input(k+1) - input(k-1));
            dy(k) = 0.5*(input(k+nx) - input(k-nx));
        end
    end
end
% First and last rows.
% NOTE(review): dx(z) mixes the indices z and j (input(z+1) vs input(j-1));
% since z == j-1 here the x-difference is off-center — verify against the
% reference implementation this was ported from.
z = 1;
for j = 2: nx-1
    dx(z) = 0.5*(input(z+1) - input(j-1));
    dy(z) = 0.5*(input(z+nx) - input(z));
    k = (ny - 1) * nx + z;
    if(k < length(input))
        dx(k) = 0.5*(input(k+1) - input(k-1));
        dy(k) = 0.5*(input(k) - input(k-nx));
    end
    z = z +1;
end
% First and last columns, plus corner pixels.
% NOTE(review): the corner assignments below sit INSIDE this loop and are
% recomputed identically every iteration; they look like they belong after
% the loop (likely a misplaced "end").
for i = 1: ny-1
    p = (i * nx)+1;
    if(p+nx < length(input))
        dx(p) = 0.5*(input(p+1) - input(p));
        dy(p) = 0.5*(input(p+nx) - input(p-nx));
    end
    k = (i+1) * nx - 1;
    if(k+nx < length(input))
        dx(k) = 0.5*(input(k) - input(k-1));
        dy(k) = 0.5*(input(k+nx) - input(k-nx));
    end
    dx(1) = 0.5*(input(2) - input(1));
    dy(1) = 0.5*(input(nx) - input(1));
    dx(nx-1) = 0.5*(input(nx-1) - input(nx-2));
    dy(nx-1) = 0.5*(input(2*nx-1) - input(nx-1));
    dx((ny-1)*nx) = 0.5*(input((ny-1)*nx + 1) - input((ny-1)*nx));
    dy((ny-1)*nx) = 0.5*(input((ny-1)*nx) - input((ny-2)*nx));
    if(ny*nx-2 <length(input))
        dx(ny*nx-2) = 0.5*(input(ny*nx-2) - input(ny*nx-1-1-1));
        dy(ny*nx-2) = 0.5*(input(ny*nx-2) - input((ny-2)*nx-2));
        disp('at least once');
    end
end
end
"alphanum_fraction": null,
"author": "zhangqianqianQQ",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/zhangqianqianQQ-MachineVisionAlgorithm/MachineVisionAlgorithm-683338f6c3b1aab9fa2b80026915fe936aebf0ee/去噪算法/SPTWO_matlab-master/centered_gradient.m",
"reason": null,
"repo": "MachineVisionAlgorithm",
"save_path": "github-repos/MATLAB/zhangqianqianQQ-MachineVisionAlgorithm",
"sha": "683338f6c3b1aab9fa2b80026915fe936aebf0ee",
"size": null
} |
{-# OPTIONS --without-K --rewriting #-}
open import lib.Base
open import lib.PathFunctor
open import lib.PathGroupoid
open import lib.Equivalence
{- Structural lemmas about paths over paths
The lemmas here have the form
[↓-something-in] : introduction rule for the something
[↓-something-out] : elimination rule for the something
[↓-something-β] : β-reduction rule for the something
[↓-something-η] : η-reduction rule for the something
The possible somethings are:
[cst] : constant fibration
[cst2] : fibration constant in the second argument
[cst2×] : fibration constant and nondependent in the second argument
[ap] : the path below is of the form [ap f p]
[fst×] : the fibration is [fst] (nondependent product)
[snd×] : the fibration is [snd] (nondependent product)
The rule of prime: The above lemmas should choose
between [_∙_] and [_∙'_] in a way that, if the underlying path is [idp],
then the entire lemma reduces to an identity function.
Otherwise, the lemma would have the suffix [in'] or [out'], meaning that
all the choices of [_∙_] or [_∙'_] are exactly the opposite ones.
You can also go back and forth between dependent paths and homogeneous paths
with a transport on one side with the functions
[to-transp], [from-transp], [to-transp-β]
[to-transp!], [from-transp!], [to-transp!-β]
More lemmas about paths over paths are present in the lib.types.* modules
(depending on the type constructor of the fibration)
-}
module lib.PathOver where
{- Dependent paths in a constant fibration -}
module _ {i j} {A : Type i} {B : Type j} where

  -- Introduction: an ordinary path lifts to a path over [p].
  ↓-cst-in : {x y : A} {p : x == y} {u v : B}
    → u == v
    → u == v [ (λ _ → B) ↓ p ]
  ↓-cst-in {p = idp} q = q

  -- Elimination: a path over a constant fibration is an ordinary path.
  ↓-cst-out : {x y : A} {p : x == y} {u v : B}
    → u == v [ (λ _ → B) ↓ p ]
    → u == v
  ↓-cst-out {p = idp} q = q

  -- β-rule: [↓-cst-out] undoes [↓-cst-in].
  ↓-cst-β : {x y : A} (p : x == y) {u v : B} (q : u == v)
    → (↓-cst-out (↓-cst-in {p = p} q) == q)
  ↓-cst-β idp q = idp

  {- Interaction of [↓-cst-in] with [_∙_] -}
  ↓-cst-in-∙ : {x y z : A} (p : x == y) (q : y == z) {u v w : B}
    (p' : u == v) (q' : v == w)
    → ↓-cst-in {p = p ∙ q} (p' ∙ q')
      == ↓-cst-in {p = p} p' ∙ᵈ ↓-cst-in {p = q} q'
  ↓-cst-in-∙ idp idp idp idp = idp

  {- Interaction of [↓-cst-in] with [_∙'_] -}
  ↓-cst-in-∙' : {x y z : A} (p : x == y) (q : y == z) {u v w : B}
    (p' : u == v) (q' : v == w)
    → ↓-cst-in {p = p ∙' q} (p' ∙' q')
      == ↓-cst-in {p = p} p' ∙'ᵈ ↓-cst-in {p = q} q'
  ↓-cst-in-∙' idp idp idp idp = idp

  {- Introduction of an equality between [↓-cst-in]s (used to deduce the
     recursor from the eliminator in HIT with 2-paths) -}
  ↓-cst-in2 : {a a' : A} {u v : B}
    {p₀ : a == a'} {p₁ : a == a'} {q₀ q₁ : u == v} {q : p₀ == p₁}
    → q₀ == q₁
    → (↓-cst-in {p = p₀} q₀ == ↓-cst-in {p = p₁} q₁ [ (λ p → u == v [ (λ _ → B) ↓ p ]) ↓ q ])
  ↓-cst-in2 {p₀ = idp} {p₁ = .idp} {q₀} {q₁} {idp} k = k

-- Dependent paths in a fibration constant in the second argument
module _ {i j k} {A : Type i} {B : A → Type j} {C : A → Type k} where

  ↓-cst2-in : {x y : A} (p : x == y) {b : C x} {c : C y}
    (q : b == c [ C ↓ p ]) {u : B x} {v : B y}
    → u == v [ B ↓ p ]
    → u == v [ (λ xy → B (fst xy)) ↓ (pair= p q) ]
  ↓-cst2-in idp idp r = r

  ↓-cst2-out : {x y : A} (p : x == y) {b : C x} {c : C y}
    (q : b == c [ C ↓ p ]) {u : B x} {v : B y}
    → u == v [ (λ xy → B (fst xy)) ↓ (pair= p q) ]
    → u == v [ B ↓ p ]
  ↓-cst2-out idp idp r = r

-- Dependent paths in a fibration constant and non dependent in the
-- second argument
module _ {i j k} {A : Type i} {B : A → Type j} {C : Type k} where

  ↓-cst2×-in : {x y : A} (p : x == y) {b c : C}
    (q : b == c) {u : B x} {v : B y}
    → u == v [ B ↓ p ]
    → u == v [ (λ xy → B (fst xy)) ↓ (pair×= p q) ]
  ↓-cst2×-in idp idp r = r

  ↓-cst2×-out : {x y : A} (p : x == y) {b c : C}
    (q : b == c) {u : B x} {v : B y}
    → u == v [ (λ xy → B (fst xy)) ↓ (pair×= p q) ]
    → u == v [ B ↓ p ]
  ↓-cst2×-out idp idp r = r
-- Dependent paths in the universal fibration over the universe
↓-idf-out : ∀ {i} {A B : Type i} (p : A == B) {u : A} {v : B}
  → u == v [ (λ x → x) ↓ p ]
  → coe p u == v
↓-idf-out idp = idf _

↓-idf-in : ∀ {i} {A B : Type i} (p : A == B) {u : A} {v : B}
  → coe p u == v
  → u == v [ (λ x → x) ↓ p ]
↓-idf-in idp = idf _

-- Dependent paths over [ap f p]
module _ {i j k} {A : Type i} {B : Type j} (C : B → Type k) (f : A → B) where

  -- A path over [p] in the pulled-back fibration [C ∘ f] is a path over
  -- [ap f p] in [C], and conversely.
  ↓-ap-in : {x y : A} {p : x == y} {u : C (f x)} {v : C (f y)}
    → u == v [ C ∘ f ↓ p ]
    → u == v [ C ↓ ap f p ]
  ↓-ap-in {p = idp} idp = idp

  ↓-ap-out : {x y : A} (p : x == y) {u : C (f x)} {v : C (f y)}
    → u == v [ C ↓ ap f p ]
    → u == v [ C ∘ f ↓ p ]
  ↓-ap-out idp idp = idp

-- Dependent paths over [ap2 f p q]
module _ {i j k l} {A : Type i} {B : Type j} {C : Type k} (D : C → Type l)
  (f : A → B → C) where

  ↓-ap2-in : {x y : A} {p : x == y} {w z : B} {q : w == z}
    {u : D (f x w)} {v : D (f y z)}
    → u == v [ D ∘ uncurry f ↓ pair×= p q ]
    → u == v [ D ↓ ap2 f p q ]
  ↓-ap2-in {p = idp} {q = idp} α = α

  ↓-ap2-out : {x y : A} {p : x == y} {w z : B} {q : w == z}
    {u : D (f x w)} {v : D (f y z)}
    → u == v [ D ↓ ap2 f p q ]
    → u == v [ D ∘ uncurry f ↓ pair×= p q ]
  ↓-ap2-out {p = idp} {q = idp} α = α

-- [apd] for a function of two (dependent) arguments, giving a path over
-- [pair= p q].
apd↓ : ∀ {i j k} {A : Type i} {B : A → Type j} {C : (a : A) → B a → Type k}
  (f : {a : A} (b : B a) → C a b) {x y : A} {p : x == y}
  {u : B x} {v : B y} (q : u == v [ B ↓ p ])
  → f u == f v [ (λ xy → C (fst xy) (snd xy)) ↓ pair= p q ]
apd↓ f {p = idp} idp = idp

apd↓=apd : ∀ {i j} {A : Type i} {B : A → Type j} (f : (a : A) → B a) {x y : A}
  (p : x == y) → (apd f p == ↓-ap-out _ _ p (apd↓ {A = Unit} f {p = idp} p))
apd↓=apd f idp = idp
-- Paths in the fibrations [fst] and [snd]
module _ {i j} where

  ↓-fst×-out : {A A' : Type i} {B B' : Type j} (p : A == A') (q : B == B')
    {u : A} {v : A'}
    → u == v [ fst ↓ pair×= p q ]
    → u == v [ (λ X → X) ↓ p ]
  ↓-fst×-out idp idp h = h

  ↓-snd×-in : {A A' : Type i} {B B' : Type j} (p : A == A') (q : B == B')
    {u : B} {v : B'}
    → u == v [ (λ X → X) ↓ q ]
    → u == v [ snd ↓ pair×= p q ]
  ↓-snd×-in idp idp h = h

-- Mediating dependent paths with the transport version
module _ {i j} {A : Type i} where

  -- [from-transp]/[to-transp] convert between a dependent path over [p]
  -- and a homogeneous path with a forward [transport] on the left.
  from-transp : (B : A → Type j) {a a' : A} (p : a == a')
    {u : B a} {v : B a'}
    → (transport B p u == v)
    → (u == v [ B ↓ p ])
  from-transp B idp idp = idp

  to-transp : {B : A → Type j} {a a' : A} {p : a == a'}
    {u : B a} {v : B a'}
    → (u == v [ B ↓ p ])
    → (transport B p u == v)
  to-transp {p = idp} idp = idp

  to-transp-β : (B : A → Type j) {a a' : A} (p : a == a')
    {u : B a} {v : B a'}
    (q : transport B p u == v)
    → to-transp (from-transp B p q) == q
  to-transp-β B idp idp = idp

  to-transp-η : {B : A → Type j} {a a' : A} {p : a == a'}
    {u : B a} {v : B a'}
    (q : u == v [ B ↓ p ])
    → from-transp B p (to-transp q) == q
  to-transp-η {p = idp} idp = idp

  -- The β/η rules package the conversion into an equivalence.
  to-transp-equiv : (B : A → Type j) {a a' : A} (p : a == a')
    {u : B a} {v : B a'} → (u == v [ B ↓ p ]) ≃ (transport B p u == v)
  to-transp-equiv B p =
    equiv to-transp (from-transp B p) (to-transp-β B p) (to-transp-η)

  -- Variants with the backwards transport [transport!] on the right.
  from-transp! : (B : A → Type j)
    {a a' : A} (p : a == a')
    {u : B a} {v : B a'}
    → (u == transport! B p v)
    → (u == v [ B ↓ p ])
  from-transp! B idp idp = idp

  to-transp! : {B : A → Type j}
    {a a' : A} {p : a == a'}
    {u : B a} {v : B a'}
    → (u == v [ B ↓ p ])
    → (u == transport! B p v)
  to-transp! {p = idp} idp = idp

  to-transp!-β : (B : A → Type j)
    {a a' : A} (p : a == a')
    {u : B a} {v : B a'}
    (q : u == transport! B p v)
    → to-transp! (from-transp! B p q) == q
  to-transp!-β B idp idp = idp

  to-transp!-η : {B : A → Type j} {a a' : A} {p : a == a'}
    {u : B a} {v : B a'}
    (q : u == v [ B ↓ p ])
    → from-transp! B p (to-transp! q) == q
  to-transp!-η {p = idp} idp = idp

  to-transp!-equiv : (B : A → Type j) {a a' : A} (p : a == a')
    {u : B a} {v : B a'} → (u == v [ B ↓ p ]) ≃ (u == transport! B p v)
  to-transp!-equiv B p =
    equiv to-transp! (from-transp! B p) (to-transp!-β B p) (to-transp!-η)
{- Various other lemmas -}
{- Used for defining the recursor from the eliminator for 1-HIT -}
apd=cst-in : ∀ {i j} {A : Type i} {B : Type j} {f : A → B}
{a a' : A} {p : a == a'} {q : f a == f a'}
→ apd f p == ↓-cst-in q → ap f p == q
apd=cst-in {p = idp} x = x
↓-apd-out : ∀ {i j k} {A : Type i} {B : A → Type j} (C : (a : A) → B a → Type k)
{f : Π A B} {x y : A} {p : x == y}
{q : f x == f y [ B ↓ p ]} (r : apd f p == q)
{u : C x (f x)} {v : C y (f y)}
→ u == v [ uncurry C ↓ pair= p q ]
→ u == v [ (λ z → C z (f z)) ↓ p ]
↓-apd-out C {p = idp} idp idp = idp
↓-ap-out= : ∀ {i j k} {A : Type i} {B : Type j} (C : (b : B) → Type k)
(f : A → B) {x y : A} (p : x == y)
{q : f x == f y} (r : ap f p == q)
{u : C (f x)} {v : C (f y)}
→ u == v [ C ↓ q ]
→ u == v [ (λ z → C (f z)) ↓ p ]
↓-ap-out= C f idp idp idp = idp
-- No idea what that is
to-transp-weird : ∀ {i j} {A : Type i} {B : A → Type j}
{u v : A} {d : B u} {d' d'' : B v} {p : u == v}
(q : d == d' [ B ↓ p ]) (r : transport B p d == d'')
→ (from-transp B p r ∙'ᵈ (! r ∙ to-transp q)) == q
to-transp-weird {p = idp} idp idp = idp
-- Something not really clear yet (original note).  Lemmas for moving a
-- path across the two legs of a cospan f : A → C, g : B → C, swapping
-- which of p / q the path-over is taken along.
module _ {i j k} {A : Type i} {B : Type j} {C : Type k} (f : A → C) (g : B → C)
  where

  ↓-swap : {a a' : A} {p : a == a'} {b b' : B} {q : b == b'}
    (r : f a == g b') (s : f a' == g b)
    → (ap f p ∙' s == r [ (λ x → f a == g x) ↓ q ])
    → (r == s ∙ ap g q [ (λ x → f x == g b') ↓ p ])
  ↓-swap {p = idp} {q = idp} r s t = (! t) ∙ ∙'-unit-l s ∙ ! (∙-unit-r s)

  -- Inverse direction of ↓-swap.
  ↓-swap! : {a a' : A} {p : a == a'} {b b' : B} {q : b == b'}
    (r : f a == g b') (s : f a' == g b)
    → (r == s ∙ ap g q [ (λ x → f x == g b') ↓ p ])
    → (ap f p ∙' s == r [ (λ x → f a == g x) ↓ q ])
  ↓-swap! {p = idp} {q = idp} r s t = ∙'-unit-l s ∙ ! (∙-unit-r s) ∙ (! t)

  -- ↓-swap! is a left inverse of ↓-swap.
  ↓-swap-β : {a a' : A} {p : a == a'} {b b' : B} {q : b == b'}
    (r : f a == g b') (s : f a' == g b)
    (t : ap f p ∙' s == r [ (λ x → f a == g x) ↓ q ])
    → ↓-swap! r s (↓-swap r s t) == t
  ↓-swap-β {p = idp} {q = idp} r s t = coh (∙'-unit-l s) (∙-unit-r s) t where
    -- Path-algebra coherence used above.
    coh : ∀ {i} {X : Type i} {x y z t : X} (p : x == y) (q : z == y) (r : x == t)
      → p ∙ ! q ∙ ! (! r ∙ p ∙ ! q) == r
    coh idp idp idp = idp
-- Transporting backwards along p yields a path-over along p.
transp-↓ : ∀ {i j} {A : Type i} (P : A → Type j) {a₁ a₂ : A}
  (p : a₁ == a₂) (y : P a₂) → transport P (! p) y == y [ P ↓ p ]
transp-↓ _ idp _ = idp

-- As transp-↓, with the fibration precomposed with h
-- (transport backwards along ap h p).
transp-ap-↓ : ∀ {i j k} {A : Type i} {B : Type j} (P : B → Type k) (h : A → B)
  {a₁ a₂ : A} (p : a₁ == a₂) (y : P (h a₂))
  → transport P (! (ap h p)) y == y [ P ∘ h ↓ p ]
transp-ap-↓ _ _ idp _ = idp
| {
"alphanum_fraction": 0.4413413039,
"author": null,
"avg_line_length": 34.6472491909,
"converted": null,
"ext": "agda",
"file": null,
"hexsha": "1a276be7495fc14b1af17d24bcd328f9920e6f95",
"include": null,
"lang": "Agda",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2018-12-26T21:31:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-12-26T21:31:57.000Z",
"max_forks_repo_head_hexsha": "66f800adef943afdf08c17b8ecfba67340fead5e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "timjb/HoTT-Agda",
"max_forks_repo_path": "core/lib/PathOver.agda",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "66f800adef943afdf08c17b8ecfba67340fead5e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "timjb/HoTT-Agda",
"max_issues_repo_path": "core/lib/PathOver.agda",
"max_line_length": 93,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "66f800adef943afdf08c17b8ecfba67340fead5e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "timjb/HoTT-Agda",
"max_stars_repo_path": "core/lib/PathOver.agda",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4855,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 10706
} |
import networkx as nx
import numpy as np
import tensorflow as tf
from keras import Input
from keras import backend as K
from keras.models import Model
from matplotlib import pyplot as plt
from skimage import segmentation, color, filters
from skimage.color import rgb2gray, gray2rgb
from skimage.filters import sobel
from skimage.future import graph
from skimage.io import imread
from spektral.layers import MinCutPool
from tqdm import tqdm
def weight_boundary(graph, src, dst, n):
    """
    Handle merging of nodes of a region boundary region adjacency graph.

    Computes the ``"weight"`` and ``"count"`` attributes of the edge
    between `n` and the node formed after merging `src` and `dst`: the
    count is the sum of the two edge counts and the weight is the
    count-weighted average of the two edge weights.

    Parameters
    ----------
    graph : RAG
        The graph under consideration.
    src, dst : int
        The vertices in `graph` to be merged.
    n : int
        A neighbor of `src` or `dst` or both.

    Returns
    -------
    data : dict
        A dictionary with the "weight" and "count" attributes to be
        assigned for the merged node.
    """
    # Missing edges contribute nothing to the weighted average.
    absent = {'weight': 0.0, 'count': 0}
    src_edge = graph[src].get(n, absent)
    dst_edge = graph[dst].get(n, absent)

    merged_count = src_edge['count'] + dst_edge['count']
    weighted_sum = (src_edge['count'] * src_edge['weight']
                    + dst_edge['count'] * dst_edge['weight'])

    return {
        'count': merged_count,
        'weight': weighted_sum / merged_count
    }
def merge_boundary(graph, src, dst):
    """Callback invoked before two nodes are merged.

    Intentionally a no-op: all required bookkeeping happens in
    ``weight_boundary`` after the merge.
    """
    return None
# ---- Configuration -------------------------------------------------------
OVER_SEG = "felzen"  # over-segmentation method: "slic" | "felzen" | "quick" | "water"
PLOTS_ON = True      # show matplotlib figures
ALGO = "GNN"         # merging algorithm: "hier" | "ncut" | "thresh" | "GNN"
ITER = 40000         # training iterations for the GNN branch
n_clust = 4          # number of MinCutPool clusters
ACTIV = None         # activation passed to MinCutPool
H_ = None            # `h` parameter passed to MinCutPool

# DATA
img = imread('data/horse2.jpeg')

# BUILD GRAPH
# Hyper-segmentation: split the image into many small superpixels,
# which become the nodes of the region adjacency graph below.
if OVER_SEG == "slic":
    segments = segmentation.slic(img, compactness=3, n_segments=600, sigma=1)
elif OVER_SEG == "felzen":
    segments = segmentation.felzenszwalb(img, scale=100, sigma=1.0, min_size=50)
elif OVER_SEG == "quick":
    segments = segmentation.quickshift(gray2rgb(img), kernel_size=3, max_dist=6, ratio=0.5)
elif OVER_SEG == "water":
    gradient = sobel(rgb2gray(img))
    segments = segmentation.watershed(gradient, markers=400, compactness=0.001)
    # watershed labels start at 1; shift to 0-based like the other methods
    segments -= 1
else:
    raise ValueError(OVER_SEG)
# Region Adjacency Graph
# Merge superpixels into final segments with the chosen algorithm.
if ALGO == "hier":
    # Hierarchical merging driven by boundary strength (Sobel edges).
    edges = filters.sobel(color.rgb2gray(img))
    g = graph.rag_boundary(segments, edges)
    labels = graph.merge_hierarchical(segments, g, thresh=0.08, rag_copy=True,
                                      in_place_merge=True,
                                      merge_func=merge_boundary,
                                      weight_func=weight_boundary)
elif ALGO == "ncut":
    # Classic normalized cut on a color-similarity RAG.
    g = graph.rag_mean_color(img, segments, mode='similarity')
    labels = graph.cut_normalized(segments, g, thresh=0.0002, num_cuts=20, in_place=False)
elif ALGO == "thresh":
    # Cut all RAG edges whose color distance exceeds the threshold.
    g = graph.rag_mean_color(img, segments, mode='distance')
    labels = graph.cut_threshold(segments, g, 30, in_place=False)
elif ALGO == "GNN":
    # Cluster RAG nodes with a MinCutPool layer trained unsupervised
    # on its own relaxed min-cut loss (TF1 session-style training).
    g = graph.rag_mean_color(img, segments, mode='similarity')
    A = nx.to_scipy_sparse_matrix(g)
    # Node features: mean and total color of each superpixel.
    X_m = np.empty((A.shape[0], 3))
    X_t = np.empty((A.shape[0], 3))
    y = np.empty((A.shape[0],))
    for n, d in g.nodes(data=True):
        X_m[n] = d['mean color']
        X_t[n] = d['total color']
        y[n] = d['labels'][0]
    # Normalize each feature group to [0, 1] before concatenating.
    X_m = (X_m / np.max(X_m)).astype(np.float32)
    X_t = (X_t / np.max(X_t)).astype(np.float32)
    X = np.concatenate((X_m, X_t), axis=-1)
    n_feat = X.shape[1]

    # Model: a single MinCutPool layer fed via TF1 placeholders.
    X_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, n_feat), name='X_in'))
    A_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, None)), name='A_in')
    S_in = Input(tensor=tf.placeholder(tf.int32, shape=(None,), name='segment_ids_in'))
    pool1, adj1, seg1, C = MinCutPool(n_clust, activation=ACTIV, h=H_)([X_in, A_in, S_in])
    model = Model([X_in, A_in, S_in], [pool1, seg1])
    # No supervised loss: only the layer's internal losses are minimized.
    model.compile('adam', None)

    # Setup
    sess = K.get_session()
    loss = model.total_loss
    opt = tf.train.AdamOptimizer(learning_rate=1e-3)
    train_step = opt.minimize(loss)

    # Initialize all variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Fit layer
    tr_feed_dict = {X_in: X,
                    A_in: A.todense(),  # sp_matrix_to_sp_tensor_value(A),
                    S_in: y}
    layer_out = [sess.run([loss], feed_dict=tr_feed_dict)[0]]
    try:
        # Full-batch training; Ctrl-C stops early and keeps current weights.
        for _ in tqdm(range(ITER)):
            outs = sess.run([train_step, loss], feed_dict=tr_feed_dict)
            layer_out.append(outs[1])
        x_pool_, seg_pool_ = sess.run([model.output], feed_dict=tr_feed_dict)[0]
    except KeyboardInterrupt:
        print('training interrupted!')

    if PLOTS_ON:
        plt.plot(layer_out, label='Unsupervised Loss')
        plt.legend()
        plt.ylabel('Loss')
        plt.xlabel('Iteration')
        plt.show()

    # Hard cluster assignment: arg-max over the soft assignment matrix C,
    # then map each superpixel to its cluster id.
    C_ = sess.run([C], feed_dict=tr_feed_dict)[0]
    c = np.argmax(C_, axis=-1)
    labels = c[segments]
else:
    raise ValueError(ALGO)
# Rebuild the adjacency matrix of the (possibly merged) RAG and report size.
A = nx.to_scipy_sparse_matrix(g)
print(len(g.nodes), 'nodes')

if PLOTS_ON:
    # Side-by-side: oversegmentation, RAG overlay, final clustering.
    out_seg = color.label2rgb(segments, img, kind='avg')
    out_seg_bound = segmentation.mark_boundaries(out_seg, segments, (0, 0, 0))
    out_clust = color.label2rgb(labels, img, kind='avg')
    fig, ax = plt.subplots(nrows=1, ncols=3, sharex=True, sharey=True, figsize=(15, 5))
    ax[0].imshow(out_seg)
    ax[0].set_title('Oversegmentation', fontsize=15)
    ax1 = graph.show_rag(segments, g, img, border_color=None, img_cmap='gray', edge_cmap='magma', ax=ax[1])
    # plt.colorbar(ax1, ax=ax[1])
    ax[1].set_title('Region Adjacency Graph', fontsize=15)
    ax[2].imshow(out_clust)
    ax[2].set_title('MinCutPool', fontsize=15)
    for a in ax:
        a.axis('off')
    plt.tight_layout()
    plt.show()

# Leftover experimentation snippets (kept as reference by the author).
# segments = segmentation.felzenszwalb(img, scale=50, sigma=1.5, min_size=50)
# out_seg = color.label2rgb(segments, img, kind='avg')
# plt.imshow(out_seg)
# ax = plt.gca()
# # ax.set_title('Oversegmentation',fontsize=15)
# ax.axis('off')
# plt.tight_layout()
#
# plt.imshow(img)
# ax = plt.gca()
# # ax.set_title('Original image',fontsize=15)
# ax.axis('off')
# plt.tight_layout()
#
# graph.show_rag(segments, g, img, border_color=None, img_cmap='gray', edge_cmap='magma')
# ax = plt.gca()
# # ax.set_title('Region Adjacency Graph',fontsize=15)
# ax.axis('off')
# plt.tight_layout()
# plt.show()

# plt.imshow(out_clust)
# ax = plt.gca()
# # ax.set_title('Segmentation', fontsize=15)
# ax.axis('off')
# plt.tight_layout()
# plt.show()
| {
"alphanum_fraction": 0.6376106195,
"author": null,
"avg_line_length": 31.9811320755,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "65c8e103cc734279f494cb1f0dce85fba8190293",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 37,
"max_forks_repo_forks_event_max_datetime": "2022-03-30T09:36:56.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-06-15T09:26:17.000Z",
"max_forks_repo_head_hexsha": "4be907473fdf2f241265bc20a8a23aa12fb8f0df",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hoangdzung/Spectral-Clustering-with-Graph-Neural-Networks-for-Graph-Pooling",
"max_forks_repo_path": "Segmentation.py",
"max_issues_count": 9,
"max_issues_repo_head_hexsha": "4be907473fdf2f241265bc20a8a23aa12fb8f0df",
"max_issues_repo_issues_event_max_datetime": "2022-02-10T01:56:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-28T08:47:09.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hoangdzung/Spectral-Clustering-with-Graph-Neural-Networks-for-Graph-Pooling",
"max_issues_repo_path": "Segmentation.py",
"max_line_length": 107,
"max_stars_count": 183,
"max_stars_repo_head_hexsha": "4be907473fdf2f241265bc20a8a23aa12fb8f0df",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hoangdzung/Spectral-Clustering-with-Graph-Neural-Networks-for-Graph-Pooling",
"max_stars_repo_path": "Segmentation.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T22:30:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-06-11T11:39:04.000Z",
"num_tokens": 1895,
"path": null,
"reason": "import numpy,import networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 6780
} |
"""Classification using random forest."""
import logging
import pickle
import numpy as np
from sklearn.ensemble import RandomForestClassifier
logger = logging.getLogger(__name__)
class RandomForest:
    """Train or classify using a RandomForest model."""

    def __init__(self, num_features, model=None):
        """Create instance of RandomForest.

        Args:
            num_features (int): Number of features to train or classify.
            model (str or sklearn.ensemble.RandomForestClassifier, optional):
                Path to a pickled trained model, or an already trained
                RandomForestClassifier instance. Defaults to None.
        """
        self.num_features = num_features
        # None when no model was given or loading/validation failed.
        self.model = self.load_model(model)

    def load_model(self, model):
        """Load and validate a trained sklearn RandomForestClassifier model.

        Args:
            model (str or sklearn.ensemble.RandomForestClassifier or None):
                Path to a pickled model, or a model instance.

        Returns:
            sklearn.ensemble.RandomForestClassifier or None: The validated
            model, or None if loading or validation failed.
        """
        if model is None:
            return None
        # Check if the model input is a path or an sklearn random forest model
        if isinstance(model, str):
            try:
                # Fix: use a context manager so the file handle is closed
                # (the original `pickle.load(open(...))` leaked it).
                with open(model, "rb") as fp:
                    loaded = pickle.load(fp)
                return self.validate_model(loaded)
            except OSError:
                logger.error("Could not load RandomForestModel")
                return None
        elif isinstance(model, RandomForestClassifier):
            # Validate model based on parameters
            return self.validate_model(model)
        return None

    def validate_model(self, model):
        """Validate a model against the current class instantiation.

        Args:
            model (sklearn.ensemble.RandomForestClassifier): A trained
                RandomForestClassifier.

        Returns:
            sklearn.ensemble.RandomForestClassifier or None: The model if it
            is a RandomForestClassifier whose feature count matches
            ``self.num_features``, otherwise None.
        """
        if not isinstance(model, RandomForestClassifier):
            logger.error(
                "Can not validate model, is not of instance sklearn.ensemble.forest.RandomForestClassifier"
            )
            return None
        # NOTE(review): `n_features_` was removed in newer scikit-learn
        # (replaced by `n_features_in_`) — confirm against the pinned version.
        if not model.n_features_ == self.num_features:
            logger.error(
                "Number of features is different from model parameter. Model has: %d, input was: %d",
                model.n_features_,
                self.num_features,
            )
            return None
        return model

    def train(self, X, y, num_trees=100, processors=-1):
        """Train/Fit a RandomForestClassifier using observations X and labels y.

        Args:
            X (np.ndarray): 2D matrix of feature observations, shape (n, f).
            y (np.ndarray): 1D vector of class labels, shape (n,).
            num_trees (int): Number of trees used in the forest.
            processors (int): Number of parallel jobs used to train; -1 means
                all processors.

        Returns:
            sklearn.ensemble.RandomForestClassifier or None: The trained
            model, or None if this instance already holds a model.
        """
        # Training on top of an already existing model is not supported.
        if self.model is not None:
            logger.error(
                "Surfclass does not support training an already existing model.."
            )
            return None
        # Validate X and y against the parameters given in __init__.
        assert isinstance(X, np.ndarray), "X is not a valid numpy.ndarray"
        assert (
            X.ndim == 2
        ), "X does not have the correct shape, should be of form (n,f): observations 1D, and feature"
        assert y.ndim == 1, "y does not have the correct shape, should be 1D vector"
        assert (
            X.shape[1] == self.num_features
        ), "Model and input does have the same number of features"
        assert (
            X.shape[0] == y.shape[0]
        ), "Number of class observations does not match number of feature observations."

        rf = RandomForestClassifier(
            n_estimators=num_trees, oob_score=False, verbose=0, n_jobs=processors
        )
        # Fit and keep the model so classify() can be called immediately after.
        rf_trained = rf.fit(X, y)
        self.model = rf_trained
        return rf_trained

    def classify(self, X, prob=False, processors=None):
        """Classify X using the instantiated RandomForestClassifier model.

        Args:
            X (np.ndarray): 2D matrix of feature observations, shape (n, f).
            prob (bool): If True, also return the highest class probability
                per observation.
            processors (int, optional): Number of parallel jobs; -1 means all
                processors, None keeps the model default.

        Returns:
            np.ndarray or tuple(np.ndarray, np.ndarray): Predicted classes,
            or (classes, probabilities) when ``prob`` is True.
        """
        assert (
            self.model is not None
        ), "Could not find a model, please either train a model or initialise the class with a valid model path"
        # Re-validate: the model attribute may have been replaced since init.
        model = self.validate_model(self.model)
        if isinstance(processors, int):
            model.n_jobs = processors
        # Test the X input is acceptable for the given model.
        assert isinstance(X, np.ndarray), "X is not a valid numpy array"
        assert (
            X.ndim == 2
        ), "X does not have the correct shape, should be of form (n,f): observations 1D, and feature"
        assert (
            X.shape[1] == self.num_features
        ), "Model and input does have the same number of features"
        # Predict class probabilities and take the arg-max class.
        # Fix: consistently use the validated local `model` (the original
        # mixed `self.model.classes_` with `model.predict_proba`).
        classes = model.classes_
        class_prediction_prob = model.predict_proba(X)
        class_prediction = classes[np.argmax(class_prediction_prob, axis=1)]
        # Return tuple with class prediction and highest class probability.
        if prob:
            return (class_prediction, np.amax(class_prediction_prob, axis=1))
        return class_prediction
| {
"alphanum_fraction": 0.6272653061,
"author": null,
"avg_line_length": 36.8975903614,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "00fd43fe8cb0ed869dc7154a98997ca2f36c5693",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2019-12-18T15:39:27.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-12-18T15:22:09.000Z",
"max_forks_repo_head_hexsha": "0534d6400f0e3636150079ac3bb2cf676f472233",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "plimkilde/surfclass",
"max_forks_repo_path": "src/surfclass/randomforest.py",
"max_issues_count": 17,
"max_issues_repo_head_hexsha": "0534d6400f0e3636150079ac3bb2cf676f472233",
"max_issues_repo_issues_event_max_datetime": "2019-12-17T08:12:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-10-25T12:57:34.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "plimkilde/surfclass",
"max_issues_repo_path": "src/surfclass/randomforest.py",
"max_line_length": 121,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "0534d6400f0e3636150079ac3bb2cf676f472233",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "plimkilde/surfclass",
"max_stars_repo_path": "src/surfclass/randomforest.py",
"max_stars_repo_stars_event_max_datetime": "2019-11-08T12:41:33.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-10-30T10:05:58.000Z",
"num_tokens": 1222,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6125
} |
import numpy as np
import pandas as pd
from ..base import sim_aux_rdash, MatchingResult
from ..sim_measures import sim_zero_one
def _score_min(rdash, aux, sim_scalar):
    """Row-wise minimum of the rdash-vs-aux similarity matrix."""
    similarity = sim_aux_rdash(rdash, aux, sim_scalar)
    return similarity.min(axis=1)
def algorithm1a(rdash, aux,
                sim_scalar=sim_zero_one,
                alpha=0.01) -> MatchingResult:
    """Select rows of rdash whose minimum similarity to aux exceeds alpha.

    All matching rows receive a uniform probability distribution; when no
    row matches, the probability distribution is None.
    """
    scores = _score_min(rdash, aux, sim_scalar)
    candidates = rdash[scores > alpha]
    if candidates.empty:
        return MatchingResult(match=candidates, pr=None, scores=scores,
                              info=None)

    k = len(candidates.index)
    uniform_pr = pd.Series(np.full(k, 1.0 / k), index=candidates.index)
    return MatchingResult(match=candidates, pr=uniform_pr, scores=scores,
                          info=None)
| {
"alphanum_fraction": 0.667844523,
"author": null,
"avg_line_length": 27.3870967742,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "39ca6b549a9a98bc2175b736c3f630e9ff01862d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-12-22T20:29:12.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-12-22T20:29:12.000Z",
"max_forks_repo_head_hexsha": "a8b94ad03b11b15c3d6a9b1f39ed24ad4684db62",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "eugenma/pyrobdean",
"max_forks_repo_path": "pyrobdean/algorithms/algorithm1a.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a8b94ad03b11b15c3d6a9b1f39ed24ad4684db62",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "eugenma/pyrobdean",
"max_issues_repo_path": "pyrobdean/algorithms/algorithm1a.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a8b94ad03b11b15c3d6a9b1f39ed24ad4684db62",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "eugenma/pyrobdean",
"max_stars_repo_path": "pyrobdean/algorithms/algorithm1a.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 208,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 849
} |
      SUBROUTINE POLY_OPNF ( nop, lunp, outfil, iret )
C************************************************************************
C* POLY_OPNF                                                            *
C*                                                                      *
C* This subroutine builds the name of, and opens, a Common Alerting     *
C* Protocol (CAP) file.  Naming format: YYMMDDHH_cc_nn.xml, where cc    *
C* is the basin code (pac, atl or tpc) and nn the nth polygon.          *
C*                                                                      *
C* POLY_OPNF ( NOP, LUNP, OUTFIL, IRET )                                *
C*                                                                      *
C* Input parameters:                                                    *
C*      NOP             INTEGER         Nth polygon                     *
C*                                                                      *
C* Output parameters:                                                   *
C*      LUNP            INTEGER         LUN for CAP message file        *
C*      OUTFIL          CHARACTER       CAP output file name            *
C*      IRET            INTEGER         Return code                     *
C*                                        0 = normal return             *
C*                                       -4 = cannot open file          *
C*                                                                      *
C* Note: OUTFIL is only assigned when IDXZON (from gpolyg.cmn) is 1,    *
C* 2 or 3; for any other value it is left unchanged.                    *
C**                                                                     *
C* Log:                                                                 *
C* T. Lee/SAIC           2/08                                           *
C************************************************************************
      INCLUDE 'gpolyg.cmn'
      CHARACTER outfil*(*)
      CHARACTER cap*3, xml*4, cnop*3
      DATA cap /'CAP'/, xml/'.xml'/
C-----------------------------------------------------------------------
      iret = 0
C
C*    Convert the polygon number to a string and get string lengths.
C
      CALL ST_LCUC ( cap, cap, ier )
      CALL ST_INCH ( nop, cnop, ier )
      CALL ST_LSTR ( cnop, nc, ier )
      CALL ST_LSTR ( gdattm, ngd, ier )
C
C*    Build the file name from date/time, basin code and polygon number.
C
      IF ( idxzon .eq. 1 ) THEN
          outfil = gdattm (:ngd) // '_pac_' // cnop ( : nc ) // xml
        ELSE IF ( idxzon .eq. 2 ) THEN
          outfil = gdattm (:ngd) // '_atl_' // cnop ( : nc ) // xml
        ELSE IF ( idxzon .eq. 3 ) THEN
          outfil = gdattm (:ngd) // '_tpc_' // cnop ( : nc ) // xml
      END IF
C
C*    Open the file for writing.
C
      CALL FL_SWOP ( outfil, lunp, ier )
C
      IF ( ier .lt. 0 ) THEN
          iret = -4
          RETURN
      END IF
C*
      RETURN
      END
| {
"alphanum_fraction": 0.4762808349,
"author": null,
"avg_line_length": 29.8301886792,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "ceea14743e182bdb8e5ec589d94a25ae39eb2887",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 27,
"max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z",
"max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "oxelson/gempak",
"max_forks_repo_path": "gempak/source/programs/gd/gpolyg/poly_opnf.f",
"max_issues_count": 60,
"max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "oxelson/gempak",
"max_issues_repo_path": "gempak/source/programs/gd/gpolyg/poly_opnf.f",
"max_line_length": 73,
"max_stars_count": 42,
"max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "oxelson/gempak",
"max_stars_repo_path": "gempak/source/programs/gd/gpolyg/poly_opnf.f",
"max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z",
"num_tokens": 568,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1581
} |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_adjoint
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
# Short aliases used throughout the tests below.
linalg = linalg_lib
LinearOperatorAdjoint = linear_operator_adjoint.LinearOperatorAdjoint  # pylint: disable=invalid-name
class LinearOperatorAdjointTest(
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Most tests done in the base class LinearOperatorDerivedClassTest."""

  def setUp(self):
    # Complex matmul/solve comparisons need a looser tolerance.
    # NOTE(review): does not call super().setUp() — confirm the base class
    # populates _atol/_rtol before this runs.
    self._atol[dtypes.complex64] = 1e-5
    self._rtol[dtypes.complex64] = 1e-5

  def operator_and_matrix(self,
                          build_info,
                          dtype,
                          use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    # Build the (operator, dense reference matrix) pair consumed by the
    # harness in the base class.  The operator under test is the adjoint of
    # a full/triangular operator, so the reference is linalg.adjoint(matrix).
    shape = list(build_info.shape)

    if ensure_self_adjoint_and_pd:
      matrix = linear_operator_test_util.random_positive_definite_matrix(
          shape, dtype, force_well_conditioned=True)
    else:
      matrix = linear_operator_test_util.random_tril_matrix(
          shape, dtype, force_well_conditioned=True, remove_upper=True)

    lin_op_matrix = matrix

    if use_placeholder:
      lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)

    if ensure_self_adjoint_and_pd:
      operator = LinearOperatorAdjoint(
          linalg.LinearOperatorFullMatrix(
              lin_op_matrix, is_positive_definite=True, is_self_adjoint=True))
    else:
      operator = LinearOperatorAdjoint(
          linalg.LinearOperatorLowerTriangular(lin_op_matrix))

    return operator, linalg.adjoint(matrix)

  def test_base_operator_hint_used(self):
    # Hints set on the wrapped operator propagate to the adjoint.
    # The matrix values do not effect auto-setting of the flags.
    matrix = [[1., 0.], [1., 1.]]
    operator = linalg.LinearOperatorFullMatrix(
        matrix,
        is_positive_definite=True,
        is_non_singular=True,
        is_self_adjoint=False)
    operator_adjoint = LinearOperatorAdjoint(operator)
    self.assertTrue(operator_adjoint.is_positive_definite)
    self.assertTrue(operator_adjoint.is_non_singular)
    self.assertFalse(operator_adjoint.is_self_adjoint)

  def test_supplied_hint_used(self):
    # Hints supplied directly to LinearOperatorAdjoint are honored.
    # The matrix values do not effect auto-setting of the flags.
    matrix = [[1., 0.], [1., 1.]]
    operator = linalg.LinearOperatorFullMatrix(matrix)
    operator_adjoint = LinearOperatorAdjoint(
        operator,
        is_positive_definite=True,
        is_non_singular=True,
        is_self_adjoint=False)
    self.assertTrue(operator_adjoint.is_positive_definite)
    self.assertTrue(operator_adjoint.is_non_singular)
    self.assertFalse(operator_adjoint.is_self_adjoint)

  def test_contradicting_hints_raise(self):
    # Hints that contradict the wrapped operator's hints raise ValueError.
    # The matrix values do not effect auto-setting of the flags.
    matrix = [[1., 0.], [1., 1.]]
    operator = linalg.LinearOperatorFullMatrix(
        matrix, is_positive_definite=False)
    with self.assertRaisesRegexp(ValueError, "positive-definite"):
      LinearOperatorAdjoint(operator, is_positive_definite=True)

    operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False)
    with self.assertRaisesRegexp(ValueError, "self-adjoint"):
      LinearOperatorAdjoint(operator, is_self_adjoint=True)

  def test_name(self):
    # The adjoint wrapper appends "_adjoint" to the base operator's name.
    matrix = [[11., 0.], [1., 8.]]
    operator = linalg.LinearOperatorFullMatrix(
        matrix, name="my_operator", is_non_singular=True)

    operator = LinearOperatorAdjoint(operator)

    self.assertEqual("my_operator_adjoint", operator.name)

  def test_matmul_adjoint_operator(self):
    # Real case: adjoint coincides with plain transpose.
    matrix1 = np.random.randn(4, 4)
    matrix2 = np.random.randn(4, 4)
    full_matrix1 = linalg.LinearOperatorFullMatrix(matrix1)
    full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)

    self.assertAllClose(
        np.matmul(matrix1, matrix2.T),
        self.evaluate(
            full_matrix1.matmul(full_matrix2, adjoint_arg=True).to_dense()))

    self.assertAllClose(
        np.matmul(matrix1.T, matrix2),
        self.evaluate(
            full_matrix1.matmul(full_matrix2, adjoint=True).to_dense()))

    self.assertAllClose(
        np.matmul(matrix1.T, matrix2.T),
        self.evaluate(
            full_matrix1.matmul(
                full_matrix2, adjoint=True, adjoint_arg=True).to_dense()))

  def test_matmul_adjoint_complex_operator(self):
    # Complex case: adjoint is the conjugate transpose.
    if test.is_built_with_rocm():
      self.skipTest("ROCm does not support BLAS operations for complex types")
    matrix1 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
    matrix2 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
    full_matrix1 = linalg.LinearOperatorFullMatrix(matrix1)
    full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)

    self.assertAllClose(
        np.matmul(matrix1, matrix2.conj().T),
        self.evaluate(
            full_matrix1.matmul(full_matrix2, adjoint_arg=True).to_dense()))

    self.assertAllClose(
        np.matmul(matrix1.conj().T, matrix2),
        self.evaluate(
            full_matrix1.matmul(full_matrix2, adjoint=True).to_dense()))

    self.assertAllClose(
        np.matmul(matrix1.conj().T, matrix2.conj().T),
        self.evaluate(
            full_matrix1.matmul(
                full_matrix2, adjoint=True, adjoint_arg=True).to_dense()))

  def test_matvec(self):
    # operator.H is the adjoint; on a real matrix that is the transpose.
    matrix = np.array([[1., 2.], [3., 4.]])
    x = np.array([1., 2.])
    operator = linalg.LinearOperatorFullMatrix(matrix)
    self.assertAllClose(matrix.dot(x), self.evaluate(operator.matvec(x)))
    self.assertAllClose(matrix.T.dot(x), self.evaluate(operator.H.matvec(x)))

  def test_solve_adjoint_operator(self):
    # Solving with adjoint flags matches a triangular solve against the
    # (transposed) dense matrices.
    matrix1 = self.evaluate(
        linear_operator_test_util.random_tril_matrix(
            [4, 4], dtype=dtypes.float64, force_well_conditioned=True))
    matrix2 = np.random.randn(4, 4)
    full_matrix1 = linalg.LinearOperatorLowerTriangular(
        matrix1, is_non_singular=True)
    full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)

    self.assertAllClose(
        self.evaluate(linalg.triangular_solve(matrix1, matrix2.T)),
        self.evaluate(
            full_matrix1.solve(full_matrix2, adjoint_arg=True).to_dense()))

    self.assertAllClose(
        self.evaluate(
            linalg.triangular_solve(
                matrix1.T, matrix2, lower=False)),
        self.evaluate(
            full_matrix1.solve(full_matrix2, adjoint=True).to_dense()))

    self.assertAllClose(
        self.evaluate(
            linalg.triangular_solve(matrix1.T, matrix2.T, lower=False)),
        self.evaluate(
            full_matrix1.solve(
                full_matrix2, adjoint=True, adjoint_arg=True).to_dense()))

  def test_solve_adjoint_complex_operator(self):
    # Complex analogue of test_solve_adjoint_operator.
    if test.is_built_with_rocm():
      self.skipTest("ROCm does not support BLAS operations for complex types")
    matrix1 = self.evaluate(linear_operator_test_util.random_tril_matrix(
        [4, 4], dtype=dtypes.complex128, force_well_conditioned=True) +
                            1j * linear_operator_test_util.random_tril_matrix(
                                [4, 4], dtype=dtypes.complex128,
                                force_well_conditioned=True))
    matrix2 = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
    full_matrix1 = linalg.LinearOperatorLowerTriangular(
        matrix1, is_non_singular=True)
    full_matrix2 = linalg.LinearOperatorFullMatrix(matrix2)

    self.assertAllClose(
        self.evaluate(linalg.triangular_solve(matrix1, matrix2.conj().T)),
        self.evaluate(
            full_matrix1.solve(full_matrix2, adjoint_arg=True).to_dense()))

    self.assertAllClose(
        self.evaluate(
            linalg.triangular_solve(
                matrix1.conj().T, matrix2, lower=False)),
        self.evaluate(
            full_matrix1.solve(full_matrix2, adjoint=True).to_dense()))

    self.assertAllClose(
        self.evaluate(
            linalg.triangular_solve(
                matrix1.conj().T, matrix2.conj().T, lower=False)),
        self.evaluate(
            full_matrix1.solve(
                full_matrix2, adjoint=True, adjoint_arg=True).to_dense()))

  def test_solvevec(self):
    # operator.H.solvevec matches solving with the transposed inverse.
    matrix = np.array([[1., 2.], [3., 4.]])
    inv_matrix = np.linalg.inv(matrix)
    x = np.array([1., 2.])
    operator = linalg.LinearOperatorFullMatrix(matrix)
    self.assertAllClose(inv_matrix.dot(x), self.evaluate(operator.solvevec(x)))
    self.assertAllClose(
        inv_matrix.T.dot(x), self.evaluate(operator.H.solvevec(x)))
class LinearOperatorAdjointNonSquareTest(
    linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
  """Tests done in the base class NonSquareLinearOperatorDerivedClassTest."""

  def operator_and_matrix(self, build_info, dtype, use_placeholder):
    # Build the (operator, dense reference matrix) pair for the non-square
    # harness.
    shape_before_adjoint = list(build_info.shape)
    # We need to swap the last two dimensions because we are taking the adjoint
    # of this operator
    shape_before_adjoint[-1], shape_before_adjoint[-2] = (
        shape_before_adjoint[-2], shape_before_adjoint[-1])
    matrix = linear_operator_test_util.random_normal(
        shape_before_adjoint, dtype=dtype)

    lin_op_matrix = matrix

    if use_placeholder:
      lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)

    operator = LinearOperatorAdjoint(
        linalg.LinearOperatorFullMatrix(lin_op_matrix))

    return operator, linalg.adjoint(matrix)
if __name__ == "__main__":
  # Register the parameterized base-class tests, then run.
  # NOTE(review): LinearOperatorAdjointNonSquareTest is not passed to
  # add_tests — confirm that is intentional.
  linear_operator_test_util.add_tests(LinearOperatorAdjointTest)
  test.main()
| {
"alphanum_fraction": 0.6806895906,
"author": null,
"avg_line_length": 39.5296296296,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b948615235f2b7ec215e78569caa6c0bbad16a6d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e08079463bf43e5963acc41da1f57e95603f8080",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "uve/tensorflow",
"max_forks_repo_path": "tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e08079463bf43e5963acc41da1f57e95603f8080",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "uve/tensorflow",
"max_issues_repo_path": "tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py",
"max_line_length": 102,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e08079463bf43e5963acc41da1f57e95603f8080",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "uve/tensorflow",
"max_stars_repo_path": "tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2395,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10673
} |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid as fluid
import paddle
import numpy as np
import unittest
class TestReaderReset(unittest.TestCase):
    """Verify that a recordio data reader can be reset and re-read many times.

    ``prepare_data`` writes ``total_ins_num`` synthetic (data, label) records
    to a recordio file; ``main`` then reads the whole file ``test_pass_num``
    times through a ParallelExecutor, resetting the reader at the end of each
    pass and checking that every instance appears exactly once per pass.
    """

    def prepare_data(self):
        """Write the synthetic dataset to ``self.data_file_name``."""

        def fake_data_generator():
            # Instance n is a length-3 vector of n's labelled n, so data and
            # label can be cross-checked against each other after reading.
            for n in range(self.total_ins_num):
                yield np.ones(self.ins_shape) * n, n

        # Prepare data
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            reader = paddle.batch(fake_data_generator, batch_size=1)
            feeder = fluid.DataFeeder(
                feed_list=[
                    fluid.layers.data(
                        name='data', shape=[3], dtype='float32'),
                    fluid.layers.data(
                        name='label', shape=[1], dtype='int64'),
                ],
                place=fluid.CPUPlace())
            fluid.recordio_writer.convert_reader_to_recordio_file(
                self.data_file_name, reader, feeder)

    def setUp(self):
        self.use_cuda = fluid.core.is_compiled_with_cuda()
        self.data_file_name = './reader_reset_test.recordio'
        self.ins_shape = [3]
        self.batch_size = 5
        self.total_ins_num = self.batch_size * 20
        self.test_pass_num = 100
        self.prepare_data()

    def main(self, with_double_buffer):
        """Run ``test_pass_num`` passes over the file, resetting between passes.

        Args:
            with_double_buffer: wrap the reader in a double buffer and enable
                data balancing in the build strategy.
        """
        main_prog = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            data_reader_handle = fluid.layers.io.open_files(
                filenames=[self.data_file_name],
                shapes=[[-1] + self.ins_shape, [-1, 1]],
                lod_levels=[0, 0],
                dtypes=['float32', 'int64'],
                thread_num=1,
                pass_num=1)
            data_reader = fluid.layers.io.batch(data_reader_handle,
                                                self.batch_size)
            if with_double_buffer:
                data_reader = fluid.layers.double_buffer(data_reader)
            image, label = fluid.layers.read_file(data_reader)
            fetch_list = [image.name, label.name]
        place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        build_strategy = fluid.BuildStrategy()
        if with_double_buffer:
            build_strategy.enable_data_balance = True
        exec_strategy = fluid.ExecutionStrategy()
        parallel_exe = fluid.ParallelExecutor(
            use_cuda=self.use_cuda,
            main_program=main_prog,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
        # data_appeared[i] is flipped to True the first time instance i is read
        # within the current pass.
        data_appeared = [False] * self.total_ins_num
        pass_count = 0
        while True:
            try:
                data_val, label_val = parallel_exe.run(fetch_list,
                                                       return_numpy=True)
                ins_num = data_val.shape[0]
                broadcasted_label = np.ones((ins_num, ) + tuple(
                    self.ins_shape)) * label_val.reshape((ins_num, 1))
                # BUG FIX: the original asserted
                #     assertEqual(data_val.all(), broadcasted_label.all())
                # which compares two scalar booleans and passes for almost any
                # pair of arrays.  Compare the arrays element-wise instead.
                self.assertTrue(np.allclose(data_val, broadcasted_label))
                # Each label must appear at most once per pass.
                for l in label_val:
                    self.assertFalse(data_appeared[l[0]])
                    data_appeared[l[0]] = True
            except fluid.core.EOFException:
                pass_count += 1
                if with_double_buffer:
                    # NOTE(review): with double buffering, the tail
                    # device_count * batch_size slots are discarded before the
                    # completeness check — presumably because buffered batches
                    # may be dropped on EOF; confirm against reader semantics.
                    data_appeared = data_appeared[:-parallel_exe.device_count *
                                                  self.batch_size]
                for i in data_appeared:
                    self.assertTrue(i)
                if pass_count < self.test_pass_num:
                    data_appeared = [False] * self.total_ins_num
                    data_reader_handle.reset()
                else:
                    break

    def test_all(self):
        self.main(with_double_buffer=False)
        self.main(with_double_buffer=True)
# Run the reader-reset test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"alphanum_fraction": 0.5834786399,
"author": null,
"avg_line_length": 38.5546218487,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8ad11d76f683d556f05cafc3251acc942efef72f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2019-11-01T22:28:27.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-03-19T22:38:46.000Z",
"max_forks_repo_head_hexsha": "4fa3cee5499c6df0ad6043b0cfa220d09f2034e8",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "jichangjichang/Paddle",
"max_forks_repo_path": "python/paddle/fluid/tests/unittests/test_reader_reset.py",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "4fa3cee5499c6df0ad6043b0cfa220d09f2034e8",
"max_issues_repo_issues_event_max_datetime": "2018-10-15T08:57:40.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-12-05T20:29:08.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "jichangjichang/Paddle",
"max_issues_repo_path": "python/paddle/fluid/tests/unittests/test_reader_reset.py",
"max_line_length": 79,
"max_stars_count": 9,
"max_stars_repo_head_hexsha": "4fa3cee5499c6df0ad6043b0cfa220d09f2034e8",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "jichangjichang/Paddle",
"max_stars_repo_path": "python/paddle/fluid/tests/unittests/test_reader_reset.py",
"max_stars_repo_stars_event_max_datetime": "2020-12-03T14:46:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-12-04T02:58:01.000Z",
"num_tokens": 911,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4588
} |
\chapter{Conclusion}
The aim of our research was to apply deep learning techniques to a WF environment to automatically extract fingerprints from a variable-length trace.
We do in fact show that this is possible by introducing a novel approach to perform the attack, together with two deep learning models, namely a \textit{stacked autoencoder} and a \textit{sequence-to-sequence model}.
The attack works in three main stages.
First, all of the traffic is collected and preprocessed into Tor cells.
Next, the deep learning models attempt to learn underlying patterns in the traffic traces and use these to extract fingerprints through an unsupervised process.
Finally, the extracted fingerprints are used to classify traces into web pages.
Similarly, we also introduce a novel technique for evaluating the performance of such fingerprint generation models, which involves comparing them to a human benchmark.
We achieve this by training existing classifiers on both hand-picked features and the automatically generated features and compare their performance.
This allows us to see how well the deep learning models perform, compared to experts who have done thorough feature analysis.
During all of the performed experiments, we focused on an open-world setting with a local passive adversary against Tor.
We show that for our best setup, we manage to achieve a $93 \%$ accuracy in a binary classification task and a $39 \%$ accuracy in the multiclass classification task.
This is comparable to hand-picked features since they attain a maximum of $93 \%$ and $59 \%$ respectively.
In fact, we even observe that our generated features seem to perform better than certain hand-picked features, given that the classifier is trained on a large amount of unmonitored pages.
We also discovered that a sequence-to-sequence model continuously seemed to perform better than a stacked autoencoder within all the threat models that we examined.
This is most likely due to the fact that the autoencoder assumes that all of its inputs are independent of each other, which is not the case in our specific scenario.
However, the problem still remains that some traces can be extremely long, which results in a slow training of our model.
In fact, it took an average of $8$ hours to train the sequence-to-sequence model, which is considerably slower than current state-of-the-art attacks.
On the other hand, we also showed that once the deep learning model is trained, it can be used to extract fingerprints from traces that were recorded under different conditions.
Hence, the model would only need to be trained once on a large variety of data; it can then be used for a long period of time without retraining, unlike existing classifiers.
On top of evaluating the results, we also made various observations about the traffic data.
For instance, we note that the majority of the information is carried within the first couple of seconds of the trace and that most traces can be represented using vectors of size $200$.
\newpage
Furthermore, we note that our attack is based on several assumptions.
For instance, we assume that the adversary knows where the trace of a single page starts and ends, that the adversary can recreate the exact same conditions such as internet speed and TBB and finally that the content of web pages does not change.
Although the exact same assumptions have been made in previous WF works, we do note that some of these are not realistic and therefore might have a large impact on the scoring statistics if the attack were to be used in a real-life scenario.
Equally important is the impact that false negatives can have on the attack, as outlined by M. Perry \cite{wfpcritique}.
In conclusion, our research does not improve the results of existing works, but it does expose the possibility to automate the fingerprint extraction procedure.
Until now, almost all attacks have relied on a manual feature extraction process that require expertise in the WF domain.
However, we show that this time-consuming process can be automated.
Although the performance of our automatically generated features is currently not as high as that of the hand-picked ones, we believe that, given enough data and the correct deep learning model, an adversary could potentially perform a WF attack by relying solely on automatically generated features.
\section{Future Work} \label{sec:future-works}
This work shows that the WF attacks currently still seem to perform better with hand-picked features rather than automatically generated ones but there is still much room for future improvements.
Here we consider several different ways in which this work could be improved or extended.
Although we definitely will not cover all the different possible extensions, we try to list the most interesting ones.
As previously mentioned in section \ref{sec:classifier-training}, we could add a \textit{softmax layer} on top of the encoder in a trained sequence-to-sequence model.
Not only would this allow us to perform the classification with the sequence-to-sequence model, but it would also allow us to analyse how different evidence affects the classification.
You would technically only need one softmax layer, after the fingerprint has been extracted.
But having one after every cell, allows us to see how different packets change the prediction of our model.
This could then be used as a tool for traffic analysis.
There have also been a variety of different defenses, some of which have been outlined in section \ref{sec:defenses}.
Some works have examined the effectiveness of their attack when these defenses were used \cite{kfingerprinting,wang_cai_johnson_nithyanand_goldberg_2014}.
It would be interesting to see if the deep learning models might still be able to effectively extract fingerprints, even with these defenses deployed.
This could include both training the model on data where the defense was deployed or training it on normal data and analysing whether it can still extract the fingerprints if the defense is deployed during the attack stage.
Again, this could potentially be used as a tool for traffic analysis by trying to hide certain features, we could explore which features our model actually extracts.
Juarez et al. have already shown that WF attacks suffer from the rapid changing nature of the content of web pages \cite{wfpevaluation}.
Thus on top of analysing how defenses impact the attack, we could also potentially analyse how the performance of the fingerprint extraction process is affected over time.
We have already shown that the models are still successful when extracting fingerprints from other datasets.
However, this does not show that the models are unaffected by content changes within web pages.
This could be fully examined by collecting our own data over a period of time and see how the performance of a trained model changes.
If the performance is not affected, we could save a large amount of time retraining the fingerprint extractor.
\newpage
We could also potentially research the possibility that training our model with data collected over time and under different circumstances would also make the model more robust.
Since technically, the more different training instances it sees, the better it should get at identifying features.
Additionally, we could also investigate how well the models perform at identifying features when given more realistic user behavior.
Hence, rather than visiting one page and waiting a certain amount of time before loading the next one, the data can be more realistic such as where the user has multiple tabs open at the same time.
On top of training the model with more realistic browsing data, we could also evaluate its performance on \textit{Tor hidden services}.
This is a protocol for Tor users to hide their location while offering a service such as hosting a website \cite{tor_hidden_services}.
There is already evidence that these services are vulnerable to WF attacks \cite{kfingerprinting} but it would be interesting to see how our models would perform on this data.
Rather than extending this work by using more of different kinds of data, we could also improve the deep learning models.
Currently, one of the main weaknesses is that the traces can be long, which in turn makes our sequence-to-sequence model very deep.
We solved this issue here by cutting the traces after a certain amount of time, since the first part of the trace carries more information than the remainder.
However, this might not be the ideal solution.
There might be another solution or perhaps even another model that does not have this weakness but still manages to map variable-length sequences into a fixed-length representation.
Finally, one of the weaknesses in our evaluation model is that we did not perform a parameter search for our classifiers.
Instead, we re-used the same (hyper)parameters that were used for the hand-picked features in the respective works.
Consequently, we can expect that the performance on the hand-picked features will be higher than on the generated ones.
Therefore, in future works, we might consider performing a parameter search when training the classifiers on the generated features and see if this yields higher results.
| {
"alphanum_fraction": 0.8143660753,
"author": null,
"avg_line_length": 103.8651685393,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "89b73be46ef483ac5322547e32a69d0404d65215",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2022-01-25T11:33:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-12-30T14:23:05.000Z",
"max_forks_repo_head_hexsha": "17b1c8d485c48fee2d1f963eeba7a03ddf8e4fc6",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "henghengxiong/website-fingerprinting",
"max_forks_repo_path": "report/chapters/conclusion.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "17b1c8d485c48fee2d1f963eeba7a03ddf8e4fc6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "henghengxiong/website-fingerprinting",
"max_issues_repo_path": "report/chapters/conclusion.tex",
"max_line_length": 275,
"max_stars_count": 26,
"max_stars_repo_head_hexsha": "17b1c8d485c48fee2d1f963eeba7a03ddf8e4fc6",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "AxelGoetz/website-fingerprinting",
"max_stars_repo_path": "report/chapters/conclusion.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-03T03:38:07.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-08-26T15:54:21.000Z",
"num_tokens": 1769,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 9244
} |
[STATEMENT]
lemma imethds_norec:
"\<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow>
(md, mh) \<in> imethds G md sig"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> (md, mh) \<in> DeclConcepts.imethds G md sig
[PROOF STEP]
apply (subst imethds_rec)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> iface G md = Some ?i
2. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> ws_prog G
3. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> (md, mh) \<in> (Un_tables (DeclConcepts.imethds G ` set (isuperIfs ?i)) \<oplus>\<oplus> (set_option \<circ> table_of (map (\<lambda>(s, mh). (s, md, mh)) (imethods ?i)))) sig
[PROOF STEP]
apply assumption+
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> (md, mh) \<in> (Un_tables (DeclConcepts.imethds G ` set (isuperIfs i)) \<oplus>\<oplus> (set_option \<circ> table_of (map (\<lambda>(s, mh). (s, md, mh)) (imethods i)))) sig
[PROOF STEP]
apply (rule iffD2)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> ((md, mh) \<in> (Un_tables (DeclConcepts.imethds G ` set (isuperIfs i)) \<oplus>\<oplus> (set_option \<circ> table_of (map (\<lambda>(s, mh). (s, md, mh)) (imethods i)))) sig) = ?Q1
2. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> ?Q1
[PROOF STEP]
apply (rule overrides_t_Some_iff)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> (md, mh) \<in> (set_option \<circ> table_of (map (\<lambda>(s, mh). (s, md, mh)) (imethods i))) sig \<or> (set_option \<circ> table_of (map (\<lambda>(s, mh). (s, md, mh)) (imethods i))) sig = {} \<and> (md, mh) \<in> Un_tables (DeclConcepts.imethds G ` set (isuperIfs i)) sig
[PROOF STEP]
apply (rule disjI1)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>iface G md = Some i; ws_prog G; table_of (imethods i) sig = Some mh\<rbrakk> \<Longrightarrow> (md, mh) \<in> (set_option \<circ> table_of (map (\<lambda>(s, mh). (s, md, mh)) (imethods i))) sig
[PROOF STEP]
apply (auto elim: table_of_map_SomeI)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 7,
"llama_tokens": 1159,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
"""
Analytic functions for this test are defined in "analytic_filter.ipynb" in the development/ directory.
"""
from hmf.density_field import filters
import numpy as np
from numpy import sin, cos, pi
import warnings
import pytest
# Need to do the following to catch repeated warnings.
warnings.simplefilter("always", UserWarning)
class TestTopHat:
    """Tests of the top-hat filter against closed-form results for P(k) = k^2.

    The expected values are the analytic expressions derived in
    development/analytic_filter.ipynb for this power spectrum.
    """

    @pytest.fixture(scope="class")
    def filt(self):
        """Top-hat filter over a dense log-spaced grid with P(k) = k^2."""
        wavenumbers = np.logspace(-6, 0, 10000)
        power = wavenumbers ** 2
        return filters.TopHat(wavenumbers, power)

    def test_sigma(self, filt):
        """Mass variance sigma^2(R) matches the analytic integral."""
        R = 1.0
        expected = (
            9 * R ** 2 * sin(R) ** 2 / 2
            + 9 * R ** 2 * cos(R) ** 2 / 2
            + 9 * R * sin(R) * cos(R) / 2
            - 9 * sin(R) ** 2
        ) / (2 * pi ** 2 * R ** 6)
        print(expected, filt.sigma(R) ** 2)
        assert np.isclose(filt.sigma(R)[0] ** 2, expected)

    def test_sigma1(self, filt):
        """First-moment variance sigma^2(R, order=1) matches the analytic form."""
        R = 1.0
        expected = (
            3 * R ** 2 * sin(R) ** 2 / 2
            + 3 * R ** 2 * cos(R) ** 2 / 2
            + 9 * R * sin(R) * cos(R) / 2
            - 9 * sin(R) ** 2 / 4
            + 45 * cos(R) ** 2 / 4
            - 45 * sin(R) * cos(R) / (4 * R)
        ) / (2 * pi ** 2 * R ** 6)
        print(expected, filt.sigma(R, 1) ** 2)
        assert np.isclose(filt.sigma(R, 1)[0] ** 2, expected)

    def test_dwdlnkr(self, filt):
        """Logarithmic derivative of the window function at x = kR = 1."""
        x = 1.0
        expected = x * (3 * sin(x) / x ** 2 - 3 * (-3 * x * cos(x) + 3 * sin(x)) / x ** 4)
        assert np.isclose(filt.dw_dlnkr(x), expected)

    def test_dlnssdlnr(self, filt):
        """d ln(sigma^2) / d ln(R) matches the analytic ratio at R = 1."""
        R = 1.0
        expected = (
            2
            * R ** 4
            * (
                -45 * sin(R) ** 2 / (4 * R ** 2)
                - 27 * cos(R) ** 2 / (4 * R ** 2)
                - 81 * sin(R) * cos(R) / (4 * R ** 3)
                + 27 * sin(R) ** 2 / R ** 4
            )
            / (
                9 * R ** 2 * sin(R) ** 2 / 2
                + 9 * R ** 2 * cos(R) ** 2 / 2
                + 9 * R * sin(R) * cos(R) / 2
                - 9 * sin(R) ** 2
            )
        )
        print(expected, filt.dlnss_dlnr(R))
        assert np.isclose(filt.dlnss_dlnr(R), expected)
class TestSharpK:
    """Tests of the sharp-k filter against closed-form results for P(k) = k^2.

    For this power spectrum the code's expected values take the form
    1 / (2 pi^2 p R^p), with exponent p = 2 + 2 + 1 for the mass variance and
    p = 4 + 2 + 1 for the first moment (see development/analytic_filter.ipynb).
    """

    @pytest.fixture(scope="class")
    def filt(self):
        """Sharp-k filter over a dense log-spaced grid with P(k) = k^2."""
        wavenumbers = np.logspace(-6, 0, 10000)
        power = wavenumbers ** 2
        return filters.SharpK(wavenumbers, power)

    def test_sigma(self, filt):
        R = 1.0
        p = 2 + 2 + 1
        expected = 1.0 / (2 * pi ** 2 * p * R ** p)
        print(expected, filt.sigma(R) ** 2)
        assert np.isclose(filt.sigma(R)[0] ** 2, expected)

    def test_sigma1(self, filt):
        R = 1.0
        p = 4 + 2 + 1
        expected = 1.0 / (2 * pi ** 2 * p * R ** p)
        print(expected, filt.sigma(R, 1) ** 2)
        assert np.isclose(filt.sigma(R, 1)[0] ** 2, expected)

    def test_dlnssdlnr(self, filt):
        R = 1.0
        p = 2 + 2 + 1
        sigma2 = 1.0 / (2 * pi ** 2 * p * R ** p)
        expected = -1.0 / (2 * pi ** 2 * sigma2 * R ** (3 + 2))
        print(expected, filt.dlnss_dlnr(R))
        assert np.isclose(filt.dlnss_dlnr(R), expected)

    def test_sigma_R3(self, filt):
        R = 3.0
        p = 2 + 2 + 1
        expected = 1.0 / (2 * pi ** 2 * p * R ** p)
        print(expected, filt.sigma(R) ** 2)
        assert np.isclose(filt.sigma(R)[0] ** 2, expected)

    def test_sigma1_R3(self, filt):
        R = 3.0
        p = 4 + 2 + 1
        expected = 1.0 / (2 * pi ** 2 * p * R ** p)
        print(expected, filt.sigma(R, 1) ** 2)
        assert np.isclose(filt.sigma(R, 1)[0] ** 2, expected)

    def test_dlnssdlnr_R3(self, filt):
        R = 3.0
        p = 2 + 2 + 1
        sigma2 = 1.0 / (2 * pi ** 2 * p * R ** p)
        expected = -1.0 / (2 * pi ** 2 * sigma2 * R ** (3 + 2))
        print(expected, filt.dlnss_dlnr(R))
        assert np.isclose(filt.dlnss_dlnr(R), expected)

    def test_sigma_Rhalf(self, filt):
        # The expected value is computed at the smallest resolvable radius
        # 1 / k.max(); requesting R below that must also emit a warning.
        r_min = 1.0 / filt.k.max()
        p = 2 + 2 + 1
        expected = 1.0 / (2 * pi ** 2 * p * r_min ** p)
        with warnings.catch_warnings(record=True) as caught:
            # should also raise a warning
            R = 0.5
            s2 = filt.sigma(R)[0] ** 2
            assert caught
        print(s2, expected)
        assert np.isclose(s2, expected)

    def test_sigma1_Rhalf(self, filt):
        r_min = 1.0 / filt.k.max()
        p = 4 + 2 + 1
        expected = 1.0 / (2 * pi ** 2 * p * r_min ** p)
        with warnings.catch_warnings(record=True) as caught:
            # should also raise a warning
            R = 0.5
            s2 = filt.sigma(R, 1)[0] ** 2
            assert caught
        print(s2, expected)
        assert np.isclose(s2, expected)

    def test_dlnssdlnr_Rhalf(self, filt):
        # NOTE(review): despite the name, this uses R = 3.0 and exactly
        # duplicates test_dlnssdlnr_R3 — possibly a copy-paste leftover that
        # was meant to exercise R = 0.5; confirm the intended radius.
        R = 3.0
        p = 2 + 2 + 1
        sigma2 = 1.0 / (2 * pi ** 2 * p * R ** p)
        expected = -1.0 / (2 * pi ** 2 * sigma2 * R ** (3 + 2))
        print(expected, filt.dlnss_dlnr(R))
        assert np.isclose(filt.dlnss_dlnr(R), expected)
class TestGaussian:
    """Tests of the Gaussian filter against closed-form results for P(k) = k^2."""

    @pytest.fixture(scope="class")
    def filt(self):
        """Gaussian filter over a coarse log-spaced grid with P(k) = k^2."""
        wavenumbers = np.logspace(-6, 1, 80)
        power = wavenumbers ** 2
        return filters.Gaussian(wavenumbers, power)

    def test_sigma(self, filt):
        R = 10.0
        expected = 3.0 / (16 * pi ** (3.0 / 2.0) * R ** 5)
        print(expected, filt.sigma(R) ** 2)
        assert np.isclose(filt.sigma(R)[0] ** 2, expected)

    def test_sigma1(self, filt):
        R = 10.0
        expected = 15 / (32 * pi ** (3.0 / 2.0) * R ** 7)
        print(expected, filt.sigma(R, 1) ** 2)
        assert np.isclose(filt.sigma(R, 1)[0] ** 2, expected)

    def test_dlnssdlnr(self, filt):
        # For sigma^2 proportional to R^-5, the log-log slope is exactly -5.
        R = 10.0
        expected = -5
        print(expected, filt.dlnss_dlnr(R))
        assert np.isclose(filt.dlnss_dlnr(R), expected)
| {
"alphanum_fraction": 0.439708561,
"author": null,
"avg_line_length": 27.7272727273,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9c5c7e9707d556e07fc850419336ea3ddef028d0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8b24f5df42cdf73d507ffc4a7c6138573769bb2c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "liuxx479/hmf-1",
"max_forks_repo_path": "tests/test_filters.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8b24f5df42cdf73d507ffc4a7c6138573769bb2c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "liuxx479/hmf-1",
"max_issues_repo_path": "tests/test_filters.py",
"max_line_length": 102,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8b24f5df42cdf73d507ffc4a7c6138573769bb2c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "liuxx479/hmf-1",
"max_stars_repo_path": "tests/test_filters.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1962,
"path": null,
"reason": "import numpy,from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5490
} |
[STATEMENT]
lemma result_costD': assumes "result f_c = f \<and> cost f_c \<le> b"
"f_c = (a,c)"
shows "a = f" "c \<le> b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a = f &&& c \<le> b
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
result f_c = f \<and> cost f_c \<le> b
f_c = (a, c)
goal (1 subgoal):
1. a = f &&& c \<le> b
[PROOF STEP]
by (auto simp: cost_simps) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "LLL_Basis_Reduction_Cost",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 190,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
@copyright 2016 J.T. Lapreste
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_ARCH_COMMON_SIMD_FUNCTION_SPLIT_HPP_INCLUDED
#define BOOST_SIMD_ARCH_COMMON_SIMD_FUNCTION_SPLIT_HPP_INCLUDED
#include <boost/simd/detail/overload.hpp>
// #include <boost/simd/meta/hierarchy/simd.hpp>
// #include <boost/simd/function/simd/combine.hpp>
// #include <boost/simd/function/simd/extract.hpp>
// #include <boost/simd/function/simd/split_high.hpp>
// #include <boost/simd/function/simd/split_low.hpp>
// namespace boost { namespace simd { namespace ext
// {
// namespace bd = boost::dispatch;
// namespace bs = boost::simd;
// BOOST_DISPATCH_OVERLOAD_IF(split_
// , (typename A0, typename A1, typename X, typename Y)
// , bd::cpu_
// , bs::pack_<bd::arithmetic_<A0>, X>
// , bs::pack_<bd::arithmetic_<A1>, X>
// , bs::pack_<bd::arithmetic_<A1>, X>
// )
// {
// using result = ;
// BOOST_FORCEINLINE result operator()(A0 const& a0,A1& a1, A1& a2) const
// {
// a1 = split_low(a0);
// a2 = split_high(a0);
// }
// };
// BOOST_DISPATCH_OVERLOAD(split_
// , (typename A0, typename X)
// , bd::cpu_
// , bs::pack_<bd::arithmetic_<A0>, X>
// )
// {
// using = ;
// BOOST_FORCEINLINE result operator()( const A0& a0) const BOOST_NOEXCEPT
// {
// return eval(a0, simd::meta::is_upgradable<A0>());
// }
// BOOST_FORCEINLINE result eval2(A0 const& a0, boost::mpl::true_) const
// {
// typename simd::meta::vector_of< base_t
// , A0::static_size/2
// >::type a00,a01;
// split(a0, a00, a01);
// return bs::combine(a00,a01);
// }
// BOOST_FORCEINLINE result eval2(A0 const& a0, boost::mpl::false_) const
// {
// return make<result>( static_cast<base_t>( extract<0>(a0) )
// , static_cast<base_t>( extract<1>(a0) )
// );
// }
// BOOST_FORCEINLINE result eval(A0 const& a0, boost::mpl::false_) const
// {
// return a0;
// }
// BOOST_FORCEINLINE result eval(A0 const& a0, boost::mpl::true_) const
// {
// return eval2(a0, boost::mpl::bool_<(A0::static_size>=4)>());
// }
// };
// } } }
#endif
| {
"alphanum_fraction": 0.4839905629,
"author": null,
"avg_line_length": 35.7469879518,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "157655fa2be2a4fc67f0df918bf875333422f2b0",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "yaeldarmon/boost.simd",
"max_forks_repo_path": "include/boost/simd/arch/common/simd/function/split.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "yaeldarmon/boost.simd",
"max_issues_repo_path": "include/boost/simd/arch/common/simd/function/split.hpp",
"max_line_length": 100,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "yaeldarmon/boost.simd",
"max_stars_repo_path": "include/boost/simd/arch/common/simd/function/split.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 711,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2967
} |
"""UNIT TESTS for the experimetn result maniupaltio
python -m unittest test_Result
"""
from commonLib.DBManager import DB
from stats import Result, Histogram, NumericStats, NumericList
import numpy as np
import os
import unittest
class TestResult(unittest.TestCase):
    """Exercises the generic Result wrapper against a live test database.

    Connection parameters come from the TEST_DB_* environment variables, with
    defaults suitable for a local MySQL instance.
    """

    def setUp(self):
        host = os.getenv("TEST_DB_HOST", "127.0.0.1")
        name = os.getenv("TEST_DB_NAME", "test")
        user = os.getenv("TEST_DB_USER", "root")
        password = os.getenv("TEST_DB_PASS", "")
        self._db = DB(host, name, user, password)

    def _del_table(self, table_name):
        """Drop *table_name*, failing the test if the statement is rejected."""
        dropped = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(dropped, "Table was not created!")

    def test_db(self):
        """The database accepts a connect/disconnect round trip."""
        self._db.connect()
        self._db.disconnect()

    def test_ResultInit(self):
        """A fresh Result records its table name and starts with no data."""
        result = Result("MyTable")
        self.assertEqual(result._table_name, "MyTable")
        self.assertEqual(result._data, {})

    def test_SetGet(self):
        """Values written with _set are read back by _get."""
        result = Result("MyTable")
        result._set("MyKey1", "MyVal1")
        result._set("MyKey2", "MyVal2")
        self.assertEqual(result._get("MyKey1"), "MyVal1")
        self.assertEqual(result._get("MyKey2"), "MyVal2")

    def test_table_create(self):
        """create_table runs the injected creation query without error."""
        result = Result("MyTable")
        # create_query is a bound method of this TestCase, so it reads the
        # table name from self._table_name set just below.
        result._create_query = self.create_query
        self._table_name = "MyTable"
        self.addCleanup(self._del_table, "MyTable")
        result.create_table(self._db)

    def test_store_load(self):
        """Data stored under (trace_id, type) is recovered by a fresh Result."""
        stored = Result("MyTable", keys=["MyKey1", "MyKey2"])
        stored._create_query = self.create_query
        self._table_name = "MyTable"
        stored._set("MyKey1", "MyVal1")
        stored._set("MyKey2", "MyVal2")
        self.addCleanup(self._del_table, "MyTable")
        stored.create_table(self._db)
        data_id = stored.store(self._db, 1, "MyType")
        self.assertNotEqual(data_id, None)
        stored = None
        reloaded = Result("MyTable", keys=["MyKey1", "MyKey2"])
        reloaded.load(self._db, 1, "MyType")
        self.assertEqual(reloaded._get("MyKey1"), "MyVal1")
        self.assertEqual(reloaded._get("MyKey2"), "MyVal2")

    def create_query(self):
        """SQL to create the fixture table used by the tests above."""
        return """create table {0} (
                        id INT NOT NULL AUTO_INCREMENT,
                        trace_id INT(10) NOT NULL,
                        type VARCHAR(128) NOT NULL,
                        MyKey1 VARCHAR(100),
                        MyKey2 VARCHAR(100),
                        PRIMARY KEY(id, trace_id, type)
                    )""".format(self._table_name)
class TestHistogram(unittest.TestCase):
    """Tests for the Histogram result type."""

    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        """Drop *table_name*; registered via addCleanup."""
        ok = self._db.doUpdate("drop table " + table_name)
        # Fixed message: this assertion checks the DROP, not the CREATE.
        self.assertTrue(ok, "Table could not be dropped!")

    def test_calculate(self):
        """Histogram normalizes counts and honors minmax and input_bins."""
        hist = Histogram()
        hist.calculate([1, 2, 3, 3, 5], 1)
        bins, edges = hist.get_data()
        self.assertEqual(edges, [1, 2, 3, 4, 5, 6])
        self.assertEqual(list(bins), [0.2, 0.2, 0.4, 0, 0.2])
        # Restricting the range: bins re-normalize over the 4 in-range
        # samples (the 5 is dropped).
        hist.calculate([1, 2, 3, 3, 5], 1, minmax=(1,3))
        self.assertEqual(hist._get("edges"), [1, 2, 3, 4])
        self.assertEqual(list(hist._get("bins")), [0.25, 0.25, 0.5])
        # Explicit input_bins takes precedence over the bin size.
        hist.calculate([1, 2, 3, 3, 5], 1, minmax=(1,3), input_bins=[1,6])
        self.assertEqual(hist._get("edges"), [1, 6])
        self.assertEqual(list(hist._get("bins")), [1.0])

    def test_save_load(self):
        """A stored histogram can be re-loaded under (trace_id, type)."""
        hist = Histogram()
        self.addCleanup(self._del_table, "histograms")
        hist.create_table(self._db)
        hist.calculate([1, 2, 3, 3, 5], 1)
        data_id = hist.store(self._db, 1, "MyHist")
        hist = None
        hist_new = Histogram()
        hist_new.load(self._db, 1, "MyHist")
        self.assertEqual(hist_new._get("edges"), [1, 2, 3, 4, 5, 6])
        self.assertEqual(list(hist_new._get("bins")), [0.2, 0.2, 0.4, 0, 0.2])
class TestNumericStats(unittest.TestCase):
    """Tests for the NumericStats summary-statistics result type."""

    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        """Drop *table_name*; registered via addCleanup."""
        ok = self._db.doUpdate("drop table " + table_name)
        # Fixed message: this assertion checks the DROP, not the CREATE.
        self.assertTrue(ok, "Table could not be dropped!")

    def test_calculate(self):
        """Statistics over 0..100 have known closed-form values."""
        num = NumericStats()
        num.calculate(list(range(0,101)))
        data = num.get_data()
        self.assertEqual(data["count"], 101)
        self.assertEqual(data["min"], 0)
        self.assertEqual(data["max"], 100)
        self.assertEqual(data["mean"], 50)
        # Compare floats approximately, consistently with test_save_load.
        self.assertAlmostEqual(data["std"], np.std(list(range(0,101))))
        self.assertEqual(data["median"], 50)
        self.assertEqual(data["p05"], 5)
        self.assertEqual(data["p25"], 25)
        self.assertEqual(data["p50"], 50)
        self.assertEqual(data["p75"], 75)
        self.assertEqual(data["p95"], 95)

    def test_save_load(self):
        """Stored statistics can be re-loaded under (trace_id, type)."""
        num = NumericStats()
        self.addCleanup(self._del_table, "numericStats")
        num.create_table(self._db)
        num.calculate(list(range(0,101)))
        data_id = num.store(self._db, 1, "MyStats")
        num = None
        num_new = NumericStats()
        num_new.load(self._db, 1, "MyStats")
        data = num_new.get_data()
        self.assertEqual(data["count"], 101)
        self.assertEqual(data["min"], 0)
        self.assertEqual(data["max"], 100)
        self.assertEqual(data["mean"], 50)
        self.assertAlmostEqual(data["std"], np.std(list(range(0,101))))
        self.assertEqual(data["median"], 50)
        self.assertEqual(data["p05"], 5)
        self.assertEqual(data["p25"], 25)
        self.assertEqual(data["p50"], 50)
        self.assertEqual(data["p75"], 75)
        self.assertEqual(data["p95"], 95)
def assertEqualResult(test_obj, r_old, r_new, field):
    """Assert (via *test_obj*) that two result objects hold equivalent data.

    "_stats" fields are compared value by value with assertAlmostEqual;
    "_cdf" fields compare their two component lists exactly.
    """
    old_data = r_old.get_data()
    new_data = r_new.get_data()
    if "_stats" in field:
        for old_val, new_val in zip(old_data.values(), new_data.values()):
            test_obj.assertAlmostEqual(old_val, new_val)
    elif "_cdf" in field:
        for component in (0, 1):
            test_obj.assertListEqual(list(old_data[component]),
                                     list(new_data[component]))
class TestNumericList(unittest.TestCase):
    """Tests for the NumericList result type."""

    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        """Drop *table_name*; registered via addCleanup."""
        ok = self._db.doUpdate("drop table " + table_name)
        # Fixed message: this assertion checks the DROP, not the CREATE.
        self.assertTrue(ok, "Table could not be dropped!")

    def test_load_store(self):
        """A stored value list is loaded back identically."""
        nl = NumericList("my_table", ["utilization", "waste"])
        self.addCleanup(self._del_table, "my_table")
        nl.create_table(self._db)
        nl.set_dic(dict(utilization=0.5, waste=100))
        nl.store(self._db, 1, "usage")
        nl_2 = NumericList("my_table", ["utilization", "waste"])
        nl_2.load(self._db, 1, "usage")
        self.assertEqual(nl._data, nl_2._data)
| {
"alphanum_fraction": 0.555086423,
"author": null,
"avg_line_length": 37.5198019802,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8bbb1bd2e25cf6d52d295d61dd9d71aa255c54ca",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-01-05T08:23:20.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-01-05T08:23:20.000Z",
"max_forks_repo_head_hexsha": "2301dacf486df8ed783c0ba33cbbde6e9978c17e",
"max_forks_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_forks_repo_name": "gonzalorodrigo/ScSFWorkload",
"max_forks_repo_path": "test/test_Result.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "2301dacf486df8ed783c0ba33cbbde6e9978c17e",
"max_issues_repo_issues_event_max_datetime": "2020-12-17T21:35:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-12-17T21:33:15.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_issues_repo_name": "gonzalorodrigo/ScSFWorkload",
"max_issues_repo_path": "test/test_Result.py",
"max_line_length": 82,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "2301dacf486df8ed783c0ba33cbbde6e9978c17e",
"max_stars_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_stars_repo_name": "gonzalorodrigo/ScSFWorkload",
"max_stars_repo_path": "test/test_Result.py",
"max_stars_repo_stars_event_max_datetime": "2019-03-18T18:27:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-03-18T18:27:49.000Z",
"num_tokens": 1948,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7579
} |
import sys
import time
import numpy as np
import openml
from autogluon_benchmark.tasks import task_loader, task_utils
from openml.exceptions import OpenMLServerException
sys.path.append('../')
def get_dataset(task):
    """Return the (features, labels) pair for an OpenML *task*."""
    dataset = task.get_dataset()
    X, y, _, _ = dataset.get_data(task.target_name)
    return X, y
if __name__ == '__main__':
    # Resolve the benchmark task from the YAML task configuration.
    task_dict = task_loader.get_task_dict()
    task_name = 'adult'  # task name in the yaml config
    task_id = task_dict[task_name]['openml_task_id']  # openml task id
    n_folds = 5  # do 5 folds of train/val split
    fit_args = {
        'eval_metric': 'roc_auc',
    }
    task = task_id
    if isinstance(task, int):
        task_id = task
        # Fetch the task from the OpenML server, retrying with exponential
        # backoff (1s, 2s, 4s, ...) on server errors; give up after 10 tries.
        delay_exp = 0
        while True:
            try:
                print(f'Getting task {task_id}')
                task = openml.tasks.get_task(task_id)
                print(f'Got task {task_id}')
            except OpenMLServerException as e:
                delay = 2**delay_exp
                delay_exp += 1
                if delay_exp > 10:
                    raise ValueError('Unable to get task after 10 retries')
                print(e)
                print(f'Retry in {delay}s...')
                time.sleep(delay)
                continue
            break
    # NOTE(review): the split dimensions and raw X/y below are fetched but
    # only the type printouts use them; run_task fetches what it needs itself.
    n_repeats_full, n_folds_full, n_samples_full = task.get_split_dimensions()
    X, y, _, _ = task.get_dataset().get_data(task.target_name)
    print(type(X))
    print(type(y))
    predictors, scores = task_utils.run_task(
        task_id, n_folds=n_folds, fit_args=fit_args)
    # Aggregate per-fold scores; sample std (ddof=1) needs at least 2 folds.
    score = float(np.mean(scores))
    if len(scores) > 1:
        score_std = np.std(scores, ddof=1)
    else:
        score_std = 0.0  # Should this be np.inf?
    print(f'{task_name} score: {round(score, 5)} (+- {round(score_std, 5)})')
| {
"alphanum_fraction": 0.594488189,
"author": null,
"avg_line_length": 29.6333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4e414c42e039880b638a79a60f9379012a9bbb3d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2021-10-02T21:15:18.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-09-23T07:28:46.000Z",
"max_forks_repo_head_hexsha": "d630c78290a52f8c73885afb16884e18135c34f6",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Fanxingye/Autotabular",
"max_forks_repo_path": "examples/automlbechmark/run_autogluon_experiments/train_australian.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d630c78290a52f8c73885afb16884e18135c34f6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Fanxingye/Autotabular",
"max_issues_repo_path": "examples/automlbechmark/run_autogluon_experiments/train_australian.py",
"max_line_length": 78,
"max_stars_count": 48,
"max_stars_repo_head_hexsha": "fb407300adf97532a26d33f7442d2a606fa30512",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "jianzhnie/AutoTabular",
"max_stars_repo_path": "examples/automlbechmark/run_autogluon_experiments/train_australian.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-28T13:02:54.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-06T08:09:26.000Z",
"num_tokens": 450,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1778
} |
"""
Generic helpers for LLVM code generation.
"""
from __future__ import print_function, division, absolute_import
import collections
from contextlib import contextmanager
import functools
from llvmlite import ir
from . import utils, config, types
# Commonly used LLVM types and boolean/byte constants.
bool_t = ir.IntType(1)    # i1, used for predicates
int8_t = ir.IntType(8)    # i8, also the pointee of generic pointers
int32_t = ir.IntType(32)
intp_t = ir.IntType(utils.MACHINE_BITS)  # pointer-sized integer
voidptr_t = int8_t.as_pointer()          # i8*, generic "void *" equivalent
true_bit = bool_t(1)
false_bit = bool_t(0)
true_byte = int8_t(1)
false_byte = int8_t(0)
def as_bool_bit(builder, value):
    """Reduce *value* to a boolean (i1) predicate: value != 0."""
    zero = value.type(0)
    return builder.icmp_unsigned('!=', value, zero)
def make_anonymous_struct(builder, values, struct_type=None):
    """
    Pack the given LLVM *values* into an anonymous struct value.

    If *struct_type* is not given, it is inferred from the value types.
    """
    if struct_type is None:
        member_types = [member.type for member in values]
        struct_type = ir.LiteralStructType(member_types)
    result = struct_type(ir.Undefined)
    for index, member in enumerate(values):
        result = builder.insert_value(result, member, index)
    return result

def make_bytearray(buf):
    """
    Build an LLVM array-of-i8 constant holding the bytes of *buf*.
    """
    data = bytearray(buf)
    return ir.Constant(ir.ArrayType(ir.IntType(8), len(data)), data)
_struct_proxy_cache = {}

def create_struct_proxy(fe_type, kind='value'):
    """
    Return a StructProxy subclass specialized for *fe_type*.

    Results are memoized per (fe_type, kind) pair.
    """
    cache_key = (fe_type, kind)
    try:
        return _struct_proxy_cache[cache_key]
    except KeyError:
        pass
    base = {'value': ValueStructProxy,
            'data': DataStructProxy,
            }[kind]
    subclass = type(base.__name__ + '_' + str(fe_type),
                    (base,),
                    dict(_fe_type=fe_type))
    _struct_proxy_cache[cache_key] = subclass
    return subclass
def copy_struct(dst, src, repl=None):
    """
    Copy structure fields from *src* to *dst*, overriding with *repl*.

    Every field of *src*'s data model is copied to *dst*, unless a
    replacement value is given in the *repl* mapping; any leftover
    entries of *repl* are then assigned as extra attributes.
    Returns *dst*.
    """
    # Fix: use None instead of a mutable {} default argument; also copy so
    # the caller's mapping is never mutated by the pop() below.
    repl = {} if repl is None else repl.copy()
    # copy data from src or use those in repl
    for k in src._datamodel._fields:
        v = repl.pop(k, getattr(src, k))
        setattr(dst, k, v)
    # use remaining key-values in repl
    for k, v in repl.items():
        setattr(dst, k, v)
    return dst
class _StructProxy(object):
    """
    Creates a `Structure` like interface that is constructed with information
    from DataModel instance. FE type must have a data model that is a
    subclass of StructModel.

    Field access (by attribute name or by index) is translated into
    loads/stores through pointers GEP'ed into the underlying storage.
    """
    # The following class members must be overridden by subclass
    _fe_type = None

    def __init__(self, context, builder, value=None, ref=None):
        from numba import datamodel  # Avoid circular import
        self._context = context
        self._datamodel = self._context.data_model_manager[self._fe_type]
        if not isinstance(self._datamodel, datamodel.StructModel):
            raise TypeError(
                "Not a structure model: {0}".format(self._datamodel))
        self._builder = builder
        # Backend (LLVM) type of the struct; must be a value type, the
        # storage pointer is managed separately below.
        self._be_type = self._get_be_type(self._datamodel)
        assert not is_pointer(self._be_type)
        outer_ref, ref = self._make_refs(ref)
        if ref.type.pointee != self._be_type:
            raise AssertionError("bad ref type: expected %s, got %s"
                                 % (self._be_type.as_pointer(), ref.type))
        if value is not None:
            # Initialize the storage with *value* (stored via the outer ref,
            # which may differ from the inner ref in derived classes).
            if value.type != outer_ref.type.pointee:
                raise AssertionError("bad value type: expected %s, got %s"
                                     % (outer_ref.type.pointee, value.type))
            self._builder.store(value, outer_ref)
        self._value = ref
        self._outer_ref = outer_ref

    def _make_refs(self, ref):
        """
        Return an (outer ref, value ref) pair. By default, these are
        the same pointers, but a derived class may override this.
        """
        if ref is None:
            ref = alloca_once(self._builder, self._be_type, zfill=True)
        return ref, ref

    def _get_be_type(self, datamodel):
        # Subclass hook: pick the LLVM type used for storage.
        raise NotImplementedError

    def _cast_member_to_value(self, index, val):
        # Subclass hook: convert a stored member to its value form.
        raise NotImplementedError

    def _cast_member_from_value(self, index, val):
        # Subclass hook: convert a value to its stored form.
        raise NotImplementedError

    def _get_ptr_by_index(self, index):
        # Pointer to the *index*-th field of the struct storage.
        return gep_inbounds(self._builder, self._value, 0, index)

    def _get_ptr_by_name(self, attrname):
        index = self._datamodel.get_field_position(attrname)
        return self._get_ptr_by_index(index)

    def __getattr__(self, field):
        """
        Load the LLVM value of the named *field*.
        """
        # Underscored names are real (Python-level) attributes; only
        # non-underscored names map to struct fields.
        if not field.startswith('_'):
            return self[self._datamodel.get_field_position(field)]
        else:
            raise AttributeError(field)

    def __setattr__(self, field, value):
        """
        Store the LLVM *value* into the named *field*.
        """
        if field.startswith('_'):
            return super(_StructProxy, self).__setattr__(field, value)
        self[self._datamodel.get_field_position(field)] = value

    def __getitem__(self, index):
        """
        Load the LLVM value of the field at *index*.
        """
        member_val = self._builder.load(self._get_ptr_by_index(index))
        return self._cast_member_to_value(index, member_val)

    def __setitem__(self, index, value):
        """
        Store the LLVM *value* into the field at *index*.
        """
        ptr = self._get_ptr_by_index(index)
        value = self._cast_member_from_value(index, value)
        if value.type != ptr.type.pointee:
            if (is_pointer(value.type) and is_pointer(ptr.type.pointee)
                and value.type.pointee == ptr.type.pointee.pointee):
                # Differ by address-space only
                # Auto coerce it
                value = self._context.addrspacecast(self._builder,
                                                    value,
                                                    ptr.type.pointee.addrspace)
            else:
                raise TypeError("Invalid store of {value.type} to "
                                "{ptr.type.pointee} in "
                                "{self._datamodel} "
                                "(trying to write member #{index})"
                                .format(value=value, ptr=ptr, self=self,
                                        index=index))
        self._builder.store(value, ptr)

    def __len__(self):
        """
        Return the number of fields.
        """
        return self._datamodel.field_count

    def _getpointer(self):
        """
        Return the LLVM pointer to the underlying structure.
        """
        return self._outer_ref

    def _getvalue(self):
        """
        Load and return the value of the underlying LLVM structure.
        """
        return self._builder.load(self._outer_ref)

    def _setvalue(self, value):
        """
        Store the value in this structure.
        """
        assert not is_pointer(value.type)
        assert value.type == self._be_type, (value.type, self._be_type)
        self._builder.store(value, self._value)
class ValueStructProxy(_StructProxy):
    """
    Create a StructProxy suitable for accessing regular values
    (e.g. LLVM values or alloca slots).
    """
    def _get_be_type(self, datamodel):
        return datamodel.get_value_type()

    def _cast_member_to_value(self, index, val):
        # Values are stored as-is: no conversion needed.
        return val

    def _cast_member_from_value(self, index, val):
        return val


class DataStructProxy(_StructProxy):
    """
    Create a StructProxy suitable for accessing data persisted in memory.
    """
    def _get_be_type(self, datamodel):
        return datamodel.get_data_type()

    def _cast_member_to_value(self, index, val):
        # Convert from the storage ("data") representation to a value.
        return self._datamodel.get_model(index).from_data(self._builder, val)

    def _cast_member_from_value(self, index, val):
        # Convert a value into its storage ("data") representation.
        return self._datamodel.get_model(index).as_data(self._builder, val)
class Structure(object):
    """
    A high-level object wrapping a alloca'ed LLVM structure, including
    named fields and attribute access.
    """

    # XXX Should this warrant several separate constructors?
    def __init__(self, context, builder, value=None, ref=None, cast_ref=False):
        # NOTE(review): self._fields is expected to be provided by the
        # subclass (list of (name, type) pairs) — confirm against users.
        self._type = context.get_struct_type(self)
        self._context = context
        self._builder = builder
        if ref is None:
            # No storage given: allocate a zero-filled slot for the struct.
            self._value = alloca_once(builder, self._type, zfill=True)
            if value is not None:
                assert not is_pointer(value.type)
                assert value.type == self._type, (value.type, self._type)
                builder.store(value, self._value)
        else:
            # Wrap an existing pointer; *value* must not also be given.
            assert value is None
            assert is_pointer(ref.type)
            if self._type != ref.type.pointee:
                if cast_ref:
                    ref = builder.bitcast(ref, self._type.as_pointer())
                else:
                    raise TypeError(
                        "mismatching pointer type: got %s, expected %s"
                        % (ref.type.pointee, self._type))
            self._value = ref
        # Per-field lookup tables:
        #   _namemap: field name -> field index
        #   _fdmap:   field index -> GEP index pair
        #   _typemap: field index -> field type
        self._namemap = {}
        self._fdmap = []
        self._typemap = []
        base = int32_t(0)
        for i, (k, tp) in enumerate(self._fields):
            self._namemap[k] = i
            self._fdmap.append((base, int32_t(i)))
            self._typemap.append(tp)

    def _get_ptr_by_index(self, index):
        # Pointer to the *index*-th field via a precomputed GEP index pair.
        ptr = self._builder.gep(self._value, self._fdmap[index], inbounds=True)
        return ptr

    def _get_ptr_by_name(self, attrname):
        return self._get_ptr_by_index(self._namemap[attrname])

    def __getattr__(self, field):
        """
        Load the LLVM value of the named *field*.
        """
        # Underscored names are real attributes; others map to struct fields.
        if not field.startswith('_'):
            return self[self._namemap[field]]
        else:
            raise AttributeError(field)

    def __setattr__(self, field, value):
        """
        Store the LLVM *value* into the named *field*.
        """
        if field.startswith('_'):
            return super(Structure, self).__setattr__(field, value)
        self[self._namemap[field]] = value

    def __getitem__(self, index):
        """
        Load the LLVM value of the field at *index*.
        """
        return self._builder.load(self._get_ptr_by_index(index))

    def __setitem__(self, index, value):
        """
        Store the LLVM *value* into the field at *index*.
        """
        ptr = self._get_ptr_by_index(index)
        if ptr.type.pointee != value.type:
            fmt = "Type mismatch: __setitem__(%d, ...) expected %r but got %r"
            raise AssertionError(fmt % (index,
                                        str(ptr.type.pointee),
                                        str(value.type)))
        self._builder.store(value, ptr)

    def __len__(self):
        """
        Return the number of fields.
        """
        return len(self._namemap)

    def _getpointer(self):
        """
        Return the LLVM pointer to the underlying structure.
        """
        return self._value

    def _getvalue(self):
        """
        Load and return the value of the underlying LLVM structure.
        """
        return self._builder.load(self._value)

    def _setvalue(self, value):
        """Store the value in this structure"""
        assert not is_pointer(value.type)
        assert value.type == self._type, (value.type, self._type)
        self._builder.store(value, self._value)

    # __iter__ is derived by Python from __len__ and __getitem__
def alloca_once(builder, ty, size=None, name='', zfill=False):
    """Allocate stack memory at the entry block of the current function
    pointed by ``builder`` with llvm type ``ty``.  The optional ``size`` arg
    sets the number of elements to allocate; the default is 1.  The optional
    ``name`` arg sets the symbol name inside the llvm IR for debugging.
    If ``zfill`` is set, the memory is also zero-filled at the current
    use-site location.  Note that the memory is always zero-filled after the
    ``alloca`` at init-site (the entry block).
    """
    if isinstance(size, utils.INT_TYPES):
        size = ir.Constant(intp_t, size)
    with builder.goto_entry_block():
        ptr = builder.alloca(ty, size=size, name=name)
        # Always zero-fill at init-site. This is safe.
        # NOTE(review): storing ty(None) zeroes a single element; with
        # size > 1 only the first element appears zeroed — confirm intended.
        builder.store(ty(None), ptr)
    # Also zero-fill at the use-site
    if zfill:
        builder.store(ty(None), ptr)
    return ptr
def alloca_once_value(builder, value, name=''):
    """
    Like alloca_once(), but passing a *value* instead of a type.  The
    type is inferred and the allocated slot is also initialized with the
    given value.
    """
    # Fix: forward *name* to alloca_once() (it was accepted but ignored).
    storage = alloca_once(builder, value.type, name=name)
    builder.store(value, storage)
    return storage
def insert_pure_function(module, fnty, name):
    """
    Declare a pure function (in the functional programming sense) in the
    given module and return it.
    """
    fn = module.get_or_insert_function(fnty, name=name)
    for attr in ("readonly", "nounwind"):
        fn.attributes.add(attr)
    return fn
def terminate(builder, bbend):
    """Branch to *bbend* unless the current block is already terminated."""
    if builder.basic_block.terminator is None:
        builder.branch(bbend)

def get_null_value(ltype):
    """Return the NULL (zero-initialized) constant for *ltype*."""
    return ltype(None)

def is_null(builder, val):
    """Predicate: *val* == NULL."""
    return builder.icmp_unsigned('==', get_null_value(val.type), val)

def is_not_null(builder, val):
    """Predicate: *val* != NULL."""
    return builder.icmp_unsigned('!=', get_null_value(val.type), val)

def if_unlikely(builder, pred):
    """An if_then() branch hinted as unlikely taken."""
    return builder.if_then(pred, likely=False)

def if_likely(builder, pred):
    """An if_then() branch hinted as likely taken."""
    return builder.if_then(pred, likely=True)

def ifnot(builder, pred):
    """An if_then() on the negation of *pred*."""
    return builder.if_then(builder.not_(pred))

def increment_index(builder, val):
    """
    Increment an index *val*.
    """
    # We pass the "nsw" flag in the hope that LLVM understands the index
    # never changes sign.  Unfortunately this doesn't always work
    # (e.g. ndindex()).
    return builder.add(val, val.type(1), flags=['nsw'])
# Handle yielded to for_range() bodies: the loop index plus a callable
# that breaks out of the loop.
Loop = collections.namedtuple('Loop', ('index', 'do_break'))

@contextmanager
def for_range(builder, count, start=None, intp=None):
    """
    Generate LLVM IR for a for-loop in [start, count).
    *start* is equal to 0 by default.

    Yields a Loop namedtuple with the following members:
    - `index` is the loop index's value
    - `do_break` is a no-argument callable to break out of the loop
    """
    if intp is None:
        intp = count.type
    if start is None:
        start = intp(0)
    stop = count
    # Standard three-block loop shape: condition, body, exit.
    bbcond = builder.append_basic_block("for.cond")
    bbbody = builder.append_basic_block("for.body")
    bbend = builder.append_basic_block("for.end")
    def do_break():
        builder.branch(bbend)
    bbstart = builder.basic_block
    builder.branch(bbcond)
    with builder.goto_block(bbcond):
        # Loop index as a phi node; its incoming edges are wired below,
        # after the body has been generated.
        index = builder.phi(intp, name="loop.index")
        pred = builder.icmp_signed('<', index, stop)
        builder.cbranch(pred, bbbody, bbend)
    with builder.goto_block(bbbody):
        yield Loop(index, do_break)
        # Update bbbody as a new basic block may have been activated
        bbbody = builder.basic_block
        incr = increment_index(builder, index)
        terminate(builder, bbcond)
    # Wire the phi: start value on entry, incremented index on the back-edge.
    index.add_incoming(start, bbstart)
    index.add_incoming(incr, bbbody)
    builder.position_at_end(bbend)
@contextmanager
def for_range_slice(builder, start, stop, step, intp=None, inc=True):
    """
    Generate LLVM IR for a for-loop based on a slice.  Yields a
    (index, count) tuple where `index` is the slice index's value
    inside the loop, and `count` the iteration count.

    Parameters
    -------------
    builder : object
        Builder object
    start : int
        The beginning value of the slice
    stop : int
        The end value of the slice
    step : int
        The step value of the slice
    intp :
        The data type
    inc : boolean, optional
        Signals whether the step is positive (True) or negative (False).

    Yields
    -----------
    An (index, count) pair of LLVM values.
    """
    if intp is None:
        intp = start.type
    # Standard three-block loop shape: condition, body, exit.
    bbcond = builder.append_basic_block("for.cond")
    bbbody = builder.append_basic_block("for.body")
    bbend = builder.append_basic_block("for.end")
    bbstart = builder.basic_block
    builder.branch(bbcond)
    with builder.goto_block(bbcond):
        index = builder.phi(intp, name="loop.index")
        count = builder.phi(intp, name="loop.count")
        # The comparison direction depends on the sign of the step.
        if (inc):
            pred = builder.icmp_signed('<', index, stop)
        else:
            pred = builder.icmp_signed('>', index, stop)
        builder.cbranch(pred, bbbody, bbend)
    with builder.goto_block(bbbody):
        yield index, count
        # The body may have activated a new block; capture it for the phis.
        bbbody = builder.basic_block
        incr = builder.add(index, step)
        next_count = increment_index(builder, count)
        terminate(builder, bbcond)
    # Wire the phis: initial values on entry, updated values on the back-edge.
    index.add_incoming(start, bbstart)
    index.add_incoming(incr, bbbody)
    count.add_incoming(ir.Constant(intp, 0), bbstart)
    count.add_incoming(next_count, bbbody)
    builder.position_at_end(bbend)
@contextmanager
def for_range_slice_generic(builder, start, stop, step):
    """
    A helper wrapper for for_range_slice().  This is a context manager which
    yields two for_range_slice()-alike context managers, the first for
    the positive step case, the second for the negative step case.

    Use:
        with for_range_slice_generic(...) as (pos_range, neg_range):
            with pos_range as (idx, count):
                ...
            with neg_range as (idx, count):
                ...
    """
    intp = start.type
    # The sign of the step is only known at runtime, so emit both loop
    # variants and guard them with a runtime branch on the step's sign.
    is_pos_step = builder.icmp_signed('>=', step, ir.Constant(intp, 0))
    pos_for_range = for_range_slice(builder, start, stop, step, intp, inc=True)
    neg_for_range = for_range_slice(builder, start, stop, step, intp, inc=False)
    @contextmanager
    def cm_cond(cond, inner_cm):
        # Enter *inner_cm* only within the conditional block *cond*.
        with cond:
            with inner_cm as value:
                yield value
    with builder.if_else(is_pos_step, likely=True) as (then, otherwise):
        yield cm_cond(then, pos_for_range), cm_cond(otherwise, neg_for_range)
@contextmanager
def loop_nest(builder, shape, intp, order='C'):
    """
    Generate a loop nest walking a N-dimensional array.
    Yields a tuple of N indices for use in the inner loop body,
    iterating over the *shape* space.

    If *order* is 'C' (the default), indices are incremented inside-out
    (i.e. (0,0), (0,1), (0,2), (1,0) etc.).
    If *order* is 'F', they are incremented outside-in
    (i.e. (0,0), (1,0), (2,0), (0,1) etc.).
    This has performance implications when walking an array as it impacts
    the spatial locality of memory accesses.
    """
    assert order in 'CF'
    if not shape:
        # 0-d array
        yield ()
    else:
        if order == 'F':
            # Fortran order: reverse the dims so the first varies fastest,
            # then reverse the yielded indices back.
            _swap = lambda x: x[::-1]
        else:
            _swap = lambda x: x
        with _loop_nest(builder, _swap(shape), intp) as indices:
            assert len(indices) == len(shape)
            yield _swap(indices)

@contextmanager
def _loop_nest(builder, shape, intp):
    # Recursively nest one for_range() per dimension, innermost last.
    with for_range(builder, shape[0], intp=intp) as loop:
        if len(shape) > 1:
            with _loop_nest(builder, shape[1:], intp) as indices:
                yield (loop.index,) + indices
        else:
            yield (loop.index,)
def pack_array(builder, values, ty=None):
    """
    Pack a sequence of values into an LLVM array value.  *ty* should be
    given if the sequence may be empty, in which case the element type
    cannot be inferred from the values.
    """
    if ty is None:
        ty = values[0].type
    result = ir.ArrayType(ty, len(values))(ir.Undefined)
    for index, elem in enumerate(values):
        result = builder.insert_value(result, elem, index)
    return result

def pack_struct(builder, values):
    """
    Pack a sequence of values into an LLVM struct value.
    """
    struct_type = ir.LiteralStructType([elem.type for elem in values])
    result = struct_type(ir.Undefined)
    for index, elem in enumerate(values):
        result = builder.insert_value(result, elem, index)
    return result
def unpack_tuple(builder, tup, count=None):
    """
    Unpack an array or structure of values, return a Python list of the
    extracted LLVM values.  (Despite the historical name, a list --
    not a tuple -- is returned.)
    """
    if count is None:
        # Assuming *tup* is an aggregate
        count = len(tup.type.elements)
    vals = [builder.extract_value(tup, i)
            for i in range(count)]
    return vals
def get_item_pointer(builder, aryty, ary, inds, wraparound=False):
    """
    Compute the pointer to the element of array structure *ary* (of Numba
    array type *aryty*) selected by the index values *inds*.
    """
    kwargs = dict(
        data=ary.data,
        shape=unpack_tuple(builder, ary.shape, count=aryty.ndim),
        strides=unpack_tuple(builder, ary.strides, count=aryty.ndim),
        layout=aryty.layout,
        inds=inds,
        wraparound=wraparound,
    )
    return get_item_pointer2(builder, **kwargs)
def get_item_pointer2(builder, data, strides, shape, layout, inds,
                      wraparound=False):
    """
    Compute the pointer to the element of the *data* array selected by
    the index values *inds*, given the array's *shape*, *strides* and
    *layout* ('C' or 'F' for contiguous arrays, anything else for a
    generic strided access).  If *wraparound* is true, negative indices
    wrap around from the end of each dimension.
    """
    if wraparound:
        # Wraparound
        indices = []
        for ind, dimlen in zip(inds, shape):
            negative = builder.icmp_signed('<', ind, ind.type(0))
            wrapped = builder.add(dimlen, ind)
            indices.append(builder.select(negative, wrapped, ind))
        # -- NOTE: the append above matches the original selected/append
        #    sequence; see below for the non-wraparound path.
        pass
    else:
        indices = inds
    if not indices:
        # Indexing with empty tuple
        return builder.gep(data, [int32_t(0)])
    intp = indices[0].type
    # Indexing code
    if layout in 'CF':
        steps = []
        # Compute steps for each dimension
        if layout == 'C':
            # C contiguous
            for i in range(len(shape)):
                last = intp(1)
                for j in shape[i + 1:]:
                    last = builder.mul(last, j)
                steps.append(last)
        elif layout == 'F':
            # F contiguous
            for i in range(len(shape)):
                last = intp(1)
                for j in shape[:i]:
                    last = builder.mul(last, j)
                steps.append(last)
        else:
            raise Exception("unreachable")
        # Compute index
        loc = intp(0)
        for i, s in zip(indices, steps):
            tmp = builder.mul(i, s)
            loc = builder.add(loc, tmp)
        ptr = builder.gep(data, [loc])
        return ptr
    else:
        # Any layout
        dimoffs = [builder.mul(s, i) for s, i in zip(strides, indices)]
        offset = functools.reduce(builder.add, dimoffs)
        return pointer_add(builder, data, offset)
def _scalar_pred_against_zero(builder, value, fpred, icond):
    """
    Compare scalar *value* against zero: floats go through *fpred*,
    signed integers through an icmp with condition *icond*.
    """
    zero = value.type(0)
    if isinstance(value.type, (ir.FloatType, ir.DoubleType)):
        return fpred(value, zero)
    if isinstance(value.type, ir.IntType):
        return builder.icmp_signed(icond, value, zero)
    raise TypeError("unexpected value type %s" % (value.type,))

def is_scalar_zero(builder, value):
    """
    Return a predicate representing whether *value* is equal to zero.
    """
    fcmp = functools.partial(builder.fcmp_ordered, '==')
    return _scalar_pred_against_zero(builder, value, fcmp, '==')

def is_not_scalar_zero(builder, value):
    """
    Return a predicate representing whether a *value* is not equal to zero.
    (not exactly "not is_scalar_zero" because of NaNs)
    """
    fcmp = functools.partial(builder.fcmp_unordered, '!=')
    return _scalar_pred_against_zero(builder, value, fcmp, '!=')

def is_scalar_zero_or_nan(builder, value):
    """
    Return a predicate representing whether *value* is equal to either zero
    or NaN.
    """
    fcmp = functools.partial(builder.fcmp_unordered, '==')
    return _scalar_pred_against_zero(builder, value, fcmp, '==')

is_true = is_not_scalar_zero
is_false = is_scalar_zero

def is_scalar_neg(builder, value):
    """
    Is *value* negative?  Assumes *value* is signed.
    """
    fcmp = functools.partial(builder.fcmp_ordered, '<')
    return _scalar_pred_against_zero(builder, value, fcmp, '<')
def guard_null(context, builder, value, exc_tuple):
    """
    Guard against *value* being null or zero, raising the user exception
    described by *exc_tuple*: a (exception type, arguments...) tuple.
    """
    with builder.if_then(is_scalar_zero(builder, value), likely=False):
        exc_type = exc_tuple[0]
        exc_args = exc_tuple[1:] or None
        context.call_conv.return_user_exc(builder, exc_type, exc_args)

def guard_memory_error(context, builder, pointer, msg=None):
    """
    Guard against *pointer* being NULL (and raise a MemoryError).
    """
    assert isinstance(pointer.type, ir.PointerType), pointer.type
    exc_args = (msg,) if msg else ()
    with builder.if_then(is_null(builder, pointer), likely=False):
        context.call_conv.return_user_exc(builder, MemoryError, exc_args)

@contextmanager
def if_zero(builder, value, likely=False):
    """
    Execute the given block if the scalar *value* is zero.
    """
    with builder.if_then(is_scalar_zero(builder, value), likely=likely):
        yield

# Guarding against zero and against NULL are the same operation.
guard_zero = guard_null
def is_pointer(ltyp):
    """
    Whether the LLVM type *ltyp* is a pointer type.
    """
    return isinstance(ltyp, ir.PointerType)
def get_record_member(builder, record, offset, typ):
    """
    Return a pointer of type *typ* to the member at *offset* inside
    *record*.
    """
    raw_ptr = gep_inbounds(builder, record, 0, offset)
    assert not is_pointer(raw_ptr.type.pointee)
    return builder.bitcast(raw_ptr, typ.as_pointer())

def is_neg_int(builder, val):
    """Predicate: signed integer *val* < 0."""
    return builder.icmp_signed('<', val, val.type(0))

def gep_inbounds(builder, ptr, *inds, **kws):
    """
    Same as gep(), but with the `inbounds` keyword set.
    """
    return gep(builder, ptr, *inds, inbounds=True, **kws)
def gep(builder, ptr, *inds, **kws):
    """
    Emit a getelementptr instruction for the given pointer and indices.
    The indices can be LLVM values or Python int constants.
    """
    name = kws.pop('name', '')
    inbounds = kws.pop('inbounds', False)
    assert not kws
    # NOTE: llvm only accepts int32 inside structs, not int64
    idx = [int32_t(i) if isinstance(i, utils.INT_TYPES) else i
           for i in inds]
    return builder.gep(ptr, idx, name=name, inbounds=inbounds)
def pointer_add(builder, ptr, offset, return_type=None):
    """
    Add an integral *offset* to pointer *ptr*, and return a pointer
    of *return_type* (or, if omitted, the same type as *ptr*).

    Note the computation is done in bytes, and ignores the width of
    the pointed item type.
    """
    if isinstance(offset, utils.INT_TYPES):
        offset = intp_t(offset)
    base = builder.ptrtoint(ptr, intp_t)
    shifted = builder.add(base, offset)
    return builder.inttoptr(shifted, return_type or ptr.type)
def memset(builder, ptr, size, value):
    """
    Fill *size* bytes starting from *ptr* with *value*.
    """
    intrinsic = builder.module.declare_intrinsic('llvm.memset',
                                                 (voidptr_t, size.type))
    dest = builder.bitcast(ptr, voidptr_t)
    if isinstance(value, int):
        value = int8_t(value)
    # args: dest, value, size, alignment (0 = unaligned), is_volatile
    builder.call(intrinsic, [dest, value, size, int32_t(0), bool_t(0)])
def global_constant(builder_or_module, name, value, linkage='internal'):
    """
    Create an (LLVM module-)global constant named *name* holding *value*,
    in the module of *builder_or_module* (which may be a builder or a
    module).
    """
    if isinstance(builder_or_module, ir.Module):
        module = builder_or_module
    else:
        module = builder_or_module.module
    gv = module.add_global_variable(value.type, name=name)
    gv.linkage = linkage
    gv.global_constant = True
    gv.initializer = value
    return gv
def divmod_by_constant(builder, val, divisor):
    """
    Compute the (quotient, remainder) of *val* divided by the constant
    positive *divisor*.  The semantics reflects those of Python integer
    floor division, rather than C's / LLVM's signed division and modulo.
    The difference lies with a negative *val*.
    """
    assert divisor > 0
    divisor = val.type(divisor)
    one = val.type(1)
    # The quotient is computed into a stack slot because the two branches
    # below store different expressions into it.
    quot = alloca_once(builder, val.type)
    with builder.if_else(is_neg_int(builder, val)) as (if_neg, if_pos):
        with if_pos:
            # quot = val / divisor
            quot_val = builder.sdiv(val, divisor)
            builder.store(quot_val, quot)
        with if_neg:
            # quot = -1 + (val + 1) / divisor
            # (floor division rounds towards negative infinity)
            val_plus_one = builder.add(val, one)
            quot_val = builder.sdiv(val_plus_one, divisor)
            builder.store(builder.sub(quot_val, one), quot)
    # rem = val - quot * divisor
    # (should be slightly faster than a separate modulo operation)
    quot_val = builder.load(quot)
    rem_val = builder.sub(val, builder.mul(quot_val, divisor))
    return quot_val, rem_val
def cbranch_or_continue(builder, cond, bbtrue):
    """
    Conditionally branch to *bbtrue*, otherwise fall through.

    A fresh continuation block is appended, the builder is left
    positioned at its end, and the new block is returned.
    """
    continuation = builder.append_basic_block('.continue')
    builder.cbranch(cond, bbtrue, continuation)
    builder.position_at_end(continuation)
    return continuation
def memcpy(builder, dst, src, count):
    """
    Emit an element-wise copy loop to the builder.

    Copies each of the *count* elements of *src* into *dst*.  Unlike the
    C equivalent, each element can be any LLVM type.
    (The original docstring stated the copy direction backwards.)

    Assumes
    -------
    * dst.type == src.type
    * count is positive
    """
    # Note this does seem to be optimized as a raw memcpy() by LLVM
    # whenever possible...
    assert dst.type == src.type
    with for_range(builder, count, intp=count.type) as loop:
        out_ptr = builder.gep(dst, [loop.index])
        in_ptr = builder.gep(src, [loop.index])
        builder.store(builder.load(in_ptr), out_ptr)
def _raw_memcpy(builder, func_name, dst, src, count, itemsize, align):
    # Shared implementation behind raw_memcpy() and raw_memmove():
    # emit a call to the named LLVM intrinsic over count * itemsize bytes.
    size_t = count.type
    if isinstance(itemsize, utils.INT_TYPES):
        itemsize = ir.Constant(size_t, itemsize)
    intrinsic = builder.module.declare_intrinsic(func_name,
                                                 [voidptr_t, voidptr_t, size_t])
    nbytes = builder.mul(count, itemsize)
    call_args = [builder.bitcast(dst, voidptr_t),
                 builder.bitcast(src, voidptr_t),
                 nbytes,
                 ir.Constant(ir.IntType(32), align),
                 false_bit]  # is_volatile
    builder.call(intrinsic, call_args)
def raw_memcpy(builder, dst, src, count, itemsize, align=1):
    """
    Emit a raw memcpy() call copying `count` items of size `itemsize`
    from `src` to `dst`.
    """
    return _raw_memcpy(builder, 'llvm.memcpy', dst, src, count,
                       itemsize, align)
def raw_memmove(builder, dst, src, count, itemsize, align=1):
    """
    Emit a raw memmove() call (safe for overlapping regions) for
    `count` items of size `itemsize` from `src` to `dst`.
    """
    return _raw_memcpy(builder, 'llvm.memmove', dst, src, count, itemsize,
                       align)
def muladd_with_overflow(builder, a, b, c):
    """
    Compute (a * b + c) and return a (result, overflow bit) pair.
    The operands must be signed integers.
    """
    mul_res = builder.smul_with_overflow(a, b)
    product = builder.extract_value(mul_res, 0)
    mul_ovf = builder.extract_value(mul_res, 1)
    add_res = builder.sadd_with_overflow(product, c)
    total = builder.extract_value(add_res, 0)
    add_ovf = builder.extract_value(add_res, 1)
    # Overflow if either the multiplication or the addition overflowed.
    return total, builder.or_(mul_ovf, add_ovf)
def printf(builder, format, *args):
    """
    Call printf() with the given *format* string (a Python str) and the
    LLVM values in *args*.

    Note: no checking is done that the number of values in *args*, or
    their types, match the conversions in the format string.
    """
    assert isinstance(format, str)
    mod = builder.module
    cstring = voidptr_t
    # Store the NUL-terminated format string as a module-level constant.
    fmt_const = make_bytearray((format + '\00').encode('ascii'))
    global_fmt = global_constant(mod, "printf_format", fmt_const)
    fnty = ir.FunctionType(int32_t, [cstring], var_arg=True)
    # Declare printf() unless it is already present in the module.
    try:
        printf_fn = mod.get_global('printf')
    except KeyError:
        printf_fn = ir.Function(mod, fnty, name="printf")
    ptr_fmt = builder.bitcast(global_fmt, cstring)
    return builder.call(printf_fn, [ptr_fmt] + list(args))
def snprintf(builder, buffer, bufsz, format, *args):
    """Call libc snprintf(buffer, bufsz, format, ...args).
    """
    assert isinstance(format, str)
    mod = builder.module
    cstring = voidptr_t
    # Store the NUL-terminated format string as a module-level constant.
    fmt_const = make_bytearray((format + '\00').encode('ascii'))
    global_fmt = global_constant(mod, "snprintf_format", fmt_const)
    fnty = ir.FunctionType(
        int32_t, [cstring, intp_t, cstring], var_arg=True,
    )
    # Actual symbol name of snprintf is different on win32.
    symbol = 'snprintf'
    if config.IS_WIN32:
        symbol = '_' + symbol
    # Declare the symbol unless it is already present in the module.
    try:
        snprintf_fn = mod.get_global(symbol)
    except KeyError:
        snprintf_fn = ir.Function(mod, fnty, name=symbol)
    ptr_fmt = builder.bitcast(global_fmt, cstring)
    return builder.call(snprintf_fn, [buffer, bufsz, ptr_fmt] + list(args))
def snprintf_stackbuffer(builder, bufsz, format, *args):
    """Like `snprintf()`, but writing into a zero-filled stack buffer of
    *bufsz* bytes.  Returns the buffer pointer as i8*.
    """
    assert isinstance(bufsz, int)
    buf_type = ir.ArrayType(ir.IntType(8), bufsz)
    raw_space = alloca_once(builder, buf_type, zfill=True)
    buf_ptr = builder.bitcast(raw_space, voidptr_t)
    snprintf(builder, buf_ptr, intp_t(bufsz), format, *args)
    return buf_ptr
if utils.PY3:
    def normalize_ir_text(text):
        """
        Normalize the given string to a latin1-compatible encoding that
        is suitable for embedding in LLVM IR.
        """
        # Re-encoding to latin1 is enough: each byte of the utf8
        # representation maps onto a single latin1 code point.
        utf8_bytes = text.encode('utf8')
        return utf8_bytes.decode('latin1')
else:
    def normalize_ir_text(text):
        """
        No-op under Python 2.  Assume there won't be unicode names.
        """
        return text
def hexdump(builder, ptr, nbytes):
    """Debug-print the memory region from *ptr* to *ptr + nbytes*
    as hex bytes, 16 per output line.
    """
    bytes_per_line = 16
    nbytes = builder.zext(nbytes, intp_t)
    printf(builder, "hexdump p=%p n=%zu",
           ptr, nbytes)
    byte_t = ir.IntType(8)
    as_bytes = builder.bitcast(ptr, byte_t.as_pointer())
    # Emit a loop printing one byte per iteration.
    with for_range(builder, nbytes) as idx:
        # Start a fresh output line every bytes_per_line bytes.
        col = builder.urem(idx.index, intp_t(bytes_per_line))
        at_line_start = builder.icmp_unsigned("==", col, intp_t(0))
        with builder.if_then(at_line_start):
            printf(builder, "\n")
        byte_ptr = builder.gep(as_bytes, [idx.index])
        printf(builder, " %02x", builder.load(byte_ptr))
    printf(builder, "\n")
def is_nonelike(ty):
    """
    Return whether *ty* denotes "no value": the Python ``None``
    singleton, a ``types.NoneType`` instance, or a ``types.Omitted``
    argument type.
    """
    # Idiom: fold the two isinstance() checks into a single tuple test.
    return ty is None or isinstance(ty, (types.NoneType, types.Omitted))
| {
"alphanum_fraction": 0.6217880776,
"author": null,
"avg_line_length": 31.5063176895,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6f6a540379a14969580f478d53b898587468901f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8044d6edc79e56847778164656a8c0856383503a",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "jpivarski/numba",
"max_forks_repo_path": "numba/cgutils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8044d6edc79e56847778164656a8c0856383503a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "jpivarski/numba",
"max_issues_repo_path": "numba/cgutils.py",
"max_line_length": 81,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8044d6edc79e56847778164656a8c0856383503a",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "jpivarski/numba",
"max_stars_repo_path": "numba/cgutils.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8346,
"path": null,
"reason": "from numba",
"repo": null,
"save_path": null,
"sha": null,
"size": 34909
} |
/-
Stalk of rings on basis.
https://stacks.math.columbia.edu/tag/007L
(just says that the category of rings is a type of algebraic structure)
-/
import to_mathlib.opens
import topology.basic
import sheaves.stalk_on_basis
import sheaves.presheaf_of_rings_on_basis
universe u
open topological_space
namespace stalk_of_rings_on_standard_basis
variables {α : Type u} [topological_space α]
variables {B : set (opens α )} {HB : opens.is_basis B}
-- Standard basis. TODO: Move somewhere else?
variables (Bstd : opens.univ ∈ B ∧ ∀ {U V}, U ∈ B → V ∈ B → U ∩ V ∈ B)
variables (F : presheaf_of_rings_on_basis α HB) (x : α)
include Bstd
-- The stalk at `x` of a presheaf of rings on a basis (carrier only;
-- the ring structure is constructed in the section below).
definition stalk_of_rings_on_standard_basis :=
stalk_on_basis F.to_presheaf_on_basis x
section stalk_of_rings_on_standard_basis_is_ring
open stalk_of_rings_on_standard_basis
-- Add.
-- Addition of representatives: restrict both sections to the
-- intersection of their domains (a basis open by Bstd.2) and add there.
protected def add_aux :
stalk_on_basis.elem F.to_presheaf_on_basis x →
stalk_on_basis.elem F.to_presheaf_on_basis x →
stalk_on_basis F.to_presheaf_on_basis x :=
λ s t,
⟦{U := s.U ∩ t.U,
  BU := Bstd.2 s.BU t.BU,
  Hx := ⟨s.Hx, t.Hx⟩,
  s := F.res s.BU _ (set.inter_subset_left _ _) s.s +
       F.res t.BU _ (set.inter_subset_right _ _) t.s}⟧
-- `add_aux` descends to the quotient: if a1 ≈ b1 and a2 ≈ b2 then the
-- sums agree after restriction to a common basis open U1 ∩ U2.
instance has_add : has_add (stalk_of_rings_on_standard_basis Bstd F x) :=
{ add := quotient.lift₂ (stalk_of_rings_on_standard_basis.add_aux Bstd F x) $
begin
  intros a1 a2 b1 b2 H1 H2,
  let F' := F.to_presheaf_on_basis,
  -- Unpack the witnesses of the two equivalences.
  rcases H1 with ⟨U1, ⟨BU1, ⟨HxU1, ⟨HU1a1U, HU1b1U, HresU1⟩⟩⟩⟩,
  rcases H2 with ⟨U2, ⟨BU2, ⟨HxU2, ⟨HU2a2U, HU2b2U, HresU2⟩⟩⟩⟩,
  have BU1U2 := Bstd.2 BU1 BU2,
  apply quotient.sound,
  -- U1 ∩ U2 is a common basis open containing x on which the sums agree.
  use [U1 ∩ U2, BU1U2, ⟨HxU1, HxU2⟩],
  use [set.inter_subset_inter HU1a1U HU2a2U, set.inter_subset_inter HU1b1U HU2b2U],
  repeat { rw (F.res_is_ring_hom _ _ _).map_add },
  -- Push the known equalities through a further restriction to U1 ∩ U2.
  have HresU1' :
    (F'.res BU1 BU1U2 (set.inter_subset_left _ _) ((F'.res a1.BU BU1 HU1a1U) (a1.s))) =
    (F'.res BU1 BU1U2 (set.inter_subset_left _ _) ((F'.res b1.BU BU1 HU1b1U) (b1.s)))
  := by rw HresU1,
  have HresU2' :
    (F'.res BU2 BU1U2 (set.inter_subset_right _ _) ((F'.res a2.BU BU2 HU2a2U) (a2.s))) =
    (F'.res BU2 BU1U2 (set.inter_subset_right _ _) ((F'.res b2.BU BU2 HU2b2U) (b2.s)))
  := by rw HresU2,
  -- Collapse composed restrictions with functoriality (Hcomp').
  repeat { rw ←(presheaf_on_basis.Hcomp' F') at HresU1' },
  repeat { rw ←(presheaf_on_basis.Hcomp' F') at HresU2' },
  repeat { rw ←(presheaf_on_basis.Hcomp' F') },
  rw [HresU1', HresU2'],
end }
-- Computation rule: addition of classes is `add_aux` on representatives.
@[simp] lemma has_add.mk : ∀ y z,
(⟦y⟧ + ⟦z⟧ : stalk_of_rings_on_standard_basis Bstd F x) =
(stalk_of_rings_on_standard_basis.add_aux Bstd F x) y z :=
λ y z, rfl
-- Associativity: both sides restrict to the basis open U ∩ V ∩ W,
-- where it reduces to associativity in the ring of sections.
instance add_semigroup : add_semigroup (stalk_of_rings_on_standard_basis Bstd F x) :=
{ add_assoc :=
  begin
    intros a b c,
    refine quotient.induction_on₃ a b c _,
    rintros ⟨U, BU, HxU, sU⟩ ⟨V, BV, HxV, sV⟩ ⟨W, BW, HxW, sW⟩,
    have BUVW := Bstd.2 (Bstd.2 BU BV) BW,
    have HUVWsub : U ∩ V ∩ W ⊆ U ∩ (V ∩ W)
    := λ x ⟨⟨HxU, HxV⟩, HxW⟩, ⟨HxU, ⟨HxV, HxW⟩⟩,
    apply quotient.sound,
    use [U ∩ V ∩ W, BUVW, ⟨⟨HxU, HxV⟩, HxW⟩],
    use [set.subset.refl _, HUVWsub],
    dsimp,
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { erw ←presheaf_on_basis.Hcomp' },
    rw add_assoc,
  end,
  ..stalk_of_rings_on_standard_basis.has_add Bstd F x }
-- Commutativity: restrict to U ∩ V and use commutativity of sections.
instance add_comm_semigroup : add_comm_semigroup (stalk_of_rings_on_standard_basis Bstd F x) :=
{ add_comm :=
  begin
    intros a b,
    refine quotient.induction_on₂ a b _,
    rintros ⟨U, BU, HxU, sU⟩ ⟨V, BV, HxV, sV⟩,
    apply quotient.sound,
    have BUV : U ∩ V ∈ B := Bstd.2 BU BV,
    have HUVUV : U ∩ V ⊆ U ∩ V := λ x HxUV, HxUV,
    have HUVVU : U ∩ V ⊆ V ∩ U := λ x ⟨HxU, HxV⟩, ⟨HxV, HxU⟩,
    use [U ∩ V, BUV, ⟨HxU, HxV⟩, HUVUV, HUVVU],
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    rw add_comm,
  end,
  ..stalk_of_rings_on_standard_basis.add_semigroup Bstd F x }
-- Zero.
-- Zero: the class of the zero section over the whole space
-- (opens.univ is a basis element by Bstd.1).
protected def zero : stalk_of_rings_on_standard_basis Bstd F x :=
⟦{U := opens.univ, BU := Bstd.1, Hx := trivial, s:= 0}⟧
instance has_zero : has_zero (stalk_of_rings_on_standard_basis Bstd F x) :=
{ zero := stalk_of_rings_on_standard_basis.zero Bstd F x }
-- Zero is a two-sided additive identity: restriction maps are ring
-- homomorphisms, so they send 0 to 0.
instance add_comm_monoid : add_comm_monoid (stalk_of_rings_on_standard_basis Bstd F x) :=
{ zero_add :=
  begin
    intros a,
    refine quotient.induction_on a _,
    rintros ⟨U, BU, HxU, sU⟩,
    apply quotient.sound,
    have HUsub : U ⊆ opens.univ ∩ U := λ x HxU, ⟨trivial, HxU⟩,
    use [U, BU, HxU, HUsub, set.subset.refl U],
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    erw (is_ring_hom.map_zero ((F.to_presheaf_on_basis).res _ _ _));
    try { apply_instance },
    rw zero_add,
    refl,
  end,
  add_zero :=
  begin
    intros a,
    refine quotient.induction_on a _,
    rintros ⟨U, BU, HxU, sU⟩,
    apply quotient.sound,
    have HUsub : U ⊆ U ∩ opens.univ := λ x HxU, ⟨HxU, trivial⟩,
    use [U, BU, HxU, HUsub, set.subset.refl U],
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    dsimp,
    erw (is_ring_hom.map_zero ((F.to_presheaf_on_basis).res _ _ _));
    try { apply_instance },
    rw add_zero,
    refl,
  end,
  ..stalk_of_rings_on_standard_basis.has_zero Bstd F x,
  ..stalk_of_rings_on_standard_basis.add_comm_semigroup Bstd F x }
-- Neg.
-- Negation of a representative: negate the section on the same open.
protected def neg_aux :
stalk_on_basis.elem F.to_presheaf_on_basis x →
stalk_on_basis F.to_presheaf_on_basis x :=
λ s, ⟦{U := s.U, BU := s.BU, Hx := s.Hx, s := -s.s}⟧
-- Negation descends to the quotient, since restrictions commute with neg.
instance has_neg : has_neg (stalk_of_rings_on_standard_basis Bstd F x) :=
{ neg := quotient.lift (stalk_of_rings_on_standard_basis.neg_aux Bstd F x) $
begin
  intros a b H,
  rcases H with ⟨U, ⟨BU, ⟨HxU, ⟨HUaU, HUbU, HresU⟩⟩⟩⟩,
  apply quotient.sound,
  use [U, BU, HxU, HUaU, HUbU],
  repeat { rw @is_ring_hom.map_neg _ _ _ _ _ (F.res_is_ring_hom _ _ _) },
  rw HresU,
end }
-- Left inverse law: -a + a restricts on U to -(s) + s = 0.
instance add_comm_group : add_comm_group (stalk_of_rings_on_standard_basis Bstd F x) :=
{ add_left_neg :=
  begin
    intros a,
    refine quotient.induction_on a _,
    rintros ⟨U, BU, HxU, sU⟩,
    apply quotient.sound,
    have HUUU : U ⊆ U ∩ U := λ x HxU, ⟨HxU, HxU⟩,
    have HUuniv : U ⊆ opens.univ := λ x HxU, trivial,
    use [U, BU, HxU, HUUU, HUuniv],
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    erw (is_ring_hom.map_neg ((F.to_presheaf_on_basis).res _ _ _));
    try { apply_instance },
    rw add_left_neg,
    erw (is_ring_hom.map_zero ((F.to_presheaf_on_basis).res _ _ _));
    try { apply_instance },
  end,
  ..stalk_of_rings_on_standard_basis.has_neg Bstd F x,
  ..stalk_of_rings_on_standard_basis.add_comm_monoid Bstd F x, }
-- Mul.
-- Multiplication of representatives: restrict both sections to the
-- intersection of their domains and multiply there (mirrors add_aux).
protected def mul_aux :
stalk_on_basis.elem F.to_presheaf_on_basis x →
stalk_on_basis.elem F.to_presheaf_on_basis x →
stalk_on_basis F.to_presheaf_on_basis x :=
λ s t,
⟦{U := s.U ∩ t.U,
  BU := Bstd.2 s.BU t.BU,
  Hx := ⟨s.Hx, t.Hx⟩,
  s := F.res s.BU _ (set.inter_subset_left _ _) s.s *
       F.res t.BU _ (set.inter_subset_right _ _) t.s}⟧
-- `mul_aux` descends to the quotient; same argument as for has_add,
-- with map_mul in place of map_add.
instance has_mul : has_mul (stalk_of_rings_on_standard_basis Bstd F x) :=
{ mul := quotient.lift₂ (stalk_of_rings_on_standard_basis.mul_aux Bstd F x) $
begin
  intros a1 a2 b1 b2 H1 H2,
  let F' := F.to_presheaf_on_basis,
  rcases H1 with ⟨U1, ⟨BU1, ⟨HxU1, ⟨HU1a1U, HU1b1U, HresU1⟩⟩⟩⟩,
  rcases H2 with ⟨U2, ⟨BU2, ⟨HxU2, ⟨HU2a2U, HU2b2U, HresU2⟩⟩⟩⟩,
  have BU1U2 := Bstd.2 BU1 BU2,
  apply quotient.sound,
  use [U1 ∩ U2, BU1U2, ⟨HxU1, HxU2⟩],
  use [set.inter_subset_inter HU1a1U HU2a2U, set.inter_subset_inter HU1b1U HU2b2U],
  repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
  -- Push the known equalities through a further restriction to U1 ∩ U2.
  have HresU1' :
    (F'.res BU1 BU1U2 (set.inter_subset_left _ _) ((F'.res a1.BU BU1 HU1a1U) (a1.s))) =
    (F'.res BU1 BU1U2 (set.inter_subset_left _ _) ((F'.res b1.BU BU1 HU1b1U) (b1.s)))
  := by rw HresU1,
  have HresU2' :
    (F'.res BU2 BU1U2 (set.inter_subset_right _ _) ((F'.res a2.BU BU2 HU2a2U) (a2.s))) =
    (F'.res BU2 BU1U2 (set.inter_subset_right _ _) ((F'.res b2.BU BU2 HU2b2U) (b2.s)))
  := by rw HresU2,
  repeat { rw ←(presheaf_on_basis.Hcomp' F') at HresU1' },
  repeat { rw ←(presheaf_on_basis.Hcomp' F') at HresU2' },
  repeat { rw ←(presheaf_on_basis.Hcomp' F') },
  rw [HresU1', HresU2'],
end}
-- Computation rule: multiplication of classes is `mul_aux` on representatives.
@[simp] lemma has_mul.mk : ∀ y z,
(⟦y⟧ * ⟦z⟧ : stalk_of_rings_on_standard_basis Bstd F x) =
(stalk_of_rings_on_standard_basis.mul_aux Bstd F x) y z :=
λ y z, rfl
-- Multiplicative associativity, by restriction to U ∩ V ∩ W.
instance mul_semigroup : semigroup (stalk_of_rings_on_standard_basis Bstd F x) :=
{ mul_assoc :=
  begin
    intros a b c,
    refine quotient.induction_on₃ a b c _,
    rintros ⟨U, BU, HxU, sU⟩ ⟨V, BV, HxV, sV⟩ ⟨W, BW, HxW, sW⟩,
    have BUVW := Bstd.2 (Bstd.2 BU BV) BW,
    have HUVWsub : U ∩ V ∩ W ⊆ U ∩ (V ∩ W)
    := λ x ⟨⟨HxU, HxV⟩, HxW⟩, ⟨HxU, ⟨HxV, HxW⟩⟩,
    apply quotient.sound,
    use [U ∩ V ∩ W, BUVW, ⟨⟨HxU, HxV⟩, HxW⟩],
    use [set.subset.refl _, HUVWsub],
    repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    rw mul_assoc,
  end,
  ..stalk_of_rings_on_standard_basis.has_mul Bstd F x }
-- Multiplicative commutativity, by restriction to U ∩ V.
instance mul_comm_semigroup : comm_semigroup (stalk_of_rings_on_standard_basis Bstd F x) :=
{ mul_comm :=
  begin
    intros a b,
    refine quotient.induction_on₂ a b _,
    rintros ⟨U, BU, HxU, sU⟩ ⟨V, BV, HxV, sV⟩,
    apply quotient.sound,
    have BUV : U ∩ V ∈ B := Bstd.2 BU BV,
    have HUVUV : U ∩ V ⊆ U ∩ V := λ x HxUV, HxUV,
    have HUVVU : U ∩ V ⊆ V ∩ U := λ x ⟨HxU, HxV⟩, ⟨HxV, HxU⟩,
    use [U ∩ V, BUV, ⟨HxU, HxV⟩, HUVUV, HUVVU],
    repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    rw mul_comm,
  end,
  ..stalk_of_rings_on_standard_basis.mul_semigroup Bstd F x }
-- One.
-- One: the class of the unit section over the whole space.
protected def one : stalk_of_rings_on_standard_basis Bstd F x :=
⟦{U := opens.univ, BU := Bstd.1, Hx := trivial, s:= 1}⟧
instance has_one : has_one (stalk_of_rings_on_standard_basis Bstd F x) :=
{ one := stalk_of_rings_on_standard_basis.one Bstd F x }
-- One is a two-sided multiplicative identity: restriction maps are
-- ring homomorphisms, so they send 1 to 1.
instance mul_comm_monoid : comm_monoid (stalk_of_rings_on_standard_basis Bstd F x) :=
{ one_mul :=
  begin
    intros a,
    refine quotient.induction_on a _,
    rintros ⟨U, BU, HxU, sU⟩,
    apply quotient.sound,
    have HUsub : U ⊆ opens.univ ∩ U := λ x HxU, ⟨trivial, HxU⟩,
    use [U, BU, HxU, HUsub, set.subset.refl U],
    repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    erw (is_ring_hom.map_one ((F.to_presheaf_on_basis).res _ _ _));
    try { apply_instance },
    rw one_mul,
    refl,
  end,
  mul_one :=
  begin
    intros a,
    refine quotient.induction_on a _,
    rintros ⟨U, BU, HxU, sU⟩,
    apply quotient.sound,
    have HUsub : U ⊆ U ∩ opens.univ := λ x HxU, ⟨HxU, trivial⟩,
    use [U, BU, HxU, HUsub, set.subset.refl U],
    repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    dsimp,
    erw (is_ring_hom.map_one ((F.to_presheaf_on_basis).res _ _ _));
    try { apply_instance },
    rw mul_one,
    refl,
  end,
  ..stalk_of_rings_on_standard_basis.has_one Bstd F x,
  ..stalk_of_rings_on_standard_basis.mul_comm_semigroup Bstd F x }
-- Stalks of rings on standard basis are rings.
-- Distributivity laws; together with the additive group and the
-- multiplicative monoid above this makes the stalk a commutative ring.
instance comm_ring : comm_ring (stalk_of_rings_on_standard_basis Bstd F x) :=
{ left_distrib :=
  begin
    intros a b c,
    refine quotient.induction_on₃ a b c _,
    rintros ⟨U, BU, HxU, sU⟩ ⟨V, BV, HxV, sV⟩ ⟨W, BW, HxW, sW⟩,
    have BUVW := Bstd.2 (Bstd.2 BU BV) BW,
    -- a*(b+c) lives on U ∩ (V ∩ W); a*b + a*c lives on (U ∩ V) ∩ (U ∩ W).
    have HUVWsub : U ∩ V ∩ W ⊆ U ∩ (V ∩ W)
    := λ x ⟨⟨HxU, HxV⟩, HxW⟩, ⟨HxU, ⟨HxV, HxW⟩⟩,
    have HUVWsub2 : U ∩ V ∩ W ⊆ U ∩ V ∩ (U ∩ W)
    := λ x ⟨⟨HxU, HxV⟩, HxW⟩, ⟨⟨HxU, HxV⟩, ⟨HxU, HxW⟩⟩,
    apply quotient.sound,
    use [U ∩ V ∩ W, BUVW, ⟨⟨HxU, HxV⟩, HxW⟩, HUVWsub, HUVWsub2],
    repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    rw mul_add,
  end,
  right_distrib :=
  begin
    intros a b c,
    refine quotient.induction_on₃ a b c _,
    rintros ⟨U, BU, HxU, sU⟩ ⟨V, BV, HxV, sV⟩ ⟨W, BW, HxW, sW⟩,
    have BUVW := Bstd.2 (Bstd.2 BU BV) BW,
    have HUVWrfl : U ∩ V ∩ W ⊆ U ∩ V ∩ W := λ x Hx, Hx,
    have HUVWsub : U ∩ V ∩ W ⊆ U ∩ W ∩ (V ∩ W)
    := λ x ⟨⟨HxU, HxV⟩, HxW⟩, ⟨⟨HxU, HxW⟩, ⟨HxV, HxW⟩⟩,
    apply quotient.sound,
    use [U ∩ V ∩ W, BUVW, ⟨⟨HxU, HxV⟩, HxW⟩, HUVWrfl, HUVWsub],
    repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    repeat { rw (F.res_is_ring_hom _ _ _).map_mul },
    repeat { rw (F.res_is_ring_hom _ _ _).map_add },
    repeat { rw ←presheaf_on_basis.Hcomp' },
    rw add_mul,
  end,
  ..stalk_of_rings_on_standard_basis.add_comm_group Bstd F x,
  ..stalk_of_rings_on_standard_basis.mul_comm_monoid Bstd F x
}
end stalk_of_rings_on_standard_basis_is_ring
end stalk_of_rings_on_standard_basis
| {
"alphanum_fraction": null,
"author": "ramonfmir",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/ramonfmir-lean-scheme/lean-scheme-6d3ec18fecfd174b79d0ce5c85a783f326dd50f6/src/sheaves/stalk_of_rings_on_standard_basis.lean",
"reason": null,
"repo": "lean-scheme",
"save_path": "github-repos/lean/ramonfmir-lean-scheme",
"sha": "6d3ec18fecfd174b79d0ce5c85a783f326dd50f6",
"size": null
} |
#!/usr/bin/env python3
import os
import sys
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from optparse import OptionParser
from matplotlib.ticker import NullFormatter
# Usage: python3 genotypes_hwe_vs_maf.py -o output_file_prefix -e plink_hardy_file -f plink_afreq_file
def help():
    """Print usage information for this script and exit.

    NOTE(review): the name shadows the ``help`` builtin; kept because
    the __main__ block below calls it by this name.
    """
    # BUG FIX: the banner printed the name of a different script
    # (genotypes_missing_vs_mapped_reads.py) — a copy-paste leftover
    # contradicting the Usage line below.  Also fixed "genotypeing".
    print("====== genotypes_hwe_vs_maf.py =====")
    print("Plot SNPs hwe P value vs. MAF after genotyping")
    print("-o <output file prefix> the output file prefix")
    print("-e <plink2 hardy file> the plink2 hardy file")
    print("-f <plink2 afreq file> the plink2 afreq file")
    print("Usage: python3 genotypes_hwe_vs_maf.py -o output_file_prefix -e plink_hardy_file -f plink_afreq_file")
    sys.exit()
def read_hardy(file):
    """Load a plink2 .hardy file.

    Keeps the ID and P columns, derives chromosome and position (in Mb)
    from the "chr:pos"-style ID, adds a -log10(P) column, and returns
    the rows sorted by (chr, pos).
    """
    hwe = pd.read_csv(file, delimiter="\t", dtype=str, usecols=["ID", "P"])
    id_parts = hwe["ID"].str.split(':')
    hwe["chr"] = id_parts.str[0]
    # Position in megabases.
    hwe["pos"] = pd.to_numeric(id_parts.str[1]) / 1e6
    hwe["P"] = pd.to_numeric(hwe["P"])
    hwe["-log(P)"] = -np.log10(hwe["P"])
    return hwe.sort_values(by=["chr", "pos"]).reset_index(drop=True)
def read_afreq(file):
    """Load a plink2 .afreq file.

    Keeps the ID and ALT_FREQS columns, derives chromosome and position
    (in Mb) from the "chr:pos"-style ID, folds the alternate allele
    frequency into a minor allele frequency (MAF), and returns the rows
    sorted by (chr, pos).
    """
    afreq = pd.read_csv(file, delimiter="\t", dtype=str, usecols=["ID", "ALT_FREQS"])
    id_parts = afreq["ID"].str.split(':')
    afreq["chr"] = id_parts.str[0]
    # Position in megabases.
    afreq["pos"] = pd.to_numeric(id_parts.str[1]) / 1e6
    alt = pd.to_numeric(afreq["ALT_FREQS"])
    afreq["ALT_FREQS"] = alt
    # MAF = min(freq, 1 - freq).
    afreq["MAF"] = alt.where(alt <= 0.5, 1 - alt)
    return afreq.sort_values(by=["chr", "pos"]).reset_index(drop=True)
def plot_hwe_vs_maf(hwe_maf, output_file):
    """2d histogram of HWE -log(P) against MAF with marginal histograms,
    written to *output_file*.
    """
    nullfmt = NullFormatter()
    # Axes layout: a main panel plus top and right marginals.
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02
    rect_main = [left, bottom, width, height]
    rect_top = [left, bottom_h, width, 0.2]
    rect_right = [left_h, bottom, 0.2, height]
    # Start with a rectangular figure.
    plt.figure(1, figsize=(10, 10))
    ax_main = plt.axes(rect_main)
    ax_top = plt.axes(rect_top)
    ax_right = plt.axes(rect_right)
    # Hide tick labels duplicated from the main axes.
    ax_top.xaxis.set_major_formatter(nullfmt)
    ax_right.yaxis.set_major_formatter(nullfmt)
    # Main 2d histogram.
    sns.histplot(ax=ax_main, data=hwe_maf, bins=100,
                 x="MAF", y="HWE_-log(P)")
    ax_main.set(xlabel="Minor Allele Frequency", ylabel="Hardy–Weinberg -log(P)")
    # Marginal histograms.
    sns.histplot(ax=ax_top, data=hwe_maf,
                 x="MAF", bins=100)
    ax_top.set(xlabel="", ylabel="Number of SNPs",
               title="SNPs Hardy–Weinberg -log(P) vs Minor Allele Frequency, #: " + str(len(hwe_maf)))
    sns.histplot(ax=ax_right, data=hwe_maf, y="HWE_-log(P)", bins=100)
    ax_right.set(xlabel="Number of SNPs", ylabel="", title="")
    plt.savefig(output_file)
    plt.close()
def plot_hwe_vs_maf_poly(poly_hwe_maf, mono_threshold, output_file):
    """Same layout as plot_hwe_vs_maf(), but for the polymorphic subset
    (SNPs with MAF above *mono_threshold*), written to *output_file*.
    """
    nullfmt = NullFormatter()
    # Axes layout: a main panel plus top and right marginals.
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02
    rect_main = [left, bottom, width, height]
    rect_top = [left, bottom_h, width, 0.2]
    rect_right = [left_h, bottom, 0.2, height]
    # Start with a rectangular figure.
    plt.figure(1, figsize=(10, 10))
    ax_main = plt.axes(rect_main)
    ax_top = plt.axes(rect_top)
    ax_right = plt.axes(rect_right)
    # Hide tick labels duplicated from the main axes.
    ax_top.xaxis.set_major_formatter(nullfmt)
    ax_right.yaxis.set_major_formatter(nullfmt)
    # Main 2d histogram.
    sns.histplot(ax=ax_main, data=poly_hwe_maf, bins=100,
                 x="MAF", y="HWE_-log(P)")
    ax_main.set(xlabel="Minor Allele Frequency", ylabel="Hardy–Weinberg -log(P)")
    # Marginal histograms.
    sns.histplot(ax=ax_top, data=poly_hwe_maf,
                 x="MAF", bins=100)
    ax_top.set(xlabel="", ylabel="Number of SNPs",
               title="Polymorphic SNPs Hardy–Weinberg -log(P) vs Minor Allele Frequency, #: "
                     + str(len(poly_hwe_maf)) + " (MAF > " + str(mono_threshold) + ")")
    sns.histplot(ax=ax_right, data=poly_hwe_maf, y="HWE_-log(P)", bins=100)
    ax_right.set(xlabel="Number of SNPs", ylabel="", title="")
    plt.savefig(output_file)
    plt.close()
if __name__=="__main__":
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option('-o', type="string", nargs=1, dest="out_file_prefix", help="<output file prefix>")
    parser.add_option('-e', type="string", nargs=1, dest="hardy", help="<plink2 hardy file>")
    # Fixed: the -f help string was missing its closing '>'.
    parser.add_option('-f', type="string", nargs=1, dest="afreq", help="<plink2 afreq file>")
    parser.add_option('-H', action="store_true", dest="help", help="Displays help screen")
    options, args = parser.parse_args()
    if len(sys.argv) == 1 or options.help is not None:
        help()
    # BUG FIX: the original used ``raise "<message>"`` which is itself a
    # TypeError in Python 3 (exceptions must derive from BaseException);
    # raise ValueError with the intended message instead.
    if options.out_file_prefix is not None:
        output_file_prefix = options.out_file_prefix
    else:
        raise ValueError("Please provide a output file")
    if options.hardy is not None:
        hardy = options.hardy
    else:
        raise ValueError("Please provide a plink2 hardy file")
    if options.afreq is not None:
        afreq = options.afreq
    else:
        raise ValueError("Please provide a plink2 afreq file")
    hwe = read_hardy(hardy)
    maf = read_afreq(afreq)
    # Left-join the MAF values onto the HWE table by genomic coordinate.
    hwe_maf = pd.merge(hwe[["chr", "pos", "-log(P)"]],
                       maf[["chr", "pos", "MAF"]], on=["chr", "pos"], how="left")
    hwe_maf = hwe_maf.rename(columns={"-log(P)": "HWE_-log(P)"})
    plot_hwe_vs_maf(hwe_maf, output_file_prefix + "SNPs_hwe_vs_maf.png")
    # SNPs with MAF at or below this threshold are treated as monomorphic.
    mono_threshold = 0.005
    plot_hwe_vs_maf_poly(hwe_maf[hwe_maf["MAF"] > mono_threshold].reset_index(drop=True), mono_threshold, output_file_prefix + "poly_SNPs_hwe_vs_maf.png")
| {
"alphanum_fraction": 0.6840962547,
"author": null,
"avg_line_length": 40.0507246377,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "19a884eb2c50cf0a9e0c4c7f11c5646c795a10e7",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "23547ddf27235249690036b1245533239b239c9d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Palmer-Lab-UCSD/HS-Rats-Genotyping-Pipeline",
"max_forks_repo_path": "HPC/quality_control/util/genotypes_hwe_vs_maf.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "23547ddf27235249690036b1245533239b239c9d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Palmer-Lab-UCSD/HS-Rats-Genotyping-Pipeline",
"max_issues_repo_path": "HPC/quality_control/util/genotypes_hwe_vs_maf.py",
"max_line_length": 151,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "23547ddf27235249690036b1245533239b239c9d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Palmer-Lab-UCSD/HS-Rats-Genotyping-Pipeline",
"max_stars_repo_path": "HPC/quality_control/util/genotypes_hwe_vs_maf.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1735,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5527
} |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def generate_colours(df, column, cmap_name):
    """Map each unique value of ``df[column]`` (sorted) to a colour
    sampled evenly from the named colormap; returns {label: rgba}.
    """
    # TODO: they get generated a little different than what pandas does automatically
    labels = np.sort(df[column].unique())
    cmap = plt.get_cmap(cmap_name)
    sample_points = np.linspace(0, 1, len(labels) + 1)
    return dict(zip(labels, cmap(sample_points)))
def populate_plot_options(kind, alpha, cmap_name, df=None,
                          index=False, legend=False, stacked=True):
    """Build a keyword-argument dict for pandas/matplotlib plotting.

    Parameters
    ----------
    kind : plot kind ('line', 'scatter', 'bar', 'barh', ...)
    alpha : transparency, passed straight through
    cmap_name : colormap name; used directly, or to colour by *legend*
    df : optional DataFrame; when given and non-empty, the colour and
        label are taken from ``df.loc[index, legend]``
    index : row label used to pick the colour/label from *df*
    legend : column of *df* holding the grouping labels
    stacked : whether bar plots are stacked

    Notes
    -----
    BUG FIX: the original signature used a mutable default argument
    (``df=pd.DataFrame()``); ``None`` is now the sentinel, which is
    backward compatible for all callers.
    """
    plot_options = dict()
    plot_options['kind'] = kind
    plot_options['alpha'] = alpha
    if df is not None and not df.empty:
        # Colour this series consistently with its group label.
        colour_dict = generate_colours(df, legend, cmap_name)
        label = df.loc[index, legend]
        plot_options['c'] = colour_dict[label]
        plot_options['label'] = str(label)
    else:
        plot_options['colormap'] = cmap_name
    if kind == 'line':
        plot_options['linewidth'] = 2
        # plot_options['marker'] = '.'
        # plot_options['markersize'] = 12
        # TODO: move default marker size to MatplotlibSettings.py
    elif kind == 'scatter':
        plot_options['edgecolors'] = 'face'
        plot_options['s'] = 12
    elif 'bar' in kind:
        plot_options['stacked'] = stacked
        plot_options['edgecolor'] = 'none'
    return plot_options
"alphanum_fraction": 0.6409495549,
"author": null,
"avg_line_length": 31.3488372093,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8f517e54dcbe1939d84f92545cb1b2edc806dcad",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6f215e59903395b2fccfd54f0d083cee563d39e0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "anaandresarroyo/GarminDataAnalyserOld",
"max_forks_repo_path": "database/plot.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6f215e59903395b2fccfd54f0d083cee563d39e0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "anaandresarroyo/GarminDataAnalyserOld",
"max_issues_repo_path": "database/plot.py",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6f215e59903395b2fccfd54f0d083cee563d39e0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "anaandresarroyo/GarminDataAnalyserOld",
"max_stars_repo_path": "database/plot.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 322,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1348
} |
"""Power spectrum plotting functions.
Notes
-----
This file contains functions for plotting power spectra, that take in data directly.
"""
from inspect import isfunction
from itertools import repeat, cycle
import numpy as np
from scipy.stats import sem
from fooof.core.modutils import safe_import, check_dependency
from fooof.plts.settings import PLT_FIGSIZES
from fooof.plts.style import style_spectrum_plot, style_plot
from fooof.plts.utils import check_ax, add_shades, savefig, check_plot_kwargs
plt = safe_import('.pyplot', 'matplotlib')
###################################################################################################
###################################################################################################
@savefig
@style_plot
@check_dependency(plt, 'matplotlib')
def plot_spectra(freqs, power_spectra, log_freqs=False, log_powers=False,
                 colors=None, labels=None, ax=None, **plot_kwargs):
    """Plot one or multiple power spectra.

    Parameters
    ----------
    freqs : 1d or 2d array or list of 1d array
        Frequency values, to be plotted on the x-axis.
    power_spectra : 1d or 2d array or list of 1d array
        Power values, to be plotted on the y-axis.
    log_freqs : bool, optional, default: False
        Whether to plot the frequency axis in log spacing.
    log_powers : bool, optional, default: False
        Whether to plot the power axis in log spacing.
    colors : list of str, optional, default: None
        Line colors of the spectra.
    labels : list of str, optional, default: None
        Legend labels for the spectra.
    ax : matplotlib.Axes, optional
        Figure axes upon which to plot.
    **plot_kwargs
        Keyword arguments to pass into the ``style_plot``.
    """
    ax = check_ax(ax, plot_kwargs.pop('figsize', PLT_FIGSIZES['spectral']))
    # Create the plot
    plot_kwargs = check_plot_kwargs(plot_kwargs, {'linewidth' : 2.0})
    # Make inputs iterable if need to be passed multiple times to plot each spectrum.
    # A single 1d power spectrum is promoted to a (1, n) array; a single 1d
    # freqs array is repeated so it pairs with every spectrum in the zip below.
    plt_powers = np.reshape(power_spectra, (1, -1)) if np.ndim(power_spectra) == 1 else \
        power_spectra
    plt_freqs = repeat(freqs) if isinstance(freqs, np.ndarray) and freqs.ndim == 1 else freqs
    # Set labels: a 'label' keyword takes effect only when `labels` was not given.
    labels = plot_kwargs.pop('label') if 'label' in plot_kwargs.keys() and labels is None else labels
    labels = repeat(labels) if not isinstance(labels, list) else cycle(labels)
    colors = repeat(colors) if not isinstance(colors, list) else cycle(colors)
    # Plot each spectrum against its frequency axis
    for freqs, powers, color, label in zip(plt_freqs, plt_powers, colors, labels):
        # Set plot data, logging if requested, and collect color, if absent
        freqs = np.log10(freqs) if log_freqs else freqs
        powers = np.log10(powers) if log_powers else powers
        if color:
            plot_kwargs['color'] = color
        ax.plot(freqs, powers, label=label, **plot_kwargs)
    style_spectrum_plot(ax, log_freqs, log_powers)
@savefig
@check_dependency(plt, 'matplotlib')
def plot_spectra_shading(freqs, power_spectra, shades, shade_colors='r',
                         add_center=False, ax=None, **plot_kwargs):
    """Plot power spectra with one or more shaded frequency regions.

    Parameters
    ----------
    freqs : 1d or 2d array or list of 1d array
        Frequency values, to be plotted on the x-axis.
    power_spectra : 1d or 2d array or list of 1d array
        Power values, to be plotted on the y-axis.
    shades : list of [float, float] or list of list of [float, float]
        Shaded region(s) to add to plot, defined as [lower_bound, upper_bound].
    shade_colors : str or list of string
        Color(s) to plot shades.
    add_center : bool, optional, default: False
        Whether to add a line at the center point of the shaded regions.
    ax : matplotlib.Axes, optional
        Figure axes upon which to plot.
    **plot_kwargs
        Keyword arguments to pass into :func:`~.plot_spectra`.

    Notes
    -----
    Parameters for `plot_spectra` can also be passed into this function as keyword arguments.
    This includes `log_freqs`, `log_powers` & `labels`. See `plot_spectra` for usage details.
    """

    ax = check_ax(ax, plot_kwargs.pop('figsize', PLT_FIGSIZES['spectral']))

    # Pull out the log-scaling settings; they are also forwarded to plot_spectra
    log_freqs = plot_kwargs.get('log_freqs', False)
    log_powers = plot_kwargs.get('log_powers', False)

    # Draw the spectra first, then overlay the shaded region(s)
    plot_spectra(freqs, power_spectra, ax=ax, **plot_kwargs)
    add_shades(ax, shades, shade_colors, add_center, log_freqs)

    style_spectrum_plot(ax, log_freqs, log_powers)
@savefig
@style_plot
@check_dependency(plt, 'matplotlib')
def plot_spectra_yshade(freqs, power_spectra, shade='std', average='mean', scale=1,
                        log_freqs=False, log_powers=False, color=None, label=None,
                        ax=None, **plot_kwargs):
    """Plot standard deviation or error as a shaded region around the mean spectrum.

    Parameters
    ----------
    freqs : 1d array
        Frequency values, to be plotted on the x-axis.
    power_spectra : 1d or 2d array
        Power values, to be plotted on the y-axis. ``shade`` must be provided if 1d.
    shade : 'std', 'sem', 1d array or callable, optional, default: 'std'
        Approach for shading above/below the mean spectrum.
    average : 'mean', 'median' or callable, optional, default: 'mean'
        Averaging approach for the average spectrum to plot. Only used if power_spectra is 2d.
    scale : int, optional, default: 1
        Factor to multiply the plotted shade by.
    log_freqs : bool, optional, default: False
        Whether to plot the frequency axis in log spacing.
    log_powers : bool, optional, default: False
        Whether to plot the power axis in log spacing.
    color : str, optional, default: None
        Line color of the spectrum.
    label : str, optional, default: None
        Legend label for the spectrum.
    ax : matplotlib.Axes, optional
        Figure axes upon which to plot.
    **plot_kwargs
        Keyword arguments to be passed to `plot_spectra` or to the plot call.

    Raises
    ------
    ValueError
        If ``power_spectra`` is not 2d while ``shade`` is a string or callable.
    """

    # `callable()` (instead of `inspect.isfunction`) accepts any callable for
    # shade/average -- builtins, partials, ufuncs -- as the docstring promises
    if (isinstance(shade, str) or callable(shade)) and power_spectra.ndim != 2:
        raise ValueError('Power spectra must be 2d if shade is not given.')

    ax = check_ax(ax, plot_kwargs.pop('figsize', PLT_FIGSIZES['spectral']))

    # Set plot data & labels, logging if requested
    plt_freqs = np.log10(freqs) if log_freqs else freqs
    plt_powers = np.log10(power_spectra) if log_powers else power_spectra

    # Organize mean spectrum to plot
    avg_funcs = {'mean' : np.mean, 'median' : np.median}

    if isinstance(average, str) and plt_powers.ndim == 2:
        avg_powers = avg_funcs[average](plt_powers, axis=0)
    elif callable(average) and plt_powers.ndim == 2:
        avg_powers = average(plt_powers)
    else:
        # Already a single spectrum: plot it as-is
        avg_powers = plt_powers

    # Plot average power spectrum
    ax.plot(plt_freqs, avg_powers, linewidth=2.0, color=color, label=label)

    # Organize shading to plot
    shade_funcs = {'std' : np.std, 'sem' : sem}

    if isinstance(shade, str):
        shade_vals = scale * shade_funcs[shade](plt_powers, axis=0)
    elif callable(shade):
        shade_vals = scale * shade(plt_powers)
    else:
        # A precomputed 1d array of shade values
        shade_vals = scale * shade

    upper_shade = avg_powers + shade_vals
    lower_shade = avg_powers - shade_vals

    # Plot +/- yshading around spectrum
    alpha = plot_kwargs.pop('alpha', 0.25)
    ax.fill_between(plt_freqs, lower_shade, upper_shade,
                    alpha=alpha, color=color, **plot_kwargs)

    style_spectrum_plot(ax, log_freqs, log_powers)
| {
"alphanum_fraction": 0.6618884797,
"author": null,
"avg_line_length": 38.9948717949,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c68acc69832badc8df555ef98e487364da9beb77",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 42,
"max_forks_repo_forks_event_max_datetime": "2022-02-19T00:00:58.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-01-31T17:24:58.000Z",
"max_forks_repo_head_hexsha": "f7b4ed8d074a3eaa03707c2c3c09413e6ff74192",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "gazfaris/fooof",
"max_forks_repo_path": "fooof/plts/spectra.py",
"max_issues_count": 128,
"max_issues_repo_head_hexsha": "f7b4ed8d074a3eaa03707c2c3c09413e6ff74192",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T22:09:48.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-11-03T16:07:05.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "gazfaris/fooof",
"max_issues_repo_path": "fooof/plts/spectra.py",
"max_line_length": 101,
"max_stars_count": 154,
"max_stars_repo_head_hexsha": "f7b4ed8d074a3eaa03707c2c3c09413e6ff74192",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "dieloz/fooof",
"max_stars_repo_path": "fooof/plts/spectra.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T12:02:33.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-11-05T16:50:21.000Z",
"num_tokens": 1910,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7604
} |
""" Train a VAE model used to filter and enhance 3d points """
import json
from datetime import datetime
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import cameras
import data_utils
import viz
from top_vae_3d_pose import data_handler, losses, models
from top_vae_3d_pose.args_def import ENVIRON as ENV
# Use the non-interactive Agg backend: figures are only written to disk
matplotlib.use('Agg')
# matplotlib.use('TkAgg')
def to_world(points_3d, key2d, root_pos):
    """Transform coordinates from the camera frame to a root-centered world frame.

    Args:
        points_3d: 3d pose(s); the global root position is added back before the
            camera-to-world transform (assumes 32 H3.6M joints flattened per row
            -- TODO confirm shape against callers).
        key2d: 2d dataset key; its first 3 elements identify the (subject,
            action, sequence-name) used to look up the matching 3d key.
        root_pos: root joint position(s), reshaped to (1, 3) by the caller.

    Returns:
        World-frame pose(s) with the root translation subtracted again
        (root-centered).
    """
    _, _, rcams = data_handler.get_data_params()
    n_cams = 4
    n_joints_h36m = 32

    # Add global position back
    points_3d = points_3d + np.tile(root_pos, [1, n_joints_h36m])

    # Load the appropriate camera
    key3d = data_handler.get_key3d(key2d[:3])
    subj, _, sname = key3d
    subj = int(subj)

    # The sequence name encodes the camera name after the first '.'
    cname = sname.split('.')[1]  # <-- camera name
    scams = {(subj, c+1): rcams[(subj, c+1)] for c in range(n_cams)}  # cams of this subject
    scam_idx = [scams[(subj, c+1)][-1] for c in range(n_cams)].index(cname)  # index of camera used
    the_cam = scams[(subj, scam_idx+1)]  # <-- the camera used
    R, T, f, c, k, p, name = the_cam
    assert name == cname

    def cam2world_centered(data_3d_camframe):
        # Apply the inverse camera rotation/translation joint-by-joint
        data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
        data_3d_worldframe = data_3d_worldframe.reshape((-1, n_joints_h36m*3))
        # subtract root translation so the pose is centered on its root joint
        return data_3d_worldframe - np.tile(data_3d_worldframe[:, :3], (1, n_joints_h36m))

    # Apply inverse rotation and translation
    return cam2world_centered(points_3d)
def gen_sample_img(dataset, model=None, idx=None):
    """Plot 3d poses: input image, predicted 2d/3d pose, and ground truth.

    Saves two figures per sample under imgs/3d_effnet_vae/test/: one with
    (image, 2d pose, predicted 3d pose) and a second that also includes the
    ground-truth 3d pose. Pass 'idx' to select samples, otherwise idx will be
    randomly generated.

    Args:
        dataset: dataset exposing x_data (2d poses), y_data (3d poses),
            mapkeys and x/y normalization metadata.
        model: callable mapping (points_2d, frame_inputs) to
            (out_3d, out_3d_vae) predictions.
        idx: sample indices to plot; random if None.
            NOTE(review): only the first `nsamples` (10) entries are plotted
            even when more indices are passed -- confirm this is intended.
    """
    # select random samples
    nsamples = 10
    if idx is None:
        idx = np.random.choice(dataset.x_data.shape[0], nsamples, replace=False)
    keys2d = dataset.mapkeys[idx, :]

    # Frames are loaded twice: preprocessed as network input, raw for display
    img_frames = data_handler.load_frames_from_keys(keys2d, efficientnet_preprocess=True)
    img_frames2 = data_handler.load_frames_from_keys(keys2d, efficientnet_preprocess=False)

    points_2d = dataset.x_data[idx, :]
    points_3d = dataset.y_data[idx, :]

    out_3d, out_3d_vae = model(points_2d, frame_inputs=img_frames, training=False)

    # unnormalize data back to the original coordinate scale
    points_2d = data_utils.unNormalizeData(points_2d,
                                           dataset.x_metadata.mean,
                                           dataset.x_metadata.std,
                                           dataset.x_metadata.dim_ignored)
    points_3d = data_utils.unNormalizeData(points_3d,
                                           dataset.y_metadata.mean,
                                           dataset.y_metadata.std,
                                           dataset.y_metadata.dim_ignored)
    out_3d = data_utils.unNormalizeData(out_3d,
                                        dataset.y_metadata.mean,
                                        dataset.y_metadata.std,
                                        dataset.y_metadata.dim_ignored)
    out_3d_vae = data_utils.unNormalizeData(out_3d_vae,
                                            dataset.y_metadata.mean,
                                            dataset.y_metadata.std,
                                            dataset.y_metadata.dim_ignored)

    # Move poses from camera frame back to (root-centered) world coordinates
    if ENV.FLAGS.camera_frame:
        root_pos = dataset.y_metadata.root_positions[idx, :]

        points_3d = np.array([to_world(p3d.reshape((1, -1)),
                                       keys2d[i],
                                       root_pos[i].reshape((1, 3)))[0]
                              for i, p3d in enumerate(points_3d)])
        out_3d = np.array([to_world(p3d.reshape((1, -1)), keys2d[i],
                                    root_pos[i].reshape((1, 3)))[0]
                           for i, p3d in enumerate(out_3d)])
        out_3d_vae = np.array([to_world(p3d.reshape((1, -1)), keys2d[i],
                                        root_pos[i].reshape((1, 3)))[0]
                               for i, p3d in enumerate(out_3d_vae)])

    # 1080p = 1,920 x 1,080 -- figure size chosen to render at full-HD
    for imgi in np.arange(nsamples):
        # First figure: input image, 2d pose prediction, 3d pose prediction
        subplot_idx, exidx = 1, 0
        fig = plt.figure(figsize=(19.2, 5.4))
        nfigs = 3
        gs1 = gridspec.GridSpec(1, nfigs)
        gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
        plt.axis('off')

        ax5 = plt.subplot(gs1[subplot_idx-1])
        plt.imshow(img_frames2[imgi])
        ax5.axis('off')
        ax5.title.set_text('Imagen')
        ax5.title.set_size(24)

        # Plot 2d pose
        ax1 = plt.subplot(gs1[subplot_idx])
        p2d = points_2d[imgi, :]
        viz.show2Dpose(p2d, ax1)
        ax1.invert_yaxis()
        ax1.title.set_text('Predicción Pose 2D')
        ax1.title.set_size(24)
        ax1.axis('off')

        # Plot 3d predictions
        ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
        p3d = out_3d[imgi, :]
        viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")
        ax3.title.set_text('Predicción Pose 3D')
        ax3.title.set_size(24)

        # # Plot 3d predictions + vae
        # ax4 = plt.subplot(gs1[subplot_idx+2], projection='3d')
        # p3d = out_3d_vae[exidx, :]
        # viz.show3Dpose(p3d, ax4, lcolor="#9b59b6", rcolor="#2ecc71")
        # ax4.title.set_text('Predicción Pose 3D')

        # # Plot 3d gt
        # ax2 = plt.subplot(gs1[subplot_idx+2], projection='3d')
        # p3d = points_3d[exidx, :]
        # viz.show3Dpose(p3d, ax2)

        file_name = "imgs/3d_effnet_vae/test/test_%d.png" % imgi
        plt.savefig(file_name)
        print("Saved samples on: %s" % file_name)
        # plt.show()
        plt.close()

        # Second figure: same three panels plus the ground-truth 3d pose
        subplot_idx, exidx = 1, 0
        fig = plt.figure(figsize=(19.2, 5.4))
        nfigs = 3
        gs1 = gridspec.GridSpec(1, nfigs+1)
        gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
        plt.axis('off')

        ax5 = plt.subplot(gs1[subplot_idx-1])
        plt.imshow(img_frames2[imgi])
        ax5.axis('off')
        ax5.title.set_text('Imagen')
        ax5.title.set_size(24)
        ax5.title.set_position([.5, 1.061])

        # Plot 2d pose
        ax1 = plt.subplot(gs1[subplot_idx])
        p2d = points_2d[imgi, :]
        viz.show2Dpose(p2d, ax1)
        ax1.invert_yaxis()
        ax1.title.set_text('Predicción Pose 2D')
        ax1.title.set_size(24)
        ax1.axis('off')

        # Plot 3d predictions
        ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
        p3d = out_3d[imgi, :]
        viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")
        ax3.title.set_text('Predicción Pose 3D')
        ax3.title.set_size(24)

        # Plot 3d gt
        ax2 = plt.subplot(gs1[subplot_idx+2], projection='3d')
        p3d = points_3d[imgi, :]
        viz.show3Dpose(p3d, ax2)
        ax2.title.set_text('GT')
        ax2.title.set_size(24)

        file_name = "imgs/3d_effnet_vae/test/test_gt%d.png" % imgi
        plt.savefig(file_name)
        print("Saved samples on: %s" % file_name)
        # plt.show()
        plt.close()
def train():
    """Build the model, restore its latest checkpoint and generate sample images.

    NOTE(review): despite its name, this function contains no training loop --
    it only restores weights and calls gen_sample_img on the test split.
    """
    # data_train is loaded but unused here
    data_train, data_test = data_handler.load_2d_3d_data(key2d_with_frame=True)

    model = models.Pose3DVae(latent_dim=ENV.FLAGS.latent_dim,
                             enc_dim=ENV.FLAGS.enc_dim,
                             dec_dim=ENV.FLAGS.dec_dim,
                             efficient_net=0)

    # Dummy input for creation for bach normalization weigths: one forward
    # pass so variables exist before the checkpoint is restored
    ainput = np.ones((10, 32), dtype=np.float32)
    model.pose3d(ainput, training=False)

    # Load weights for 2d to 3d prediction
    # model.load_weights('./experiments/3d_effnet_vae/last_model_weights')

    # optimizer = get_optimizer()
    ckpt = tf.train.Checkpoint(step=tf.Variable(1), net=model)
    manager = tf.train.CheckpointManager(ckpt, './experiments/3d_effnet_vae/tf_ckpts', max_to_keep=3)
    ckpt.restore(manager.latest_checkpoint)

    # Indexes for sampling
    idx = np.random.choice(data_test.x_data.shape[0], 15, replace=False)
    gen_sample_img(data_test, model=model, idx=idx)
def main():
    """Script entry point: configure GPU memory growth, then run on the selected device."""

    def _configure_gpus():
        # Enable memory growth so TensorFlow does not grab all GPU memory up front
        gpus = tf.config.experimental.list_physical_devices('GPU')
        if not gpus:
            return
        try:
            # Memory growth must be set identically for every GPU, and
            # before any of them has been initialized
            for device in gpus:
                tf.config.experimental.set_memory_growth(device, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as err:
            # Raised when the GPUs were already initialized
            print(err)

    _configure_gpus()
    with tf.device('/device:GPU:%d' % ENV.FLAGS.gpu_device):
        train()
if __name__ == "__main__":
    # Parse flags / prepare the experiment environment before running
    ENV.setup()
    main()
| {
"alphanum_fraction": 0.5923769772,
"author": null,
"avg_line_length": 34.8812260536,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8501f01e66c97f9651f364dccf505f8d4342a6a2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2f521fe3008ddee81b666550606f7405efd2f547",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "EsauPR/3d-pose-baseline",
"max_forks_repo_path": "src/test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2f521fe3008ddee81b666550606f7405efd2f547",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "EsauPR/3d-pose-baseline",
"max_issues_repo_path": "src/test.py",
"max_line_length": 101,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2f521fe3008ddee81b666550606f7405efd2f547",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "EsauPR/3d-pose-baseline",
"max_stars_repo_path": "src/test.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2473,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9104
} |
# -*- coding:utf-8 -*-
"""
-------------------------------------------------------------------------------
Project Name : ESEP
File Name : interpolate.py
Start Date : 2022-03-25 05:26
Contributor : D.CW
Email : dengchuangwu@gmail.com
-------------------------------------------------------------------------------
Introduction:
-------------------------------------------------------------------------------
"""
import numpy as np
from scipy.interpolate import griddata
def station2grid(data, lon, lat, loc_range, det_grid=0.1, method='linear'):
    """Interpolate scattered station observations onto a regular lon/lat grid.

    Args:
        data: Observed values at the stations.
        lon: Station longitudes.
        lat: Station latitudes.
        loc_range: Grid bounds as (lon_min, lon_max, lat_min, lat_max),
            matching the unpacking order used below.
        det_grid: Spacing of the interpolation grid.
        method: Interpolation method passed to ``scipy.interpolate.griddata``.

    Returns:
        tuple: (lon_grid, lat_grid, grid_data) after interpolation.
    """
    # Step 1: flatten station coordinates and values into column vectors,
    # then stack (lon, lat) pairs as the scattered sample points
    lon_col = np.array(lon).reshape(-1, 1)
    lat_col = np.array(lat).reshape(-1, 1)
    values = np.array(data).reshape(-1, 1)
    points = np.concatenate([lon_col, lat_col], axis=1)

    # Step 2: build the target grid covering the requested bounds (inclusive)
    lon_min, lon_max, lat_min, lat_max = loc_range
    lon_axis = np.arange(lon_min, lon_max + det_grid, det_grid)
    lat_axis = np.arange(lat_min, lat_max + det_grid, det_grid)
    lon_grid, lat_grid = np.meshgrid(lon_axis, lat_axis)

    # Step 3: interpolate onto the grid and drop the trailing singleton axis
    grid_data = griddata(points, values, (lon_grid, lat_grid), method=method)[:, :, 0]
    return lon_grid, lat_grid, grid_data
| {
"alphanum_fraction": 0.5130861505,
"author": null,
"avg_line_length": 32.1754385965,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1b6217ef04555a850deabec5af5d7c5406f27e81",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f951f404515f961126717e4395e3d8364e20b274",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lyingTree/ESEP",
"max_forks_repo_path": "esep/utils/interpolate.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f951f404515f961126717e4395e3d8364e20b274",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lyingTree/ESEP",
"max_issues_repo_path": "esep/utils/interpolate.py",
"max_line_length": 95,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f951f404515f961126717e4395e3d8364e20b274",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lyingTree/ESEP",
"max_stars_repo_path": "esep/utils/interpolate.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 406,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1834
} |
# coding: utf-8
"""Interatomic potential dataset for property of HDNNP. """
import numpy as np
from hdnnpy.dataset.property.property_dataset_base import PropertyDatasetBase
class InteratomicPotentialDataset(PropertyDatasetBase):
    """Interatomic potential dataset for property of HDNNP. """
    PROPERTIES = ['energy', 'force', 'harmonic', 'third_order']
    """list [str]: Names of properties for each derivative order."""
    COEFFICIENTS = [1.0, -1.0, 1.0, 1.0]
    """list [float]: Coefficient values of each properties."""
    UNITS = ['eV/atom', 'eV/$\\AA$', 'eV/$\\AA$^2', 'eV/$\\AA$^3']
    """list [str]: Units of properties for each derivative order."""
    name = 'interatomic_potential'
    """str: Name of this property class."""
    n_property = 1
    """int: Number of dimensions of 0th property."""

    def __init__(self, order, structures):
        """
        It accepts 0 or 3 for ``order``.

        Notes:
            Currently you cannot use order = 2 or 3, since it is not
            implemented.

        Args:
            order (int): passed to super class.
            structures (list [AtomicStructure]): passed to super class.
        """
        assert 0 <= order <= 3
        super().__init__(order, structures)

    def calculate_properties(self, structure):
        """Calculate required properties for a structure data.

        Args:
            structure (AtomicStructure):
                A structure data to calculate properties.

        Returns:
            list [~numpy.ndarray]: Calculated properties.
            The length is the same as ``order`` given at initialization.
        """
        n_deriv = len(structure) * 3
        # One (calculator, target shape) pair per derivative order; only the
        # first ``order + 1`` entries are evaluated, in ascending order
        calculators = [
            (self._calculate_energy, (self.n_property,)),
            (self._calculate_force, (self.n_property, n_deriv)),
            (self._calculate_harmonic, (self.n_property, n_deriv, n_deriv)),
            (self._calculate_third_order,
             (self.n_property, n_deriv, n_deriv, n_deriv)),
        ]
        dataset = []
        for calculate, shape in calculators[:self._order + 1]:
            dataset.append(calculate(structure).astype(np.float32).reshape(shape))
        return dataset

    @staticmethod
    def _calculate_energy(structure):
        """Calculate atomic energy (potential energy per atom)."""
        return structure.get_potential_energy() / len(structure)

    @staticmethod
    def _calculate_force(structure):
        """Calculate interatomic forces."""
        return structure.get_forces()

    @staticmethod
    def _calculate_harmonic(structure):
        """Calculate 2nd-order harmonic force constant (not implemented)."""
        raise NotImplementedError

    @staticmethod
    def _calculate_third_order(structure):
        """Calculate 3rd-order anharmonic force constant (not implemented)."""
        raise NotImplementedError
| {
"alphanum_fraction": 0.5902735562,
"author": null,
"avg_line_length": 35.376344086,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b607148f6201bed9d7bfbf4cf3c3a059670e1e38",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2021-12-01T17:37:44.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-07-08T03:52:21.000Z",
"max_forks_repo_head_hexsha": "aa7250219f8bcffdf48a2390f1fef9c89f642e8a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ogura-edu/HDNNP",
"max_forks_repo_path": "hdnnpy/dataset/property/interatomic_potential_dataset.py",
"max_issues_count": 137,
"max_issues_repo_head_hexsha": "aa7250219f8bcffdf48a2390f1fef9c89f642e8a",
"max_issues_repo_issues_event_max_datetime": "2019-03-18T08:38:02.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-07-21T00:59:57.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "s-okugawa/HDNNP",
"max_issues_repo_path": "hdnnpy/dataset/property/interatomic_potential_dataset.py",
"max_line_length": 77,
"max_stars_count": 10,
"max_stars_repo_head_hexsha": "aa7250219f8bcffdf48a2390f1fef9c89f642e8a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "s-okugawa/HDNNP",
"max_stars_repo_path": "hdnnpy/dataset/property/interatomic_potential_dataset.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-02T03:33:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-07-31T12:52:33.000Z",
"num_tokens": 705,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3290
} |
# Copyright (c) 2020 PHYTEC Messtechnik GmbH
# SPDX-License-Identifier: Apache-2.0
import os
import time
import cv2
import tflite_runtime.interpreter as tflite
import numpy as np
import json
import concurrent.futures
class Ai:
    """TFLite-based face matching.

    Computes a face embedding with a TFLite model and returns the closest
    entries from a precomputed embeddings database (Euclidean distance).
    """

    def __init__(self, model_path, embeddings_path, modeltype='quant'):
        self.model_path = model_path
        self.embeddings_path = embeddings_path
        # 'quant' selects the int8 preprocessing path (version 3); anything
        # else uses the float path (version 2)
        self.modeltype = modeltype
        # Model input resolution
        self.width = 224
        self.height = 224

    def initialize(self):
        """Load the TFLite interpreter and the embeddings database from disk."""
        start = time.time()
        self.init_tflite()
        print('Create Embeddigns')
        with open(self.embeddings_path, 'r') as f:
            embeddings_data = json.load(f)
        data = embeddings_data['Embedding']
        self.embeddings = [np.array(data[str(i)]) for i in range(len(data))]
        data = embeddings_data['Name']
        self.names = [np.array(data[str(i)]) for i in range(len(data))]
        data = embeddings_data['File']
        self.files = [np.array(data[str(i)]) for i in range(len(data))]
        # Split the database into ~4 chunks so distances can be computed by
        # parallel tasks in run_inference()
        self.celeb_embeddings = self.split_data_frame(
            self.embeddings,
            int(np.ceil(len(self.embeddings)/4)))
        print('Initialization done (duration: {})'.format(time.time() - start))

    def run_inference(self, face):
        """Embed a face image and return the 5 closest database entries.

        Args:
            face: image array; resized to the 224x224 model input size.

        Returns:
            dict: {index: [distance, name, file]} for the top-5 matches,
            ordered by ascending distance.
        """
        #Resize face
        # NOTE(review): face.shape is (h, w, channels) compared against a
        # 2-tuple, so this lexicographic comparison effectively tests the
        # height first -- confirm this matches the intended up/downscaling.
        print('Resize face')
        if face.shape > (self.width, self.height):
            face = cv2.resize(face, (self.width, self.height),
                              interpolation=cv2.INTER_AREA)
        elif face.shape < (self.width, self.height):
            face = cv2.resize(face, (self.width, self.height),
                              interpolation=cv2.INTER_CUBIC)
        print('Preprocess')
        # BUG FIX: was `self.modeltype is 'quant'` -- identity comparison
        # with a string literal is implementation-dependent; use equality.
        if self.modeltype == 'quant':
            face = face.astype('float32')
            samples = np.expand_dims(face, axis=0)
            samples = self.preprocess_input(samples,
                                            data_format='channels_last',
                                            version=3).astype('int8')
        else:
            face = face.astype('float32')
            samples = np.expand_dims(face, axis=0)
            samples = self.preprocess_input(samples,
                                            data_format='channels_last',
                                            version=2)
        output_data = self.run_tflite(samples)
        print('Create EUdist')
        start = time.time()
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            futures = [executor.submit(self.faceembedding, output_data,
                                       np.array(chunk))
                       for chunk in self.celeb_embeddings]
        # BUG FIX: previously guarded by `future.done() & ...`, which could
        # silently leave EUdist empty (crashing argpartition below);
        # future.result() waits for completion instead. Iterating over all
        # chunks (not a hard-coded 4) also keeps EUdist index-aligned with
        # self.names / self.files.
        EUdist = []
        for future in futures:
            EUdist.extend(future.result())
        # Indices of the 5 smallest distances (unordered), then sort by distance
        idx = np.argpartition(EUdist, 5)
        idx = idx[:5]
        top5 = dict()
        for match_id in idx:
            top5[match_id] = [EUdist[match_id], self.names[match_id],
                              self.files[match_id]]
        top5 = {key: value for key, value in
                sorted(top5.items(), key=lambda item: item[1][0])}
        print('EUdist duration: {}'.format(time.time() - start))
        return top5

    def init_tflite(self):
        """Create the TFLite interpreter and allocate its tensors."""
        # Cache the compiled graph binary in the working directory
        os.environ['VIV_VX_CACHE_BINARY_GRAPH_DIR'] = os.getcwd()
        os.environ['VIV_VX_ENABLE_CACHE_GRAPH_BINARY'] = '1'
        try:
            self.interpreter = tflite.Interpreter(self.model_path)
        except ValueError as e:
            print('Failed to find model file: ' + str(e))
            return
        print('Allocate Tensors')
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    def run_tflite(self, samples):
        """Run one inference on ``samples`` and return the raw output tensor."""
        print('Invoke TFlite')
        start = time.time()
        self.interpreter.set_tensor(self.input_details[0]['index'], samples)
        self.interpreter.invoke()
        output_data = self.interpreter.get_tensor(
            self.output_details[0]['index'])
        print('Interpreter done ({})'.format(time.time() - start))
        return output_data

    def split_data_frame(self, df, chunk_size):
        """Split a sequence into consecutive chunks of ``chunk_size``.

        Note: a trailing empty chunk is produced when len(df) is an exact
        multiple of chunk_size; callers must tolerate empty chunks.
        """
        list_of_df = list()
        number_chunks = len(df) // chunk_size + 1
        for i in range(number_chunks):
            list_of_df.append(df[i*chunk_size:(i+1)*chunk_size])
        return list_of_df

    def preprocess_input(self, x, data_format, version):
        """Channel-reverse the input and subtract per-channel means.

        Args:
            x: image batch array (float32 expected for versions 1/2; the
               channel means are presumably VGGFace-style BGR means --
               TODO confirm against the model's training pipeline).
            data_format: 'channels_last' or 'channels_first'.
            version: 1, 2 (float means) or 3 (rounded uint8 means for the
               quantized model path).

        Raises:
            NotImplementedError: for any other ``version``.
        """
        x_temp = np.copy(x)
        assert data_format in {'channels_last', 'channels_first'}
        if version == 1:
            if data_format == 'channels_first':
                x_temp = x_temp[:, ::-1, ...]
                x_temp[:, 0, :, :] -= 93.5940
                x_temp[:, 1, :, :] -= 104.7624
                x_temp[:, 2, :, :] -= 129.1863
            else:
                x_temp = x_temp[..., ::-1]
                x_temp[..., 0] -= 93.5940
                x_temp[..., 1] -= 104.7624
                x_temp[..., 2] -= 129.1863
        elif version == 2:
            if data_format == 'channels_first':
                x_temp = x_temp[:, ::-1, ...]
                x_temp[:, 0, :, :] -= 91.4953
                x_temp[:, 1, :, :] -= 103.8827
                x_temp[:, 2, :, :] -= 131.0912
            else:
                x_temp = x_temp[..., ::-1]
                x_temp[..., 0] -= 91.4953
                x_temp[..., 1] -= 103.8827
                x_temp[..., 2] -= 131.0912
        elif version == 3:
            if data_format == 'channels_first':
                x_temp = x_temp[:, ::-1, ...]
                x_temp[:, 0, :, :] -= np.round(91.4953).astype('uint8')
                x_temp[:, 1, :, :] -= np.round(103.8827).astype('uint8')
                x_temp[:, 2, :, :] -= np.round(131.0912).astype('uint8')
            else:
                x_temp = x_temp[..., ::-1]
                x_temp[..., 0] -= np.round(91.4953).astype('uint8')
                x_temp[..., 1] -= np.round(103.8827).astype('uint8')
                x_temp[..., 2] -= np.round(131.0912).astype('uint8')
        else:
            raise NotImplementedError
        return x_temp

    def faceembedding(self, face, celebdata):
        """Return the Euclidean distance from ``face`` to each row of ``celebdata``."""
        dist = []
        for i in range(len(celebdata)):
            celebs = np.array(celebdata[i])
            dist.append(np.linalg.norm(face - celebs))
        return dist
| {
"alphanum_fraction": 0.5294201861,
"author": null,
"avg_line_length": 36.9576719577,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "07336979b13bbc7af68942dae0d4e3df6a45051d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7687234cafabdd82b5fe4f3212842aaacfd88a81",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "phytec/aidemo-facematch",
"max_forks_repo_path": "ai.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7687234cafabdd82b5fe4f3212842aaacfd88a81",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "phytec/aidemo-facematch",
"max_issues_repo_path": "ai.py",
"max_line_length": 95,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "7687234cafabdd82b5fe4f3212842aaacfd88a81",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "phytec/aidemo-facematch",
"max_stars_repo_path": "ai.py",
"max_stars_repo_stars_event_max_datetime": "2021-07-20T06:49:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-20T06:49:40.000Z",
"num_tokens": 1614,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6985
} |
[STATEMENT]
lemma Mertens_convergent: "convergent (\<lambda>n::nat. \<MM> n - ln n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. convergent (\<lambda>x. \<MM> (real x) - ln (real x))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. convergent (\<lambda>x. \<MM> (real x) - ln (real x))
[PROOF STEP]
obtain c where c: "summable (\<lambda>n. (\<MM> n - ln n + c) / n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>c. summable (\<lambda>x. (\<MM> (real x) - ln (real x) + c) / real x) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (blast intro: mertens_summable)
[PROOF STATE]
proof (state)
this:
summable (\<lambda>x. (\<MM> (real x) - ln (real x) + c) / real x)
goal (1 subgoal):
1. convergent (\<lambda>x. \<MM> (real x) - ln (real x))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
summable (\<lambda>x. (\<MM> (real x) - ln (real x) + c) / real x)
[PROOF STEP]
obtain l where l: "(\<lambda>n. (\<MM> n - ln n + c) / n) sums l"
[PROOF STATE]
proof (prove)
using this:
summable (\<lambda>x. (\<MM> (real x) - ln (real x) + c) / real x)
goal (1 subgoal):
1. (\<And>l. (\<lambda>x. (\<MM> (real x) - ln (real x) + c) / real x) sums l \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp: summable_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. (\<MM> (real x) - ln (real x) + c) / real x) sums l
goal (1 subgoal):
1. convergent (\<lambda>x. \<MM> (real x) - ln (real x))
[PROOF STEP]
have *: "(\<lambda>n. \<MM> n - ln n + c) \<longlonglongrightarrow> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. \<MM> (real x) - ln (real x) + c) \<longlonglongrightarrow> 0
[PROOF STEP]
by (rule sum_goestozero_theorem[OF c]) auto
[PROOF STATE]
proof (state)
this:
(\<lambda>x. \<MM> (real x) - ln (real x) + c) \<longlonglongrightarrow> 0
goal (1 subgoal):
1. convergent (\<lambda>x. \<MM> (real x) - ln (real x))
[PROOF STEP]
hence "(\<lambda>n. \<MM> n - ln n) \<longlonglongrightarrow> -c"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. \<MM> (real x) - ln (real x) + c) \<longlonglongrightarrow> 0
goal (1 subgoal):
1. (\<lambda>x. \<MM> (real x) - ln (real x)) \<longlonglongrightarrow> - c
[PROOF STEP]
by (simp add: tendsto_iff dist_norm)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. \<MM> (real x) - ln (real x)) \<longlonglongrightarrow> - c
goal (1 subgoal):
1. convergent (\<lambda>x. \<MM> (real x) - ln (real x))
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. \<MM> (real x) - ln (real x)) \<longlonglongrightarrow> - c
goal (1 subgoal):
1. convergent (\<lambda>x. \<MM> (real x) - ln (real x))
[PROOF STEP]
by (rule convergentI)
[PROOF STATE]
proof (state)
this:
convergent (\<lambda>x. \<MM> (real x) - ln (real x))
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Prime_Number_Theorem_Prime_Number_Theorem",
"hexsha": null,
"include": null,
"lang": null,
"length": 13,
"llama_tokens": 1216,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
from __future__ import absolute_import
from __future__ import division
from tiny_faces.tiny_fd import TinyFacesDetector
import sys
import cv2
import os
import os.path as ops
import numpy as np
import json
import argparse
from manage_data.get_density_map import interpolate_scale
from manage_data.utils import mkdir_if_missing
# Command-line interface: select the dataset, its root directory, and whether
# to write debug plots of detections alongside the saved .npy boxes.
parser = argparse.ArgumentParser(description='Tiny face detection for crowd counting')
# Datasets
parser.add_argument('-d', '--dataset', type=str, default='ucf-cc-50', help="dataset to create density masks")
parser.add_argument('-r', '--root-dir', type=str, default='/workspace/quispe/', help="root directory for datasets")
parser.add_argument('--save-plots', action='store_true', help="save plots of detected and interpolated tiny faces")
args = parser.parse_args()
# Single detector instance shared by every dataset split processed below.
detector = TinyFacesDetector(model_root='./tiny_faces/', prob_thresh=0.5, gpu_idx=0)
def UCF_CC_50():
    """Return the (images, density_maps, faces, labels) directories for UCF-CC-50."""
    base_dir = ops.join(args.root_dir, "ucf_cc_50/UCF_CC_50")
    print("working over '{}'".format(base_dir))
    sub_dirs = ("images", "density_maps", "faces", "labels")
    img_dir_path, den_dir_path, est_dir_path, lab_dir_path = (
        os.path.join(base_dir, name) for name in sub_dirs)
    return img_dir_path, den_dir_path, est_dir_path, lab_dir_path
def shanghai_tech(part, mode):
    """Return the (images, density_maps, faces, labels) directories for one
    ShanghaiTech split.

    part: "part_A" or "part_B"; mode: "train_data" or "test_data".
    """
    base_dir = ops.join(args.root_dir, "ShanghaiTech/" + part + "/" + mode)
    print("working over '{}'".format(base_dir))
    paths = tuple(os.path.join(base_dir, name)
                  for name in ("images", "density_maps", "faces", "labels"))
    return paths
def run_face_detector(img_dir_path, den_dir_path, est_dir_path, lab_dir_path):
    """Run the tiny-face detector over one dataset split and report count errors.

    For every image, the detected boxes are saved as ``<image id>.npy`` in
    ``est_dir_path``, the detection count is compared against the integral of
    the ground-truth density map, and the aggregate MAE / RMSE over the split
    is printed at the end.  With ``--save-plots`` an annotated JPEG per image
    is also written.

    Parameters
    ----------
    img_dir_path : str; directory with the input images
    den_dir_path : str; directory with the ground-truth density maps (.npy)
    est_dir_path : str; output directory for detections (and optional plots)
    lab_dir_path : str; directory with the head-point labels (.json)
    """
    mkdir_if_missing(est_dir_path)
    file_names = sorted(os.listdir(img_dir_path))
    # Guard against an empty split: avoids ZeroDivisionError in the
    # MAE/MSE normalisation below.
    if not file_names:
        print("no images found in '{}'".format(img_dir_path))
        return
    mae, mse = 0, 0
    for file_name in file_names:
        file_extension = file_name.split('.')[-1]
        # file_id keeps the trailing '.', so `file_id + 'npy'` yields
        # e.g. "img_0001.npy" (this matches how the maps were saved).
        file_id = file_name[:len(file_name) - len(file_extension)]
        file_path = os.path.join(img_dir_path, file_name)
        density_map_path = os.path.join(den_dir_path, file_id + 'npy')
        label_path = os.path.join(lab_dir_path, file_id + 'json')
        img = cv2.imread(file_path)
        boxes = detector.detect(img)
        save_path = os.path.join(est_dir_path, file_id + 'npy')
        np.save(save_path, boxes)
        gt = np.load(density_map_path)
        gt_count = np.sum(gt)
        et_count = boxes.shape[0]
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)
        if args.save_plots:
            # Detected boxes in cyan.
            for r in boxes:
                cv2.rectangle(img, (r[0], r[1]), (r[2], r[3]), (255, 255, 0), 1)
            # Interpolated boxes around labelled head points in magenta.
            points = [[p['y'], p['x']] for p in json.load(open(label_path))]
            _, bb_sizes = interpolate_scale(img.shape, points, boxes)
            for p, r in zip(points, bb_sizes):
                cv2.rectangle(img, (r[0], r[1]), (r[2], r[3]), (255, 0, 255), 1)
                cv2.circle(img, (int(p[1]), int(p[0])), 1, (255, 0, 255), 2)
            #img = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
            print('Image {}: gt count {:.1f}, est count {}'.format(file_id, gt_count, et_count))
            # est_dir_path already exists (created above), so the redundant
            # per-image os.path.exists/os.mkdir check was removed.
            save_path = os.path.join(est_dir_path, file_id + 'jpg')
            print(save_path)
            cv2.imwrite(save_path, img)
    mae = mae / len(file_names)
    mse = np.sqrt(mse / len(file_names))
    print("final mse: {:.3f}, final mae: {:.3f}".format(mse, mae))
if __name__ == '__main__':
    # Dispatch on the requested dataset; ShanghaiTech processes all four
    # part/mode splits in sequence, in the same order as before.
    if args.dataset == 'ucf-cc-50':
        run_face_detector(*UCF_CC_50())
    if args.dataset == 'shanghai-tech':
        for part in ("part_A", "part_B"):
            for mode in ("train_data", "test_data"):
                run_face_detector(*shanghai_tech(part, mode))
| {
"alphanum_fraction": 0.6798018949,
"author": null,
"avg_line_length": 45.0873786408,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8477a5ea6a52f2119b4396dd0be436f2ef36784a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-11-16T22:33:53.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-16T22:33:53.000Z",
"max_forks_repo_head_hexsha": "4b1590499bd93ac09e62c4c7760b88ae92e6b301",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ChrisKonishi/multi-stream-crowd-counting-extended",
"max_forks_repo_path": "tiny_detection_mxnet.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4b1590499bd93ac09e62c4c7760b88ae92e6b301",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ChrisKonishi/multi-stream-crowd-counting-extended",
"max_issues_repo_path": "tiny_detection_mxnet.py",
"max_line_length": 115,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "4b1590499bd93ac09e62c4c7760b88ae92e6b301",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ChrisKonishi/multi-stream-crowd-counting-extended",
"max_stars_repo_path": "tiny_detection_mxnet.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-02T11:38:23.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-14T12:52:04.000Z",
"num_tokens": 1244,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4644
} |
""" Module to fit the electronic charge density of closed shell molecules obtained
by self-consistent siesta calculations with Gaussian functionsi
HOW TO
-------
1) run get_data()
2) run get_atom_pos()
3) run add_core_density()
4) run fit_poly()
"""
import sys
import numpy as np
import scipy.optimize as opt
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
from .gauss_util import *
from .mono_fit import *
import struct
# Module-level state shared between the loaders (get_data / get_data_bin)
# and the fitting routines below.
rho = np.zeros(2)       # density on the grid; shape (2,) marks "not loaded yet"
unitcell = np.zeros(2)  # unit-cell vectors; becomes (3, 3) once a RHO file is loaded
grid = np.zeros(2)      # grid dimensions; set by the loaders
rhopath = 'RHO.gauss'   # path of the most recently loaded RHO file
def get_data(file_path):
    """Import density data from a (formatted) RHO file.

    Structure of the RHO file: the first three lines give the unit-cell
    vectors, the fourth line the grid dimensions, and subsequent lines the
    density on the grid.  Falls back to `get_data_bin` for unformatted
    (binary) files.

    Parameters
    ----------
    file_path: string; path to the RHO file from which the density is read

    Returns
    -------
    None

    Other
    -----
    Sets the module-level globals:
    unitcell: (3, 3) np.array; unit-cell vectors in euclidean coordinates
    grid: (4,) np.array of int; grid points per direction (last entry: spin)
    rho: (grid[0], grid[1], grid[2]) np.array; density on the grid
    """
    global rho
    global unitcell
    global grid
    global rhopath
    rhopath = file_path
    unitcell = np.zeros([3, 3])
    grid = np.zeros([4])
    try:
        with open(file_path, 'r') as rhofile:
            # unit cell (in Bohr)
            for i in range(0, 3):
                unitcell[i, :] = rhofile.readline().split()
            grid[:] = rhofile.readline().split()
            grid = grid.astype(int)
            # initialize density with the right shape
            # (the previously computed, unused element count was dropped)
            rho = np.zeros(grid)
            # one value per line, x is the fastest-varying index
            for z in range(grid[2]):
                for y in range(grid[1]):
                    for x in range(grid[0]):
                        rho[x, y, z, 0] = rhofile.readline()
            # closed shell -> we don't care about spin.
            rho = rho[:, :, :, 0]
            sync_global(rho, grid, unitcell, rhopath)
    except UnicodeDecodeError:
        # Not a text file: treat as unformatted (binary) RHO.
        get_data_bin(file_path)
def get_data_bin(file_path):
    """ Same as get_data for binary (unformatted) files
    """
    #Warning: Only works for cubic cells!!!
    #TODO: Implement for arb. cells
    global rho
    global unitcell
    global grid
    global rhopath
    bin_file = open(file_path, mode = 'rb')
    # Fortran unformatted records: each record is framed by 4-byte length
    # markers, hence the leading/trailing 'I' in the struct format strings.
    unitcell = '<I9dI'
    grid = '<I4iI'
    unitcell = np.array(struct.unpack(unitcell,
        bin_file.read(struct.calcsize(unitcell))))[1:-1].reshape(3,3)
    grid = np.array(struct.unpack(grid,bin_file.read(struct.calcsize(grid))))[1:-1]
    # Only cubic grids with a single spin component are supported.
    if (grid[0] == grid[1] == grid[2]) and grid[3] == 1:
        a = grid[0]
    else:
        raise Exception('get_data_bin cannot handle non-cubic unitcells or spin')
    # One record per (y, z) column of a values; strip the record markers
    # after reshaping (the 1:-1 slice along the first axis).
    block = '<' + 'I{}fI'.format(a)*a*a
    content = np.array(struct.unpack(block,bin_file.read(struct.calcsize(block))))
    rho = content.reshape(a+2, a, a, order = 'F')[1:-1,:,:]
    sync_global(rho, grid, unitcell,rhopath)
def set_rho(newrho):
    """Replace the module-level density grid, keeping gauss_util in sync.

    Parameters
    ----------
    newrho: np.array; new density values on the grid
    """
    global rho
    # The top of this module only does `from .gauss_util import *`, which does
    # NOT bind the name `gauss_util`; import the module object explicitly so
    # the attribute assignment below does not raise NameError.
    from . import gauss_util
    gauss_util.rho = newrho
    rho = newrho
def add_core_density(molecule_list):
    """Add the core density to the valence charge density stored in the
    global variable ``rho``.

    Parameters
    ----------
    molecule_list: list of Molecule objects (a single Molecule is also
        accepted and wrapped in a list)

    Returns
    -------
    None

    Raises
    ------
    Exception: if the valence density has not been loaded yet.
    """
    if not isinstance(molecule_list, list):
        molecule_list = [molecule_list]
    global rho
    # `rho` is initialised to np.zeros(2) at module level; check the shape of
    # that sentinel instead of `rho == np.zeros(2)`, whose elementwise result
    # raises "truth value of an array is ambiguous" in an `if`.
    if rho.shape == (2,):
        raise Exception("Valence density not loaded")
    Xm, Ym, Zm = mesh_3d()
    # Scale grid indices to euclidean coordinates (diagonal cell assumed,
    # consistent with the rest of this module).
    Z = Zm * unitcell[2, 2] / grid[2]
    Y = Ym * unitcell[1, 1] / grid[1]
    X = Xm * unitcell[0, 0] / grid[0]
    box_vol = unitcell[0, 0] / grid[0] * unitcell[1, 1] / grid[1] * unitcell[
        2, 2] / grid[2]
    core_charge = np.zeros_like(X)
    for molecule in molecule_list:
        n_core_el = 0
        for atom in molecule.get_atom_list():
            if atom.is_core_corrected():
                core_charge += atom.core_density(X, Y, Z)
                n_core_el += atom.core_electrons
    # Due to coarse graining around the core, normalization is not necessarily
    # equal to the number of electrons => ad-hoc correction could fix this:
    #correction = n_core_el / (np.sum(core_charge) * box_vol)
    correction = 1.0
    # Add core charge density to the global (valence) density
    rho[Xm, Ym, Zm] += core_charge * correction
# ========================= Main Routine - Fit ==================== #
def fit_poly(molecule_list,
             rmax=0,
             use_sym=False,
             plot_out=False,
             out_path=None,
             box_buffer=2.0,
             write_xyz=False,
             U=0,
             colin_reg=0,
             mp_reg=None):
    """ Fits n_gauss Gaussians to charge density from any number of water molecules

    Input parameters:
    -----------------
    molecule_list: list of Molecule objects/ Molecule object
    rmax: 3d array/list; lower and upper box limits; rmax = 0 uses full grid,
        rmax = -1: automatically determine smallest box around molecules
    use_sym: boolean; use the symmetries specified for each molecule
    plot_out: after fitting is done, plot the resulting charge density and
        compare to exact density
    out_path: path where coordinates are saved; defaults to the path of the
        most recently loaded RHO file (module-level `rhopath`)
    box_buffer: float, if box size is determined automatically this determines
        the buffer size in Bohr around the molecules.
    write_xyz: boolean; write .xyz file
    U: float; overlap penalty
    colin_reg: float; regularization parameter that forces Gaussians lie on
        bonds, if colin_reg > 1e5 simply enforce projection
    mp_reg: [float]; regularization parameters for multipoles, the larger the more
        accurate the dipole moment will be reproduced, sacrificing fit quality.

    Returns:
    --------
    final_results, rmse, max_error

    final_results: pandas DataFrame; fitting parameters, positions in Bohr
    rmse: float; RMSE of fit
    max_error: float; Maximum absolute error of fit

    Other:
    ------
    The final_results DataFrame has 4 columns containing the coordinates of the
    Gaussian center and its width w defined as :
    G(r) = 2 * (w * np.pi)**(-3/2) * np.exp(-(r - r0)**2 / w)
    """
    # Resolve late-bound defaults.  The previous `out_path=rhopath` default
    # was evaluated once at import time and therefore ignored any RHO file
    # loaded afterwards; `mp_reg=[]` was a shared mutable default.
    if out_path is None:
        out_path = rhopath
    if mp_reg is None:
        mp_reg = []
    # The star-import at the top does not bind the module name `gauss_util`,
    # which is needed below for `gauss_util.rho_val`.
    from . import gauss_util
    # If single molecule is not passed as a list
    if not isinstance(molecule_list, list):
        molecule_list = [molecule_list]
    n_molecules = len(molecule_list)
    # Total amount of gaussians used to fit density
    n_gauss = 0
    # Number of atoms
    n_atoms = 0
    gauss_separator = []
    atom_separator = []
    for molecule in molecule_list:
        gauss_separator.append(n_gauss)
        atom_separator.append(n_atoms)
        n_gauss += molecule.get_n_gauss()
        n_atoms += molecule.get_n_atoms()
    atom_pos = np.zeros([n_atoms, 3])
    for m, molecule in enumerate(molecule_list):
        n_a = molecule.get_n_atoms()
        atom_pos[atom_separator[m]:atom_separator[m] + n_a, :] = molecule.get_atom_pos()
    # automatically determine smallest box around molecules
    if rmax == -1:
        rmax = smallest_box(atom_pos, box_buffer)
    # --------------- 3D fit -------------------- #
    Xm, Ym, Zm = mesh_3d(rmin=[0, 0, 0], rmax=rmax)
    X, Y, Z = mesh_3d(rmin=[0, 0, 0], rmax=rmax, scaled=True)
    sol = rho[Xm, Ym, Zm]
    V = unitcell[2, 2] / grid[2] * unitcell[1, 1] / grid[1] * unitcell[0, 0] / grid[0]
    mesh_size = len(X.flatten())
    fit_func = n_gauss_3d
    cost_len = len(X.flatten())
    # Molecule centered meshes
    mcm = []
    mcm_scaled = []
    for m in molecule_list:
        mcm.append(molecule_centered_mesh(m, buffer=box_buffer * 2))
        mcm_scaled.append(molecule_centered_mesh(m, buffer=box_buffer * 2, scaled=True))
    # Multipole moments
    comp_dipole = False
    comp_quadrupole = False
    dipoles = []
    for i, reg in enumerate(mp_reg):
        if reg != 0:
            if i == 0:
                comp_dipole = True
                for m, mol in enumerate(molecule_list):
                    dp = dipole_moment(*mcm_scaled[m], V, atom_pos, gauss_util.rho_val[mcm[m][0], mcm[m][1], mcm[m][2]])
                    print('Fitting dipole moment {} for molecule {}'.format(dp, m + 1))
                    dipoles.append(dp)
                    cost_len += 3
                dp = dipole_moment(X, Y, Z, V, atom_pos, gauss_util.rho_val[Xm, Ym, Zm])
                print('Fitting dipole moment {} for total charge density'.format(dp))
                dipoles.append(dp)
                mcm_scaled.append([X, Y, Z])
                cost_len += 3
            elif i == 1:
                comp_quadrupole = True
                quadrupole = quadrupole_moment(X, Y, Z, V, atom_pos, gauss_util.rho_val[Xm, Ym, Zm])
                print('Fitting quadrupole moment \n {} \n for total charge density'.format(quadrupole))
                cost_len += 9
    if U != 0:
        comp_u = True
        cost_len += 1
    else:
        comp_u = False
    if colin_reg != 0:
        comp_colin = True
        cost_len += 1
    else:
        comp_colin = False
    # Indices of the parameters entering the multipole cost: the first 16 of
    # every 20 parameters per molecule.
    par_select = []
    index = 0
    for m, _ in enumerate(molecule_list):
        par_select += list(range(index, index + 16))
        index += 20

    # Cost function that is minimized by gradient descent
    def cost(par):
        cost_array = np.zeros(cost_len)
        start = 0
        # Penalty on Gaussian position
        if comp_colin:
            if colin_reg > 1e5:  # If regularization is large enough simply enforce constraint
                par = restricted_to_euclid(par, molecule_list)
                cost_array[start] = 0
                start += 1
            else:
                cost_array[start] = colin_reg * colinear_cost(par, molecule_list)
                start += 1
        mp_par = np.array(par)[par_select].tolist()
        # Enforce symmetries
        if use_sym:
            for molecule, gs in zip(molecule_list, gauss_separator):
                n_g = molecule.get_n_gauss()
                par[gs * 4:gs * 4 + 4 * n_g] = molecule.use_constraints(
                    par[gs * 4:gs * 4 + 4 * n_g])
        # Density fit cost
        cost_array[start:start + mesh_size] = (fit_func(X, Y, Z, par, n_gauss) - sol).flatten()
        start += mesh_size
        # Overlap cost
        if comp_u:
            cost_array[start] = U / mesh_size * gauss_overlap(par, n_gauss)
            start += 1
        # Multipole cost
        if comp_dipole:
            for mesh, dipole in zip(mcm_scaled, dipoles):
                cost_array[start: start + 3] = mp_reg[0] * \
                    (dipole_moment(*mesh, V, atom_pos, fit_func(mesh[0], mesh[1], mesh[2], mp_par, n_gauss - len(molecule_list))) - dipole)
                start += 3
        if comp_quadrupole:
            cost_array[start:start + 9] = mp_reg[1] * \
                (quadrupole_moment(X, Y, Z, V, atom_pos, fit_func(X, Y, Z, mp_par, n_gauss - len(molecule_list))) - \
                 quadrupole).flatten()
            start += 9
        return cost_array

    # Get initial parameters for fit
    init_par = []
    for molecule in molecule_list:
        init_par += molecule.get_init()
    if colin_reg > 1e5:
        init_par = euclid_to_restricted(init_par, molecule_list)
    # Actual fitting
    fit = opt.least_squares(cost, init_par)
    # Apply constraints used during fitting
    if use_sym:
        if colin_reg > 1e5:  # If regularization is large enough simply enforce constraint
            fit.x = restricted_to_euclid(fit.x, molecule_list)
        for molecule, gs in zip(molecule_list, gauss_separator):
            n_g = molecule.get_n_gauss()
            fit.x[gs * 4:gs * 4 + 4 * n_g] = molecule.use_constraints(
                fit.x[gs * 4:gs * 4 + 4 * n_g])
    Xf, Yf, Zf = mesh_3d(rmin=[0, 0, 0], rmax=0, scaled=True)
    print('Dipole_moment [D]: {}'.format(dipole_moment(Xf, Yf, Zf, V, atom_pos, fit_func(Xf, Yf, Zf, np.array(fit.x)[par_select].tolist(), n_gauss - len(molecule_list)))))
    print('Quadrupole moment [a.u.]: \n{}'.format(quadrupole_moment(Xf, Yf, Zf, V, atom_pos, fit_func(Xf, Yf, Zf, np.array(fit.x)[par_select].tolist(), n_gauss - len(molecule_list)))))
    sqrd_errors = ((fit_func(X, Y, Z, fit.x, n_gauss) - sol).flatten())**2
    rmse = np.sqrt(np.mean(sqrd_errors))
    max_error = np.max(np.sqrt(sqrd_errors))
    if plot_out:
        plot_overview(molecule_list, fit_func, fit.x, rmax)
    # Save Gaussian parameters
    final_results = pd.DataFrame(
        [list(fit.x[i * 4:i * 4 + 4]) for i in range(n_gauss)])
    final_results.to_csv(out_path, header=None, index=None)
    # Bad Fit Warning
    if rmse > 0.03 * n_molecules:
        print('WARNING: RMSE/Molecule > 0.03')
    if rmse > 0.05 * n_molecules:
        print('!!!!!!!!!!! WARNING: RMSE/Molecule > 0.05 !!!!!!!!!!!!!!!!')
    if write_xyz != False:
        output_xyz(out_path[:-6] + '.xyz', molecule_list, fit.x, write_xyz)
    # Needed by vary_parallel to determine whether all cores have finished
    print('done')
    return final_results, rmse, max_error
| {
"alphanum_fraction": 0.5926261027,
"author": null,
"avg_line_length": 31.5035629454,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "fa024d2568ff0385378f6b44ce16d2e52bafa176",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5a7c8c1f5541d7388acc11909f06d20e920e9f8b",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "semodi/gauss_fit",
"max_forks_repo_path": "gauss_fit/fitting/gauss_charge.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5a7c8c1f5541d7388acc11909f06d20e920e9f8b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "semodi/gauss_fit",
"max_issues_repo_path": "gauss_fit/fitting/gauss_charge.py",
"max_line_length": 179,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "5a7c8c1f5541d7388acc11909f06d20e920e9f8b",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "semodi/gauss_fit",
"max_stars_repo_path": "gauss_fit/fitting/gauss_charge.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-15T09:09:17.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-15T09:09:17.000Z",
"num_tokens": 3537,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13263
} |
(**
Here we define the signature for the coherent 2-groups.
A coherent 2-group has a unit, a multiplication, and an inverse operation.
The inverse laws are witnessed up to adjoint equivalence while associativity and unitality are witnessed as in a monoidal category.
For more details, see:
- https://ncatlab.org/nlab/show/2-group#definition
- Definition 7 in https://arxiv.org/pdf/math/0307200.pdf
*)
Require Import UniMath.Foundations.All.
Require Import UniMath.MoreFoundations.All.
Require Import UniMath.CategoryTheory.Core.Categories.
Require Import UniMath.Bicategories.Core.Bicat.
Require Import UniMath.Bicategories.Core.Examples.OneTypes.
Require Import prelude.all.
Require Import signature.hit_signature.
Require Import signature.hit.
Require Import algebra.one_types_polynomials.
Require Import algebra.one_types_endpoints.
Require Import algebra.one_types_homotopies.
Require Import displayed_algebras.displayed_algebra.
Local Open Scope cat.
(** The signature *)
(** Point constructor: a coherent 2-group has a unit (the constant summand),
    an inverse (the second [I]), and a multiplication (the [I * I] summand). *)
Definition coh_2gr_point_constr
  : poly_code
  := C unit_one_type + I + I * I.

(** Path constructors: the two unit laws, the two inverse laws, and
    associativity of the multiplication. *)
Inductive coh_2gr_paths : UU :=
| lunit : coh_2gr_paths
| runit : coh_2gr_paths
| linv : coh_2gr_paths
| rinv : coh_2gr_paths
| massoc : coh_2gr_paths.

(** Homotopy constructors: the two adjoint-equivalence triangles for the
    inverse, and the triangle and pentagon coherences. *)
Inductive coh_2gr_homots : UU :=
| inv_adj_triangle_l : coh_2gr_homots
| inv_adj_triangle_r : coh_2gr_homots
| triangle : coh_2gr_homots
| pentagon : coh_2gr_homots.
(** Argument polynomial of each path constructor: the unit and inverse laws
    each take one point, associativity takes three. *)
Definition coh_2gr_paths_args
  : coh_2gr_paths → poly_code.
Proof.
  intro i.
  induction i.
  - (* lunit *)
    exact I.
  - (* runit *)
    exact I.
  - (* linv *)
    exact I.
  - (* rinv *)
    exact I.
  - (* massoc *)
    exact (I * I * I).
Defined.
(** The unit as an endpoint: inject [tt] through the two left injections of
    the point constructor. *)
Definition unit_endpoint
           (P : poly_code)
  : endpoint coh_2gr_point_constr P I
  := comp
       (comp
          (comp
             (c P (tt : unit_one_type))
             (ι₁ _ _))
          (ι₁ _ _))
       constr.

(** The inverse of an endpoint: route [e] through the inverse summand. *)
Definition inv_endpoint
           {P : poly_code}
           (e : endpoint coh_2gr_point_constr P I)
  : endpoint coh_2gr_point_constr P I
  := comp
       (comp
          (comp
             e
             (ι₂ _ _))
          (ι₁ _ _))
       constr.

(** The product of two endpoints: pair them and route through the
    multiplication summand. *)
Definition mult_endpoint
           {P : poly_code}
           (e₁ e₂ : endpoint coh_2gr_point_constr P I)
  : endpoint coh_2gr_point_constr P I
  := comp
       (comp
          (pair e₁ e₂)
          (ι₂ _ _))
       constr.
(** Left-hand sides of the path constructors (e.g. [1 · x] for [lunit]). *)
Definition coh_2gr_paths_lhs
           (i : coh_2gr_paths)
  : endpoint coh_2gr_point_constr (coh_2gr_paths_args i) I.
Proof.
  induction i.
  - (* lunit *)
    exact (mult_endpoint
             (unit_endpoint _)
             (id_e _ _)).
  - (* runit *)
    exact (mult_endpoint
             (id_e _ _)
             (unit_endpoint _)).
  - (* linv *)
    exact (mult_endpoint
             (inv_endpoint (id_e _ _))
             (id_e _ _)).
  - (* rinv *)
    exact (unit_endpoint _).
  - (* massoc *)
    exact (mult_endpoint
             (comp (π₁ _ _) (π₁ _ _))
             (mult_endpoint
                (comp (π₁ _ _) (π₂ _ _))
                (π₂ _ _))).
Defined.

(** Right-hand sides of the path constructors (e.g. [x] for [lunit]). *)
Definition coh_2gr_paths_rhs
           (i : coh_2gr_paths)
  : endpoint coh_2gr_point_constr (coh_2gr_paths_args i) I.
Proof.
  induction i.
  - (* lunit *)
    exact (id_e _ _).
  - (* runit *)
    exact (id_e _ _).
  - (* linv *)
    exact (unit_endpoint _).
  - (* rinv *)
    exact (mult_endpoint
             (id_e _ _)
             (inv_endpoint (id_e _ _))).
  - (* massoc *)
    exact (mult_endpoint
             (mult_endpoint
                (comp (π₁ _ _) (π₁ _ _))
                (comp (π₁ _ _) (π₂ _ _)))
             (π₂ _ _)).
Defined.
(** Point argument of each homotopy constructor: the triangles take one point,
    the triangle coherence two, the pentagon four. *)
Definition coh_2gr_homots_point_arg
           (i : coh_2gr_homots)
  : poly_code.
Proof.
  induction i.
  - (* inv_adj_triangle_l *)
    exact I.
  - (* inv_adj_triangle_r *)
    exact I.
  - (* triangle *)
    exact (I * I).
  - (* pentagon *)
    exact (I * I * I * I).
Defined.
(** Source endpoints of the homotopy constructors. *)
Definition coh_2gr_homots_point_left_endpoint
           (i : coh_2gr_homots)
  : endpoint coh_2gr_point_constr (coh_2gr_homots_point_arg i) I.
Proof.
  induction i.
  - (* inv_adj_triangle_l *)
    exact (mult_endpoint
             (unit_endpoint _)
             (id_e _ _)).
  - (* inv_adj_triangle_r *)
    exact (mult_endpoint
             (inv_endpoint (id_e _ _))
             (unit_endpoint _)).
  - (* triangle *)
    exact (mult_endpoint
             (π₁ _ _)
             (mult_endpoint
                (unit_endpoint _)
                (π₂ _ _))).
  - (* pentagon *)
    exact (mult_endpoint
             (comp (π₁ _ _) (comp (π₁ _ _) (π₁ _ _)))
             (mult_endpoint
                (comp (π₁ _ _) (comp (π₁ _ _) (π₂ _ _)))
                (mult_endpoint
                   (comp (π₁ _ _) (π₂ _ _))
                   (π₂ _ _)))).
Defined.

(** Target endpoints of the homotopy constructors. *)
Definition coh_2gr_homots_point_right_endpoint
           (i : coh_2gr_homots)
  : endpoint coh_2gr_point_constr (coh_2gr_homots_point_arg i) I.
Proof.
  induction i.
  - (* inv_adj_triangle_l *)
    exact (mult_endpoint
             (id_e _ _)
             (unit_endpoint _)).
  - (* inv_adj_triangle_r *)
    exact (mult_endpoint
             (unit_endpoint _)
             (inv_endpoint (id_e _ _))).
  - (* triangle *)
    exact (mult_endpoint
             (π₁ _ _)
             (π₂ _ _)).
  - (* pentagon *)
    exact (mult_endpoint
             (mult_endpoint
                (mult_endpoint
                   (comp (π₁ _ _) (comp (π₁ _ _) (π₁ _ _)))
                   (comp (π₁ _ _) (comp (π₁ _ _) (π₂ _ _))))
                (comp (π₁ _ _) (π₂ _ _)))
             (π₂ _ _)).
Defined.
(** Left unitality as a homotopy endpoint, at an arbitrary endpoint [e]:
    reassociates the composites so the [lunit] path constructor applies. *)
Definition lunit_homot_endpoint
           {P : poly_code}
           (e : endpoint coh_2gr_point_constr P I)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c P (tt : unit_one_type))
      (c P (tt : unit_one_type))
      (mult_endpoint (unit_endpoint P) e)
      e.
Proof.
  refine (trans_e _ (comp_id_r _)).
  refine (trans_e _ (path_constr lunit _)).
  simpl.
  unfold mult_endpoint.
  refine (trans_e _ (inv_e (comp_assoc _ _ _))).
  apply ap_e.
  refine (trans_e _ (inv_e (comp_assoc _ _ _))).
  apply ap_e.
  unfold unit_endpoint.
  refine (trans_e (path_pair _ _) (inv_e (comp_pair _ _ _))).
  - (* first pair component: the unit factor *)
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply path_inl.
    apply inv_e.
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    apply comp_constant.
  - (* second pair component: [e] itself *)
    apply inv_e.
    apply comp_id_r.
Defined.
(** Right unitality as a homotopy endpoint, at an arbitrary endpoint [e];
    mirror image of [lunit_homot_endpoint] using the [runit] constructor. *)
Definition runit_homot_endpoint
           {P : poly_code}
           (e : endpoint coh_2gr_point_constr P I)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c P (tt : unit_one_type))
      (c P (tt : unit_one_type))
      (mult_endpoint e (unit_endpoint _))
      e.
Proof.
  refine (trans_e _ (comp_id_r _)).
  refine (trans_e _ (path_constr runit _)).
  simpl.
  unfold mult_endpoint.
  refine (trans_e _ (inv_e (comp_assoc _ _ _))).
  apply ap_e.
  refine (trans_e _ (inv_e (comp_assoc _ _ _))).
  apply ap_e.
  unfold unit_endpoint.
  refine (trans_e (path_pair _ _) (inv_e (comp_pair _ _ _))).
  - (* first pair component: [e] itself *)
    apply inv_e.
    apply comp_id_r.
  - (* second pair component: the unit factor *)
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply path_inl.
    apply inv_e.
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    apply comp_constant.
Defined.
(** Left inverse law as a homotopy endpoint, at an arbitrary endpoint [e]. *)
Definition linv_homot_endpoint
           {P : poly_code}
           (e : endpoint coh_2gr_point_constr P I)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c P (tt : unit_one_type))
      (c P (tt : unit_one_type))
      (mult_endpoint (inv_endpoint e) e)
      (unit_endpoint P).
Proof.
  refine (trans_e (trans_e _ (path_constr linv e)) _)
  ; cbn.
  - (* align the source with the instantiated [linv] constructor *)
    unfold mult_endpoint.
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (trans_e _ (inv_e (comp_pair _ _ _))).
    use path_pair.
    + unfold inv_endpoint.
      refine (trans_e _ (inv_e (comp_assoc _ _ _))).
      apply ap_e.
      refine (trans_e _ (inv_e (comp_assoc _ _ _))).
      apply ap_e.
      refine (trans_e _ (inv_e (comp_assoc _ _ _))).
      apply ap_e.
      refine (inv_e _).
      apply comp_id_r.
    + refine (inv_e _).
      apply comp_id_r.
  - (* align the target with the unit endpoint *)
    unfold unit_endpoint.
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    apply comp_constant.
Defined.
(** Right inverse law as a homotopy endpoint, at an arbitrary endpoint [e];
    note the direction: from the unit to [e · e⁻¹]. *)
Definition rinv_homot_endpoint
           {P : poly_code}
           (e : endpoint coh_2gr_point_constr P I)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c P (tt : unit_one_type))
      (c P (tt : unit_one_type))
      (unit_endpoint P)
      (mult_endpoint e (inv_endpoint e)).
Proof.
  refine (trans_e (trans_e _ (path_constr rinv e)) _)
  ; cbn.
  - (* align the source with the unit endpoint *)
    unfold unit_endpoint.
    cbn.
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (inv_e _).
    apply comp_constant.
  - (* align the target with the instantiated [rinv] constructor *)
    unfold mult_endpoint.
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    refine (trans_e (comp_pair _ _ _) _).
    use path_pair.
    + apply comp_id_r.
    + unfold inv_endpoint.
      refine (trans_e (comp_assoc _ _ _) _).
      apply ap_e.
      refine (trans_e (comp_assoc _ _ _) _).
      apply ap_e.
      refine (trans_e (comp_assoc _ _ _) _).
      apply ap_e.
      apply comp_id_r.
Defined.
(** Associativity as a homotopy endpoint, at arbitrary endpoints [e₁ e₂ e₃]:
    instantiates [massoc] at the triple and reassociates/projects on both
    sides so the composites match. *)
Definition assoc_homot_endpoint
           {P : poly_code}
           (e₁ e₂ e₃ : endpoint coh_2gr_point_constr P I)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c P (tt : unit_one_type))
      (c P (tt : unit_one_type))
      (mult_endpoint
         e₁
         (mult_endpoint
            e₂
            e₃))
      (mult_endpoint
         (mult_endpoint
            e₁
            e₂)
         e₃).
Proof.
  unfold mult_endpoint.
  refine (trans_e _ (trans_e (path_constr massoc (pair (pair e₁ e₂) e₃)) _))
  ; simpl ; unfold mult_endpoint.
  - (* source side *)
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (trans_e _ (inv_e (comp_assoc _ _ _))).
    apply ap_e.
    refine (trans_e (path_pair _ _) (inv_e (comp_pair _ _ _))).
    + apply inv_e.
      refine (trans_e (comp_assoc _ _ _) _).
      refine (trans_e (ap_e _ (pair_π₁ _ _)) _).
      apply pair_π₁.
    + refine (trans_e _ (inv_e (comp_assoc _ _ _))).
      apply ap_e.
      refine (trans_e _ (inv_e (comp_assoc _ _ _))).
      apply ap_e.
      refine (trans_e (path_pair _ _) (inv_e (comp_pair _ _ _))).
      * apply inv_e.
        refine (trans_e (comp_assoc _ _ _) _).
        refine (trans_e (ap_e _ (pair_π₁ _ _)) _).
        apply pair_π₂.
      * apply inv_e.
        apply pair_π₂.
  - (* target side *)
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    refine (trans_e (comp_assoc _ _ _) _).
    apply ap_e.
    refine (trans_e (comp_pair _ _ _) (path_pair _ _)).
    + refine (trans_e (comp_assoc _ _ _) _).
      apply ap_e.
      refine (trans_e (comp_assoc _ _ _) _).
      apply ap_e.
      refine (trans_e (comp_pair _ _ _) (path_pair _ _)).
      * refine (trans_e (comp_assoc _ _ _) _).
        refine (trans_e (ap_e _ (pair_π₁ _ _)) _).
        apply pair_π₁.
      * refine (trans_e (comp_assoc _ _ _) _).
        refine (trans_e (ap_e _ (pair_π₁ _ _)) _).
        apply pair_π₂.
    + apply pair_π₂.
Defined.
(** Left whiskering: apply a homotopy endpoint on the right factor of a
    multiplication, keeping the left factor fixed. *)
Definition lwhisker_endpoint
           {P : poly_code}
           (e₁ : endpoint coh_2gr_point_constr P I)
           {e₂ e₃ : endpoint coh_2gr_point_constr P I}
           (h : homot_endpoint
                  coh_2gr_paths_lhs
                  coh_2gr_paths_rhs
                  (c P (tt : unit_one_type))
                  (c P (tt : unit_one_type))
                  e₂
                  e₃)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c P (tt : unit_one_type))
      (c P (tt : unit_one_type))
      (mult_endpoint e₁ e₂)
      (mult_endpoint e₁ e₃).
Proof.
  unfold mult_endpoint.
  use ap_e.
  use ap_e.
  use path_pair.
  - apply refl_e.
  - exact h.
Defined.

(** Right whiskering: apply a homotopy endpoint on the left factor of a
    multiplication, keeping the right factor fixed. *)
Definition rwhisker_endpoint
           {P : poly_code}
           {e₁ e₂ : endpoint coh_2gr_point_constr P I}
           (e₃ : endpoint coh_2gr_point_constr P I)
           (h : homot_endpoint
                  coh_2gr_paths_lhs
                  coh_2gr_paths_rhs
                  (c P (tt : unit_one_type))
                  (c P (tt : unit_one_type))
                  e₁
                  e₂)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c P (tt : unit_one_type))
      (c P (tt : unit_one_type))
      (mult_endpoint e₁ e₃)
      (mult_endpoint e₂ e₃).
Proof.
  unfold mult_endpoint.
  use ap_e.
  use ap_e.
  use path_pair.
  - exact h.
  - apply refl_e.
Defined.
(** Left-hand sides of the homotopy constructors, built from the homotopy
    endpoints above (unit/inverse laws, associativity, whiskering). *)
Definition coh_2gr_homots_point_lhs
           (i : coh_2gr_homots)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c (coh_2gr_homots_point_arg i) (tt : unit_one_type))
      (c (coh_2gr_homots_point_arg i) (tt : unit_one_type))
      (coh_2gr_homots_point_left_endpoint i)
      (coh_2gr_homots_point_right_endpoint i).
Proof.
  induction i.
  - (* inv_adj_triangle_l *)
    exact (trans_e
             (rwhisker_endpoint _ (rinv_homot_endpoint (id_e _ _)))
             (trans_e
                (inv_e (assoc_homot_endpoint _ _ _))
                (lwhisker_endpoint _ (linv_homot_endpoint (id_e _ _))))).
  - (* inv_adj_triangle_r *)
    exact (trans_e
             (lwhisker_endpoint _ (rinv_homot_endpoint (id_e _ _)))
             (trans_e
                (assoc_homot_endpoint _ _ _)
                (rwhisker_endpoint _ (linv_homot_endpoint (id_e _ _))))).
  - (* triangle *)
    exact (lwhisker_endpoint
             (π₁ I I)
             (lunit_homot_endpoint _)).
  - (* pentagon *)
    exact (trans_e
             (assoc_homot_endpoint _ _ _)
             (assoc_homot_endpoint _ _ _)).
Defined.

(** Right-hand sides of the homotopy constructors. *)
Definition coh_2gr_homots_point_rhs
           (i : coh_2gr_homots)
  : homot_endpoint
      coh_2gr_paths_lhs
      coh_2gr_paths_rhs
      (c (coh_2gr_homots_point_arg i) (tt : unit_one_type))
      (c (coh_2gr_homots_point_arg i) (tt : unit_one_type))
      (coh_2gr_homots_point_left_endpoint i)
      (coh_2gr_homots_point_right_endpoint i).
Proof.
  induction i.
  - (* inv_adj_triangle_l *)
    exact (trans_e
             (lunit_homot_endpoint _)
             (inv_e (runit_homot_endpoint _))).
  - (* inv_adj_triangle_r *)
    exact (trans_e
             (runit_homot_endpoint _)
             (inv_e (lunit_homot_endpoint _))).
  - (* triangle *)
    exact (trans_e
             (assoc_homot_endpoint _ _ _)
             (rwhisker_endpoint
                (π₂ I I)
                (runit_homot_endpoint _))).
  - (* pentagon *)
    exact (trans_e
             (lwhisker_endpoint _ (assoc_homot_endpoint _ _ _))
             (trans_e
                (assoc_homot_endpoint _ _ _)
                (rwhisker_endpoint _ (assoc_homot_endpoint _ _ _)))).
Defined.
(** The full HIT signature of coherent 2-groups, assembled from the pieces
    above; the path arguments of the homotopies are trivial (constant unit). *)
Definition coh_2gr_signature
  : hit_signature.
Proof.
  simple refine (_ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _ ,, _).
  - exact coh_2gr_point_constr.
  - exact coh_2gr_paths.
  - exact coh_2gr_paths_args.
  - exact coh_2gr_paths_lhs.
  - exact coh_2gr_paths_rhs.
  - exact coh_2gr_homots.
  - exact coh_2gr_homots_point_arg.
  - exact (λ _, C unit_one_type).
  - exact (λ _, @c _ _ unit_one_type tt).
  - exact (λ _, @c _ _ unit_one_type tt).
  - exact coh_2gr_homots_point_left_endpoint.
  - exact coh_2gr_homots_point_right_endpoint.
  - exact coh_2gr_homots_point_lhs.
  - exact coh_2gr_homots_point_rhs.
Defined.
(**
Projections.
We define projections for both path algebras and algebras.
This allowing reusing results for the builder.
*)
Section Coherent2GroupPathAlgebraProjections.
Variable (X : hit_path_algebra_one_types coh_2gr_signature).
(** Projections from a path algebra [X]: the carrier, the three point
    operations (read off the three summands of the point constructor), and
    the five path operations. *)
Definition coh_2gr_carrier_PA
  : one_type
  := pr11 X.

Definition coh_2gr_unit_PA
  : coh_2gr_carrier_PA
  := pr21 X (inl (inl tt)).

Definition coh_2gr_inv_PA
           (x : coh_2gr_carrier_PA)
  : coh_2gr_carrier_PA
  := pr21 X (inl (inr x)).

Definition coh_2gr_mult_PA
           (x y : coh_2gr_carrier_PA)
  : coh_2gr_carrier_PA
  := pr21 X (inr (x ,, y)).

Definition coh_2gr_lunit_PA
           (x : coh_2gr_carrier_PA)
  : coh_2gr_mult_PA coh_2gr_unit_PA x
    =
    x
  := pr2 X lunit x.

Definition coh_2gr_runit_PA
           (x : coh_2gr_carrier_PA)
  : coh_2gr_mult_PA x coh_2gr_unit_PA
    =
    x
  := pr2 X runit x.

Definition coh_2gr_linv_PA
           (x : coh_2gr_carrier_PA)
  : coh_2gr_mult_PA (coh_2gr_inv_PA x) x
    =
    coh_2gr_unit_PA
  := pr2 X linv x.

Definition coh_2gr_rinv_PA
           (x : coh_2gr_carrier_PA)
  : coh_2gr_unit_PA
    =
    coh_2gr_mult_PA x (coh_2gr_inv_PA x)
  := pr2 X rinv x.

Definition coh_2gr_assoc_PA
           (x y z : coh_2gr_carrier_PA)
  : coh_2gr_mult_PA x (coh_2gr_mult_PA y z)
    =
    coh_2gr_mult_PA (coh_2gr_mult_PA x y) z
  := pr2 X massoc ((x ,, y) ,, z).
(** The left adjoint-equivalence triangle, left-hand side: the projected
    composite equals the semantic interpretation of the signature's LHS. *)
Definition coh_2gr_inv_adj_triangle_l_l
           (x : coh_2gr_carrier_PA)
  : maponpaths (λ z, coh_2gr_mult_PA z x) (coh_2gr_rinv_PA x)
    @ !(coh_2gr_assoc_PA x (coh_2gr_inv_PA x) x)
    @ maponpaths (λ z, coh_2gr_mult_PA x z) (coh_2gr_linv_PA x)
    =
    sem_homot_endpoint_one_types
      (homot_left_path coh_2gr_signature inv_adj_triangle_l)
      (pr1 X) (pr2 X)
      x (idpath tt).
Proof.
  unfold coh_2gr_mult_PA, coh_2gr_linv_PA, coh_2gr_rinv_PA ;
  unfold coh_2gr_assoc_PA, coh_2gr_inv_PA.
  simpl.
  rewrite !pathscomp0rid.
  simpl.
  refine (!_).
  (* Fuse the nested maponpaths on both whiskered paths. *)
  etrans.
  {
    apply maponpaths_2.
    etrans.
    {
      apply maponpaths.
      etrans.
      {
        apply maponpaths.
        refine (!_).
        apply ap_pair_l.
      }
      apply (maponpathscomp (λ q, q ,, x) inr).
    }
    apply (maponpathscomp (λ q, inr (q ,, x))).
  }
  do 2 apply maponpaths.
  etrans.
  {
    apply maponpaths.
    etrans.
    {
      apply maponpaths.
      refine (!_).
      apply ap_pair_r.
    }
    apply (maponpathscomp (λ q, x ,, q) inr).
  }
  apply (maponpathscomp (λ q, inr (x ,, q))).
Qed.
Definition coh_2gr_inv_adj_triangle_l_r
(x : coh_2gr_carrier_PA)
: sem_homot_endpoint_one_types
(homot_right_path coh_2gr_signature inv_adj_triangle_l)
(pr1 X) (pr2 X)
x (idpath tt)
= coh_2gr_lunit_PA x @ !(coh_2gr_runit_PA x).
Proof.
unfold coh_2gr_lunit_PA, coh_2gr_runit_PA.
simpl.
rewrite !pathscomp0rid.
apply idpath.
Qed.
Definition coh_2gr_inv_adj_triangle_r_l
(x : coh_2gr_carrier_PA)
: maponpaths (λ z, coh_2gr_mult_PA (coh_2gr_inv_PA x) z) (coh_2gr_rinv_PA x)
@ coh_2gr_assoc_PA (coh_2gr_inv_PA x) x (coh_2gr_inv_PA x)
@ maponpaths (λ z, coh_2gr_mult_PA z (coh_2gr_inv_PA x)) (coh_2gr_linv_PA x)
=
sem_homot_endpoint_one_types
(homot_left_path coh_2gr_signature inv_adj_triangle_r)
(pr1 X) (pr2 X)
x (idpath tt).
Proof.
unfold coh_2gr_mult_PA, coh_2gr_linv_PA, coh_2gr_rinv_PA ;
unfold coh_2gr_assoc_PA, coh_2gr_inv_PA.
simpl.
rewrite !pathscomp0rid.
simpl.
refine (!_).
etrans.
{
apply maponpaths_2.
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths.
refine (!_).
apply ap_pair_r.
}
apply (maponpathscomp _ inr).
}
apply (maponpathscomp (λ q, inr ((pr21 X) (inl (inr x)),, q)) (pr21 X)).
}
do 2 apply maponpaths.
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths.
refine (!_).
apply ap_pair_l.
}
apply (maponpathscomp (λ q, q ,, _) inr).
}
apply (maponpathscomp (λ q, inr (q ,, _))).
Qed.
Definition coh_2gr_inv_adj_triangle_r_r
(x : coh_2gr_carrier_PA)
: sem_homot_endpoint_one_types
(homot_right_path coh_2gr_signature inv_adj_triangle_r)
(pr1 X) (pr2 X)
x
(idpath tt)
=
coh_2gr_runit_PA (coh_2gr_inv_PA x)
@ !(coh_2gr_lunit_PA (coh_2gr_inv_PA x)).
Proof.
unfold coh_2gr_lunit_PA, coh_2gr_runit_PA.
simpl.
rewrite !pathscomp0rid.
apply idpath.
Qed.
Definition coh_2gr_triangle_l
(x y : coh_2gr_carrier_PA)
: maponpaths (λ z, coh_2gr_mult_PA x z) (coh_2gr_lunit_PA y)
=
sem_homot_endpoint_one_types
(homot_left_path coh_2gr_signature triangle)
(pr1 X) (pr2 X)
(x,, y) (idpath tt).
Proof.
unfold coh_2gr_mult_PA, coh_2gr_lunit_PA.
simpl.
rewrite !pathscomp0rid.
refine (!_).
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths.
refine (!_).
apply ap_pair_r.
}
apply maponpathscomp.
}
exact (maponpathscomp (λ q, inr (x,, q)) (pr21 X) (pr2 X lunit y)).
Qed.
Definition coh_2gr_triangle_r
(x y : coh_2gr_carrier_PA)
: sem_homot_endpoint_one_types
(homot_right_path coh_2gr_signature triangle)
(pr1 X) (pr2 X)
(x,, y) (idpath tt)
=
coh_2gr_assoc_PA x coh_2gr_unit_PA y
@ maponpaths (λ z, coh_2gr_mult_PA z y) (coh_2gr_runit_PA x).
Proof.
unfold coh_2gr_assoc_PA, coh_2gr_runit_PA.
simpl.
rewrite !pathscomp0rid.
apply maponpaths.
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths.
refine (!_).
apply ap_pair_l.
}
exact (maponpathscomp (λ q, q ,, y) inr (pr2 X runit x)).
}
exact (maponpathscomp (λ q, inr (q,, y)) (pr21 X) (pr2 X runit x)).
Qed.
Definition coh_2gr_pentagon_l
(w x y z : coh_2gr_carrier_PA)
: coh_2gr_assoc_PA w x (coh_2gr_mult_PA y z)
@ coh_2gr_assoc_PA (coh_2gr_mult_PA w x) y z
=
sem_homot_endpoint_one_types
(homot_left_path coh_2gr_signature pentagon)
(pr1 X) (pr2 X)
(((w,, x),, y),, z) (idpath tt).
Proof.
simpl.
rewrite !pathscomp0rid.
apply idpath.
Qed.
Definition coh_2gr_pentagon_r
(w x y z : coh_2gr_carrier_PA)
: sem_homot_endpoint_one_types
(homot_right_path coh_2gr_signature pentagon)
(pr1 X) (pr2 X)
(((w,, x),, y),, z) (idpath tt)
=
maponpaths (λ q, coh_2gr_mult_PA w q) (coh_2gr_assoc_PA x y z)
@ coh_2gr_assoc_PA w (coh_2gr_mult_PA x y) z
@ maponpaths (λ q, coh_2gr_mult_PA q z) (coh_2gr_assoc_PA w x y).
Proof.
simpl.
rewrite !pathscomp0rid.
etrans.
{
apply maponpaths_2.
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths.
refine (!_).
apply ap_pair_r.
}
apply maponpathscomp.
}
apply (maponpathscomp (λ q, inr (w ,, q)) (pr21 X)).
}
do 2 apply maponpaths.
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths.
refine (!_).
apply ap_pair_l.
}
apply (maponpathscomp (λ q, q ,, z) inr).
}
apply (maponpathscomp (λ q, inr (q ,, z)) (pr21 X)).
Qed.
End Coherent2GroupPathAlgebraProjections.
Section Coherent2GroupAlgebraProjections.
Variable (X : hit_algebra_one_types coh_2gr_signature).
(* Carrier of the algebra: reuse the path-algebra projection. *)
Definition coh_2gr_carrier
: one_type
:= coh_2gr_carrier_PA (pr1 X).
(* Unit element. *)
Definition coh_2gr_unit
: coh_2gr_carrier
:= coh_2gr_unit_PA (pr1 X).
(* Inverse operation. *)
Definition coh_2gr_inv
(x : coh_2gr_carrier)
: coh_2gr_carrier
:= coh_2gr_inv_PA (pr1 X) x.
(* Multiplication. *)
Definition coh_2gr_mult
(x y : coh_2gr_carrier)
: coh_2gr_carrier
:= coh_2gr_mult_PA (pr1 X) x y.
(* Left unit law. *)
Definition coh_2gr_lunit
(x : coh_2gr_carrier)
: coh_2gr_mult coh_2gr_unit x
=
x
:= coh_2gr_lunit_PA (pr1 X) x.
(* Right unit law. *)
Definition coh_2gr_runit
(x : coh_2gr_carrier)
: coh_2gr_mult x coh_2gr_unit
=
x
:= coh_2gr_runit_PA (pr1 X) x.
(* Left inverse law. *)
Definition coh_2gr_linv
(x : coh_2gr_carrier)
: coh_2gr_mult (coh_2gr_inv x) x
=
coh_2gr_unit
:= coh_2gr_linv_PA (pr1 X) x.
(* Right inverse law (oriented e = x * x⁻¹). *)
Definition coh_2gr_rinv
(x : coh_2gr_carrier)
: coh_2gr_unit
=
coh_2gr_mult x (coh_2gr_inv x)
:= coh_2gr_rinv_PA (pr1 X) x.
(* Associativity. *)
Definition coh_2gr_assoc
(x y z : coh_2gr_carrier)
: coh_2gr_mult x (coh_2gr_mult y z)
=
coh_2gr_mult (coh_2gr_mult x y) z
:= coh_2gr_assoc_PA (pr1 X) x y z.
Definition coh_2gr_inv_adj_triangle_l
(x : coh_2gr_carrier)
: maponpaths
(λ z, coh_2gr_mult z x)
(coh_2gr_rinv x)
@ !(coh_2gr_assoc _ _ _)
@ maponpaths
(λ z, coh_2gr_mult x z)
(coh_2gr_linv x)
=
coh_2gr_lunit x @ !(coh_2gr_runit x).
Proof.
refine (_ @ pr2 X inv_adj_triangle_l x (idpath tt) @ _).
- exact (coh_2gr_inv_adj_triangle_l_l _ x).
- exact (coh_2gr_inv_adj_triangle_l_r _ x).
Qed.
Definition coh_2gr_inv_adj_triangle_r
(x : coh_2gr_carrier)
: maponpaths
(λ z, coh_2gr_mult _ z)
(coh_2gr_rinv x)
@ coh_2gr_assoc _ _ _
@ maponpaths
(λ z, coh_2gr_mult z _)
(coh_2gr_linv x)
=
coh_2gr_runit (coh_2gr_inv x) @ !(coh_2gr_lunit (coh_2gr_inv x)).
Proof.
refine (_ @ pr2 X inv_adj_triangle_r x (idpath tt) @ _).
- exact (coh_2gr_inv_adj_triangle_r_l _ x).
- exact (coh_2gr_inv_adj_triangle_r_r _ x).
Qed.
Definition coh_2gr_triangle
(x y : coh_2gr_carrier)
: maponpaths
(λ z, coh_2gr_mult x z)
(coh_2gr_lunit y)
=
(coh_2gr_assoc _ _ _)
@ maponpaths
(λ z, coh_2gr_mult z y)
(coh_2gr_runit x).
Proof.
refine (_ @ pr2 X triangle (x ,, y) (idpath tt) @ _).
- exact (coh_2gr_triangle_l _ x y).
- exact (coh_2gr_triangle_r _ x y).
Qed.
Definition coh_2gr_pentagon
(w x y z : coh_2gr_carrier)
: coh_2gr_assoc w x (coh_2gr_mult y z)
@ coh_2gr_assoc (coh_2gr_mult w x) y z
=
maponpaths
(λ q, coh_2gr_mult w q)
(coh_2gr_assoc x y z)
@ coh_2gr_assoc w (coh_2gr_mult x y) z
@ maponpaths
(λ q, coh_2gr_mult q z)
(coh_2gr_assoc w x y).
Proof.
refine (_ @ pr2 X pentagon (((w ,, x) ,, y) ,, z) (idpath tt) @ _).
- exact (coh_2gr_pentagon_l _ w x y z).
- exact (coh_2gr_pentagon_r _ w x y z).
Qed.
End Coherent2GroupAlgebraProjections.
(** Builder *)
(** Builder: from the data of a coherent 2-group on a 1-type [A]
    (operations, laws and coherences), construct an algebra for
    [coh_2gr_signature]. *)
Section Coherent2GroupBuilder.
(* The carrier, operations and laws supplied by the user. *)
Variable (A : one_type)
(e : A)
(i : A → A)
(m : A → A → A)
(unitl_m : ∏ (x : A), m e x = x)
(unitr_m : ∏ (x : A), m x e = x)
(invl_m : ∏ (x : A), m (i x) x = e)
(invr_m : ∏ (x : A), e = m x (i x))
(assoc_m : ∏ (x y z : A), m x (m y z) = m (m x y) z)
(* Coherences: the two adjunction triangles for the inverse, ... *)
(m_invt_l : ∏ (x : A),
maponpaths (λ z, m z x) (invr_m x)
@ !(assoc_m _ _ _)
@ maponpaths (λ z, m x z) (invl_m x)
=
unitl_m x @ !(unitr_m x))
(m_invt_r : ∏ (x : A),
maponpaths (λ z, m _ z) (invr_m x)
@ assoc_m _ _ _
@ maponpaths (λ z, m z _) (invl_m x)
=
unitr_m (i x) @ !(unitl_m (i x)))
(* ... the unit triangle, ... *)
(triangle_m : ∏ (x y : A),
maponpaths (λ z, m x z) (unitl_m y)
=
(assoc_m _ _ _)
@ maponpaths (λ z, m z y) (unitr_m x))
(* ... and the pentagon for associativity. *)
(pentagon_m : ∏ (w x y z : A),
assoc_m w x (m y z)
@ assoc_m (m w x) y z
=
maponpaths (λ q, m w q) (assoc_m x y z)
@ assoc_m w (m x y) z
@ maponpaths (λ q, m q z) (assoc_m w x y)).
(* Step 1: the prealgebra, interpreting the point constructors. *)
Local Definition make_2gr_prealgebra
: hit_prealgebra_one_types coh_2gr_signature.
Proof.
use make_hit_prealgebra.
- exact A.
- apply one_type_isofhlevel.
- intro x ; induction x as [x | x].
+ induction x as [ | x].
* exact e.
* exact (i x).
+ exact (m (pr1 x) (pr2 x)).
Defined.
(* Step 2: the path algebra, interpreting the path constructors. *)
Local Definition make_2gr_path_algebra
: hit_path_algebra_one_types coh_2gr_signature.
Proof.
use make_hit_path_algebra.
- exact make_2gr_prealgebra.
- intros j x.
induction j.
+ (* unitl *)
apply unitl_m.
+ (* unitr *)
apply unitr_m.
+ (* invl *)
apply invl_m.
+ (* invr *)
apply invr_m.
+ (* assoc *)
apply assoc_m.
Defined.
(* Step 3: the homotopy constructors hold, via the projection lemmas. *)
Definition make_2gr_path_algebra_is_algebra
: is_hit_algebra_one_types coh_2gr_signature make_2gr_path_algebra.
Proof.
intros j x p.
induction j.
- (* inv_adj_triangle_l *)
refine (_ @ m_invt_l x @ _).
+ exact (!(coh_2gr_inv_adj_triangle_l_l _ x)).
+ exact (!(coh_2gr_inv_adj_triangle_l_r _ x)).
- (* inv_adj_triangle_r *)
refine (_ @ m_invt_r x @ _).
+ exact (!(coh_2gr_inv_adj_triangle_r_l _ x)).
+ exact (!(coh_2gr_inv_adj_triangle_r_r _ x)).
- (* triangle *)
refine (_ @ triangle_m (pr1 x) (pr2 x) @ _).
+ exact (!(coh_2gr_triangle_l _ (pr1 x) (pr2 x))).
+ exact (!(coh_2gr_triangle_r _ (pr1 x) (pr2 x))).
- (* pentagon *)
refine (_ @ pentagon_m (pr111 x) (pr211 x) (pr21 x) (pr2 x) @ _).
+ exact (!(coh_2gr_pentagon_l _ (pr111 x) (pr211 x) (pr21 x) (pr2 x))).
+ exact (!(coh_2gr_pentagon_r _ (pr111 x) (pr211 x) (pr21 x) (pr2 x))).
Qed.
(* The final algebra, packaging the three steps. *)
Definition make_2gr_algebra
: hit_algebra_one_types coh_2gr_signature.
Proof.
use make_algebra.
- exact make_2gr_path_algebra.
- exact make_2gr_path_algebra_is_algebra.
Defined.
End Coherent2GroupBuilder.
(** The loop space of a 2-type is a coherent 2-group *)
(** The loop space [x = x] of a 2-type [X] at a basepoint [x] is a
    coherent 2-group: multiplication is path concatenation, inverse is
    path reversal, and the coherences follow by path induction. *)
Definition loop_space_2gr
{X : UU}
(HX : isofhlevel 4 X)
(x : X)
: hit_algebra_one_types coh_2gr_signature.
Proof.
use make_2gr_algebra.
(* Carrier: the loop space, a 1-type since X is a 2-type. *)
- use make_one_type.
+ exact (x = x).
+ exact (HX x x).
(* Unit, inverse, multiplication. *)
- exact (idpath x).
- exact (λ p, !p).
- exact (λ p q, p @ q).
(* Left unit holds definitionally; right unit, inverses, associativity. *)
- exact (λ p, idpath p).
- exact pathscomp0rid.
- exact pathsinv0l.
- exact (λ p, !(pathsinv0r p)).
- exact path_assoc.
(* The four coherences, each by path induction on the loop(s). *)
- simpl.
intro p ; induction p.
apply idpath.
- intro p ; induction p.
apply idpath.
- intro p ; induction p.
exact (λ _, idpath _).
- intro p ; induction p.
intro p ; induction p.
exact (λ _ _, idpath _).
Defined.
(**
The automorphism 2-group of `X`.
Assuming univalence, this is just the loopspace of the type of 1-types with basepoint `X`.
Here we construct it without assuming univalence.
*)
Local Open Scope weq.
(**
We first give some principles to construct equalities between weak equivalences.
In addition, we prove some properties about them.
*)
(* Being a weak equivalence is a proposition, so any two proofs agree. *)
Definition path_isweq
{X Y : UU}
{f : X → Y}
(Hf₁ Hf₂ : isweq f)
: Hf₁ = Hf₂.
Proof.
apply isapropisweq.
Defined.
(* Equality of weak equivalences from equality of underlying functions. *)
Definition path_weq'
{X Y : UU}
{f g : X ≃ Y}
(p : pr1 f = pr1 g)
: f = g.
Proof.
induction f as [f Hf], g as [g Hg] ; simpl in *.
induction p.
refine (maponpaths (λ z, f ,, z) _).
apply path_isweq.
Defined.
(* Equality of weak equivalences from a pointwise homotopy
   (via function extensionality). *)
Definition path_weq
{X Y : UU}
{f g : X ≃ Y}
(p : ∏ (x : X), f x = g x)
: f = g.
Proof.
apply path_weq'.
use funextsec.
exact p.
Defined.
Definition inv_path_weq'
{X Y : UU}
{f g : X ≃ Y}
(p : pr1 f = pr1 g)
: !(path_weq' p) = path_weq' (!p).
Proof.
induction f as [f Hf] ; induction g as [g Hg] ; simpl in *.
induction p.
simpl.
etrans.
{
refine (!_).
apply maponpathsinv0.
}
apply maponpaths.
apply isapropisweq.
Defined.
Definition comp_path_weq'
{X Y : UU}
{f g h : X ≃ Y}
(p : pr1 f = pr1 g)
(q : pr1 g = pr1 h)
: path_weq' p @ path_weq' q = path_weq' (p @ q).
Proof.
induction f as [f Hf]
; induction g as [g Hg]
; induction h as [h Hh]
; simpl in *.
induction p ; induction q ; simpl.
etrans.
{
refine (!_).
apply maponpathscomp0.
}
apply maponpaths.
apply isapropisweq.
Defined.
Definition precomp_path_weq'
{X Y Z : UU}
(f : X ≃ Y)
{g h : Y ≃ Z}
(p : pr1 g = pr1 h)
: maponpaths
(λ z, z ∘ f)
(path_weq' p)
=
@path_weq' X Z (g ∘ f) (h ∘ f) (maponpaths (λ z, z ∘ pr1 f)%functions p).
Proof.
induction g as [g Hg]
; induction h as [h Hh]
; simpl in *.
induction p ; simpl.
etrans.
{
apply maponpathscomp.
}
etrans.
{
refine (!_).
exact (maponpathscomp _ (λ z, _ ,, z) _).
}
simpl.
apply maponpaths.
apply isapropisweq.
Qed.
Definition postcomp_path_weq'
{X Y Z : UU}
{f g : X ≃ Y}
(h : Y ≃ Z)
(p : pr1 f = pr1 g)
: maponpaths
(λ z, h ∘ z)
(path_weq' p)
=
@path_weq' X Z (h ∘ f) (h ∘ g) (maponpaths (λ z, (λ x, h(z x))) p).
Proof.
induction f as [f Hf]
; induction g as [g Hg]
; simpl in *.
induction p ; simpl.
etrans.
{
apply (maponpathscomp _ (λ z, h ∘ z)).
}
unfold funcomp, weqcomp, make_weq ; simpl.
etrans.
{
refine (!_).
exact (maponpathscomp
(λ z, twooutof3c f h z (pr2 h))
(λ z, (λ q, h(f q)) ,, z)
_).
}
simpl.
apply maponpaths.
apply isapropisweq.
Qed.
(** Necessary lemmas about function extensionality *)
Definition funextsec_idpath
{X Y : UU}
(f : X → Y)
: funextsec _ _ _ (λ x, idpath (f x)) = idpath f.
Proof.
refine (_ @ funextsec_toforallpaths _).
apply idpath.
Qed.
Definition inv_toforallpaths
{X : UU}
{Y : X → UU}
{f g : ∏ (x : X), Y x}
(p : f = g)
: (λ x, !(toforallpaths _ _ _ p x)) = toforallpaths _ _ _ (!p).
Proof.
induction p.
apply idpath.
Qed.
Definition inv_funextsec
{X Y : UU}
{f g : X → Y}
(p : ∏ (x : X), f x = g x)
: !(funextsec _ _ _ p) = funextsec _ _ _ (λ x, !(p x)).
Proof.
refine (!(funextsec_toforallpaths _) @ _).
apply maponpaths.
refine (!(inv_toforallpaths (funextsec _ _ _ p)) @ _) ; simpl.
use funextsec ; intro x.
apply maponpaths.
exact (eqtohomot (toforallpaths_funextsec p) x).
Qed.
Definition comp_toforallpaths
{X : UU}
{Y : X → UU}
{f g h : ∏ (x : X), Y x}
(p : f = g) (q : g = h)
: toforallpaths _ _ _ (p @ q)
=
(λ x, toforallpaths _ _ _ p x @ toforallpaths _ _ _ q x).
Proof.
induction p, q.
apply idpath.
Qed.
Definition comp_funextsec
{X Y : UU}
{f g h : X → Y}
(p : ∏ (x : X), f x = g x)
(q : ∏ (x : X), g x = h x)
: funextsec _ _ _ p @ funextsec _ _ _ q
=
funextsec _ _ _ (λ x, p x @ q x).
Proof.
refine (!(funextsec_toforallpaths _) @ _).
apply maponpaths.
refine (comp_toforallpaths _ _ @ _).
use funextsec ; intro x ; simpl.
rewrite !toforallpaths_funextsec.
apply idpath.
Qed.
Definition precomp_toforallpaths
{X Y Z : UU}
(f : X → Y)
{g h : Y → Z}
(p : g = h)
: toforallpaths
_ _ _
(maponpaths (λ z : Y → Z, (z ∘ f)%functions) p)
=
(λ x, toforallpaths _ _ _ p (f x)).
Proof.
induction p.
apply idpath.
Qed.
Definition precomp_funextsec
{X Y Z : UU}
(f : X → Y)
{g h : Y → Z}
(p : ∏ (y : Y), g y = h y)
: maponpaths
(λ z : Y → Z, (z ∘ f)%functions)
(funextsec _ _ _ p)
=
funextsec _ _ _ (λ x : X, p (f x)).
Proof.
refine (!(funextsec_toforallpaths _) @ _).
apply maponpaths.
refine (precomp_toforallpaths f (funextsec _ _ _ p) @ _).
use funextsec ; intro x.
exact (eqtohomot (toforallpaths_funextsec _) (f x)).
Qed.
Definition postcomp_toforallpaths
{X Y Z : UU}
{f g : X → Y}
(h : Y → Z)
(p : f = g)
: toforallpaths
_ _ _
(maponpaths (λ q : X → Y, (λ x, h(q x))) p)
=
(λ x, maponpaths h (toforallpaths _ _ _ p x)).
Proof.
induction p.
apply idpath.
Qed.
Definition postcomp_funext
{X Y Z : UU}
{f g : X → Y}
(h : Y → Z)
(p : ∏ (x : X), f x = g x)
: maponpaths
(λ (z : X → Y) (x : X), h (z x))
(funextsec _ _ _ p)
=
funextsec _ _ _ (λ x : X, maponpaths h (p x)).
Proof.
refine (!(funextsec_toforallpaths _) @ _).
apply maponpaths.
refine (postcomp_toforallpaths h _ @ _).
use funextsec ; intro x ; simpl.
apply maponpaths.
exact (eqtohomot (toforallpaths_funextsec p) x).
Qed.
(** The lemmas on `path_weq` we need *)
Definition inv_path_weq
{X Y : UU}
{f g : X ≃ Y}
(p : ∏ (x : X), f x = g x)
: !(path_weq p) = path_weq (λ x, !(p x)).
Proof.
refine (inv_path_weq' _ @ _).
unfold path_weq ; apply maponpaths.
apply inv_funextsec.
Qed.
Definition comp_path_weq
{X Y : UU}
{f g h : X ≃ Y}
(p : ∏ (x : X), f x = g x)
(q : ∏ (x : X), g x = h x)
: path_weq p @ path_weq q
=
path_weq (λ x, p x @ q x).
Proof.
refine (comp_path_weq' _ _ @ _).
unfold path_weq ; apply maponpaths.
apply comp_funextsec.
Qed.
Definition precomp_path_weq
{X Y Z : UU}
(f : X ≃ Y)
{g h : Y ≃ Z}
(p : ∏ (y : Y), g y = h y)
: maponpaths
(λ z, z ∘ f)
(path_weq p)
=
@path_weq X Z (g ∘ f) (h ∘ f) (λ x, p (f x)).
Proof.
refine (precomp_path_weq' _ _ @ _).
unfold path_weq.
apply maponpaths.
exact (precomp_funextsec f p).
Qed.
Definition postcomp_path_weq
{X Y Z : UU}
{f g : X ≃ Y}
(h : Y ≃ Z)
(p : ∏ (x : X), f x = g x)
: maponpaths
(λ z, h ∘ z)
(path_weq p)
=
@path_weq X Z (h ∘ f) (h ∘ g) (λ x, maponpaths h (p x)).
Proof.
refine (postcomp_path_weq' _ _ @ _).
unfold path_weq.
apply maponpaths.
exact (postcomp_funext h p).
Qed.
Definition homotweq
{X Y : UU}
{f g : X ≃ Y}
{p q : ∏ (x : X), f x = g x}
(r : ∏ (x : X), p x = q x)
: path_weq p = path_weq q.
Proof.
assert (p = q) as H.
{
use funextsec.
exact r.
}
induction H.
apply idpath.
Defined.
(**
The laws to show that weak equivalences on `X` form a coherent weak 2-group.
*)
(* Unit law for composition with the identity equivalence
   (named for its role as the left unit in the 2-group structure). *)
Definition weq_id_l
{X : UU}
(f : X ≃ X)
: f ∘ idweq X = f.
Proof.
use path_weq.
exact (λ _, idpath _).
Defined.
(* The other unit law. *)
Definition weq_id_r
{X : UU}
(f : X ≃ X)
: idweq X ∘ f = f.
Proof.
use path_weq.
exact (λ _, idpath _).
Defined.
(* Inverse law, via the section witness of the equivalence. *)
Definition weq_inv_l
{X : UU}
(f : X ≃ X)
: f ∘ invweq f = idweq X.
Proof.
use path_weq.
intro x ; simpl.
exact (homotweqinvweq f x).
Defined.
(* Inverse law oriented e = f⁻¹ ∘ f, via the retraction witness. *)
Definition weq_inv_r
{X : UU}
(f : X ≃ X)
: idweq X = invweq f ∘ f.
Proof.
use path_weq.
intro x ; simpl.
exact (!(homotinvweqweq f x)).
Defined.
(* Associativity of composition; holds pointwise by reflexivity. *)
Definition weq_assoc
{X : UU}
(f g h : X ≃ X)
: (h ∘ g) ∘ f = h ∘ (g ∘ f).
Proof.
use path_weq.
exact (λ _, idpath _).
Defined.
Definition weq_inv_triangle_l
{X : UU}
(f : X ≃ X)
: maponpaths
(λ z : X ≃ X, f ∘ z)
(weq_inv_r f)
@ !(weq_assoc f (invweq f) f)
@ maponpaths (λ z : X ≃ X, z ∘ f) (weq_inv_l f)
=
weq_id_l f @ ! weq_id_r f.
Proof.
unfold weq_inv_r, weq_assoc, weq_inv_l.
etrans.
{
etrans.
{
apply maponpaths_2.
apply postcomp_path_weq.
}
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths_2.
apply inv_path_weq.
}
etrans.
{
apply maponpaths.
apply precomp_path_weq.
}
apply comp_path_weq.
}
apply comp_path_weq.
}
unfold weq_id_l, weq_id_r.
refine (!_).
etrans.
{
apply maponpaths.
apply inv_path_weq.
}
etrans.
{
apply comp_path_weq.
}
apply homotweq.
intros x ; simpl.
refine (!_).
etrans.
{
apply maponpaths.
exact (!(homotweqinvweqweq f x)).
}
etrans.
{
refine (!_).
apply maponpathscomp0.
}
etrans.
{
apply maponpaths.
apply pathsinv0l.
}
apply idpath.
Qed.
Definition weq_inv_triangle_r
{X : UU}
(f : X ≃ X)
: maponpaths (λ z : X ≃ X, z ∘ invweq f) (weq_inv_r f)
@ weq_assoc (invweq f) f (invweq f)
@ maponpaths (λ z : X ≃ X, invweq f ∘ z) (weq_inv_l f)
=
weq_id_r (invweq f) @ ! weq_id_l (invweq f).
Proof.
unfold weq_inv_r, weq_assoc, weq_inv_l.
etrans.
{
etrans.
{
apply maponpaths_2.
apply precomp_path_weq.
}
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths.
apply postcomp_path_weq.
}
apply comp_path_weq.
}
apply comp_path_weq.
}
unfold weq_id_l, weq_id_r.
refine (!_).
etrans.
{
apply maponpaths.
apply inv_path_weq.
}
etrans.
{
apply comp_path_weq.
}
apply homotweq.
intros x ; simpl.
refine (!_).
etrans.
{
apply maponpaths.
apply homotweqweqinvweq.
}
apply pathsinv0l.
Qed.
Definition weq_triangle
{X : UU}
(f g : X ≃ X)
: maponpaths (λ z : X ≃ X, z ∘ f) (weq_id_l g) =
weq_assoc f (idweq X) g
@ maponpaths (λ z : X ≃ X, g ∘ z) (weq_id_r f).
Proof.
etrans.
{
apply precomp_path_weq.
}
refine (!_).
etrans.
{
apply maponpaths.
apply postcomp_path_weq.
}
etrans.
{
apply comp_path_weq.
}
apply homotweq.
exact (λ _, idpath _).
Qed.
Definition weq_pentagon
{X : UU}
(f₁ f₂ f₃ f₄ : X ≃ X)
: weq_assoc f₁ f₂ (f₄ ∘ f₃) @ weq_assoc (f₂ ∘ f₁) f₃ f₄
=
maponpaths (λ q : X ≃ X, q ∘ f₁) (weq_assoc f₂ f₃ f₄)
@ weq_assoc f₁ (f₃ ∘ f₂) f₄
@ maponpaths (λ q : X ≃ X, f₄ ∘ q) (weq_assoc f₁ f₂ f₃).
Proof.
etrans.
{
apply comp_path_weq.
}
refine (!_).
etrans.
{
etrans.
{
apply maponpaths_2.
apply precomp_path_weq.
}
etrans.
{
apply maponpaths.
etrans.
{
apply maponpaths.
apply postcomp_path_weq.
}
apply comp_path_weq.
}
apply comp_path_weq.
}
apply homotweq.
exact (λ _, idpath _).
Qed.
(** The automorphism 2-group of a 1-type [X]: self weak equivalences
    of [X] with composition, built without assuming univalence. *)
Definition aut_2gr
(X : UU)
(HX : isofhlevel 3 X)
: hit_algebra_one_types coh_2gr_signature.
Proof.
use make_2gr_algebra.
(* Carrier: the type of self-equivalences, a 1-type since X is. *)
- use make_one_type.
+ exact (X ≃ X).
+ use isofhlevelsnweqtohlevelsn.
exact HX.
(* Unit, inverse and multiplication. *)
- exact (idweq X).
- exact invweq.
- exact weqcomp.
(* Laws. *)
- exact weq_id_l.
- exact weq_id_r.
- exact weq_inv_l.
- exact weq_inv_r.
- exact weq_assoc.
(* Coherences. *)
- exact weq_inv_triangle_l.
- exact weq_inv_triangle_r.
- exact weq_triangle.
- exact weq_pentagon.
Defined.
| {
"alphanum_fraction": null,
"author": "UniMath",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/UniMath-GrpdHITs/GrpdHITs-cb5a9af84400eb770392632eb74860d4ebad9306/code/examples/coherent_group.v",
"reason": null,
"repo": "GrpdHITs",
"save_path": "github-repos/coq/UniMath-GrpdHITs",
"sha": "cb5a9af84400eb770392632eb74860d4ebad9306",
"size": null
} |
classdef PTKDensityInterpolation < PTKPlugin
    % PTKDensityInterpolation. Plugin for interpolating density values to a
    % different voxel size
    %
    % This is a plugin for the Pulmonary Toolkit. Plugins can be run using
    % the gui, or through the interfaces provided by the Pulmonary Toolkit.
    % See PTKPlugin.m for more information on how to run plugins.
    %
    % Plugins should not be run directly from your code.
    %
    %
    %
    % Licence
    % -------
    % Part of the TD Pulmonary Toolkit. https://github.com/tomdoel/pulmonarytoolkit
    % Author: Tom Doel, 2012. www.tomdoel.com
    % Distributed under the GNU GPL v3 licence. Please see website for details.
    %
    properties
        ButtonText = 'Density Interpolation'
        ToolTip = 'Recomputes image density with different sized voxels'
        Category = 'Lungs'
        AllowResultsToBeCached = true
        AlwaysRunPlugin = false
        PluginType = 'ReplaceOverlay'
        HidePluginInDisplay = false
        FlattenPreviewImage = false
        PTKVersion = '1'
        ButtonWidth = 6
        ButtonHeight = 1
        GeneratePreview = false
        Visibility = 'Developer'
    end
    methods (Static)
        function results = RunPlugin(application, reporting)
            % Downsamples the lung density to a coarse grid, then maps the
            % coarse values back onto the original grid as a new overlay.
            %
            % The size of the 'voxels' for calculating the density.
            % Note: if you change this value, set the AlwaysRunPlugin
            % property above to true so that the cached result is discarded
            % and the plugin is forced to re-run.
            interp_voxel_size_mm = [5, 5, 5];
            % Fetch the intensity of just the lung regions
            roi = application.GetResult('PTKLungROI');
            left_and_right_lungs = application.GetResult('PTKLeftAndRightLungs');
            reporting.ShowProgress('Finding lung region for density');
            % Zero out all intensities outside the lung mask
            mask = left_and_right_lungs.RawImage > 0;
            roi_lung = int16(roi.RawImage).*int16(mask);
            % Coarse-grid spacing expressed in units of original voxels
            interp_voxel_size_units = interp_voxel_size_mm./roi.VoxelSize;
            image_size = roi.ImageSize;
            i_span = 1 : interp_voxel_size_units(1) : image_size(1);
            j_span = 1 : interp_voxel_size_units(2) : image_size(2);
            k_span = 1 : interp_voxel_size_units(3) : image_size(3);
            [interp_i, interp_j, interp_k] = ndgrid(i_span, j_span, k_span);
            % Interpolate density values
            reporting.ShowProgress('Interpolating to new voxel grid');
            roi_interp = interpn(single(roi_lung), interp_i, interp_j, interp_k, '*linear');
            % Interpolate mask (gives a measure of how much of each voxel is
            % within the lung vs outside the lung)
            reporting.ShowProgress('Interpolating to new voxel size');
            roi_mask = interpn(single(mask), interp_i, interp_j, interp_k, '*linear');
            % Rescale voxels that are partially outside the lung, and remove
            % those more than 50% outside of the lung
            roi_interp = roi_interp./roi_mask;
            roi_interp(roi_mask < 0.5) = 0;
            % Normalise to [0, 1] for display as a scaled overlay
            roi_interp = roi_interp/max(roi_interp(:));
            reporting.ShowProgress('Finding coordinates on original grid');
            % Map each original voxel back onto the coarse grid.
            % NOTE(review): the 1 + ... offset shifts every sample by one
            % coarse cell relative to the forward grid above - confirm the
            % intended alignment at the image boundaries.
            i_span_r = single(1 + (1:image_size(1))/interp_voxel_size_units(1));
            j_span_r = single(1 + (1:image_size(2))/interp_voxel_size_units(2));
            k_span_r = single(1 + (1:image_size(3))/interp_voxel_size_units(3));
            [interp_i, interp_j, interp_k] = ndgrid(i_span_r, j_span_r, k_span_r);
            % Round to nearest coarse cell and clamp to the grid extent
            interp_i = min(size(roi_interp, 1), round(interp_i));
            interp_j = min(size(roi_interp, 2), round(interp_j));
            interp_k = min(size(roi_interp, 3), round(interp_k));
            reporting.ShowProgress('Interpolation to original grid');
            % Nearest-cell lookup of the coarse values on the original grid
            indices = sub2ind(size(roi_interp), (interp_i(:)), (interp_j(:)), (interp_k(:)));
            results_raw = zeros(image_size, 'single');
            results_raw(:) = roi_interp(indices);
            results = roi.BlankCopy;
            results.ChangeRawImage(single(results_raw));
            results.ImageType = PTKImageType.Scaled;
        end
    end
end
"alphanum_fraction": null,
"author": "tomdoel",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/tomdoel-pulmonarytoolkit/pulmonarytoolkit-09688a006d548fb85795df0338d1ed4f4a010fb9/Plugins/Analysis/PTKDensityInterpolation.m",
"reason": null,
"repo": "pulmonarytoolkit",
"save_path": "github-repos/MATLAB/tomdoel-pulmonarytoolkit",
"sha": "09688a006d548fb85795df0338d1ed4f4a010fb9",
"size": null
} |
import matplotlib.pyplot as plt
from scipy.io import wavfile
import os
from pydub import AudioSegment
# Calculate and plot spectrogram for a wav audio file
def graph_spectrogram(wav_file, sec=10):
    """Compute (and plot) the spectrogram of the first ``sec`` seconds of a wav file.

    Args:
        wav_file: Path to the wav file to analyse.
        sec: Number of seconds from the start of the recording to use.

    Returns:
        pxx: 2-D array of spectrogram values (frequencies x time windows),
            as returned by ``matplotlib.pyplot.specgram``.

    Raises:
        ValueError: If the audio data has more than two channels.
    """
    rate, data = get_wav_info(wav_file)
    nfft = 200      # Length of each window segment
    fs = 8000       # Sampling frequency passed to specgram
    noverlap = 120  # Overlap between windows
    nchannels = data.ndim
    if nchannels == 1:
        pxx, freqs, bins, im = plt.specgram(data[:sec*rate], nfft, fs, noverlap=noverlap)
    elif nchannels == 2:
        # Stereo recording: analyse the first channel only.
        pxx, freqs, bins, im = plt.specgram(data[:sec*rate, 0], nfft, fs, noverlap=noverlap)
    else:
        # Bug fix: the original fell through with `pxx` unbound, raising a
        # confusing UnboundLocalError; fail with an explicit message instead.
        raise ValueError("Unsupported number of audio channels: %d" % nchannels)
    return pxx
# Load a wav file
def get_wav_info(wav_file):
    """Read a wav file and return its ``(sample_rate, data)`` pair."""
    return wavfile.read(wav_file)
# Used to standardize volume of audio clip
def match_target_amplitude(sound, target_dBFS):
    """Return ``sound`` with its volume adjusted to ``target_dBFS``."""
    gain = target_dBFS - sound.dBFS
    return sound.apply_gain(gain)
# Load raw audio files for speech synthesis
def load_raw_audio():
    """Load the raw training audio clips from disk.

    Reads every ``.wav`` file under ``./data/NG`` (positive / "activate"
    examples) and ``./data/OK`` (negative examples) into pydub
    ``AudioSegment`` objects.

    Returns:
        tuple: ``(activates, negatives)`` -- two lists of ``AudioSegment``s.
    """
    activates = []
    negatives = []
    for filename in os.listdir("./data/NG"):
        if filename.endswith("wav"):
            activates.append(AudioSegment.from_wav("./data/NG/" + filename))
    for filename in os.listdir("./data/OK"):
        if filename.endswith("wav"):
            negatives.append(AudioSegment.from_wav("./data/OK/" + filename))
    # Bug fix: the original returned the undefined names ``NG_files`` and
    # ``OK_files``, which raised NameError at call time. Return the lists
    # that were actually populated. (The unused ``backgrounds`` list was
    # also removed.)
    return activates, negatives
"alphanum_fraction": 0.675,
"author": null,
"avg_line_length": 34.4186046512,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e6fb4a308c961bfe3364fa9e6c640df351b09e19",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a9088a435f609dd832d47d9c1ff70b72ce09c57c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cryax/trigger_word_prediction",
"max_forks_repo_path": "utils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a9088a435f609dd832d47d9c1ff70b72ce09c57c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cryax/trigger_word_prediction",
"max_issues_repo_path": "utils.py",
"max_line_length": 93,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a9088a435f609dd832d47d9c1ff70b72ce09c57c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cryax/trigger_word_prediction",
"max_stars_repo_path": "utils.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 390,
"path": null,
"reason": "from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1480
} |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# pylint: disable=invalid-name
"""
This module contains functions for generating Qobj representation of a variety
of commonly occuring quantum operators.
"""
import numpy as np
from .fastsparse import fast_csr_matrix, fast_identity
from .qobj import Qobj
# Spin operators
def jmat(j, *args):
    """Higher-order spin operators.

    Args:
        j (float): Spin of operator.
        args (str): Which operator to return: 'x', 'y', 'z', '+' or '-'.
            If no args are given, the triple ('x', 'y', 'z') is returned.

    Returns:
        Qobj: Requested spin operator(s).

    Raises:
        TypeError: Invalid input.
    """
    # j must be a non-negative integer or half-integer
    if np.fix(2 * j) != 2 * j or j < 0:
        raise TypeError('j must be a non-negative integer or half-integer')
    if not args:
        return jmat(j, 'x'), jmat(j, 'y'), jmat(j, 'z')
    which = args[0]
    if which == '+':
        mat = _jplus(j)
    elif which == '-':
        mat = _jplus(j).getH()
    elif which == 'x':
        # Jx = (J+ + J-) / 2
        mat = 0.5 * (_jplus(j) + _jplus(j).getH())
    elif which == 'y':
        # Jy = (J+ - J-) / 2i
        mat = -0.5j * (_jplus(j) - _jplus(j).getH())
    elif which == 'z':
        mat = _jz(j)
    else:
        raise TypeError('Invalid type')
    return Qobj(mat)
def _jplus(j):
    """
    Internal helper generating the sparse data for the J-plus operator.
    """
    # Magnetic quantum numbers j, j-1, ..., -j
    m = np.arange(j, -j - 1, -1, dtype=complex)
    dim = m.shape[0]
    # Matrix elements sqrt(j(j+1) - m(m+1)) on the first superdiagonal;
    # the leading entry (for m = j) is dropped since it has no row above.
    coeffs = np.sqrt(j * (j + 1.0) - (m + 1.0) * m)[1:]
    col_ind = np.arange(1, dim, dtype=np.int32)
    # Rows 0..dim-2 hold one entry each; the last row is empty.
    row_ptr = np.array(list(range(dim - 1)) + [dim - 1] * 2, dtype=np.int32)
    return fast_csr_matrix((coeffs, col_ind, row_ptr), shape=(dim, dim))
def _jz(j):
    """
    Internal functions for generating the data representing the J-z operator.
    """
    # Hilbert-space dimension for spin j
    N = int(2 * j + 1)
    # Diagonal eigenvalues j, j-1, ..., -j with any exact zero omitted,
    # since the CSR structure below stores only the nonzero entries.
    data = np.array([j - k for k in range(N) if (j - k) != 0], dtype=complex)
    # Even shaped matrix
    if N % 2 == 0:
        # N even means j is a half-integer, so no eigenvalue is zero and
        # every row carries exactly one diagonal entry.
        ind = np.arange(N, dtype=np.int32)
        ptr = np.arange(N + 1, dtype=np.int32)
        ptr[-1] = N
    # Odd shaped matrix
    else:
        # N odd means j is an integer, so the middle row (m = 0) is empty;
        # column j is skipped in the index array and the row pointer repeats
        # at position j to encode the empty row.
        j = int(j)
        ind = np.array(list(range(j)) + list(range(j + 1, N)), dtype=np.int32)
        ptr = np.array(list(range(j + 1)) + list(range(j, N)), dtype=np.int32)
        ptr[-1] = N - 1
    return fast_csr_matrix((data, ind, ptr), shape=(N, N))
#
# Spin j operators:
#
def spin_Jx(j):
    """Spin-j x operator.

    Args:
        j (float): Spin of operator.

    Returns:
        Qobj: ``qobj`` representation of the operator.
    """
    return jmat(j, 'x')
def spin_Jy(j):
    """Spin-j y operator.

    Args:
        j (float): Spin of operator.

    Returns:
        Qobj: representation of the operator.
    """
    return jmat(j, 'y')
def spin_Jz(j):
    """Spin-j z operator.

    Args:
        j (float): Spin of operator.

    Returns:
        Qobj: representation of the operator.
    """
    return jmat(j, 'z')
def spin_Jm(j):
    """Spin-j annihilation (lowering) operator.

    Args:
        j (float): Spin of operator.

    Returns:
        Qobj: representation of the operator.
    """
    return jmat(j, '-')
def spin_Jp(j):
    """Spin-j creation (raising) operator.

    Args:
        j (float): Spin of operator.

    Returns:
        Qobj: representation of the operator.
    """
    return jmat(j, '+')
def spin_J_set(j):
    """Set of spin-j operators (x, y, z).

    Args:
        j (float): Spin of operators.

    Returns:
        tuple: ``qobj`` representations of the three spin operators.
    """
    # jmat with no selector returns the (x, y, z) triple
    return jmat(j)
#
# Pauli spin 1/2 operators:
#
def sigmap():
    """Creation (raising) operator for Pauli spins.

    Examples
    --------
    >>> sigmap()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
    Qobj data =
    [[ 0.  1.]
     [ 0.  0.]]
    """
    return jmat(0.5, '+')
def sigmam():
    """Annihilation (lowering) operator for Pauli spins.

    Examples
    --------
    >>> sigmam()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
    Qobj data =
    [[ 0.  0.]
     [ 1.  0.]]
    """
    return jmat(0.5, '-')
def sigmax():
    """Pauli spin 1/2 sigma-x operator.

    Examples
    --------
    >>> sigmax()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
    Qobj data =
    [[ 0.  1.]
     [ 1.  0.]]
    """
    # Pauli matrices are twice the spin-1/2 angular momentum operators
    return 2.0 * jmat(0.5, 'x')
def sigmay():
    """Pauli spin 1/2 sigma-y operator.

    Examples
    --------
    >>> sigmay()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
    Qobj data =
    [[ 0.+0.j  0.-1.j]
     [ 0.+1.j  0.+0.j]]
    """
    # Pauli matrices are twice the spin-1/2 angular momentum operators
    return 2.0 * jmat(0.5, 'y')
def sigmaz():
    """Pauli spin 1/2 sigma-z operator.

    Examples
    --------
    >>> sigmaz()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
    Qobj data =
    [[ 1.  0.]
     [ 0. -1.]]
    """
    # Pauli matrices are twice the spin-1/2 angular momentum operators
    return 2.0 * jmat(0.5, 'z')
#
# DESTROY returns annihilation operator for N dimensional Hilbert space
# out = destroy(N), N is integer value & N>0
#
def destroy(N, offset=0):
    """Destruction (lowering) operator.

    Args:
        N (int): Dimension of Hilbert space; must be a positive integer.
        offset (int): (default 0) The lowest number state that is included
            in the finite number state representation of the operator.

    Returns:
        Qobj: Qobj for lowering operator.

    Raises:
        ValueError: If N is not a positive integer.
    """
    if not isinstance(N, (int, np.integer)):  # raise error if N not integer
        raise ValueError("Hilbert space dimension must be integer value")
    if N <= 0:
        # N <= 0 would build a malformed CSR index pointer below
        # (e.g. ptr[-1] = -1 for N == 0), so reject it explicitly.
        raise ValueError("Hilbert space dimension must be a positive integer")
    # Superdiagonal elements sqrt(offset+1), ..., sqrt(N+offset-1)
    data = np.sqrt(np.arange(offset + 1, N + offset, dtype=complex))
    ind = np.arange(1, N, dtype=np.int32)
    # Rows 0..N-2 hold one entry each; the last row is empty
    ptr = np.arange(N + 1, dtype=np.int32)
    ptr[-1] = N - 1
    return Qobj(fast_csr_matrix((data, ind, ptr), shape=(N, N)), isherm=False)
#
# create returns creation operator for N dimensional Hilbert space
# out = create(N), N is integer value & N>0
#
def create(N, offset=0):
    """Creation (raising) operator.

    Args:
        N (int): Dimension of Hilbert space.
        offset (int): (default 0) The lowest number state that is included
            in the finite number state representation of the operator.

    Returns:
        Qobj: Qobj for raising operator.

    Raises:
        ValueError: Invalid inputs.
    """
    if not isinstance(N, (int, np.integer)):
        raise ValueError("Hilbert space dimension must be integer value")
    # The raising operator is the adjoint of the lowering operator.
    return destroy(N, offset=offset).dag()
#
# QEYE returns identity operator for an N dimensional space
# a = qeye(N), N is integer & N>0
#
def qeye(N):
    """
    Identity operator.

    Args:
        N (int): Dimension of Hilbert space. The value is coerced with
            ``int(N)``, so only a scalar dimension is supported here
            (not a list of dimensions).

    Returns:
        Qobj: Identity operator Qobj.

    Raises:
        ValueError: If N is negative.
    """
    N = int(N)
    if N < 0:
        raise ValueError("N must be integer N>=0")
    return Qobj(fast_identity(N), isherm=True, isunitary=True)
def identity(N):
    """Identity operator. Alternative name to :func:`qeye`.

    Args:
        N (int): Dimension of Hilbert space.

    Returns:
        Qobj: Identity operator Qobj.
    """
    # Pure alias: all validation happens in qeye
    return qeye(N)
def position(N, offset=0):
    """
    Position operator x = 1/sqrt(2) * (a + a.dag()).

    Args:
        N (int): Number of Fock states in Hilbert space.
        offset (int): (default 0) The lowest number state that is included
            in the finite number state representation of the operator.

    Returns:
        Qobj: Position operator as Qobj.
    """
    lowering = destroy(N, offset=offset)
    return 1.0 / np.sqrt(2.0) * (lowering + lowering.dag())
def momentum(N, offset=0):
    """
    Momentum operator p = -1j/sqrt(2) * (a - a.dag()).

    Args:
        N (int): Number of Fock states in Hilbert space.
        offset (int): (default 0) The lowest number state that is included
            in the finite number state representation of the operator.

    Returns:
        Qobj: Momentum operator as Qobj.
    """
    lowering = destroy(N, offset=offset)
    return -1j / np.sqrt(2.0) * (lowering - lowering.dag())
# number operator, important!
def num(N, offset=0):
    """Quantum object for number operator.

    Args:
        N (int): The dimension of the Hilbert space.
        offset (int): (default 0) The lowest number state that is included
            in the finite number state representation of the operator.

    Returns:
        Qobj: Qobj for number operator.
    """
    if offset == 0:
        # Diagonal would be (0, 1, ..., N-1); the leading zero is not
        # stored, so row 0 is empty in the CSR structure.
        data = np.arange(1, N, dtype=complex)
        ind = np.arange(1, N, dtype=np.int32)
        # Row pointers: [0, 0, 1, ..., N-1] — row 0 empty, one entry per
        # remaining row.
        ptr = np.array([0] + list(range(0, N)), dtype=np.int32)
        ptr[-1] = N - 1
    else:
        # With a nonzero offset every diagonal entry (offset..offset+N-1)
        # is stored, one per row.
        data = np.arange(offset, offset + N, dtype=complex)
        ind = np.arange(N, dtype=np.int32)
        ptr = np.arange(N + 1, dtype=np.int32)
        ptr[-1] = N
    return Qobj(fast_csr_matrix((data, ind, ptr),
                shape=(N, N)), isherm=True)
| {
"alphanum_fraction": 0.587332542,
"author": null,
"avg_line_length": 25.7510917031,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "77938824221fd4238cf2dc5afcd6c9d2c8c5d8c6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 11,
"max_forks_repo_forks_event_max_datetime": "2022-02-24T17:39:16.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-06-29T08:40:24.000Z",
"max_forks_repo_head_hexsha": "77e40c8d99fd0490d85285e96f87e4905017b646",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "sagarpahwa/qiskit-aer",
"max_forks_repo_path": "qiskit/providers/aer/pulse/qutip_extra_lite/operators.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "77e40c8d99fd0490d85285e96f87e4905017b646",
"max_issues_repo_issues_event_max_datetime": "2020-11-10T17:15:15.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-09-18T15:35:31.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "sagarpahwa/qiskit-aer",
"max_issues_repo_path": "qiskit/providers/aer/pulse/qutip_extra_lite/operators.py",
"max_line_length": 80,
"max_stars_count": 15,
"max_stars_repo_head_hexsha": "77e40c8d99fd0490d85285e96f87e4905017b646",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "sagarpahwa/qiskit-aer",
"max_stars_repo_path": "qiskit/providers/aer/pulse/qutip_extra_lite/operators.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-12T00:28:51.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-06-29T08:33:39.000Z",
"num_tokens": 3210,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11794
} |
[STATEMENT]
lemma tranclp_unfold [code]:
"tranclp r a b \<longleftrightarrow> (a, b) \<in> trancl {(x, y). r x y}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r\<^sup>+\<^sup>+ a b = ((a, b) \<in> {(x, y). r x y}\<^sup>+)
[PROOF STEP]
by (simp add: trancl_def) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 129,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
//---------------------------------------------------------------------------//
//!
//! \file tstNormalDistribution.cpp
//! \author Alex Robinson
//! \brief Normal distribution unit tests.
//!
//---------------------------------------------------------------------------//
// Std Lib Includes
#include <iostream>
// Boost Includes
#include <boost/units/systems/si.hpp>
#include <boost/units/systems/cgs.hpp>
#include <boost/units/io.hpp>
// FRENSIE Includes
#include "Utility_UnivariateDistribution.hpp"
#include "Utility_NormalDistribution.hpp"
#include "Utility_PhysicalConstants.hpp"
#include "Utility_RandomNumberGenerator.hpp"
#include "Utility_QuantityTraits.hpp"
#include "Utility_ElectronVoltUnit.hpp"
#include "Utility_UnitTestHarnessWithMain.hpp"
#include "ArchiveTestHelpers.hpp"
//---------------------------------------------------------------------------//
// Testing Types
//---------------------------------------------------------------------------//
using boost::units::quantity;
using namespace Utility::Units;
namespace si = boost::units::si;
namespace cgs = boost::units::cgs;
// Archive types exercised by serialization tests
typedef TestArchiveHelper::TestArchives TestArchives;
// Quads of (independent unit A, dependent unit A, independent unit B,
// dependent unit B) — presumably consumed by templated unit-conversion
// tests outside this excerpt; verify against the full test file.
typedef std::tuple<
  std::tuple<si::energy,si::amount,cgs::energy,si::amount>,
  std::tuple<cgs::energy,si::amount,si::energy,si::amount>,
  std::tuple<si::energy,si::length,cgs::energy,cgs::length>,
  std::tuple<cgs::energy,cgs::length,si::energy,si::length>,
  std::tuple<si::energy,si::mass,cgs::energy,cgs::mass>,
  std::tuple<cgs::energy,cgs::mass,si::energy,si::mass>,
  std::tuple<si::energy,si::dimensionless,cgs::energy,cgs::dimensionless>,
  std::tuple<cgs::energy,cgs::dimensionless,si::energy,si::dimensionless>,
  std::tuple<si::energy,void*,cgs::energy,void*>,
  std::tuple<cgs::energy,void*,si::energy,void*>,
  std::tuple<ElectronVolt,si::amount,si::energy,si::amount>,
  std::tuple<ElectronVolt,si::amount,cgs::energy,si::amount>,
  std::tuple<ElectronVolt,si::amount,KiloElectronVolt,si::amount>,
  std::tuple<ElectronVolt,si::amount,MegaElectronVolt,si::amount>,
  std::tuple<KiloElectronVolt,si::amount,si::energy,si::amount>,
  std::tuple<KiloElectronVolt,si::amount,cgs::energy,si::amount>,
  std::tuple<KiloElectronVolt,si::amount,ElectronVolt,si::amount>,
  std::tuple<KiloElectronVolt,si::amount,MegaElectronVolt,si::amount>,
  std::tuple<MegaElectronVolt,si::amount,si::energy,si::amount>,
  std::tuple<MegaElectronVolt,si::amount,cgs::energy,si::amount>,
  std::tuple<MegaElectronVolt,si::amount,ElectronVolt,si::amount>,
  std::tuple<MegaElectronVolt,si::amount,KiloElectronVolt,si::amount>,
  std::tuple<void*,MegaElectronVolt,void*,KiloElectronVolt>
 > TestUnitTypeQuads;
//---------------------------------------------------------------------------//
// Testing Variables
//---------------------------------------------------------------------------//
// Standard normal distribution (mean 0.0, std dev 1.0)
std::shared_ptr<Utility::UnivariateDistribution> distribution(
			  new Utility::NormalDistribution( 0.0, 1.0 ) );
// Unit-aware normal distribution: mean 0 m, std dev 0.01 m (= 1 cm),
// constant multiplier 0.5 mole, and explicit infinite lower/upper bounds
std::shared_ptr<Utility::UnitAwareUnivariateDistribution<cgs::length,si::amount> > unit_aware_distribution( new Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 0.0*si::meter, 0.01*si::meter, 0.5*si::mole, -Utility::QuantityTraits<quantity<si::length> >::inf(), Utility::QuantityTraits<quantity<si::length> >::inf() ) );
//---------------------------------------------------------------------------//
// Tests.
//---------------------------------------------------------------------------//
// Check that the distribution can be evaluated
FRENSIE_UNIT_TEST( NormalDistribution, evaluate )
{
  // The unnormalized density has unit value at the mean and
  // exp(-x^2/2) elsewhere; check both tails and the peak.
  FRENSIE_CHECK_EQUAL( distribution->evaluate( -2.0 ), exp( -4.0/2.0 ) );
  FRENSIE_CHECK_EQUAL( distribution->evaluate( 0.0 ), 1.0 );
  FRENSIE_CHECK_EQUAL( distribution->evaluate( 2.0 ), exp( -4.0/2.0 ) );
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be evaluated
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, evaluate )
{
  // Peak value equals the 0.5 mole multiplier; tails fall off as exp(-x^2/2)
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->evaluate( -2.0*cgs::centimeter ),
                       0.5*exp( -4.0/2.0 )*si::mole );
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->evaluate( 0.0*cgs::centimeter ),
                       0.5*si::mole );
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->evaluate( 2.0*cgs::centimeter ),
                       0.5*exp( -4.0/2.0 )*si::mole );
}
//---------------------------------------------------------------------------//
// Check that the PDF can be evaluated
FRENSIE_UNIT_TEST( NormalDistribution, evaluatePDF )
{
  // Normalized standard-normal PDF: peak 1/sqrt(2*pi) at the mean
  const double peak_value = 1.0/sqrt( 2.0*Utility::PhysicalConstants::pi );
  const double tail_value = peak_value*exp( -4.0/2.0 );
  FRENSIE_CHECK_EQUAL( distribution->evaluatePDF( 0.0 ), peak_value );
  FRENSIE_CHECK_EQUAL( distribution->evaluatePDF( 2.0 ), tail_value );
  FRENSIE_CHECK_EQUAL( distribution->evaluatePDF( -2.0 ), tail_value );
}
//---------------------------------------------------------------------------//
// Check that the unit-aware PDF can be evaluated
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, evaluatePDF )
{
  // PDF carries inverse-length units; peak is 1/sqrt(2*pi) per centimeter
  const double peak_value = 1.0/sqrt( 2.0*Utility::PhysicalConstants::pi );
  const double tail_value = peak_value*exp( -4.0/2.0 );
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->evaluatePDF( 0.0*cgs::centimeter ),
                       peak_value/cgs::centimeter );
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->evaluatePDF( 2.0*cgs::centimeter ),
                       tail_value/cgs::centimeter );
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->evaluatePDF( -2.0*cgs::centimeter ),
                       tail_value/cgs::centimeter );
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
FRENSIE_UNIT_TEST( NormalDistribution, sample_static )
{
  // Fake random stream driving the sampler to the reproducible
  // values +/- ln(2) = +/- 0.69314718055995
  std::vector<double> fake_stream{ 0.5, 0.5, 0.9,
                                   0.5, 0.5, 0.2,
                                   0.049787, 0.449329, 0.5,
                                   0.5, 0.4 };
  Utility::RandomNumberGenerator::setFakeStream( fake_stream );

  double drawn = Utility::NormalDistribution::sample( 0.0, 1.0 );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, 0.69314718055995, 1e-14 );

  drawn = Utility::NormalDistribution::sample( 0.0, 1.0 );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, -0.69314718055995, 1e-14 );

  drawn = Utility::NormalDistribution::sample( 0.0, 1.0 );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, -0.69314718055995, 1e-14 );

  Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be sampled
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, sample_static )
{
  // Fake random stream driving the sampler to the reproducible
  // values +/- ln(2) cm
  std::vector<double> fake_stream{ 0.5, 0.5, 0.9,
                                   0.5, 0.5, 0.2,
                                   0.049787, 0.449329, 0.5,
                                   0.5, 0.4 };
  Utility::RandomNumberGenerator::setFakeStream( fake_stream );

  quantity<cgs::length> drawn =
    Utility::UnitAwareNormalDistribution<cgs::length>::sample(
                                  0.0*cgs::centimeter, 1.0*cgs::centimeter );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   0.69314718055995*cgs::centimeter,
                                   1e-14 );

  drawn = Utility::UnitAwareNormalDistribution<cgs::length>::sample(
                                  0.0*cgs::centimeter, 1.0*cgs::centimeter );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   -0.69314718055995*cgs::centimeter,
                                   1e-14 );

  drawn = Utility::UnitAwareNormalDistribution<cgs::length>::sample(
                                  0.0*cgs::centimeter, 1.0*cgs::centimeter );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   -0.69314718055995*cgs::centimeter,
                                   1e-14 );

  Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
FRENSIE_UNIT_TEST( NormalDistribution, sampleAndRecordTrials_static )
{
  // Fake stream: the third draw requires a rejection, so the trial
  // counter grows faster than the sample count (efficiency 3/4)
  std::vector<double> fake_stream{ 0.5, 0.5, 0.9,
                                   0.5, 0.5, 0.2,
                                   0.049787, 0.449329, 0.5,
                                   0.5, 0.4 };
  Utility::RandomNumberGenerator::setFakeStream( fake_stream );

  Utility::DistributionTraits::Counter trials = 0;

  double drawn = Utility::NormalDistribution::sampleAndRecordTrials(
                                                            trials, 0.0, 1.0 );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, 0.69314718055995, 1e-14 );
  FRENSIE_CHECK_EQUAL( 1.0/trials, 1.0 );

  drawn = Utility::NormalDistribution::sampleAndRecordTrials(
                                                            trials, 0.0, 1.0 );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, -0.69314718055995, 1e-14 );
  FRENSIE_CHECK_EQUAL( 2.0/trials, 1.0 );

  drawn = Utility::NormalDistribution::sampleAndRecordTrials(
                                                            trials, 0.0, 1.0 );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, -0.69314718055995, 1e-14 );
  FRENSIE_CHECK_EQUAL( 3.0/trials, 0.75 );

  Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be sampled
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, sampleAndRecordTrials_static )
{
  // Fake stream: the third draw requires a rejection (efficiency 3/4)
  std::vector<double> fake_stream{ 0.5, 0.5, 0.9,
                                   0.5, 0.5, 0.2,
                                   0.049787, 0.449329, 0.5,
                                   0.5, 0.4 };
  Utility::RandomNumberGenerator::setFakeStream( fake_stream );

  Utility::DistributionTraits::Counter trials = 0;

  quantity<cgs::length> drawn =
    Utility::UnitAwareNormalDistribution<cgs::length>::sampleAndRecordTrials(
                          trials, 0.0*cgs::centimeter, 1.0*cgs::centimeter );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   0.69314718055995*cgs::centimeter,
                                   1e-14 );
  FRENSIE_CHECK_EQUAL( 1.0/trials, 1.0 );

  drawn = Utility::UnitAwareNormalDistribution<cgs::length>::sampleAndRecordTrials(
                          trials, 0.0*cgs::centimeter, 1.0*cgs::centimeter );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   -0.69314718055995*cgs::centimeter,
                                   1e-14 );
  FRENSIE_CHECK_EQUAL( 2.0/trials, 1.0 );

  drawn = Utility::UnitAwareNormalDistribution<cgs::length>::sampleAndRecordTrials(
                          trials, 0.0*cgs::centimeter, 1.0*cgs::centimeter );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   -0.69314718055995*cgs::centimeter,
                                   1e-14 );
  FRENSIE_CHECK_EQUAL( 3.0/trials, 0.75 );

  Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
FRENSIE_UNIT_TEST( NormalDistribution, sample )
{
  // Same fake stream as the static-sample test; the member sample()
  // must produce identical values
  std::vector<double> fake_stream{ 0.5, 0.5, 0.9,
                                   0.5, 0.5, 0.2,
                                   0.049787, 0.449329, 0.5,
                                   0.5, 0.4 };
  Utility::RandomNumberGenerator::setFakeStream( fake_stream );

  double drawn = distribution->sample();
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, 0.69314718055995, 1e-14 );

  drawn = distribution->sample();
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, -0.69314718055995, 1e-14 );

  drawn = distribution->sample();
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, -0.69314718055995, 1e-14 );

  Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be sampled
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, sample )
{
  // Same fake stream as the static-sample test, now through the
  // unit-aware member interface
  std::vector<double> fake_stream{ 0.5, 0.5, 0.9,
                                   0.5, 0.5, 0.2,
                                   0.049787, 0.449329, 0.5,
                                   0.5, 0.4 };
  Utility::RandomNumberGenerator::setFakeStream( fake_stream );

  quantity<cgs::length> drawn = unit_aware_distribution->sample();
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   0.69314718055995*cgs::centimeter,
                                   1e-14 );

  drawn = unit_aware_distribution->sample();
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   -0.69314718055995*cgs::centimeter,
                                   1e-14 );

  drawn = unit_aware_distribution->sample();
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   -0.69314718055995*cgs::centimeter,
                                   1e-14 );

  Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the distribution can be sampled
FRENSIE_UNIT_TEST( NormalDistribution, sampleAndRecordTrials )
{
  // Fake stream: the third draw requires a rejection (efficiency 3/4)
  std::vector<double> fake_stream{ 0.5, 0.5, 0.9,
                                   0.5, 0.5, 0.2,
                                   0.049787, 0.449329, 0.5,
                                   0.5, 0.4 };
  Utility::RandomNumberGenerator::setFakeStream( fake_stream );

  Utility::DistributionTraits::Counter trials = 0;

  double drawn = distribution->sampleAndRecordTrials( trials );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, 0.69314718055995, 1e-14 );
  FRENSIE_CHECK_EQUAL( 1.0/trials, 1.0 );

  drawn = distribution->sampleAndRecordTrials( trials );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, -0.69314718055995, 1e-14 );
  FRENSIE_CHECK_EQUAL( 2.0/trials, 1.0 );

  drawn = distribution->sampleAndRecordTrials( trials );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn, -0.69314718055995, 1e-14 );
  FRENSIE_CHECK_EQUAL( 3.0/trials, 0.75 );

  Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution can be sampled
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, sampleAndRecordTrials )
{
  // Fake stream: the third draw requires a rejection (efficiency 3/4)
  std::vector<double> fake_stream{ 0.5, 0.5, 0.9,
                                   0.5, 0.5, 0.2,
                                   0.049787, 0.449329, 0.5,
                                   0.5, 0.4 };
  Utility::RandomNumberGenerator::setFakeStream( fake_stream );

  Utility::DistributionTraits::Counter trials = 0;

  quantity<cgs::length> drawn =
    unit_aware_distribution->sampleAndRecordTrials( trials );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   0.69314718055995*cgs::centimeter,
                                   1e-14 );
  FRENSIE_CHECK_EQUAL( 1.0/trials, 1.0 );

  drawn = unit_aware_distribution->sampleAndRecordTrials( trials );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   -0.69314718055995*cgs::centimeter,
                                   1e-14 );
  FRENSIE_CHECK_EQUAL( 2.0/trials, 1.0 );

  drawn = unit_aware_distribution->sampleAndRecordTrials( trials );
  FRENSIE_CHECK_FLOATING_EQUALITY( drawn,
                                   -0.69314718055995*cgs::centimeter,
                                   1e-14 );
  FRENSIE_CHECK_EQUAL( 3.0/trials, 0.75 );

  Utility::RandomNumberGenerator::unsetFakeStream();
}
//---------------------------------------------------------------------------//
// Check that the upper bound of the distribution independent variable can be
// returned
FRENSIE_UNIT_TEST( NormalDistribution, getUpperBoundOfIndepVar )
{
  // The default-constructed normal distribution is unbounded above
  FRENSIE_CHECK_EQUAL( distribution->getUpperBoundOfIndepVar(),
		       std::numeric_limits<double>::infinity() );
}
//---------------------------------------------------------------------------//
// Check that the upper bound of the unit-aware distribution independent
// variable can be returned
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, getUpperBoundOfIndepVar )
{
  // The global unit-aware distribution was constructed with +inf upper bound
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->getUpperBoundOfIndepVar(),
                       Utility::QuantityTraits<quantity<cgs::length> >::inf());
}
//---------------------------------------------------------------------------//
// Check that the lower bound of the distribution independent variable can be
// returned
FRENSIE_UNIT_TEST( NormalDistribution, getLowerBoundOfIndepVar )
{
  // The default-constructed normal distribution is unbounded below
  FRENSIE_CHECK_EQUAL( distribution->getLowerBoundOfIndepVar(),
		       -std::numeric_limits<double>::infinity() );
}
//---------------------------------------------------------------------------//
// Check that the lower bound of the unit-aware distribution independent
// variable can be returned
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, getLowerBoundOfIndepVar )
{
  // The global unit-aware distribution was constructed with -inf lower bound
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->getLowerBoundOfIndepVar(),
                       -Utility::QuantityTraits<quantity<cgs::length> >::inf());
}
//---------------------------------------------------------------------------//
// Check that the distribution type can be returned
FRENSIE_UNIT_TEST( NormalDistribution, getDistributionType )
{
  // Type enum must identify this as a normal distribution
  FRENSIE_CHECK_EQUAL( distribution->getDistributionType(),
		       Utility::NORMAL_DISTRIBUTION );
}
//---------------------------------------------------------------------------//
// Check that the unit-aware distribution type can be returned
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, getDistributionType )
{
  // The unit-aware variant reports the same type enum
  FRENSIE_CHECK_EQUAL( unit_aware_distribution->getDistributionType(),
		       Utility::NORMAL_DISTRIBUTION );
}
//---------------------------------------------------------------------------//
// Check if the distribution is tabular
FRENSIE_UNIT_TEST( NormalDistribution, isTabular )
{
  // The normal distribution is analytic, not tabular
  FRENSIE_CHECK( !distribution->isTabular() );
}
//---------------------------------------------------------------------------//
// Check if the unit-aware distribution is tabular
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, isTabular )
{
  // The unit-aware normal distribution is analytic, not tabular
  FRENSIE_CHECK( !unit_aware_distribution->isTabular() );
}
//---------------------------------------------------------------------------//
// Check if the distribution is continuous
FRENSIE_UNIT_TEST( NormalDistribution, isContinuous )
{
  // The normal distribution is continuous
  FRENSIE_CHECK( distribution->isContinuous() );
}
//---------------------------------------------------------------------------//
// Check if the unit-aware distribution is continuous
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, isContinuous )
{
  // The unit-aware normal distribution is continuous
  FRENSIE_CHECK( unit_aware_distribution->isContinuous() );
}
//---------------------------------------------------------------------------//
// Check if the distribution is compatible with the interpolation type
FRENSIE_UNIT_TEST( NormalDistribution, isCompatibleWithInterpType )
{
  // The unbounded default distribution spans zero/negative x, so any
  // interpolation taking log of the independent variable is incompatible
  FRENSIE_CHECK( distribution->isCompatibleWithInterpType<Utility::LinLin>() );
  FRENSIE_CHECK( !distribution->isCompatibleWithInterpType<Utility::LinLog>() );
  FRENSIE_CHECK( distribution->isCompatibleWithInterpType<Utility::LogLin>() );
  FRENSIE_CHECK( !distribution->isCompatibleWithInterpType<Utility::LogLog>() );

  // A distribution truncated to the positive interval [1, 19] is
  // compatible with every interpolation type
  Utility::NormalDistribution bounded_dist( 10.0, 1.0, 1.0, 19.0 );

  FRENSIE_CHECK( bounded_dist.isCompatibleWithInterpType<Utility::LinLin>() );
  FRENSIE_CHECK( bounded_dist.isCompatibleWithInterpType<Utility::LinLog>() );
  FRENSIE_CHECK( bounded_dist.isCompatibleWithInterpType<Utility::LogLin>() );
  FRENSIE_CHECK( bounded_dist.isCompatibleWithInterpType<Utility::LogLog>() );
}
//---------------------------------------------------------------------------//
// Check if the unit-aware distribution is compatible with the interp type
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, isCompatibleWithInterpType )
{
  // The unbounded global distribution spans zero/negative x, so any
  // interpolation taking log of the independent variable is incompatible
  FRENSIE_CHECK( unit_aware_distribution->isCompatibleWithInterpType<Utility::LinLin>() );
  FRENSIE_CHECK( !unit_aware_distribution->isCompatibleWithInterpType<Utility::LinLog>() );
  FRENSIE_CHECK( unit_aware_distribution->isCompatibleWithInterpType<Utility::LogLin>() );
  FRENSIE_CHECK( !unit_aware_distribution->isCompatibleWithInterpType<Utility::LogLog>() );

  // A distribution truncated to the positive interval [1 m, 19 m] is
  // compatible with every interpolation type
  Utility::UnitAwareNormalDistribution<cgs::length,si::amount>
    bounded_dist( 10.0*si::meter,
                  1.0*si::meter,
                  1.0*si::mole,
                  1.0*si::meter,
                  19.0*si::meter );

  FRENSIE_CHECK( bounded_dist.isCompatibleWithInterpType<Utility::LinLin>() );
  FRENSIE_CHECK( bounded_dist.isCompatibleWithInterpType<Utility::LinLog>() );
  FRENSIE_CHECK( bounded_dist.isCompatibleWithInterpType<Utility::LogLin>() );
  FRENSIE_CHECK( bounded_dist.isCompatibleWithInterpType<Utility::LogLog>() );
}
//---------------------------------------------------------------------------//
// Check that the distribution can be placed in a stream
FRENSIE_UNIT_TEST( NormalDistribution, ostream_operator )
{
  std::ostringstream oss;

  // Default constructor: standard normal (mean 0, std dev 1), unbounded
  // support, unit multiplier, dimensionless ("void") units
  oss << Utility::NormalDistribution();

  Utility::VariantMap dist_data =
    Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toDouble(), 0.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toDouble(), 1.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toDouble(),
                       -Utility::QuantityTraits<double>::inf(), SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toDouble(),
                       Utility::QuantityTraits<double>::inf(), SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toDouble(), 1.0, SHOW_LHS );

  // Reset the stream before the next case
  oss.str( "" );
  oss.clear();

  // Mean-only constructor: only the mean differs from the defaults
  oss << Utility::NormalDistribution( 2.0 );

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toDouble(), 2.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toDouble(), 1.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toDouble(),
                       -Utility::QuantityTraits<double>::inf(), SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toDouble(),
                       Utility::QuantityTraits<double>::inf(), SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toDouble(), 1.0, SHOW_LHS );

  oss.str( "" );
  oss.clear();

  // Mean and standard deviation
  oss << Utility::NormalDistribution( 2.0, 3.0 );

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toDouble(), 2.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toDouble(), 3.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toDouble(),
                       -Utility::QuantityTraits<double>::inf(), SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toDouble(),
                       Utility::QuantityTraits<double>::inf(), SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toDouble(), 1.0, SHOW_LHS );

  oss.str( "" );
  oss.clear();

  // Mean, standard deviation and constant multiplier (third argument)
  oss << Utility::NormalDistribution( 2.0, 3.0, 4.0 );

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toDouble(), 2.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toDouble(), 3.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toDouble(),
                       -Utility::QuantityTraits<double>::inf(), SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toDouble(),
                       Utility::QuantityTraits<double>::inf(), SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toDouble(), 4.0, SHOW_LHS );

  oss.str( "" );
  oss.clear();

  // Fully specified: mean, std dev, multiplier, lower bound, upper bound
  oss << Utility::NormalDistribution( -2.0, 3.0, 4.0, -1.0, 1.0 );

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toDouble(), -2.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toDouble(), 3.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toDouble(), -1.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toDouble(), 1.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toDouble(), 4.0, SHOW_LHS );

  oss.str( "" );
  oss.clear();

  // The shared test distribution must stream as a standard normal
  oss << *distribution;

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(), "void", SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toDouble(), 0.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toDouble(), 1.0, SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toDouble(),
                       -Utility::QuantityTraits<double>::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toDouble(),
                       Utility::QuantityTraits<double>::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toDouble(), 1.0, SHOW_LHS );
}
//---------------------------------------------------------------------------//
// Check that a unit-aware distribution can be placed in a stream
FRENSIE_UNIT_TEST( UnitAwareNormalDistribution, ostream_operator )
{
  std::ostringstream oss;

  // Default constructor: standard normal in cgs::length/si::amount units;
  // the streamed representation must carry the unit names and quantities
  oss << Utility::UnitAwareNormalDistribution<cgs::length,si::amount>();

  Utility::VariantMap dist_data =
    Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(),
                       Utility::UnitTraits<cgs::length>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(),
                       Utility::UnitTraits<si::amount>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toType<quantity<cgs::length> >(),
                       0.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toType<quantity<cgs::length> >(),
                       1.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toType<quantity<cgs::length> >(),
                       -Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toType<quantity<cgs::length> >(),
                       Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toType<quantity<si::amount> >(),
                       1.0*si::mole,
                       SHOW_LHS );

  // Reset the stream before the next case
  oss.str( "" );
  oss.clear();

  // Mean-only constructor
  oss << Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 2.0*cgs::centimeter );

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(),
                       Utility::UnitTraits<cgs::length>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(),
                       Utility::UnitTraits<si::amount>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toType<quantity<cgs::length> >(),
                       2.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toType<quantity<cgs::length> >(),
                       1.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toType<quantity<cgs::length> >(),
                       -Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toType<quantity<cgs::length> >(),
                       Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toType<quantity<si::amount> >(),
                       1.0*si::mole,
                       SHOW_LHS );

  oss.str( "" );
  oss.clear();

  // Mean and standard deviation
  oss << Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 2.0*cgs::centimeter, 3.0*cgs::centimeter );

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(),
                       Utility::UnitTraits<cgs::length>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(),
                       Utility::UnitTraits<si::amount>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toType<quantity<cgs::length> >(),
                       2.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toType<quantity<cgs::length> >(),
                       3.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toType<quantity<cgs::length> >(),
                       -Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toType<quantity<cgs::length> >(),
                       Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toType<quantity<si::amount> >(),
                       1.0*si::mole,
                       SHOW_LHS );

  oss.str( "" );
  oss.clear();

  // Mean, standard deviation and constant multiplier
  oss << Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 2.0*cgs::centimeter, 3.0*cgs::centimeter, 4.0*si::mole );

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(),
                       Utility::UnitTraits<cgs::length>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(),
                       Utility::UnitTraits<si::amount>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toType<quantity<cgs::length> >(),
                       2.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toType<quantity<cgs::length> >(),
                       3.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toType<quantity<cgs::length> >(),
                       -Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toType<quantity<cgs::length> >(),
                       Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toType<quantity<si::amount> >(),
                       4.0*si::mole,
                       SHOW_LHS );

  oss.str( "" );
  oss.clear();

  // Fully specified: mean, std dev, multiplier, lower bound, upper bound
  oss << Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( -2.0*cgs::centimeter, 3.0*cgs::centimeter, 4.0*si::mole, -1.0*cgs::centimeter, 1.0*cgs::centimeter );

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(),
                       Utility::UnitTraits<cgs::length>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(),
                       Utility::UnitTraits<si::amount>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toType<quantity<cgs::length> >(),
                       -2.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toType<quantity<cgs::length> >(),
                       3.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toType<quantity<cgs::length> >(),
                       -1.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toType<quantity<cgs::length> >(),
                       1.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toType<quantity<si::amount> >(),
                       4.0*si::mole,
                       SHOW_LHS );

  oss.str( "" );
  oss.clear();

  // The shared unit-aware test distribution (note its 0.5 mole multiplier)
  oss << *unit_aware_distribution;

  dist_data = Utility::fromString<Utility::VariantMap>( oss.str() );

  FRENSIE_CHECK_EQUAL( dist_data["type"].toString(),
                       "Normal Distribution" );
  FRENSIE_CHECK_EQUAL( dist_data["independent unit"].toString(),
                       Utility::UnitTraits<cgs::length>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["dependent unit"].toString(),
                       Utility::UnitTraits<si::amount>::name(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["mean"].toType<quantity<cgs::length> >(),
                       0.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["standard dev"].toType<quantity<cgs::length> >(),
                       1.0*cgs::centimeter,
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["lower bound"].toType<quantity<cgs::length> >(),
                       -Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["upper bound"].toType<quantity<cgs::length> >(),
                       Utility::QuantityTraits<quantity<cgs::length> >::inf(),
                       SHOW_LHS );
  FRENSIE_CHECK_EQUAL( dist_data["multiplier"].toType<quantity<si::amount> >(),
                       0.5*si::mole,
                       SHOW_LHS );
}
//---------------------------------------------------------------------------//
// Check that a distribution can be archived
FRENSIE_UNIT_TEST_TEMPLATE_EXPAND( NormalDistribution, archive, TestArchives )
{
  FETCH_TEMPLATE_PARAM( 0, RawOArchive );
  FETCH_TEMPLATE_PARAM( 1, RawIArchive );

  // The template params are passed as pointer types; strip the pointer to
  // recover the archive types
  typedef typename std::remove_pointer<RawOArchive>::type OArchive;
  typedef typename std::remove_pointer<RawIArchive>::type IArchive;

  std::string archive_base_name( "test_normal_dist" );
  std::ostringstream archive_ostream;

  // Create and archive some normal distributions. Each constructor overload
  // is exercised, plus the polymorphic shared `distribution` pointer. The
  // inner scope guarantees the oarchive is destroyed (and flushed) before
  // the stream contents are read back.
  {
    std::unique_ptr<OArchive> oarchive;

    createOArchive( archive_base_name, archive_ostream, oarchive );

    Utility::NormalDistribution dist_a;
    Utility::NormalDistribution dist_b( 2.0 );
    Utility::NormalDistribution dist_c( 2.0, 3.0 );
    Utility::NormalDistribution dist_d( 2.0, 3.0, 4.0 );
    Utility::NormalDistribution dist_e( 2.0, 3.0, 4.0, -1.0 );
    Utility::NormalDistribution dist_f( 2.0, 3.0, 4.0, -1.0, 1.0 );

    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_a ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_b ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_c ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_d ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_e ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_f ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( distribution ) );
  }

  // Copy the archive ostream to an istream
  std::istringstream archive_istream( archive_ostream.str() );

  // Load the archived distributions. NOTE: loads must occur in exactly the
  // same order as the stores above; each loaded object is compared against a
  // freshly-constructed distribution with the same parameters.
  std::unique_ptr<IArchive> iarchive;

  createIArchive( archive_istream, iarchive );

  Utility::NormalDistribution dist_a;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_a ) );
  FRENSIE_CHECK_EQUAL( dist_a, Utility::NormalDistribution() );

  Utility::NormalDistribution dist_b;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_b ) );
  FRENSIE_CHECK_EQUAL( dist_b, Utility::NormalDistribution( 2.0 ) );

  Utility::NormalDistribution dist_c;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_c ) );
  FRENSIE_CHECK_EQUAL( dist_c, Utility::NormalDistribution( 2.0, 3.0 ) );

  Utility::NormalDistribution dist_d;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_d ) );
  FRENSIE_CHECK_EQUAL( dist_d, Utility::NormalDistribution( 2.0, 3.0, 4.0 ) );

  Utility::NormalDistribution dist_e;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_e ) );
  FRENSIE_CHECK_EQUAL( dist_e, Utility::NormalDistribution( 2.0, 3.0, 4.0, -1.0 ) );

  Utility::NormalDistribution dist_f;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_f ) );
  FRENSIE_CHECK_EQUAL( dist_f, Utility::NormalDistribution( 2.0, 3.0, 4.0, -1.0, 1.0 ) );

  // The polymorphic pointer must round-trip through the base-class interface
  // and come back as a NormalDistribution
  std::shared_ptr<Utility::UnivariateDistribution> shared_dist;

  FRENSIE_REQUIRE_NO_THROW( (*iarchive) >> boost::serialization::make_nvp( "distribution", shared_dist ) );
  FRENSIE_CHECK_EQUAL( *dynamic_cast<Utility::NormalDistribution*>( shared_dist.get() ),
                       *dynamic_cast<Utility::NormalDistribution*>( distribution.get() ) );
}
//---------------------------------------------------------------------------//
// Check that a unit-aware distribution can be archived
FRENSIE_UNIT_TEST_TEMPLATE_EXPAND( UnitAwareNormalDistribution,
                                   archive,
                                   TestArchives )
{
  FETCH_TEMPLATE_PARAM( 0, RawOArchive );
  FETCH_TEMPLATE_PARAM( 1, RawIArchive );

  // The template params are passed as pointer types; strip the pointer to
  // recover the archive types
  typedef typename std::remove_pointer<RawOArchive>::type OArchive;
  typedef typename std::remove_pointer<RawIArchive>::type IArchive;

  std::string archive_base_name( "test_unit_aware_normal_dist" );
  std::ostringstream archive_ostream;

  // Create and archive some normal distributions — one per constructor
  // overload, plus the polymorphic shared pointer. The inner scope flushes
  // the oarchive before the stream is read back.
  {
    std::unique_ptr<OArchive> oarchive;

    createOArchive( archive_base_name, archive_ostream, oarchive );

    Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_a;
    Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_b( 2.0*cgs::centimeter );
    Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_c( 2.0*cgs::centimeter, 3.0*cgs::centimeter );
    Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_d( 2.0*cgs::centimeter, 3.0*cgs::centimeter, 4.0*si::mole );
    Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_e( 2.0*cgs::centimeter, 3.0*cgs::centimeter, 4.0*si::mole, -1.0*cgs::centimeter );
    Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_f( 2.0*cgs::centimeter, 3.0*cgs::centimeter, 4.0*si::mole, -1.0*cgs::centimeter, 1.0*cgs::centimeter );

    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_a ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_b ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_c ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_d ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_e ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( dist_f ) );
    FRENSIE_REQUIRE_NO_THROW(
                             (*oarchive) << BOOST_SERIALIZATION_NVP( unit_aware_distribution ) );
  }

  // Copy the archive ostream to an istream
  std::istringstream archive_istream( archive_ostream.str() );

  // Load the archived distributions, mirroring the store order exactly
  std::unique_ptr<IArchive> iarchive;

  createIArchive( archive_istream, iarchive );

  Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_a;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_a ) );
  FRENSIE_CHECK_EQUAL( dist_a, (Utility::UnitAwareNormalDistribution<cgs::length,si::amount>()) );

  Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_b;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_b ) );
  FRENSIE_CHECK_EQUAL( dist_b, (Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 2.0*cgs::centimeter )) );

  Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_c;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_c ) );
  FRENSIE_CHECK_EQUAL( dist_c, (Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 2.0*cgs::centimeter, 3.0*cgs::centimeter )) );

  Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_d;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_d ) );
  FRENSIE_CHECK_EQUAL( dist_d, (Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 2.0*cgs::centimeter, 3.0*cgs::centimeter, 4.0*si::mole )) );

  Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_e;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_e ) );
  FRENSIE_CHECK_EQUAL( dist_e, (Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 2.0*cgs::centimeter, 3.0*cgs::centimeter, 4.0*si::mole, -1.0*cgs::centimeter )) );

  Utility::UnitAwareNormalDistribution<cgs::length,si::amount> dist_f;

  FRENSIE_REQUIRE_NO_THROW(
                           (*iarchive) >> BOOST_SERIALIZATION_NVP( dist_f ) );
  FRENSIE_CHECK_EQUAL( dist_f, (Utility::UnitAwareNormalDistribution<cgs::length,si::amount>( 2.0*cgs::centimeter, 3.0*cgs::centimeter, 4.0*si::mole, -1.0*cgs::centimeter, 1.0*cgs::centimeter )) );

  // The polymorphic pointer must round-trip through the unit-aware
  // base-class interface
  std::shared_ptr<Utility::UnitAwareUnivariateDistribution<cgs::length,si::amount> > shared_dist;

  FRENSIE_REQUIRE_NO_THROW( (*iarchive) >> boost::serialization::make_nvp( "unit_aware_distribution", shared_dist ) );
  FRENSIE_CHECK_EQUAL( (*dynamic_cast<Utility::UnitAwareNormalDistribution<cgs::length,si::amount>*>( shared_dist.get() )),
                       (*dynamic_cast<Utility::UnitAwareNormalDistribution<cgs::length,si::amount>*>( unit_aware_distribution.get() )) );
}
//---------------------------------------------------------------------------//
// Check that distributions can be scaled
FRENSIE_UNIT_TEST_TEMPLATE_EXPAND( UnitAwareNormalDistribution,
                                   explicit_conversion,
                                   TestUnitTypeQuads )
{
  FETCH_TEMPLATE_PARAM( 0, RawIndepUnitA );
  FETCH_TEMPLATE_PARAM( 1, RawDepUnitA );
  FETCH_TEMPLATE_PARAM( 2, RawIndepUnitB );
  FETCH_TEMPLATE_PARAM( 3, RawDepUnitB );

  // The template params are passed as pointer types; strip the pointer to
  // recover the unit types
  typedef typename std::remove_pointer<RawIndepUnitA>::type IndepUnitA;
  typedef typename std::remove_pointer<RawDepUnitA>::type DepUnitA;
  typedef typename std::remove_pointer<RawIndepUnitB>::type IndepUnitB;
  typedef typename std::remove_pointer<RawDepUnitB>::type DepUnitB;

  // Quantity types associated with each unit pair (and the inverse indep
  // quantity type, which is the PDF's return type)
  typedef typename Utility::UnitTraits<IndepUnitA>::template GetQuantityType<double>::type IndepQuantityA;
  typedef typename Utility::UnitTraits<typename Utility::UnitTraits<IndepUnitA>::InverseUnit>::template GetQuantityType<double>::type InverseIndepQuantityA;
  typedef typename Utility::UnitTraits<IndepUnitB>::template GetQuantityType<double>::type IndepQuantityB;
  typedef typename Utility::UnitTraits<typename Utility::UnitTraits<IndepUnitB>::InverseUnit>::template GetQuantityType<double>::type InverseIndepQuantityB;
  typedef typename Utility::UnitTraits<DepUnitA>::template GetQuantityType<double>::type DepQuantityA;
  typedef typename Utility::UnitTraits<DepUnitB>::template GetQuantityType<double>::type DepQuantityB;

  // Copy from unitless distribution to distribution type A (static method)
  Utility::UnitAwareNormalDistribution<IndepUnitA,DepUnitA>
    unit_aware_dist_a_copy = Utility::UnitAwareNormalDistribution<IndepUnitA,DepUnitA>::fromUnitlessDistribution( *dynamic_cast<Utility::NormalDistribution*>( distribution.get() ) );

  // Copy from distribution type A to distribution type B (explicit cast)
  Utility::UnitAwareNormalDistribution<IndepUnitB,DepUnitB>
    unit_aware_dist_b_copy( unit_aware_dist_a_copy );

  // At x = 0 a standard normal has unnormalized value 1 and PDF
  // 1/sqrt(2*pi); both distributions must agree after unit conversion
  IndepQuantityA indep_quantity_a =
    Utility::QuantityTraits<IndepQuantityA>::initializeQuantity( 0.0 );
  InverseIndepQuantityA inv_indep_quantity_a =
    Utility::QuantityTraits<InverseIndepQuantityA>::initializeQuantity( 1.0/sqrt( 2.0*Utility::PhysicalConstants::pi ) );
  DepQuantityA dep_quantity_a =
    Utility::QuantityTraits<DepQuantityA>::initializeQuantity( 1.0 );

  IndepQuantityB indep_quantity_b( indep_quantity_a );
  InverseIndepQuantityB inv_indep_quantity_b( inv_indep_quantity_a );
  DepQuantityB dep_quantity_b( dep_quantity_a );

  FRENSIE_CHECK_FLOATING_EQUALITY(
                           unit_aware_dist_a_copy.evaluate( indep_quantity_a ),
                           dep_quantity_a,
                           1e-15 );
  FRENSIE_CHECK_FLOATING_EQUALITY(
                        unit_aware_dist_a_copy.evaluatePDF( indep_quantity_a ),
                        inv_indep_quantity_a,
                        1e-15 );
  FRENSIE_CHECK_FLOATING_EQUALITY(
                           unit_aware_dist_b_copy.evaluate( indep_quantity_b ),
                           dep_quantity_b,
                           1e-15 );
  FRENSIE_CHECK_FLOATING_EQUALITY(
                        unit_aware_dist_b_copy.evaluatePDF( indep_quantity_b ),
                        inv_indep_quantity_b,
                        1e-15 );

  // At x = 2 the expected values pick up the Gaussian factor exp(-x^2/2)
  Utility::setQuantity( indep_quantity_a, 2.0 );
  Utility::setQuantity( inv_indep_quantity_a, exp( -4.0/2.0 )/sqrt( 2.0*Utility::PhysicalConstants::pi ) );
  Utility::setQuantity( dep_quantity_a, exp( -4.0/2.0 ) );

  indep_quantity_b = IndepQuantityB( indep_quantity_a );
  inv_indep_quantity_b = InverseIndepQuantityB( inv_indep_quantity_a );
  dep_quantity_b = DepQuantityB( dep_quantity_a );

  FRENSIE_CHECK_FLOATING_EQUALITY(
                           unit_aware_dist_a_copy.evaluate( indep_quantity_a ),
                           dep_quantity_a,
                           1e-15 );
  FRENSIE_CHECK_FLOATING_EQUALITY(
                        unit_aware_dist_a_copy.evaluatePDF( indep_quantity_a ),
                        inv_indep_quantity_a,
                        1e-15 );
  FRENSIE_CHECK_FLOATING_EQUALITY(
                           unit_aware_dist_b_copy.evaluate( indep_quantity_b ),
                           dep_quantity_b,
                           1e-15 );
  FRENSIE_CHECK_FLOATING_EQUALITY(
                        unit_aware_dist_b_copy.evaluatePDF( indep_quantity_b ),
                        inv_indep_quantity_b,
                        1e-15 );
}
//---------------------------------------------------------------------------//
// Custom setup
//---------------------------------------------------------------------------//
// Custom test harness setup: runs once before any unit test in this file
FRENSIE_CUSTOM_UNIT_TEST_SETUP_BEGIN();

FRENSIE_CUSTOM_UNIT_TEST_INIT()
{
  // Initialize the random number generator (required by the sampling tests)
  Utility::RandomNumberGenerator::createStreams();
}

FRENSIE_CUSTOM_UNIT_TEST_SETUP_END();
//---------------------------------------------------------------------------//
// end tstNormalDistribution.cpp
//---------------------------------------------------------------------------//
| {
"alphanum_fraction": 0.6394717985,
"author": null,
"avg_line_length": 42.4370112945,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "b34d468447a66c25707825f0ded9f1a38afc0d9a",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2020-09-08T18:59:51.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-02-12T17:37:07.000Z",
"max_forks_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "bam241/FRENSIE",
"max_forks_repo_path": "packages/utility/distribution/test/tstNormalDistribution.cpp",
"max_issues_count": 43,
"max_issues_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094",
"max_issues_repo_issues_event_max_datetime": "2021-09-08T03:36:08.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-03-03T19:59:20.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "bam241/FRENSIE",
"max_issues_repo_path": "packages/utility/distribution/test/tstNormalDistribution.cpp",
"max_line_length": 333,
"max_stars_count": 10,
"max_stars_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "bam241/FRENSIE",
"max_stars_repo_path": "packages/utility/distribution/test/tstNormalDistribution.cpp",
"max_stars_repo_stars_event_max_datetime": "2021-04-04T17:44:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-11-14T19:58:30.000Z",
"num_tokens": 12754,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 48845
} |
#!/usr/bin/env python
from pyquil.api import QVMConnection
from pyquil.quil import Program as P
from pyquil.gates import H
from functools import reduce
from numpy import average
from numpy import ceil
from numpy import log
from numpy import std
import argparse
class QDice:
    """A quantum dice: each face is determined by measuring qubits put into
    superposition with Hadamard gates on a QVM."""

    def __init__(self):
        # A single QVM connection is shared by every roll.
        self.qvm = QVMConnection()

    def roll(self, hadamards=3, trials=1):
        """Execute the Hadamard circuit `trials` times, measuring all qubits.

        Returns a list of bit lists, one per trial.
        """
        program = self.magic(hadamards=hadamards).measure_all()
        return self.qvm.run(program, trials=trials)

    def magic(self, hadamards=3):
        """Build a program that applies H to qubits 0..hadamards-1."""
        return P(*[H(qubit) for qubit in range(hadamards)])

    @staticmethod
    def number(roll):
        """Interpret a measured bit sequence (most significant bit first)
        as a face value in 1..2**len(roll)."""
        bits = iter(roll)
        face = next(bits)
        for bit in bits:
            face = 2 * face + bit
        return 1 + face
def digits(n: int) -> int:
    """Return the number of decimal digits needed to print the positive integer *n*.

    The previous implementation, ``ceil(log(n)/log(10))``, under-counted for
    exact powers of ten (e.g. ``digits(1000)`` gave 3 instead of 4, and
    ``digits(1)`` gave 0) and returned a numpy float, which leaked a trailing
    ``.0`` into the printf-style width specifiers built by the caller
    (e.g. ``"%04.0d"`` instead of ``"%04d"``).
    """
    return len(str(int(n)))
if __name__ == '__main__':
    # Command-line interface: number of Hadamard gates (= qubits) and shots.
    cli = argparse.ArgumentParser(
        description='Produces statistics of a quantum dice.')
    cli.add_argument(
        '-H', '--hadamards', type=int, default=3)
    cli.add_argument(
        '-n', '--trials', type=int, default=1024)
    opts = cli.parse_args()

    dice = QDice()
    outcomes = dice.roll(hadamards=opts.hadamards, trials=opts.trials)

    # Per-qubit sample mean and standard deviation across all trials.
    mean_bits = average(outcomes, axis=0)
    std_bits = std(outcomes, axis=0)

    # One line per trial: zero-padded index, the raw bits, and the face value.
    row_fmt = "#%0{0}d:[{1} ] => %0{2}d;".format(
        digits(opts.trials), opts.hadamards*' %d', digits(2**opts.hadamards))
    for index, outcome in enumerate(outcomes, start=1):
        print(row_fmt % (index, *outcome, QDice.number(outcome)))

    # Summary: mean and spread of each qubit and of the composed face value.
    print("")
    print("μ{{⟨H(i)⟩}}=[{0} ], μ{{∑H(i)*2^i}}=%.2f;".format(
        opts.hadamards*' %.2f') % (*mean_bits, QDice.number(mean_bits)))
    print("σ{{⟨H(i)⟩}}=[{0} ], σ{{∑H(i)*2^i}}=%.2f.".format(
        opts.hadamards*' %.2f') % (*std_bits, QDice.number(std_bits)))
| {
"alphanum_fraction": 0.6170212766,
"author": null,
"avg_line_length": 28.2,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9af50ed195f1c52054d9580bfd288ef97940b5b8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-05-21T01:56:04.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-05-21T01:56:04.000Z",
"max_forks_repo_head_hexsha": "2ad86fb8d61ad62f0764cb683b4392383aa3ad3b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hsk81/quantum-dice",
"max_forks_repo_path": "qdice.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2ad86fb8d61ad62f0764cb683b4392383aa3ad3b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hsk81/quantum-dice",
"max_issues_repo_path": "qdice.py",
"max_line_length": 77,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2ad86fb8d61ad62f0764cb683b4392383aa3ad3b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hsk81/quantum-dice",
"max_stars_repo_path": "qdice.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 507,
"path": null,
"reason": "from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1692
} |
using Plots
include("src/FluidQueues.jl")
include("model_def.jl")

# Hitting-time CDF wrapper: for t > 0, evaluate FluidQueues.hitting_times_cdf
# on the locally defined `model` (from model_def.jl) with levels 1.0 and 2.0;
# for t <= 0 the CDF is identically zero.
# NOTE(review): assumes hitting_times_cdf returns a 2x4 matrix (the zero
# branch hard-codes zeros(2,4)) — confirm against FluidQueues.
h(t) = (t>0.0) ? FluidQueues.hitting_times_cdf(model,t,1.0,2.0) : zeros(2,4)

# Evaluate on a time grid and keep four entries (linear indices 3:6 of the
# column-major matrix) per time point.
tvec = 0.0:0.1:3
cdfs = zeros(length(tvec),4)
for (c,t) in enumerate(tvec)
    h_mat = h(t)
    cdfs[c,:] = h_mat[3:6]
end

# One curve per kept CDF component, as a function of time.
plot(tvec,cdfs)
"alphanum_fraction": 0.6548042705,
"author": null,
"avg_line_length": 25.5454545455,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "b979a8e58320860e1f3ecf6b2609c1e2dea992c2",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0f6f928b926684756ba664ddeab288f7992f5965",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "angus-lewis/FluidQueues.jl",
"max_forks_repo_path": "workspace.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0f6f928b926684756ba664ddeab288f7992f5965",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "angus-lewis/FluidQueues.jl",
"max_issues_repo_path": "workspace.jl",
"max_line_length": 76,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0f6f928b926684756ba664ddeab288f7992f5965",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "angus-lewis/FluidQueues.jl",
"max_stars_repo_path": "workspace.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 121,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 281
} |
import numpy as np
import logging
import sys
from fvcore.transforms.transform import (
HFlipTransform,
NoOpTransform,
VFlipTransform,
)
from PIL import Image
from detectron2.data import transforms as T
class ResizeShortestEdge(T.Augmentation):
    """
    Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
    If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.

    Unlike the stock detectron2 augmentation, the target size is re-sampled
    only once every `clip_frame_cnt` calls, so that all frames of a video
    clip are resized consistently.
    """

    def __init__(
        self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR, clip_frame_cnt=1
    ):
        """
        Args:
            short_edge_length (list[int]): If ``sample_style=="range"``,
                a [min, max] interval from which to sample the shortest edge length.
                If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
            max_size (int): maximum allowed longest edge length.
            sample_style (str): one of "range", "choice", "range_by_clip",
                "choice_by_clip"; the "*_by_clip" variants are intended to be
                paired with clip_frame_cnt > 1 so one size is shared per clip.
            interp: PIL interpolation mode used by the resulting transform.
            clip_frame_cnt (int): number of consecutive calls (frames) that
                share one sampled target size.
        """
        super().__init__()
        assert sample_style in ["range", "choice", "range_by_clip", "choice_by_clip"], sample_style
        self.is_range = ("range" in sample_style)
        if isinstance(short_edge_length, int):
            short_edge_length = (short_edge_length, short_edge_length)
        if self.is_range:
            assert len(short_edge_length) == 2, (
                "short_edge_length must be two values using 'range' sample style."
                f" Got {short_edge_length}!"
            )
        self._cnt = 0  # frames seen since the size was last sampled
        # NOTE(review): T.Augmentation._init(locals()) presumably copies every
        # local above (including the parameters) onto `self` — this is why no
        # explicit attribute assignments appear here; confirm against the
        # detectron2 Augmentation base class.
        self._init(locals())

    def get_transform(self, image):
        # Re-sample the target size only at clip boundaries so that every
        # frame in a clip of `clip_frame_cnt` frames gets the same size.
        if self._cnt % self.clip_frame_cnt == 0:
            if self.is_range:
                # randint's upper bound is exclusive, hence the +1.
                self.size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            else:
                self.size = np.random.choice(self.short_edge_length)
            if self.size == 0:
                # A sampled size of 0 disables resizing for this frame.
                return NoOpTransform()
            self._cnt = 0  # avoiding overflow
        self._cnt += 1
        h, w = image.shape[:2]
        # Scale so the shorter edge equals the sampled size.
        scale = self.size * 1.0 / min(h, w)
        if h < w:
            newh, neww = self.size, scale * w
        else:
            newh, neww = scale * h, self.size
        # If the longer edge would exceed max_size, shrink both dims further.
        if max(newh, neww) > self.max_size:
            scale = self.max_size * 1.0 / max(newh, neww)
            newh = newh * scale
            neww = neww * scale
        # Round to the nearest integer pixel counts.
        neww = int(neww + 0.5)
        newh = int(newh + 0.5)
        return T.ResizeTransform(h, w, newh, neww, self.interp)
class RandomFlip(T.Augmentation):
    """
    Flip the image horizontally or vertically with the given probability.

    The flip decision is re-drawn only every `clip_frame_cnt` calls to
    :meth:`get_transform`, so all frames of one video clip are flipped
    consistently.
    """
    def __init__(self, prob=0.5, *, horizontal=True, vertical=False, clip_frame_cnt=1):
        """
        Args:
            prob (float): probability of flip.
            horizontal (boolean): whether to apply horizontal flipping
            vertical (boolean): whether to apply vertical flipping
            clip_frame_cnt (int): number of consecutive calls sharing one flip decision.
        """
        super().__init__()
        if horizontal and vertical:
            raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
        if not horizontal and not vertical:
            raise ValueError("At least one of horiz or vert has to be True!")
        # Counts calls since the flip decision was last re-drawn.
        self._cnt = 0
        # T.Augmentation helper: copies every name in locals() onto self.
        self._init(locals())
    def get_transform(self, image):
        # Re-draw the flip decision only at the start of each clip.
        if self._cnt % self.clip_frame_cnt == 0:
            self.do = self._rand_range() < self.prob
            self._cnt = 0   # avoiding overflow
        self._cnt += 1
        h, w = image.shape[:2]
        if self.do:
            if self.horizontal:
                return HFlipTransform(w)
            elif self.vertical:
                return VFlipTransform(h)
        else:
            return NoOpTransform()
def build_augmentation(cfg, is_train):
    """
    Build the list of detectron2 augmentations for training or testing.

    Args:
        cfg: detectron2 config node; INPUT.* keys select crop, resize,
            flip, and the extra photometric augmentations.
        is_train (bool): True builds the training pipeline, False the
            deterministic test-time pipeline.

    Returns:
        list[T.Augmentation]: augmentations in application order.
    """
    logger = logging.getLogger(__name__)

    if not is_train:
        # Test time: deterministic shortest-edge resize only.
        return [T.ResizeShortestEdge(cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, "choice")]

    augs = []

    # Optional random crop.
    if cfg.INPUT.CROP.ENABLED:
        augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))

    # Resize; clip-consistent size when a "*_by_clip" sampling style is set.
    sampling = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    frames_per_clip = cfg.INPUT.SAMPLING_FRAME_NUM if "by_clip" in sampling else 1
    augs.append(
        ResizeShortestEdge(
            cfg.INPUT.MIN_SIZE_TRAIN,
            cfg.INPUT.MAX_SIZE_TRAIN,
            sampling,
            clip_frame_cnt=frames_per_clip,
        )
    )

    # Random flip; clip-consistent decision when "flip_by_clip".
    flip_mode = cfg.INPUT.RANDOM_FLIP
    if flip_mode != "none":
        # NOTE using RandomFlip modified for the support of flip maintenance
        augs.append(
            RandomFlip(
                horizontal=flip_mode in ("horizontal", "flip_by_clip"),
                vertical=flip_mode == "vertical",
                clip_frame_cnt=cfg.INPUT.SAMPLING_FRAME_NUM if flip_mode == "flip_by_clip" else 1,
            )
        )

    # Optional extras: brightness, contrast, saturation, rotation.
    extras = cfg.INPUT.AUGMENTATIONS
    if "brightness" in extras:
        augs.append(T.RandomBrightness(0.9, 1.1))
    if "contrast" in extras:
        augs.append(T.RandomContrast(0.9, 1.1))
    if "saturation" in extras:
        augs.append(T.RandomSaturation(0.9, 1.1))
    if "rotation" in extras:
        augs.append(
            T.RandomRotation(
                [-15, 15], expand=False, center=[(0.4, 0.4), (0.6, 0.6)], sample_style="range"
            )
        )
    return augs
| {
"alphanum_fraction": 0.5952980688,
"author": null,
"avg_line_length": 36.0909090909,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f36528bc4cc69651e34f53681107fe408c3df74c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2022-03-27T13:46:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-10-31T08:30:39.000Z",
"max_forks_repo_head_hexsha": "fb2ee4571dba4700eab3b52f10e147225b763e2a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "sukjunhwang/IFC",
"max_forks_repo_path": "projects/IFC/ifc/data/augmentation.py",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "fb2ee4571dba4700eab3b52f10e147225b763e2a",
"max_issues_repo_issues_event_max_datetime": "2022-03-28T22:41:01.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-10T03:27:21.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "sukjunhwang/IFC",
"max_issues_repo_path": "projects/IFC/ifc/data/augmentation.py",
"max_line_length": 116,
"max_stars_count": 51,
"max_stars_repo_head_hexsha": "fb2ee4571dba4700eab3b52f10e147225b763e2a",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "sukjunhwang/IFC",
"max_stars_repo_path": "projects/IFC/ifc/data/augmentation.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T03:49:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-10-30T02:05:45.000Z",
"num_tokens": 1387,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5955
} |
#include <vector>
#include <string>
#include <utility>
#include <map>
#include <boost/shared_ptr.hpp>
#ifndef HEADER_B
#define HEADER_B
#include "A.hpp"
// Test-fixture enum with file-unique member names for the wrapper generator.
enum testB {
    BB, BBB
};
// Wrapper-generator test fixture: inherits the stored int `i_` from
// A_second (declared in A.hpp) and nests an enum and a struct so that
// nested-type wrapping is exercised.
class Bklass : public A_second {
    public:
        // int i_; // from parent class
        Bklass(int i): A_second(i) { };
        Bklass(const Bklass & i): A_second(i.i_) { };
        // Nested types that the wrapper must expose alongside the class.
        enum KlassE { B1, B2, B3};
        struct KlassKlass { int k_; };
};
// Standalone test class holding a single int; processA couples it to the
// Aklass type from A.hpp so cross-header type resolution is exercised.
class B_second {
    public:
        int i_;  // wrapped value
        B_second(int i): i_(i) { };
        B_second(const B_second & i): i_(i.i_) { };
        // Stores a.i_ + 10 into this object's i_.
        void processA(const Aklass & a) {i_ = a.i_ + 10;}
};
#endif
| {
"alphanum_fraction": 0.5736677116,
"author": null,
"avg_line_length": 17.7222222222,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "1675ccb36aa630488160d40351a29b993deef59d",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 17,
"max_forks_repo_forks_event_max_datetime": "2021-12-27T08:16:40.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-03-18T10:43:39.000Z",
"max_forks_repo_head_hexsha": "981e19abc95b3fda2832da6f4dc293d36c5d9fb5",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "cbielow/autowrap",
"max_forks_repo_path": "tests/test_files/full_lib/B.hpp",
"max_issues_count": 74,
"max_issues_repo_head_hexsha": "981e19abc95b3fda2832da6f4dc293d36c5d9fb5",
"max_issues_repo_issues_event_max_datetime": "2021-07-29T08:55:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-17T15:51:57.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "cbielow/autowrap",
"max_issues_repo_path": "tests/test_files/full_lib/B.hpp",
"max_line_length": 57,
"max_stars_count": 46,
"max_stars_repo_head_hexsha": "981e19abc95b3fda2832da6f4dc293d36c5d9fb5",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "cbielow/autowrap",
"max_stars_repo_path": "tests/test_files/full_lib/B.hpp",
"max_stars_repo_stars_event_max_datetime": "2021-04-29T06:28:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-07T04:42:34.000Z",
"num_tokens": 185,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 638
} |
import streamlit as st
import numpy as np
from PIL import Image
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
def convert_PIL_to_numpy(image, format):
    """
    Convert a PIL Image to a numpy array in the requested channel format.

    Args:
        image: a PIL Image (or, when `format` is None, anything np.asarray accepts).
        format (str or None): a PIL mode name, or "BGR" / "YUV-BT.601";
            None skips mode conversion entirely.

    Returns:
        np.ndarray: HWC image array ("L" keeps a trailing single-channel axis).
    """
    if format is not None:
        # PIL cannot emit BGR or YUV directly; go through RGB and convert below.
        pil_mode = "RGB" if format in ("BGR", "YUV-BT.601") else format
        image = image.convert(pil_mode)
    array = np.asarray(image)
    if format == "L":
        # PIL squeezes the channel axis for grayscale; restore HWC shape.
        array = np.expand_dims(array, -1)
    elif format == "BGR":
        array = array[:, :, ::-1]  # reverse the channel order
    elif format == "YUV-BT.601":
        # Normalize to [0, 1] and apply the RGB->YUV matrix.
        array = np.dot(array / 255.0, np.array(_M_RGB2YUV).T)
    return array
def read_image(file, format=None):
    """Open an image file, force it to RGB, and return it converted to `format`
    (see convert_PIL_to_numpy) as a numpy array."""
    image = Image.open(file).convert('RGB')
    return convert_PIL_to_numpy(image, format)
# @app.route('/health')
# def health():
# return "ok"
# @app.route('/')
# def main():
# return render_template('index.html')
# Build the three model-zoo predictors once at import time (pretrained COCO
# weights are fetched on first use).
# Panoptic segmentation: FPN with ResNet-101, 3x schedule.
panoptic_cfg = get_cfg()
panoptic_cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"))
panoptic_cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
panopticPredictor = DefaultPredictor(panoptic_cfg)
# Instance segmentation: Mask R-CNN R-50 FPN; keep detections scoring >= 0.5.
instance_cfg = get_cfg()
instance_cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
instance_cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
instance_cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
instancePredictor = DefaultPredictor(instance_cfg)
# Keypoint detection: Keypoint R-CNN R-50 FPN; stricter 0.7 score threshold.
keypoint_cfg = get_cfg()
keypoint_cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
keypoint_cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
keypoint_cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
keypointPredictor = DefaultPredictor(keypoint_cfg)
def predict(path, np):
    """
    Run the predictor selected by `path` on image array `np` and return the
    rendered visualization as a numpy array, or the string 'nothing' for an
    unknown `path`.

    NOTE(review): the image parameter is named `np`, shadowing the numpy
    module inside this function body — rename with care (callers in this
    file pass it positionally only).
    """
    if path == 'keypoint':
        cfg = keypoint_cfg
        predictions = keypointPredictor(np)["instances"]
        # Channel order is reversed for the Visualizer input and reversed
        # back on the final image below.
        visualizer = Visualizer(np[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.0)
        instances = predictions.to('cpu')
        vis_output = visualizer.draw_instance_predictions(predictions=instances)
    elif path == 'instancesegmentation':
        cfg = instance_cfg
        predictions = instancePredictor(np)["instances"]
        visualizer = Visualizer(np[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.0)
        instances = predictions.to('cpu')
        vis_output = visualizer.draw_instance_predictions(predictions=instances)
    elif path == 'panopticsegmentation':
        cfg = panoptic_cfg
        panoptic_seg, segments_info = panopticPredictor(np)["panoptic_seg"]
        visualizer = Visualizer(np[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.0)
        vis_output = visualizer.draw_panoptic_seg_predictions(panoptic_seg.to("cpu"), segments_info)
    else:
        return 'nothing'
    result_image = vis_output.get_image()[:, :, ::-1]
    return result_image
# Streamlit UI (labels are in Korean): choose a model, upload an image,
# show the original and the prediction overlay.
st.title("사진을 넣어 물체를 인식해보세요!")
st.subheader("사진을 넣고 다양한 모델을 이용하여 사진의 물체들을 인식해보세요.")
model = st.selectbox('모델 선택', list(['instancesegmentation', 'panopticsegmentation', 'keypoint']))
input_file = st.file_uploader("파일을 넣어주세요.")
if input_file is not None:
    input_file = read_image(input_file)
    st.write('입력한 사진')
    st.image(input_file)
    st.write('결과물')
    # The variable is reused for the visualization returned by predict().
    input_file = predict(model, input_file)
    st.image(input_file)
| {
"alphanum_fraction": 0.6991025035,
"author": null,
"avg_line_length": 39.2037037037,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "007550deef116806f4dc17a677311e580b7a1244",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 13,
"max_forks_repo_forks_event_max_datetime": "2022-03-05T17:02:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-01-10T05:23:50.000Z",
"max_forks_repo_head_hexsha": "7f541c943656efdcbf6ee8192b49bfe7ceef8380",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "gkswjdzz/ainize-run-detectron2",
"max_forks_repo_path": "client_streamlit.py",
"max_issues_count": 8,
"max_issues_repo_head_hexsha": "7f541c943656efdcbf6ee8192b49bfe7ceef8380",
"max_issues_repo_issues_event_max_datetime": "2021-11-05T07:40:06.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-06-22T13:35:03.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "gkswjdzz/ainize-run-detectron2",
"max_issues_repo_path": "client_streamlit.py",
"max_line_length": 113,
"max_stars_count": 24,
"max_stars_repo_head_hexsha": "7f541c943656efdcbf6ee8192b49bfe7ceef8380",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "gkswjdzz/ainized-detectron2",
"max_stars_repo_path": "client_streamlit.py",
"max_stars_repo_stars_event_max_datetime": "2021-05-10T07:32:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-01-09T09:57:49.000Z",
"num_tokens": 1191,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4234
} |
! mimic NWChem tgt_sd_t_s1_1 kernel
! RL: do not redefine simd clause to be schedule(static, 1)
! RL: make the schedule clause usage be explicit
! Driver: set all six loop lower bounds to 1 and upper bounds to 24,
! then invoke the offloaded kernel over the 24^6 iteration space.
implicit integer (a-z)
l1 = 1; l2 = 1; l3 = 1; l4 = 1; l5 = 1; l6 = 1;
u1 = 24; u2 = 24; u3 = 24; u4 = 24; u5 = 24; u6 = 24;
call tgt_sd_t_s1_1(l1,l2,l3,l4,l5,l6, u1,u2,u3,u4,u5,u6)
end
! Kernel mimicking NWChem's sd_t_s1_1: fills b(i6,...,i1) = a(i6,...,i1) + i3
! over the given bounds using a fully collapsed target-teams loop, then
! prints a 4x4 corner of b so results can be eyeballed/diffed.
subroutine tgt_sd_t_s1_1(l1,l2,l3,l4,l5,l6, u1,u2,u3,u4,u5,u6)
! All untyped names (bounds, loop indices) are integers.
implicit integer (a-z)
real a(24,24,24,24,24,24)
real b(24,24,24,24,24,24)
a=3.0
b=0.0
! Collapse all six loops into one parallel iteration space; per the header
! note, schedule(static,1) is spelled out explicitly rather than implied.
!$omp target teams distribute parallel do schedule(static,1) collapse(6)
do i1 = l1, u1
  do i2 = l2, u2
    do i3 = l3, u3
      do i4 = l4, u4
        do i5 = l5, u5
          do i6 = l6, u6
            b(i6,i5,i4,i3,i2,i1) = a(i6,i5,i4,i3,i2,i1) + i3
          end do
        end do
      end do
    end do
  end do
end do
!$omp end target teams distribute parallel do
! write(6,*) b(1,1,1,1,1,1)
! write(6,*) a(1,1,1,1,1,1)
write(6,*) ((b(k,j,1,1,1,1),j=1,4),k=1,4)
return
end
| {
"alphanum_fraction": 0.5763052209,
"author": null,
"avg_line_length": 22.1333333333,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "dc9392db449392194738161bf6684d1652acc438",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "raramakr/aomp",
"max_forks_repo_path": "test/smoke-fails/nwchem-s1_1/nwchem-s1_1.f90",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "raramakr/aomp",
"max_issues_repo_path": "test/smoke-fails/nwchem-s1_1/nwchem-s1_1.f90",
"max_line_length": 72,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "raramakr/aomp",
"max_stars_repo_path": "test/smoke-fails/nwchem-s1_1/nwchem-s1_1.f90",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 480,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 996
} |
#
#
# Copyright (C) University of Melbourne 2012
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
# Based on Robert's case1
import pupynere as nc
import numpy
from data import datasinglepassbase
class Data(datasinglepassbase.DataSinglePassBase):
    """Loads wind CoV, solar CoV and demand time series from hard-coded NetCDF
    files, drops every time step containing a NaN in any series, and exposes
    the cleaned arrays in self.data. (Python 2 code: uses print statements.)"""
    def complete_configuration(self):
        self.data = {}
        # NOTE(review): `dir` and `file` shadow Python builtins here; rename
        # them if this method is ever edited further.
        dir = '/export/karoly2/rhuva/phd/ACCESS/muriel/access_2month_optim/'
        file = 'CoV_wind_station_output_prox_penalty.nc' #file with _II has smaller exclusion zone
        infile = dir + file
        f = nc.NetCDFFile(infile)
        self.data['ts_wind'] = f.variables['CoV_wind'][:,:]
        file = 'CoV_dsr_station_output_prox_penalty.nc'
        infile = dir + file
        f = nc.NetCDFFile(infile)
        self.data['ts_solar'] = f.variables['CoV_dsr'][:,:]
        file = 'Aus_demand_sample_raw.nc'
        infile = dir + file
        f = nc.NetCDFFile(infile)
        self.data['ts_demand'] = f.variables['ts_demand'][:]
        # Flag time steps with NaNs in any wind/solar station column or in demand.
        wind_nan = numpy.isnan(self.data['ts_wind'])
        solar_nan = numpy.isnan(self.data['ts_solar'])
        demand_nan = numpy.isnan(self.data['ts_demand'])
        wind_row = wind_nan.any(1)
        solar_row = solar_nan.any(1)
        combo = numpy.array([wind_row, solar_row, demand_nan])
        combo_flat = combo.any(0)
        # Keep only rows that are NaN-free across all three series.
        self.data['ts_wind'] = numpy.array(
            self.data['ts_wind'][combo_flat == False, :], dtype=float)
        self.data['ts_solar'] = numpy.array(
            self.data['ts_solar'][combo_flat == False, :], dtype=float)
        self.data['ts_demand'] = numpy.array(
            self.data['ts_demand'][combo_flat == False], dtype=float)
        print self.data['ts_wind'].shape
        print self.data['ts_solar'].shape
        print self.data['ts_demand'].shape
        # Number of surviving time steps, used downstream by the base class.
        self.ts_length = self.data['ts_wind'].shape[0]
        return None
| {
"alphanum_fraction": 0.6522902039,
"author": null,
"avg_line_length": 38.8441558442,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1a6a8d6193f12ffada44f72b2de62496fb4aa56b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "25ba16554ce8f614b9337e0fffce75da3fa259a4",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "zarppy/MUREIL_2014",
"max_forks_repo_path": "data/rhuva_data1.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "25ba16554ce8f614b9337e0fffce75da3fa259a4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "zarppy/MUREIL_2014",
"max_issues_repo_path": "data/rhuva_data1.py",
"max_line_length": 99,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "25ba16554ce8f614b9337e0fffce75da3fa259a4",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "zarppy/MUREIL_2014",
"max_stars_repo_path": "data/rhuva_data1.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 687,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2991
} |
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
def sampler(faces, verts, n, bypass=False, sigma=0.0):
    """Sample n verts given tri-mesh.
    :Parameters:
        faces : batch_size * face_num * 3
            The faces of the mesh, start from 1.
        verts : batch_size * vert_num * 3
            The verts of the mesh.
        n : an int number
            sample num
        bypass : bool
            if True, skip sampling entirely and return (verts, verts).
        sigma : float
            bandwidth of the Gaussian weighting applied to per-sample corner
            distances; <= 0 returns the raw distances (batch 0 forced to 1).
    :Return:
        target_points : batch_size * n * 3 sampled surface points
        sample_norms : batch_size * n * 3 unit normals of the sampled faces
        ratio : batch_size * n per-sample weights
        NOTE(review): the bypass branch returns only 2 values while the
        normal path returns 3 — callers must special-case it; confirm intended.
    """
    if bypass:
        return verts, verts
    batch_size = faces.shape[0]
    faces_flatten = faces.view(batch_size, -1)  # b * (nf * 3)
    # Gather per-face vertex triples; seeded from batch 0, overwritten per batch.
    face_verts = verts[:, faces_flatten[0]].view(batch_size, -1, 3, 3)  # b * nf * 3 * 3
    for i in range(batch_size):
        # could batch?
        face_verts[i] = verts[i, faces_flatten[i]].view(-1, 3, 3)  # nf * 3 * 3
    # Two edge vectors per face.
    v1 = face_verts[:, :, 1] - face_verts[:, :, 0]  # b * nf * 3
    v2 = face_verts[:, :, 2] - face_verts[:, :, 0]  # b * nf * 3
    # Face areas via |v1 x v2| / 2 = sqrt(|v1|^2 |v2|^2 - (v1.v2)^2) / 2.
    areas = torch.sqrt(
        torch.abs(torch.sum(v1 * v1, dim=-1) * torch.sum(v2 * v2, dim=-1) - (torch.sum(v1 * v2, dim=-1)) ** 2)) / 2.0
    sample_verts = torch.ones(batch_size, n, 3, 3, device=faces.device)
    sample_faces = torch.ones(batch_size, n, device=faces.device).long()
    for i, area in enumerate(areas):
        # could not batch
        sample = torch.multinomial(area, n, replacement=True)  # sample faces weighted by area
        sample_faces[i] = sample
        sample_verts[i] = face_verts[i, sample]
    sample_v1 = sample_verts[:, :, 1] - sample_verts[:, :, 0]
    sample_v2 = sample_verts[:, :, 2] - sample_verts[:, :, 0]
    # Unit face normals; epsilon guards degenerate (zero-area) faces.
    sample_norms = torch.cross(sample_v1, sample_v2)
    sample_norms = sample_norms / (torch.norm(sample_norms, dim=-1, keepdim=True) + 1e-9)
    # Uniform barycentric sampling: draw (a1, a2), reflect when a1 + a2 > 1.
    prob_vec1, prob_vec2 = torch.rand(batch_size, n, device=faces.device), torch.rand(batch_size,
                                                                                      n,
                                                                                      device=faces.device)  # uniform sample a1, a2
    mask = prob_vec1 + prob_vec2 > 1  # if a1 + a2 > 1, adjust a1 and a2
    prob_vec1[mask] = 1 - prob_vec1[mask]
    prob_vec2[mask] = 1 - prob_vec2[mask]
    target_points = sample_verts[:, :, 0] + (sample_v1 * prob_vec1.unsqueeze(-1) + sample_v2 * prob_vec2.unsqueeze(-1))
    # Distance from each sampled point to the nearest corner of its face.
    dists = torch.min(torch.norm(sample_verts - target_points.unsqueeze(-2), dim=-1), dim=-1)[0]
    # ratio = 1/dists
    # ratio = ratio/torch.mean(ratio)
    # sigma = self.options.sample_sigma - self.options.sample_sigma * (self.options.cur_step % 500) / \
    #         (500 * 2)
    if sigma > 0:
        # Gaussian falloff of the corner distance.
        ratio = torch.exp(-(dists / (2 * (sigma ** 2))))
    else:
        ratio = dists.clone()
        ratio[0, :] = 1.  # batch 0 gets uniform weights
    return target_points, sample_norms, ratio
def sampler_color(faces, verts, n, colors=None, bypass=False):
    """Sample n colors given tri-mesh.
    :Parameters:
        faces : batch_size * face_num * 3
            The faces of the mesh, start from 1.
        verts : batch_size * vert_num * 3
            The verts of the mesh.
        n : an int number
            sample num
        colors : batch_size * vert_num * 3 per-vertex colors, or None to
            fall back to plain `sampler` (whose return shape differs).
    :Return:
        target_points : batch_size * n * 3 sampled surface points
        None  (normals are not computed on this path)
        target_colors : batch_size * n * 3 barycentrically interpolated colors
    """
    if bypass:
        return verts, verts, colors
    if colors is None:
        return sampler(faces, verts, n)
    batch_size = faces.shape[0]
    faces_flatten = faces.view(batch_size, -1)  # b * (nv * 3)
    # Gather per-face vertex and color triples (seeded from batch 0, then per batch).
    face_verts = verts[:, faces_flatten[0]].view(batch_size, -1, 3, 3)  # b * nf * 3 * 3
    face_colors = colors[:, faces_flatten[0]].view(batch_size, -1, 3, 3)  # b * nf * 3 * 3
    for i in range(batch_size):
        # could batch?
        face_verts[i] = verts[i, faces_flatten[i]].view(-1, 3, 3)  # nf * 3 * 3
        face_colors[i] = colors[i, faces_flatten[i]].view(-1, 3, 3)  # nf * 3 * 3
    v1 = face_verts[:, :, 1] - face_verts[:, :, 0]  # b * nv * 3
    v2 = face_verts[:, :, 2] - face_verts[:, :, 0]  # b * nv * 3
    # Face areas via |v1 x v2| / 2 = sqrt(|v1|^2 |v2|^2 - (v1.v2)^2) / 2.
    areas = torch.sqrt(
        torch.abs(torch.sum(v1 * v1, dim=-1) * torch.sum(v2 * v2, dim=-1) - (torch.sum(v1 * v2, dim=-1)) ** 2)) / 2.0
    sample_verts = torch.ones(batch_size, n, 3, 3, device=faces.device)
    sample_colors = torch.ones(batch_size, n, 3, 3, device=faces.device)
    sample_faces = torch.ones(batch_size, n, device=faces.device).long()
    for i, area in enumerate(areas):
        # could not batch
        sample = torch.multinomial(area, n, replacement=True)  # sample faces weighted by area
        sample_faces[i] = sample
        sample_verts[i] = face_verts[i, sample]
        sample_colors[i] = face_colors[i, sample]
    sample_v1 = sample_verts[:, :, 1] - sample_verts[:, :, 0]
    sample_v2 = sample_verts[:, :, 2] - sample_verts[:, :, 0]
    sample_c1 = sample_colors[:, :, 1] - sample_colors[:, :, 0]
    sample_c2 = sample_colors[:, :, 2] - sample_colors[:, :, 0]
    # Uniform barycentric sampling: draw (a1, a2), reflect when a1 + a2 > 1.
    prob_vec1, prob_vec2 = torch.rand(batch_size, n, device=faces.device), torch.rand(batch_size,
                                                                                      n,
                                                                                      device=faces.device)  # uniform sample a1, a2
    mask = prob_vec1 + prob_vec2 > 1  # if a1 + a2 > 1, adjust a1 and a2
    prob_vec1[mask] = 1 - prob_vec1[mask]
    prob_vec2[mask] = 1 - prob_vec2[mask]
    # Interpolate position and color with the same barycentric weights.
    target_points = sample_verts[:, :, 0] + (sample_v1 * prob_vec1.unsqueeze(-1) + sample_v2 * prob_vec2.unsqueeze(-1))
    target_colors = sample_colors[:, :, 0] + (sample_c1 * prob_vec1.unsqueeze(-1) + sample_c2 * prob_vec2.unsqueeze(-1))
    return target_points, None, target_colors
def sampler_uv(faces, verts, n, uvs=None, face_uvs=None, colors=None, bypass=False):
    """Sample n uvs given tri-mesh.
    :Parameters:
        faces : batch_size * face_num * 3
            The faces of the mesh, start from 1.
        verts : batch_size * vert_num * 3
            The verts of the mesh.
        n : an int number
            sample num
        uvs : batch_size * uv_num * 2 texture coordinates, or None to fall
            back to plain `sampler` (whose return shape differs).
        face_uvs : batch_size * face_num * 3 per-face UV indices.
        colors : accepted for signature symmetry; unused on this path.
    :Return:
        target_points : batch_size * n * 3 sampled surface points
        None  (normals are not computed on this path)
        target_uvs : batch_size * n * 2 barycentrically interpolated UVs
    """
    if bypass:
        return verts, verts, uvs
    if uvs is None:
        return sampler(faces, verts, n)
    batch_size = faces.shape[0]
    faces_flatten = faces.view(batch_size, -1)  # b * (nv * 3)
    face_uvs_flatten = face_uvs.view(batch_size, -1)  # b * (nv * 3)
    # Gather per-face vertex and UV triples (seeded from batch 0, then per batch).
    face_verts = verts[:, faces_flatten[0]].view(batch_size, -1, 3, 3)  # b * nf * 3 * 3
    face_uvs = uvs[:, face_uvs_flatten[0]].view(batch_size, -1, 3, 2)  # b * nf * 3 * 2
    for i in range(batch_size):
        # could batch?
        face_verts[i] = verts[i, faces_flatten[i]].view(-1, 3, 3)  # nf * 3 * 3
        face_uvs[i] = uvs[i, face_uvs_flatten[i]].view(-1, 3, 2)  # nf * 2 * 3
    v1 = face_verts[:, :, 1] - face_verts[:, :, 0]  # b * nv * 3
    v2 = face_verts[:, :, 2] - face_verts[:, :, 0]  # b * nv * 3
    # Face areas via |v1 x v2| / 2 = sqrt(|v1|^2 |v2|^2 - (v1.v2)^2) / 2.
    areas = torch.sqrt(
        torch.abs(torch.sum(v1 * v1, dim=-1) * torch.sum(v2 * v2, dim=-1) - (torch.sum(v1 * v2, dim=-1)) ** 2)) / 2.0
    sample_verts = torch.ones(batch_size, n, 3, 3, device=faces.device)
    sample_uvs = torch.ones(batch_size, n, 3, 2, device=faces.device)
    sample_faces = torch.ones(batch_size, n, device=faces.device).long()
    for i, area in enumerate(areas):
        # could not batch
        sample = torch.multinomial(area, n, replacement=True)  # sample faces weighted by area
        sample_faces[i] = sample
        sample_verts[i] = face_verts[i, sample]
        sample_uvs[i] = face_uvs[i, sample]
    sample_v1 = sample_verts[:, :, 1] - sample_verts[:, :, 0]
    sample_v2 = sample_verts[:, :, 2] - sample_verts[:, :, 0]
    sample_u1 = sample_uvs[:, :, 1] - sample_uvs[:, :, 0]
    sample_u2 = sample_uvs[:, :, 2] - sample_uvs[:, :, 0]
    #sample_c1 = sample_colors[:, :, 1] - sample_colors[:, :, 0]
    #sample_c2 = sample_colors[:, :, 2] - sample_colors[:, :, 0]
    # Uniform barycentric sampling: draw (a1, a2), reflect when a1 + a2 > 1.
    prob_vec1, prob_vec2 = torch.rand(batch_size, n, device=faces.device), torch.rand(batch_size,
                                                                                      n,
                                                                                      device=faces.device)  # uniform sample a1, a2
    mask = prob_vec1 + prob_vec2 > 1  # if a1 + a2 > 1, adjust a1 and a2
    prob_vec1[mask] = 1 - prob_vec1[mask]
    prob_vec2[mask] = 1 - prob_vec2[mask]
    # Interpolate position and UV with the same barycentric weights.
    target_points = sample_verts[:, :, 0] + (sample_v1 * prob_vec1.unsqueeze(-1) + sample_v2 * prob_vec2.unsqueeze(-1))
    target_uvs = sample_uvs[:, :, 0] + (sample_u1 * prob_vec1.unsqueeze(-1) + sample_u2 * prob_vec2.unsqueeze(-1))
    #target_colors = sample_colors[:, :, 0] + (sample_c1 * prob_vec1.unsqueeze(-1) + sample_c2 * prob_vec2.unsqueeze(-1))
    return target_points, None, target_uvs#target_colors
def uv2color(uvs, texture):
new_uv = uvs.clone()
new_uv[:, 0] = ((uvs[:, 0] - 0.5)*2)
new_uv[:, 1] = -((uvs[:, 1] - 0.5)*2)
colors = F.grid_sample(texture.unsqueeze(0), new_uv.unsqueeze(0).unsqueeze(0))[0, :, 0] # 3 * nf
colors = torch.transpose(colors, 0, 1)
return colors
def test():
    """
    Smoke test: parse 'mesh0.obj', sample 100k surface points (with colors
    when the OBJ has per-vertex RGB), and write them to 'res.obj' as 'v' lines.
    """
    vertexes = []
    faces = []
    colors = []
    with open('mesh0.obj') as f:
        for line in f:
            tokens = line.split()
            if len(tokens) == 0:
                continue
            if tokens[0] == 'v':
                vertexes.append([float(v) for v in tokens[1:4]])
                # optional per-vertex RGB follows the xyz coordinates
                if len(tokens) > 4:
                    colors.append([float(v) for v in tokens[4:7]])
            if tokens[0] == 'f':
                faces.append([int(v) for v in tokens[1:4]])
    faces = np.vstack(faces).astype('int32') - 1  # OBJ indices are 1-based
    vertexes = np.vstack(vertexes).astype('float32')
    # Duplicate into a batch of 2 to exercise the batched sampler path.
    faces = Variable(torch.LongTensor(faces).cuda()).unsqueeze(0).repeat(2, 1, 1)
    vertexes = Variable(torch.FloatTensor(vertexes).cuda()).unsqueeze(0).repeat(2, 1, 1)
    if len(colors) > 0:
        colors = Variable(torch.FloatTensor(colors).cuda()).unsqueeze(0).repeat(2, 1, 1)
    else:
        colors = None
    # BUG FIX: sampler_color always returns three values (points, norms/None,
    # colors); the original two-name unpacking raised a ValueError.
    points, _, _ = sampler_color(faces, vertexes, 100000, colors=colors)
    points = points[0].cpu().data.numpy()
    vert = np.hstack((np.full([points.shape[0], 1], 'v'), points))
    np.savetxt('res.obj', vert, fmt='%s', delimiter=' ')
| {
"alphanum_fraction": 0.6217280272,
"author": null,
"avg_line_length": 42.9132420091,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "eaba677bf063947108e4c81c2b89f6ca80961575",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-01-05T06:31:07.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-05T06:31:07.000Z",
"max_forks_repo_head_hexsha": "02775c634da6637fc09dae42a95177618199c11c",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "weixk2015/DHSP3D",
"max_forks_repo_path": "util/sampler.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "02775c634da6637fc09dae42a95177618199c11c",
"max_issues_repo_issues_event_max_datetime": "2022-02-14T06:27:19.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-01-05T09:52:24.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "weixk2015/DHSP3D",
"max_issues_repo_path": "util/sampler.py",
"max_line_length": 128,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "02775c634da6637fc09dae42a95177618199c11c",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "weixk2015/DHSP3D",
"max_stars_repo_path": "util/sampler.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-10T12:07:58.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-17T06:41:20.000Z",
"num_tokens": 3106,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9398
} |
#!/usr/bin/env python3
#
# BSD 3-Clause License
#
# This file is part of the Basalt project.
# https://gitlab.com/VladyslavUsenko/basalt.git
#
# Copyright (c) 2019-2021, Vladyslav Usenko and Nikolaus Demmel.
# All rights reserved.
#
import argparse
import json
import numpy as np
from scipy.spatial.transform import Rotation
def print_abs_rel(info, v_0, v_1):
    """
    Print the absolute difference of two values' norms and, unless the
    difference is negligible, the difference relative to |v_0| in percent.

    Args:
        info (str): label printed before the numbers.
        v_0, v_1: scalars or array-likes accepted by np.linalg.norm.
    """
    diff = np.abs(np.linalg.norm(v_0 - v_1))
    out = f'{info}:\t{diff:.5f}'
    if diff < 10e-7:
        out += ' (0.0%)'
    else:
        # BUG FIX: the relative difference is diff / |v_0| expressed in
        # percent; the original divided by (|v_0| * 100.0), understating
        # the printed value by a factor of 10^4.
        out += f' ({diff / np.abs(np.linalg.norm(v_0)) * 100.0:.7f}%)'
    print(out)
def main(calib_path_1, calib_path_2):
    """
    Compare two basalt calibration JSON files field by field and print the
    absolute/relative differences.

    Args:
        calib_path_1: path to the first calibration JSON file.
        calib_path_2: path to the second calibration JSON file.
    """
    with open(calib_path_1, 'r') as c_1, open(calib_path_2, 'r') as c_2:
        calib0 = json.load(c_1)
        calib1 = json.load(c_2)
    # Per-camera extrinsics; each entry's values are (px, py, pz, qx, qy, qz, qw).
    for i, (t_imu_cam_0, t_imu_cam_1) in enumerate(
            zip(calib0['value0']['T_imu_cam'], calib1['value0']['T_imu_cam'])):
        print(f'\nCamera {i} transformation differences')
        # BUG FIX: the translation has 3 components; the original slice
        # [0:2] silently dropped pz.
        t_0 = np.array(list(t_imu_cam_0.values())[0:3])
        t_1 = np.array(list(t_imu_cam_1.values())[0:3])
        # Use the documented from_quat constructor for (qx, qy, qz, qw).
        r_0 = Rotation.from_quat(list(t_imu_cam_0.values())[3:7])
        r_1 = Rotation.from_quat(list(t_imu_cam_1.values())[3:7])
        print_abs_rel(f'Transformation', t_0, t_1)
        print_abs_rel(f'Rotation', r_0.as_rotvec(), r_1.as_rotvec())
    for i, (intrinsics0, intrinsics1) in enumerate(
            zip(calib0['value0']['intrinsics'], calib1['value0']['intrinsics'])):
        print(f'\nCamera {i} intrinsics differences')
        for (
                k_0, v_0), (_, v_1) in zip(
                intrinsics0['intrinsics'].items(), intrinsics1['intrinsics'].items()):
            print_abs_rel(f'Difference for {k_0}', v_0, v_1)
    # calib_accel_bias = 3 bias + 6 scale entries; calib_gyro_bias = 3 bias + 9 scale.
    # BUG FIX: both bias slices were [0:2], dropping the z component.
    print_abs_rel('\nAccel Bias Difference',
                  np.array(calib0['value0']['calib_accel_bias'][0:3]),
                  np.array(calib1['value0']['calib_accel_bias'][0:3]))
    print_abs_rel('Accel Scale Difference',
                  np.array(calib0['value0']['calib_accel_bias'][3:9]),
                  np.array(calib1['value0']['calib_accel_bias'][3:9]))
    print_abs_rel('Gyro Bias Difference',
                  np.array(calib0['value0']['calib_gyro_bias'][0:3]),
                  np.array(calib1['value0']['calib_gyro_bias'][0:3]))
    print_abs_rel('Gyro Scale Difference',
                  np.array(calib0['value0']['calib_gyro_bias'][3:12]),
                  np.array(calib1['value0']['calib_gyro_bias'][3:12]))
    print_abs_rel(
        '\nAccel Noise Std Difference',
        calib0['value0']['accel_noise_std'],
        calib1['value0']['accel_noise_std'])
    print_abs_rel(
        'Gyro Noise Std Difference',
        calib0['value0']['gyro_noise_std'],
        calib1['value0']['gyro_noise_std'])
    print_abs_rel(
        'Accel Bias Std Difference',
        calib0['value0']['accel_bias_std'],
        calib1['value0']['accel_bias_std'])
    print_abs_rel(
        'Gyro Bias Std Difference',
        calib0['value0']['gyro_bias_std'],
        calib1['value0']['gyro_bias_std'])
    print_abs_rel(
        '\nCam Time Offset Difference',
        calib0['value0']['cam_time_offset_ns'],
        # BUG FIX: the original compared calib0 against itself here,
        # always printing a zero difference.
        calib1['value0']['cam_time_offset_ns'])
def create_parser():
    """Build the command-line parser: two positional calibration-file paths."""
    parser = argparse.ArgumentParser()
    for arg_name in ('calib_path_1', 'calib_path_2'):
        parser.add_argument(arg_name)
    return parser
if __name__ == '__main__':
    # CLI entry point: parse the two calibration paths and run the comparison.
    args = create_parser().parse_args()
    main(args.calib_path_1, args.calib_path_2)
| {
"alphanum_fraction": 0.6126230457,
"author": null,
"avg_line_length": 32.2803738318,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5d76fbdd726ae08c61bcce471d0c015a225d8517",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 162,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T08:35:39.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-18T09:10:48.000Z",
"max_forks_repo_head_hexsha": "9dd7b2c8031283ec033211bc90ad70aa70323eaa",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "kwang-12/basalt-mirror",
"max_forks_repo_path": "scripts/compare_calib.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9dd7b2c8031283ec033211bc90ad70aa70323eaa",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "kwang-12/basalt-mirror",
"max_issues_repo_path": "scripts/compare_calib.py",
"max_line_length": 86,
"max_stars_count": 445,
"max_stars_repo_head_hexsha": "9dd7b2c8031283ec033211bc90ad70aa70323eaa",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "kwang-12/basalt-mirror",
"max_stars_repo_path": "scripts/compare_calib.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T20:54:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-18T01:13:58.000Z",
"num_tokens": 1012,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3454
} |
import cv2
import numpy as np
import copy
# Do image trimming/cropping here if it can be done at this stage
def preprocessing(img, top=0, bottom=-1, left=0, right=-1):
    """Crop ``img`` to the given bounds and return it as a one-element list.

    A bound of -1 for ``bottom``/``right`` means "up to the image edge".
    """
    height, width, _channels = img.shape
    bottom = height if bottom == -1 else bottom
    right = width if right == -1 else right
    # Policy changed to cropping only the insertion area:
    # return crop_img(img)
    return [img[top:bottom, left:right, :]]
def crop_img(img, minDist=30, param1=100, param2=40, minRadius=30, maxRadius=70):
    """Detect circles via the Hough transform and return the cropped patches.

    Parameters are forwarded to ``cv2.HoughCircles``. Each detected circle is
    cropped as a square of side ~2*r clamped to the image borders, converted
    from BGR to RGB. Returns [] when no circle is found.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 7)
    circles = cv2.HoughCircles(
        gray,
        cv2.HOUGH_GRADIENT,
        dp=1,
        minDist=minDist,
        param1=param1,
        param2=param2,
        minRadius=minRadius,
        maxRadius=maxRadius
    )
    if circles is None:
        return []
    res = []
    circles = np.uint16(np.around(circles))
    for circle in circles[0, :]:
        # HoughCircles yields (x, y, r) with x = column, y = row. The
        # previous code sliced rows with x and columns with y, swapping the
        # axes on non-square images; rows must be sliced by y, columns by x.
        x, y, r = (int(v) for v in circle)  # plain ints avoid uint16 wraparound
        top = max(y - r, 0)
        bottom = min(img.shape[0], y + r)
        left = max(x - r, 0)
        right = min(img.shape[1], x + r)
        cropped_img = img[top:bottom, left:right, :].copy()
        # size guards both zero-height and zero-width crops (len() only
        # checked the number of rows, letting 0-width slices reach cvtColor).
        if cropped_img.size > 0:
            res.append(cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB))
    return res
"alphanum_fraction": 0.6445086705,
"author": null,
"avg_line_length": 26.6153846154,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7335450bbcb90e1ce83f5a99b691553699975969",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "87491f09b5093136cd1b35a2bfb35fcaedf19d15",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jphacks/C_2105",
"max_forks_repo_path": "arduino/python_server/utils/preprocessing.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "87491f09b5093136cd1b35a2bfb35fcaedf19d15",
"max_issues_repo_issues_event_max_datetime": "2021-11-15T00:17:35.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-13T12:32:03.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jphacks/C_2105",
"max_issues_repo_path": "arduino/python_server/utils/preprocessing.py",
"max_line_length": 139,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "87491f09b5093136cd1b35a2bfb35fcaedf19d15",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jphacks/C_2105",
"max_stars_repo_path": "arduino/python_server/utils/preprocessing.py",
"max_stars_repo_stars_event_max_datetime": "2021-10-24T01:36:41.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-10-24T01:36:41.000Z",
"num_tokens": 376,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1038
} |
################################################################################
#
# Morphism types
#
################################################################################
# Type of a morphism between two algebras over a common base ring R:
# represented by a matrix; the generic fallback uses Generic.Mat{R}.
morphism_type(::Type{T}, ::Type{S}) where {R, T <: AbsAlgAss{R}, S <: AbsAlgAss{R}} = AbsAlgAssMor{T, S, Generic.Mat{R}}

# Specialisations mapping each concrete coefficient type to its dedicated
# matrix type. (The unused type parameter R that the original declared in
# each of these `where` clauses has been removed.)
morphism_type(::Type{T}, ::Type{S}) where {T <: AbsAlgAss{fmpq}, S <: AbsAlgAss{fmpq}} = AbsAlgAssMor{T, S, fmpq_mat}

morphism_type(::Type{T}, ::Type{S}) where {T <: AbsAlgAss{fq}, S <: AbsAlgAss{fq}} = AbsAlgAssMor{T, S, fq_mat}

morphism_type(::Type{T}, ::Type{S}) where {T <: AbsAlgAss{fq_nmod}, S <: AbsAlgAss{fq_nmod}} = AbsAlgAssMor{T, S, fq_nmod_mat}

morphism_type(::Type{T}, ::Type{S}) where {T <: AbsAlgAss{nmod}, S <: AbsAlgAss{nmod}} = AbsAlgAssMor{T, S, nmod_mat}

morphism_type(::Type{T}, ::Type{S}) where {T <: AbsAlgAss{gfp_elem}, S <: AbsAlgAss{gfp_elem}} = AbsAlgAssMor{T, S, gfp_mat}

# Endomorphism case: source and target are the same algebra type.
morphism_type(A::Type{T}) where {T <: AbsAlgAss} = morphism_type(A, A)
################################################################################
#
# Basis
#
################################################################################
# Return the canonical basis e_1, ..., e_n of A, cached on the algebra.
function basis(A::AbsAlgAss)
  isdefined(A, :basis) && return A.basis::Vector{elem_type(A)}
  K = base_ring(A)
  n = dim(A)
  bas = Vector{elem_type(A)}(undef, n)
  for i in 1:n
    coords = elem_type(K)[zero(K) for _ in 1:n]
    coords[i] = one(K)
    bas[i] = A(coords)
  end
  A.basis = bas
  return bas
end
################################################################################
#
# Predicates
#
################################################################################
# Has the simplicity status of A been determined? (A.issimple == 0 means unknown.)
function issimple_known(A::AbsAlgAss)
  return !iszero(A.issimple)
end
################################################################################
#
# Associativity, Distributivity test
#
################################################################################
# Verify (x*y)*z == x*(y*z) on all triples of basis elements of A.
function check_associativity(A::AbsAlgAss)
  n = dim(A)
  for i = 1:n, j = 1:n
    left = A[i] * A[j]
    for k = 1:n
      left * A[k] == A[i] * (A[j] * A[k]) || return false
    end
  end
  return true
end
# Verify x*(y + z) == x*y + x*z on all triples of basis elements of A.
function check_distributivity(A::AbsAlgAss)
  n = dim(A)
  for i = 1:n, j = 1:n
    xy = A[i] * A[j]
    for k = 1:n
      A[i] * (A[j] + A[k]) == xy + A[i] * A[k] || return false
    end
  end
  return true
end
################################################################################
#
# Dimension of center
#
################################################################################
# Dimension of the centre of A over the base ring.
function dimension_of_center(A::AbsAlgAss)
  Z, _ = center(A)
  return dim(Z)
end
################################################################################
#
# Subalgebras
#
################################################################################
# Constructs the algebra e*A
# This is the generic fallback which constructs an associative algebra
function subalgebra(A::AbsAlgAss{T}, e::AbsAlgAssElem{T}, idempotent::Bool = false) where {T}
  @assert parent(e) == A
  # Go through an AlgAss copy of A, compute e*A there, then squash the maps.
  Aass, AtoAass = AlgAss(A)
  sub, subtoAass = subalgebra(Aass, AtoAass\e, idempotent)
  subtoA = compose_and_squash(AtoAass, subtoAass)
  @assert domain(subtoA) == sub
  return sub, subtoA
end
# Subalgebra of A spanned by the given basis elements (generic fallback
# going through an AlgAss copy of A).
function subalgebra(A::AbsAlgAss{T}, basis::Array{S}) where {T, S}
  Aass, AtoAass = AlgAss(A)
  pre = elem_type(Aass)[AtoAass\b for b in basis]
  sub, subtoAass = subalgebra(Aass, pre)
  subtoA = compose_and_squash(AtoAass, subtoAass)
  @assert domain(subtoA) == sub
  return sub, subtoA
end
################################################################################
#
# Decomposition
#
################################################################################
# Assume that A is a commutative algebra over a finite field of cardinality q.
# This functions computes a basis for ker(x -> x^q).
function kernel_of_frobenius(A::AbsAlgAss)
  # Build, column by column, the matrix of the F-linear map x -> x^q - x
  # (Frobenius minus identity) and return a basis of its right kernel as
  # elements of A.
  F = base_ring(A)
  q = order(F)

  b = A()
  # B[:, i] holds the coordinates of e_i^q - e_i for the i-th basis vector
  # e_i; b is reused and patched in place between iterations.
  B = zero_matrix(F, dim(A), dim(A))
  for i = 1:dim(A)
    b.coeffs[i] = one(F)
    if i > 1
      b.coeffs[i - 1] = zero(F)  # undo the previous iteration's entry
    end
    c = b^q - b
    for j = 1:dim(A)
      B[j, i] = c.coeffs[j]
    end
  end

  V = right_kernel_basis(B)
  return [ A(v) for v in V ]
end
@doc Markdown.doc"""
    decompose(A::AbsAlgAss{T}) -> AlgAss{T}

Given a semisimple algebra over a field, this function
returns a decomposition of A as a direct sum of simple algebras.
"""
function decompose(A::AbsAlgAss{T}) where {T}
  # Return the cached decomposition if present.
  if isdefined(A, :decomposition)
    return A.decomposition::Vector{Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}}
  end

  # If A is already known to be simple, the decomposition is A itself (as an
  # AlgAss). The original code computed this result but never returned it and
  # fell through to a full decomposition; cache and return it directly.
  if issimple_known(A) && A.issimple == 1
    B, mB = AlgAss(A)
    res = Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}[(B, mB)]
    A.decomposition = res
    return res
  end

  if A isa AlgAss
    D = _decompose(A)
    return D
  end

  if A isa AlgGrp || A isa AlgMat
    # Decompose an AlgAss copy of A and pull the morphisms back to A.
    B, mB = AlgAss(A)
    D = _decompose(B)
    res = Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}[]
    for (S, mS) in D
      mD = compose_and_squash(mB, mS)
      push!(res, (S, mD))
    end
    A.decomposition = res
    return res
  end
end
# Dispatch on commutativity: commutative algebras are decomposed directly,
# non-commutative ones via their centre. The result is cached on A.
function _decompose(A::AbsAlgAss{T}) where {T}
  res = iscommutative(A) ? _dec_com(A) : _dec_via_center(A)
  A.decomposition = res
  return res
end
function _dec_via_center(A::S) where {T, S <: AbsAlgAss{T}}
  # Decompose the centre of A; each primitive central idempotent cuts out a
  # simple component of A as the subalgebra e*A.
  ZA, mZA = center(A)
  Algs = _dec_com(ZA)
  ZA.decomposition = Algs
  res = Tuple{AlgAss{T}, morphism_type(AlgAss{T}, S)}[ subalgebra(A, mZA(BtoZA(one(B))), true) for (B, BtoZA) in Algs]
  for i in 1:length(res)
    res[i][1].issimple = 1
    B, BtoZA = Algs[i] # B is the centre of res[i][1]
    # Build a map from B to res[i][1] via B -> ZA -> A -> res[i][1]
    M = zero_matrix(base_ring(A), dim(B), dim(res[i][1]))
    for j = 1:dim(B)
      t = mZA(BtoZA(B[j]))
      s = res[i][2]\t
      elem_to_mat_row!(M, j, s)
    end
    if dim(res[i][1]) != dim(B)
      # Non-commutative component: cache its centre together with the map.
      res[i][1].center = (B, hom(B, res[i][1], M))
    else
      # res[i][1] is commutative, so we do not cache the centre
      iM = inv(M)
      BtoA = hom(B, A, M*res[i][2].mat, res[i][2].imat*iM)
      res[i] = (B, BtoA)
    end
  end
  A.decomposition = res
  return res
end
# Decompose a commutative algebra: Frobenius-based method in positive
# characteristic, the generic random-element method otherwise.
function _dec_com(A::AbsAlgAss)
  return characteristic(base_ring(A)) > 0 ? _dec_com_finite(A) : _dec_com_gen(A)
end
function _dec_com_gen(A::AbsAlgAss{T}) where {T <: FieldElem}
  # Generic decomposition of a commutative algebra: draw random elements
  # until one has a reducible squarefree minimal polynomial, split the unit
  # into orthogonal idempotents via CRT, and recurse on the subalgebras.
  if dim(A) == 1
    A.issimple = 1
    B, mB = AlgAss(A)
    return Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}[(B, mB)]
  end

  F = base_ring(A)
  k = dim(A)

  V = elem_type(A)[A[i] for i in 1:k]

  while true
    # Random element a of A with small coordinates.
    c = elem_type(F)[ rand(F, -10:10) for i = 1:k ]
    a = dot(c, V)
    f = minpoly(a)

    if degree(f) < 2
      continue
    end
    if isirreducible(f)
      if degree(f) == dim(A)
        # a generates A, so A is a field, hence simple.
        A.issimple = 1
        B, mB = AlgAss(A)
        return Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}[(B, mB)]
      end
      continue
    end

    @assert issquarefree(f)

    fac = factor(f)
    R = parent(f)
    factors = Vector{elem_type(R)}()
    for ff in keys(fac.fac)
      push!(factors, ff)
    end

    # CRT: for each factor, the polynomial that is 1 modulo that factor and
    # 0 modulo the others; evaluated at a these give orthogonal idempotents.
    sols = Vector{elem_type(R)}()
    right_side = elem_type(R)[ R() for i = 1:length(factors) ]
    max_deg = 0
    for i = 1:length(factors)
      right_side[i] = R(1)
      if i != 1
        right_side[i - 1] = R(0)
      end
      s = crt(right_side, factors)
      push!(sols, s)
      max_deg = max(max_deg, degree(s))
    end

    # Precompute the powers of a needed to evaluate the CRT solutions.
    x = one(A)
    powers = Vector{elem_type(A)}()
    for i = 1:max_deg + 1
      push!(powers, x)
      x *= a
    end

    idems = Vector{elem_type(A)}()
    for s in sols
      idem = A()
      for i = 0:degree(s)
        idem += coeff(s, i)*powers[i + 1]
      end
      push!(idems, idem)
    end

    A.issimple = 2

    # Recurse on the subalgebras cut out by the idempotents.
    res = Vector{Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}}()
    for idem in idems
      S, StoA = subalgebra(A, idem, true)
      decS = _dec_com_gen(S)
      for (B, BtoS) in decS
        BtoA = compose_and_squash(StoA, BtoS)
        push!(res, (B, BtoA))
      end
    end
    return res
  end
end
function _dec_com_finite(A::AbsAlgAss{T}) where T
  # Decompose a commutative algebra over a finite field using the kernel of
  # the Frobenius (Berlekamp-style splitting).
  if dim(A) == 1
    A.issimple = 1
    B, mB = AlgAss(A)
    return Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}[(B, mB)]
  end

  F = base_ring(A)
  @assert !iszero(characteristic(F))
  V = kernel_of_frobenius(A)
  k = length(V)

  if k == 1
    # One-dimensional Frobenius kernel: A is a field, hence simple.
    A.issimple = 1
    B, mB = AlgAss(A)
    return Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}[(B, mB)]
  end

  A.issimple = 2

  # Draw random combinations of the kernel basis until one has a minimal
  # polynomial of degree at least 2 (so it actually splits A).
  c = elem_type(F)[ rand(F) for i = 1:k ]
  M = zero_matrix(F, dim(A), dim(A))
  a = dot(c, V)
  representation_matrix!(a, M)
  f = minpoly(M)
  while degree(f) < 2
    for i = 1:length(c)
      c[i] = rand(F)
    end
    a = dot(c, V)
    zero!(M)
    representation_matrix!(a, M)
    f = minpoly(M)
  end
  #@assert issquarefree(f)
  fac = factor(f)
  R = parent(f)
  factorss = collect(keys(fac.fac))
  # CRT: for each factor, the polynomial that is 1 modulo that factor and 0
  # modulo the others; evaluated at a these give orthogonal idempotents.
  sols = Vector{typeof(f)}(undef, length(factorss))
  right_side = typeof(f)[ zero(R) for i = 1:length(factorss) ]
  max_deg = 0
  for i = 1:length(factorss)
    right_side[i] = one(R)
    if 1 != i
      right_side[i - 1] = zero(R)
    end
    sols[i] = crt(right_side, factorss)
    max_deg = max(max_deg, degree(sols[i]))
  end
  # Powers of a needed to evaluate the CRT solutions.
  powers = Vector{elem_type(A)}(undef, max_deg+1)
  powers[1] = one(A)
  powers[2] = a
  x = a
  for i = 3:max_deg + 1
    x *= a
    powers[i] = x
  end

  idems = Vector{elem_type(A)}()
  for s in sols
    idem = A()
    for i = 0:degree(s)
      idem += coeff(s, i)*powers[i + 1]
    end
    push!(idems, idem)
  end

  # Recurse on the subalgebras cut out by the idempotents.
  res = Vector{Tuple{AlgAss{T}, morphism_type(AlgAss{T}, typeof(A))}}()
  for idem in idems
    S, StoA = subalgebra(A, idem, true)
    decS = _dec_com_finite(S)
    for (B, BtoS) in decS
      BtoA = compose_and_squash(StoA, BtoS)
      push!(res, (B, BtoA))
    end
  end
  return res
end
################################################################################
#
# Decomposition as number fields
#
################################################################################
@doc Markdown.doc"""
    as_number_fields(A::AbsAlgAss{fmpq})

Given a commutative algebra over QQ, this function returns a decomposition
of A as direct sum of number fields.
"""
function as_number_fields(A::AbsAlgAss{fmpq})
  if isdefined(A, :maps_to_numberfields)
    return A.maps_to_numberfields::Vector{Tuple{AnticNumberField, AbsAlgAssToNfAbsMor{typeof(A), elem_type(A)}}}
  end

  d = dim(A)

  Adec = decompose(A)

  # Do we still have to construct the number fields for some components?
  fields_not_cached = false
  for i = 1:length(Adec)
    if !isdefined(Adec[i][1], :maps_to_numberfields)
      fields_not_cached = true
    end
  end

  if fields_not_cached
    # Compute a LLL reduced basis of the maximal order of A to find "small"
    # polynomials for the number fields.
    OA = maximal_order(A)
    L = lll(basis_mat(OA, copy = false).num)
    n = basis_mat(OA, copy = false).den
    basis_lll = [ elem_from_mat_row(A, L, i, n) for i = 1:d ]
  end

  # M accumulates, row by row, the coordinates (w.r.t. the basis of A) of
  # the images of the basis elements of all the fields.
  M = zero_matrix(FlintQQ, 0, d)
  matrices = Vector{fmpq_mat}()
  fields = Vector{AnticNumberField}()
  for i = 1:length(Adec)
    # For each small algebra construct a number field and the isomorphism
    B, BtoA = Adec[i]
    dB = dim(B)
    if !isdefined(B, :maps_to_numberfields)
      local K, BtoK
      found_field = false # Only for debugging
      # Search the LLL-reduced basis for an element primitive for B.
      for j = 1:d
        t = BtoA\basis_lll[j]
        mint = minpoly(t)
        if degree(mint) == dB
          found_field = true
          K, BtoK = _as_field_with_isomorphism(B, t, mint)
          B.maps_to_numberfields = Tuple{AnticNumberField, AbsAlgAssToNfAbsMor{typeof(B), elem_type(B)}}[(K, BtoK)]
          push!(fields, K)
          break
        end
      end
      @assert found_field "This should not happen..."
    else
      K, BtoK = B.maps_to_numberfields[1]
      push!(fields, K)
    end

    if length(Adec) == 1
      # A itself is a field; nothing to glue.
      A.maps_to_numberfields = Tuple{AnticNumberField, AbsAlgAssToNfAbsMor{typeof(A), elem_type(A)}}[(K, BtoK)]
      return A.maps_to_numberfields
    end

    # Construct the map from K to A
    N = zero_matrix(FlintQQ, degree(K), d)
    for j = 1:degree(K)
      t = BtoA(BtoK\basis(K)[j])
      elem_to_mat_row!(N, j, t)
    end
    push!(matrices, N)
    M = vcat(M, N)
  end
  @assert nrows(M) == d

  # Invert the change-of-basis matrix and slice it into the blocks giving
  # the maps A -> K_i.
  invM = inv(M)
  matrices2 = Vector{fmpq_mat}(undef, length(matrices))
  offset = 1
  for i = 1:length(matrices)
    r = nrows(matrices[i])
    N = sub(invM, 1:d, offset:(offset + r - 1))
    matrices2[i] = N
    offset += r
  end

  result = Vector{Tuple{AnticNumberField, AbsAlgAssToNfAbsMor{typeof(A), elem_type(A)}}}()
  for i = 1:length(fields)
    push!(result, (fields[i], AbsAlgAssToNfAbsMor(A, fields[i], matrices2[i], matrices[i])))
  end
  A.maps_to_numberfields = result
  return result
end
################################################################################
#
# Random elements
#
################################################################################
# Random element of A with coefficients drawn uniformly from the base ring.
function rand(A::AbsAlgAss{T}) where T
  return A(T[rand(base_ring(A)) for _ = 1:dim(A)])
end
# Random element of A with coefficients drawn from `rng` in the base ring.
function rand(A::AbsAlgAss{T}, rng::UnitRange{Int}) where T
  return A(T[rand(base_ring(A), rng) for _ = 1:dim(A)])
end
# Random element of a rational algebra with integer coefficients from `rng`.
function rand(A::AlgAss{fmpq}, rng::UnitRange{Int} = -20:20)
  return A([fmpq(rand(FlintZZ, rng)) for _ = 1:dim(A)])
end
################################################################################
#
# Generators
#
################################################################################
function gens(A::AbsAlgAss, return_full_basis::Type{Val{T}} = Val{false}) where T
  # Compute a small generating set of A as an algebra. With
  # return_full_basis == Val{true}, additionally return a full basis built
  # from products of the generators and, for each basis element, the list of
  # (generator index, power) pairs whose product yields it.
  K = base_ring(A)
  d = dim(A)

  rfb = return_full_basis == Val{true}

  # Sort the basis by the degree of the minpolys (hopefully those with higher
  # degree generate a "bigger" subalgebra)
  minpoly_degrees = [ (i, degree(minpoly(A[i]))) for i = 1:d ]
  sort!(minpoly_degrees, by = x -> x[2], rev = true)

  generators = Vector{elem_type(A)}()
  full_basis = elem_type(A)[ one(A) ] # Contains products of generators which form a full basis
  rfb ? full_basis_indices = Vector{Tuple{Int, Int}}[ Tuple{Int, Int}[] ] : nothing
  # B tracks (via rref!) the dimension of the span found so far; each
  # candidate element is written into the last row before re-reducing.
  B = zero_matrix(K, d, d)
  cur_dim = 0
  for i = 1:d
    if cur_dim == d
      break
    end
    n = length(full_basis)
    b = A[minpoly_degrees[i][1]]
    power = 1
    while cur_dim < d
      # Does the current power b of the chosen basis element enlarge the span?
      for k = 1:d
        B[d, k] = coeffs(b, copy = false)[k]
      end
      new_dim = rref!(B)
      if cur_dim == new_dim
        break
      end
      if power == 1
        push!(generators, b)
      end
      push!(full_basis, b)
      if rfb
        ind = Tuple{Int, Int}[ (length(generators), power) ]
        push!(full_basis_indices, ind)
      end
      cur_dim = new_dim
      cur_dim == d ? break : nothing
      # Multiply b with the products collected so far from both sides and
      # keep every product that enlarges the span.
      for r = 1:n
        bb = b*full_basis[r]
        for l = 1:n
          t = full_basis[l]*bb
          for k = 1:d
            B[d, k] = coeffs(t, copy = false)[k]
          end
          new_dim = rref!(B)
          if cur_dim == new_dim
            continue
          end
          push!(full_basis, t)
          cur_dim = new_dim
          if rfb
            ind2 = deepcopy(ind)
            prepend!(ind2, full_basis_indices[l])
            append!(ind2, full_basis_indices[r])
            push!(full_basis_indices, ind2)
          end
          cur_dim == d ? break : nothing
        end
        cur_dim == d ? break : nothing
      end
      b *= A[minpoly_degrees[i][1]]
      power += 1
    end
  end

  # Remove the one
  popfirst!(full_basis)
  rfb ? popfirst!(full_basis_indices) : nothing

  if rfb
    return generators, full_basis, full_basis_indices
  else
    return generators
  end
end
################################################################################
#
# Primitive elements
#
################################################################################
# Return an element of A whose minimal polynomial has degree dim(A),
# discarding the polynomial itself.
function primitive_element(A::AbsAlgAss)
  return _primitive_element(A)[1]
end
# Generic fallback for coefficient rings without a specialised method.
# Always throws. (The unreachable `return nothing` after the error call has
# been removed.)
function _primitive_element(A::AbsAlgAss)
  error("Not implemented yet")
end
# Random search for a primitive element: draw random elements until one has
# a minimal polynomial of degree dim(A); return it with its minpoly.
function _primitive_element(A::AbsAlgAss{T}) where T <: Union{nmod, fq, fq_nmod, Generic.Res{fmpz}, fmpq, Generic.ResF{fmpz}, gfp_elem}
  n = dim(A)
  local a, f
  while true
    a = rand(A)
    f = minpoly(a)
    degree(f) == n && break
  end
  return a, f
end
function _as_field(A::AbsAlgAss{T}) where T
  # Return a primitive element a of A, its minimal polynomial, and a closure
  # mapping an element of A to its coordinate row vector with respect to the
  # power basis 1, a, ..., a^(d-1).
  d = dim(A)
  a, mina = _primitive_element(A)
  b = one(A)
  M = zero_matrix(base_ring(A), d, d)
  elem_to_mat_row!(M, 1, b)
  for i in 1:(d - 1)
    b = mul!(b, b, a)
    elem_to_mat_row!(M, i + 1, b)
  end
  B = inv(M)  # change of basis: standard basis -> power basis of a
  N = zero_matrix(base_ring(A), 1, d)
  local f
  # The let block captures N and B for the closure below.
  let N = N, B = B
    f = function(x)
      # NOTE(review): N is reused across calls, so f is not safe for
      # concurrent use.
      for i in 1:d
        N[1, i] = x.coeffs[i]
      end
      return N * B
    end
  end
  return a, mina, f
end
# Convenience overload: pick a primitive element automatically, then build
# the field and the isomorphism from it.
function _as_field_with_isomorphism(A::AbsAlgAss{S}) where { S <: Union{fmpq, gfp_elem, Generic.ResF{fmpz}, fq_nmod, fq} }
  a, mina = _primitive_element(A)
  return _as_field_with_isomorphism(A, a, mina)
end
# Assuming a is a primitive element of A and mina its minimal polynomial, this
# functions constructs the field base_ring(A)/mina and the isomorphism between
# A and this field.
function _as_field_with_isomorphism(A::AbsAlgAss{S}, a::AbsAlgAssElem{S}, mina::T) where { S <: Union{fmpq, gfp_elem, Generic.ResF{fmpz}, fq_nmod, fq}, T <: Union{fmpq_poly, gfp_poly, gfp_fmpz_poly, fq_nmod_poly, fq_poly} }
  # M collects, row by row, the coordinates of 1, a, a^2, ... in the basis
  # of A; M and inv(M) translate between A and base_ring(A)[x]/(mina).
  s = one(A)
  M = zero_matrix(base_ring(A), dim(A), dim(A))
  elem_to_mat_row!(M, 1, s)
  for i = 2:dim(A)
    s = mul!(s, s, a)
    elem_to_mat_row!(M, i, s)
  end

  # Choose the field construction matching the coefficient ring.
  if base_ring(A) == FlintQQ
    K = number_field(mina, cached = false)[1]
    return K, AbsAlgAssToNfAbsMor(A, K, inv(M), M)
  elseif base_ring(A) isa GaloisField
    Fq = FqNmodFiniteField(mina, Symbol("a"), false)
    return Fq, AbsAlgAssToFqMor(A, Fq, inv(M), M, parent(mina))
  elseif base_ring(A) isa Generic.ResField{fmpz}
    Fq = FqFiniteField(mina, Symbol("a"), false)
    return Fq, AbsAlgAssToFqMor(A, Fq, inv(M), M, parent(mina))
  elseif base_ring(A) isa FqNmodFiniteField || base_ring(A) isa FqFiniteField
    # Base ring is itself a finite field: build a relative field extension.
    Fr, RtoFr = field_extension(mina)
    return Fr, AbsAlgAssToFqMor(A, Fr, inv(M), M, parent(mina), RtoFr)
  else
    error("Not implemented")
  end
end
################################################################################
#
# Regular matrix algebra
#
################################################################################
@doc Markdown.doc"""
    regular_matrix_algebra(A::Union{ AlgAss, AlgGrp })

Returns the matrix algebra B generated by the right representation matrices of
the basis elements of A and a map from B to A.
"""
function regular_matrix_algebra(A::Union{ AlgAss, AlgGrp })
  K = base_ring(A)
  mats = [ representation_matrix(A[i], :right) for i = 1:dim(A) ]
  B = AlgMat(K, mats, isbasis = true, check = false)
  # The basis of B mirrors the basis of A, so the morphism matrices are the
  # identity in both directions.
  return B, hom(B, A, identity_matrix(K, dim(A)), identity_matrix(K, dim(A)))
end
###############################################################################
#
# Construction of a crossed product algebra
#
###############################################################################
# Index of the automorphism in G whose image of the primitive element equals
# that of `el`. A missing element raises a BoundsError (linear scan past the
# end of G).
function find_elem(G::Array{T,1}, el::T) where T
  idx = 1
  while G[idx].prim_img != el.prim_img
    idx += 1
  end
  return idx
end
#K/Q is a Galois extension.
function CrossedProductAlgebra(K::AnticNumberField, G::Array{T,1}, cocval::Array{nf_elem, 2}) where T
  # Crossed product algebra of K with the Galois group G twisted by the
  # 2-cocycle cocval, returned via its structure constants over QQ.
  n=degree(K)
  m=length(G)
  #=
  Multiplication table
  I order the basis in this way:
  First, I put the basis of the Galois Group, then the product of them with the first
  element of basis of the order and so on...
  =#
  M=Array{fmpq,3}(undef, n*m, n*m, n*m)
  for i=1:n*m
    for j=1:n*m
      for s=1:n*m
        M[i,j,s]=fmpq(0)
      end
    end
  end
  B=basis(K)
  for i=1:n
    for j=1:m
      #I have the element B[i]*G[j]
      for k=1:n
        for h=1:m
          # I take the element B[k]*G[h]
          # and I take the product
          # B[i]*G[j]* B[k]*G[h]=B[i]*G[j](B[k])*c[j,h]*(G[j]*G[h])
          ind=find_elem(G,G[h] * G[j])
          x=B[i]*G[j](B[k])*cocval[j,h]
          #@show i, j, k,h, ind,B[i],G[j](B[k]),cocval[j,h], x
          for s=0:n-1
            M[j+(i-1)*n, h+(k-1)*n, ind+s*n]=coeff(x,s)
          end
          #@show M
        end
      end
    end
  end
  return AlgAss(FlintQQ, M)
end
function CrossedProductAlgebra(O::NfOrd, G::Array{T,1}, cocval::Array{nf_elem, 2}) where T
  # Crossed product algebra built on an order O instead of the field; the
  # cocycle values are assumed to lie in O (element constructors are called
  # with check = false below).
  n=degree(O)
  m=length(G)
  K=nf(O)
  #=
  Multiplication table
  I order the basis in this way:
  First, I put the basis of the Galois Group, then the product of them with the first
  element of basis of the order and so on...
  =#
  M=Array{fmpq,3}(undef, n*m, n*m, n*m)
  for i=1:n*m
    for j=1:n*m
      for s=1:n*m
        M[i,j,s]=fmpq(0)
      end
    end
  end
  B = basis(O, copy = false)
  el = O(0)  # reused accumulator for the in-place products below
  for j=1:m
    for k=1:n
      l =O(G[j](K(B[k])), false)
      for h=1:m
        ind = find_elem(G, G[h] * G[j])
        t = O(cocval[j,h], false)
        for i=1:n
          #I have the element B[i]*G[j]
          # I take the element B[k]*G[h]
          # and I take the product
          # B[i]*G[j]* B[k]*G[h]=B[i]*G[j](B[k])*c[j,h]*(G[j]*G[h])
          mul!(el, B[i], l)
          mul!(el, el, t)
          y = coordinates(el)
          for s=0:n-1
            M[j+(i-1)*m, h+(k-1)*m, ind+s*m] = y[s+1]
          end
        end
      end
    end
  end
  # The unit of the algebra is 1_O times the identity of G.
  j1 = find_identity(G, *)
  j = find_elem(G, j1)
  O1 = fmpq[0 for i=1:n*m]
  O1[j] = fmpq(1)
  A = AlgAss(FlintQQ, M, O1)
  A.issimple = 1
  return A
end
################################################################################
#
# Quaternion algebras
#
################################################################################
function quaternion_algebra(a::Int, b::Int)
  # Structure constants of the quaternion algebra (a, b | QQ) with basis
  # 1, i, j, ij, where i^2 = a, j^2 = b and ji = -ij.
  M = Array{fmpq,3}(undef, 4,4,4)

  for i = 1:4
    for j = 1:4
      for k = 1:4
        M[i,j,k] = 0
      end
    end
  end

  M[1,1,1] = 1 # 1*1=1
  M[1,2,2] = 1 # 1*i=i
  M[1,3,3] = 1 # 1*j=j
  M[1,4,4] = 1 # 1*ij=ij (comment fixed; previously claimed 1*ij=1)

  M[2,1,2] = 1    # i*1 = i
  M[2,2,1] = a    # i*i = a
  M[2,3,4] = 1    # i*j = ij
  M[2,4,3] = a    # i*ij = a*j

  M[3,1,3] = 1    # j*1 = j
  M[3,2,4] = -1   # j*i = -ij
  M[3,3,1] = b    # j*j = b
  M[3,4,2] = -b   # j*ij = -b*i

  M[4,1,4] = 1    # ij*1 = ij
  M[4,2,3] = -a   # ij*i = -a*j
  M[4,3,2] = b    # ij*j = b*i
  M[4,4,1] = -a*b # ij*ij = -a*b

  O = fmpq[1, 0, 0, 0]  # coordinates of the unit element
  return AlgAss(FlintQQ, M, O)
end
| {
"alphanum_fraction": 0.5301416049,
"author": null,
"avg_line_length": 25.9568261377,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "080f14ec1a6f1b29aefd751b8a72ebeac3436fbb",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3ba4c63908eaa256150a055491a6387a45b081ec",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "edgarcosta/Hecke.jl",
"max_forks_repo_path": "src/AlgAss/AbsAlgAss.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3ba4c63908eaa256150a055491a6387a45b081ec",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "edgarcosta/Hecke.jl",
"max_issues_repo_path": "src/AlgAss/AbsAlgAss.jl",
"max_line_length": 223,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3ba4c63908eaa256150a055491a6387a45b081ec",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "edgarcosta/Hecke.jl",
"max_stars_repo_path": "src/AlgAss/AbsAlgAss.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 7130,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 22245
} |
from PIL import ImageTk
import PIL.Image
import tkinter as tk
import tkinter.messagebox
from tkinter import *
from tkinter.ttk import *
from time import strftime
from datetime import datetime as dt
import datetime
import pandas as pd
import pytz
from timezonefinder import TimezoneFinder
from geopy.geocoders import Nominatim
import time
from pprint import pprint
import geocoder
from astropy.time import Time
from astropy import units as u
import tzwhere
from dateutil import tz
# --- Main window and background image --------------------------------------
root = Tk()
root.title('StarClock')
root.geometry('1500x1000')

galaxy=PIL.Image.open("galaxy.jpg")
# NOTE(review): Image.ANTIALIAS is deprecated in recent Pillow (use
# Image.LANCZOS) -- confirm the installed Pillow version still accepts it.
galaxy=galaxy.resize((1600,1100),PIL.Image.ANTIALIAS)
galaxy2=ImageTk.PhotoImage(galaxy)
label1 = Label( root, image = galaxy2)
label1.place(x = 0, y = 0)
#---------------------------------------------------------------------------------------------------------------------------------
# Observer coordinates; fixed at (0, 0) in the code shown here.
global latitude
global longitude
latitude = 0
longitude = 0
# Header labels: which location the displayed times refer to.
notelab = Label(root, text = "Showing Time for:", font = ('calibri', 18, 'bold'), background = 'black', foreground = 'white')
notelab.grid(column=0, row=0)
# Fixed typo in the user-facing text: "Longitutde" -> "Longitude".
notelab = Label(root, text = "Latitude: " + str(latitude) + " Longitude: " + str(longitude), font = ('calibri', 18, 'bold'), background = 'black', foreground = 'white')
notelab.grid(column=1, row=0)
#---------------------------------------------------------------------------------------------------------------------------------
# Resolve the pytz timezone for the current coordinates.
# NOTE(review): timezone_at can return None for open-ocean coordinates and
# pytz.timezone(None) would raise -- confirm (0, 0) resolves here.
global timezone
t_find = TimezoneFinder()
timezone = t_find.timezone_at(lng=longitude, lat=latitude)
timezone = pytz.timezone(timezone)
# --- Time translator panel: instructions and input fields -------------------
translab = Label(root, text = "Time Translator", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
translab.grid(column=2, row=1)
formlbl = Label(root, text = "Enter one of the following in 'From' Entry: Time, UTC, JD ", font = ('calibri', 12, 'bold'), background = 'black', foreground = 'white')
formlbl.grid(column=2, row=2)
formlbl = Label(root, text = "Enter one of the following in 'To' Entry: Time, UTC, JD, GMST, LST ", font = ('calibri', 12, 'bold'), background = 'black', foreground = 'white')
formlbl.grid(column=2, row=3)
formlbl = Label(root, text = "Format: Time/UTC = %Y-%m-%d %H:%M:%S.%f JD = %y%j.%f ", font = ('calibri', 12, 'bold'), background = 'black', foreground = 'white')
formlbl.grid(column=2, row=4)
formlbl = Label(root, text = "Enter time in format seen to the left, exclude offset from date and time", font = ('calibri', 12, 'bold'), background = 'black', foreground = 'white')
formlbl.grid(column=2, row=5)
# Input fields read by translate() below.
fromlab = Label(root, text = "From:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
fromlab.grid(column=2, row=6)
fromEntry = Entry(root,width=10)
fromEntry.grid(column=3, row=6)
tolab = Label(root, text = "To:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
tolab.grid(column=2, row=7)
toEntry = Entry(root,width=10)
toEntry.grid(column=3, row=7)
timelab = Label(root, text = "Enter time to translate:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
timelab.grid(column=2, row=8)
timeEntry = Entry(root,width=10)
timeEntry.grid(column=3, row=8)
#---------------------------------------------------------------------------------------------------------------------------------
def translate():
    """Convert the value in timeEntry between the systems named in the
    From/To entries (Time, UTC, JD, GMST, LST) and show the result in a
    label placed at grid cell (column 2, row 10)."""
    #-----------------------------------------------------------------------------------------------------------------------------
    # Input validation: restrict 'From'/'To' to the supported systems and
    # check that the time string parses in the expected format.
    if fromEntry.get() != 'JD' and fromEntry.get() != 'UTC' and fromEntry.get() != 'Time':
        tk.messagebox.showinfo("Error", "Please enter JD, UTC, or Time in 'From' entry box")
        return
    if toEntry.get() != 'JD' and toEntry.get() != 'UTC' and toEntry.get() != 'Time' and toEntry.get() != 'LST' and toEntry.get() != 'GMST':
        tk.messagebox.showinfo("Error", "Please enter JD, UTC, Time, LST, or GMST in 'To' entry box")
        return
    if str(fromEntry.get()) == str(toEntry.get()):
        tk.messagebox.showinfo("Error", "'To' and 'From' entries should not be the same!")
        return
    if fromEntry.get() == 'JD':
        try:
            float(timeEntry.get())
        except:
            tk.messagebox.showinfo("Error", "Please enter JD time as number greater than 2086303 and less than 5373484")
            return
    # 'and' binds tighter than 'or', so both bounds are checked correctly.
    if fromEntry.get() == 'JD' and float(timeEntry.get()) < 2086303 or fromEntry.get() == 'JD' and float(timeEntry.get()) > 5373484 :
        tk.messagebox.showinfo("Error", "Please enter JD time as number greater than 2086303 and less than 5373484")
        return
    if fromEntry.get() == 'UTC' or fromEntry.get() == 'Time':
        try:
            dt.strptime(timeEntry.get(), "%Y-%m-%d %H:%M:%S.%f")
        except:
            tk.messagebox.showinfo("Error", "Please enter time entry in format %Y-%m-%d %H:%M:%S.%f. Seconds must have a decimal point.")
            return
    #-----------------------------------------------------------------------------------------------------------------------------
    # Remove any previous result label from the output cell.
    for i in root.grid_slaves():
        if int(i.grid_info()["row"]) == 10 and int(i.grid_info()["column"]) == 2 :
            i.grid_forget()
    global latitude
    global longitude
    global timezone
    if fromEntry.get() == 'JD':
        # Parse the Julian date via astropy, then convert to an aware UTC datetime.
        times = timeEntry.get()
        t = Time(times, format = 'jd', scale = 'utc')
        utc = dt.strptime(str(t.utc.iso), "%Y-%m-%d %H:%M:%S.%f")
        timez = tz.tzutc()
        utc = utc.replace(tzinfo=timez)
        if toEntry.get() == 'UTC':
            utc = t.utc.iso
            # NOTE(review): strftime(str(...)) treats the rendered date as a
            # format string; it passes through unchanged only because the
            # text contains no '%' characters.
            utcstr = strftime(str(utc))
            newlab = Label(root, text = utcstr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newlab.grid(column=2, row=10)
        elif toEntry.get() == 'GMST':
            newt = utc.astimezone(timezone)
            # NOTE(review): location is passed as (longitude, latitude) --
            # confirm this matches the order astropy's Time expects.
            time = Time(newt, scale='utc', location=(float(longitude), float(latitude)))
            GMST = time.sidereal_time('mean', 'greenwich')
            gmststr = strftime(str(GMST))
            newlab = Label(root, text = gmststr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newlab.grid(column=2, row=10)
        elif toEntry.get() == 'LST':
            newt = utc.astimezone(timezone)
            time = Time(newt, scale='utc', location=(float(longitude), float(latitude)))
            LST = time.sidereal_time('apparent')
            lststr = strftime(str(LST))
            newlab = Label(root, text = lststr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newlab.grid(column=2, row=10)
        elif toEntry.get() == 'Time':
            newt = utc.astimezone(timezone)
            nlab = Label(root, text = newt, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            nlab.grid(column=2, row=10)
    if fromEntry.get() == 'UTC':
        # Parse the UTC string and tag it with the UTC timezone.
        utc = dt.strptime(timeEntry.get(), "%Y-%m-%d %H:%M:%S.%f")
        timez = tz.tzutc()
        utc = utc.replace(tzinfo=timez)
        if toEntry.get() == 'JD':
            ts = pd.Timestamp(utc)
            jdstr = strftime(str(ts.to_julian_date()))
            newjdlab = Label(root, text = jdstr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newjdlab.grid(column=2, row=10)
        elif toEntry.get() == 'GMST':
            newt = utc.astimezone(timezone)
            time = Time(newt, scale='utc', location=(float(longitude), float(latitude)))
            GMST = time.sidereal_time('mean', 'greenwich')
            gmststr = strftime(str(GMST))
            newlab = Label(root, text = gmststr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newlab.grid(column=2, row=10)
        elif toEntry.get() == 'LST':
            newt = utc.astimezone(timezone)
            time = Time(newt, scale='utc', location=(float(longitude), float(latitude)))
            LST = time.sidereal_time('apparent')
            lststr = strftime(str(LST))
            newlab = Label(root, text = lststr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newlab.grid(column=2, row=10)
        elif toEntry.get() == 'Time':
            newt = utc.astimezone(timezone)
            nlab = Label(root, text = newt, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            nlab.grid(column=2, row=10)
    if fromEntry.get() == 'Time':
        # Interpret the entry as local time in the configured timezone.
        naive = dt.strptime(timeEntry.get(), "%Y-%m-%d %H:%M:%S.%f")
        local_dt = timezone.localize(naive, is_dst=None)
        if toEntry.get() == 'UTC':
            newut = local_dt.astimezone(pytz.utc)
            utcstr = strftime(str(newut))
            newlab = Label(root, text = utcstr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newlab.grid(column=2, row=10)
        elif toEntry.get() == 'GMST':
            newgm = local_dt.astimezone(pytz.utc)
            time = Time(newgm, scale='utc', location=(float(longitude), float(latitude)))
            GMST = time.sidereal_time('mean', 'greenwich')
            gmststr = strftime(str(GMST))
            newlab = Label(root, text = gmststr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newlab.grid(column=2, row=10)
        elif toEntry.get() == 'JD':
            newgm = local_dt.astimezone(pytz.utc)
            ts = pd.Timestamp(newgm)
            jdstr = strftime(str(ts.to_julian_date()))
            newjdlab = Label(root, text = jdstr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newjdlab.grid(column=2, row=10)
        elif toEntry.get() == 'LST':
            newlst = local_dt.astimezone(pytz.utc)
            time = Time(newlst, scale='utc', location=(float(longitude), float(latitude)))
            LST = time.sidereal_time('apparent')
            lststr = strftime(str(LST))
            newlab = Label(root, text = lststr, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
            newlab.grid(column=2, row=10)
transl = Button(root, text="Translate", command=translate)
transl.grid(column=2, row=9)
#---------------------------------------------------------------------------------------------------------------------------------
# Live local-time display; the label is updated by time() below.
# NOTE(review): this rebinds timelab (used above for the translator); the
# earlier widget stays gridded.
timelab = Label(root, text = "Time:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
timelab.grid(column=0, row=1)
def time():
datetimeTZ = dt.now(timezone)
timestring = datetimeTZ.strftime("%I:%M:%S %p")
timelbl.config(text = timestring)
timelbl.after(1000, time)
timelbl = Label(root, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
timelbl.grid(column=1, row=1)
#---------------------------------------------------------------------------------------------------------------------------------
# --- Full date-and-time readout (row 2) ------------------------------------
datelab = Label(root, text = "Date and Time:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
datelab.grid(column=0, row=2)
def date():
    """Refresh the date/time label once per second."""
    # str(datetime) contains no '%' directives, so strftime (presumably
    # time.strftime -- import not visible here) returns it unchanged.
    datelbl.config(text=strftime(str(dt.now(timezone))))
    datelbl.after(1000, date)
datelbl = Label(root, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
datelbl.grid(column=1, row=2)
#---------------------------------------------------------------------------------------------------------------------------------
# --- Julian date readout (row 3) -------------------------------------------
jdlab = Label(root, text = "Time JD:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
jdlab.grid(column=0, row=3)
def jd():
    """Refresh the Julian-date label once per second (UTC based)."""
    julian = pd.Timestamp(dt.utcnow()).to_julian_date()
    # str(float) has no '%' directives, so strftime passes it through.
    jdlbl.config(text=strftime(str(julian)))
    jdlbl.after(1000, jd)
jdlbl = Label(root, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
jdlbl.grid(column=1, row=3)
#---------------------------------------------------------------------------------------------------------------------------------
# --- UTC readout (row 4) ---------------------------------------------------
utclab = Label(root, text = "Time UTC:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
utclab.grid(column=0, row=4)
def ut():
    """Refresh the UTC label once per second."""
    # str(datetime) has no '%' directives, so strftime passes it through.
    utclbl.config(text=strftime(str(dt.utcnow())))
    utclbl.after(1000, ut)
utclbl = Label(root, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
utclbl.grid(column=1, row=4)
#---------------------------------------------------------------------------------------------------------------------------------
# --- Local apparent sidereal time (row 5) ----------------------------------
lstLabel = Label(root, text = "Local Sidereal Time:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
lstLabel.grid(column=0, row=5)
def lst():
    """Refresh the LST label once per second via astropy sidereal time."""
    global longitude
    global latitude
    # NOTE(review): dt.now(timezone) is a tz-aware local time handed to
    # astropy with scale='utc' -- preserved exactly as in the original.
    obs = Time(dt.now(timezone), scale='utc', location=(float(longitude), float(latitude)))
    labellocal.config(text=str(obs.sidereal_time('apparent')))
    labellocal.after(1000, lst)
labellocal = Label(root, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
labellocal.grid(column=1, row=5)
#---------------------------------------------------------------------------------------------------------------------------------
# --- Greenwich mean sidereal time (row 6) ----------------------------------
gmstLabel = Label(root, text = "GMST:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
gmstLabel.grid(column=0, row=6)
def gmst():
    """Refresh the GMST label once per second via astropy sidereal time."""
    global longitude
    global latitude
    obs = Time(dt.now(timezone), scale='utc', location=(float(longitude), float(latitude)))
    labelgreen.config(text=str(obs.sidereal_time('mean', 'greenwich')))
    labelgreen.after(1000, gmst)
labelgreen = Label(root, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
labelgreen.grid(column=1, row=6)
#---------------------------------------------------------------------------------------------------------------------------------
# --- Timezone / location entry section (rows 7-10) -------------------------
# Header and format hints, plus the latitude/longitude Entry widgets that
# change() reads when the user presses "Calculate New Times".
changetz = Label(root, text = 'Change Timezone', font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
changetz.grid(column=0, row=7)
latlonlbl = Label(root, text = 'Format:West/South negative, East/North positive', font = ('calibri', 12, 'bold'), background = 'black', foreground = 'white')
latlonlbl.grid(column=1, row=7)
latlonlbl2 = Label(root, text = 'Enter coordinates in decimal degrees format', font = ('calibri', 12, 'bold'), background = 'black', foreground = 'white')
latlonlbl2.grid(column=1, row=8)
latlbl = Label(root, text = 'Enter Latitude', font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
latlbl.grid(column=0, row=9)
# Entry widgets parsed as floats by change(); validation happens there.
latEntry = Entry(root,width=10)
latEntry.grid(column=1, row=9)
lonlbl = Label(root, text = 'Enter Longitude', font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
lonlbl.grid(column=0, row=10)
lngEntry = Entry(root,width=10)
lngEntry.grid(column=1, row=10)
#---------------------------------------------------------------------------------------------------------------------------------
def change():
    """Validate the latitude/longitude entries, then rebind the module
    globals (timezone, latitude, longitude) and refresh the banner.

    Fixes vs. original: parse each Entry once (was re-parsed up to three
    times), catch only ValueError instead of a bare except, guard against
    timezonefinder returning None (open ocean) before mutating globals,
    and correct the "Longitutde" typo in the banner text.
    """
    try:
        lat = float(latEntry.get())
        lon = float(lngEntry.get())
    except ValueError:
        tk.messagebox.showinfo("Error", "Please enter latitude between -90 and 90, and longitude between -180 and 180")
        return
    if not (-90 <= lat <= 90) or not (-180 <= lon <= 180):
        tk.messagebox.showinfo("Error", "Please enter latitude between -90 and 90, and longitude between -180 and 180")
        return
    # Resolve the IANA zone BEFORE touching the globals, so a failed
    # lookup leaves the previous location intact.
    tz_name = t_find.timezone_at(lng=lon, lat=lat)
    if tz_name is None:
        # timezonefinder yields None over unmapped areas (open ocean).
        tk.messagebox.showinfo("Error", "No timezone found for these coordinates")
        return
    global timezone
    global latitude
    global longitude
    latitude = lat
    longitude = lon
    timezone = pytz.timezone(tz_name)
    # Replace any previous coordinate banner at (row 0, column 1).
    for widget in root.grid_slaves():
        info = widget.grid_info()
        if int(info["row"]) == 0 and int(info["column"]) == 1:
            widget.grid_forget()
    notelab = Label(root, text = "Latitude: " + str(latitude) + " Longitude: " + str(longitude), font = ('calibri', 18, 'bold'), background = 'black', foreground = 'white')
    notelab.grid(column=1, row=0)
tbz = Button(root, text="Calculate New Times", command=change)
tbz.grid(column=1, row=11)
lbltkw = Label(root, text = 'Calculate New Times:', font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
lbltkw.grid(column=0, row=11)
#---------------------------------------------------------------------------------------------------------------------------------
def pause() :
    """Snapshot every clock value at this instant and show the frozen
    readings in rows 13-19, replacing any previous snapshot."""
    # Clear a previous snapshot: all widgets below row 12 belong to us.
    for i in root.grid_slaves():
        if int(i.grid_info()["row"]) > 12:
            i.grid_forget()
    global longitude
    global latitude
    # One astropy Time so the LST and GMST readings agree exactly.
    # NOTE(review): the local `time` shadows the time() updater inside
    # this function body only.
    time = Time(dt.now(timezone), scale='utc', location=(float(longitude), float(latitude)))
    LST = time.sidereal_time('apparent')
    GMST = time.sidereal_time('mean', 'greenwich')
    stringloc = str(LST)
    stringgreen = str(GMST)
    datetimeTZ = dt.now(timezone)
    datetime_object = dt.now(timezone)
    utc = dt.utcnow()
    ts = pd.Timestamp(dt.now(timezone))
    # --- static row captions (column 0) --------------------------------
    lb = Label(root, text = "Time:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    lb.grid(column=0, row=13)
    DT = Label(root, text = "Date and Time:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    DT.grid(column=0, row=14)
    JDT = Label(root, text = "JD Time:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    JDT.grid(column=0, row=15)
    UTT = Label(root, text = "UTC Time:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    UTT.grid(column=0, row=16)
    LTT = Label(root, text = "LST:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    LTT.grid(column=0, row=17)
    GMT = Label(root, text = "GMST:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    GMT.grid(column=0, row=18)
    ia = Label(root, text = "Paused times for:", font = ('calibri', 12, 'bold'), background = 'black', foreground = 'white')
    ia.grid(column=0, row=19)
    # --- frozen values (column 1) --------------------------------------
    b = Label(root, text = str(datetimeTZ.strftime("%I:%M:%S %p")), font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    b.grid(column=1, row=13)
    c = Label(root, text = str(datetime_object), font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    c.grid(column=1, row=14)
    d = Label(root, text = str(ts.to_julian_date()), font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    d.grid(column=1, row=15)
    e = Label(root, text = str(utc), font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    e.grid(column=1, row=16)
    # NOTE(review): `e` is rebound from the UTC label to the LST label;
    # the first label is already gridded, so only the reference is lost.
    e = Label(root, text = stringloc, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    e.grid(column=1, row=17)
    gm = Label(root, text = stringgreen, font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
    gm.grid(column=1, row=18)
    # NOTE(review): "Longitutde" typo lives in this displayed string;
    # fixing it would change the UI text, so it is left as-is here.
    inf = Label(root, text = "Latitude: " + str(latitude) + " Longitutde: " + str(longitude), font = ('calibri', 12, 'bold'), background = 'black', foreground = 'white')
    inf.grid(column=1, row=19)
# NOTE(review): `tiz` is reused for the caption label and the button.
tiz = Label(root, text="Pause Time:", font = ('calibri', 20, 'bold'), background = 'black', foreground = 'white')
tiz.grid(column=0, row=12)
tiz = Button(root, text="Pause and Display time", command=pause)
tiz.grid(column=1, row=12)
#---------------------------------------------------------------------------------------------------------------------------------
# Start the six self-rescheduling updaters (each re-arms itself with
# widget.after(1000, ...)) before handing control to the Tk event loop.
time()
date()
jd()
ut()
lst()
gmst()
#---------------------------------------------------------------------------------------------------------------------------------
mainloop() | {
"alphanum_fraction": 0.5298210259,
"author": null,
"avg_line_length": 50.3405797101,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f0c7c81152c2394b61cf17f42e6d16e1cc590077",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "22a787a3fd29ddea0d02dc86e150f8316cc8db9d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "wcmears/python-codes",
"max_forks_repo_path": "Python Codes/starclock.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "22a787a3fd29ddea0d02dc86e150f8316cc8db9d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "wcmears/python-codes",
"max_issues_repo_path": "Python Codes/starclock.py",
"max_line_length": 181,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "22a787a3fd29ddea0d02dc86e150f8316cc8db9d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "wcmears/python-codes",
"max_stars_repo_path": "Python Codes/starclock.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5200,
"path": null,
"reason": "from astropy",
"repo": null,
"save_path": null,
"sha": null,
"size": 20841
} |
from numpy import pi, exp
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotM18
A SlotM18 object
Returns
-------
point_dict: dict
A dict of the slot coordinates
"""
Rbo = self.get_Rbo()
sp = 2 * pi / self.Zs
Z1 = Rbo * exp(-1j * sp / 2)
ZM1 = Rbo * exp(-1j * sp / 2)
if self.is_outwards():
ZM2 = (Rbo - self.Hmag) * exp(-1j * sp / 2)
else: # inward slot
ZM2 = (Rbo + self.Hmag) * exp(-1j * sp / 2)
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["ZM1"] = ZM1
point_dict["ZM2"] = ZM2
# symetry
point_dict["Z2"] = Z1.conjugate()
point_dict["ZM3"] = ZM2.conjugate()
point_dict["ZM4"] = ZM1.conjugate()
return point_dict
| {
"alphanum_fraction": 0.551143201,
"author": null,
"avg_line_length": 21.3076923077,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c74747a9d9e59355e2a6369815ed4b8265a463f1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2022-01-07T10:47:48.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-12-23T12:38:01.000Z",
"max_forks_repo_head_hexsha": "4d7f0cbabf0311006963e7a2f435db2ecd901118",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Eomys/Pyleecan",
"max_forks_repo_path": "pyleecan/Methods/Slot/SlotM18/_comp_point_coordinate.py",
"max_issues_count": 8,
"max_issues_repo_head_hexsha": "4d7f0cbabf0311006963e7a2f435db2ecd901118",
"max_issues_repo_issues_event_max_datetime": "2022-03-08T12:52:06.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-09T07:43:01.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Eomys/Pyleecan",
"max_issues_repo_path": "pyleecan/Methods/Slot/SlotM18/_comp_point_coordinate.py",
"max_line_length": 61,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "tobsen2code/pyleecan",
"max_stars_repo_path": "pyleecan/Methods/Slot/SlotM18/_comp_point_coordinate.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-02T15:26:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-05T15:22:39.000Z",
"num_tokens": 284,
"path": null,
"reason": "from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 831
} |
import numpy as np
from importlib import import_module
from TexasHoldem import Card, Hand, Deck, Player, Game
def test_game():
    """Smoke-test: play one game with 5 default-AI players."""
    starting_cash = 10
    ai_module = 'AI.DefaultTexasHoldemAI'
    names = np.random.choice(Player.names, 5, replace=False)
    ai_cls = import_module(ai_module).TexasHoldemAI
    players = [
        Player(ai=ai_cls(name, starting_cash, names), name=name, cash=starting_cash)
        for name in names
    ]
    Game(players).new_game()
def test_deck():
    """Check hand ranking and hand naming for a fixed known board."""
    hole_a = (Card('Ace', 'Spades'), Card(9, 'Hearts'))
    hole_b = (Card(2, 'Diamonds'), Card(4, 'Clubs'))
    board = (Card('Ace', 'Diamonds'), Card(4, 'Spades'), Card('Ace', 'Hearts'),
             Card(7, 'Clubs'), Card(9, 'Clubs'))
    deck = Deck()
    best_a = deck.get_best_hand(hole_a + board)
    best_b = deck.get_best_hand(hole_b + board)
    assert best_a > best_b
    assert best_a.get_text() == 'Full house Aces full of 9s'
    assert best_b.get_text() == 'Two pair Aces and 4s'
    deck.random_cards(2)  # also exercise the random dealer
# Allow running both checks directly, without a test runner.
if __name__ == '__main__':
    test_deck()
    test_game()
| {
"alphanum_fraction": 0.6398891967,
"author": null,
"avg_line_length": 31.8529411765,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6d899177804b4fd8d9f29c636982c57f7ca16214",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3c32252c410d4bc5a5d517cd27852044d48722ab",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "alexrockhill/TexasHoldEm",
"max_forks_repo_path": "tests.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3c32252c410d4bc5a5d517cd27852044d48722ab",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "alexrockhill/TexasHoldEm",
"max_issues_repo_path": "tests.py",
"max_line_length": 111,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3c32252c410d4bc5a5d517cd27852044d48722ab",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "alexrockhill/TexasHoldEm",
"max_stars_repo_path": "tests.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 317,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1083
} |
'''
The trade algorithm for WealthSimple Assignment
Author: Jinhua Wang
License MIT
For simplicity purposes, this file assumes stock prices follows Brownian Motion,
and we only trade SP500.
'''
import numpy as np
class TradeData:
    '''
    Source of the trade data.
    To simplify matters, only the SP500 index is traded, and the price
    path is simulated with geometric Brownian motion.
    '''

    def brownMotion(self, starting_price):
        '''Simulate one month of daily SP500 prices starting at
        `starting_price`; returns a numpy array of prices.'''
        horizon = 1        # one month
        drift = 0.1
        volatility = 0.5
        step = 0.03        # one trading day
        n_steps = int(round(horizon / step))
        grid = np.linspace(0, horizon, n_steps)
        # Cumulative Gaussian increments scaled to the step size.
        wiener = np.cumsum(np.random.standard_normal(size=n_steps)) * np.sqrt(step)
        log_path = (drift - 0.5 * volatility ** 2) * grid + volatility * wiener
        return starting_price * np.exp(log_path)
class Account:
    """Trading account holding cash and an SP500 position.

    The original stored ``cash``, ``portfolio`` and ``trans_history`` as
    class attributes, so every Account shared one portfolio dict and one
    history list (Algorithm's accountA and accountB traded against the
    same state).  They are now instance attributes set in __init__; the
    constructor signature and attribute names are unchanged.
    """

    def __init__(self):
        self.cash = 1000000000
        self.portfolio = {"SP500": 0}
        self.trans_history = []

    def purchase(self, price, volume, day):
        """Buy `volume` units at `price`; record the trade on `day`."""
        self.cash = self.cash - price * volume
        self.portfolio["SP500"] = self.portfolio["SP500"] + volume
        self.trans_history.append(
            {
                "action": "buy",
                "time": day,
                "price": price,
                "volume": volume,
            }
        )

    def sell(self, price, volume, day):
        """Sell `volume` units at `price`; record the trade on `day`."""
        self.cash = self.cash + price * volume
        self.portfolio["SP500"] = self.portfolio["SP500"] - volume
        self.trans_history.append(
            {
                "action": "sell",
                "time": day,
                "price": price,
                "volume": volume,
            }
        )
class Algorithm:
    """Compare a panic-sell strategy (account A) against buy-and-hold
    (account B) on one simulated SP500 price path.

    Fixes vs. original: net worth is holdings * final price + cash (the
    original ADDED the price instead of multiplying), the panic threshold
    is -2% as the comments state (the original compared against +0.02,
    which triggers on almost every path), the unused shared class
    attributes were dropped, and prints use the (Python 2/3 compatible)
    call form.
    """

    def __init__(self):
        # Account A panic-sells; account B holds.
        self.accountA = Account()
        self.accountB = Account()
        self.stockprices = TradeData().brownMotion(2436)
        # Day 0: both investors buy 50000 units.
        self.accountA.purchase(self.stockprices[0], 50000, 0)
        self.accountB.purchase(self.stockprices[0], 50000, 0)
        self.begin_netA = self.accountA.portfolio["SP500"] * self.stockprices[0] + self.accountA.cash
        self.begin_netB = self.accountB.portfolio["SP500"] * self.stockprices[0] + self.accountB.cash

    def panic_sell(self):
        """Dump everything once the return since day 0 drops below -2%,
        re-enter 20+ days later (or on the final day), and return the
        net return of account A."""
        sell_date = -1
        for i in range(1, len(self.stockprices)):  # start from the second day
            return_daily = (self.stockprices[i] - self.stockprices[0]) / self.stockprices[0]
            if return_daily < -0.02 and self.accountA.portfolio["SP500"] > 0:
                self.accountA.sell(self.stockprices[i], self.accountA.portfolio["SP500"], i)
                sell_date = i
            # Parentheses preserve the original and/or precedence:
            # re-buy 20+ days after a sale, and always on the last day.
            if (sell_date != -1 and i - sell_date >= 20) or i == (len(self.stockprices) - 1):
                self.accountA.purchase(self.stockprices[i], 50000, i)
        end_netA = self.accountA.portfolio["SP500"] * self.stockprices[-1] + self.accountA.cash
        return_net = (end_netA - self.begin_netA) / self.begin_netA
        print("Return of panic selling strategy after 30 trading days is " + str(return_net))
        return return_net

    def hold_sell(self):
        """Return the net return of simply holding account B's position."""
        end_netB = self.accountB.portfolio["SP500"] * self.stockprices[-1] + self.accountB.cash
        return_net = (end_netB - self.begin_netB) / self.begin_netB
        print("Return of hold strategy after 30 trading days is " + str(return_net))
        return return_net
# --- Monte-Carlo driver -----------------------------------------------------
# Run 100 independent simulations and count how often holding beats panic
# selling.  (Python 2 print statements -- left byte-identical.)
performance_a = []
performance_b = []
count_b_better_than_a = 0
for x in range(0, 100):
    print "\n"
    print "Simulation "+ str(x) + " results:"
    a = Algorithm()
    return_a = a.panic_sell()
    performance_a.append(return_a)
    return_b = a.hold_sell()
    performance_b.append(return_b)
print "\n"
print "simulation finished \n"
print "there are " + str(len(performance_a)) + " cases in total"
for i, val in enumerate(performance_a):
    if performance_b[i]>performance_a[i]:
        count_b_better_than_a = count_b_better_than_a+1
# Share of runs (as a percentage) where holding outperformed.
percentage = float(count_b_better_than_a)/len(performance_a) * 100
print "In "+ str(percentage) + "% of 100 cases, hold strategy performs better than panic sell strategy \n\n" | {
"alphanum_fraction": 0.7017988346,
"author": null,
"avg_line_length": 30.5968992248,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "dde1a05f4a0639f9edfc2cb67354f4f73a1e116f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0ec97f42248592be06313258ec08dc07cb95cb72",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ginward/PanicSellAlgo",
"max_forks_repo_path": "algo.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0ec97f42248592be06313258ec08dc07cb95cb72",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ginward/PanicSellAlgo",
"max_issues_repo_path": "algo.py",
"max_line_length": 110,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0ec97f42248592be06313258ec08dc07cb95cb72",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ginward/PanicSellAlgo",
"max_stars_repo_path": "algo.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1159,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3947
} |
#
# Copyright 2017 Scott A Dixon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ___ _ _ ____ _ _ _ _ _
# |_ _|_ __ | |_ ___ _ __ _ __ ___| |_ / ___|| | ___ _| (_) __ _| |__ | |_
# | || '_ \| __/ _ \ '__| '_ \ / _ \ __| \___ \| |/ / | | | | |/ _` | '_ \| __|
# | || | | | || __/ | | | | | __/ |_ ___) | <| |_| | | | (_| | | | | |_
# |___|_| |_|\__\___|_| |_| |_|\___|\__| |____/|_|\_\\__, |_|_|\__, |_| |_|\__|
# |___/ |___/
import math
import numpy as np
class RectangularPixelMatrix(object):
    '''
    Rectangular matrix of pixels addressed through an OPC client.

    Pixels are a (pixel_count, 3) numpy array of RGB triples; frames
    assigned through the ``pixels`` property are scaled by the configured
    brightness and immediately pushed to the OPC server.

    Fix vs. original: ``dtype=np.float`` (an alias of the builtin float,
    removed in NumPy 1.24) is replaced by ``float`` -- same dtype, works
    on current NumPy.
    '''

    @classmethod
    def on_visit_argparse(cls, parser, subparsers):  # @UnusedVariable
        '''Register the pixel-related command-line options on ``parser``.'''
        pixel_args = parser.add_argument_group('Pixel Options')
        # Defaults describe the original panel: channel 0, 32-pixel rows, 512 pixels.
        pixel_args.add_argument("--brightness", "-b", type=float, default=1.0, help="Maximum brightness of any given pixel.", metavar="[0.0 - 1.0]")
        pixel_args.add_argument("--channel", default=0, help="OPC channel to use.")
        pixel_args.add_argument("--stride", default=32, help="Number of pixels in a row for the attached matrix")
        pixel_args.add_argument("--pixel-count", default=512, help="Total number of pixels in the attached matrix")

    def __init__(self, args, opc_client):
        '''
        :param args: parsed namespace with channel/brightness/stride/
                     pixel_count attributes (see on_visit_argparse).
        :param opc_client: client exposing put_pixels(pixels, channel=...).
        '''
        super(RectangularPixelMatrix, self).__init__()
        self._opc_client = opc_client
        self._channel = args.channel
        self._pixels = None
        self.stride = args.stride
        self.pixel_count = args.pixel_count
        self.rows = self.stride  # NOTE(review): rows == stride in the original; confirm intent
        self.brightness = args.brightness  # runs the validating setter below

    @property
    def brightness(self):
        '''Scalar brightness in [0, 1]; all channels share one value.'''
        if self._brightness is None:
            return 1.0
        return self._brightness[0][0]

    @brightness.setter
    def brightness(self, brightness):
        if brightness < 0 or brightness > 1:
            raise AttributeError("brightness must be a value from 0 to 1")
        # Full (pixel_count, 3) matrix so frames can be scaled element-wise.
        self._brightness = np.full((self.pixel_count, 3),
                                   (brightness, brightness, brightness),
                                   dtype=float)

    @property
    def pixels(self):
        # Lazily initialise to an all-black frame on first access.
        if None is self._pixels:
            self.black()
        return self._pixels

    @pixels.setter
    def pixels(self, pixels):
        if None is pixels:
            self.black()
        else:
            self._pixels = pixels
            if self._brightness is not None:
                self._pixels = np.multiply(self._brightness, self._pixels)
            self._send()

    def fill(self, pixel):
        '''Set every pixel to `pixel` (an RGB triple), brightness-scaled.'''
        self.pixels = np.full((self.pixel_count, 3),
                              pixel,
                              dtype=np.uint8)

    def black(self):
        '''Blank the matrix (all zeros) and push the frame.'''
        self._pixels = np.zeros((self.pixel_count, 3), dtype=np.uint8)
        self._send()

    def blue(self):
        '''Solid blue at the current brightness (bypasses setter scaling).'''
        self._pixels = np.full((self.pixel_count, 3),
                               (0, 0, 255 * self.brightness),
                               dtype=np.uint8)
        self._send()

    def red(self):
        '''Solid red at the current brightness (bypasses setter scaling).'''
        self._pixels = np.full((self.pixel_count, 3),
                               (255 * self.brightness, 0, 0),
                               dtype=np.uint8)
        self._send()

    def _send(self):
        '''Push the current frame to the OPC server on our channel.'''
        self._opc_client.put_pixels(self._pixels, channel=self._channel)
| {
"alphanum_fraction": 0.5538461538,
"author": null,
"avg_line_length": 38.6138613861,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8940e437dd1725425f01032224830105fd951726",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7e3cae541a099daacd138c9c237356f370e497d4",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "scottdarch/InternetSkyLight",
"max_forks_repo_path": "glue/lights/__init__.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7e3cae541a099daacd138c9c237356f370e497d4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "scottdarch/InternetSkyLight",
"max_issues_repo_path": "glue/lights/__init__.py",
"max_line_length": 148,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "7e3cae541a099daacd138c9c237356f370e497d4",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "scottdarch/InternetSkyLight",
"max_stars_repo_path": "glue/lights/__init__.py",
"max_stars_repo_stars_event_max_datetime": "2020-11-07T04:33:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-07T04:33:26.000Z",
"num_tokens": 943,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3900
} |
import numpy as np
import generators as ge
def question(n):
    """Print question number *n*, prefixed with its index."""
    text = ge.QHA[f"q{n}"]
    print(f"{n}. {text}")
def hint(n):
    """Print the hint for question number *n*."""
    key = f"h{n}"
    print(ge.QHA[key])
def answer(n):
    """Print the answer for question number *n*."""
    key = f"a{n}"
    print(ge.QHA[key])
def pick():
    """Print one randomly selected question.

    ``np.random.randint``'s upper bound is exclusive, so the original
    ``randint(1, 100)`` could only ever draw questions 1-99; assuming the
    question set is numbered 1-100, use 101 so the last question is
    reachable too.
    """
    n = np.random.randint(1, 101)
    question(n)
| {
"alphanum_fraction": 0.5657370518,
"author": null,
"avg_line_length": 11.9523809524,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "01ec48a74ea50936be3271e024fba8ac406c3eb9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4906,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T13:24:30.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-03T05:25:19.000Z",
"max_forks_repo_head_hexsha": "9c41cb36ba7f6be6c435073f90b27c4c1cbafa67",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nikitvs/numpy-100",
"max_forks_repo_path": "initialise.py",
"max_issues_count": 150,
"max_issues_repo_head_hexsha": "9c41cb36ba7f6be6c435073f90b27c4c1cbafa67",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T14:39:56.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-03-05T18:08:36.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nikitvs/numpy-100",
"max_issues_repo_path": "initialise.py",
"max_line_length": 37,
"max_stars_count": 8621,
"max_stars_repo_head_hexsha": "9c41cb36ba7f6be6c435073f90b27c4c1cbafa67",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nikitvs/numpy-100",
"max_stars_repo_path": "initialise.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T23:50:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-03T05:25:00.000Z",
"num_tokens": 80,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 251
} |
# Problem input bundle for the matching optimisation: the directed
# compatibility graph, its edge weights, the NDD vertex list, the
# cycle/chain length caps and the solver/timing configuration.
mutable struct Input
    graph::DiGraph                  # directed compatibility graph
    weights::Array{Float64,2}       # edge weight matrix
    ndds::Array{Int64,1}            # NDD vertex indices -- presumably non-directed donors; confirm
    max_cycle_length::Int64         # cap on exchange cycle length
    max_chain_length::Int64         # cap on chain length
    solver_instance::Any            # optimiser backend handle
    time_param::String              # time-limit mode selector
    time_factor::Float64            # scaling applied to the time budget
    start_time::Float64             # wall-clock start of the run
    # Inner constructor: plain field-for-field initialisation.
    function Input(
        graph::DiGraph,
        weights::Array{Float64,2},
        ndds::Array{Int64,1},
        max_cycle_length::Int64,
        max_chain_length::Int64,
        solver_instance::Any,
        time_param::String,
        time_factor::Float64,
        start_time::Float64
    )
        new(graph, weights, ndds, max_cycle_length, max_chain_length, solver_instance, time_param, time_factor, start_time)
    end
end
# Result bundle: matched vertices/edges/cycles plus objective value and
# an optimality flag.
mutable struct Output
    match_vertices::Array{Float64,1}
    match_edges::Any
    match_cycles::Union{Array{Float64,1}, Nothing}
    graph_cycles::Union{Array{Array{Int64,1},1}, Nothing}
    value::Float64
    optimal::Bool
    # Empty sentinel result: nothing matched, value = typemin (i.e. -Inf
    # stand-in), not optimal.  The original initialised match_cycles and
    # graph_cycles with `Array{Float64}[]`, which only converted to the
    # declared field types because the vectors were empty; initialise
    # with the declared element types directly.
    function Output()
        new(Float64[], Float64[], Float64[], Array{Int64,1}[], typemin(Float64), false)
    end
    # Field-for-field constructor.
    function Output(
        match_vertices::Array{Float64,1},
        match_edges::Any,
        match_cycles::Union{Array{Float64,1}, Nothing},
        graph_cycles::Union{Array{Array{Int64,1},1}, Nothing},
        value::Float64,
        optimal::Bool,
    )
        new(match_vertices, match_edges, match_cycles, graph_cycles, value, optimal)
    end
end
| {
"alphanum_fraction": 0.6523929471,
"author": null,
"avg_line_length": 28.3571428571,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "d0eee4522674a648c4db305ff6269c52c34ad370",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f2b8c05234ce9722235151e324a7026b642d4fb5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sukolsak/kpd_opt",
"max_forks_repo_path": "src_jl/transport.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f2b8c05234ce9722235151e324a7026b642d4fb5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sukolsak/kpd_opt",
"max_issues_repo_path": "src_jl/transport.jl",
"max_line_length": 123,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f2b8c05234ce9722235151e324a7026b642d4fb5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sukolsak/kpd_opt",
"max_stars_repo_path": "src_jl/transport.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 411,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1588
} |
import matplotlib.pyplot as plt
import numpy as np
# Demo: a line plot and a scatter plot side by side on one figure.
fig = plt.figure()
left = fig.add_subplot(1, 2, 1)
left.plot(np.random.randn(50), color='red')
right = fig.add_subplot(1, 2, 2)
right.scatter(np.arange(50), np.random.randn(50))
plt.show()
| {
"alphanum_fraction": 0.6819923372,
"author": null,
"avg_line_length": 23.7272727273,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "97fa024aa5ab16817a8da346e8c0ab6b3ea08269",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Matheus-IT/lang-python-related",
"max_forks_repo_path": "data_analysis/Matplotlib/12Plot_e_scatter.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Matheus-IT/lang-python-related",
"max_issues_repo_path": "data_analysis/Matplotlib/12Plot_e_scatter.py",
"max_line_length": 48,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Matheus-IT/lang-python-related",
"max_stars_repo_path": "data_analysis/Matplotlib/12Plot_e_scatter.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 81,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 261
} |
// Standard library
#include <algorithm>
#include <cstdio>    // snprintf
#include <cstdlib>   // atof
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <string.h>
#include <vector>
// Third-party
#include <boost/tokenizer.hpp>
#define DATA_FOLDERS_PATH_FORMAT "../location_001_dataset_%03d/"
#define FILE_FORMAT "location_001_ivdata_%03d.txt"
// One recorded event of a monitored device.
struct event
{
public:
    int device;        // device identifier
    char phase;        // phase tag -- TODO confirm encoding (electrical phase?)
    float time_stamp;  // event time, same time base as the data files
    int file_num;      // index of the data file containing the event
};
using namespace std;
using namespace boost;
// Format the dataset folder path for dataset `num`.
// The caller owns the returned buffer (allocated with new[]).
// The original sized the buffer as strlen(format)+1, which is only safe
// while the "%03d" expansion is no wider than the literal directive; pad
// the allocation and use snprintf so it can never overflow.
char *get_datafolder(int num)
{
    const size_t len = strlen(DATA_FOLDERS_PATH_FORMAT) + 16;
    char *arr = new char[len];
    snprintf(arr, len, DATA_FOLDERS_PATH_FORMAT, num);
    return arr;
}
// Map a global file number to "<dataset folder>/<file name>".
// Datasets 1-12 hold 400 files each; 13-16 hold the remaining ranges.
// Fixes vs. original: removes the dead (and leaked) `new char[15]` /
// "Hello" scratch buffer, frees the folder buffer from get_datafolder(),
// and builds the result with std::string instead of leaked new[] buffers.
string get_file_path(int file_num)
{
    int dataset_num;
    if (file_num <= 4800)
        dataset_num = (file_num - 1) / 400 + 1;
    else if (file_num <= 5230)
        dataset_num = 13;
    else if (file_num <= 5630)
        dataset_num = 14;
    else if (file_num <= 6030)
        dataset_num = 15;
    else
        dataset_num = 16;

    char file_name[64];
    snprintf(file_name, sizeof(file_name), FILE_FORMAT, file_num);

    char *folder = get_datafolder(dataset_num);
    string path = string(folder) + file_name;
    delete[] folder; // get_datafolder allocates with new[]
    return path;
}
// Parse a comma-separated ivdata file into rows of floats, skipping the
// 24-line header.  Returns an empty result (after printing a message) when
// the file cannot be opened.
vector<vector<float> > read_csv(string file_name)
{
    typedef tokenizer<escaped_list_separator<char> > Tokenizer;
    vector<vector<float> > rows;
    fstream file(file_name.c_str(), ios_base::in);
    if (!file)
    {
        cout << "File not found!";
        return rows;
    }
    int header_lines_left = 24;  // fixed-size preamble before the data
    string line;
    while (getline(file, line))
    {
        if (header_lines_left > 0)
        {
            --header_lines_left;
            continue;
        }
        Tokenizer tok(line);
        vector<string> fields(tok.begin(), tok.end());
        vector<float> row;
        for (size_t col = 0; col < fields.size(); ++col)
            row.push_back(atof(fields[col].c_str()));
        rows.push_back(row);
    }
    return rows;
}
// For each event, load the IV samples around its time stamp and keep the
// rows inside [time_stamp - bef_time, time_stamp + aft_time], stitching in
// the neighbouring file when the window crosses a file boundary.
// NOTE(review): the per-event column extracted into `res` is currently
// discarded — presumably a write-out step is still to be added; confirm.
void extract(event *events, int event_num, float bef_time, float aft_time)
{
    for (int i = 0; i < event_num; i++)
    {
        // BUG FIX: was get_datafolder(), which yields only the directory;
        // read_csv needs the full file path from get_file_path().
        string path = get_file_path(events[i].file_num);
        cout << events[i].file_num;
        vector<vector<float> > data, temp;
        data = read_csv(path);
        // NOTE(review): assumes read_csv returned at least one row — confirm.
        if (data[0][0] > (events[i].time_stamp - bef_time))
        {
            // Window starts before this file: prepend the previous file.
            string bef_path = get_file_path(events[i].file_num - 1);
            temp = read_csv(bef_path);
            for (size_t itr = 0; itr < data.size(); ++itr)
                temp.push_back(data[itr]);
            data = temp;
        }
        // BUG FIX: the last-row check indexed data[data[0].size()-1]
        // (column count used as a row index); use the last ROW instead.
        else if (data[data.size() - 1][0] < (events[i].time_stamp + aft_time))
        {
            // Window ends after this file: append the next file.
            string aft_path = get_file_path(events[i].file_num + 1);
            temp = read_csv(aft_path);
            for (size_t itr = 0; itr < temp.size(); ++itr)
                data.push_back(temp[itr]);
        }
        // Keep only the rows inside the event window.
        vector<vector<float> > data_events;
        for (size_t itr = 0; itr < data.size(); ++itr)
            if ((data[itr][0] > (events[i].time_stamp - bef_time)) && (data[itr][0] < (events[i].time_stamp + aft_time)))
                data_events.push_back(data[itr]);
        // Column 1 holds phase A, column 2 the other phase.
        int indx = (events[i].phase == 'A') ? 1 : 2;
        // BUG FIX: was vector<int>, silently truncating the float samples.
        vector<float> res;
        for (size_t itr = 0; itr < data_events.size(); ++itr)
            res.push_back(data_events[itr][indx]);
    }
}
int main()
{
string s("../location_001_dataset_001/location_001_ivdata_001.txt");
vector <vector<float> > f = read_csv(s);
return 0;
} | {
"alphanum_fraction": 0.5557025668,
"author": null,
"avg_line_length": 26.8014184397,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "bf24f2066e6ac5fa4996e5930cecc27fe389d3d0",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "91c90fcf217cc84d6cab7aa4c73c91d49cf0353b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "aashish-jain/fastBLUED",
"max_forks_repo_path": "extract_events.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "91c90fcf217cc84d6cab7aa4c73c91d49cf0353b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "aashish-jain/fastBLUED",
"max_issues_repo_path": "extract_events.cpp",
"max_line_length": 122,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "91c90fcf217cc84d6cab7aa4c73c91d49cf0353b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "aashish-jain/fastBLUED",
"max_stars_repo_path": "extract_events.cpp",
"max_stars_repo_stars_event_max_datetime": "2019-08-26T10:42:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-11-09T14:39:12.000Z",
"num_tokens": 951,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3779
} |
#include <gtest/gtest.h>
#include "scheme/actor/Atom.hh"
#include "ligand_factory.hh"
#include "scheme/util/SimpleArray.hh"
#include <iterator> // std::back_inserter
#include <fstream>
#include <boost/foreach.hpp>
namespace scheme { namespace chemical { namespace test {
using std::cout;
using std::endl;
// Field-by-field parsing test for LigandFactory::make_atom_pdbline,
// covering ATOM vs HETATM, each PDB column, boundary widths (overlong
// atom/residue numbers), round-tripping through io::dump_pdb_atom, and
// lines truncated before the element column.
TEST(ligand_factory,make_atom_pdbline){
	typedef util::SimpleArray<3,double> Position;
	typedef actor::Atom<Position> Atom;
	LigandFactory<Atom> f;
	std::string l;
	// record type -> ishet flag
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( false, f.make_atom_pdbline(l).data().ishet );
	l="HETATM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( true , f.make_atom_pdbline(l).data().ishet );
	// atom serial number, including overlong serials
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( 7, f.make_atom_pdbline(l).data().atomnum );
	l="ATOM 999999 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( 999999, f.make_atom_pdbline(l).data().atomnum );
	l="ATOM 9999999C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( 999999, f.make_atom_pdbline(l).data().atomnum );
	// atom name and residue name columns
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( "C3", f.make_atom_pdbline(l).data().atomname );
	l="ATOM 7 ATOMABTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( "ATOM", f.make_atom_pdbline(l).data().atomname );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( "BTN", f.make_atom_pdbline(l).data().resname );
	l="ATOM 7 ATOMABTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( "ABTN", f.make_atom_pdbline(l).data().resname );
	// chain id (blank chain -> ' ')
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( 'X', f.make_atom_pdbline(l).data().chain );
	l="ATOM 7 C3 BTN 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( ' ', f.make_atom_pdbline(l).data().chain );
	// residue number, including overlong residue numbers
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( 1, f.make_atom_pdbline(l).data().resnum );
	l="ATOM 7 C3 BTN X9999 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( 9999, f.make_atom_pdbline(l).data().resnum );
	l="ATOM 7 C3 BTN X9999 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( 9999, f.make_atom_pdbline(l).data().resnum );
	l="ATOM 7 C3 BTN X999999 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( 999999, f.make_atom_pdbline(l).data().resnum );
	// coordinates, including values that fill their column without spaces
	l="ATOM 7 C3 BTN X 1 -99999.999 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( -99999.999, f.make_atom_pdbline(l).position()[0] );
	l="ATOM 7 C3 BTN X 1 -0.470-999.999 5.377 1.00 20.00 C"; ASSERT_EQ( -999.999, f.make_atom_pdbline(l).position()[1] );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087-999.999 1.00 20.00 C"; ASSERT_EQ( -999.999, f.make_atom_pdbline(l).position()[2] );
	// occupancy and B-factor columns
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377111.00 20.00 C"; ASSERT_EQ( 111, f.make_atom_pdbline(l).data().occ );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00120.00 C"; ASSERT_EQ( 120, f.make_atom_pdbline(l).data().bfac );
	// element column, including a long trailing token
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( "C", f.make_atom_pdbline(l).data().elem );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 AAAAAAAAAA"; ASSERT_EQ( "AAAAAAAAAA", f.make_atom_pdbline(l).data().elem );
	// parse -> dump round trip must reproduce the input line exactly
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( l, io::dump_pdb_atom(f.make_atom_pdbline(l)) );
	l="ATOM 7 C BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( l, io::dump_pdb_atom(f.make_atom_pdbline(l)) );
	l="ATOM 7 C3A BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( l, io::dump_pdb_atom(f.make_atom_pdbline(l)) );
	l="ATOM 7 AC3A BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C"; ASSERT_EQ( l, io::dump_pdb_atom(f.make_atom_pdbline(l)) );
	// lines truncated before the element column still parse; elem is empty
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00"; ASSERT_EQ( false, f.make_atom_pdbline(l).data().ishet );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00"; ASSERT_EQ( 7, f.make_atom_pdbline(l).data().atomnum );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00"; ASSERT_EQ( "C3", f.make_atom_pdbline(l).data().atomname );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00"; ASSERT_EQ( "BTN", f.make_atom_pdbline(l).data().resname );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00"; ASSERT_EQ( 'X', f.make_atom_pdbline(l).data().chain );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00"; ASSERT_EQ( 1, f.make_atom_pdbline(l).data().resnum );
	l="ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00"; ASSERT_EQ( "", f.make_atom_pdbline(l).data().elem );
}
// Verify that make_biotin_minimal emits the expected 10-atom biotin
// fragment, atom by atom, by comparing each dumped PDB line against a
// golden string.
TEST(ligand_factory,make_biotin){
	typedef util::SimpleArray<3,double> Position;
	typedef actor::Atom<Position> Atom;
	LigandFactory<Atom> f;
	std::vector<Atom> btn;
	f.make_biotin_minimal(std::back_inserter(btn));
	// BOOST_FOREACH(Atom a,btn) cout << io::dump_pdb_atom(a) << endl;
	ASSERT_EQ( io::dump_pdb_atom(btn[0]), "ATOM 1 N1 BTN X 1 0.696 -12.422 3.375 1.00 20.00 N" );
	ASSERT_EQ( io::dump_pdb_atom(btn[1]), "ATOM 2 S1 BTN X 1 0.576 -9.666 5.336 1.00 20.00 S" );
	ASSERT_EQ( io::dump_pdb_atom(btn[2]), "ATOM 3 C1 BTN X 1 -0.523 -10.824 6.189 1.00 20.00 C" );
	ASSERT_EQ( io::dump_pdb_atom(btn[3]), "ATOM 4 N2 BTN X 1 -1.324 -12.123 4.201 1.00 20.00 N" );
	ASSERT_EQ( io::dump_pdb_atom(btn[4]), "ATOM 5 C2 BTN X 1 -0.608 -12.327 3.072 1.00 20.00 C" );
	ASSERT_EQ( io::dump_pdb_atom(btn[5]), "ATOM 6 O1 BTN X 1 -1.125 -12.422 1.933 1.00 20.00 O" );
	ASSERT_EQ( io::dump_pdb_atom(btn[6]), "ATOM 7 C3 BTN X 1 -0.470 -12.087 5.377 1.00 20.00 C" );
	ASSERT_EQ( io::dump_pdb_atom(btn[7]), "ATOM 8 C4 BTN X 1 0.953 -12.267 4.780 1.00 20.00 C" );
	ASSERT_EQ( io::dump_pdb_atom(btn[8]), "ATOM 9 C5 BTN X 1 1.765 -11.040 5.134 1.00 20.00 C" );
	ASSERT_EQ( io::dump_pdb_atom(btn[9]), "ATOM 10 C6 BTN X 1 -1.836 -10.395 6.850 1.00 20.00 C" );
}
TEST(ligand_factory,make_aas){
typedef util::SimpleArray<3,double> Position;
typedef actor::Atom<Position> Atom;
LigandFactory<Atom> f;
std::vector<Atom> GLY;
f.make_atoms( std::back_inserter(GLY), "GLY", false );
EXPECT_EQ( GLY.size(), 3 );
BOOST_FOREACH(Atom a,GLY) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,GLY) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> GLY_H;
f.make_atoms( std::back_inserter(GLY_H), "GLY", true );
EXPECT_EQ( GLY_H.size(), 5 );
BOOST_FOREACH(Atom a,GLY_H) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,GLY_H) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> ALA;
f.make_atoms( std::back_inserter(ALA), "ALA", false );
EXPECT_EQ( ALA.size(), 4 );
BOOST_FOREACH(Atom a,ALA) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,ALA) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> ALA_H;
f.make_atoms( std::back_inserter(ALA_H), "ALA", true );
EXPECT_EQ( ALA_H.size(), 8 );
BOOST_FOREACH(Atom a,ALA_H) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,ALA_H) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> CYS;
f.make_atoms( std::back_inserter(CYS), "CYS", false );
EXPECT_EQ( CYS.size(), 3 );
BOOST_FOREACH(Atom a,CYS) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,CYS) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> CYS_H;
f.make_atoms( std::back_inserter(CYS_H), "CYS", true );
EXPECT_EQ( CYS_H.size(), 5 );
BOOST_FOREACH(Atom a,CYS_H) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,CYS_H) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> PHE;
f.make_atoms( std::back_inserter(PHE), "PHE", false );
EXPECT_EQ( PHE.size(), 7 );
BOOST_FOREACH(Atom a,PHE) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,PHE) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> PHE_H;
f.make_atoms( std::back_inserter(PHE_H), "PHE", true );
EXPECT_EQ( PHE_H.size(), 12 );
BOOST_FOREACH(Atom a,PHE_H) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,PHE_H) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> TYR;
f.make_atoms( std::back_inserter(TYR), "TYR", false );
EXPECT_EQ( TYR.size(), 8 );
BOOST_FOREACH(Atom a,TYR) ASSERT_GT( a.type(), 0 );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,TYR) io::dump_pdb_atom(out,a); out.close();
std::vector<Atom> TYR_H;
f.make_atoms( std::back_inserter(TYR_H), "TYR", true );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,TYR_H) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,TYR_H) cout << a << endl;
EXPECT_EQ( TYR_H.size(), 12 );
BOOST_FOREACH(Atom a,TYR_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> ASP_H;
f.make_atoms( std::back_inserter(ASP_H), "ASP", true );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,ASP_H) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,ASP_H) cout << a << endl;
EXPECT_EQ( ASP_H.size(), 4 );
BOOST_FOREACH(Atom a,ASP_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> GLU_H;
f.make_atoms( std::back_inserter(GLU_H), "GLU", true );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,GLU_H) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,GLU_H) cout << a << endl;
EXPECT_EQ( GLU_H.size(), 4 );
BOOST_FOREACH(Atom a,GLU_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> ASN_H;
f.make_atoms( std::back_inserter(ASN_H), "ASN", true );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,ASN_H) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,ASN_H) cout << a << endl;
EXPECT_EQ( ASN_H.size(), 6 );
BOOST_FOREACH(Atom a,ASN_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> ASN;
f.make_atoms( std::back_inserter(ASN), "ASN", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,ASN) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,ASN) cout << a << endl;
EXPECT_EQ( ASN.size(), 4 );
BOOST_FOREACH(Atom a,ASN) ASSERT_GT( a.type(), 0 );
std::vector<Atom> GLN_H;
f.make_atoms( std::back_inserter(GLN_H), "GLN", true );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,GLN_H) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,GLN_H) cout << a << endl;
EXPECT_EQ( GLN_H.size(), 6 );
BOOST_FOREACH(Atom a,GLN_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> GLN;
f.make_atoms( std::back_inserter(GLN), "GLN", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,GLN) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,GLN) cout << a << endl;
EXPECT_EQ( GLN.size(), 4 );
BOOST_FOREACH(Atom a,GLN) ASSERT_GT( a.type(), 0 );
std::vector<Atom> TRP_H;
f.make_atoms( std::back_inserter(TRP_H), "TRP", true );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,TRP_H) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,TRP_H) cout << a << endl;
EXPECT_EQ( TRP_H.size(), 16 );
BOOST_FOREACH(Atom a,TRP_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> TRP;
f.make_atoms( std::back_inserter(TRP), "TRP", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,TRP) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,TRP) cout << a << endl;
EXPECT_EQ( TRP.size(), 10 );
BOOST_FOREACH(Atom a,TRP) ASSERT_GT( a.type(), 0 );
std::vector<Atom> LEU_H;
f.make_atoms( std::back_inserter(LEU_H), "LEU", true );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,LEU_H) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,LEU_H) cout << a << endl;
EXPECT_EQ( LEU_H.size(), 11 );
BOOST_FOREACH(Atom a,LEU_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> LEU;
f.make_atoms( std::back_inserter(LEU), "LEU", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,LEU) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,LEU) cout << a << endl;
EXPECT_EQ( LEU.size(), 4 );
BOOST_FOREACH(Atom a,LEU) ASSERT_GT( a.type(), 0 );
std::vector<Atom> VAL_H;
f.make_atoms( std::back_inserter(VAL_H), "VAL", true );
// std::ofstream vhout("testval.pdb"); BOOST_FOREACH(Atom a,VAL_H) io::dump_pdb_atom(vhout,a); vhout.close();
// BOOST_FOREACH(Atom a,VAL_H) cout << a << endl;
EXPECT_EQ( VAL_H.size(), 11 );
BOOST_FOREACH(Atom a,VAL_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> VAL;
f.make_atoms( std::back_inserter(VAL), "VAL", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,VAL) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,VAL) cout << a << endl;
EXPECT_EQ( VAL.size(), 4 );
BOOST_FOREACH(Atom a,VAL) ASSERT_GT( a.type(), 0 );
std::vector<Atom> MET_H;
f.make_atoms( std::back_inserter(MET_H), "MET", true );
// std::ofstream mhout("test.pdb"); BOOST_FOREACH(Atom a,MET_H) io::dump_pdb_atom(mhout,a); mhout.close();
// BOOST_FOREACH(Atom a,MET_H) cout << a << endl;
EXPECT_EQ( MET_H.size(), 6 );
BOOST_FOREACH(Atom a,MET_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> MET;
f.make_atoms( std::back_inserter(MET), "MET", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,MET) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,MET) cout << a << endl;
EXPECT_EQ( MET.size(), 3 );
BOOST_FOREACH(Atom a,MET) ASSERT_GT( a.type(), 0 );
std::vector<Atom> PRO_H;
f.make_atoms( std::back_inserter(PRO_H), "PRO", true );
// std::ofstream mhout("test.pdb"); BOOST_FOREACH(Atom a,PRO_H) io::dump_pdb_atom(mhout,a); mhout.close();
// BOOST_FOREACH(Atom a,PRO_H) cout << a << endl;
EXPECT_EQ( PRO_H.size(), 13 );
BOOST_FOREACH(Atom a,PRO_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> PRO;
f.make_atoms( std::back_inserter(PRO), "PRO", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,PRO) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,PRO) cout << a << endl;
EXPECT_EQ( PRO.size(), 6 );
BOOST_FOREACH(Atom a,PRO) ASSERT_GT( a.type(), 0 );
std::vector<Atom> HIS_H;
f.make_atoms( std::back_inserter(HIS_H), "HIS", true );
// std::ofstream mhout("test.pdb"); BOOST_FOREACH(Atom a,HIS_H) io::dump_pdb_atom(mhout,a); mhout.close();
// BOOST_FOREACH(Atom a,HIS_H) cout << a << endl;
EXPECT_EQ( HIS_H.size(), 8 );
BOOST_FOREACH(Atom a,HIS_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> HIS;
f.make_atoms( std::back_inserter(HIS), "HIS", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,HIS) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,HIS) cout << a << endl;
EXPECT_EQ( HIS.size(), 6 );
BOOST_FOREACH(Atom a,HIS) ASSERT_GT( a.type(), 0 );
std::vector<Atom> ARG_H;
f.make_atoms( std::back_inserter(ARG_H), "ARG", true );
// std::ofstream mhout("test.pdb"); BOOST_FOREACH(Atom a,ARG_H) io::dump_pdb_atom(mhout,a); mhout.close();
// BOOST_FOREACH(Atom a,ARG_H) cout << a << endl;
EXPECT_EQ( ARG_H.size(), 10 );
BOOST_FOREACH(Atom a,ARG_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> ARG;
f.make_atoms( std::back_inserter(ARG), "ARG", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,ARG) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,ARG) cout << a << endl;
EXPECT_EQ( ARG.size(), 5 );
BOOST_FOREACH(Atom a,ARG) ASSERT_GT( a.type(), 0 );
std::vector<Atom> LYS_H;
f.make_atoms( std::back_inserter(LYS_H), "LYS", true );
// std::ofstream mhout("test.pdb"); BOOST_FOREACH(Atom a,LYS_H) io::dump_pdb_atom(mhout,a); mhout.close();
// BOOST_FOREACH(Atom a,LYS_H) cout << a << endl;
EXPECT_EQ( LYS_H.size(), 8 );
BOOST_FOREACH(Atom a,LYS_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> LYS;
f.make_atoms( std::back_inserter(LYS), "LYS", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,LYS) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,LYS) cout << a << endl;
EXPECT_EQ( LYS.size(), 3 );
BOOST_FOREACH(Atom a,LYS) ASSERT_GT( a.type(), 0 );
std::vector<Atom> ILE_H;
f.make_atoms( std::back_inserter(ILE_H), "ILE", true );
// std::ofstream mhout("test.pdb"); BOOST_FOREACH(Atom a,ILE_H) io::dump_pdb_atom(mhout,a); mhout.close();
// BOOST_FOREACH(Atom a,ILE_H) cout << a << endl;
EXPECT_EQ( ILE_H.size(), 8 );
BOOST_FOREACH(Atom a,ILE_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> ILE;
f.make_atoms( std::back_inserter(ILE), "ILE", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,ILE) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,ILE) cout << a << endl;
EXPECT_EQ( ILE.size(), 3 );
BOOST_FOREACH(Atom a,ILE) ASSERT_GT( a.type(), 0 );
std::vector<Atom> SER_H;
f.make_atoms( std::back_inserter(SER_H), "SER", true );
// std::ofstream shout("test.pdb"); BOOST_FOREACH(Atom a,SER_H) io::dump_pdb_atom(shout,a); shout.close();
// BOOST_FOREACH(Atom a,SER_H) cout << a << endl;
EXPECT_EQ( SER_H.size(), 5 );
BOOST_FOREACH(Atom a,SER_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> SER;
f.make_atoms( std::back_inserter(SER), "SER", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,SER) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,SER) cout << a << endl;
EXPECT_EQ( SER.size(), 3 );
BOOST_FOREACH(Atom a,SER) ASSERT_GT( a.type(), 0 );
std::vector<Atom> THR_H;
f.make_atoms( std::back_inserter(THR_H), "THR", true );
// std::ofstream shout("test.pdb"); BOOST_FOREACH(Atom a,THR_H) io::dump_pdb_atom(shout,a); shout.close();
// BOOST_FOREACH(Atom a,THR_H) cout << a << endl;
EXPECT_EQ( THR_H.size(), 8 );
BOOST_FOREACH(Atom a,THR_H) ASSERT_GT( a.type(), 0 );
std::vector<Atom> THR;
f.make_atoms( std::back_inserter(THR), "THR", false );
// std::ofstream out("test.pdb"); BOOST_FOREACH(Atom a,THR) io::dump_pdb_atom(out,a); out.close();
// BOOST_FOREACH(Atom a,THR) cout << a << endl;
EXPECT_EQ( THR.size(), 4 );
BOOST_FOREACH(Atom a,THR) ASSERT_GT( a.type(), 0 );
}
}
}
}
| {
"alphanum_fraction": 0.6301059002,
"author": null,
"avg_line_length": 51.6983240223,
"converted": null,
"ext": "cc",
"file": null,
"hexsha": "e8ebee43dae19cc789d2fdb41ad5b85308b87cf9",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 14,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T12:56:17.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-02-08T01:42:28.000Z",
"max_forks_repo_head_hexsha": "cbde6bbeefd29a066273bdf2937cf36b0d2e6335",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "YaoYinYing/rifdock",
"max_forks_repo_path": "schemelib/scheme/chemical/ligand_factory.gtest.cc",
"max_issues_count": 13,
"max_issues_repo_head_hexsha": "cbde6bbeefd29a066273bdf2937cf36b0d2e6335",
"max_issues_repo_issues_event_max_datetime": "2022-03-28T11:02:44.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-01-30T17:45:57.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "YaoYinYing/rifdock",
"max_issues_repo_path": "schemelib/scheme/chemical/ligand_factory.gtest.cc",
"max_line_length": 148,
"max_stars_count": 25,
"max_stars_repo_head_hexsha": "cbde6bbeefd29a066273bdf2937cf36b0d2e6335",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "YaoYinYing/rifdock",
"max_stars_repo_path": "schemelib/scheme/chemical/ligand_factory.gtest.cc",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T04:16:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-07-23T01:03:48.000Z",
"num_tokens": 6598,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 18508
} |
import networkx.algorithms.tree.tests.test_mst
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
from graphscope.nx.utils.compat import with_graphscope_nx_context
import_as_graphscope_nx(networkx.algorithms.tree.tests.test_mst,
decorators=pytest.mark.usefixtures("graphscope_session"))
from networkx.algorithms.tree.tests.test_mst import MinimumSpanningTreeTestBase
# Re-run networkx's MinimumSpanningTreeTestBase suite against graphscope.nx
# using Boruvka's algorithm.  NaN-weight cases are skipped because orjson
# cannot serialize NaN.
@pytest.mark.usefixtures("graphscope_session")
@with_graphscope_nx_context(MinimumSpanningTreeTestBase)
class TestBoruvka:
    # Algorithm name consumed by the inherited base tests.
    algorithm = "boruvka"
    @pytest.mark.skip(reason="orjson not support nan")
    def test_unicode_name(self):
        pass
    @pytest.mark.skip(reason="orjson not support nan")
    def test_nan_weights(self):
        pass
    @pytest.mark.skip(reason="orjson not support nan")
    def test_nan_weights_order(self):
        pass
@pytest.mark.skip(reason="not support multigraph")
class MultigraphMSTTestBase:
    """Skipped wholesale: graphscope.nx does not support multigraphs."""
@pytest.mark.skip(reason="not support multigraph")
class TestKruskal:
    """Skipped wholesale: the Kruskal suite needs multigraph support."""
@pytest.mark.skip(reason="not support multigraph")
class TestPrim:
    """Skipped wholesale: the Prim suite needs multigraph support."""
| {
"alphanum_fraction": 0.7590149516,
"author": null,
"avg_line_length": 25.8409090909,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "cf1b4cb6a834868760dd530a3d10b58d04a7c3c4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9e2d77d83378f85f001b555d06e4dcbf9a6a4260",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "wuyueandrew/GraphScope",
"max_forks_repo_path": "python/graphscope/nx/algorithms/tests/forward/tree/test_mst.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9e2d77d83378f85f001b555d06e4dcbf9a6a4260",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "wuyueandrew/GraphScope",
"max_issues_repo_path": "python/graphscope/nx/algorithms/tests/forward/tree/test_mst.py",
"max_line_length": 81,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9e2d77d83378f85f001b555d06e4dcbf9a6a4260",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "wuyueandrew/GraphScope",
"max_stars_repo_path": "python/graphscope/nx/algorithms/tests/forward/tree/test_mst.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 258,
"path": null,
"reason": "import networkx,from networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 1137
} |
from torch import nn
import numpy as np
import torch
from utils import (
add_device,
get_logger,
)
logger = get_logger()
def train(model, dataloader, input_key, target_key, optimizer, loss_func,
          device=torch.device('cpu')):
    """Run one optimization pass over `dataloader`, updating `model` in place.

    NOTE(review): `dataloader` is expected to be tqdm-wrapped — it must
    expose set_postfix (see pre_train) — confirm at call sites.
    """
    running_loss = 0.0
    for step, batch in enumerate(dataloader):
        xs = add_device(batch[input_key], device)
        ys = add_device(batch[target_key], device)
        optimizer.zero_grad()
        batch_loss = loss_func(model(xs), ys)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
        # Show the running mean loss on the progress bar.
        dataloader.set_postfix(
            log={'train_loss': f'{(running_loss / (step + 1)):.5f}'})
def valid(model, dataloader, input_key, target_key,
          device=torch.device('cpu'), activation=None):
    """Run inference over `dataloader`; return (outputs, targets) as lists.

    `activation`, when given, is applied to the raw model output.
    NOTE(review): caller is responsible for model.eval()/torch.no_grad().
    """
    preds = []
    labels = []
    for batch in dataloader:
        xs = add_device(batch[input_key], device)
        ys = add_device(batch[target_key], device)
        out = model(xs)
        if activation:
            out = activation(out)
        preds.extend(out.tolist())
        labels.extend(ys.tolist())
    return preds, labels
def pre_train(epochs, model, dataloader,
              optimizer, loss_func,
              input_key, target_key,
              device=torch.device('cpu'),
              patience=5, metrics=None, activation=None):
    """Train `model` for up to `epochs` epochs with early stopping.

    The dataloader's dataset must expose train()/valid() mode switches;
    the loader is wrapped in tqdm for progress display.  `metrics`, when
    given, is called as metrics(targets, outputs) for logging only.

    Returns the best model tracked by EarlyStopping (lowest valid loss).
    """
    from tqdm import tqdm
    logger.info(model)
    model.to(device)
    early_stopping = EarlyStopping(patience=patience, verbose=True)
    criterion = loss_func
    # BUG FIX: best_model was referenced after the loop without being
    # initialized, so epochs == 0 raised UnboundLocalError; fall back to
    # the (untrained) input model.
    best_model = model
    for epoch in range(epochs):
        model.train()
        dataloader.dataset.train()
        train_data = tqdm(dataloader)
        train_data.set_description(
            f"[Epoch:{epoch+1:04d}/{epochs:04d} " +
            f"lr:{optimizer.param_groups[0]['lr']:.5f}]"
        )
        train(model, train_data, input_key, target_key,
              optimizer, criterion, device)
        with torch.no_grad():
            model.eval()
            dataloader.dataset.valid()
            outputs_data, targets_data = valid(model,
                                               dataloader,
                                               input_key,
                                               target_key,
                                               device=device,
                                               activation=activation)
            valid_loss = criterion(torch.tensor(outputs_data),
                                   torch.tensor(targets_data))
            if metrics is None:
                s = f'[Epoch:{epoch+1:04d}|valid| / '\
                    f'loss:{valid_loss:.6f}]'
            else:
                score = metrics(np.array(targets_data),
                                np.array(outputs_data))
                s = f'[Epoch:{epoch+1:04d}|valid| / '\
                    f'loss:{valid_loss:.6f} / '\
                    f'metrics:{score:.6f}]'
            logger.info(s)
            best_model = early_stopping(valid_loss, model)
            if early_stopping.early_stop:
                logger.info("Early stopping")
                break
    return best_model
def wrap_phi_to_2pi_torch(x):
    """Shift input angle x to the range of [-pi, pi]"""
    import math
    pi = math.pi
    two_pi = 2 * pi
    # Double fmod keeps the intermediate non-negative before re-centering.
    shifted = torch.fmod(x + pi, two_pi)
    return torch.fmod(shifted + two_pi, two_pi) - pi
def wrap_phi_to_2pi_numpy(x):
    """Shift input angle x to the range of [-pi, pi]"""
    import math
    pi = math.pi
    two_pi = 2 * pi
    # Double fmod keeps the intermediate non-negative before re-centering.
    shifted = np.fmod(x + pi, two_pi)
    return np.fmod(shifted + two_pi, two_pi) - pi
def set_phi_within_valid_range(x):
    """Return x with its phi column (index 2) wrapped into [-pi, pi].

    Works on both torch tensors and numpy arrays of shape (N, 3);
    other types pass through unchanged.
    """
    if isinstance(x, torch.Tensor):
        phi = wrap_phi_to_2pi_torch(x[:, 2]).unsqueeze(1)
        return torch.cat([x[:, 0:2], phi], axis=1)
    if isinstance(x, np.ndarray):
        phi = np.expand_dims(wrap_phi_to_2pi_numpy(x[:, 2]), 1)
        return np.concatenate([x[:, 0:2], phi], axis=1)
    return x
class EarlyStopping:
    """Track validation loss across epochs and flag a stop after
    `patience` consecutive epochs without improvement.

    Call the instance once per epoch with (val_loss, model); it returns a
    deep copy of the best model seen so far and sets `early_stop` when
    patience runs out.
    """

    def __init__(self, patience=7, verbose=False, save=False, path='./logs'):
        self.patience = patience    # epochs to tolerate without improvement
        self.verbose = verbose      # log loss improvements when True
        self.save = save            # also write path/checkpoint.pt on improvement
        self.path = path
        self.counter = 0            # consecutive non-improving epochs
        self.best_score = None      # -best_val_loss (higher is better)
        self.early_stop = False
        # BUG FIX: np.Inf was removed in NumPy 2.0; np.inf is the
        # canonical spelling.
        self.val_loss_min = np.inf
        self.best_model = None

    def __call__(self, val_loss, model):
        """Record this epoch's validation loss; return the best model so far."""
        from copy import deepcopy
        score = -val_loss
        if self.best_score is None:
            # First epoch: anything is an improvement.
            self.best_score = score
            self.best_model = deepcopy(
                self.save_checkpoint(val_loss, model)
            )
        elif score <= self.best_score:
            # No improvement (ties count against patience).
            self.counter += 1
            logger.info(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.counter = 0
            self.best_model = deepcopy(
                self.save_checkpoint(val_loss, model)
            )
        return self.best_model

    def save_checkpoint(self, val_loss, model):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            logger.info(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). ' +
                        'updating model ...')
        if self.save:
            from os.path import join
            save_path = join(self.path, 'checkpoint.pt')
            torch.save(model.state_dict(), save_path)
        self.val_loss_min = val_loss
        return model
class MLPBlock(nn.Module):
    """Fully-connected stack built from a list of layer widths.

    Each consecutive pair in `layers` becomes a Linear layer, optionally
    followed by BatchNorm1d; `activation` follows every layer except the
    last, which gets `activation_last` (Identity when None).  Weights are
    Xavier-initialized unless `initialize` is False.
    """

    def __init__(self,
                 layers,
                 activation,
                 activation_last=None,
                 batch_norm=False,
                 initialize=True,
                 *args,
                 **kwargs):
        super(MLPBlock, self).__init__(*args, **kwargs)
        from utils import get_module
        modules = []
        n_pairs = len(layers) - 1
        for idx in range(n_pairs):
            modules.append(nn.Linear(layers[idx], layers[idx + 1]))
            if batch_norm:
                modules.append(nn.BatchNorm1d(layers[idx + 1]))
            if idx == n_pairs - 1:
                # Final layer: use the (possibly different) last activation.
                name = 'Identity' if activation_last is None else activation_last
                modules.append(get_module([nn], name)())
            else:
                modules.append(get_module([nn], activation)())
        self._layers = nn.Sequential(*modules)
        if initialize:
            self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m):
        # Xavier-uniform weights, zero biases, Linear layers only.
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)
            nn.init.zeros_(m.bias)

    def forward(self, x):
        return self._layers(x)
class Tau4vec_MLPTask(nn.Module):
    """Two-stage MLP: embed the flattened image input, then calibrate the
    jet 4-vector from the embedding concatenated with the raw 4-vector.
    When the calibration output has 4 (or 3) components it is applied as a
    residual on the input 4-vector (or its first 3 components)."""

    def __init__(self,
                 layers_images=[768, 32, 32, 32, 4],
                 layers_calib=[8, 32, 32, 32, 4],
                 activation='ReLU',
                 batch_norm=False,
                 **kwargs):
        super(Tau4vec_MLPTask, self).__init__(**kwargs)
        self._mlp1 = MLPBlock(layers=layers_images,
                              activation=activation,
                              activation_last='Identity',
                              batch_norm=batch_norm)
        self._mlp2 = MLPBlock(layers=layers_calib,
                              activation=activation,
                              activation_last='Identity',
                              batch_norm=batch_norm)
        self._layers_calib = layers_calib
        self._len_output_vers = layers_calib[-1] * 2

    def forward(self, x):
        # x[0]: image-like input, reshaped to (N, 3, 16, 16) then flattened;
        # x[1]: jet 4-vectors -- component order assumed (.., .., phi, mass);
        # TODO(review): confirm against the dataset.
        images = x[0].reshape(-1, 3, 16, 16)
        emb = self._mlp1(images.reshape(images.size(0), -1))
        jets4 = x[1].reshape(-1, 4)
        jets3 = jets4[:, :3]  # mass is not used
        calib = self._mlp2(torch.cat((emb, jets4), dim=1))
        n_out = self._layers_calib[-1]
        if n_out == 4:
            calib = calib + jets4
        elif n_out == 3:
            calib = calib + jets3
        calib = set_phi_within_valid_range(calib)
        return calib.reshape(-1, n_out * 2)
class Conv2DBlock(nn.Module):
    """Sequential conv/pool stack built from a spec list of
    ('conv2d'|'maxpooling2d', overrides-dict) pairs."""

    def __init__(self, layers_conv2d=None, initialize=True, *args, **kwargs):
        super(Conv2DBlock, self).__init__(*args, **kwargs)
        from copy import copy
        from utils import get_module
        default_conv = {"stride": 1, "padding": 0, "activation": 'ReLU'}
        default_pool = {"kernel_size": 2, "stride": 2}
        modules = []
        # Renamed loop variables: the original rebound `args`, shadowing
        # the *args parameter.
        for kind, overrides in layers_conv2d:
            if kind == 'conv2d':
                cfg = copy(default_conv)
                cfg.update(overrides)
                act_name = cfg.pop('activation')  # not a Conv2d kwarg
                modules.append(nn.Conv2d(**cfg))
                modules.append(get_module([nn], act_name)())
            elif kind == 'maxpooling2d':
                cfg = copy(default_pool)
                cfg.update(overrides)
                modules.append(nn.MaxPool2d(**cfg))
            else:
                raise ValueError(f"{kind} is not implemented")
        self._layers = nn.Sequential(*modules)
        if initialize:
            self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m):
        # Xavier-uniform weights, zero biases, Conv2d layers only.
        if type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)
            nn.init.zeros_(m.bias)

    def forward(self, x):
        return self._layers(x)
class Tau4vec_Conv2DTask(nn.Module):
    """CNN variant of the tau 4-vector regression task.

    A Conv2DBlock encodes the 3x16x16 image, an MLP compresses the
    flattened CNN features, and a second MLP predicts a correction that
    is added to the seed-jet kinematics (residual connection).
    """

    def __init__(self,
                 layers_conv2d=(('conv2d', {'in_channels': 3, 'out_channels': 32, 'kernel_size': 3}),
                                ('conv2d', {'in_channels': 32, 'out_channels': 16, 'kernel_size': 3}),
                                ('maxpooling2d', {}),
                                ('conv2d', {'in_channels': 16, 'out_channels': 16, 'kernel_size': 2}),
                                ('conv2d', {'in_channels': 16, 'out_channels': 8, 'kernel_size': 2})),
                 layers_images=(128, 16, 16, 16, 4),
                 layers_calib=(8, 64, 64, 64, 4),
                 activation='ReLU',
                 batch_norm=False,
                 **kwargs):
        # Tuple defaults avoid the shared-mutable-default pitfall; the
        # inner option dicts are only read by Conv2DBlock, never mutated.
        super(Tau4vec_Conv2DTask, self).__init__(**kwargs)
        self._conv2d = Conv2DBlock(layers_conv2d=layers_conv2d)
        self._mlp1 = MLPBlock(layers=layers_images,
                              activation=activation,
                              activation_last='Identity',
                              batch_norm=batch_norm)
        self._mlp2 = MLPBlock(layers=layers_calib,
                              activation=activation,
                              activation_last='Identity',
                              batch_norm=batch_norm)
        self._layers_calib = layers_calib

    def forward(self, x):
        """x = (images, jets); returns (N, layers_calib[-1] * 2)."""
        fig = x[0].reshape(-1, 3, 16, 16)
        x_1 = self._conv2d(fig)
        x_1 = x_1.reshape(x_1.size(0), -1)  # flatten
        x_1 = self._mlp1(x_1)
        input_jet_reshape_4 = x[1].reshape(-1, 4)
        input_jet_reshape_3 = input_jet_reshape_4[:, :3]  # mass is not used
        x = torch.cat((x_1, input_jet_reshape_4), dim=1)
        x = self._mlp2(x)
        # Residual connection on top of the seed-jet kinematics.
        if self._layers_calib[-1] == 4:
            x = x + input_jet_reshape_4
        elif self._layers_calib[-1] == 3:
            x = x + input_jet_reshape_3
        x = set_phi_within_valid_range(x)
        output = x.reshape(-1, self._layers_calib[-1] * 2)
        return output
class SF_layer(nn.Module):
    """Learnable per-component affine transform: ``y = x * sf + bias``.

    ``sf`` starts at 1 and ``bias`` at 0, so the layer is initially the
    identity.
    """

    def __init__(self, input_dim):
        super(SF_layer, self).__init__()
        # torch.ones/torch.zeros replace torch.Tensor(np.ones(...)):
        # the same float32 initial values without a numpy round-trip.
        self.sf = nn.Parameter(torch.ones(input_dim))
        self.bias = nn.Parameter(torch.zeros(input_dim))

    def forward(self, x):
        return x * self.sf + self.bias
class Tau4vec_SFTask(nn.Module):
    """Calibrate seed-jet kinematics with a single learnable affine layer."""

    def __init__(self, n_input_vars=8, n_output_vars=6, n_jets=2):
        super(Tau4vec_SFTask, self).__init__()
        self.sf_layer = SF_layer(input_dim=(1, n_output_vars//2))
        self.n_input_vars = n_input_vars
        self.n_output_vars = n_output_vars
        self.n_jets = n_jets

    def forward(self, x):
        # One row per jet: split the flat input into per-jet variables.
        per_jet = x[1].reshape(-1, self.n_input_vars//self.n_jets)
        if self.n_output_vars == 6:
            per_jet = per_jet[:, :3]  # mass is not used
        calibrated = self.sf_layer(per_jet)
        calibrated = set_phi_within_valid_range(calibrated)
        return calibrated.reshape(-1, self.n_output_vars)
class HiggsID_MLPTask(nn.Module):
    """Simple MLP classifier head for the Higgs-ID task."""

    def __init__(self,
                 layers=(8, 32, 32, 32, 1),
                 activation='ReLU',
                 activation_last='Identity',
                 batch_norm=False,
                 **kwargs):
        # Tuple default avoids the shared-mutable-default pitfall.
        super(HiggsID_MLPTask, self).__init__(**kwargs)
        self.mlp = MLPBlock(layers=layers,
                            activation=activation,
                            activation_last=activation_last,
                            batch_norm=batch_norm)

    def forward(self, x):
        x = self.mlp(x)
        return x
class LSTMBlock(nn.Module):
    """Stack of nn.LSTM layers with optional BatchNorm1d and activation.

    ``layers`` gives the feature sizes; LSTM i maps layers[i] ->
    layers[i+1].  The custom ``forward`` unpacks the (output, state)
    tuple that nn.LSTM returns so the layers can be chained.
    """

    def __init__(self,
                 layers,
                 activation=None,
                 batch_norm=False,
                 initialize=True,
                 *args,
                 **kwargs):
        super(LSTMBlock, self).__init__(*args, **kwargs)
        from collections import OrderedDict
        from utils import get_module
        _layers = OrderedDict()
        # One nn.LSTM per consecutive pair of sizes in `layers`.
        for i, node in enumerate(layers):
            if i == len(layers) - 1:
                break
            else:
                _layers[f'LSTM{i}'] = nn.LSTM(layers[i], layers[i+1])
        if batch_norm:
            # NOTE(review): LSTM outputs are (seq, batch, feat) while
            # BatchNorm1d expects (batch, feat) -- confirm intended use.
            _layers['batchnorm1d'] = nn.BatchNorm1d(layers[-1])
        if activation is not None:
            # The activation's name doubles as its key in the OrderedDict.
            _layers[activation] = get_module([nn], activation)()
        self._layers = nn.Sequential(_layers)
        if initialize:
            self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m):
        # Xavier for input weights, orthogonal for recurrent weights,
        # zeros for biases.
        if type(m) == nn.LSTM:
            for name, param in m.named_parameters():
                if 'weight_ih' in name:
                    nn.init.xavier_uniform_(param.data)
                elif 'weight_hh' in name:
                    nn.init.orthogonal_(param.data)
                elif 'bias' in name:
                    param.data.fill_(0)

    def forward(self, x):
        # Sequential cannot be called directly: nn.LSTM returns an
        # (output, (h_n, c_n)) tuple, so iterate and drop the state.
        for layer in self._layers:
            if type(layer) == nn.LSTM:
                x, _ = layer(x)
            else:
                x = layer(x)
        return x
class HiggsID_LSTMTask(nn.Module):
    """LSTM over the per-jet variables followed by a small MLP head."""

    def __init__(self,
                 layers_lstm=(4, 32, 32, 32, 1),
                 layers_mlp=(1, 1),
                 activation_last='Identity',
                 batch_norm=False,
                 n_jets=2,
                 **kwargs):
        # Tuple defaults avoid the shared-mutable-default pitfall.
        super(HiggsID_LSTMTask, self).__init__(**kwargs)
        self.layers_lstm = layers_lstm
        self.n_jets = n_jets
        self.lstm = LSTMBlock(layers=layers_lstm,
                              batch_norm=batch_norm)
        self.mlp = MLPBlock(layers=layers_mlp,
                            activation='Identity',
                            activation_last=activation_last,
                            batch_norm=batch_norm)

    def forward(self, x):
        # Reshape to (n_jets, batch, features): nn.LSTM expects the
        # sequence dimension first, so jets play the role of time steps.
        x = torch.transpose(
            x.reshape(-1, self.n_jets, self.layers_lstm[0]),
            1, 0)
        x = self.lstm(x)[-1]  # keep only the last step of the sequence
        x = self.mlp(x)
        return x
class HiggsID_MassTask(nn.Module):
    """Classify events from the reconstructed di-tau invariant mass.

    The (non-learnable) ``mass_layer`` sums the per-jet 4-vectors and
    computes the invariant mass, which is scaled by ``scale_mass`` and
    fed to a small MLP.
    """

    def __init__(self,
                 layers=(1, 64, 64, 1),
                 activation='ReLU',
                 activation_last='Identity',
                 batch_norm=False,
                 scale_mass=1./125.,
                 n_jets=2,
                 n_input_vars=8,
                 **kwargs):
        # Tuple default avoids the shared-mutable-default pitfall.
        super(HiggsID_MassTask, self).__init__(**kwargs)
        self.scale_mass = scale_mass
        self.n_input_vars = n_input_vars
        self.n_jets = n_jets
        self.mlp = MLPBlock(layers=layers,
                            activation=activation,
                            activation_last=activation_last,
                            batch_norm=batch_norm)

    def forward(self, x):
        x = self.mass_layer(x, self.n_jets, self.n_input_vars)
        x = x * self.scale_mass  # normalize the mass scale for the MLP
        x = self.mlp(x)
        return x

    @staticmethod
    def mass_layer(tau_4vec, n_jets, n_input_vars):
        """Invariant mass of the summed per-jet 4-vectors.

        Per-jet columns are (stored pt, eta, phi, ...) where the stored
        pt equals log(pt + 0.1); a fixed mass of 1.777 is assumed for
        every jet.  Returns a (batch, 1) tensor.
        """
        tau_4vec = tau_4vec.reshape(-1, n_jets, n_input_vars // n_jets)
        # Clamp before exp() so pt stays finite for extreme inputs.
        pt = torch.exp(
            torch.clamp(tau_4vec[:, :, 0], min=-7., max=7.)
        ) - 0.1
        eta = tau_4vec[:, :, 1]
        phi = tau_4vec[:, :, 2]
        mass = 1.777
        px = pt * torch.cos(phi)
        py = pt * torch.sin(phi)
        pz = pt * torch.sinh(torch.clamp(eta, min=-5, max=5))
        epsilon = 0.1  # avoid nan when e=0. sqrt(x)^' = -1/2 * 1/sqrt(x)
        e = torch.sqrt(
            epsilon + px**2 + py**2 + pz**2 + mass**2
        )
        tau_4vec = torch.stack([px, py, pz, e], dim=2)
        tau_4vec = torch.sum(tau_4vec, dim=1)  # sum the jets per event
        px, py, pz, e = torch.chunk(tau_4vec, chunks=4, dim=1)
        mass = torch.sqrt(
            epsilon + e**2 - (px**2 + py**2 + pz**2)
        )
        return mass
class SubTask_Gaussian(nn.Module):
    """Dummy sub-task: ignores the input values and emits sigmoid(noise).

    With ``sigma == 0`` every output is exactly sigmoid(0) = 0.5.
    """

    def __init__(self, in_len=8, sigma=1.0):
        super(SubTask_Gaussian, self).__init__()
        self.sigma = sigma
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Multiplying by 0.0 keeps the batch shape but erases the input.
        logits = x.sum(axis=1) * 0.0
        if self.sigma != 0:
            sampled_noise = torch.empty_like(logits).normal_() * self.sigma
            logits = logits + sampled_noise
        return self.sigmoid(logits).reshape(-1, 1)
| {
"alphanum_fraction": 0.5295876003,
"author": null,
"avg_line_length": 33.6405959032,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a4ee4c10df61d3eadc29442a375ee5a3fb32e8dc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5f926c2291a55f57419aa0130d07e2a793fc7353",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "UTokyo-ICEPP/multiml_htautau",
"max_forks_repo_path": "examples/pytorch/models/sub_task.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5f926c2291a55f57419aa0130d07e2a793fc7353",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "UTokyo-ICEPP/multiml_htautau",
"max_issues_repo_path": "examples/pytorch/models/sub_task.py",
"max_line_length": 102,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5f926c2291a55f57419aa0130d07e2a793fc7353",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "UTokyo-ICEPP/multiml_htautau",
"max_stars_repo_path": "examples/pytorch/models/sub_task.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4333,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 18065
} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 15:23:20 2020
@author: grat05
"""
import sys
import os
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir)))
#from iNa_models import Koval_ina, OHaraRudy_INa
import atrial_model
from atrial_model.iNa.models import OHaraRudy_INa, Koval_ina
import atrial_model.run_sims_functions
from atrial_model.run_sims_functions import peakCurr, normalized2val, calcExpTauInact, monoExp,\
calcExpTauAct, triExp, biExp
from atrial_model.run_sims import calc_diff
from atrial_model.iNa.define_sims import sim_fs, datas,\
keys_all, exp_parameters
from atrial_model.iNa.model_setup import model, mp_locs, sub_mps, sub_mp_bounds, model_params_initial
from atrial_model.parse_cmd_args import args
import atrial_model.run_sims_functions
from atrial_model.run_sims import calc_results, SimResults
from atrial_model.iNa.define_sims import sim_fs, datas, keys_all, exp_parameters
from atrial_model.iNa.model_setup import model_params_initial, mp_locs, sub_mps, model
#from atrial_model.iNa.stat_model import make_model
from atrial_model.iNa.stat_model_3 import StatModel, key_frame
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from functools import partial
import pickle
import pymc3 as pm
#import pymc
import datetime
import numpy as np
import pickle
from threading import Timer
from multiprocessing import Manager
from functools import partial
import os
import copy
# ---------------------------------------------------------------------------
# Experimental-data selection.  Each entry is a (publication-figure id,
# dataset name) key; commented-out entries/sections are deliberately
# excluded from the fit.
# ---------------------------------------------------------------------------
keys_keep = []
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# #iv curve
keys_iin = [
    ('1323431_1', 'Dataset B'), ('1323431_3', 'Dataset A 2'),
    ('1323431_3', 'Dataset A 20'), ('1323431_3', 'Dataset A 5'),
    ('1323431_4', 'Dataset B Control'),
    ('7971163_1', 'Dataset'),
    ('8928874_7', 'Dataset C day 1'), ('8928874_7', 'Dataset C day 3'),
    ('8928874_7', 'Dataset C day 5'), ('8928874_7', 'Dataset C fresh'),
    ('21647304_1', 'Dataset B Adults'), #('21647304_1', 'Dataset B Pediatrics'),
    ('12890054_3', 'Dataset C Control'), ('12890054_3', 'Dataset D Control'),
    ('12890054_5', 'Dataset C Control'), #('12890054_5', 'Dataset D Control'),
    ('23341576_2', 'Dataset C Control')
]
keys_keep += keys_iin
#idealized current traces
keys_iin = [('21647304_2', 'Dataset B Adults'), ('21647304_2', 'Dataset B Pediactric'),
            ('8928874_8', 'Dataset D fresh'), ('8928874_8', 'Dataset D day 1'),\
            ('8928874_8', 'Dataset D day 3'), ('8928874_8', 'Dataset D day 5'),
            ('7971163_3', 'Dataset C')
]
# keys_iin = [key for key_in in keys_iin
#             for key in sim_fs
#             if key[0] == key_in[0] and key_in[1] in key[1]]
keys_keep += keys_iin
##activation normalized to driving force
keys_iin = [
    ('1323431_2', 'Dataset'),\
    ('8928874_7', 'Dataset D fresh'), ('8928874_7', 'Dataset D day 1'),\
    ('8928874_7', 'Dataset D day 3'), ('8928874_7', 'Dataset D day 5'),\
    ('21647304_3', 'Dataset A Adults'), ('21647304_3', 'Dataset A Pediatrics')
]
keys_keep += keys_iin
# I2/I1 Recovery
keys_iin = [('1323431_8', 'Dataset A -140'), ('1323431_8', 'Dataset A -120'),\
            ('1323431_8', 'Dataset A -100'),\
            ('21647304_3', 'Dataset C Adults'), ('21647304_3', 'Dataset C Pediatrics'),\
            ('8928874_9', 'Dataset fresh'), ('8928874_9', 'Dataset day 1'),\
            ('8928874_9', 'Dataset day 3'), ('8928874_9', 'Dataset day 5')
]
keys_keep += keys_iin
# # #recovery normalized to preprepulse
keys_iin = [\
    ('7971163_6', 'Dataset -75'),\
    ('7971163_6', 'Dataset -85'),\
    ('7971163_6', 'Dataset -95'),\
    ('7971163_6', 'Dataset -105'),\
    ('7971163_6', 'Dataset -115'),
    ('7971163_6', 'Dataset -125'),\
    ('7971163_6', 'Dataset -135')
]
keys_keep += keys_iin
#inactivation normalized to no prepulse
keys_iin = [
    # ('7971163_4', 'Dataset 32ms'), ('7971163_4', 'Dataset 64ms'),
    # ('7971163_4', 'Dataset 128ms'),
    ('7971163_4', 'Dataset 256ms'), ('7971163_4', 'Dataset 512ms'),\
    ('8928874_8', 'Dataset C fresh'), ('8928874_8', 'Dataset C day 1'),\
    ('8928874_8', 'Dataset C day 3'), ('8928874_8', 'Dataset C day 5'),
    ('21647304_3', 'Dataset B Adults'), ('21647304_3', 'Dataset B Pediatrics')
]
keys_keep += keys_iin
# #inactivation normalized to first
keys_iin = [('7971163_5', 'Dataset A -65'), ('7971163_5', 'Dataset A -75'),\
            ('7971163_5', 'Dataset A -85'), ('7971163_5', 'Dataset A -95'),\
            ('7971163_5', 'Dataset A -105')
]
keys_keep += keys_iin
#tau inactivation
# keys_iin = [('8928874_8', 'Dataset E fresh'), ('8928874_8', 'Dataset E day 1'),\
#             ('8928874_8', 'Dataset E day 3'), ('8928874_8', 'Dataset E day 5')]#,\
#             ('1323431_5', 'Dataset B fast'),\
#             ('21647304_2', 'Dataset C Adults'), ('21647304_2', 'Dataset C Pediactric')]
#keys_keep += keys_iin
# #####tau activation
# keys_iin = [('8928874_8', 'Dataset D fresh'), ('8928874_8', 'Dataset D day 1'),\
#             ('8928874_8', 'Dataset D day 3'), ('8928874_8', 'Dataset D day 5'),
#             ('7971163_3', 'Dataset C')]
# keys_keep += keys_iin
# #tau inactivation fast & slow
# keys_iin = [('21647304_2', 'Dataset C Adults'), ('21647304_2', 'Dataset D Adults'),\
#             ('21647304_2', 'Dataset C Pediactric'), ('21647304_2', 'Dataset D Pediactric')]
# #('1323431_5', 'Dataset B fast'),('1323431_5', 'Dataset B slow'),\
# keys_keep += keys_iin
# #tau inactivation normalized to first
# keys_iin = [('1323431_6', 'Dataset -80'), ('1323431_6', 'Dataset -100')]
# keys_keep += keys_iin
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Restrict the imported simulations/data/groups to the selected keys,
# dropping any group left empty by the selection.
keys_keep = set(keys_keep)
sim_fs = {key: sim_f for key, sim_f in sim_fs.items() if key in keys_keep}
datas = {key: data for key, data in datas.items() if key in keys_keep}
keys_keep_grps = [[key for key in key_grp if key in keys_keep] for key_grp in keys_all]
keys_keep_grps = [grp for grp in keys_keep_grps if len(grp) > 0]
if __name__ == '__main__':
    # Disable the per-simulation diagnostic plots.
    atrial_model.run_sims_functions.plot1 = False #sim
    atrial_model.run_sims_functions.plot2 = False #diff
    atrial_model.run_sims_functions.plot3 = False #tau
    # NOTE(review): this rebinds the imported function name `key_frame`
    # to its result; a distinct variable name would be clearer.
    key_frame = key_frame(keys_keep_grps, exp_parameters)
    map_res = {}
    map_res['keys'] = list(key_frame.index)
    map_res['keys_grps'] = keys_keep_grps
    map_res['sub_MAP'] = {}
    # Output path: ./MAP_<model>_<MMDD>_<HHMM>.pickle under args.out_dir.
    model_name = './MAP_'
    model_name += args.model_name
    model_name += '_{cdate.month:02d}{cdate.day:02d}_{cdate.hour:02d}{cdate.minute:02d}'
    model_name = model_name.format(cdate=datetime.datetime.now())
    db_path = args.out_dir+'/'+model_name+'.pickle'
    # Accumulators for a weighted average of the per-group MAP fits,
    # used later as the starting point of the full-model MAP fit.
    start = {'model_param_intercept': np.zeros(len(mp_locs)),
             'b_temp': np.zeros(len(mp_locs)),
             'model_param_sigma': np.zeros(len(mp_locs)),
             'error_sigma': np.zeros(len(keys_keep_grps)),
             'model_param': []
             }
    counts = np.zeros(len(mp_locs), dtype=int)
    with Pool() as proc_pool:
        calc_fn = partial(calc_results, model_parameters_full=model_params_initial,\
                          mp_locs=mp_locs, data=datas,error_fill=0,\
                          pool=proc_pool)
        run_biophysical = SimResults(calc_fn=calc_fn, sim_funcs=sim_fs, disp_print=False)
        # Stage 1: independent MAP fit for each experiment group.
        for i,key_grp in enumerate(keys_keep_grps):
            sub_key_frame = key_frame.loc[key_grp]
            grp_name = sub_key_frame['Sim Group'].unique()[0]
            print(grp_name)
            with StatModel(run_biophysical, sub_key_frame, datas,
                           mp_locs, model) as stat_model:
                map_estimates = pm.find_MAP(model=stat_model, include_transformed=False
                                            , method='powell', maxeval=10000)
            map_res['sub_MAP'][grp_name] = map_estimates
            # Only parameters this group actually constrained
            # (posterior sigma above threshold) enter the average.
            fitted = map_estimates['model_param_sigma'] > 1e-3
            grp_size = len(key_grp)*fitted
            counts += grp_size
            start['model_param_intercept'] += grp_size*map_estimates['model_param_intercept']
            start['b_temp'] += grp_size*map_estimates['b_temp']
            start['model_param_sigma'] += grp_size*map_estimates['model_param_sigma']
            start['error_sigma'][i] = map_estimates['error_sigma']
            model_param = map_estimates['model_param'].copy()
            model_param[:, ~fitted] = np.nan  # mark unfitted for backfill
            start['model_param'].append(model_param)
        # Turn the accumulated sums into weighted averages.
        start['model_param_intercept'] /= counts
        start['b_temp'] /= counts
        start['model_param_sigma'] /= counts
        start['model_param'] = np.concatenate(start['model_param'], axis=0)
        # Backfill parameters no group fitted with the
        # linear-in-temperature population mean.
        for i in range(len(mp_locs)):
            unfit = np.isnan(start['model_param'][:,i])
            mean_vals = start['model_param_intercept'][i] +\
                key_frame['temp ( K )']*start['b_temp'][i]
            start['model_param'][unfit,i] = mean_vals[unfit]
        map_res['start'] = copy.deepcopy(start)
        # Stage 2: MAP fit of the full hierarchical model, warm-started
        # from the averaged per-group estimates.
        with StatModel(run_biophysical, key_frame, datas,
                       mp_locs, model) as stat_model:
            #method='powell'method='Nelder-Mead'
            print("full model")
            map_estimates = pm.find_MAP(start=start, model=stat_model, include_transformed=False
                                        , method='powell', maxeval=40000)
        map_res['MAP'] = map_estimates
    with open(db_path, 'wb') as file:
        pickle.dump(map_res, file)
| {
"alphanum_fraction": 0.6125702606,
"author": null,
"avg_line_length": 36.9245283019,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e55d356affd234b574eaebb673611e43c5353b32",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "144017625ba5244c4fb431cccb347f7f2b4853b1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hundlab/iNaCells2021Code",
"max_forks_repo_path": "atrial_model/iNa/scripts/fit_MAP.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "144017625ba5244c4fb431cccb347f7f2b4853b1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hundlab/iNaCells2021Code",
"max_issues_repo_path": "atrial_model/iNa/scripts/fit_MAP.py",
"max_line_length": 101,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "144017625ba5244c4fb431cccb347f7f2b4853b1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hundlab/iNaCells2021Code",
"max_stars_repo_path": "atrial_model/iNa/scripts/fit_MAP.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2774,
"path": null,
"reason": "import numpy,import pymc3",
"repo": null,
"save_path": null,
"sha": null,
"size": 9785
} |
From Tweetnacl Require Import Libs.Export.

Open Scope Z.

(* Branchless selection mask: maps 0 to 0 and 1 to -1 (all bits set),
   as witnessed by the two lemmas below. *)
Definition set_xor (i:Z) := Z.lnot (i - 1).

Lemma set_xor_0 : set_xor 0 = 0.
Proof. reflexivity. Qed.

Lemma set_xor_1 : set_xor 1 = -1.
Proof. reflexivity. Qed.

(* AND with the zero mask erases a value... *)
Lemma land_0 : forall i, Z.land 0 i = 0.
Proof. intro. go. Qed.

(* ...while AND with the all-ones mask preserves it. *)
Lemma land_minus_1 : forall i, Z.land (-1) i = i.
Proof. intro. apply Z.land_m1_l. Qed.

Close Scope Z.
| {
"alphanum_fraction": null,
"author": "ildyria",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/ildyria-coq-verif-tweetnacl/coq-verif-tweetnacl-8181ab4406cefd03ab0bd53d4063eb1644a2673d/proofs/spec/Low/Binary_select.v",
"reason": null,
"repo": "coq-verif-tweetnacl",
"save_path": "github-repos/coq/ildyria-coq-verif-tweetnacl",
"sha": "8181ab4406cefd03ab0bd53d4063eb1644a2673d",
"size": null
} |
#= ######################################################
model structs
###################################################### =#
# Mutable boundary element used while building a mesh: two node indices
# plus derived geometry (center of mass, unit normal, and "area",
# which in 2D is the element's line length).
mutable struct ElementRaw{T1<:Integer, T2<:AbstractFloat}
    node1::T1
    node2::T1
    com::Point2D{T2}
    nvec::Point2D{T2}
    area::T2
end
# Immutable counterpart of ElementRaw; produced by make_model_immutable
# once the mesh is finalized.
struct Element{T1<:Integer, T2<:AbstractFloat}
    node1::T1
    node2::T1
    com::Point2D{T2}
    nvec::Point2D{T2}
    area::T2
end
# One discretized geometry piece (edge, rectangle, circle, ...):
# its nodes, its elements, and their counts.
struct Part{T1<:Integer, T2<:AbstractFloat}
    nodes::Vector{Point2D{T2}}
    elem::Vector{ElementRaw{T1,T2}}
    nnodes::T1
    nelem::T1
end
# Inclusive range [first, last] of element indices belonging to one part
# inside the global element vector (see add!).
struct ElementAssign{T<:Integer}
    first::T
    last::T
end
# Mutable model under construction: global node/element vectors, the
# element-to-part assignment, and running counts updated by add!.
mutable struct ModelRaw{T1<:Integer, T2<:AbstractFloat}
    nodes::Vector{Point2D{T2}}
    elem::Vector{ElementRaw{T1,T2}}
    elem2par::Vector{ElementAssign{T1}}
    nnodes::T1
    nelem::T1
    npar::T1
end
# Finalized, immutable model; see make_model_immutable.
struct Model{T1<:Integer, T2<:AbstractFloat}
    nodes::Vector{Point2D{T2}}
    elem::Vector{Element{T1,T2}}
    elem2par::Vector{ElementAssign{T1}}
    nnodes::T1
    nelem::T1
    npar::T1
end
#= ######################################################
Functions for working with structs
###################################################### =#
# Construct a ModelRaw with no nodes, elements, or parts.
function create_empty_model()
    nodes = Vector{Point2D{Float64}}()
    elems = Vector{ElementRaw{Int64,Float64}}()
    assign = Vector{ElementAssign{Int64}}()
    return ModelRaw(nodes, elems, assign, 0, 0, 0)
end
# Euclidean length of a 2D vector.
function norm(v::Point2D)
    return sqrt(v.x^2 + v.y^2)
end
# Scale `v` to unit length.
function normit(v::Point2D)::Point2D
    return v / norm(v)
end
# Shift every element's node indices by `offset` (used when appending a
# part to a model whose node list already has entries).
function add_offset!(p::Part, offset::Integer)
    for k = 1:p.nelem
        el = p.elem[k]
        el.node1 += offset
        el.node2 += offset
    end
end
# Midpoint ("center of mass") of the line element from p1 to p2.
function get_com(p1::Point2D{T}, p2::Point2D{T})::Point2D{T} where T<:AbstractFloat
    half_span = (p2 - p1) / 2
    return p1 + half_span
end
# Unit normal of the line from p1 to p2; dir == "neg" flips its sign.
function get_nvec(p1::Point2D{T}, p2::Point2D{T}, dir::String)::Point2D{T} where T<:AbstractFloat
    raw = Point2D((-1) * (p2.y - p1.y), (p2.x - p1.x))
    (dir == "neg") && (raw = raw * (-1.0))
    return normit(raw)
end
# Length of the line element from p1 to p2; in this 2D code the length
# serves as the element "area".
function get_length(p1::Point2D, p2::Point2D)
    return sqrt((p1.x-p2.x)^2 + (p1.y-p2.y)^2)
end
# Append a part to the model: nodes first, then the part's elements with
# their connectivity shifted past the existing nodes, then bookkeeping.
function add!(m::ModelRaw, p::Part)
    append!(m.nodes, p.nodes)
    # m.nnodes still holds the pre-append count here, which is exactly
    # the offset the part's node indices need.
    add_offset!(p, m.nnodes)
    append!(m.elem, p.elem)
    push!(m.elem2par, ElementAssign(m.nelem+1, m.nelem+p.nelem))
    m.nnodes += p.nnodes
    m.nelem += p.nelem
    m.npar += 1
end
# Bounding box of the first nnodes nodes, returned as the pair
# (Point2D(xmin, ymin), Point2D(xmax, ymax)).
function get_nodes_min_and_max(m)
    xmin, xmax = extrema(m.nodes[i].x for i = 1:m.nnodes)
    ymin, ymax = extrema(m.nodes[i].y for i = 1:m.nnodes)
    return Point2D(xmin, ymin), Point2D(xmax, ymax)
end
# Translate the whole model so all coordinates become non-negative,
# shifting nodes and element centers by the bounding-box minimum.
function offset_model!(m::ModelRaw)
    nmin, _ = get_nodes_min_and_max(m)
    for k = 1:m.nnodes
        m.nodes[k] = m.nodes[k] - nmin
    end
    for k = 1:m.nelem
        m.elem[k].com = m.elem[k].com - nmin
    end
end
# Convert the mutable ModelRaw into its immutable Model counterpart,
# copying each ElementRaw into an Element.
function make_model_immutable(m::ModelRaw)::Model
    elems = Vector{Element{Int64,Float64}}(undef, m.nelem)
    for k = 1:m.nelem
        raw = m.elem[k]
        elems[k] = Element(raw.node1, raw.node2, raw.com, raw.nvec, raw.area)
    end
    return Model(m.nodes, elems, m.elem2par, m.nnodes, m.nelem, m.npar)
end
#= ######################################################
Define discretized geometries
###################################################### =#
"""
    edge(p1, p2; seed=10, dir="pos")

Discretize the straight line from `p1` to `p2` into `seed` elements
(`seed + 1` nodes).  `dir` selects the normal orientation ("pos" or
"neg").  Returns a `Part`.
"""
function edge(p1::Point2D, p2::Point2D; seed::T = 10, dir::String = "pos") where T<:Integer
    # Fix: the docstring used to sit inside the body as a bare string
    # statement; in Julia a docstring must precede the definition.
    dx = (p2.x - p1.x) / seed
    dy = (p2.y - p1.y) / seed
    nnodes = seed+1
    nodes = Vector{Point2D{Float64}}(undef,nnodes)
    for i = 1:nnodes
        nodes[i] = Point2D(p1.x + (i-1) * dx, p1.y + (i-1) * dy)
    end
    nelements = seed
    elements = Vector{ElementRaw{Int64,Float64}}(undef,nelements)
    for i = 1:nelements
        i1 = i
        i2 = i + 1
        com = get_com(nodes[i1], nodes[i2])
        nv = get_nvec(nodes[i1], nodes[i2], dir)
        a = get_length(nodes[i1], nodes[i2])
        elements[i] = ElementRaw(i1, i2, com, nv, a)
    end
    return Part(nodes, elements, nnodes, nelements)
end
"""
    rectangle(x, y, c; seedx=10, seedy=10, dir="pos")

Closed rectangular boundary of width `x` and height `y` centered at
`c`, with `seedx`/`seedy` elements per horizontal/vertical side.
Returns a `Part`.
"""
function rectangle(x::T1, y::T1, c::Point2D; seedx::T2 = 10, seedy::T2 = 10, dir::String = "pos") where {T1<:Real, T2<:Integer}
    # The four corners of the rectangle.
    p1 = c + Point2D(-0.5*x, -0.5*y)
    p2 = c + Point2D(-0.5*x, 0.5*y)
    p3 = c + Point2D(0.5*x, 0.5*y)
    p4 = c + Point2D(0.5*x, -0.5*y)
    dx = (p3.x - p2.x) / seedx
    dy = (p2.y - p1.y) / seedy
    nnodes = 2*seedx + 2*seedy
    nodes = Vector{Point2D{Float64}}(undef,nnodes)
    # Walk the boundary side by side, one node per element start.
    for i = 1:seedy
        nodes[i] = Point2D(p1.x, p1.y + (i-1) * dy)
    end
    offset = seedy
    for i = 1:seedx
        nodes[offset + i] = Point2D(p2.x + (i-1) * dx, p2.y)
    end
    offset = seedy + seedx
    for i = 1:seedy
        nodes[offset + i] = Point2D(p3.x, p3.y - (i-1) * dy)
    end
    offset = seedy + seedx + seedy
    for i = 1:seedx
        nodes[offset + i] = Point2D(p4.x - (i-1) * dx, p4.y)
    end
    nelements = nnodes
    elements = Vector{ElementRaw{Int64,Float64}}(undef,nelements)
    for i = 1:nelements-1
        # Fix: local center renamed `com` -- it used to shadow the
        # rectangle-center argument `c`.
        com = get_com(nodes[i], nodes[i+1])
        nv = get_nvec(nodes[i], nodes[i+1], dir)
        a = get_length(nodes[i], nodes[i+1])
        elements[i] = ElementRaw(i, i+1, com, nv, a)
    end
    # Closing element from the last node back to the first.
    com = get_com(nodes[nnodes], nodes[1])
    nv = get_nvec(nodes[nnodes], nodes[1], dir)
    a = get_length(nodes[nnodes], nodes[1])
    elements[end] = ElementRaw(nnodes, 1, com, nv, a)
    return Part(nodes, elements, nnodes, nelements)
end
"""
    circle(d, c; seed=12, dir="pos")

Closed circular boundary of diameter `d` centered at `c`, discretized
into `seed` elements.  Returns a `Part`.
"""
function circle(d::T1, c::Point2D; seed::T2 = 12, dir::String = "pos") where {T1<:Real, T2<:Integer}
    r = d/2
    phi = 2 * pi / seed
    nnodes = seed
    nodes = Vector{Point2D{Float64}}(undef,nnodes)
    for i = 1:nnodes
        nodes[i] = Point2D(c.x + r * cos(i*phi), c.y + r * sin(i*phi))
    end
    nelements = seed
    elements = Vector{ElementRaw{Int64,Float64}}(undef,nelements)
    for i = 1:nelements-1
        # Fix: local center renamed `com` -- it used to shadow the
        # circle-center argument `c`.
        com = get_com(nodes[i], nodes[i+1])
        nv = get_nvec(nodes[i], nodes[i+1], dir)
        a = get_length(nodes[i], nodes[i+1])
        elements[i] = ElementRaw(i, i+1, com, nv, a)
    end
    # Closing element back to the first node.
    com = get_com(nodes[nnodes], nodes[1])
    nv = get_nvec(nodes[nnodes], nodes[1], dir)
    a = get_length(nodes[nnodes], nodes[1])
    elements[end] = ElementRaw(nnodes, 1, com, nv, a)
    return Part(nodes, elements, nnodes, nelements)
end
"""
    circle_open(d, c; seed=12, leftout=2, open="right", dir="pos")

Circular arc: a circle of diameter `d` centered at `c` with `leftout`
of the `seed` segments omitted, leaving the gap on the `open` side
("right" or "left").  Returns a `Part`.
"""
function circle_open(d::T1, c::Point2D; seed::T2 = 12, leftout::T2 = 2, open::String = "right", dir::String = "pos") where {T1<:Real, T2 <: Integer}
    r = d/2
    phi = 2 * pi / seed
    # Start half of the left-out span past the gap center.
    phio = 2 * pi / seed * (leftout/2) # phi offset
    if open == "left"
        phio = phio + 1 * pi
    end
    nnodes = seed-leftout+1
    nodes = Vector{Point2D{Float64}}(undef,nnodes)
    for i = 1:nnodes
        nodes[i] = Point2D(c.x + r * cos((i-1) * phi + phio), c.y + r * sin((i-1) * phi + phio))
    end
    nelements = seed - leftout
    elements = Vector{ElementRaw{Int64,Float64}}(undef,nelements)
    for i = 1:nelements
        # Fix: local center renamed `com` -- it used to shadow the
        # circle-center argument `c`.
        com = get_com(nodes[i], nodes[i+1])
        nv = get_nvec(nodes[i], nodes[i+1], dir)
        a = get_length(nodes[i], nodes[i+1])
        elements[i] = ElementRaw(i, i+1, com, nv, a)
    end
    return Part(nodes, elements, nnodes, nelements)
end
"""
    cosinus(a, b, p1; seed=30, dir="pos")

One period of a cosine curve with amplitude `a` and horizontal scale
`b`, starting at `p1`, discretized into `seed` elements.  Returns a
`Part`.
"""
function cosinus(a::T1, b::T1, p1::Point2D; seed::T2 = 30, dir::String = "pos") where {T1<:Real, T2<:Integer}
    nnodes = seed+1
    nodes = Vector{Point2D{Float64}}(undef,nnodes)
    for i = 1:nnodes
        nodes[i] = Point2D(p1.x + b*(i-1)*2*pi/(seed), p1.y + a*cos((i-1)*2*pi/(seed)))
    end
    nelements = seed
    elements = Vector{ElementRaw{Int64,Float64}}(undef,nelements)
    for i = 1:nelements
        # Fix: element length renamed `elen` -- the original reassigned
        # the amplitude parameter `a` inside this loop.
        com = get_com(nodes[i], nodes[i+1])
        nv = get_nvec(nodes[i], nodes[i+1], dir)
        elen = get_length(nodes[i], nodes[i+1])
        elements[i] = ElementRaw(i, i+1, com, nv, elen)
    end
    return Part(nodes, elements, nnodes, nelements)
end
#= ######################################################
Other functions
###################################################### =#
"""
    element_analysis(m::Model; printit = true)

Mean/min/max element length over all parts of `m`.  With
`printit = true` the statistics are printed; otherwise they are
returned as `(mean, min, max)`.
"""
function element_analysis(m::Model; printit = true)
    area = [m.elem[i].area for i = m.elem2par[1].first:m.elem2par[end].last]
    e_length_mean = sum(area) / m.nelem
    e_length_min = minimum(area)
    e_length_max = maximum(area)
    if printit
        println("Element length:")
        println("    Mean: ", e_length_mean)
        println("    Min: ", e_length_min)
        println("    Max: ", e_length_max)
    else
        # Fix: the original returned undefined variables
        # (t_max, t_min, t_mean_occ, n_occ/n_all), apparently copied
        # from another function; return the computed statistics instead.
        return e_length_mean, e_length_min, e_length_max
    end
end
"alphanum_fraction": 0.5644504749,
"author": null,
"avg_line_length": 30.5127388535,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "7fda341f24cb627c644b449aa4df20e31d28b365",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "eefd2d1af31e97a795df3a1b2c8af728330a320b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bueschgens/RadMod2D",
"max_forks_repo_path": "src/mesh2D.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "eefd2d1af31e97a795df3a1b2c8af728330a320b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bueschgens/RadMod2D",
"max_issues_repo_path": "src/mesh2D.jl",
"max_line_length": 148,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "eefd2d1af31e97a795df3a1b2c8af728330a320b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bueschgens/RadMod2D",
"max_stars_repo_path": "src/mesh2D.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3307,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 9581
} |
(*<*)
(*
* Copyright 2015, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
theory Noninterference
imports
Global_Invariants_Lemmas
Local_Invariants_Lemmas
Tactics
begin
(*>*)
section\<open> Noninterference \<close>
(* The mutator's deletion-barrier locations lie inside its
   mo_valid_ref locations. *)
lemma mut_del_barrier1_subseteq_mut_mo_valid_ref_locs[locset_cache]: (* FIXME rename *)
  "mut_m.del_barrier1_locs \<subseteq> mut_m.mo_valid_ref_locs"
unfolding mut_m.del_barrier1_locs_def mut_m.mo_valid_ref_locs_def by (auto intro: append_prefixD)
(* Likewise for the mutator's insertion-barrier locations. *)
lemma mut_del_barrier2_subseteq_mut_mo_valid_ref[locset_cache]: (* FIXME rename *)
  "mut_m.ins_barrier_locs \<subseteq> mut_m.mo_valid_ref_locs"
unfolding mut_m.ins_barrier_locs_def mut_m.mo_valid_ref_locs_def by (auto intro: append_prefixD)
(* Location-set inclusions for the GC process.  They let the handshake
   facts proved for larger location sets be transported to the more
   specific sets used below. *)
context gc
begin

lemma obj_fields_marked_locs_subseteq_hp_IdleMarkSweep_locs:
  "obj_fields_marked_locs \<subseteq> hp_IdleMarkSweep_locs"
unfolding gc.obj_fields_marked_locs_def gc.hp_IdleMarkSweep_locs_def gc.mark_loop_locs_def gc.mark_loop_mo_locs_def
apply (clarsimp simp: locset_cache loc_defs)
apply (drule mp)
apply (auto intro: append_prefixD)
done

lemma obj_fields_marked_locs_subseteq_hs_in_sync_locs:
  "obj_fields_marked_locs \<subseteq> hs_in_sync_locs"
unfolding obj_fields_marked_locs_def hs_in_sync_locs_def hs_done_locs_def mark_loop_mo_locs_def
by (auto simp: loc_defs dest: prefix_same_cases)

lemma obj_fields_marked_good_ref_subseteq_hp_IdleMarkSweep_locs:
  "obj_fields_marked_good_ref_locs \<subseteq> hp_IdleMarkSweep_locs"
unfolding obj_fields_marked_good_ref_locs_def mark_loop_locs_def hp_IdleMarkSweep_locs_def mark_loop_mo_locs_def
apply (clarsimp simp: loc_defs)
apply (drule mp)
apply (auto intro: append_prefixD)
done

lemma mark_loop_mo_mark_loop_field_done_subseteq_hs_in_sync_locs:
  "obj_fields_marked_good_ref_locs \<subseteq> hs_in_sync_locs"
unfolding obj_fields_marked_good_ref_locs_def hs_in_sync_locs_def mark_loop_mo_locs_def hs_done_locs_def
by (auto simp: loc_defs dest: prefix_same_cases)

lemma no_grey_refs_locs_subseteq_hs_in_sync_locs:
  "no_grey_refs_locs \<subseteq> hs_in_sync_locs"
by (auto simp: no_grey_refs_locs_def black_heap_locs_def hs_in_sync_locs_def hs_done_locs_def sweep_locs_def loc_defs
         dest: prefix_same_cases)

lemma get_roots_UN_get_work_locs_subseteq_gc_W_empty_locs:
  "get_roots_UN_get_work_locs \<subseteq> gc_W_empty_locs"
unfolding get_roots_UN_get_work_locs_def
by (auto simp: hs_get_roots_locs_def hs_get_work_locs_def gc_W_empty_locs_def)

end
(* Cache the inclusions just proved so automated locset reasoning
   can pick them up. *)
declare
  gc.obj_fields_marked_locs_subseteq_hp_IdleMarkSweep_locs[locset_cache]
  gc.obj_fields_marked_locs_subseteq_hs_in_sync_locs[locset_cache]
  gc.obj_fields_marked_good_ref_subseteq_hp_IdleMarkSweep_locs[locset_cache]
  gc.mark_loop_mo_mark_loop_field_done_subseteq_hs_in_sync_locs[locset_cache]
  gc.no_grey_refs_locs_subseteq_hs_in_sync_locs[locset_cache]
  gc.get_roots_UN_get_work_locs_subseteq_gc_W_empty_locs[locset_cache]
(* At the GC's object-fields-marked locations, the system handshake
   phase is IdleMarkSweep and all mutators are in sync. *)
lemma handshake_obj_fields_markedD:
  "\<lbrakk> atS gc gc.obj_fields_marked_locs s; gc.handshake_invL s \<rbrakk> \<Longrightarrow> sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep \<and> All (ghost_hs_in_sync (s\<down> sys))"
unfolding gc.handshake_invL_def
by (metis (no_types, lifting) atS_mono gc.obj_fields_marked_locs_subseteq_hp_IdleMarkSweep_locs gc.obj_fields_marked_locs_subseteq_hs_in_sync_locs)
(* The same handshake facts at the good-ref variant of the
   object-fields-marked locations. *)
lemma obj_fields_marked_good_ref_locs_hp_phaseD:
  "\<lbrakk> atS gc gc.obj_fields_marked_good_ref_locs s; gc.handshake_invL s \<rbrakk>
     \<Longrightarrow> sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep \<and> All (ghost_hs_in_sync (s\<down> sys))"
unfolding gc.handshake_invL_def
by (metis (no_types, lifting) atS_mono gc.mark_loop_mo_mark_loop_field_done_subseteq_hs_in_sync_locs gc.obj_fields_marked_good_ref_subseteq_hp_IdleMarkSweep_locs)
(* Applying a pending mw_Mutate store from mutator m's TSO buffer to
   the heap preserves validity of references reachable from x: any y
   reachable after the store was already a valid reference before. *)
lemma gc_marking_reaches_Mutate:
  assumes xys: "\<forall>y. (x reaches y) s \<longrightarrow> valid_ref y s"
  assumes xy: "(x reaches y) (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)),
                                              mem_store_buffers := (mem_store_buffers (s sys))(p := ws)\<rparr>))"
  assumes sb: "sys_mem_store_buffers (mutator m) s = mw_Mutate r f opt_r' # ws"
  assumes vri: "valid_refs_inv s"
  shows "valid_ref y s"
proof -
  (* Induct along the reachability path; each step either existed
     before the store or goes through a reference held in the
     mutator's TSO buffer, which valid_refs_inv covers. *)
  from xy xys
  have "\<exists>z. z \<in> {x} \<union> mut_m.tso_store_refs m s \<and> (z reaches y) s \<and> valid_ref y s"
  proof induct
    case (refl x) then show ?case by auto
  next
    case (step x y z) with sb vri show ?case
      apply (clarsimp simp: points_to_Mutate)
      apply (elim disjE)
         apply (metis (no_types, lifting) obj_at_cong reaches_def rtranclp.rtrancl_into_rtrancl)
        apply (metis (no_types, lifting) obj_at_def option.case(2) reaches_def rtranclp.rtrancl_into_rtrancl valid_refs_invD(4))
       apply clarsimp
       apply (elim disjE)
        apply (rule exI[where x=z])
        apply (clarsimp simp: mut_m.tso_store_refs_def)
        apply (rule valid_refs_invD(3)[where m=m and x=z], auto simp: mut_m.tso_store_refs_def; fail)[1]
       apply (metis (no_types, lifting) obj_at_cong reaches_def rtranclp.rtrancl_into_rtrancl)
      apply clarsimp
      apply (elim disjE)
       apply (rule exI[where x=z])
       apply (clarsimp simp: mut_m.tso_store_refs_def)
       apply (rule valid_refs_invD(3)[where m=m and x=z], auto simp: mut_m.tso_store_refs_def)[1]
      apply (metis (no_types, lifting) obj_at_def option.case(2) reaches_def rtranclp.rtrancl_into_rtrancl valid_refs_invD(4))
      done
  qed
  then show ?thesis by blast
qed
(* Steps of the sys process preserve the GC's obj_fields_marked invariant.
   The proof case-splits on which kind of TSO store-buffer entry w is being
   dequeued; the non-interfering kinds are discharged by the nie rules. *)
lemma (in sys) gc_obj_fields_marked_invL[intro]:
notes filter_empty_conv[simp]
notes fun_upd_apply[simp]
shows
"\<lbrace> gc.fM_fA_invL \<^bold>\<and> gc.handshake_invL \<^bold>\<and> gc.obj_fields_marked_invL
\<^bold>\<and> LSTP (fM_rel_inv \<^bold>\<and> handshake_phase_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> tso_store_inv \<^bold>\<and> valid_refs_inv \<^bold>\<and> valid_W_inv) \<rbrace>
sys
\<lbrace> gc.obj_fields_marked_invL \<rbrace>"
proof(vcg_jackhammer (keep_locs) (no_thin_post_inv), vcg_name_cases)
case (tso_dequeue_store_buffer s s' p w ws) show ?case
proof(cases w)
case (mw_Mark ref mark) with tso_dequeue_store_buffer show ?thesis
apply -
apply (clarsimp simp: p_not_sys gc.obj_fields_marked_invL_def)
apply (intro conjI impI; clarsimp)
apply (frule (1) handshake_obj_fields_markedD)
apply (clarsimp simp: gc.obj_fields_marked_def)
apply (frule (1) valid_W_invD)
apply (drule_tac x=x in spec)
apply clarsimp
apply (erule obj_at_field_on_heapE)
apply (force split: obj_at_splits)
apply (force split: obj_at_splits)
apply (erule obj_at_field_on_heapE)
apply (clarsimp split: obj_at_splits; fail)
apply (clarsimp split: obj_at_splits)
apply (metis valid_W_invD(1))
apply (metis valid_W_invD(1))
apply (force simp: valid_W_invD(1) split: obj_at_splits)
done
next case (mw_Mutate r f opt_r') with tso_dequeue_store_buffer show ?thesis
apply -
apply (clarsimp simp: p_not_sys gc.obj_fields_marked_invL_def)
apply (erule disjE; clarsimp)
apply (rename_tac m)
apply (drule_tac m=m in mut_m.handshake_phase_invD; clarsimp simp: hp_step_rel_def)
apply (drule_tac x=m in spec)
apply (intro conjI impI; clarsimp simp: obj_at_field_on_heap_imp_valid_ref gc_marking_reaches_Mutate split: option.splits)
subgoal for m
apply (frule (1) handshake_obj_fields_markedD)
apply (elim disjE; auto simp: gc.obj_fields_marked_def split: option.splits)
done
subgoal for m r'
apply (frule (1) obj_fields_marked_good_ref_locs_hp_phaseD)
apply (elim disjE; clarsimp simp: marked_insertionD)
done
done
next case (mw_Mutate_Payload r f pl) with tso_dequeue_store_buffer show ?thesis by - (erule gc_obj_fields_marked_invL_niE; clarsimp)
next case (mw_fA mark) with tso_dequeue_store_buffer show ?thesis by - (erule gc_obj_fields_marked_invL_niE; clarsimp)
next case (mw_fM mark) with tso_dequeue_store_buffer show ?thesis
apply -
apply (clarsimp simp: p_not_sys fM_rel_inv_def fM_rel_def gc.obj_fields_marked_invL_def)
apply (erule disjE; clarsimp)
apply (intro conjI impI; clarsimp)
apply (metis (no_types, lifting) handshake_obj_fields_markedD hs_phase.distinct(7))
apply (metis (no_types, lifting) hs_phase.distinct(7) obj_fields_marked_good_ref_locs_hp_phaseD)
apply (metis (no_types, lifting) UnCI elem_set hs_phase.distinct(7) gc.obj_fields_marked_good_ref_locs_def obj_fields_marked_good_ref_locs_hp_phaseD option.simps(15) thin_locs_pre_keep_atSE)
done
next case (mw_Phase ph) with tso_dequeue_store_buffer show ?thesis
by - (erule gc_obj_fields_marked_invL_niE; clarsimp)
qed
qed
subsection\<open>The infamous termination argument\<close>
(* gc_W_empty_mut_inv depends only on sys's work list, the mutator's own work
   list, and its ghost in-sync flag; used to derive fun_upd congruence rules. *)
lemma (in mut_m) gc_W_empty_mut_inv_eq_imp:
"eq_imp (\<lambda>m'. sys_W \<^bold>\<otimes> WL (mutator m') \<^bold>\<otimes> sys_ghost_hs_in_sync m')
gc_W_empty_mut_inv"
by (simp add: eq_imp_def gc_W_empty_mut_inv_def)
lemmas gc_W_empty_mut_inv_fun_upd[simp] = eq_imp_fun_upd[OF mut_m.gc_W_empty_mut_inv_eq_imp, simplified eq_imp_simps, rule_format]
(* Analogous dependency statement for the GC-local invariant gc_W_empty_invL. *)
lemma (in gc) gc_W_empty_invL_eq_imp:
"eq_imp (\<lambda>(m', p) s. (AT s gc, s\<down> gc, sys_W s\<down>, WL p s\<down>, sys_ghost_hs_in_sync m' s\<down>))
gc_W_empty_invL"
by (simp add: eq_imp_def gc_W_empty_invL_def mut_m.gc_W_empty_mut_inv_def no_grey_refs_def grey_def)
lemmas gc_W_empty_invL_niE[nie] =
iffD1[OF gc.gc_W_empty_invL_eq_imp[simplified eq_imp_simps, rule_format, unfolded conj_explode, rule_format], rotated -1]
(* If every mutator satisfies gc_W_empty_mut_inv, all mutators are in sync, and
   both the GC's and sys's work lists are empty, then no grey refs remain. *)
lemma gc_W_empty_mut_inv_load_W:
"\<lbrakk> \<forall>m. mut_m.gc_W_empty_mut_inv m s; \<forall>m. sys_ghost_hs_in_sync m s; WL gc s = {}; WL sys s = {} \<rbrakk>
\<Longrightarrow> no_grey_refs s"
apply (clarsimp simp: mut_m.gc_W_empty_mut_inv_def no_grey_refs_def grey_def)
apply (rename_tac x xa)
apply (case_tac xa)
apply (simp_all add: WL_def)
done
context gc
begin
(* Initiating a handshake (resetting the ghost in-sync flags, possibly also the
   ghost phase) does not disturb gc_W_empty_mut_inv. *)
lemma gc_W_empty_mut_inv_hs_init[iff]:
"mut_m.gc_W_empty_mut_inv m (s(sys := s sys\<lparr>hs_type := ht, ghost_hs_in_sync := \<langle>False\<rangle>\<rparr>))"
"mut_m.gc_W_empty_mut_inv m (s(sys := s sys\<lparr>hs_type := ht, ghost_hs_in_sync := \<langle>False\<rangle>, ghost_hs_phase := hp' \<rparr>))"
by (simp_all add: mut_m.gc_W_empty_mut_inv_def)
(* GC steps preserve gc_W_empty_invL; the two load_W completion points are the
   interesting cases and reduce to gc_W_empty_mut_inv_load_W. *)
lemma gc_W_empty_invL[intro]:
notes fun_upd_apply[simp]
shows
"\<lbrace> handshake_invL \<^bold>\<and> obj_fields_marked_invL \<^bold>\<and> gc_W_empty_invL \<^bold>\<and> LSTP valid_W_inv \<rbrace>
gc
\<lbrace> gc_W_empty_invL \<rbrace>"
apply (vcg_jackhammer; (clarsimp elim: gc_W_empty_mut_inv_load_W simp: WL_def)?)
proof vcg_name_cases
case (mark_loop_get_work_done_loop s s') then show ?case
by (simp add: WL_def gc_W_empty_mut_inv_load_W valid_W_inv_sys_ghg_empty_iff)
next case (mark_loop_get_roots_done_loop s s') then show ?case
by (simp add: WL_def gc_W_empty_mut_inv_load_W valid_W_inv_sys_ghg_empty_iff)
qed
end
(* sys steps preserve the GC's gc_W_empty_invL; discharged by vcg_chainsaw. *)
lemma (in sys) gc_gc_W_empty_invL[intro]:
notes fun_upd_apply[simp]
shows
"\<lbrace> gc.gc_W_empty_invL \<rbrace> sys"
by vcg_chainsaw
(* At the get-roots/get-work locations the GC's ghost honorary grey set is
   empty (a consequence of obj_fields_marked_invL). *)
lemma empty_WL_GC:
"\<lbrakk> atS gc gc.get_roots_UN_get_work_locs s; gc.obj_fields_marked_invL s \<rbrakk> \<Longrightarrow> gc_ghost_honorary_grey s\<down> = {}"
unfolding gc.obj_fields_marked_invL_def
using atS_mono[OF _ gc.get_roots_UN_get_work_locs_subseteq_ghost_honorary_grey_empty_locs]
apply metis
done
(* At those locations the handshake phase is hp_IdleMarkSweep and the pending
   handshake is one of GetWork/GetRoots. *)
lemma gc_hs_get_roots_get_workD:
"\<lbrakk> atS gc gc.get_roots_UN_get_work_locs s; gc.handshake_invL s \<rbrakk>
\<Longrightarrow> sys_ghost_hs_phase s\<down> = hp_IdleMarkSweep \<and> sys_hs_type s\<down> \<in> {ht_GetWork, ht_GetRoots}"
unfolding gc.handshake_invL_def
apply clarsimp
apply (metis (no_types, lifting) atS_mono atS_un gc.get_roots_UN_get_work_locs_def gc.hs_get_roots_locs_subseteq_hp_IdleMarkSweep_locs gc.hs_get_work_locs_subseteq_hp_IdleMarkSweep_locs)
done
context gc
begin
(* At the no_grey_refs locations every mutator's ghost phase is
   hp_IdleMarkSweep and all mutators are in sync. *)
lemma handshake_sweep_mark_endD:
"\<lbrakk> atS gc no_grey_refs_locs s; handshake_invL s; handshake_phase_inv s\<down> \<rbrakk>
\<Longrightarrow> mut_m.mut_ghost_hs_phase m s\<down> = hp_IdleMarkSweep \<and> All (ghost_hs_in_sync (s\<down> sys))"
apply (simp add: gc.handshake_invL_def)
apply (elim conjE)
apply (drule mp, erule atS_mono[OF _ gc.no_grey_refs_locs_subseteq_hs_in_sync_locs])
apply (drule mut_m.handshake_phase_invD)
apply (simp only: gc.no_grey_refs_locs_def cong del: atS_state_weak_cong)
apply (clarsimp simp: atS_un)
apply (elim disjE)
apply (drule mp, erule atS_mono[where ls'="gc.hp_IdleMarkSweep_locs"])
apply (clarsimp simp: gc.black_heap_locs_def locset_cache)
apply (clarsimp simp: hp_step_rel_def)
apply blast
apply (drule mp, erule atS_mono[where ls'="gc.hp_IdleMarkSweep_locs"])
apply (clarsimp simp: hp_IdleMarkSweep_locs_def hp_step_rel_def)
apply (clarsimp simp: hp_step_rel_def)
apply blast
apply (clarsimp simp: atS_simps locset_cache hp_step_rel_def)
apply blast
done
(* A mutator greying a white root r while the GC sits between get-roots and
   get-work preserves gc_W_empty_mut_inv for every mutator m'. The four goal
   cases correspond to the possible handshake-phase steps. *)
lemma gc_W_empty_mut_mo_co_mark:
"\<lbrakk> \<forall>x. mut_m.gc_W_empty_mut_inv x s\<down>; mutators_phase_inv s\<down>;
mut_m.mut_ghost_honorary_grey m s\<down> = {};
r \<in> mut_m.mut_roots m s\<down> \<union> mut_m.mut_ghost_honorary_root m s\<down>; white r s\<down>;
atS gc get_roots_UN_get_work_locs s; gc.handshake_invL s; gc.obj_fields_marked_invL s;
atS gc gc_W_empty_locs s \<longrightarrow> gc_W s\<down> = {};
handshake_phase_inv s\<down>; valid_W_inv s\<down> \<rbrakk>
\<Longrightarrow> mut_m.gc_W_empty_mut_inv m' (s\<down>(mutator m := s\<down> (mutator m)\<lparr>ghost_honorary_grey := {r}\<rparr>))"
apply (frule (1) gc_hs_get_roots_get_workD)
apply (frule_tac m=m in mut_m.handshake_phase_invD)
apply (clarsimp simp: hp_step_rel_def simp del: Un_iff)
apply (elim disjE, simp_all)
proof(goal_cases before_get_work past_get_work before_get_roots after_get_roots)
case before_get_work then show ?thesis
apply (clarsimp simp: mut_m.gc_W_empty_mut_inv_def)
apply blast
done
next case past_get_work then show ?thesis
apply (clarsimp simp: mut_m.gc_W_empty_mut_inv_def)
apply (frule spec[where x=m], clarsimp)
apply (frule (2) mut_m.reachable_snapshot_inv_white_root)
apply clarsimp
apply (drule grey_protects_whiteD)
apply (clarsimp simp: grey_def)
apply (rename_tac g p)
apply (case_tac p; clarsimp)
(* mutator *)
apply blast
(* Can't be the GC *)
apply (frule (1) empty_WL_GC)
apply (drule mp, erule atS_mono[OF _ get_roots_UN_get_work_locs_subseteq_gc_W_empty_locs])
apply (clarsimp simp: WL_def; fail)
(* Can't be sys *)
apply (clarsimp simp: WL_def valid_W_inv_sys_ghg_empty_iff; fail)
done
next case before_get_roots then show ?case
apply (clarsimp simp: mut_m.gc_W_empty_mut_inv_def)
apply blast
done
next case after_get_roots then show ?case
apply (clarsimp simp: mut_m.gc_W_empty_mut_inv_def)
apply (frule spec[where x=m], clarsimp)
apply (frule (2) mut_m.reachable_snapshot_inv_white_root)
apply clarsimp
apply (drule grey_protects_whiteD)
apply (clarsimp simp: grey_def)
apply (rename_tac g p)
apply (case_tac p; clarsimp)
(* mutator *)
apply blast
(* Can't be the GC *)
apply (frule (1) empty_WL_GC)
apply (drule mp, erule atS_mono[OF _ get_roots_UN_get_work_locs_subseteq_gc_W_empty_locs])
apply (clarsimp simp: WL_def; fail)
(* Can't be sys *)
apply (clarsimp simp: WL_def valid_W_inv_sys_ghg_empty_iff; fail)
done
qed
(* Greying a white root cannot create a grey reference when no grey refs exist
   and the GC is at one of the listed locations: no_grey_refs rules out white
   roots entirely. One disjunct per location alternative. *)
lemma no_grey_refs_mo_co_mark:
"\<lbrakk> mutators_phase_inv s\<down>;
no_grey_refs s\<down>;
gc.handshake_invL s;
at gc mark_loop s \<or> at gc mark_loop_get_roots_load_W s \<or> at gc mark_loop_get_work_load_W s \<or> atS gc no_grey_refs_locs s;
r \<in> mut_m.mut_roots m s\<down> \<union> mut_m.mut_ghost_honorary_root m s\<down>; white r s\<down>;
handshake_phase_inv s\<down> \<rbrakk>
\<Longrightarrow> no_grey_refs (s\<down>(mutator m := s\<down> (mutator m)\<lparr>ghost_honorary_grey := {r}\<rparr>))"
apply (elim disjE)
apply (clarsimp simp: atS_simps gc.handshake_invL_def locset_cache)
apply (frule mut_m.handshake_phase_invD)
apply (clarsimp simp: hp_step_rel_def)
apply (drule spec[where x=m])
apply (clarsimp simp: conj_disj_distribR[symmetric])
apply (simp add: handshake_in_syncD mut_m.no_grey_refs_not_rootD; fail)
apply (clarsimp simp: atS_simps gc.handshake_invL_def locset_cache)
apply (frule mut_m.handshake_phase_invD)
apply (clarsimp simp: hp_step_rel_def)
apply (drule spec[where x=m])
apply (simp add: handshake_in_syncD mut_m.no_grey_refs_not_rootD; fail)
apply (clarsimp simp: atS_simps gc.handshake_invL_def locset_cache)
apply (frule mut_m.handshake_phase_invD)
apply (clarsimp simp: hp_step_rel_def)
apply (drule spec[where x=m])
apply (simp add: handshake_in_syncD mut_m.no_grey_refs_not_rootD; fail)
apply (frule (2) handshake_sweep_mark_endD)
apply (drule spec[where x=m])
apply clarsimp
apply (simp add: handshake_in_syncD mut_m.no_grey_refs_not_rootD; fail)
done
end
context mut_m
begin
(* Mutator steps preserve the GC's gc_W_empty_invL; the handshake-completion
   cases are the interesting ones. *)
lemma gc_W_empty_invL[intro]:
notes gc.gc_W_empty_mut_mo_co_mark[simp]
notes gc.no_grey_refs_mo_co_mark[simp]
notes fun_upd_apply[simp]
shows
"\<lbrace> handshake_invL \<^bold>\<and> mark_object_invL \<^bold>\<and> tso_lock_invL
\<^bold>\<and> mut_get_roots.mark_object_invL m
\<^bold>\<and> mut_store_del.mark_object_invL m
\<^bold>\<and> mut_store_ins.mark_object_invL m
\<^bold>\<and> gc.handshake_invL \<^bold>\<and> gc.obj_fields_marked_invL
\<^bold>\<and> gc.gc_W_empty_invL
\<^bold>\<and> LSTP (handshake_phase_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> valid_W_inv) \<rbrace>
mutator m
\<lbrace> gc.gc_W_empty_invL \<rbrace>"
proof(vcg_chainsaw gc.gc_W_empty_invL_def, vcg_name_cases)
case (hs_noop_done s s' x) then show ?case
unfolding gc.handshake_invL_def
by (metis atS_un gc.get_roots_UN_get_work_locs_def hs_type.distinct(1) hs_type.distinct(3))
next case (hs_get_roots_done0 s s' x) then show ?case
apply (clarsimp simp: mut_m.gc_W_empty_mut_inv_def WL_def)
apply (metis (no_types, lifting))
done
next case (hs_get_work_done0 s s' x) then show ?case
apply (clarsimp simp: mut_m.gc_W_empty_mut_inv_def WL_def)
apply (metis (no_types, lifting))
done
qed (simp_all add: no_grey_refs_def)
end
context gc
begin
(* GC steps preserve the mutator's store-del mark_object invariant.
   NOTE(review): the lemma name says store_old while the conclusion concerns
   mut_store_del — presumably historical naming; confirm before renaming. *)
lemma mut_store_old_mark_object_invL[intro]:
notes fun_upd_apply[simp]
shows
"\<lbrace> fM_fA_invL \<^bold>\<and> handshake_invL \<^bold>\<and> sweep_loop_invL \<^bold>\<and> gc_W_empty_invL
\<^bold>\<and> mut_m.mark_object_invL m
\<^bold>\<and> mut_store_del.mark_object_invL m
\<^bold>\<and> LSTP (handshake_phase_inv \<^bold>\<and> mut_m.mutator_phase_inv m) \<rbrace>
gc
\<lbrace> mut_store_del.mark_object_invL m \<rbrace>"
apply (vcg_chainsaw mut_m.mark_object_invL_def mut_m.mut_store_del_mark_object_invL_def2) \<comment> \<open>\<open>at gc sweep_loop_free s\<close>\<close>
apply (metis (no_types, lifting) handshake_in_syncD mut_m.mutator_phase_inv_aux.simps(5) mut_m.no_grey_refs_not_rootD obj_at_cong white_def)+
done
(* Same preservation for the store-ins mark_object invariant. *)
lemma mut_store_ins_mark_object_invL[intro]:
"\<lbrace> fM_fA_invL \<^bold>\<and> handshake_invL \<^bold>\<and> sweep_loop_invL \<^bold>\<and> gc_W_empty_invL
\<^bold>\<and> mut_m.mark_object_invL m
\<^bold>\<and> mut_store_ins.mark_object_invL m
\<^bold>\<and> LSTP (handshake_phase_inv \<^bold>\<and> mut_m.mutator_phase_inv m) \<rbrace>
gc
\<lbrace> mut_store_ins.mark_object_invL m \<rbrace>"
apply (vcg_chainsaw mut_m.mark_object_invL_def mut_m.mut_store_ins_mark_object_invL_def2) \<comment> \<open>\<open>at gc sweep_loop_free s\<close>\<close>
apply (metis (no_types, lifting) handshake_in_syncD mut_m.mutator_phase_inv_aux.simps(5) mut_m.no_grey_refs_not_rootD obj_at_cong white_def)+
done
(* GC steps preserve the mutator's top-level mark_object invariant; the two
   named cases are the insertion/deletion barrier locations. *)
lemma mut_mark_object_invL[intro]:
"\<lbrace> fM_fA_invL \<^bold>\<and> gc_W_empty_invL \<^bold>\<and> handshake_invL \<^bold>\<and> sweep_loop_invL
\<^bold>\<and> mut_m.handshake_invL m \<^bold>\<and> mut_m.mark_object_invL m
\<^bold>\<and> LSTP (fM_rel_inv \<^bold>\<and> handshake_phase_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> sys_phase_inv) \<rbrace>
gc
\<lbrace> mut_m.mark_object_invL m \<rbrace>"
proof(vcg_chainsaw mut_m.handshake_invL_def mut_m.mark_object_invL_def, vcg_name_cases "mutator m") \<comment> \<open>\<open>at gc sweep_loop_free s\<close>\<close>
case (ins_barrier_locs s s') then show ?case
apply -
apply (drule_tac x=m in spec)
apply (clarsimp simp: fun_upd_apply dest!: handshake_in_syncD split: obj_at_field_on_heap_splits)
apply (metis (no_types, lifting) mut_m.no_grey_refs_not_rootD obj_at_cong white_def)
apply (metis (no_types) marked_not_white mut_m.no_grey_refs_not_rootD whiteI)
done
next case (del_barrier1_locs s s') then show ?case
apply -
apply (drule_tac x=m in spec)
apply (clarsimp simp: fun_upd_apply dest!: handshake_in_syncD split: obj_at_field_on_heap_splits)
apply (metis (no_types, lifting) mut_m.no_grey_refs_not_rootD obj_at_cong white_def)
apply (metis (no_types, lifting) marked_not_white mut_m.no_grey_refs_not_rootD obj_at_cong white_def)
done
qed blast+
end
(* While mutator m is in its get-roots handshake, no non-sys process has a
   pending mw_fM store at the head of its TSO buffer. *)
lemma mut_m_get_roots_no_fM_write:
"\<lbrakk> mut_m.handshake_invL m s; handshake_phase_inv s\<down>; fM_rel_inv s\<down>; tso_store_inv s\<down> \<rbrakk>
\<Longrightarrow> atS (mutator m) mut_m.hs_get_roots_locs s \<and> p \<noteq> sys \<longrightarrow> \<not>sys_mem_store_buffers p s\<down> = mw_fM fl # ws"
unfolding mut_m.handshake_invL_def
apply (elim conjE)
apply (drule mut_m.handshake_phase_invD[where m=m])
apply (drule fM_rel_invD)
apply (clarsimp simp: hp_step_rel_def fM_rel_def filter_empty_conv p_not_sys)
apply (metis (full_types) hs_phase.distinct(7) list.set_intros(1) tso_store_invD(4))
done
(* FIXME loads of cut-and-paste here *)
(* sys steps preserve the mutator's mark_object invariant. Each named case
   dequeues a TSO store-buffer entry at a particular mutator location; the
   del_barrier1/ins_barrier cases further split on the entry kind and contain
   near-identical proof scripts (see the FIXME above). *)
lemma (in sys) mut_mark_object_invL[intro]:
notes filter_empty_conv[simp]
notes fun_upd_apply[simp]
shows
"\<lbrace> mut_m.handshake_invL m \<^bold>\<and> mut_m.mark_object_invL m
\<^bold>\<and> LSTP (fA_rel_inv \<^bold>\<and> fM_rel_inv \<^bold>\<and> handshake_phase_inv \<^bold>\<and> mutators_phase_inv \<^bold>\<and> phase_rel_inv \<^bold>\<and> valid_refs_inv \<^bold>\<and> valid_W_inv \<^bold>\<and> tso_store_inv) \<rbrace>
sys
\<lbrace> mut_m.mark_object_invL m \<rbrace>"
proof(vcg_chainsaw mut_m.mark_object_invL_def, vcg_name_cases "mutator m")
case (hs_get_roots_loop_locs s s' p w ws x) then show ?case
apply -
apply (cases w; clarsimp split: obj_at_splits)
apply (meson valid_W_invD(1))
apply (simp add: atS_mono mut_m.hs_get_roots_loop_locs_subseteq_hs_get_roots_locs mut_m_get_roots_no_fM_write)
done
next case (hs_get_roots_loop_done s s' p w ws y) then show ?case
apply -
apply (cases w; clarsimp simp: p_not_sys valid_W_invD split: obj_at_splits)
apply (rename_tac fl obj)
apply (drule_tac fl=fl and p=p and ws=ws in mut_m_get_roots_no_fM_write; clarsimp)
apply (drule mp, erule atS_simps, loc_mem)
apply blast
done
next case (hs_get_roots_done s s' p w ws x) then show ?case
apply -
apply (cases w; clarsimp simp: p_not_sys valid_W_invD split: obj_at_splits)
apply blast
apply (rename_tac fl)
apply (drule_tac fl=fl and p=p and ws=ws in mut_m_get_roots_no_fM_write; clarsimp)
apply (drule mp, erule atS_simps, loc_mem)
apply blast
done
next case (mo_ptest_locs s s' p ws ph') then show ?case by (clarsimp simp: p_not_sys; elim disjE; clarsimp simp: phase_rel_def handshake_in_syncD dest!: phase_rel_invD)
next case (store_ins s s' p w ws y) then show ?case
apply -
apply (cases w; clarsimp simp: p_not_sys valid_W_invD split: obj_at_splits)
apply (metis (no_types, lifting) hs_phase.distinct(3, 5) mut_m.mut_ghost_handshake_phase_idle mut_m_not_idle_no_fM_writeD store_ins(9))
using valid_refs_invD(9) apply fastforce
apply (elim disjE; clarsimp simp: phase_rel_def handshake_in_syncD dest!: phase_rel_invD)
done
next case (del_barrier1_locs s s' p w ws) then show ?case
proof(cases w)
case (mw_Mutate r f opt_r') with del_barrier1_locs show ?thesis
apply (clarsimp simp: p_not_sys; elim disjE; clarsimp)
apply (intro conjI impI; clarsimp simp: obj_at_field_on_heap_imp_valid_ref split: option.splits)
apply (intro conjI impI; clarsimp)
apply (smt (z3) reachableI(1) valid_refs_invD(8))
apply (metis (no_types, lifting) marked_insertionD mut_m.mutator_phase_inv_aux.simps(4) mut_m.mutator_phase_inv_aux.simps(5) obj_at_cong reachableI(1) valid_refs_invD(8))
(* brutal *)
apply (rename_tac ma x2)
apply (frule_tac m=m in mut_m.handshake_phase_invD)
apply (frule_tac m=ma in mut_m.handshake_phase_invD)
apply (frule spec[where x=m])
apply (drule_tac x=ma in spec)
apply (clarsimp simp: hp_step_rel_def)
apply (elim disjE; clarsimp simp: marked_insertionD mut_m.mut_ghost_handshake_phase_idle)
done
next case (mw_fM fl) with del_barrier1_locs mut_m_not_idle_no_fM_writeD show ?thesis by fastforce
next case (mw_Phase ph) with del_barrier1_locs show ?thesis by (clarsimp simp: p_not_sys; elim disjE; clarsimp simp: phase_rel_def handshake_in_syncD dest!: phase_rel_invD)
qed (fastforce simp: valid_W_invD split: obj_at_field_on_heap_splits obj_at_splits)+
next case (ins_barrier_locs s s' p w ws) then show ?case
proof(cases w)
case (mw_Mutate r f opt_r') with ins_barrier_locs show ?thesis
apply (clarsimp simp: p_not_sys; elim disjE; clarsimp)
apply (intro conjI impI; clarsimp simp: obj_at_field_on_heap_imp_valid_ref split: option.splits)
apply (intro conjI impI; clarsimp)
apply (smt (z3) reachableI(1) valid_refs_invD(8))
apply (metis (no_types, lifting) marked_insertionD mut_m.mutator_phase_inv_aux.simps(4) mut_m.mutator_phase_inv_aux.simps(5) obj_at_cong reachableI(1) valid_refs_invD(8))
(* brutal *)
apply (rename_tac ma x2)
apply (frule_tac m=m in mut_m.handshake_phase_invD)
apply (frule_tac m=ma in mut_m.handshake_phase_invD)
apply (frule spec[where x=m])
apply (drule_tac x=ma in spec)
apply (clarsimp simp: hp_step_rel_def)
apply (elim disjE; clarsimp simp: marked_insertionD mut_m.mut_ghost_handshake_phase_idle)
done
next case (mw_fM fl) with ins_barrier_locs mut_m_not_idle_no_fM_writeD show ?thesis by fastforce
next case (mw_Phase ph) with ins_barrier_locs show ?thesis by (clarsimp simp: p_not_sys; elim disjE; clarsimp simp: phase_rel_def handshake_in_syncD dest!: phase_rel_invD)
qed (fastforce simp: valid_W_invD split: obj_at_field_on_heap_splits obj_at_splits)+
next case (lop_store_ins s s' p w ws y) then show ?case
apply -
apply (cases w; clarsimp simp: valid_W_invD(1) split: obj_at_splits)
apply (metis (no_types, opaque_lifting) hs_phase.distinct(5,7) mut_m_not_idle_no_fM_write)
apply (clarsimp simp: p_not_sys; elim disjE; clarsimp simp: phase_rel_def handshake_in_syncD dest!: phase_rel_invD; fail)+
done
qed
(*<*)
end
(*>*)
| {
"alphanum_fraction": null,
"author": "isabelle-prover",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/ConcurrentGC/Noninterference.thy",
"reason": null,
"repo": "mirror-afp-devel",
"save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel",
"sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1",
"size": null
} |
import numpy as np
import torch
from collections import defaultdict
# Full candidate operator alphabet, kept for reference:
#ops = ['~', '^', '+', '*', '<', '>']
ops = ['~', '+', '>']
# Arity of every symbol in the language: the literals '0'/'1' take no
# operands, '~' takes one, '+' two, and '>' three.
symb2nary = dict([
    ('0', 0),
    ('1', 0),
    ('~', 1),
    ('+', 2),
    ('>', 3),
])
class NAryBooleanExpLang(object):
    """Random generator for prefix-notation n-ary boolean expressions.

    Expressions are strings over the literals '0'/'1' and the operators in
    ``symb2nary`` whose arity is at most ``n`` ('~' unary, '+' binary,
    '>' ternary).  A string is a complete expression exactly when the running
    count of outstanding operands first reaches zero at its final symbol.
    """

    def __init__(self, n=3, p=0.5):
        """n: maximum operator arity to use (at most 3, the largest arity in
        symb2nary); p: probability of expanding with an operator rather than
        terminating with a literal."""
        assert n <= 3
        self.n = n
        self.p = p
        # Terminal alphabet.
        self.sigma = ['0', '1']
        # Operators (arity >= 1) admissible under this arity bound.
        self.expr = [exp for exp, m in symb2nary.items() if 0 < m <= n]
        # Inverse map: arity -> symbols of that arity.
        self.nary2symbs = defaultdict(list)
        for exp, m in symb2nary.items():
            self.nary2symbs[m].append(exp)

    def expand_expr(self):
        """Sample one symbol: with probability 1-p a literal (arity 0),
        otherwise an operator whose arity is uniform over 1..n.

        Returns (symbol, arity).
        """
        choices = list(range(self.n + 1))
        ps = [1 - self.p] + [self.p / self.n] * self.n
        toss = np.random.choice(choices, p=ps)
        if toss == 0:
            return np.random.choice(['0', '1']), toss
        return np.random.choice(self.nary2symbs[toss]), toss

    def generate_string(self, maxlength):
        """Grow one expression left-to-right.

        ``count`` is the number of operands still owed: it starts at 1 and
        each symbol of arity k replaces one obligation with k new ones.
        Returns the expression once count hits zero, or '' if maxlength is
        reached first (a failed draw).
        """
        count = 1
        expr = ''
        while len(expr) < maxlength:
            symb, arity = self.expand_expr()
            expr += symb
            count = count - 1 + arity
            if count == 0:
                return expr
        return ''

    def generate_list(self, num, min_size, max_size):
        """Collect ``num`` distinct complete expressions with
        min_size <= len <= max_size.

        NOTE: loops until enough distinct expressions are found; this may not
        terminate if the size window admits fewer than ``num`` expressions.
        """
        arr = set()
        while len(arr) < num:
            expr = self.generate_string(max_size)
            # Reject too-short draws, failed draws (''), and duplicates.
            if len(expr) < min_size or expr == '' or expr in arr:
                continue
            arr.add(expr)
            print("Generated {}/{} expressions".format(len(arr), num), end='\r', flush=True)
        print()
        return list(arr)

    def output_generator(self, seq):
        """Target labels for ``seq``: '1' only at the last position (where the
        prefix first forms a complete expression)."""
        return '0' * (len(seq) - 1) + '1'

    def training_set_generator(self, num, min_size, max_size):
        """Return (inputs, outputs) where outputs[i] labels inputs[i]."""
        input_seq = self.generate_list(num, min_size, max_size)
        output_seq = [self.output_generator(seq) for seq in input_seq]
        return input_seq, output_seq

    def lineToTensorOutput(self, line):
        """One-hot encode a '0'/'1' string as a (len(line), 2) float tensor."""
        tensor = torch.zeros(len(line), 2)
        for li, letter in enumerate(line):
            tensor[li][int(letter)] = 1.0
        return tensor

    def depth_counter(self, seq):
        """Cumulative sum of (arity - 1) over the symbols of ``seq``, shaped
        (len(seq), 1).  For a complete expression this reaches -1 exactly at
        the final symbol."""
        deltas = [symb2nary[ch] - 1 for ch in seq]
        return np.cumsum(deltas)[:, np.newaxis]
| {
"alphanum_fraction": 0.6182242991,
"author": null,
"avg_line_length": 24.8837209302,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "51bd5a9a51c083544b00aac66c24bded6ae3dc59",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-10-10T12:06:42.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-10-10T12:06:42.000Z",
"max_forks_repo_head_hexsha": "48eea2ea6e2802ba827868723f75fa6c82401cde",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "satwik77/Transformer-Formal-Languages",
"max_forks_repo_path": "src/utils/boolean_expr_generator.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "48eea2ea6e2802ba827868723f75fa6c82401cde",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "satwik77/Transformer-Formal-Languages",
"max_issues_repo_path": "src/utils/boolean_expr_generator.py",
"max_line_length": 87,
"max_stars_count": 9,
"max_stars_repo_head_hexsha": "48eea2ea6e2802ba827868723f75fa6c82401cde",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "satwik77/Transformer-Formal-Languages",
"max_stars_repo_path": "src/utils/boolean_expr_generator.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-14T18:52:06.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-10T08:44:08.000Z",
"num_tokens": 682,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2140
} |
import numpy as np
if __name__ == "__main__":
    # Toy classification fixture: 12 samples x 4 features, where the second
    # half of the data (rows and labels alike) repeats the first half exactly.
    base_rows = [[0.1, 0.2, 0.3, 0.4],
                 [0.5, 0.6, 0.7, 0.8],
                 [0.8, 0.6, 0.3, 0.4],
                 [0.8, 0.6, 0.7, 0.8],
                 [0.8, 0.8, 0.8, 0.4],
                 [0.1, 0.6, 0.7, 0.8]]
    xx = np.array(base_rows * 2)
    base_labels = [1, 0, 0, 1, 0, 1]
    y_actual = np.array(base_labels * 2)
| {
"alphanum_fraction": 0.2713815789,
"author": null,
"avg_line_length": 32,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8142aabe95c7abbe5d41840421797be452f485aa",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e0a459cc5741f376cb26c43538cde74a8c6d3b22",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "induraj2020/DeepEnsemble",
"max_forks_repo_path": "Test/test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e0a459cc5741f376cb26c43538cde74a8c6d3b22",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "induraj2020/DeepEnsemble",
"max_issues_repo_path": "Test/test.py",
"max_line_length": 50,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "e0a459cc5741f376cb26c43538cde74a8c6d3b22",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "induraj2020/DeepEnsemble",
"max_stars_repo_path": "Test/test.py",
"max_stars_repo_stars_event_max_datetime": "2021-08-02T12:22:25.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-02T12:22:25.000Z",
"num_tokens": 302,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 608
} |
# Fixed point classification in 2D
using DrWatson
@quickactivate "NonlinearDynamicsTextbook"
include(srcdir("style.jl"))
using DynamicalSystems, PyPlot, Random
# Eigenvalue pairs of each linear system, pushed by stream_eigs! and reused
# for the summary eigenvalue panel (axs[5]).
alleigs = []
fig, axs = subplots(2,3; figsize = (figx, 2figy))
ax = gca()
# Shared grid on which all vector fields are sampled.
xgrid = -5:0.05:5
ygrid = xgrid
# Preallocated field components, filled in place for each panel.
ux = zeros(length(xgrid), length(ygrid))
uy = copy(ux)
# The four linear systems u' = M*u to classify; order matches `titles`.
Ms = [
[1.0 0.8;
0.4 1.0],
[-1.0 -0.8;
1.8 -1.0],
[-1.0 0;
1 1],
[0 -1;
1 0],
]
titles = ["repulsive node", "attractive spiral", "hyperbolic/saddle", "center"]
using LinearAlgebra
# Stream-plot the linear field u' = M*u on axis `ax` (filling the shared global
# buffers ux/uy), push M's eigenvalues onto `alleigs`, and for a real spectrum
# overlay the eigenvector directions as dashed lines through the origin.
function stream_eigs!(ax, M, c = "C0")
for (i, x) in enumerate(xgrid)
for (j, y) in enumerate(ygrid)
ux[i, j], uy[i, j] = M * [x, y]
end
end
ax.streamplot(Vector(xgrid), Vector(ygrid), ux', uy';
linewidth = 1.5, density = 0.5, color = c, arrowsize = 2,
)
ev = eigen(M)
push!(alleigs, ev.values)
# Real eigenvalues: draw both eigendirections, scaled to span the plot.
if eltype(ev.values) <: Float64
e1 = ev.vectors[:, 1]
e2 = ev.vectors[:, 2]
for e in (e1, e2)
ax.plot(e[1] .* 2xgrid, e[2] .* 2ygrid; color = "C2", ls = "dashed")
end
end
ax.set_xlim(xgrid[1], xgrid[end])
ax.set_ylim(ygrid[1], ygrid[end])
ax.set_xticks([])
ax.set_yticks([])
# Mark the fixed point at the origin.
ax.plot(0, 0; marker = "o", mec = "C2", mew = 1,
markersize = 12, mfc = :white, zorder = 99
)
end
# Split a pair of eigenvalues into plottable (real, imaginary) coordinate
# vectors. Real eigenvalues lie on the real axis, so their imaginary
# coordinates are zero.
function tovec(e)
    if eltype(e) <: Real
        return e, zeros(2)
    end
    re_parts = [e[1].re, e[2].re]
    im_parts = [e[1].im, e[2].im]
    return re_parts, im_parts
end
# One panel per linear system; panel 5 accumulates all eigenvalues.
for (i, M) in enumerate(Ms)
ax = axs[i]
# Skip color "C2" (reserved for eigendirections/fixed-point markers).
color = i < 3 ? "C$(i-1)" : "C$(i)"
stream_eigs!(ax, M, color)
ax.set_title(titles[i]; color = color)
# Plot eigenvalues
e1, e2 = tovec(alleigs[i])
axs[5].scatter(e1, e2; color = color, s = 200, zorder = 99)
end
# Set axis of eigenval plot
axs[5].spines["left"].set_position("center")
axs[5].spines["bottom"].set_position("center")
axs[5].grid(false)
axs[5].spines["right"].set_color("none")
axs[5].spines["top"].set_color("none")
axs[5].set_title("eigenvalues"; color = "k")
# axs[5].set_yticklabels([])
# axs[5].set_xticklabels([])
axs[5].set_xlim(-2, 2)
axs[5].set_ylim(-1.8, 1.8)
# Plot limit cycle
# Van der Pol oscillator vector field in Lienard-style coordinates;
# p is the nonlinearity parameter. Signature (u, p, t) as required by
# ContinuousDynamicalSystem; t is unused (autonomous system).
function vanderpoll(u, p, t)
    x, y = u
    return SVector(p * (x - x^3/3 - y), x / p)
end
ds = ContinuousDynamicalSystem(vanderpoll, [0.1, 0.2], 0.5)
# Discard a long transient (Ttr) so the plotted trajectory lies on the cycle.
tr = trajectory(ds, 7.0; Ttr = 100.0)
axs[6].clear()
axs[6].plot(columns(tr)...; color = "C2", linestyle = "--")
# Sample the van der Pol field on the shared grid for the stream plot.
for (i, x) in enumerate(xgrid)
for (j, y) in enumerate(ygrid)
ux[i, j], uy[i, j] = ds.f(SVector(x, y), ds.p, 0)
end
end
axs[6].streamplot(Vector(xgrid), Vector(ygrid), ux', uy';
linewidth = 1.5, density = 0.5, color = "C5", arrowsize = 2
)
ax = axs[6]
ax.set_xlim(xgrid[1], xgrid[end])
ax.set_ylim(ygrid[1], ygrid[end])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("attractive limit cycle"; color = "C5")
fig.subplots_adjust(bottom = 0.02, left = 0.02, top = 0.92, right = 0.97, hspace = 0.2)
wsave(plotsdir("2", "2ddynamics"), fig)
"alphanum_fraction": 0.5735628451,
"author": null,
"avg_line_length": 26.093220339,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "b46d56acdb842a6e816e2b86f61a81288d74bc91",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2022-03-24T08:45:11.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-28T18:49:22.000Z",
"max_forks_repo_head_hexsha": "bfae8cf867f458f00151da089332f2ce3bea5dd0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "JuliaDynamics/NonlinearDynamicsTextbook",
"max_forks_repo_path": "figure_generation/2/2.2.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bfae8cf867f458f00151da089332f2ce3bea5dd0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "JuliaDynamics/NonlinearDynamicsTextbook",
"max_issues_repo_path": "figure_generation/2/2.2.jl",
"max_line_length": 87,
"max_stars_count": 88,
"max_stars_repo_head_hexsha": "bfae8cf867f458f00151da089332f2ce3bea5dd0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "JuliaDynamics/NonlinearDynamicsTextbook",
"max_stars_repo_path": "figure_generation/2/2.2.jl",
"max_stars_repo_stars_event_max_datetime": "2022-03-26T09:23:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-18T20:54:23.000Z",
"num_tokens": 1175,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3079
} |
"""
to do...
"""
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import argparse
import gym
| {
"alphanum_fraction": 0.7698412698,
"author": null,
"avg_line_length": 14,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "efa9896724ef807dc63a0fd8be351172f44137be",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "90c4f302b588bbf8be7962aaaa7f61c0234fb8d9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sherlockHSY/Reinforcement_learning_with_pytorch",
"max_forks_repo_path": "train_pendulum.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "90c4f302b588bbf8be7962aaaa7f61c0234fb8d9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sherlockHSY/Reinforcement_learning_with_pytorch",
"max_issues_repo_path": "train_pendulum.py",
"max_line_length": 49,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "90c4f302b588bbf8be7962aaaa7f61c0234fb8d9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sherlockHSY/Reinforcement_learning_with_pytorch",
"max_stars_repo_path": "train_pendulum.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-22T08:38:07.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-15T05:32:36.000Z",
"num_tokens": 28,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 126
} |
```julia
using DifferentialEquations
using Plots
```
Consider the simple reaction:
\begin{align}
A &\longleftrightarrow B\\
B &\longleftrightarrow C\\
\end{align}
Both are elementary steps that occur in the liquid phase, and we will consider this reaction system in a few different solvent environments.
```julia
# Two-parameter Margules activity-coefficient model for each solute i:
#   gamma_i = exp(XS^2 * (A12 + 2*(A21 - A12)*X_i)), XS = solvent mole fraction.
gammaA(XA, XS, A12A, A21A) = exp.(XS.^2 .*(A12A .+ 2*(A21A - A12A)*XA))
gammaB(XB, XS, A12B, A21B) = exp.(XS.^2 .*(A12B .+ 2*(A21B - A12B)*XB))
gammaC(XC, XS, A12C, A21C) = exp.(XS.^2 .*(A12C .+ 2*(A21C - A12C)*XC))
# Same functional form for the two transition states.
gammaTS1(XTS1, XS, A12TS1, A21TS1) = exp.(XS.^2 .*(A12TS1 .+ 2*(A21TS1 - A12TS1)*XTS1))
gammaTS2(XTS2, XS, A12TS2, A21TS2) = exp.(XS.^2 .*(A12TS2 .+ 2*(A21TS2 - A12TS2)*XTS2))
# Approach-to-equilibrium ratios for steps 1 (A<->B) and 2 (B<->C); z -> 1 at
# equilibrium. These reference the globals K10, K20 defined later in the notebook.
z1(XA, XB, XS, A12A, A21A, A12B, A21B) = 1/K10*gammaB(XB, XS, A12B, A21B)./gammaA(XA, XS, A12A, A21A).*XB./XA
z2(XB, XC, XS, A12B, A21B, A12C, A21C) = 1/K20*gammaC(XC, XS, A12C, A21C)./gammaB(XB, XS, A12B, A21B).*XC./XB
# Forward net rates with activity-coefficient (transition-state) corrections;
# reference globals k10, k20 defined later in the notebook.
rate1(XA, XB, XTS1, XS, A12A, A21A, A12B, A21B, A12TS1, A21TS1) = k10*gammaA(XA, XS, A12A, A21A)./gammaTS1(XTS1, XS, A12TS1, A21TS1).*XA.*(1 .- z1(XA, XB, XS, A12A, A21A, A12B, A21B))
rate2(XB, XC, XTS2, XS, A12B, A21B, A12C, A21C, A12TS2, A21TS2) = k20*gammaB(XB, XS, A12B, A21B)./gammaTS2(XTS2, XS, A12TS2, A21TS2).*XB.*(1 .- z2(XB, XC, XS, A12B, A21B, A12C, A21C))
```
rate2 (generic function with 1 method)
```julia
function batch(du, u, p, t)
    # In-place ODE right-hand side for the batch reactor A <-> B <-> C in a
    # non-ideal solvent. `u` holds mole numbers [NA NB NC]; `p` is a Dict with
    # Margules parameters ("MAR") and kinetic/system parameters ("PAR").
    MAR = p["MAR"]
    PAR = p["PAR"]
    k10, k20, K10, K20, V, NS = PAR
    NA = u[:,1]
    NB = u[:,2]
    NC = u[:,3]
    # Total moles includes the (inert) solvent NS.
    NT = NA + NB + NC .+ NS
    XA = NA./NT
    XB = NB./NT
    XC = NC./NT
    # Transition states assumed at the same mole fraction as their reactants
    # (TS1 ~ A, TS2 ~ B).
    XTS1 = XA
    XTS2 = XB
    XS = NS./NT
    #For A in solvent
    A12A = MAR[1]
    A21A = MAR[2]
    #For B in solvent
    A12B = MAR[3]
    A21B = MAR[4]
    #For C in solvent
    A12C = MAR[5]
    A21C = MAR[6]
    #For Transition State 1 in solvent
    A12TS1 = MAR[7]
    A21TS1 = MAR[8]
    #For Transition State 2 in solvent
    A12TS2 = MAR[9]
    A21TS2 = MAR[10]
    # Two-parameter Margules activity coefficients (same form as the
    # standalone functions defined earlier in the notebook).
    gammaA = exp.(XS.^2 .*(A12A .+ 2*(A21A - A12A)*XA))
    gammaB = exp.(XS.^2 .*(A12B .+ 2*(A21B - A12B)*XB))
    gammaC = exp.(XS.^2 .*(A12C .+ 2*(A21C - A12C)*XC))
    gammaTS1 = exp.(XS.^2 .*(A12TS1 .+ 2*(A21TS1 - A12TS1)*XTS1))
    gammaTS2 = exp.(XS.^2 .*(A12TS2 .+ 2*(A21TS2 - A12TS2)*XTS2))
    # Approach-to-equilibrium terms (z -> 1 at equilibrium).
    z1 = 1/K10*gammaB./gammaA.*XB./XA
    z2 = 1/K20*gammaC./gammaB.*XC./XB
    # z2 is 0/0 = NaN while XB == 0 (e.g. at t = 0); treat as far from
    # equilibrium. NOTE(review): z1 has no such guard — XA > 0 is assumed.
    z2[isnan.(z2)] .= 0
    r1 = k10*gammaA./gammaTS1.*XA.*(1 .- z1).*NT/V
    r2 = k20*gammaB./gammaTS2.*XB.*(1 .- z2).*NT/V
    # Net production rates per species for the series reaction.
    RA = -r1[1]
    RB = r1[1] - r2[1]
    RC = r2[1]
    du[1] = RA*V
    du[2] = RB*V
    du[3] = RC*V
    # The extra outputs are ignored by the ODE solver but used by direct calls
    # when post-processing rates and z along a stored trajectory.
    return du, r1, r2, z1, z2
end
```
batch (generic function with 1 method)
```julia
# Rate constants and equilibrium constants (dimensionless basis).
k10 = 1
k20 = 1
K10 = 1
K20 = 1
# Reactor volume; solvent makes up the balance of NTOT total moles.
V = 1
NTOT = 100
NA0 = 0.1
NB0 = 0.0
NC0 = 0.0
NS = NTOT - NA0 - NB0 - NC0
# Initial condition as a 1x3 row (matches the u[:,k] indexing in `batch`).
var0 = [NA0 NB0 NC0]
span = (0.0, 20.0);
```
```julia
#Solvate transition state relative to reactants
# Three solvent parameter sets (columns of MARSET1); 10 Margules parameters per
# column in the order [A12A, A21A, A12B, A21B, A12C, A21C, A12TS1, A21TS1,
# A12TS2, A21TS2].
# NOTE(review): columns 2 and 3 are labelled "destabilize/stabilize TS1" but
# are currently all zeros — confirm the intended non-zero values.
MARSET1 = zeros(10,3)
MARSET1[:,1] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #no solvation
MARSET1[:,2] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #destabilize TS1
MARSET1[:,3] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] #stabilize TS1
# Dense output grid and result storage (one column per solvent case).
tfine = range(0.0, stop = maximum(span), length = 1000)
e1out = zeros(length(tfine), size(MARSET1, 2))
e2out = zeros(length(tfine), size(MARSET1, 2))
r1out = zeros(length(tfine), size(MARSET1, 2))
r2out = zeros(length(tfine), size(MARSET1, 2))
z1out = zeros(length(tfine), size(MARSET1, 2))
z2out = zeros(length(tfine), size(MARSET1, 2))
for i = 1:size(MARSET1, 2)
    p0 = Dict("MAR" => MARSET1[:,i], "PAR" => [k10, k20, K10, K20, V, NS])
    prob = ODEProblem(batch, var0, span, p0)
    # Stiff solver with tight tolerances.
    sol = solve(prob, Rodas5(), abstol = 1e-10, reltol = 1e-10)
    solf = sol(tfine)
    NA = solf[1,:]
    NB = solf[2,:]
    NC = solf[3,:]
    NT = NA + NB + NC .+ NS
    # Extents of reaction normalised by the initial moles of A.
    ex1 = (NA0 .- NA)/NA0
    ex2 = (NC)/NA0
    # Re-evaluate rates and z terms along the stored trajectory
    # (the `dut` output is discarded here).
    dut, rt1, rt2, zt1, zt2 = batch([0., 0., 0.], [NA NB NC], p0, tfine)
    e1out[:,i] = ex1
    e2out[:,i] = ex2
    r1out[:,i] = rt1
    r2out[:,i] = rt2
    z1out[:,i] = zt1
    z2out[:,i] = zt2
end
# Extent, rate, and z trajectories for all three solvent cases.
plt1 = plot(tfine, e1out, xlabel = "time", ylabel = "extent", labels = ["e1" nothing nothing], legend = :bottomright)
plt1 = plot!(plt1,tfine, e2out, ls = :dash, labels = ["e2" nothing nothing])
plt2 = plot(tfine, r1out, xlabel = "time", ylabel = "rate", labels = ["r1" nothing nothing], legend = :topright)
plt2 = plot!(tfine, r2out, ls = :dash, labels = ["r2" nothing nothing])
plt3 = plot(e1out, r1out, xlabel = "extent", ylabel = "rate", labels = ["r1" nothing nothing], legend = :topright)
plt3 = plot!(e1out, r2out, ls = :dash, labels = ["r2" nothing nothing])
plt4 = plot(e1out, z1out, xlabel = "extent", ylabel = "z", labels = ["z1" nothing nothing], legend = :topright)
plt4 = plot!(e1out, z2out, ls = :dash, labels = ["z2" nothing nothing])
plt5 = plot(tfine, z1out, xlabel = "time", ylabel = "z", labels = ["z1" nothing nothing], legend = :topright)
plt5 = plot!(tfine, z2out, ls = :dash, labels = ["z2" nothing nothing])
display(plt1)
display(plt2)
display(plt3)
display(plt4)
display(plt5)
```
| {
"alphanum_fraction": 0.6840439426,
"author": null,
"avg_line_length": 177.4727830309,
"converted": true,
"ext": "ipynb",
"file": null,
"hexsha": "d0e383c262941cb3bae976cf83a378ee1a70408c",
"include": null,
"lang": "Jupyter Notebook",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a6eb581e4e3e72f40fd6c7e900b6f4b30311076f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jqbond/Research_Public",
"max_forks_repo_path": "2021_JCAT_DeDonder_Solvents/Case Study 2.ipynb",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a6eb581e4e3e72f40fd6c7e900b6f4b30311076f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jqbond/Research_Public",
"max_issues_repo_path": "2021_JCAT_DeDonder_Solvents/Case Study 2.ipynb",
"max_line_length": 16447,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a6eb581e4e3e72f40fd6c7e900b6f4b30311076f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jqbond/Research_Public",
"max_stars_repo_path": "2021_JCAT_DeDonder_Solvents/Case Study 2.ipynb",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2386,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 694451
} |
# -*- coding: utf-8 -*-
"""
Automatic Colour Conversion Graph
=================================
Defines the automatic colour conversion graph objects:
- :func:`colour.describe_conversion_path`
- :func:`colour.convert`
"""
import inspect
import numpy as np
import textwrap
from collections import namedtuple
from copy import copy
from functools import partial
from pprint import pformat
from colour.colorimetry import (CCS_ILLUMINANTS, SDS_ILLUMINANTS,
TVS_ILLUMINANTS_HUNTERLAB)
from colour.colorimetry import (colorimetric_purity, complementary_wavelength,
dominant_wavelength, excitation_purity,
lightness, luminance, luminous_efficacy,
luminous_efficiency, luminous_flux, sd_to_XYZ,
whiteness, yellowness, wavelength_to_XYZ)
from colour.recovery import XYZ_to_sd
from colour.models import RGB_COLOURSPACE_sRGB
from colour.models import (
CAM02LCD_to_JMh_CIECAM02, CAM02SCD_to_JMh_CIECAM02,
CAM02UCS_to_JMh_CIECAM02, CAM16LCD_to_JMh_CAM16, CAM16SCD_to_JMh_CAM16,
CAM16UCS_to_JMh_CAM16, CMYK_to_CMY, CMY_to_CMYK, CMY_to_RGB, DIN99_to_Lab,
HSL_to_RGB, HSV_to_RGB, Hunter_Lab_to_XYZ, Hunter_Rdab_to_XYZ,
ICtCp_to_XYZ, IgPgTg_to_XYZ, IPT_to_XYZ, JMh_CAM16_to_CAM16LCD,
JMh_CAM16_to_CAM16SCD, JMh_CAM16_to_CAM16UCS, JMh_CIECAM02_to_CAM02LCD,
JMh_CIECAM02_to_CAM02SCD, JMh_CIECAM02_to_CAM02UCS, JzAzBz_to_XYZ,
LCHab_to_Lab, LCHuv_to_Luv, Lab_to_DIN99, Lab_to_LCHab, Lab_to_XYZ,
Luv_to_LCHuv, Luv_to_XYZ, Luv_to_uv, Luv_uv_to_xy, OSA_UCS_to_XYZ,
Oklab_to_XYZ, Prismatic_to_RGB, RGB_luminance, RGB_to_CMY, RGB_to_HSL,
RGB_to_HSV, RGB_to_Prismatic, RGB_to_RGB, RGB_to_XYZ, RGB_to_YCbCr,
RGB_to_YCoCg, RGB_to_YcCbcCrc, UCS_to_XYZ, UCS_to_uv, UCS_uv_to_xy,
UVW_to_XYZ, XYZ_to_Hunter_Lab, XYZ_to_Hunter_Rdab, XYZ_to_ICtCp,
XYZ_to_IgPgTg, XYZ_to_IPT, XYZ_to_JzAzBz, XYZ_to_Lab, XYZ_to_Luv,
XYZ_to_OSA_UCS, XYZ_to_Oklab, XYZ_to_RGB, XYZ_to_UCS, XYZ_to_UVW,
XYZ_to_hdr_CIELab, XYZ_to_hdr_IPT, XYZ_to_sRGB, XYZ_to_xy, XYZ_to_xyY,
YCbCr_to_RGB, YCoCg_to_RGB, YcCbcCrc_to_RGB, cctf_decoding, cctf_encoding,
hdr_CIELab_to_XYZ, hdr_IPT_to_XYZ, sRGB_to_XYZ, uv_to_Luv, uv_to_UCS,
xyY_to_XYZ, xyY_to_xy, xy_to_Luv_uv, xy_to_UCS_uv, xy_to_XYZ, xy_to_xyY)
from colour.notation import (HEX_to_RGB, RGB_to_HEX, munsell_value,
munsell_colour_to_xyY, xyY_to_munsell_colour)
from colour.quality import colour_quality_scale, colour_rendering_index
from colour.appearance import (
CAM_Specification_CAM16, CAM16_to_XYZ, CAM_Specification_CIECAM02,
CIECAM02_to_XYZ, XYZ_to_ATD95, XYZ_to_CAM16, XYZ_to_CIECAM02, XYZ_to_Hunt,
XYZ_to_LLAB, XYZ_to_Nayatani95, XYZ_to_RLAB)
from colour.temperature import CCT_to_uv, uv_to_CCT
from colour.utilities import (domain_range_scale, filter_kwargs, message_box,
required, tsplit, tstack, usage_warning)
# Standard Colour project module attribution metadata.
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'

# Public API of this module.
__all__ = [
    'Conversion_Specification', 'CIECAM02_to_JMh_CIECAM02',
    'JMh_CIECAM02_to_CIECAM02', 'CAM16_to_JMh_CAM16', 'JMh_CAM16_to_CAM16',
    'XYZ_to_luminance', 'RGB_luminance_to_RGB',
    'CONVERSION_SPECIFICATIONS_DATA', 'CONVERSION_GRAPH_NODE_LABELS',
    'CONVERSION_SPECIFICATIONS', 'CONVERSION_GRAPH',
    'describe_conversion_path', 'convert'
]
class Conversion_Specification(
        namedtuple('Conversion_Specification',
                   ('source', 'target', 'conversion_function'))):
    """
    Conversion specification for *Colour* graph for automatic colour
    conversion describing two nodes and the edge in the graph.

    Parameters
    ----------
    source : unicode
        Source node in the graph.
    target : unicode
        Target node in the graph.
    conversion_function : callable
        Callable converting from the ``source`` node to the ``target`` node.
    """

    def __new__(cls, source=None, target=None, conversion_function=None):
        # Bug fix: the declared defaults are ``None`` but the previous
        # implementation called ``.lower()`` unconditionally, so using the
        # defaults raised ``AttributeError``. Node names are lower-cased to
        # normalise graph lookups; ``None`` is now passed through unchanged.
        return super(Conversion_Specification, cls).__new__(
            cls,
            source.lower() if source is not None else None,
            target.lower() if target is not None else None,
            conversion_function)
def CIECAM02_to_JMh_CIECAM02(CAM_Specification_CIECAM02):
    """
    Converts a *CIECAM02* colour appearance model specification to its
    *CIECAM02* :math:`JMh` correlates.

    Parameters
    ----------
    CAM_Specification_CIECAM02 : CAM_Specification_CIECAM02
        *CIECAM02* colour appearance model specification.

    Returns
    -------
    ndarray
        *CIECAM02* :math:`JMh` correlates.

    Examples
    --------
    >>> specification = CAM_Specification_CIECAM02(J=41.731091132513917,
    ... M=0.108842175669226,
    ... h=219.048432658311780)
    >>> CIECAM02_to_JMh_CIECAM02(specification) # doctest: +ELLIPSIS
    array([ 4.1731091...e+01, 1.0884217...e-01, 2.1904843...e+02])
    """

    # Alias the (class-shadowing) parameter for readability, then stack the
    # three correlates along the last axis.
    specification = CAM_Specification_CIECAM02
    return tstack([specification.J, specification.M, specification.h])
def JMh_CIECAM02_to_CIECAM02(JMh):
    """
    Converts *CIECAM02* :math:`JMh` correlates to a *CIECAM02* colour
    appearance model specification.

    Parameters
    ----------
    JMh : array_like
        *CIECAM02* :math:`JMh` correlates.

    Returns
    -------
    CAM_Specification_CIECAM02
        *CIECAM02* colour appearance model specification.

    Examples
    --------
    >>> JMh = np.array([4.17310911e+01, 1.08842176e-01, 2.19048433e+02])
    >>> JMh_CIECAM02_to_CIECAM02(JMh) # doctest: +ELLIPSIS
    CAM_Specification_CIECAM02(J=41.7310911..., C=None, h=219.0484329..., \
s=None, Q=None, M=0.1088421..., H=None, HC=None)
    """

    # Split the last axis into its three correlates and rebuild the
    # specification with only J, M and h populated.
    correlates = tsplit(JMh)
    return CAM_Specification_CIECAM02(
        J=correlates[0], M=correlates[1], h=correlates[2])
def CAM16_to_JMh_CAM16(CAM_Specification_CAM16):
    """
    Converts a *CAM16* colour appearance model specification to its *CAM16*
    :math:`JMh` correlates.

    Parameters
    ----------
    CAM_Specification_CAM16 : CAM_Specification_CAM16
        *CAM16* colour appearance model specification.

    Returns
    -------
    ndarray
        *CAM16* :math:`JMh` correlates.

    Examples
    --------
    >>> specification = CAM_Specification_CAM16(J=41.731207905126638,
    ... M=0.107436772335905,
    ... h=217.067959767393010)
    >>> CAM16_to_JMh_CAM16(specification) # doctest: +ELLIPSIS
    array([ 4.1731207...e+01, 1.0743677...e-01, 2.1706796...e+02])
    """

    # Alias the (class-shadowing) parameter for readability, then stack the
    # three correlates along the last axis.
    specification = CAM_Specification_CAM16
    return tstack([specification.J, specification.M, specification.h])
def JMh_CAM16_to_CAM16(JMh):
    """
    Converts from *CAM16* :math:`JMh` correlates to *CAM16* specification.
    Parameters
    ----------
    JMh : array_like
        *CAM16* :math:`JMh` correlates.
    Returns
    -------
    CAM_Specification_CAM16
        *CAM16* colour appearance model specification.
    Examples
    --------
    >>> JMh = np.array([4.17312079e+01, 1.07436772e-01, 2.17067960e+02])
    >>> JMh_CAM16_to_CAM16(JMh) # doctest: +ELLIPSIS
    CAM_Specification_CAM16(J=41.7312079..., C=None, h=217.06796..., s=None, \
Q=None, M=0.1074367..., H=None, HC=None)
    """
    # Only J, M and h are populated; the remaining correlates stay ``None``.
    J, M, h = tsplit(JMh)
    return CAM_Specification_CAM16(J=J, M=M, h=h)
def XYZ_to_luminance(XYZ):
    """
    Extracts the *luminance* :math:`Y` component from *CIE XYZ* tristimulus
    values.

    Parameters
    ----------
    XYZ : array_like
        *CIE XYZ* tristimulus values.

    Returns
    -------
    array_like
        *Luminance* :math:`Y`.

    Examples
    --------
    >>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
    >>> XYZ_to_luminance(XYZ) # doctest: +ELLIPSIS
    0.1219722...
    """

    # tsplit yields the (X, Y, Z) components; the middle one is Y.
    return tsplit(XYZ)[1]
def RGB_luminance_to_RGB(Y):
    """
    Converts *luminance* :math:`Y` to an achromatic (grey) *RGB*
    representation by replicating it on every channel.

    Parameters
    ----------
    Y : array_like
        *Luminance* :math:`Y`.

    Returns
    -------
    array_like
        *RGB*.

    Examples
    --------
    >>> RGB_luminance_to_RGB(0.123014562384318) # doctest: +ELLIPSIS
    array([ 0.1230145..., 0.1230145..., 0.1230145...])
    """

    return tstack([Y] * 3)
_DEFAULT_ILLUMINANT = 'D65'
"""
Default automatic colour conversion graph illuminant name.
_DEFAULT_ILLUMINANT : unicode
"""
_SD_DEFAULT_ILLUMINANT = SDS_ILLUMINANTS[_DEFAULT_ILLUMINANT]
"""
Default automatic colour conversion graph illuminant spectral distribution.
_SD_DEFAULT_ILLUMINANT : SpectralDistribution
"""
_CCS_DEFAULT_ILLUMINANT = CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer'][_DEFAULT_ILLUMINANT]
"""
Default automatic colour conversion graph illuminant *CIE xy* chromaticity
coordinates.
_CCS_DEFAULT_ILLUMINANT : ndarray
"""
_TVS_DEFAULT_ILLUMINANT = xy_to_XYZ(_CCS_DEFAULT_ILLUMINANT)
"""
Default automatic colour conversion graph illuminant *CIE XYZ* tristimulus
values.
_TVS_DEFAULT_ILLUMINANT : ndarray
"""
_RGB_COLOURSPACE_DEFAULT = RGB_COLOURSPACE_sRGB
"""
Default automatic colour conversion graph *RGB* colourspace.
_RGB_COLOURSPACE_DEFAULT : RGB_COLOURSPACE_RGB
"""
CONVERSION_SPECIFICATIONS_DATA = [
# Colorimetry
('Spectral Distribution', 'CIE XYZ',
partial(sd_to_XYZ, illuminant=_SD_DEFAULT_ILLUMINANT)),
('CIE XYZ', 'Spectral Distribution', XYZ_to_sd),
('Spectral Distribution', 'Luminous Flux', luminous_flux),
('Spectral Distribution', 'Luminous Efficiency', luminous_efficiency),
('Spectral Distribution', 'Luminous Efficacy', luminous_efficacy),
('CIE XYZ', 'Luminance', XYZ_to_luminance),
('Luminance', 'Lightness', lightness),
('Lightness', 'Luminance', luminance),
('CIE XYZ', 'Whiteness', partial(whiteness,
XYZ_0=_TVS_DEFAULT_ILLUMINANT)),
('CIE XYZ', 'Yellowness', yellowness),
('CIE xy', 'Colorimetric Purity',
partial(colorimetric_purity, xy_n=_CCS_DEFAULT_ILLUMINANT)),
('CIE xy', 'Complementary Wavelength',
partial(complementary_wavelength, xy_n=_CCS_DEFAULT_ILLUMINANT)),
('CIE xy', 'Dominant Wavelength',
partial(dominant_wavelength, xy_n=_CCS_DEFAULT_ILLUMINANT)),
('CIE xy', 'Excitation Purity',
partial(excitation_purity, xy_n=_CCS_DEFAULT_ILLUMINANT)),
('Wavelength', 'CIE XYZ', wavelength_to_XYZ),
# Colour Models
('CIE XYZ', 'CIE xyY', XYZ_to_xyY),
('CIE xyY', 'CIE XYZ', xyY_to_XYZ),
('CIE xyY', 'CIE xy', xyY_to_xy),
('CIE xy', 'CIE xyY', xy_to_xyY),
('CIE XYZ', 'CIE xy', XYZ_to_xy),
('CIE xy', 'CIE XYZ', xy_to_XYZ),
('CIE XYZ', 'CIE Lab', XYZ_to_Lab),
('CIE Lab', 'CIE XYZ', Lab_to_XYZ),
('CIE Lab', 'CIE LCHab', Lab_to_LCHab),
('CIE LCHab', 'CIE Lab', LCHab_to_Lab),
('CIE XYZ', 'CIE Luv', XYZ_to_Luv),
('CIE Luv', 'CIE XYZ', Luv_to_XYZ),
('CIE Luv', 'CIE Luv uv', Luv_to_uv),
('CIE Luv uv', 'CIE Luv', uv_to_Luv),
('CIE Luv uv', 'CIE xy', Luv_uv_to_xy),
('CIE xy', 'CIE Luv uv', xy_to_Luv_uv),
('CIE Luv', 'CIE LCHuv', Luv_to_LCHuv),
('CIE LCHuv', 'CIE Luv', LCHuv_to_Luv),
('CIE XYZ', 'CIE UCS', XYZ_to_UCS),
('CIE UCS', 'CIE XYZ', UCS_to_XYZ),
('CIE UCS', 'CIE UCS uv', UCS_to_uv),
('CIE UCS uv', 'CIE UCS', uv_to_UCS),
('CIE UCS uv', 'CIE xy', UCS_uv_to_xy),
('CIE xy', 'CIE UCS uv', xy_to_UCS_uv),
('CIE XYZ', 'CIE UVW', XYZ_to_UVW),
('CIE UVW', 'CIE XYZ', UVW_to_XYZ),
('CIE Lab', 'DIN99', Lab_to_DIN99),
('DIN99', 'CIE Lab', DIN99_to_Lab),
('CIE XYZ', 'hdr CIELab', XYZ_to_hdr_CIELab),
('hdr CIELab', 'CIE XYZ', hdr_CIELab_to_XYZ),
('CIE XYZ', 'Hunter Lab',
partial(
XYZ_to_Hunter_Lab,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB['CIE 1931 2 Degree Standard Observer']
['D65'].XYZ_n / 100)),
('Hunter Lab', 'CIE XYZ',
partial(
Hunter_Lab_to_XYZ,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB['CIE 1931 2 Degree Standard Observer']
['D65'].XYZ_n / 100)),
('CIE XYZ', 'Hunter Rdab',
partial(
XYZ_to_Hunter_Rdab,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB['CIE 1931 2 Degree Standard Observer']
['D65'].XYZ_n / 100)),
('Hunter Rdab', 'CIE XYZ',
partial(
Hunter_Rdab_to_XYZ,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB['CIE 1931 2 Degree Standard Observer']
['D65'].XYZ_n / 100)),
('CIE XYZ', 'ICtCp', XYZ_to_ICtCp),
('ICtCp', 'CIE XYZ', ICtCp_to_XYZ),
('CIE XYZ', 'IgPgTg', XYZ_to_IgPgTg),
('IgPgTg', 'CIE XYZ', IgPgTg_to_XYZ),
('CIE XYZ', 'IPT', XYZ_to_IPT),
('IPT', 'CIE XYZ', IPT_to_XYZ),
('CIE XYZ', 'JzAzBz', XYZ_to_JzAzBz),
('JzAzBz', 'CIE XYZ', JzAzBz_to_XYZ),
('CIE XYZ', 'hdr IPT', XYZ_to_hdr_IPT),
('hdr IPT', 'CIE XYZ', hdr_IPT_to_XYZ),
('CIE XYZ', 'OSA UCS', XYZ_to_OSA_UCS),
('OSA UCS', 'CIE XYZ', OSA_UCS_to_XYZ),
('CIE XYZ', 'Oklab', XYZ_to_Oklab),
('Oklab', 'CIE XYZ', Oklab_to_XYZ),
# RGB Colour Models
('CIE XYZ', 'RGB',
partial(
XYZ_to_RGB,
illuminant_XYZ=_RGB_COLOURSPACE_DEFAULT.whitepoint,
illuminant_RGB=_RGB_COLOURSPACE_DEFAULT.whitepoint,
matrix_XYZ_to_RGB=_RGB_COLOURSPACE_DEFAULT.matrix_XYZ_to_RGB)),
('RGB', 'CIE XYZ',
partial(
RGB_to_XYZ,
illuminant_RGB=_RGB_COLOURSPACE_DEFAULT.whitepoint,
illuminant_XYZ=_RGB_COLOURSPACE_DEFAULT.whitepoint,
matrix_RGB_to_XYZ=_RGB_COLOURSPACE_DEFAULT.matrix_RGB_to_XYZ)),
('RGB', 'Scene-Referred RGB',
partial(
RGB_to_RGB,
input_colourspace=_RGB_COLOURSPACE_DEFAULT,
output_colourspace=_RGB_COLOURSPACE_DEFAULT)),
('Scene-Referred RGB', 'RGB',
partial(
RGB_to_RGB,
input_colourspace=_RGB_COLOURSPACE_DEFAULT,
output_colourspace=_RGB_COLOURSPACE_DEFAULT)),
('RGB', 'HSV', RGB_to_HSV),
('HSV', 'RGB', HSV_to_RGB),
('RGB', 'HSL', RGB_to_HSL),
('HSL', 'RGB', HSL_to_RGB),
('CMY', 'RGB', CMY_to_RGB),
('RGB', 'CMY', RGB_to_CMY),
('CMY', 'CMYK', CMY_to_CMYK),
('CMYK', 'CMY', CMYK_to_CMY),
('RGB', 'RGB Luminance',
partial(
RGB_luminance,
primaries=_RGB_COLOURSPACE_DEFAULT.primaries,
whitepoint=_RGB_COLOURSPACE_DEFAULT.whitepoint)),
('RGB Luminance', 'RGB', RGB_luminance_to_RGB),
('RGB', 'Prismatic', RGB_to_Prismatic),
('Prismatic', 'RGB', Prismatic_to_RGB),
('Output-Referred RGB', 'YCbCr', RGB_to_YCbCr),
('YCbCr', 'Output-Referred RGB', YCbCr_to_RGB),
('RGB', 'YcCbcCrc', RGB_to_YcCbcCrc),
('YcCbcCrc', 'RGB', YcCbcCrc_to_RGB),
('Output-Referred RGB', 'YCoCg', RGB_to_YCoCg),
('YCoCg', 'Output-Referred RGB', YCoCg_to_RGB),
('RGB', 'Output-Referred RGB', cctf_encoding),
('Output-Referred RGB', 'RGB', cctf_decoding),
('Scene-Referred RGB', 'Output-Referred RGB', cctf_encoding),
('Output-Referred RGB', 'Scene-Referred RGB', cctf_decoding),
('CIE XYZ', 'sRGB', XYZ_to_sRGB),
('sRGB', 'CIE XYZ', sRGB_to_XYZ),
# Colour Notation Systems
('Output-Referred RGB', 'Hexadecimal', RGB_to_HEX),
('Hexadecimal', 'Output-Referred RGB', HEX_to_RGB),
('CIE xyY', 'Munsell Colour', xyY_to_munsell_colour),
('Munsell Colour', 'CIE xyY', munsell_colour_to_xyY),
('Luminance', 'Munsell Value', munsell_value),
('Munsell Value', 'Luminance', partial(luminance, method='ASTM D1535')),
# Colour Quality
('Spectral Distribution', 'CRI', colour_rendering_index),
('Spectral Distribution', 'CQS', colour_quality_scale),
# Colour Temperature
('CCT', 'CIE UCS uv', CCT_to_uv),
('CIE UCS uv', 'CCT', uv_to_CCT),
# Advanced Colorimetry
('CIE XYZ', 'Hunt',
partial(
XYZ_to_Hunt,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
XYZ_b=_TVS_DEFAULT_ILLUMINANT,
L_A=80 * 0.2,
CCT_w=6504)),
('CIE XYZ', 'ATD95',
partial(
XYZ_to_ATD95,
XYZ_0=_TVS_DEFAULT_ILLUMINANT,
Y_0=80 * 0.2,
k_1=0,
k_2=(15 + 50) / 2)),
('CIE XYZ', 'CIECAM02',
partial(
XYZ_to_CIECAM02,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
L_A=64 / np.pi * 0.2,
Y_b=20)),
('CIECAM02', 'CIE XYZ',
partial(
CIECAM02_to_XYZ,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
L_A=64 / np.pi * 0.2,
Y_b=20)),
('CIECAM02', 'CIECAM02 JMh', CIECAM02_to_JMh_CIECAM02),
('CIECAM02 JMh', 'CIECAM02', JMh_CIECAM02_to_CIECAM02),
('CIE XYZ', 'CAM16',
partial(
XYZ_to_CAM16,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
L_A=64 / np.pi * 0.2,
Y_b=20)),
('CAM16', 'CIE XYZ',
partial(
CAM16_to_XYZ,
XYZ_w=_TVS_DEFAULT_ILLUMINANT,
L_A=64 / np.pi * 0.2,
Y_b=20)),
('CAM16', 'CAM16 JMh', CAM16_to_JMh_CAM16),
('CAM16 JMh', 'CAM16', JMh_CAM16_to_CAM16),
('CIE XYZ', 'LLAB',
partial(XYZ_to_LLAB, XYZ_0=_TVS_DEFAULT_ILLUMINANT, Y_b=80 * 0.2, L=80)),
('CIE XYZ', 'Nayatani95',
partial(
XYZ_to_Nayatani95,
XYZ_n=_TVS_DEFAULT_ILLUMINANT,
Y_o=0.2,
E_o=1000,
E_or=1000)),
('CIE XYZ', 'RLAB',
partial(XYZ_to_RLAB, XYZ_n=_TVS_DEFAULT_ILLUMINANT, Y_n=20)),
('CIECAM02 JMh', 'CAM02LCD', JMh_CIECAM02_to_CAM02LCD),
('CAM02LCD', 'CIECAM02 JMh', CAM02LCD_to_JMh_CIECAM02),
('CIECAM02 JMh', 'CAM02SCD', JMh_CIECAM02_to_CAM02SCD),
('CAM02SCD', 'CIECAM02 JMh', CAM02SCD_to_JMh_CIECAM02),
('CIECAM02 JMh', 'CAM02UCS', JMh_CIECAM02_to_CAM02UCS),
('CAM02UCS', 'CIECAM02 JMh', CAM02UCS_to_JMh_CIECAM02),
('CAM16 JMh', 'CAM16LCD', JMh_CAM16_to_CAM16LCD),
('CAM16LCD', 'CAM16 JMh', CAM16LCD_to_JMh_CAM16),
('CAM16 JMh', 'CAM16SCD', JMh_CAM16_to_CAM16SCD),
('CAM16SCD', 'CAM16 JMh', CAM16SCD_to_JMh_CAM16),
('CAM16 JMh', 'CAM16UCS', JMh_CAM16_to_CAM16UCS),
('CAM16UCS', 'CAM16 JMh', CAM16UCS_to_JMh_CAM16),
]
"""
Automatic colour conversion graph specifications data describing two nodes and
the edge in the graph.
CONVERSION_SPECIFICATIONS_DATA : list
"""
CONVERSION_SPECIFICATIONS = [
Conversion_Specification(*specification)
for specification in CONVERSION_SPECIFICATIONS_DATA
]
"""
Automatic colour conversion graph specifications describing two nodes and
the edge in the graph.
CONVERSION_SPECIFICATIONS : list
"""
CONVERSION_GRAPH_NODE_LABELS = {
specification[0].lower(): specification[0]
for specification in CONVERSION_SPECIFICATIONS_DATA
}
"""
Automatic colour conversion graph node labels.
CONVERSION_GRAPH_NODE_LABELS : dict
"""
CONVERSION_GRAPH_NODE_LABELS.update({
specification[1].lower(): specification[1]
for specification in CONVERSION_SPECIFICATIONS_DATA
})
@required('NetworkX')
def _build_graph():
    """
    Builds the automatic colour conversion graph from the module-level
    conversion specifications.

    Returns
    -------
    DiGraph
        Automatic colour conversion graph.
    """

    import networkx as nx

    graph = nx.DiGraph()

    # One directed edge per specification, carrying its conversion callable.
    for spec in CONVERSION_SPECIFICATIONS:
        graph.add_edge(
            spec.source, spec.target,
            conversion_function=spec.conversion_function)

    return graph
CONVERSION_GRAPH = None
"""
Automatic colour conversion graph.
CONVERSION_GRAPH : DiGraph
"""
@required('NetworkX')
def _conversion_path(source, target):
    """
    Returns the conversion path from the source node to the target node in the
    automatic colour conversion graph.

    Parameters
    ----------
    source : unicode
        Source node.
    target : unicode
        Target node.

    Returns
    -------
    list
        Conversion path from the source node to the target node, i.e. a list of
        conversion function callables.

    Examples
    --------
    >>> _conversion_path('cie lab', 'cct')
    ... # doctest: +ELLIPSIS
    [<function Lab_to_XYZ at 0x...>, <function XYZ_to_xy at 0x...>, \
<function xy_to_CCT at 0x...>]
    """

    import colour
    import networkx as nx

    global CONVERSION_GRAPH

    if CONVERSION_GRAPH is None:
        # Build the graph once and cache it on the module.
        # Updating the :attr:`CONVERSION_GRAPH` attributes.
        colour.graph.CONVERSION_GRAPH = CONVERSION_GRAPH = _build_graph()

    path = nx.shortest_path(CONVERSION_GRAPH, source, target)

    # Collect the conversion callable of every edge along the path.
    return [
        CONVERSION_GRAPH.get_edge_data(u, v)['conversion_function']
        for u, v in zip(path, path[1:])
    ]
def _lower_order_function(callable_):
"""
Returns the lower order function associated with given callable, i.e.
the function wrapped by a partial object.
Parameters
----------
callable_ : callable
Callable to return the lower order function.
Returns
-------
callable
Lower order function or given callable if no lower order function
exists.
"""
return callable_.func if isinstance(callable_, partial) else callable_
def describe_conversion_path(source,
                             target,
                             mode='Short',
                             width=79,
                             padding=3,
                             print_callable=print,
                             **kwargs):
    """
    Describes the conversion path from source colour representation to target
    colour representation using the automatic colour conversion graph.

    Parameters
    ----------
    source : unicode
        Source colour representation, i.e. the source node in the automatic
        colour conversion graph.
    target : unicode
        Target colour representation, i.e. the target node in the automatic
        colour conversion graph.
    mode : unicode, optional
        **{'Short', 'Long', 'Extended'}**,
        Verbose mode: *Short* describes the conversion path, *Long* provides
        details about the arguments, definitions signatures and output values,
        *Extended* appends the definitions documentation.
    width : int, optional
        Message box width.
    padding : unicode, optional
        Padding on each sides of the message.
    print_callable : callable, optional
        Callable used to print the message box.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        {:func:`colour.convert`},
        Please refer to the documentation of the previously listed definition.

    Examples
    --------
    >>> describe_conversion_path('Spectral Distribution', 'sRGB', width=75)
    ===========================================================================
    * *
    * [ Conversion Path ] *
    * *
    * "sd_to_XYZ" --> "XYZ_to_sRGB" *
    * *
    ===========================================================================
    """

    # ``inspect.signature`` is unavailable on very old Python versions;
    # fall back to the deprecated ``getargspec``.
    try:  # pragma: no cover
        signature_inspection = inspect.signature
    except AttributeError:  # pragma: no cover
        signature_inspection = inspect.getargspec

    # Node names and mode are normalised to lower-case for graph lookups.
    source, target, mode = source.lower(), target.lower(), mode.lower()
    # 'extended' mode uses a fixed, wider box regardless of ``width``.
    width = (79 + 2 + 2 * 3 - 4) if mode == 'extended' else width

    conversion_path = _conversion_path(source, target)

    # Print the overall path first, e.g. "sd_to_XYZ" --> "XYZ_to_sRGB".
    message_box(
        '[ Conversion Path ]\n\n{0}'.format(' --> '.join([
            '"{0}"'.format(
                _lower_order_function(conversion_function).__name__)
            for conversion_function in conversion_path
        ])), width, padding, print_callable)

    for conversion_function in conversion_path:
        conversion_function_name = _lower_order_function(
            conversion_function).__name__

        # Filtering compatible keyword arguments passed directly and
        # irrespective of any conversion function name.
        filtered_kwargs = filter_kwargs(conversion_function, **kwargs)

        # Filtering keyword arguments passed as dictionary with the
        # conversion function name.
        filtered_kwargs.update(kwargs.get(conversion_function_name, {}))

        # 'return' is a pseudo-argument carrying the intermediate output of
        # the step (supplied by :func:`colour.convert`), not a real kwarg.
        return_value = filtered_kwargs.pop('return', None)

        if mode in ('long', 'extended'):
            message = (
                '[ "{0}" ]'
                '\n\n[ Signature ]\n\n{1}').format(
                    _lower_order_function(conversion_function).__name__,
                    pformat(
                        signature_inspection(
                            _lower_order_function(conversion_function))))

            if filtered_kwargs:
                message += '\n\n[ Filtered Arguments ]\n\n{0}'.format(
                    pformat(filtered_kwargs))

            if mode in ('extended', ):
                message += '\n\n[ Documentation ]\n\n{0}'.format(
                    textwrap.dedent(
                        str(
                            _lower_order_function(conversion_function)
                            .__doc__)).strip())

            if return_value is not None:
                message += '\n\n[ Conversion Output ]\n\n{0}'.format(
                    return_value)

            message_box(message, width, padding, print_callable)
@domain_range_scale('1')
def convert(a, source, target, **kwargs):
    """
    Converts given object :math:`a` from source colour representation to
    target colour representation using the automatic colour conversion graph.

    The conversion is performed by finding the shortest path in a
    `NetworkX <https://networkx.github.io/>`__ :class:`DiGraph` class
    instance.

    The conversion path adopts the **'1'** domain-range scale and the object
    :math:`a` is expected to be *soft* normalised accordingly. For example,
    *CIE XYZ* tristimulus values arguments for use with the *CAM16* colour
    appearance model should be in domain `[0, 1]` instead of the domain
    `[0, 100]` used with the **'Reference'** domain-range scale. The arguments
    are typically converted as follows:

    - *Scalars* in domain-range `[0, 10]`, e.g. *Munsell Value* are
      scaled by *10*.
    - *Percentages* in domain-range `[0, 100]` are scaled by *100*.
    - *Degrees* in domain-range `[0, 360]` are scaled by *360*.
    - *Integers* in domain-range `[0, 2**n -1]` where `n` is the bit
      depth are scaled by *2**n -1*.

    See the `Domain-Range Scales <../basics.html#domain-range-scales>`__ page
    for more information.

    Parameters
    ----------
    a : array_like or numeric or SpectralDistribution
        Object :math:`a` to convert. If :math:`a` represents a reflectance,
        transmittance or absorptance value, the expectation is that it is
        viewed under *CIE Standard Illuminant D Series* *D65*. The illuminant
        can be changed on a per definition basis along the conversion path.
    source : unicode
        Source colour representation, i.e. the source node in the automatic
        colour conversion graph.
    target : unicode
        Target colour representation, i.e. the target node in the automatic
        colour conversion graph.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        {'\\*'},
        Please refer to the documentation of the supported conversion
        definitions.

        Arguments for the conversion definitions are passed as keyword
        arguments whose names are those of the conversion definitions and
        whose values are set as dictionaries. For example, in the conversion
        from spectral distribution to *sRGB* colourspace, passing arguments
        to the :func:`colour.sd_to_XYZ` definition is done as follows::

            convert(sd, 'Spectral Distribution', 'sRGB', sd_to_XYZ={\
'illuminant': SDS_ILLUMINANTS['FL2']})

        It is also possible to pass keyword arguments directly to the various
        conversion definitions irrespective of their name. This is
        ``dangerous`` and could cause unexpected behaviour, consider the
        following conversion::

            convert(sd, 'Spectral Distribution', 'sRGB', illuminant=\
SDS_ILLUMINANTS['FL2'])

        Because both the :func:`colour.sd_to_XYZ` and
        :func:`colour.XYZ_to_sRGB` definitions have an *illuminant* argument,
        `SDS_ILLUMINANTS['FL2']` will be passed to both of them and will raise
        an exception in the :func:`colour.XYZ_to_sRGB` definition. This will
        be addressed in the future by either catching the exception and trying
        again without the keyword argument or more elegantly via type
        checking.

        With that in mind, this mechanism offers some good benefits: For
        example, it allows defining a conversion from *CIE XYZ* colourspace to
        *n* different colour models while passing an illuminant argument but
        without having to explicitly define all the explicit conversion
        definition arguments::

            a = np.array([0.20654008, 0.12197225, 0.05136952])
            illuminant = CCS_ILLUMINANTS[\
'CIE 1931 2 Degree Standard Observer']['D65']
            for model in ('CIE xyY', 'CIE Lab'):
                convert(a, 'CIE XYZ', model, illuminant=illuminant)

        Instead of::

            for model in ('CIE xyY', 'CIE Lab'):
                convert(a, 'CIE XYZ', model, XYZ_to_xyY={'illuminant': \
illuminant}, XYZ_to_Lab={'illuminant': illuminant})

        Mixing both approaches is possible for the brevity benefits. It is
        made possible because the keyword arguments directly passed are
        filtered first and then the resulting dict is updated with the
        explicit conversion definition arguments::

            illuminant = CCS_ILLUMINANTS[\
'CIE 1931 2 Degree Standard Observer']['D65']
            convert(sd, 'Spectral Distribution', 'sRGB', illuminant=\
SDS_ILLUMINANTS['FL2'], XYZ_to_sRGB={'illuminant': illuminant})

        For inspection purposes, verbose is enabled by passing arguments to
        the :func:`colour.describe_conversion_path` definition via the
        ``verbose`` keyword argument as follows::

            convert(sd, 'Spectral Distribution', 'sRGB', \
verbose={'mode': 'Long'})

    Returns
    -------
    ndarray or numeric or SpectralDistribution
        Converted object :math:`a`.

    Warnings
    --------
    The domain-range scale is **'1'** and cannot be changed.

    Notes
    -----
    - The **RGB** colour representation is assumed to be linear and
      representing *scene-referred* imagery, i.e. **Scene-Referred RGB**
      representation. To encode such *RGB* values as *output-referred*
      (*display-referred*) imagery, i.e. encode the *RGB* values using an
      encoding colour component transfer function (Encoding CCTF) /
      opto-electronic transfer function (OETF / OECF), the
      **Output-Referred RGB** representation must be used::

          convert(RGB, 'Scene-Referred RGB', 'Output-Referred RGB')

      Likewise, encoded *output-referred* *RGB* values can be decoded with
      the **Scene-Referred RGB** representation::

          convert(RGB, 'Output-Referred RGB', 'Scene-Referred RGB')

    - Various defaults have been adopted compared to the low-level *Colour*
      API:

      - The default illuminant for the computation is
        *CIE Standard Illuminant D Series* *D65*. It can be changed on a
        per definition basis along the conversion path.
      - The default *RGB* colourspace primaries and whitepoint are that of
        the *BT.709*/*sRGB* colourspace. They can be changed on a per
        definition basis along the conversion path.
      - When using **sRGB** as a source or target colour representation,
        the convenient :func:`colour.sRGB_to_XYZ` and
        :func:`colour.XYZ_to_sRGB` definitions are used, respectively.
        Thus, decoding and encoding using the sRGB electro-optical transfer
        function (EOTF) and its inverse will be applied by default.
      - Most of the colour appearance models have defaults set according to
        *IEC 61966-2-1:1999* viewing conditions, i.e. *sRGB* 64 Lux ambient
        illumination, 80 :math:`cd/m^2`, adapting field luminance about
        20% of a white object in the scene.

    Examples
    --------
    >>> from colour import SDS_COLOURCHECKERS
    >>> sd = SDS_COLOURCHECKERS['ColorChecker N Ohta']['dark skin']
    >>> convert(sd, 'Spectral Distribution', 'sRGB',
    ...         verbose={'mode': 'Short', 'width': 75})
    ... # doctest: +ELLIPSIS
    ===========================================================================
    *                                                                         *
    *   [ Conversion Path ]                                                   *
    *                                                                         *
    *   "sd_to_XYZ" --> "XYZ_to_sRGB"                                         *
    *                                                                         *
    ===========================================================================
    array([ 0.4567579...,  0.3098698...,  0.2486192...])
    >>> illuminant = SDS_ILLUMINANTS['FL2']
    >>> convert(sd, 'Spectral Distribution', 'sRGB',
    ...         sd_to_XYZ={'illuminant': illuminant})
    ... # doctest: +ELLIPSIS
    array([ 0.4792457...,  0.3167696...,  0.1736272...])
    >>> a = np.array([0.45675795, 0.30986982, 0.24861924])
    >>> convert(a, 'Output-Referred RGB', 'CAM16UCS')
    ... # doctest: +ELLIPSIS
    array([ 0.3999481...,  0.0920655...,  0.0812752...])
    >>> a = np.array([0.39994811, 0.09206558, 0.08127526])
    >>> convert(a, 'CAM16UCS', 'sRGB', verbose={'mode': 'Short', 'width': 75})
    ... # doctest: +ELLIPSIS
    ===========================================================================
    *                                                                         *
    *   [ Conversion Path ]                                                   *
    *                                                                         *
    *   "UCS_Li2017_to_JMh_CAM16" --> "JMh_CAM16_to_CAM16" -->                *
    *   "CAM16_to_XYZ" --> "XYZ_to_sRGB"                                      *
    *                                                                         *
    ===========================================================================
    array([ 0.4567576...,  0.3098826...,  0.2486222...])
    """

    # TODO: Remove the following warning whenever the automatic colour
    # conversion graph implementation is considered stable.
    usage_warning(
        'The "Automatic Colour Conversion Graph" is a beta feature, be '
        'mindful of this when using it. Please report any unexpected '
        'behaviour and do not hesitate to ask any questions should they arise.'
        '\nThis warning can be disabled with the '
        '"colour.utilities.suppress_warnings" context manager as follows:\n'
        'with colour.utilities.suppress_warnings(colour_usage_warnings=True): '
        '\n convert(*args, **kwargs)')

    # Graph nodes are registered lower-cased.
    source, target = source.lower(), target.lower()

    # Shortest sequence of conversion definitions from "source" to "target".
    conversion_path = _conversion_path(source, target)

    # NOTE(review): "copy" is shallow, thus any per-definition dict the caller
    # passed in "kwargs" is shared with "verbose_kwargs" and gains a 'return'
    # key below — confirm this aliasing is intended.
    verbose_kwargs = copy(kwargs)
    for conversion_function in conversion_path:
        conversion_function_name = _lower_order_function(
            conversion_function).__name__

        # Filtering compatible keyword arguments passed directly and
        # irrespective of any conversion function name.
        filtered_kwargs = filter_kwargs(conversion_function, **kwargs)

        # Filtering keyword arguments passed as dictionary with the
        # conversion function name.
        filtered_kwargs.update(kwargs.get(conversion_function_name, {}))

        a = conversion_function(a, **filtered_kwargs)

        # Recording the conversion function output under its name so that
        # "describe_conversion_path" can report it as "Conversion Output".
        if conversion_function_name in verbose_kwargs:
            verbose_kwargs[conversion_function_name]['return'] = a
        else:
            verbose_kwargs[conversion_function_name] = {'return': a}

    if 'verbose' in verbose_kwargs:
        # The "verbose" value is a dict of arguments, e.g. "mode" and
        # "width", forwarded to "describe_conversion_path".
        verbose_kwargs.update(verbose_kwargs.pop('verbose'))
        describe_conversion_path(source, target, **verbose_kwargs)

    return a
| {
"alphanum_fraction": 0.6218056051,
"author": null,
"avg_line_length": 36.8734817814,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "684f723472e6f76e8338d05fa36abd34349dfd13",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9f5e87438abd38bf4847a67a4678ce76fbe87637",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "rishabhvarshney14/colour",
"max_forks_repo_path": "colour/graph/conversion.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9f5e87438abd38bf4847a67a4678ce76fbe87637",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "rishabhvarshney14/colour",
"max_issues_repo_path": "colour/graph/conversion.py",
"max_line_length": 79,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "9f5e87438abd38bf4847a67a4678ce76fbe87637",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "rishabhvarshney14/colour",
"max_stars_repo_path": "colour/graph/conversion.py",
"max_stars_repo_stars_event_max_datetime": "2021-08-10T05:32:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-10T05:32:37.000Z",
"num_tokens": 9773,
"path": null,
"reason": "import numpy,import networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 36431
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.