text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import copy
import cmath
import h5py
import math
import numpy
import scipy.linalg
import sys
import time
from pauxy.walkers.multi_ghf import MultiGHFWalker
from pauxy.walkers.single_det import SingleDetWalker
from pauxy.walkers.multi_det import MultiDetWalker
from pauxy.walkers.multi_coherent import MultiCoherentWalker
from pauxy.walkers.thermal import ThermalWalker
from pauxy.walkers.stack import FieldConfig
from pauxy.utils.io import get_input_value
from pauxy.utils.misc import update_stack
class Walkers(object):
    """Container for groups of walkers which make up a wavefunction.

    Parameters
    ----------
    system : object
        System object.
    trial : object
        Trial wavefunction object.
    nwalkers : int
        Number of walkers to initialise.
    nprop_tot : int
        Total number of propagators to store for back propagation + itcf.
    nbp : int
        Number of back propagation steps.
    """

    def __init__(self, system, trial, qmc, walker_opts={}, verbose=False,
                 comm=None, nprop_tot=None, nbp=None):
        # NOTE(review): mutable default `walker_opts={}` — only read here, but
        # a None default would be safer; confirm no caller mutates it.
        self.nwalkers = qmc.nwalkers
        self.ntot_walkers = qmc.ntot_walkers
        if verbose:
            print("# nwalkers = {}".format(self.nwalkers))
            print("# ntot_walkers = {}".format(self.ntot_walkers))
        self.write_freq = walker_opts.get('write_freq', 0)
        self.write_file = walker_opts.get('write_file', 'restart.h5')
        self.use_log_shift = walker_opts.get('use_log_shift', False)
        self.shift_counter = 1
        self.read_file = walker_opts.get('read_file', None)
        if comm is None:
            rank = 0
        else:
            rank = comm.rank
        if verbose:
            print("# Setting up wavefunction object.")
        # Select the walker implementation from the trial wavefunction type.
        if trial.name == 'MultiSlater':
            self.walker_type = 'MSD'
            # TODO: FDM FIXTHIS
            if trial.ndets == 1:
                if verbose:
                    # NOTE(review): typo "Usinge" in the message below.
                    print("# Usinge single det walker with msd wavefunction.")
                self.walker_type = 'SD'
                # Collapse the multi-determinant trial to its single determinant.
                trial.psi = trial.psi[0]
                self.walkers = [SingleDetWalker(system, trial, walker_opts=walker_opts,
                                                index=w, nprop_tot=nprop_tot,
                                                nbp=nbp)
                                for w in range(qmc.nwalkers)]
            else:
                self.walkers = [
                    MultiDetWalker(system, trial, walker_opts=walker_opts,
                                   verbose=(verbose and w == 0))
                    for w in range(qmc.nwalkers)
                ]
            self.buff_size = self.walkers[0].buff_size
            if nbp is not None:
                self.buff_size += self.walkers[0].field_configs.buff_size
            # Scratch buffer used for MPI sends/receives of whole walkers.
            self.walker_buffer = numpy.zeros(self.buff_size,
                                             dtype=numpy.complex128)
        elif trial.name == 'thermal':
            self.walker_type = 'thermal'
            self.walkers = [ThermalWalker(system, trial,
                                          walker_opts=walker_opts,
                                          verbose=(verbose and w==0))
                            for w in range(qmc.nwalkers)]
            self.buff_size = self.walkers[0].buff_size + self.walkers[0].stack.buff_size
            self.walker_buffer = numpy.zeros(self.buff_size,
                                             dtype=numpy.complex128)
            stack_size = self.walkers[0].stack_size
            if system.name == "Hubbard":
                # Force the stabilisation frequency to be commensurate with
                # the propagator stack size.
                if stack_size % qmc.nstblz != 0 or qmc.nstblz < stack_size:
                    if verbose:
                        print("# Stabilisation frequency is not commensurate "
                              "with stack size.")
                        print("# Determining a better value.")
                    if qmc.nstblz < stack_size:
                        qmc.nstblz = stack_size
                        if verbose:
                            print("# Updated stabilization frequency: "
                                  " {}".format(qmc.nstblz))
                    else:
                        qmc.nstblz = update_stack(qmc.nstblz, stack_size,
                                                  name="nstblz", verbose=verbose)
        elif trial.name == "coherent_state" and trial.symmetrize:
            self.walker_type = 'MSD'
            self.walkers = [MultiCoherentWalker(system, trial, walker_opts=walker_opts,
                                                index=w, nprop_tot=nprop_tot,
                                                nbp=nbp)
                            for w in range(qmc.nwalkers)]
            self.buff_size = self.walkers[0].buff_size
            if nbp is not None:
                if verbose:
                    print("# Performing back propagation.")
                    print("# Number of steps in imaginary time: {:}.".format(nbp))
                self.buff_size += self.walkers[0].field_configs.buff_size
            self.walker_buffer = numpy.zeros(self.buff_size,
                                             dtype=numpy.complex128)
        else:
            # Default: single-determinant walkers.
            self.walker_type = 'SD'
            self.walkers = [SingleDetWalker(system, trial, walker_opts=walker_opts,
                                            index=w, nprop_tot=nprop_tot,
                                            nbp=nbp)
                            for w in range(qmc.nwalkers)]
            self.buff_size = self.walkers[0].buff_size
            if nbp is not None:
                if verbose:
                    print("# Performing back propagation.")
                    print("# Number of steps in imaginary time: {:}.".format(nbp))
                self.buff_size += self.walkers[0].field_configs.buff_size
            self.walker_buffer = numpy.zeros(self.buff_size,
                                             dtype=numpy.complex128)
        # Field-configuration dtype: continuous fields for Generic/UEG,
        # discrete (integer) auxiliary fields otherwise.
        if system.name == "Generic" or system.name == "UEG":
            dtype = complex
        else:
            dtype = int
        self.pcont_method = get_input_value(walker_opts, 'population_control',
                                            default='comb')
        self.min_weight = walker_opts.get('min_weight', 0.1)
        self.max_weight = walker_opts.get('max_weight', 4.0)
        if verbose:
            print("# Using {} population control "
                  "algorithm.".format(self.pcont_method))
            mem = float(self.walker_buffer.nbytes) / (1024.0**3)
            print("# Buffer size for communication: {:13.8e} GB".format(mem))
            if mem > 2.0:
                # TODO: FDM FIX THIS
                print(" # Warning: Walker buffer size > 2GB. May run into MPI"
                      "issues.")
        if not self.walker_type == "thermal":
            # weight + phase + overlap + flattened wavefunction.
            walker_size = 3 + self.walkers[0].phi.size
        # NOTE(review): for thermal walkers with write_freq > 0, walker_size
        # is never assigned — confirm restart writing is unsupported there.
        if self.write_freq > 0:
            self.write_restart = True
            self.dsets = []
            with h5py.File(self.write_file,'w',driver='mpio',comm=comm) as fh5:
                for i in range(self.ntot_walkers):
                    fh5.create_dataset('walker_%d'%i, (walker_size,),
                                       dtype=numpy.complex128)
        else:
            self.write_restart = False
        if self.read_file is not None:
            if verbose:
                print("# Reading walkers from %s file series."%self.read_file)
            self.read_walkers(comm)
        self.target_weight = qmc.ntot_walkers
        self.nw = qmc.nwalkers
        self.set_total_weight(qmc.ntot_walkers)

    def orthogonalise(self, trial, free_projection):
        """Orthogonalise all walkers.

        Parameters
        ----------
        trial : object
            Trial wavefunction object.
        free_projection : bool
            True if doing free projection.
        """
        for w in self.walkers:
            detR = w.reortho(trial)
            if free_projection:
                # Keep the modulus in the weight and track the phase
                # separately for free projection.
                (magn, dtheta) = cmath.polar(detR)
                w.weight *= magn
                w.phase *= cmath.exp(1j*dtheta)

    def add_field_config(self, nprop_tot, nbp, system, dtype):
        """Add FieldConfig object to walker object.

        Parameters
        ----------
        nprop_tot : int
            Total number of propagators to store for back propagation + itcf.
        nbp : int
            Number of back propagation steps.
        nfields : int
            Number of fields to store for each back propagation step.
        dtype : type
            Field configuration type.
        """
        for w in self.walkers:
            w.field_configs = FieldConfig(system.nfields, nprop_tot, nbp, dtype)

    def copy_historic_wfn(self):
        """Copy current wavefunction to psi_n for next back propagation step."""
        for (i,w) in enumerate(self.walkers):
            numpy.copyto(self.walkers[i].phi_old, self.walkers[i].phi)

    def copy_bp_wfn(self, phi_bp):
        """Copy back propagated wavefunction.

        Parameters
        ----------
        phi_bp : object
            list of walker objects containing back propagated walkers.
        """
        for (i, (w,wbp)) in enumerate(zip(self.walkers, phi_bp)):
            numpy.copyto(self.walkers[i].phi_bp, wbp.phi)

    def copy_init_wfn(self):
        """Copy current wavefunction to initial wavefunction.

        The definition of the initial wavefunction depends on whether we are
        calculating an ITCF or not.
        """
        for (i,w) in enumerate(self.walkers):
            numpy.copyto(self.walkers[i].phi_right, self.walkers[i].phi)

    def pop_control(self, comm):
        """Rescale walker weights and apply the configured population control.

        Dispatches to either ``comb`` or ``pair_branch`` depending on
        ``self.pcont_method``.  Collective: must be called on all ranks.
        """
        if self.ntot_walkers == 1:
            return
        if self.use_log_shift:
            self.update_log_ovlp(comm)
        weights = numpy.array([abs(w.weight) for w in self.walkers])
        global_weights = numpy.empty(len(weights)*comm.size)
        comm.Allgather(weights, global_weights)
        total_weight = sum(global_weights)
        # Rescale weights to combat exponential decay/growth.
        scale = total_weight / self.target_weight
        if total_weight < 1e-8:
            if comm.rank == 0:
                print("# Warning: Total weight is {:13.8e}: "
                      .format(total_weight))
                print("# Something is seriously wrong.")
            sys.exit()
        self.set_total_weight(total_weight)
        # Todo: Just standardise information we want to send between routines.
        for w in self.walkers:
            w.unscaled_weight = w.weight
            w.weight = w.weight / scale
        if self.pcont_method == "comb":
            global_weights = global_weights / scale
            self.comb(comm, global_weights)
        elif self.pcont_method == "pair_branch":
            self.pair_branch(comm)
        else:
            if comm.rank == 0:
                print("Unknown population control method.")

    def comb(self, comm, weights):
        """Apply the comb method of population control / branching.

        See Booth & Gubernatis PRE 80, 046704 (2009).

        Parameters
        ----------
        comm : MPI communicator
        weights : numpy.ndarray
            Rescaled global walker weights (one entry per walker, all ranks).
        """
        # Need make a copy to since the elements in psi are only references to
        # walker objects in memory. We don't want future changes in a given
        # element of psi having unintended consequences.
        # todo : add phase to walker for free projection
        if comm.rank == 0:
            parent_ix = numpy.zeros(len(weights), dtype='i')
        else:
            parent_ix = numpy.empty(len(weights), dtype='i')
        if comm.rank == 0:
            # Comb selection: walker i survives parent_ix[i] times.
            total_weight = sum(weights)
            cprobs = numpy.cumsum(weights)
            r = numpy.random.random()
            comb = [(i+r) * (total_weight/self.target_weight) for i in
                    range(self.target_weight)]
            iw = 0
            ic = 0
            while ic < len(comb):
                if comb[ic] < cprobs[iw]:
                    parent_ix[iw] += 1
                    ic += 1
                else:
                    iw += 1
            data = {'ix': parent_ix}
        else:
            data = None
        data = comm.bcast(data, root=0)
        parent_ix = data['ix']
        # Keep total weight saved for capping purposes.
        # where returns a tuple (array,), selecting first element.
        kill = numpy.where(parent_ix == 0)[0]
        clone = numpy.where(parent_ix > 1)[0]
        reqs = []
        # NOTE(review): walker_buffers is never used in this method.
        walker_buffers = []
        # First initiate non-blocking sends of walkers.
        comm.barrier()
        for i, (c, k) in enumerate(zip(clone, kill)):
            # Sending from current processor?
            if c // self.nw == comm.rank:
                # Location of walker to clone in local list.
                clone_pos = c % self.nw
                # copying walker data to intermediate buffer to avoid issues
                # with accessing walker data during send. Might not be
                # necessary.
                dest_proc = k // self.nw
                # with h5py.File('before_{}.h5'.format(comm.rank), 'a') as fh5:
                    # fh5['walker_{}_{}_{}'.format(c,k,dest_proc)] = self.walkers[clone_pos].get_buffer()
                buff = self.walkers[clone_pos].get_buffer()
                reqs.append(comm.Isend(buff, dest=dest_proc, tag=i))
        # Now receive walkers on processors where walkers are to be killed.
        for i, (c, k) in enumerate(zip(clone, kill)):
            # Receiving to current processor?
            if k // self.nw == comm.rank:
                # Processor we are receiving from.
                source_proc = c // self.nw
                # Location of walker to kill in local list of walkers.
                kill_pos = k % self.nw
                comm.Recv(self.walker_buffer, source=source_proc, tag=i)
                # with h5py.File('walkers_recv.h5', 'w') as fh5:
                    # fh5['walk_{}'.format(k)] = self.walker_buffer.copy()
                self.walkers[kill_pos].set_buffer(self.walker_buffer)
                # with h5py.File('after_{}.h5'.format(comm.rank), 'a') as fh5:
                    # fh5['walker_{}_{}_{}'.format(c,k,comm.rank)] = self.walkers[kill_pos].get_buffer()
        # Complete non-blocking send.
        for rs in reqs:
            rs.wait()
        # Necessary?
        # if len(kill) > 0 or len(clone) > 0:
            # sys.exit()
        comm.Barrier()
        # Reset walker weight.
        # TODO: check this.
        for w in self.walkers:
            w.weight = 1.0

    def pair_branch(self, comm):
        """Pair-branching population control.

        Rank 0 sorts all walkers by weight and pairs the lightest with the
        heaviest; one of each pair is stochastically cloned (weight halved
        between the pair) and the other killed.  Each entry of walker_info is
        [|weight|, clone/keep/kill flag (2/1/0), owning rank, partner rank].
        Collective: must be called on all ranks.
        """
        walker_info = [[abs(w.weight),1,comm.rank,comm.rank] for w in self.walkers]
        glob_inf = comm.gather(walker_info, root=0)
        # Want same random number seed used on all processors
        if comm.rank == 0:
            # Rescale weights.
            glob_inf = numpy.array([item for sub in glob_inf for item in sub])
            total_weight = sum(w[0] for w in glob_inf)
            # mergesort is stable, so isort exactly inverts sort.
            sort = numpy.argsort(glob_inf[:,0], kind='mergesort')
            isort = numpy.argsort(sort, kind='mergesort')
            glob_inf = glob_inf[sort]
            s = 0
            e = len(glob_inf) - 1
            tags = []
            isend = 0
            while s < e:
                if glob_inf[s][0] < self.min_weight or glob_inf[e][0] > self.max_weight:
                    # sum of paired walker weights
                    wab = glob_inf[s][0] + glob_inf[e][0]
                    r = numpy.random.rand()
                    if r < glob_inf[e][0] / wab:
                        # clone large weight walker
                        glob_inf[e][0] = 0.5 * wab
                        glob_inf[e][1] = 2
                        # Processor we will send duplicated walker to
                        glob_inf[e][3] = glob_inf[s][2]
                        send = glob_inf[s][2]
                        # Kill small weight walker
                        glob_inf[s][0] = 0.0
                        glob_inf[s][1] = 0
                        glob_inf[s][3] = glob_inf[e][2]
                    else:
                        # clone small weight walker
                        glob_inf[s][0] = 0.5 * wab
                        glob_inf[s][1] = 2
                        # Processor we will send duplicated walker to
                        glob_inf[s][3] = glob_inf[e][2]
                        send = glob_inf[e][2]
                        # Kill small weight walker
                        glob_inf[e][0] = 0.0
                        glob_inf[e][1] = 0
                        glob_inf[e][3] = glob_inf[s][2]
                    tags.append([send])
                    s += 1
                    e -= 1
                else:
                    break
            nw = self.nwalkers
            # Undo the sort and reshape so scatter hands each rank its
            # own walkers' updated info.
            glob_inf = glob_inf[isort].reshape((comm.size,nw,4))
        else:
            data = None
            total_weight = 0
        data = comm.scatter(glob_inf, root=0)
        # Keep total weight saved for capping purposes.
        walker_buffers = []
        reqs = []
        for iw, walker in enumerate(data):
            if walker[1] > 1:
                # This walker was cloned: send a copy to its partner rank.
                tag = comm.rank*len(walker_info) + walker[3]
                self.walkers[iw].weight = walker[0]
                buff = self.walkers[iw].get_buffer()
                reqs.append(comm.Isend(buff,
                                       dest=int(round(walker[3])),
                                       tag=tag))
        for iw, walker in enumerate(data):
            if walker[1] == 0:
                # This walker was killed: overwrite it with the clone.
                tag = walker[3]*len(walker_info) + comm.rank
                comm.Recv(self.walker_buffer,
                          source=int(round(walker[3])),
                          tag=tag)
                self.walkers[iw].set_buffer(self.walker_buffer)
        for r in reqs:
            r.wait()

    def recompute_greens_function(self, trial, time_slice=None):
        """Recompute each walker's Green's function at `time_slice`."""
        for w in self.walkers:
            w.greens_function(trial, time_slice)

    def set_total_weight(self, total_weight):
        """Record the (previous and current) global total weight on every walker."""
        for w in self.walkers:
            w.total_weight = total_weight
            w.old_total_weight = w.total_weight

    def reset(self, trial):
        """Reset all (thermal) walkers to the trial density matrix with unit weight."""
        for w in self.walkers:
            w.stack.reset()
            w.stack.set_all(trial.dmat)
            w.greens_function(trial)
            w.weight = 1.0
            w.phase = 1.0 + 0.0j

    def get_write_buffer(self, i):
        """Pack walker i as [weight, phase, overlap, phi...] for checkpointing."""
        w = self.walkers[i]
        buff = numpy.concatenate([[w.weight], [w.phase], [w.ot], w.phi.ravel()])
        return buff

    def set_walker_from_buffer(self, i, buff):
        """Inverse of get_write_buffer: restore walker i from a flat buffer."""
        w = self.walkers[i]
        w.weight = buff[0]
        w.phase = buff[1]
        w.ot = buff[2]
        w.phi = buff[3:].reshape(self.walkers[i].phi.shape)

    def write_walkers(self, comm):
        """Write all walkers to the restart file (parallel HDF5, collective)."""
        start = time.time()
        with h5py.File(self.write_file,'r+',driver='mpio',comm=comm) as fh5:
            for (i,w) in enumerate(self.walkers):
                # Global walker index for this rank's i-th walker.
                ix = i + self.nwalkers*comm.rank
                buff = self.get_write_buffer(i)
                fh5['walker_%d'%ix][:] = self.get_write_buffer(i)
        if comm.rank == 0:
            print(" # Writing walkers to file.")
            print(" # Time to write restart: {:13.8e} s"
                  .format(time.time()-start))

    def update_log_ovlp(self, comm):
        """Update running averages of the log overlap / detR shifts on all walkers."""
        send = numpy.zeros(3, dtype=numpy.complex128)
        # Overlap log factor
        send[0] = sum(abs(w.ot) for w in self.walkers)
        # Det R log factor
        send[1] = sum(abs(w.detR) for w in self.walkers)
        send[2] = sum(abs(w.log_detR) for w in self.walkers)
        global_av = numpy.zeros(3, dtype=numpy.complex128)
        comm.Allreduce(send, global_av)
        log_shift = numpy.log(global_av[0]/self.ntot_walkers)
        detR_shift = numpy.log(global_av[1]/self.ntot_walkers)
        log_detR_shift = global_av[2]/self.ntot_walkers
        # w.log_shift = -0.5
        # Incremental (running) mean over calls: counter n, previous count n-1.
        n = self.shift_counter
        nm1 = self.shift_counter - 1
        for w in self.walkers:
            w.log_shift = (w.log_shift*nm1 + log_shift)/n
            w.log_detR_shift = (w.log_detR_shift*nm1 + log_detR_shift)/n
            w.detR_shift = (w.detR_shift*nm1 + detR_shift)/n
        self.shift_counter += 1

    def read_walkers(self, comm):
        """Restore this rank's walkers from the restart file, if present."""
        with h5py.File(self.read_file, 'r') as fh5:
            for (i,w) in enumerate(self.walkers):
                try:
                    ix = i + self.nwalkers*comm.rank
                    self.set_walker_from_buffer(i, fh5['walker_%d'%ix][:])
                except KeyError:
                    print(" # Could not read walker data from:"
                          " %s"%(self.read_file))
|
{"hexsha": "fa078b878dbe30e91904cdecca0939654da23115", "size": 20669, "ext": "py", "lang": "Python", "max_stars_repo_path": "pauxy/walkers/handler.py", "max_stars_repo_name": "pauxy-qmc/pauxy", "max_stars_repo_head_hexsha": "1da80284284769b59361c73cfa3c2d914c74a73f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-08-05T17:17:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T04:06:18.000Z", "max_issues_repo_path": "pauxy/walkers/handler.py", "max_issues_repo_name": "pauxy-qmc/pauxy", "max_issues_repo_head_hexsha": "1da80284284769b59361c73cfa3c2d914c74a73f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-05-17T21:28:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-22T18:05:50.000Z", "max_forks_repo_path": "pauxy/walkers/handler.py", "max_forks_repo_name": "pauxy-qmc/pauxy", "max_forks_repo_head_hexsha": "1da80284284769b59361c73cfa3c2d914c74a73f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-05-18T01:03:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-13T15:36:29.000Z", "avg_line_length": 42.5288065844, "max_line_length": 105, "alphanum_fraction": 0.524215008, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4653}
|
[STATEMENT]
(* Double aggregation sums over a square matrix commute: summing first over
   rows then columns equals summing first over columns then rows. *)
lemma agg_sum_commute:
  fixes f :: "('a,'b::aggregation_order) square"
  shows "(\<Sum>\<^sub>k \<Sum>\<^sub>l f (k,l)) = (\<Sum>\<^sub>l \<Sum>\<^sub>k f (k,l))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. aggregation.sum_0 (\<lambda>k. aggregation.sum_0 (\<lambda>l. f (k, l)) {l. True}) {k. True} = aggregation.sum_0 (\<lambda>l. aggregation.sum_0 (\<lambda>k. f (k, l)) {k. True}) {l. True}
[PROOF STEP]
by (rule aggregation.sum_0.swap)
|
{"llama_tokens": 197, "file": "Aggregation_Algebras_Matrix_Aggregation_Algebras", "length": 1}
|
function slsharedisp_decindent(nsteps)
%SLSHAREDISP_DECINDENT Decreases the indent of the displayer
%
% $ Syntax $
%   - slsharedisp_decindent()
%   - slsharedisp_decindent(nsteps)
%
% $ Description $
%   - Without arguments, the indent of the shared displayer is reduced by
%     a single step; with nsteps given, it is reduced by that many steps.
%
% $ History $
%   - Created by Dahua Lin, on Aug 29, 2006
%

global GLOBAL_SHARE_DISPLAYER;

% The shared displayer must have been opened before adjusting its indent.
if isempty(GLOBAL_SHARE_DISPLAYER)
    error('sltoolbox:gdisperr', ...
        'The global displayer is not open');
end

% Default to a single step when no count is supplied.
if nargin == 0
    nsteps = 1;
end

% Decrease the indent of the top-most displayer entry.
GLOBAL_SHARE_DISPLAYER(end).indent = GLOBAL_SHARE_DISPLAYER(end).indent - nsteps;
|
{"author": "lmthang", "repo": "nmt.hybrid", "sha": "50d5c025f18ed280ff0fd2e2adce327f4170a2c3", "save_path": "github-repos/MATLAB/lmthang-nmt.hybrid", "path": "github-repos/MATLAB/lmthang-nmt.hybrid/nmt.hybrid-50d5c025f18ed280ff0fd2e2adce327f4170a2c3/code/wordsim/code/sltoolbox_r101/sltoolbox_r101/sltoolbox/utils/slsharedisp_decindent.m"}
|
from fawkes.models import NetworkPoisson
import pandas as pd
import numpy as np
import h5py as h5
import sys
import os
"""Creates HDF5 datasets of estimates and stability from MCMC samples."""
def import_samples(path, name, date, burn):
    """Load MCMC samples for one (name, date) pair from an HDF5 file.

    Parameters
    ----------
    path : str
        Path to the HDF5 file containing the sample groups.
    name : str
        Ticker symbol (top-level HDF5 group).
    date : str
        Date string (HDF5 subgroup).
    burn : int
        Number of leading samples to discard as burn-in.

    Returns
    -------
    tuple of ndarray or None
        (lambda0, W, mu, tau) with burn-in removed along the sample axis,
        or None when the file/group is missing or unreadable.
    """
    print("Importing data for name {} and date {}...".format(name, date))
    try:
        with h5.File(path, 'r') as hdf:
            lambda0 = hdf['/{}/{}/lambda0'.format(name, date)][:]
            W = hdf['/{}/{}/W'.format(name, date)][:]
            mu = hdf['/{}/{}/mu'.format(name, date)][:]
            tau = hdf['/{}/{}/tau'.format(name, date)][:]
        print('> Successfully imported {} samples.'.format(W.shape[-1]))
    except (OSError, KeyError):
        # BUG FIX: previously a bare `except:` which also swallowed
        # programming errors and KeyboardInterrupt.  Only a missing file
        # (OSError) or missing group/dataset (KeyError) is "no data".
        print('> Unable to find sample data; returning None')
        return None
    return lambda0[:, burn:], W[:, :, burn:], mu[:, :, burn:], tau[:, :, burn:]
def post_process(name, date, dt_max, burn=500):
    """Compute point estimates and a stability eigenvalue from MCMC samples.

    Parameters
    ----------
    name : str
        Ticker symbol.
    date : str
        Date string.
    dt_max : float
        Maximum time lag of the model; also selects the sample file.
    burn : int, optional
        Number of leading samples to discard as burn-in (default 500).

    Returns
    -------
    (list, list) or (None, None)
        `estimates` is [name, date, lambda0..., W..., mu..., tau...] using
        medians of the post-burn-in samples (matrices flattened row-major);
        `eigenvalue` is [name, date, max eigenvalue from the stability
        check].  (None, None) when no samples could be imported.
    """
    N = 12
    model = NetworkPoisson(N, dt_max)
    sample_path = '/Volumes/datasets/ITCH/samples/large2007_dt_max={}.hdf5'.format(dt_max)
    # Import samples
    samples = import_samples(sample_path, name, date, burn)
    if samples is None:
        # BUG FIX: previously fell through and returned a bare None, which
        # crashed callers that unpack two values.
        return None, None
    # Unpack samples
    lambda0, W, mu, tau = samples
    # Compute point estimates (posterior medians).
    lambda0 = np.median(lambda0, axis=1)
    W = np.median(W, axis=2).reshape(N * N)  # row major
    mu = np.median(mu, axis=2).reshape(N * N)  # row major
    tau = np.median(tau, axis=2).reshape(N * N)  # row major
    estimates = [name, date] + list(np.concatenate([lambda0, W, mu, tau]))
    # Check stability of the fitted network.
    model.lamb = lambda0
    model.W = W.reshape((N,N))
    model.mu = mu.reshape((N,N))
    model.tau = tau.reshape((N,N))
    _, maxeig = model.check_stability(return_value=True)
    eigenvalue = [name, date, maxeig]
    return estimates, eigenvalue
# Driver: post-process every (symbol, date) pair and collect the results.
root = '/Volumes/datasets/ITCH/'
dates = [date for date in os.listdir('{}/csv/'.format(root)) if date != '.DS_Store']
names = [name.lstrip(' ') for name in pd.read_csv('{}/SP500.txt'.format(root))['Symbol']]
names.sort()
mcmc = []
eig = []
dt_max = 60
for name in names:
    for date in dates:
        # BUG FIX: dt_max is a required argument of post_process and was
        # previously omitted, raising TypeError on every call.
        estimate, eigenvalue = post_process(name, date, dt_max)
        if estimate is not None:
            # Skip pairs with no sample data instead of appending None.
            mcmc.append(estimate)
            eig.append(eigenvalue)
# Write results to HDF5.
# BUG FIX: h5.File() was called with no filename/mode, which raises TypeError.
out_path = '{}/samples/estimates_dt_max={}.hdf5'.format(root, dt_max)
with h5.File(out_path, 'w') as hdf:
    hdf['mcmc'] = mcmc
    hdf['eig'] = eig
|
{"hexsha": "68d17ad80c5bb0f20778af1e2525b149ecc39ba4", "size": 2356, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/continuous/postprocess_samples.py", "max_stars_repo_name": "cswaney/fawkes", "max_stars_repo_head_hexsha": "90c623476bf62b808947277840a2d5de3c95a7ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-05-05T18:59:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-24T12:40:08.000Z", "max_issues_repo_path": "examples/continuous/postprocess_samples.py", "max_issues_repo_name": "cswaney/fawkes", "max_issues_repo_head_hexsha": "90c623476bf62b808947277840a2d5de3c95a7ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/continuous/postprocess_samples.py", "max_forks_repo_name": "cswaney/fawkes", "max_forks_repo_head_hexsha": "90c623476bf62b808947277840a2d5de3c95a7ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-19T05:18:41.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-19T05:18:41.000Z", "avg_line_length": 34.6470588235, "max_line_length": 90, "alphanum_fraction": 0.5984719864, "include": true, "reason": "import numpy", "num_tokens": 649}
|
#
# File:
# conmasklc.py
#
# Synopsis:
# Draws contours over a masked lambert conformal map.
#
# Category:
# Contouring over maps.
#
# Author:
# Mary Haley (based on a code by Fred Clare)
#
# Date of initial publication:
# December, 2009
#
# Description:
# This example produces two frames:
# 1) A masked Lambert Conformal map
# 2) Contours over a masked Lambert Conformal map
#
# Effects illustrated:
# o Reading in a NetCDF file using Nio.
# o Masking a Lambert conformal map projection.
# o Controlling the number and spacing of contour levels.
# o Changing font height and color.
# o How to explicitly define contour levels.
#
# Output:
# This examples produces contours over a masked lambert conformal map.
#
# Notes:
#
#
# Import NumPy.
#
from __future__ import print_function
import numpy, os
#
# Import Nio for a NetCDF reader and Ngl for plotting.
#
import Nio, Ngl
#
# Read some variables off the file.
#
# Locate PyNGL's example data directory and read the temperature field
# plus its coordinate arrays from the NetCDF file.
dirc = Ngl.pynglpath("data")
nf = Nio.open_file(os.path.join(dirc,"cdf","meccatemp.cdf"))
T = nf.variables["t"][:]
lat = nf.variables["lat"][:]
lon = nf.variables["lon"][:]
#
# Set up a color map using RGB triplets (gray -> blue -> green -> red ramp).
#
cmap = numpy.array(\
       [ [.700,.700,.700], \
         [.650,.650,.700], [.610,.600,.700], [.550,.550,.700], \
         [.560,.500,.700], [.450,.450,.700], [.420,.400,.700], \
         [.350,.350,.700], [.300,.300,.700], [.250,.250,.700], \
         [.200,.200,.700], [.150,.150,.700], [.100,.100,.700], \
         [.050,.050,.700], [.000,.000,.700], [.000,.050,.700], \
         [.000,.100,.700], [.000,.150,.700], [.000,.200,.700], \
         [.000,.250,.700], [.000,.300,.700], [.000,.350,.700], \
         [.000,.400,.700], [.000,.450,.600], [.000,.500,.500], \
         [.000,.550,.400], [.000,.600,.300], [.000,.650,.200], \
         [.000,.700,.100], [.000,.725,.000], [.000,.690,.000], \
         [.030,.685,.000], [.060,.680,.000], [.100,.575,.000], \
         [.130,.570,.000], [.160,.565,.000], [.550,.550,.000], \
         [.555,.545,.000], [.560,.530,.000], [.565,.485,.000], \
         [.570,.420,.000], [.675,.375,.000], [.680,.330,.000], \
         [.690,.300,.000], [.700,.285,.000], [.700,.270,.000], \
         [.700,.260,.000], [.700,.240,.000], [.700,.180,.000], \
         [.700,.130,.000], [.700,.120,.000], [.700,.100,.000], \
         [.700,.090,.000], [.750,.090,.000], [.800,.090,.000], \
         [.830,.070,.000], [.870,.050,.000], [.900,.030,.000], \
         [.950,.010,.000], [.990,.000,.000], [1.00,.000,.000], \
         [1.00,.000,.000] ])
wks_type = "png"
wks = Ngl.open_wks(wks_type,"conmasklc")
#----------- Begin first plot -----------------------------------------
res = Ngl.Resources()
#
# Set map resources: a Lambert Conformal projection limited to the
# given lat/lon box, with the area outside the box masked out.
#
res.mpProjection = "LambertConformal"
res.mpLimitMode = "LatLon"       # limit map via lat/lon
res.mpMinLatF = 10.              # map area
res.mpMaxLatF = 75.              # latitudes
res.mpMinLonF = 60.              # and
res.mpMaxLonF = 165.             # longitudes
res.nglMaskLambertConformal = True
#res.nglMaskLambertConformalOutlineOn = False
res.tiMainString = "A masked Lambert Conformal map"
res.tiMainFontHeightF = 0.010
# First frame: the masked map alone.
# NOTE(review): `map` shadows the Python builtin of the same name.
map = Ngl.map(wks,res)
#
# Set some scalar field resources (coordinate arrays for the data grid).
# The same `res` object is reused, so all map resources above still apply
# to the second frame.
#
res.sfXArray = lon
res.sfYArray = lat
#
# Set some contour resources: filled contours with explicit manual levels.
#
res.cnFillOn = True
res.cnLinesOn = False
res.cnLineLabelsOn = False
res.cnFillPalette = cmap
res.cnLevelSelectionMode = "ManualLevels"
res.cnMinLevelValF = 195.
res.cnMaxLevelValF = 328.
res.cnLevelSpacingF = 5.
#
# Set title resources.
#
res.tiMainString = "January Global Surface Temperature (K)"
#
# Set labelbar resources.
#
res.lbOrientation = "Horizontal"
nt = 30                          # Pick a time for a single plot.
res.lbTitleString = "Day {}".format(nt+1)
# Second frame: contours of the selected time slice over the masked map.
map = Ngl.contour_map(wks,T[nt,:,:],res)
Ngl.end()
|
{"hexsha": "aa1e66dd3118d7f198cd6f67ce92070735153b21", "size": 3989, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/conmasklc.py", "max_stars_repo_name": "yang69can/pyngl", "max_stars_repo_head_hexsha": "78a7040ce9de4b7a442b0c3b5faecccab2f01426", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 125, "max_stars_repo_stars_event_min_datetime": "2016-11-24T09:04:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T14:06:56.000Z", "max_issues_repo_path": "examples/conmasklc.py", "max_issues_repo_name": "yang69can/pyngl", "max_issues_repo_head_hexsha": "78a7040ce9de4b7a442b0c3b5faecccab2f01426", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2017-11-08T23:23:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T03:17:39.000Z", "max_forks_repo_path": "examples/conmasklc.py", "max_forks_repo_name": "yang69can/pyngl", "max_forks_repo_head_hexsha": "78a7040ce9de4b7a442b0c3b5faecccab2f01426", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2017-08-27T10:50:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T14:56:05.000Z", "avg_line_length": 27.8951048951, "max_line_length": 73, "alphanum_fraction": 0.5557783906, "include": true, "reason": "import numpy", "num_tokens": 1236}
|
///////////////////////////////////////////////////////////////////////////////
// importance_sampling::generate.hpp //
// //
// Copyright 2009 Erwann Rogard. Distributed under the Boost //
// Software License, Version 1.0. (See accompanying file //
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) //
///////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_STATISTICS_DETAIL_IMPORTANCE_SAMPLING_GENERATE_HPP_ER_2009
#define BOOST_STATISTICS_DETAIL_IMPORTANCE_SAMPLING_GENERATE_HPP_ER_2009
#include <iterator>
#include <functional>
#include <boost/iterator/iterator_traits.hpp>
#include <boost/math/tools/precision.hpp>
#include <boost/random/variate_generator.hpp>
#include <boost/utility.hpp>
#include <boost/statistics/detail/importance_sampling/random/sampler.hpp>
#include <boost/random/ref_distribution.hpp>
namespace boost{
namespace statistics{
namespace detail{
namespace importance_sampling{
// Draw n importance samples: repeatedly pick a proposal value from
// [b_p, b_p + distance(b_w, e_w)) with probability proportional to its
// unnormalized weight in [b_w, e_w), writing the picks to b_t.
// Returns the output iterator one past the last generated value.
//
// ItT : output iterator over target values
// N   : integral sample-size type
// ItW : iterator over unnormalized weights
// ItP : iterator over proposal values (must span as many elements as weights)
// U   : uniform random number generator (passed by reference)
template<typename ItT,typename N,typename ItW,typename ItP,typename U>
ItT generate(
    ItT b_t,    // target values (output)
    N n,        // sample size
    ItW b_w,    // unnormalized weights
    ItW e_w,    // unnormalized weights
    ItP b_p,    // proposal values
    U& urng
)
{
    typedef boost::iterator_range<ItW> range_w_;
    typedef boost::iterator_range<ItP> range_p_;
    typedef typename boost::iterator_value<ItW>::type w_;
    typedef sampler<range_p_,w_> iss_;
    typedef boost::random::ref_distribution<iss_&> ref_iss_;
    typedef boost::variate_generator<U&,ref_iss_> gen_iss_;
    range_w_ range_w(b_w,e_w);
    // The proposal range is sized to match the weight range.
    range_p_ range_p(
        b_p,
        boost::next(
            b_p,
            std::distance(
                b_w,
                e_w
            )
        )
    );
    iss_ iss( range_w, range_p );
    ref_iss_ ref_iss( iss );
    // NOTE(review): gen_iss is constructed from `iss` (implicitly converted
    // to ref_iss_) while the explicit `ref_iss` above goes unused — confirm
    // this is intended.
    gen_iss_ gen_iss( urng, iss );
    return std::generate_n(
        b_t,
        n,
        gen_iss
    );
}
}// importance_sampling
}// statistics
}// detail
}// boost
#endif
|
{"hexsha": "87f75b0dab49b72ac38c91c1468134f30dfdfaea", "size": 2447, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "importance_sampling/boost/statistics/detail/importance_sampling/random/generate.hpp", "max_stars_repo_name": "rogard/boost_sandbox_statistics", "max_stars_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "importance_sampling/boost/statistics/detail/importance_sampling/random/generate.hpp", "max_issues_repo_name": "rogard/boost_sandbox_statistics", "max_issues_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "importance_sampling/boost/statistics/detail/importance_sampling/random/generate.hpp", "max_forks_repo_name": "rogard/boost_sandbox_statistics", "max_forks_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4637681159, "max_line_length": 79, "alphanum_fraction": 0.5206375153, "num_tokens": 498}
|
import os
import numpy as np
from sklearn.naive_bayes import GaussianNB
import timeit
def nbClassifier(X_train, y_train, X_test):
    """Fit a Gaussian naive Bayes classifier and predict labels for X_test.

    Parameters
    ----------
    X_train, y_train : array-like
        Training features and labels.
    X_test : array-like
        Features to classify.

    Returns
    -------
    (float, float, ndarray)
        Training time in ms, prediction time in ms, predicted labels.
    """
    model = GaussianNB()

    # Time the fit.
    t0 = timeit.default_timer()
    model.fit(X_train, y_train)
    train_ms = (timeit.default_timer() - t0) * 1000

    # Time the prediction.
    t0 = timeit.default_timer()
    labels = model.predict(X_test)
    predict_ms = (timeit.default_timer() - t0) * 1000

    return train_ms, predict_ms, labels
|
{"hexsha": "0aa17d9a56d824bcd0abd5771968ec67537213fb", "size": 501, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/Classification Algorithms/NB.py", "max_stars_repo_name": "DrMoe/Evaluation-of-satellite-imagery-based-crop-classification", "max_stars_repo_head_hexsha": "ca7324ee6e5c399ea08d2c3ac11497e4ed95f473", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-01-07T14:51:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-06T18:58:13.000Z", "max_issues_repo_path": "Python/Classification Algorithms/NB.py", "max_issues_repo_name": "DrMoe/Evaluation-of-satellite-imagery-based-crop-classification", "max_issues_repo_head_hexsha": "ca7324ee6e5c399ea08d2c3ac11497e4ed95f473", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/Classification Algorithms/NB.py", "max_forks_repo_name": "DrMoe/Evaluation-of-satellite-imagery-based-crop-classification", "max_forks_repo_head_hexsha": "ca7324ee6e5c399ea08d2c3ac11497e4ed95f473", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-05-31T15:01:42.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-27T07:27:44.000Z", "avg_line_length": 29.4705882353, "max_line_length": 66, "alphanum_fraction": 0.75249501, "include": true, "reason": "import numpy", "num_tokens": 127}
|
#include "App/config.h"
#include <pwd.h>
#include <sys/types.h>
#include <boost/filesystem.hpp>
#include <boost/program_options.hpp>
#include <boost/property_tree/ini_parser.hpp>
#include <iostream>
using namespace std;
// Parse the INI file at `configFile` and populate the configuration members
// (enclave path, contract address, RPC port, sealed key paths).
// On any parse or lookup failure, prints the error and exits with -1.
void tc::Config::parseConfigFile()
{
    // parse the config files
    boost::property_tree::ptree pt;
    try {
        boost::property_tree::ini_parser::read_ini(configFile, pt);
        enclavePath = pt.get<string>("enclave_path");
        tcContractEthereumAddr = pt.get<string>("tc_address");
        relayRPCAccessPoint = pt.get<int>("RPC.port");
        sealedECDSAKey = pt.get<string>("sealed.sig_key");
        sealedHybridEncryptionkey = pt.get<string>("sealed.hybrid_key");
    } catch (const exception &e) {
        cout << e.what() << endl;
        cout << "please provide with a correct config file" << endl;
        exit(-1);
    }
}
// Parse the command line (--help/-h, --measurement/-m, --config/-c),
// then load the configuration file.  Exits on any parse error.
tc::Config::Config(int argc, const char **argv)
{
  try {
    po::options_description desc("Allowed options");
    // Register every supported option in one chained call.
    desc.add_options()
        ("help,h", "print this message")
        ("measurement,m",
         po::bool_switch(&isPrintMR)->default_value(false),
         "print the measurement (MR_ENCLAVE) and exit.")
        ("config,c",
         po::value(&configFile)->default_value(DFT_CONFIG_FILE),
         "Path to a config file");
    po::store(po::parse_command_line(argc, argv, desc), vm);
    // Handle --help before notify() so missing required options don't block it.
    if (vm.count("help")) {
      cerr << desc;
      cerr.flush();
      exit(0);
    }
    po::notify(vm);
  } catch (po::required_option &e) {
    cerr << e.what() << endl;
    exit(-1);
  } catch (exception &e) {
    cerr << e.what() << endl;
    exit(-1);
  } catch (...) {
    cerr << "Unknown error!" << endl;
    exit(-1);
  }
  parseConfigFile();
}
// Render a human-readable summary of the active configuration.
string tc::Config::toString()
{
  stringstream out;
  out << "Using config file: " << getConfigFile() << endl;
  out << "+ using enclave image: " << getEnclavePath() << endl;
  out << "+ listening for TC relay at port: " << getRelayRPCAccessPoint()
      << endl;
  out << "+ serving contract at: " << getTcEthereumAddress();
  return out.str();
}
// Trivial accessors for values populated by the command-line parser and
// parseConfigFile().  All string getters return references into this object.
const string &tc::Config::getConfigFile() const { return configFile; }
int tc::Config::getRelayRPCAccessPoint() const { return relayRPCAccessPoint; }
const string &tc::Config::getSealedSigKey() const { return sealedECDSAKey; }
// Sealed hybrid-encryption key path/blob (INI key "sealed.hybrid_key").
const string &tc::Config::getSealedHybridKey() const
{
  return sealedHybridEncryptionkey;
}
const string &tc::Config::getEnclavePath() const { return enclavePath; }
// Ethereum address of the TC contract (INI key "tc_address").
const string &tc::Config::getTcEthereumAddress() const
{
  return tcContractEthereumAddr;
}
// True when the user passed --measurement/-m on the command line.
bool tc::Config::getIsPrintMR() const { return isPrintMR; }
|
{"hexsha": "eb110bd4e0257cbb4e4ca50f7458e45353a56b7d", "size": 2678, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "App/config.cpp", "max_stars_repo_name": "bl4ck5un/minimal-sgx-app", "max_stars_repo_head_hexsha": "970d368a02fc8ec37475cf8c1d42c3d48cbbdbdb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-08-01T01:08:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-01T01:08:09.000Z", "max_issues_repo_path": "App/config.cpp", "max_issues_repo_name": "bl4ck5un/minimal-sgx-app", "max_issues_repo_head_hexsha": "970d368a02fc8ec37475cf8c1d42c3d48cbbdbdb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "App/config.cpp", "max_forks_repo_name": "bl4ck5un/minimal-sgx-app", "max_forks_repo_head_hexsha": "970d368a02fc8ec37475cf8c1d42c3d48cbbdbdb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0898876404, "max_line_length": 78, "alphanum_fraction": 0.643017177, "num_tokens": 697}
|
//
// Created by calebcintary on 3/20/22.
//
#include <boost/test/unit_test.hpp>
#include "pyplot_cpp/Histogram.hpp"
#include "pyplot_cpp/plt/Properties.hpp"
BOOST_AUTO_TEST_SUITE(Histogram_Test)

// Build a small histogram and display it interactively.
BOOST_AUTO_TEST_CASE(Histogram_SimpleShow_Test) {
    pyplot_cpp::Histogram histogram;
    histogram.setData({1, 2, 3, 4, 5, 6, 7, 4});
    histogram.setBins({1, 2, 3, 4, 5, 6, 7});
    histogram.setTitle("Simple Histogram");
    histogram.show();
}

// Build the same histogram and write it to a PNG instead of showing it.
BOOST_AUTO_TEST_CASE(Histogram_SimpleSave_Test) {
    pyplot_cpp::Histogram histogram;
    histogram.setData({1, 2, 3, 4, 5, 6, 7, 4});
    histogram.setBins({1, 2, 3, 4, 5, 6, 7});
    histogram.setTitle("Simple Histogram");
    histogram.save("../../examples/Hist.png");
}

BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "3729e314522a261dae54991deafef1acde3fc166", "size": 760, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/Hist_Test.cpp", "max_stars_repo_name": "CalebCintary/pyplot_cpp", "max_stars_repo_head_hexsha": "de33c3e921229e5efd72a7fc4fdb17212edc9aa9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-03-29T10:52:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T10:52:21.000Z", "max_issues_repo_path": "test/Hist_Test.cpp", "max_issues_repo_name": "CalebCintary/pyplot_cpp", "max_issues_repo_head_hexsha": "de33c3e921229e5efd72a7fc4fdb17212edc9aa9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2022-03-20T12:17:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T17:50:12.000Z", "max_forks_repo_path": "test/Hist_Test.cpp", "max_forks_repo_name": "CalebCintary/pyplot_cpp", "max_forks_repo_head_hexsha": "de33c3e921229e5efd72a7fc4fdb17212edc9aa9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-03-29T19:40:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T19:40:59.000Z", "avg_line_length": 27.1428571429, "max_line_length": 53, "alphanum_fraction": 0.6223684211, "num_tokens": 235}
|
-- The twelve clock hours as a finite range.
hours : List Nat
hours = [1..12]

-- All natural numbers as an infinite stream, counting up from 0.
nats : Stream Nat
nats = [0,1..]
|
{"hexsha": "869a0c471e7e63b10e60ccc57cf88f08ff00eb54", "size": 67, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "tests/ideMode/ideMode005/Ranges.idr", "max_stars_repo_name": "ska80/idris-jvm", "max_stars_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 396, "max_stars_repo_stars_event_min_datetime": "2016-07-17T08:00:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T22:47:13.000Z", "max_issues_repo_path": "tests/ideMode/ideMode005/Ranges.idr", "max_issues_repo_name": "ska80/idris-jvm", "max_issues_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2016-08-04T06:13:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T04:00:31.000Z", "max_forks_repo_path": "tests/ideMode/ideMode005/Ranges.idr", "max_forks_repo_name": "ska80/idris-jvm", "max_forks_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2016-09-15T15:19:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T13:05:48.000Z", "avg_line_length": 11.1666666667, "max_line_length": 17, "alphanum_fraction": 0.5820895522, "num_tokens": 27}
|
using Test
using Documenter
using ExperimentalDesign
# Unit-test files run inside the top-level test set.
test_files = ["variance_predictions.jl"]

@testset "ExperimentalDesign" begin
    # Execute every unit-test file.
    foreach(include, test_files)

    # Doctests embedded in the package documentation.
    @testset "Doctests" begin
        # Seed the RNG in every doctest block so outputs are reproducible.
        DocMeta.setdocmeta!(ExperimentalDesign,
                            :DocTestSetup,
                            :(using ExperimentalDesign, Distributions,
                              Random, StatsModels, DataFrames;
                              Random.seed!(443591););
                            recursive = true)
        doctest(ExperimentalDesign)
    end
end
|
{"hexsha": "4e8b2653a1710509512984828ff98d74ef59b831", "size": 583, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/ExperimentalDesign.jl-4babbea4-9e7d-11e9-116f-e1ada04bd296", "max_stars_repo_head_hexsha": "841a479c0f9a97ac88ae793703df7f905e4df0ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/ExperimentalDesign.jl-4babbea4-9e7d-11e9-116f-e1ada04bd296", "max_issues_repo_head_hexsha": "841a479c0f9a97ac88ae793703df7f905e4df0ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/ExperimentalDesign.jl-4babbea4-9e7d-11e9-116f-e1ada04bd296", "max_forks_repo_head_hexsha": "841a479c0f9a97ac88ae793703df7f905e4df0ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5, "max_line_length": 70, "alphanum_fraction": 0.5523156089, "num_tokens": 110}
|
# QuTiP Lecture: Particle emission from a photon cascade
D. Lukin, Stanford University
In this Jupyter notebook, we use QuTiP: The Quantum Toolbox in Python to study a cascaded three-level system excited by a classical pulse. This model system captures the essence of the dynamics of a biexciton in a quantum dot [[1,2]](#refs).
For more information about QuTiP see the project web page: http://qutip.org/
## Introduction
The system consists of three states: ground $\left|g\right\rangle$, intermediate $\left|i\right\rangle$, and excited $\left|e\right\rangle$. The process we are modeling is the two-photon excitation of the system with a classical pulse driving the transition $\left|g\right\rangle \to \left|e\right\rangle$. The system is coupled to two decay channels, via collapse operators
$\sigma_1 = \sqrt{\gamma_1} \left|i\right\rangle \left\langle e\right| $ and $\sigma_2 = \sqrt{\gamma_2} \left|g\right\rangle \left\langle i\right| $. Here, $\gamma_1$ and $\gamma_2$ are the coupling strength of the system to channels 1 and 2. In all simulations, we are setting $2\gamma_2 = \gamma_1 = \gamma$.
In a frame rotating at the laser's frequency for the two-photon resonance, the system Hamiltonian is given by
$$H_{\mbox{sys}} = H_0 + H_I(t)$$
where $$H_0 = E_b/2 \left|i\right\rangle \left\langle i\right|$$
and $H_I$ represents two-photon excitation of the system by a classical pulse in the rotating wave approximation. Further,
$$H_I(t) = \frac{(\mu \cdot E(t))^2}{E_b} (\left|g\right\rangle \left\langle e\right| + \left|e\right\rangle \left\langle g\right|) .$$
Here, $E(t)$ is the time dependent electric field pulse driving the system, $\mu$ is the coupling strength, and $E_b$ is the binding energy of the biexciton. The driving strength depends on the square of $\mu \cdot E(t)$ because the coherent drive occurs via virtual excitation of the intermediate state.
The system-only evolution (tracing over the dynamics of the two channels into which the system spontaneously emits), is given by the Master Equation:
\begin{equation}
\partial_t \rho = \mathcal L\rho = -i[H_I(t), \rho] + \mathcal D[\sigma_1]\rho + \mathcal D[ \sigma_2]\rho,
\end{equation}
where $\mathcal D[c]\rho = c\rho c^\dagger - \frac 12 (c^\dagger c \rho + \rho c^\dagger c)$ is the standard Lindblad dissipator.
## Setting Up Computation in QuTiP
```python
%matplotlib inline
```
```python
import matplotlib.pyplot as plt
import numpy as np
from qutip import *
```
### Setup of time discretization and pulse
```python
# time parameters (all times are in units of 1/gamma)
gamma = 1.0 # decay rate, sets the overall timescale of the system
globaldt = 0.15/gamma # default time step
minpointsperpulse = 5.0 # minimum time resolution of a pulse (points per pulse width)
# define function E(t), a gaussian.
def pulse_shape(t, args):
    """Gaussian drive envelope E(t).

    ``args`` must provide 'width' (the Gaussian standard deviation) and
    'norm' (the peak amplitude).  The pulse is centred 3.5 widths after
    t = 0 so it starts at essentially zero amplitude.
    """
    sigma = args['width']
    amplitude = args['norm']
    center = 3.5 * sigma
    exponent = -((t - center) ** 2.0) / (2.0 * sigma ** 2.0)
    return amplitude * np.exp(exponent)
# pulse normalization function
def pulsenormconst(args):
    """Return the constant that normalises the pulse to unit area.

    Temporarily sets ``args['norm'] = 1.0``, integrates the resulting
    pulse over its time grid, and returns the reciprocal of that area.
    Note: mutates ``args`` as a side effect.
    """
    args['norm'] = 1.0
    grid = tlistfunc(args)
    samples = [pulse_shape(t, args) for t in grid]
    raw_area = np.trapz(samples, grid)
    return 1 / raw_area
# in order to speed up the simulation for shorter pulses, we take advantage of
# QuTiP's built in support for variable time steps. This function generates a
# the list of times for which system dynamics will be calculated.
def tlistfunc(args):
    """Build the array of times at which the dynamics are evaluated.

    When the global step ``globaldt`` already resolves the pulse
    (``width / minpointsperpulse > globaldt``), a uniform grid over
    [tmin, tmax] is returned.  Otherwise, the window containing the
    pulse gets a finer grid and the tail keeps the coarse global step.

    ``args`` must provide 'width', 'tmin' and 'tmax'.
    Fixes over the original: removed the unused local ``norm`` and the
    dead ``tlist = []`` initialiser.
    """
    width = args['width']
    tmin = args['tmin']
    tmax = args['tmax']
    pulse_t_offset = 3.5  # pulse centre, in units of the pulse width
    # If the global time resolution satisfies the minimum number of points per
    # pulse, make the time list a simple linearly-spaced set of points.
    if width/minpointsperpulse > globaldt:
        return np.linspace(tmin, tmax, int((tmax - tmin)/globaldt))
    # Otherwise, increase resolution inside the pulse window only.
    pulse_end = width * pulse_t_offset * 2.0
    tlist1 = np.linspace(tmin, pulse_end,
                         int(minpointsperpulse*(pulse_t_offset*2.0 - tmin)))[:-1]
    tlist2 = np.linspace(pulse_end, tmax,
                         int((tmax - pulse_end)/globaldt))
    return np.append(tlist1, tlist2)
def prepare_pulse(width):
    """Return ``(tlist, pulse, pulseargs)`` for a pulse of the given width."""
    # Pulse properties; the window extends 5/gamma past the pulse itself.
    pulseargs = {'width': width * gamma,
                 'tmin': 0.0,
                 'tmax': width * 1.5 + 5.0 / gamma}
    # Scale the envelope so its time integral equals the requested pulse area.
    pulseargs['norm'] = pulse_area * pulsenormconst(pulseargs)
    times = tlistfunc(pulseargs)
    envelope = [pulse_shape(t, pulseargs) for t in times]
    return times, envelope, pulseargs
```
### Setup of the physics
For this notebook, we consider the case of so-called $\pi$ area pulses, which for short pulses prepare the system in the excited state $|e\rangle$.
```python
C = 1 # includes dipole coupling strength and binding energy
pulse_area = np.pi/(C**2) # setting area of pulse to Pi.
#System state space
g = fock(3,0) # ground state |g>
i = fock(3,1) # intermediate state |i>
e = fock(3,2) # excited state |e>
# Hamiltonian
H0 = qeye(3) # identity because we are in the interaction picture
H_I = C/2 * (e*g.dag() + g*e.dag()) # time dependent field
H = [H0, [H_I, pulse_shape]] # complete system Hamiltonian, H(t) = H0 + E(t)*H_I
s1 = np.sqrt(2*gamma) * i*e.dag() # lowering operator for channel 1 (gamma_1 = 2*gamma)
s2 = np.sqrt(gamma) * g*i.dag() # lowering operator for channel 2 (gamma_2 = gamma)
```
## Dynamics of System Expectation Values
We begin by checking that the system is set up correctly by computing the system dynamics given by
$$
\partial_t \rho = \mathcal L\rho = -i[H_I(t), \rho] + \mathcal D[\sqrt{\gamma_1} \sigma_1]\rho + \mathcal D[\sqrt{\gamma_2} \sigma_2]\rho,
$$
and extracting the expectation values of projection operators $\Pi_i = \left|i\right\rangle \left\langle i\right|$ and $\Pi_e = \left|e\right\rangle \left\langle e\right|$.
```python
#prepare pulse (argument is pulse width); 0.001/gamma is effectively instantaneous
tlist, pulse, pulseargs = prepare_pulse(0.001)
#compute system evolution and expectation values of projectors. Supply pulseargs as parameter for H(t).
result = mesolve(H, g, tlist, [s1, s2], [e*e.dag(), i*i.dag()], args=pulseargs)
e_expvals = result.expect[0] # <Pi_e>(t), excited-state population
i_expvals = result.expect[1] # <Pi_i>(t), intermediate-state population
#plot the data
fig,ax=plt.subplots(figsize=(8,5))
ax.plot(tlist,e_expvals, label="population E")
ax.plot(tlist,i_expvals, label="population I")
ax.legend()
ax.set_xlabel('Time');
ax.set_ylabel('Population');
ax.set_xlim(0, 5)
ax.set_title('System evolution');
```
We see that the system behaves correctly: The short pulse dynamics are captured, where the system briefly reaches unity excitation of the excited state, indicating that time discretization is properly set up. Note also that the decay rate of $\left|e\right\rangle$ is twice as fast as the decay rate of $\left|i\right\rangle$.
It is straightforward to compute the expectation value for the total number of photons emitted into either channel, since the rate of photon emission is given by $\mbox{Tr}[\rho(t) \sigma_1^\dagger \sigma_1]$ or $\mbox{Tr}[\rho(t) \sigma_2^\dagger \sigma_2]$ and thus
$$\langle N\rangle = \int_0^\infty\mbox{Tr}[\rho(t) \ \sigma_1^\dagger \sigma_1] \ dt = \gamma_1 \int_0^\infty \langle \Pi_e \rangle \ dt$$
```python
Nexp = 2*gamma*np.trapz(result.expect[0], tlist); print(Nexp)
```
0.9977906071836861
## Computing N-photon emission probabilities
Using the formalism of conditioned evolution, the $N$-photon emission probability is given by
$$P(N) = \int_0^\infty dt_1 \int_0^\infty dt_2 \dots \int_0^\infty dt_N \ p(t_1, t_2, \dots t_N),$$
where $p(t_1, t_2, \dots t_N)$ is the probability that the system emits photons at only times $\{t_1, t_2, \dots t_N\}$.
The number of photons emitted into channels 1 and 2 is always equal, because of the cascade nature of the system. We can thus choose to monitor (condition) the emission from either channel, while tracing over the state of the second channel. We choose, arbitrarily, to monitor channel 1.
We define the collapse superoperator for channel $1$ as $\mathcal S\rho = \sigma_1\rho \sigma_1^\dagger$, and the nonunitary evolution conditioned on no emission as $\mathcal K = \mathcal L - \mathcal S$. Then $p(t_1, t_2, \dots t_N)$ is given by
$$p(t_1, t_2, \dots t_N) = \mbox{Tr}[\mathcal K(\infty, t_N) \ \mathcal S \ \mathcal K(t_N, t_{N-1}) \ \mathcal S \ \dots \ \mathcal S \ \mathcal K(t_1, 0)\left| g \right\rangle \left \langle g \right |]$$
This expression for $p(t_1, t_2, \dots t_N)$ is derived in Carmichael, Chapter 7 [[3](#refs)]. Since the final state of the atom is always $\left | g\right \rangle$, the trace takes only a single entry of the density matrix $\left\langle g\right |\rho(t\rightarrow\infty)\left | g \right\rangle$.
Explicitly the superoperator $\mathcal K$ is
$$
\mathcal K\rho = -i[H_I(t), \rho] +\mathcal D[\sqrt{\gamma_2} \sigma_2]\rho - (\sigma_1^\dagger\sigma_1 \rho + \rho \sigma_1^\dagger\sigma_1)/2
$$
We set up $\mathcal K$ in QuTiP as follows:
```python
# No-emission (conditional) evolution superoperator K = L - S for channel 1.
# Time-dependent piece: the coherent-drive commutator -i[H_I(t), rho].
Ktimedep = -1.0j * C/2 * (spre(e*g.dag() + g*e.dag()) - spost(e*g.dag() + g*e.dag()))
# Time-independent piece: channel-1 decay without its recycling term
# sigma_1 rho sigma_1^dag, plus the full Lindblad dissipator for channel 2.
Ktimeindep = - 0.5 * (spre(s1.dag()*s1) + spost(s1.dag()*s1)) \
+ sprepost(s2, s2.dag()) - 0.5*(spre(s2.dag()*s2) + spost(s2.dag()*s2))
K = [Ktimeindep, [Ktimedep, pulse_shape]] # QuTiP list form: K(t) = Ktimeindep + E(t)*Ktimedep
```
To evaluate $p(t_1, t_2, \dots t_N)$, we initialize the system in the ground state $\rho(0) = \left|g\right\rangle \left\langle g \right |$, and evolve it with $\mathcal K$ from $t = 0$ to $t = t_1$. Then we act on the evolved $\rho$ with $\mathcal S$, then evolve with $\mathcal K$ until $t = t_2$, etc.
When computing the total $N$-photon probability $P(N)$, it is possible to re-use solutions from different trajectories to make the algorithm more time-efficient. A natural implementation of this is with recursion.
```python
#parameters
width = 0.05 #pulse width
#prepare pulse
tlist, pulse, pulseargs = prepare_pulse(width)
def conditionalIteration(rhoinit, totalphotons, iternumber, tinitindex):
    '''
    Recursive function that computes the probability of emission of N photons, P(N).

    rhoinit - initial (conditioned) state of the system
    totalphotons - N, the number of photon emissions being conditioned on
    iternumber - current recursion level (0 = outermost)
    tinitindex - the index in tlist that determines the timestep from which to begin the recursive computation

    Innermost level (iternumber == totalphotons): evolve with the no-emission
    superoperator K to the final time and return (emission time, probability
    density).  For N == 0 there is nothing left to integrate, so the bare
    probability is returned instead.  Outer levels apply the collapse
    s1 . s1^dag at every candidate emission time, recurse, and integrate the
    returned densities over time.
    '''
    curritertrajprobs = []
    curritertimes = []
    if iternumber == totalphotons:
        out = mesolve(K, rhoinit, tlist[tinitindex:], [], [], args=pulseargs)
        rho = out.states[-1]
        # The trace reduces to <g|rho|g>: the system always ends in |g>.
        prob = expect(g*g.dag(), rho)
        if totalphotons == 0: #if calculating P(0), the innermost recursion step is also the
            return prob #outermost one, so return probability, not probability density.
        else: #else return the probability density
            return tlist[tinitindex], prob
    else:
        conditionedrho = mesolve(K, rhoinit, tlist[tinitindex:], [], [], args=pulseargs)
        for i in range(tinitindex,len(tlist)):
            rho = conditionedrho.states[i - tinitindex]
            rhoprojected = s1 * rho * s1.dag() # photon emitted at tlist[i]
            outtime,outtrajprob = conditionalIteration(rhoprojected, totalphotons, iternumber + 1, i)
            curritertrajprobs.append(outtrajprob)
            curritertimes.append(outtime)
        integral = abs(np.trapz(curritertrajprobs, curritertimes))
        if iternumber == 0:
            return integral #for the outermost loop, return final cumulative probability.
        else:
            # BUGFIX: return (time, value) in the same order as the innermost
            # level, matching the `outtime, outtrajprob` unpacking above.  The
            # original returned (integral, tlist[tinitindex]), which swapped
            # time and density for every N >= 2.
            return tlist[tinitindex], integral
#Calculate N-photon probabilities by calling the recursive function
P0 = conditionalIteration(g*g.dag(), 0, 0, 0); print("P(0) = " + str(P0))
P1 = conditionalIteration(g*g.dag(), 1, 0, 0); print("P(1) = " + str(P1))
P2 = conditionalIteration(g*g.dag(), 2, 0, 0); print("P(2) = " + str(P2))
```
P(0) = 0.002715148979783426
P(1) = 0.9873106987545373
P(2) = 0.0009142280636881267
Thus, we see that for a short pulse, the system acts as a good single photon source. This calculation may require higher time resolution for short pulses to achieve high accuracy.
## Computing the system+channel density matrix
The results in the section are covered in Ref. [[2]](#refs).
We take a step back and consider a representation of the three-level system coupled to two dissipative channels where no information about the state is discarded. In the coarse-grained time basis [[4]](#refs), the state can be written as
$$\left|\Psi\right\rangle = \sum_{\phi_j, \mathbf T_1, \mathbf T_2} c_{\phi_j, \mathbf T_1, \mathbf T_2} \left|\phi_j, \mathbf T_1, \mathbf T_2\right\rangle,$$
where $\phi_j \in \{g, i, e\}$, $\mathbf T_k = \{t_1, t_2, \cdots, t_N\}$ is the list of times when a photon was emitted into channel $k$.
We may now trace over the second channel, thus obtaining a mixed state:
$$\chi = \mbox{Tr}_{\mathbf T_2}[\left|\Psi\right\rangle \left\langle \Psi \right |] = \sum_{\mathbf T''_2} \sum_{\phi_j, \mathbf T_1, \mathbf T_2} \sum_{\phi_j', \mathbf T_1', \mathbf T_2'} c_{\phi_j, \mathbf T_1, \mathbf T_2} c^*_{\phi'_j, \mathbf T_1', \mathbf T_2'}\langle \mathbf T_2'' \left|\phi_j, \mathbf T_1, \mathbf T_2\right\rangle \langle \phi'_j, \mathbf T'_1, \mathbf T'_2 \left|\mathbf T''_2\right\rangle.$$
The modes of channel 2 are orthogonal, thus,
$$\chi = \sum_{\mathbf T_2} \sum_{\phi_j, \mathbf T_1} \sum_{\phi_j', \mathbf T_1'} c_{\phi_j, \mathbf T_1, \mathbf T_2} c^*_{\phi'_j, \mathbf T_1', \mathbf T_2}\langle \mathbf T_2 \left|\phi_j, \mathbf T_1, \mathbf T_2\right\rangle \langle \phi'_j, \mathbf T'_1, \mathbf T_2 \left|\mathbf T_2\right\rangle.$$
We can re-define the states in the Hilbert space of system and channel 1 only, and write:
$$\chi = \sum_{\phi_j, \mathbf T_1} \sum_{\phi_j', \mathbf T_1'} \Big(\sum_{\mathbf T_2} c_{\phi_j, \mathbf T_1, \mathbf T_2} c^*_{\phi'_j, \mathbf T_1', \mathbf T_2}\Big) \left|\phi_j, \mathbf T_1\right\rangle\left\langle\phi'_j, \mathbf T'_1\right|$$
We see that
$$\sum_{\mathbf T_2} c_{\phi_j, \mathbf T_1, \mathbf T_2} c^*_{\phi'_i, \mathbf T_1', \mathbf T_2} = \left\langle\phi'_i, \mathbf T'_1\right|\chi\left|\phi_j, \mathbf T_1\right\rangle$$ is the matrix element of the reduced density matrix of the system and channel 1 only. Note that since the initial state and final state of the system is $\left | g\right\rangle$, only elements $\phi_j = \phi_j' = g$ are nonzero. We can thus drop the system state labels and denote the matrix elements by $\left\langle\mathbf T'_1\right|\chi\left| \mathbf T_1\right\rangle $.
It turns out, in the process of obtaining $P(N)$ in the previous section, we already computed the diagonal elements of $\chi$.
Calculation of the off-diagonal elements was made possible with the technique in Ref. [[4]](#refs) and was derived in Ref. [[2]](#refs). We present the final result:
$$
\left\langle\mathbf{T'}_1\big|\ \chi \ \big|\mathbf{T}_1\right\rangle = \text{Tr}_\text{sys}\big[\mathcal{K}(\infty, \tau_N)\mathcal S_{Q[\tau_N]}\mathcal{K}(\tau_N, \tau_{N-1})\mathcal S_{Q[\tau_{N-1}]}\cdots \mathcal{K}(\tau_2,\tau_1)\mathcal S_{Q[\tau_1]} \mathcal{K}(\tau_1,0)\left | g\right\rangle\left\langle g\right |\big]
$$
Here, ${\tau_i}$ is a time ordered set of times $\mathbf T_1 \cup \mathbf T_1'$, $\mathcal K$ is defined as before, and $Q[\tau_{i}]$ takes on the value 1 if $\tau_i \in \mathbf T_1$, and 0 if $\tau_i \in \mathbf T_1'$. The operators
$\mathcal S_0, \mathcal S_1$ are defined as $$\mathcal S_0\rho = \sigma_1 \rho$$ $$\mathcal S_1\rho = \rho\sigma_1$$
Note, this calculation is very similar to
$$p(t_1, t_2, \dots t_N) = \mbox{Tr}[\mathcal K(\infty, t_N) \ \mathcal S \ \mathcal K(t_N, t_{N-1}) \ \mathcal S \ \cdots \ \mathcal S \ \mathcal K(t_1, 0)\left| g \right\rangle \left \langle g \right |].$$
The difference is that instead of applying the collapse operator on both sides for a given emission time $t$, one has to apply them either on the right or the left, depending on whether $\tau$ comes from $\mathbf T_1$ or $\mathbf T_1'$.
Computing the full density matrix for a large number of photon emissions is computationally expensive. Here, we will only calculate the density matrix for 1-photon channel states:
```python
#parameters
width = 0.05 #pulse width
#prepare pulse
tlist, pulse, pulseargs = prepare_pulse(width)
#array to be populated with terms of the system+channel density matrix <T1'|chi|T1>
rhototal = [[0 for x in tlist] for x in tlist]
# no-emission evolution from |g><g| up to the first collapse time
result = mesolve(K, g*g.dag(), tlist, [], [], args=pulseargs)
rho_pre_emission = result.states
#since rhototal is Hermitian, it is sufficient to calculate only terms on the diagonal and above.
#tL (tR) represents the emission time on the left, a.k.a in T1 (right, a.k.a in T1').
for tL in range(len(tlist)):
    # apply the left collapse S_0 (s1 on the left) at tL, then evolve without emission
    result = mesolve(K, s1 * rho_pre_emission[tL], tlist[tL:], [], [], args=pulseargs)
    rho_post_tL = result.states
    for tR in range(tL,len(tlist)):
        i = tR - tL # offset of tR within the tlist[tL:] evolution
        # apply the right collapse S_1 (s1.dag() on the right) at tR and evolve to the end
        result = mesolve(K, rho_post_tL[i] * s1.dag(), tlist[tR:], [], [], args=pulseargs)
        rhofinal = result.states[-1]
        # the trace reduces to the single <g|.|g> element
        matelement = expect(g*g.dag(), rhofinal)
        rhototal[tR][tL] = np.conjugate(matelement)
        rhototal[tL][tR] = matelement
```
Having obtained the entire system+channel density matrix, we can compute quantities that previously were not accessible to us. For instance, the trace purity of the single photon emission into channel 1, defined as
$$\mathbb P = \lim_{t\to\infty} \int_0^t \mbox{d}t_1 \int_0^t \mbox{d}t_1' \frac{\bigl|\left\langle t_1'\big| \ \chi \ \big| \ t_1 \right\rangle \bigr|^2}{P(1)^2} $$
So let's compute the one-photon-purity:
```python
# P(1) is the time integral of the diagonal densities <t|chi|t>.
diagonal = np.diagonal(rhototal)
P1 = np.trapz(diagonal, tlist)
# Trace purity P = (1/P1^2) * double integral of |<t'|chi|t>|^2.
# BUGFIX: the formula requires the modulus squared; np.square alone squares the
# (generally complex) matrix element, so take np.abs first.  For purely real
# entries the result is unchanged.
Purity = np.trapz([np.trapz(np.square(np.abs(rhototal[row])), tlist) for row in range(len(tlist))] , tlist)/(P1**2)
print("P(1) = "+ str(P1)) #note that this number should agree with P(1) from the previous section
print("Purity = "+ str(Purity))
```
P(1) = 0.9873106987545373
Purity = 0.6682330028475325
## References
<a id='refs'></a>
[1] Quantum dot single photon sources with ultra-low multi-photon probability, Lukas Hanschke, Kevin A. Fischer, Stefan Appel, Daniil Lukin, Jakob Wierzbowski, Shuo Sun, Rahul Trivedi, Jelena Vučković, Jonathan J. Finley, Kai Müller. (2018) [[arXiv:1801.01672](https://arxiv.org/abs/1801.01672)]
[2] Particle emission from open-quantum systems, Kevin A. Fischer, Rahul Trivedi, Daniil Lukin. (2017) [[arXiv:1803.04648](https://arxiv.org/abs/1803.04648)]
[3] H. Carmichael, An Open Systems Approach to Quantum Optics, Lectures Presented at the Université Libre de Bruxelles, 1991
[4] Scattering of Coherent Pulses from Quantum-Optical Systems, Kevin A. Fischer, Rahul Trivedi, Vinay Ramasesh, Irfan Siddiqi, Jelena Vučković. (2017) [[arXiv:1710.02875](https://arxiv.org/abs/1710.02875)]
## Version
```python
from qutip.ipynbtools import version_table
version_table()
```
<table><tr><th>Software</th><th>Version</th></tr><tr><td>QuTiP</td><td>4.3.0.dev0+36fd841</td></tr><tr><td>Numpy</td><td>1.14.1</td></tr><tr><td>SciPy</td><td>0.18.1</td></tr><tr><td>matplotlib</td><td>1.4.3</td></tr><tr><td>Cython</td><td>0.24.1</td></tr><tr><td>Number of CPUs</td><td>8</td></tr><tr><td>BLAS Info</td><td>OPENBLAS</td></tr><tr><td>IPython</td><td>4.2.0</td></tr><tr><td>Python</td><td>3.4.3 (default, Nov 28 2017, 16:41:13)
[GCC 4.8.4]</td></tr><tr><td>OS</td><td>posix [linux]</td></tr><tr><td colspan='2'>Mon Apr 09 13:12:57 2018 PDT</td></tr></table>
|
{"hexsha": "0e6c8f5c701bf5da234afac42cf3402ef49e331c", "size": 51083, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "qutip-notebooks-master/examples/photon-emission.ipynb", "max_stars_repo_name": "OliverDudgeon/QSync", "max_stars_repo_head_hexsha": "34adbcf37d501b803aa000b0421ce22fb7934e9b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "qutip-notebooks-master/examples/photon-emission.ipynb", "max_issues_repo_name": "OliverDudgeon/QSync", "max_issues_repo_head_hexsha": "34adbcf37d501b803aa000b0421ce22fb7934e9b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qutip-notebooks-master/examples/photon-emission.ipynb", "max_forks_repo_name": "OliverDudgeon/QSync", "max_forks_repo_head_hexsha": "34adbcf37d501b803aa000b0421ce22fb7934e9b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 89.15008726, "max_line_length": 24576, "alphanum_fraction": 0.7606835934, "converted": true, "num_tokens": 6270}
|
import os
import numpy as np
from gym import spaces
import mujoco_py
from envs.gym_kuka_mujoco.envs.assets import kuka_asset_dir
from envs.gym_kuka_mujoco.utils.quaternion import identity_quat, subQuat, quatAdd, mat2Quat
from envs.mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite, forwardVelKinSite
from envs.gym_kuka_mujoco.utils.mujoco_utils import get_qpos_indices, get_qvel_indices, get_actuator_indices, \
get_joint_indices
from envs.mujoco.mujoco_config import MujocoConfig
from envs.gym_kuka_mujoco.utils.control_utils import *
from envs.gym_kuka_mujoco.utils.transform_utils import *
from collections.abc import Iterable
import transforms3d as transforms3d
# Numerical sanity check of the orientation-error utilities.
# NOTE(review): orientation_error and euler2mat come from the star-imported
# control_utils / transform_utils modules — verify their conventions there.

# "Real" orientation as static-XYZ Euler angles in radians (roughly pi/2, 0, pi/2).
state = np.array([1.57061e+00, 1.39156e-03, 1.56917e+00])
# Reference (target) orientation, same convention.
reference = np.array([-0.32692, 0.19711, -2.75531])
# Rotation matrices for both orientations ('sxyz' = static/extrinsic XYZ axes).
real_ori_attractor = transforms3d.euler.euler2mat(
    state[0],
    state[1],
    state[2],
    'sxyz')
ref_ori_attractor = transforms3d.euler.euler2mat(
    reference[0],
    reference[1],
    reference[2],
    'sxyz')
# Orientation error between the two frames.
ori_error_point = orientation_error(real_ori_attractor, ref_ori_attractor)
mat_t = transforms3d.euler.euler2mat(state[0],
                                     state[1],
                                     state[2],
                                     'sxyz')
# Rotation matrix built from the error vector itself — assumes the error is
# interpretable as Euler angles; TODO confirm against orientation_error's docs.
mat_d = transforms3d.euler.euler2mat(ori_error_point[0],
                                     ori_error_point[1],
                                     ori_error_point[2],
                                     'sxyz')
# Print the error and scaled variants.  Note 2*mat_t is not a valid rotation
# matrix, so those entries only probe how orientation_error reacts to scaling —
# presumably a debugging experiment.
print("ori_error :",
      ori_error_point,
      orientation_error(mat_d, 2 * mat_t),
      orientation_error(mat_d, mat_t),
      orientation_error(euler2mat(state), 2 * euler2mat(state))
      )
|
{"hexsha": "18b0b1ca46b2cad84738680166609e91ca427f38", "size": 1699, "ext": "py", "lang": "Python", "max_stars_repo_path": "envs/envs_assistive/alg_test.py", "max_stars_repo_name": "hzm2016/assistive-gym-robosuite", "max_stars_repo_head_hexsha": "5c529f4444cc386383618bfa584341740a8468f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-22T07:45:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-22T07:45:28.000Z", "max_issues_repo_path": "envs/envs_assistive/alg_test.py", "max_issues_repo_name": "hzm2016/assistive-gym-robosuite", "max_issues_repo_head_hexsha": "5c529f4444cc386383618bfa584341740a8468f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "envs/envs_assistive/alg_test.py", "max_forks_repo_name": "hzm2016/assistive-gym-robosuite", "max_forks_repo_head_hexsha": "5c529f4444cc386383618bfa584341740a8468f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3958333333, "max_line_length": 111, "alphanum_fraction": 0.6745144202, "include": true, "reason": "import numpy", "num_tokens": 450}
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
-/
import topology.instances.nnreal
import order.liminf_limsup
import topology.metric_space.lipschitz
/-!
# Extended non-negative reals
-/
noncomputable theory
open classical set filter metric
open_locale classical topological_space ennreal nnreal big_operators filter
variables {α : Type*} {β : Type*} {γ : Type*}
namespace ennreal
variables {a b c d : ℝ≥0∞} {r p q : ℝ≥0}
variables {x y z : ℝ≥0∞} {ε ε₁ ε₂ : ℝ≥0∞} {s : set ℝ≥0∞}
section topological_space
open topological_space
/-- Topology on `ℝ≥0∞`.
Note: this is different from the `emetric_space` topology. The `emetric_space` topology has
`is_open {⊤}`, while this topology doesn't have singleton elements. -/
instance : topological_space ℝ≥0∞ := preorder.topology ℝ≥0∞
-- The topology above is by definition the order topology.
instance : order_topology ℝ≥0∞ := ⟨rfl⟩
instance : t2_space ℝ≥0∞ := by apply_instance -- short-circuit type class inference
-- A countable basis: for every rational `q ≥ 0`, the open rays below and above `real.to_nnreal q`.
instance : second_countable_topology ℝ≥0∞ :=
⟨⟨⋃q ≥ (0:ℚ), {{a : ℝ≥0∞ | a < real.to_nnreal q}, {a : ℝ≥0∞ | ↑(real.to_nnreal q) < a}},
(countable_encodable _).bUnion $ assume a ha, (countable_singleton _).insert _,
le_antisymm
(le_generate_from $ by simp [or_imp_distrib, is_open_lt', is_open_gt'] {contextual := tt})
(le_generate_from $ λ s h, begin
rcases h with ⟨a, hs | hs⟩;
-- Each subbasic ray is a union of rational-indexed rays, via `lt_iff_exists_rat_btwn`.
[ rw show s = ⋃q∈{q:ℚ | 0 ≤ q ∧ a < real.to_nnreal q}, {b | ↑(real.to_nnreal q) < b},
from set.ext (assume b, by simp [hs, @ennreal.lt_iff_exists_rat_btwn a b, and_assoc]),
rw show s = ⋃q∈{q:ℚ | 0 ≤ q ∧ ↑(real.to_nnreal q) < a}, {b | b < ↑(real.to_nnreal q)},
from set.ext (assume b,
by simp [hs, @ennreal.lt_iff_exists_rat_btwn b a, and_comm, and_assoc])];
{ apply is_open_Union, intro q,
apply is_open_Union, intro hq,
exact generate_open.basic _ (mem_bUnion hq.1 $ by simp) }
end)⟩⟩
-- The coercion `ℝ≥0 → ℝ≥0∞` is a topological embedding: both topologies are order
-- topologies and the coercion is an order embedding, so we compare generated topologies.
lemma embedding_coe : embedding (coe : ℝ≥0 → ℝ≥0∞) :=
⟨⟨begin
refine le_antisymm _ _,
{ rw [@order_topology.topology_eq_generate_intervals ℝ≥0∞ _,
← coinduced_le_iff_le_induced],
refine le_generate_from (assume s ha, _),
rcases ha with ⟨a, rfl | rfl⟩,
show is_open {b : ℝ≥0 | a < ↑b},
{ cases a; simp [none_eq_top, some_eq_coe, is_open_lt'] },
show is_open {b : ℝ≥0 | ↑b < a},
{ cases a; simp [none_eq_top, some_eq_coe, is_open_gt', is_open_const] } },
{ rw [@order_topology.topology_eq_generate_intervals ℝ≥0 _],
refine le_generate_from (assume s ha, _),
rcases ha with ⟨a, rfl | rfl⟩,
exact ⟨Ioi a, is_open_Ioi, by simp [Ioi]⟩,
exact ⟨Iio a, is_open_Iio, by simp [Iio]⟩ }
end⟩,
assume a b, coe_eq_coe.1⟩
lemma is_open_ne_top : is_open {a : ℝ≥0∞ | a ≠ ⊤} := is_open_ne
lemma is_open_Ico_zero : is_open (Ico 0 b) := by { rw ennreal.Ico_eq_Iio, exact is_open_Iio}
-- The embedding is open because its range `{a | a ≠ ⊤}` is open.
lemma open_embedding_coe : open_embedding (coe : ℝ≥0 → ℝ≥0∞) :=
⟨embedding_coe, by { convert is_open_ne_top, ext (x|_); simp [none_eq_top, some_eq_coe] }⟩
lemma coe_range_mem_nhds : range (coe : ℝ≥0 → ℝ≥0∞) ∈ 𝓝 (r : ℝ≥0∞) :=
is_open.mem_nhds open_embedding_coe.open_range $ mem_range_self _
-- Convergence in `ℝ≥0∞` to a coerced limit is equivalent to convergence in `ℝ≥0`.
@[norm_cast] lemma tendsto_coe {f : filter α} {m : α → ℝ≥0} {a : ℝ≥0} :
tendsto (λa, (m a : ℝ≥0∞)) f (𝓝 ↑a) ↔ tendsto m f (𝓝 a) :=
embedding_coe.tendsto_nhds_iff.symm
lemma continuous_coe : continuous (coe : ℝ≥0 → ℝ≥0∞) :=
embedding_coe.continuous
lemma continuous_coe_iff {α} [topological_space α] {f : α → ℝ≥0} :
continuous (λa, (f a : ℝ≥0∞)) ↔ continuous f :=
embedding_coe.continuous_iff.symm
-- Neighborhoods of a finite point are pushed forward from `ℝ≥0` along the coercion.
lemma nhds_coe {r : ℝ≥0} : 𝓝 (r : ℝ≥0∞) = (𝓝 r).map coe :=
(open_embedding_coe.map_nhds_eq r).symm
lemma tendsto_nhds_coe_iff {α : Type*} {l : filter α} {x : ℝ≥0} {f : ℝ≥0∞ → α} :
tendsto f (𝓝 ↑x) l ↔ tendsto (f ∘ coe : ℝ≥0 → α) (𝓝 x) l :=
show _ ≤ _ ↔ _ ≤ _, by rw [nhds_coe, filter.map_map]
lemma continuous_at_coe_iff {α : Type*} [topological_space α] {x : ℝ≥0} {f : ℝ≥0∞ → α} :
continuous_at f (↑x) ↔ continuous_at (f ∘ coe : ℝ≥0 → α) x :=
tendsto_nhds_coe_iff
-- Product version of `nhds_coe`, used for continuity of binary operations below.
lemma nhds_coe_coe {r p : ℝ≥0} :
𝓝 ((r : ℝ≥0∞), (p : ℝ≥0∞)) = (𝓝 (r, p)).map (λp:ℝ≥0×ℝ≥0, (p.1, p.2)) :=
((open_embedding_coe.prod open_embedding_coe).map_nhds_eq (r, p)).symm
lemma continuous_of_real : continuous ennreal.of_real :=
(continuous_coe_iff.2 continuous_id).comp nnreal.continuous_of_real
lemma tendsto_of_real {f : filter α} {m : α → ℝ} {a : ℝ} (h : tendsto m f (𝓝 a)) :
tendsto (λa, ennreal.of_real (m a)) f (𝓝 (ennreal.of_real a)) :=
tendsto.comp (continuous.tendsto continuous_of_real _) h
-- `to_nnreal` is continuous at every finite point (it is discontinuous at `⊤`).
lemma tendsto_to_nnreal {a : ℝ≥0∞} (ha : a ≠ ⊤) :
tendsto ennreal.to_nnreal (𝓝 a) (𝓝 a.to_nnreal) :=
begin
lift a to ℝ≥0 using ha,
rw [nhds_coe, tendsto_map'_iff],
exact tendsto_id
end
lemma eventually_eq_of_to_real_eventually_eq {l : filter α} {f g : α → ℝ≥0∞}
(hfi : ∀ᶠ x in l, f x ≠ ∞) (hgi : ∀ᶠ x in l, g x ≠ ∞)
(hfg : (λ x, (f x).to_real) =ᶠ[l] (λ x, (g x).to_real)) :
f =ᶠ[l] g :=
begin
filter_upwards [hfi, hgi, hfg],
intros x hfx hgx hfgx,
rwa ← ennreal.to_real_eq_to_real hfx hgx,
end
lemma continuous_on_to_nnreal : continuous_on ennreal.to_nnreal {a | a ≠ ∞} :=
λ a ha, continuous_at.continuous_within_at (tendsto_to_nnreal ha)
lemma tendsto_to_real {a : ℝ≥0∞} (ha : a ≠ ⊤) : tendsto ennreal.to_real (𝓝 a) (𝓝 a.to_real) :=
nnreal.tendsto_coe.2 $ tendsto_to_nnreal ha
/-- The set of finite `ℝ≥0∞` numbers is homeomorphic to `ℝ≥0`. -/
def ne_top_homeomorph_nnreal : {a | a ≠ ∞} ≃ₜ ℝ≥0 :=
{ continuous_to_fun := continuous_on_iff_continuous_restrict.1 continuous_on_to_nnreal,
continuous_inv_fun := continuous_subtype_mk _ continuous_coe,
.. ne_top_equiv_nnreal }
/-- The set of finite `ℝ≥0∞` numbers is homeomorphic to `ℝ≥0`. -/
def lt_top_homeomorph_nnreal : {a | a < ∞} ≃ₜ ℝ≥0 :=
by refine (homeomorph.set_congr $ set.ext $ λ x, _).trans ne_top_homeomorph_nnreal;
simp only [mem_set_of_eq, lt_top_iff_ne_top]
-- Neighborhood filter of `∞`, in several equivalent forms.
lemma nhds_top : 𝓝 ∞ = ⨅ a ≠ ∞, 𝓟 (Ioi a) :=
nhds_top_order.trans $ by simp [lt_top_iff_ne_top, Ioi]
lemma nhds_top' : 𝓝 ∞ = ⨅ r : ℝ≥0, 𝓟 (Ioi r) :=
nhds_top.trans $ infi_ne_top _
lemma nhds_top_basis : (𝓝 ∞).has_basis (λ a, a < ∞) (λ a, Ioi a) := nhds_top_basis
lemma tendsto_nhds_top_iff_nnreal {m : α → ℝ≥0∞} {f : filter α} :
tendsto m f (𝓝 ⊤) ↔ ∀ x : ℝ≥0, ∀ᶠ a in f, ↑x < m a :=
by simp only [nhds_top', tendsto_infi, tendsto_principal, mem_Ioi]
lemma tendsto_nhds_top_iff_nat {m : α → ℝ≥0∞} {f : filter α} :
tendsto m f (𝓝 ⊤) ↔ ∀ n : ℕ, ∀ᶠ a in f, ↑n < m a :=
tendsto_nhds_top_iff_nnreal.trans ⟨λ h n, by simpa only [ennreal.coe_nat] using h n,
λ h x, let ⟨n, hn⟩ := exists_nat_gt x in
(h n).mono (λ y, lt_trans $ by rwa [← ennreal.coe_nat, coe_lt_coe])⟩
lemma tendsto_nhds_top {m : α → ℝ≥0∞} {f : filter α}
(h : ∀ n : ℕ, ∀ᶠ a in f, ↑n < m a) : tendsto m f (𝓝 ⊤) :=
tendsto_nhds_top_iff_nat.2 h
lemma tendsto_nat_nhds_top : tendsto (λ n : ℕ, ↑n) at_top (𝓝 ∞) :=
tendsto_nhds_top $ λ n, mem_at_top_sets.2
⟨n+1, λ m hm, ennreal.coe_nat_lt_coe_nat.2 $ nat.lt_of_succ_le hm⟩
@[simp, norm_cast] lemma tendsto_coe_nhds_top {f : α → ℝ≥0} {l : filter α} :
tendsto (λ x, (f x : ℝ≥0∞)) l (𝓝 ∞) ↔ tendsto f l at_top :=
by rw [tendsto_nhds_top_iff_nnreal, at_top_basis_Ioi.tendsto_right_iff];
[simp, apply_instance, apply_instance]
-- Neighborhood filter of `0` and its bases.
lemma nhds_zero : 𝓝 (0 : ℝ≥0∞) = ⨅a ≠ 0, 𝓟 (Iio a) :=
nhds_bot_order.trans $ by simp [bot_lt_iff_ne_bot, Iio]
lemma nhds_zero_basis : (𝓝 (0 : ℝ≥0∞)).has_basis (λ a : ℝ≥0∞, 0 < a) (λ a, Iio a) := nhds_bot_basis
lemma nhds_zero_basis_Iic : (𝓝 (0 : ℝ≥0∞)).has_basis (λ a : ℝ≥0∞, 0 < a) Iic := nhds_bot_basis_Iic
@[instance] lemma nhds_within_Ioi_coe_ne_bot {r : ℝ≥0} : (𝓝[Ioi r] (r : ℝ≥0∞)).ne_bot :=
nhds_within_Ioi_self_ne_bot' ennreal.coe_lt_top
@[instance] lemma nhds_within_Ioi_zero_ne_bot : (𝓝[Ioi 0] (0 : ℝ≥0∞)).ne_bot :=
nhds_within_Ioi_coe_ne_bot
-- using Icc because
-- • don't have 'Ioo (x - ε) (x + ε) ∈ 𝓝 x' unless x > 0
-- • (x - y ≤ ε ↔ x ≤ ε + y) is true, while (x - y < ε ↔ x < ε + y) is not
lemma Icc_mem_nhds (xt : x ≠ ⊤) (ε0 : ε ≠ 0) : Icc (x - ε) (x + ε) ∈ 𝓝 x :=
begin
rw _root_.mem_nhds_iff,
by_cases x0 : x = 0,
{ use Iio (x + ε),
have : Iio (x + ε) ⊆ Icc (x - ε) (x + ε), assume a, rw x0, simpa using le_of_lt,
use this, exact ⟨is_open_Iio, mem_Iio_self_add xt ε0⟩ },
{ use Ioo (x - ε) (x + ε), use Ioo_subset_Icc_self,
exact ⟨is_open_Ioo, mem_Ioo_self_sub_add xt x0 ε0 ε0 ⟩ }
end
-- ε-characterization of `𝓝 x` at a finite point, with closed intervals (see note above).
lemma nhds_of_ne_top (xt : x ≠ ⊤) : 𝓝 x = ⨅ ε > 0, 𝓟 (Icc (x - ε) (x + ε)) :=
begin
refine le_antisymm _ _,
-- first direction
simp only [le_infi_iff, le_principal_iff], assume ε ε0, exact Icc_mem_nhds xt ε0.lt.ne',
-- second direction
rw nhds_generate_from, refine le_infi (assume s, le_infi $ assume hs, _),
rcases hs with ⟨xs, ⟨a, (rfl : s = Ioi a)|(rfl : s = Iio a)⟩⟩,
{ rcases exists_between xs with ⟨b, ab, bx⟩,
have xb_pos : 0 < x - b := tsub_pos_iff_lt.2 bx,
have xxb : x - (x - b) = b := sub_sub_cancel xt bx.le,
refine infi_le_of_le (x - b) (infi_le_of_le xb_pos _),
simp only [mem_principal, le_principal_iff],
assume y, rintros ⟨h₁, h₂⟩, rw xxb at h₁, calc a < b : ab ... ≤ y : h₁ },
{ rcases exists_between xs with ⟨b, xb, ba⟩,
have bx_pos : 0 < b - x := tsub_pos_iff_lt.2 xb,
have xbx : x + (b - x) = b := add_tsub_cancel_of_le xb.le,
refine infi_le_of_le (b - x) (infi_le_of_le bx_pos _),
simp only [mem_principal, le_principal_iff],
assume y, rintros ⟨h₁, h₂⟩, rw xbx at h₂, calc y ≤ b : h₂ ... < a : ba },
end
/-- Characterization of neighborhoods for `ℝ≥0∞` numbers. See also `tendsto_order`
for a version with strict inequalities. -/
protected theorem tendsto_nhds {f : filter α} {u : α → ℝ≥0∞} {a : ℝ≥0∞} (ha : a ≠ ⊤) :
tendsto u f (𝓝 a) ↔ ∀ ε > 0, ∀ᶠ x in f, (u x) ∈ Icc (a - ε) (a + ε) :=
by simp only [nhds_of_ne_top ha, tendsto_infi, tendsto_principal, mem_Icc]
protected lemma tendsto_at_top [nonempty β] [semilattice_sup β] {f : β → ℝ≥0∞} {a : ℝ≥0∞}
(ha : a ≠ ⊤) : tendsto f at_top (𝓝 a) ↔ ∀ε>0, ∃N, ∀n≥N, (f n) ∈ Icc (a - ε) (a + ε) :=
by simp only [ennreal.tendsto_nhds ha, mem_at_top_sets, mem_set_of_eq, filter.eventually]
-- Addition is jointly continuous (case split on whether either coordinate is `⊤`).
instance : has_continuous_add ℝ≥0∞ :=
begin
refine ⟨continuous_iff_continuous_at.2 _⟩,
rintro ⟨(_|a), b⟩,
{ exact tendsto_nhds_top_mono' continuous_at_fst (λ p, le_add_right le_rfl) },
rcases b with (_|b),
{ exact tendsto_nhds_top_mono' continuous_at_snd (λ p, le_add_left le_rfl) },
simp only [continuous_at, some_eq_coe, nhds_coe_coe, ← coe_add, tendsto_map'_iff, (∘),
tendsto_coe, tendsto_add]
end
protected lemma tendsto_at_top_zero [hβ : nonempty β] [semilattice_sup β] {f : β → ℝ≥0∞} :
filter.at_top.tendsto f (𝓝 0) ↔ ∀ ε > 0, ∃ N, ∀ n ≥ N, f n ≤ ε :=
begin
rw ennreal.tendsto_at_top zero_ne_top,
{ simp_rw [set.mem_Icc, zero_add, zero_tsub, zero_le _, true_and], },
{ exact hβ, },
end
-- Multiplication is continuous away from the indeterminate points `(0, ⊤)` and `(⊤, 0)`;
-- the hypotheses rule those out.
protected lemma tendsto_mul (ha : a ≠ 0 ∨ b ≠ ⊤) (hb : b ≠ 0 ∨ a ≠ ⊤) :
tendsto (λp:ℝ≥0∞×ℝ≥0∞, p.1 * p.2) (𝓝 (a, b)) (𝓝 (a * b)) :=
have ht : ∀b:ℝ≥0∞, b ≠ 0 → tendsto (λp:ℝ≥0∞×ℝ≥0∞, p.1 * p.2) (𝓝 ((⊤:ℝ≥0∞), b)) (𝓝 ⊤),
begin
refine assume b hb, tendsto_nhds_top_iff_nnreal.2 $ assume n, _,
rcases lt_iff_exists_nnreal_btwn.1 (pos_iff_ne_zero.2 hb) with ⟨ε, hε, hεb⟩,
replace hε : 0 < ε, from coe_pos.1 hε,
filter_upwards [prod_is_open.mem_nhds (lt_mem_nhds $ @coe_lt_top (n / ε)) (lt_mem_nhds hεb)],
rintros ⟨a₁, a₂⟩ ⟨h₁, h₂⟩,
dsimp at h₁ h₂ ⊢,
rw [← div_mul_cancel n hε.ne', coe_mul],
exact mul_lt_mul h₁ h₂
end,
begin
cases a, {simp [none_eq_top] at hb, simp [none_eq_top, ht b hb, top_mul, hb] },
cases b,
{ simp [none_eq_top] at ha,
simp [*, nhds_swap (a : ℝ≥0∞) ⊤, none_eq_top, some_eq_coe, top_mul, tendsto_map'_iff, (∘),
mul_comm] },
simp [some_eq_coe, nhds_coe_coe, tendsto_map'_iff, (∘)],
simp only [coe_mul.symm, tendsto_coe, tendsto_mul]
end
protected lemma tendsto.mul {f : filter α} {ma : α → ℝ≥0∞} {mb : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hma : tendsto ma f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ ⊤) (hmb : tendsto mb f (𝓝 b)) (hb : b ≠ 0 ∨ a ≠ ⊤) :
tendsto (λa, ma a * mb a) f (𝓝 (a * b)) :=
show tendsto ((λp:ℝ≥0∞×ℝ≥0∞, p.1 * p.2) ∘ (λa, (ma a, mb a))) f (𝓝 (a * b)), from
tendsto.comp (ennreal.tendsto_mul ha hb) (hma.prod_mk_nhds hmb)
protected lemma tendsto.const_mul {f : filter α} {m : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hm : tendsto m f (𝓝 b)) (hb : b ≠ 0 ∨ a ≠ ⊤) : tendsto (λb, a * m b) f (𝓝 (a * b)) :=
by_cases
(assume : a = 0, by simp [this, tendsto_const_nhds])
(assume ha : a ≠ 0, ennreal.tendsto.mul tendsto_const_nhds (or.inl ha) hm hb)
protected lemma tendsto.mul_const {f : filter α} {m : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hm : tendsto m f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ ⊤) : tendsto (λx, m x * b) f (𝓝 (a * b)) :=
by simpa only [mul_comm] using ennreal.tendsto.const_mul hm ha
lemma tendsto_finset_prod_of_ne_top {ι : Type*} {f : ι → α → ℝ≥0∞} {x : filter α} {a : ι → ℝ≥0∞}
(s : finset ι) (h : ∀ i ∈ s, tendsto (f i) x (𝓝 (a i))) (h' : ∀ i ∈ s, a i ≠ ∞):
tendsto (λ b, ∏ c in s, f c b) x (𝓝 (∏ c in s, a c)) :=
begin
induction s using finset.induction with a s has IH, { simp [tendsto_const_nhds] },
simp only [finset.prod_insert has],
apply tendsto.mul (h _ (finset.mem_insert_self _ _)),
{ right,
exact (prod_lt_top (λ i hi, h' _ (finset.mem_insert_of_mem hi))).ne },
{ exact IH (λ i hi, h _ (finset.mem_insert_of_mem hi))
(λ i hi, h' _ (finset.mem_insert_of_mem hi)) },
{ exact or.inr (h' _ (finset.mem_insert_self _ _)) }
end
protected lemma continuous_at_const_mul {a b : ℝ≥0∞} (h : a ≠ ⊤ ∨ b ≠ 0) :
continuous_at ((*) a) b :=
tendsto.const_mul tendsto_id h.symm
protected lemma continuous_at_mul_const {a b : ℝ≥0∞} (h : a ≠ ⊤ ∨ b ≠ 0) :
continuous_at (λ x, x * a) b :=
tendsto.mul_const tendsto_id h.symm
protected lemma continuous_const_mul {a : ℝ≥0∞} (ha : a ≠ ⊤) : continuous ((*) a) :=
continuous_iff_continuous_at.2 $ λ x, ennreal.continuous_at_const_mul (or.inl ha)
protected lemma continuous_mul_const {a : ℝ≥0∞} (ha : a ≠ ⊤) : continuous (λ x, x * a) :=
continuous_iff_continuous_at.2 $ λ x, ennreal.continuous_at_mul_const (or.inl ha)
-- `x ^ n` is continuous for every `n : ℕ`, by induction; the case split handles
-- the indeterminate products `0 * ⊤` / `⊤ * 0` at the point `x`.
@[continuity]
lemma continuous_pow (n : ℕ) : continuous (λ a : ℝ≥0∞, a ^ n) :=
begin
induction n with n IH,
{ simp [continuous_const] },
simp_rw [nat.succ_eq_add_one, pow_add, pow_one, continuous_iff_continuous_at],
assume x,
refine ennreal.tendsto.mul (IH.tendsto _) _ tendsto_id _;
by_cases H : x = 0,
{ simp only [H, zero_ne_top, ne.def, or_true, not_false_iff]},
{ exact or.inl (λ h, H (pow_eq_zero h)) },
{ simp only [H, pow_eq_top_iff, zero_ne_top, false_or, eq_self_iff_true,
not_true, ne.def, not_false_iff, false_and], },
{ simp only [H, true_or, ne.def, not_false_iff] }
end
protected lemma tendsto.pow {f : filter α} {m : α → ℝ≥0∞} {a : ℝ≥0∞} {n : ℕ}
(hm : tendsto m f (𝓝 a)) :
tendsto (λ x, (m x) ^ n) f (𝓝 (a ^ n)) :=
((continuous_pow n).tendsto a).comp hm
lemma le_of_forall_lt_one_mul_le {x y : ℝ≥0∞} (h : ∀ a < 1, a * x ≤ y) : x ≤ y :=
begin
have : tendsto (* x) (𝓝[Iio 1] 1) (𝓝 (1 * x)) :=
(ennreal.continuous_at_mul_const (or.inr one_ne_zero)).mono_left inf_le_left,
rw one_mul at this,
haveI : (𝓝[Iio 1] (1 : ℝ≥0∞)).ne_bot := nhds_within_Iio_self_ne_bot' ennreal.zero_lt_one,
exact le_of_tendsto this (eventually_nhds_within_iff.2 $ eventually_of_forall h)
end
-- Multiplying an infimum by a constant; the hypotheses exclude the `⊤ * 0` and
-- empty-index degeneracies where the equality would fail.
lemma infi_mul_left' {ι} {f : ι → ℝ≥0∞} {a : ℝ≥0∞}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) (h0 : a = 0 → nonempty ι) :
(⨅ i, a * f i) = a * ⨅ i, f i :=
begin
by_cases H : a = ⊤ ∧ (⨅ i, f i) = 0,
{ rcases h H.1 H.2 with ⟨i, hi⟩,
rw [H.2, mul_zero, ← bot_eq_zero, infi_eq_bot],
exact λ b hb, ⟨i, by rwa [hi, mul_zero, ← bot_eq_zero]⟩ },
{ rw not_and_distrib at H,
casesI is_empty_or_nonempty ι,
{ rw [infi_of_empty, infi_of_empty, mul_top, if_neg],
exact mt h0 (not_nonempty_iff.2 ‹_›) },
{ exact (map_infi_of_continuous_at_of_monotone' (ennreal.continuous_at_const_mul H)
ennreal.mul_left_mono).symm } }
end
lemma infi_mul_left {ι} [nonempty ι] {f : ι → ℝ≥0∞} {a : ℝ≥0∞}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) :
(⨅ i, a * f i) = a * ⨅ i, f i :=
infi_mul_left' h (λ _, ‹nonempty ι›)
lemma infi_mul_right' {ι} {f : ι → ℝ≥0∞} {a : ℝ≥0∞}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) (h0 : a = 0 → nonempty ι) :
(⨅ i, f i * a) = (⨅ i, f i) * a :=
by simpa only [mul_comm a] using infi_mul_left' h h0
lemma infi_mul_right {ι} [nonempty ι] {f : ι → ℝ≥0∞} {a : ℝ≥0∞}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) :
(⨅ i, f i * a) = (⨅ i, f i) * a :=
infi_mul_right' h (λ _, ‹nonempty ι›)
-- Inversion is continuous everywhere on `ℝ≥0∞` (including at `0` and `⊤`).
protected lemma continuous_inv : continuous (has_inv.inv : ℝ≥0∞ → ℝ≥0∞) :=
continuous_iff_continuous_at.2 $ λ a, tendsto_order.2
⟨begin
assume b hb,
simp only [@ennreal.lt_inv_iff_lt_inv b],
exact gt_mem_nhds (ennreal.lt_inv_iff_lt_inv.1 hb),
end,
begin
assume b hb,
simp only [gt_iff_lt, @ennreal.inv_lt_iff_inv_lt _ b],
exact lt_mem_nhds (ennreal.inv_lt_iff_inv_lt.1 hb)
end⟩
@[simp] protected lemma tendsto_inv_iff {f : filter α} {m : α → ℝ≥0∞} {a : ℝ≥0∞} :
tendsto (λ x, (m x)⁻¹) f (𝓝 a⁻¹) ↔ tendsto m f (𝓝 a) :=
⟨λ h, by simpa only [function.comp, ennreal.inv_inv]
using (ennreal.continuous_inv.tendsto a⁻¹).comp h,
(ennreal.continuous_inv.tendsto a).comp⟩
protected lemma tendsto.div {f : filter α} {ma : α → ℝ≥0∞} {mb : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hma : tendsto ma f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ 0) (hmb : tendsto mb f (𝓝 b)) (hb : b ≠ ⊤ ∨ a ≠ ⊤) :
tendsto (λa, ma a / mb a) f (𝓝 (a / b)) :=
by { apply tendsto.mul hma _ (ennreal.tendsto_inv_iff.2 hmb) _; simp [ha, hb] }
protected lemma tendsto.const_div {f : filter α} {m : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hm : tendsto m f (𝓝 b)) (hb : b ≠ ⊤ ∨ a ≠ ⊤) : tendsto (λb, a / m b) f (𝓝 (a / b)) :=
by { apply tendsto.const_mul (ennreal.tendsto_inv_iff.2 hm), simp [hb] }
protected lemma tendsto.div_const {f : filter α} {m : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hm : tendsto m f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ 0) : tendsto (λx, m x / b) f (𝓝 (a / b)) :=
by { apply tendsto.mul_const hm, simp [ha] }
protected lemma tendsto_inv_nat_nhds_zero : tendsto (λ n : ℕ, (n : ℝ≥0∞)⁻¹) at_top (𝓝 0) :=
ennreal.inv_top ▸ ennreal.tendsto_inv_iff.2 tendsto_nat_nhds_top
-- Distributing `+ a` over suprema; uses the monotone-limit characterization of `Sup`.
lemma bsupr_add {ι} {s : set ι} (hs : s.nonempty) {f : ι → ℝ≥0∞} :
(⨆ i ∈ s, f i) + a = ⨆ i ∈ s, f i + a :=
begin
simp only [← Sup_image], symmetry,
rw [image_comp (+ a)],
refine is_lub.Sup_eq ((is_lub_Sup $ f '' s).is_lub_of_tendsto _ (hs.image _) _),
exacts [λ x _ y _ hxy, add_le_add hxy le_rfl,
tendsto.add (tendsto_id' inf_le_left) tendsto_const_nhds]
end
lemma Sup_add {s : set ℝ≥0∞} (hs : s.nonempty) : Sup s + a = ⨆b∈s, b + a :=
by rw [Sup_eq_supr, bsupr_add hs]
lemma supr_add {ι : Sort*} {s : ι → ℝ≥0∞} [h : nonempty ι] : supr s + a = ⨆b, s b + a :=
let ⟨x⟩ := h in
calc supr s + a = Sup (range s) + a : by rw Sup_range
... = (⨆b∈range s, b + a) : Sup_add ⟨s x, x, rfl⟩
... = _ : supr_range
lemma add_supr {ι : Sort*} {s : ι → ℝ≥0∞} [h : nonempty ι] : a + supr s = ⨆b, a + s b :=
by rw [add_comm, supr_add]; simp [add_comm]
lemma supr_add_supr {ι : Sort*} {f g : ι → ℝ≥0∞} (h : ∀i j, ∃k, f i + g j ≤ f k + g k) :
supr f + supr g = (⨆ a, f a + g a) :=
begin
by_cases hι : nonempty ι,
{ letI := hι,
refine le_antisymm _ (supr_le $ λ a, add_le_add (le_supr _ _) (le_supr _ _)),
simpa [add_supr, supr_add] using
λ i j:ι, show f i + g j ≤ ⨆ a, f a + g a, from
let ⟨k, hk⟩ := h i j in le_supr_of_le k hk },
{ have : ∀f:ι → ℝ≥0∞, (⨆i, f i) = 0 := λ f, supr_eq_zero.mpr (λ i, (hι ⟨i⟩).elim),
rw [this, this, this, zero_add] }
end
lemma supr_add_supr_of_monotone {ι : Sort*} [semilattice_sup ι]
{f g : ι → ℝ≥0∞} (hf : monotone f) (hg : monotone g) :
supr f + supr g = (⨆ a, f a + g a) :=
supr_add_supr $ assume i j, ⟨i ⊔ j, add_le_add (hf $ le_sup_left) (hg $ le_sup_right)⟩
lemma finset_sum_supr_nat {α} {ι} [semilattice_sup ι] {s : finset α} {f : α → ι → ℝ≥0∞}
(hf : ∀a, monotone (f a)) :
∑ a in s, supr (f a) = (⨆ n, ∑ a in s, f a n) :=
begin
refine finset.induction_on s _ _,
{ simp, },
{ assume a s has ih,
simp only [finset.sum_insert has],
rw [ih, supr_add_supr_of_monotone (hf a)],
assume i j h,
exact (finset.sum_le_sum $ assume a ha, hf a h) }
end
-- Distributing a constant multiple over `Sup`; the all-zero case is handled separately
-- because `a * 0 = 0` holds even for `a = ⊤`.
lemma mul_Sup {s : set ℝ≥0∞} {a : ℝ≥0∞} : a * Sup s = ⨆i∈s, a * i :=
begin
by_cases hs : ∀x∈s, x = (0:ℝ≥0∞),
{ have h₁ : Sup s = 0 := (bot_unique $ Sup_le $ assume a ha, (hs a ha).symm ▸ le_refl 0),
have h₂ : (⨆i ∈ s, a * i) = 0 :=
(bot_unique $ supr_le $ assume a, supr_le $ assume ha, by simp [hs a ha]),
rw [h₁, h₂, mul_zero] },
{ simp only [not_forall] at hs,
rcases hs with ⟨x, hx, hx0⟩,
have s₁ : Sup s ≠ 0 :=
pos_iff_ne_zero.1 (lt_of_lt_of_le (pos_iff_ne_zero.2 hx0) (le_Sup hx)),
have : Sup ((λb, a * b) '' s) = a * Sup s :=
is_lub.Sup_eq ((is_lub_Sup s).is_lub_of_tendsto
(assume x _ y _ h, mul_le_mul_left' h _)
⟨x, hx⟩
(ennreal.tendsto.const_mul (tendsto_id' inf_le_left) (or.inl s₁))),
rw [this.symm, Sup_image] }
end
lemma mul_supr {ι : Sort*} {f : ι → ℝ≥0∞} {a : ℝ≥0∞} : a * supr f = ⨆i, a * f i :=
by rw [← Sup_range, mul_Sup, supr_range]
lemma supr_mul {ι : Sort*} {f : ι → ℝ≥0∞} {a : ℝ≥0∞} : supr f * a = ⨆i, f i * a :=
by rw [mul_comm, mul_supr]; congr; funext; rw [mul_comm]
lemma supr_div {ι : Sort*} {f : ι → ℝ≥0∞} {a : ℝ≥0∞} : supr f / a = ⨆i, f i / a :=
supr_mul
-- `λ b, ↑r - b` is continuous; split on whether the argument is finite or `⊤`.
protected lemma tendsto_coe_sub : ∀{b:ℝ≥0∞}, tendsto (λb:ℝ≥0∞, ↑r - b) (𝓝 b) (𝓝 (↑r - b)) :=
begin
refine forall_ennreal.2 ⟨λ a, _, _⟩,
{ simp [@nhds_coe a, tendsto_map'_iff, (∘), tendsto_coe, ← with_top.coe_sub],
exact tendsto_const_nhds.sub tendsto_id },
simp,
exact (tendsto.congr' (mem_of_superset (lt_mem_nhds $ @coe_lt_top r) $
by simp [le_of_lt] {contextual := tt})) tendsto_const_nhds
end
lemma sub_supr {ι : Sort*} [nonempty ι] {b : ι → ℝ≥0∞} (hr : a < ⊤) :
a - (⨆i, b i) = (⨅i, a - b i) :=
let ⟨r, eq, _⟩ := lt_iff_exists_coe.mp hr in
have Inf ((λb, ↑r - b) '' range b) = ↑r - (⨆i, b i),
from is_glb.Inf_eq $ is_lub_supr.is_glb_of_tendsto
(assume x _ y _, tsub_le_tsub (le_refl (r : ℝ≥0∞)))
(range_nonempty _)
(ennreal.tendsto_coe_sub.comp (tendsto_id' inf_le_left)),
by rw [eq, ←this]; simp [Inf_image, infi_range, -mem_range]; exact le_rfl
lemma exists_countable_dense_no_zero_top :
∃ (s : set ℝ≥0∞), countable s ∧ dense s ∧ 0 ∉ s ∧ ∞ ∉ s :=
begin
obtain ⟨s, s_count, s_dense, hs⟩ : ∃ s : set ℝ≥0∞, countable s ∧ dense s ∧
(∀ x, is_bot x → x ∉ s) ∧ (∀ x, is_top x → x ∉ s) := exists_countable_dense_no_bot_top ℝ≥0∞,
exact ⟨s, s_count, s_dense, λ h, hs.1 0 (by simp) h, λ h, hs.2 ∞ (by simp) h⟩,
end
end topological_space
section tsum
-- Infinite sums in `ℝ≥0∞`. Every function is summable here (`ennreal.summable`), so most
-- lemmas from the general `tsum` API apply unconditionally.
variables {f g : α → ℝ≥0∞}
@[norm_cast] protected lemma has_sum_coe {f : α → ℝ≥0} {r : ℝ≥0} :
has_sum (λa, (f a : ℝ≥0∞)) ↑r ↔ has_sum f r :=
have (λs:finset α, ∑ a in s, ↑(f a)) = (coe : ℝ≥0 → ℝ≥0∞) ∘ (λs:finset α, ∑ a in s, f a),
from funext $ assume s, ennreal.coe_finset_sum.symm,
by unfold has_sum; rw [this, tendsto_coe]
protected lemma tsum_coe_eq {f : α → ℝ≥0} (h : has_sum f r) : ∑'a, (f a : ℝ≥0∞) = r :=
(ennreal.has_sum_coe.2 h).tsum_eq
protected lemma coe_tsum {f : α → ℝ≥0} : summable f → ↑(tsum f) = ∑'a, (f a : ℝ≥0∞)
| ⟨r, hr⟩ := by rw [hr.tsum_eq, ennreal.tsum_coe_eq hr]
-- Key fact: in `ℝ≥0∞` the partial sums are monotone, so they converge to their supremum.
protected lemma has_sum : has_sum f (⨆s:finset α, ∑ a in s, f a) :=
tendsto_at_top_supr $ λ s t, finset.sum_le_sum_of_subset
@[simp] protected lemma summable : summable f := ⟨_, ennreal.has_sum⟩
lemma tsum_coe_ne_top_iff_summable {f : β → ℝ≥0} :
∑' b, (f b:ℝ≥0∞) ≠ ∞ ↔ summable f :=
begin
refine ⟨λ h, _, λ h, ennreal.coe_tsum h ▸ ennreal.coe_ne_top⟩,
lift (∑' b, (f b:ℝ≥0∞)) to ℝ≥0 using h with a ha,
refine ⟨a, ennreal.has_sum_coe.1 _⟩,
rw ha,
exact ennreal.summable.has_sum
end
protected lemma tsum_eq_supr_sum : ∑'a, f a = (⨆s:finset α, ∑ a in s, f a) :=
ennreal.has_sum.tsum_eq
protected lemma tsum_eq_supr_sum' {ι : Type*} (s : ι → finset α) (hs : ∀ t, ∃ i, t ⊆ s i) :
∑' a, f a = ⨆ i, ∑ a in s i, f a :=
begin
rw [ennreal.tsum_eq_supr_sum],
symmetry,
change (⨆i:ι, (λ t : finset α, ∑ a in t, f a) (s i)) = ⨆s:finset α, ∑ a in s, f a,
exact (finset.sum_mono_set f).supr_comp_eq hs
end
-- Rearrangement lemmas: in `ℝ≥0∞` sums can be regrouped freely since everything is summable.
protected lemma tsum_sigma {β : α → Type*} (f : Πa, β a → ℝ≥0∞) :
∑'p:Σa, β a, f p.1 p.2 = ∑'a b, f a b :=
tsum_sigma' (assume b, ennreal.summable) ennreal.summable
protected lemma tsum_sigma' {β : α → Type*} (f : (Σ a, β a) → ℝ≥0∞) :
∑'p:(Σa, β a), f p = ∑'a b, f ⟨a, b⟩ :=
tsum_sigma' (assume b, ennreal.summable) ennreal.summable
protected lemma tsum_prod {f : α → β → ℝ≥0∞} : ∑'p:α×β, f p.1 p.2 = ∑'a, ∑'b, f a b :=
tsum_prod' ennreal.summable $ λ _, ennreal.summable
protected lemma tsum_comm {f : α → β → ℝ≥0∞} : ∑'a, ∑'b, f a b = ∑'b, ∑'a, f a b :=
tsum_comm' ennreal.summable (λ _, ennreal.summable) (λ _, ennreal.summable)
protected lemma tsum_add : ∑'a, (f a + g a) = (∑'a, f a) + (∑'a, g a) :=
tsum_add ennreal.summable ennreal.summable
protected lemma tsum_le_tsum (h : ∀a, f a ≤ g a) : ∑'a, f a ≤ ∑'a, g a :=
tsum_le_tsum h ennreal.summable ennreal.summable
protected lemma sum_le_tsum {f : α → ℝ≥0∞} (s : finset α) : ∑ x in s, f x ≤ ∑' x, f x :=
sum_le_tsum s (λ x hx, zero_le _) ennreal.summable
protected lemma tsum_eq_supr_nat' {f : ℕ → ℝ≥0∞} {N : ℕ → ℕ} (hN : tendsto N at_top at_top) :
∑'i:ℕ, f i = (⨆i:ℕ, ∑ a in finset.range (N i), f a) :=
ennreal.tsum_eq_supr_sum' _ $ λ t,
let ⟨n, hn⟩ := t.exists_nat_subset_range,
⟨k, _, hk⟩ := exists_le_of_tendsto_at_top hN 0 n in
⟨k, finset.subset.trans hn (finset.range_mono hk)⟩
protected lemma tsum_eq_supr_nat {f : ℕ → ℝ≥0∞} :
∑'i:ℕ, f i = (⨆i:ℕ, ∑ a in finset.range i, f a) :=
ennreal.tsum_eq_supr_sum' _ finset.exists_nat_subset_range
protected lemma tsum_eq_liminf_sum_nat {f : ℕ → ℝ≥0∞} :
∑' i, f i = filter.at_top.liminf (λ n, ∑ i in finset.range n, f i) :=
begin
rw [ennreal.tsum_eq_supr_nat, filter.liminf_eq_supr_infi_of_nat],
congr,
refine funext (λ n, le_antisymm _ _),
{ refine le_binfi (λ i hi, finset.sum_le_sum_of_subset_of_nonneg _ (λ _ _ _, zero_le _)),
simpa only [finset.range_subset, add_le_add_iff_right] using hi, },
{ refine le_trans (infi_le _ n) _,
simp [le_refl n, le_refl ((finset.range n).sum f)], },
end
protected lemma le_tsum (a : α) : f a ≤ ∑'a, f a :=
le_tsum' ennreal.summable a
@[simp] protected lemma tsum_eq_zero : ∑' i, f i = 0 ↔ ∀ i, f i = 0 :=
⟨λ h i, nonpos_iff_eq_zero.1 $ h ▸ ennreal.le_tsum i, λ h, by simp [h]⟩
protected lemma tsum_eq_top_of_eq_top : (∃ a, f a = ∞) → ∑' a, f a = ∞
| ⟨a, ha⟩ := top_unique $ ha ▸ ennreal.le_tsum a
@[simp] protected lemma tsum_top [nonempty α] : ∑' a : α, ∞ = ∞ :=
let ⟨a⟩ := ‹nonempty α› in ennreal.tsum_eq_top_of_eq_top ⟨a, rfl⟩
-- Over an infinite index type, a nonzero constant sums to `∞`: compare with arbitrarily
-- large finite partial sums `n * c`.
lemma tsum_const_eq_top_of_ne_zero {α : Type*} [infinite α] {c : ℝ≥0∞} (hc : c ≠ 0) :
(∑' (a : α), c) = ∞ :=
begin
have A : tendsto (λ (n : ℕ), (n : ℝ≥0∞) * c) at_top (𝓝 (∞ * c)),
{ apply ennreal.tendsto.mul_const tendsto_nat_nhds_top,
simp only [true_or, top_ne_zero, ne.def, not_false_iff] },
have B : ∀ (n : ℕ), (n : ℝ≥0∞) * c ≤ (∑' (a : α), c),
{ assume n,
rcases infinite.exists_subset_card_eq α n with ⟨s, hs⟩,
simpa [hs] using @ennreal.sum_le_tsum α (λ i, c) s },
simpa [hc] using le_of_tendsto' A B,
end
protected lemma ne_top_of_tsum_ne_top (h : ∑' a, f a ≠ ∞) (a : α) : f a ≠ ∞ :=
λ ha, h $ ennreal.tsum_eq_top_of_eq_top ⟨a, ha⟩
-- Pulling a constant out of a sum; needs no side condition because either all terms are `0`
-- or the total is nonzero, and both cases are handled explicitly.
protected lemma tsum_mul_left : ∑'i, a * f i = a * ∑'i, f i :=
if h : ∀i, f i = 0 then by simp [h] else
let ⟨i, (hi : f i ≠ 0)⟩ := not_forall.mp h in
have sum_ne_0 : ∑'i, f i ≠ 0, from ne_of_gt $
calc 0 < f i : lt_of_le_of_ne (zero_le _) hi.symm
... ≤ ∑'i, f i : ennreal.le_tsum _,
have tendsto (λs:finset α, ∑ j in s, a * f j) at_top (𝓝 (a * ∑'i, f i)),
by rw [← show (*) a ∘ (λs:finset α, ∑ j in s, f j) = λs, ∑ j in s, a * f j,
from funext $ λ s, finset.mul_sum];
exact ennreal.tendsto.const_mul ennreal.summable.has_sum (or.inl sum_ne_0),
has_sum.tsum_eq this
protected lemma tsum_mul_right : (∑'i, f i * a) = (∑'i, f i) * a :=
by simp [mul_comm, ennreal.tsum_mul_left]
@[simp] lemma tsum_supr_eq {α : Type*} (a : α) {f : α → ℝ≥0∞} :
∑'b:α, (⨆ (h : a = b), f b) = f a :=
le_antisymm
(by rw [ennreal.tsum_eq_supr_sum]; exact supr_le (assume s,
calc (∑ b in s, ⨆ (h : a = b), f b) ≤ ∑ b in {a}, ⨆ (h : a = b), f b :
finset.sum_le_sum_of_ne_zero $ assume b _ hb,
suffices a = b, by simpa using this.symm,
classical.by_contradiction $ assume h,
by simpa [h] using hb
... = f a : by simp))
(calc f a ≤ (⨆ (h : a = a), f a) : le_supr (λh:a=a, f a) rfl
... ≤ (∑'b:α, ⨆ (h : a = b), f b) : ennreal.le_tsum _)
-- For `ℕ`-indexed sums, `has_sum` reduces to convergence of the sequence of partial sums.
lemma has_sum_iff_tendsto_nat {f : ℕ → ℝ≥0∞} (r : ℝ≥0∞) :
has_sum f r ↔ tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
begin
refine ⟨has_sum.tendsto_sum_nat, assume h, _⟩,
rw [← supr_eq_of_tendsto _ h, ← ennreal.tsum_eq_supr_nat],
{ exact ennreal.summable.has_sum },
{ exact assume s t hst, finset.sum_le_sum_of_subset (finset.range_subset.2 hst) }
end
lemma tendsto_nat_tsum (f : ℕ → ℝ≥0∞) :
tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 (∑' n, f n)) :=
by { rw ← has_sum_iff_tendsto_nat, exact ennreal.summable.has_sum }
lemma to_nnreal_apply_of_tsum_ne_top {α : Type*} {f : α → ℝ≥0∞} (hf : ∑' i, f i ≠ ∞) (x : α) :
(((ennreal.to_nnreal ∘ f) x : ℝ≥0) : ℝ≥0∞) = f x :=
coe_to_nnreal $ ennreal.ne_top_of_tsum_ne_top hf _
lemma summable_to_nnreal_of_tsum_ne_top {α : Type*} {f : α → ℝ≥0∞} (hf : ∑' i, f i ≠ ∞) :
summable (ennreal.to_nnreal ∘ f) :=
by simpa only [←tsum_coe_ne_top_iff_summable, to_nnreal_apply_of_tsum_ne_top hf] using hf
lemma tendsto_cofinite_zero_of_tsum_ne_top {α} {f : α → ℝ≥0∞} (hf : ∑' x, f x ≠ ∞) :
tendsto f cofinite (𝓝 0) :=
begin
have f_ne_top : ∀ n, f n ≠ ∞, from ennreal.ne_top_of_tsum_ne_top hf,
have h_f_coe : f = λ n, ((f n).to_nnreal : ennreal),
from funext (λ n, (coe_to_nnreal (f_ne_top n)).symm),
rw [h_f_coe, ←@coe_zero, tendsto_coe],
exact nnreal.tendsto_cofinite_zero_of_summable (summable_to_nnreal_of_tsum_ne_top hf),
end
lemma tendsto_at_top_zero_of_tsum_ne_top {f : ℕ → ℝ≥0∞} (hf : ∑' x, f x ≠ ∞) :
tendsto f at_top (𝓝 0) :=
by { rw ←nat.cofinite_eq_at_top, exact tendsto_cofinite_zero_of_tsum_ne_top hf }
/-- The sum over the complement of a finset tends to `0` when the finset grows to cover the whole
space. This does not need a summability assumption, as otherwise all sums are zero. -/
lemma tendsto_tsum_compl_at_top_zero {α : Type*} {f : α → ℝ≥0∞} (hf : ∑' x, f x ≠ ∞) :
tendsto (λ (s : finset α), ∑' b : {x // x ∉ s}, f b) at_top (𝓝 0) :=
begin
lift f to α → ℝ≥0 using ennreal.ne_top_of_tsum_ne_top hf,
convert ennreal.tendsto_coe.2 (nnreal.tendsto_tsum_compl_at_top_zero f),
ext1 s,
rw ennreal.coe_tsum,
exact nnreal.summable_comp_injective (tsum_coe_ne_top_iff_summable.1 hf) subtype.coe_injective
end
protected lemma tsum_apply {ι α : Type*} {f : ι → α → ℝ≥0∞} {x : α} :
(∑' i, f i) x = ∑' i, f i x :=
tsum_apply $ pi.summable.mpr $ λ _, ennreal.summable
-- Subtraction distributes over `tsum` provided the subtrahend's sum is finite and pointwise
-- dominated; both hypotheses are needed since `ℝ≥0∞` subtraction is truncated.
lemma tsum_sub {f : ℕ → ℝ≥0∞} {g : ℕ → ℝ≥0∞} (h₁ : ∑' i, g i ≠ ∞) (h₂ : g ≤ f) :
∑' i, (f i - g i) = (∑' i, f i) - (∑' i, g i) :=
begin
have h₃: ∑' i, (f i - g i) = ∑' i, (f i - g i + g i) - ∑' i, g i,
{ rw [ennreal.tsum_add, add_sub_self h₁]},
have h₄:(λ i, (f i - g i) + (g i)) = f,
{ ext n, rw tsub_add_cancel_of_le (h₂ n)},
rw h₄ at h₃, apply h₃,
end
end tsum
-- Convergence of `to_real` values is equivalent to convergence in `ℝ≥0∞`, provided
-- every term and the limit are finite (so `of_real ∘ to_real` round-trips).
lemma tendsto_to_real_iff {ι} {fi : filter ι} {f : ι → ℝ≥0∞} (hf : ∀ i, f i ≠ ∞) {x : ℝ≥0∞}
(hx : x ≠ ∞) :
fi.tendsto (λ n, (f n).to_real) (𝓝 x.to_real) ↔ fi.tendsto f (𝓝 x) :=
begin
refine ⟨λ h, _, λ h, tendsto.comp (ennreal.tendsto_to_real hx) h⟩,
have h_eq : f = (λ n, ennreal.of_real (f n).to_real),
by { ext1 n, rw ennreal.of_real_to_real (hf n), },
rw [h_eq, ← ennreal.of_real_to_real hx],
exact ennreal.tendsto_of_real h,
end
-- An `ℝ≥0∞`-valued sum of coercions is finite iff the real-valued series is summable.
lemma tsum_coe_ne_top_iff_summable_coe {f : α → ℝ≥0} :
∑' a, (f a : ℝ≥0∞) ≠ ∞ ↔ summable (λ a, (f a : ℝ)) :=
begin
rw nnreal.summable_coe,
exact tsum_coe_ne_top_iff_summable,
end
-- Contrapositive form of `tsum_coe_ne_top_iff_summable_coe`.
lemma tsum_coe_eq_top_iff_not_summable_coe {f : α → ℝ≥0} :
∑' a, (f a : ℝ≥0∞) = ∞ ↔ ¬ summable (λ a, (f a : ℝ)) :=
begin
rw [← @not_not (∑' a, ↑(f a) = ⊤)],
exact not_congr tsum_coe_ne_top_iff_summable_coe
end
-- A finite `ℝ≥0∞`-valued sum yields a summable real-valued series of `to_real`s.
lemma summable_to_real {f : α → ℝ≥0∞} (hsum : ∑' x, f x ≠ ∞) :
summable (λ x, (f x).to_real) :=
begin
lift f to α → ℝ≥0 using ennreal.ne_top_of_tsum_ne_top hsum,
rwa ennreal.tsum_coe_ne_top_iff_summable_coe at hsum,
end
end ennreal
namespace nnreal
open_locale nnreal
-- An `ℝ≥0` sum equals the truncation of the corresponding `ℝ≥0∞` sum; in the
-- non-summable case both sides are `0` (`tsum` defaults to `0`, `⊤.to_nnreal = 0`).
lemma tsum_eq_to_nnreal_tsum {f : β → ℝ≥0} :
(∑' b, f b) = (∑' b, (f b : ℝ≥0∞)).to_nnreal :=
begin
by_cases h : summable f,
{ rw [← ennreal.coe_tsum h, ennreal.to_nnreal_coe] },
{ have A := tsum_eq_zero_of_not_summable h,
simp only [← ennreal.tsum_coe_ne_top_iff_summable, not_not] at h,
simp only [h, ennreal.top_to_nnreal, A] }
end
/-- Comparison test of convergence of `ℝ≥0`-valued series. -/
lemma exists_le_has_sum_of_le {f g : β → ℝ≥0} {r : ℝ≥0}
(hgf : ∀b, g b ≤ f b) (hfr : has_sum f r) : ∃p≤r, has_sum g p :=
-- Work in `ℝ≥0∞`, where `g` is automatically summable, then descend via `le_coe_iff`.
have ∑'b, (g b : ℝ≥0∞) ≤ r,
begin
refine has_sum_le (assume b, _) ennreal.summable.has_sum (ennreal.has_sum_coe.2 hfr),
exact ennreal.coe_le_coe.2 (hgf _)
end,
let ⟨p, eq, hpr⟩ := ennreal.le_coe_iff.1 this in
⟨p, hpr, ennreal.has_sum_coe.1 $ eq ▸ ennreal.summable.has_sum⟩
/-- Comparison test of convergence of `ℝ≥0`-valued series. -/
lemma summable_of_le {f g : β → ℝ≥0} (hgf : ∀b, g b ≤ f b) : summable f → summable g
| ⟨r, hfr⟩ := let ⟨p, _, hp⟩ := exists_le_has_sum_of_le hgf hfr in hp.summable
/-- A series of non-negative real numbers converges to `r` in the sense of `has_sum` if and only if
the sequence of partial sum converges to `r`. -/
lemma has_sum_iff_tendsto_nat {f : ℕ → ℝ≥0} {r : ℝ≥0} :
has_sum f r ↔ tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
begin
rw [← ennreal.has_sum_coe, ennreal.has_sum_iff_tendsto_nat],
simp only [ennreal.coe_finset_sum.symm],
exact ennreal.tendsto_coe
end
lemma not_summable_iff_tendsto_nat_at_top {f : ℕ → ℝ≥0} :
¬ summable f ↔ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top at_top :=
begin
split,
{ intros h,
refine ((tendsto_of_monotone _).resolve_right h).comp _,
exacts [finset.sum_mono_set _, tendsto_finset_range] },
{ rintros hnat ⟨r, hr⟩,
exact not_tendsto_nhds_of_tendsto_at_top hnat _ (has_sum_iff_tendsto_nat.1 hr) }
end
lemma summable_iff_not_tendsto_nat_at_top {f : ℕ → ℝ≥0} :
summable f ↔ ¬ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top at_top :=
by rw [← not_iff_not, not_not, not_summable_iff_tendsto_nat_at_top]
lemma summable_of_sum_range_le {f : ℕ → ℝ≥0} {c : ℝ≥0}
(h : ∀ n, ∑ i in finset.range n, f i ≤ c) : summable f :=
begin
apply summable_iff_not_tendsto_nat_at_top.2 (λ H, _),
rcases exists_lt_of_tendsto_at_top H 0 c with ⟨n, -, hn⟩,
exact lt_irrefl _ (hn.trans_le (h n)),
end
lemma tsum_le_of_sum_range_le {f : ℕ → ℝ≥0} {c : ℝ≥0}
(h : ∀ n, ∑ i in finset.range n, f i ≤ c) : ∑' n, f n ≤ c :=
le_of_tendsto' (has_sum_iff_tendsto_nat.1 (summable_of_sum_range_le h).has_sum) h
lemma tsum_comp_le_tsum_of_inj {β : Type*} {f : α → ℝ≥0} (hf : summable f)
{i : β → α} (hi : function.injective i) : ∑' x, f (i x) ≤ ∑' x, f x :=
tsum_le_tsum_of_inj i hi (λ c hc, zero_le _) (λ b, le_refl _) (summable_comp_injective hf hi) hf
lemma summable_sigma {β : Π x : α, Type*} {f : (Σ x, β x) → ℝ≥0} :
summable f ↔ (∀ x, summable (λ y, f ⟨x, y⟩)) ∧ summable (λ x, ∑' y, f ⟨x, y⟩) :=
begin
split,
{ simp only [← nnreal.summable_coe, nnreal.coe_tsum],
exact λ h, ⟨h.sigma_factor, h.sigma⟩ },
{ rintro ⟨h₁, h₂⟩,
simpa only [← ennreal.tsum_coe_ne_top_iff_summable, ennreal.tsum_sigma', ennreal.coe_tsum, h₁]
using h₂ }
end
lemma indicator_summable {f : α → ℝ≥0} (hf : summable f) (s : set α) :
summable (s.indicator f) :=
begin
refine nnreal.summable_of_le (λ a, le_trans (le_of_eq (s.indicator_apply f a)) _) hf,
split_ifs,
exact le_refl (f a),
exact zero_le_coe,
end
lemma tsum_indicator_ne_zero {f : α → ℝ≥0} (hf : summable f) {s : set α} (h : ∃ a ∈ s, f a ≠ 0) :
∑' x, (s.indicator f) x ≠ 0 :=
λ h', let ⟨a, ha, hap⟩ := h in
hap (trans (set.indicator_apply_eq_self.mpr (absurd ha)).symm
(((tsum_eq_zero_iff (indicator_summable hf s)).1 h') a))
open finset
/-- For `f : ℕ → ℝ≥0`, then `∑' k, f (k + i)` tends to zero. This does not require a summability
assumption on `f`, as otherwise all sums are zero. -/
lemma tendsto_sum_nat_add (f : ℕ → ℝ≥0) : tendsto (λ i, ∑' k, f (k + i)) at_top (𝓝 0) :=
begin
rw ← tendsto_coe,
convert tendsto_sum_nat_add (λ i, (f i : ℝ)),
norm_cast,
end
lemma has_sum_lt {f g : α → ℝ≥0} {sf sg : ℝ≥0} {i : α} (h : ∀ (a : α), f a ≤ g a) (hi : f i < g i)
(hf : has_sum f sf) (hg : has_sum g sg) : sf < sg :=
begin
have A : ∀ (a : α), (f a : ℝ) ≤ g a := λ a, nnreal.coe_le_coe.2 (h a),
have : (sf : ℝ) < sg :=
has_sum_lt A (nnreal.coe_lt_coe.2 hi) (has_sum_coe.2 hf) (has_sum_coe.2 hg),
exact nnreal.coe_lt_coe.1 this
end
@[mono] lemma has_sum_strict_mono
{f g : α → ℝ≥0} {sf sg : ℝ≥0} (hf : has_sum f sf) (hg : has_sum g sg) (h : f < g) : sf < sg :=
let ⟨hle, i, hi⟩ := pi.lt_def.mp h in has_sum_lt hle hi hf hg
lemma tsum_lt_tsum {f g : α → ℝ≥0} {i : α} (h : ∀ (a : α), f a ≤ g a) (hi : f i < g i)
(hg : summable g) : ∑' n, f n < ∑' n, g n :=
has_sum_lt h hi (summable_of_le h hg).has_sum hg.has_sum
@[mono] lemma tsum_strict_mono {f g : α → ℝ≥0} (hg : summable g) (h : f < g) :
∑' n, f n < ∑' n, g n :=
let ⟨hle, i, hi⟩ := pi.lt_def.mp h in tsum_lt_tsum hle hi hg
lemma tsum_pos {g : α → ℝ≥0} (hg : summable g) (i : α) (hi : 0 < g i) :
0 < ∑' b, g b :=
by { rw ← tsum_zero, exact tsum_lt_tsum (λ a, zero_le _) hi hg }
end nnreal
namespace ennreal

/-- `to_real` commutes with the tsum of an everywhere-finite `ℝ≥0∞` function. -/
lemma tsum_to_real_eq
  {f : α → ℝ≥0∞} (hf : ∀ a, f a ≠ ∞) :
  (∑' a, f a).to_real = ∑' a, (f a).to_real :=
begin
  lift f to α → ℝ≥0 using hf,
  -- go through `to_nnreal`, where the sum is computed in `ℝ≥0`
  have : (∑' (a : α), (f a : ℝ≥0∞)).to_real =
    ((∑' (a : α), (f a : ℝ≥0∞)).to_nnreal : ℝ≥0∞).to_real,
  { rw [ennreal.coe_to_real], refl },
  rw [this, ← nnreal.tsum_eq_to_nnreal_tsum, ennreal.coe_to_real],
  exact nnreal.coe_tsum
end

/-- For `f : ℕ → ℝ≥0∞` with finite sum, the tails `∑' k, f (k + i)` tend to zero. -/
lemma tendsto_sum_nat_add (f : ℕ → ℝ≥0∞) (hf : ∑' i, f i ≠ ∞) :
  tendsto (λ i, ∑' k, f (k + i)) at_top (𝓝 0) :=
begin
  lift f to ℕ → ℝ≥0 using ennreal.ne_top_of_tsum_ne_top hf,
  replace hf : summable f := tsum_coe_ne_top_iff_summable.1 hf,
  simp only [← ennreal.coe_tsum, nnreal.summable_nat_add _ hf, ← ennreal.coe_zero],
  exact_mod_cast nnreal.tendsto_sum_nat_add f
end

end ennreal
/-- Composing with an injection can only decrease a tsum of non-negative reals. -/
lemma tsum_comp_le_tsum_of_inj {β : Type*} {f : α → ℝ} (hf : summable f) (hn : ∀ a, 0 ≤ f a)
  {i : β → α} (hi : function.injective i) : tsum (f ∘ i) ≤ tsum f :=
begin
  lift f to α → ℝ≥0 using hn,
  rw nnreal.summable_coe at hf,
  simpa only [(∘), ← nnreal.coe_tsum] using nnreal.tsum_comp_le_tsum_of_inj hf hi
end

/-- Comparison test of convergence of series of non-negative real numbers. -/
lemma summable_of_nonneg_of_le {f g : β → ℝ}
  (hg : ∀b, 0 ≤ g b) (hgf : ∀b, g b ≤ f b) (hf : summable f) : summable g :=
begin
  lift f to β → ℝ≥0 using λ b, (hg b).trans (hgf b),
  lift g to β → ℝ≥0 using hg,
  rw nnreal.summable_coe at hf ⊢,
  exact nnreal.summable_of_le (λ b, nnreal.coe_le_coe.1 (hgf b)) hf
end

/-- A series of non-negative real numbers converges to `r` in the sense of `has_sum` if and only if
the sequence of partial sum converges to `r`. -/
lemma has_sum_iff_tendsto_nat_of_nonneg {f : ℕ → ℝ} (hf : ∀i, 0 ≤ f i) (r : ℝ) :
  has_sum f r ↔ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
begin
  lift f to ℕ → ℝ≥0 using hf,
  simp only [has_sum, ← nnreal.coe_sum, nnreal.tendsto_coe'],
  exact exists_congr (λ hr, nnreal.has_sum_iff_tendsto_nat)
end

/-- `ennreal.of_real` commutes with the tsum of a summable non-negative function. -/
lemma ennreal.of_real_tsum_of_nonneg {f : α → ℝ} (hf_nonneg : ∀ n, 0 ≤ f n) (hf : summable f) :
  ennreal.of_real (∑' n, f n) = ∑' n, ennreal.of_real (f n) :=
by simp_rw [ennreal.of_real, ennreal.tsum_coe_eq (nnreal.has_sum_of_real_of_nonneg hf_nonneg hf)]

/-- A non-summable series of non-negative reals has partial sums tending to infinity. -/
lemma not_summable_iff_tendsto_nat_at_top_of_nonneg {f : ℕ → ℝ} (hf : ∀ n, 0 ≤ f n) :
  ¬ summable f ↔ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top at_top :=
begin
  lift f to ℕ → ℝ≥0 using hf,
  exact_mod_cast nnreal.not_summable_iff_tendsto_nat_at_top
end

/-- Contrapositive form of `not_summable_iff_tendsto_nat_at_top_of_nonneg`. -/
lemma summable_iff_not_tendsto_nat_at_top_of_nonneg {f : ℕ → ℝ} (hf : ∀ n, 0 ≤ f n) :
  summable f ↔ ¬ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top at_top :=
by rw [← not_iff_not, not_not, not_summable_iff_tendsto_nat_at_top_of_nonneg hf]

/-- Sigma-type summability criterion for non-negative real functions. -/
lemma summable_sigma_of_nonneg {β : Π x : α, Type*} {f : (Σ x, β x) → ℝ} (hf : ∀ x, 0 ≤ f x) :
  summable f ↔ (∀ x, summable (λ y, f ⟨x, y⟩)) ∧ summable (λ x, ∑' y, f ⟨x, y⟩) :=
by { lift f to (Σ x, β x) → ℝ≥0 using hf, exact_mod_cast nnreal.summable_sigma }

/-- A bound on all finite sub-sums gives summability of a non-negative function. -/
lemma summable_of_sum_le {ι : Type*} {f : ι → ℝ} {c : ℝ} (hf : 0 ≤ f)
  (h : ∀ u : finset ι, ∑ x in u, f x ≤ c) :
  summable f :=
⟨ ⨆ u : finset ι, ∑ x in u, f x,
  tendsto_at_top_csupr (finset.sum_mono_set_of_nonneg hf) ⟨c, λ y ⟨u, hu⟩, hu ▸ h u⟩ ⟩

/-- Uniformly bounded partial sums give a summable non-negative series. -/
lemma summable_of_sum_range_le {f : ℕ → ℝ} {c : ℝ} (hf : ∀ n, 0 ≤ f n)
  (h : ∀ n, ∑ i in finset.range n, f i ≤ c) : summable f :=
begin
  apply (summable_iff_not_tendsto_nat_at_top_of_nonneg hf).2 (λ H, _),
  rcases exists_lt_of_tendsto_at_top H 0 c with ⟨n, -, hn⟩,
  exact lt_irrefl _ (hn.trans_le (h n)),
end

/-- A bound on all partial sums also bounds the tsum of a non-negative series. -/
lemma tsum_le_of_sum_range_le {f : ℕ → ℝ} {c : ℝ} (hf : ∀ n, 0 ≤ f n)
  (h : ∀ n, ∑ i in finset.range n, f i ≤ c) : ∑' n, f n ≤ c :=
le_of_tendsto' ((has_sum_iff_tendsto_nat_of_nonneg hf _).1
  (summable_of_sum_range_le hf h).has_sum) h

/-- If a sequence `f` with non-negative terms is dominated by a sequence `g` with summable
series and at least one term of `f` is strictly smaller than the corresponding term in `g`,
then the series of `f` is strictly smaller than the series of `g`. -/
lemma tsum_lt_tsum_of_nonneg {i : ℕ} {f g : ℕ → ℝ}
  (h0 : ∀ (b : ℕ), 0 ≤ f b) (h : ∀ (b : ℕ), f b ≤ g b) (hi : f i < g i) (hg : summable g) :
  ∑' n, f n < ∑' n, g n :=
tsum_lt_tsum h hi (summable_of_nonneg_of_le h0 h hg) hg
section
variables [emetric_space β]
open ennreal filter emetric

/-- In an emetric ball, the distance between points is everywhere finite -/
lemma edist_ne_top_of_mem_ball {a : β} {r : ℝ≥0∞} (x y : ball a r) : edist x.1 y.1 ≠ ⊤ :=
lt_top_iff_ne_top.1 $
calc edist x y ≤ edist a x + edist a y : edist_triangle_left x.1 y.1 a
  ... < r + r : by rw [edist_comm a x, edist_comm a y]; exact add_lt_add x.2 y.2
  ... ≤ ⊤ : le_top

/-- Each ball in an extended metric space gives us a metric space, as the edist
is everywhere finite. -/
def metric_space_emetric_ball (a : β) (r : ℝ≥0∞) : metric_space (ball a r) :=
emetric_space.to_metric_space edist_ne_top_of_mem_ball

-- make the instance above available within this section only
local attribute [instance] metric_space_emetric_ball

/-- Neighbourhoods inside the ball agree with neighbourhoods in the ambient space. -/
lemma nhds_eq_nhds_emetric_ball (a x : β) (r : ℝ≥0∞) (h : x ∈ ball a r) :
  𝓝 x = map (coe : ball a r → β) (𝓝 ⟨x, h⟩) :=
(map_nhds_subtype_coe_eq _ $ is_open.mem_nhds emetric.is_open_ball h).symm
end
section
variable [pseudo_emetric_space α]
open emetric

/-- Convergence is equivalent to the extended distance to the limit tending to `0`. -/
lemma tendsto_iff_edist_tendsto_0 {l : filter β} {f : β → α} {y : α} :
  tendsto f l (𝓝 y) ↔ tendsto (λ x, edist (f x) y) l (𝓝 0) :=
by simp only [emetric.nhds_basis_eball.tendsto_right_iff, emetric.mem_ball,
  @tendsto_order ℝ≥0∞ β _ _, forall_prop_of_false ennreal.not_lt_zero, forall_const, true_and]

/-- Yet another metric characterization of Cauchy sequences on integers. This one is often the
most efficient. -/
lemma emetric.cauchy_seq_iff_le_tendsto_0 [nonempty β] [semilattice_sup β] {s : β → α} :
  cauchy_seq s ↔ (∃ (b: β → ℝ≥0∞), (∀ n m N : β, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N)
    ∧ (tendsto b at_top (𝓝 0))) :=
⟨begin
  assume hs,
  rw emetric.cauchy_seq_iff at hs,
  /- `s` is a Cauchy sequence. The sequence `b` will be constructed by taking
  the supremum of the distances between `s n` and `s m` for `n, m ≥ N`. -/
  let b := λN, Sup ((λ(p : β × β), edist (s p.1) (s p.2))''{p | p.1 ≥ N ∧ p.2 ≥ N}),
  --Prove that it bounds the distances of points in the Cauchy sequence
  have C : ∀ n m N, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N,
  { refine λm n N hm hn, le_Sup _,
    use (prod.mk m n),
    simp only [and_true, eq_self_iff_true, set.mem_set_of_eq],
    exact ⟨hm, hn⟩ },
  --Prove that it tends to `0`, by using the Cauchy property of `s`
  have D : tendsto b at_top (𝓝 0),
  { refine tendsto_order.2 ⟨λa ha, absurd ha (ennreal.not_lt_zero), λε εpos, _⟩,
    rcases exists_between εpos with ⟨δ, δpos, δlt⟩,
    rcases hs δ δpos with ⟨N, hN⟩,
    refine filter.mem_at_top_sets.2 ⟨N, λn hn, _⟩,
    have : b n ≤ δ := Sup_le begin
      simp only [and_imp, set.mem_image, set.mem_set_of_eq, exists_imp_distrib, prod.exists],
      intros d p q hp hq hd,
      rw ← hd,
      exact le_of_lt (hN p q (le_trans hn hp) (le_trans hn hq))
    end,
    simpa using lt_of_le_of_lt this δlt },
  -- Conclude
  exact ⟨b, ⟨C, D⟩⟩
end,
begin
  rintros ⟨b, ⟨b_bound, b_lim⟩⟩,
  /- b : β → ℝ≥0∞, b_bound : ∀ (n m N : β), N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N,
  b_lim : tendsto b at_top (𝓝 0) -/
  refine emetric.cauchy_seq_iff.2 (λε εpos, _),
  have : ∀ᶠ n in at_top, b n < ε := (tendsto_order.1 b_lim ).2 _ εpos,
  rcases filter.mem_at_top_sets.1 this with ⟨N, hN⟩,
  exact ⟨N, λm n hm hn, calc
    edist (s m) (s n) ≤ b N : b_bound m n N hm hn
    ... < ε : (hN _ (le_refl N)) ⟩
end⟩

/-- A function dominated by `f y + C * edist x y` for a finite `C` is continuous. -/
lemma continuous_of_le_add_edist {f : α → ℝ≥0∞} (C : ℝ≥0∞)
  (hC : C ≠ ⊤) (h : ∀x y, f x ≤ f y + C * edist x y) : continuous f :=
begin
  rcases eq_or_ne C 0 with (rfl|C0),
  { -- if `C = 0` the bound forces `f` to be constant
    simp only [zero_mul, add_zero] at h,
    exact continuous_of_const (λ x y, le_antisymm (h _ _) (h _ _)) },
  { refine continuous_iff_continuous_at.2 (λ x, _),
    by_cases hx : f x = ∞,
    { -- near a point with infinite value, `f` is eventually identically `∞`
      have : f =ᶠ[𝓝 x] (λ _, ∞),
      { filter_upwards [emetric.ball_mem_nhds x ennreal.coe_lt_top],
        refine λ y (hy : edist y x < ⊤), _, rw edist_comm at hy,
        simpa [hx, hC, hy.ne] using h x y },
      exact this.continuous_at },
    { refine (ennreal.tendsto_nhds hx).2 (λ ε (ε0 : 0 < ε), _),
      filter_upwards [emetric.closed_ball_mem_nhds x (ennreal.div_pos_iff.2 ⟨ε0.ne', hC⟩)],
      have hεC : C * (ε / C) = ε := ennreal.mul_div_cancel' C0 hC,
      refine λ y (hy : edist y x ≤ ε / C), ⟨tsub_le_iff_right.2 _, _⟩,
      { rw edist_comm at hy,
        calc f x ≤ f y + C * edist x y : h x y
          ... ≤ f y + C * (ε / C) : add_le_add_left (mul_le_mul_left' hy C) (f y)
          ... = f y + ε : by rw hεC },
      { calc f y ≤ f x + C * edist y x : h y x
          ... ≤ f x + C * (ε / C) : add_le_add_left (mul_le_mul_left' hy C) (f x)
          ... = f x + ε : by rw hεC } } }
end

/-- The extended distance is a continuous function of the pair of points. -/
theorem continuous_edist : continuous (λp:α×α, edist p.1 p.2) :=
begin
  apply continuous_of_le_add_edist 2 (by norm_num),
  rintros ⟨x, y⟩ ⟨x', y'⟩,
  calc edist x y ≤ edist x x' + edist x' y' + edist y' y : edist_triangle4 _ _ _ _
    ... = edist x' y' + (edist x x' + edist y y') : by simp [edist_comm]; cc
    ... ≤ edist x' y' + (edist (x, y) (x', y') + edist (x, y) (x', y')) :
      add_le_add_left (add_le_add (le_max_left _ _) (le_max_right _ _)) _
    ... = edist x' y' + 2 * edist (x, y) (x', y') : by rw [← mul_two, mul_comm]
end

/-- `edist` of two continuous functions is continuous. -/
@[continuity] theorem continuous.edist [topological_space β] {f g : β → α}
  (hf : continuous f) (hg : continuous g) : continuous (λb, edist (f b) (g b)) :=
continuous_edist.comp (hf.prod_mk hg : _)

/-- Limits commute with `edist`. -/
theorem filter.tendsto.edist {f g : β → α} {x : filter β} {a b : α}
  (hf : tendsto f x (𝓝 a)) (hg : tendsto g x (𝓝 b)) :
  tendsto (λx, edist (f x) (g x)) x (𝓝 (edist a b)) :=
(continuous_edist.tendsto (a, b)).comp (hf.prod_mk_nhds hg)

/-- A sequence whose consecutive distances are bounded by a series of finite sum is Cauchy. -/
lemma cauchy_seq_of_edist_le_of_tsum_ne_top {f : ℕ → α} (d : ℕ → ℝ≥0∞)
  (hf : ∀ n, edist (f n) (f n.succ) ≤ d n) (hd : tsum d ≠ ∞) :
  cauchy_seq f :=
begin
  lift d to (ℕ → nnreal) using (λ i, ennreal.ne_top_of_tsum_ne_top hd i),
  rw ennreal.tsum_coe_ne_top_iff_summable at hd,
  exact cauchy_seq_of_edist_le_of_summable d hf hd
end

/-- Closed emetric balls are closed sets. -/
lemma emetric.is_closed_ball {a : α} {r : ℝ≥0∞} : is_closed (closed_ball a r) :=
is_closed_le (continuous_id.edist continuous_const) continuous_const

/-- The emetric diameter is invariant under closure. -/
@[simp] lemma emetric.diam_closure (s : set α) : diam (closure s) = diam s :=
begin
  refine le_antisymm (diam_le $ λ x hx y hy, _) (diam_mono subset_closure),
  have : edist x y ∈ closure (Iic (diam s)),
    from map_mem_closure2 (@continuous_edist α _) hx hy (λ _ _, edist_le_diam_of_mem),
  rwa closure_Iic at this
end

/-- The metric diameter is invariant under closure. -/
@[simp] lemma metric.diam_closure {α : Type*} [pseudo_metric_space α] (s : set α) :
  metric.diam (closure s) = diam s :=
by simp only [metric.diam, emetric.diam_closure]

/-- Functions Lipschitz on a set form a closed subset of the function space. -/
lemma is_closed_set_of_lipschitz_on_with {α β} [pseudo_emetric_space α] [pseudo_emetric_space β]
  (K : ℝ≥0) (s : set α) :
  is_closed {f : α → β | lipschitz_on_with K f s} :=
begin
  simp only [lipschitz_on_with, set_of_forall],
  refine is_closed_bInter (λ x hx, is_closed_bInter $ λ y hy, is_closed_le _ _),
  exacts [continuous.edist (continuous_apply x) (continuous_apply y), continuous_const]
end

/-- Lipschitz functions form a closed subset of the function space. -/
lemma is_closed_set_of_lipschitz_with {α β} [pseudo_emetric_space α] [pseudo_emetric_space β]
  (K : ℝ≥0) :
  is_closed {f : α → β | lipschitz_with K f} :=
by simp only [← lipschitz_on_univ, is_closed_set_of_lipschitz_on_with]

namespace real

/-- For a bounded set `s : set ℝ`, its `emetric.diam` is equal to `Sup s - Inf s` reinterpreted as
`ℝ≥0∞`. -/
lemma ediam_eq {s : set ℝ} (h : bounded s) :
  emetric.diam s = ennreal.of_real (Sup s - Inf s) :=
begin
  rcases eq_empty_or_nonempty s with rfl|hne, { simp },
  refine le_antisymm (metric.ediam_le_of_forall_dist_le $ λ x hx y hy, _) _,
  { have := real.subset_Icc_Inf_Sup_of_bounded h,
    exact real.dist_le_of_mem_Icc (this hx) (this hy) },
  { apply ennreal.of_real_le_of_le_to_real,
    rw [← metric.diam, ← metric.diam_closure],
    have h' := real.bounded_iff_bdd_below_bdd_above.1 h,
    calc Sup s - Inf s ≤ dist (Sup s) (Inf s) : le_abs_self _
      ... ≤ diam (closure s) :
        dist_le_diam_of_mem h.closure (cSup_mem_closure hne h'.2) (cInf_mem_closure hne h'.1) }
end

/-- For a bounded set `s : set ℝ`, its `metric.diam` is equal to `Sup s - Inf s`. -/
lemma diam_eq {s : set ℝ} (h : bounded s) : metric.diam s = Sup s - Inf s :=
begin
  rw [metric.diam, real.ediam_eq h, ennreal.to_real_of_real],
  rw real.bounded_iff_bdd_below_bdd_above at h,
  exact sub_nonneg.2 (real.Inf_le_Sup s h.1 h.2)
end

/-- The emetric diameter of an open interval. -/
@[simp] lemma ediam_Ioo (a b : ℝ) :
  emetric.diam (Ioo a b) = ennreal.of_real (b - a) :=
begin
  rcases le_or_lt b a with h|h,
  { simp [h] },
  { rw [real.ediam_eq (bounded_Ioo _ _), cSup_Ioo h, cInf_Ioo h] },
end

/-- The emetric diameter of a closed interval. -/
@[simp] lemma ediam_Icc (a b : ℝ) :
  emetric.diam (Icc a b) = ennreal.of_real (b - a) :=
begin
  rcases le_or_lt a b with h|h,
  { rw [real.ediam_eq (bounded_Icc _ _), cSup_Icc h, cInf_Icc h] },
  { simp [h, h.le] }
end

/-- The emetric diameter of `Ico`, squeezed between those of `Ioo` and `Icc`. -/
@[simp] lemma ediam_Ico (a b : ℝ) :
  emetric.diam (Ico a b) = ennreal.of_real (b - a) :=
le_antisymm (ediam_Icc a b ▸ diam_mono Ico_subset_Icc_self)
  (ediam_Ioo a b ▸ diam_mono Ioo_subset_Ico_self)

/-- The emetric diameter of `Ioc`, squeezed between those of `Ioo` and `Icc`. -/
@[simp] lemma ediam_Ioc (a b : ℝ) :
  emetric.diam (Ioc a b) = ennreal.of_real (b - a) :=
le_antisymm (ediam_Icc a b ▸ diam_mono Ioc_subset_Icc_self)
  (ediam_Ioo a b ▸ diam_mono Ioo_subset_Ioc_self)

end real

/-- If `edist (f n) (f (n+1))` is bounded above by a function `d : ℕ → ℝ≥0∞`,
then the distance from `f n` to the limit is bounded by `∑'_{k=n}^∞ d k`. -/
lemma edist_le_tsum_of_edist_le_of_tendsto {f : ℕ → α} (d : ℕ → ℝ≥0∞)
  (hf : ∀ n, edist (f n) (f n.succ) ≤ d n)
  {a : α} (ha : tendsto f at_top (𝓝 a)) (n : ℕ) :
  edist (f n) a ≤ ∑' m, d (n + m) :=
begin
  refine le_of_tendsto (tendsto_const_nhds.edist ha)
    (mem_at_top_sets.2 ⟨n, λ m hnm, _⟩),
  refine le_trans (edist_le_Ico_sum_of_edist_le hnm (λ k _ _, hf k)) _,
  rw [finset.sum_Ico_eq_sum_range],
  exact sum_le_tsum _ (λ _ _, zero_le _) ennreal.summable
end

/-- If `edist (f n) (f (n+1))` is bounded above by a function `d : ℕ → ℝ≥0∞`,
then the distance from `f 0` to the limit is bounded by `∑'_{k=0}^∞ d k`. -/
lemma edist_le_tsum_of_edist_le_of_tendsto₀ {f : ℕ → α} (d : ℕ → ℝ≥0∞)
  (hf : ∀ n, edist (f n) (f n.succ) ≤ d n)
  {a : α} (ha : tendsto f at_top (𝓝 a)) :
  edist (f 0) a ≤ ∑' m, d m :=
by simpa using edist_le_tsum_of_edist_le_of_tendsto d hf ha 0

end --section
|
{"author": "jjaassoonn", "repo": "projective_space", "sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce", "save_path": "github-repos/lean/jjaassoonn-projective_space", "path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/topology/instances/ennreal.lean"}
|
# primitive collision detection helper
import math
import numpy as np
import basis.data_adapter as da
from panda3d.core import NodePath, CollisionNode, CollisionTraverser, CollisionHandlerQueue, BitMask32
from panda3d.core import CollisionBox, CollisionSphere, CollisionPolygon, GeomVertexReader
def gen_box_cdnp(pdnp, name='cdnp_box', radius=0.01):
    """
    Generate a single CollisionBox node enclosing the tight bounds of a nodepath.

    :param pdnp: panda3d nodepath whose tight bounds define the box
    :param name: name of the returned CollisionNode
    :param radius: margin by which the bounding box is enlarged on each side
    :return: CollisionNode holding one CollisionBox
    author: weiwei
    date: 20180811
    """
    bottom_left, top_right = pdnp.getTightBounds()
    center = (bottom_left + top_right) / 2.0
    # enlarge the bounding box by pushing both corners away from the center.
    # BUG FIX: panda3d's normalize() works in place and returns a bool, so the
    # original `(v).normalize() * radius` multiplied a bool, and `-=` pulled the
    # corners inward (shrinking instead of enlarging).  Mirror the correct
    # pattern used in gen_cylindrical_cdnp: normalize first, then move outward.
    bottomleft_adjustvec = bottom_left - center
    bottomleft_adjustvec.normalize()
    bottom_left += bottomleft_adjustvec * radius
    topright_adjustvec = top_right - center
    topright_adjustvec.normalize()
    top_right += topright_adjustvec * radius
    collision_primitive = CollisionBox(bottom_left, top_right)
    collision_node = CollisionNode(name)
    collision_node.addSolid(collision_primitive)
    return collision_node
def gen_cylindrical_cdnp(pdnp, name='cdnp_cylinder', radius=0.01):
    """
    Approximate a cylinder-like collision volume with four rotated boxes built
    from the tight bounds of a nodepath.

    :param pdnp: panda3d nodepath whose tight bounds define the volume
    :param name: name of the returned CollisionNode
    :param radius: margin by which the bounds are enlarged horizontally
    :return: CollisionNode holding four CollisionBox solids
    author: weiwei
    date: 20200108
    """
    corner_min, corner_max = pdnp.getTightBounds()
    center = (corner_min + corner_max) / 2.0
    # push each corner horizontally (z is zeroed) away from the center by radius
    min_offset = corner_min - center
    min_offset[2] = 0
    min_offset.normalize()
    corner_min += min_offset * radius
    max_offset = corner_max - center
    max_offset[2] = 0
    max_offset.normalize()
    corner_max += max_offset * radius
    min_np = da.pdv3_to_npv3(corner_min)
    max_np = da.pdv3_to_npv3(corner_max)
    collision_node = CollisionNode(name)
    # four boxes at angles in (pi/10 .. 4*pi/10) approximate the round profile
    for angle in np.nditer(np.linspace(math.pi / 10, math.pi * 4 / 10, 4)):
        cos_a = math.cos(angle)
        sin_a = math.sin(angle)
        rotated_min = da.npv3_to_pdv3(np.array([min_np[0] * cos_a, min_np[1] * sin_a, min_np[2]]))
        rotated_max = da.npv3_to_pdv3(np.array([max_np[0] * cos_a, max_np[1] * sin_a, max_np[2]]))
        collision_node.addSolid(CollisionBox(rotated_min, rotated_max))
    return collision_node
def gen_polygons_cdnp(pdnp, name='cdnp_polygons', radius=.01):
    """
    Build a CollisionNode containing one CollisionPolygon per sub-primitive of
    every GeomNode found under pdnp.

    :param pdnp: panda3d nodepath scanned for GeomNodes
    :param name: name of the returned CollisionNode
    :param radius: TODO (currently unused; intended as an expansion margin)
    :return: CollisionNode with one CollisionPolygon per sub-primitive
    author: weiwei
    date: 20210204
    """
    collision_node = CollisionNode(name)
    # counter = 0
    for geom in pdnp.findAllMatches('**/+GeomNode'):
        geom_node = geom.node()
        for g in range(geom_node.getNumGeoms()):
            # decompose() rewrites higher-order primitives into independent ones
            geom = geom_node.getGeom(g).decompose()
            vdata = geom.getVertexData()
            # stateful reader; rows are selected explicitly via setRow below
            vreader = GeomVertexReader(vdata, 'vertex')
            for p in range(geom.getNumPrimitives()):
                prim = geom.getPrimitive(p)
                for p2 in range(prim.getNumPrimitives()):
                    # [s, e) indexes the vertices of this sub-primitive
                    s = prim.getPrimitiveStart(p2)
                    e = prim.getPrimitiveEnd(p2)
                    v = []
                    for vi in range(s, e):
                        vreader.setRow(prim.getVertex(vi))
                        # TODO expand radius by moving along normal directions
                        v.append(vreader.getData3f())
                    col_poly = CollisionPolygon(*v)
                    collision_node.addSolid(col_poly)
                    # print("polygon ", counter)
                    # counter += 1
    return collision_node
def gen_surfaceballs_cdnp(objtrm, name='cdnp_surfaceball', radius=0.01):
    """
    Build a CollisionNode of spheres sampled on the surface of a trimesh.

    :param objtrm: trimesh-like object exposing .area and .sample_surface(n)
    :param name: name of the returned CollisionNode
    :param radius: radius of each sphere
    :return: CollisionNode with one CollisionSphere per sample
    author: weiwei
    date: 20180811
    """
    n_points = int(math.ceil(objtrm.area / (radius * 0.3) ** 2))
    if n_points > 120:
        # cap the sample count to keep collision checks cheap
        n_points = 120
    collision_node = CollisionNode(name)
    for pnt in objtrm.sample_surface(n_points):
        collision_node.addSolid(CollisionSphere(pnt[0], pnt[1], pnt[2], radius=radius))
    return collision_node
def gen_pointcloud_cdnp(objtrm, name='cdnp_pointcloud', radius=0.02):
    """
    Build a CollisionNode containing one sphere per vertex of a trimesh.

    :param objtrm: trimesh-like object exposing .vertices
    :param name: name of the returned CollisionNode
    :param radius: radius of every sphere
    :return: CollisionNode with one CollisionSphere per vertex
    author: weiwei
    date: 20191210
    """
    cdnp = CollisionNode(name)
    for vertex in objtrm.vertices:
        cdnp.addSolid(CollisionSphere(vertex[0], vertex[1], vertex[2], radius=radius))
    return cdnp
def is_collided(objcm_list0, objcm_list1, toggle_contact_points=False, toggle_plot_cdprimit=False):
    """
    detect the collision between collision models

    :param objcm_list0: a single collision model or a list of collision models
    :param objcm_list1: a single collision model or a list of collision models
    :param toggle_contact_points: also return contact points when a collision is found
    :param toggle_plot_cdprimit: visualize the collision primitives of both sides
    :return: True / (True, contact_points) on collision, otherwise False
             NOTE(review): with toggle_contact_points the no-collision branch
             still returns a bare False rather than (False, []) -- confirm
             callers before changing this.
    author: weiwei
    date: 20190312osaka, 20201214osaka
    """
    if not isinstance(objcm_list0, list):
        objcm_list0 = [objcm_list0]
    if not isinstance(objcm_list1, list):
        objcm_list1 = [objcm_list1]
    if toggle_plot_cdprimit:
        for model in objcm_list0 + objcm_list1:
            model.show_cdprimit()
    scratch_np = NodePath("collision nodepath")
    traverser = CollisionTraverser()
    handler = CollisionHandlerQueue()
    # only group 0 acts as "from" colliders; group 1 is merely reparented
    for model in objcm_list0:
        traverser.addCollider(model.copy_cdnp_to(scratch_np), handler)
    for model in objcm_list1:
        model.copy_cdnp_to(scratch_np)
    traverser.traverse(scratch_np)
    if handler.getNumEntries() == 0:
        return False
    if toggle_contact_points:
        contact_points = [da.pdv3_to_npv3(cd_entry.getSurfacePoint(base.render))
                          for cd_entry in handler.getEntries()]
        return True, contact_points
    return True
if __name__ == '__main__':
    # Smoke test: one polygon-primitive bunny plus 100 randomly placed
    # box-primitive housings; time a single one-vs-many collision query.
    import os
    import time
    import basis
    import numpy as np
    import modeling.collision_model as cm
    import modeling.geometric_model as gm
    import visualization.panda.world as wd
    base = wd.World(cam_pos=[.7, .7, .7], lookat_pos=[0, 0, 0])
    objpath = os.path.join(basis.__path__[0], 'objects', 'bunnysim.stl')
    objcm = cm.CollisionModel(objpath, cdprimit_type='polygons')
    objcm.set_rgba(np.array([.2, .5, 0, 1]))
    objcm.set_pos(np.array([.01, .01, .01]))
    objcm.attach_to(base)
    objcm.show_cdprimit()
    # scatter the housings at random positions in the unit cube
    objcmlist = []
    for i in range(100):
        objcmlist.append(cm.CollisionModel(os.path.join(basis.__path__[0], 'objects', 'housing.stl'), cdprimit_type='box'))
        objcmlist[-1].set_pos(np.random.random_sample((3,)))
        objcmlist[-1].set_rgba(np.array([1, .5, 0, 1]))
        objcmlist[-1].attach_to(base)
        objcmlist[-1].show_cdprimit()
    # time a single collision query of the bunny against all housings
    tic = time.time()
    result = is_collided(objcm, objcmlist)
    toc = time.time()
    time_cost = toc - tic
    print(time_cost)
    print(result)
    # tic = time.time()
    # is_cmcmlist_collided2(objcm, objcmlist)
    # toc = time.time()
    # time_cost = toc-tic
    # print(time_cost)
    base.run()
# NOTE 20210321, CollisionPolygon into CollisonPolygon detection is not available for 1.19
# :collide(error): Invalid attempt to detect collision from CollisionPolygon!
#
# This means that a CollisionPolygon object was added to a
# CollisionTraverser as if it were a colliding object. However,
# no implementation for this kind of object has yet been defined
# to collide with other objects.
# wd.World(cam_pos=[1.0, 1, .0, 1.0], lookat_pos=[0, 0, 0])
# objpath = os.path.join(basis.__path__[0], 'objects', 'yumifinger.stl')
# objcm1 = cm.CollisionModel(objpath, cdprimitive_type='polygons')
# # homomat = np.array([[-0.5, -0.82363909, 0.2676166, -0.00203699],
# # [-0.86602539, 0.47552824, -0.1545085, 0.01272306],
# # [0., -0.30901703, -0.95105648, 0.12604253],
# # [0., 0., 0., 1.]])
# homomat = np.array([[ 1.00000000e+00, 2.38935501e-16, 3.78436685e-17, -7.49999983e-03],
# [ 2.38935501e-16, -9.51056600e-01, -3.09017003e-01, 2.04893537e-02],
# [-3.78436685e-17, 3.09017003e-01, -9.51056600e-01, 1.22025304e-01],
# [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
# objcm1.set_homomat(homomat)
# objcm1.set_rgba([1, 1, .3, .2])
#
# objpath = os.path.join(basis.__path__[0], 'objects', 'tubebig.stl')
# objcm2 = cm.CollisionModel(objpath, cdprimitive_type='polygons')
# objcm2.set_rgba([1, 1, .3, .2])
# iscollided, contact_points = is_collided(objcm1, objcm2, toggle_contact_points=True)
# objcm1.show_cdmesh()
# objcm2.show_cdmesh()
# objcm1.attach_to(base)
# objcm2.attach_to(base)
# print(iscollided)
# for ct_pnt in contact_points:
# gm.gen_sphere(ct_pnt, radius=.001).attach_to(base)
# base.run()
|
{"hexsha": "e1d4551754b71a8fffdb2514c120cbb640c0ad15", "size": 8860, "ext": "py", "lang": "Python", "max_stars_repo_path": "modeling/_panda_cdhelper.py", "max_stars_repo_name": "liang324/wrs", "max_stars_repo_head_hexsha": "46eadec355c61a9c7bac1fa0f3cf419b2aac19aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-07T04:51:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-07T04:51:07.000Z", "max_issues_repo_path": "modeling/_panda_cdhelper.py", "max_issues_repo_name": "liang324/wrs", "max_issues_repo_head_hexsha": "46eadec355c61a9c7bac1fa0f3cf419b2aac19aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modeling/_panda_cdhelper.py", "max_forks_repo_name": "liang324/wrs", "max_forks_repo_head_hexsha": "46eadec355c61a9c7bac1fa0f3cf419b2aac19aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1896551724, "max_line_length": 123, "alphanum_fraction": 0.6472911964, "include": true, "reason": "import numpy", "num_tokens": 2555}
|
import Base.show
export show
# Pretty-printer for a DimRedux map: reports the field and the input/output
# dimensions read from the size of the underlying matrix Ξ.
# NOTE(review): uses pre-0.7 parametric-method syntax `show{F}`; modern Julia
# requires `where F` -- confirm the targeted Julia version before porting.
function show{F}(io::IO, dr::DimRedux{F})
    k,n = size(dr.Ξ)
    print(io, "$(typeof(dr)): dimension reduction map over the field $F from $n to $k dimensions")
end
# Pretty-printer for a Sketch: reports the sketched matrix size (m×n) and the
# dimension-reduction type DR.
# NOTE(review): `k` from size(sk.X) is immediately overwritten by size(sk.Y),
# and `s,s = size(sk.Z)` keeps only the second component; neither `k` nor `s`
# is used in the printed message -- these look like leftover bindings.
function show{F,DR}(io::IO, sk::Sketch{F,DR})
    k,n = size(sk.X)
    m,k = size(sk.Y)
    s,s = size(sk.Z)
    print(io, "$(typeof(sk)): sketch for an $m×$n matrix over the field $F using a $DR dimension reduction map")
end
|
{"hexsha": "89cdaec1a17cab29a0c2a21d6e55ff9320c94213", "size": 423, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utilities.jl", "max_stars_repo_name": "udellgroup/LowRankSketch.jl", "max_stars_repo_head_hexsha": "041401ba5fbf1e6ac4e0abc4367b4111e877bcfa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-01T00:48:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-01T00:48:07.000Z", "max_issues_repo_path": "src/utilities.jl", "max_issues_repo_name": "udellgroup/LowRankSketch.jl", "max_issues_repo_head_hexsha": "041401ba5fbf1e6ac4e0abc4367b4111e877bcfa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utilities.jl", "max_forks_repo_name": "udellgroup/LowRankSketch.jl", "max_forks_repo_head_hexsha": "041401ba5fbf1e6ac4e0abc4367b4111e877bcfa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2, "max_line_length": 112, "alphanum_fraction": 0.6312056738, "num_tokens": 134}
|
import os.path
import time
import re
# Core utilities
import SimpleCV
import random
import pickle
import numpy
import layer
def change_image_format(input_image):
    """Convert an H x W x 3 (channel-last) image into a 3 x H x W float array
    with every channel scaled into [0, 1] by dividing by 255."""
    channels = [input_image[:, :, c] / 255. for c in range(3)]
    return numpy.array(channels)
class Adam_optimizer:
    """
    Adam-style optimizer state for a single (weights, bias) parameter pair.

    Tracks first (m_*) and second (v_*) moment estimates for the weight and
    bias gradients and turns gradients into update deltas.
    """

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
        # first/second raw moment estimates for weights and bias
        self.m_w = 0
        self.v_w = 0
        self.m_b = 0
        self.v_b = 0
        self.lr = learning_rate
        self.b1 = beta_1
        self.b2 = beta_2
        self.ep = epsilon

    def save_optimizer(self, file_path):
        """Pickle this optimizer's state to file_path."""
        # BUG FIX: open pickle files in binary mode and close the handle;
        # the original used text mode 'w' and leaked the open file object.
        with open(file_path, 'wb') as f:
            pickle.dump(self, f)

    def load_optimizer(self, file_path):
        """Restore all state from a pickle written by save_optimizer."""
        # single-argument parenthesized print works identically on Python 2 and 3
        print("Loading Adam_optimizer...")
        with open(file_path, 'rb') as f:
            old_one = pickle.load(f)
        self.m_w = old_one.m_w
        self.v_w = old_one.v_w
        self.m_b = old_one.m_b
        self.v_b = old_one.v_b
        self.lr = old_one.lr
        self.b1 = old_one.b1
        self.b2 = old_one.b2
        self.ep = old_one.ep
        print("Loading completed...")

    def calculate_delta(self, gradient_weights, gradient_bias):
        """Update the moment estimates with the given gradients and return the
        (delta_weights, delta_bias) steps to add to the parameters."""
        # NOTE(review): canonical Adam divides by (1 - beta**t) with a running
        # timestep t; this code always divides by (1 - beta).  Kept unchanged
        # to preserve training behaviour -- confirm whether it is intentional.
        self.m_w = self.b1*self.m_w+(1.0-self.b1)*gradient_weights
        self.v_w = self.b2*self.v_w+(1.0-self.b2)*numpy.square(gradient_weights)
        m_non_biased = self.m_w/(1.0-self.b1)
        v_non_biased = self.v_w/(1.0-self.b2)
        delta_weights = -(self.lr*m_non_biased)/(numpy.sqrt(v_non_biased)+self.ep)
        self.m_b = self.b1*self.m_b+(1.0-self.b1)*gradient_bias
        self.v_b = self.b2*self.v_b+(1.0-self.b2)*numpy.square(gradient_bias)
        m_non_biased = self.m_b/(1.0-self.b1)
        v_non_biased = self.v_b/(1.0-self.b2)
        delta_bias = -(self.lr*m_non_biased)/(numpy.sqrt(v_non_biased)+self.ep)
        return (delta_weights, delta_bias)

    def reset(self):
        """Zero all moment estimates (e.g. at the start of a new run)."""
        self.m_w = 0
        self.v_w = 0
        self.m_b = 0
        self.v_b = 0
# ---------------------------------------------------------------------------
# Paths for samples/labels and for the persisted network + optimizer state.
# ---------------------------------------------------------------------------
sample_dir_path = "/cifar/"
#train_list_path = sample_dir_path+"train.list"
labels_path = sample_dir_path+"labels.txt"
network_dir_path = "/network/"
progress_path = network_dir_path+"progress_of_training"
train_list_path = network_dir_path+"train.list"
#train_list_path = network_dir_path+"valid.list"
epoch_path = network_dir_path+"current_epoch"
train_error_path = network_dir_path+"training_error"
layer_1_save_path = network_dir_path+"layer_1"
layer_2_save_path = network_dir_path+"layer_2"
layer_3_save_path = network_dir_path+"layer_3"
optimizer_dir_path = network_dir_path+"optimizer/"
optimizer_1_save_path = optimizer_dir_path+"layer_1_optimizer"
optimizer_2_save_path = optimizer_dir_path+"layer_2_optimizer"
optimizer_3_save_path = optimizer_dir_path+"layer_3_optimizer"
# training bookkeeping
exit_flag = False
is_training = False
epoch_cnt = 1
last_sample_cnt = 0
start_time = time.time()
empty = numpy.array([])  # placeholder passed to layers created without weights
# BUG FIX: close the file handles deterministically (the original leaked them)
with open(train_list_path) as f:
    train_list = f.readlines()
with open(labels_path) as f:
    labels_list = f.readlines()
if os.path.isfile(epoch_path):
    with open(epoch_path) as f:
        epoch_cnt = int(f.readline())
else:
    # BUG FIX: the original called `temp_file.close` without parentheses, so
    # the handle was never closed; a `with` block closes (and flushes) it.
    with open(epoch_path, 'w') as f:
        f.write(str(epoch_cnt))
if os.path.isfile(progress_path):
    # a progress file means a previous run was interrupted: resume training
    with open(progress_path) as f:
        last_sample_cnt = int(f.readline())
    is_training = True
# raise on any floating point error instead of silently printing warnings
numpy_settings = numpy.seterr(all='raise')
###################################################################################################
# Layer 1: conv, 3 input channels -> 8 filters, 4x4 field, stride = field size
# (non-overlapping), no zero padding.  Weights reloaded when resuming.
print "layer 1 initialization..."
input_depth = 3
num_of_filter = 8
field_size = 4
stride = field_size
zero_pad = 0
layer_1 = layer.convolutional_layer(input_depth,num_of_filter,field_size,stride,zero_pad,empty,empty)
optimizer_1 = Adam_optimizer()
if (is_training):
    if (os.path.exists(layer_1_save_path)):
        print "Loading from ",layer_1_save_path
        layer_1.load_layer(layer_1_save_path)
    else:
        print "Unable to open layer 1 from",layer_1_save_path
    if (os.path.exists(optimizer_1_save_path)):
        print "Loading from ",optimizer_1_save_path
        optimizer_1.load_optimizer(optimizer_1_save_path)
    else:
        print "Unable to open optimizer 1 from",optimizer_1_save_path
print "################################################################################"
###################################################################################################
# Layer 2: conv, 8 -> 16 filters, 4x4 field, stride 4.
print "layer 2 initialization..."
input_depth = num_of_filter
num_of_filter = 16
field_size = 4
stride = field_size
zero_pad = 0
layer_2 = layer.convolutional_layer(input_depth,num_of_filter,field_size,stride,zero_pad,empty,empty)
optimizer_2 = Adam_optimizer()
if (is_training):
    if (os.path.exists(layer_2_save_path)):
        print "Loading from ",layer_2_save_path
        layer_2.load_layer(layer_2_save_path)
    else:
        print "Unable to open layer 2 from",layer_2_save_path
    if (os.path.exists(optimizer_2_save_path)):
        print "Loading from ",optimizer_2_save_path
        optimizer_2.load_optimizer(optimizer_2_save_path)
    else:
        print "Unable to open optimizer 2 from",optimizer_2_save_path
print "################################################################################"
###################################################################################################
# Layer 3: conv, 16 -> 10 filters (one per class), 2x2 field, stride 2.
print "layer 3 initialization..."
input_depth = num_of_filter
num_of_filter = 10
field_size = 2
stride = field_size
zero_pad = 0
layer_3 = layer.convolutional_layer(input_depth,num_of_filter,field_size,stride,zero_pad,empty,empty)
optimizer_3 = Adam_optimizer()
if (is_training):
    if (os.path.exists(layer_3_save_path)):
        print "Loading from ",layer_3_save_path
        layer_3.load_layer(layer_3_save_path)
    else:
        print "Unable to open layer 3 from",layer_3_save_path
    if (os.path.exists(optimizer_3_save_path)):
        print "Loading from ",optimizer_3_save_path
        optimizer_3.load_optimizer(optimizer_3_save_path)
    else:
        print "Unable to open optimizer 3 from",optimizer_3_save_path
print "################################################################################"
###################################################################################################
# Output layer: softmax over the 10 class activations.
layer_4 = layer.softmax_layer()
###################################################################################################
# Trial
# One dry-run forward pass on the first training image, with numpy error
# handling relaxed to warnings, to validate that all layer shapes line up.
logo = SimpleCV.Image(train_list[0][:-1])
input_image = logo.getNumpy()
input_image = change_image_format(input_image)
numpy.seterr(all='warn')
print "validating layer_1..."
layer_1.check_input_validity(input_image)
output_1 = layer_1.propagate_forward(input_image)
print "layer_1 shape:",layer_1.weights.shape
print "output_1 size:",output_1.shape
print "validating layer_2..."
layer_2.check_input_validity(output_1)
output_2 = layer_2.propagate_forward(output_1)
print "layer_2 shape:",layer_2.weights.shape
print "output_2 size:",output_2.shape
print "validating layer_3..."
layer_3.check_input_validity(output_2)
output_3 = layer_3.propagate_forward(output_2)
print "layer_3 shape:",layer_3.weights.shape
print "output_3 size:",output_3.shape
output_4 = layer_4.propagate_forward(output_3)
###################################################################################################
#batch_size = len(train_list)/100
batch_size = 500
#regularization_magnitude = 1e-5
#dropout = 0.5 # probability of keeping a unit active. higher = less dropout
#decay = 1.0
total_error = 0
min_average_error = 1.0
while (True):
Gw3 = numpy.zeros(layer_3.weights.shape)
Gb3 = numpy.zeros(layer_3.bias.shape)
Gw2 = numpy.zeros(layer_2.weights.shape)
Gb2 = numpy.zeros(layer_2.bias.shape)
Gw1 = numpy.zeros(layer_1.weights.shape)
Gb1 = numpy.zeros(layer_1.bias.shape)
#if (epoch_cnt > 1):
#regularization_magnitude *= numpy.power(decay, epoch_cnt)
#dropout = (dropout/(1.0-decay))*(1.0-numpy.power(decay, epoch_cnt+1))
print
print "Current epoch:",epoch_cnt
#print "Regularization magnitude:",regularization_magnitude
#print "Dropout:",dropout
print "Start training from sample:",last_sample_cnt
for sample_cnt in range(last_sample_cnt, len(train_list)):
train_sample_label = re.split('[,_,.]', train_list[sample_cnt])[1]
cost = numpy.zeros(len(labels_list))
for cnt in range(0, len(labels_list)):
if (labels_list[cnt][:-1] == train_sample_label):
cost[cnt] = 1
break
elif (cnt == len(labels_list)-1):
print "Unknown image:",train_list[sample_cnt][:-1]
exit_flag = True
if (exit_flag):
continue
logo = SimpleCV.Image(train_list[sample_cnt][:-1])
input_image = logo.getNumpy()
input_image = change_image_format(input_image)
output_1 = layer_1.propagate_forward(input_image)
#Md1 = (numpy.random.rand(*output_1.shape)<dropout)/dropout
#output_1 *= Md1
output_2 = layer_2.propagate_forward(output_1)
#Md2 = (numpy.random.rand(*output_2.shape)<dropout)/dropout
#output_2 *= Md2
output_3 = layer_3.propagate_forward(output_2)
output_4 = layer_4.propagate_forward(output_3)
error_0 = output_4-cost
error_1 = layer_4.propagate_backward(error_0)
(error_2, gw3, gb3) = layer_3.propagate_backward(error_1)
#error_2 *= Md2
(error_3, gw2, gb2) = layer_2.propagate_backward(error_2)
#error_3 *= Md1
(error_4, gw1, gb1) = layer_1.propagate_backward(error_3)
Gw3 += gw3
Gb3 += gb3
Gw2 += gw2
Gb2 += gb2
Gw1 += gw1
Gb1 += gb1
total_error += numpy.sum(error_0*error_0)
#if ((sample_cnt+1)%(batch_size*5) == 0):
#time_passed = time.time()-start_time
#hours = int(time_passed)/3600
#minutes = int(time_passed-3600*hours)/60
#print
#print "Computation duration:",hours,"hrs,",minutes,"min,",time_passed%60,"sec"
#print "Counter:",sample_cnt
#print "Label:",train_sample_label
#print "Error sum:",0.5*numpy.sum(numpy.square(error_0))
#print "Error:"
#print error_0
if ((sample_cnt+1)%batch_size == 0):
# regularize weights
#layer_3.regularize_weights(0, regularization_magnitude)
#layer_2.regularize_weights(0, regularization_magnitude)
#layer_1.regularize_weights(0, regularization_magnitude)
# update delta weights
(V_Gw3, V_Gb3) = optimizer_3.calculate_delta(Gw3, Gb3)
(V_Gw2, V_Gb2) = optimizer_2.calculate_delta(Gw2, Gb2)
(V_Gw1, V_Gb1) = optimizer_1.calculate_delta(Gw1, Gb1)
# update the weights
#V_Gw2[:,:-1,:,:] = 0
#V_Gw1[:-1,:,:,:] = 0
#V_Gb1[:-1] = 0
layer_3.update_weights(V_Gw3, V_Gb3)
layer_2.update_weights(V_Gw2, V_Gb2)
layer_1.update_weights(V_Gw1, V_Gb1)
Gw3 = numpy.zeros(layer_3.weights.shape)
Gb3 = numpy.zeros(layer_3.bias.shape)
Gw2 = numpy.zeros(layer_2.weights.shape)
Gb2 = numpy.zeros(layer_2.bias.shape)
Gw1 = numpy.zeros(layer_1.weights.shape)
Gb1 = numpy.zeros(layer_1.bias.shape)
layer_1.save_layer(layer_1_save_path)
layer_2.save_layer(layer_2_save_path)
layer_3.save_layer(layer_3_save_path)
optimizer_1.save_optimizer(optimizer_1_save_path)
optimizer_2.save_optimizer(optimizer_2_save_path)
optimizer_3.save_optimizer(optimizer_3_save_path)
temp_file = open(progress_path, 'w')
temp_file.write(str(sample_cnt+1))
temp_file.close
#print
#print "##############################"
#print "Saving completed..."
#print "##############################"
if ((sample_cnt+1)%(batch_size*5) == 0):
time_passed = time.time()-start_time
hours = int(time_passed)/3600
minutes = int(time_passed-3600*hours)/60
print
print "Computation duration:",hours,"hrs,",minutes,"min,",time_passed%60,"sec"
print "Epoch",epoch_cnt,"progress:",100.0*(sample_cnt+1.0)/len(train_list),"%"
print "layer_1 velocity:",numpy.linalg.norm(V_Gw1.ravel())
print "layer_2 velocity:",numpy.linalg.norm(V_Gw2.ravel())
print "layer_3 velocity:",numpy.linalg.norm(V_Gw3.ravel())
# finish current epoch
#optimizer_1.reset()
#optimizer_2.reset()
#optimizer_3.reset()
optimizer_1.save_optimizer(optimizer_1_save_path)
optimizer_2.save_optimizer(optimizer_2_save_path)
optimizer_3.save_optimizer(optimizer_3_save_path)
layer_1.save_layer(network_dir_path+"epoch_"+str(epoch_cnt)+"_layer_1_completed")
layer_2.save_layer(network_dir_path+"epoch_"+str(epoch_cnt)+"_layer_2_completed")
layer_3.save_layer(network_dir_path+"epoch_"+str(epoch_cnt)+"_layer_3_completed")
if (total_error/len(train_list) < min_average_error):
layer_1.save_layer(network_dir_path+"min_layer_1_completed")
layer_2.save_layer(network_dir_path+"min_layer_2_completed")
layer_3.save_layer(network_dir_path+"min_layer_3_completed")
min_average_error = total_error/len(train_list)
# record training error
temp_file = open(train_error_path, 'a+')
temp_file.write("%s\n" % str(total_error/len(train_list)))
temp_file.close()
last_sample_cnt = 0
temp_file = open(progress_path, 'w')
temp_file.write(str(last_sample_cnt))
temp_file.close
print
print "Average training error:",total_error/len(train_list)
print "##############################"
print "Epoch",epoch_cnt,"completed..."
print "##############################"
# shuffle the train dataset
random.shuffle(train_list)
temp_file = open(train_list_path, 'w')
for item in train_list:
temp_file.writelines("%s" % item)
temp_file.close()
epoch_cnt += 1
temp_file = open(epoch_path, 'w')
temp_file.write(str(epoch_cnt))
temp_file.close
total_error = 0
###################################################################################################
|
{"hexsha": "19f021b7e8b479992886adcb30e23cb5ee70e744", "size": 14364, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "unionsetde/ToyNN", "max_stars_repo_head_hexsha": "57dc642ee3500996514a86a129714d6cc38e0824", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "unionsetde/ToyNN", "max_issues_repo_head_hexsha": "57dc642ee3500996514a86a129714d6cc38e0824", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "unionsetde/ToyNN", "max_forks_repo_head_hexsha": "57dc642ee3500996514a86a129714d6cc38e0824", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9230769231, "max_line_length": 101, "alphanum_fraction": 0.6296992481, "include": true, "reason": "import numpy", "num_tokens": 3607}
|
#include <boost/multi_index/composite_key.hpp>
|
{"hexsha": "2570a53ba663a4d40415c53fc2d1081772265693", "size": 47, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_multi_index_composite_key.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_multi_index_composite_key.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_multi_index_composite_key.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 23.5, "max_line_length": 46, "alphanum_fraction": 0.829787234, "num_tokens": 11}
|
[STATEMENT]
lemma reduced_row_echelon_form_def':
"reduced_row_echelon_form A =
(
(\<forall>i. is_zero_row i A \<longrightarrow> \<not> (\<exists>j. j>i \<and> \<not> is_zero_row j A)) \<and>
(\<forall>i. \<not> (is_zero_row i A) \<longrightarrow> A $ i $ (LEAST k. A $ i $ k \<noteq> 0) = 1) \<and>
(\<forall>i. i<i+1 \<and> \<not> (is_zero_row i A) \<and> \<not> (is_zero_row (i+1) A) \<longrightarrow> ((LEAST k. A $ i $ k \<noteq> 0) < (LEAST k. A $ (i+1) $ k \<noteq> 0))) \<and>
(\<forall>i. \<not> (is_zero_row i A) \<longrightarrow> (\<forall>j. i \<noteq> j \<longrightarrow> A $ j $ (LEAST k. A $ i $ k \<noteq> 0) = 0))
)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. reduced_row_echelon_form A = ((\<forall>i. is_zero_row i A \<longrightarrow> \<not> (\<exists>j>i. \<not> is_zero_row j A)) \<and> (\<forall>i. \<not> is_zero_row i A \<longrightarrow> A $ i $ (LEAST k. A $ i $ k \<noteq> (0::'a)) = (1::'a)) \<and> (\<forall>i. i < i + (1::'c) \<and> \<not> is_zero_row i A \<and> \<not> is_zero_row (i + (1::'c)) A \<longrightarrow> (LEAST k. A $ i $ k \<noteq> (0::'a)) < (LEAST k. A $ (i + (1::'c)) $ k \<noteq> (0::'a))) \<and> (\<forall>i. \<not> is_zero_row i A \<longrightarrow> (\<forall>j. i \<noteq> j \<longrightarrow> A $ j $ (LEAST k. A $ i $ k \<noteq> (0::'a)) = (0::'a))))
[PROOF STEP]
unfolding reduced_row_echelon_form_def reduced_row_echelon_form_upt_k_def is_zero_row_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<forall>i. is_zero_row_upt_k i (ncols A) A \<longrightarrow> \<not> (\<exists>j>i. \<not> is_zero_row_upt_k j (ncols A) A)) \<and> (\<forall>i. \<not> is_zero_row_upt_k i (ncols A) A \<longrightarrow> A $ i $ (LEAST k. A $ i $ k \<noteq> (0::'a)) = (1::'a)) \<and> (\<forall>i. i < i + (1::'c) \<and> \<not> is_zero_row_upt_k i (ncols A) A \<and> \<not> is_zero_row_upt_k (i + (1::'c)) (ncols A) A \<longrightarrow> (LEAST n. A $ i $ n \<noteq> (0::'a)) < (LEAST n. A $ (i + (1::'c)) $ n \<noteq> (0::'a))) \<and> (\<forall>i. \<not> is_zero_row_upt_k i (ncols A) A \<longrightarrow> (\<forall>j. i \<noteq> j \<longrightarrow> A $ j $ (LEAST n. A $ i $ n \<noteq> (0::'a)) = (0::'a)))) = ((\<forall>i. is_zero_row_upt_k i (ncols A) A \<longrightarrow> \<not> (\<exists>j>i. \<not> is_zero_row_upt_k j (ncols A) A)) \<and> (\<forall>i. \<not> is_zero_row_upt_k i (ncols A) A \<longrightarrow> A $ i $ (LEAST k. A $ i $ k \<noteq> (0::'a)) = (1::'a)) \<and> (\<forall>i. i < i + (1::'c) \<and> \<not> is_zero_row_upt_k i (ncols A) A \<and> \<not> is_zero_row_upt_k (i + (1::'c)) (ncols A) A \<longrightarrow> (LEAST k. A $ i $ k \<noteq> (0::'a)) < (LEAST k. A $ (i + (1::'c)) $ k \<noteq> (0::'a))) \<and> (\<forall>i. \<not> is_zero_row_upt_k i (ncols A) A \<longrightarrow> (\<forall>j. i \<noteq> j \<longrightarrow> A $ j $ (LEAST k. A $ i $ k \<noteq> (0::'a)) = (0::'a))))
[PROOF STEP]
..
|
{"llama_tokens": 1291, "file": "Gauss_Jordan_Rref", "length": 2}
|
[STATEMENT]
lemma remove_const_lv_mondaic_steps:
assumes lv: "lv \<R>" and fresh: "(c, 0) \<notin> funas_rel \<R>"
and mon: "monadic \<F>"
and steps: "(s \<cdot> const_subst c, t \<cdot> const_subst c) \<in> (srstep \<F> \<R>)\<^sup>+"
shows "(s, t) \<in> (srstep \<F> \<R>)\<^sup>+"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (s, t) \<in> (srstep \<F> \<R>)\<^sup>+
[PROOF STEP]
using remove_const_lv_mondaic_steps_rhs[OF lv fresh mon remove_const_lv_mondaic_steps_lhs[OF assms]]
[PROOF STATE]
proof (prove)
using this:
(s, t) \<in> (srstep \<F> \<R>)\<^sup>+
goal (1 subgoal):
1. (s, t) \<in> (srstep \<F> \<R>)\<^sup>+
[PROOF STEP]
by simp
\<comment> \<open>Steps on lv trs\<close>
|
{"llama_tokens": 328, "file": "Rewrite_Properties_Reduction_Rewriting_Rewriting_LLRG_LV_Mondaic", "length": 2}
|
"""
python clean_generate_JSON.py
"""
import fnmatch
import cv2
import json
import numpy as np
import os
import base64
from copy import deepcopy
import imgaug as ia
import imgaug.augmenters as iaa
import imageio
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
import shutil
# import matplotlib.pyplot as plt
import imantics
try:
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
except:
from imgaug.augmentables.segmaps import SegmentationMapOnImage as SegmentationMapsOnImage
ia.seed(1)  # deterministic imgaug augmentations across runs
# Class name -> integer label used inside the combined segmentation map.
LABELLINGS = {"building": 1, "crop": 2, "hills": 3}
CLASSES = list(LABELLINGS.keys())
ROOT_DIR = os.getcwd()
DATA_PATH = os.path.join(ROOT_DIR, "JSONDataFiles")
# Scratch directories, deleted and recreated on every run.
Masks_Dir = "Temp_Masks"
Images_Dir = "Temp_Images"
Combined_Masks_Dir = "Temp_Combined_Masks"
AugMasksDir = "Temp_Aug_Masks"
# AugImgsDir = "Temp_Aug_Imgs"
TEMPORARY = [Masks_Dir, Images_Dir,
             Combined_Masks_Dir, AugMasksDir]
# Final artifacts (augmented images + JSONs) all land in this one folder.
OutputJSONDir = "Final_Output_JSONs_Imgs"
OutputDir = OutputJSONDir
AugImgsDir = OutputJSONDir
def destruct_folders(paths):
    """Remove every directory tree in *paths*, silently ignoring any failure."""
    def _remove_quietly(target):
        try:
            shutil.rmtree(target)
        except:
            pass
    for target in paths:
        _remove_quietly(target)
destruct_folders(TEMPORARY)  # start every run from a clean scratch area
def create_dir(dir):
    """Ensure *dir* exists, report what happened, and return the path."""
    if os.path.exists(dir):
        print("Directory already existed : ", dir)
        return dir
    os.makedirs(dir)
    print("Created Directory : ", dir)
    return dir
def create_folders(paths):
    """Create (or confirm) every directory in *paths* via create_dir."""
    for target in paths:
        create_dir(target)
# Everything (scratch + output) is created up front.
DIRS_TO_CREATE = [Masks_Dir, Images_Dir,
                  Combined_Masks_Dir, AugMasksDir, AugImgsDir, OutputJSONDir]
create_folders(DIRS_TO_CREATE )
def find(pattern, path):
    """Return paths of all files under *path* whose basename matches *pattern*.

    *pattern* is an fnmatch-style glob (e.g. "*.json"); the walk is recursive.
    """
    return [
        os.path.join(base, filename)
        for base, _dirs, filenames in os.walk(path)
        for filename in filenames
        if fnmatch.fnmatch(filename, pattern)
    ]
def openJSON(json_path):
    """Parse the JSON document at *json_path* and return the resulting object."""
    with open(json_path) as handle:
        return json.load(handle)
def save_base64_to_img(Base64ImageData, OutputImgPath):
    """Decode URL-safe base64 data and write the raw bytes to *OutputImgPath*."""
    raw_bytes = base64.urlsafe_b64decode(Base64ImageData)
    with open(OutputImgPath, "wb") as sink:
        sink.write(raw_bytes)
def getFileNameNoExt(fpath):
    """Return the basename of *fpath* with its final extension stripped."""
    base = os.path.basename(fpath)
    stem, _ext = os.path.splitext(base)
    return stem
def saveCombinedClassWiseMasks(ShapesData, ShapeX, ShapeY, filename):
    """Rasterise labelme polygon shapes into one binary PNG mask per class.

    Each shape dict supplies "label" and "points"; polygons are filled with
    255 into a (ShapeX, ShapeY) uint8 mask per class, written to
    Combined_Masks_Dir as "<filename>_<class>.png".  Returns True.
    """
    CLASS_MASKS = dict()
    mask = np.zeros([ShapeX, ShapeY], dtype=np.uint8)
    for each_class in CLASSES:
        # deepcopy so the classes do not share one mask array.
        CLASS_MASKS[each_class] = deepcopy(mask)
    for i in range(len(ShapesData)):
        current_label = ShapesData[i]["label"]
        filler_data = np.array(
            [ShapesData[i]["points"]], dtype=np.int32)
        print(filler_data.shape)
        CLASS_MASKS[current_label] = cv2.fillPoly(
            CLASS_MASKS[current_label], filler_data, 255) # 255 or 1
    for each_class in CLASSES:
        CurrentMaskPath = os.path.join(Combined_Masks_Dir, str(
            filename)+str("_")+str(each_class)+".png")
        cv2.imwrite(CurrentMaskPath, CLASS_MASKS[each_class])
    return True
def getCombinedMaskstoOneMask(filename, Combined_Masks_Dir, ShapeX, ShapeY):
    """Merge the per-class PNG masks back into one integer-labelled segmap.

    Usage :
    big_segmap, ALL_SEGMAPS = getCombinedMaskstoOneMask(filename, Combined_Masks_Dir, ShapeX, ShapeY)

    Returns (big_segmap, ALL_SEGMAPS): big_segmap is a uint8 array holding
    LABELLINGS values, ALL_SEGMAPS maps class name -> boolean mask.
    """
    ALL_SEGMAPS = dict()
    # Combined segmap is constructed here.
    big_segmap = np.zeros([ShapeX, ShapeY], dtype=np.uint8)
    for each_class in CLASSES:
        CurrentMaskPath = os.path.join(Combined_Masks_Dir, str(
            filename)+str("_")+str(each_class)+".png")
        old_segmap = np.array(imageio.imread(CurrentMaskPath), dtype=bool)
        print("Segmap", old_segmap.shape)
        ALL_SEGMAPS[each_class] = old_segmap
        label_num = LABELLINGS[each_class]
        idx_R, idx_C = np.nonzero(old_segmap)
        # Later classes overwrite earlier ones where masks overlap.
        for r, c in zip(idx_R, idx_C):
            big_segmap[r][c] = label_num
    return big_segmap, ALL_SEGMAPS
def augmentSingle(image, big_segmap, augmentation_sequence):
    """Apply one sampled augmentation to an image and its labelled segmap.

    Usage :
    augmented_image , augmented_segmap_mask = augmentSingle(image, big_segmap, augmentation_sequence=seq)

    Returns (augmented_image, augmented_segmap_array); the segmap receives the
    same geometric transform as the image.
    """
    segmap = SegmentationMapsOnImage(big_segmap, shape=image.shape)
    # Augment images and segmaps together so they stay aligned.
    images_aug_i, segmaps_aug_i = augmentation_sequence(
        image=image, segmentation_maps=segmap)
    return images_aug_i, segmaps_aug_i.get_arr()
def breakOneMasktoCombinedMasks(augmented_segmap_mask, ShapeX, ShapeY):
    """Split one integer-labelled segmap into a boolean mask per class.

    Usage :
    AUG_MASKS = breakOneMasktoCombinedMasks(augmented_segmap_mask, ShapeX, ShapeY)
    """
    per_class_masks = dict()
    for class_name in CLASSES:
        class_mask = np.zeros([ShapeX, ShapeY], dtype=bool)
        rows, cols = np.where(augmented_segmap_mask == LABELLINGS[class_name])
        for row, col in zip(rows, cols):
            class_mask[row][col] = True
        per_class_masks[class_name] = class_mask
    return per_class_masks
def writeAugmentedMasksandImage(AUG_MASKS,augmented_image, AugMasksDir,AugImgsDir,filename,counter):
    """Write the augmented image and its per-class masks to disk as PNGs.

    Usage :
    writeAugmentedMasksandImage(AUG_MASKS,augmented_image, AugMasksDir,AugImgsDir,filename,counter)

    *counter* disambiguates the multiple augmentations of one source image.
    Returns True.
    """
    AugImgPath = os.path.join(AugImgsDir, str(
        filename)+str("_")+str(counter)+".png")
    cv2.imwrite(AugImgPath, augmented_image)
    for each_class in CLASSES:
        AugMaskPath = os.path.join(AugMasksDir, str(
            filename)+str("_")+str(each_class)+str(counter)+".png")
        curr_aug_mask = AUG_MASKS[each_class]
        # Boolean mask scaled to 0/255 so it is visible as an image.
        cv2.imwrite(AugMaskPath, curr_aug_mask*255)
    return True
def getShapesforJSONfromAugmentedMasks(AUG_MASKS,ShapeX, ShapeY):
    """Vectorise augmented per-class masks back into labelme-style shapes.

    Usage:
    all_shapes,NEW_MASKS = getShapesforJSONfromAugmentedMasks(AUG_MASKS,ShapeX, ShapeY)

    Returns (all_shapes, NEW_MASKS): all_shapes is a list of labelme shape
    dicts (polygon points per class), NEW_MASKS maps class name -> mask
    re-rasterised from those polygons (sanity check of the vectorisation).
    """
    NEW_MASKS = dict()
    all_shapes = []
    for each_class in CLASSES:
        # imantics converts the boolean mask into polygon outlines;
        # each polygon is a flat [x0, y0, x1, y1, ...] coordinate list.
        CLASS_MASKS = imantics.Polygons.from_mask(
            mask=AUG_MASKS[each_class])
        CLASS_MASKS_LIST = list(CLASS_MASKS)
        # 1, len(pts),2
        # np.zeros(segmap.shape)
        mask_final = np.zeros((ShapeX, ShapeY))
        # cv2.fillPoly( mask_final, BIG_AREA, 255) # 255 or 1
        BIG_AREA = []
        # NOTE(review): HUGE_AREA is allocated but never written or returned.
        HUGE_AREA = np.zeros(
            (len(CLASS_MASKS_LIST), 1, len(CLASS_MASKS_LIST)//2, 2))
        big_counter = 0
        for each_small_list in CLASS_MASKS_LIST:
            # De-interleave the flat coordinate list into Xs and Ys.
            Xs = each_small_list[0::2]
            Ys = each_small_list[1::2]
            """
            {'flags': {},
            'group_id': None,
            'label': 'building',
            'points': [[0.9525240599620731, 150.9941335024576],
            [0.0, 239.2364039955605]],
            'shape_type': 'polygon'}
            """
            coords = [[x, y] for x, y in zip(Xs, Ys)]
            dict_to_add = {'flags': {},
                'group_id': None,
                'label': each_class,
                'points': coords,
                'shape_type': 'polygon'}
            all_shapes.append(dict_to_add)
            # print(len(Xs),len(Ys))
            mask = np.zeros((1, len(Xs), 2))
            count = 0
            for x, y in zip(Xs, Ys):
                # print(count)
                mask[0][count] = np.array([x, y])
                count += 1
            # HUGE_AREA[big_counter] = mask
            big_counter += 1
            # print(mask)
            print(mask.shape)
            # mask_final = cv2.fillPoly( mask_final, [mask], 255) # 255 or 1
            mask_final = cv2.fillPoly(mask_final, np.array(
                [mask], dtype=np.int32), 255) # 255 or 1
            BIG_AREA.append(mask)
        NEW_MASKS[each_class] = mask_final
        CLASS_MASKS_ARR = np.array(CLASS_MASKS_LIST,dtype=object)
    return all_shapes,NEW_MASKS
if __name__ == '__main__':
    # Pipeline per input JSON: decode embedded image -> rasterise shapes to
    # per-class masks -> augment N times -> re-vectorise -> write new JSON+PNG.
    json_files = find('*.json', DATA_PATH)
    print(len(json_files), "JSON Files Found :")
    AnnotatedImgFolder = DATA_PATH
    for jsonpath in json_files:
        content = openJSON(jsonpath)
        filename = getFileNameNoExt(jsonpath)
        print("FileName :", filename)
        ImgData = content["imageData"]
        ImagePath = os.path.join(Images_Dir, str(filename)+".png")
        save_base64_to_img(Base64ImageData=ImgData, OutputImgPath=ImagePath)
        imgFile = cv2.imread(ImagePath)
        ShapeX, ShapeY, ShapeZ = imgFile.shape
        CLASS_MASKS = saveCombinedClassWiseMasks(
            ShapesData=content["shapes"], ShapeX=ShapeX, ShapeY=ShapeY, filename=filename)
        """
        augmentation = iaa.Sometimes(0.5, [
        iaa.CLAHE(clip_limit=(1, 10)),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.GaussianBlur(sigma=(0.0, 5.0))
        ])
        # Define our augmentation pipeline.
        seq1 = iaa.Sequential([
        iaa.Dropout([0.05, 0.2]), # drop 5% or 20% of all pixels
        iaa.Sharpen((0.0, 1.0)), # sharpen the image
        # rotate by -45 to 45 degrees (affects segmaps)
        iaa.Affine(rotate=(-45, 45)),
        # apply water effect (affects segmaps)
        iaa.ElasticTransformation(alpha=50, sigma=5)
        ], random_order=True)
        """
        # Define our augmentation pipeline.
        seq = iaa.Sequential([
            iaa.Sharpen((0.0, 1.0)), # sharpen the image
            # rotate by -45 to 45 degrees (affects segmaps)
            iaa.Affine(rotate=(-45, 45)),
            iaa.CLAHE(clip_limit=(1, 10))
        ], random_order=True)
        image = imageio.imread(ImagePath)
        num_augs = 5
        """
        Major Changes Here
        for each in Temp Combined - for a imagename , maskname label1,label2,label3 -> convert to aug_iamgename, maskname label1,label2,label3
        same behaviour required for SegmentationMapsOnImage
        """
        """
        imageData : base64
        imageHeight
        imagePath
        imageWidth
        shapes
        - label : building,
        - points : [[x1,y1],[x2,y2] ... [xn,yn]]
        """
        for counter in range(num_augs):
            big_segmap, ALL_SEGMAPS = getCombinedMaskstoOneMask(filename, Combined_Masks_Dir, ShapeX, ShapeY)
            augmented_image , augmented_segmap_mask = augmentSingle(image, big_segmap, augmentation_sequence=seq)
            AUG_MASKS = breakOneMasktoCombinedMasks(augmented_segmap_mask, ShapeX, ShapeY)
            writeAugmentedMasksandImage(AUG_MASKS,augmented_image, AugMasksDir,AugImgsDir,filename,counter)
            all_shapes,NEW_MASKS = getShapesforJSONfromAugmentedMasks(AUG_MASKS,ShapeX, ShapeY)
            # OutputJSONDir
            OutputJSONPath = os.path.join(OutputJSONDir, str(
                filename)+str("_")+str(counter)+".json")
            # shutil.copy(jsonpath, OutputJSONPath)
            contentnew = openJSON(jsonpath)
            CounterImgPath = os.path.join(AugImgsDir, str(
                filename)+str("_")+str(counter)+".png")
            fh = CounterImgPath
            # BUG FIX: the image file handle was opened with open() and never
            # closed (leaked once per augmentation); use a context manager.
            with open(fh, 'rb') as image_opened:
                image_read = image_opened.read()
            # encodestring also works aswell as decodestring
            image_64_encode = base64.encodebytes(image_read)
            # im
            # str(image_64_encode)
            contentnew["imageData"] = image_64_encode.decode()
            contentnew["imageHeight"] = ShapeX
            contentnew["imageWidth"] = ShapeY
            """
            '..\\all_annt_jpgData_palmdel_512\\PauliRGB (6)_palmdell.jpg'
            """
            add_path_before = "..\\all_annt_jpgData_palmdel_512\\"
            # AugImgsDir
            contentnew["imagePath"] = CounterImgPath
            contentnew["shapes"] = all_shapes
            print("Content Type :", type(contentnew))
            def np_encoder(object):
                # Convert numpy scalars to native Python for json.dump.
                # NOTE(review): returns None (serialised as null) for any
                # other unserialisable type instead of raising -- confirm.
                if isinstance(object, np.generic):
                    return object.item()
            with open(OutputJSONPath, "w") as write_file:
                json.dump(contentnew, write_file, default=np_encoder)
    # shutil.copy(AugImgsDir,OutputJSONDir)
    print("Generation Complete.")
    destruct_folders(TEMPORARY)
|
{"hexsha": "836266187a481112cc833492aba820056c49ada8", "size": 12090, "ext": "py", "lang": "Python", "max_stars_repo_path": "MLModel/LabelMeData/clean_generate_JSON.py", "max_stars_repo_name": "ShubhamKrSingh21/ObjectifyManthan", "max_stars_repo_head_hexsha": "e99a5e69b3bc8d0f8ca66aee61a2185f01bc5bd5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-10-31T13:39:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T18:09:01.000Z", "max_issues_repo_path": "MLModel/LabelMeData/clean_generate_JSON.py", "max_issues_repo_name": "ShubhamKrSingh21/ObjectifyManthan", "max_issues_repo_head_hexsha": "e99a5e69b3bc8d0f8ca66aee61a2185f01bc5bd5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MLModel/LabelMeData/clean_generate_JSON.py", "max_forks_repo_name": "ShubhamKrSingh21/ObjectifyManthan", "max_forks_repo_head_hexsha": "e99a5e69b3bc8d0f8ca66aee61a2185f01bc5bd5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-31T14:17:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-31T14:17:06.000Z", "avg_line_length": 33.7709497207, "max_line_length": 142, "alphanum_fraction": 0.623573201, "include": true, "reason": "import numpy", "num_tokens": 3145}
|
[STATEMENT]
lemma rel_sv[relator_props]: "single_valued R \<Longrightarrow> single_valued (\<langle>R\<rangle>rel)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. single_valued R \<Longrightarrow> single_valued (\<langle>R\<rangle>rel)
[PROOF STEP]
unfolding rel_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. single_valued R \<Longrightarrow> single_valued (br \<alpha> invar O \<langle>R\<rangle>set_rel)
[PROOF STEP]
by tagged_solver
|
{"llama_tokens": 167, "file": "Collections_ICF_ICF_Autoref", "length": 2}
|
import os
from collections import OrderedDict
import numpy as np
np.set_printoptions(suppress=True)
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
from time import time
from copy import copy
class designer():
    """Iterative optimizer for experimental-design weights over a point pool."""
    def __init__(self,ff,weight,method='D'):
        '''
        input:
        ------
        ff: 2-D array. Rows represent points in the pool; columns represent
            parameters involved in the derivative.
        weight: 1-D array. Its length equals the total number of points in the
            pool, i.e. the number of rows of 'ff'.
        method: The criterion used for the optimization, default is D-optimal method.
        '''
        self.ff = ff
        self.m = ff.shape[1] # number of parameters
        self.weight = weight
        self.N_candidates = np.sum(weight!=0)
        self.method = method
        self.d = 0 # sensitivity function
        self.d_max = 0 # initialize the maximum of sensitivity.
        self.id_minimax = None
        self.M = 0 # information matrix
        self.M_inv = self.M # information matrix inverse
        self.psi_iter = [] # all the optimal criteria over the iterative procedure
        self.phi_iter = [] # all the sensitivity functions over the iterative procedure
        self.weight_iter = []
    def cal_criterion(self,local=False):
        # Rebuild the information matrix M = sum_i w_i * f_i f_i^T.
        self.M = 0
        for i,f in enumerate(self.ff):
            self.M += self.weight[i] * np.outer(f,f)
        self.M_inv = np.linalg.inv(self.M)
        if self.method == 'D':
            # D-criterion sensitivity: d_i = f_i^T M^-1 f_i.
            self.d = np.array([f @ self.M_inv @ f for f in self.ff])
        if local==False:
            # Global search: candidate with the largest sensitivity.
            self.id_minimax = np.argmax(self.d)
            self.d_max = self.d[self.id_minimax]
        else:
            # Local search over supported points only (weight != 0): pick the
            # smallest sensitivity as the candidate for weight removal.
            self.id_minimax = np.argmin(np.ma.array(self.d,mask=(self.weight==0)))
    def collect(self):
        # Record criterion value, max sensitivity and weights for this iteration.
        self.psi_iter.append(np.linalg.det(self.M_inv))
        self.phi_iter.append(self.d_max)
        self.weight_iter.append(self.weight)
    def update_design(self, alpha, action='add'):
        """Shift weight toward ('add') or away from ('remove') the point chosen
        by cal_criterion; returns 0 on success, 1 for an unknown action."""
        if action == 'add':
            alpha_s = alpha
        elif action == 'remove':
            p_s = self.weight[self.id_minimax]
            alpha_s = -min(alpha, p_s/(1-p_s))
        else:
            print("Design not updated")
            return 1
        self.weight = self.weight * (1-alpha_s) # reduce current design by alpha
        self.weight[self.id_minimax] += alpha_s # add the new point weighted by alpha
        self.weight = self.weight / sum(self.weight) # renormalize weight
        return 0
    def optimize(self,verbose=False,delta=1e-5,max_steps=1e6,remove=False):
        """Iterate add (and optionally remove) steps with decaying step length
        until d_max <= m/(1-delta) or max_steps is reached."""
        if delta == None:
            threshold = 0 # no limit on "d_max"
        else:
            threshold = self.m / (1-delta)
        # the stop condition: either maximum steps or threshold met.
        stop = lambda s: s >= max_steps or self.d_max <= threshold
        step = 0
        self.cal_criterion(local=False)
        self.collect()
        while not stop(step):
            step += 1
            alpha = 1 / (1+step+self.N_candidates) # step length
            self.cal_criterion(local=False)
            if self.update_design(alpha,action='add'):
                break
            if remove == True:
                self.cal_criterion(local=True)
                if self.update_design(alpha,action='remove'):
                    break
            self.collect()
        if verbose:
            print('Iteration steps: {}'.format(step))
            print('criterion: {:.3f}'.format(self.m/self.d_max))
def timer():
    """Placeholder for future timing utilities; intentionally does nothing."""
    return None
|
{"hexsha": "8d72df36c7c8355ff60411363a5a9ff93d996ac2", "size": 3596, "ext": "py", "lang": "Python", "max_stars_repo_path": "design.py", "max_stars_repo_name": "zhul9311/optimal-design", "max_stars_repo_head_hexsha": "1bf6d3d1879030a679b23113a22d712abfb6fb4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "design.py", "max_issues_repo_name": "zhul9311/optimal-design", "max_issues_repo_head_hexsha": "1bf6d3d1879030a679b23113a22d712abfb6fb4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "design.py", "max_forks_repo_name": "zhul9311/optimal-design", "max_forks_repo_head_hexsha": "1bf6d3d1879030a679b23113a22d712abfb6fb4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5438596491, "max_line_length": 87, "alphanum_fraction": 0.5873192436, "include": true, "reason": "import numpy", "num_tokens": 818}
|
import os, sys, time
import numpy as np
from psychopy import visual, core, data, logging, event
from .task_base import Task
from .videogame import _onPygletKeyPress, _onPygletKeyRelease, _keyPressBuffer, _keyReleaseBuffer
from ..shared import config, utils
class ButtonPressTask(Task):
    """Cue short/long button presses on a displayed gamepad image.

    Each trial highlights one button together with a cue ('short' dot or
    'long' bar); key presses and releases are captured through custom
    pyglet handlers and logged per trial.
    """
    # Button hotspots on the controller image, in image pixel coordinates:
    # d-pad entries are polygon vertex lists, face buttons are
    # [(center_x, center_y), radius] circle specs (interpreted in _setup).
    BUTTONS = {
        'l': [(132,176),(175,176),(196,200),(175,222),(132,222)],
        'r': [(250,176),(290,176),(290,222),(250,222),(230,200)],
        'u': [(189,162),(189,120),(233,120),(233,162),(211,184)],
        'd': [(189,235),(189,276),(232,276),(232,235),(211,215)],
        'a': [(648, 200), 25],
        'b': [(592, 253), 25],
        'x': [(592, 144), 25],
        'y': [(538, 200), 25],
    }
    # seconds to wait after the last trial before ending the run
    FINAL_WAIT = 9
    DEFAULT_INSTRUCTION = """You will be instructed to press the buttons of the controller for short or long durations."""
    def __init__(self, design, run, *args, **kwargs):
        # design: path to a conditions file loadable by psychopy.data;
        # run: run index — run 1 additionally shows the long instructions.
        super().__init__(*args, **kwargs)
        self.design = data.importConditions(design)
        self.run_id = run
        self.duration = len(self.design)
        self._progress_bar_refresh_rate = 3 # 3 flips per trial
    def _instructions(self, exp_win, ctl_win):
        # Show the generic instruction text; on run 1 also yield the
        # detailed long/short cue explanation.
        screen_text = visual.TextStim(
            exp_win,
            text=self.instruction,
            alignText="center",
            color="white",
            wrapWidth=config.WRAP_WIDTH,
        )
        for frameN in range(config.FRAME_RATE * config.INSTRUCTION_DURATION):
            screen_text.draw(exp_win)
            if ctl_win:
                screen_text.draw(ctl_win)
            yield ()
        if self.run_id == 1:
            yield True
            yield from self._long_instructions(exp_win, ctl_win)
    def _long_instructions(self, exp_win, ctl_win):
        # Explain the 'long' bar cue, then the 'short' dot cue, each shown
        # for INSTRUCTION_DURATION seconds over the controller image.
        screen_text = visual.TextStim(
            exp_win,
            text="""The long bar indicates long keypresses blocks,
            you need to time the press and the release to the button that light-up""",
            alignText="center",
            color="white",
            pos=(0,-.75),
            wrapWidth=config.WRAP_WIDTH,
        )
        self._controller_img.draw(exp_win)
        screen_text.draw(exp_win)
        self._cue['long'].draw(exp_win)
        yield True
        core.wait(config.INSTRUCTION_DURATION)
        screen_text.text = """The dot indicates short keypresses,
        You have to press and release immediately the button that light-up."""
        self._controller_img.draw(exp_win)
        screen_text.draw(exp_win)
        self._cue['short'].draw(exp_win)
        yield True
        core.wait(config.INSTRUCTION_DURATION)
        yield True
    def _setup(self, exp_win):
        # Build the trial handler, controller image, cue stimuli and the
        # per-button highlight shapes.
        self.trials = data.TrialHandler(self.design, 1, method="sequential")
        self._controller_img = visual.ImageStim(
            exp_win,
            image="data/game_ctrlr/ctrlr.png",
            size=(800, 481),
            units="pixels"
        )
        if 'lr_condition' in self.design[0]:
            # a 1x2 mask lets _run dim the unused half of the controller
            self._controller_img.mask = np.ones((1,2))
        self._cue = {
            'short': visual.Circle(
                exp_win, pos=[0,0], radius=25, units="pixels",
                lineColor=(255,165,30), fillColor=(255,165,30), colorSpace='rgb255',),
            'long': visual.Rect(
                exp_win, width=100., height=20, units="pixels",
                fillColor=(40,220,120), colorSpace='rgb255',),
        }
        buttons_aspect = {
            'fillColor':(255,160,110),
            'colorSpace':'rgb255',
            'opacity':.6,
            'lineWidth':0
        }
        # polygon specs (len > 2) become ShapeStim, [(center), radius]
        # specs become Circle; coordinates are shifted to be relative to
        # the image center and y-flipped into screen coordinates
        self._buttons = {
            key: (
                visual.ShapeStim(
                    exp_win,
                    vertices=[(s - self._controller_img.size/2) * (1,-1) for s in shape],
                    units="pixels",
                    **buttons_aspect)
                if len(shape)>2
                else visual.Circle(
                    exp_win,
                    radius=shape[1],
                    pos=(shape[0] - self._controller_img.size/2) * (1,-1),
                    units="pixels",
                    **buttons_aspect))
            for key, shape in self.BUTTONS.items()
        }
    def _set_key_handler(self, exp_win):
        # activate repeat keys
        exp_win.winHandle.on_key_press = _onPygletKeyPress
        exp_win.winHandle.on_key_release = _onPygletKeyRelease
        self.pressed_keys = set()
    def _unset_key_handler(self, exp_win):
        # deactivate custom keys handling
        exp_win.winHandle.on_key_press = event._onPygletKey
        # del exp_win.winHandle.on_key_release
    def _handle_controller_presses(self, exp_win):
        # Drain the pyglet key buffers and convert their timestamps into
        # the task clock; returns ([(key, t), ...], [(key, t), ...]).
        exp_win.winHandle.dispatch_events()
        global _keyPressBuffer, _keyReleaseBuffer
        time_offset = core.getTime() - self.task_timer.getTime()
        key_presses = [(k, t-time_offset) for (k,t) in _keyPressBuffer]
        key_releases = [(k, t-time_offset) for (k,t) in _keyReleaseBuffer]
        _keyReleaseBuffer.clear()
        _keyPressBuffer.clear()
        return key_presses, key_releases
    def _run(self, exp_win, ctl_win):
        # Main trial loop: draw cue, highlight the target button at trial
        # onset, remove the highlight at offset, then log press/release
        # timings for the trial.
        self._set_key_handler(exp_win)
        time_offset = core.getTime() - self.task_timer.getTime()
        for trial_n, trial in enumerate(self.trials):
            if 'lr_condition' in trial:
                # dim the half of the controller not used this trial
                self._controller_img.mask.fill(1)
                self._controller_img.mask[:,(slice(0,1) if trial['lr_condition'] == 'r' else slice(1,None))] = -.5
                # reassign so the stimulus picks up the modified mask
                self._controller_img.mask = self._controller_img.mask
            # draw cue, flip
            self._controller_img.draw(exp_win)
            self._cue[trial['condition']].draw(exp_win)
            yield True
            exp_win.logOnFlip(
                level=logging.EXP,
                msg=f"image: {trial['condition']}",
            )
            # draw to backbuffer
            self._controller_img.draw(exp_win)
            self._cue[trial['condition']].draw(exp_win)
            self._buttons[trial['key']].draw(exp_win)
            utils.wait_until(self.task_timer, trial["onset"] - 1 / config.FRAME_RATE)
            # keypresses = event.getKeys(self.RESPONSE_KEYS) # flush response keys
            yield True # flip
            trial["onset_flip"] = (
                self._exp_win_last_flip_time - self._exp_win_first_flip_time
            )
            self.progress_bar.set_description(
                f"Trial {trial_n}:: {trial['condition']} {trial['key']}"
            )
            # reset
            self._controller_img.draw(exp_win)
            self._cue[trial['condition']].draw(exp_win)
            utils.wait_until(self.task_timer, trial["onset"] + trial["duration"] - 1 / config.FRAME_RATE)
            yield True # flip
            trial["offset_flip"] = (
                self._exp_win_last_flip_time - self._exp_win_first_flip_time
            )
            utils.wait_until(self.task_timer, trial["offset_flip"] + 1 )
            key_pressed, key_released = self._handle_controller_presses(exp_win)
            if trial_n == 0:
                # initialise logging columns so the output header is complete
                for k in [
                    'key_press_time', 'key_press_rt',
                    'key_release_time', 'key_release_rt',
                    'key_duration',
                    'all_keypresses', 'all_keyreleases']:
                    trial[k] = None
            kp_matches = [(k,t) for k,t in key_pressed if k==trial['key']]
            kr_matches = [(k,t) for k,t in key_released if k==trial['key']]
            if len(kp_matches):
                trial['key_press_time'] = kp_matches[0][1]
                trial['key_press_rt'] = trial['key_press_time'] - trial['onset_flip']
            if len(kr_matches):
                trial['key_release_time'] = kr_matches[0][1]
                if len(kp_matches):
                    trial['key_duration'] = trial['key_release_time'] - trial['key_press_time']
                # only pertinent for long keypresses, but still computing it for short ones
                trial['key_release_rt'] = trial['key_release_time'] - trial['offset_flip']
            trial['all_keypresses'] = key_pressed # log to exclude trial with confounded keys
            trial['all_keyreleases'] = key_released
            self.progress_bar.set_description(
                f"Trial {trial_n}:: {trial['condition']} {trial['key']} rt={trial.get('key_press_rt')}"
            )
        utils.wait_until(self.task_timer, trial["onset"] + trial['duration'] + self.FINAL_WAIT)
        self._unset_key_handler(exp_win)
        yield True
    def _stop(self, exp_win, ctl_win):
        # restore default key handling when the task stops
        self._unset_key_handler(exp_win)
        yield True
    def _restart(self):
        # rebuild the trial handler so the task can rerun from scratch
        self.trials = data.TrialHandler(self.design, 1, method="sequential")
    def _save(self):
        # export per-trial data as a wide TSV; False disables default saving
        self.trials.saveAsWideText(self._generate_unique_filename("events", "tsv"))
        return False
|
{"hexsha": "0452c40ee3c1b341e2a4140f2a08e5092c041299", "size": 8875, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tasks/game_controller.py", "max_stars_repo_name": "eddyfortier/task_stimuli", "max_stars_repo_head_hexsha": "b3e0c477775d42b0efa4389531042a80a848fe86", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-09-10T13:21:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T11:37:54.000Z", "max_issues_repo_path": "src/tasks/game_controller.py", "max_issues_repo_name": "eddyfortier/task_stimuli", "max_issues_repo_head_hexsha": "b3e0c477775d42b0efa4389531042a80a848fe86", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-11-14T16:41:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T14:55:40.000Z", "max_forks_repo_path": "src/tasks/game_controller.py", "max_forks_repo_name": "eddyfortier/task_stimuli", "max_forks_repo_head_hexsha": "b3e0c477775d42b0efa4389531042a80a848fe86", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-08-19T19:08:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-16T15:45:43.000Z", "avg_line_length": 37.447257384, "max_line_length": 122, "alphanum_fraction": 0.572056338, "include": true, "reason": "import numpy", "num_tokens": 2085}
|
#!/usr/bin/env python
import os.path
import sys
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
#import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LinearSegmentedColormap, ColorConverter
from matplotlib import pylab
#from pylab import colorbar, clf, axes, linspace, pcolor, meshgrid, show, axis, title
#from scitools.easyviz.matplotlib_ import colorbar, clf, axes, linspace,\
#pcolor, meshgrid, show, colormap
from numpy import sort
try:
import matplotlib.pyplot as plt
except:
print("Error importing pyplot from matplotlib, please install matplotlib package first...")
sys.tracebacklimit=0
raise Exception("Importing matplotlib failed")
def grid(x, y, z, resX=500, resY=500):
    """
    Converts 3 column scatter data (x, y, z) to a matplotlib-compatible grid.

    Arguments:
        x, y, z    - 1-D sequences of equal length
        resX, resY - output grid resolution (default 500x500)

    Returns:
        X, Y, Z - 2-D arrays of shape (resY, resX); Z holds linearly
        interpolated values (NaN outside the convex hull of the points).
    """
    # matplotlib.mlab.griddata was removed in matplotlib >= 3.1; the
    # scipy equivalent of interp='linear' is method='linear'.
    from scipy.interpolate import griddata
    xi = np.linspace(min(x), max(x), resX)
    yi = np.linspace(min(y), max(y), resY)
    X, Y = np.meshgrid(xi, yi)
    Z = griddata((x, y), z, (X, Y), method='linear')
    return X, Y, Z
def clamp(x, xmin, xmax):
    """Limit x to the closed interval [xmin, xmax]."""
    if x < xmin:
        return xmin
    if x > xmax:
        return xmax
    return x
def hsl2rgb(h, s, l):
    """
    Convert HSL components in range 0-1 to an RGB triple in range 0-1
    (H-hue, S-saturation, L-lightness).

    See: http://geekymonkey.com/Programming/CSharp/RGB2HSL_HSL2RGB.htm
    """
    # default color is white; returned unchanged when the value is zero
    # or the hue falls outside the six sextants
    rgb = [1.0, 1.0, 1.0]
    value = l * (1.0 + s) if l <= 0.5 else l + s - l * s
    if value > 0:
        lo = 2.0 * l - value
        sat_v = (value - lo) / value
        hue6 = h * 6.0
        sextant = int(hue6)
        fract = hue6 - sextant
        vsf = value * sat_v * fract
        mid_up = lo + vsf
        mid_down = value - vsf
        # one (R, G, B) arrangement per 60-degree hue sextant
        by_sextant = [
            [value, mid_up, lo],
            [mid_down, value, lo],
            [lo, value, mid_up],
            [lo, mid_down, value],
            [mid_up, lo, value],
            [value, lo, mid_down],
        ]
        if 0 <= sextant <= 5:
            rgb = by_sextant[sextant]
    return rgb
def make_custom_colormap(type):
    """
    arbitrary function based colormap

    `type` selects one of seven hard-coded color functions:
      0 rainbow, 1 hot-to-cold, 2 hsl-based, 3 hot variation, 4 hot,
      5 YlOrBr-like, 6 terrain.
    Returns a matplotlib LinearSegmentedColormap sampled at n+1 points.
    """
    # NOTE(review): the parameter name shadows the builtin `type`.
    n = 30
    R = []
    G = []
    B = []
    x0 = []
    for i in range(n+1):
        # range 0 to 1
        x = i/(1.0*n)
        # RGB triple already:
        RGB = [1.0,1.0,1.0]
        # rainbow (blue-cyan-green-yellow-orange-red)
        if type == 0:
            # color function: x in [-1,1]
            v = 2.0 * x - 1.0
            RGB[0] = clamp(1.5 - abs(2*v - 1),0,1)
            RGB[1] = clamp(1.5 - abs(2*v),0,1)
            RGB[2] = clamp(1.5 - abs(2*v + 1),0,1)
        # hot-to-cold (blue-green-yellow-red)
        if type == 1:
            # color function: x in [-1,1]
            v = 2.0 * x - 1.0
            RGB[0] = clamp(2 - abs(2*v - 2),0,1)
            RGB[1] = clamp(2 - abs(2*v),0,1)
            RGB[2] = clamp(2 - abs(2*v + 2),0,1)
        # hsl
        if type == 2:
            # color function: x in [0,1]
            v = x
            if v < 0.25:
                RGB = hsl2rgb(0.01, v, 0.5)
            elif v < 0.5:
                RGB = hsl2rgb(0.2, v, 0.5)
            elif v < 0.75:
                RGB = hsl2rgb(0.3, v, v)
            else:
                RGB = hsl2rgb(0.12, v, v)
        # hot variation
        if type == 3:
            # color function: x in [0,1]
            v = 1 - (1-x)*(1-x)
            # red
            if v < 0.6:
                RGB[0] = v / 0.6
            else:
                RGB[0] = 1.0
            # green
            if v < 0.2:
                RGB[1] = 0.0
            elif v < 0.7:
                RGB[1] = (v - 0.2) / 0.5
            else:
                RGB[1] = 1.0
            # blue
            if v < 0.4:
                RGB[2] = 0.0
            elif v < 0.8:
                RGB[2] = (v-0.4) / 0.4
            else:
                RGB[2] = 1.0
        # hot
        if type == 4:
            # color function: x in [0,1]
            v = x
            #v = 1 - (1-x)*(1-x)
            # red
            if v < 0.5: # hot would go till 0.36
                RGB[0] = 0.0416 + (1.0 - 0.0416) * v / 0.5
            else:
                RGB[0] = 1.0
            # green
            if v < 0.36:
                RGB[1] = 0.0
            elif v < 0.75:
                RGB[1] = (v - 0.36) / (0.75 - 0.36)
            else:
                RGB[1] = 1.0
            # blue
            if v < 0.75:
                RGB[2] = 0.0
            elif v < 0.9: # hot would go till 1.0
                RGB[2] = (v-0.75) / (0.9 - 0.75)
            else:
                RGB[2] = 1.0
        # YlOrBr
        # NOTE(review): this branch is identical to type 3 — possibly an
        # unfinished variant; confirm before relying on it.
        if type == 5:
            # color function: x in [0,1]
            v = 1 - (1-x)*(1-x)
            # red
            if v < 0.6:
                RGB[0] = v / 0.6
            else:
                RGB[0] = 1.0
            # green
            if v < 0.2:
                RGB[1] = 0.0
            elif v < 0.7:
                RGB[1] = (v - 0.2) / 0.5
            else:
                RGB[1] = 1.0
            # blue
            if v < 0.4:
                RGB[2] = 0.0
            elif v < 0.8:
                RGB[2] = (v-0.4) / 0.4
            else:
                RGB[2] = 1.0
        # terrain (blue-dark-blue-white-brown-red-yellow-black)
        if type == 6:
            # color function: x in [0,1]
            #v = 1 - (1-x)*(1-x)
            v = x
            # HSL (hue-saturation-lightness)
            # http://www.workwithcolor.com/hsl-color-picker-01.htm?cp=0000FF
            # turquise
            h0 = 185./360.
            s0 = 1.0
            l0 = 0.61
            # dark blue
            h1 = 225./360.
            s1 = 0.35
            l1 = 0.28
            # white (from blue)
            h2a = 240./360.
            s2a = 1.0
            l2a = 1.0
            # white
            h2b = 20./360.
            s2b = 0.2
            l2b = 1.0
            # brown-green
            h3 = 45./360.
            s3 = 0.75
            l3 = 0.34
            # red
            h4 = 0.0
            s4 = 1.0
            l4 = 0.5
            # yellow
            h5 = 61./360.
            s5 = 1.0
            l5 = 0.5
            # black
            h6 = 61./360.
            s6 = 0.0
            l6 = 0.1
            # turquise-blue-white
            array1_h = [h0,h1,h2a]
            array1_s = [s0,s1,s2a]
            array1_l = [l0,l1,l2a]
            # white-brown-red-yellow-black
            array2_h = [h2b,h3,h4,h5,h6]
            array2_s = [s2b,s3,s4,s5,s6]
            array2_l = [l2b,l3,l4,l5,l6]
            # sets the split value between the 2 color ranges (separated by white)
            split = 0.5
            # piecewise-linear HSL interpolation: locate the segment that
            # contains v and interpolate between its two anchor colors
            for ii in range(2):
                h = 0.0
                s = 1.0
                l = 0.0
                # value scale and number of color increments
                if ii == 0:
                    incr = 2
                    delta = split/incr
                    v0 = 0.0
                    array_hsl = [array1_h,array1_s,array1_l]
                else:
                    incr = 4
                    delta = (1.0 - split)/incr
                    v0 = split
                    array_hsl = [array2_h,array2_s,array2_l]
                #print("array hsl",array_hsl)
                done_color = False
                for jj in range(incr):
                    thr1 = v0 + jj*delta
                    thr2 = v0 + (jj+1)*delta
                    if v >= thr1 and v <= thr2:
                        fac = (v - thr1)/delta
                        h1 = array_hsl[0][jj]
                        h2 = array_hsl[0][jj+1]
                        s1 = array_hsl[1][jj]
                        s2 = array_hsl[1][jj+1]
                        l1 = array_hsl[2][jj]
                        l2 = array_hsl[2][jj+1]
                        h = h1 + (h2-h1)*fac
                        s = s1 + (s2-s1)*fac
                        l = l1 + (l2-l1)*fac
                        #print("range ",jj,v,fac,h,s,l)
                        done_color = True
                        break
                if done_color: break
            # top of the range maps to (near) black
            if v == 1.0:
                h = 0.0
                s = 0.0
                l = 0.01
            #print("terrain hsl ",i,v,h,s,l)
            #print("")
            RGB = hsl2rgb(h, s, l)
        x0.append(x)
        R.append(RGB[0])
        G.append(RGB[1])
        B.append(RGB[2])
    # LinearSegmentedColormap expects (x, below, above) anchor triples
    cmap_dict = {}
    cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]
    cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]
    cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]
    mymap = LinearSegmentedColormap('mymap',cmap_dict)
    return mymap
def make_colormap(colors):
    """
    Define a new color map based on values specified in the dictionary
    colors, where colors[z] is the color that value z should be mapped to,
    with linear interpolation between the given values of z.
    The z values (dictionary keys) are real numbers and the values
    colors[z] can be either an RGB list, e.g. [1,0,0] for red, or an
    html hex string, e.g. "#ff0000" for red.
    """
    # bug fix: materialize the keys before numpy.sort — under Python 3
    # dict views cannot be sorted into an array directly
    z = sort(list(colors.keys()))
    n = len(z)
    z1 = min(z)
    zn = max(z)
    # normalize anchor positions into [0, 1] as LinearSegmentedColormap expects
    x0 = (z - z1) / (zn - z1)
    CC = ColorConverter()
    R = []
    G = []
    B = []
    for i in range(n):
        #i'th color at level z[i]:
        Ci = colors[z[i]]
        if type(Ci) == str:
            # a hex string of form '#ff0000' for example (for red)
            RGB = CC.to_rgb(Ci)
        else:
            # assume it's an RGB triple already:
            RGB = Ci
        R.append(RGB[0])
        G.append(RGB[1])
        B.append(RGB[2])
    # build the (x, below, above) anchor triples for each channel
    cmap_dict = {}
    cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]
    cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]
    cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]
    mymap = LinearSegmentedColormap('mymap',cmap_dict)
    return mymap
def plot_kernels(filename, show=False):
    """
    Plots an ASCII kernel file (5 columns: x, y, kernel1, kernel2, kernel3)
    and saves the figure as a PNG in the same directory as the input file.

    Arguments:
        filename - path to the ASCII kernel file
        show     - if True, display the figure interactively after saving
    """
    print("plotting kernel file: " + filename)
    print("")
    data = np.loadtxt(filename)
    # checks data
    if data.ndim != 2:
        print("Error: wrong data dimension for kernel file " + str(data.ndim))
        sys.tracebacklimit = 0
        raise Exception("Invalid data dimension")
    # checks array: expects exactly 5 columns
    if len(data[1, :]) != 5:
        print("data shape : " + str(data.shape))
        print("data lengths: " + str(len(data[:, 1])) + " " + str(len(data[1, :])))
        print("Error: wrong data format for kernel file " + str(data.shape))
        sys.tracebacklimit = 0
        raise Exception("Invalid data format")
    # splits up data
    x = data[:, 0]
    y = data[:, 1]
    print("dimensions:")
    print("  x-range min/max = %f / %f" % (x.min(), x.max()))
    print("  y-range min/max = %f / %f" % (y.min(), y.max()))
    print("")
    z1 = data[:, 2]  # e.g. rho
    z2 = data[:, 3]  # e.g. alpha
    z3 = data[:, 4]  # e.g. beta
    # kernel names are derived from the file name, which looks like
    # rhop_alpha_beta_kernel.dat or proc000000_rhop_alpha_beta_kernel.dat
    # bug fix: was os.path.basename(file), reading the global script
    # argument instead of the function parameter
    name = os.path.basename(filename)
    name_kernels = str.split(name, "_")
    if len(name_kernels) == 4:
        kernel1 = 'K_' + name_kernels[0]  # rhop
        kernel2 = 'K_' + name_kernels[1]  # alpha
        kernel3 = 'K_' + name_kernels[2]  # beta
    elif len(name_kernels) == 5:
        kernel1 = 'K_' + name_kernels[1]
        kernel2 = 'K_' + name_kernels[2]
        kernel3 = 'K_' + name_kernels[3]
    else:
        kernel1 = 'K_1'
        kernel2 = 'K_2'
        kernel3 = 'K_3'
    # NOTE(review): hard-coded label overrides kept from the original
    # figure script
    kernel2 = 'VP'
    kernel3 = 'VS'
    print("statistics:")
    print("  %12s : min/max = %e / %e" % (kernel1, z1.min(), z1.max()))
    print("  %12s : min/max = %e / %e" % (kernel2, z2.min(), z2.max()))
    print("  %12s : min/max = %e / %e" % (kernel3, z3.min(), z3.max()))
    print("")
    total_max = (np.concatenate((z2, z3))).max() * 0.9
    total_min = (np.concatenate((z2, z3))).min() * 0.9
    # NOTE(review): fixed color range kept from the original figure script;
    # it overrides the values computed just above
    total_max = 6000
    total_min = 835
    print("  data max = " + str(total_max))
    print("  data min = " + str(total_min))
    print("")
    # setup figure (single subplot)
    fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))
    zinterval = (total_max - total_min) / 5.0
    xmajorLocator = MultipleLocator(2000)
    ymajorLocator = MultipleLocator(1000)
    zmajorLocator = MultipleLocator(zinterval)
    axes.xaxis.set_major_locator(xmajorLocator)
    axes.yaxis.set_major_locator(ymajorLocator)
    ax = axes
    # interpolate the scattered kernel2 values onto a regular grid
    X, Y, Z = grid(x, y, z2)
    axes.set_ylabel(kernel2)
    axes = plt.gca()
    axes.invert_yaxis()
    # custom terrain colormap (type 6); the earlier dead make_colormap(...)
    # call whose result was immediately discarded has been removed
    my_colormap = make_custom_colormap(6)
    im = ax.imshow(Z, vmax=total_max, vmin=total_min,
                   extent=[x.min(), x.max(), y.min(), y.max()], cmap=my_colormap)
    cbar = fig.colorbar(im, orientation='vertical', fraction=0.016, pad=0.02)
    cbar.set_ticks(zmajorLocator)
    # figure title
    fname = 'Null-space sampled VP 1 ($D_{misfit} = 1.49e-2$)'
    plt.gca().invert_yaxis()
    plt.title(fname, fontsize=20)
    # saves kernel figure as file
    # bug fix: was os.path.dirname(file); also build the path portably
    # instead of the odd 'dirname./fig8c.png' concatenation
    out_dir = os.path.dirname(filename)
    outfile = os.path.join(out_dir, "fig8c.png")
    fig.savefig(outfile, format="png")
    print("*****")
    print("plotted file: " + outfile)
    print("*****")
    print("")
    if show:
        # bug fix: was plt.figure(), which opened a new empty window
        # instead of displaying the plotted figure
        plt.show()
def usage():
    """Print command-line usage for this script (Python 2 and 3 safe)."""
    # single-argument print(...) works identically as a statement (py2)
    # and a function call (py3)
    print("usage: ./plot_kernel.py file [1 == show figure / 0 == just plot file]")
    print("  where")
    print("      file - ASCII kernel file, e.g. OUTPUT_FILES/proc000000_rhop_alpha_beta_kernel.dat")
if __name__ == '__main__':
    # gets arguments
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    else:
        file = sys.argv[1]
    # optional second argument toggles interactive display
    if len(sys.argv) > 2:
        show_plot = sys.argv[2]
    else:
        show_plot = 0
    # note: show_plot is compared as a string because it comes from argv
    if show_plot == '1':
        plot_kernels(file,show=True)
    else:
        plot_kernels(file)
|
{"hexsha": "862279c7c0efe902ef6a4402c3274469824cac6e", "size": 15739, "ext": "py", "lang": "Python", "max_stars_repo_path": "Visualization/Null-Space Shuttle/-2/plot_kernel_vp.py", "max_stars_repo_name": "qianchengliu0/Uncertainty_Quantification_Marmousi_Example", "max_stars_repo_head_hexsha": "501775f7ad2c6b83fc12ab7e4e113e12cbbb3345", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Visualization/Null-Space Shuttle/-2/plot_kernel_vp.py", "max_issues_repo_name": "qianchengliu0/Uncertainty_Quantification_Marmousi_Example", "max_issues_repo_head_hexsha": "501775f7ad2c6b83fc12ab7e4e113e12cbbb3345", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Visualization/Null-Space Shuttle/-2/plot_kernel_vp.py", "max_forks_repo_name": "qianchengliu0/Uncertainty_Quantification_Marmousi_Example", "max_forks_repo_head_hexsha": "501775f7ad2c6b83fc12ab7e4e113e12cbbb3345", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-15T15:58:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-15T15:58:04.000Z", "avg_line_length": 29.3091247672, "max_line_length": 101, "alphanum_fraction": 0.4446279942, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 4799}
|
# Default polyhedra library used for any real number type.
function getlibraryfor{T<:Real}(::Type{T})
    SimplePolyhedraLibrary()
end
# Marker type selecting the naive polyhedron implementation below.
type SimplePolyhedraLibrary <: PolyhedraLibrary
end
# Polyhedron storing its H- and/or V-representation, either of which may
# be absent (Nullable holding nothing) until computed.
type SimplePolyhedron{N, T} <: Polyhedron{N, T}
    hrep::Nullable{HRepresentation{N, T}}
    # bug fix: this field was declared Nullable{HRepresentation{N, T}},
    # but polyhedron(::VRepresentation, ...) and getgenerators store a
    # VRepresentation here.
    vrep::Nullable{VRepresentation{N, T}}
end
# Build a polyhedron from an H-representation (inequalities); the
# V-representation is left uncomputed.
function polyhedron{N, T}(hrep::HRepresentation{N, T}, ::SimplePolyhedraLibrary)
    SimplePolyhedron{N, T}(hrep, nothing)
end
# Build a polyhedron from a V-representation (generators); the
# H-representation is left uncomputed.
function polyhedron{N, T}(vrep::VRepresentation{N, T}, ::SimplePolyhedraLibrary)
    SimplePolyhedron{N, T}(nothing, vrep)
end
# Copy a polyhedron from whichever representation is currently stored.
# bug fix: the constructor was called with a single argument, but the
# type's default constructor requires both fields — supply `nothing`
# for the missing representation. (If a one-argument convenience
# constructor exists elsewhere, this remains equivalent.)
function Base.copy{N, T}(p::SimplePolyhedron{N, T})
    if !isnull(p.hrep)
        SimplePolyhedron{N, T}(get(p.hrep), nothing)
    else
        SimplePolyhedron{N, T}(nothing, get(p.vrep))
    end
end
# Intersect the stored H-representation with `ine` in place; assumes the
# H-representation is present (get throws on a null Nullable).
function Base.push!{N}(p::SimplePolyhedron{N}, ine::HRepresentation{N})
    p.hrep = get(p.hrep) ∩ ine
end
# True when the H-representation (inequalities) is stored.
inequalitiesarecomputed(p::SimplePolyhedron) = !isnull(p.hrep)
getinequalities(p::SimplePolyhedron) = get(p.hrep) # TODO copy
# True when the V-representation (generators) is stored.
generatorsarecomputed(p::SimplePolyhedron) = !isnull(p.vrep)
getgenerators(p::SimplePolyhedron) = get(p.vrep) # TODO copy
|
{"hexsha": "f28780b54c1b6857729286c65e1a206d8ed0039a", "size": 1024, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/simplepolyhedron.jl", "max_stars_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_stars_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/simplepolyhedron.jl", "max_issues_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_issues_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/simplepolyhedron.jl", "max_forks_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_forks_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1176470588, "max_line_length": 80, "alphanum_fraction": 0.736328125, "num_tokens": 349}
|
import os
import numpy as np
import sklearn.metrics as metrics
from sklearn.externals import joblib
from utils.Results import ResultsSingleRun
class BaseOptionsClassifier:
    """Naming and path conventions for a classifier's persisted files.

    Creates the model directory ``<dir_models_base>/<name>/`` on
    construction and derives the filenames used to store trained
    classifiers and learned features per run.
    """

    def __init__(self, name, dir_models_base, options_filename_dataset_training, filename_options_clf):
        """Store the naming components and ensure the model dir exists.

        name: classifier identifier used in directory and file names.
        dir_models_base: base directory under which models are kept.
        options_filename_dataset_training: training-data options token.
        filename_options_clf: classifier options token.
        """
        # cleanup: removed non-idiomatic trailing semicolons and the
        # bare `return` statements; filename formats are unchanged
        self.name = name
        self.dir_model = self._getDirModel(dir_models_base)
        self.filename_options_training_data = options_filename_dataset_training
        self.filename_options_clf = filename_options_clf

    def _getDirModel(self, dir_models_base):
        # <base>/<name>/ — created eagerly so later saves cannot fail on
        # a missing directory (trailing slash kept for compatibility)
        dir_models = dir_models_base + '/' + self.name + '/'
        if not os.path.exists(dir_models):
            os.makedirs(dir_models)
        return dir_models

    def getFilenameOptionsTrainingData(self):
        """Return the training-data options token."""
        return self.filename_options_training_data

    def getFilenameLearnedFeatures(self, run):
        """Return the path of the learned-features file for `run`."""
        return self.dir_model + 'learned_features_' + self.name + '_' + self.filename_options_clf + '_' + self.filename_options_training_data + '_run' + str(run) + '.sav'

    def getFilenameClf(self, run):
        """Return the path of the serialized classifier for `run`."""
        return self.dir_model + self.name + '_' + self.filename_options_clf + '_' + self.filename_options_training_data + '_run' + str(run) + '.sav'

    def getDirModel(self):
        """Return the directory holding this classifier's files."""
        return self.dir_model

    def getName(self):
        """Return the classifier name."""
        return self.name

    def getFilenameOptions(self):
        """Return the combined '<name>_<clf options>' token."""
        return self.name + '_' + self.filename_options_clf
class BaseClassifier:
    """Shared training/prediction scaffolding for readmission classifiers.

    Subclasses (or _loadClassifier) must set ``self.clf`` to a
    scikit-learn style estimator before train/predict are called.
    """

    def __init__(self):
        # estimator is supplied later by a subclass or _loadClassifier
        self.clf = None

    def _loadClassifier(self, filename_classifier):
        """Load a previously persisted estimator into self.clf."""
        self.clf = joblib.load(filename_classifier)

    def train(self, df, early_readmission_flagname):
        """Fit self.clf on `df`; the named column holds the labels."""
        print('training data: ' + str(df.shape))
        labels = df[early_readmission_flagname].values
        data = df.drop(early_readmission_flagname, axis=1).values
        self.clf.fit(data, labels)

    def train_partial(self, df, early_readmission_flagname):
        """Incrementally fit self.clf (estimators with partial_fit)."""
        print('training data: ' + str(df.shape))
        labels = df[early_readmission_flagname].values
        data = df.drop(early_readmission_flagname, axis=1).values
        self.clf.partial_fit(data, labels, classes=np.unique(labels))

    def setResults(self, predictions, labels):
        """Compute ROC/PR metrics from positive-class probabilities and
        return them packed in a ResultsSingleRun."""
        fpr, tpr, thresholds_fprtpr = metrics.roc_curve(labels, predictions[:, 1])
        precision, recall, thresholds_pr = metrics.precision_recall_curve(labels, predictions[:, 1])
        average_precision = metrics.average_precision_score(labels, predictions[:, 1])
        roc_auc = metrics.auc(fpr, tpr)
        results = ResultsSingleRun()
        results.precision = precision
        results.recall = recall
        results.thresholds_precision_recall = thresholds_pr
        results.average_precision = average_precision
        results.tpr = tpr
        results.fpr = fpr
        # NOTE(review): this overwrites the PR thresholds stored above
        # with the ROC thresholds; it likely should target a separate
        # attribute (e.g. thresholds for fpr/tpr) — kept as-is because
        # ResultsSingleRun's fields are not visible here. Confirm.
        results.thresholds_precision_recall = thresholds_fprtpr
        results.roc_auc = roc_auc
        results.calcFMeasure(precision, recall)
        return results

    def predict(self, df, early_readmission_flagname):
        """Predict probabilities on `df` and return computed metrics."""
        print('prediction data: ' + str(df.shape))
        labels = df[early_readmission_flagname].values
        data = df.drop(early_readmission_flagname, axis=1).values
        predictions = self.clf.predict_proba(data)
        return self.setResults(predictions, labels)

    def _writeNumericListToFile(self, numList, filename):
        """Write one number per line to `filename`.

        bug fix: use a context manager so the handle is closed even if a
        write raises (the original leaked the file object on error).
        """
        with open(filename, 'w') as out:
            for num in numList:
                out.write(str(num) + '\n')
|
{"hexsha": "86f79ae1b13c43525fffd7a6182749ce9f5b06f2", "size": 3695, "ext": "py", "lang": "Python", "max_stars_repo_path": "learning/BaseClassifier.py", "max_stars_repo_name": "ASMDS/PATREC", "max_stars_repo_head_hexsha": "091df6ec20e0736340a2b2ff9a25ac81bec48259", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-09T12:31:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T12:31:52.000Z", "max_issues_repo_path": "learning/BaseClassifier.py", "max_issues_repo_name": "ASMDS/PATREC", "max_issues_repo_head_hexsha": "091df6ec20e0736340a2b2ff9a25ac81bec48259", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learning/BaseClassifier.py", "max_forks_repo_name": "ASMDS/PATREC", "max_forks_repo_head_hexsha": "091df6ec20e0736340a2b2ff9a25ac81bec48259", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.212962963, "max_line_length": 175, "alphanum_fraction": 0.6755074425, "include": true, "reason": "import numpy", "num_tokens": 802}
|
#!/usr/bin/env python3
#
# Copyright 2019 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Ryan Shim, Gilbert
import os
import tensorflow as tf
import random
import sys
import time
from gazebo_msgs.srv import DeleteEntity
from gazebo_msgs.srv import SpawnEntity
from geometry_msgs.msg import Pose
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile
from rclpy.qos import qos_profile_sensor_data
from std_srvs.srv import Empty
from geometry_msgs.msg import Twist
from pic4rl_msgs.srv import State, Reset, Step
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
import numpy as np
import math
from numpy import savetxt
import cv2
from cv_bridge import CvBridge
from rclpy.qos import QoSProfile
class Pic4rlEnvironment(Node):
    def __init__(self):
        """Set up ROS interfaces (publisher, subscriber, service clients)
        and initialise all episode-state bookkeeping variables."""
        super().__init__('pic4rl_environment')
        # To see debug logs
        #rclpy.logging.set_logger_level('omnirob_rl_environment', 10)
        """************************************************************
        ** Initialise ROS publishers and subscribers
        ************************************************************"""
        qos = QoSProfile(depth=10)
        self.cmd_vel_pub = self.create_publisher(
            Twist,
            'cmd_vel',
            qos)
        # NOTE(review): self.DEPTH_callback is not defined in this file
        # chunk — confirm it exists on the class.
        self.Image_sub = self.create_subscription(
            Image,
            '/camera/depth/image_raw',
            self.DEPTH_callback,
            qos_profile=qos_profile_sensor_data)
        # Initialise client
        #self.send_twist = self.create_client(Twist, 'send_twist')
        #self.task_succeed_client = self.create_client(Empty, 'task_succeed')
        #self.task_fail_client = self.create_client(Empty, 'task_fail')
        self.pause_physics_client = self.create_client(Empty, 'pause_physics')
        self.unpause_physics_client = self.create_client(Empty, 'unpause_physics')
        self.get_state_client = self.create_client(State, 'get_state')
        self.new_episode_client = self.create_client(Reset, 'new_episode')
        """##########
        State variables
        ##########"""
        # per-episode bookkeeping, refreshed by reset()/_step()
        self.init_step = True
        self.episode_step = 0
        self.goal_pos_x = None
        self.goal_pos_y = None
        self.goal_index = 0
        self.previous_twist = None
        self.previous_pose = Odometry()
        self.previous_pos = Odometry()
        self.total_distance = 0
        self.goal_distance = 0
        self.stage = 1
        # lidar preprocessing parameters (see process_state)
        self.lidar_points = 359
        self.cutoff = 5
        #self.depth_image = np.zeros((240,320), np.uint8)
        # converts ROS Image messages to OpenCV arrays
        self.bridge = CvBridge()
        #test variable
        self.step_flag = False
        self.twist_received = None
        """##########
        Environment initialization
        ##########"""
        """#############
        Main functions
        #############"""
def render(self):
pass
def step(self, action):
twist = Twist()
twist.linear.x = float(action[0])
#twist.linear.y = float(action[1])
twist.angular.z = float(action[1])
observation, reward, done = self._step(twist)
info = None
return observation, reward, done, info, self.total_distance, self.goal_distance
    def _step(self, twist=Twist(), reset_step = False):
        """Publish one command, gather sensor state and compute the step
        outcome.

        Returns (observation, reward, done); observation and reward are
        None when reset_step is True (bookkeeping-only step after reset).
        NOTE(review): the mutable default Twist() is shared across calls;
        harmless as long as _step never mutates it — confirm.
        """
        #After environment reset sensors data are not instaneously available
        #that's why there's the while. A timer could be added to increase robustness
        data_received = False
        while not data_received:
            # Send action
            self.send_action(twist)
            # Get state
            state = self.get_state()
            data_received = state.data_received
        lidar_measurements, goal_distance, goal_angle, pos_x, pos_y, yaw, depth_image = self.process_state(state, reset_step)
        # Check events (failure,timeout, success)
        done, event = self.check_events(lidar_measurements, goal_distance, self.episode_step)
        if not reset_step:
            # Get reward
            reward = self.get_reward(twist,lidar_measurements, goal_distance, goal_angle, pos_x, pos_y, yaw, done, event)
            observation = self.get_observation(twist,lidar_measurements, goal_distance, goal_angle, pos_x, pos_y, yaw, depth_image)
        else:
            # reset step: only refresh internal state, no learning signal
            reward = None
            observation = None
            self.path_length = 0
        # Send observation and reward
        self.update_state(twist,lidar_measurements, goal_distance, goal_angle, pos_x, pos_y, yaw, done, event)
        return observation, reward, done
    def reset(self, episode):
        """Request a new episode from the simulation service and return
        the first observation plus the goal coordinates.

        NOTE(review): the return reads self.goal_pose_x/goal_pose_y while
        __init__ defines goal_pos_x/goal_pos_y — presumably get_goal()
        (not visible here) sets the former; confirm to avoid an
        AttributeError.
        """
        #self.destroy_subscription('cmd_vel')
        self.episode = episode
        req = Reset.Request()
        req.goal_pos_x,req.goal_pos_y = self.get_goal(episode)
        self.get_logger().info("Environment reset ...")
        while not self.new_episode_client.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('service not available, waiting again...')
        future = self.new_episode_client.call_async(req)
        #self.get_logger().debug("Reset env request sent ...")
        #rclpy.spin_until_future_complete(self, future,timeout_sec=1.0)
        #time_start = time.time()
        # spin until the reset service confirms completion
        while rclpy.ok():
            rclpy.spin_once(self,timeout_sec=2)
            if future.done():
                if future.result() is not None:
                    self.get_logger().debug("Environment reset done")
                    break
            #if time.time() - time_start > 10:
            #    raise ValueError("Actually not a ValueError")
        self.get_logger().debug("Performing null step to reset variables")
        # first (null) step refreshes internal state, second produces the
        # initial observation
        _,_,_, = self._step(reset_step = True)
        observation,_,_, = self._step()
        return observation, self.goal_pose_x, self.goal_pose_y
"""#############
Secondary functions (used in main functions)
#############"""
def send_action(self, twist):
    """Publish *twist* on cmd_vel while physics runs for ~0.1 s.

    Unpauses the simulation, publishes the velocity command, lets it
    act for a fixed wall-clock slice, then pauses physics again so
    subsequent sensor reads are taken from a frozen world.
    """
    self.get_logger().debug("unpausing...")
    self.unpause()
    self.get_logger().debug("publishing twist...")
    self.cmd_vel_pub.publish(twist)
    time.sleep(0.1)
    self.get_logger().debug("pausing...")
    self.pause()
def get_state(self):
    """Request the aggregated sensor state from the State service.

    Returns the State service response (carries scan, odom and the
    data_received flag read by _step). Propagates any exception raised
    by the service call.
    """
    self.get_logger().debug("Asking for the state...")
    req = State.Request()
    future = self.get_state_client.call_async(req)
    rclpy.spin_until_future_complete(self, future)
    try:
        state = future.result()
    except Exception as e:
        # BUG FIX: this branch logged through an undefined global
        # 'node' (NameError) and then fell through to 'return state'
        # with 'state' unbound. Log via self and re-raise instead.
        self.get_logger().error('Service call failed %r' % (e,))
        raise
    self.get_logger().debug("State received ...")
    return state
def process_state(self, state, reset_step):
    """Convert the raw State service reply into processed features.

    Returns (lidar_measurements, goal_distance, goal_angle, pos_x,
    pos_y, yaw, depth_image).
    """
    # NOTE(review): episode_step is incremented here AND in
    # update_state(), so the counter advances twice per _step() —
    # confirm whether that is intended before relying on the
    # step-based timeout in check_events().
    self.episode_step += 1
    # from LaserScan msg to 359-element filtered list
    lidar_measurements = self.filter_laserscan(state.scan)
    # from the filtered rays to the decimated selection of points
    lidar_measurements = self.process_laserscan(lidar_measurements)
    # from Odometry msg to x, y, yaw, distance and angle w.r.t. goal
    goal_distance, goal_angle, pos_x, pos_y, yaw = self.process_odom(state.odom, reset_step)
    # process the depth image cached by DEPTH_callback
    depth_image = self.process_depth_image()
    return lidar_measurements, goal_distance, goal_angle, pos_x, pos_y, yaw, depth_image
def check_events(self, lidar_measurements, goal_distance, step,
                 collision_range=0.25, goal_radius=0.2, max_steps=1000):
    """Classify the current situation into a terminal event, if any.

    Readings at or below 0.05 m are treated as sensor noise (the
    filter maps NaN readings to 0.0) and never count as collisions.

    The keyword arguments generalize the previously hard-coded
    thresholds; defaults keep the original behavior.

    Returns (done, event) with event in {"collision", "goal",
    "timeout", "None"}.
    """
    if 0.05 < min(lidar_measurements) <= collision_range:
        # An obstacle is inside the safety radius.
        self.get_logger().info('Collision')
        return True, "collision"
    if goal_distance < goal_radius:
        # Close enough to the goal pose.
        self.get_logger().info('Goal')
        return True, "goal"
    if step >= max_steps:
        # Episode ran too long.
        self.get_logger().info('Timeout')
        return True, "timeout"
    return False, "None"
def get_observation(self, twist, lidar_measurements, goal_distance, goal_angle, pos_x, pos_y, yaw, depth_image):
    """Assemble the flat LIDAR-based observation vector.

    Layout: [goal_distance, goal_angle, lidar_0, ..., lidar_N-1] as a
    float32 numpy array. The twist, pose and depth_image arguments are
    accepted for interface uniformity but unused in the LIDAR variant.
    """
    features = [float(goal_distance), float(goal_angle)]
    features.extend(float(point) for point in lidar_measurements)
    return np.array(features, dtype=np.float32)
def get_reward(self, twist, lidar_measurements, goal_distance, goal_angle, pos_x, pos_y, yaw, done, event):
    """Sparse event-based reward.

    +100 on goal, -50 on collision, -25 on timeout, 0 otherwise. Only
    *event* influences the value; the remaining arguments are kept for
    interface uniformity.
    """
    event_rewards = {"goal": 100, "collision": -50, "timeout": -25}
    reward = event_rewards.get(event, 0)
    self.get_logger().debug(str(reward))
    return reward
def get_goal(self, episode):
    """Return the next goal (x, y) from the predefined pose list.

    Goals are consumed sequentially via self.goal_index (an IndexError
    is raised once the list is exhausted). The *episode* argument is
    currently unused. Side effects: advances self.goal_index and
    stores the chosen pose in self.goal_pose_x / self.goal_pose_y.
    """
    # Kept for reference: easier goal set used in early experiments.
    goal_pose_list_easy = [[2.0, -1.5], [1.2, -1.8], [0.2, -2.0], [2.0, 2.0], [0.8, 2.0],
                           [-1.9, 1.2], [-1.9, -0.5], [-2.0, -2.0]]
    goal_pose_list = [[3.0, 2.0], [-3.0, -2.0], [-0.2, 4.0], [-2.0, -4.0], [-4.0, 1.0], [-2.5, -2.5], [2.2, 4.0], [3.5, 4.0], [2.5, -4.4], [4.5, 4.5], [-4.2, -4.2], [3.6, 3.6],
                      [1.0, -4.0], [-1.9, -4.0], [-4.5, -3.0], [-4.1, 4.1], [2.3, 4.2], [-2.4, 4.2], [1.3, -4.2], [-4.4, -1.0], [4.0, 2.5], [-4.5, 0.8], [-0.5, -4.2], [-4.1, 0.0]]
    x, y = goal_pose_list[self.goal_index]
    self.goal_index += 1
    print("Goal pose: ", x, y)
    self.get_logger().info("New goal")
    self.goal_pose_x = x
    self.goal_pose_y = y
    return x, y
def update_state(self, twist, lidar_measurements, goal_distance, goal_angle, pos_x, pos_y, yaw, done, event):
    """Snapshot the just-completed transition as previous_* attributes.

    Also advances the episode step counter and, on episode end, arms
    the init flag and resets the counter for the next episode.
    """
    self.episode_step += 1
    snapshot = {
        'twist': twist,
        'lidar_measurements': lidar_measurements,
        'goal_distance': goal_distance,
        'goal_angle': goal_angle,
        'pos_x': pos_x,
        'pos_y': pos_y,
        'yaw': yaw,
    }
    for name, value in snapshot.items():
        setattr(self, 'previous_' + name, value)
    # A finished episode triggers a full reset at the next step.
    if done:
        self.init_step = True
        self.episode_step = 0
"""#############
Auxiliar functions (used in secondary functions)
#############"""
def pause(self):
    """Pause Gazebo physics via the pause service (fire-and-forget).

    Waits for the service to be available, then sends the request
    without awaiting its result.
    """
    req = Empty.Request()
    while not self.pause_physics_client.wait_for_service(timeout_sec=1.0):
        self.get_logger().info('service not available, waiting again...')
    # Result intentionally not awaited.
    self.pause_physics_client.call_async(req)
def unpause(self):
    """Resume Gazebo physics via the unpause service (fire-and-forget).

    Waits for the service to be available, then sends the request
    without awaiting its result.
    """
    req = Empty.Request()
    while not self.unpause_physics_client.wait_for_service(timeout_sec=1.0):
        self.get_logger().info('service not available, waiting again...')
    # Result intentionally not awaited.
    self.unpause_physics_client.call_async(req)
def filter_laserscan(self, laserscan_msg):
    """Sanitise the first self.lidar_points range readings.

    Outliers are remapped so the DRL agent never sees them raw:
    +inf becomes 3.50 (treated as max range) and NaN becomes 0.00;
    every other reading is kept unchanged.
    """
    cleaned = []
    for reading in laserscan_msg.ranges[:self.lidar_points]:
        if reading == float('Inf'):
            cleaned.append(3.50)
        elif np.isnan(reading):
            cleaned.append(0.00)
        else:
            cleaned.append(reading)
    return cleaned
def process_laserscan(self, lidar_pointlist):
    """Decimate the filtered scan by a factor of 10.

    Tracks a running minimum over the rays and emits it at every 10th
    index (including index 0), so each output point is the closest
    obstacle within its angular sector. Also records the global
    minimum distance and its sector index on self.
    """
    selected = []
    running_min = 100
    for ray in range(self.lidar_points):
        if lidar_pointlist[ray] < running_min:
            running_min = lidar_pointlist[ray]
        if ray % 10 == 0:
            selected.append(running_min)
            running_min = 100
    self.min_obstacle_distance = min(selected)
    self.min_obstacle_angle = np.argmin(selected)
    return selected
def DEPTH_callback(self, msg):
    """ROS subscription callback: cache the latest depth frame.

    Converts the incoming sensor image (32FC1 encoding) through
    cv_bridge and stores it as a float32 numpy array in
    self.depth_image_raw for process_depth_image() to consume.
    """
    # NOTE(review): this placeholder is immediately overwritten by the
    # bridge conversion below; the (120, 160) zero array is never used.
    depth_image_raw = np.zeros((120,160), np.uint8)
    depth_image_raw = self.bridge.imgmsg_to_cv2(msg, '32FC1')
    self.depth_image_raw = np.array(depth_image_raw, dtype= np.float32)
#@tf.function
def process_depth_image(self):
    """Resize the cached raw depth frame to 60x80 and normalise it.

    Reads self.depth_image_raw (filled by DEPTH_callback), reshapes it
    to [120, 160, 1], resizes to [60, 80] with TensorFlow, and rescales
    via depth_rescale so values lie in [0, 1]. Stores the resulting
    shape in self.image_size and returns the float32 image.
    """
    img = np.array(self.depth_image_raw, dtype= np.float32)
    # assumes the raw frame is 120x160 — TODO confirm against the
    # camera configuration (commented-out code elsewhere used 240x320).
    img = tf.reshape(img, [120,160,1])
    img_resize = tf.image.resize(img,[60,80])
    depth_image = tf.reshape(img_resize, [60,80])
    depth_image = np.array(depth_image, dtype= np.float32)
    # Clip far readings at self.cutoff and normalise into [0, 1].
    depth_image = self.depth_rescale(depth_image, self.cutoff)
    self.image_size = depth_image.shape
    return depth_image
def depth_rescale(self, img, cutoff):
    """Saturate depth readings at *cutoff* and rescale into [0, 1].

    Useful to turn the (far) background into a uniform value in the
    depth images: every reading beyond *cutoff* collapses to 1.0 after
    normalisation. The input array is not modified; a new float32
    array is returned.
    """
    clipped = np.minimum(img, cutoff)
    normalised = clipped / cutoff
    return np.array(normalised, dtype=np.float32)
def process_odom(self, odom_msg, reset_step):
    """Extract pose from odometry and derive goal-relative quantities.

    Returns (goal_distance, goal_angle, pos_x, pos_y, yaw). Side
    effects: accumulates the travelled path length in
    self.total_distance and refreshes self.goal_distance /
    self.goal_angle. On a reset step the previous position is
    re-anchored to the current pose and the accumulator is cleared.
    """
    if reset_step:
        self.previous_pos_x = odom_msg.pose.pose.position.x
        self.previous_pos_y = odom_msg.pose.pose.position.y
        self.total_distance = 0
    pos_x = odom_msg.pose.pose.position.x
    pos_y = odom_msg.pose.pose.position.y
    _, _, yaw = self.euler_from_quaternion(odom_msg.pose.pose.orientation)
    goal_distance = math.sqrt(
        (self.goal_pose_x - pos_x)**2
        + (self.goal_pose_y - pos_y)**2)
    path_theta = math.atan2(
        self.goal_pose_y - pos_y,
        self.goal_pose_x - pos_x)
    # Wrap the heading error into [-pi, pi].
    goal_angle = path_theta - yaw
    if goal_angle > math.pi:
        goal_angle -= 2 * math.pi
    elif goal_angle < -math.pi:
        goal_angle += 2 * math.pi
    self.goal_distance = goal_distance
    self.goal_angle = goal_angle
    d_increment = math.sqrt((pos_x - self.previous_pos_x)**2 + (pos_y - self.previous_pos_y)**2)
    self.total_distance = self.total_distance + d_increment
    # BUG FIX: the previous position must advance every step. The
    # original assigned misspelled attributes (previous_pose_x /
    # previous__pose_y), so every increment was measured from the
    # reset pose and total_distance accumulated incorrectly.
    self.previous_pos_x = pos_x
    self.previous_pos_y = pos_y
    return goal_distance, goal_angle, pos_x, pos_y, yaw
def euler_from_quaternion(self, quat):
    """
    Converts quaternion (w in last place) to euler roll, pitch, yaw
    quat = [x, y, z, w]
    Angles are returned in radians.
    """
    qx, qy, qz, qw = quat.x, quat.y, quat.z, quat.w
    roll = np.arctan2(2 * (qw * qx + qy * qz), 1 - 2 * (qx * qx + qy * qy))
    pitch = np.arcsin(2 * (qw * qy - qz * qx))
    yaw = np.arctan2(2 * (qw * qz + qx * qy), 1 - 2 * (qy * qy + qz * qz))
    return roll, pitch, yaw
def main(args=None):
    """Entry point: init rclpy, run the environment node, shut down.

    NOTE(review): spin() followed by spin_once() on the same node is
    unusual — confirm the intended spinning strategy.
    """
    rclpy.init()
    pic4rl_environment = Pic4rlEnvironment()
    pic4rl_environment.spin()
    pic4rl_environment.get_logger().info('Node spinning ...')
    rclpy.spin_once(pic4rl_environment)
    pic4rl_environment.destroy()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
|
{"hexsha": "2d7881c407cbe92a3730cf2ded3c74f3ca1572c4", "size": 15853, "ext": "py", "lang": "Python", "max_stars_repo_path": "pic4rl/pic4rl/trash/pic4rl_env_test.py", "max_stars_repo_name": "PIC4SeRCentre/pic4rl", "max_stars_repo_head_hexsha": "1a1a511042bf332c96750de084d9ac3a302efa12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-08T10:40:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-08T10:40:47.000Z", "max_issues_repo_path": "pic4rl/pic4rl/trash/pic4rl_env_test.py", "max_issues_repo_name": "PIC4SeRCentre/pic4rl", "max_issues_repo_head_hexsha": "1a1a511042bf332c96750de084d9ac3a302efa12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pic4rl/pic4rl/trash/pic4rl_env_test.py", "max_forks_repo_name": "PIC4SeRCentre/pic4rl", "max_forks_repo_head_hexsha": "1a1a511042bf332c96750de084d9ac3a302efa12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2066929134, "max_line_length": 174, "alphanum_fraction": 0.7064278055, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4642}
|
import numpy as np
import tensorflow as tf
class TFGenerator(object):
    """Factory that wraps in-memory (data, labels) arrays as a batched,
    optionally shuffled tf.data.Dataset."""

    def __init__(self, data, labels, idxs=None, batch_size=32, shuffle=True, prefetch=4, map_fn=None, one_shot=False, data_dtype=tf.float32, labels_dtype=tf.float32):
        """
        Class to create a tf.data.Dataset given a set of data and associated labels.
        Use the create() function to return the dataset
        :param data: Input data
        :param labels: Input labels, can be None
        :param idxs: Indices of the data to use, if None all data are used
        :param batch_size: Batch size for training / inference
        :param shuffle: Whether to shuffle the data each time
        :param prefetch: How many batches to prefetch
        :param map_fn: Function applied to the data when creating a batch. Must take a tensor as input
        :param one_shot: If True, dataset will only iterate through the data once. (Use for validation / inference etc)
        :param data_dtype: tf dtype of the yielded data tensors
        :param labels_dtype: tf dtype of the yielded label tensors
        """
        self.data = data
        self.data_dtype = data_dtype
        if isinstance(labels, list):
            labels = np.asarray(labels)
        self.labels = labels
        self.labels_dtype = labels_dtype
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.prefetch = prefetch
        self.map_fn = map_fn
        self.one_shot = one_shot
        if idxs is None:
            self.idxs = np.arange(len(data))
        else:
            # copy so shuffling never mutates the caller's array
            self.idxs = idxs.copy()
        self.on_epoch_end()

    def __len__(self):
        """Batches per epoch; one-shot mode keeps the final partial batch."""
        if self.one_shot:
            return int(np.ceil(len(self.idxs) / self.batch_size))
        else:
            return int(np.floor(len(self.idxs) / self.batch_size))

    def on_epoch_end(self):
        """Reshuffle the iteration order (no-op when shuffle is False)."""
        if self.shuffle:
            np.random.shuffle(self.idxs)

    def generator(self):
        """
        Generates pairs of data and optionally, labels. After all data is processed, the index is randomised
        :return: generator of (data[i], labels[i]) or data[i] when labels is None
        """
        i = 0
        # BUG FIX: bound the loop by len(self.idxs), not len(self.data).
        # With a user-supplied subset of indices the old bound raised
        # IndexError (idxs shorter than data) or iterated a wrong count.
        while i < len(self.idxs):
            idx = self.idxs[i]
            if self.labels is None:
                yield self.data[idx]
            else:
                yield (self.data[idx], self.labels[idx])
            i += 1
        self.on_epoch_end()

    def create(self):
        """
        Creates the tf.data.Dataset
        :return: A tf.data.Dataset that iterates through batches of (data, label) pairs
        """
        if self.labels is not None:
            ds = tf.data.Dataset.from_generator(self.generator,
                                                output_types=(self.data_dtype, self.labels_dtype),
                                                output_shapes=(self.data[0].shape, self.labels[0].shape))
        else:
            ds = tf.data.Dataset.from_generator(self.generator,
                                                output_types=self.data_dtype,
                                                output_shapes=self.data[0].shape)
        # Per the tf.data docs, map() performance can be improved with
        # num_parallel_calls; the map function has to take Tensor input.
        if self.map_fn is not None:
            if self.labels is not None:
                ds = ds.map(lambda x, y: (self.map_fn(x), y), num_parallel_calls=tf.data.experimental.AUTOTUNE)
            else:
                # BUG FIX: a label-less dataset yields bare tensors, so
                # the two-argument lambda used above would crash here.
                ds = ds.map(self.map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        if self.one_shot is False:
            ds = ds.repeat()
        ds = ds.batch(self.batch_size).prefetch(self.prefetch)
        return ds

    @staticmethod
    def map_fn_divide_255(t):
        """Cast to float32 and rescale pixel values from [0, 255] to [0, 1]."""
        t = tf.cast(t, tf.float32)
        return tf.divide(t, 255.0)
if __name__ == "__main__":
    """
    Tests
    """
    # Smoke tests: a repeating dataset first, then the same generator
    # object switched to one-shot mode and iterated twice to observe
    # the per-epoch reshuffling.
    a = np.asarray(range(100))
    b = np.tile(np.asarray([0, 1]), 50)
    ds = TFGenerator(a, b, batch_size=9, shuffle=True)
    dg = ds.create()
    print("Continuous, batches per epoch = {}".format(len(ds)))
    di = iter(dg)
    for i in range(12):
        print(next(di)[0].numpy())
    # Flip mode on the existing object; len() now counts the last
    # partial batch as well.
    ds.one_shot = True
    print("Single epoch, batches per epoch = {}".format(len(ds)))
    dg = ds.create()
    di = iter(dg)
    for i in range(12):
        print(next(di)[0].numpy())
    print("Single epoch (2nd run), batches per epoch = {}".format(len(ds)))
    dg = ds.create()
    di = iter(dg)
    for i in range(12):
        print(next(di)[0].numpy())
|
{"hexsha": "acbb65688920776ac0fec75aafaf40257338ca89", "size": 4440, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf_generator.py", "max_stars_repo_name": "geometrikal/IIC-1", "max_stars_repo_head_hexsha": "6b337670d58fcb5c34b6ec34236ea2ed472e1d92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tf_generator.py", "max_issues_repo_name": "geometrikal/IIC-1", "max_issues_repo_head_hexsha": "6b337670d58fcb5c34b6ec34236ea2ed472e1d92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tf_generator.py", "max_forks_repo_name": "geometrikal/IIC-1", "max_forks_repo_head_hexsha": "6b337670d58fcb5c34b6ec34236ea2ed472e1d92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0, "max_line_length": 166, "alphanum_fraction": 0.581981982, "include": true, "reason": "import numpy", "num_tokens": 994}
|
import numpy as np
import pandas as pd
# Statistical process control helpers (x-bar, R and p charts) plus a
# worked p-chart example. Converted from lambda assignments to named
# functions (PEP 8 E731) with docstrings; values are unchanged.

def STDoSD(std, n):
    """Standard deviation of the sample mean: std / sqrt(n)."""
    return std / (n ** (1 / 2))


def UCL_nsig(x, nsig, std, n):
    """Upper control limit at nsig sigmas around the mean x."""
    return x + nsig * (STDoSD(std, n))


def LCL_nsig(x, nsig, std, n):
    """Lower control limit at nsig sigmas around the mean x."""
    return x - nsig * (STDoSD(std, n))


def UCL_x(MoSM, MF, ARoS):
    """Upper control limit for the x-bar chart: mean + factor * R-bar."""
    return MoSM + (MF * ARoS)


def UCL_R(aR, ARoS):
    """Upper control limit for the R chart: factor * R-bar."""
    return aR * ARoS


def LCL_x(MoSM, MF, ARoS):
    """Lower control limit for the x-bar chart: mean - factor * R-bar."""
    return MoSM - (MF * ARoS)


def LCL_xMax(MoSM, MF, ARoS):
    """Lower control limit for the x-bar chart, floored at zero."""
    return max([0, LCL_x(MoSM, MF, ARoS)])


def LCL_R(aR, ARoS):
    """Lower control limit for the R chart, floored at zero.

    Pass the lower-limit factor as ARoS; the formula is the same
    product as UCL_R, only the factor differs.
    """
    return max([0, UCL_R(aR, ARoS)])


# Worked example inputs
MoSM = 4
MF = 0.1
ARoS = 3
UR = 4.1
LR = 3.9


def MFdefp(Dlst, Ssz, n):
    """Mean fraction defective p-bar: total defects / (sample size * samples)."""
    return sum(Dlst) / (Ssz * n)


def STDoSDp(p, n):
    """Standard deviation of a proportion: sqrt(p * (1 - p) / n)."""
    return (p * (1 - p) / n) ** 0.5


# p-chart example: 21 samples of 100 units each.
pm = MFdefp([6, 5, 6, 4, 3, 4, 5, 3, 6, 3, 7, 5, 4, 3, 4, 5, 6, 5, 4, 3, 7], 100, 21)
z = 3
Op = STDoSDp(pm, 100)
print(pm)
print(Op)
print(pm + (z * Op))
print(pm - (z * Op))
|
{"hexsha": "83ce2cab421be2e7eb244d2de2164642b60aada1", "size": 1107, "ext": "py", "lang": "Python", "max_stars_repo_path": "OM300Ch6.py", "max_stars_repo_name": "JoshChima/OM300_Mastered", "max_stars_repo_head_hexsha": "db17c8ca1eb1045b8b96fde34f193767d038629f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OM300Ch6.py", "max_issues_repo_name": "JoshChima/OM300_Mastered", "max_issues_repo_head_hexsha": "db17c8ca1eb1045b8b96fde34f193767d038629f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OM300Ch6.py", "max_forks_repo_name": "JoshChima/OM300_Mastered", "max_forks_repo_head_hexsha": "db17c8ca1eb1045b8b96fde34f193767d038629f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5531914894, "max_line_length": 64, "alphanum_fraction": 0.6341463415, "include": true, "reason": "import numpy", "num_tokens": 532}
|
[STATEMENT]
lemma not_cong_is_anga1:
assumes "\<not> A B C CongA A' B' C'" and
"A B C AngAcute a"
shows "\<not> A' B' C' AngAcute a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> A' B' C' AngAcute a
[PROOF STEP]
using assms(1) assms(2) is_anga_conga
[PROOF STATE]
proof (prove)
using this:
\<not> A B C CongA A' B' C'
A B C AngAcute a
\<lbrakk>?A ?B ?C AngAcute ?a; ?A' ?B' ?C' AngAcute ?a\<rbrakk> \<Longrightarrow> ?A ?B ?C CongA ?A' ?B' ?C'
goal (1 subgoal):
1. \<not> A' B' C' AngAcute a
[PROOF STEP]
by auto
|
{"llama_tokens": 257, "file": "IsaGeoCoq_Tarski_Neutral", "length": 2}
|
import numpy as np
from sklearn.linear_model import LinearRegression
from pyuplift import BaseModel
class Cadit(BaseModel):
    """The class which implements the cadit approach [1].

    The outcome is transformed so that a single regression fitted on
    the transformed target directly estimates the uplift.

    Parameters
    ----------
    model : object, optional (default=sklearn.linear_model.LinearRegression)
        The regression model which will be used for predict uplift.
        It must expose ``fit`` and ``predict`` methods.
    """

    def __init__(self, model=None):
        # BUG FIX: the default used to be a single LinearRegression()
        # instance created at import time (shared mutable default), so
        # every Cadit() built without arguments shared and re-fitted
        # the same estimator. A fresh model is now created per instance.
        if model is None:
            model = LinearRegression()
        try:
            model.__getattribute__('fit')
            model.__getattribute__('predict')
        except AttributeError:
            raise ValueError('Model should contains two methods: fit and predict.')
        self.model = model

    def fit(self, X, y, t):
        """Build a model from the training set (X, y, t).

        Parameters
        ----------
        X : numpy ndarray, shape = [n_samples, n_features]
            Matrix of features.
        y : numpy array, shape = [n_samples,]
            Array of target of feature.
        t : numpy array, shape = [n_samples,]
            Array of treatments (0 = control, non-zero = treated).

        Returns
        -------
        self : object
        """
        z = self.__get_z_values(y, t)
        self.model.fit(X, z)
        return self

    def predict(self, X, t=None):
        """Predict an uplift for X.

        Parameters
        ----------
        X : numpy ndarray, shape = [n_samples, n_features]
            Matrix of features.
        t : numpy array or None
            Ignored; accepted for interface compatibility.

        Returns
        -------
        numpy array
            The predicted values.
        """
        return self.model.predict(X)

    def __get_z_values(self, y, t):
        """Compute the CADIT-transformed target, vectorized.

        Control rows are weighted by 1/P(t=1) and treated rows by
        -1/P(t=0), both around the global outcome mean — the same
        values as the original per-row loop, without the Python loop.
        """
        p_t0 = t[t == 0].shape[0] / t.shape[0]
        p_t1 = 1 - p_t0
        centered = y - y.mean()
        return np.where(t == 0, centered / p_t1, -centered / p_t0)
|
{"hexsha": "f53fb24ff645da5051da99b9b66355b89f81b555", "size": 4289, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyuplift/variable_selection/cadit.py", "max_stars_repo_name": "duketemon/pyuplift", "max_stars_repo_head_hexsha": "33daa0768ff333387cb8223ebfaedaffa57de335", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2019-02-24T07:41:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T05:07:26.000Z", "max_issues_repo_path": "pyuplift/variable_selection/cadit.py", "max_issues_repo_name": "duketemon/pyuplift", "max_issues_repo_head_hexsha": "33daa0768ff333387cb8223ebfaedaffa57de335", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-03-17T07:57:16.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-02T19:55:49.000Z", "max_forks_repo_path": "pyuplift/variable_selection/cadit.py", "max_forks_repo_name": "duketemon/pyuplift", "max_forks_repo_head_hexsha": "33daa0768ff333387cb8223ebfaedaffa57de335", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-07-17T12:36:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-16T11:36:35.000Z", "avg_line_length": 53.6125, "max_line_length": 110, "alphanum_fraction": 0.2786197249, "include": true, "reason": "import numpy", "num_tokens": 716}
|
from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import csv
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
from future.utils import raise_from
from pycocotools.coco import COCO
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from . import augmentation_pipelines
class CocoDataset(Dataset):
    """Coco dataset: detection samples served through the pycocotools API."""

    def __init__(self, root_dir, set_name='train2017', transform=None):
        """
        Args:
            root_dir (string): COCO directory.
            set_name (string): annotation split, e.g. 'train2017'.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.root_dir = root_dir
        self.set_name = set_name
        self.transform = transform
        self.coco = COCO(os.path.join(self.root_dir, 'annotations', 'instances_' + self.set_name + '.json'))
        self.image_ids = self.coco.getImgIds()
        self.load_classes()

    def load_classes(self):
        """Build contiguous training labels from the sparse COCO category ids."""
        # load class names (name -> label)
        categories = self.coco.loadCats(self.coco.getCatIds())
        categories.sort(key=lambda x: x['id'])
        self.classes = {}
        self.coco_labels = {}
        self.coco_labels_inverse = {}
        for c in categories:
            self.coco_labels[len(self.classes)] = c['id']
            self.coco_labels_inverse[c['id']] = len(self.classes)
            self.classes[c['name']] = len(self.classes)
        # also load the reverse (label -> name)
        self.labels = {}
        for key, value in self.classes.items():
            self.labels[value] = key

    def __len__(self):
        return len(self.image_ids)

    def __getitem__(self, idx):
        """Return {'img': float image in [0, 1], 'annot': [N, 5] boxes}."""
        img = self.load_image(idx)
        annot = self.load_annotations(idx)
        sample = {'img': img, 'annot': annot}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def load_image(self, image_index):
        """Read an image from disk and normalise it to float32 in [0, 1]."""
        image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
        path = os.path.join(self.root_dir, 'images', self.set_name, image_info['file_name'])
        img = skimage.io.imread(path)
        if len(img.shape) == 2:
            # grayscale -> 3-channel RGB
            img = skimage.color.gray2rgb(img)
        return img.astype(np.float32) / 255.0

    def load_annotations(self, image_index):
        """Return a [N, 5] array of (x1, y1, x2, y2, label) ground truths."""
        # get ground truth annotations
        annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
        annotations = np.zeros((0, 5))
        # some images appear to miss annotations (like image with id 257034)
        if len(annotations_ids) == 0:
            return annotations
        # parse annotations
        coco_annotations = self.coco.loadAnns(annotations_ids)
        for idx, a in enumerate(coco_annotations):
            # some annotations have basically no width / height, skip them
            if a['bbox'][2] < 1 or a['bbox'][3] < 1:
                continue
            annotation = np.zeros((1, 5))
            annotation[0, :4] = a['bbox']
            annotation[0, 4] = self.coco_label_to_label(a['category_id'])
            annotations = np.append(annotations, annotation, axis=0)
        # transform from [x, y, w, h] to [x1, y1, x2, y2]
        annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
        annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
        return annotations

    def coco_label_to_label(self, coco_label):
        """Sparse COCO category id -> contiguous training label."""
        return self.coco_labels_inverse[coco_label]

    def label_to_coco_label(self, label):
        """Contiguous training label -> sparse COCO category id."""
        return self.coco_labels[label]

    def image_aspect_ratio(self, image_index):
        """width / height taken from image metadata (no image decode)."""
        image = self.coco.loadImgs(self.image_ids[image_index])[0]
        return float(image['width']) / float(image['height'])

    def num_classes(self):
        # GENERALIZED: derive the count from the loaded categories
        # instead of hard-coding 80 — identical for the full COCO set,
        # correct for custom or partial annotation files.
        return len(self.classes)
class CSVDataset(Dataset):
"""CSV dataset."""
def __init__(self, train_file, class_list, transform=None, augment=0, pipeline=None, blacken=False, keep_name=True):
"""
Args:
train_file (string): CSV file with training annotations
annotations (string): CSV file with class list
test_file (string, optional): CSV file with testing annotations
"""
self.augment = augment
self.pipeline = pipeline
self.train_file = train_file
self.class_list = class_list
self.transform = transform
self.blacken = blacken
self.keep_name = keep_name
# parse the provided class file
try:
with self._open_for_csv(self.class_list) as file:
self.classes = self.load_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(self.class_list, e)), None)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, x1, y1, x2, y2, class_name
try:
with self._open_for_csv(self.train_file) as file:
self.image_data = self._read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(self.train_file, e)), None)
self.image_names = list(self.image_data.keys())
def _parse(self, value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is catched and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _open_for_csv(self, path):
"""
Open a file with flags suitable for csv.reader.
This is different for python2 it means with mode 'rb',
for python3 this means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_classes(self, csv_reader):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = self._parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def __len__(self):
return len(self.image_names)
def get(self, idx):
annot = self.load_annotations(idx)
img = self.load_image(idx, annot)
return img
def getfilenameindex(self, filename):
if filename in self.image_names:
index = self.image_names.index(filename)
return index
#sample = dataset_train[index]
#return sample
def __getitem__(self, idx):
annot = self.load_annotations(idx)
img = self.load_image(idx, annot)
filename = self.image_names[idx]
# <<<<<<< Updated upstream
# if self.augment:
# if random.uniform(0, 1) < 0.25:
# bbs = convert_bounding_boxes(img.shape, annot)
# img, bbs = seq(image=img, bounding_boxes=bbs)
# annot = revert_bounding_boxes(bbs)
# =======
img = (img * 255.0).astype(np.uint8) # Added
aug = 0
if random.uniform(0, 1) < self.augment:
aug = 1
#bbs = convert_bounding_boxes(img.shape, annot)
#img, bbs = seq(image=img, bounding_boxes=bbs)
#annot = revert_bounding_boxes(bbs)
pipeline = getattr(augmentation_pipelines, self.pipeline)
bbs = convert_bounding_boxes(img.shape, annot)
img, bbs = pipeline(image=img, bounding_boxes=bbs)
annot = revert_bounding_boxes(bbs)
img = img.astype(np.float32) / 255.0 # Added
# >>>>>>> Stashed changes
sample = {'img': img, 'annot': annot}
if self.transform:
sample = self.transform(sample)
if self.keep_name:
sample['filename'] = filename
sample['augmented'] = aug
sample['index'] = idx
return sample
def load_image(self, image_index, annot=None):
img = skimage.io.imread(self.image_names[image_index])
if annot is not None and self.blacken:
img = blacken(img, annot)
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img.astype(np.float32) / 255.0
def load_annotations(self, image_index):
# get ground truth annotations
annotation_list = self.image_data[self.image_names[image_index]]
annotations = np.zeros((0, 5))
# some images appear to miss annotations (like image with id 257034)
if len(annotation_list) == 0:
return annotations
# parse annotations
for idx, a in enumerate(annotation_list):
# some annotations have basically no width / height, skip them
x1 = a['x1']
x2 = a['x2']
y1 = a['y1']
y2 = a['y2']
if (x2 - x1) < 1 or (y2 - y1) < 1:
continue
annotation = np.zeros((1, 5))
annotation[0, 0] = x1
annotation[0, 1] = y1
annotation[0, 2] = x2
annotation[0, 3] = y2
annotation[0, 4] = self.name_to_label(a['class'])
annotations = np.append(annotations, annotation, axis=0)
return annotations
    def _read_annotations(self, csv_reader, classes):
        """Parse annotation CSV rows into ``{img_file: [annotation dicts]}``.

        Each row is ``img_file,x1,y1,x2,y2,class_name``; a row of the form
        ``img_file,,,,,`` registers an image with no annotations.

        Raises
        ------
        ValueError
            For malformed rows, non-integer coordinates, inverted boxes, or
            class names missing from *classes*.  ``raise_from`` (from six)
            suppresses the original unpacking traceback.
        """
        result = {}
        for line, row in enumerate(csv_reader):
            line += 1  # report 1-based line numbers in error messages
            try:
                img_file, x1, y1, x2, y2, class_name = row[:6]
            except ValueError:
                raise_from(ValueError(
                    'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line)),
                    None)
            if img_file not in result:
                result[img_file] = []
            # If a row contains only an image path, it's an image without annotations.
            if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
                continue
            x1 = self._parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
            y1 = self._parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
            x2 = self._parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
            y2 = self._parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
            # Check that the bounding box is valid.
            if x2 <= x1:
                raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
            if y2 <= y1:
                raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
            # check if the current class name is correctly present
            if class_name not in classes:
                raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
            result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
        return result
    def name_to_label(self, name):
        """Map a class name (str) to its integer label."""
        return self.classes[name]
    def label_to_name(self, label):
        """Map an integer label back to its class name."""
        return self.labels[label]
    def num_classes(self):
        """Number of classes, assuming integer labels are contiguous from 0."""
        return max(self.classes.values()) + 1
def image_aspect_ratio(self, image_index):
image = Image.open(self.image_names[image_index])
return float(image.width) / float(image.height)
class ContextDataset(CSVDataset):
    """CSV dataset extended with in-memory "context" frames gathered online.

    Item indices are laid out as: original images, then learned (archived)
    context, then the current context window, then the seed/init frame.
    """

    def __init__(self, train_file, class_list, memory=None, transform=None, augment=False, blacken=False, keep_name=False, aug_rate=0.3):
        super().__init__(train_file, class_list, transform=transform, augment=augment, blacken=blacken, keep_name=keep_name)
        self.context = []          # current window of (img, annotation) tuples
        self.learned_context = []  # archived (img, annotation) tuples
        self.init_frame = []       # seed frame(s) for the sequence
        self.augment_rate = aug_rate
        # Cap on how many learned-context frames may be retained.
        self.memory = len(self.image_names) if memory is None else memory

    def __len__(self):
        extra = len(self.learned_context) + len(self.context) + len(self.init_frame)
        return len(self.image_names) + extra

    def __getitem__(self, idx):
        # Plain dataset images are served by the parent class.
        if idx < len(self.image_names):
            return super().__getitem__(idx)
        idx -= len(self.image_names)
        if idx < len(self.learned_context):
            img, annot = self.learned_context[idx]
        else:
            idx -= len(self.learned_context)
            if self.context and idx < len(self.context):
                img, annot = self.context[idx]
            else:
                img, annot = self.init_frame[0]
        # Work on copies so stored frames are never mutated.
        img = img.copy().astype(np.float32) / 255.0
        annot = annot.copy()
        if self.augment and random.uniform(0, 1) < self.augment_rate:
            bbs = convert_bounding_boxes(img.shape, annot)
            img, bbs = seq(image=img, bounding_boxes=bbs)
            annot = revert_bounding_boxes(bbs)
        sample = {'img': img, 'annot': annot}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def context_len(self):
        """Number of frames in the current context window."""
        return len(self.context)

    def add_context(self, new_context, remember_rate=0.1):
        """Replace the context window, occasionally archiving the old one."""
        valid = [(img, bbs) for img, bbs in new_context
                 if img is not None and bbs is not None]
        # We don't want to remember every frame - could get overwhelming
        if random.uniform(0, 1) < remember_rate:
            self.learned_context += self.context
        self.context = valid
        # Make sure our learned context doesn't overwhelm our original dataset
        if len(self.learned_context) > self.memory:
            self.learned_context = random.sample(self.learned_context, self.memory // 2)

    def set_init_frame(self, first_frame):
        """Archive any previous seed frame(s) and install a new one."""
        self.learned_context += self.init_frame
        self.init_frame = first_frame
class InferenceDataset(Dataset):
    """Wrap a list of in-memory images for fast, retinanet-compatible inference.

    Each item is a dict with a float32 copy of the image scaled to [0, 1]
    and an empty (0, 5) annotation array, optionally run through *transform*.
    """

    def __init__(self, images, transform=None):
        self.images = images
        self.transform = transform

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img = self.images[idx].copy().astype(np.float32) / 255.0
        sample = {'img': img, 'annot': np.zeros((0, 5))}
        if self.transform:
            sample = self.transform(sample)
        return sample
def collater(data):
    """Collate dataset samples into a padded batch dict for retinanet.

    Images are zero-padded up to the largest spatial size in the batch and
    returned channel-first; annotations are padded with -1 rows so every
    sample carries the same number of boxes.  Fixes: removed a redundant
    duplicated ``if max_num_annots > 0`` nested check, and renamed the
    swapped widths/heights locals (shape[0] is rows, shape[1] is cols).
    """
    imgs = [s['img'] for s in data]
    annots = [s['annot'] for s in data]
    scales = [s['scale'] for s in data]
    pad_ws = [s['pad_w'] for s in data]
    pad_hs = [s['pad_h'] for s in data]
    filenames = [s['filename'] for s in data if 'filename' in s]
    augflags = [s['augmented'] for s in data]
    idxs = [s['index'] for s in data]

    batch_size = len(imgs)
    max_rows = max(int(img.shape[0]) for img in imgs)
    max_cols = max(int(img.shape[1]) for img in imgs)

    padded_imgs = torch.zeros(batch_size, max_rows, max_cols, 3)
    for i, img in enumerate(imgs):
        padded_imgs[i, :int(img.shape[0]), :int(img.shape[1]), :] = img

    # Pad annotations with -1 rows so every sample has the same count.
    max_num_annots = max(annot.shape[0] for annot in annots)
    if max_num_annots > 0:
        annot_padded = torch.ones((len(annots), max_num_annots, 5)) * -1
        for idx, annot in enumerate(annots):
            if annot.shape[0] > 0:
                annot_padded[idx, :annot.shape[0], :] = annot
    else:
        # No boxes anywhere in the batch: a single all -1 row per sample.
        annot_padded = torch.ones((len(annots), 1, 5)) * -1

    # (B, H, W, C) -> (B, C, H, W) as expected by the network.
    padded_imgs = padded_imgs.permute(0, 3, 1, 2)

    result = {'img': padded_imgs, 'annot': annot_padded, 'scale': scales,
              'pad_w': pad_ws, 'pad_h': pad_hs}
    if len(filenames) > 0:
        result['filename'] = filenames
    result['augmented'] = augflags
    result['index'] = idxs
    return result
class Resizer(object):
    """Resize a sample so its short side is ~min_side and pad dims to x32.

    Returns torch tensors plus the applied scale and padding amounts so
    detections can later be mapped back to the original image.
    """
    #def __call__(self, sample, min_side=608-4*32, max_side=1024-4*32):
    def __call__(self, sample, min_side=416-3*32, max_side=1056):
        image, annots = sample['img'], sample['annot']
        rows, cols, cns = image.shape

        smallest_side = min(rows, cols)
        # rescale the image so the smallest side is min_side
        scale = min_side / smallest_side
        # check if the largest side is now greater than max_side, which can happen
        # when images have a large aspect ratio
        largest_side = max(rows, cols)
        if largest_side * scale > max_side:
            scale = max_side / largest_side

        # resize the image with the computed scale
        image = skimage.transform.resize(image, (int(round(rows * scale)), int(round((cols * scale)))))
        rows, cols, cns = image.shape

        # Pad each spatial dim up to the next multiple of 32 (FPN requirement).
        # Fixed: previously `32 - rows % 32` added a full extra 32 pixels when
        # the size was already divisible by 32; the outer modulo keeps that
        # case at zero padding.
        pad_w = (32 - rows % 32) % 32
        pad_h = (32 - cols % 32) % 32

        new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
        new_image[:rows, :cols, :] = image.astype(np.float32)

        # Scale the box coordinates to the resized image.
        annots[:, :4] *= scale

        result = {'img': torch.from_numpy(new_image), 'annot': torch.from_numpy(annots), 'scale': scale, 'pad_w': pad_w, 'pad_h': pad_h}
        if 'filename' in sample:
            result['filename'] = sample['filename']
        return result
class Augmenter(object):
    """Randomly flip a sample horizontally, mirroring its boxes to match."""

    def __call__(self, sample, flip_x=0.5, gauss=0.3):
        # `gauss` is accepted for call-site compatibility but unused here.
        if np.random.rand() >= flip_x:
            return sample
        image, annots = sample['img'], sample['annot']
        image = image[:, ::-1, :]
        cols = image.shape[1]
        # Mirror x-coordinates: new_x1 = W - old_x2, new_x2 = W - old_x1.
        old_x1 = annots[:, 0].copy()
        old_x2 = annots[:, 2].copy()
        annots[:, 0] = cols - old_x2
        annots[:, 2] = cols - old_x1
        sample['img'] = image
        sample['annot'] = annots
        return sample
class Normalizer(object):
    """Standardize image pixels with ImageNet channel statistics."""

    def __init__(self):
        # Per-channel mean/std, shaped (1, 1, 3) to broadcast over (H, W, C).
        self.mean = np.array([[[0.485, 0.456, 0.406]]])
        self.std = np.array([[[0.229, 0.224, 0.225]]])

    def __call__(self, sample):
        image, annots = sample['img'], sample['annot']
        normalized = (image.astype(np.float32) - self.mean) / self.std
        result = {'img': normalized, 'annot': annots}
        if 'filename' in sample:
            result['filename'] = sample['filename']
        return result
class UnNormalizer(object):
    """Invert Normalizer: map a standardized tensor back towards pixel space.

    Fix: the ``mean == None`` / ``std == None`` comparisons were replaced
    with ``is None`` — identity is the correct (and idiomatic) test, and
    ``==`` breaks if an array-like mean/std is ever passed.
    """

    def __init__(self, mean=None, std=None):
        # Default to the same ImageNet statistics Normalizer applies.
        self.mean = [0.485, 0.456, 0.406] if mean is None else mean
        self.std = [0.229, 0.224, 0.225] if std is None else std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be un-normalized.
        Returns:
            Tensor: a new tensor with ``x * std + mean`` applied per channel.
        """
        tens = tensor.clone()
        # Mutate the clone in place; the input tensor is left untouched.
        for t, m, s in zip(tens, self.mean, self.std):
            t.mul_(s).add_(m)
        return tens
class ContextSampler(Sampler):
    """Batch sampler for ContextDataset mixing dataset, context and seed frames.

    Dataset index layout: ``[0, n_images)`` original images, then
    learned_context, then the context window, then the init frame.  Hence
    ``len(self)`` (= n_images + n_learned) is the offset of the context
    region and ``len(self) + c_len`` is the index of the seed frame.
    """
    def __init__(self, data_source, batch_size=1, crystalize=False):
        self.data_source = data_source
        self.batch_size = batch_size
        self.crystalize = crystalize
        self.groups = self.group_images()
    def __iter__(self):
        # Yield pre-built batches in a fresh random order each epoch.
        random.shuffle(self.groups)
        for group in self.groups:
            yield group
    def __len__(self):
        # Number of anchor samples: everything except the transient context
        # window and the seed frame.
        return len(self.data_source) - len(self.data_source.context) - len(self.data_source.init_frame)
    def group_images(self):
        if self.crystalize: # We no longer infer new bounding boxes, rather train exclusively on our learned memory
            order = list(range(len(self.data_source.learned_context)))
            random.shuffle(order)
            # Offset by n_images to land in the learned-context region; the
            # modulo wraps to fill the final (possibly short) batch.
            return [[order[x % len(order)] + len(self.data_source.image_names) for x in range(i, i + self.batch_size)] for i in
                    range(0, len(order), self.batch_size)]
        c_len = self.data_source.context_len()
        if self.data_source.context_len() == 0:
            # No context yet: plain one-image batches over the anchors.
            return [[i] for i in range(len(self))]
        groups = []
        for i in range(len(self)):
            # One original image plus two consecutive context frames.
            group = [i % len(self.data_source.image_names), len(self) + i % c_len, len(self) + (i + 1) % c_len] # Include an image from the original dataset
            if len(self.data_source.init_frame) > 0 and random.uniform(0,1) < 0.15: # 15% we include the seed frame
                group.append(len(self) + c_len)
            if len(self.data_source.learned_context) > 0:
                group.append(len(self.data_source.image_names) + i % len(self.data_source.learned_context)) # Include a memory frame
            groups.append(group)
        return groups
class AspectRatioBasedSampler(Sampler):
    """Batch sampler that groups images of similar aspect ratio together."""

    def __init__(self, data_source, batch_size, drop_last):
        self.data_source = data_source
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.groups = self.group_images()

    def __iter__(self):
        # Shuffle whole batches each epoch; batch contents stay fixed.
        random.shuffle(self.groups)
        yield from self.groups

    def __len__(self):
        n = len(self.data_source)
        if self.drop_last:
            return n // self.batch_size
        return (n + self.batch_size - 1) // self.batch_size

    def group_images(self):
        # Sort indices by aspect ratio so each batch holds similar shapes,
        # minimising padding waste in the collater.
        order = sorted(range(len(self.data_source)),
                       key=self.data_source.image_aspect_ratio)
        # Chunk into batches, wrapping around to fill the final one.
        return [[order[x % len(order)] for x in range(start, start + self.batch_size)]
                for start in range(0, len(order), self.batch_size)]
# New
class BalancedSampler(Sampler):
    """Batch sampler that draws one image per class bucket each batch.

    Images are bucketed by the least-represented class they contain (bucket
    -1 holds images without boxes); each batch takes one image from every
    non-empty bucket, so rare classes are effectively oversampled.
    """

    def __init__(self, data_source, batch_size, drop_last, ignore_negatives=False):
        self.data_source = data_source
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.ignore_negatives = ignore_negatives
        self.groups = self.group_images()

    def __iter__(self):
        random.shuffle(self.groups)
        yield from self.groups

    def __len__(self):
        return len(self.groups)

    def group_images(self):
        # One bucket per known label, plus -1 for images without boxes.
        buckets = {label: [] for label in self.data_source.labels}
        buckets[-1] = []
        indices = list(range(len(self.data_source)))
        random.shuffle(indices)
        for index in indices:
            annotations = self.data_source.load_annotations(index)
            if annotations.shape[0] == 0:
                buckets[-1].append(index)
                continue
            # Assign the image to its currently smallest matching bucket.
            best_label, best_size = -1, float('inf')
            for label, bucket in buckets.items():
                if label in annotations[:, 4] and len(bucket) < best_size:
                    best_label, best_size = label, len(bucket)
            buckets[best_label].append(index)
        for bucket in buckets.values():
            bucket.sort(key=self.data_source.image_aspect_ratio)
        if self.ignore_negatives:
            del buckets[-1]
        # Stretch every bucket to the length of the largest one and zip them
        # into batches: batch i takes one proportional element per bucket.
        largest = max(len(bucket) for bucket in buckets.values())
        groups = []
        for i in range(largest):
            groups.append([bucket[int(float(i) / float(largest) * len(bucket))]
                           for bucket in buckets.values() if len(bucket) > 0])
        self.batch_size = len(groups[0])
        return groups
# imgaug augmentation pipeline used for training-time augmentation.
# random_order=True shuffles the augmenter order independently per image.
seq = iaa.Sequential([
    iaa.Fliplr(0.5), # horizontal flips
    iaa.Flipud(0.6),
    iaa.Crop(percent=(0, 0.2)), # random crops
    # Small gaussian blur with random sigma between 0 and 0.5.
    # But we only blur about 50% of all images.
    iaa.Sometimes(
        0.5,
        iaa.GaussianBlur(sigma=(0, 0.25)),
    ),
    iaa.Sometimes(
        0.5,
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5),
    ),
    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
    iaa.LinearContrast((0.75, 1.5)),
    # Brighten/darken; per_channel=0.5 applies a per-channel factor half the time.
    iaa.Multiply((0.8, 1.3), per_channel=0.5),
    # Geometric transforms; order=[0,1] picks nearest/bilinear interpolation.
    iaa.Affine(
        scale={"x": (0.8, 1.25), "y": (0.8, 1.25)},
        translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
        rotate=(-15, 15),
        order=[0,1],
        shear=(-5,5),
    ),
    iaa.PiecewiseAffine(scale=(0.01, 0.05)),
    # Blend edge-detected variants in via simplex-noise masks.
    iaa.SimplexNoiseAlpha(iaa.OneOf([
        iaa.EdgeDetect(alpha=(0.5, 1.0)),
        iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),])),
    ], random_order=True) # apply augmenters in random order
def convert_bounding_boxes(img_shape, annotations):
    """Convert an (N, 5) annotation array into an imgaug BoundingBoxesOnImage."""
    boxes = [
        BoundingBox(x1=row[0], y1=row[1], x2=row[2], y2=row[3], label=row[4])
        for row in annotations
    ]
    return BoundingBoxesOnImage(boxes, shape=img_shape)
def revert_bounding_boxes(bbs):
    """Convert (augmented) bounding boxes back into an (N, 5) float array."""
    rows = [[bb.x1, bb.y1, bb.x2, bb.y2, bb.label] for bb in bbs]
    if not rows:
        return np.zeros((0, 5))
    return np.array(rows, dtype=np.float64)
# Black out all background (everything outside the annotation boxes).
def blacken(img, annots):
    """Zero every pixel of *img* outside the annotated boxes, in place.

    Fix: the mask was previously initialised with ``img >= 0``, which made
    the result depend on pixel values — any negative pixel (e.g. an already
    normalized image) would silently escape blackening.  An all-True mask
    expresses the intent directly.
    """
    mask = np.ones(img.shape, dtype=bool)
    # Unmask each annotated box region so it survives the blackening.
    for annot in annots:
        mask[int(annot[1]):int(annot[3]), int(annot[0]):int(annot[2]), :] = False
    img[mask] = 0
    return img
|
{"hexsha": "797cbb7f5ce496e3c52ee403e55492cec83bb1d0", "size": 26977, "ext": "py", "lang": "Python", "max_stars_repo_path": "MULTITASK_FILES/RETINANET_FILES/src/pytorch-retinanet/retinanet/dataloader.py", "max_stars_repo_name": "egoodman92/semi-supervised-surgery", "max_stars_repo_head_hexsha": "42f7af7e707e71ecd64b9f215fab5c07e2b71d70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MULTITASK_FILES/RETINANET_FILES/src/pytorch-retinanet/retinanet/dataloader.py", "max_issues_repo_name": "egoodman92/semi-supervised-surgery", "max_issues_repo_head_hexsha": "42f7af7e707e71ecd64b9f215fab5c07e2b71d70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MULTITASK_FILES/RETINANET_FILES/src/pytorch-retinanet/retinanet/dataloader.py", "max_forks_repo_name": "egoodman92/semi-supervised-surgery", "max_forks_repo_head_hexsha": "42f7af7e707e71ecd64b9f215fab5c07e2b71d70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2782719187, "max_line_length": 163, "alphanum_fraction": 0.5915038737, "include": true, "reason": "import numpy", "num_tokens": 6661}
|
from ctapipe.instrument.optics import OpticsDescription
from astropy import units as u
import pytest
def test_guess_optics():
    """guess_telescope(1855 px, 28 m) should resolve to single-mirror optics."""
    from ctapipe.instrument import guess_telescope

    guess = guess_telescope(1855, 28.0 * u.m)
    optics = OpticsDescription.from_name(guess.name)

    assert optics.equivalent_focal_length.to_value(u.m) == 28
    assert optics.num_mirrors == 1
def test_construct_optics():
    """Quantity arguments construct fine; bare numbers must raise TypeError."""
    common = dict(name='test', num_mirrors=1, num_mirror_tiles=100)

    # Valid: areas and focal lengths carry proper units.
    OpticsDescription(
        mirror_area=u.Quantity(550, u.m**2),
        equivalent_focal_length=u.Quantity(10, u.m),
        **common,
    )

    # Invalid: unitless numbers are rejected.
    with pytest.raises(TypeError):
        OpticsDescription(
            mirror_area=550,
            equivalent_focal_length=10,
            **common,
        )
|
{"hexsha": "d752dc2b4572a5fbbc8f667609c9bcfaba1f4e3c", "size": 827, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctapipe/instrument/tests/test_optics.py", "max_stars_repo_name": "Pluto9th/ctapipe", "max_stars_repo_head_hexsha": "8c4faa674a1949210cbda8cb9e2413dd6362afea", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ctapipe/instrument/tests/test_optics.py", "max_issues_repo_name": "Pluto9th/ctapipe", "max_issues_repo_head_hexsha": "8c4faa674a1949210cbda8cb9e2413dd6362afea", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ctapipe/instrument/tests/test_optics.py", "max_forks_repo_name": "Pluto9th/ctapipe", "max_forks_repo_head_hexsha": "8c4faa674a1949210cbda8cb9e2413dd6362afea", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0606060606, "max_line_length": 57, "alphanum_fraction": 0.6553808948, "include": true, "reason": "from astropy", "num_tokens": 203}
|
import cv2
from PIL import Image
import os
import sys
import torch
import argparse
import numpy as np
from modules import utils
from train import train
from data import VideoDataset
from torchvision import transforms
import data.transforms as vtf
from models.retinanet import build_retinanet
from gen_dets import gen_dets, eval_framewise_dets
from tubes import build_eval_tubes
from val import val
import torch.utils.data as data_utils
from data import custum_collate
def str2bool(v):
    """Interpret common truthy strings ('yes', 'true', 't', '1') as True."""
    return v.lower() in {"yes", "true", "t", "1"}
def set_video(inp, video_name):
    """Open *inp* for reading and create an XVID writer at the source FPS.

    Returns (capture, writer, fps).  NOTE(review): the writer uses a fixed
    1381x777 frame size regardless of the capture's native size — frames
    written later must match that size; confirm against the caller.
    """
    cap = cv2.VideoCapture(inp)
    ret, frame = cap.read()
    frheight, frwidth, ch = frame.shape
    print(frheight, '.......', frwidth)
    fps = round(cap.get(cv2.CAP_PROP_FPS))
    size = (1381, 777)  # (width, height) of the output video
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    video = cv2.VideoWriter(video_name, fourcc, fps, size)
    return cap, video, fps
def set_out_video(video_name):
    """Create a 12 fps, 1280x960 XVID writer for the visualisation output."""
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    return cv2.VideoWriter(video_name, fourcc, 12, (1280, 960))
def main():
    """Run ROAD inference and dump an annotated visualisation video.

    Parses the full training/evaluation argument set (shared with the
    training script; many flags are unused on this path), builds the
    validation dataloader and retinanet model, loads a checkpoint, then for
    every clip draws ground-truth boxes (green, labels above the box) and
    detections scoring above GEN_CONF_THRESH (red, labels below the box)
    onto the original frames and writes them to an XVID video.
    """
    parser = argparse.ArgumentParser(description='Training single stage FPN with OHEM, resnet as backbone')
    parser.add_argument('DATA_ROOT', help='Location to root directory for dataset reading') # /mnt/mars-fast/datasets/
    parser.add_argument('SAVE_ROOT', help='Location to root directory for saving checkpoint models') # /mnt/mars-alpha/
    parser.add_argument('MODEL_PATH',help='Location to root directory where kinetics pretrained models are stored')
    parser.add_argument('--MODE', default='train',
                        help='MODE can be train, gen_dets, eval_frames, eval_tubes define SUBSETS accordingly, build tubes')
    # Name of backbone network, e.g. resnet18, resnet34, resnet50, resnet101 resnet152 are supported
    parser.add_argument('--ARCH', default='resnet50',
                        type=str, help=' base arch')
    parser.add_argument('--MODEL_TYPE', default='I3D',
                        type=str, help=' base model')
    parser.add_argument('--ANCHOR_TYPE', default='RETINA',
                        type=str, help='type of anchors to be used in model')
    parser.add_argument('--SEQ_LEN', default=8,
                        type=int, help='NUmber of input frames')
    parser.add_argument('--TEST_SEQ_LEN', default=8,
                        type=int, help='NUmber of input frames')
    parser.add_argument('--MIN_SEQ_STEP', default=1,
                        type=int, help='DIFFERENCE of gap between the frames of sequence')
    parser.add_argument('--MAX_SEQ_STEP', default=1,
                        type=int, help='DIFFERENCE of gap between the frames of sequence')
    # if output heads are have shared features or not: 0 is no-shareing else sharining enabled
    # parser.add_argument('--MULIT_SCALE', default=False, type=str2bool,help='perfrom multiscale training')
    parser.add_argument('--HEAD_LAYERS', default=3,
                        type=int,help='0 mean no shareding more than 0 means shareing')
    parser.add_argument('--NUM_FEATURE_MAPS', default=5,
                        type=int,help='0 mean no shareding more than 0 means shareing')
    parser.add_argument('--CLS_HEAD_TIME_SIZE', default=3,
                        type=int, help='Temporal kernel size of classification head')
    parser.add_argument('--REG_HEAD_TIME_SIZE', default=3,
                        type=int, help='Temporal kernel size of regression head')
    # Name of the dataset only voc or coco are supported
    parser.add_argument('--DATASET', default='road',
                        type=str,help='dataset being used')
    parser.add_argument('--TRAIN_SUBSETS', default='train_3,',
                        type=str,help='Training SUBSETS seprated by ,')
    parser.add_argument('--VAL_SUBSETS', default='',
                        type=str,help='Validation SUBSETS seprated by ,')
    parser.add_argument('--TEST_SUBSETS', default='',
                        type=str,help='Testing SUBSETS seprated by ,')
    # Input size of image only 600 is supprted at the moment
    parser.add_argument('--MIN_SIZE', default=512,
                        type=int, help='Input Size for FPN')
    # data loading argumnets
    parser.add_argument('-b','--BATCH_SIZE', default=4,
                        type=int, help='Batch size for training')
    parser.add_argument('--TEST_BATCH_SIZE', default=1,
                        type=int, help='Batch size for testing')
    # Number of worker to load data in parllel
    parser.add_argument('--NUM_WORKERS', '-j', default=8,
                        type=int, help='Number of workers used in dataloading')
    # optimiser hyperparameters
    parser.add_argument('--OPTIM', default='SGD',
                        type=str, help='Optimiser type')
    parser.add_argument('--RESUME', default=0,
                        type=int, help='Resume from given epoch')
    parser.add_argument('--MAX_EPOCHS', default=30,
                        type=int, help='Number of training epoc')
    parser.add_argument('-l','--LR', '--learning-rate',
                        default=0.004225, type=float, help='initial learning rate')
    parser.add_argument('--MOMENTUM', default=0.9,
                        type=float, help='momentum')
    parser.add_argument('--MILESTONES', default='20,25',
                        type=str, help='Chnage the lr @')
    parser.add_argument('--GAMMA', default=0.1,
                        type=float, help='Gamma update for SGD')
    parser.add_argument('--WEIGHT_DECAY', default=1e-4,
                        type=float, help='Weight decay for SGD')
    # Freeze layers or not
    parser.add_argument('--FBN','--FREEZE_BN', default=True,
                        type=str2bool, help='freeze bn layers if true or else keep updating bn layers')
    parser.add_argument('--FREEZE_UPTO', default=1,
                        type=int, help='layer group number in ResNet up to which needs to be frozen')
    # Loss function matching threshold
    parser.add_argument('--POSTIVE_THRESHOLD', default=0.5,
                        type=float, help='Min threshold for Jaccard index for matching')
    parser.add_argument('--NEGTIVE_THRESHOLD', default=0.4,
                        type=float, help='Max threshold Jaccard index for matching')
    # Evaluation hyperparameters
    parser.add_argument('--EVAL_EPOCHS', default='30',
                        type=str, help='eval epochs to test network on these epoch checkpoints usually the last epoch is used')
    parser.add_argument('--VAL_STEP', default=2,
                        type=int, help='Number of training epoch before evaluation')
    parser.add_argument('--IOU_THRESH', default=0.5,
                        type=float, help='Evaluation threshold for validation and for frame-wise mAP')
    parser.add_argument('--CONF_THRESH', default=0.5,
                        type=float, help='Confidence threshold for to remove detection below given number')
    parser.add_argument('--NMS_THRESH', default=0.5,
                        type=float, help='NMS threshold to apply nms at the time of validation')
    parser.add_argument('--TOPK', default=10,
                        type=int, help='topk detection to keep for evaluation')
    parser.add_argument('--GEN_CONF_THRESH', default=0.5,
                        type=float, help='Confidence threshold at the time of generation and dumping')
    parser.add_argument('--GEN_TOPK', default=100,
                        type=int, help='topk at the time of generation')
    parser.add_argument('--GEN_NMS', default=0.5,
                        type=float, help='NMS at the time of generation')
    parser.add_argument('--CLASSWISE_NMS', default=False,
                        type=str2bool, help='apply classwise NMS/no tested properly')
    parser.add_argument('--JOINT_4M_MARGINALS', default=False,
                        type=str2bool, help='generate score of joints i.e. duplexes or triplet by marginals like agents and actions scores')
    ## paths hyper parameters
    parser.add_argument('--COMPUTE_PATHS', default=False,
                        type=str2bool, help=' COMPUTE_PATHS if set true then it overwrite existing ones')
    parser.add_argument('--PATHS_IOUTH', default=0.5,
                        type=float, help='Iou threshold for building paths to limit neighborhood search')
    parser.add_argument('--PATHS_COST_TYPE', default='score',
                        type=str, help='cost function type to use for matching, other options are scoreiou, iou')
    parser.add_argument('--PATHS_JUMP_GAP', default=4,
                        type=int, help='GAP allowed for a tube to be kept alive after no matching detection found')
    parser.add_argument('--PATHS_MIN_LEN', default=6,
                        type=int, help='minimum length of generated path')
    parser.add_argument('--PATHS_MINSCORE', default=0.1,
                        type=float, help='minimum score a path should have over its length')
    ## paths hyper parameters
    parser.add_argument('--COMPUTE_TUBES', default=False, type=str2bool, help='if set true then it overwrite existing tubes')
    parser.add_argument('--TUBES_ALPHA', default=0,
                        type=float, help='alpha cost for changeing the label')
    parser.add_argument('--TRIM_METHOD', default='none',
                        type=str, help='other one is indiv which works for UCF24')
    parser.add_argument('--TUBES_TOPK', default=10,
                        type=int, help='Number of labels to assign for a tube')
    parser.add_argument('--TUBES_MINLEN', default=5,
                        type=int, help='minimum length of a tube')
    parser.add_argument('--TUBES_EVAL_THRESHS', default='0.2,0.5',
                        type=str, help='evaluation threshold for checking tube overlap at evaluation time, one can provide as many as one wants')
    # parser.add_argument('--TRAIL_ID', default=0,
    #                     type=int, help='eval TUBES_Thtrshold at evaluation time')
    ###
    parser.add_argument('--LOG_START', default=10,
                        type=int, help='start loging after k steps for text/tensorboard')
    parser.add_argument('--LOG_STEP', default=10,
                        type=int, help='Log every k steps for text/tensorboard')
    parser.add_argument('--TENSORBOARD', default=1,
                        type=str2bool, help='Use tensorboard for loss/evalaution visualization')
    # Program arguments
    parser.add_argument('--MAN_SEED', default=123,
                        type=int, help='manualseed for reproduction')
    parser.add_argument('--MULTI_GPUS', default=True, type=str2bool, help='If more than 0 then use all visible GPUs by default only one GPU used ')
    # Use CUDA_VISIBLE_DEVICES=0,1,4,6 to select GPUs to use
    ## Parse arguments
    args = parser.parse_args()
    args = utils.set_args(args) # set directories and SUBSETS fo datasets
    # DataParallel is pointless (and can break) with a batch of one.
    args.MULTI_GPUS = False if args.BATCH_SIZE == 1 else args.MULTI_GPUS
    ## set random seeds and global settings
    np.random.seed(args.MAN_SEED)
    torch.manual_seed(args.MAN_SEED)
    # torch.cuda.manual_seed_all(args.MAN_SEED)
    torch.set_default_tensor_type('torch.FloatTensor')
    args = utils.create_exp_name(args)
    utils.setup_logger(args)
    logger = utils.get_logger(__name__)
    logger.info(sys.version)
    assert args.MODE in ['train','val','test','gen_dets','eval_frames', 'eval_tubes'], 'MODE must be from ' + ','.join(['train','test','tubes'])
    # Keep train/test sequence lengths consistent for the selected mode.
    if args.MODE == 'train':
        args.TEST_SEQ_LEN = args.SEQ_LEN
    else:
        args.SEQ_LEN = args.TEST_SEQ_LEN
    # NOTE(review): `ttransform` is built here but never used below.
    ttransform = transforms.Compose([
                        vtf.ResizeClip(args.MIN_SIZE, args.MAX_SIZE),
                        vtf.ToTensorStack(),
                        vtf.Normalize(mean=args.MEANS, std=args.STDS)])
    # ---- dataset selection and transforms ----
    if args.MODE in ['train','val']:
        # args.CONF_THRESH = 0.05
        args.SUBSETS = args.TRAIN_SUBSETS
        train_transform = transforms.Compose([
                            vtf.ResizeClip(args.MIN_SIZE, args.MAX_SIZE),
                            vtf.ToTensorStack(),
                            vtf.Normalize(mean=args.MEANS, std=args.STDS)])
        # train_skip_step = args.SEQ_LEN
        # if args.SEQ_LEN>4 and args.SEQ_LEN<=10:
        #     train_skip_step = args.SEQ_LEN-2
        if args.SEQ_LEN>10:
            train_skip_step = args.SEQ_LEN + (args.MAX_SEQ_STEP - 1) * 2 - 2
        else:
            train_skip_step = args.SEQ_LEN
        train_dataset = VideoDataset(args, train=True, skip_step=train_skip_step, transform=train_transform)
        logger.info('Done Loading Dataset Train Dataset')
        ## For validation set
        full_test = False
        args.SUBSETS = args.VAL_SUBSETS
        skip_step = args.SEQ_LEN*8
    else:
        args.SEQ_LEN = args.TEST_SEQ_LEN
        args.MAX_SEQ_STEP = 1
        args.SUBSETS = args.TEST_SUBSETS
        full_test = True #args.MODE != 'train'
        # Frames trimmed from clip boundaries; model-type dependent
        # (presumably to drop temporally-padded I3D outputs — confirm).
        args.skip_beggning = 0
        args.skip_ending = 0
        if args.MODEL_TYPE == 'I3D':
            args.skip_beggning = 2
            args.skip_ending = 2
        elif args.MODEL_TYPE != 'C2D':
            args.skip_beggning = 2
        skip_step = args.SEQ_LEN - args.skip_beggning
    val_transform = transforms.Compose([
                        vtf.ResizeClip(args.MIN_SIZE, args.MAX_SIZE),
                        vtf.ToTensorStack(),
                        vtf.Normalize(mean=args.MEANS,std=args.STDS)])
    val_dataset = VideoDataset(args, train=False, transform=val_transform, skip_step=skip_step, full_test=full_test)
    logger.info('Done Loading Dataset Validation Dataset')
    # Propagate dataset-derived label metadata onto args for the model.
    args.num_classes =  val_dataset.num_classes
    # one for objectness
    args.label_types = val_dataset.label_types
    args.num_label_types = val_dataset.num_label_types
    args.all_classes =  val_dataset.all_classes
    args.num_classes_list = val_dataset.num_classes_list
    args.num_ego_classes = val_dataset.num_ego_classes
    args.ego_classes = val_dataset.ego_classes
    args.head_size = 256
    # olympia_classes = val_dataset.olympia_classes
    # ---- model construction and checkpoint loading ----
    if args.MODE in ['train', 'val','test','gen_dets']:
        net = build_retinanet(args).cuda()
        logger.info('\nLets do dataparallel\n')
        net = torch.nn.DataParallel(net)
    net.eval()
    args.MODEL_PATH = args.SAVE_ROOT + 'model_{:06d}.pth'.format(args.EVAL_EPOCHS[0])
    logger.info('Loaded model from :: '+args.MODEL_PATH)
    net.load_state_dict(torch.load(args.MODEL_PATH))
    val_data_loader = data_utils.DataLoader(val_dataset, 1, num_workers=args.NUM_WORKERS,
                                            shuffle=False, pin_memory=True, collate_fn=custum_collate)
    video = set_out_video('ROAD_test_vid_'+str(args.GEN_CONF_THRESH)+'_.MP4')
    activation = torch.nn.Sigmoid().cuda()
    # ---- inference + drawing loop ----
    with torch.no_grad():
        for val_itr, (images, gt_boxes, gt_targets, ego_labels, batch_counts, img_indexs, wh,videonames,start_frames,img_names) in enumerate(val_data_loader):
            height, width = images.shape[-2:]
            print(val_itr)
            images = images.cuda(0, non_blocking=True)
            decoded_boxes, confidence, ego_preds = net(images)
            confidence = activation(confidence)
            det_boxes = []
            for nlt in range(args.num_label_types):
                numc = args.num_classes_list[nlt]
                det_boxes.append([[] for _ in range(numc)])
            for s in range(args.SEQ_LEN):
                # Draw on the original full-resolution frame from disk.
                image = cv2.imread(img_names[0][s])
                # image = cv2.resize(image,(width,height))
                org_height,org_width = image.shape[:2]
                decoded_boxes_frame = decoded_boxes[0, s].clone()
                cc = 0
                # Ground truth: green boxes, per-task labels stacked above.
                # NOTE(review): the slice boundaries of the target vector
                # (1:11 agents, 11:30 actions, 30:42 locations, 42:81
                # duplexes, 81:149 triplets) are hard-coded for the ROAD
                # label layout — confirm against the dataset definition.
                for gb in range(len(gt_targets[0][s])):
                    gt_box = gt_boxes[0][s][gb]
                    gt_agent_ind = np.where(gt_targets[0][s][gb][1:11].numpy().astype(int)==1)[0]
                    gt_agent = ''
                    for acc in range(len(gt_agent_ind)):
                        gt_agent = gt_agent+'_'+args.all_classes[1][gt_agent_ind[acc]]
                    gt_action_ind = np.where(gt_targets[0][s][gb][11:30].numpy().astype(int)==1)[0]
                    gt_action = ''
                    for acc in range(len(gt_action_ind)):
                        gt_action = gt_action+'_'+args.all_classes[2][gt_action_ind[acc]]
                    gt_location_ind = np.where(gt_targets[0][s][gb][30:42].numpy().astype(int)==1)[0]
                    gt_location = ''
                    for acc in range(len(gt_location_ind)):
                        gt_location = gt_location+'_'+args.all_classes[3][gt_location_ind[acc]]
                    gt_dup_ind = np.where(gt_targets[0][s][gb][42:81].numpy().astype(int)==1)[0]
                    gt_dup = ''
                    for acc in range(len(gt_dup_ind)):
                        gt_dup = gt_dup+'_'+args.all_classes[4][gt_dup_ind[acc]]
                    gt_trip_ind = np.where(gt_targets[0][s][gb][81:149].numpy().astype(int)==1)[0]
                    gt_trip = ''
                    for acc in range(len(gt_trip_ind)):
                        gt_trip = gt_trip+'_'+args.all_classes[5][gt_trip_ind[acc]]
                    # Rescale from network coords to the original frame size.
                    # NOTE(review): 682x512 appears to be the network input
                    # resolution (MIN_SIZE=512) — confirm.
                    gt_box[0] = (gt_box[0]/682)*org_width # width x1
                    gt_box[2] = (gt_box[2]/682)*org_width # width x2
                    gt_box[1] = (gt_box[1]/512)*org_height # height y1
                    gt_box[3] = (gt_box[3]/512)*org_height # height y2
                    # print(int(boxes[bb][0]), int(boxes[bb][1]),int(boxes[bb][2]), int(boxes[bb][3]))
                    cv2.rectangle(image, (int(gt_box[0]), int(gt_box[1])), (int(gt_box[2]), int(gt_box[3])), (0, 255, 0), 2)
                    cv2.putText(image, gt_agent, (int(gt_box[0]), int(gt_box[1]-100)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (36,255,12), 2)
                    cv2.putText(image, gt_action, (int(gt_box[0]), int(gt_box[1]-80)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (36,255,12), 2)
                    cv2.putText(image, gt_location, (int(gt_box[0]), int(gt_box[1]-60)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (36,255,12), 2)
                    cv2.putText(image, gt_dup, (int(gt_box[0]), int(gt_box[1]-40)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (36,255,12), 2)
                    cv2.putText(image, gt_trip, (int(gt_box[0]), int(gt_box[1]-20)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (36,255,12), 2)
                gt_boxes_batch = gt_boxes[0, s, :batch_counts[0, s],:].numpy()
                gt_labels_batch = gt_targets[0, s, :batch_counts[0, s]].numpy()
                decoded_boxes_batch = decoded_boxes[0,s]
                frame_gt = utils.get_individual_labels(gt_boxes_batch, gt_labels_batch[:,:1])
                # gt_boxes_all[0].append(frame_gt)
                confidence_batch = confidence[0,s]
                scores = confidence_batch[:, 0].squeeze().clone()
                cls_dets, save_data = utils.filter_detections_for_dumping(args, scores, decoded_boxes_batch, confidence_batch)
                # print(save_data)
                # print(conf)
                # Detections: red boxes, per-task argmax label drawn only if
                # its score clears GEN_CONF_THRESH.
                # NOTE(review): save_data slice offsets (5:15 agents, 15:34
                # actions, 34:46 locations, 46:85 duplexes, 85:153 triplets)
                # are shifted by the 4 box coords + objectness — confirm
                # against utils.filter_detections_for_dumping.
                for ppred in save_data:
                    bbox = ppred[:4]
                    # print(bbox)
                    agent_lab_ind = max(ppred[5:15])
                    if agent_lab_ind > args.GEN_CONF_THRESH:
                        agent_lab = args.all_classes[1][np.argmax(ppred[5:15])]
                    else:
                        agent_lab = ''
                    action_lab_ind = max(ppred[15:34])
                    if action_lab_ind > args.GEN_CONF_THRESH:
                        action_lab = args.all_classes[2][np.argmax(ppred[15:34])]
                    else:
                        action_lab = ''
                    loc_lab_ind = max(ppred[34:46])
                    if loc_lab_ind > args.GEN_CONF_THRESH:
                        loc_lab = args.all_classes[3][np.argmax(ppred[34:46])]
                    else:
                        loc_lab = ''
                    dup_lab_ind = max(ppred[46:85])
                    if dup_lab_ind > args.GEN_CONF_THRESH:
                        dup_lab = args.all_classes[4][np.argmax(ppred[46:85])]
                    else:
                        dup_lab = ''
                    trip_lab_ind = max(ppred[85:153])
                    if trip_lab_ind > args.GEN_CONF_THRESH:
                        trip_lab = args.all_classes[5][np.argmax(ppred[85:153])]
                    else:
                        trip_lab = ''
                    # print(agent_lab)
                    # print(action_lab)
                    # print(loc_lab)
                    # print(dup_lab)
                    # print(trip_lab)
                    bbox[0] = (bbox[0]/682)*org_width # width x1
                    bbox[2] = (bbox[2]/682)*org_width # width x2
                    bbox[1] = (bbox[1]/512)*org_height # height y1
                    bbox[3] = (bbox[3]/512)*org_height # height y2
                    cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 0, 255), 2)
                    cv2.putText(image, agent_lab, (int(bbox[0]), int(bbox[3]+20)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (11,12,255), 2)
                    cv2.putText(image, action_lab, (int(bbox[0]), int(bbox[3]+40)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (11,12,255), 2)
                    cv2.putText(image, loc_lab, (int(bbox[0]), int(bbox[3]+60)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (11,12,255), 2)
                    cv2.putText(image, dup_lab, (int(bbox[0]), int(bbox[3]+80)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (11,12,255), 2)
                    cv2.putText(image, trip_lab, (int(bbox[0]), int(bbox[3]+100)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (11,12,255), 2)
                video.write(image)
    video.release()
# Script entry point.
if __name__ == "__main__":
    main()
|
{"hexsha": "ae110fe2c8a060989aff5d3e8e6785d0216768a1", "size": 22220, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference_vis.py", "max_stars_repo_name": "salmank255/ROADSlowFast", "max_stars_repo_head_hexsha": "e939d8f79fe3eb6f3dd32e967a34530d00f45c8e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inference_vis.py", "max_issues_repo_name": "salmank255/ROADSlowFast", "max_issues_repo_head_hexsha": "e939d8f79fe3eb6f3dd32e967a34530d00f45c8e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference_vis.py", "max_forks_repo_name": "salmank255/ROADSlowFast", "max_forks_repo_head_hexsha": "e939d8f79fe3eb6f3dd32e967a34530d00f45c8e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.7305936073, "max_line_length": 158, "alphanum_fraction": 0.5923942394, "include": true, "reason": "import numpy", "num_tokens": 5255}
|
import simpy
import sys
sys.path #sometimes need this to refresh the path
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import torch
import numpy as np
from tabulate import tabulate
import pandas as pd
from pandas import DataFrame
import machine
import sequencing
import job_creation
import breakdown_creation
import heterogeneity_creation
import validation
class shopfloor:
    """One self-contained job-shop simulation instance.

    Builds ``m_no`` machines, a job creator, and either a heuristic
    sequencing rule or a DRL sequencing brain, then runs the SimPy
    environment for ``span`` time units.

    Parameters
    ----------
    env : simpy.Environment
        Simulation environment shared by all components.
    span : int
        Simulated time horizon (consumed by the job creator).
    m_no : int
        Number of machines to create.
    **kwargs
        Must contain ``seed`` (reproducible job stream).  Optionally either
        ``sequencing_rule`` (name of a function in the ``sequencing`` module)
        or ``arch`` + ``rwd_func`` (architecture flag name and reward
        function passed to ``validation.DRL_sequencing``).
    """

    def __init__(self, env, span, m_no, **kwargs):
        # STEP 1: store environment instance and simulation span
        self.env = env
        self.span = span
        self.m_no = m_no
        self.m_list = []
        # STEP 2: create instances of machines.
        # Direct setattr/append replaces the original exec()-built code
        # strings, which were fragile and unsafe; the m_<i> attributes are
        # kept for backward compatibility.
        for i in range(m_no):
            m = machine.machine(env, i, print=0)
            setattr(self, 'm_{}'.format(i), m)
            self.m_list.append(m)
        # STEP 3: initialize the initial jobs, distribute jobs to workcenters
        if 'seed' in kwargs:
            self.job_creator = job_creation.creation(
                self.env, self.span, self.m_list, [1, 50], 3, 0.9,
                seed=kwargs['seed'])
            # self.job_creator.output()
        else:
            # an unseeded run is not reproducible; refuse to continue
            print("WARNING: seed is not fixed !!")
            raise Exception
        # STEP 4: initialize all machines (each needs the full machine list
        # and the job creator)
        for m in self.m_list:
            m.initialization(self.m_list, self.job_creator)
        # STEP 6: initialize the scenario creator (disabled)
        '''
        intervals = np.ones(8)*self.span/8
        pt_range_list = [[2,11],[5,8],[3,12],[6,8],[4,8],[3,12],[2,8],[5,8]]
        self.scenario = heterogeneity_creation.creation(self.env, self.job_creator, intervals, pt_range_list)
        '''
        # specify the architecture of DRL / the heuristic sequencing rule
        if 'sequencing_rule' in kwargs:
            rule_name = kwargs['sequencing_rule']
            print("Taking over: machines use {} sequencing rule".format(rule_name))
            for m in self.m_list:
                try:
                    # look the rule up by name instead of exec()-ing a string
                    m.job_sequencing = getattr(sequencing, rule_name)
                except Exception:
                    print("Rule assigned to machine {} is invalid !".format(m.m_label))
                    raise Exception
        elif len(kwargs):
            # these strings are kept only for the log line below, to match
            # the original "---> arch,rwd_func <---" trace format
            arch = kwargs['arch'] + "=True"
            if type(kwargs['rwd_func']) is str:
                rwd_func = "reward_function='{}'".format(kwargs['rwd_func'])
            else:
                rwd_func = 'reward_function=' + str(kwargs['rwd_func'])
            # pass the architecture flag as a dynamic keyword argument
            # (replaces the original exec() of a generated call string)
            self.sequencing_brain = validation.DRL_sequencing(
                self.env, self.m_list, self.job_creator, self.span,
                reward_function=kwargs['rwd_func'], **{kwargs['arch']: True})
            print("---> {},{} <---".format(arch, rwd_func))

    def simulation(self):
        """Run the environment until all scheduled processes finish."""
        self.env.run()
# ---------------------------------------------------------------------------
# Tournament script: run one simulation per candidate architecture / reward
# function, aggregate the tardiness statistics, and optionally export them.
# ---------------------------------------------------------------------------

# dictionary to store shopfloors and production record
spf_dict = {}
production_record = {}

# candidate DRL architectures and the reward-function index used by each
arch_set = ['IQL_AS','I_DDQN_AS','I_DDQN_AS','TEST_AS','AS'] + ['IQL','I_DDQN','I_DDQN','Default','bsf_DDQN']
func_set = [12,12,3,3,10] + [12,12,3,12,'']
'''
arch_set = ['IQL_AS','I_DDQN_AS','I_DDQN_AS','TEST_AS','AS']
func_set = [12,12,3,3,10]
'''
# default column headers: architecture name + reward-function index
title = [x + str(func_set[i]) for i, x in enumerate(arch_set)]

# how long is the simulation, and how many machines
span = 1000
scale = 10

# per-iteration records of cumulative tardiness, maxima and tardy rates
sum_record = []
benchmark_record = []   # NOTE(review): appended below but never read
max_record = []
rate_record = []
iteration = 1
FIFO = 0            # set to 1 to add a FIFO benchmark run
export_result = 1   # set to 1 to export the raw records to Excel

if FIFO:
    title.insert(0, 'FIFO')
if export_result:
    # human-readable column names used in the exported spreadsheet
    title = ['I-G-DQN-AS','I-G-DDQN-AS','I-DDQN-AS','G-DDQN-AS','deep MARL-AS'] + ['I-G-DQN-MR','I-G-DDQN-MR','I-DDQN-MR','G-DDQN-MR','deep MARL-MR']

for run in range(iteration):
    print('******************* ITERATION-{} *******************'.format(run))
    sum_record.append([])
    benchmark_record.append([])
    max_record.append([])
    rate_record.append([])
    # one shared seed per iteration so every rule faces the same job stream
    seed = np.random.randint(2000000000)
    # optional FIFO benchmark run
    if FIFO:
        env = simpy.Environment()
        spf = shopfloor(env, span, scale, sequencing_rule='FIFO', seed=seed)
        spf.simulation()
        output_time, cumulative_tard, tard_mean, tard_max, tard_rate = spf.job_creator.tardiness_output()
        sum_record[run].append(cumulative_tard[-1])
        max_record[run].append(tard_max)
        rate_record[run].append(tard_rate)
    # one run per DRL architecture / reward-function pair
    for idx, x in enumerate(arch_set):
        env = simpy.Environment()
        spf = shopfloor(env, span, scale, arch=x, rwd_func=func_set[idx], seed=seed)
        spf.simulation()
        output_time, cumulative_tard, tard_mean, tard_max, tard_rate = spf.job_creator.tardiness_output()
        sum_record[run].append(cumulative_tard[-1])
        max_record[run].append(tard_max)
        rate_record[run].append(tard_rate)
    #print('Number of jobs created',spf.job_creator.total_no)
    #print(sum_record)

print('-------------- Complete Record --------------')
print(tabulate(sum_record, headers=title))
print('-------------- Average Performance --------------')
# aggregate over iterations; "avg_max" renamed from the original "max",
# which shadowed the builtin
avg = np.mean(sum_record, axis=0)
avg_max = np.mean(max_record, axis=0)
tardy_rate = np.around(np.mean(rate_record, axis=0) * 100, 2)
ratio = np.around(avg / avg.min() * 100, 2)
rank = np.argsort(ratio)
# winning rate: how often each rule achieved the lowest cumulative tardiness
winning_rate = np.zeros(len(title))
for idx in np.argmin(sum_record, axis=1):
    winning_rate[idx] += 1
winning_rate = np.around(winning_rate / iteration * 100, 2)
# print the rules from best to worst average performance (the original loop
# rebound its own iterable via "for rank,rule in enumerate(rank)")
for rule in rank:
    print("{}, avg.: {} | max: {} | %: {}% | tardy %: {}% | wining rate: {}%"\
        .format(title[rule], avg[rule], avg_max[rule], ratio[rule], tardy_rate[rule], winning_rate[rule]))
# check the parameter and scenario setting of the last run
spf.sequencing_brain.check_parameter()

if export_result:
    df_win_rate = DataFrame([winning_rate], columns=title)
    df_sum = DataFrame(sum_record, columns=title)
    df_tardy_rate = DataFrame(rate_record, columns=title)
    df_max = DataFrame(max_record, columns=title)
    # NOTE(review): Windows-style path separator kept from the original
    address = sys.path[0] + '\\Thesis_result_figure\\RAW_tournament.xlsx'
    dflist = [df_win_rate, df_sum, df_tardy_rate, df_max]
    sheetname = ['win rate', 'sum', 'tardy rate', 'maximum']
    # context manager replaces the ExcelWriter.save() call, which is
    # deprecated and removed in pandas 2.0
    with pd.ExcelWriter(address, engine="xlsxwriter") as Excelwriter:
        for i, df in enumerate(dflist):
            df.to_excel(Excelwriter, sheet_name=sheetname[i], index=False)
    print('export to {}'.format(address))
|
{"hexsha": "f628b0fec4facb2d3d6abdc0c632fe58c0b23e30", "size": 6699, "ext": "py", "lang": "Python", "max_stars_repo_path": "JSP/Thesis_tournament.py", "max_stars_repo_name": "Reallyhardtocreateaname/PhD-Thesis-Projects", "max_stars_repo_head_hexsha": "de0878f51ec66c9905227b2d260ffaa0b4946f1f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-25T07:51:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T07:52:23.000Z", "max_issues_repo_path": "JSP/Thesis_tournament.py", "max_issues_repo_name": "Harrison-Mao/PhD-Thesis-Projects", "max_issues_repo_head_hexsha": "a4063c3fa06602d95d9215ae9b6ed3e4b55dc017", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "JSP/Thesis_tournament.py", "max_forks_repo_name": "Harrison-Mao/PhD-Thesis-Projects", "max_forks_repo_head_hexsha": "a4063c3fa06602d95d9215ae9b6ed3e4b55dc017", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-23T03:47:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:50:37.000Z", "avg_line_length": 37.8474576271, "max_line_length": 154, "alphanum_fraction": 0.6154649948, "include": true, "reason": "import numpy", "num_tokens": 1711}
|
# coding: utf-8
# # Des dates qui font des nombres premiers ?
#
# Ce petit [notebook Jupyter](https://www.jupyter.org/), écrit en [Python](https://www.python.org/), a pour but de résoudre la question suivante :
#
# > *"En 2017, combien de jours ont leur date qui est un nombre premier ?"*
#
# Par exemple, en 2017, le 23 février donne `23022017` est premier, mais le 24 février donne `24022017` qui ne l'est pas.
# On veut trouver toutes les dates en 2017 qui sont des nombres premiers.
# ----
# ## 1. Il faut bien poser le problème
# - *Q:* Qu'entends-on par *"date qui est un nombre premier"* ?
# - Ça dépend de l'écriture de la date, que ce soit `12012017` pour le 12 janvier ou `01122017`.
# - Dans le doute, la solution présentée ici sera adaptable aux deux cas.
# - *Q:* Pourquoi se restreindre à 2017 ?
# - Parce que j'écris ce document en 2017...
# - Parce que c'est un nombre impair, et donc un nombre de la forme `12012017` a une chance d'être premier !
# - Mais dans le doute, la solution présentée ici sera adaptable à n'importe quelle année.
# - *Q:* Quelles années ont le plus de dates premières ?
# - *Q:* Est-ce qu'on peut inverser la question et chercher quel jour donne le plus de dates premières ?
# ----
# ## 2. Une première solution, naïve
# - On va d'abord écrire (ou importer) une fonction pour tester si un entier est premier,
# - Puis on va écrire une fonction qui transforme une date en son nombre,
# - Et enfin une boucle sur les 365 (ou 366) jours de l'année suffira à afficher les jours ayant une date première.
# ### 2.1. Tester la primalité, version tricheur
# [`sympy`](http://www.sympy.org/) propose une fonction [`sympy.isprime`](http://docs.sympy.org/latest/modules/ntheory.html#sympy.ntheory.primetest.isprime).
# In[1]:
from sympy import isprime
# Elle marche très bien, et est très rapide !
# In[2]:
[isprime(i) for i in [2, 3, 5, 7, 10, 11, 13, 17, 2017]]
# Pour des nombres de 8 chiffres (c'est tout petit), elle est vraiment rapide :
# In[3]:
from numpy.random import randint
get_ipython().run_line_magic('timeit', 'sum([isprime(i) for i in randint(1e8, 1e9-1, 10**4)])')
# $\implies$ $65 ~\text{ms}$ pour 10000 nombres à tester, ça me semble assez rapide pour ce qu'on veut en faire !
# ----
# ### 2.2. Transformer une date en nombre
# On va utiliser le module [`datetime`](https://docs.python.org/3/library/datetime.html) de la bibliothèque standard :
# In[4]:
from datetime import datetime
# In[5]:
today = datetime.today()
YEAR = today.year
print("On va travailler avec l'année", YEAR, "!")
# C'est ensuite facile de transformer une date en nombre, selon les deux formats.
# On utilise [le formatage avec `.format()`](https://pyformat.info/#datetime) (en Python 3) :
# In[6]:
def date_vers_nombre(date):
    """Encode *date* as the integer DDMMYYYY (jour-mois-année format)."""
    texte = date.strftime("%d%m%Y")
    return int(texte)
def date_vers_nombre_2(date):
    """Encode *date* as the integer MMDDYYYY (mois-jour-année format).

    A leading zero in the month disappears in the integer form.
    """
    return int(date.strftime("%m%d%Y"))
# In[7]:
date = datetime(YEAR, 1, 12)
print(date_vers_nombre(date))
print(date_vers_nombre_2(date)) # Le 0 initial est ignoré
# ### 2.3. Tester tous les jours de l'année
# On peut partir du 1er janvier de cette année, et ajouter des jours un par un.
# On utilise un itérateur (avec le mot clé `yield`), pour pouvoir facilement boucler sur tous les jours de l'année en cours :
# In[8]:
from datetime import timedelta
def tous_les_jours(year=None):
    """Yield every day of *year* (default: module-level YEAR) as a datetime.

    Bug fixes vs. the original:

    - ``return`` instead of ``raise StopIteration``: under PEP 479
      (Python 3.7+) raising StopIteration inside a generator is converted
      to a RuntimeError, so the original crashed at the end of every
      non-leap year.
    - ``year`` now defaults to None and falls back to ``YEAR`` at call
      time, instead of freezing YEAR's value when the function is defined.
    """
    if year is None:
        year = YEAR
    date = datetime(year, 1, 1)
    un_jour = timedelta(days=1)
    for _ in range(366):
        yield date
        date += un_jour
        if date.year > year:  # walked past Dec 31 -> stop cleanly
            return
# On peut vérifier que ça donne ce qu'on voulait :
# In[9]:
for date in tous_les_jours():
print("Le jour {:%d/%m/%Y} donne l'entier {:>8} au format jour-mois-année et {:>8} au format mois-jour-année.".format(date, date_vers_nombre(date), date_vers_nombre_2(date)))
# Maintenant, il suffit de boucler, de tester si l'entier est premier, et de n'afficher que ceux qui le sont :
# In[11]:
def date_premieres(conversion=date_vers_nombre, year=YEAR):
    """Yield the days of *year* whose *conversion* integer is prime."""
    for jour in tous_les_jours(year):
        nombre = conversion(jour)
        if isprime(nombre):
            yield jour
# On peut aussi facilement trouver la prochaine date qui sera première :
# In[12]:
def prochaine_date_premiere(date=None, conversion=date_vers_nombre):
    """Return the first day, from *date* (default: today) to the end of its
    year, whose *conversion* integer is prime; None if no such day exists.

    Bug fix: the original default ``date=datetime.today()`` was evaluated
    once at import time and went stale; None plus a call-time fallback
    yields the actual "today" on every call.
    """
    if date is None:
        date = datetime.today()
    year = date.year
    un_jour = timedelta(days=1)
    for i in range(0, 366):
        if isprime(conversion(date)):
            return date
        date += un_jour
        if date.year > year:  # crossed into next year: no prime date left
            return None
    return None
# In[13]:
date = datetime.today()
prochain = prochaine_date_premiere(date)
print("Pour le jour d'aujourd'hui ({:%x}), le prochain jour ayant une date première dans l'année {} est : {:%x} !".format(date, date.year, prochain))
# #### Pour les dates écrites "jour mois année" :
# In[14]:
for date in date_premieres(date_vers_nombre):
print("Le jour {:%d/%m/%Y} donne l'entier {:>8} qui est premier !".format(date, date_vers_nombre(date)))
# #### Pour les dates écrites "mois jour année" :
# In[15]:
for date in date_premieres(date_vers_nombre_2):
print("Le jour {:%d/%m/%Y} donne l'entier {:>8} qui est premier !".format(date, date_vers_nombre_2(date)))
# #### Comparaison
# Il y a 71 jours, en 2017, qui ont une date première si on les écrit "jour mois année", et 69 si on les écrits "mois jour année".
# In[16]:
len(list(date_premieres(date_vers_nombre)))
# In[17]:
len(list(date_premieres(date_vers_nombre_2)))
# ----
# ## 3. Résolvons le problème pour toutes les années entre 0 AC et 2500 AC
#
# On aimerait afficher une courbe montrant l'évolution du nombre de dates premières au cours des années, selon les deux formats.
# In[18]:
def nombres_dates_premieres(year=YEAR):
    """Count the prime dates of *year* in both formats.

    Returns ``[count_ddmmyyyy, count_mmddyyyy]``; even years trivially
    have no prime dates (the integer is even), so ``[0, 0]`` is returned
    without any primality test.
    """
    if year % 2 == 0:
        return [0, 0]
    compte_1 = sum(1 for _ in date_premieres(date_vers_nombre, year=year))
    compte_2 = sum(1 for _ in date_premieres(date_vers_nombre_2, year=year))
    return [compte_1, compte_2]
# In[19]:
nombres_dates_premieres()
# On vérifie que pour les années paires il n'y a pas de dates premières :
# In[20]:
len(list(date_premieres(date_vers_nombre, year=2016)))
# On peut donc récupérer tous ces nombres, jusqu'à l'année 3000.
# (On pourrait gagner du temps en ne considérant que les années impaires, et l'année 1)
# In[21]:
import numpy as np
def intervale_nombres_dates_premieres(year1=1, year2=3000):
    """Return a (year2-year1+1, 2) float array of per-year prime-date counts.

    Row i holds ``nombres_dates_premieres(year1 + i)`` — one column per
    date format.
    """
    lignes = [nombres_dates_premieres(an) for an in range(year1, year2 + 1)]
    return np.array(lignes, dtype=float)
# On essaie sur un intervale de 11 ans :
# In[22]:
get_ipython().run_cell_magic('time', '', 'nombres = intervale_nombres_dates_premieres(year1=2010, year2=2020)\nprint(nombres)')
# In[23]:
get_ipython().run_cell_magic('time', '', 'nombres = intervale_nombres_dates_premieres(year1=1, year2=3000)\nprint(nombres)')
# In[24]:
np.shape(nombres)
# Quelques statistiques :
# In[25]:
print("- Au format jour-mois-année, il y a en moyenne {:.3g} jours premiers par an.".format(np.mean(nombres[:, 0])))
print("- Et en moyenne {:.3g} jours premiers par an, en enlevant les années paires.".format(np.mean(nombres[:, 0][nombres[:, 0] > 0])))
print("- Au format mois-jour-année, il y a en moyenne {:.3g} jours premiers par an.".format(np.mean(nombres[:, 1])))
print("- Et en moyenne {:.3g} jours premiers par an, en enlevant les années paires.".format(np.mean(nombres[:, 1][nombres[:, 1] > 0])))
# On va afficher tout ça :
# In[28]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context="notebook", style="darkgrid", palette="hls", font="sans-serif", font_scale=1.4)
# In[37]:
def affiche_nombres_dates_premieres(nombres):
    """Scatter-plot the per-year prime-date counts for both formats."""
    series = [
        (0, 'b.', "Format jour-mois-année"),
        (1, 'g*', "Format mois-jour-année"),
    ]
    plt.figure()
    for colonne, style, etiquette in series:
        plt.plot(nombres[:, colonne], style, label=etiquette)
    plt.title("Nombres de dates premières par an")
    plt.legend(loc='best', numpoints=1, fancybox=True, shadow=True, framealpha=0.8)
    plt.xlabel("Année")
    plt.ylabel("Nombre de dates premières")
# In[38]:
affiche_nombres_dates_premieres(nombres)
# Avec un histogramme :
# In[39]:
def hist_nombres_dates_premieres(nombres, ind=0):
    """Histogram of the yearly prime-date counts for one format.

    ``ind`` selects the column (0 = jour-mois, 1 = mois-jour); years with
    a zero count (even years) are dropped before plotting.
    """
    colonne = nombres[:, ind]
    positifs = colonne[colonne > 0]
    if ind == 0:
        nom_format = 'jour-mois'
    else:
        nom_format = 'mois-jour'
    plt.figure()
    plt.hist(positifs, bins=100, label="Format %s-année" % nom_format, color='bg'[ind])
    plt.title("Répartition du nombres de dates premières par an")
    plt.legend(loc='best', numpoints=1, fancybox=True, shadow=True, framealpha=0.8)
    plt.xlabel("Nombres de dates premières")
    plt.ylabel("Nombres")
# In[40]:
hist_nombres_dates_premieres(nombres, 0)
hist_nombres_dates_premieres(nombres, 1)
# ## 4. Quels jours donnent le plus de nombres premiers ?
#
# On peut se poser une autre question : quelle date donne le plus de nombres premiers ?
#
# On a vu que le 23 février donne `23022017` qui est premier, et `24022017` ne l'est pas.
# Mais en sur 3000 années successives, est-ce que le 23 février donne plus souvent un nombre premier que le 24 février ?
# In[49]:
def meme_jour_toutes_les_annees(days=0, year1=1, year2=3000):
    """Yield the calendar day *days* after Jan 1, once for every year in
    [year1, year2] (same day and month, varying year)."""
    reference = datetime(year1, 1, 1) + timedelta(days=days)
    for annee in range(year1, year2 + 1):
        yield reference.replace(year=annee)
# In[52]:
list(meme_jour_toutes_les_annees(days=31+22, year1=2017, year2=2019))
# In[53]:
def nombre_annees_qui_donnent_date_premiere(days=0, year1=1, year2=3000, conversion=date_vers_nombre):
    """Among years year1..year2, yield the occurrences of the day *days*
    after Jan 1 whose *conversion* integer is prime."""
    candidats = meme_jour_toutes_les_annees(days=days, year1=year1, year2=year2)
    for candidat in candidats:
        if isprime(conversion(candidat)):
            yield candidat
# Par exemple, pour mon anniversaire, entre ma naissance et maintenant, une seule année a donné une date `1201YEAR` qui soit première :
# In[59]:
list(nombre_annees_qui_donnent_date_premiere(days=11, year1=1993, year2=2017))
# Le 23 février a plus de chance :
# In[61]:
list(nombre_annees_qui_donnent_date_premiere(days=31+22, year1=1993, year2=2017))
# On peut récupérer ces données pour tous les jours de l'année :
# In[62]:
def histogramme_par_jours(year1=1, year2=3000, conversion=date_vers_nombre):
    """Return a length-366 float array: entry d counts the years in
    [year1, year2] for which day d gives a prime date."""
    comptes = [
        len(list(nombre_annees_qui_donnent_date_premiere(
            days=d, year1=year1, year2=year2, conversion=conversion)))
        for d in range(366)
    ]
    return np.array(comptes, dtype=float)
# In[68]:
get_ipython().run_cell_magic('time', '', 'jours = histogramme_par_jours(year1=1993, year2=2017)\njours')
# In[73]:
get_ipython().run_cell_magic('time', '', 'jours_format1 = histogramme_par_jours(year1=1, year2=3000, conversion=date_vers_nombre)\njours_format1 /= 3000')
# In[74]:
get_ipython().run_cell_magic('time', '', 'jours_format2 = histogramme_par_jours(year1=1, year2=3000, conversion=date_vers_nombre_2)\njours_format2 /= 3000')
# ### 4.1. Visualisations, jour par jour
# Exploitons ces données :
# In[75]:
def hist_nombres_dates_premieres(jours, ind=0):
    """Plot the per-day prime-date frequency curve.

    NOTE: this redefines (shadows) the earlier histogram helper of the
    same name, as the original notebook did.
    """
    nom_format = 'jour-mois' if ind == 0 else 'mois-jour'
    plt.figure()
    plt.plot(jours, color='bg'[ind], label="Format %s-année" % nom_format)
    plt.title("Répartition du nombres de dates premières selon la date\nEntre l'an 1 et l'an 3000")
    plt.legend(loc='best', numpoints=1, fancybox=True, shadow=True, framealpha=0.8)
    plt.xlabel("Jour dans l'année")
    plt.ylabel("Fréquence")
# In[76]:
hist_nombres_dates_premieres(jours_format1, ind=0)
hist_nombres_dates_premieres(jours_format2, ind=1)
# In[94]:
def txt_date_de_jour(days):
    """Return the 'DD/MM' label of the day *days* after Jan 1 (of year 1).

    ``days`` is coerced to int so numpy integers (e.g. from argmax) work.
    """
    premier_janvier = datetime(year=1, month=1, day=1)
    jour = premier_janvier + timedelta(days=int(days))
    return jour.strftime("%d/%m")
# ### 4.2. Quelle jour donne le plus de dates premières ?
# Sans trop de surprise, c'est le 1er janvier qui gagne, pour les deux cas :
# In[98]:
txt_date_de_jour(np.argmax(jours_format1))
txt_date_de_jour(np.argmax(jours_format2))
# Mais en enlevant le 1er janvier et le dernier jour de l'année, on trouve une différence :
# In[100]:
np.argmax(jours_format1[1:-1])
txt_date_de_jour(_)
np.argmax(jours_format2[1:-1])
txt_date_de_jour(_)
# ----
# ## 5. Conclusions
# - Pour les années impaires, il y a en moyenne une soixantaine de jours qui ont une date première !
# - Le nombre est quasiment identique selon les deux formats, `jour-mois` ou `mois-jour`.
#
# Ce n'était pas très dur à calculer, mais intéressant.
#
# - Et en moyenne, un certain jour a entre 6% et 8% de donner une date première, entre l'an 1 et l'an 3000.
# - Avec les deux formats, le 1er janvier est le jour qui donne le plus de dates premières, et puis le 31 décembre, puis le 31 août pour le format `jour-mois` et le 8 janvier pour le format `mois-jour`.
# > C'est tout pour aujourd'hui les amis, [allez voir d'autres notebooks si vous êtes curieux !](https://github.com/Naereen/notebooks/).
#
# > [See this repository for other Python notebook doing numerical simulations](https://github.com/Naereen/notebooks/tree/master/simus/).
|
{"hexsha": "77aa6f08f3870d645ac9f2b793f325e5189f5be5", "size": 12996, "ext": "py", "lang": "Python", "max_stars_repo_path": "simus/Des_dates_qui_font_des_nombres_premiers.py", "max_stars_repo_name": "IEWbgfnYDwHRoRRSKtkdyMDUzgdwuBYgDKtDJWd/narnt", "max_stars_repo_head_hexsha": "0eda13a7b8663e218b4fe2e06a974b99db9ff166", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 102, "max_stars_repo_stars_event_min_datetime": "2016-06-25T09:30:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T21:02:49.000Z", "max_issues_repo_path": "simus/Des_dates_qui_font_des_nombres_premiers.py", "max_issues_repo_name": "operade/notebooks", "max_issues_repo_head_hexsha": "56f97e33e81b5e86905961b09184a41b7616fa90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2016-06-26T12:21:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-06T09:19:49.000Z", "max_forks_repo_path": "simus/Des_dates_qui_font_des_nombres_premiers.py", "max_forks_repo_name": "operade/notebooks", "max_forks_repo_head_hexsha": "56f97e33e81b5e86905961b09184a41b7616fa90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 44, "max_forks_repo_forks_event_min_datetime": "2017-05-13T23:54:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-17T15:34:24.000Z", "avg_line_length": 27.710021322, "max_line_length": 202, "alphanum_fraction": 0.6957525392, "include": true, "reason": "import numpy,from numpy,from sympy", "num_tokens": 3904}
|
[STATEMENT]
lemma verticesFrom_nth: "distinct (vertices f) \<Longrightarrow> d < length (vertices f) \<Longrightarrow>
v \<in> \<V> f \<Longrightarrow> (verticesFrom f v)!d = f\<^bsup>d\<^esup> \<bullet> v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v
[PROOF STEP]
proof (induct d)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>distinct (vertices f); 0 < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! 0 = f\<^bsup>0\<^esup> \<bullet> v
2. \<And>d. \<lbrakk>\<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v; distinct (vertices f); Suc d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc d = f\<^bsup>Suc d\<^esup> \<bullet> v
[PROOF STEP]
case 0
[PROOF STATE]
proof (state)
this:
distinct (vertices f)
0 < |vertices f|
v \<in> \<V> f
goal (2 subgoals):
1. \<lbrakk>distinct (vertices f); 0 < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! 0 = f\<^bsup>0\<^esup> \<bullet> v
2. \<And>d. \<lbrakk>\<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v; distinct (vertices f); Suc d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc d = f\<^bsup>Suc d\<^esup> \<bullet> v
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
distinct (vertices f)
0 < |vertices f|
v \<in> \<V> f
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
distinct (vertices f)
0 < |vertices f|
v \<in> \<V> f
goal (1 subgoal):
1. verticesFrom f v ! 0 = f\<^bsup>0\<^esup> \<bullet> v
[PROOF STEP]
by (simp add: verticesFrom_Def nextVertices_def)
[PROOF STATE]
proof (state)
this:
verticesFrom f v ! 0 = f\<^bsup>0\<^esup> \<bullet> v
goal (1 subgoal):
1. \<And>d. \<lbrakk>\<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v; distinct (vertices f); Suc d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc d = f\<^bsup>Suc d\<^esup> \<bullet> v
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>d. \<lbrakk>\<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v; distinct (vertices f); Suc d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc d = f\<^bsup>Suc d\<^esup> \<bullet> v
[PROOF STEP]
case (Suc n)
[PROOF STATE]
proof (state)
this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
goal (1 subgoal):
1. \<And>d. \<lbrakk>\<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v; distinct (vertices f); Suc d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc d = f\<^bsup>Suc d\<^esup> \<bullet> v
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
[PROOF STEP]
have dist2: "distinct (verticesFrom f v)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
goal (1 subgoal):
1. distinct (verticesFrom f v)
[PROOF STEP]
apply (frule_tac verticesFrom_congs)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; vertices f \<cong> verticesFrom f v\<rbrakk> \<Longrightarrow> distinct (verticesFrom f v)
[PROOF STEP]
by (auto simp: congs_distinct)
[PROOF STATE]
proof (state)
this:
distinct (verticesFrom f v)
goal (1 subgoal):
1. \<And>d. \<lbrakk>\<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v; distinct (vertices f); Suc d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc d = f\<^bsup>Suc d\<^esup> \<bullet> v
[PROOF STEP]
from Suc
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
[PROOF STEP]
have in2: "v \<in> set (verticesFrom f v)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
goal (1 subgoal):
1. v \<in> set (verticesFrom f v)
[PROOF STEP]
apply (frule_tac verticesFrom_congs)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; vertices f \<cong> verticesFrom f v\<rbrakk> \<Longrightarrow> v \<in> set (verticesFrom f v)
[PROOF STEP]
by (auto dest: congs_pres_nodes)
[PROOF STATE]
proof (state)
this:
v \<in> set (verticesFrom f v)
goal (1 subgoal):
1. \<And>d. \<lbrakk>\<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v; distinct (vertices f); Suc d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc d = f\<^bsup>Suc d\<^esup> \<bullet> v
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
v \<in> set (verticesFrom f v)
[PROOF STEP]
have vFrom: "(verticesFrom f v) = butlast (verticesFrom f v) @ [last (verticesFrom f v)]"
[PROOF STATE]
proof (prove)
using this:
v \<in> set (verticesFrom f v)
goal (1 subgoal):
1. verticesFrom f v = butlast (verticesFrom f v) @ [last (verticesFrom f v)]
[PROOF STEP]
apply (cases "(verticesFrom f v)" rule: rev_exhaust)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>v \<in> set (verticesFrom f v); verticesFrom f v = []\<rbrakk> \<Longrightarrow> verticesFrom f v = butlast (verticesFrom f v) @ [last (verticesFrom f v)]
2. \<And>ys y. \<lbrakk>v \<in> set (verticesFrom f v); verticesFrom f v = ys @ [y]\<rbrakk> \<Longrightarrow> verticesFrom f v = butlast (verticesFrom f v) @ [last (verticesFrom f v)]
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
verticesFrom f v = butlast (verticesFrom f v) @ [last (verticesFrom f v)]
goal (1 subgoal):
1. \<And>d. \<lbrakk>\<lbrakk>distinct (vertices f); d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! d = f\<^bsup>d\<^esup> \<bullet> v; distinct (vertices f); Suc d < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc d = f\<^bsup>Suc d\<^esup> \<bullet> v
[PROOF STEP]
from Suc
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
goal (1 subgoal):
1. verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
proof (cases "last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
2. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v
goal (2 subgoals):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
2. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
with Suc
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v
[PROOF STEP]
have "verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v
goal (1 subgoal):
1. verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
[PROOF STEP]
by (rule_tac Suc) auto
[PROOF STATE]
proof (state)
this:
verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
goal (2 subgoals):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
2. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
with True
[PROOF STATE]
proof (chain)
picking this:
last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v
verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
[PROOF STEP]
have "last (verticesFrom f v) = verticesFrom f v ! n"
[PROOF STATE]
proof (prove)
using this:
last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v
verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
goal (1 subgoal):
1. last (verticesFrom f v) = verticesFrom f v ! n
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
last (verticesFrom f v) = verticesFrom f v ! n
goal (2 subgoals):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
2. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
with Suc dist2 in2
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
distinct (verticesFrom f v)
v \<in> set (verticesFrom f v)
last (verticesFrom f v) = verticesFrom f v ! n
[PROOF STEP]
have "Suc n = length (verticesFrom f v)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
distinct (verticesFrom f v)
v \<in> set (verticesFrom f v)
last (verticesFrom f v) = verticesFrom f v ! n
goal (1 subgoal):
1. Suc n = |verticesFrom f v|
[PROOF STEP]
apply (rule_tac nth_last_Suc_n)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; distinct (verticesFrom f v); v \<in> set (verticesFrom f v); last (verticesFrom f v) = verticesFrom f v ! n\<rbrakk> \<Longrightarrow> distinct (verticesFrom f v)
2. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; distinct (verticesFrom f v); v \<in> set (verticesFrom f v); last (verticesFrom f v) = verticesFrom f v ! n\<rbrakk> \<Longrightarrow> n < |verticesFrom f v|
3. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; distinct (verticesFrom f v); v \<in> set (verticesFrom f v); last (verticesFrom f v) = verticesFrom f v ! n\<rbrakk> \<Longrightarrow> last (verticesFrom f v) = verticesFrom f v ! n
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Suc n = |verticesFrom f v|
goal (2 subgoals):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) = f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
2. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
with Suc
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
Suc n = |verticesFrom f v|
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
Suc n = |verticesFrom f v|
goal (1 subgoal):
1. verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
with Suc
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>distinct (vertices f); n < |vertices f|; v \<in> \<V> f\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = f\<^bsup>n\<^esup> \<bullet> v
distinct (vertices f)
Suc n < |vertices f|
v \<in> \<V> f
last (verticesFrom f v) \<noteq> f\<^bsup>n\<^esup> \<bullet> v
goal (1 subgoal):
1. verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
[PROOF STEP]
apply (simp add: nextVertices_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>verticesFrom f v ! n = (f \<bullet> ^^ n) v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> (f \<bullet> ^^ n) v\<rbrakk> \<Longrightarrow> verticesFrom f v ! Suc n = f \<bullet> ((f \<bullet> ^^ n) v)
[PROOF STEP]
apply (rule sym)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>verticesFrom f v ! n = (f \<bullet> ^^ n) v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> (f \<bullet> ^^ n) v\<rbrakk> \<Longrightarrow> f \<bullet> ((f \<bullet> ^^ n) v) = verticesFrom f v ! Suc n
[PROOF STEP]
apply (rule_tac nextElem_vFrom_suc1)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<lbrakk>verticesFrom f v ! n = (f \<bullet> ^^ n) v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> (f \<bullet> ^^ n) v\<rbrakk> \<Longrightarrow> distinct (vertices f)
2. \<lbrakk>verticesFrom f v ! n = (f \<bullet> ^^ n) v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> (f \<bullet> ^^ n) v\<rbrakk> \<Longrightarrow> v \<in> \<V> f
3. \<lbrakk>verticesFrom f v ! n = (f \<bullet> ^^ n) v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> (f \<bullet> ^^ n) v\<rbrakk> \<Longrightarrow> n < |vertices f|
4. \<lbrakk>verticesFrom f v ! n = (f \<bullet> ^^ n) v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> (f \<bullet> ^^ n) v\<rbrakk> \<Longrightarrow> last (verticesFrom f v) \<noteq> (f \<bullet> ^^ n) v
5. \<lbrakk>verticesFrom f v ! n = (f \<bullet> ^^ n) v; distinct (vertices f); Suc n < |vertices f|; v \<in> \<V> f; last (verticesFrom f v) \<noteq> (f \<bullet> ^^ n) v\<rbrakk> \<Longrightarrow> verticesFrom f v ! n = (f \<bullet> ^^ n) v
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
verticesFrom f v ! Suc n = f\<^bsup>Suc n\<^esup> \<bullet> v
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 8628, "file": "Flyspeck-Tame_FaceDivisionProps", "length": 46}
|
import types
import numpy as np
import torch
from torch.nn.functional import mse_loss
from all.core import State
from ._agent import Agent
from .a2c import A2CTestAgent
from .utils import make_grads_observable, flatten_grads
class QMCPG(Agent):
    """
    Quasi Monte Carlo Policy Gradient (QMCPG).

    A vanilla policy-gradient agent whose action noise is drawn from a quasi
    Monte Carlo engine rather than a pseudo-random generator.

    Args:
        features (FeatureNetwork): Shared feature layers.
        v (VNetwork): Value head which approximates the state-value function.
        policy (StochasticPolicy): Policy head which outputs an action distribution.
        qmc_engine: Noise source providing ``sample_action_noise(kind)`` and,
            when ``batch_reseeding`` is enabled, ``reseed()``.
        discount_factor (float): Discount factor for future rewards.
        min_batch_size (int): Updates will occur when an episode ends after at
            least this many episodes are seen. Set this to a large value in
            order to train on multiple episodes at once.
        batch_reseeding (bool): Reseed the QMC engine after every update.
        observe_grads (bool): Record gradient norm/variance diagnostics on update.
    """
    def __init__(
        self, features, v, policy, qmc_engine, discount_factor=0.99, min_batch_size=1, batch_reseeding=False, observe_grads=True,
    ):
        self.features = features
        self.v = v
        self.policy = policy
        self.qmc_engine = qmc_engine
        self.discount_factor = discount_factor
        self.min_batch_size = min_batch_size
        self.batch_reseeding = batch_reseeding
        self._current_batch_size = 0
        self._trajectories = []
        # Per-episode buffers, flushed into _trajectories at episode end.
        self._features = []
        self._log_pis = []
        self._rewards = []
        # Gradient diagnostics. _grad_cost and _prev_grads were previously
        # left unset until the first _train() call, so is_grad_available()
        # raised AttributeError when queried before any update.
        self._grad_var = None
        self._grad_norm = None
        self._grad_cost = None
        self._prev_grads = None
        self._observe_grads = observe_grads
        if observe_grads:
            make_grads_observable(self)

    def act(self, state):
        """Choose an action for ``state`` and record data for training."""
        if not self._features:
            return self._initial(state)
        if not state.done:
            return self._act(state, state.reward)
        return self._terminal(state, state.reward)

    def eval(self, state):
        """Return the policy output for ``state`` without recording anything."""
        return self.policy.eval(self.features.eval(state))

    def _initial(self, state):
        # First step of an episode: start fresh buffers.
        # NOTE(review): this step samples pseudo-randomly rather than through
        # the QMC engine, unlike _act() — confirm this asymmetry is intended.
        features = self.features(state)
        distribution = self.policy(features)
        action = distribution.sample()
        self._features = [features]
        self._log_pis.append(distribution.log_prob(action))
        return action

    def _act(self, state, reward):
        # Intermediate step: record the transition and sample via QMC noise.
        features = self.features(state)
        distribution = self.policy(features)
        action = self._sample_with_qmc(distribution)
        self._features.append(features)
        self._rewards.append(reward)
        self._log_pis.append(distribution.log_prob(action))
        return action

    def _terminal(self, state, reward):
        # Episode end: archive the trajectory and train once enough episodes
        # have been collected.
        self._rewards.append(reward)
        features = State.array(self._features)
        rewards = torch.tensor(self._rewards, device=features.device)
        log_pis = torch.stack(self._log_pis)
        self._trajectories.append((features, rewards, log_pis))
        self._current_batch_size += 1
        self._features = []
        self._rewards = []
        self._log_pis = []
        if self._current_batch_size >= self.min_batch_size:
            self._train()
        # The caller still expects an action for the terminal state.
        return self.policy.no_grad(self.features.no_grad(state)).sample()

    def _train(self):
        """Run one REINFORCE-with-baseline update over the stored trajectories."""
        # forward pass
        values = torch.cat(
            [self.v(features) for (features, _, _) in self._trajectories]
        )
        # forward passes for log_pis were stored during execution
        log_pis = torch.cat([log_pis for (_, _, log_pis) in self._trajectories])
        # compute targets
        targets = torch.cat(
            [
                self._compute_discounted_returns(rewards)
                for (_, rewards, _) in self._trajectories
            ]
        )
        advantages = targets - values.detach()
        # NOTE(review): the discount exponent runs over the concatenated batch,
        # so it does not reset at trajectory boundaries when min_batch_size > 1
        # — confirm this is intended.
        discounts = self.discount_factor ** torch.arange(
            values.shape[0], device=values.device
        )
        # compute losses
        value_loss = mse_loss(values, targets)
        policy_loss = -(discounts * advantages * log_pis).mean()
        # backward pass
        self.v.reinforce(value_loss)
        if self._observe_grads:
            grads = []
            grads += self.policy.reinforce(policy_loss, return_grad=True)[1]
            grads += self.features.reinforce(return_grad=True)[1]
            grads = flatten_grads(grads)
            self._grad_norm = float(torch.norm(grads))
            # Almost-unbiased two-sample variance estimator; on the first
            # update there is no previous gradient, so fall back to the
            # (biased) second moment. This replaces a bare ``except:`` that
            # silently swallowed every exception type.
            if self._prev_grads is not None:
                self._grad_var = float(0.5 * torch.sum((grads - self._prev_grads) ** 2))
            else:
                self._grad_var = float(torch.sum(grads ** 2))
            self._prev_grads = grads
            self._grad_cost = values.shape[0]
        else:
            self.policy.reinforce(policy_loss)
            self.features.reinforce()
        # cleanup
        self._trajectories = []
        self._current_batch_size = 0
        if self.batch_reseeding:
            self.qmc_engine.reseed()

    def _compute_discounted_returns(self, rewards):
        """Return the discounted return-to-go for every timestep of ``rewards``."""
        returns = rewards.clone()
        t = len(returns) - 1
        discounted_return = 0
        # Accumulate backwards so each step reuses the suffix sum.
        for reward in torch.flip(rewards, dims=(0,)):
            discounted_return = reward + self.discount_factor * discounted_return
            returns[t] = discounted_return
            t -= 1
        return returns

    def _sample_with_qmc(self, distribution):
        """Sample an action by pushing QMC noise through the inverse CDF."""
        if isinstance(distribution, torch.distributions.Categorical):
            # Inverse-CDF sampling: count how many CDF steps the uniform
            # noise exceeds. The final cumsum entry (== 1) is dropped.
            cmf = torch.cumsum(distribution.probs, dim=0)[:-1]
            noise = self.qmc_engine.sample_action_noise("uniform")
            noise = torch.as_tensor(noise, device=cmf.device, dtype=cmf.dtype)
            return torch.sum(noise > cmf)
        elif isinstance(distribution, torch.distributions.Normal):
            # Reparameterized Gaussian sample from standard-normal QMC noise.
            noise = self.qmc_engine.sample_action_noise("Gaussian")
            return distribution.mean + distribution.scale * noise
        # Replaces ``assert False``, which is stripped under ``python -O``.
        raise NotImplementedError(
            "QMC sampling is not implemented for {}".format(type(distribution).__name__)
        )

    def is_grad_available(self):
        """Return True when fresh gradient diagnostics are ready to be read."""
        return (self._grad_var is not None) and (self._grad_norm is not None) and (self._grad_cost is not None)

    def get_grad_info(self):
        """Return ``(grad_var, grad_norm, grad_cost)`` and reset them.

        Resetting makes the diagnostics unavailable until the next update.
        """
        grad_var = self._grad_var
        grad_norm = self._grad_norm
        grad_cost = self._grad_cost
        self._grad_var = None
        self._grad_norm = None
        self._grad_cost = None
        return grad_var, grad_norm, grad_cost
# QMCPG needs no special evaluation-time behavior, so reuse the A2C test agent.
QMCPGTestAgent = A2CTestAgent
|
{"hexsha": "bbaacbabddaf2886e366da08b67f00c20115f33c", "size": 6613, "ext": "py", "lang": "Python", "max_stars_repo_path": "all/agents/qmcpg.py", "max_stars_repo_name": "kstoneriv3/autonomous-learning-library-with-rrpg", "max_stars_repo_head_hexsha": "11f6dd4e72b4143944cf972a8f938406113d860f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "all/agents/qmcpg.py", "max_issues_repo_name": "kstoneriv3/autonomous-learning-library-with-rrpg", "max_issues_repo_head_hexsha": "11f6dd4e72b4143944cf972a8f938406113d860f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "all/agents/qmcpg.py", "max_forks_repo_name": "kstoneriv3/autonomous-learning-library-with-rrpg", "max_forks_repo_head_hexsha": "11f6dd4e72b4143944cf972a8f938406113d860f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1366120219, "max_line_length": 129, "alphanum_fraction": 0.6287615303, "include": true, "reason": "import numpy", "num_tokens": 1468}
|
[STATEMENT]
lemma pequiv_pr_trans[intro,trans]:
"\<lbrakk> a \<simeq> b; b \<sqsubseteq> c \<rbrakk> \<Longrightarrow> a \<sqsubseteq> c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>a \<simeq> b; b \<sqsubseteq> c\<rbrakk> \<Longrightarrow> a \<sqsubseteq> c
[PROOF STEP]
unfolding pequiv_def refines_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>P. sound P \<longrightarrow> wp a P = wp b P; \<forall>P. sound P \<longrightarrow> wp b P \<tturnstile> wp c P\<rbrakk> \<Longrightarrow> \<forall>P. sound P \<longrightarrow> wp a P \<tturnstile> wp c P
[PROOF STEP]
by(simp)
|
{"llama_tokens": 239, "file": "pGCL_Algebra", "length": 2}
|
(* Title: CTT/Arith.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1991 University of Cambridge
*)
section {* Elementary arithmetic *}
theory Arith
imports Bool
begin
subsection {* Arithmetic operators and their definitions *}
(* All arithmetic operators are defined by primitive recursion via rec. *)

definition
  add :: "[i,i]\<Rightarrow>i" (infixr "#+" 65) where
  "a#+b == rec(a, b, \<lambda>u v. succ(v))"

(* Truncated subtraction: iterates the predecessor rec(v, 0, \<lambda>x y. x). *)
definition
  diff :: "[i,i]\<Rightarrow>i" (infixr "-" 65) where
  "a-b == rec(b, a, \<lambda>u v. rec(v, 0, \<lambda>x y. x))"

(* Absolute difference as the sum of the two truncated subtractions. *)
definition
  absdiff :: "[i,i]\<Rightarrow>i" (infixr "|-|" 65) where
  "a|-|b == (a-b) #+ (b-a)"

definition
  mult :: "[i,i]\<Rightarrow>i" (infixr "#*" 70) where
  "a#*b == rec(a, 0, \<lambda>u v. b #+ v)"

(* Remainder: the counter v is reset to 0 whenever succ(v) |-| b reaches 0. *)
definition
  mod :: "[i,i]\<Rightarrow>i" (infixr "mod" 70) where
  "a mod b == rec(a, 0, %u v. rec(succ(v) |-| b, 0, %x y. succ(v)))"

(* Quotient: v is incremented exactly when the remainder wraps to 0. *)
definition
  div :: "[i,i]\<Rightarrow>i" (infixr "div" 70) where
  "a div b == rec(a, 0, \<lambda>u v. rec(succ(u) mod b, succ(v), \<lambda>x y. v))"

(* Alternative symbol for multiplication in xsymbols / HTML output. *)
notation (xsymbols)
  mult (infixr "#\<times>" 70)

notation (HTML output)
  mult (infixr "#\<times>" 70)

(* Bundle of all operator definitions, for unfolding in the proofs below. *)
lemmas arith_defs = add_def diff_def absdiff_def mult_def mod_def div_def
subsection {* Proofs about elementary arithmetic: addition, multiplication, etc. *}
(** Addition *)
(*typing of add: short and long versions*)
lemma add_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #+ b : N"
apply (unfold arith_defs)
apply typechk
done
lemma add_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a #+ b = c #+ d : N"
apply (unfold arith_defs)
apply equal
done
(*computation for add: 0 and successor cases*)
lemma addC0: "b:N \<Longrightarrow> 0 #+ b = b : N"
apply (unfold arith_defs)
apply rew
done
lemma addC_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> succ(a) #+ b = succ(a #+ b) : N"
apply (unfold arith_defs)
apply rew
done
(** Multiplication *)
(*typing of mult: short and long versions*)
lemma mult_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #* b : N"
apply (unfold arith_defs)
apply (typechk add_typing)
done
lemma mult_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a #* b = c #* d : N"
apply (unfold arith_defs)
apply (equal add_typingL)
done
(*computation for mult: 0 and successor cases*)
lemma multC0: "b:N \<Longrightarrow> 0 #* b = 0 : N"
apply (unfold arith_defs)
apply rew
done
lemma multC_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> succ(a) #* b = b #+ (a #* b) : N"
apply (unfold arith_defs)
apply rew
done
(** Difference *)
(*typing of difference*)
lemma diff_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a - b : N"
apply (unfold arith_defs)
apply typechk
done
lemma diff_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a - b = c - d : N"
apply (unfold arith_defs)
apply equal
done
(*computation for difference: 0 and successor cases*)
lemma diffC0: "a:N \<Longrightarrow> a - 0 = a : N"
apply (unfold arith_defs)
apply rew
done
(*Note: rec(a, 0, \<lambda>z w.z) is pred(a). *)
lemma diff_0_eq_0: "b:N \<Longrightarrow> 0 - b = 0 : N"
apply (unfold arith_defs)
apply (NE b)
apply hyp_rew
done
(*Essential to simplify FIRST!! (Else we get a critical pair)
succ(a) - succ(b) rewrites to pred(succ(a) - b) *)
lemma diff_succ_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> succ(a) - succ(b) = a - b : N"
apply (unfold arith_defs)
apply hyp_rew
apply (NE b)
apply hyp_rew
done
subsection {* Simplification *}
lemmas arith_typing_rls = add_typing mult_typing diff_typing
and arith_congr_rls = add_typingL mult_typingL diff_typingL
lemmas congr_rls = arith_congr_rls intrL2_rls elimL_rls
lemmas arithC_rls =
addC0 addC_succ
multC0 multC_succ
diffC0 diff_0_eq_0 diff_succ_succ
ML {*
structure Arith_simp_data: TSIMP_DATA =
struct
val refl = @{thm refl_elem}
val sym = @{thm sym_elem}
val trans = @{thm trans_elem}
val refl_red = @{thm refl_red}
val trans_red = @{thm trans_red}
val red_if_equal = @{thm red_if_equal}
val default_rls = @{thms arithC_rls} @ @{thms comp_rls}
val routine_tac = routine_tac (@{thms arith_typing_rls} @ @{thms routine_rls})
end
structure Arith_simp = TSimpFun (Arith_simp_data)
local val congr_rls = @{thms congr_rls} in
fun arith_rew_tac ctxt prems = make_rew_tac ctxt
(Arith_simp.norm_tac ctxt (congr_rls, prems))
fun hyp_arith_rew_tac ctxt prems = make_rew_tac ctxt
(Arith_simp.cond_norm_tac ctxt (prove_cond_tac, congr_rls, prems))
end
*}
method_setup arith_rew = {*
Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (arith_rew_tac ctxt ths))
*}
method_setup hyp_arith_rew = {*
Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (hyp_arith_rew_tac ctxt ths))
*}
subsection {* Addition *}
(*Associative law for addition*)
(*Commutative law for addition. Can be proved using three inductions.
Must simplify after first induction! Orientation of rewrites is delicate*)
lemma add_commute: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #+ b = b #+ a : N"
apply (NE a)
apply hyp_arith_rew
apply (rule sym_elem)
prefer 2
apply (NE b)
prefer 4
apply (NE b)
apply hyp_arith_rew
done
subsection {* Multiplication *}
(*right annihilation in product*)
lemma mult_0_right: "a:N \<Longrightarrow> a #* 0 = 0 : N"
apply (NE a)
apply hyp_arith_rew
done
(*right successor law for multiplication*)
lemma mult_succ_right: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #* succ(b) = a #+ (a #* b) : N"
apply (NE a)
apply (hyp_arith_rew add_assoc [THEN sym_elem])
apply (assumption | rule add_commute mult_typingL add_typingL intrL_rls refl_elem)+
done
(*Commutative law for multiplication*)
lemma mult_commute: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a #* b = b #* a : N"
apply (NE a)
apply (hyp_arith_rew mult_0_right mult_succ_right)
done
(*addition distributes over multiplication*)
lemma add_mult_distrib: "\<lbrakk>a:N; b:N; c:N\<rbrakk> \<Longrightarrow> (a #+ b) #* c = (a #* c) #+ (b #* c) : N"
apply (NE a)
apply (hyp_arith_rew add_assoc [THEN sym_elem])
done
(*Associative law for multiplication*)
lemma mult_assoc: "\<lbrakk>a:N; b:N; c:N\<rbrakk> \<Longrightarrow> (a #* b) #* c = a #* (b #* c) : N"
apply (NE a)
apply (hyp_arith_rew add_mult_distrib)
done
subsection {* Difference *}
text {*
Difference on natural numbers, without negative numbers
a - b = 0 iff a<=b a - b = succ(c) iff a>b *}
lemma diff_self_eq_0: "a:N \<Longrightarrow> a - a = 0 : N"
apply (NE a)
apply hyp_arith_rew
done
lemma add_0_right: "\<lbrakk>c : N; 0 : N; c : N\<rbrakk> \<Longrightarrow> c #+ 0 = c : N"
by (rule addC0 [THEN [3] add_commute [THEN trans_elem]])
(*Addition is the inverse of subtraction: if b<=x then b#+(x-b) = x.
An example of induction over a quantified formula (a product).
Uses rewriting with a quantified, implicative inductive hypothesis.*)
schematic_lemma add_diff_inverse_lemma:
"b:N \<Longrightarrow> ?a : PROD x:N. Eq(N, b-x, 0) --> Eq(N, b #+ (x-b), x)"
apply (NE b)
(*strip one "universal quantifier" but not the "implication"*)
apply (rule_tac [3] intr_rls)
(*case analysis on x in
(succ(u) <= x) --> (succ(u)#+(x-succ(u)) = x) *)
prefer 4
apply (NE x)
apply assumption
(*Prepare for simplification of types -- the antecedent succ(u)<=x *)
apply (rule_tac [2] replace_type)
apply (rule_tac [1] replace_type)
apply arith_rew
(*Solves first 0 goal, simplifies others. Two subgoals remain.
Both follow by rewriting, (2) using quantified induction hyp*)
apply intr (*strips remaining PRODs*)
apply (hyp_arith_rew add_0_right)
apply assumption
done
(*Version of above with premise b-a=0 i.e. a >= b.
Using ProdE does not work -- for ?B(?a) is ambiguous.
Instead, add_diff_inverse_lemma states the desired induction scheme
the use of RS below instantiates Vars in ProdE automatically. *)
lemma add_diff_inverse: "\<lbrakk>a:N; b:N; b - a = 0 : N\<rbrakk> \<Longrightarrow> b #+ (a-b) = a : N"
apply (rule EqE)
apply (rule add_diff_inverse_lemma [THEN ProdE, THEN ProdE])
apply (assumption | rule EqI)+
done
subsection {* Absolute difference *}
(*typing of absolute difference: short and long versions*)
lemma absdiff_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a |-| b : N"
apply (unfold arith_defs)
apply typechk
done
lemma absdiff_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a |-| b = c |-| d : N"
apply (unfold arith_defs)
apply equal
done
lemma absdiff_self_eq_0: "a:N \<Longrightarrow> a |-| a = 0 : N"
apply (unfold absdiff_def)
apply (arith_rew diff_self_eq_0)
done
lemma absdiffC0: "a:N \<Longrightarrow> 0 |-| a = a : N"
apply (unfold absdiff_def)
apply hyp_arith_rew
done
lemma absdiff_succ_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> succ(a) |-| succ(b) = a |-| b : N"
apply (unfold absdiff_def)
apply hyp_arith_rew
done
(*Note how easy using commutative laws can be? ...not always... *)
lemma absdiff_commute: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a |-| b = b |-| a : N"
apply (unfold absdiff_def)
apply (rule add_commute)
apply (typechk diff_typing)
done
(*If a+b=0 then a=0. Surprisingly tedious*)
schematic_lemma add_eq0_lemma: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> ?c : PROD u: Eq(N,a#+b,0) . Eq(N,a,0)"
apply (NE a)
apply (rule_tac [3] replace_type)
apply arith_rew
apply intr (*strips remaining PRODs*)
apply (rule_tac [2] zero_ne_succ [THEN FE])
apply (erule_tac [3] EqE [THEN sym_elem])
apply (typechk add_typing)
done
(*Version of above with the premise a+b=0.
Again, resolution instantiates variables in ProdE *)
lemma add_eq0: "\<lbrakk>a:N; b:N; a #+ b = 0 : N\<rbrakk> \<Longrightarrow> a = 0 : N"
apply (rule EqE)
apply (rule add_eq0_lemma [THEN ProdE])
apply (rule_tac [3] EqI)
apply typechk
done
(*Here is a lemma to infer a-b=0 and b-a=0 from a|-|b=0, below. *)
schematic_lemma absdiff_eq0_lem:
"\<lbrakk>a:N; b:N; a |-| b = 0 : N\<rbrakk> \<Longrightarrow> ?a : SUM v: Eq(N, a-b, 0) . Eq(N, b-a, 0)"
apply (unfold absdiff_def)
apply intr
apply eqintr
apply (rule_tac [2] add_eq0)
apply (rule add_eq0)
apply (rule_tac [6] add_commute [THEN trans_elem])
apply (typechk diff_typing)
done
(*if a |-| b = 0 then a = b
proof: a-b=0 and b-a=0, so b = a+(b-a) = a+0 = a*)
lemma absdiff_eq0: "\<lbrakk>a |-| b = 0 : N; a:N; b:N\<rbrakk> \<Longrightarrow> a = b : N"
apply (rule EqE)
apply (rule absdiff_eq0_lem [THEN SumE])
apply eqintr
apply (rule add_diff_inverse [THEN sym_elem, THEN trans_elem])
apply (erule_tac [3] EqE)
apply (hyp_arith_rew add_0_right)
done
subsection {* Remainder and Quotient *}
(*typing of remainder: short and long versions*)
lemma mod_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a mod b : N"
apply (unfold mod_def)
apply (typechk absdiff_typing)
done
lemma mod_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a mod b = c mod d : N"
apply (unfold mod_def)
apply (equal absdiff_typingL)
done
(*computation for mod : 0 and successor cases*)
lemma modC0: "b:N \<Longrightarrow> 0 mod b = 0 : N"
apply (unfold mod_def)
apply (rew absdiff_typing)
done
lemma modC_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow>
succ(a) mod b = rec(succ(a mod b) |-| b, 0, \<lambda>x y. succ(a mod b)) : N"
apply (unfold mod_def)
apply (rew absdiff_typing)
done
(*typing of quotient: short and long versions*)
lemma div_typing: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a div b : N"
apply (unfold div_def)
apply (typechk absdiff_typing mod_typing)
done
lemma div_typingL: "\<lbrakk>a = c:N; b = d:N\<rbrakk> \<Longrightarrow> a div b = c div d : N"
apply (unfold div_def)
apply (equal absdiff_typingL mod_typingL)
done
lemmas div_typing_rls = mod_typing div_typing absdiff_typing
(*computation for quotient: 0 and successor cases*)
lemma divC0: "b:N \<Longrightarrow> 0 div b = 0 : N"
apply (unfold div_def)
apply (rew mod_typing absdiff_typing)
done
lemma divC_succ: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow>
succ(a) div b = rec(succ(a) mod b, succ(a div b), \<lambda>x y. a div b) : N"
apply (unfold div_def)
apply (rew mod_typing)
done
(*Version of above with same condition as the mod one*)
lemma divC_succ2: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow>
succ(a) div b =rec(succ(a mod b) |-| b, succ(a div b), \<lambda>x y. a div b) : N"
apply (rule divC_succ [THEN trans_elem])
apply (rew div_typing_rls modC_succ)
apply (NE "succ (a mod b) |-|b")
apply (rew mod_typing div_typing absdiff_typing)
done
(*for case analysis on whether a number is 0 or a successor*)
lemma iszero_decidable: "a:N \<Longrightarrow> rec(a, inl(eq), \<lambda>ka kb. inr(<ka, eq>)) :
Eq(N,a,0) + (SUM x:N. Eq(N,a, succ(x)))"
apply (NE a)
apply (rule_tac [3] PlusI_inr)
apply (rule_tac [2] PlusI_inl)
apply eqintr
apply equal
done
(*Main Result. Holds when b is 0 since a mod 0 = a and a div 0 = 0 *)
lemma mod_div_equality: "\<lbrakk>a:N; b:N\<rbrakk> \<Longrightarrow> a mod b #+ (a div b) #* b = a : N"
apply (NE a)
apply (arith_rew div_typing_rls modC0 modC_succ divC0 divC_succ2)
apply (rule EqE)
(*case analysis on succ(u mod b)|-|b *)
apply (rule_tac a1 = "succ (u mod b) |-| b" in iszero_decidable [THEN PlusE])
apply (erule_tac [3] SumE)
apply (hyp_arith_rew div_typing_rls modC0 modC_succ divC0 divC_succ2)
(*Replace one occurrence of b by succ(u mod b). Clumsy!*)
apply (rule add_typingL [THEN trans_elem])
apply (erule EqE [THEN absdiff_eq0, THEN sym_elem])
apply (rule_tac [3] refl_elem)
apply (hyp_arith_rew div_typing_rls)
done
end
|
{"author": "Josh-Tilles", "repo": "isabelle", "sha": "990accf749b8a6e037d25012258ecae20d59ca62", "save_path": "github-repos/isabelle/Josh-Tilles-isabelle", "path": "github-repos/isabelle/Josh-Tilles-isabelle/isabelle-990accf749b8a6e037d25012258ecae20d59ca62/src/CTT/Arith.thy"}
|
import numpy as np
import open3d as o3d
import pybullet as p
# some codes are copied from https://github.com/ethz-asl/vgn.git
class CameraIntrinsic(object):
    """Intrinsic parameters of a pinhole camera model.

    Attributes:
        width (int): The width in pixels of the camera.
        height(int): The height in pixels of the camera.
        K: The 3x3 intrinsic camera matrix.
    """

    def __init__(self, width, height, fx, fy, cx, cy):
        self.width = width
        self.height = height
        # Standard pinhole calibration matrix: focal lengths on the
        # diagonal, principal point in the last column.
        self.K = np.array([
            [fx, 0.0, cx],
            [0.0, fy, cy],
            [0.0, 0.0, 1.0],
        ])

    @property
    def fx(self):
        """Focal length along x, in pixels."""
        return self.K[0, 0]

    @property
    def fy(self):
        """Focal length along y, in pixels."""
        return self.K[1, 1]

    @property
    def cx(self):
        """Principal point x-coordinate, in pixels."""
        return self.K[0, 2]

    @property
    def cy(self):
        """Principal point y-coordinate, in pixels."""
        return self.K[1, 2]

    def to_dict(self):
        """Serialize intrinsic parameters to a dict object."""
        return {
            "width": self.width,
            "height": self.height,
            # K is stored row-major as a flat 9-element list.
            "K": self.K.flatten().tolist(),
        }

    @classmethod
    def from_dict(cls, data):
        """Deserialize intrinisic parameters from a dict object."""
        flat_k = data["K"]
        return cls(
            data["width"],
            data["height"],
            fx=flat_k[0],
            fy=flat_k[4],
            cx=flat_k[2],
            cy=flat_k[5],
        )
class Camera(object):
    """Virtual RGB-D camera based on the PyBullet camera interface.

    Attributes:
        intrinsic: The camera intrinsic parameters.
    """

    def __init__(self, intrinsic, near=0.01, far=4):
        self.intrinsic = intrinsic
        self.near = near
        self.far = far
        self.proj_matrix = _build_projection_matrix(intrinsic, near, far)
        # OpenGL expects the projection matrix flattened column-major.
        self.gl_proj_matrix = self.proj_matrix.flatten(order="F")

    def render(self, client, extrinsic):
        """Render synthetic RGB and depth images.

        Args:
            client: PyBullet client used for rendering.
            extrinsic: Extrinsic parameters, T_cam_ref (None means identity).
        """
        # Construct OpenGL compatible view and projection matrices.
        if extrinsic is None:
            view = np.eye(4)
        else:
            view = extrinsic.copy()
        view[2, :] *= -1  # flip the Z axis
        result = client.getCameraImage(
            width=self.intrinsic.width,
            height=self.intrinsic.height,
            viewMatrix=view.flatten(order="F"),
            projectionMatrix=self.gl_proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL,
        )
        rgb = np.ascontiguousarray(result[2][:, :, :3])
        z_buffer = result[3]
        # Recover metric depth from the nonlinear OpenGL depth buffer.
        near, far = self.near, self.far
        depth = 1.0 * far * near / (far - (far - near) * z_buffer)
        return Frame(rgb, depth, self.intrinsic, extrinsic)
class Frame(object):
    """A rendered RGB-D observation plus the camera parameters that made it."""

    def __init__(self, rgb, depth, intrinsic, extrinsic=None):
        # Pack color and depth into a single Open3D RGBD image; depth is in
        # meters (depth_scale=1.0) and truncated at 2.0 m.
        self.rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            color=o3d.geometry.Image(rgb),
            depth=o3d.geometry.Image(depth),
            depth_scale=1.0,
            depth_trunc=2.0,
            convert_rgb_to_intensity=False
        )
        self.intrinsic = o3d.camera.PinholeCameraIntrinsic(
            width=intrinsic.width,
            height=intrinsic.height,
            fx=intrinsic.fx,
            fy=intrinsic.fy,
            cx=intrinsic.cx,
            cy=intrinsic.cy,
        )
        # Default to the identity transform when no extrinsic is supplied.
        if extrinsic is None:
            extrinsic = np.eye(4)
        self.extrinsic = extrinsic

    def color_image(self):
        """Return the color channel as a numpy array."""
        return np.asarray(self.rgbd.color)

    def depth_image(self):
        """Return the depth channel as a numpy array."""
        return np.asarray(self.rgbd.depth)

    def point_cloud(self):
        """Back-project the RGB-D image into a colored point cloud."""
        return o3d.geometry.PointCloud.create_from_rgbd_image(
            image=self.rgbd,
            intrinsic=self.intrinsic,
            extrinsic=self.extrinsic
        )
def _build_projection_matrix(intrinsic, near, far):
    """Turn pinhole intrinsics into a 4x4 OpenGL projection matrix.

    Combines a perspective matrix built from K with an orthographic
    normalization over the full image extent and the [near, far] range.
    """
    fx, fy = intrinsic.fx, intrinsic.fy
    cx, cy = intrinsic.cx, intrinsic.cy
    perspective = np.array(
        [
            [fx, 0.0, -cx, 0.0],
            [0.0, fy, -cy, 0.0],
            [0.0, 0.0, near + far, near * far],
            [0.0, 0.0, -1.0, 0.0],
        ]
    )
    ortho = _gl_ortho(0.0, intrinsic.width, intrinsic.height, 0.0, near, far)
    return np.matmul(ortho, perspective)
def _gl_ortho(left, right, bottom, top, near, far):
ortho = np.diag(
[2.0 / (right - left), 2.0 / (top - bottom), -2.0 / (far - near), 1.0]
)
ortho[0, 3] = -(right + left) / (right - left)
ortho[1, 3] = -(top + bottom) / (top - bottom)
ortho[2, 3] = -(far + near) / (far - near)
return ortho
|
{"hexsha": "47d2e4b3718daee35dea4d034db16e639af039e1", "size": 4710, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/perception.py", "max_stars_repo_name": "guodashun/plug-in", "max_stars_repo_head_hexsha": "d805f57af12bbf94a17a52e518903a02c267f4df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/perception.py", "max_issues_repo_name": "guodashun/plug-in", "max_issues_repo_head_hexsha": "d805f57af12bbf94a17a52e518903a02c267f4df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/perception.py", "max_forks_repo_name": "guodashun/plug-in", "max_forks_repo_head_hexsha": "d805f57af12bbf94a17a52e518903a02c267f4df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3734939759, "max_line_length": 87, "alphanum_fraction": 0.5643312102, "include": true, "reason": "import numpy", "num_tokens": 1216}
|
import numpy as np
import numpy.testing as npt
from qspace.bases import spf
from qspace.sampling import sphere, space
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal, run_module_suite,
assert_array_equal)
def test_spherical_polar_fourier():
    """Constructor stores its parameters; the zero basis evaluates to 0."""
    radial_order, angular_rank, zeta = 3, 4, 60.0
    basis = spf.SphericalPolarFourier(radial_order,
                                      angular_rank, zeta)
    assert_equal(basis.radial_order, radial_order)
    assert_equal(basis.angular_rank, angular_rank)
    assert_equal(basis.zeta, zeta)
    # Evaluate at a random direction on the unit sphere.
    r = 1.0
    theta = np.pi * np.random.rand()
    phi = 2 * np.pi * np.random.rand()
    assert_almost_equal(
        basis.spherical_function(r, theta, phi), 0)
def test_dimension():
    """The SPF basis with radial order 3 and angular rank 4 has 45 elements."""
    expected_dim = 45
    assert_equal(expected_dim, spf.dimension(3, 4))
def test_indices():
    """Round-trip flat index -> (n, l, m) -> flat index and check invariants."""
    radial_order, angular_rank = 4, 6
    for i in range(spf.dimension(radial_order, angular_rank)):
        n = spf.index_n(i, radial_order, angular_rank)
        l = spf.index_l(i, radial_order, angular_rank)
        m = spf.index_m(i, radial_order, angular_rank)
        # The (n, l, m) triple must map back to the same flat index.
        assert_equal(i, spf.index_i(n, l, m, radial_order, angular_rank))
        # Only even angular degrees occur, |m| <= l, and n < radial order.
        assert_equal(l % 2, 0)
        assert_(np.abs(m) <= l)
        assert_(n < radial_order)
def test_matrix():
    """Least-squares fit of an axially symmetric signal is sparse and exact."""
    shell_radii = [1.0, 2.0, 3.0, 4.0, 5.0]
    shell = sphere.jones(64)
    points = np.vstack([radius * shell for radius in shell_radii])
    r, theta, phi = space.to_spherical(points)
    radial_order, angular_rank, zeta = 4, 4, 1.0
    design = spf.matrix(r, theta, phi, radial_order, angular_rank, zeta)
    # A Gaussian-weighted z^2 signal: axially symmetric, low angular order.
    signal = np.exp(-r**2 / 2) * points[:, 2]**2
    coeffs = np.dot(np.linalg.pinv(design), signal)
    for i in range(spf.dimension(radial_order, angular_rank)):
        n = spf.index_n(i, radial_order, angular_rank)
        l = spf.index_l(i, radial_order, angular_rank)
        m = spf.index_m(i, radial_order, angular_rank)
        # Only m == 0, l <= 2, n <= 1 components should be excited.
        if m != 0 or l > 2 or n > 1:
            assert_almost_equal(coeffs[i], 0)
    # The reconstruction must reproduce the input signal.
    assert_almost_equal(signal, np.dot(design, coeffs))
# Allow running this test module directly via the legacy numpy test runner.
if __name__ == '__main__':
    run_module_suite()
|
{"hexsha": "54d6b578742251789eac72c11d5ddca82346d226", "size": 2401, "ext": "py", "lang": "Python", "max_stars_repo_path": "qspace/bases/tests/test_spf.py", "max_stars_repo_name": "ecaruyer/qspace", "max_stars_repo_head_hexsha": "5aa6b714bb16e7e769c5dd4ecdd91591f46e04a8", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-04-16T14:34:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T10:24:57.000Z", "max_issues_repo_path": "qspace/bases/tests/test_spf.py", "max_issues_repo_name": "oesteban/qspace", "max_issues_repo_head_hexsha": "2bb6eb01281a5357eb3bc142328c28e7981b6fa0", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qspace/bases/tests/test_spf.py", "max_forks_repo_name": "oesteban/qspace", "max_forks_repo_head_hexsha": "2bb6eb01281a5357eb3bc142328c28e7981b6fa0", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-04-20T14:10:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T19:17:10.000Z", "avg_line_length": 31.5921052632, "max_line_length": 73, "alphanum_fraction": 0.6609745939, "include": true, "reason": "import numpy,from numpy", "num_tokens": 687}
|
# Hyperparameters for momentum boosting: μ is the momentum coefficient and
# ϵ the step size. @with_kw (Parameters.jl) generates a keyword constructor
# with these defaults.
@with_kw mutable struct MomentumParameters{T <: AbstractFloat} <: AbstractPolicyParameters
    μ::T = 0.9
    ϵ::T = 1e-3
end
# Trait hierarchy selecting the momentum flavour at the type level.
abstract type MomentumTrait end
abstract type Classical <: MomentumTrait end
abstract type Nesterov <: MomentumTrait end
# Momentum boosting state: hyperparameters plus the velocity vector ν.
# The inner constructor dispatches on the trait type (Classical or Nesterov)
# and starts with an empty velocity; initialize! sizes it later.
struct Momentum{T <: MomentumTrait} <: AbstractBoosting
    params::MomentumParameters{Float64}
    ν::Vector{Float64}
    function (::Type{Momentum})(::Type{T}; kwargs...) where T <: MomentumTrait
        return new{T}(MomentumParameters(; kwargs...),Vector{Float64}())
    end
end
# Size the velocity buffer to match the iterate x₀ and reset it to zero.
function initialize!(momentum::Momentum,x₀::Vector{Float64})
    resize!(momentum.ν, length(x₀))
    fill!(momentum.ν, 0.0)
end
# Accessor for the boosting hyperparameters.
params(momentum::Momentum) = momentum.params
# Classical momentum: ν ← μν + ϵg; the boosted step is the velocity itself.
function boost!(momentum::Momentum{Classical},wid::Integer,klocal::Integer,kglobal::Integer,gprev::AbstractVector,gcurr::AbstractVector)
    @unpack μ,ϵ = momentum.params
    @. momentum.ν = μ*momentum.ν + ϵ*gprev
    gcurr .= momentum.ν
end
# Nesterov momentum: update the velocity from its previous value and emit the
# look-ahead combination μ²ν_prev + (1+μ)ϵg as the boosted step.
function boost!(momentum::Momentum{Nesterov},wid::Integer,klocal::Integer,kglobal::Integer,gprev::AbstractVector,gcurr::AbstractVector)
    @unpack μ,ϵ = momentum.params
    ν₀ = copy(momentum.ν)
    momentum.ν .= μ*ν₀ + ϵ*gprev
    gcurr .= μ^2*ν₀ + (1+μ)*ϵ*gprev
end
# Convenience constructors: a plain Momentum() defaults to the classical
# variant; Nesterov() builds the Nesterov-accelerated variant.
Momentum(; kwargs...) = Momentum(Classical; kwargs...)
Nesterov(; kwargs...) = Momentum(Nesterov; kwargs...)
|
{"hexsha": "583a34c903141e3f8ab8560fb4158d2eb62bf0c4", "size": 1342, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/boosting/momentum.jl", "max_stars_repo_name": "pologrp/POLO.jl", "max_stars_repo_head_hexsha": "c866563d02d060c733e3e6da5ccd57a96f85db61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-11-02T00:04:28.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-08T14:02:31.000Z", "max_issues_repo_path": "src/boosting/momentum.jl", "max_issues_repo_name": "pologrp/POLO.jl", "max_issues_repo_head_hexsha": "c866563d02d060c733e3e6da5ccd57a96f85db61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-10-15T09:32:32.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-15T16:05:08.000Z", "max_forks_repo_path": "src/boosting/momentum.jl", "max_forks_repo_name": "pologrp/POLO.jl", "max_forks_repo_head_hexsha": "c866563d02d060c733e3e6da5ccd57a96f85db61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7317073171, "max_line_length": 136, "alphanum_fraction": 0.7153502235, "num_tokens": 381}
|
"""
Created on Sun March 27 00:51:11 2020
@author: Gautam Pala
"""
import os
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import Image
import glob
import time
import h5py
import pandas as pd
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam, SGD
from keras.utils import plot_model
from keras.regularizers import l2
from keras import regularizers
from keras.utils import Sequence
import collections
from sklearn.utils import class_weight as cw
# codePath = os.path.dirname(__file__) + '/train_image/'
# output_labels_path = os.path.dirname(__file__) + '/train.csv'
# Paths to the training images directory and the CSV of per-image labels.
codePath = './train_image/'
output_labels_path = './train.csv'
def process_image(imagefile):
    """Load an image, convert to grayscale, resize and normalize it.

    Args:
        imagefile: Path to an image file (anything PIL can open).

    Returns:
        A (350, 350, 1) float array, min-max scaled to [0, 1] and then
        globally contrast normalized (zero mean, unit variance).
    """
    im = Image.open(imagefile)
    im = im.convert(mode='L')
    im = im.resize((350,350))
    im = np.asarray(im, dtype=float).reshape(350, 350, 1)
    # Minmax normalization. Fix: the original divided by np.float(255);
    # the np.float alias was removed in NumPy 1.24, so use the builtin
    # float literal, which is numerically identical.
    im_minmax = im / 255.0
    # GCN (global contrast normalization)
    im_gcn = (im_minmax - np.mean(im_minmax))/np.std(im_minmax)
    return im_gcn
class BatchSampler(Sequence):
    """Keras Sequence yielding (images, integer labels) batches from disk."""
    def __init__(self, data_dir, label_file, batch_size):
        """Index all *.jpg files in data_dir and parse the CSV label file.

        Each line of label_file is "name,label"; labels are mapped to
        integer ids in sorted order.
        """
        self.batch_size = batch_size
        self.image_files = glob.glob(data_dir + '*.jpg')
        # file name -> raw label string
        self.labels_dict = {}
        # raw label string -> integer class id (sorted order)
        self.label2id = {}
        self.class_weights = {}
        lines = open(label_file).read().splitlines()
        labels = []
        for line in lines:
            name, label = line.split(',')
            self.labels_dict[name] = label
            labels.append(label)
        count = 0
        for k in sorted(set(labels)):
            self.label2id[k] = count
            count += 1
        all_labels_as_integer = [self.label2id.get(n, n) for n in labels]
        # NOTE(review): class_weights becomes a numpy array here (indexed by
        # class id), overwriting the dict above. Positional arguments to
        # compute_class_weight are rejected by newer scikit-learn releases,
        # which require keywords — confirm the installed version.
        self.class_weights = cw.compute_class_weight('balanced', np.unique(all_labels_as_integer), all_labels_as_integer)
    def __len__(self):
        # Number of batches per epoch (last batch may be smaller).
        return int(np.ceil(len(self.image_files)/float(self.batch_size)))
    def __getitem__(self, idx):
        """Return batch idx as (images array, integer label array)."""
        batch_x = self.image_files[idx * self.batch_size: (idx + 1) * self.batch_size]
        batch_y = []
        for file in batch_x:
            # NOTE(review): splitting on '/' assumes POSIX paths; on Windows
            # glob returns backslashes — os.path.basename would be portable.
            file_name = file.split('/')[-1]
            label = self.labels_dict[file_name]
            batch_y.append(self.label2id[label])
        return np.array([process_image(x) for x in batch_x]), np.array(batch_y)
    def sample_count_of_all_labels(self):
        # Distinct labels and how often each occurs in the label file.
        counter = collections.Counter(self.labels_dict.values())
        return counter.keys(), counter.values()
# Image size used by the (commented-out) preprocessing step below.
target_image_shape = (350, 350)
# def Preprocess_Image(codePath):
#
# print("Pre processing images (Resizing and converting to black & white)")
# start = time.time()
# for path_image in glob.glob(codePath + '*.jpg'):
# img = mpimg.imread(path_image)
# # Convert from RGB image to black & white image
# if img.ndim != 2:
# image_pillow = Image.open(path_image)
# image_pillow = image_pillow.convert(mode='L')
# # plt.imshow(np.asarray(image_pillow), cmap='gray')
# image_pillow.save(path_image, format='JPEG')
# if img.shape != target_image_shape:
# image_pillow = image_pillow.resize(target_image_shape)
# image_pillow.save(path_image, format='JPEG')
# end = time.time()
# print("Pre processing of images completed in {} seconds".format(end-start))
# print("-----------------------------------------------")
# Uncomment below function calls to pre-process and create data-sets
# Preprocess_Image(codePath)
# (3) Create a sequential model — an AlexNet-style CNN for 8-way
# emotion classification on 350x350 grayscale images.
model = Sequential()
# 1st Convolutional Layer
model.add(Conv2D(filters=96, input_shape=(350,350,1), kernel_size=(11,11), strides=(4,4), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation before passing it to the next layer
model.add(BatchNormalization())
# 2nd Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation
model.add(BatchNormalization())
# 3rd Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# 4th Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation
model.add(BatchNormalization())
# 5th Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation
model.add(BatchNormalization())
# Passing it to a dense layer
model.add(Flatten())
# 1st Dense Layer
# model.add(Dense(4096, input_shape=(224*224*3,)))
model.add(Dense(1024))
model.add(Activation('relu'))
# Add Dropout to prevent overfitting
model.add(Dropout(0.4))
# Batch Normalisation
model.add(BatchNormalization())
# 2nd Dense Layer
model.add(Dense(4096))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))
# Batch Normalisation
model.add(BatchNormalization())
# 3rd Dense Layer
model.add(Dense(1000))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))
# Batch Normalisation
model.add(BatchNormalization())
# Output Layer: one unit per emotion class.
model.add(Dense(8))
model.add(Activation('softmax'))
print(model.summary())
# plot_model(model, to_file='emotion_model.png', show_shapes=True, show_layer_names=False)
# Train and test model
obj_batch_sampler = BatchSampler(codePath, output_labels_path, 32)
print("Number of training images = ", len(obj_batch_sampler.image_files))
print(obj_batch_sampler.label2id)
print("Number of batches = ", len(obj_batch_sampler))
label_type, label_count = obj_batch_sampler.sample_count_of_all_labels()
print(label_type)
print(label_count)
print(obj_batch_sampler.class_weights)
optimizer = Adam()
# optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# If targets are one-hot encoded, use categorical_crossentropy. If targets are integers,
# use sparse_categorical_crossentropy.
loss = 'sparse_categorical_crossentropy'
metrics= ['sparse_categorical_accuracy']
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# NOTE(review): Keras expects class_weight as a {class_id: weight} dict;
# here a numpy array is passed — confirm the installed Keras accepts it.
model.fit_generator(obj_batch_sampler, steps_per_epoch=len(obj_batch_sampler), epochs=10, verbose=1,
                    shuffle=True, class_weight=obj_batch_sampler.class_weights)
# score = model.evaluate(xte,yte, batch_size=batch_size)
model.save('emotion_model.h5')
|
{"hexsha": "1fee06ef550d5300ef45fbb645c6130e52707756", "size": 6903, "ext": "py", "lang": "Python", "max_stars_repo_path": "cnn_emotion.py", "max_stars_repo_name": "JayeshKriplani/Deep-Learning-based-Emotion_Detection", "max_stars_repo_head_hexsha": "6d77421a94ae73331e8da9adb3604bddf75e909a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cnn_emotion.py", "max_issues_repo_name": "JayeshKriplani/Deep-Learning-based-Emotion_Detection", "max_issues_repo_head_hexsha": "6d77421a94ae73331e8da9adb3604bddf75e909a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cnn_emotion.py", "max_forks_repo_name": "JayeshKriplani/Deep-Learning-based-Emotion_Detection", "max_forks_repo_head_hexsha": "6d77421a94ae73331e8da9adb3604bddf75e909a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-01T06:12:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-01T06:12:19.000Z", "avg_line_length": 33.1875, "max_line_length": 121, "alphanum_fraction": 0.7060698247, "include": true, "reason": "import numpy", "num_tokens": 1697}
|
# Copyright (c) Facebook, Inc. and its affiliates
# Copyright (c) MTRF authors
#!/usr/bin/env python
import os
# os.system('chmod +x sawyer_read_angles.py')
import rospy
from std_msgs.msg import String
from rospy_tutorials.msg import Floats
from rospy.numpy_msg import numpy_msg
from rospy_message_converter import message_converter
import intera_interface
import numpy as np
from geometry_msgs.msg import Pose
def talker():
    """Publish the Sawyer right limb's pose, joint angles and velocities.

    Runs until ROS shutdown, publishing on three topics at rospy.Rate(13).
    """
    pub_pose = rospy.Publisher('get_pose', Pose, queue_size=10)
    pub_angles = rospy.Publisher('get_angles', Floats, queue_size=10)
    pub_velocities = rospy.Publisher('get_angle_velocities', Floats, queue_size=10)
    rospy.init_node('Sawyer_reader')
    limb = intera_interface.Limb('right')
    loop_rate = rospy.Rate(13)
    while not rospy.is_shutdown():
        joint_angles = limb.joint_angles()
        # Forward kinematics gives the end-effector pose for these angles.
        fk_resp = limb.fk_request(joint_angles)
        pose = fk_resp.pose_stamp[0].pose
        joint_velocities = limb.joint_velocities()
        rospy.loginfo(pose)
        rospy.loginfo(list(joint_angles.values()))
        pub_pose.publish(pose)
        pub_angles.publish(Floats(list(joint_angles.values())))
        pub_velocities.publish(Floats(list(joint_velocities.values())))
        loop_rate.sleep()
if __name__ == '__main__':
    # run with python2, intera env
    try:
        talker()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the rate sleep.
        pass
|
{"hexsha": "af191b602de79e6874d1965ba9742161c52c012b", "size": 1438, "ext": "py", "lang": "Python", "max_stars_repo_path": "MTRF/r3l/r3l/sawyer_hardware/sawyer_read_angles.py", "max_stars_repo_name": "facebookresearch/MTRF", "max_stars_repo_head_hexsha": "2fee8f3f1c2150fcecc2db2fa9e122a664a72d72", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-29T10:09:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T05:48:32.000Z", "max_issues_repo_path": "MTRF/r3l/r3l/sawyer_hardware/sawyer_read_angles.py", "max_issues_repo_name": "facebookresearch/MTRF", "max_issues_repo_head_hexsha": "2fee8f3f1c2150fcecc2db2fa9e122a664a72d72", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MTRF/r3l/r3l/sawyer_hardware/sawyer_read_angles.py", "max_forks_repo_name": "facebookresearch/MTRF", "max_forks_repo_head_hexsha": "2fee8f3f1c2150fcecc2db2fa9e122a664a72d72", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2608695652, "max_line_length": 83, "alphanum_fraction": 0.707232267, "include": true, "reason": "import numpy", "num_tokens": 357}
|
/**
* @file tests/main_tests/emst_test.cpp
* @author Manish Kumar
*
* Test RUN_BINDING() of emst_main.cpp.
*
* mlpack is free software; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
* 3-clause BSD license along with mlpack. If not, see
* http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/
#define BINDING_TYPE BINDING_TYPE_TEST
#include <mlpack/core.hpp>
#include <mlpack/methods/emst/emst_main.cpp>
#include <mlpack/core/util/mlpack_main.hpp>
#include "main_test_fixture.hpp"
#include "../catch.hpp"
#include <boost/math/special_functions/round.hpp>
using namespace mlpack;
BINDING_TEST_FIXTURE(EMSTTestFixture);
/**
* Make sure that Output has 3 Dimensions and
* check the number of output edges.
*/
TEST_CASE_METHOD(EMSTTestFixture, "EMSTOutputDimensionTest",
                 "[EMSTMainTest][BindingTests]")
{
  arma::mat inputData;
  if (!data::Load("test_data_3_1000.csv", inputData))
    FAIL("Cannot load test dataset test_data_3_1000.csv!");

  // Feed the random data points to the binding.
  SetInputParam("input", std::move(inputData));
  SetInputParam("leaf_size", (int) 2);

  RUN_BINDING();

  const arma::mat& output = params.Get<arma::mat>("output");
  // Each edge row is (u, v, weight), hence 3 rows.
  REQUIRE(output.n_rows == 3);
  // An MST over 1000 points has exactly 999 edges.
  REQUIRE(output.n_cols == 999);
}
/**
* Check Naive algorithm Output has 3 Dimensions and
* check the number of output edges.
*/
TEST_CASE_METHOD(EMSTTestFixture, "EMSTNaiveOutputDimensionTest",
                 "[EMSTMainTest][BindingTests]")
{
  arma::mat inputData;
  if (!data::Load("test_data_3_1000.csv", inputData))
    FAIL("Cannot load test dataset test_data_3_1000.csv!");

  // Feed the random data points and request the naive algorithm.
  SetInputParam("input", std::move(inputData));
  SetInputParam("naive", true);

  RUN_BINDING();

  const arma::mat& output = params.Get<arma::mat>("output");
  // Each edge row is (u, v, weight), hence 3 rows.
  REQUIRE(output.n_rows == 3);
  // An MST over 1000 points has exactly 999 edges.
  REQUIRE(output.n_cols == 999);
}
/**
* Ensure that we can't specify an invalid leaf size.
*/
TEST_CASE_METHOD(EMSTTestFixture, "EMSTInvalidLeafSizeTest",
                 "[EMSTMainTest][BindingTests]")
{
  arma::mat inputData;
  if (!data::Load("test_data_3_1000.csv", inputData))
    FAIL("Cannot load test dataset test_data_3_1000.csv!");

  SetInputParam("input", std::move(inputData));
  // A negative leaf size is invalid and must be rejected.
  SetInputParam("leaf_size", (int) -1);

  // Silence the fatal-error output while expecting the throw.
  Log::Fatal.ignoreInput = true;
  REQUIRE_THROWS_AS(RUN_BINDING(), std::runtime_error);
  Log::Fatal.ignoreInput = false;
}
/**
* Check that all elements of first two output rows are close to integers.
*/
TEST_CASE_METHOD(EMSTTestFixture, "EMSTFirstTwoOutputRowsIntegerTest",
                 "[EMSTMainTest][BindingTests]")
{
  arma::mat x;
  if (!data::Load("test_data_3_1000.csv", x))
    FAIL("Cannot load test dataset test_data_3_1000.csv!");

  // Input random data points.
  SetInputParam("input", std::move(x));
  SetInputParam("leaf_size", (int) 2);

  // BUG FIX: the binding was never executed before, so the loop below
  // iterated over an empty output matrix and the test passed vacuously.
  RUN_BINDING();

  for (size_t i = 0; i < params.Get<arma::mat>("output").n_cols; ++i)
  {
    // The first two rows hold the edge endpoints (point indices), so they
    // must be numerically integral.
    REQUIRE(params.Get<arma::mat>("output")(0, i) ==
        Approx(boost::math::iround(params.Get<arma::mat>("output")(0, i))).
        epsilon(1e-7));
    REQUIRE(params.Get<arma::mat>("output")(1, i) ==
        Approx(boost::math::iround(params.Get<arma::mat>("output")(1, i))).
        epsilon(1e-7));
  }
}
|
{"hexsha": "3cf3a8802a46ce4ae6830886f424e26dae7be4ca", "size": 3432, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mlpack/tests/main_tests/emst_test.cpp", "max_stars_repo_name": "oblanchet/mlpack", "max_stars_repo_head_hexsha": "e02ab3be544694294d2f73bd12a98d0d162ef3af", "max_stars_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_stars_count": 4216.0, "max_stars_repo_stars_event_min_datetime": "2015-01-01T02:06:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T19:12:06.000Z", "max_issues_repo_path": "src/mlpack/tests/main_tests/emst_test.cpp", "max_issues_repo_name": "oblanchet/mlpack", "max_issues_repo_head_hexsha": "e02ab3be544694294d2f73bd12a98d0d162ef3af", "max_issues_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_issues_count": 2621.0, "max_issues_repo_issues_event_min_datetime": "2015-01-01T01:41:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:01:26.000Z", "max_forks_repo_path": "src/mlpack/tests/main_tests/emst_test.cpp", "max_forks_repo_name": "oblanchet/mlpack", "max_forks_repo_head_hexsha": "e02ab3be544694294d2f73bd12a98d0d162ef3af", "max_forks_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_forks_count": 1972.0, "max_forks_repo_forks_event_min_datetime": "2015-01-01T23:37:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T06:03:41.000Z", "avg_line_length": 29.5862068966, "max_line_length": 78, "alphanum_fraction": 0.6809440559, "num_tokens": 936}
|
[STATEMENT]
lemma absc_distr_self:
"MDP.MC.T (absc cfg) = distr (MDP.MC.T cfg) MDP.MC.S (smap absc)" if "cfg \<in> valid_cfg"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. MDP.MC.T (absc cfg) = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
using \<open>cfg \<in> _\<close>
[PROOF STATE]
proof (prove)
using this:
cfg \<in> MDP.valid_cfg
goal (1 subgoal):
1. MDP.MC.T (absc cfg) = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
proof (coinduction arbitrary: cfg rule: MDP.MC.T_coinduct)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> prob_space (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc))
2. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> sets (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)) = sets (stream_space (count_space UNIV))
3. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
case prob
[PROOF STATE]
proof (state)
this:
cfg \<in> MDP.valid_cfg
goal (3 subgoals):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> prob_space (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc))
2. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> sets (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)) = sets (stream_space (count_space UNIV))
3. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. prob_space (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc))
[PROOF STEP]
by (rule MDP.MC.T.prob_space_distr, simp)
[PROOF STATE]
proof (state)
this:
prob_space (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc))
goal (2 subgoals):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> sets (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)) = sets (stream_space (count_space UNIV))
2. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> sets (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)) = sets (stream_space (count_space UNIV))
2. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
case sets
[PROOF STATE]
proof (state)
this:
cfg \<in> MDP.valid_cfg
goal (2 subgoals):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> sets (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)) = sets (stream_space (count_space UNIV))
2. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sets (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)) = sets (stream_space (count_space UNIV))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
sets (distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc)) = sets (stream_space (count_space UNIV))
goal (1 subgoal):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
case prems: (cont cfg)
[PROOF STATE]
proof (state)
this:
cfg \<in> MDP.valid_cfg
goal (1 subgoal):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
define t where "t \<equiv> \<lambda> y. THE x. y = absc x \<and> x \<in> K_cfg cfg"
[PROOF STATE]
proof (state)
this:
t \<equiv> \<lambda>y. THE x. y = absc x \<and> x \<in> set_pmf (K_cfg cfg)
goal (1 subgoal):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
define M' where "M' \<equiv> \<lambda> cfg. distr (MDP.MC.T (t cfg)) MDP.MC.S (smap absc)"
[PROOF STATE]
proof (state)
this:
M' \<equiv> \<lambda>cfg. distr (MDP.MC.T (t cfg)) (stream_space (count_space UNIV)) (smap absc)
goal (1 subgoal):
1. \<And>cfg. cfg \<in> MDP.valid_cfg \<Longrightarrow> \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
proof (rule exI[where x = M'], safe, goal_cases)
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. \<And>y. y \<in> set_pmf (K_cfg (absc cfg)) \<Longrightarrow> \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
2. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
3. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
4. \<And>y. prob_space (M' y)
5. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
case A: (1 y)
[PROOF STATE]
proof (state)
this:
y \<in> set_pmf (K_cfg (absc cfg))
goal (5 subgoals):
1. \<And>y. y \<in> set_pmf (K_cfg (absc cfg)) \<Longrightarrow> \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
2. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
3. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
4. \<And>y. prob_space (M' y)
5. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
from A prems
[PROOF STATE]
proof (chain)
picking this:
y \<in> set_pmf (K_cfg (absc cfg))
cfg \<in> MDP.valid_cfg
[PROOF STEP]
obtain x' where "y = absc x'" "x' \<in> K_cfg cfg"
[PROOF STATE]
proof (prove)
using this:
y \<in> set_pmf (K_cfg (absc cfg))
cfg \<in> MDP.valid_cfg
goal (1 subgoal):
1. (\<And>x'. \<lbrakk>y = absc x'; x' \<in> set_pmf (K_cfg cfg)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp: K_cfg_map_absc)
[PROOF STATE]
proof (state)
this:
y = absc x'
x' \<in> set_pmf (K_cfg cfg)
goal (5 subgoals):
1. \<And>y. y \<in> set_pmf (K_cfg (absc cfg)) \<Longrightarrow> \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
2. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
3. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
4. \<And>y. prob_space (M' y)
5. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
with K_cfg_bisim_unique[OF prems _ _ absc_bisim_abss]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?x \<in> set_pmf (K_cfg cfg); ?x' \<in> set_pmf (K_cfg cfg); absc ?x = absc ?x'\<rbrakk> \<Longrightarrow> ?x = ?x'
y = absc x'
x' \<in> set_pmf (K_cfg cfg)
[PROOF STEP]
have
"y = absc (t y)" "x' = t y"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x \<in> set_pmf (K_cfg cfg); ?x' \<in> set_pmf (K_cfg cfg); absc ?x = absc ?x'\<rbrakk> \<Longrightarrow> ?x = ?x'
y = absc x'
x' \<in> set_pmf (K_cfg cfg)
goal (1 subgoal):
1. y = absc (t y) &&& x' = t y
[PROOF STEP]
unfolding t_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x \<in> set_pmf (K_cfg cfg); ?x' \<in> set_pmf (K_cfg cfg); absc ?x = absc ?x'\<rbrakk> \<Longrightarrow> ?x = ?x'
y = absc x'
x' \<in> set_pmf (K_cfg cfg)
goal (1 subgoal):
1. y = absc (THE x. y = absc x \<and> x \<in> set_pmf (K_cfg cfg)) &&& x' = (THE x. y = absc x \<and> x \<in> set_pmf (K_cfg cfg))
[PROOF STEP]
by (auto intro: theI2)
[PROOF STATE]
proof (state)
this:
y = absc (t y)
x' = t y
goal (5 subgoals):
1. \<And>y. y \<in> set_pmf (K_cfg (absc cfg)) \<Longrightarrow> \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
2. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
3. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
4. \<And>y. prob_space (M' y)
5. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
y = absc (t y)
x' = t y
goal (5 subgoals):
1. \<And>y. y \<in> set_pmf (K_cfg (absc cfg)) \<Longrightarrow> \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
2. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
3. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
4. \<And>y. prob_space (M' y)
5. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
have "x' \<in> valid_cfg"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x' \<in> MDP.valid_cfg
[PROOF STEP]
using \<open>x' \<in> _\<close> prems
[PROOF STATE]
proof (prove)
using this:
x' \<in> set_pmf (K_cfg cfg)
cfg \<in> MDP.valid_cfg
goal (1 subgoal):
1. x' \<in> MDP.valid_cfg
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x' \<in> MDP.valid_cfg
goal (5 subgoals):
1. \<And>y. y \<in> set_pmf (K_cfg (absc cfg)) \<Longrightarrow> \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
2. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
3. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
4. \<And>y. prob_space (M' y)
5. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
y = absc (t y)
x' = t y
x' \<in> MDP.valid_cfg
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
y = absc (t y)
x' = t y
x' \<in> MDP.valid_cfg
goal (1 subgoal):
1. \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
[PROOF STEP]
unfolding M'_def
[PROOF STATE]
proof (prove)
using this:
y = absc (t y)
x' = t y
x' \<in> MDP.valid_cfg
goal (1 subgoal):
1. \<exists>cfg. y = absc cfg \<and> distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) (smap absc) = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg
goal (4 subgoals):
1. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
2. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
3. \<And>y. prob_space (M' y)
4. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
2. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
3. \<And>y. prob_space (M' y)
4. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
case 5
[PROOF STATE]
proof (state)
this:
goal (4 subgoals):
1. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
2. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
3. \<And>y. prob_space (M' y)
4. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
unfolding M'_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) (smap absc)) (stream_space (count_space UNIV)) ((##) y))
[PROOF STEP]
apply (subst distr_distr)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
2. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
3. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
[PROOF STEP]
prefer 3
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
2. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
3. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
apply (subst MDP.MC.T_eq_bind)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. distr (measure_pmf (K_cfg cfg) \<bind> (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
2. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
3. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
apply (subst distr_bind)
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
2. space (measure_pmf (K_cfg cfg)) \<noteq> {}
3. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
4. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (distr (MDP.MC.T x) (stream_space (count_space UNIV)) ((##) x)) (stream_space (count_space UNIV)) (smap absc)) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
5. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
6. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
prefer 4
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (distr (MDP.MC.T x) (stream_space (count_space UNIV)) ((##) x)) (stream_space (count_space UNIV)) (smap absc)) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
2. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
3. space (measure_pmf (K_cfg cfg)) \<noteq> {}
4. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
5. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
6. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
apply (subst distr_distr)
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
2. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
3. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x)) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
4. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
5. space (measure_pmf (K_cfg cfg)) \<noteq> {}
6. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
7. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
8. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
prefer 3
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x)) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
2. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
3. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
4. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
5. space (measure_pmf (K_cfg cfg)) \<noteq> {}
6. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
7. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
8. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
apply (subst K_cfg_map_absc)
[PROOF STATE]
proof (prove)
goal (9 subgoals):
1. cfg \<in> MDP.valid_cfg
2. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x)) = measure_pmf (map_pmf absc (K_cfg cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
3. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
4. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
5. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
6. space (measure_pmf (K_cfg cfg)) \<noteq> {}
7. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
8. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
9. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
apply (rule prems)
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x)) = measure_pmf (map_pmf absc (K_cfg cfg)) \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
2. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
3. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
4. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
5. space (measure_pmf (K_cfg cfg)) \<noteq> {}
6. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
7. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
8. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
apply (subst map_pmf_rep_eq)
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x)) = distr (measure_pmf (K_cfg cfg)) (count_space UNIV) absc \<bind> (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc))
2. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
3. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
4. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
5. space (measure_pmf (K_cfg cfg)) \<noteq> {}
6. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
7. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
8. \<And>y. MDP.MC.T.random_variable (t y) (stream_space (count_space UNIV)) (smap absc)
[PROOF STEP]
apply (subst bind_distr)
[PROOF STATE]
proof (prove)
goal (11 subgoals):
1. measure_pmf.random_variable (K_cfg cfg) (count_space UNIV) absc
2. (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc)) \<in> count_space UNIV \<rightarrow>\<^sub>M subprob_algebra ?K12
3. space (measure_pmf (K_cfg cfg)) \<noteq> {}
4. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x)) = measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc))
5. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
6. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
7. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
8. space (measure_pmf (K_cfg cfg)) \<noteq> {}
9. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
10. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
A total of 11 subgoals...
[PROOF STEP]
prefer 4
[PROOF STATE]
proof (prove)
goal (11 subgoals):
1. measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x)) = measure_pmf (K_cfg cfg) \<bind> (\<lambda>x. distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc))
2. measure_pmf.random_variable (K_cfg cfg) (count_space UNIV) absc
3. (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc)) \<in> count_space UNIV \<rightarrow>\<^sub>M subprob_algebra ?K12
4. space (measure_pmf (K_cfg cfg)) \<noteq> {}
5. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
6. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
7. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
8. space (measure_pmf (K_cfg cfg)) \<noteq> {}
9. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
10. \<And>y. (##) y \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
A total of 11 subgoals...
[PROOF STEP]
apply (rule bind_measure_pmf_cong)
[PROOF STATE]
proof (prove)
goal (13 subgoals):
1. \<And>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x) \<in> space (subprob_algebra ?N14)
2. \<And>x. distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc) \<in> space (subprob_algebra ?N14)
3. \<And>i. i \<in> set_pmf (K_cfg cfg) \<Longrightarrow> distr (MDP.MC.T i) (stream_space (count_space UNIV)) (smap absc \<circ> (##) i) = distr (MDP.MC.T (t (absc i))) (stream_space (count_space UNIV)) ((##) (absc i) \<circ> smap absc)
4. measure_pmf.random_variable (K_cfg cfg) (count_space UNIV) absc
5. (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc)) \<in> count_space UNIV \<rightarrow>\<^sub>M subprob_algebra ?K12
6. space (measure_pmf (K_cfg cfg)) \<noteq> {}
7. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
8. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
9. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
10. space (measure_pmf (K_cfg cfg)) \<noteq> {}
A total of 13 subgoals...
[PROOF STEP]
prefer 3
[PROOF STATE]
proof (prove)
goal (13 subgoals):
1. \<And>i. i \<in> set_pmf (K_cfg cfg) \<Longrightarrow> distr (MDP.MC.T i) (stream_space (count_space UNIV)) (smap absc \<circ> (##) i) = distr (MDP.MC.T (t (absc i))) (stream_space (count_space UNIV)) ((##) (absc i) \<circ> smap absc)
2. \<And>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x) \<in> space (subprob_algebra ?N14)
3. \<And>x. distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc) \<in> space (subprob_algebra ?N14)
4. measure_pmf.random_variable (K_cfg cfg) (count_space UNIV) absc
5. (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc)) \<in> count_space UNIV \<rightarrow>\<^sub>M subprob_algebra ?K12
6. space (measure_pmf (K_cfg cfg)) \<noteq> {}
7. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
8. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
9. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
10. space (measure_pmf (K_cfg cfg)) \<noteq> {}
A total of 13 subgoals...
[PROOF STEP]
subgoal premises A for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x) = distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x) = distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc)
[PROOF STEP]
have "t (absc x) = x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t (absc x) = x
[PROOF STEP]
unfolding t_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (THE xa. absc x = absc xa \<and> xa \<in> set_pmf (K_cfg cfg)) = x
[PROOF STEP]
proof (rule the_equality, goal_cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. absc x = absc x \<and> x \<in> set_pmf (K_cfg cfg)
2. \<And>xa. absc x = absc xa \<and> xa \<in> set_pmf (K_cfg cfg) \<Longrightarrow> xa = x
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. absc x = absc x \<and> x \<in> set_pmf (K_cfg cfg)
2. \<And>xa. absc x = absc xa \<and> xa \<in> set_pmf (K_cfg cfg) \<Longrightarrow> xa = x
[PROOF STEP]
with A
[PROOF STATE]
proof (chain)
picking this:
x \<in> set_pmf (K_cfg cfg)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
x \<in> set_pmf (K_cfg cfg)
goal (1 subgoal):
1. absc x = absc x \<and> x \<in> set_pmf (K_cfg cfg)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
absc x = absc x \<and> x \<in> set_pmf (K_cfg cfg)
goal (1 subgoal):
1. \<And>xa. absc x = absc xa \<and> xa \<in> set_pmf (K_cfg cfg) \<Longrightarrow> xa = x
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>xa. absc x = absc xa \<and> xa \<in> set_pmf (K_cfg cfg) \<Longrightarrow> xa = x
[PROOF STEP]
case (2 x')
[PROOF STATE]
proof (state)
this:
absc x = absc x' \<and> x' \<in> set_pmf (K_cfg cfg)
goal (1 subgoal):
1. \<And>xa. absc x = absc xa \<and> xa \<in> set_pmf (K_cfg cfg) \<Longrightarrow> xa = x
[PROOF STEP]
with K_cfg_bisim_unique[OF prems _ A absc_bisim_abss]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?x \<in> set_pmf (K_cfg cfg); absc ?x = absc x\<rbrakk> \<Longrightarrow> ?x = x
absc x = absc x' \<and> x' \<in> set_pmf (K_cfg cfg)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x \<in> set_pmf (K_cfg cfg); absc ?x = absc x\<rbrakk> \<Longrightarrow> ?x = x
absc x = absc x' \<and> x' \<in> set_pmf (K_cfg cfg)
goal (1 subgoal):
1. x' = x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x' = x
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
t (absc x) = x
goal (1 subgoal):
1. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x) = distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t (absc x) = x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
t (absc x) = x
goal (1 subgoal):
1. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x) = distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc)
[PROOF STEP]
by (auto simp: comp_def)
[PROOF STATE]
proof (state)
this:
distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x) = distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (prove)
goal (12 subgoals):
1. \<And>x. distr (MDP.MC.T x) (stream_space (count_space UNIV)) (smap absc \<circ> (##) x) \<in> space (subprob_algebra ?N14)
2. \<And>x. distr (MDP.MC.T (t (absc x))) (stream_space (count_space UNIV)) ((##) (absc x) \<circ> smap absc) \<in> space (subprob_algebra ?N14)
3. measure_pmf.random_variable (K_cfg cfg) (count_space UNIV) absc
4. (\<lambda>y. distr (MDP.MC.T (t y)) (stream_space (count_space UNIV)) ((##) y \<circ> smap absc)) \<in> count_space UNIV \<rightarrow>\<^sub>M subprob_algebra ?K12
5. space (measure_pmf (K_cfg cfg)) \<noteq> {}
6. \<And>x. smap absc \<in> stream_space (count_space UNIV) \<rightarrow>\<^sub>M stream_space (count_space UNIV)
7. \<And>x. MDP.MC.T.random_variable x (stream_space (count_space UNIV)) ((##) x)
8. measure_pmf.random_variable (K_cfg cfg) (subprob_algebra ?K3) (\<lambda>t. distr (MDP.MC.T t) (stream_space (count_space UNIV)) ((##) t))
9. space (measure_pmf (K_cfg cfg)) \<noteq> {}
10. smap absc \<in> ?K3 \<rightarrow>\<^sub>M stream_space (count_space UNIV)
A total of 12 subgoals...
[PROOF STEP]
by (fastforce
simp: space_subprob_algebra MC_syntax.in_S
intro: bind_measure_pmf_cong MDP.MC.T.subprob_space_distr MDP.MC.T.prob_space_distr
)+
[PROOF STATE]
proof (state)
this:
distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
goal (3 subgoals):
1. \<And>y x. x \<in> sets (M' y) \<Longrightarrow> x \<in> sets (stream_space (count_space UNIV))
2. \<And>y x. x \<in> sets (stream_space (count_space UNIV)) \<Longrightarrow> x \<in> sets (M' y)
3. \<And>y. prob_space (M' y)
[PROOF STEP]
qed (auto simp: M'_def intro: MDP.MC.T.prob_space_distr)
[PROOF STATE]
proof (state)
this:
\<exists>M'. (\<forall>y\<in>set_pmf (K_cfg (absc cfg)). \<exists>cfg. y = absc cfg \<and> M' y = distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) \<and> cfg \<in> MDP.valid_cfg) \<and> (\<forall>y. sets (M' y) = sets (stream_space (count_space UNIV)) \<and> prob_space (M' y)) \<and> distr (MDP.MC.T cfg) (stream_space (count_space UNIV)) (smap absc) = measure_pmf (K_cfg (absc cfg)) \<bind> (\<lambda>y. distr (M' y) (stream_space (count_space UNIV)) ((##) y))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 16430, "file": "Probabilistic_Timed_Automata_PTA", "length": 71}
|
#!/usr/bin/python
import numpy as np
import deepSNP
import deepSNP_utils
def snp_pos_feature_matrix(read, window_start):
    """
    Create a (WINDOW_SIZE x 1) binary matrix that marks the SNP position.

    :param read: pysam read
    :param window_start: starting position of feature window
    :return: (WINDOW_SIZE x 1) binary matrix marking SNP position
    """
    snp_mask_matrix = np.zeros((deepSNP.WINDOW_SIZE, 1))
    # position of the SNP within the read, or a negative value if none
    snp_pos_in_read = get_snp_pos_in_read(read)
    if snp_pos_in_read < 0:
        # no SNP in this read: all-zero mask
        return snp_mask_matrix
    if read.is_reverse:
        # reverse strand: the SNP offset counts back from the end of the read
        snp_pos_in_matrix = (read.reference_end - 1 - snp_pos_in_read) - window_start
    else:
        # forward strand: the SNP offset counts forward from the read start
        snp_pos_in_matrix = (read.reference_start + snp_pos_in_read) - window_start
    # only mark the SNP when it falls inside our window
    if 0 <= snp_pos_in_matrix < deepSNP.WINDOW_SIZE:
        snp_mask_matrix[snp_pos_in_matrix] = 1
    return snp_mask_matrix
def print_snp_pos_feature_matrix(snp_mask_feat_matrix):
    """
    Prints a string of zeros, except 1 at SNP location

    :param snp_mask_feat_matrix: binary mask matrix with SNP location
    :return: printable string
    """
    # join once instead of quadratic repeated string concatenation
    mask_string = "".join(str(int(val)) for val in snp_mask_feat_matrix)
    # parenthesized single-argument print is valid in both Python 2 and 3
    # (the original bare `print` statement was Python-2-only)
    print(mask_string)
    return mask_string
def get_snp_pos_in_read(read):
    """
    Use NM and MD tag to extract the SNP position in a read.

    Only reads with exactly one mismatch (NM == 1) and no deletion are
    handled; all other cases yield -1.

    :param read: pysam read
    :return: integer position of SNP in read, or -1 if none was found
    """
    # get number of mismatches in read (edit distance); without an NM tag
    # we cannot tell whether the read carries a SNP
    if not read.has_tag("NM"):
        return -1
    num_mismatches = read.get_tag("NM")
    # TODO: be able to handle reads w/ multiple SNPs? or toss those reads?
    # TODO: if so, replace all ACGT with X, split, then add prev val to current to get pos
    if num_mismatches != 1:
        return -1
    md_flag = read.get_tag("MD")
    # NOTE: len(md_flag) < 6 prevents things like 11A2T1T19 from getting
    # through (hoisted out of the loop: the check is loop-invariant)
    if len(md_flag) >= 6:
        return -1
    # try to split using base character
    for b in ['A', 'C', 'T', 'G']:
        # if we can split the string, string form like [0-9]+[ACGT]
        # where the leading number is #matches before SNP and since
        # python is zero based this should give us the index of SNP
        parts = md_flag.split(b)
        if len(parts) == 2:
            # TODO: handle deletions?
            # check for deletion character
            if len(md_flag.split("^")) == 2:
                return -1
            # if read is reversed, need to flip
            if read.is_reverse:
                return int(parts[1])
            return int(parts[0])
    # BUGFIX: the original fell through here and implicitly returned None
    # when no single base split matched; return -1 consistently so callers
    # can safely test `>= 0`.
    return -1
|
{"hexsha": "c5164552d873550fea04536d0bb5515130986354", "size": 3180, "ext": "py", "lang": "Python", "max_stars_repo_path": "snp_pos_feature.py", "max_stars_repo_name": "brianhill11/deepSNP", "max_stars_repo_head_hexsha": "4979fbf84fe3e68d76b0054ba031e084d494411d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "snp_pos_feature.py", "max_issues_repo_name": "brianhill11/deepSNP", "max_issues_repo_head_hexsha": "4979fbf84fe3e68d76b0054ba031e084d494411d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snp_pos_feature.py", "max_forks_repo_name": "brianhill11/deepSNP", "max_forks_repo_head_hexsha": "4979fbf84fe3e68d76b0054ba031e084d494411d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5517241379, "max_line_length": 90, "alphanum_fraction": 0.6226415094, "include": true, "reason": "import numpy", "num_tokens": 797}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#####
Tools
#####
*Created on Thu Jun 7 14:45 2017 by A. Pahl*
Helper Tools acting on individual data..
"""
import os
import os.path as op
import sys
import glob
from collections import Counter, namedtuple
import yaml
import pandas as pd
import numpy as np
import scipy.spatial.distance as dist
from rdkit.Chem import AllChem as Chem
from rdkit import DataStructs
# Plate row labels: single letters A..Z followed by AA..AF
# (32 labels total — presumably enough for the largest plate format used;
# TODO confirm against the plate definitions).
ROWS = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P",
        "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "AA", "AB", "AC", "AD", "AE", "AF"]
# Default set of columns to keep when trimming result tables.
KEEP = ['Compound_Id', "Batch_Id", "Producer",
        "Address", "Conc_uM", "Smiles", "Pure_Flag"]
def load_config(conf):
    """Load configuration from the default per-user location and
    return the parsed config object.

    Known configuration files are `config.yaml` and `plates.yaml`.

    :param conf: which configuration to load; "config" or "plates"
    :raises FileNotFoundError: when the file could not be loaded.
    """
    assert conf in ["config", "plates"]
    if "HOME" in os.environ:
        conf_fn = op.join(os.environ["HOME"], ".config",
                          "cellpainting2", "{}.yaml".format(conf))
    elif "HOMEPATH" in os.environ:  # Windows
        conf_fn = op.join(os.environ["HOMEPATH"],
                          "cellpainting2", "{}.yaml".format(conf))
    # NOTE(review): if neither HOME nor HOMEPATH is set, `conf_fn` is unbound
    # and the open() below raises NameError — preserved original behavior.
    try:
        with open(conf_fn, 'r') as ymlfile:
            # safe_load: plain config files need no arbitrary-object YAML
            # tags, and yaml.load without an explicit Loader is deprecated
            # (and an error on PyYAML >= 6).
            config = yaml.safe_load(ymlfile)
    except FileNotFoundError:
        # BUGFIX: the original formatted the not-yet-assigned `config`
        # variable here, raising NameError and masking the real error.
        print("Configuration file {}.yaml not found.".format(conf))
        print("Have a look at the *.yaml files in the `conf` folder of")
        print("the `cluster_tools directories` for templates and locations.")
        raise
    return config
# Module-level side effect: the user configuration is loaded at import time.
# ACT_PROF_PARAMETERS holds the config's "Parameters" entry — presumably the
# list of activity-profile parameter names; verify against config.yaml.
cp_config = load_config("config")
ACT_PROF_PARAMETERS = cp_config["Parameters"]
def is_interactive_ipython():
    """Return True when running inside an interactive IPython session."""
    try:
        # `get_ipython` only exists in the IPython interactive namespace.
        get_ipython()
    except NameError:
        return False
    return True
class Summary(Counter):
    """A Counter-based container for reporting running pipeline statistics.

    ``str()`` renders the counters alphabetically, one per line, and
    ``print()`` can repeatedly overwrite its own terminal output for a
    simple live progress display."""

    def __init__(self, **kwargs):
        """Parameters:
            timeit: whether or not to use the timing functionality. Default: True"""
        super().__init__(**kwargs)

    def __str__(self):
        keys = sorted(self.keys())
        width = max(map(len, keys))
        parts = []
        for key in keys:
            parts.append("{k:{mlen}s}: {val:>7}".format(k=key, mlen=width,
                                                        val=self[key]))
            parts.append("\n")
        return "".join(parts)

    def __repr__(self):
        return self.__str__()

    def print(self, final=False):
        text = self.__str__()
        if not final:
            # Move the cursor back up over what was just printed so the
            # next call overwrites it in place.
            text = text + '\033[{}A\r'.format(len(self.keys()))
        print(text, end="")
        sys.stdout.flush()
def profile_sim_dist_corr(current, reference):
    """Similarity of two equal-length activity profiles, defined as
    1 - correlation distance, clipped into the range 0 .. 1."""
    assert len(reference) == len(
        current), "Activity Profiles must have the same length to be compared."
    similarity = 1 - dist.correlation(current, reference)
    # Anti-correlated profiles would give a negative value; clip to zero.
    return max(similarity, 0.0)
def profile_sim_tanimoto(p1, p2):
    """Tanimoto-like similarity of two equal-length activity profiles.

    A position is *significant* when at least one profile is non-zero
    there, and *matching* when both values have the same sign.
    Returns matching / significant in the range 0.0 .. 1.0.
    """
    p_len = len(p1)
    assert p_len == len(p2), "profiles must be of same length!"
    matching = 0
    significant = 0
    for idx in range(p_len):
        if (p1[idx] < 0.0 and p2[idx] < 0.0) or (p1[idx] > 0.0 and p2[idx] > 0.0):
            matching += 1
        if p1[idx] != 0.0 or p2[idx] != 0.0:
            significant += 1
    if significant == 0:
        # Two all-zero profiles share no significant positions; the
        # original code raised ZeroDivisionError for this case.
        return 0.0
    return matching / significant
def profile_sim_tanimoto_weighted(p1, p2):
    """Weighted Tanimoto-like similarity of two equal-length profiles.

    Positions where both values share a sign contribute their overlap
    (max magnitude minus magnitude difference) to `matching`; every
    position where either value is non-zero contributes its max
    magnitude to `significant`. Returns matching / significant.
    """
    p_len = len(p1)
    assert p_len == len(p2), "profiles must be of same length!"
    matching = 0.0
    significant = 0.0
    for idx in range(p_len):
        val1 = p1[idx]
        val2 = p2[idx]
        val1_abs = abs(val1)
        val2_abs = abs(val2)
        if (val1 < 0 and val2 < 0) or (val1 > 0 and val2 > 0):
            matching += max(val1_abs, val2_abs) - abs(val1_abs - val2_abs)
        if val1_abs > 0 or val2_abs > 0:
            significant += max(val1_abs, val2_abs)
    if significant == 0.0:
        # Two all-zero profiles; the original raised ZeroDivisionError.
        return 0.0
    return matching / significant
def subtract_profiles(prof1, prof2):
    """Subtract prof2 from prof1, zeroing out differences whose magnitude
    is below the significance threshold (|d| <= 1.58).
    A new profile is returned."""
    assert len(prof1) == len(prof2), \
        "Activity Profiles must have the same length to be compared."
    return [0.0 if abs(a - b) <= 1.58 else a - b
            for a, b in zip(prof1, prof2)]
def del_nz_positions(prof1, prof2):
    """Return a copy of prof1 in which every position that is non-zero in
    *both* profiles has been set to zero."""
    assert len(prof1) == len(prof2), \
        "Activity Profiles must have the same length to be compared."
    return [0.0 if (a != 0.0 and b != 0.0) else a
            for a, b in zip(prof1, prof2)]
def mol_from_smiles(smi):
    """Parse a SMILES string into an RDKit Mol, falling back to the
    wildcard molecule "*" for non-string or unparsable input."""
    smiles = smi if isinstance(smi, str) else "*"
    mol = Chem.MolFromSmiles(smiles)
    if mol:
        return mol
    return Chem.MolFromSmiles("*")
def chem_sim(mol_fp, query_smi):
    """Tanimoto similarity between a precomputed Morgan fingerprint and a
    query SMILES, rounded to 3 decimals; NaN for trivial (<=1 atom) queries."""
    query = mol_from_smiles(query_smi)
    if len(query.GetAtoms()) <= 1:
        return np.nan
    query_fp = Chem.GetMorganFingerprint(query, 2)  # ECFC4
    return round(DataStructs.TanimotoSimilarity(mol_fp, query_fp), 3)
def split_plate_name(full_name, sep="-"):
    """Split the full platename into (date, plate).

    Returns a namedtuple ``Plate(date, name)``, or None when the name
    does not follow the ``<yymmdd><sep><name>`` spec.
    """
    parts = full_name.split(sep=sep, maxsplit=1)
    # BUG FIX: str.split never returns an empty list, so the original
    # `len(parts) == 0` test could not reject names without a separator;
    # `parts[1]` then raised IndexError instead of returning None.
    if len(parts) < 2:
        return None  # The full plate name needs to contain at least one sep.
    if len(parts[0]) != 6:
        return None  # The date has to be of format <yymmdd>.
    Plate = namedtuple("Plate", ["date", "name"])
    return Plate(date=parts[0], name=parts[1])
def get_plates_in_dir(dir, exclude=["layout"]):
    """Return a list of all plates in the given dir.

    Scans the immediate subdirectories and keeps those whose path avoids
    every `exclude` substring and whose name conforms to the
    `split_plate_name` spec. Returns a list of full plate-name strings."""
    found = []
    for entry in glob.glob(op.join(dir, "*")):
        if any(token in entry for token in exclude):
            continue
        if not op.isdir(entry):
            continue
        plate_name = op.split(entry)[1]
        if split_plate_name(plate_name) is None:
            continue  # the dir name does not conform to the spec
        found.append(plate_name)  # append the full platename as string
    return found
def format_well(well):
    """Fix well format, e.g. `A1` --> `A01`."""
    wl = len(well)
    assert 2 <= wl <= 4, "well has to have 2 - 4 characters!"
    row = []
    column = []
    for c in well:
        if c.isalpha():
            row.append(c.upper())
        else:
            row_str = "".join(row)
            assert row_str in ROWS, "row {} is not a valid row.".format(row_str)
            column.append(c)
    if len(column) < 2:
        column.insert(0, "0")  # pad the column to two digits
    return "".join(row + column)
def well_from_position_single(row, col):
    """Build a well name (e.g. "A01") from 1-based row and column numbers."""
    return "{}{:02d}".format(ROWS[row - 1], col)
def position_from_well_single(well):
    """Convert a well name (e.g. "A01") into 1-based (row, column) numbers.

    Raises ValueError when the leading letters are not a valid row."""
    row_chars = []
    col_chars = []
    for c in well:
        if c.isalpha():
            row_chars.append(c.upper())
        else:
            row_str = "".join(row_chars)
            try:
                row_num = ROWS.index(row_str) + 1
            except ValueError:
                raise ValueError("row {} is not a valid row.".format(row_str))
            col_chars.append(c)
    column_num = int("".join(col_chars))
    return row_num, column_num
def find_dups(it):
    """Return {item: count} for every item occurring more than once in *it*."""
    counts = Counter(it)
    return {item: n for item, n in counts.items() if n > 1}
def diff(it1, it2):
    """Return the elements of it1 that do not occur in it2 (order kept)."""
    exclude = set(it2)
    return [item for item in it1 if item not in exclude]
def print_dir(obj):
    """Print the public (non-underscore) attribute names of *obj*."""
    for name in dir(obj):
        if not name.startswith("_"):
            print(name)
def print_iter(l):
    """Print every element of the iterable on its own line."""
    for element in l:
        print(element)
def create_dirs(path):
    """Ensure that *path* exists as a directory (parents included)."""
    os.makedirs(path, exist_ok=True)
def empty_dir(path):
    """Remove all files directly inside *path* (subdirectories are kept).
    A no-op when *path* is not a directory."""
    if not op.isdir(path):
        return
    for entry in os.listdir(path):
        full_name = op.join(path, entry)
        if op.isfile(full_name):
            os.unlink(full_name)
def middle(lst, size):
    """Return the middle `size` fraction of the sorted input, removing
    the outermost values (outliers). `size` is a fraction in 0 .. 1."""
    ordered = sorted(lst)
    num_el = int(size * len(ordered))
    start = (len(ordered) - num_el) // 2
    return ordered[start:start + num_el]
def melt(df, id_prop="Compound_Id"):
    """Reshape a wide dataframe into long format with the columns
    [Parameter, <id_prop>, Value].
    Taken and adapted from the Holoviews measles heatmap example."""
    molten = pd.melt(df, id_vars=id_prop,
                     var_name="Parameter", value_name="Value")
    molten = molten.reset_index(drop=True)
    return molten[["Parameter", id_prop, "Value"]]
def show_available_plates():
    """Print the full names of all plates found in the configured PlatesDir."""
    config = load_config("config")
    print("Available Plates:")
    for plate_full_name in get_plates_in_dir(config["Dirs"]["PlatesDir"]):
        print(" -", plate_full_name)
def listify(s, sep=" ", as_int=True):
    """A helper func for the Jupyter Notebook,
    which generates a correctly formatted list out of pasted text.

    Surrounding brackets are stripped, tokens are converted to int
    (or float when `as_int` is False) where possible, and unparsable
    tokens are kept as strings."""
    to_number = int if as_int else float
    text = s
    if text.startswith("["):
        text = text[1:]
    if text.endswith("]"):
        text = text[:-1]
    result = []
    for token in text.split(sep):
        if not token:
            continue
        try:
            result.append(to_number(token))
        except ValueError:
            # keep tokens that are not numbers as plain strings
            result.append(token)
    return result
def list_from_file(fn, skip=0):
    """Return a list of strings from reading a file, optionally skipping
    `skip` rows at the beginning.

    FIX: the original used ``open(fn).read()`` which leaked the file
    handle; a context manager closes it deterministically.
    """
    with open(fn) as f:
        lines = f.read().strip().split("\n")
    return lines[skip:]
def get_act_parm(prof, act_prof_parm=ACT_PROF_PARAMETERS):
    """Get a list of parameters that are active (value diff. from zero)
    in the given profile."""
    return [act_prof_parm[idx]
            for idx, val in enumerate(prof) if val != 0.0]
|
{"hexsha": "acc3fc6c2acc0495d8d12f612f8db50f46203a8d", "size": 11059, "ext": "py", "lang": "Python", "max_stars_repo_path": "cellpainting2/tools.py", "max_stars_repo_name": "apahl/cellpainting2", "max_stars_repo_head_hexsha": "b0fd4adbca804b136e3dca9c1b466a6efb49cbd3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cellpainting2/tools.py", "max_issues_repo_name": "apahl/cellpainting2", "max_issues_repo_head_hexsha": "b0fd4adbca804b136e3dca9c1b466a6efb49cbd3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-04-11T13:37:43.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-12T07:22:30.000Z", "max_forks_repo_path": "cellpainting2/tools.py", "max_forks_repo_name": "apahl/cellpainting2", "max_forks_repo_head_hexsha": "b0fd4adbca804b136e3dca9c1b466a6efb49cbd3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1794195251, "max_line_length": 98, "alphanum_fraction": 0.5990595895, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2929}
|
# example of neural net functions in Econometrics.jl
using Econometrics, Glob
# generate draws from linear regression model, and
# fitted coefficients from correct model, plus
# quadratic and cubic models (irrelevant regressors)
# and 5 pure noise statistics
"""
    make_simdata(reps=100000)

Generate `reps` Monte Carlo draws from a linear regression model with
n = 30 observations, an intercept and 4 regressors. Each row of the
returned `reps × 41` matrix contains: the true parameters drawn from the
prior (5 coefficients and log sigma), the OLS estimates and log sigma-hat
from the correctly specified linear model, from a quadratic and a cubic
model (both containing irrelevant regressors), and 5 pure-noise statistics.
"""
function make_simdata(reps=100000)
    n = 30
    simdata = zeros(reps, 35+6)
    for rep = 1:reps
        # draw the regressors
        x = randn(n,4)
        z = [ones(n,1) x]
        # draw the parameters from prior
        b = randn(5,1)
        sig = exp.(randn(1))
        # generate dependent variable
        y = z*b + sig.*randn(n,1)
        # linear model
        bhat1 = z\y
        uhat = y-z*bhat1
        sighat1 = sqrt.(uhat'*uhat/(n-size(z,2)))
        # quadratic model
        z = [ones(n,1) x 0.1*x.^2.0]
        bhat2 = z\y
        uhat = y-z*bhat2
        sighat2 = sqrt.(uhat'*uhat/(n-size(z,2)))
        # cubic model
        z = [ones(n,1) x 0.1*x.^2.0 0.01*x.^3.0]
        bhat3 = z\y
        uhat = y-z*bhat3
        sighat3 = sqrt.(uhat'*uhat/(n-size(z,2)))
        # pure noise
        z = randn(1,5)
        # assemble one row: [true params | linear fit | quadratic fit | cubic fit | noise]
        simdata[rep,:] = [b' log.(sig) bhat1' log.(sighat1) bhat2' log.(sighat2) bhat3' log.(sighat3) z]
    end
    return simdata
end
# fit a neural net to the linear model data, and show influence of statistics
"""
    main()

Fit a feed-forward neural net (hidden layers of 20 and 10 units) to the
simulated regression statistics, using the first 80000 draws for
training, then analyze and plot the influence of the input statistics on
the fitted outputs. Temporary "olsnet-*" checkpoint files are removed at
the end.
"""
function main()
    data = make_simdata()
    noutputs = 6
    trainsize = 80000
    savefile = "olsnet"
    layerconfig = [20, 10, 0, 0]
    epochs = 30
    TrainNet(data, trainsize, noutputs, layerconfig, 512, epochs, savefile)
    # hold-out targets and the plain OLS fit, for comparison with the net
    Y = data[trainsize+1:end,1:noutputs]
    olsfit = data[trainsize+1:end,(noutputs+1):(2*noutputs)]
    title = "linear regression example"
    params = ["constant", "x1","x2","x3","x4","x5"]
    fit = AnalyzeNet(savefile, epochs, data, trainsize, noutputs, title=title, params=params, doplot=true);
    # clean up the checkpoint files written by TrainNet
    rm.(glob("olsnet-*"))
end
main();
|
{"hexsha": "a55d0937f546ccb0da670c2679c4fa508ce932f1", "size": 1895, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/neural_net_example.jl", "max_stars_repo_name": "UserQuestions/Econometrics.jl", "max_stars_repo_head_hexsha": "f9db0ca3046e7c5328f085581a12b1c733cf9bcf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/neural_net_example.jl", "max_issues_repo_name": "UserQuestions/Econometrics.jl", "max_issues_repo_head_hexsha": "f9db0ca3046e7c5328f085581a12b1c733cf9bcf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/neural_net_example.jl", "max_forks_repo_name": "UserQuestions/Econometrics.jl", "max_forks_repo_head_hexsha": "f9db0ca3046e7c5328f085581a12b1c733cf9bcf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1186440678, "max_line_length": 107, "alphanum_fraction": 0.5920844327, "num_tokens": 632}
|
# Copyright 2017 Amir Hossein Delgoshaie, amirdel@stanford.edu
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee
# is hereby granted, provided that the above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE
# FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import numpy as np
import pickle
import os
import time
import matplotlib.pyplot as plt
from py_dp.dispersion.dispersion_aux_classes import dispersionSaver
from py_dp.dispersion.dispersion_models import DispModelStencilMethod, DispModelExtendedStencil, DispModelUncorrelated
from py_dp.dispersion.transition_matrix_fcns import get_trans_matrix_single_attrib_both_methods, normalize_columns
from py_dp.dispersion.dispersion_visualization_tools import compare_trans_mat, compare_trans_mat_hist, compare_trans_mat_vtheta
from py_dp.dispersion.plot_wrapper_functions import plot_wrapper_with_saved_data, generate_plot_data
from py_dp.dispersion.dispersion_visualization_tools import model_holder, data_holder
from py_dp.dispersion.mapping_input import TemporalMapInput
from py_dp.dispersion.mapping import mapping_v_theta_repeat
from py_dp.dispersion.dispersion_aux_classes import correlatedSaver3d
# The small drift in the x direction is introduced again, find out why...
# NOTE(review): this script uses Python 2 print statements.
# --- run configuration -------------------------------------------------
n_total = 5000
theta, n_nodes = np.pi/4, 500
t_start = time.time()
# stencil coefficients; one model per entry, labeled below
coeff_array = [10.0, 20.0, 40.0, 80.0, 160.0]
# coeff_array = [10.0, 20.0]
avg_available = True
# --- folder layout: summary data is read, figures are written ----------
study_folder = '/home/amirhossein/research_data/cees_plots/dt_uncorrelated'
summary_data_folder = os.path.join(study_folder, 'summary_data')
summary_pics_folder = os.path.join(study_folder, 'summary_pics')
for folder in [summary_pics_folder]:
    if not os.path.exists(folder):
        os.mkdir(folder)
# --- load time scale and network length saved by the simulation --------
t_file_path = os.path.join(study_folder, 'time_file.npz')
time_file = np.load(t_file_path)
t_end, t_scale = time_file['t_end'], time_file['t_scale']
network_length_path = os.path.join(study_folder, 'network_length.npz')
network_length_file = np.load(network_length_path)
l = network_length_file['l']
time_step = 10*t_scale
# one label per coefficient, e.g. 10.0 -> "10"
model_labels = []
for i in coeff_array:
    coeff_str = str(i).split('.')[0]
    model_labels.append(''+coeff_str)
# --- matplotlib styling for the summary plots --------------------------
plt.rcParams.update({'font.size': 20})
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
plt.rcParams.update({'figure.autolayout': True})
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'serif', 'serif': ['Stix']})
legend_size = 13
datalabel = r"$data$"
save_name = 'all'
y_correction = 0.0
lw = 1
fmt = 'pdf'
# breakthrough-time plot bounds
bt_bound_box = [0, 800]
# generate all summary figures from the pre-computed data
plot_wrapper_with_saved_data(t_end, t_scale, time_step, summary_data_folder, summary_pics_folder, save_name,
                             datalabel, model_labels, l, theta, y_correction, lw, fmt,
                             zoom_plots=False, n_pores=n_nodes, bt_bound_box=bt_bound_box)
print '------------------------------------------------------------------------'
t_finish = time.time()
print 'Total time: {:.2f} seconds'.format(t_finish - t_start)
|
{"hexsha": "bb957ed6d82e594a1e056c15581a165ccbae810e", "size": 3464, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_scripts/plot_test_uncorrelated_cees.py", "max_stars_repo_name": "amirdel/dispersion-continua", "max_stars_repo_head_hexsha": "2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plot_scripts/plot_test_uncorrelated_cees.py", "max_issues_repo_name": "amirdel/dispersion-continua", "max_issues_repo_head_hexsha": "2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plot_scripts/plot_test_uncorrelated_cees.py", "max_forks_repo_name": "amirdel/dispersion-continua", "max_forks_repo_head_hexsha": "2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.987012987, "max_line_length": 127, "alphanum_fraction": 0.7647228637, "include": true, "reason": "import numpy", "num_tokens": 861}
|
% !TeX spellcheck = en_GB
\chapter{Results}
\section{LWC and LWP from MEPS}%\hfill}
\label{app:LWP_MEPS}
%%% image LWC Retrieval MEPS comparison %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[t]%\ContinuedFloat
\centering
% 21/12
\begin{subfigure}[t]{0.85\textwidth}
\includegraphics[trim={.5cm 0.5cm 27cm .5cm},clip,width=\textwidth]{./fig_LWC/20161221}
\caption{}\label{fig:LWC21}
\end{subfigure}
\centering
% 22/12
\begin{subfigure}[t]{0.85\textwidth}
\includegraphics[trim={0.5cm 0.5cm 27cm .5cm},clip,width=\textwidth]{./fig_LWC/20161222}
\caption{}\label{fig:LWC22}
\end{subfigure}
%\end{figure}
%\begin{figure}\ContinuedFloat
\centering
% 23/12
\begin{subfigure}[t]{0.85\textwidth}
\includegraphics[trim={0.5cm 0.5cm 27cm .5cm},clip,width=\textwidth]{./fig_LWC/20161223}
\caption{}\label{fig:LWC23}
\end{subfigure}
\end{figure}
\begin{figure}\ContinuedFloat
\centering
% 24/12
\begin{subfigure}[t]{0.85\textwidth}
\includegraphics[trim={0.5cm 0.5cm 27cm .5cm},clip,width=\textwidth]{./fig_LWC/20161224}
\caption{}\label{fig:LWC24}
\end{subfigure}
\centering
% 25/12
\begin{subfigure}[t]{0.85\textwidth}
\includegraphics[trim={0.5cm 0.5cm 27cm .5cm},clip,width=\textwidth]{./fig_LWC/20161225}
\caption{}\label{fig:LWC25}
\end{subfigure}
%\end{figure}
%\begin{figure}\ContinuedFloat
\centering
% 26/12
\begin{subfigure}[t]{0.85\textwidth}
\includegraphics[trim={0.5cm 0.5cm 27cm .5cm},clip,width=\textwidth]{./fig_LWC/20161226}
\caption{}\label{fig:LWC26}
\end{subfigure}
\caption{Upper panel: \SI{200}{\metre}-averaged LWC ensemble mean forecast from MEPS.
		Lower panel: LWP from MEPS, initialised at \SI{00}{\UTC}. Black line represents the deterministic forecast, the dotted blue line the ensemble mean and the grey lines the nine perturbed members.}\label{fig:LWC}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
{"hexsha": "5c4e943779929119f4481b28e4bc05b4c33b918f", "size": 1934, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "thesis_full/Appendix/LWC_MEPS.tex", "max_stars_repo_name": "franzihe/Latex_thesis", "max_stars_repo_head_hexsha": "128284a01155bdc28b3e9374e538a07a1e5722c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thesis_full/Appendix/LWC_MEPS.tex", "max_issues_repo_name": "franzihe/Latex_thesis", "max_issues_repo_head_hexsha": "128284a01155bdc28b3e9374e538a07a1e5722c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thesis_full/Appendix/LWC_MEPS.tex", "max_forks_repo_name": "franzihe/Latex_thesis", "max_forks_repo_head_hexsha": "128284a01155bdc28b3e9374e538a07a1e5722c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1636363636, "max_line_length": 210, "alphanum_fraction": 0.6763185109, "num_tokens": 716}
|
\documentclass[11pt,addpoints,answers]{exam}
%\documentclass[11pt]{article}
\usepackage[margin=1in]{geometry}
\usepackage{amsmath, amsfonts}
\usepackage{enumerate}
\usepackage{graphicx}
\usepackage{titling}
\usepackage{url}
\usepackage{xfrac}
% \usepackage{fancyhdr} % CONFLICTS with the exam class
\usepackage{geometry}
\usepackage{graphicx}
\usepackage{natbib}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{paralist}
\usepackage{epstopdf}
\usepackage{tabularx}
\usepackage{longtable}
\usepackage{multirow}
\usepackage{multicol}
\usepackage[colorlinks=true,urlcolor=blue]{hyperref}
\usepackage{fancyvrb}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{float}
\usepackage{paralist}
\usepackage[svgname]{xcolor}
\usepackage{enumerate}
\usepackage{array}
\usepackage{times}
\usepackage{url}
\usepackage{comment}
\usepackage{environ}
\usepackage{times}
\usepackage{textcomp}
\usepackage{caption}
\usepackage[colorlinks=true,urlcolor=blue]{hyperref}
\usepackage{listings}
\usepackage{parskip} % For NIPS style paragraphs.
\usepackage[compact]{titlesec} % Less whitespace around titles
\usepackage[inline]{enumitem} % For inline enumerate* and itemize*
\usepackage{datetime}
\usepackage{comment}
% \usepackage{minted}
\usepackage{lastpage}
\usepackage{color}
\usepackage{xcolor}
\usepackage{listings}
\usepackage{tikz}
\usetikzlibrary{shapes,decorations,bayesnet}
%\usepackage{framed}
\usepackage{booktabs}
\usepackage{cprotect}
\usepackage{xcolor}
\usepackage{verbatimbox}
\usepackage[many]{tcolorbox}
\usepackage{cancel}
\usepackage{wasysym}
\usepackage{mdframed}
\usepackage{subcaption}
\usetikzlibrary{shapes.geometric}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Formatting for \CorrectChoice of "exam" %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\CorrectChoiceEmphasis{}
\checkedchar{\blackcircle}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Better numbering %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\numberwithin{equation}{section} % Number equations within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{figure}{section} % Number figures within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{table}{section} % Number tables within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Common Math Commands %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Custom commands %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\vc}[1]{\boldsymbol{#1}}
\newcommand{\adj}[1]{\frac{d J}{d #1}}
\newcommand{\chain}[2]{\adj{#2} = \adj{#1}\frac{d #1}{d #2}}
% mathcal
\newcommand{\Ac}{\mathcal{A}}
\newcommand{\Bc}{\mathcal{B}}
\newcommand{\Cc}{\mathcal{C}}
\newcommand{\Dc}{\mathcal{D}}
\newcommand{\Ec}{\mathcal{E}}
\newcommand{\Fc}{\mathcal{F}}
\newcommand{\Gc}{\mathcal{G}}
\newcommand{\Hc}{\mathcal{H}}
\newcommand{\Ic}{\mathcal{I}}
\newcommand{\Jc}{\mathcal{J}}
\newcommand{\Kc}{\mathcal{K}}
\newcommand{\Lc}{\mathcal{L}}
\newcommand{\Mc}{\mathcal{M}}
\newcommand{\Nc}{\mathcal{N}}
\newcommand{\Oc}{\mathcal{O}}
\newcommand{\Pc}{\mathcal{P}}
\newcommand{\Qc}{\mathcal{Q}}
\newcommand{\Rc}{\mathcal{R}}
\newcommand{\Sc}{\mathcal{S}}
\newcommand{\Tc}{\mathcal{T}}
\newcommand{\Uc}{\mathcal{U}}
\newcommand{\Vc}{\mathcal{V}}
\newcommand{\Wc}{\mathcal{W}}
\newcommand{\Xc}{\mathcal{X}}
\newcommand{\Yc}{\mathcal{Y}}
\newcommand{\Zc}{\mathcal{Z}}
% mathbb
\newcommand{\Ab}{\mathbb{A}}
\newcommand{\Bb}{\mathbb{B}}
\newcommand{\Cb}{\mathbb{C}}
\newcommand{\Db}{\mathbb{D}}
\newcommand{\Eb}{\mathbb{E}}
\newcommand{\Fb}{\mathbb{F}}
\newcommand{\Gb}{\mathbb{G}}
\newcommand{\Hb}{\mathbb{H}}
\newcommand{\Ib}{\mathbb{I}}
\newcommand{\Jb}{\mathbb{J}}
\newcommand{\Kb}{\mathbb{K}}
\newcommand{\Lb}{\mathbb{L}}
\newcommand{\Mb}{\mathbb{M}}
\newcommand{\Nb}{\mathbb{N}}
\newcommand{\Ob}{\mathbb{O}}
\newcommand{\Pb}{\mathbb{P}}
\newcommand{\Qb}{\mathbb{Q}}
\newcommand{\Rb}{\mathbb{R}}
\newcommand{\Sb}{\mathbb{S}}
\newcommand{\Tb}{\mathbb{T}}
\newcommand{\Ub}{\mathbb{U}}
\newcommand{\Vb}{\mathbb{V}}
\newcommand{\Wb}{\mathbb{W}}
\newcommand{\Xb}{\mathbb{X}}
\newcommand{\Yb}{\mathbb{Y}}
\newcommand{\Zb}{\mathbb{Z}}
% mathbf lowercase
\newcommand{\av}{\mathbf{a}}
\newcommand{\bv}{\mathbf{b}}
\newcommand{\cv}{\mathbf{c}}
\newcommand{\dv}{\mathbf{d}}
\newcommand{\ev}{\mathbf{e}}
\newcommand{\fv}{\mathbf{f}}
\newcommand{\gv}{\mathbf{g}}
\newcommand{\hv}{\mathbf{h}}
\newcommand{\iv}{\mathbf{i}}
\newcommand{\jv}{\mathbf{j}}
\newcommand{\kv}{\mathbf{k}}
\newcommand{\lv}{\mathbf{l}}
\newcommand{\mv}{\mathbf{m}}
\newcommand{\nv}{\mathbf{n}}
\newcommand{\ov}{\mathbf{o}}
\newcommand{\pv}{\mathbf{p}}
\newcommand{\qv}{\mathbf{q}}
\newcommand{\rv}{\mathbf{r}}
\newcommand{\sv}{\mathbf{s}}
\newcommand{\tv}{\mathbf{t}}
\newcommand{\uv}{\mathbf{u}}
\newcommand{\vv}{\mathbf{v}}
\newcommand{\wv}{\mathbf{w}}
\newcommand{\xv}{\mathbf{x}}
\newcommand{\yv}{\mathbf{y}}
\newcommand{\zv}{\mathbf{z}}
% mathbf uppercase
\newcommand{\Av}{\mathbf{A}}
\newcommand{\Bv}{\mathbf{B}}
\newcommand{\Cv}{\mathbf{C}}
\newcommand{\Dv}{\mathbf{D}}
\newcommand{\Ev}{\mathbf{E}}
\newcommand{\Fv}{\mathbf{F}}
\newcommand{\Gv}{\mathbf{G}}
\newcommand{\Hv}{\mathbf{H}}
\newcommand{\Iv}{\mathbf{I}}
\newcommand{\Jv}{\mathbf{J}}
\newcommand{\Kv}{\mathbf{K}}
\newcommand{\Lv}{\mathbf{L}}
\newcommand{\Mv}{\mathbf{M}}
\newcommand{\Nv}{\mathbf{N}}
\newcommand{\Ov}{\mathbf{O}}
\newcommand{\Pv}{\mathbf{P}}
\newcommand{\Qv}{\mathbf{Q}}
\newcommand{\Rv}{\mathbf{R}}
\newcommand{\Sv}{\mathbf{S}}
\newcommand{\Tv}{\mathbf{T}}
\newcommand{\Uv}{\mathbf{U}}
\newcommand{\Vv}{\mathbf{V}}
\newcommand{\Wv}{\mathbf{W}}
\newcommand{\Xv}{\mathbf{X}}
\newcommand{\Yv}{\mathbf{Y}}
\newcommand{\Zv}{\mathbf{Z}}
% bold greek lowercase
\newcommand{\alphav }{\boldsymbol \alpha }
\newcommand{\betav }{\boldsymbol \beta }
\newcommand{\gammav }{\boldsymbol \gamma }
\newcommand{\deltav }{\boldsymbol \delta }
\newcommand{\epsilonv }{\boldsymbol \epsilon }
\newcommand{\varepsilonv}{\boldsymbol \varepsilon}
\newcommand{\zetav }{\boldsymbol \zeta }
\newcommand{\etav }{\boldsymbol \eta }
\newcommand{\thetav }{\boldsymbol \theta }
\newcommand{\varthetav }{\boldsymbol \vartheta }
\newcommand{\iotav }{\boldsymbol \iota }
\newcommand{\kappav }{\boldsymbol \kappa }
\newcommand{\varkappav }{\boldsymbol \varkappa }
\newcommand{\lambdav }{\boldsymbol \lambda }
\newcommand{\muv }{\boldsymbol \mu }
\newcommand{\nuv }{\boldsymbol \nu }
\newcommand{\xiv }{\boldsymbol \xi }
\newcommand{\omicronv }{\boldsymbol \omicron }
\newcommand{\piv }{\boldsymbol \pi }
\newcommand{\varpiv }{\boldsymbol \varpi }
\newcommand{\rhov }{\boldsymbol \rho }
\newcommand{\varrhov }{\boldsymbol \varrho }
\newcommand{\sigmav }{\boldsymbol \sigma }
\newcommand{\varsigmav }{\boldsymbol \varsigma }
\newcommand{\tauv }{\boldsymbol \tau }
\newcommand{\upsilonv }{\boldsymbol \upsilon }
\newcommand{\phiv }{\boldsymbol \phi }
\newcommand{\varphiv }{\boldsymbol \varphi }
\newcommand{\chiv }{\boldsymbol \chi }
\newcommand{\psiv }{\boldsymbol \psi }
\newcommand{\omegav }{\boldsymbol \omega }
% bold greek uppercase
\newcommand{\Gammav }{\boldsymbol \Gamma }
\newcommand{\Deltav }{\boldsymbol \Delta }
\newcommand{\Thetav }{\boldsymbol \Theta }
\newcommand{\Lambdav }{\boldsymbol \Lambda }
\newcommand{\Xiv }{\boldsymbol \Xi }
\newcommand{\Piv }{\boldsymbol \Pi }
\newcommand{\Sigmav }{\boldsymbol \Sigma }
\newcommand{\Upsilonv }{\boldsymbol \Upsilon }
\newcommand{\Phiv }{\boldsymbol \Phi }
\newcommand{\Psiv }{\boldsymbol \Psi }
\newcommand{\Omegav }{\boldsymbol \Omega }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Code highlighting with listings %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\definecolor{bluekeywords}{rgb}{0.13,0.13,1}
\definecolor{greencomments}{rgb}{0,0.5,0}
\definecolor{redstrings}{rgb}{0.9,0,0}
\definecolor{light-gray}{gray}{0.95}
\newcommand{\MYhref}[3][blue]{\href{#2}{\color{#1}{#3}}}%
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.58,0,0.82}
\lstdefinelanguage{Shell}{
keywords={tar, cd, make},
%keywordstyle=\color{bluekeywords}\bfseries,
alsoletter={+},
ndkeywords={python3, python, py, javac, java, gcc, c, g++, cpp, .txt, m, .tar},
%ndkeywordstyle=\color{bluekeywords}\bfseries,
identifierstyle=\color{black},
sensitive=false,
comment=[l]{//},
morecomment=[s]{/*}{*/},
commentstyle=\color{purple}\ttfamily,
stringstyle=\color{red}\ttfamily,
morestring=[b]',
morestring=[b]",
backgroundcolor = \color{light-gray}
}
\lstset{columns=fixed, basicstyle=\ttfamily,
backgroundcolor=\color{light-gray},xleftmargin=0.5cm,frame=tlbr,framesep=4pt,framerule=0pt}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Custom box for highlights %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Define box and box title style
\tikzstyle{mybox} = [fill=blue!10, very thick,
rectangle, rounded corners, inner sep=1em, inner ysep=1em]
% \newcommand{\notebox}[1]{
% \begin{tikzpicture}
% \node [mybox] (box){%
% \begin{minipage}{\textwidth}
% #1
% \end{minipage}
% };
% \end{tikzpicture}%
% }
\NewEnviron{notebox}{
\begin{tikzpicture}
\node [mybox] (box){
\begin{minipage}{\textwidth}
\BODY
\end{minipage}
};
\end{tikzpicture}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Commands showing / hiding solutions %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% To HIDE SOLUTIONS (to post at the website for students), set this value to 0: \def\issoln{0}
\def\issoln{0}
% Some commands to allow solutions to be embedded in the assignment file.
\ifcsname issoln\endcsname \else \def\issoln{0} \fi
% Default to an empty solutions environ.
\NewEnviron{soln}{}{}
% Default to an empty qauthor environ.
\NewEnviron{qauthor}{}{}
% Default to an empty learning objective environ.
\NewEnviron{qlearningobjective}{}
% Default to visible (but empty) solution box.
\newtcolorbox[]{studentsolution}[1][]{%
breakable,
enhanced,
colback=white,
title=Solution,
#1
}
\if\issoln 0
% Otherwise, include solutions as below.
\RenewEnviron{soln}{
\leavevmode\color{red}\ignorespaces
\textbf{Solution} \BODY
}{}
% Learning objective environment
\RenewEnviron{qlearningobjective}{
\leavevmode\color{blue}\ignorespaces \textbf{Learning Objective } \BODY }{}
\fi
\if\issoln 1
% Otherwise, include solutions as below.
\RenewEnviron{solution}{}
\fi
% Default to an empty tags environ.
\NewEnviron{tags}{}{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Commands for customizing the assignment %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand{\courseNum}{10-301 / 10-601}
\newcommand{\courseName}{Introduction to Machine Learning}
\newcommand{\courseSem}{Fall 2021}
\newcommand{\courseUrl}{\url{http://mlcourse.org}}
\newcommand{\hwNum}{Homework 1}
\newcommand{\hwTopic}{Background}
\newcommand{\hwName}{\hwNum: \hwTopic}
\newcommand{\outDate}{Sept. 1, 2021}
\newcommand{\dueDate}{Sept. 8, 2021}
\newcommand{\taNames}{Sana, Catherine, Joseph, Zachary, Brendon}
%\pagestyle{fancyplain}
\lhead{\hwName}
\rhead{\courseNum}
\cfoot{\thepage{} of \numpages{}}
\title{\textsc{\hwName}} % Title
\author{}
\date{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Useful commands for typesetting the questions %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand \expect {\mathbb{E}}
\newcommand \mle [1]{{\hat #1}^{\rm MLE}}
\newcommand \map [1]{{\hat #1}^{\rm MAP}}
\newcommand \argmax {\operatorname*{argmax}}
\newcommand \argmin {\operatorname*{argmin}}
\newcommand \code [1]{{\tt #1}}
\newcommand \datacount [1]{\#\{#1\}}
\newcommand \ind [1]{\mathbb{I}\{#1\}}
\newcommand{\blackcircle}{\tikz\draw[black,fill=black] (0,0) circle (1ex);}
\renewcommand{\circle}{\tikz\draw[black] (0,0) circle (1ex);}
\newcommand{\pts}[1]{\textbf{[#1 pts]}}
%%%%%%%%%%%%%%%%%%%%%%%%%%
% Document configuration %
%%%%%%%%%%%%%%%%%%%%%%%%%%
% Don't display a date in the title and remove the white space
\predate{}
\postdate{}
\date{}
% Don't display an author and remove the white space
%\preauthor{}
%\postauthor{}
%%%%%%%%%%%%%%%%%%
% Begin Document %
%%%%%%%%%%%%%%%%%%
\begin{document}
\section*{}
\begin{center}
\textsc{\LARGE \hwNum} \\
\textsc{\LARGE \hwTopic\footnote{Compiled on \today{} at \currenttime{}}} \\
\vspace{1em}
\textsc{\large \courseNum{} \courseName{} (\courseSem)} \\
%\vspace{0.25em}
\courseUrl\\
\vspace{1em}
OUT: \outDate \\
%\vspace{0.5em}
DUE: \dueDate \\
TAs: \taNames
\end{center}
\section*{START HERE: Instructions}
\begin{itemize}
\item \textbf{Collaboration policy:} Collaboration on solving the homework is allowed, after you have thought about the problems on your own. It is also OK to get clarification (but not solutions) from books or online resources, again after you have thought about the problems on your own. There are two requirements: first, cite your collaborators fully and completely (e.g., ``Jane explained to me what is asked in Question 2.1''). Second, write your solution {\em independently}: close the book and all of your notes, and send collaborators out of the room, so that the solution comes from you only. See the Academic Integrity Section on the course site for more information: \url{http://www.cs.cmu.edu/~mgormley/courses/10601/syllabus.html#7-academic-integrity-policies}
\item\textbf{Late Submission Policy:} See the late submission policy here: \url{http://www.cs.cmu.edu/~mgormley/courses/10601/syllabus.html#late-homework-policy}
\item\textbf{Submitting your work:}
\begin{itemize}
% Since we are not using Canvas this semester.
% \item \textbf{Canvas:} We will use an online system called Canvas for short answer and multiple choice questions. You can log in with your Andrew ID and password. (As a reminder, never enter your Andrew password into any website unless you have first checked that the URL starts with "https://" and the domain name ends in ".cmu.edu" -- but in this case it's OK since both conditions are met). You may only \textbf{submit once} on canvas, so be sure of your answers before you submit. However, canvas allows you to work on your answers and then close out of the page and it will save your progress. You will not be granted additional submissions, so please be confident of your solutions when you are submitting your assignment.
\item \textbf{Programming:} You will submit your code for programming questions on the homework to Gradescope (\url{https://gradescope.com}). After uploading your code, our grading scripts will autograde your assignment by running your program on a virtual machine (VM). When you are developing, check that the version number of the programming language environment (e.g. Python 3.9.6, OpenJDK 11.0.11, g++ 7.5.0) and versions of permitted libraries (e.g. \texttt{numpy} 1.21.2 and \texttt{scipy} 1.7.1) match those used on Gradescope. You have a \textbf{total of 10 Gradescope programming submissions.} Use them wisely. In order to not waste code submissions, we recommend debugging your implementation on your local machine (or the linux servers) and making sure your code is running correctly first before any Gradescope coding submission. {\color{red} The above is true for future assignments, but this one allows \textbf{unlimited submissions.}}
\item \textbf{Written:} For written problems such as short answer, multiple choice, derivations, proofs, or plots, we will be using Gradescope (\url{https://gradescope.com/}). Please use the provided template. Submissions can be handwritten onto the template, but should be labeled and clearly legible. If your writing is not legible, you will not be awarded marks. Alternatively, submissions can be written in LaTeX. Regrade requests can be made, however this gives the TA the opportunity to regrade your entire paper, meaning if additional mistakes are found then points will be deducted.
Each derivation/proof should be completed on a separate page. For short answer questions you \textbf{should not} include your work in your solution. If you include your work in your solutions, your assignment may not be graded correctly by our AI assisted grader. {\color{red} For this assignment only, if you answer at least 90\% of the written questions correctly, you get full marks on the written portion of this assignment. For this assignment only, \textbf{we will offer two rounds of grading}. The first round of grading will happen immediately following the due date specified above. We will then release your grades to you and if you got less than 90\% on the written questions, you will be allowed to submit once again by a second due date. The exact due date for the second round will be announced after we release the first round grades. }
\end{itemize}
\item \textbf{Materials:} The data that you will need in order to complete this assignment is posted along with the writeup and template on Piazza.
\end{itemize}
%Homework 9 will be on Gradescope, but will be "Canvas-style"- all problems will be multiple choice, select all that apply, or numerical answer.
For multiple choice or select all that apply questions, shade in the box or circle in the template document corresponding to the correct answer(s) for each of the questions. For \LaTeX{} users, replace \lstinline{\choice} with \lstinline{\CorrectChoice} to obtain a shaded box/circle, and don't change anything else.
\clearpage
%\input{qtemplates.tex}
%\clearpage
\section{Programming: Decision Stump [30 Points]}
\subsection{Introduction}
In this homework you have to choose Python, Java, or C++ as your programming language. Submitting code for more than one language may result in undefined behavior.
The goal of this assignment is to ensure that you:
\begin{enumerate}
\item Have a way to edit and test your code (i.e. a text editor and compiler/interpreter)
\item Are familiar with submitting to Gradescope
\item Are familiar with file I/O and standard output in the language of your choice
\end{enumerate}
\textbf{Warning:} This handout assumes that you are using a unix command prompt (with \texttt{zsh, bash, csh} or similar). Windows commands may differ slightly.
\subsection{Decision Stump}
\subsubsection{Algorithm}
This simple algorithm acts as a precursor to the Decision \emph{Tree} that you will implement in the next homework assignment. We hope that you will employ best practices when coding so that you can re-use your own code here in the next assignment.
This assignment requires you to implement a Decision Stump. A Decision Stump is simply a decision tree of depth one (it predicts a class label for the input instance based on testing just one of the instance's attributes). You may assume that the attribute to be tested by your Decision Stump is provided as input to your program (on the command line). Your algorithm should partition the provided training data based on that attribute. You may assume that the attributes are always binary and that the output class label is always binary. As such, the left branch of your trained decision stump should assign a class label corresponding to the majority label among the training examples that sort down that branch. The right branch should do likewise for the other value of the attribute. The training procedure should store the decision stump data structure for use at test time. In case of a tie in majority vote, you may output either of the two values or pick randomly between them.
At test time, each example should be passed down through the stump. Its label becomes the label (i.e. the stored majority vote) of the corresponding branch in which it lands.
\subsubsection{The Datasets}
\label{sec:data}
\paragraph{Materials} Download the zip file from Piazza, which contains all the data that you will need in order to complete this assignment.
\paragraph{Datasets}
The handout contains three datasets. Each one contains attributes and labels and is already split into training and testing data. The first line of each \lstinline{.tsv} file contains the name of each attribute, and \emph{the class label is always the last column}.
\begin{enumerate}
\item \textbf{politician:}
The first task is to predict whether a US politician is a member of the Democrat or Republican party, based on their past voting history. Attributes (aka. features) are short descriptions of bills that were voted on, such as \emph{Aid\_to\_nicaraguan\_contras} or \emph{Duty\_free\_exports}. Values are given as \emph{`y'} for yes votes and \emph{`n'} for no votes. The training data is in \lstinline{politicians_train.tsv}, and the test data in \lstinline{politicians_test.tsv}.
\item \textbf{education:}
The second task is to predict the final \emph{grade} (A, not A) for high school students. The attributes (covariates, predictors) are student grades on 5 multiple choice assignments \emph{M1} through \emph{M5}, 4 programming assignments \emph{P1} through \emph{P4}, and the final exam \emph{F}. The training data is in \newline \lstinline{education_train.tsv}, and the test data in \lstinline{education_test.tsv}.
\item \textbf{small:}
We also include \lstinline{small_train.tsv} and \lstinline{small_test.tsv}---a small, purely for demonstration version of the politicians dataset, with \emph{only} attributes \emph{Anti\_satellite\_test\_ban} and \newline \emph{Export\_south\_africa}.
\end{enumerate}
The handout zip file also contains the predictions and metrics from a reference implementation of a Decision Stump for the \textbf{politician} (splitting on feature 3), \textbf{education} (splitting on feature 5) and \textbf{small} (splitting on feature 0) datasets (see subfolder \emph{example\_output}). You can check your own output against these to see if your implementation is correct.\footnote{Yes, you read that correctly: we are giving you the correct answers.}
\begin{notebox} \textbf{Note:}
For simplicity, all attributes are discretized into just two categories. This applies to all the datasets in the handout, as well as the additional datasets on which we will evaluate your Decision Stump.
\end{notebox}
\subsubsection{Command Line Arguments}
The autograder runs and evaluates the output from the files generated, using the following command:
\begin{tabular}{ll}
For Python: &
\begin{lstlisting}[language=Shell]
$ python3 decisionStump.py [args...]
\end{lstlisting}
\\
For Java: &
\begin{lstlisting}[language=Shell]
$ javac decisionStump.java; java decisionStump [args...]
\end{lstlisting}
\\
For C++: &
\begin{lstlisting}[language=Shell]
$ g++ -g decisionStump.cpp; ./a.out [args...]
\end{lstlisting}
\end{tabular}
Where above \lstinline{[args...]} is a placeholder for six command-line arguments:
\texttt{<train input> <test input> <split index> <train out> <test out> <metrics out>}. These arguments are described in detail below:
\begin{enumerate}
\item \lstinline{<train input>}: path to the training input \lstinline{.tsv} file
\item \lstinline{<test input>}: path to the test input \lstinline{.tsv} file
\item \lstinline{<split index>}: the index of feature at which we split the dataset. The first column has index 0, the second column index 1, and so on.
\item \lstinline{<train out>}: path of output \lstinline{.labels} file to which the predictions on the \textit{training} data should be written
\item \lstinline{<test out>}: path of output \lstinline{.labels} file to which the predictions on the \emph{test} data should be written
\item \lstinline{<metrics out>}: path of the output \lstinline{.txt} file to which metrics such as train and test error should be written
\end{enumerate}
As an example, if you implemented your program in Python, the following command line would run your program on the politicians dataset and split the dataset by the first feature (Remember that the index of feature starts from zero). The train predictions would be written to \lstinline{pol_0_train.labels}, the test predictions to \lstinline{pol_0_test.labels}, and the metrics to \lstinline{pol_0_metrics.txt}.
%
\begin{lstlisting}[language=Shell]
$ python3 decisionStump.py politicians_train.tsv politicians_test.tsv \
0 pol_0_train.labels pol_0_test.labels pol_0_metrics.txt
\end{lstlisting}
% In \texttt{reverse.\{py|m|java|cpp\}}, implement a program that reads in the lines of a file, then writes them in reverse order to an output file. Specifically, your program should take two command line arguments: the name of the input file and the name of the output file. It should read the lines of the input file and write them to the output file from last to first, separated by ``\textbackslash n". You should assume that the input file has unix-style line breaks. (Windows uses ``\textbackslash r\textbackslash n" to indicate a new line. Unix uses only ``\textbackslash n".)
% For example, if the file \texttt{input.txt} contained the stream
% \begin{verbatim}
% #pineapples\n#pinstripes\n#pinwheelofdoom\n#pinsir\n
% \end{verbatim}
% which is commonly displayed as
% \begin{verbatim}
% #pineapples
% #pinstripes
% #pinwheelofdoom
% #pinsir
% \end{verbatim}
% depending on your language of choice, one of the following:
% \begin{itemize}
% \item \texttt{python3 reverse.py input.txt output.txt}
% \item \texttt{javac reverse.java; java reverse input.txt output.txt}
% \item \texttt{g++ reverse.cpp; ./a.out input.txt output.txt}
% \end{itemize}
% should write the following to output.txt
% \begin{verbatim}
% #pinsir\n#pinwheelofdoom\n#pinstripes\n#pineapples\n
% \end{verbatim}
% which is displayed as
% \begin{verbatim}
% #pinsir
% #pinwheelofdoom
% #pinstripes
% #pineapples
% \end{verbatim}
\subsubsection{Output: Labels Files}
\label{sec:labels}
Your program should write two output \lstinline{.labels} files containing the predictions of your model on training data (\lstinline{<train out>}) and test data (\lstinline{<test out>}). Each should contain the predicted labels for each example printed on a new line. Use '\textbackslash n' to create a new line.
Your labels should exactly match those of a reference decision stump implementation---this will be checked by the autograder by running your program and evaluating your output file against the reference solution.
\textbf{Note}: You should output your predicted labels using the same string identifiers as the original training data: e.g., for the politicians dataset you should output democrat/republican and for the education dataset you should output A/notA.
%
The first few lines of an example output file is given below for the politician dataset:
\begin{quote}
\begin{verbatim}
republican
republican
democrat
democrat
democrat
democrat
democrat
...
\end{verbatim}
\end{quote}
\subsubsection{Output: Metrics File}
\label{sec:metrics}
Generate another file where you should report the training error and testing error. This file should be written to the path specified by the command line argument \lstinline{<metrics out>}. Your reported numbers should be within 0.01 of the reference solution. You do not need to round your reported numbers! The Autograder will automatically incorporate the right tolerance for float comparisons. The file should be formatted as follows:
% error(train): 0.3076532
% error(test): 0.4523292
\begin{quote}
\begin{verbatim}
error(train): 0.241611
error(test): 0.228916
\end{verbatim}
\end{quote}
\subsection{Command Line Arguments}
In this and future programming assignments, we will use command line arguments to run your programs with different parameters. Below, we provide some simple examples for how to do this in each of the programming languages you can use in the course. In the examples below, suppose your program takes two arguments: an input file and an output file.
Python:
\begin{lstlisting}[language=Python]
import sys
if __name__ == '__main__':
infile = sys.argv[1]
outfile = sys.argv[2]
print("The input file is: %s" % (infile))
    print("The output file is: %s" % (outfile))
\end{lstlisting}
Java:
\begin{lstlisting}[language=Java]
public class myclass {
public static void main(String[] args) {
String infile = args[0];
String outfile = args[1];
System.out.println("The input file is: " + infile);
System.out.println("The output file is: " + outfile);
}
}
\end{lstlisting}
C++:
\begin{lstlisting}[language=C++]
#include <iostream>
#include <string>
using namespace std;
int main(int argc, char **argv){
if (argc >= 3) {
string infile = string(argv[1]);
string outfile = string(argv[2]);
cout << "The input file is: " << infile << endl;
cout << "The output file is: " << outfile << endl;
}
return 0;
}
\end{lstlisting}
\subsection{Code Submission}
You must submit a file named \texttt{decisionStump.\{py|m|java|cpp\}}. The autograder is case sensitive, so observe that all your files should be named in \textbf{lowercase}. You must submit this file to the corresponding homework link on Gradescope.
Note: For this assignment, you may make arbitrarily many submissions to the autograder before the deadline, but only your last submission will be graded.
% \begin{notebox}
% {\bf Python3 Users:} Please include a blank file called python3.txt (case-sensitive) in your tar submission and we will execute your submitted program using Python 3 instead of Python 2.7. If the file is not present, we will default to running your code with Python 2.7.
% \end{notebox}
\clearpage
\section*{Instructions for Specific Problem Types}
For ``Select One" questions, please fill in the appropriate bubble completely:
\begin{quote}
\textbf{Select One:} Who taught this course?
\begin{checkboxes}
\CorrectChoice Matt Gormley / Henry Chai
\choice Marie Curie
\choice Noam Chomsky
\end{checkboxes}
\end{quote}
If you need to change your answer, you may cross out the previous answer and bubble in the new answer:
\begin{quote}
\textbf{Select One:} Who taught this course?
\begin{list}{}
\item\CIRCLE{} Matt Gormley / Henry Chai
\item\Circle{} Marie Curie\\
\xcancel{\CIRCLE}{} Noam Chomsky
\end{list}
\end{quote}
For ``Select all that apply" questions, please fill in all appropriate squares completely:
\begin{quote}
\textbf{Select all that apply:} Which are scientists?
{
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\begin{checkboxes}
\choice Stephen Hawking
\CorrectChoice Albert Einstein
\choice Isaac Newton
\choice None of the above
\end{checkboxes}
}
\end{quote}
Again, if you need to change your answer, you may cross out the previous answer(s) and bubble in the new answer(s):
\begin{quote}
\textbf{Select all that apply:} Which are scientists?
\begin{list}{}
\item $\blacksquare$ Stephen Hawking
\item $\blacksquare$ Albert Einstein
\item $\blacksquare$ Isaac Newton\\
\xcancel{$\blacksquare$} I don't know
\end{list}
\end{quote}
For questions where you must fill in a blank, please make sure your final answer is fully included in the given space. You may cross out answers or parts of answers, but the final answer must still be within the given space.
\begin{quote}
\textbf{Fill in the blank:} What is the course number?
\begin{tcolorbox}[fit,height=1cm, width=4cm, blank, borderline={1pt}{-2pt},nobeforeafter]
\begin{center}\huge10-601\end{center}
\end{tcolorbox}\hspace{2cm}
\begin{tcolorbox}[fit,height=1cm, width=4cm, blank, borderline={1pt}{-2pt},nobeforeafter]
\begin{center}\huge10-\xcancel{7}601\end{center}
\end{tcolorbox}
\end{quote}
\clearpage
% added course policy section
% currently it is stated that these questions will not be graded, but must be finished
\clearpage
\section{Written Questions \pts{\numpoints{}}}
In this section, you will work through a number of problems covering prerequisite material: probability, statistics, calculus, linear algebra, geometry, and computer science. The first subsection covers common course policy questions.
\subsection{Course Policies}
This section covers important course policies that every student should know and understand. These questions MUST be finished in order for the whole homework to be considered for grading.
\begin{questions}
\question[1] Assignments turned in late without prior approval will incur a daily penalty. How much is the penalty? Up to 1 day: \underline{\hspace{0.5cm}} Up to 2 days: \underline{\hspace{0.5cm}} Up to 3 days:
\underline{\hspace{0.5cm}} Up to 4 days:
\underline{\hspace{0.5cm}}
\textbf{Select one:}
\begin{checkboxes}
\choice 5\%, 10\%, 15\%, 20\%
\choice 10\%, 20\%, 30\%, 40\%
\choice 25\%, 50\%, 75\%, 100\%
\choice 20\%, 40\%, 60\%, 80\%
\end{checkboxes}
\question[1] How many grace days do you have in total for all homework? Can you combine grace days with late days to extend a homework submission deadline by more than 3 days?
\textbf{Select one:}
\begin{checkboxes}
\choice As many as I want; Of course!
\choice 6; No
\choice 6; Yes
\choice 8; Yes
\end{checkboxes}
\question[1] Seeking help from other students in understanding course materials needed to solve homework problems is ALLOWED under which of the follow conditions?
\textbf{Select all that apply:}
{%
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\begin{checkboxes}
\choice Any written notes are taken on an impermanent surface (e.g. whiteboard, chalkboard) and discarded before writing up one's solution alone.
\choice Learning is facilitated not circumvented; i.e., the purpose of seeking help is to learn and understand the problem instead of merely getting an answer
\choice Help both given and received is reported in collaboration questions in the homework
\choice The student updates his/her collaborative questions even if it is after submitting their own assignment
\choice None of the above
\end{checkboxes}
}
\clearpage
\question[1] Which of the following is (are) strictly forbidden in solving and submitting homework?
\textbf{Select all that apply:}
{%
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\begin{checkboxes}
\choice Searching on the internet for solutions or sample codes
\choice Consulting people outside this class who have seen or solved the problem before
\choice Turning in someone else's homework
\choice Using anyone else's, or allowing other classmates to use your computer or Gradescope account in connection with this course
\choice None of the above
\end{checkboxes}
}
\question[1] If you solved your assignment completely on your own, you can skip the collaboration questions at the end of each homework.
\textbf{Select one:}
\begin{checkboxes}
\choice True
\choice False
\end{checkboxes}
\question[1] What is (are) the consequence(s) of being caught cheating in this course? Select all that apply.
\textbf{First time:}
{%
\begin{checkboxes}
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\choice A negative 100\% grade on the assignment
\choice AIV report to university authorities
\end{checkboxes}
}
\textbf{Second time:}
{%
\begin{checkboxes}
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\choice Failure of the course
\choice AIV report to university authorities
\end{checkboxes}
}
\question[1] Assume a difficult situation arises in the middle of the semester (e.g. medical, personal etc.) that might prevent you from submitting assignments on time or working as well as you would like. What should you do? Select all that apply
\begin{checkboxes}
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\choice Talk to the course staff early so they can point you to the available resources on campus and make necessary arrangements
\choice Do not speak to the course staff, try to finish the class, reach out to the course staff in the end of the semester explaining your special situation
\choice Reach out to your advisor so that they are aware of the situation
\choice None of the above
\end{checkboxes}
\end{questions}
\clearpage\subsection{Probability and Statistics}
\textbf{\underline{Use the following data to answer questions 1-2}}. Consider data created by flipping a coin five times, $S = [1, 1, 0, 1, 1]$, where 1 denotes that the coin turned up heads and 0 denotes that it turned up tails. \bigskip
\begin{questions}
\question[1] What is the probability of observing any combination of this data (4 heads and 1 tails), assuming it was generated by flipping a coin X with an unequal probability of heads (1) and tails (0), where the distribution is $P(X = 1) = 0.75$, $P(X = 0) = 0.25$?
\textbf{Select one:}
\begin{checkboxes}
\choice $\frac{405}{1024}$
\choice $\frac{1}{32}$
\choice $\frac{324}{1024}$
\choice $\frac{81}{1024}$
\end{checkboxes}
\question[1] Note that the probability of this data sample would be greater if the value of P(X = 1) was not 0.75, but instead some other value. What is the value of P(X = 1) that maximizes the probability of the sample S? Provide your answer as a fraction.
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\question[1] State true or false. For events A and B, where $A \cap B$ indicates A AND B, and $A \cup B$ indicates A OR B, $$ P(A \cap B) = P(A) + P(B) - P(A \cup B)$$
\textbf{Select one:}
\begin{checkboxes}
\choice True
\choice False
\end{checkboxes}
\question[1] State true or false. For events A and B, $$P(A_1\cap A_2 \cap A_3) = P(A_3|A_2\cap A_1)P(A_2|A_1)P(A_1)$$
\textbf{Select one:}
\begin{checkboxes}
\choice True
\choice False
\end{checkboxes}
\clearpage
\question[2] Whether your car is wet in the morning (W) is dependent on whether it rained last night (R) or not; however, other factors may have led to your car being wet. The following are probabilities of such events:
\begin{eqnarray*}
& P(R) = 0.4\\
& P(W | R) = 0.8\\
& P(W | \neg R ) = 0.2
\end{eqnarray*} What is the probability that your car is wet in the morning?
\textbf{Select one:}
\begin{checkboxes}
\choice 0.64
\choice 0.56
\choice 0.44
\choice 0.4
\end{checkboxes}
\bigskip
\textbf{\underline{Use the following information to answer questions 6-7}}. Consider the following joint probability table where both X and Y are binary variables:\\[12pt]
\begin{tabular}{ccc}
X & Y & Probability \\
0 & 0 & 0.1\\
0 & 1 & 0.2\\
1 & 0 & 0.4\\
1 & 1 & 0.3
\end{tabular}
\question[1] What is $P(X = 1 | Y=1)$?
\textbf{Select one:}
\begin{checkboxes}
\choice $\frac{2}{3}$
\choice $\frac{3}{7}$
\choice $\frac{4}{5}$
\choice $\frac{3}{5}$
\end{checkboxes}
\question[1] What is $P(Y=0)$?
\textbf{Select one:}
\begin{checkboxes}
\choice 0.2
\choice 0.6
\choice 0.5
\choice 0.3
\end{checkboxes}
\clearpage
\textbf{\underline{Use the following information to answer questions 8-10}}. Let X be a random variable and the expected value of X is $E[X] = 1$ and the variance of X is $Var[X] = 1$.
\question[1] What is $E[6X]$?
\textbf{Select one:}
\begin{checkboxes}
\choice 1
\choice 3
\choice 6
\choice 36
\end{checkboxes}
\question[1] What is $Var[3X]$?
\textbf{Select one:}
\begin{checkboxes}
\choice 1
\choice 3
\choice 6
\choice 9
\end{checkboxes}
\question[1] What is $Var[2X + 3]$?
\textbf{Select one:}
\begin{checkboxes}
\choice 3
\choice 4
\choice 5
\choice 7
\end{checkboxes}
\clearpage
\textbf{\underline{Use the following information to answer questions 11-14:}}
Let A, B, and C be random variables with discrete probability distributions. Consider the following two joint probability tables: one relating A and B, and the other relating B and C.
$$\begin{array}{c|ccc}
_{\large A}\backslash^{\large B} & b_1 & b_2 & b_3 \\
\hline
a_1 & 0.1 & 0.05 & 0.15 \\
a_2 & 0.1 & 0.05 & 0.3 \\
a_3 & 0.05 & 0.15 & 0.05 \\
\end{array}
\quad \quad \quad
\begin{array}{c|cccc}
_{\large B}\backslash^{\large C} & c_1 & c_2 & c_3 & c_4 \\
\hline
b_1 & 0.02 & 0.14 & 0.06 & 0.03 \\
b_2 & 0.03 & 0.05 & 0 & 0.17 \\
b_3 & 0.35 & 0.04 & 0 & 0.11 \\
\end{array}$$
\question[1] Which of the following statements are necessarily \textbf{false}? Note X \rotatebox[origin=c]{90}{$\models$} Y indicates that random variable X is independent of random variable Y.
\textbf{Select all that apply:}
{%
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\begin{checkboxes}
\choice A \rotatebox[origin=c]{90}{$\models$} B
\choice B \rotatebox[origin=c]{90}{$\models$} C
\choice A \rotatebox[origin=c]{90}{$\models$} C
\choice None of the above.
\end{checkboxes}
}
\question[2] What is $P(B=b_1 | A = a_2, C = c_4)$? If this value cannot be computed, write N/A.
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
\end{tcolorbox}
\question[2] What is $P(B=b_2 | A = a_3, C = c_3)$? If this value cannot be computed, write N/A.
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\question[2] True or False: $\sum_{i=1}^3 P(B=b_i|C=c_1) = \sum_{j=1}^4 P(C=c_j|B=b_1)$
\textbf{Select one:}
\begin{checkboxes}
\choice True
\choice False
\end{checkboxes}
\question[2] Consider two random variables $X, Y$. Assume that we have $P(X=x) = \frac{1}{2^x}$ for $x \in \mathbb{Z}_{\geq 1}$ (integers greater than or equal to 1) and $P(Y=y|X=x) = \frac{1}{n}$ for $y \in \{1,2,...,n\}$. Assume $n$ is a fixed positive integer constant. What is $\mathbb{E}[Y]$?
\textbf{Select one:}
\begin{checkboxes}
\choice $\sum_{y=1}^n y \frac{1}{2^y}$
\choice $\sum_{y=1}^n y \frac{5}{3^y}$
\choice $\sum_{y=1}^n \frac{y}{n}$
\choice $\sum_{y=1}^n y$
\end{checkboxes}
\clearpage
\question[1] What is the mean, variance and entropy of a Bernoulli (p) random variable?
\textbf{Select one:}
\begin{checkboxes}
\choice $p, p(1-p), -(1-p)\log(1-p)-p \log(p)$
\choice $p(1-p), p, -(1-p)\log(1-p)-p\log(p)$
\choice $p, p(1-p),\log(1-p)-p\log(p)$
\choice The entropy of a Bernoulli variable is not defined.
\end{checkboxes}
\question[2] Please match the probability density function of the random variable X to its corresponding distribution name.
\begin{enumerate}
\item prob$(X=x) = \frac{1}{\sqrt{(2\pi)^d |\Sigma|}}\exp(-\frac{1}{2}(x - \mu)^T\Sigma^{-1}(x-\mu))$
\item prob$(X=x) = \lambda e^{-\lambda x}$ when $x \geq 0$; 0 otherwise
\item prob$(X=x) = \binom{n}{x} p^x (1-p)^{n-x}$
\item prob$(X=x) = \frac{1}{b-a}$ when $a \leq x \leq b$; 0 otherwise
\item prob$(X=x) = p^x(1-p)^{1-x}$
\end{enumerate}
\begin{list}{}
\item Multivariate Gaussian: \qquad
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\item Exponential: \qquad
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\item Uniform: \qquad
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\item Bernoulli: \qquad
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\item Binomial: \qquad
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\end{list}
\clearpage
\end{questions}
\subsection{Calculus [8pts]}
\begin{questions}
% \question[2] Find the derivative of y with respect to x, where $y=2x^4-x^3+5x-1$
% \textbf{Select one:}
% \begin{checkboxes}{}
% \choice $8x^3-3x^2+5$
% \choice $8x^4-3x^3+5x$
% \choice $6x^3-2x^2$
% \choice $16x^3-x^2+5$
% \end{checkboxes}
\question[2] Evaluate the derivative of y with respect to x, where $y = \ln(\frac{4}{x^2}-x^3)$ at x = 1.
\begin{tcolorbox}[fit,height=1cm, width=2cm, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\question[2] Find the partial derivative of $y$ with respect to $x$, where $y= 3x^2 \sin(z) e^{-x}$
\textbf{Select one:}
\begin{checkboxes}{}
\choice $3x\sin(z) e^{-x}(2+x)$
\choice $-6x\sin(z) e^{-x}$
\choice $3x\sin(z) e^{-x}(2-x)$
\choice $6x\cos(z)e^{-x}$
\end{checkboxes}
\question[2] For the function $f(x)= 5x^3 +2x^2-3x$ the value $x=\frac{1}{3}$ sets the derivative to be 0. Additionally, the second order derivative of $f(x)$ at x = $\frac{1}{3}$ is positive. What can you say about $f(x)$ at the point $\frac{1}{3}$:
\textbf{Select one:}
\begin{checkboxes}{}
\choice a local minimum
\choice a local maximum
\choice a local minimum or a local maximum
\choice None of the above
\end{checkboxes}
\question[2] Suppose that $f(\xv|\thetav)=\xv^T\thetav$, where $\xv, \thetav \in \mathcal{R}^n$. The function $g(\thetav)$ is defined as $g(\thetav) = (f(\xv^{(1)}|\thetav) - y^{(1)})^2$ for $\xv^{(1)} \in \mathcal{R}^n$ and $y^{(1)} \in \mathcal{R}$. What is the function type of $g(\thetav)$:
\textbf{Select one:}
\begin{checkboxes}{}
\choice $g: \mathcal{R}^n \rightarrow \mathcal{R}$
\choice $g: \mathcal{R} \rightarrow \mathcal{R}$
\choice $g: \mathcal{R} \rightarrow \mathcal{R}^n$
\choice $g: (\mathcal{R}^n \times \mathcal{R}^n) \rightarrow \mathcal{R}$
\end{checkboxes}
\clearpage
\end{questions}
\subsection{Vectors and Matrices}
\begin{questions}
\question[1] Consider the matrix $\mathbf{X}$ and the vectors $\mathbf{y}$ and $\mathbf{z}$ below: \textbf{X}=$\begin{bmatrix} 1 & 4 \\ 2 & 6 \end{bmatrix}$, \textbf{y}=$\begin{bmatrix} 2 \\ 1 \end{bmatrix}$, \textbf{z}=$\begin{bmatrix} 2 \\ 3 \end{bmatrix}$. What is the inner product of the vectors \textbf{y} and \textbf{z}? (this is also sometimes called the dot product)
\textbf{Select one:}
\begin{checkboxes}
\choice $\begin{bmatrix} 4 & 6 \\ 2 & 3 \end{bmatrix}$
\choice 9
\choice $\begin{bmatrix} 4 \\ 3 \end{bmatrix}$
\choice 7
\end{checkboxes}
\question[1] Using the same values for \textbf{X}, \textbf{y}, and \textbf{z} as above, what is the product of \textbf{Xy}?
\textbf{Select one:}
\begin{checkboxes}
\choice $\begin{bmatrix} 10 \\ 2 \end{bmatrix}$
\choice $\begin{bmatrix} 6 \\ 10 \end{bmatrix}$
\choice $\begin{bmatrix} 7 \\ 11 \end{bmatrix}$
\choice $\begin{bmatrix} 14 \\ 22 \end{bmatrix}$
\end{checkboxes}
\question[2] Consider $\uv = \begin{bmatrix} 1 \\ 2 \\ 3\end{bmatrix}$ and $\Vv = \begin{bmatrix} 0 & 7\\ 4 & 5\\ -1 & 0 \end{bmatrix}$. Which of these are valid operations? \\
\textbf{Select all that apply:}
\begin{checkboxes}
{%
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\choice $\uv^T\Vv$
\choice $\Vv^T\uv$
\choice $\uv \Vv$
\choice $\Vv \Vv$
\choice None of the above
}
\end{checkboxes}
\clearpage
\question[2] For the matrices $\mathbf{A}=\begin{bmatrix} 2 & 1 & 4 \\ -3 & 2 & 0 \\ 1 & 3 & -2 \end{bmatrix} $ and $\mathbf{B}=\begin{bmatrix} 3 & 4 & 5 \\ 3 & -1 & 3 \\ 1 & 3 & -2 \end{bmatrix}$
What is the product $\mathbf{AB}$?
\textbf{Select one:}
\begin{checkboxes}
\choice $ \begin{bmatrix} 13 & 19 & 5 \\ -3 & -14 & -9 \\ 4 & -4 & 18 \end{bmatrix} $
\choice $ \begin{bmatrix} 13 & 19 & 28 \\ 19 & 9 & -7 \\ -10 & -2 & 13 \end{bmatrix} $
\choice $ \begin{bmatrix} 20 & -20 & -28 \\ 3 & -14 & 9 \\ 3 & 2 & 13 \end{bmatrix} $
\choice $ \begin{bmatrix} 13 & 19 & 5 \\ -3 & -14 & -9 \\ 10 & -5 & 18 \end{bmatrix} $
\end{checkboxes}
\question[1] True or False: The matrix $\mathbf{A}$ from the previous question has an inverse?
\textbf{Select one:}
\begin{checkboxes}
\choice True
\choice False
\end{checkboxes}
\question[2] Consider two vectors $\xv = \begin{bmatrix} x_1 \\ x_2 \\ x_3 \end{bmatrix}$ and $\yv = \begin{bmatrix} y_1 \\ y_2 \\ y_3 \end{bmatrix}$, let $z = \xv^T \yv$. What is $\frac{\partial z}{\partial y_2}$?
\textbf{Select one:}
\begin{checkboxes}
\choice $y_2$
\choice $x_2$
\choice $\xv$
\choice $\yv$
\end{checkboxes}
\clearpage
\question[2] Given matrix $\mathbf{X}=\begin{bmatrix} 3 & 4 & 2 \\ 1 & 6 & 2 \\ 1 & 4 & 4 \end{bmatrix} $ and the column vector $\mathbf{y}=\begin{bmatrix} -6 \\ 1 \\ 1 \end{bmatrix}$, what is the eigenvalue of $\mathbf{X}$ associated with $\mathbf{y}$? (Recall an eigenvector of a matrix $\mathbf{A} \in \mathcal{R}^{n \times n}$ is a nonzero vector $\vv \in \mathcal{R}^n$ such that $\mathbf{A}\vv = \lambda\vv$ where we call the scalar $\lambda$ the associated eigenvalue for $\vv$.)
\textbf{Select one:}
\begin{checkboxes}
\choice $\mathbf{y}$ is not an eigenvector
\choice -3
\choice 2
\choice 1.5
\end{checkboxes}
\question[2] Preparing for his linear algebra final, Joe is finding eigenvectors and eigenvalues for different matrices. For one matrix (not given), he finds the following two \textbf{distinct} eigenvectors corresponding to an eigenvalue of 4: $\begin{bmatrix} 3 \\ 117 \\ 9 \end{bmatrix}$ and $\begin{bmatrix} 1 \\ 39 \\ 3 \end{bmatrix}$. Which statement regarding his solution is true?
\textbf{Select all that apply:}
\begin{checkboxes}{%
\checkboxchar{$\Box$} \checkedchar{$\blacksquare$}
\choice The solution must be wrong because there cannot be multiple eigenvectors corresponding to a single eigenvalue.
\choice The solution must be wrong because the eigenvectors are linearly dependent.
\choice The solution is correct because eigenvectors should be linearly dependent and there may be multiple eigenvectors corresponding to an eigenvalue.
\choice None of the above.
}
\end{checkboxes}
\clearpage
\end{questions}
\subsection{Geometry}
\begin{questions}
\question[2] What relationship does the vector $\wv$ share with the line $\mathbf{w^Tx}+b = 0$?
(assume $\mathbf{x}$ and $\mathbf{w}$ are both two dimensional column vectors, and $\mathbf{w}^T$ indicates the transpose of the column vector $\mathbf{w}$.)
\textbf{Select one:}
\begin{checkboxes}{}
\choice parallel
\choice orthogonal
\choice depends on the value of b
\end{checkboxes}
\question[1] With reference to the above question, select the statement which best explains why $\mathbf{w}$ and $\mathbf{w^Tx} + b = 0$ share the above relationship.
\textbf{Select one:}
\begin{checkboxes}{}
\choice The inner product $\mathbf{w^T(x' - x'')}$, where $\mathbf{x'}$ and $\mathbf{x''}$ are two points on the line $\mathbf{w^Tx}+b=0$, is 0
\choice The inner product $\mathbf{w^T(x' - x'')}$, where $\mathbf{x'}$ and $\mathbf{x''}$ are two points on the line $\mathbf{w^Tx}+b=0$, is 1
\choice The inner product $\mathbf{w^T(x' - x'')}$, where $\mathbf{x'}$ and $\mathbf{x''}$ are two points on the line $\mathbf{w^Tx}+b=0$, is $b$
\end{checkboxes}
\question[2] What is the distance from the origin to the line $\mathbf{w^Tx}+b=0$?
(In the following answers, $\lambda$ is some constant)
\textbf{Select one:}
\begin{checkboxes}{}
\choice $\frac{|b|}{||\mathbf{w}||}$
\choice $\frac{|b|}{\mathbf{w^Tw}}\mathbf{w}$
\choice $\frac{2\lambda}{\mathbf{w}b}$
\choice $\frac{||\mathbf{w}||}{|b|}$
\end{checkboxes}
\clearpage
\end{questions}
\subsection{CS Foundations}
\begin{questions}
\question[1] If $f(n)=\ln(n)$ and $g(n)=\log_3(n)$ which of the following are true?
\textbf{Select one:}
\begin{checkboxes}
\choice $f(n) \in O(g(n))$
\choice $g(n) \in O(f(n))$
\choice Both
\choice Neither
\end{checkboxes}
\question[1] If $f(n)=n^{10}$ and $g(n)=10^n$ which of the following are true?
\textbf{Select one:}
\begin{checkboxes}
\choice $f(n) \in O(g(n))$
\choice $g(n) \in O(f(n))$
\choice Both
\choice Neither
\end{checkboxes}
%\clearpage
\begin{figure}[H]
\centering
\includegraphics[width=0.6\textwidth]{BritiansRoyalFamily.jpg}
\caption{Britain's Royal Family}
\label{fig:family}
\end{figure}
\clearpage
\question[2] Using the tree shown in Figure \ref{fig:family}, how many nodes would depth-first-search visit in finding Mia Tindall (including her node)? Assuming we search left-to-right and top-down.
\textbf{Select one:}
\begin{checkboxes}
\choice 3
\choice 12
\choice 15
\choice 18
\end{checkboxes}
%\clearpage
\begin{figure}[H]
\centering
\includegraphics[width = 0.4\textwidth]{TreePlot.png}
\caption{A Binary Tree with indexed nodes}
\label{tree}
\end{figure}
\question[2] Figure \ref{tree} is a Binary Tree with indexed nodes. Assume root node is node 1. What is the node-visit order of \textbf{DFS} and \textbf{BFS} of the above Binary Tree?
A depth-first search (DFS) traversal of a binary tree starts with visiting the root node, and recursively searches down the left subtree (i.e., the tree rooted at the left node) before going to search the right subtree (i.e., the tree rooted at the right node) until the traversal is done.\\
Note: Alternatively, the right subtree could be visited before the left subtree; for this question, please use the left-to-right order!
A breadth-first search (BFS) traversal of a binary tree visits every node (assuming a left-to-right order) on a level (with the same distance to the root) before going to a lower level until the traversal is done.
The node-visit order of DFS is:
\begin{tcolorbox}[fit,height=1cm, width=\textwidth, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
The node-visit order of BFS is:
\begin{tcolorbox}[fit,height=1cm, width=\textwidth, blank, borderline={1pt}{-2pt},nobeforeafter]
%solution
\end{tcolorbox}
\clearpage
\question[2] Fill in the blanks in the pseudo code for key search using recursive depth-first search (DFS) traversal.
\begin{lstlisting}
class TreeNode:
def __init__(self, val):
self.val = val
self.leftNode = None
self.rightNode = None
# (a) the left/right node is denoted as
# node.leftNode/node.rightNode
# (b) left/right node are of type TreeNode
# (c) the value of the node is denoted as node.val
# (d) recursive DFS to search for the node
# with value key in a binary tree
# (e) the left node is assumed to be searched
# before the right node
def find_val(node, key):
if node is None:
return None
if (1)____________________________:
return node
else:
result = (2)___________________________
if result is None:
result = (3)___________________________
return (4)___________________________
\end{lstlisting}
\clearpage
\textbf{\underline{Consider writing a recursive program to solve question 6:}} \\
Lucas numbers are defined as:
\[ L_n = \begin{cases}
2 & \text{if } n = 0\\
1 & \text{if } n = 1\\
L_{n-1} + L_{n-2} & \text{if } n > 1
\end{cases}
\]
\question[2] Which of the following is the numerical value for $L_{32}$?
\textbf{Select one:}
\begin{checkboxes}
\choice 3010349
\choice 3524578
\choice 4870847
\choice 7881196
\end{checkboxes}
\newpage
\textbf{\underline{Consider the following information to answer questions 7-8:}} \\
Given the functions of computing a fibonacci number:
\begin{lstlisting}
def fib_1(n):
if n == 0 or n == 1:
return 1
return fib_1(n - 1) + fib_1(n - 2)
d = {}
d[0] = 1
d[1] = 1
def fib_2(n):
if n in d.keys():
return d[n]
d[n] = fib_2(n - 1) + fib_2(n - 2)
return d[n]
\end{lstlisting}
\question[2] Which of the following is the tightest upper bound on the time complexity of computing \lstinline{fib_1(n)}?
\textbf{Select one:}
\begin{checkboxes}
\choice $O(n)$
\choice $O(n \log n)$
\choice $O(2^n)$
\choice $O(n!)$
\end{checkboxes}
\question[2] Which of the following is the tightest upper bound on the time complexity of computing \lstinline{fib_2(n)}?
\textbf{Select one:}
\begin{checkboxes}
\choice $O(n)$
\choice $O(n \log n)$
\choice $O(2^n)$
\choice $O(n!)$
\end{checkboxes}
\clearpage
\end{questions}
%The following are questions I feel more aligned with the difficulty level of maths encountered in this course
%\input{new_questions.tex}
%\clearpage
\begin{comment}
{\bf Collaboration Questions} After you have completed all other components of this assignment, report your answers to the collaboration policy questions detailed in the Academic Integrity Policies found \href{http://www.cs.cmu.edu/~mgormley/courses/10601/syllabus.html#7-academic-integrity-policies}{here}.
\begin{enumerate*}
\item Did you receive any help whatsoever from anyone in solving this assignment? If so, include full details.
\item Did you give any help whatsoever to anyone in solving this assignment? If so, include full details?
\item Did you find or come across code that implements any part of this assignment ? If so, include full details.
\end{enumerate*}
\begin{tcolorbox}[fit,height=3cm,blank, borderline={1pt}{-2pt},nobeforeafter]
%Input your solution here. Do not change any of the specifications of this solution box.
\end{tcolorbox}
\end{comment}
\textbf{Collaboration Questions} Please answer the following:
\begin{enumerate}
\item Did you receive any help whatsoever from anyone in solving this assignment? \\Yes / No.
\begin{itemize}
\item If you answered `yes', give full details: \_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
\item (e.g. “Jane Doe explained to me what is asked in Question 3.4”)
\end{itemize}
\item Did you give any help whatsoever to anyone in solving this assignment? \\Yes / No.
\begin{itemize}
\item If you answered `yes', give full details: \_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
\item (e.g. “I pointed Joe Smith to section 2.3 since he didn’t know how to proceed with Question 2”)
\end{itemize}
\item Did you find or come across code that implements any part of this assignment ? \\Yes / No. (See below policy on “found code”)
\begin{itemize}
\item If you answered `yes', give full details: \_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
\item (book \& page, URL \& location within the page, etc.).
\end{itemize}
\end{enumerate}
\end{document}
|
{"hexsha": "415cdfa8d4f4dcd2585a9c772ae07de4517669a5", "size": 61208, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "assignments/solutions/hw1/problems/latex_template/hw1.tex", "max_stars_repo_name": "punit-bhatt/cmu-10601-intro-to-ml", "max_stars_repo_head_hexsha": "a4b7bdb27a388a2996e3f4dac99f34156ff9f01e", "max_stars_repo_licenses": ["MIT-CMU"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignments/solutions/hw1/problems/latex_template/hw1.tex", "max_issues_repo_name": "punit-bhatt/cmu-10601-intro-to-ml", "max_issues_repo_head_hexsha": "a4b7bdb27a388a2996e3f4dac99f34156ff9f01e", "max_issues_repo_licenses": ["MIT-CMU"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignments/solutions/hw1/problems/latex_template/hw1.tex", "max_forks_repo_name": "punit-bhatt/cmu-10601-intro-to-ml", "max_forks_repo_head_hexsha": "a4b7bdb27a388a2996e3f4dac99f34156ff9f01e", "max_forks_repo_licenses": ["MIT-CMU"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4635718891, "max_line_length": 985, "alphanum_fraction": 0.6644066135, "num_tokens": 18139}
|
# author: Fernando Paolo;
# modify: xin luo, 2021.8.10.
"""
des: merges several HDF5 files into a single file or multiple larger files.
example
merge.py ifiles_*.h5 -o ofile.h5
merge.py ifiles_*.h5 -o ofile.h5 -m 5 -n 5
notes
- The parallel option (-n) only works for multiple outputs (-m)!
- If no 'key' is given, it merges files in the order they are passed/read.
- If receive "Argument list too long", pass a string.
- See complementary program: split.py
"""
import warnings
from numpy.core.fromnumeric import compress
warnings.filterwarnings("ignore")
import os
import h5py
import argparse
import numpy as np
from glob import glob
def get_args():
    """Parse and return the command-line arguments of the merge tool.

    Options mirror the module docstring: positional input files, a required
    output name (-o), optional block count (-m), variable subset (-v),
    compression (-z), filename sort key (-k) and parallel job count (-n).
    """
    cli = argparse.ArgumentParser(description='Merges several HDF5 files.')
    # One or more input files; may also be a single glob string that the
    # caller expands later (works around "argument list too long").
    cli.add_argument('file', metavar='file', type=str, nargs='+',
                     help='HDF5 files to merge')
    cli.add_argument('-o', metavar='ofile', dest='ofile', type=str, nargs=1,
                     required=True, default=[None],
                     help=('output file name'))
    cli.add_argument('-m', metavar='nfiles', dest='nfiles', type=int, nargs=1,
                     default=[1],
                     help=('number of merged files (blocks)'))
    cli.add_argument('-v', metavar='var', dest='vnames', type=str, nargs='+',
                     default=[],
                     help=('only merge specific vars if given, otherwise merge all'))
    cli.add_argument('-z', metavar=None, dest='comp', type=str, nargs=1,
                     choices=('lzf', 'gzip'), default=[None],
                     help=('compress merged file(s)'))
    cli.add_argument('-k', metavar='key', dest='key', type=str, nargs=1,
                     default=[None],
                     help=('sort files by numbers after `key` in file name'))
    cli.add_argument('-n', metavar='njobs', dest='njobs', type=int, nargs=1,
                     default=[1],
                     help=('number of jobs for parallel processing when using -m'))
    return cli.parse_args()
def get_total_len(ifiles):
    """ des: Get total output length from all input files.
    arg:
        ifiles: list of str, preprocessed h5 files consisting only of
                Datasets that share the same leading dimension.
    return:
        N: int, summed length (first dimension) of the Datasets.
    """
    N = 0
    for fn in ifiles:
        # Open explicitly read-only: h5py < 3 defaulted to append mode when
        # no mode was given, which locks (or could even create) input files
        # during what should be a pure read.
        with h5py.File(fn, 'r') as f:
            # Any dataset works here since they all share the same length.
            N += list(f.values())[0].shape[0]
    return N
def get_var_names(ifile):
    """ des: return all top-level '/variable' names in the HDF5 file.
    arg:
        ifile: str, the h5 file name.
    return:
        vnames: list, Dataset or Group names at the root of the h5 file;
                for preprocessed files that contain only Datasets these
                are the variable names.
    """
    h5 = h5py.File(ifile, 'r')
    try:
        return [name for name in h5.keys()]
    finally:
        h5.close()
def get_multi_io(ifiles, ofile, nfiles):
    """ des: Construct multiple input/output file names for the data merging;
             required for parallel processing.
    arg:
        ifiles: list, paths of multiple h5 files.
        ofile: str, path of the output file (used as the name stem).
        nfiles: int, number of groups the input files are divided into.
    return:
        groups: list of lists, each inner list holding the files of one group.
        onames: list of str, one numbered output path per group.
    """
    # Split the inputs into (almost) equally sized groups.
    groups = [list(chunk) for chunk in np.array_split(ifiles, nfiles)]
    # Derive numbered output names from the stem: <stem>_00.h5, <stem>_01.h5, ...
    pattern = os.path.splitext(ofile)[0] + '_%02d.h5'
    onames = [pattern % idx for idx in range(len(groups))]
    return groups, onames
# Sort input files by key
def sort_files(ifiles, key=None):
    """ des: sort files in place by the numbers *after* the key in the file name.
    example: file name -> ..._year_2010_....h5; with key='year' the files
             are sorted by year.
    arg:
        ifiles: list, contains multiple h5 file names (sorted in place).
        key: str or None, the key feature for sorting; None leaves the
             order unchanged.
    """
    if key:
        import re
        print('sorting input files ...')
        # Raw string for the regex: '\d' inside a plain string literal is an
        # invalid escape sequence (DeprecationWarning since Python 3.6).
        # Compile once instead of re-parsing the pattern for every file name.
        pattern = re.compile(key + r'_\d+')
        natkey = lambda s: int(pattern.findall(s)[0].split('_')[-1])
        ifiles.sort(key=natkey)
def merge(ifiles, ofile, vnames, comp):
    ''' des: merge the similar files into one file
    arg:
        ifiles: list of str, files to be merged; all must contain the
                variables in vnames with compatible shapes.
        ofile: str, the name of the merged output file.
        vnames: list of str, names of the datasets (variables) to merge.
        comp: str or None, HDF5 compression filter for the output
              ('lzf' or 'gzip').
    return:
        none
    '''
    # Get length of output containers (from all input files)
    print('Calculating length of output from all input files ...')
    N = get_total_len(ifiles)
    with h5py.File(ofile, 'w') as out_f, h5py.File(ifiles[0], 'r') as in_f_0:
        # Pre-allocate one output dataset per variable. Keep the dtype of the
        # first input file: dtype=None would silently default to float32 and
        # corrupt integer variables.
        for key in vnames:
            shape_var = in_f_0[key][:].shape
            dtype_var = in_f_0[key].dtype
            if len(shape_var) == 1:
                out_f.create_dataset(key, (N,), dtype=dtype_var, compression=comp)
            else:
                out_f.create_dataset(key, (N, shape_var[1]), dtype=dtype_var, compression=comp)
        # Iterate over the input files
        k1 = 0
        for ifile in ifiles:
            print(('reading', ifile))
            # Write next chunk (the input file)
            with h5py.File(ifile, 'r') as f2:
                k2 = k1 + list(f2.values())[0].shape[0]  # [k1, k2): slot of this file in the output
                # BUG FIX: iterate over *all* variables. Previously there was
                # no loop here, so only the leftover loop variable from the
                # allocation loop above (the last entry of vnames) was copied.
                for key in vnames:
                    out_f[key][k1:k2] = f2[key][:]
                k1 = k2
    print(('merged', len(ifiles), 'files'))
    print(('output ->', ofile))
if __name__ == '__main__':
    # Parse command-line options (see get_args for the full option set).
    args = get_args()
    ifile = args.file[:]     # list: input file names (or one glob string)
    ofile = args.ofile[0]    # str: output file name (stem when -m > 1)
    nfiles = args.nfiles[0]  # int: number of merged output files (blocks)
    vnames = args.vnames     # list: variables to merge (empty -> merge all)
    comp = args.comp[0]      # str or None: HDF5 compression filter
    key = args.key[0]        # str or None: sort key within the file names
    njobs = args.njobs[0]    # int: parallel jobs (only effective with -m > 1)
    # In case a string is passed to avoid "argument list too long"
    if len(ifile) == 1:
        ifile = glob(ifile[0])
    # sort files before merging
    sort_files(ifile, key=key)
    # get var names from first file, if not provided
    vnames = get_var_names(ifile[0]) if not vnames else vnames
    # groups of input files -> multiple output files
    if nfiles > 1:
        ifile, ofile = get_multi_io(ifile, ofile, nfiles)
    else:
        # Wrap in lists so the zip below handles both cases uniformly.
        ifile, ofile = [ifile], [ofile]
    if njobs > 1 and nfiles > 1:
        # Parallel path: one merge job per (input-group, output-file) pair.
        print(('Running parallel code (%d jobs) ...' % njobs))
        from joblib import Parallel, delayed
        Parallel(n_jobs=njobs, verbose=5)(
                delayed(merge)(fi, fo, vnames, comp) \
                for fi, fo in zip(ifile, ofile))
    else:
        print('Running sequential code ...')
        [merge(fi, fo, vnames, comp) for fi, fo in zip(ifile, ofile)]
|
{"hexsha": "d4647cf450266c851dd68cf8135af3cd1c89201d", "size": 6883, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/merge_files.py", "max_stars_repo_name": "xinluo2018/Glacier-Change-for-RGI1305", "max_stars_repo_head_hexsha": "c0850d7c681181a2046d87c7ede566050cc627b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-03T14:43:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T03:47:50.000Z", "max_issues_repo_path": "utils/merge_files.py", "max_issues_repo_name": "xinluo2018/Glacier-Change-for-RGI1305", "max_issues_repo_head_hexsha": "c0850d7c681181a2046d87c7ede566050cc627b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/merge_files.py", "max_forks_repo_name": "xinluo2018/Glacier-Change-for-RGI1305", "max_forks_repo_head_hexsha": "c0850d7c681181a2046d87c7ede566050cc627b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-03T14:43:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-03T14:43:22.000Z", "avg_line_length": 34.7626262626, "max_line_length": 113, "alphanum_fraction": 0.5807060875, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1772}
|
[STATEMENT]
lemma "\<not> no_spoofing_iface
(Iface ''eth0'')
[Iface ''eth0'' \<mapsto> [(ipv4addr_of_dotdecimal (192,168,0,0), 24)]]
[Rule (MatchAnd (Match (Src (IpAddrNetmask (ipv4addr_of_dotdecimal (192,168,0,0)) 24))) (MatchNot (Match (IIface (Iface ''eth0''))))) action.Drop,
Rule MatchAny action.Accept]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> no_spoofing_iface (Iface ''eth0'') [Iface ''eth0'' \<mapsto> [(ipv4addr_of_dotdecimal (192, 168, 0, 0), 24)]] [Rule (MatchAnd (Match (Src (IpAddrNetmask (ipv4addr_of_dotdecimal (192, 168, 0, 0)) 24))) (MatchNot (Match (IIface (Iface ''eth0''))))) Drop, Rule MatchAny Accept]
[PROOF STEP]
by eval
|
{"llama_tokens": 317, "file": "Iptables_Semantics_Primitive_Matchers_No_Spoof", "length": 1}
|
# Smoke tests for the TinyHanabiEnv example environment: verify it implements
# the RLBase interfaces and that a full episode can be run.
@testset "TinyHanabiEnv" begin  # fixed typo in the testset label ("Tinay" -> "Tiny")
    env = TinyHanabiEnv()
    RLBase.test_interfaces!(env)
    RLBase.test_runnable!(env)
end
|
{"hexsha": "23c1ae2043b8605ee60b06807135dbcf6e121629", "size": 129, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ReinforcementLearningEnvironments/test/environments/examples/tiny_hanabi.jl", "max_stars_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_stars_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 352, "max_stars_repo_stars_event_min_datetime": "2018-08-30T18:41:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:41:22.000Z", "max_issues_repo_path": "src/ReinforcementLearningEnvironments/test/environments/examples/tiny_hanabi.jl", "max_issues_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_issues_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 325, "max_issues_repo_issues_event_min_datetime": "2018-08-24T12:41:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:29:23.000Z", "max_forks_repo_path": "src/ReinforcementLearningEnvironments/test/environments/examples/tiny_hanabi.jl", "max_forks_repo_name": "LaarsOman/ReinforcementLearning.jl", "max_forks_repo_head_hexsha": "b04e3f192e71418dbca496331ada44f65b2822d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 62, "max_forks_repo_forks_event_min_datetime": "2018-09-02T03:40:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T12:35:19.000Z", "avg_line_length": 14.3333333333, "max_line_length": 32, "alphanum_fraction": 0.7054263566, "num_tokens": 39}
|
# (c) 2016 Gregor Mitscha-Baude
# Plots the diagonal components (Dxx, Dyy, Dzz) of the position-dependent ion
# diffusivity tensor near the wall of the Pugh pore, overlays analytic
# plane-wall curves (Dn_plane, Dt_plane), and saves the figure via nanopores.
import nanopores
from matplotlib import pyplot as plt
from itertools import product
from folders import fields, FIGDIR
#fields.update()
# ion radius [nm]; also used to convert between wall distance and position
r = 0.11
#D2D = fields.get_field("pugh_diff2D_test", "D")[0]
#data = fields.get_fields("pugh_diff3D_cross", bulkbc=True, rMolecule=2.0779)
data = fields.get_fields("pugh_diff3D_backup", rMolecule=r)
#def _sorted(data, key):
# I = sorted(range(len(key)), key=lambda k: key[k])
# return {k: [data[k][i] for i in I] for k in data}, [key[i] for i in I]
#x = [z[0] for z in data["x"]]
#print len(x)
#data, x = _sorted(data, x)
#dstr = ["x", "y", "z"]
#for i, j in product(range(3), range(3)):
# Dxx = [D[i][j] for D in data["D"]]
# style = "s-" if i==j else "--"
# plt.plot(x, Dxx, style, label=r"$D_{%s%s}$" % (dstr[i], dstr[j]))
#
#plt.plot(x, [D2D]*len(x), "-k", label="2D ref.")
#plt.legend(loc="best")
##plt.legend(bbox_to_anchor=(1.05, 1.), loc="upper left", borderaxespad=0.,)
##nanopores.savefigs("pugh_diff3D", folders.FIGDIR)
#plt.show()
# x: distance from the pore wall (first coordinate minus ion radius);
# see the x-axis label below
x = [z[0]-r for z in data["x"]]
data, x = fields._sorted(data, x)
dstr = ["x", "y", "z"]
#print x
# x0: hard-coded full list of x positions present in the saved field data
x0 = [0.009999999999999662, 0.08555555555555504, 0.16111111111111087,
      0.16157894736842093, 0.23666666666666625, 0.31222222222222207,
      0.31315789473684175, 0.38777777777777744, 0.46333333333333326,
      0.464736842105263, 0.5388888888888886, 0.6144444444444445,
      0.6163157894736843, 0.6899999999999998, 0.7678947368421051,
      0.9194736842105261, 1.071052631578947, 1.2226315789473683,
      1.3742105263157893, 1.5257894736842104, 1.6773684210526314,
      1.8289473684210524, 1.9805263157894732, 2.1321052631578947,
      2.283684210526316, 2.435263157894737, 2.586842105263158,
      2.738421052631579, 2.89]
# x: the subset of positions actually plotted (near-duplicates removed)
x = [0.009999999999999662, 0.08555555555555504,
     0.16157894736842093, 0.23666666666666625,
     0.31315789473684175, 0.38777777777777744,
     0.464736842105263, 0.5388888888888886,
     0.6163157894736843, 0.6899999999999998, 0.7678947368421051,
     0.9194736842105261, 1.071052631578947, 1.2226315789473683,
     1.3742105263157893, 1.5257894736842104, 1.6773684210526314,
     1.8289473684210524, 1.9805263157894732, 2.1321052631578947,
     2.283684210526316]
DD = data["D"]
# keep only the diagonal tensor entries at the selected positions;
# x0[DD.index(D)] maps each tensor back to its x position
Dxx = [D[0][0] for D in DD if x0[DD.index(D)] in x]
Dyy = [D[1][1] for D in DD if x0[DD.index(D)] in x]
Dzz = [D[2][2] for D in DD if x0[DD.index(D)] in x]
# undo the radius shift for plotting in absolute x
x = [t+r for t in x]
from nanopores.models.diffusion_interpolation import Dn_plane, Dt_plane
from numpy import linspace
fields.set_dir_default()
X, D = fields.get("diffz_pugh", "x", "D", diamPore=6.)
# D0: diffusivity at the on-axis grid point closest to z = 0
# (computed but not used further below)
zmin = min([x1[2] for x1 in X], key=lambda x: abs(x))
i = X.index([0., 0., zmin])
D0 = D[i]
# bulk reference values used to normalize the analytic curves
Dxx1 = max(Dxx)
Dzz1 = max(Dzz)
xlin = linspace(r+1e-3, 3., 100)
# analytic curve, presumably the wall-normal component — TODO confirm
dn = [Dn_plane(t, r, N=20) for t in xlin]
dn = [d*Dxx1/dn[-1] for d in dn]
plt.plot(xlin, dn, "-b")
# analytic curve, presumably the wall-tangential component — TODO confirm
dt = [Dt_plane(t, r) for t in xlin]
dt = [d*Dzz1/dt[-1] for d in dt]
plt.plot(xlin, dt, "-g")
plt.xlim(0., 1.5)
plt.xticks([0, 0.5, 1., 1.5])
plt.plot(x, Dxx, "ob", label=r"$D_{xx}$")
plt.plot(x, Dyy, "sg", label=r"$D_{yy}$")
plt.plot(x, Dzz, ".r", label=r"$D_{zz}$")
#plt.plot(x, [D2D]*len(x), "--k", label="2D cyl.")
plt.xlabel("x distance from pore wall [nm]")
plt.ylabel("Rel. diffusivity")
plt.ylim(0, 1)
# vertical guide line marking the ion radius
plt.axvline(x=0.11, linestyle="--", color="#666666")
plt.annotate("Ion radius", (0.11, 0.94),
             xytext=(0.25, 0.94-0.002), color="#666666",
             arrowprops=dict(arrowstyle="->", color="#666666"))
plt.yticks([i/10. for i in range(0, 11, 2)])
plt.legend(loc="lower right") #bbox_to_anchor=(1.05, 1.), loc="upper left", borderaxespad=0.,)
plt.gcf().set_size_inches(3.2, 3.2)
nanopores.savefigs("pugh_Dions", FIGDIR)
plt.show()
|
{"hexsha": "9cc4c0f46284a757ad732ad85356efd15515c397", "size": 3713, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/pughpore/plot_diff3D.py", "max_stars_repo_name": "jhwnkim/nanopores", "max_stars_repo_head_hexsha": "98b3dbb5d36464fbdc03f59d224d38e4255324ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/pughpore/plot_diff3D.py", "max_issues_repo_name": "jhwnkim/nanopores", "max_issues_repo_head_hexsha": "98b3dbb5d36464fbdc03f59d224d38e4255324ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/pughpore/plot_diff3D.py", "max_forks_repo_name": "jhwnkim/nanopores", "max_forks_repo_head_hexsha": "98b3dbb5d36464fbdc03f59d224d38e4255324ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.13, "max_line_length": 94, "alphanum_fraction": 0.663345004, "include": true, "reason": "from numpy", "num_tokens": 1508}
|
from typing import *
import torch
import torch.optim as optim
import numpy as np
from allennlp.data import Instance
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.data.dataset_readers import DatasetReader
from allennlp.common.file_utils import cached_path
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn import util as nn_util
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.data.iterators import BucketIterator
from allennlp.training.trainer import Trainer
from allennlp.predictors import SentenceTaggerPredictor
from allennlp.data.iterators import DataIterator
from tqdm import tqdm
from scipy.special import expit # the sigmoid function
from allennlp.models import Model
from sklearn.metrics import precision_recall_curve
import numpy as np
def tonp(tsr):
    """Detach *tsr* from the autograd graph, move it to CPU and return a NumPy array."""
    detached = tsr.detach()
    return detached.cpu().numpy()
from allennlp.models.archival import Archive, load_archive
class Predictor:
    """Runs an archived model over a dataset and collects sigmoid class scores.

    Wraps the model from an AllenNLP ``Archive`` together with a data
    iterator; ``predict`` returns the gold labels and the sigmoid of the
    model's ``class_logits`` for every instance.
    """

    def __init__(self, archive: Archive, iterator: DataIterator,
                 cuda_device: int) -> None:
        self.model = archive.model
        self.iterator = iterator
        self.cuda_device = cuda_device

    def _extract_data(self, batch) -> np.ndarray:
        # Forward pass; squash the logits through a sigmoid on the CPU.
        output = self.model(**batch)
        return expit(tonp(output["class_logits"]))

    def predict(self, ds: Iterable[Instance]) -> np.ndarray:
        # NOTE(review): moves the model to GPU unconditionally — assumes a
        # CUDA device is available; confirm before running CPU-only.
        self.model = self.model.cuda()
        self.model.eval()
        batch_generator = self.iterator(ds, num_epochs=1, shuffle=False)
        progress = tqdm(batch_generator,
                        total=self.iterator.get_num_batches(ds))
        all_labels = []
        all_preds = []
        with torch.no_grad():
            for batch in progress:
                batch = nn_util.move_to_device(batch, self.cuda_device)
                all_labels.append(batch['label'].cpu())
                all_preds.append(self._extract_data(batch))
        return np.concatenate(all_labels, axis=0), np.concatenate(all_preds, axis=0)
# -----------------------------------------------------------------------------
# Command-line driver: load an archived model, predict on one data split and
# dump predictions, precision/recall statistics and a PR curve into a fresh
# directory named accuracy_<model-name><split>.
# -----------------------------------------------------------------------------
import argparse
parser = argparse.ArgumentParser(description='Examples: python predicator.py path/to/file path/to/model 1')
parser.add_argument("path")
parser.add_argument("model_path")
parser.add_argument("label")
args = parser.parse_args()
# Map the numeric split flag ('1'/'2'/'3') to a human-readable suffix used in
# the output directory name; any other value leaves the suffix empty.
predict_label = ''
if args.label == '1':
    predict_label = 'train'
elif args.label == '2':
    predict_label = 'validation'
elif args.label == '3':
    predict_label = 'test'
this_dir = 'accuracy_'+args.model_path.split('/')[-1]+predict_label
import os
os.mkdir(this_dir)  # intentionally fails if the output directory already exists
# Importing these modules registers the custom model / dataset-reader classes
# with AllenNLP's registry so by_name() below can find them.
from packages import model
from packages import dataset_reader
from allennlp.models.archival import load_archive
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
USE_GPU = torch.cuda.is_available()
from allennlp.data.iterators import BasicIterator
# iterate over the dataset without changing its order
seq_iterator = BasicIterator(batch_size=64)
seq_iterator.index_with(Vocabulary())
Archive_ds = DatasetReader.by_name('quora_text_reader')
reader = Archive_ds(token_indexers={"elmo": ELMoTokenCharactersIndexer()})
data_path = args.path
train_ds = reader.read(data_path)
archive = load_archive(args.model_path+'/model.tar.gz')
# model = archive.model
predictor = Predictor(archive, seq_iterator, cuda_device=0 if USE_GPU else -1)
labels, train_preds = predictor.predict(train_ds)
np.savetxt(this_dir+'/train_preds.csv',np.array(train_preds))
# predictions = np.argmax(train_preds,axis=1)
# targets = np.argmax(labels,axis=1)
# Score of the positive class (column 1) vs. the one-hot target index.
predictions = np.array(train_preds)[:,1]
targets = np.argmax(labels,axis=1)
from sklearn.metrics import precision_recall_curve
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are written to disk, never shown
import matplotlib.pyplot as plt
# BUGFIX: ``sklearn.utils.fixes.signature`` was a private Python-2 backport of
# inspect.signature and was removed in scikit-learn 0.23; use the standard
# library equivalent instead.
from inspect import signature
precision, recall, thresholds = precision_recall_curve(y_true=targets,probas_pred=predictions)
from sklearn.metrics import average_precision_score
average_precision = average_precision_score(targets, predictions)
np.savetxt(this_dir+'/predictions.csv',np.array(predictions))
np.savetxt(this_dir+'/targets.csv',np.array(targets))
np.savetxt(this_dir+'/precision.csv',np.array(precision))
np.savetxt(this_dir+'/recall.csv',np.array(recall))
np.savetxt(this_dir+'/thresholds.csv',np.array(thresholds))
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = ({'step': 'post'}
               if 'step' in signature(plt.fill_between).parameters
               else {})
plt.step(recall, precision, color='b', alpha=0.2,
         where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
          average_precision))
plt.savefig(this_dir+'/p_r_'+predict_label+'.png')
# python predicator.py ../cleaned_data/train_0125.csv ../result/model1_1 1
# python predicator.py ../cleaned_data/test_0125_modified.csv ../result/model1_1 3
# python predicator.py ../cleaned_data/train_0125.csv ../result/model1_2_nodropout 1
# python predicator.py ../cleaned_data/test_0125_modified.csv ../result/model1_2_nodropout 3
# python predicator.py ../cleaned_data/train_0125.csv ../result/model1_3_smaller_hidden_size 1
# python predicator.py ../cleaned_data/test_0125_modified.csv ../result/model1_3_smaller_hidden_size 3
# python predicator.py ../cleaned_data/train_0125.csv ../result/model1_5_larger_filter_size 1
# python predicator.py ../cleaned_data/test_0125_modified.csv ../result/model1_5_larger_filter_size 3
# python predicator.py ../cleaned_data/train_0125.csv ../result/model_with_stats_1 1
# python predicator.py ../cleaned_data/test_0125_modified.csv ../result/model_with_stats_1 3
# python predicator.py ../cleaned_data/train_0125.csv ../result/model_with_stats_2_nodropout 1
# python predicator.py ../cleaned_data/test_0125_modified.csv ../result/model_with_stats_2_nodropout 3
# python predicator.py ../cleaned_data/train_0125.csv ../result/model_with_stats_3_smaller_hidden_size 1
# python predicator.py ../cleaned_data/test_0125_modified.csv ../result/model_with_stats_3_smaller_hidden_size 3
# python predicator.py ../cleaned_data/train_0125.csv ../result/model_with_stats_5_larger_filter_size 1
# python predicator.py ../cleaned_data/test_0125_modified.csv ../result/model_with_stats_5_larger_filter_size 3
|
{"hexsha": "08882aebfbcb2cdbe7c185f3415957abeeb27d13", "size": 7036, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/hybrid_model/predicator.py", "max_stars_repo_name": "ShawnLYU/A-Hybrid-Approach-of-Insincere-Questions-Detection", "max_stars_repo_head_hexsha": "0bc76c3fc186245f83e665732dac53a1af3f3fbf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/hybrid_model/predicator.py", "max_issues_repo_name": "ShawnLYU/A-Hybrid-Approach-of-Insincere-Questions-Detection", "max_issues_repo_head_hexsha": "0bc76c3fc186245f83e665732dac53a1af3f3fbf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/hybrid_model/predicator.py", "max_forks_repo_name": "ShawnLYU/A-Hybrid-Approach-of-Insincere-Questions-Detection", "max_forks_repo_head_hexsha": "0bc76c3fc186245f83e665732dac53a1af3f3fbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1974248927, "max_line_length": 112, "alphanum_fraction": 0.7633598636, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1665}
|
import numpy as np
import matplotlib.pyplot as pl
import healpy as hp
import pickle
def graticule(ax):
    """Draw a lat/lon grid on *ax*: solid equator, dotted lines every 30 degrees."""
    ax.axhline(0.0, color='k')  # solid equator line
    for lat in (30, 60):
        ax.axhline(lat, color='k', linestyle=':')
        ax.axhline(-lat, color='k', linestyle=':')
    for lon in range(-180, 180, 30):
        ax.axvline(lon, color='k', linestyle=':')
# Close any figures left over from a previous interactive session.
pl.close('all')
# Load the inversion results for II Peg at three temporal samplings
# (step1 = 12 observed phases, step2 = 6, step4 = 3).  Each pickle unpacks to:
# samples, high-res samples (output_hd), synthetic Stokes I profiles per
# sample, their median, the profile of the median map, wavelengths, rotation
# phases and the observations themselves.
with open('iipeg_step1.pk', 'rb') as handle:
    output, output_hd, stokesi, stokesi_median, median_stokesi, wl, phase, obs = pickle.load(handle)
with open('iipeg_step2.pk', 'rb') as handle:
    output_step2, output_hd_step2, stokesi_step2, stokesi_median_step2, median_stokesi_step2, wl_step2, phase_step2, obs_step2 = pickle.load(handle)
with open('iipeg_step4.pk', 'rb') as handle:
    output_step4, output_hd_step4, stokesi_step4, stokesi_median_step4, median_stokesi_step4, wl_step4, phase_step4, obs_step4 = pickle.load(handle)
# with open('validation.pk', 'rb') as handle:
#     output, target, sini, velocity, nangles = pickle.load(handle)
savefig = True  # when True, write each figure into figs/
# Per-pixel summary statistics over the sample axis (axis 0) for each run.
median = np.median(output_hd, axis=0)
mad = np.std(output_hd, axis=0)  # NOTE(review): named "mad" but computed as std -- confirm intent
pct = np.percentile(output_hd, [10, 90], axis=0)
idr = pct[1, :] - pct[0, :]  # 10-90 interdecile range
median_step2 = np.median(output_hd_step2, axis=0)
mad_step2 = np.std(output_hd_step2, axis=0)
pct_step2 = np.percentile(output_hd_step2, [10, 90], axis=0)
idr_step2 = pct_step2[1, :] - pct_step2[0, :]
median_step4 = np.median(output_hd_step4, axis=0)
mad_step4 = np.std(output_hd_step4, axis=0)
pct_step4 = np.percentile(output_hd_step4, [10, 90], axis=0)
idr_step4 = pct_step4[1, :] - pct_step4[0, :]
# HEALPix pixelization (nside=16, NESTED ordering) used by the Cartesian
# projection in the sample plots below.
proj_fun = lambda x,y,z : hp.vec2pix(16, x, y, z, nest=True)
projector = hp.projector.CartesianProj()
# Select which figure set to produce: 0 = median/percentile maps,
# 1 = individual posterior samples, 2 = spectral fits.
which = 2 #[0,1,2]
if (which == 0):
    # Figure set 0: orthographic views of the reconstructed temperature maps.
    # Rows: median map, 10th percentile, 90th percentile; columns: five
    # rotation phases (0.0 .. 0.8).  One figure per temporal sampling.
    fig, ax = pl.subplots(nrows=3, ncols=5, figsize=(12,8))
    for i in range(5):
        # Healpy draws into the "current" axes, so select each panel first.
        pl.axes(ax[0,i])
        hp.orthview(median, nest=True, hold=True, title=f'{i*0.2:3.1f}', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct), max=np.max(pct), cbar=False)
        if (i == 0):
            pl.text(-1.5, 0.9, 'Median', fontsize='large')
        if (i == 2):
            pl.text(-0.3, 1.5, r'N$_\mathrm{obs}$=12', fontsize='large')
        hp.visufunc.graticule()
        pl.axes(ax[1,i])
        hp.orthview(pct[0, :], nest=True, hold=True, title='', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct), max=np.max(pct), cbar=False)
        hp.visufunc.graticule()
        if (i == 0):
            pl.text(-1.5, 0.9, '10%', fontsize='large')
        pl.axes(ax[2,i])
        hp.orthview(pct[1, :], nest=True, hold=True, title='', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct), max=np.max(pct), cbar=False)
        hp.visufunc.graticule()
        if (i == 0):
            pl.text(-1.5, 0.9, '90%', fontsize='large')
    fig.subplots_adjust(bottom=0.1)
    # Shared horizontal colorbar below the panels.
    cbar_ax = fig.add_axes([0.35, 0.08, 0.30, 0.02])
    # NOTE(review): the colorbar is normalized to the median map's range while
    # the maps above are clipped to the percentile range (pct) -- confirm intended.
    fig.colorbar(pl.cm.ScalarMappable(norm=pl.Normalize(vmin=np.min(median), vmax=np.max(median)), cmap=pl.cm.inferno), cax=cbar_ax, orientation='horizontal', label='Temperature [K]')
    if (savefig):
        pl.savefig('figs/iipeg_median_idr.png')
        pl.savefig('figs/iipeg_median_idr.pdf', bbox_inches='tight')
    #----------------------------------
    # Same layout for the 6-observation (step2) run.
    fig, ax = pl.subplots(nrows=3, ncols=5, figsize=(12,8))
    for i in range(5):
        pl.axes(ax[0,i])
        hp.orthview(median_step2, nest=True, hold=True, title=f'{i*0.2:3.1f}', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct_step2), max=np.max(pct_step2), cbar=False)
        if (i == 0):
            pl.text(-1.5, 0.9, 'Median', fontsize='large')
        if (i == 2):
            pl.text(-0.3, 1.5, r'N$_\mathrm{obs}$=6', fontsize='large')
        hp.visufunc.graticule()
        pl.axes(ax[1,i])
        hp.orthview(pct_step2[0, :], nest=True, hold=True, title='', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct_step2), max=np.max(pct_step2), cbar=False)
        hp.visufunc.graticule()
        if (i == 0):
            pl.text(-1.5, 0.9, '10%', fontsize='large')
        pl.axes(ax[2,i])
        hp.orthview(pct_step2[1, :], nest=True, hold=True, title='', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct_step2), max=np.max(pct_step2), cbar=False)
        hp.visufunc.graticule()
        if (i == 0):
            pl.text(-1.5, 0.9, '90%', fontsize='large')
    fig.subplots_adjust(bottom=0.1)
    cbar_ax = fig.add_axes([0.35, 0.08, 0.30, 0.02])
    # NOTE(review): colorbar range uses `median` from the 12-obs run, not
    # `median_step2` -- looks like a copy-paste slip; confirm before changing.
    fig.colorbar(pl.cm.ScalarMappable(norm=pl.Normalize(vmin=np.min(median), vmax=np.max(median)), cmap=pl.cm.inferno), cax=cbar_ax, orientation='horizontal', label='Temperature [K]')
    if (savefig):
        pl.savefig('figs/iipeg_median_idr_step2.png')
        pl.savefig('figs/iipeg_median_idr_step2.pdf', bbox_inches='tight')
    #----------------------------------
    # Same layout for the 3-observation (step4) run.
    fig, ax = pl.subplots(nrows=3, ncols=5, figsize=(12,8))
    for i in range(5):
        pl.axes(ax[0,i])
        hp.orthview(median_step4, nest=True, hold=True, title=f'{i*0.2:3.1f}', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct_step4), max=np.max(pct_step4), cbar=False)
        if (i == 0):
            pl.text(-1.5, 0.9, 'Median', fontsize='large')
        if (i == 2):
            pl.text(-0.3, 1.5, r'N$_\mathrm{obs}$=3', fontsize='large')
        hp.visufunc.graticule()
        pl.axes(ax[1,i])
        hp.orthview(pct_step4[0, :], nest=True, hold=True, title='', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct_step4), max=np.max(pct_step4), cbar=False)
        hp.visufunc.graticule()
        if (i == 0):
            pl.text(-1.5, 0.9, '10%', fontsize='large')
        pl.axes(ax[2,i])
        hp.orthview(pct_step4[1, :], nest=True, hold=True, title='', cmap=pl.cm.inferno, half_sky=True, rot=(i*360/5.,0,0), min=np.min(pct_step4), max=np.max(pct_step4), cbar=False)
        hp.visufunc.graticule()
        if (i == 0):
            pl.text(-1.5, 0.9, '90%', fontsize='large')
    fig.subplots_adjust(bottom=0.1)
    cbar_ax = fig.add_axes([0.35, 0.08, 0.30, 0.02])
    # NOTE(review): same as above -- colorbar range uses the 12-obs `median`.
    fig.colorbar(pl.cm.ScalarMappable(norm=pl.Normalize(vmin=np.min(median), vmax=np.max(median)), cmap=pl.cm.inferno), cax=cbar_ax, orientation='horizontal', label='Temperature [K]')
    if (savefig):
        pl.savefig('figs/iipeg_median_idr_step4.png')
        pl.savefig('figs/iipeg_median_idr_step4.pdf', bbox_inches='tight')
if (which == 1):
    # Figure set 1: 4x4 grid of individual high-res samples, shown in a
    # Cartesian (plate carree) projection; one figure per temporal sampling.
    #-------------- 12 obs
    fig, ax = pl.subplots(nrows=4, ncols=4, figsize=(12,8), sharex=True, sharey=True)
    for i in range(16):
        # Project the HEALPix map of sample i onto a lon/lat image.
        tmp = projector.projmap(output_hd[i, :], proj_fun)
        ax.flat[i].imshow(tmp, cmap=pl.cm.inferno, interpolation='none', origin='lower', extent=[-180, 180, -90, 90])
        ax.flat[i].set_yticks([-90,0,90])
        ax.flat[i].set_xticks([-180,-90,0,90,180])
        graticule(ax.flat[i])
    ax.flat[0].set_title(r'N$_\mathrm{obs}$=12')
    fig.subplots_adjust(top=0.85)
    cbar_ax = fig.add_axes([0.35, 0.93, 0.30, 0.02])
    # NOTE(review): the color scale spans samples 0..16 (17 samples) although
    # only 16 are plotted -- presumably harmless, but confirm.
    fig.colorbar(pl.cm.ScalarMappable(norm=pl.Normalize(vmin=np.min(output_hd[0:17,:]), vmax=np.max(output_hd[0:17,:])), cmap=pl.cm.inferno), cax=cbar_ax, orientation='horizontal', label='Temperature [K]')
    if (savefig):
        pl.savefig('figs/iipeg_samples.png')
        pl.savefig('figs/iipeg_samples.pdf', bbox_inches='tight')
    #-------------- 6 obs
    fig, ax = pl.subplots(nrows=4, ncols=4, figsize=(12,8), sharex=True, sharey=True)
    for i in range(16):
        tmp = projector.projmap(output_hd_step2[i, :], proj_fun)
        ax.flat[i].imshow(tmp, cmap=pl.cm.inferno, interpolation='none', origin='lower', extent=[-180, 180, -90, 90])
        ax.flat[i].set_yticks([-90,0,90])
        ax.flat[i].set_xticks([-180,-90,0,90,180])
        graticule(ax.flat[i])
    ax.flat[0].set_title(r'N$_\mathrm{obs}$=6')
    fig.subplots_adjust(top=0.85)
    cbar_ax = fig.add_axes([0.35, 0.93, 0.30, 0.02])
    fig.colorbar(pl.cm.ScalarMappable(norm=pl.Normalize(vmin=np.min(output_hd_step2[0:17,:]), vmax=np.max(output_hd_step2[0:17,:])), cmap=pl.cm.inferno), cax=cbar_ax, orientation='horizontal', label='Temperature [K]')
    if (savefig):
        pl.savefig('figs/iipeg_samples_step2.png')
        pl.savefig('figs/iipeg_samples_step2.pdf', bbox_inches='tight')
    #-------------- 3 obs
    fig, ax = pl.subplots(nrows=4, ncols=4, figsize=(12,8), sharex=True, sharey=True)
    for i in range(16):
        tmp = projector.projmap(output_hd_step4[i, :], proj_fun)
        ax.flat[i].imshow(tmp, cmap=pl.cm.inferno, interpolation='none', origin='lower', extent=[-180, 180, -90, 90])
        ax.flat[i].set_yticks([-90,0,90])
        ax.flat[i].set_xticks([-180,-90,0,90,180])
        graticule(ax.flat[i])
    ax.flat[0].set_title(r'N$_\mathrm{obs}$=3')
    fig.subplots_adjust(top=0.85)
    cbar_ax = fig.add_axes([0.35, 0.93, 0.30, 0.02])
    fig.colorbar(pl.cm.ScalarMappable(norm=pl.Normalize(vmin=np.min(output_hd_step4[0:17,:]), vmax=np.max(output_hd_step4[0:17,:])), cmap=pl.cm.inferno), cax=cbar_ax, orientation='horizontal', label='Temperature [K]')
    if (savefig):
        pl.savefig('figs/iipeg_samples_step4.png')
        pl.savefig('figs/iipeg_samples_step4.pdf', bbox_inches='tight')
if (which == 2):
    # Figure set 2: observed vs. synthetic Stokes I spectra for the three
    # Fe I regions, stacked by rotation phase (vertical offset 0.2 per phase).
    # Colors: C0 = observation, C1 = 100 posterior draws, C2 = median of the
    # synthetic profiles, C3 = profile of the median map.
    origin = [5980, 6000, 6020]
    labels = ['5987.1', '6003.0', '6024.1']
    #-------------- 12 obs
    fig, ax = pl.subplots(nrows=1, ncols=3, figsize=(10,18))
    for region in range(3):
        for i in range(len(phase)):
            # Plot phases from top (last) to bottom (first).
            ind = len(phase) - 1 - i
            ax[region].errorbar(wl[region] - origin[region], obs[region][ind, :] + i*0.2, yerr=1e-3, color='C0')
            for j in range(100):
                ax[region].plot(wl[region] - origin[region], stokesi[j][region][ind, :] + i*0.2, color='C1', alpha=0.05)
            ax[region].plot(wl[region] - origin[region], stokesi_median[region][ind, :] + i*0.2, color='C2', alpha=1.0)
            ax[region].plot(wl[region] - origin[region], median_stokesi[region][ind, :] + i*0.2, color='C3', alpha=1.0)
            if (region == 0):
                ax[region].text(6.52, 1.03+i*0.2, f'{phase[ind]/(2.0*np.pi):5.3f}')
    for region in range(3):
        ax[region].set_xlabel(fr'$\lambda+${origin[region]} [$\AA$]')
        ax[region].set_title(fr'Fe I {labels[region]} $\AA$')
    if (savefig):
        pl.savefig('figs/iipeg_spectra.png')
        pl.savefig('figs/iipeg_spectra.pdf', bbox_inches='tight')
    #-------------- 6 obs
    # NOTE(review): this section reuses `wl` from the 12-obs run instead of
    # `wl_step2` -- presumably the wavelength grids are identical; confirm.
    fig, ax = pl.subplots(nrows=1, ncols=3, figsize=(10,18))
    for region in range(3):
        for i in range(len(phase_step2)):
            ind = len(phase_step2) - 1 - i
            ax[region].errorbar(wl[region] - origin[region], obs_step2[region][ind, :] + i*0.2, yerr=1e-3, color='C0')
            for j in range(100):
                ax[region].plot(wl[region] - origin[region], stokesi_step2[j][region][ind, :] + i*0.2, color='C1', alpha=0.05)
            ax[region].plot(wl[region] - origin[region], stokesi_median_step2[region][ind, :] + i*0.2, color='C2', alpha=1.0)
            ax[region].plot(wl[region] - origin[region], median_stokesi_step2[region][ind, :] + i*0.2, color='C3', alpha=1.0)
            if (region == 0):
                ax[region].text(6.52, 1.03+i*0.2, f'{phase_step2[ind]/(2.0*np.pi):5.3f}')
    for region in range(3):
        ax[region].set_xlabel(fr'$\lambda+${origin[region]} [$\AA$]')
        ax[region].set_title(fr'Fe I {labels[region]} $\AA$')
    if (savefig):
        pl.savefig('figs/iipeg_spectra_step2.png')
        pl.savefig('figs/iipeg_spectra_step2.pdf', bbox_inches='tight')
    #-------------- 3 obs
    fig, ax = pl.subplots(nrows=1, ncols=3, figsize=(10,18))
    for region in range(3):
        for i in range(len(phase_step4)):
            ind = len(phase_step4) - 1 - i
            ax[region].errorbar(wl[region] - origin[region], obs_step4[region][ind, :] + i*0.2, yerr=1e-3, color='C0')
            for j in range(100):
                ax[region].plot(wl[region] - origin[region], stokesi_step4[j][region][ind, :] + i*0.2, color='C1', alpha=0.05)
            # BUGFIX: plot the step4 median profile; the original plotted
            # stokesi_median_step2 here (copy-paste from the 6-obs section),
            # while every other line in this loop uses the _step4 variables.
            ax[region].plot(wl[region] - origin[region], stokesi_median_step4[region][ind, :] + i*0.2, color='C2', alpha=1.0)
            ax[region].plot(wl[region] - origin[region], median_stokesi_step4[region][ind, :] + i*0.2, color='C3', alpha=1.0)
            if (region == 0):
                ax[region].text(6.52, 1.03+i*0.2, f'{phase_step4[ind]/(2.0*np.pi):5.3f}')
    for region in range(3):
        ax[region].set_xlabel(fr'$\lambda+${origin[region]} [$\AA$]')
        ax[region].set_title(fr'Fe I {labels[region]} $\AA$')
    if (savefig):
        pl.savefig('figs/iipeg_spectra_step4.png')
        pl.savefig('figs/iipeg_spectra_step4.pdf', bbox_inches='tight')
pl.show()
|
{"hexsha": "5f3478e35fe26b3122c6d10e04895ba6622e0946", "size": 12879, "ext": "py", "lang": "Python", "max_stars_repo_path": "doplot_iipeg.py", "max_stars_repo_name": "aasensio/bayesDI", "max_stars_repo_head_hexsha": "4ddad57d89c3512b4c4ee5684ddc5608060ebdec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-20T07:59:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T20:19:48.000Z", "max_issues_repo_path": "doplot_iipeg.py", "max_issues_repo_name": "aasensio/bayesDI", "max_issues_repo_head_hexsha": "4ddad57d89c3512b4c4ee5684ddc5608060ebdec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doplot_iipeg.py", "max_forks_repo_name": "aasensio/bayesDI", "max_forks_repo_head_hexsha": "4ddad57d89c3512b4c4ee5684ddc5608060ebdec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.6630434783, "max_line_length": 217, "alphanum_fraction": 0.597018402, "include": true, "reason": "import numpy", "num_tokens": 4301}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from pmdarima.arima._arima import C_is_not_finite
import numpy as np
def test_not_finite():
    """C_is_not_finite must flag NaN and both infinities, and pass finite floats."""
    assert C_is_not_finite(np.nan)
    assert C_is_not_finite(np.inf)
    # Negative infinity is non-finite as well -- previously untested edge case.
    assert C_is_not_finite(-np.inf)
    assert not C_is_not_finite(5.)
    # Zero is finite and should not be flagged.
    assert not C_is_not_finite(0.)
|
{"hexsha": "213285dcd8e57b152df4d8650dffd9668d12aa4b", "size": 265, "ext": "py", "lang": "Python", "max_stars_repo_path": "pmdarima/arima/tests/test_c_arima.py", "max_stars_repo_name": "Saravji/pmdarima", "max_stars_repo_head_hexsha": "7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-17T19:00:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-17T19:00:42.000Z", "max_issues_repo_path": "pmdarima/arima/tests/test_c_arima.py", "max_issues_repo_name": "Saravji/pmdarima", "max_issues_repo_head_hexsha": "7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pmdarima/arima/tests/test_c_arima.py", "max_forks_repo_name": "Saravji/pmdarima", "max_forks_repo_head_hexsha": "7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.9285714286, "max_line_length": 49, "alphanum_fraction": 0.7509433962, "include": true, "reason": "import numpy", "num_tokens": 75}
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.sparsefuncs import min_max_axis
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.validation import _deprecate_positional_args
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils._encode import _encode, _unique
# Public API of this module: names exported by ``from ... import *`` and
# documented in the scikit-learn preprocessing reference.
__all__ = [
    'label_binarize',
    'LabelBinarizer',
    'LabelEncoder',
    'MultiLabelBinarizer',
]
class LabelEncoder(TransformerMixin, BaseEstimator):
    """Encode target labels with value between 0 and n_classes-1.
    This transformer should be used to encode target values, *i.e.* `y`, and
    not the input `X`.
    Read more in the :ref:`User Guide <preprocessing_targets>`.
    .. versionadded:: 0.12
    Attributes
    ----------
    classes_ : array of shape (n_class,)
        Holds the label for each class.
    Examples
    --------
    `LabelEncoder` can be used to normalize labels.
    >>> from sklearn import preprocessing
    >>> le = preprocessing.LabelEncoder()
    >>> le.fit([1, 2, 2, 6])
    LabelEncoder()
    >>> le.classes_
    array([1, 2, 6])
    >>> le.transform([1, 1, 2, 6])
    array([0, 0, 1, 2]...)
    >>> le.inverse_transform([0, 0, 1, 2])
    array([1, 1, 2, 6])
    It can also be used to transform non-numerical labels (as long as they are
    hashable and comparable) to numerical labels.
    >>> le = preprocessing.LabelEncoder()
    >>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
    LabelEncoder()
    >>> list(le.classes_)
    ['amsterdam', 'paris', 'tokyo']
    >>> le.transform(["tokyo", "tokyo", "paris"])
    array([2, 2, 1]...)
    >>> list(le.inverse_transform([2, 2, 1]))
    ['tokyo', 'tokyo', 'paris']
    See also
    --------
    sklearn.preprocessing.OrdinalEncoder : Encode categorical features
        using an ordinal encoding scheme.
    sklearn.preprocessing.OneHotEncoder : Encode categorical features
        as a one-hot numeric array.
    """

    def fit(self, y):
        """Learn the sorted set of unique labels present in ``y``.
        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Target values.
        Returns
        -------
        self : returns an instance of self.
        """
        labels = column_or_1d(y, warn=True)
        self.classes_ = _unique(labels)
        return self

    def fit_transform(self, y):
        """Learn the label set and encode ``y`` in a single pass.
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.
        Returns
        -------
        y : array-like of shape [n_samples]
        """
        labels = column_or_1d(y, warn=True)
        # _unique hands back the inverse mapping for free, which is exactly
        # the encoded representation of ``y``.
        self.classes_, encoded = _unique(labels, return_inverse=True)
        return encoded

    def transform(self, y):
        """Transform labels to normalized encoding.
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.
        Returns
        -------
        y : array-like of shape [n_samples]
        """
        check_is_fitted(self)
        labels = column_or_1d(y, warn=True)
        # The transform of an empty array is an empty array.
        if _num_samples(labels) == 0:
            return np.array([])
        return _encode(labels, uniques=self.classes_)

    def inverse_transform(self, y):
        """Transform encoded labels back to the original labels.
        Parameters
        ----------
        y : numpy array of shape [n_samples]
            Target values.
        Returns
        -------
        y : numpy array of shape [n_samples]
        """
        check_is_fitted(self)
        codes = column_or_1d(y, warn=True)
        # The inverse transform of an empty array is an empty array.
        if _num_samples(codes) == 0:
            return np.array([])
        # Reject any code that falls outside the range of fitted classes.
        unseen = np.setdiff1d(codes, np.arange(len(self.classes_)))
        if len(unseen):
            raise ValueError(
                "y contains previously unseen labels: %s" % str(unseen))
        return self.classes_[np.asarray(codes)]

    def _more_tags(self):
        # Tell sklearn's common test machinery this estimator consumes
        # one-dimensional label arrays.
        return {'X_types': ['1dlabels']}
class LabelBinarizer(TransformerMixin, BaseEstimator):
    """Binarize labels in a one-vs-all fashion
    Several regression and binary classification algorithms are
    available in scikit-learn. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.
    At learning time, this simply consists in learning one regressor
    or binary classifier per class. In doing so, one needs to convert
    multi-class labels to binary labels (belong or does not belong
    to the class). LabelBinarizer makes this process easy with the
    transform method.
    At prediction time, one assigns the class for which the corresponding
    model gave the greatest confidence. LabelBinarizer makes this easy
    with the inverse_transform method.
    Read more in the :ref:`User Guide <preprocessing_targets>`.
    Parameters
    ----------
    neg_label : int (default: 0)
        Value with which negative labels must be encoded.
    pos_label : int (default: 1)
        Value with which positive labels must be encoded.
    sparse_output : boolean (default: False)
        True if the returned array from transform is desired to be in sparse
        CSR format.
    Attributes
    ----------
    classes_ : array of shape [n_class]
        Holds the label for each class.
    y_type_ : str,
        Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible type are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
    sparse_input_ : boolean,
        True if the input data to transform is given as a sparse matrix, False
        otherwise.
    Examples
    --------
    >>> from sklearn import preprocessing
    >>> lb = preprocessing.LabelBinarizer()
    >>> lb.fit([1, 2, 6, 4, 2])
    LabelBinarizer()
    >>> lb.classes_
    array([1, 2, 4, 6])
    >>> lb.transform([1, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])
    Binary targets transform to a column vector
    >>> lb = preprocessing.LabelBinarizer()
    >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])
    Passing a 2D matrix for multilabel classification
    >>> import numpy as np
    >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
    LabelBinarizer()
    >>> lb.classes_
    array([0, 1, 2])
    >>> lb.transform([0, 1, 2, 1])
    array([[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1],
           [0, 1, 0]])
    See also
    --------
    label_binarize : function to perform the transform operation of
        LabelBinarizer with fixed classes.
    sklearn.preprocessing.OneHotEncoder : encode categorical features
        using a one-hot aka one-of-K scheme.
    """
    @_deprecate_positional_args
    def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False):
        # Validate the label encoding up front so misconfiguration fails at
        # construction time rather than at transform time.
        if neg_label >= pos_label:
            raise ValueError("neg_label={0} must be strictly less than "
                             "pos_label={1}.".format(neg_label, pos_label))
        # CSR output can only represent absent entries as 0, so sparse output
        # requires neg_label == 0 and a non-zero pos_label.
        if sparse_output and (pos_label == 0 or neg_label != 0):
            raise ValueError("Sparse binarization is only supported with non "
                             "zero pos_label and zero neg_label, got "
                             "pos_label={0} and neg_label={1}"
                             "".format(pos_label, neg_label))
        self.neg_label = neg_label
        self.pos_label = pos_label
        self.sparse_output = sparse_output
    def fit(self, y):
        """Fit label binarizer
        Parameters
        ----------
        y : array of shape [n_samples,] or [n_samples, n_classes]
            Target values. The 2-d matrix should only contain 0 and 1,
            represents multilabel classification.
        Returns
        -------
        self : returns an instance of self.
        """
        # Remember the target type so transform/inverse_transform can check
        # consistency and pick the right decoding strategy.
        self.y_type_ = type_of_target(y)
        if 'multioutput' in self.y_type_:
            raise ValueError("Multioutput target data is not supported with "
                             "label binarization")
        if _num_samples(y) == 0:
            raise ValueError('y has 0 samples: %r' % y)
        # Remember whether y was sparse so inverse_transform can return the
        # same representation.
        self.sparse_input_ = sp.issparse(y)
        self.classes_ = unique_labels(y)
        return self
    def fit_transform(self, y):
        """Fit label binarizer and transform multi-class labels to binary
        labels.
        The output of transform is sometimes referred to as
        the 1-of-K coding scheme.
        Parameters
        ----------
        y : array or sparse matrix of shape [n_samples,] or \
            [n_samples, n_classes]
            Target values. The 2-d matrix should only contain 0 and 1,
            represents multilabel classification. Sparse matrix can be
            CSR, CSC, COO, DOK, or LIL.
        Returns
        -------
        Y : array or CSR matrix of shape [n_samples, n_classes]
            Shape will be [n_samples, 1] for binary problems.
        """
        # Simple convenience composition of fit followed by transform.
        return self.fit(y).transform(y)
    def transform(self, y):
        """Transform multi-class labels to binary labels
        The output of transform is sometimes referred to by some authors as
        the 1-of-K coding scheme.
        Parameters
        ----------
        y : array or sparse matrix of shape [n_samples,] or \
            [n_samples, n_classes]
            Target values. The 2-d matrix should only contain 0 and 1,
            represents multilabel classification. Sparse matrix can be
            CSR, CSC, COO, DOK, or LIL.
        Returns
        -------
        Y : numpy array or CSR matrix of shape [n_samples, n_classes]
            Shape will be [n_samples, 1] for binary problems.
        """
        check_is_fitted(self)
        # Multilabel input is only valid if the binarizer was fitted on
        # multilabel data in the first place.
        y_is_multilabel = type_of_target(y).startswith('multilabel')
        if y_is_multilabel and not self.y_type_.startswith('multilabel'):
            raise ValueError("The object was not fitted with multilabel"
                             " input.")
        # Delegate to the module-level function with the fitted classes.
        return label_binarize(y, classes=self.classes_,
                              pos_label=self.pos_label,
                              neg_label=self.neg_label,
                              sparse_output=self.sparse_output)
    def inverse_transform(self, Y, threshold=None):
        """Transform binary labels back to multi-class labels
        Parameters
        ----------
        Y : numpy array or sparse matrix with shape [n_samples, n_classes]
            Target values. All sparse matrices are converted to CSR before
            inverse transformation.
        threshold : float or None
            Threshold used in the binary and multi-label cases.
            Use 0 when ``Y`` contains the output of decision_function
            (classifier).
            Use 0.5 when ``Y`` contains the output of predict_proba.
            If None, the threshold is assumed to be half way between
            neg_label and pos_label.
        Returns
        -------
        y : numpy array or CSR matrix of shape [n_samples] Target values.
        Notes
        -----
        In the case when the binary labels are fractional
        (probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows to use the output of a
        linear model's decision_function method directly as the input
        of inverse_transform.
        """
        check_is_fitted(self)
        # Default decision threshold: the midpoint of the label encoding.
        if threshold is None:
            threshold = (self.pos_label + self.neg_label) / 2.
        # Multiclass targets are decoded by argmax; everything else by
        # thresholding each column.
        if self.y_type_ == "multiclass":
            y_inv = _inverse_binarize_multiclass(Y, self.classes_)
        else:
            y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
                                                   self.classes_, threshold)
        # Return the same representation (sparse/dense) that fit() saw.
        if self.sparse_input_:
            y_inv = sp.csr_matrix(y_inv)
        elif sp.issparse(y_inv):
            y_inv = y_inv.toarray()
        return y_inv
    def _more_tags(self):
        # Tell sklearn's common test machinery this estimator consumes
        # one-dimensional label arrays.
        return {'X_types': ['1dlabels']}
@_deprecate_positional_args
def label_binarize(y, *, classes, neg_label=0, pos_label=1,
                   sparse_output=False):
    """Binarize labels in a one-vs-all fashion.

    Several regression and binary classification algorithms are
    available in scikit-learn. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.

    This function makes it possible to compute this transformation for a
    fixed set of class labels known ahead of time.

    Parameters
    ----------
    y : array-like
        Sequence of integer labels or multilabel data to encode.

    classes : array-like of shape [n_classes]
        Uniquely holds the label for each class.

    neg_label : int (default: 0)
        Value with which negative labels must be encoded.

    pos_label : int (default: 1)
        Value with which positive labels must be encoded.

    sparse_output : boolean (default: False)
        Set to true if output binary array is desired in CSR sparse format.

    Returns
    -------
    Y : numpy array or CSR matrix of shape [n_samples, n_classes]
        Shape will be [n_samples, 1] for binary problems.

    Examples
    --------
    >>> from sklearn.preprocessing import label_binarize
    >>> label_binarize([1, 6], classes=[1, 2, 4, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])

    The class ordering is preserved:

    >>> label_binarize([1, 6], classes=[1, 6, 4, 2])
    array([[1, 0, 0, 0],
           [0, 1, 0, 0]])

    Binary targets transform to a column vector

    >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])

    See also
    --------
    LabelBinarizer : class used to wrap the functionality of label_binarize and
        allow for fitting to classes independently of the transform operation
    """
    if not isinstance(y, list):
        # XXX Workaround that will be removed when list of list format is
        # dropped
        y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
    else:
        if _num_samples(y) == 0:
            raise ValueError('y has 0 samples: %r' % y)
    if neg_label >= pos_label:
        raise ValueError("neg_label={0} must be strictly less than "
                         "pos_label={1}.".format(neg_label, pos_label))

    if (sparse_output and (pos_label == 0 or neg_label != 0)):
        raise ValueError("Sparse binarization is only supported with non "
                         "zero pos_label and zero neg_label, got "
                         "pos_label={0} and neg_label={1}"
                         "".format(pos_label, neg_label))

    # To account for pos_label == 0 in the dense case: binarize against a
    # temporary non-zero positive label and swap back at the end.
    pos_switch = pos_label == 0
    if pos_switch:
        pos_label = -neg_label

    y_type = type_of_target(y)
    if 'multioutput' in y_type:
        raise ValueError("Multioutput target data is not supported with label "
                         "binarization")
    if y_type == 'unknown':
        raise ValueError("The type of target data is not known")

    n_samples = y.shape[0] if sp.issparse(y) else len(y)
    n_classes = len(classes)
    classes = np.asarray(classes)

    if y_type == "binary":
        if n_classes == 1:
            if sparse_output:
                return sp.csr_matrix((n_samples, 1), dtype=int)
            else:
                # `np.int` was a deprecated alias of the builtin `int`
                # (removed in NumPy 1.24); use `int` directly.
                Y = np.zeros((len(y), 1), dtype=int)
                Y += neg_label
                return Y
        elif len(classes) >= 3:
            y_type = "multiclass"

    sorted_class = np.sort(classes)
    if y_type == "multilabel-indicator":
        y_n_classes = y.shape[1] if hasattr(y, 'shape') else len(y[0])
        if classes.size != y_n_classes:
            raise ValueError("classes {0} mismatch with the labels {1}"
                             " found in the data"
                             .format(classes, unique_labels(y)))

    if y_type in ("binary", "multiclass"):
        y = column_or_1d(y)

        # pick out the known labels from y
        y_in_classes = np.in1d(y, classes)
        y_seen = y[y_in_classes]
        indices = np.searchsorted(sorted_class, y_seen)
        indptr = np.hstack((0, np.cumsum(y_in_classes)))

        data = np.empty_like(indices)
        data.fill(pos_label)
        Y = sp.csr_matrix((data, indices, indptr),
                          shape=(n_samples, n_classes))
    elif y_type == "multilabel-indicator":
        Y = sp.csr_matrix(y)
        if pos_label != 1:
            data = np.empty_like(Y.data)
            data.fill(pos_label)
            Y.data = data
    else:
        raise ValueError("%s target data is not supported with label "
                         "binarization" % y_type)

    if not sparse_output:
        Y = Y.toarray()
        Y = Y.astype(int, copy=False)

        if neg_label != 0:
            Y[Y == 0] = neg_label

        if pos_switch:
            Y[Y == pos_label] = 0
    else:
        Y.data = Y.data.astype(int, copy=False)

    # preserve label ordering requested by the caller (columns follow
    # `classes`, not the sorted order used internally)
    if np.any(classes != sorted_class):
        indices = np.searchsorted(sorted_class, classes)
        Y = Y[:, indices]

    if y_type == "binary":
        # Binary problems are reported as a single column (positive class).
        if sparse_output:
            Y = Y.getcol(-1)
        else:
            Y = Y[:, -1].reshape((-1, 1))

    return Y
def _inverse_binarize_multiclass(y, classes):
    """Inverse label binarization transformation for multiclass.

    Multiclass uses the maximal score instead of a threshold.
    """
    classes = np.asarray(classes)

    if sp.issparse(y):
        # Find the argmax for each row in y where y is a CSR matrix
        # without densifying: compare each stored value against its
        # row's maximum and keep the first match per row.
        y = y.tocsr()
        n_samples, n_outputs = y.shape
        outputs = np.arange(n_outputs)
        # row_max[i] is the maximum stored value of row i
        # (min_max_axis is a project sparse helper — presumably returns
        # (per-row min, per-row max); confirm against sparsefuncs).
        row_max = min_max_axis(y, 1)[1]
        # number of stored (non-zero) entries per row
        row_nnz = np.diff(y.indptr)
        # broadcast each row's max over that row's stored entries
        y_data_repeated_max = np.repeat(row_max, row_nnz)
        # picks out all indices obtaining the maximum per row
        y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
        # For corner case where last row has a max of 0
        if row_max[-1] == 0:
            y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
        # Gets the index of the first argmax in each row from y_i_all_argmax
        index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
        # first argmax of each row (extra 0 entry guards the sentinel index
        # appended above)
        y_ind_ext = np.append(y.indices, [0])
        y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
        # Handle rows of all 0: the implicit zeros win, argmax is column 0
        y_i_argmax[np.where(row_nnz == 0)[0]] = 0
        # Handles rows with max of 0 that contain negative numbers:
        # an implicit (absent) zero beats every stored negative value,
        # so pick the first column NOT stored in the row.
        samples = np.arange(n_samples)[(row_nnz > 0) &
                                       (row_max.ravel() == 0)]
        for i in samples:
            ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
            y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
        return classes[y_i_argmax]
    else:
        # Dense path: plain argmax per row, clipped into the class range.
        return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(TransformerMixin, BaseEstimator):
    """Transform between iterable of iterables and a multilabel format.

    Although a list of sets or tuples is a very intuitive format for multilabel
    data, it is unwieldy to process. This transformer converts between this
    intuitive format and the supported multilabel format: a (samples x classes)
    binary matrix indicating the presence of a class label.

    Parameters
    ----------
    classes : array-like of shape [n_classes] (optional)
        Indicates an ordering for the class labels.
        All entries should be unique (cannot contain duplicate classes).

    sparse_output : boolean (default: False)
        Set to true if output binary array is desired in CSR sparse format.

    Attributes
    ----------
    classes_ : array of labels
        A copy of the `classes` parameter where provided,
        or otherwise, the sorted set of classes found when fitting.

    Examples
    --------
    >>> from sklearn.preprocessing import MultiLabelBinarizer
    >>> mlb = MultiLabelBinarizer()
    >>> mlb.fit_transform([(1, 2), (3,)])
    array([[1, 1, 0],
           [0, 0, 1]])
    >>> mlb.classes_
    array([1, 2, 3])

    >>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])
    array([[0, 1, 1],
           [1, 0, 0]])
    >>> list(mlb.classes_)
    ['comedy', 'sci-fi', 'thriller']

    A common mistake is to pass in a list, which leads to the following issue:

    >>> mlb = MultiLabelBinarizer()
    >>> mlb.fit(['sci-fi', 'thriller', 'comedy'])
    MultiLabelBinarizer()
    >>> mlb.classes_
    array(['-', 'c', 'd', 'e', 'f', 'h', 'i', 'l', 'm', 'o', 'r', 's', 't',
        'y'], dtype=object)

    To correct this, the list of labels should be passed in as:

    >>> mlb = MultiLabelBinarizer()
    >>> mlb.fit([['sci-fi', 'thriller', 'comedy']])
    MultiLabelBinarizer()
    >>> mlb.classes_
    array(['comedy', 'sci-fi', 'thriller'], dtype=object)

    See also
    --------
    sklearn.preprocessing.OneHotEncoder : encode categorical features
        using a one-hot aka one-of-K scheme.
    """
    @_deprecate_positional_args
    def __init__(self, *, classes=None, sparse_output=False):
        self.classes = classes
        self.sparse_output = sparse_output

    def fit(self, y):
        """Fit the label sets binarizer, storing :term:`classes_`

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        self : returns this MultiLabelBinarizer instance
        """
        self._cached_dict = None
        if self.classes is None:
            classes = sorted(set(itertools.chain.from_iterable(y)))
        elif len(set(self.classes)) < len(self.classes):
            raise ValueError("The classes argument contains duplicate "
                             "classes. Remove these duplicates before passing "
                             "them to MultiLabelBinarizer.")
        else:
            classes = self.classes
        # `np.int` was a deprecated alias of the builtin `int` (removed in
        # NumPy 1.24); use `int` directly.
        dtype = int if all(isinstance(c, int) for c in classes) else object
        self.classes_ = np.empty(len(classes), dtype=dtype)
        self.classes_[:] = classes
        return self

    def fit_transform(self, y):
        """Fit the label sets binarizer and transform the given label sets

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        y_indicator : array or CSR matrix, shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
            `y[i]`, and 0 otherwise.
        """
        self._cached_dict = None
        if self.classes is not None:
            return self.fit(y).transform(y)

        # Automatically increment on new class: the defaultdict's factory
        # returns its current length, assigning indices 0, 1, 2, ...
        class_mapping = defaultdict(int)
        class_mapping.default_factory = class_mapping.__len__
        yt = self._transform(y, class_mapping)

        # sort classes and reorder columns
        tmp = sorted(class_mapping, key=class_mapping.get)

        # (make safe for tuples)
        dtype = int if all(isinstance(c, int) for c in tmp) else object
        class_mapping = np.empty(len(tmp), dtype=dtype)
        class_mapping[:] = tmp
        self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
        # ensure yt.indices keeps its current dtype
        yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
                              copy=False)

        if not self.sparse_output:
            yt = yt.toarray()

        return yt

    def transform(self, y):
        """Transform the given label sets

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        y_indicator : array or CSR matrix, shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
            `y[i]`, and 0 otherwise.
        """
        check_is_fitted(self)

        class_to_index = self._build_cache()
        yt = self._transform(y, class_to_index)

        if not self.sparse_output:
            yt = yt.toarray()

        return yt

    def _build_cache(self):
        # Lazily build (and memoize) the label -> column-index mapping.
        if self._cached_dict is None:
            self._cached_dict = dict(zip(self.classes_,
                                         range(len(self.classes_))))

        return self._cached_dict

    def _transform(self, y, class_mapping):
        """Transforms the label sets with a given mapping

        Parameters
        ----------
        y : iterable of iterables
        class_mapping : Mapping
            Maps from label to column index in label indicator matrix

        Returns
        -------
        y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
            Label indicator matrix
        """
        indices = array.array('i')
        indptr = array.array('i', [0])
        unknown = set()
        for labels in y:
            index = set()
            for label in labels:
                try:
                    index.add(class_mapping[label])
                except KeyError:
                    unknown.add(label)
            indices.extend(index)
            indptr.append(len(indices))
        if unknown:
            warnings.warn('unknown class(es) {0} will be ignored'
                          .format(sorted(unknown, key=str)))
        data = np.ones(len(indices), dtype=int)

        return sp.csr_matrix((data, indices, indptr),
                             shape=(len(indptr) - 1, len(class_mapping)))

    def inverse_transform(self, yt):
        """Transform the given indicator matrix into label sets

        Parameters
        ----------
        yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.

        Returns
        -------
        y : list of tuples
            The set of labels for each sample such that `y[i]` consists of
            `classes_[j]` for each `yt[i, j] == 1`.
        """
        check_is_fitted(self)

        if yt.shape[1] != len(self.classes_):
            raise ValueError('Expected indicator for {0} classes, but got {1}'
                             .format(len(self.classes_), yt.shape[1]))

        if sp.issparse(yt):
            yt = yt.tocsr()
            if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
                raise ValueError('Expected only 0s and 1s in label indicator.')
            return [tuple(self.classes_.take(yt.indices[start:end]))
                    for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
        else:
            unexpected = np.setdiff1d(yt, [0, 1])
            if len(unexpected) > 0:
                raise ValueError('Expected only 0s and 1s in label indicator. '
                                 'Also got {0}'.format(unexpected))
            return [tuple(self.classes_.compress(indicators)) for indicators
                    in yt]

    def _more_tags(self):
        return {'X_types': ['2dlabels']}
|
{"hexsha": "43ab31d5782ecde55cfe379656b20dd60a7ff259", "size": 29644, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing/_label.py", "max_stars_repo_name": "jessica-tu/jupyter", "max_stars_repo_head_hexsha": "917e02bc29e0fa06bd8adb25fe5388ac381ec829", "max_stars_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocessing/_label.py", "max_issues_repo_name": "jessica-tu/jupyter", "max_issues_repo_head_hexsha": "917e02bc29e0fa06bd8adb25fe5388ac381ec829", "max_issues_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocessing/_label.py", "max_forks_repo_name": "jessica-tu/jupyter", "max_forks_repo_head_hexsha": "917e02bc29e0fa06bd8adb25fe5388ac381ec829", "max_forks_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9744160178, "max_line_length": 79, "alphanum_fraction": 0.583861827, "include": true, "reason": "import numpy,import scipy", "num_tokens": 6979}
|
'''
Recurrent Deterministic Policy Gradient (DDPG with LSTM network)
Updates are performed on a batch of whole episodes at a time, so every episode is required to have the same length.
'''
import math
import random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from torch.distributions import Categorical
from collections import namedtuple
from common.buffers import *
from common.value_networks import *
from common.policy_networks import *
from common.utils import *
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import display
from reacher import Reacher
import argparse
from gym import spaces
# --- Device selection -------------------------------------------------------
# Prefer the chosen CUDA device when GPU is requested and available;
# otherwise fall back to CPU.
GPU = True
device_idx = 0
if GPU:
    device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")
print(device)

# --- Command-line flags ------------------------------------------------------
# Exactly one of --train / --test is expected; both default to False.
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()
class RDPG():
    """Recurrent Deterministic Policy Gradient agent (DDPG with LSTM networks).

    Holds an online/target Q network pair and an online/target policy pair,
    plus their optimizers and a shared replay buffer of whole episodes.
    """

    def __init__(self, replay_buffer, state_space, action_space, hidden_dim):
        self.replay_buffer = replay_buffer
        self.hidden_dim = hidden_dim

        self.qnet = QNetworkLSTM(state_space, action_space, hidden_dim).to(device)
        self.target_qnet = QNetworkLSTM(state_space, action_space, hidden_dim).to(device)
        self.policy_net = DPG_PolicyNetworkLSTM(state_space, action_space, hidden_dim).to(device)
        self.target_policy_net = DPG_PolicyNetworkLSTM(state_space, action_space, hidden_dim).to(device)

        print('Q network: ', self.qnet)
        print('Policy network: ', self.policy_net)

        # Initialize BOTH target networks as exact copies of the online nets.
        # (Previously only the Q target was synchronized; the policy target
        # started from random weights, skewing early TD targets.)
        for target_param, param in zip(self.target_qnet.parameters(), self.qnet.parameters()):
            target_param.data.copy_(param.data)
        for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
            target_param.data.copy_(param.data)

        self.q_criterion = nn.MSELoss()
        q_lr = 1e-3
        policy_lr = 1e-3
        self.update_cnt = 0

        self.q_optimizer = optim.Adam(self.qnet.parameters(), lr=q_lr)
        self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=policy_lr)

    def target_soft_update(self, net, target_net, soft_tau):
        """Polyak-average `net` into `target_net` and return the target."""
        for target_param, param in zip(target_net.parameters(), net.parameters()):
            target_param.data.copy_(  # copy data value into target parameters
                target_param.data * (1.0 - soft_tau) + param.data * soft_tau
            )
        return target_net

    def update(self, batch_size, reward_scale=10.0, gamma=0.99, soft_tau=1e-2, policy_up_itr=10, target_update_delay=3, warmup=True):
        """One gradient step on Q and policy from a sampled batch of episodes.

        Returns (q_loss, policy_loss) as numpy scalars.
        """
        self.update_cnt += 1

        hidden_in, hidden_out, state, action, last_action, reward, next_state, done = self.replay_buffer.sample(batch_size)
        # print('sample:', state, action,  reward, done)

        state = torch.FloatTensor(state).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        action = torch.FloatTensor(action).to(device)
        last_action = torch.FloatTensor(last_action).to(device)
        reward = torch.FloatTensor(reward).unsqueeze(-1).to(device)
        done = torch.FloatTensor(np.float32(done)).unsqueeze(-1).to(device)

        # use hidden states stored in the memory for initialization,
        # hidden_in for current, hidden_out for target
        predict_q, _ = self.qnet(state, action, last_action, hidden_in)  # for q
        new_action, _ = self.policy_net.evaluate(state, last_action, hidden_in)  # for policy
        new_next_action, _ = self.target_policy_net.evaluate(next_state, action, hidden_out)  # for q
        predict_target_q, _ = self.target_qnet(next_state, new_next_action, action, hidden_out)  # for q
        predict_new_q, _ = self.qnet(state, new_action, last_action, hidden_in)  # for policy. as optimizers are separated, no detach for q_h_in is also fine
        # TD target; (1 - done) truncates bootstrapping at episode end.
        target_q = reward + (1 - done) * gamma * predict_target_q  # for q
        # reward = reward_scale * (reward - reward.mean(dim=0)) /reward.std(dim=0) # normalize with batch mean and std

        q_loss = self.q_criterion(predict_q, target_q.detach())
        policy_loss = -torch.mean(predict_new_q)

        # train qnet
        self.q_optimizer.zero_grad()
        q_loss.backward(retain_graph=True)  # no need for retain_graph here actually
        self.q_optimizer.step()

        # train policy_net
        self.policy_optimizer.zero_grad()
        policy_loss.backward(retain_graph=True)
        self.policy_optimizer.step()

        # soft-update both target networks every `target_update_delay` steps
        if self.update_cnt % target_update_delay == 0:
            self.target_qnet = self.target_soft_update(self.qnet, self.target_qnet, soft_tau)
            self.target_policy_net = self.target_soft_update(self.policy_net, self.target_policy_net, soft_tau)

        return q_loss.detach().cpu().numpy(), policy_loss.detach().cpu().numpy()

    def save_model(self, path):
        """Persist online and target-Q weights under `path` + suffix."""
        torch.save(self.qnet.state_dict(), path + '_q')
        torch.save(self.target_qnet.state_dict(), path + '_target_q')
        torch.save(self.policy_net.state_dict(), path + '_policy')

    def load_model(self, path):
        """Restore weights saved by `save_model` and switch to eval mode."""
        self.qnet.load_state_dict(torch.load(path + '_q'))
        self.target_qnet.load_state_dict(torch.load(path + '_target_q'))
        self.policy_net.load_state_dict(torch.load(path + '_policy'))
        # The target policy is not stored on disk; rebuild it from the
        # freshly loaded policy so it is not left with random weights.
        self.target_policy_net.load_state_dict(self.policy_net.state_dict())
        self.qnet.eval()
        self.target_qnet.eval()
        self.policy_net.eval()
        self.target_policy_net.eval()
def plot(rewards):
    """Write the running episode-reward curve to 'rdpg.png' (no window)."""
    plt.figure(figsize=(20, 5))
    plt.plot(rewards)
    plt.savefig('rdpg.png')
    # plt.show()
    plt.clf()
class NormalizedActions(gym.ActionWrapper):  # gym env wrapper
    """Rescale actions between the agent's [-1, 1] range and the env bounds."""

    def _action(self, action):
        # Map an agent action in [-1, 1] onto [low, high] and clamp.
        space = self.action_space
        low, high = space.low, space.high
        rescaled = low + (action + 1.0) * 0.5 * (high - low)
        return np.clip(rescaled, low, high)

    def _reverse_action(self, action):
        # Map an env-space action back into [-1, 1] and clamp.
        space = self.action_space
        low, high = space.low, space.high
        normalized = 2 * (action - low) / (high - low) - 1
        return np.clip(normalized, low, high)
if __name__ == '__main__':
    # Reacher-specific geometry (ignored when ENV == 'Pendulum').
    NUM_JOINTS=2
    LINK_LENGTH=[200, 140]
    INI_JOING_ANGLES=[0.1, 0.1]
    SCREEN_SIZE=1000
    # SPARSE_REWARD=False
    # SCREEN_SHOT=False
    # Pick the environment; index 0 selects 'Pendulum'.
    ENV = ['Pendulum', 'Reacher'][0]
    if ENV == 'Reacher':
        env=Reacher(screen_size=SCREEN_SIZE, num_joints=NUM_JOINTS, link_lengths = LINK_LENGTH, \
            ini_joint_angles=INI_JOING_ANGLES, target_pos = [369,430], render=True)
        action_space = spaces.Box(low=-1.0, high=1.0, shape=(env.num_actions,), dtype=np.float32)
        state_space  = spaces.Box(low=-np.inf, high=np.inf, shape=(env.num_observations, ))
    elif ENV == 'Pendulum':
        # env = NormalizedActions(gym.make("Pendulum-v0"))
        env = gym.make("Pendulum-v0")
        action_space = env.action_space
        state_space = env.observation_space

    hidden_dim = 64
    explore_steps = 0  # for random exploration
    batch_size = 3  # each sample in batch is an episode for lstm policy (normally it's timestep)
    update_itr = 1  # update iteration
    replay_buffer_size=1e6
    replay_buffer = ReplayBufferLSTM2(replay_buffer_size)
    model_path='./model/rdpg'
    torch.autograd.set_detect_anomaly(True)
    alg = RDPG(replay_buffer, state_space, action_space, hidden_dim)

    if args.train:
        # alg.load_model(model_path)

        # hyper-parameters
        max_episodes  = 1000
        max_steps   = 100
        frame_idx   = 0
        rewards=[]

        for i_episode in range (max_episodes):
            q_loss_list=[]
            policy_loss_list=[]
            state = env.reset()
            episode_reward = 0  # NOTE(review): immediately shadowed by the list below
            last_action = env.action_space.sample()
            # Per-episode trajectory buffers (pushed to the replay buffer whole).
            episode_state = []
            episode_action = []
            episode_last_action = []
            episode_reward = []
            episode_next_state = []
            episode_done = []
            # initialize hidden state for lstm, (hidden, cell), each is (layer, batch, dim)
            hidden_out = (torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda(), \
                torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda())

            for step in range(max_steps):
                hidden_in = hidden_out
                action, hidden_out = alg.policy_net.get_action(state, last_action, hidden_in)
                next_state, reward, done, _ = env.step(action)
                if ENV !='Reacher':
                    env.render()
                # NOTE(review): with `step>0` the "initial" hidden states are
                # re-captured every step after the first and end up holding the
                # LAST step's states; capturing at step 0 looks intended —
                # confirm against upstream.
                if step>0:
                    ini_hidden_in = hidden_in
                    ini_hidden_out = hidden_out
                episode_state.append(state)
                episode_action.append(action)
                episode_last_action.append(last_action)
                episode_reward.append(reward)
                episode_next_state.append(next_state)
                episode_done.append(done)

                state = next_state
                last_action = action
                frame_idx += 1

                if len(replay_buffer) > batch_size:
                    for _ in range(update_itr):
                        q_loss, policy_loss = alg.update(batch_size)
                        q_loss_list.append(q_loss)
                        policy_loss_list.append(policy_loss)

                if done:  # should not break for lstm cases to make every episode with same length
                    break

            if i_episode % 20 == 0:
                plot(rewards)
                alg.save_model(model_path)
            print('Eps: ', i_episode, '| Reward: ', np.sum(episode_reward), '| Loss: ', np.average(q_loss_list), np.average(policy_loss_list))
            # Store the whole episode (with its initial LSTM states) at once.
            replay_buffer.push(ini_hidden_in, ini_hidden_out, episode_state, episode_action, episode_last_action, \
                episode_reward, episode_next_state, episode_done)
            rewards.append(np.sum(episode_reward))
        alg.save_model(model_path)

    if args.test:
        test_episodes = 10
        max_steps=100
        alg.load_model(model_path)

        for i_episode in range (test_episodes):
            q_loss_list=[]
            policy_loss_list=[]
            state = env.reset()
            episode_reward = 0
            last_action = np.zeros(action_space.shape[0])
            # initialize hidden state for lstm, (hidden, cell), each is (layer, batch, dim)
            hidden_out = (torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda(), \
                torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda())

            for step in range(max_steps):
                hidden_in = hidden_out
                action, hidden_out= alg.policy_net.get_action(state, last_action, hidden_in, noise_scale=0.0)  # no noise for testing
                next_state, reward, done, _ = env.step(action)
                env.render()
                last_action = action
                state = next_state
                episode_reward += reward
                if done:
                    break
            print('Eps: ', i_episode, '| Reward: ', episode_reward)
|
{"hexsha": "c324c1b90ad31344c6d8cdb5cfcc631c37a8eb00", "size": 11370, "ext": "py", "lang": "Python", "max_stars_repo_path": "rdpg.py", "max_stars_repo_name": "chagri/SOTA-RL-Algorithms", "max_stars_repo_head_hexsha": "58b416e7c706d8426dc402482e72ca7283568e71", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rdpg.py", "max_issues_repo_name": "chagri/SOTA-RL-Algorithms", "max_issues_repo_head_hexsha": "58b416e7c706d8426dc402482e72ca7283568e71", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rdpg.py", "max_forks_repo_name": "chagri/SOTA-RL-Algorithms", "max_forks_repo_head_hexsha": "58b416e7c706d8426dc402482e72ca7283568e71", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-31T18:13:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-31T18:13:43.000Z", "avg_line_length": 40.1766784452, "max_line_length": 156, "alphanum_fraction": 0.6322779244, "include": true, "reason": "import numpy", "num_tokens": 2557}
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* This software may be distributed and modified according to the terms of
* the GNU General Public License version 2. Note that NO WARRANTY is provided.
* See "LICENSE_GPLv2.txt" for details.
*
* @TAG(GD_GPL)
*)
theory Fastpath_C
imports
SyscallArgs_C
Delete_C
Syscall_C
"../refine/$L4V_ARCH/RAB_FN"
"../../lib/clib/MonadicRewrite_C"
begin
context begin interpretation Arch . (*FIXME: arch_split*)
(* Abstract (design-level) specification of the seL4 IPC fastpath.
   For SysCall and SysReplyRecv a chain of guard checks is performed in the
   error monad; if any guard fails, throwError aborts to the catch handler,
   which falls back to the full slowpath (callKernel).  All other syscalls
   always take the slowpath.
   NOTE(review): the bare binding "destFault \<leftarrow>" directly followed
   by "unlessE ..." in the SysCall case looks like a line dropped during
   extraction (upstream binds destFault from threadGet tcbFault dest and
   checks it) — confirm against the seL4 proofs repository. *)
definition
"fastpaths sysc \<equiv> case sysc of
SysCall \<Rightarrow> doE
curThread \<leftarrow> liftE $ getCurThread;
mi \<leftarrow> liftE $ getMessageInfo curThread;
cptr \<leftarrow> liftE $ asUser curThread $ getRegister capRegister;
fault \<leftarrow> liftE $ threadGet tcbFault curThread;
pickFastpath \<leftarrow> liftE $ alternative (return True) (return False);
unlessE (fault = None \<and> msgExtraCaps mi = 0
\<and> msgLength mi \<le> scast n_msgRegisters \<and> pickFastpath)
$ throwError ();
ctab \<leftarrow> liftE $ getThreadCSpaceRoot curThread >>= getCTE;
epCap \<leftarrow> unifyFailure (doE t \<leftarrow> resolveAddressBits (cteCap ctab) cptr (size cptr);
liftE (getSlotCap (fst t)) odE);
unlessE (isEndpointCap epCap \<and> capEPCanSend epCap)
$ throwError ();
ep \<leftarrow> liftE $ getEndpoint (capEPPtr epCap);
unlessE (isRecvEP ep) $ throwError ();
dest \<leftarrow> returnOk $ hd $ epQueue ep;
newVTable \<leftarrow> liftE $ getThreadVSpaceRoot dest >>= getCTE;
unlessE (isValidVTableRoot $ cteCap newVTable) $ throwError ();
pd \<leftarrow> returnOk $ capPDBasePtr $ capCap $ cteCap newVTable;
curPrio \<leftarrow> liftE $ threadGet tcbPriority curThread;
destPrio \<leftarrow> liftE $ threadGet tcbPriority dest;
destFault \<leftarrow>
unlessE (destPrio \<ge> curPrio) $ throwError ();
unlessE (capEPCanGrant epCap) $ throwError ();
asidMap \<leftarrow> liftE $ gets $ armKSASIDMap o ksArchState;
unlessE (\<exists>v. {hwasid. (hwasid, pd) \<in> ran asidMap} = {v})
$ throwError ();
curDom \<leftarrow> liftE $ curDomain;
destDom \<leftarrow> liftE $ threadGet tcbDomain dest;
unlessE (destDom = curDom) $ throwError ();
liftE $ do
setEndpoint (capEPPtr epCap)
(case tl (epQueue ep) of [] \<Rightarrow> IdleEP | _ \<Rightarrow> RecvEP (tl (epQueue ep)));
threadSet (tcbState_update (\<lambda>_. BlockedOnReply)) curThread;
replySlot \<leftarrow> getThreadReplySlot curThread;
callerSlot \<leftarrow> getThreadCallerSlot dest;
replySlotCTE \<leftarrow> getCTE replySlot;
assert (mdbNext (cteMDBNode replySlotCTE) = 0
\<and> isReplyCap (cteCap replySlotCTE)
\<and> capReplyMaster (cteCap replySlotCTE)
\<and> mdbFirstBadged (cteMDBNode replySlotCTE)
\<and> mdbRevocable (cteMDBNode replySlotCTE));
cteInsert (ReplyCap curThread False) replySlot callerSlot;
forM_x (take (unat (msgLength mi)) ARM_H.msgRegisters)
(\<lambda>r. do v \<leftarrow> asUser curThread (getRegister r);
asUser dest (setRegister r v) od);
setThreadState Running dest;
Arch.switchToThread dest;
setCurThread dest;
asUser dest $ zipWithM_x setRegister
[ARM_H.badgeRegister, ARM_H.msgInfoRegister]
[capEPBadge epCap, wordFromMessageInfo (mi\<lparr> msgCapsUnwrapped := 0 \<rparr>)]
od
odE <catch> (\<lambda>_. callKernel (SyscallEvent sysc))
| SysReplyRecv \<Rightarrow> doE
curThread \<leftarrow> liftE $ getCurThread;
mi \<leftarrow> liftE $ getMessageInfo curThread;
cptr \<leftarrow> liftE $ asUser curThread $ getRegister capRegister;
fault \<leftarrow> liftE $ threadGet tcbFault curThread;
pickFastpath \<leftarrow> liftE $ alternative (return True) (return False);
unlessE (fault = None \<and> msgExtraCaps mi = 0
\<and> msgLength mi \<le> scast n_msgRegisters \<and> pickFastpath)
$ throwError ();
ctab \<leftarrow> liftE $ getThreadCSpaceRoot curThread >>= getCTE;
epCap \<leftarrow> unifyFailure (doE t \<leftarrow> resolveAddressBits (cteCap ctab) cptr (size cptr);
liftE (getSlotCap (fst t)) odE);
unlessE (isEndpointCap epCap \<and> capEPCanReceive epCap)
$ throwError ();
bound_ntfn \<leftarrow> liftE $ getBoundNotification curThread;
active_ntfn \<leftarrow> liftE $ case bound_ntfn of None \<Rightarrow> return False
| Some ntfnptr \<Rightarrow> liftM isActive $ getNotification ntfnptr;
unlessE (\<not> active_ntfn) $ throwError ();
ep \<leftarrow> liftE $ getEndpoint (capEPPtr epCap);
unlessE (\<not> isSendEP ep) $ throwError ();
callerSlot \<leftarrow> liftE $ getThreadCallerSlot curThread;
callerCTE \<leftarrow> liftE $ getCTE callerSlot;
callerCap \<leftarrow> returnOk $ cteCap callerCTE;
unlessE (isReplyCap callerCap \<and> \<not> capReplyMaster callerCap)
$ throwError ();
caller \<leftarrow> returnOk $ capTCBPtr callerCap;
callerFault \<leftarrow> liftE $ threadGet tcbFault caller;
unlessE (callerFault = None) $ throwError ();
newVTable \<leftarrow> liftE $ getThreadVSpaceRoot caller >>= getCTE;
unlessE (isValidVTableRoot $ cteCap newVTable) $ throwError ();
pd \<leftarrow> returnOk $ capPDBasePtr $ capCap $ cteCap newVTable;
curPrio \<leftarrow> liftE $ threadGet tcbPriority curThread;
callerPrio \<leftarrow> liftE $ threadGet tcbPriority caller;
unlessE (callerPrio \<ge> curPrio) $ throwError ();
asidMap \<leftarrow> liftE $ gets $ armKSASIDMap o ksArchState;
unlessE (\<exists>v. {hwasid. (hwasid, pd) \<in> ran asidMap} = {v})
$ throwError ();
curDom \<leftarrow> liftE $ curDomain;
callerDom \<leftarrow> liftE $ threadGet tcbDomain caller;
unlessE (callerDom = curDom) $ throwError ();
liftE $ do
threadSet (tcbState_update (\<lambda>_. BlockedOnReceive (capEPPtr epCap))) curThread;
setEndpoint (capEPPtr epCap)
(case ep of IdleEP \<Rightarrow> RecvEP [curThread] | RecvEP ts \<Rightarrow> RecvEP (ts @ [curThread]));
mdbPrev \<leftarrow> liftM (mdbPrev o cteMDBNode) $ getCTE callerSlot;
assert (mdbPrev \<noteq> 0);
updateMDB mdbPrev (mdbNext_update (K 0) o mdbFirstBadged_update (K True)
o mdbRevocable_update (K True));
setCTE callerSlot makeObject;
forM_x (take (unat (msgLength mi)) ARM_H.msgRegisters)
(\<lambda>r. do v \<leftarrow> asUser curThread (getRegister r);
asUser caller (setRegister r v) od);
setThreadState Running caller;
Arch.switchToThread caller;
setCurThread caller;
asUser caller $ zipWithM_x setRegister
[ARM_H.badgeRegister, ARM_H.msgInfoRegister]
[0, wordFromMessageInfo (mi\<lparr> msgCapsUnwrapped := 0 \<rparr>)]
od
odE <catch> (\<lambda>_. callKernel (SyscallEvent sysc))
| _ \<Rightarrow> callKernel (SyscallEvent sysc)"
(* Writing a CTE does not change the tcbQueued flag of any TCB. *)
lemma setCTE_obj_at'_queued:
"\<lbrace>obj_at' (\<lambda>tcb. P (tcbQueued tcb)) t\<rbrace> setCTE p v \<lbrace>\<lambda>rv. obj_at' (\<lambda>tcb. P (tcbQueued tcb)) t\<rbrace>"
unfolding setCTE_def
by (rule setObject_cte_obj_at_tcb', simp+)

(* Lift the preservation of tcbQueued through cteInsert / emptySlot. *)
crunch obj_at'_queued: cteInsert "obj_at' (\<lambda>tcb. P (tcbQueued tcb)) t"
(wp: setCTE_obj_at'_queued crunch_wps)
crunch obj_at'_not_queued: emptySlot "obj_at' (\<lambda>a. \<not> tcbQueued a) p"
(wp: setCTE_obj_at'_queued)
(* If P holds of the endpoint object at ptr, then getEndpoint returns a value
   satisfying P. *)
lemma getEndpoint_obj_at':
"\<lbrace>obj_at' P ptr\<rbrace> getEndpoint ptr \<lbrace>\<lambda>rv s. P rv\<rbrace>"
apply (wp getEndpoint_wp)
apply (clarsimp simp: obj_at'_def projectKOs)
done
(* Writing an endpoint object cannot disturb a TCB predicate at any address:
   the update touches only endpoint-typed objects. *)
lemma setEndpoint_obj_at_tcb':
"\<lbrace>obj_at' (P :: tcb \<Rightarrow> bool) p\<rbrace> setEndpoint p' val \<lbrace>\<lambda>rv. obj_at' P p\<rbrace>"
apply (simp add: setEndpoint_def)
apply (rule obj_at_setObject2)
apply (clarsimp simp: updateObject_default_def in_monad)
done
(* The following group shows that various kernel operations leave the user
   register context (atcbContextGet o tcbArch) of any thread unchanged. *)
lemma tcbSchedEnqueue_tcbContext[wp]:
"\<lbrace>obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t\<rbrace>
tcbSchedEnqueue t'
\<lbrace>\<lambda>rv. obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t\<rbrace>"
apply (rule tcbSchedEnqueue_obj_at_unchangedT[OF all_tcbI])
apply simp
done
(* setCTE never modifies the architecture-specific part of a TCB. *)
lemma setCTE_tcbContext:
"\<lbrace>obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t\<rbrace>
setCTE slot cte
\<lbrace>\<lambda>rv. obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t\<rbrace>"
apply (simp add: setCTE_def)
apply (rule setObject_cte_obj_at_tcb', simp_all)
done
(* NOTE(review): name is a typo for setThreadState_tcbContext, but it is
   referenced by this name in the crunch below, so it must stay as-is here. *)
lemma seThreadState_tcbContext:
"\<lbrace>obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t\<rbrace>
setThreadState a b
\<lbrace>\<lambda>_. obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t\<rbrace>"
apply (rule setThreadState_obj_at_unchanged)
apply (clarsimp simp: atcbContext_def)+
done
(* Bound-notification updates also leave the register context untouched. *)
lemma setBoundNotification_tcbContext:
"\<lbrace>obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t\<rbrace>
setBoundNotification a b
\<lbrace>\<lambda>_. obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t\<rbrace>"
apply (rule setBoundNotification_obj_at_unchanged)
apply (clarsimp simp: atcbContext_def)+
done
(* Temporarily disable comp_apply so the crunch's wp rules (stated with
   function composition) apply literally, then restore it. *)
declare comp_apply [simp del]
crunch tcbContext[wp]: deleteCallerCap "obj_at' (\<lambda>tcb. P ((atcbContextGet o tcbArch) tcb)) t"
(wp: setEndpoint_obj_at_tcb' setBoundNotification_tcbContext
setNotification_tcb crunch_wps seThreadState_tcbContext
ignore: getObject setObject simp: crunch_simps unless_def)
declare comp_apply [simp]
(* asUser only touches thread contexts, never the architecture state. *)
crunch ksArch[wp]: asUser "\<lambda>s. P (ksArchState s)"
(wp: crunch_wps)
(* Partial map from addresses to the TCBs stored there: defined iff a TCB
   object exists at the address in the kernel heap. *)
definition
tcbs_of :: "kernel_state => word32 => tcb option"
where
"tcbs_of s = (%x. if tcb_at' x s then projectKO_opt (the (ksPSpace s x)) else None)"
(* obj_at' on TCBs rephrased as a lookup in tcbs_of. *)
lemma obj_at_tcbs_of:
"obj_at' P t s = (EX tcb. tcbs_of s t = Some tcb & P tcb)"
apply (simp add: tcbs_of_def split: if_split)
apply (intro conjI impI)
apply (clarsimp simp: obj_at'_def projectKOs)
apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI])
done
(* Thread-state assertions rephrased the same way. *)
lemma st_tcb_at_tcbs_of:
"st_tcb_at' P t s = (EX tcb. tcbs_of s t = Some tcb & P (tcbState tcb))"
by (simp add: st_tcb_at'_def obj_at_tcbs_of)
end
context kernel_m begin
(* Case split on a disjunction P \<or> Q: prove the correspondence separately
   under each disjunct and combine the guards conditionally. *)
lemma ccorres_disj_division:
"\<lbrakk> P \<or> Q; P \<Longrightarrow> ccorres_underlying sr G r xf ar axf R S hs a c;
Q \<Longrightarrow> ccorres_underlying sr G r xf ar axf T U hs a c \<rbrakk>
\<Longrightarrow> ccorres_underlying sr G r xf ar axf
(\<lambda>s. (P \<longrightarrow> R s) \<and> (Q \<longrightarrow> T s)) {s. (P \<longrightarrow> s \<in> S) \<and> (Q \<longrightarrow> s \<in> U)}
hs a c"
apply (erule disjE, simp_all)
apply (auto elim!: ccorres_guard_imp)
done
lemma disj_division_bool: "b \<or> \<not> b" by simp
(* Specialization: case split on a boolean condition and its negation. *)
lemmas ccorres_case_bools2 = ccorres_disj_division [OF disj_division_bool]
(* A capability's master cap is NullCap exactly when the cap itself is. *)
lemma capMasterCap_NullCap_eq:
"(capMasterCap c = NullCap) = (c = NullCap)"
by (auto dest!: capMasterCap_eqDs)
(* Splitting rule: an abstract getCTE paired with a C-side Basic statement
   that reads the cap field of the CTE directly from the heap into a local
   variable (var/var_update). The var assumption says the variable accessor
   and updater behave like a field and do not affect the state relation. *)
lemma getCTE_h_val_ccorres_split:
assumes var: "\<And>s f s'. var (var_update f s) = f (var s)
\<and> ((s', var_update f s) \<in> rf_sr) = ((s', s) \<in> rf_sr)"
and "\<And>rv' t t'. ceqv \<Gamma> var rv' t t' g (g' rv')"
and "\<And>rv rv'. \<lbrakk> ccap_relation (cteCap rv) rv'; P rv \<rbrakk>
\<Longrightarrow> ccorres r xf (Q rv) (Q' rv rv') hs (f rv) (g' rv')"
shows
"ccorres r xf (\<lambda>s. \<forall>cte. ctes_of s slot = Some cte \<longrightarrow> P cte \<and> Q cte s)
{s. (\<forall>cte cap. ccap_relation (cteCap cte) cap \<and> P cte
\<longrightarrow> var_update (\<lambda>_. cap) s \<in> Q' cte cap)
\<and> slot' = cte_Ptr slot} hs
(getCTE slot >>= (\<lambda>rv. f rv))
((Basic (\<lambda>s. var_update (\<lambda>_. h_val (hrs_mem (t_hrs_' (globals s))) (cap_Ptr &(slot' \<rightarrow>[''cap_C'']))) s));; g)"
(is "ccorres r xf ?G ?G' hs ?f ?g")
apply (rule ccorres_guard_imp2)
apply (rule ccorres_pre_getCTE)
apply (rule_tac A="cte_wp_at' (op = rv and P) slot and Q rv" and A'="?G'" in ccorres_guard_imp2)
apply (rule_tac P="P rv" in ccorres_gen_asm)
apply (rule ccorres_symb_exec_r)
apply (rule_tac xf'=var in ccorres_abstract)
apply (rule assms)
apply (rule ccorres_gen_asm2, erule(1) assms)
apply vcg
apply (rule conseqPre, vcg, clarsimp simp: var)
apply (clarsimp simp: cte_wp_at_ctes_of var)
apply (erule(1) cmap_relationE1[OF cmap_relation_cte])
apply (clarsimp simp: typ_heap_simps' dest!: ccte_relation_ccap_relation)
apply (clarsimp simp: cte_wp_at_ctes_of)
done
(* The cap_' local variable satisfies the var assumption above. *)
lemma cap_'_cap_'_update_var_props:
"cap_' (cap_'_update f s) = f (cap_' s) \<and>
((s', cap_'_update f s) \<in> rf_sr) = ((s', s) \<in> rf_sr)"
by simp
(* Instantiation of the split rule for the cap_' variable with trivial P. *)
lemmas getCTE_cap_h_val_ccorres_split
= getCTE_h_val_ccorres_split[where var_update=cap_'_update and P=\<top>,
OF cap_'_cap_'_update_var_props]
(* Reduce a ccorres goal about getCTE to a Hoare triple on the C side, with
   the fetched CTE and its C counterpart (related by ccte_relation) available
   in the precondition. *)
lemma getCTE_ccorres_helper:
"\<lbrakk> \<And>\<sigma> cte cte'. \<Gamma> \<turnstile> {s. (\<sigma>, s) \<in> rf_sr \<and> P \<sigma> \<and> s \<in> P' \<and> ctes_of \<sigma> slot = Some cte
\<and> cslift s (cte_Ptr slot) = Some cte'
\<and> ccte_relation cte cte'}
f {s. (\<sigma>, s) \<in> rf_sr \<and> r cte (xf s)} \<rbrakk> \<Longrightarrow>
ccorres r xf P P' hs (getCTE slot) f"
apply atomize
apply (rule ccorres_guard_imp2)
apply (rule ccorres_add_return2)
apply (rule ccorres_pre_getCTE)
apply (rule_tac P="cte_wp_at' (op = x) slot and P"
in ccorres_from_vcg[where P'=P'])
apply (erule allEI)
apply (drule_tac x="the (ctes_of \<sigma> slot)" in spec)
apply (erule HoarePartial.conseq)
apply (clarsimp simp: return_def cte_wp_at_ctes_of)
apply (erule(1) cmap_relationE1[OF cmap_relation_cte])
apply simp
apply (clarsimp simp: cte_wp_at_ctes_of)
done
(* A CNode cap equals the constructor applied to its own accessors; useful
   for rewriting a cap back into constructor form. *)
lemma acc_CNodeCap_repr:
"isCNodeCap cap
\<Longrightarrow> cap = CNodeCap (capCNodePtr cap) (capCNodeBits cap)
(capCNodeGuard cap) (capCNodeGuardSize cap)"
by (clarsimp simp: isCap_simps)
(* Any in-range slot index of a valid CNode cap points at a CTE.
   0x10 is the CTE size in bytes here. *)
lemma valid_cnode_cap_cte_at':
"\<lbrakk> s \<turnstile>' c; isCNodeCap c; ptr = capCNodePtr c; v < 2 ^ capCNodeBits c \<rbrakk>
\<Longrightarrow> cte_at' (ptr + v * 0x10) s"
apply (drule less_mask_eq)
apply (drule(1) valid_cap_cte_at'[where addr=v])
apply (simp add: mult.commute mult.left_commute)
done
(* Like ccorres_abstract but quantifies the abstracted C-variable value in
   both abstract and concrete guards, avoiding an explicit case split. *)
lemma ccorres_abstract_all:
"\<lbrakk>\<And>rv' t t'. ceqv Gamm xf' rv' t t' d (d' rv');
\<And>rv'. ccorres_underlying sr Gamm r xf arrel axf (G rv') (G' rv') hs a (d' rv')\<rbrakk>
\<Longrightarrow> ccorres_underlying sr Gamm r xf arrel axf (\<lambda>s. \<forall>rv'. G rv' s) {s. s \<in> G' (xf' s)} hs a d"
apply (erule ccorres_abstract)
apply (rule ccorres_guard_imp2)
apply assumption
apply simp
done
(* Word-conversion simp rule: of_int of a sint is a sign-extending cast. *)
lemma of_int_sint_scast [simp]:
"of_int (sint (x :: 'a::len word)) = (scast x :: 'b::len word)"
by (metis scast_def word_of_int)
(* Hoist a stateAssert out of either branch of an If, weakening its
   assertion by the branch condition. *)
lemma stateAssert_bind_out_of_if:
"If P f (stateAssert Q xs >>= g) = stateAssert (\<lambda>s. \<not> P \<longrightarrow> Q s) [] >>= (\<lambda>_. If P f (g ()))"
"If P (stateAssert Q xs >>= g) f = stateAssert (\<lambda>s. P \<longrightarrow> Q s) [] >>= (\<lambda>_. If P (g ()) f)"
by (simp_all add: fun_eq_iff stateAssert_def exec_get split: if_split)
(* For CNode caps the generic untyped pointer is the CNode pointer. *)
lemma isCNodeCap_capUntypedPtr_capCNodePtr:
"isCNodeCap c \<Longrightarrow> capUntypedPtr c = capCNodePtr c"
by (clarsimp simp: isCap_simps)
(* Singleton bit-list conversion agrees with from_bool. *)
lemma of_bl_from_bool:
"of_bl [x] = from_bool x"
by (cases x, simp_all)
(* Correspondence between the abstract fastpath capability lookup
   (resolveAddressBits followed by getSlotCap) and the C function lookup_fp.
   The result relation maps a lookup failure (Inl) to NullCap on the C side.
   Proved by induction on resolveAddressBits; the inductive step walks one
   CNode level and either recurses (next cap is a CNode cap) or returns.
   cutMon restricts attention to executions from the fixed start state s. *)
lemma lookup_fp_ccorres':
assumes bits: "bits = size cptr"
shows
"ccorres (\<lambda>mcp ccp. ccap_relation (case mcp of Inl v => NullCap | Inr v => v) ccp)
ret__struct_cap_C_'
(valid_cap' cap and valid_objs')
(UNIV \<inter> {s. ccap_relation cap (cap_' s)} \<inter> {s. cptr_' s = cptr}) []
(cutMon (op = s) (doE t \<leftarrow> resolveAddressBits cap cptr bits;
liftE (getSlotCap (fst t))
odE))
(Call lookup_fp_'proc)"
apply (cinit' lift: cptr_')
apply (rule ccorres_rhs_assoc2)
apply (rule ccorres_symb_exec_r)
apply (rule_tac xf'=ret__int_' in ccorres_abstract, ceqv)
apply (rule_tac P="rv' = from_bool (isCNodeCap cap)" in ccorres_gen_asm2)
apply (simp add: from_bool_0 del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
(* Non-CNode starting cap: lookup fails immediately with NullCap. *)
apply (simp add: resolveAddressBits.simps split_def del: Collect_const
split del: if_split)
apply (rule ccorres_drop_cutMon)
apply (rule ccorres_from_vcg_split_throws[where P=\<top> and P'=UNIV])
apply vcg
apply (rule conseqPre, vcg)
apply (clarsimp simp: throwError_def return_def isRight_def isLeft_def
ccap_relation_NullCap_iff)
apply (clarsimp simp del: Collect_const cong: call_ignore_cong)
apply (rule_tac P="valid_cap' cap and valid_objs'"
and P'="UNIV \<inter> {s. ccap_relation cap (cap_' s) \<and> isCNodeCap cap}
\<inter> {s. bits_' s = 32 - of_nat bits \<and> bits \<le> 32 \<and> bits \<noteq> 0}"
in ccorres_inst)
apply (thin_tac "isCNodeCap cap")
defer
apply vcg
apply (rule conseqPre, vcg)
apply clarsimp
apply (clarsimp simp: word_size cap_get_tag_isCap bits
of_bl_from_bool from_bool_0)
(* Main induction over the recursion structure of resolveAddressBits. *)
proof (induct cap cptr bits arbitrary: s
rule: resolveAddressBits.induct)
case (1 acap acptr abits as)
(* Arithmetic fact about the 5-bit masked remaining-bits computation. *)
have sub_mask_neq_0_eq:
"\<And>v :: word32. v && 0x1F \<noteq> 0 \<Longrightarrow> 0x20 - (0x20 - (v && 0x1F) && mask 5) = v && 0x1F"
apply (subst word_le_mask_eq)
apply (simp only: mask_def)
apply (rule word_le_minus_mono, simp_all add: word_le_sub1 word_sub_le_iff)[1]
apply (rule order_trans, rule word_and_le1, simp)
apply (simp add: word_bits_def)
apply simp
done
(* A valid CNode cap never has radix zero. *)
have valid_cnode_bits_0:
"\<And>s acap. \<lbrakk> isCNodeCap acap; s \<turnstile>' acap \<rbrakk> \<Longrightarrow> capCNodeBits acap \<noteq> 0"
by (clarsimp simp: isCap_simps valid_cap'_def)
(* Updating word 1 of a cap does not change its tag (tag lives in word 0). *)
have cap_get_tag_update_1:
"\<And>f cap. cap_get_tag (cap_C.words_C_update (\<lambda>w. Arrays.update w (Suc 0) (f w)) cap) = cap_get_tag cap"
by (simp add: cap_get_tag_def)
show ?case
apply (cinitlift cap_' bits_')
apply (rename_tac cbits ccap)
apply (elim conjE)
(* Collect the C-level decodings of the CNode cap's fields so later
   arithmetic can be done on abstract values. *)
apply (rule_tac F="capCNodePtr_CL (cap_cnode_cap_lift ccap)
= capCNodePtr acap
\<and> capCNodeGuardSize acap < 32
\<and> capCNodeBits acap < 32
\<and> capCNodeGuard_CL (cap_cnode_cap_lift ccap)
= capCNodeGuard acap
\<and> unat (capCNodeGuardSize_CL (cap_cnode_cap_lift ccap))
= capCNodeGuardSize acap
\<and> unat (capCNodeRadix_CL (cap_cnode_cap_lift ccap))
= capCNodeBits acap
\<and> unat (0x20 - capCNodeRadix_CL (cap_cnode_cap_lift ccap))
= 32 - capCNodeBits acap
\<and> unat ((0x20 :: word32) - of_nat abits) = 32 - abits
\<and> unat (capCNodeGuardSize_CL (cap_cnode_cap_lift ccap)
+ capCNodeRadix_CL (cap_cnode_cap_lift ccap))
= capCNodeGuardSize acap + capCNodeBits acap"
in Corres_UL_C.ccorres_req)
apply (clarsimp simp: cap_get_tag_isCap[symmetric])
apply (clarsimp simp: cap_lift_cnode_cap cap_to_H_simps valid_cap'_def
capAligned_def cap_cnode_cap_lift_def objBits_simps
word_mod_2p_is_mask[where n=5, simplified]
elim!: ccap_relationE)
apply (simp add: unat_sub[unfolded word_le_nat_alt]
unat_of_nat32 word_bits_def)
apply (subst unat_plus_simple[symmetric], subst no_olen_add_nat)
apply (rule order_le_less_trans, rule add_le_mono)
apply (rule word_le_nat_alt[THEN iffD1], rule word_and_le1)+
apply simp
apply (rule ccorres_guard_imp2)
apply csymbr+
apply (rule ccorres_Guard_Seq, csymbr)
apply (simp add: resolveAddressBits.simps bindE_assoc extra_sle_sless_unfolds
Collect_True
split del: if_split del: Collect_const cong: call_ignore_cong)
apply (simp add: cutMon_walk_bindE del: Collect_const
split del: if_split cong: call_ignore_cong)
apply (rule ccorres_drop_cutMon_bindE, rule ccorres_assertE)
apply (rule ccorres_cutMon)
apply csymbr
apply (simp add: locateSlot_conv liftE_bindE cutMon_walk_bind)
apply (rule ccorres_drop_cutMon_bind, rule ccorres_stateAssert)
(* Split on whether the requested depth is exhausted inside this level. *)
apply (rule_tac P="abits < capCNodeBits acap + capCNodeGuardSize acap"
in ccorres_case_bools2)
apply (rule ccorres_drop_cutMon)
apply csymbr+
apply (rule ccorres_symb_exec_r)
apply (rule_tac xf'=ret__int_' in ccorres_abstract_all, ceqv)
apply (rule ccorres_Cond_rhs_Seq)
apply (rule ccorres_from_vcg_split_throws[where P=\<top> and P'=UNIV])
apply vcg
apply (rule conseqPre, vcg)
apply (clarsimp simp: unlessE_def split: if_split)
apply (simp add: throwError_def return_def cap_tag_defs
isRight_def isLeft_def
ccap_relation_NullCap_iff
in_bindE)
apply auto[1]
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Guard_Seq)+
apply csymbr+
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_move_array_assertion_cnode_ctes ccorres_move_c_guard_cte
| csymbr)+
apply (rule ccorres_symb_exec_r)
apply ccorres_remove_UNIV_guard
apply csymbr+
apply (rule ccorres_cond_false_seq)
apply (simp add: ccorres_expand_while_iff_Seq[symmetric]
whileAnno_def cong: call_ignore_cong)
apply (rule ccorres_cond_false)
apply (rule ccorres_cond_true_seq)
apply (rule ccorres_from_vcg_split_throws[where P=\<top> and P'=UNIV])
apply vcg
apply (rule conseqPre, vcg)
apply (clarsimp simp: unlessE_def split: if_split cong: call_ignore_cong)
apply (simp add: throwError_def return_def cap_tag_defs isRight_def
isLeft_def ccap_relation_NullCap_iff)
apply fastforce
apply (simp del: Collect_const)
apply vcg
apply (rule conseqPre, vcg, clarsimp)
apply (simp del: Collect_const)
apply vcg
apply (rule conseqPre, vcg, clarsimp)
apply (rule ccorres_cutMon)
apply (simp add: cutMon_walk_bindE unlessE_whenE
del: Collect_const
split del: if_split cong: call_ignore_cong)
apply (rule ccorres_drop_cutMon_bindE)
apply csymbr+
apply (rule ccorres_rhs_assoc2)
apply (rule_tac r'=dc and xf'=xfdc in ccorres_splitE[OF _ ceqv_refl])
apply (rule ccorres_Cond_rhs_Seq)
apply (rule ccorres_Guard_Seq)+
apply csymbr
apply (simp add: unat_sub word_le_nat_alt if_1_0_0 shiftl_shiftr3 word_size
del: Collect_const)
apply (rule ccorres_Cond_rhs)
apply (rule ccorres_from_vcg_throws[where P=\<top> and P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: whenE_def throwError_def return_def
ccap_relation_NullCap_iff isRight_def isLeft_def)
apply (simp add: whenE_def)
apply (rule ccorres_returnOk_skip)
apply simp
apply (rule ccorres_cond_false)
apply (rule_tac P="valid_cap' acap" in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: valid_cap'_def isCap_simps if_1_0_0)
apply (simp add: unat_eq_0[symmetric] whenE_def returnOk_def return_def)
apply (rule ccorres_cutMon)
apply (simp add: liftE_bindE locateSlot_conv
del: Collect_const cong: call_ignore_cong)
(* Split on whether this level consumes exactly the remaining bits
   (terminal case) or recursion continues. *)
apply (rule_tac P="abits = capCNodeBits acap + capCNodeGuardSize acap"
in ccorres_case_bools2)
apply (rule ccorres_drop_cutMon)
apply (simp del: Collect_const)
apply (simp add: liftE_def getSlotCap_def del: Collect_const)
apply (rule ccorres_Guard_Seq)+
apply csymbr+
apply (simp)
apply (rule ccorres_move_array_assertion_cnode_ctes
ccorres_move_c_guard_cte
ccorres_rhs_assoc | csymbr)+
apply (rule getCTE_cap_h_val_ccorres_split)
apply ceqv
apply (rename_tac "getCTE_cap")
apply (csymbr | rule ccorres_Guard_Seq)+
apply (rule ccorres_cond_false_seq)
apply (simp add: ccorres_expand_while_iff_Seq[symmetric]
whileAnno_def del: Collect_const)
apply (rule ccorres_cond_false)
apply (rule ccorres_Guard_Seq)+
apply (rule ccorres_cond_false_seq)
apply (simp del: Collect_const)
apply (rule_tac P'="{s. cap_' s = getCTE_cap}"
in ccorres_from_vcg_throws[where P=\<top>])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: word_sle_def return_def returnOk_def
isRight_def)
apply (simp add: bind_bindE_assoc
del: Collect_const cong: call_ignore_cong if_cong)
apply (simp add: liftE_bindE "1.prems" unlessE_def
cutMon_walk_bind cnode_cap_case_if
del: Collect_const cong: if_cong call_ignore_cong)
apply (rule ccorres_Guard_Seq)+
apply csymbr+
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_drop_cutMon_bind)
apply (rule ccorres_getSlotCap_cte_at)
apply (rule ccorres_move_c_guard_cte
ccorres_move_array_assertion_cnode_ctes
| csymbr)+
apply ctac
apply (csymbr | rule ccorres_Guard_Seq)+
apply (rule ccorres_cond_true_seq)
apply (rule ccorres_rhs_assoc | csymbr)+
apply (simp add: ccorres_expand_while_iff_Seq[symmetric]
whileAnno_def if_to_top_of_bindE bindE_assoc
split_def
cong: if_cong call_ignore_cong)
apply (rule ccorres_cutMon)
apply (simp add: cutMon_walk_if cong: call_ignore_cong)
apply (rule_tac Q'="\<lambda>s. ret__int_' s = from_bool (isCNodeCap rv)"
in ccorres_cond_both'[where Q=\<top>])
apply (clarsimp simp: from_bool_0)
(* Recursive case: apply the induction hypothesis. *)
apply (rule ccorres_rhs_assoc)+
apply (rule_tac P="ccorres r xf Gd Gd' hs a" for r xf Gd Gd' hs a in rsubst)
apply (rule "1.hyps",
(rule refl in_returns in_bind[THEN iffD2, OF exI, OF exI, OF conjI]
acc_CNodeCap_repr
| assumption
| clarsimp simp: unlessE_whenE locateSlot_conv
"1.prems"
| clarsimp simp: whenE_def[where P=False])+)[1]
apply (simp add: whileAnno_def extra_sle_sless_unfolds)
apply (rule ccorres_drop_cutMon)
apply (simp add: liftE_def getSlotCap_def)
apply (rule ccorres_Guard_Seq)+
apply (rule ccorres_pre_getCTE)
apply (rule ccorres_cond_false_seq)
apply (rule_tac P="\<lambda>s. cteCap rva = rv" and P'="{s. cap_' s = cap}"
in ccorres_from_vcg_throws)
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: return_def returnOk_def word_sle_def isRight_def)
apply simp
apply (wp getSlotCap_wp)
apply (simp add: if_1_0_0)
apply vcg
apply (wp whenE_throwError_wp)
apply (simp add: ccHoarePost_def del: Collect_const)
apply vcg
(* Remaining goals: guard implications and word arithmetic side conditions. *)
apply (clarsimp simp: Collect_const_mem if_1_0_0 of_bl_from_bool
split del: if_split cong: if_cong)
apply (clarsimp simp: cap_get_tag_isCap
option.split[where P="\<lambda>x. x"]
isCNodeCap_capUntypedPtr_capCNodePtr
)
apply (clarsimp simp: word_less_nat_alt word_le_nat_alt linorder_not_less
cong: conj_cong)
apply (clarsimp simp: word_less_nat_alt word_le_nat_alt linorder_not_less
cong: rev_conj_cong)
apply (subgoal_tac "\<not> isZombie acap \<and> \<not> isThreadCap acap")
prefer 2
apply (clarsimp simp: isCap_simps)
apply (simp add: imp_conjL)
apply (simp only: all_simps[symmetric] imp_conjL cong: imp_cong,
simp only: all_simps, simp)
apply (simp add: unat_shiftr_le_bound)
apply (frule(1) valid_cnode_bits_0, clarsimp)
apply (intro conjI impI)
apply (simp add: size_of_def)
apply (erule (1) valid_cnode_cap_cte_at')
apply simp
apply (rule shiftr_less_t2n')
apply simp
apply simp
apply (simp add:size_of_def)
apply (erule (1) valid_cnode_cap_cte_at')
apply simp
apply (rule shiftr_less_t2n')
apply simp
apply simp
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (clarsimp dest!: ctes_of_valid')
apply (simp add: cte_level_bits_def size_of_def field_simps)
apply (simp add: shiftl_shiftr3 word_size)
apply (simp add: word_bw_assocs mask_and_mask min.absorb2)
apply (simp_all add: unat_sub word_le_nat_alt unat_eq_0[symmetric])
apply (simp_all add: unat_plus_if' if_P)
apply (clarsimp simp: rightsFromWord_and shiftr_over_and_dist
size_of_def cte_level_bits_def field_simps shiftl_shiftl
shiftl_shiftr3 word_size)+
apply (clarsimp simp: unat_gt_0 from_bool_0 trans [OF eq_commute from_bool_eq_if])
apply (intro conjI impI, simp_all)[1]
apply (rule word_unat.Rep_inject[THEN iffD1], subst unat_plus_if')
apply (simp add: unat_plus_if' unat_of_nat32 word_bits_def)
apply (clarsimp simp: rightsFromWord_and shiftr_over_and_dist
size_of_def cte_level_bits_def field_simps shiftl_shiftl
shiftl_shiftr3 word_size)+
apply (clarsimp simp: unat_gt_0 from_bool_0 trans [OF eq_commute from_bool_eq_if])
apply (intro conjI impI, simp_all)[1]
apply (rule word_unat.Rep_inject[THEN iffD1], simp add: unat_of_nat32 word_bits_def)
done
qed
(* Discharge the bits assumption and the cutMon wrapper to obtain the
   directly usable fastpath lookup correspondence. *)
lemmas lookup_fp_ccorres
= lookup_fp_ccorres'[OF refl, THEN ccorres_use_cutMon]
(* Given a lookup result related to a C cap, the C tag is endpoint_cap
   iff the lookup succeeded with an endpoint capability. *)
lemma ccap_relation_case_sum_Null_endpoint:
"ccap_relation (case x of Inl v => NullCap | Inr v => v) ccap
\<Longrightarrow> (cap_get_tag ccap = scast cap_endpoint_cap)
= (isRight x \<and> isEndpointCap (theRight x))"
by (clarsimp simp: cap_get_tag_isCap isRight_def isCap_simps
split: sum.split_asm)
(* Under pd_at_asid', findPDForASID cannot fail and returns exactly pd;
   the exceptional postcondition is False. Proved by walking the monad
   structure with seqE rather than plain wp so the no-throw fact is kept. *)
lemma findPDForASID_pd_at_asid_noex:
"\<lbrace>pd_at_asid' pd asid\<rbrace> findPDForASID asid \<lbrace>\<lambda>rv s. rv = pd\<rbrace>,\<lbrace>\<bottom>\<bottom>\<rbrace>"
apply (simp add: findPDForASID_def
liftME_def bindE_assoc
cong: option.case_cong)
apply (rule seqE, rule assertE_sp)+
apply (rule seqE, rule liftE_wp, rule gets_sp)
apply (rule hoare_pre)
apply (rule seqE[rotated])
apply wpc
apply wp
apply (rule seqE[rotated])
apply (rule seqE[rotated])
apply (rule returnOk_wp)
apply (simp add:checkPDAt_def)
apply wp
apply (rule assertE_wp)
apply wpc
apply wp
apply (rule liftE_wp)
apply (rule getASID_wp)
apply (clarsimp simp: pd_at_asid'_def obj_at'_def projectKOs
inv_ASIDPool)
done
(* Symbolically execute a state-preserving, empty_fail prefix f of a catch'd
   bindE on the abstract side, providing separate correspondences for the
   normal (Q) and exceptional (R) outcomes of f. *)
lemma ccorres_catch_bindE_symb_exec_l:
"\<lbrakk> \<And>s. \<lbrace>op = s\<rbrace> f \<lbrace>\<lambda>rv. op = s\<rbrace>; empty_fail f;
\<And>rv. ccorres_underlying sr G r xf ar axf (Q rv) (Q' rv) hs (catch (g rv) h >>= j) c;
\<And>ex. ccorres_underlying sr G r xf ar axf (R ex) (R' ex) hs (h ex >>= j) c;
\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,\<lbrace>R\<rbrace> \<rbrakk>
\<Longrightarrow>
ccorres_underlying sr G r xf ar axf P {s. (\<forall>rv. s \<in> Q' rv) \<and> (\<forall>ex. s \<in> R' ex)} hs
(catch (f >>=E g) h >>= j) c"
apply (simp add: catch_def bindE_def bind_assoc lift_def)
apply (rule ccorres_guard_imp2)
apply (rule ccorres_symb_exec_l[where G=P])
apply wpc
apply (simp add: throwError_bind)
apply assumption+
apply (clarsimp simp: valid_def validE_def split_def split: sum.split_asm)
apply assumption
apply clarsimp
done
(* Special case: f is the whole computation under the catch (g = returnOk). *)
lemmas ccorres_catch_symb_exec_l
= ccorres_catch_bindE_symb_exec_l[where g=returnOk,
simplified bindE_returnOk returnOk_catch_bind]
(* Push a read-only (state-preserving, empty_fail) prefix f inside an
   alternative: correspondence for f >>= (alternative (g x) h) yields one
   for alternative (f >>= g) h. *)
lemma ccorres_alt_rdonly_bind:
"\<lbrakk> ccorres_underlying sr Gamm r xf arrel axf A A' hs
(f >>= (\<lambda>x. alternative (g x) h)) c;
\<And>s. \<lbrace>op = s\<rbrace> f \<lbrace>\<lambda>rv. op = s\<rbrace>; empty_fail f \<rbrakk>
\<Longrightarrow> ccorres_underlying sr Gamm r xf arrel axf A A' hs
(alternative (f >>= (\<lambda>x. g x)) h) c"
apply (rule ccorresI')
apply (erule(3) ccorresE)
defer
apply assumption
apply (subst alternative_left_readonly_bind, assumption)
apply (rule notI, drule(1) empty_failD)
apply (simp add: alternative_def bind_def)
apply fastforce
apply (subgoal_tac "\<forall>x \<in> fst (f s). snd x = s")
apply (simp add: bind_def alternative_def image_image split_def
cong: image_cong)
apply clarsimp
apply (drule use_valid, assumption, simp+)
done
(* The page directory pd is mapped to exactly one hardware ASID in the
   kernel's ASID map. *)
definition
"pd_has_hwasid pd =
(\<lambda>s. \<exists>v. asid_map_pd_to_hwasids (armKSASIDMap (ksArchState s)) pd = {v})"
(* Extract the PD base pointer from a C cap known to be a PD cap. *)
lemma ccap_relation_pd_helper:
"\<lbrakk> ccap_relation cap cap'; cap_get_tag cap' = scast cap_page_directory_cap \<rbrakk>
\<Longrightarrow> capPDBasePtr_CL (cap_page_directory_cap_lift cap') = capPDBasePtr (capCap cap)"
by (clarsimp simp: cap_lift_page_directory_cap cap_to_H_simps
cap_page_directory_cap_lift
elim!: ccap_relationE)
(* Splitting rule for the C idiom of reading the "stored hardware ASID" PDE
   from the ASID slot of a page directory (offset 0xFF0 pde entries in).
   The continuation c' receives the read PDE, which is known to be an
   invalid-PDE encoding carrying the stored ASID, and the abstract guard
   ties it to the kernel's ASID map via asid_map_pd_to_hwasids. *)
lemma stored_hw_asid_get_ccorres_split':
assumes ptr: "ptr = CTypesDefs.ptr_add pd 0xFF0"
assumes ceqv: "\<And>rv' t t'. ceqv Gamm stored_hw_asid___struct_pde_C_' rv' t t' c (c' rv')"
and ccorres: "\<And>shw_asid. pde_get_tag shw_asid = scast pde_pde_invalid \<Longrightarrow>
ccorres_underlying rf_sr Gamm r xf ar axf
(Q shw_asid) (R shw_asid) hs
a (c' shw_asid)"
shows "ccorres_underlying rf_sr Gamm r xf ar axf
(\<lambda>s. page_directory_at' (ptr_val pd) s \<and> valid_pde_mappings' s
\<and> (\<forall>shw_asid. asid_map_pd_to_hwasids (armKSASIDMap (ksArchState s)) (ptr_val pd)
= set_option (pde_stored_asid shw_asid) \<and> pde_get_tag shw_asid = scast pde_pde_invalid
\<longrightarrow> P shw_asid \<and> Q shw_asid s))
{s. \<forall>stored_hw_asid. P stored_hw_asid \<and> pde_get_tag stored_hw_asid = scast pde_pde_invalid
\<and> (cslift s \<circ>\<^sub>m pd_pointer_to_asid_slot) (ptr_val pd) = Some stored_hw_asid
\<longrightarrow> s \<lparr> stored_hw_asid___struct_pde_C_' := stored_hw_asid \<rparr>
\<in> R stored_hw_asid} hs
a (Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t ptr\<rbrace>
(\<acute>stored_hw_asid___struct_pde_C :==
h_val (hrs_mem \<acute>t_hrs) ptr);; c)"
unfolding ptr
apply (rule ccorres_guard_imp2)
apply (rule ccorres_Guard_Seq)
apply (rule ccorres_symb_exec_r)
apply (rule ccorres_abstract_all[OF ceqv])
apply (rule_tac A="\<lambda>s. asid_map_pd_to_hwasids (armKSASIDMap (ksArchState s)) (ptr_val pd)
= set_option (pde_stored_asid rv') \<and> pde_get_tag rv' = scast pde_pde_invalid
\<longrightarrow> P rv' \<and> Q rv' s"
and A'="{s. P rv' \<longrightarrow> s \<in> R rv'}
\<inter> {s. (cslift s \<circ>\<^sub>m pd_pointer_to_asid_slot) (ptr_val pd)
= Some rv' \<and> pde_get_tag rv' = scast pde_pde_invalid}"
in ccorres_guard_imp2)
apply (rule_tac P="pde_get_tag rv' = scast pde_pde_invalid" in ccorres_gen_asm)
apply (erule ccorres)
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def
carch_state_relation_def
map_comp_Some_iff)
apply vcg
apply (rule conseqPre, vcg)
apply clarsimp
apply clarsimp
apply (frule_tac x=pd_asid_slot in page_directory_pde_atI')
apply (simp add: pd_asid_slot_def pageBits_def)
apply (cases pd)
apply (simp add: typ_at_to_obj_at_arches)
apply (drule obj_at_ko_at')
apply (clarsimp simp: pd_asid_slot_def)
apply (erule cmap_relationE1[OF rf_sr_cpde_relation], erule ko_at_projectKO_opt)
apply (frule(1) valid_pde_mappings_ko_atD')
apply (clarsimp simp: typ_heap_simps' map_comp_Some_iff
valid_pde_mapping'_def)
apply (clarsimp simp: pd_pointer_to_asid_slot_def page_directory_at'_def
add_mask_eq pdBits_def pageBits_def word_bits_def
valid_pde_mapping_offset'_def pd_asid_slot_def)
apply (simp add: cpde_relation_def Let_def pde_lift_def
split: if_split_asm)
done
(* Byte offset 0x3FC0 equals pde-entry offset 0xFF0 (4-byte entries). *)
lemma ptr_add_0xFF0:
"pde_Ptr (pd + 0x3FC0) = CTypesDefs.ptr_add (pde_Ptr pd) 0xFF0"
by simp
(* Both spellings of the ASID-slot pointer computation. *)
lemmas stored_hw_asid_get_ccorres_split
= stored_hw_asid_get_ccorres_split'[OF refl]
stored_hw_asid_get_ccorres_split'[OF ptr_add_0xFF0]
(* Machine operations touch only the machine state, so kernel-heap
   assertions like pd_at_asid' and page_directory_at' are preserved. *)
lemma doMachineOp_pd_at_asid':
"\<lbrace>\<lambda>s. P (pd_at_asid' pd asid s)\<rbrace> doMachineOp oper \<lbrace>\<lambda>rv s. P (pd_at_asid' pd asid s)\<rbrace>"
apply (simp add: doMachineOp_def split_def)
apply wp
apply (clarsimp simp: pd_at_asid'_def)
done
lemma doMachineOp_page_directory_at_P':
"\<lbrace>\<lambda>s. P (page_directory_at' pd s)\<rbrace> doMachineOp oper \<lbrace>\<lambda>rv s. P (page_directory_at' pd s)\<rbrace>"
apply (simp add: doMachineOp_def split_def)
apply wp
apply (clarsimp simp: pd_at_asid'_def)
done
(* Unfold a successful pde_stored_asid lookup into its bit-level
   conditions on the invalid-PDE encoding. *)
lemma pde_stored_asid_Some:
"(pde_stored_asid pde = Some v)
= (pde_get_tag pde = scast pde_pde_invalid
\<and> to_bool (stored_asid_valid_CL (pde_pde_invalid_lift pde))
\<and> v = ucast (stored_hw_asid_CL (pde_pde_invalid_lift pde)))"
by (auto simp add: pde_stored_asid_def split: if_split)
(* A 4-byte-aligned pointer into user data satisfies the C guard for a
   word access: alignment gives ptr_aligned, and no_0_obj' plus the
   aligned-interval argument rules out wrapping through the null page. *)
lemma pointerInUserData_c_guard':
"\<lbrakk> pointerInUserData ptr s; no_0_obj' s; is_aligned ptr 2 \<rbrakk>
\<Longrightarrow> c_guard (Ptr ptr :: word32 ptr)"
apply (simp add: pointerInUserData_def)
apply (simp add: c_guard_def ptr_aligned_def)
apply (rule conjI)
apply (simp add: is_aligned_def)
apply (simp add: c_null_guard_def)
apply (subst intvl_aligned_bottom_eq[where n=2 and bits=2], simp_all)
apply clarsimp
done
(* Cross over a user_word_at fact to the C heap: if the abstract machine
   memory holds word x at p, and the user-data heaps are cmap-related, then
   the C pointer Ptr p is guarded, heap-typed, and reads back x. The proof
   locates p inside its user-data frame (p && ~~ mask pageBits) and indexes
   into the corresponding words_C array element (p && mask pageBits >> 2). *)
lemma heap_relation_user_word_at_cross_over:
"\<lbrakk> user_word_at x p s; cmap_relation (heap_to_user_data (ksPSpace s)
(underlying_memory (ksMachineState s))) (cslift s') Ptr cuser_user_data_relation;
p' = Ptr p \<rbrakk>
\<Longrightarrow> c_guard p' \<and> hrs_htd (t_hrs_' (globals s')) \<Turnstile>\<^sub>t p'
\<and> h_val (hrs_mem (t_hrs_' (globals s'))) p' = x"
apply (erule cmap_relationE1)
apply (clarsimp simp: heap_to_user_data_def Let_def
user_word_at_def pointerInUserData_def
typ_at_to_obj_at'[where 'a=user_data, simplified])
apply (drule obj_at_ko_at', clarsimp)
apply (rule conjI, rule exI, erule ko_at_projectKO_opt)
apply (rule refl)
apply (thin_tac "heap_to_user_data a b c = d" for a b c d)
apply (cut_tac x=p and w="~~ mask pageBits" in word_plus_and_or_coroll2)
apply (rule conjI)
(* c_guard: alignment and null-exclusion from the frame's validity. *)
apply (clarsimp simp: user_word_at_def pointerInUserData_def)
apply (simp add: c_guard_def c_null_guard_def ptr_aligned_def)
apply (drule lift_t_g)
apply (clarsimp simp: )
apply (simp add: align_of_def user_data_C_size_of user_data_C_align_of
size_of_def user_data_C_typ_name)
apply (fold is_aligned_def[where n=2, simplified], simp)
apply (erule contra_subsetD[rotated])
apply (rule order_trans[rotated])
apply (rule_tac x="p && mask pageBits" and y=4 in intvl_sub_offset)
apply (cut_tac y=p and a="mask pageBits && (~~ mask 2)" in word_and_le1)
apply (subst(asm) word_bw_assocs[symmetric], subst(asm) aligned_neg_mask,
erule is_aligned_andI1)
apply (simp add: word_le_nat_alt mask_def pageBits_def)
apply simp
(* Heap typing and value: index into the words_C array of the frame. *)
apply (clarsimp simp: cuser_user_data_relation_def user_word_at_def)
apply (frule_tac f="[''words_C'']" in h_t_valid_field[OF h_t_valid_clift],
simp+)
apply (drule_tac n="uint (p && mask pageBits >> 2)" in h_t_valid_Array_element)
apply simp
apply (simp add: shiftr_over_and_dist mask_def pageBits_def uint_and)
apply (insert int_and_leR [where a="uint (p >> 2)" and b=1023], clarsimp)[1]
apply (simp add: field_lvalue_def
field_lookup_offset_eq[OF trans, OF _ arg_cong[where f=Some, symmetric], OF _ prod.collapse]
word_shift_by_2 shiftr_shiftl1 is_aligned_neg_mask_eq is_aligned_andI1)
apply (drule_tac x="ucast (p >> 2)" in spec)
apply (simp add: byte_to_word_heap_def Let_def ucast_ucast_mask)
apply (fold shiftl_t2n[where n=2, simplified, simplified mult.commute mult.left_commute])
apply (simp add: aligned_shiftr_mask_shiftl pageBits_def)
apply (rule trans[rotated], rule_tac hp="hrs_mem (t_hrs_' (globals s'))"
and x="Ptr &(Ptr (p && ~~ mask 12) \<rightarrow> [''words_C''])"
in access_in_array)
apply (rule trans)
apply (erule typ_heap_simps)
apply simp+
apply (rule order_less_le_trans, rule unat_lt2p)
apply simp
apply (fastforce simp add: typ_info_word)
apply simp
apply (rule_tac f="h_val hp" for hp in arg_cong)
apply simp
apply (simp add: field_lvalue_def)
apply (simp add: ucast_nat_def ucast_ucast_mask)
apply (fold shiftl_t2n[where n=2, simplified, simplified mult.commute mult.left_commute])
apply (simp add: aligned_shiftr_mask_shiftl)
done
(* Aligned user-data pointers are heap-typed as word pointers on the C side;
   follows from the cross-over lemma by supplying the word actually stored. *)
lemma pointerInUserData_h_t_valid2:
"\<lbrakk> pointerInUserData ptr s; cmap_relation (heap_to_user_data (ksPSpace s)
(underlying_memory (ksMachineState s))) (cslift s') Ptr cuser_user_data_relation;
is_aligned ptr 2 \<rbrakk>
\<Longrightarrow> hrs_htd (t_hrs_' (globals s')) \<Turnstile>\<^sub>t (Ptr ptr :: word32 ptr)"
apply (frule_tac p=ptr in
heap_relation_user_word_at_cross_over[rotated, OF _ refl])
apply (simp add: user_word_at_def)
apply simp
done
(* clearExMonitor (machine op) and setCurThread (pure ksCurThread update)
   commute: the machine op is oblivious to the current-thread field. *)
lemma dmo_clearExMonitor_setCurThread_swap:
"(do _ \<leftarrow> doMachineOp ARM.clearExMonitor;
setCurThread thread
od)
= (do _ \<leftarrow> setCurThread thread;
doMachineOp ARM.clearExMonitor od)"
apply (simp add: setCurThread_def doMachineOp_def split_def)
apply (rule oblivious_modify_swap[symmetric])
apply (intro oblivious_bind,
simp_all add: select_f_oblivious)
done
(* Reassociate binds in a ccorres goal (left-to-right). *)
lemma ccorres_bind_assoc_rev:
"ccorres_underlying sr E r xf arrel axf G G' hs ((a1 >>= a2) >>= a3) c
\<Longrightarrow> ccorres_underlying sr E r xf arrel axf G G' hs
(do x \<leftarrow> a1; y \<leftarrow> a2 x; a3 y od) c"
by (simp add: bind_assoc)
(* Rewrite under a gets: the continuation may assume the fetched value. *)
lemma monadic_rewrite_gets_l:
"(\<And>x. monadic_rewrite F E (P x) (g x) m)
\<Longrightarrow> monadic_rewrite F E (\<lambda>s. P (f s) s) (gets f >>= (\<lambda>x. g x)) m"
by (auto simp add: monadic_rewrite_def exec_gets)
(* At most one page directory is associated with a given ASID. *)
lemma pd_at_asid_inj':
"pd_at_asid' pd asid s \<Longrightarrow> pd_at_asid' pd' asid s \<Longrightarrow> pd' = pd"
by (clarsimp simp: pd_at_asid'_def obj_at'_def)
(* Fastpath rewrite: when the PD already has a unique hardware ASID (stored
   in the ASID-slot PDE v), the full armv_contextSwitch — including the
   loadHWASID lookup and its assertions — collapses to the single machine
   operation armv_contextSwitch_HWASID with that stored ASID. *)
lemma armv_contextSwitch_HWASID_fp_rewrite:
"monadic_rewrite True False
(pd_has_hwasid pd and pd_at_asid' pd asid and
(\<lambda>s. asid_map_pd_to_hwasids (armKSASIDMap (ksArchState s)) pd
= set_option (pde_stored_asid v)))
(armv_contextSwitch pd asid)
(doMachineOp (armv_contextSwitch_HWASID pd (the (pde_stored_asid v))))"
apply (simp add: getHWASID_def armv_contextSwitch_def
bind_assoc loadHWASID_def
findPDForASIDAssert_def
checkPDAt_def checkPDUniqueToASID_def
checkPDASIDMapMembership_def
stateAssert_def2[folded assert_def])
apply (rule monadic_rewrite_imp)
apply (rule monadic_rewrite_gets_l)
apply (rule monadic_rewrite_symb_exec_l)
apply (wp | simp)+
apply (simp add: empty_fail_findPDForASID empty_fail_catch)
apply (rule monadic_rewrite_assert monadic_rewrite_gets_l)+
apply (rule_tac P="asidMap asid \<noteq> None \<and> fst (the (asidMap asid)) = the (pde_stored_asid v)"
in monadic_rewrite_gen_asm)
apply (simp only: case_option_If2 simp_thms if_True if_False
split_def, simp)
apply (rule monadic_rewrite_refl)
apply (wp findPDForASID_pd_at_wp | simp only: const_def)+
apply (clarsimp simp: pd_has_hwasid_def cte_level_bits_def
field_simps cte_wp_at_ctes_of
word_0_sle_from_less
isCap_simps invs_valid_pspace'
simp del: Collect_const rf_sr_upd_safe)
apply (drule(1) pd_at_asid_inj')
apply (clarsimp simp: singleton_eq_o2s singleton_eq_o2s[THEN trans[OF eq_commute]])
apply (cases "pde_stored_asid v", simp_all)
apply (clarsimp simp: asid_map_pd_to_hwasids_def set_eq_subset
elim!: ranE)
apply (case_tac "x = asid")
apply clarsimp
apply (erule notE, rule_tac a=x in ranI)
apply simp
done
(* C refinement for the fastpath switchToThread_fp function: the abstract
   Arch.switchToThread followed by setCurThread corresponds to the C code,
   given a valid VTable-root cap at the thread's tcbVTableSlot pointing at pd,
   a cached HW ASID for pd, and agreement of the ASID map with the stored-asid
   pde v. Uses armv_contextSwitch_HWASID_fp_rewrite (above) to replace the
   slow-path context switch, and dmo_clearExMonitor_setCurThread_swap to
   reorder the exclusive-monitor clear past setCurThread. *)
lemma switchToThread_fp_ccorres:
"ccorres dc xfdc (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj'
and valid_pde_mappings' and valid_arch_state'
and tcb_at' thread
and cte_wp_at' (\<lambda>cte. isValidVTableRoot (cteCap cte)
\<and> capPDBasePtr (capCap (cteCap cte)) = pd)
(thread + tcbVTableSlot * 0x10)
and pd_has_hwasid pd
and (\<lambda>s. asid_map_pd_to_hwasids (armKSASIDMap (ksArchState s)) pd
= set_option (pde_stored_asid v)))
(UNIV \<inter> {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}
\<inter> {s. cap_pd_' s = pde_Ptr pd}
\<inter> {s. stored_hw_asid___struct_pde_C_' s = v}) []
(Arch.switchToThread thread
>>= (\<lambda>_. setCurThread thread))
(Call switchToThread_fp_'proc)"
apply (cinit' lift: thread_' cap_pd_' stored_hw_asid___struct_pde_C_')
apply (simp add: ARM_H.switchToThread_def bind_assoc
setVMRoot_def cap_case_isPageDirectoryCap
del: Collect_const cong: call_ignore_cong)
apply (simp add: getThreadVSpaceRoot_def locateSlot_conv getSlotCap_def
del: Collect_const cong: call_ignore_cong)
apply (simp only: )
apply (rule ccorres_symb_exec_r, rule_tac xf'="hw_asid_'" in ccorres_abstract,
ceqv, rename_tac "hw_asid")
apply (rule ccorres_getCTE, rename_tac cte)
apply (rule_tac P="isValidVTableRoot (cteCap cte)
\<and> capPDBasePtr (capCap (cteCap cte)) = pd" in ccorres_gen_asm)
apply (erule conjE, drule isValidVTableRootD)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_catch_bindE_symb_exec_l,
rule findPDForASID_inv,
rule empty_fail_findPDForASID)
apply (rename_tac "pd_found")
apply (rule_tac P="pd_found \<noteq> pd"
in ccorres_case_bools2)
(* Case: lookup disagrees with the cap's pd -- the state assertion fails,
   so this branch is vacuous (ccorres_False). *)
apply (simp add: bindE_assoc catch_liftE_bindE bind_assoc
checkPDNotInASIDMap_def
checkPDASIDMapMembership_def
catch_throwError)
apply (rule ccorres_stateAssert)
apply (rule ccorres_False[where P'=UNIV])
apply (simp add: catch_liftE bind_assoc
del: Collect_const cong: call_ignore_cong)
apply (rule monadic_rewrite_ccorres_assemble[rotated])
apply (rule monadic_rewrite_bind_head)
apply (rule_tac pd=pd and v=v
in armv_contextSwitch_HWASID_fp_rewrite)
apply (simp only: ccorres_seq_IF_False ccorres_seq_skip)
apply (ctac(no_vcg) add: armv_contextSwitch_HWASID_ccorres)
apply (simp add: storeWordUser_def bind_assoc case_option_If2
split_def
del: Collect_const)
apply (simp only: dmo_clearExMonitor_setCurThread_swap
dc_def[symmetric])
apply (rule ccorres_split_nothrow_novcg_dc)
apply (rule ccorres_from_vcg[where P=\<top> and P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp del: rf_sr_upd_safe)
apply (clarsimp simp: setCurThread_def simpler_modify_def
rf_sr_def cstate_relation_def Let_def
carch_state_relation_def cmachine_state_relation_def)
apply (ctac add: clearExMonitor_fp_ccorres)
apply wp
apply (simp add: guard_is_UNIV_def)
apply wp
apply (simp add: bind_assoc checkPDNotInASIDMap_def
checkPDASIDMapMembership_def)
apply (rule ccorres_stateAssert)
apply (rule ccorres_False[where P'=UNIV])
apply simp
apply (wp findPDForASID_pd_at_wp)[1]
apply (simp del: Collect_const)
apply vcg
apply (rule conseqPre, vcg, clarsimp)
apply (clarsimp simp: pd_has_hwasid_def cte_level_bits_def
field_simps cte_wp_at_ctes_of
pd_at_asid'_def word_0_sle_from_less
isCap_simps invs_valid_pspace'
simp del: Collect_const rf_sr_upd_safe)
apply (frule_tac P="\<lambda>Sf. Sf x = S'" for x S'
in subst[OF meta_eq_to_obj_eq, OF asid_map_pd_to_hwasids_def])
apply (clarsimp simp: isCap_simps dest!: isValidVTableRootD)
apply (rule context_conjI)
apply (drule singleton_eqD[OF sym])
apply clarsimp
apply (fastforce simp: ran_def)
apply (frule ctes_of_valid', clarsimp, clarsimp simp: valid_cap'_def)
apply (auto simp: singleton_eq_o2s projectKOs obj_at'_def
pde_stored_asid_def split: if_split_asm)
done
(* VCG spec for the generated thread_state_ptr_set_tsType_np C function:
   writing tsType through a pointer into a tcb_C's tcbState_C field updates
   only that field of the parent tcb (parent_update_child), preserving the
   tcbQueued bit. "np" variants assume aligned/masked inputs so no masking
   code is needed. ptr s is the enclosing tcb_C recovered via cparent. *)
lemma thread_state_ptr_set_tsType_np_spec:
defines "ptr s \<equiv> cparent \<^bsup>s\<^esup>ts_ptr [''tcbState_C''] :: tcb_C ptr"
shows
"\<forall>s. \<Gamma>\<turnstile> \<lbrace>s. hrs_htd \<^bsup>s\<^esup>t_hrs \<Turnstile>\<^sub>t ptr s
\<and> (tsType_' s = scast ThreadState_Running \<or> tsType_' s = scast ThreadState_Restart
\<or> tsType_' s = scast ThreadState_BlockedOnReply)\<rbrace>
Call thread_state_ptr_set_tsType_np_'proc
{t. (\<exists>thread_state.
tsType_CL (thread_state_lift thread_state) = tsType_' s \<and>
tcbQueued_CL (thread_state_lift thread_state)
= tcbQueued_CL (thread_state_lift (tcbState_C (the (cslift s (ptr s))))) \<and>
t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s)
(the (cslift s (ptr s))\<lparr>tcbState_C := thread_state\<rparr>))
(t_hrs_' (globals s))
)}"
apply (intro allI, rule conseqPre, vcg)
apply (clarsimp simp: ptr_def)
apply (clarsimp simp: h_t_valid_clift_Some_iff)
apply (frule h_t_valid_c_guard_cparent[OF h_t_valid_clift], simp+,
simp add: typ_uinfo_t_def)
apply (frule clift_subtype, simp+)
apply (clarsimp simp: typ_heap_simps' word_sle_def word_sless_def)
apply (subst parent_update_child, erule typ_heap_simps', simp+)
apply (clarsimp simp: typ_heap_simps')
apply (rule exI, rule conjI[OF _ conjI [OF _ refl]])
apply (simp_all add: thread_state_lift_def)
apply (auto simp: "StrictC'_thread_state_defs")
done
(* As above, but also sets blockingObject; requires ep_ref 4-aligned and
   tsType already within mask 4 so the fields can be or-combined directly. *)
lemma thread_state_ptr_mset_blockingObject_tsType_spec:
defines "ptr s \<equiv> cparent \<^bsup>s\<^esup>ts_ptr [''tcbState_C''] :: tcb_C ptr"
shows
"\<forall>s. \<Gamma>\<turnstile> \<lbrace>s. hrs_htd \<^bsup>s\<^esup>t_hrs \<Turnstile>\<^sub>t ptr s \<and> is_aligned (ep_ref_' s) 4
\<and> tsType_' s && mask 4 = tsType_' s\<rbrace>
Call thread_state_ptr_mset_blockingObject_tsType_'proc
{t. (\<exists>thread_state.
tsType_CL (thread_state_lift thread_state) = tsType_' s
\<and> blockingObject_CL (thread_state_lift thread_state) = ep_ref_' s
\<and> tcbQueued_CL (thread_state_lift thread_state)
= tcbQueued_CL (thread_state_lift (tcbState_C (the (cslift s (ptr s)))))
\<and> t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s)
(the (cslift s (ptr s))\<lparr>tcbState_C := thread_state\<rparr>))
(t_hrs_' (globals s))
)}"
apply (intro allI, rule conseqPre, vcg)
apply (clarsimp simp: ptr_def)
apply (frule h_t_valid_c_guard_cparent, simp+)
apply (simp add: typ_uinfo_t_def)
apply (clarsimp simp: h_t_valid_clift_Some_iff)
apply (frule clift_subtype, simp+)
apply (clarsimp simp: typ_heap_simps')
apply (subst parent_update_child, erule typ_heap_simps', simp+)
apply (clarsimp simp: typ_heap_simps' word_sless_def word_sle_def)
apply (rule exI, intro conjI[rotated], rule refl)
apply (simp_all add: thread_state_lift_def word_ao_dist
is_aligned_mask mask_def mask_eq_0_eq_x,
simp_all add: mask_eq_x_eq_0)
done
(* VCG spec for the mdb-node multi-set function: updates mdbNext, mdbRevocable
   and mdbFirstBadged of the cteMDBNode_C inside the parent cte_C in one write,
   under the usual alignment/mask preconditions on the inputs. *)
lemma mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_spec:
defines "ptr s \<equiv> cparent \<^bsup>s\<^esup>node_ptr [''cteMDBNode_C''] :: cte_C ptr"
shows
"\<forall>s. \<Gamma>\<turnstile> \<lbrace>s. hrs_htd \<^bsup>s\<^esup>t_hrs \<Turnstile>\<^sub>t ptr s \<and> is_aligned (mdbNext___unsigned_long_' s) 4
\<and> mdbRevocable___unsigned_long_' s && mask 1 = mdbRevocable___unsigned_long_' s
\<and> mdbFirstBadged___unsigned_long_' s && mask 1 = mdbFirstBadged___unsigned_long_' s\<rbrace>
Call mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_'proc
{t. (\<exists>mdb_node.
mdb_node_lift mdb_node = mdb_node_lift (cteMDBNode_C (the (cslift s (ptr s))))
\<lparr> mdbNext_CL := mdbNext___unsigned_long_' s, mdbRevocable_CL := mdbRevocable___unsigned_long_' s,
mdbFirstBadged_CL := mdbFirstBadged___unsigned_long_' s \<rparr>
\<and> t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s)
(the (cslift s (ptr s)) \<lparr> cteMDBNode_C := mdb_node \<rparr>))
(t_hrs_' (globals s))
)}"
apply (intro allI, rule conseqPre, vcg)
apply (clarsimp simp: ptr_def)
apply (clarsimp simp: h_t_valid_clift_Some_iff)
apply (frule h_t_valid_c_guard_cparent[OF h_t_valid_clift], simp+,
simp add: typ_uinfo_t_def)
apply (frule clift_subtype, simp+)
apply (clarsimp simp: typ_heap_simps' word_sle_def word_sless_def)
apply (subst parent_update_child, erule typ_heap_simps', simp+)
apply (clarsimp simp: typ_heap_simps')
apply (rule exI, rule conjI[OF _ refl])
apply (simp add: mdb_node_lift_def word_ao_dist shiftr_over_or_dist ucast_id)
apply (fold limited_and_def)
apply (simp add: limited_and_simps)
done
(* VCG spec for mdb_node_ptr_set_mdbPrev_np: sets only the mdbPrev field of
   the cteMDBNode_C inside the parent cte_C; requires the new prev pointer to
   be 4-aligned (np variant: no masking performed by the C code). *)
lemma mdb_node_ptr_set_mdbPrev_np_spec:
defines "ptr s \<equiv> cparent \<^bsup>s\<^esup>node_ptr [''cteMDBNode_C''] :: cte_C ptr"
shows
"\<forall>s. \<Gamma>\<turnstile> \<lbrace>s. hrs_htd \<^bsup>s\<^esup>t_hrs \<Turnstile>\<^sub>t ptr s \<and> is_aligned (mdbPrev___unsigned_long_' s) 4\<rbrace>
Call mdb_node_ptr_set_mdbPrev_np_'proc
{t. (\<exists>mdb_node.
mdb_node_lift mdb_node = mdb_node_lift (cteMDBNode_C (the (cslift s (ptr s))))
\<lparr> mdbPrev_CL := mdbPrev___unsigned_long_' s \<rparr>
\<and> t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s)
(the (cslift s (ptr s)) \<lparr> cteMDBNode_C := mdb_node \<rparr>))
(t_hrs_' (globals s))
)}"
apply (intro allI, rule conseqPre, vcg)
apply (clarsimp simp: ptr_def)
apply (clarsimp simp: h_t_valid_clift_Some_iff)
apply (frule h_t_valid_c_guard_cparent[OF h_t_valid_clift], simp+,
simp add: typ_uinfo_t_def)
apply (frule clift_subtype, simp+)
apply (clarsimp simp: typ_heap_simps')
apply (subst parent_update_child, erule typ_heap_simps', simp+)
apply (clarsimp simp: typ_heap_simps' word_sle_def word_sless_def)
apply (rule exI, rule conjI [OF _ refl])
apply (simp add: mdb_node_lift_def limited_and_simps)
done
(* VCG spec for cap_reply_cap_ptr_new_np: constructs a reply cap in place in
   the cap_C field of the parent cte_C, given an 8-aligned TCB pointer and a
   one-bit replyMaster flag. The postcondition pins cap_lift to the exact
   Cap_reply_cap value. *)
lemma cap_reply_cap_ptr_new_np_spec2:
defines "ptr s \<equiv> cparent \<^bsup>s\<^esup>cap_ptr [''cap_C''] :: cte_C ptr"
shows
"\<forall>s. \<Gamma>\<turnstile> \<lbrace>s. hrs_htd \<^bsup>s\<^esup>t_hrs \<Turnstile>\<^sub>t ptr s \<and> is_aligned (capTCBPtr___unsigned_long_' s) 8
\<and> capReplyMaster___unsigned_long_' s && 1 = capReplyMaster___unsigned_long_' s\<rbrace>
Call cap_reply_cap_ptr_new_np_'proc
{t. (\<exists>cap.
cap_lift cap = Some (Cap_reply_cap \<lparr> capReplyMaster_CL = capReplyMaster___unsigned_long_' s,
capTCBPtr_CL = capTCBPtr___unsigned_long_' s \<rparr>)
\<and> t_hrs_' (globals t) = hrs_mem_update (heap_update (ptr s)
(the (cslift s (ptr s)) \<lparr> cte_C.cap_C := cap \<rparr>))
(t_hrs_' (globals s))
)}"
apply (intro allI, rule conseqPre, vcg)
apply (clarsimp simp: ptr_def)
apply (clarsimp simp: h_t_valid_clift_Some_iff word_sle_def)
apply (frule h_t_valid_c_guard_cparent[OF h_t_valid_clift],
simp+, simp add: typ_uinfo_t_def)
apply (frule clift_subtype, simp+)
apply (clarsimp simp: typ_heap_simps')
apply (subst parent_update_child, erule typ_heap_simps', simp+)
apply (clarsimp simp: typ_heap_simps' word_sless_def word_sle_def)
apply (rule exI, rule conjI [OF _ refl])
apply (fold limited_and_def)
apply (simp add: cap_get_tag_def mask_def cap_tag_defs
word_ao_dist limited_and_simps
cap_lift_reply_cap shiftr_over_or_dist)
done
(* VCG spec for endpoint_ptr_mset_epQueue_tail_state: updates the endpoint's
   state and epQueue_tail fields in one heap write, given a 4-aligned tail
   pointer and a 2-bit state value. *)
lemma endpoint_ptr_mset_epQueue_tail_state_spec:
"\<forall>s. \<Gamma>\<turnstile> \<lbrace>s. hrs_htd \<^bsup>s\<^esup>t_hrs \<Turnstile>\<^sub>t ep_ptr_' s \<and> is_aligned (epQueue_tail_' s) 4
\<and> state_' s && mask 2 = state_' s\<rbrace>
Call endpoint_ptr_mset_epQueue_tail_state_'proc
{t. (\<exists>endpoint.
endpoint_lift endpoint = endpoint_lift (the (cslift s (ep_ptr_' s)))
\<lparr> endpoint_CL.state_CL := state_' s, epQueue_tail_CL := epQueue_tail_' s \<rparr>
\<and> t_hrs_' (globals t) = hrs_mem_update (heap_update (ep_ptr_' s)
endpoint)
(t_hrs_' (globals s))
)}"
apply (intro allI, rule conseqPre, vcg)
apply (clarsimp simp: h_t_valid_clift_Some_iff typ_heap_simps'
word_sle_def word_sless_def)
apply (rule exI, rule conjI[OF _ refl])
apply (simp add: endpoint_lift_def word_ao_dist
mask_def)
apply (fold limited_and_def)
apply (simp add: limited_and_simps)
done
(* VCG spec for endpoint_ptr_set_epQueue_head_np: updates only the endpoint's
   epQueue_head field; the new head must be 4-aligned (np variant). *)
lemma endpoint_ptr_set_epQueue_head_np_spec:
"\<forall>s. \<Gamma>\<turnstile> \<lbrace>s. hrs_htd \<^bsup>s\<^esup>t_hrs \<Turnstile>\<^sub>t ep_ptr_' s \<and> is_aligned (epQueue_head_' s) 4\<rbrace>
Call endpoint_ptr_set_epQueue_head_np_'proc
{t. (\<exists>endpoint.
endpoint_lift endpoint = endpoint_lift (the (cslift s (ep_ptr_' s)))
\<lparr> epQueue_head_CL := epQueue_head_' s \<rparr>
\<and> t_hrs_' (globals t) = hrs_mem_update (heap_update (ep_ptr_' s)
endpoint)
(t_hrs_' (globals s))
)}"
apply (intro allI, rule conseqPre, vcg)
apply (clarsimp simp: h_t_valid_clift_Some_iff typ_heap_simps'
word_sless_def word_sle_def)
apply (rule exI, rule conjI[OF _ refl])
apply (simp add: endpoint_lift_def word_ao_dist
mask_def)
apply (simp add: limited_and_simps)
done
(* General rule for proving ccorres of a C `call` with a [SKIP] handler stack
   from ccorres of the called procedure body. The side conditions relate the
   state relation across the call's init (i), cleanup (clean), and result
   copy-out (g) functions. Proved by case analysis on exec_handlers /
   exec_call_Normal_elim. *)
lemma ccorres_call_hSkip':
assumes cul: "ccorres_underlying sr \<Gamma> r xf' r xf' P (i ` P') [SKIP] a (Call f)"
and gsr: "\<And>a b x s t. (x, t) \<in> sr \<Longrightarrow> (x, g a b (clean s t)) \<in> sr"
and csr: "\<And>x s t. (x, t) \<in> sr \<Longrightarrow> (x, clean s t) \<in> sr"
and res: "\<And>a s t rv. r rv (xf' t) \<Longrightarrow> r rv (xf (g a t (clean s t)))"
and ares: "\<And>s t rv. r rv (xf' t) \<Longrightarrow> r rv (xf (clean s t))"
and ist: "\<And>x s. (x, s) \<in> sr \<Longrightarrow> (x, i s) \<in> sr"
shows "ccorres_underlying sr \<Gamma> r xf r xf P P' [SKIP] a (call i f clean (\<lambda>x y. Basic (g x y)))"
apply (rule ccorresI')
apply (erule exec_handlers.cases, simp_all)[1]
apply clarsimp
apply (erule exec_call_Normal_elim, simp_all)[1]
apply (clarsimp elim!: exec_Normal_elim_cases)
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHAbrupt)
apply (erule(1) exec.Call)
apply (rule EHOther, rule exec.Skip, simp)
apply clarsimp
apply (erule exec_handlers.cases, simp_all)[1]
apply (clarsimp elim!: exec_Normal_elim_cases)
apply (clarsimp elim!: exec_Normal_elim_cases)
apply (erule rev_bexI)
apply (simp add: unif_rrel_simps csr ares)
apply clarsimp
apply (erule exec_call_Normal_elim, simp_all)[1]
apply (clarsimp elim!: exec_Normal_elim_cases)
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHOther, erule(1) exec.Call)
apply simp
apply (simp add: unif_rrel_simps)
apply (erule rev_bexI)
apply (simp add: gsr res)
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHOther, erule(1) exec.Call)
apply simp
apply simp
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHOther, erule(1) exec.Call)
apply simp
apply simp
apply (rule ccorresE[OF cul ist], assumption+, simp+)
apply (rule EHOther, erule exec.CallUndefined)
apply simp
apply simp
done
(* The naming convention here is that xf', xfr, and xfru are the terms we instantiate *)
(* Specialisation of ccorres_call_hSkip' to rf_sr with no return value (xfdc),
   for calls whose init/copy-out leave the globals unchanged. *)
lemma ccorres_call_hSkip:
assumes cul: "ccorres_underlying rf_sr \<Gamma> r xfdc r xfdc A C' [SKIP] a (Call f)"
and ggl: "\<And>x y s. globals (g x y s) = globals s"
and igl: "\<And>s. globals (i s) = globals s"
shows "ccorres_underlying rf_sr \<Gamma> r xfdc r xfdc
A {s. i s \<in> C'} [SKIP] a (call i f (\<lambda>s t. s\<lparr>globals := globals t\<rparr>) (\<lambda>x y. Basic (g x y)))"
using cul
unfolding rf_sr_def
apply -
apply (rule ccorres_call_hSkip')
apply (erule ccorres_guard_imp)
apply simp
apply clarsimp
apply (simp_all add: ggl xfdc_def)
apply (clarsimp simp: igl)
done
(* Collapse a rethrowFailure followed by case_sum: the failure-mapping
   function fl is composed onto the error continuation e. *)
lemma bind_case_sum_rethrow:
"rethrowFailure fl f >>= case_sum e g
= f >>= case_sum (e \<circ> fl) g"
apply (simp add: rethrowFailure_def handleE'_def
bind_assoc)
apply (rule bind_cong[OF refl])
apply (simp add: throwError_bind split: sum.split)
done
(* Push a read-only liftE'd computation out of an alternative on the left of
   a ccorres goal, given f preserves the state and is empty_fail. *)
lemma ccorres_alt_rdonly_liftE_bindE:
"\<lbrakk> ccorres_underlying sr Gamm r xf arrel axf A A' hs
(f >>= (\<lambda>x. alternative (g x) h)) c;
\<And>s. \<lbrace>op = s\<rbrace> f \<lbrace>\<lambda>rv. op = s\<rbrace>; empty_fail f \<rbrakk>
\<Longrightarrow> ccorres_underlying sr Gamm r xf arrel axf A A' hs
(alternative (liftE f >>=E (\<lambda>x. g x)) h) c"
by (simp add: liftE_bindE ccorres_alt_rdonly_bind)
(* Variant of ccorres_pre_getCTE whose C-side guard is phrased in terms of the
   lifted cte on the C heap related by ccte_relation. *)
lemma ccorres_pre_getCTE2:
"(\<And>rv. ccorresG rf_sr \<Gamma> r xf (P rv) (P' rv) hs (f rv) c) \<Longrightarrow>
ccorresG rf_sr \<Gamma> r xf (\<lambda>s. \<forall>cte. ctes_of s p = Some cte \<longrightarrow> P cte s)
{s. \<forall>cte cte'. cslift s (cte_Ptr p) = Some cte' \<and> ccte_relation cte cte'
\<longrightarrow> s \<in> P' cte} hs
(getCTE p >>= (\<lambda>rv. f rv)) c"
apply (rule ccorres_guard_imp2, erule ccorres_pre_getCTE)
apply (clarsimp simp: map_comp_Some_iff ccte_relation_def
c_valid_cte_def cl_valid_cte_def
c_valid_cap_def)
done
(* Make these empty_fail facts available to the classical reasoner/simp. *)
declare empty_fail_assertE[iff]
declare empty_fail_resolveAddressBits[iff]
(* Unfold the C-level endpoint cap fields from ccap_relation: each *_CL field
   of the lifted cap equals the corresponding abstract capability accessor. *)
lemma ccap_relation_ep_helpers:
"\<lbrakk> ccap_relation cap cap'; cap_get_tag cap' = scast cap_endpoint_cap \<rbrakk>
\<Longrightarrow> capCanSend_CL (cap_endpoint_cap_lift cap') = from_bool (capEPCanSend cap)
\<and> capCanReceive_CL (cap_endpoint_cap_lift cap') = from_bool (capEPCanReceive cap)
\<and> capEPPtr_CL (cap_endpoint_cap_lift cap') = capEPPtr cap
\<and> capEPBadge_CL (cap_endpoint_cap_lift cap') = capEPBadge cap
\<and> capCanGrant_CL (cap_endpoint_cap_lift cap') = from_bool (capEPCanGrant cap)"
by (clarsimp simp: cap_lift_endpoint_cap cap_to_H_simps
cap_endpoint_cap_lift_def word_size
from_bool_to_bool_and_1
elim!: ccap_relationE)
(* With no extra caps in the message info, lookupExtraCaps trivially succeeds
   with the empty list. *)
lemma lookupExtraCaps_null:
"msgExtraCaps info = 0 \<Longrightarrow>
lookupExtraCaps thread buffer info = returnOk []"
by (clarsimp simp: lookupExtraCaps_def
getExtraCPtrs_def liftE_bindE
upto_enum_step_def mapM_Nil
split: Types_H.message_info.split option.split)
(* The single word-level fastpath test ((mi && mask 9) + 3) && ~~ mask 3 = 0
   is equivalent to the three abstract conditions the fastpath needs: no extra
   caps, message length within the message registers, and the C-lifted length
   field also within the registers. Shown by word_bitwise on the 9-bit
   length/extra-caps layout. *)
lemma fastpath_mi_check:
"((mi && mask 9) + 3) && ~~ mask 3 = 0
= (msgExtraCaps (messageInfoFromWord mi) = 0
\<and> msgLength (messageInfoFromWord mi) \<le> scast n_msgRegisters
\<and> length_CL (seL4_MessageInfo_lift (seL4_MessageInfo_C (FCP (K mi))))
\<le> scast n_msgRegisters)"
(is "?P = (?Q \<and> ?R \<and> ?S)")
proof -
have le_Q: "?P = (?Q \<and> ?S)"
apply (simp add: mask_def messageInfoFromWord_def Let_def
msgExtraCapBits_def msgLengthBits_def
seL4_MessageInfo_lift_def fcp_beta n_msgRegisters_def)
apply word_bitwise
apply blast
done
have Q_R: "?S \<Longrightarrow> ?R"
apply (clarsimp simp: messageInfoFromWord_def Let_def msgLengthBits_def
msgExtraCapBits_def mask_def n_msgRegisters_def
seL4_MessageInfo_lift_def fcp_beta)
apply (subst if_not_P, simp_all)
apply (simp add: msgMaxLength_def linorder_not_less)
apply (erule order_trans, simp)
done
from le_Q Q_R show ?thesis
by blast
qed
(* VCG spec: messageInfoFromWord_raw packs its word argument unchanged into
   the single-word seL4_MessageInfo_C struct. *)
lemma messageInfoFromWord_raw_spec:
"\<forall>s. \<Gamma>\<turnstile> {s} Call messageInfoFromWord_raw_'proc
\<lbrace>\<acute>ret__struct_seL4_MessageInfo_C
= (seL4_MessageInfo_C (FCP (K \<^bsup>s\<^esup>w)))\<rbrace>"
apply vcg
apply (clarsimp simp: word_sless_def word_sle_def)
apply (case_tac v)
apply (simp add: cart_eq fcp_beta)
done
(* When the lifted length fits in the message registers, the C-side lifted
   message info agrees with the Haskell-side messageInfoFromWord via mi_from_H
   (i.e. no truncation to msgMaxLength occurs). *)
lemma mi_check_messageInfo_raw:
"length_CL (seL4_MessageInfo_lift (seL4_MessageInfo_C (FCP (K mi))))
\<le> scast n_msgRegisters
\<Longrightarrow> seL4_MessageInfo_lift (seL4_MessageInfo_C (FCP (K mi)))
= mi_from_H (messageInfoFromWord mi)"
apply (simp add: messageInfoFromWord_def Let_def mi_from_H_def
seL4_MessageInfo_lift_def fcp_beta msgLengthBits_def msgExtraCapBits_def
msgMaxExtraCaps_def shiftL_nat)
apply (subst if_not_P)
apply (simp add: linorder_not_less msgMaxLength_def n_msgRegisters_def)
apply (erule order_trans, simp)
apply simp
apply (thin_tac "P" for P)
apply word_bitwise
done
(* VCG spec for the C fastpath_mi_check function: its zero/nonzero result
   decides exactly the conjunction established by fastpath_mi_check and
   mi_check_messageInfo_raw above. *)
lemma fastpath_mi_check_spec:
"\<forall>s. \<Gamma> \<turnstile> \<lbrace>s. True\<rbrace> Call fastpath_mi_check_'proc
\<lbrace>(\<acute>ret__int = 0) = (msgExtraCaps (messageInfoFromWord \<^bsup>s\<^esup>msgInfo) = 0
\<and> msgLength (messageInfoFromWord \<^bsup>s\<^esup>msgInfo) \<le> scast n_msgRegisters
\<and> seL4_MessageInfo_lift (seL4_MessageInfo_C (FCP (K \<^bsup>s\<^esup>msgInfo)))
= mi_from_H (messageInfoFromWord \<^bsup>s\<^esup>msgInfo))\<rbrace>"
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: seL4_MsgLengthBits_def seL4_MsgExtraCapBits_def
word_sle_def if_1_0_0)
apply (cut_tac mi="msgInfo_' s" in fastpath_mi_check)
apply (simp add: mask_def)
apply (auto intro: mi_check_messageInfo_raw[unfolded K_def])
done
(* The fastpath's single masked-word test on cap word 0 (low 5 bits equal the
   mapped page-directory tag pattern) is equivalent to isValidVTableRoot_C:
   a page-directory cap with capPDIsMapped set. *)
lemma isValidVTableRoot_fp_lemma:
"(index (cap_C.words_C ccap) 0 && 0x1F = 0x10 || scast cap_page_directory_cap)
= isValidVTableRoot_C ccap"
apply (simp add: isValidVTableRoot_C_def ARM_H.isValidVTableRoot_def
cap_case_isPageDirectoryCap if_bool_simps)
apply (subst split_word_eq_on_mask[where m="mask 4"])
apply (simp add: mask_def word_bw_assocs word_ao_dist cap_page_directory_cap_def)
apply (subgoal_tac "cap_get_tag ccap = scast cap_page_directory_cap
\<Longrightarrow> (index (cap_C.words_C ccap) 0 && 0x10 = 0x10) = to_bool (capPDIsMapped_CL (cap_page_directory_cap_lift ccap))")
apply (clarsimp simp add: cap_get_tag_eq_x mask_def
cap_page_directory_cap_def split: if_split)
apply (rule conj_cong[OF refl])
apply clarsimp
apply (clarsimp simp: cap_lift_page_directory_cap
cap_to_H_simps
to_bool_def bool_mask[folded word_neq_0_conv]
cap_page_directory_cap_lift_def
elim!: ccap_relationE split: if_split)
apply (thin_tac "P" for P)
apply word_bitwise
done
(* VCG spec: the C isValidVTableRoot_fp function returns the boolean decided
   by isValidVTableRoot_fp_lemma. *)
lemma isValidVTableRoot_fp_spec:
"\<forall>s. \<Gamma> \<turnstile> {s} Call isValidVTableRoot_fp_'proc
{t. ret__unsigned_long_' t = from_bool (isValidVTableRoot_C (pd_cap_' s))}"
apply vcg
apply (clarsimp simp: word_sle_def word_sless_def isValidVTableRoot_fp_lemma)
apply (simp add: from_bool_def split: if_split)
done
(* A RecvEP case expression reduces to its RecvEP branch on the queue. *)
lemma isRecvEP_endpoint_case:
"isRecvEP ep \<Longrightarrow> case_endpoint f g h ep = f (epQueue ep)"
by (clarsimp simp: isRecvEP_def split: endpoint.split_asm)
(* Split a C conditional followed by a common continuation d: prove ccorres
   for each branch sequenced with d, given the abstract/concrete conditions
   coincide under the state relation. *)
lemma ccorres_cond_both_seq:
"\<lbrakk> \<forall>s s'. (s, s') \<in> sr \<and> R s \<longrightarrow> P s = (s' \<in> P');
ccorres_underlying sr \<Gamma> r xf arrel axf Pt Rt hs a (c ;; d);
ccorres_underlying sr \<Gamma> r xf arrel axf Pf Rf hs a (c' ;; d) \<rbrakk>
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf
(R and (\<lambda>s. P s \<longrightarrow> Pt s) and (\<lambda>s. \<not> P s \<longrightarrow> Pf s))
{s. (s \<in> P' \<longrightarrow> s \<in> Rt) \<and> (s \<notin> P' \<longrightarrow> s \<in> Rf)}
hs a (Cond P' c c' ;; d)"
apply (subst ccorres_seq_cond_raise)
apply (rule ccorres_guard_imp2, rule ccorres_cond_both, assumption+)
apply auto
done
(* Rephrase catch of a unifyFailure'd bindE as a plain bind with an explicit
   isRight test on f's outcome. *)
lemma unifyFailure_catch_If:
"catch (unifyFailure f >>=E g) h
= f >>= (\<lambda>rv. if isRight rv then catch (g (theRight rv)) h else h ())"
apply (simp add: unifyFailure_def rethrowFailure_def
handleE'_def catch_def bind_assoc
bind_bindE_assoc cong: if_cong)
apply (rule bind_cong[OF refl])
apply (simp add: throwError_bind isRight_def return_returnOk
split: sum.split)
done
end
(* Shorthand for a pointer-to-tcb-pointer cast, used for tcbEPNext/Prev links. *)
abbreviation "tcb_Ptr_Ptr \<equiv> (Ptr :: word32 \<Rightarrow> tcb_C ptr ptr)"
(* Simpl Basic command performing a single in-place heap write of (vfun s)
   through (ptrfun s); input-only abbreviation for stating C fragments. *)
abbreviation(input)
"ptr_basic_update ptrfun vfun
\<equiv> Basic (\<lambda>s. globals_update (t_hrs_'_update (hrs_mem_update
(heap_update (ptrfun s) (vfun s)))) s)"
context kernel_m begin
(* C refinement for the fastpath's inlined endpoint dequeue: removing the head
   TCB dest from a RecvEP queue corresponds to the given C fragment (set new
   queue head, then either null the new head's prev pointer or, if the queue
   empties, reset the endpoint to idle). Proved via setObject_ccorres_helper
   with separate cases for the singleton and longer queues. *)
lemma fastpath_dequeue_ccorres:
"dest1 = dest2 \<and> dest2 = tcb_ptr_to_ctcb_ptr dest \<and> ep_ptr1 = ep_Ptr ep_ptr \<Longrightarrow>
ccorres dc xfdc
(ko_at' (RecvEP (dest # xs)) ep_ptr and invs')
{s. dest2 = tcb_ptr_to_ctcb_ptr dest
\<and> dest1 = tcb_ptr_to_ctcb_ptr dest
\<and> ep_ptr1 = ep_Ptr ep_ptr} hs
(setEndpoint ep_ptr (case xs of [] \<Rightarrow> IdleEP | _ \<Rightarrow> RecvEP xs))
(Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t dest1\<rbrace>
(CALL endpoint_ptr_set_epQueue_head_np(ep_ptr1,ptr_val (h_val (hrs_mem \<acute>t_hrs) (tcb_Ptr_Ptr &(dest2\<rightarrow>[''tcbEPNext_C''])))));;
Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t dest1\<rbrace>
(IF h_val (hrs_mem \<acute>t_hrs) (tcb_Ptr_Ptr &(dest1\<rightarrow>[''tcbEPNext_C''])) \<noteq> tcb_Ptr 0 THEN
Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t h_val (hrs_mem \<acute>t_hrs) (tcb_Ptr_Ptr &(dest1\<rightarrow>[''tcbEPNext_C'']))\<rbrace>
(Guard C_Guard {s. s \<Turnstile>\<^sub>c dest1} (
(ptr_basic_update (\<lambda>s. tcb_Ptr_Ptr &(h_val (hrs_mem (t_hrs_' (globals s)))
(tcb_Ptr_Ptr &(dest1\<rightarrow>[''tcbEPNext_C'']))\<rightarrow>[''tcbEPPrev_C''])) (\<lambda>_. NULL))))
ELSE
CALL endpoint_ptr_mset_epQueue_tail_state(ep_ptr1,scast 0,scast EPState_Idle)
FI))"
unfolding setEndpoint_def
apply (rule setObject_ccorres_helper[rotated])
apply simp
apply (simp add: objBits_simps)
apply (rule conseqPre, vcg)
apply clarsimp
apply (drule(1) ko_at_obj_congD')
apply (frule ko_at_valid_ep', clarsimp)
apply (rule cmap_relationE1[OF cmap_relation_ep], assumption,
erule ko_at_projectKO_opt)
apply (clarsimp simp: typ_heap_simps' valid_ep'_def
isRecvEP_endpoint_case neq_Nil_conv)
apply (drule(1) obj_at_cslift_tcb)
apply (clarsimp simp: typ_heap_simps')
apply (case_tac "xs")
(* Singleton queue: endpoint becomes IdleEP. *)
apply (clarsimp simp: cendpoint_relation_def Let_def
isRecvEP_endpoint_case
tcb_queue_relation'_def
typ_heap_simps' endpoint_state_defs)
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def)
apply (rule conjI)
apply (clarsimp simp: cpspace_relation_def update_ep_map_tos
typ_heap_simps')
apply (erule(1) cpspace_relation_ep_update_ep2)
apply (simp add: cendpoint_relation_def endpoint_state_defs)
apply simp
apply (simp add: carch_state_relation_def cmachine_state_relation_def
h_t_valid_clift_Some_iff update_ep_map_tos
typ_heap_simps')
(* Longer queue: new head's tcbEPPrev_C is nulled, endpoint stays RecvEP. *)
apply (clarsimp simp: neq_Nil_conv cendpoint_relation_def Let_def
isRecvEP_endpoint_case tcb_queue_relation'_def
typ_heap_simps' endpoint_state_defs)
apply (clarsimp simp: is_aligned_weaken[OF is_aligned_tcb_ptr_to_ctcb_ptr]
tcb_at_not_NULL)
apply (drule(1) obj_at_cslift_tcb)+
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def
typ_heap_simps' tcb_at_not_NULL[OF obj_at'_weakenE, OF _ TrueI])
apply (rule conjI)
apply (clarsimp simp: cpspace_relation_def update_ep_map_tos
update_tcb_map_tos typ_heap_simps')
apply (rule conjI, erule ctcb_relation_null_queue_ptrs)
apply (rule ext, simp add: tcb_null_queue_ptrs_def
split: if_split)
apply (rule conjI)
apply (rule cpspace_relation_ep_update_ep, assumption+)
apply (simp add: Let_def cendpoint_relation_def EPState_Recv_def)
apply (simp add: tcb_queue_relation'_def tcb_queue_update_other)
apply (simp add: isRecvEP_def)
apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1])
apply simp
apply (rule cnotification_relation_ep_queue [OF invs_sym'], assumption+)
apply (simp add: isRecvEP_def)
apply simp
apply (erule (1) map_to_ko_atI')
apply (simp add: carch_state_relation_def typ_heap_simps'
cmachine_state_relation_def h_t_valid_clift_Some_iff
update_ep_map_tos)
apply (erule cready_queues_relation_null_queue_ptrs)
apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split)
done
(* The tcbEPPrev_C and tcbEPNext_C record updates commute. *)
lemma tcb_NextPrev_C_update_swap:
"tcbEPPrev_C_update f (tcbEPNext_C_update g tcb)
= tcbEPNext_C_update g (tcbEPPrev_C_update f tcb)"
by simp
(* A thread whose state P carries no tcb_st_refs cannot be in the queue of a
   non-idle endpoint, by symmetry of the reference graph. *)
lemma st_tcb_at_not_in_ep_queue:
"\<lbrakk> st_tcb_at' P t s; ko_at' ep epptr s; sym_refs (state_refs_of' s);
ep \<noteq> IdleEP; \<And>ts. P ts \<Longrightarrow> tcb_st_refs_of' ts = {} \<rbrakk>
\<Longrightarrow> t \<notin> set (epQueue ep)"
apply clarsimp
apply (drule(1) sym_refs_ko_atD')
apply (cases ep, simp_all add: st_tcb_at_refs_of_rev')
apply (fastforce simp: st_tcb_at'_def obj_at'_def projectKOs)+
done
(* Analogue for notifications: a thread whose state carries no TCBSignal
   reference to ntfnptr is not in a WaitingNtfn queue. *)
lemma st_tcb_at_not_in_ntfn_queue:
"\<lbrakk> st_tcb_at' P t s; ko_at' ntfn ntfnptr s; sym_refs (state_refs_of' s); ntfnObj ntfn = WaitingNtfn xs;
\<And>ts. P ts \<Longrightarrow> (ntfnptr, TCBSignal) \<notin> tcb_st_refs_of' ts \<rbrakk>
\<Longrightarrow> t \<notin> set xs"
apply (drule(1) sym_refs_ko_atD')
apply (clarsimp simp: st_tcb_at_refs_of_rev')
apply (drule_tac x="(t, NTFNSignal)" in bspec, simp)
apply (fastforce simp: st_tcb_at'_def obj_at'_def projectKOs ko_wp_at'_def tcb_bound_refs'_def)
done
(* Chain two single-point heap-function updates that each preserve a
   cnotification_relation into the combined double update. *)
lemma cntfn_relation_double_fun_upd:
"\<lbrakk> cnotification_relation mp ntfn ntfn'
= cnotification_relation (mp(a := b)) ntfn ntfn';
cnotification_relation (mp(a := b)) ntfn ntfn'
= cnotification_relation (mp(a := b, c := d)) ntfn ntfn' \<rbrakk>
\<Longrightarrow> cnotification_relation mp ntfn ntfn'
= cnotification_relation (mp(a := b, c := d)) ntfn ntfn'"
by simp
(* From symmetry of a pointwise-updated reference graph: every reference out
   of an object at p (\<noteq> p') either targets the updated point p' (and is
   mirrored in S) or is mirrored at its unchanged target. *)
lemma sym_refs_upd_ko_atD':
"\<lbrakk> ko_at' ko p s; sym_refs ((state_refs_of' s) (p' := S)); p \<noteq> p' \<rbrakk>
\<Longrightarrow> \<forall>(x, tp) \<in> refs_of' (injectKO ko). (x = p' \<and> (p, symreftype tp) \<in> S)
\<or> (x \<noteq> p' \<and> ko_wp_at' (\<lambda>ko. (p, symreftype tp) \<in> refs_of' ko)x s)"
apply (clarsimp del: disjCI)
apply (drule ko_at_state_refs_ofD')
apply (drule_tac y=a and tp=b and x=p in sym_refsD[rotated])
apply simp
apply (case_tac "a = p'")
apply simp
apply simp
apply (erule state_refs_of'_elemD)
done
(* Realise an updated reference graph as an actual state s': replacing the
   object at p by koEx (same objBits, refs exactly S) yields a state with
   sym_refs in which all other endpoints and notifications are unchanged. *)
lemma sym_refs_upd_sD:
"\<lbrakk> sym_refs ((state_refs_of' s) (p := S)); valid_pspace' s;
ko_at' ko p s; refs_of' (injectKO koEx) = S;
objBits koEx = objBits ko \<rbrakk>
\<Longrightarrow> \<exists>s'. sym_refs (state_refs_of' s')
\<and> (\<forall>p' (ko' :: endpoint). ko_at' ko' p' s \<and> injectKO ko' \<noteq> injectKO ko
\<longrightarrow> ko_at' ko' p' s')
\<and> (\<forall>p' (ko' :: Structures_H.notification). ko_at' ko' p' s \<and> injectKO ko' \<noteq> injectKO ko
\<longrightarrow> ko_at' ko' p' s')
\<and> (ko_at' koEx p s')"
apply (rule exI, rule conjI)
apply (rule state_refs_of'_upd[where ko'="injectKO koEx" and ptr=p and s=s,
THEN ssubst[where P=sym_refs], rotated 2])
apply simp+
apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs)
apply (clarsimp simp: project_inject objBits_def)
apply (clarsimp simp: obj_at'_def ps_clear_upd projectKOs
split: if_split)
apply (clarsimp simp: project_inject objBits_def)
apply auto
done
(* Specialisation to TCBs: restricting a thread's references to its TCBBound
   entry can be realised by a state where the thread is Running (same bound
   notification), leaving all endpoints and notifications in place. *)
lemma sym_refs_upd_tcb_sD:
"\<lbrakk> sym_refs ((state_refs_of' s) (p := {r \<in> state_refs_of' s p. snd r = TCBBound})); valid_pspace' s;
ko_at' (tcb :: tcb) p s \<rbrakk>
\<Longrightarrow> \<exists>s'. sym_refs (state_refs_of' s')
\<and> (\<forall>p' (ko' :: endpoint).
ko_at' ko' p' s \<longrightarrow> ko_at' ko' p' s')
\<and> (\<forall>p' (ko' :: Structures_H.notification).
ko_at' ko' p' s \<longrightarrow> ko_at' ko' p' s')
\<and> (st_tcb_at' (op = Running) p s')"
apply (drule(2) sym_refs_upd_sD[where koEx="makeObject\<lparr>tcbState := Running, tcbBoundNotification := tcbBoundNotification tcb\<rparr>"])
apply (clarsimp dest!: ko_at_state_refs_ofD')
apply (simp add: objBits_simps)
apply (erule exEI)
apply clarsimp
apply (auto simp: st_tcb_at'_def elim!: obj_at'_weakenE)
done
(* Correspondence for the fastpath's inline endpoint enqueue: the abstract
   setEndpoint that appends the current thread to the endpoint's receive
   queue (IdleEP becomes a singleton RecvEP; RecvEP gets the thread appended)
   matches the C code that reads the queue tail and either initialises an
   empty queue (tail = NULL branch) or links the new TCB after the old tail,
   in both cases marking the endpoint EPState_Recv.  The precondition
   excludes send endpoints (K (\<not> isSendEP ep)) and carries the
   restricted sym_refs fact consumed by sym_refs_upd_tcb_sD below. *)
lemma fastpath_enqueue_ccorres:
"\<lbrakk> epptr' = ep_Ptr epptr \<rbrakk> \<Longrightarrow>
ccorres dc xfdc
(ko_at' ep epptr and (\<lambda>s. thread = ksCurThread s)
and (\<lambda>s. sym_refs ((state_refs_of' s) (thread := {r \<in> state_refs_of' s thread. snd r = TCBBound})))
and K (\<not> isSendEP ep) and valid_pspace' and cur_tcb')
UNIV hs
(setEndpoint epptr (case ep of IdleEP \<Rightarrow> RecvEP [thread] | RecvEP ts \<Rightarrow> RecvEP (ts @ [thread])))
(\<acute>ret__unsigned :== CALL endpoint_ptr_get_epQueue_tail(epptr');;
\<acute>endpointTail :== tcb_Ptr \<acute>ret__unsigned;;
IF \<acute>endpointTail = tcb_Ptr 0 THEN
(Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t \<acute>ksCurThread\<rbrace>
(ptr_basic_update (\<lambda>s. tcb_Ptr_Ptr &((ksCurThread_' (globals s))\<rightarrow>[''tcbEPPrev_C''])) (\<lambda>_. NULL)));;
(Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t \<acute>ksCurThread\<rbrace>
(ptr_basic_update (\<lambda>s. tcb_Ptr_Ptr &((ksCurThread_' (globals s))\<rightarrow>[''tcbEPNext_C''])) (\<lambda>_. NULL)));;
(CALL endpoint_ptr_set_epQueue_head_np(epptr',ucast (ptr_val \<acute>ksCurThread)));;
(CALL endpoint_ptr_mset_epQueue_tail_state(epptr',ucast (ptr_val \<acute>ksCurThread),
scast EPState_Recv))
ELSE
Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t \<acute>endpointTail\<rbrace>
(ptr_basic_update (\<lambda>s. tcb_Ptr_Ptr &((endpointTail_' s)\<rightarrow>[''tcbEPNext_C'']))
(ksCurThread_' o globals));;
(Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t \<acute>ksCurThread\<rbrace>
(ptr_basic_update (\<lambda>s. tcb_Ptr_Ptr &((ksCurThread_' (globals s))\<rightarrow>[''tcbEPPrev_C'']))
endpointTail_'));;
(Guard C_Guard \<lbrace>hrs_htd \<acute>t_hrs \<Turnstile>\<^sub>t \<acute>ksCurThread\<rbrace>
(ptr_basic_update (\<lambda>s. tcb_Ptr_Ptr &((ksCurThread_' (globals s))\<rightarrow>[''tcbEPNext_C'']))
(\<lambda>_. NULL)));;
(CALL endpoint_ptr_mset_epQueue_tail_state(epptr',ucast (ptr_val \<acute>ksCurThread),
scast EPState_Recv))
FI)"
unfolding setEndpoint_def
apply clarsimp
apply (rule setObject_ccorres_helper[rotated])
apply simp
apply (simp add: objBits_simps)
apply (rule conseqPre, vcg)
apply clarsimp
apply (drule(1) ko_at_obj_congD')
apply (frule ko_at_valid_ep', clarsimp)
apply (rule cmap_relationE1[OF cmap_relation_ep], assumption,
erule ko_at_projectKO_opt)
apply (simp add: cur_tcb'_def)
apply (drule(1) obj_at_cslift_tcb)
apply (clarsimp simp: typ_heap_simps' valid_ep'_def rf_sr_ksCurThread)
(* Case split on the endpoint; SendEP is ruled out by \<not> isSendEP,
   leaving the RecvEP (non-empty queue) and IdleEP (empty queue) cases. *)
apply (cases ep,
simp_all add: isSendEP_def cendpoint_relation_def Let_def
tcb_queue_relation'_def)
(* RecvEP case: append the current thread after the existing queue tail. *)
apply (rename_tac list)
apply (clarsimp simp: NULL_ptr_val[symmetric] tcb_queue_relation_last_not_NULL
ct_in_state'_def
dest!: trans [OF sym [OF ptr_val_def] arg_cong[where f=ptr_val]])
apply (frule obj_at_cslift_tcb[rotated], erule(1) bspec[OF _ last_in_set])
apply clarsimp
apply (drule(2) sym_refs_upd_tcb_sD)
apply clarsimp
apply (frule st_tcb_at_not_in_ep_queue,
fastforce, simp+)
apply (subgoal_tac "ksCurThread \<sigma> \<noteq> last list")
prefer 2
apply clarsimp
apply (clarsimp simp: typ_heap_simps' EPState_Recv_def mask_def
is_aligned_weaken[OF is_aligned_tcb_ptr_to_ctcb_ptr])
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def)
apply (rule conjI)
apply (clarsimp simp: cpspace_relation_def update_ep_map_tos
typ_heap_simps')
apply (rule conjI, erule ctcb_relation_null_queue_ptrs)
apply (rule ext, simp add: tcb_null_queue_ptrs_def
split: if_split)
apply (rule conjI)
apply (rule_tac S="tcb_ptr_to_ctcb_ptr ` set (ksCurThread \<sigma> # list)"
in cpspace_relation_ep_update_an_ep,
assumption+)
apply (simp add: cendpoint_relation_def Let_def EPState_Recv_def
tcb_queue_relation'_def)
apply (drule_tac qend="tcb_ptr_to_ctcb_ptr (last list)"
and qend'="tcb_ptr_to_ctcb_ptr (ksCurThread \<sigma>)"
and tn_update="tcbEPNext_C_update"
and tp_update="tcbEPPrev_C_update"
in tcb_queue_relation_append,
clarsimp+, simp_all)[1]
apply (rule sym, erule init_append_last)
apply (fastforce simp: tcb_at_not_NULL)
apply (clarsimp simp add: tcb_at_not_NULL[OF obj_at'_weakenE[OF _ TrueI]])
apply clarsimp+
apply (subst st_tcb_at_not_in_ep_queue, assumption, blast, clarsimp+)
apply (drule(1) ep_ep_disjoint[rotated -1, where epptr=epptr],
blast, blast,
simp_all add: Int_commute endpoint_not_idle_cases image_image)[1]
apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1])
apply simp
apply (rule cntfn_relation_double_fun_upd)
apply (rule cnotification_relation_ep_queue, assumption+)
apply fastforce
apply (simp add: isRecvEP_def)
apply simp
apply (fastforce dest!: map_to_ko_atI)
apply (rule cnotification_relation_q_cong)
apply (clarsimp split: if_split)
apply (clarsimp simp: restrict_map_def ntfn_q_refs_of'_def
split: if_split Structures_H.notification.split_asm Structures_H.ntfn.split_asm)
apply (erule notE[rotated], erule_tac ntfnptr=p and ntfn=a in st_tcb_at_not_in_ntfn_queue,
auto dest!: map_to_ko_atI)[1]
apply (simp add: carch_state_relation_def typ_heap_simps' update_ep_map_tos
cmachine_state_relation_def h_t_valid_clift_Some_iff)
apply (erule cready_queues_relation_null_queue_ptrs)
apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split)
(* IdleEP case: the queue was empty, the current thread becomes both head
   and tail of the new singleton receive queue. *)
apply (clarsimp simp: typ_heap_simps' EPState_Recv_def mask_def
is_aligned_weaken[OF is_aligned_tcb_ptr_to_ctcb_ptr])
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def)
apply (drule(2) sym_refs_upd_tcb_sD)
apply (rule conjI)
apply (clarsimp simp: cpspace_relation_def update_ep_map_tos
typ_heap_simps' ct_in_state'_def)
apply (rule conjI, erule ctcb_relation_null_queue_ptrs)
apply (rule ext, simp add: tcb_null_queue_ptrs_def
split: if_split)
apply (rule conjI)
apply (rule_tac S="{tcb_ptr_to_ctcb_ptr (ksCurThread \<sigma>)}"
in cpspace_relation_ep_update_an_ep, assumption+)
apply (simp add: cendpoint_relation_def Let_def EPState_Recv_def
tcb_queue_relation'_def)
apply clarsimp+
apply (erule notE[rotated], erule st_tcb_at_not_in_ep_queue,
auto)[1]
apply (erule iffD1 [OF cmap_relation_cong, OF refl refl, rotated -1])
apply simp
apply (rule cnotification_relation_q_cong)
apply (clarsimp split: if_split)
apply (clarsimp simp: restrict_map_def ntfn_q_refs_of'_def
split: if_split Structures_H.notification.split_asm Structures_H.ntfn.split_asm)
apply (erule notE[rotated], rule_tac ntfnptr=p and ntfn=a in st_tcb_at_not_in_ntfn_queue,
assumption+, auto dest!: map_to_ko_atI)[1]
apply (simp add: carch_state_relation_def typ_heap_simps' update_ep_map_tos
cmachine_state_relation_def h_t_valid_clift_Some_iff)
apply (erule cready_queues_relation_null_queue_ptrs)
apply (rule ext, simp add: tcb_null_ep_ptrs_def split: if_split)
done
(* updateCap corresponds to a single C Basic statement that writes val s
   into the cap_C field of the cte at ptr s, provided ccap_relation holds
   between cap and the written C value and ptr s points at dest.
   Registered as a [corres] rule for automated use. *)
lemma ccorres_updateCap [corres]:
fixes ptr :: "cstate \<Rightarrow> cte_C ptr" and val :: "cstate \<Rightarrow> cap_C"
shows "ccorres dc xfdc \<top>
({s. ccap_relation cap (val s)} \<inter> {s. ptr s = Ptr dest}) hs
(updateCap dest cap)
(Basic
(\<lambda>s. globals_update
(t_hrs_'_update
(hrs_mem_update (heap_update (Ptr &(ptr s\<rightarrow>[''cap_C''])) (val s)))) s))"
unfolding updateCap_def
apply (cinitlift ptr)
apply (erule ssubst)
apply (rule ccorres_guard_imp2)
apply (rule ccorres_pre_getCTE)
apply (rule_tac P = "\<lambda>s. ctes_of s dest = Some rva" in ccorres_from_vcg [where P' = "{s. ccap_relation cap (val s)}"])
apply (rule allI)
apply (rule conseqPre)
apply vcg
apply clarsimp
apply (rule fst_setCTE [OF ctes_of_cte_at], assumption)
apply (erule bexI [rotated])
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (frule (1) rf_sr_ctes_of_clift)
apply (clarsimp simp add: rf_sr_def cstate_relation_def typ_heap_simps
Let_def cpspace_relation_def)
apply (rule conjI)
apply (erule (3) cpspace_cte_relation_upd_capI)
apply (erule_tac t = s' in ssubst)
apply (simp add: heap_to_user_data_def)
apply (rule conjI)
apply (erule (1) setCTE_tcb_case)
apply (simp add: carch_state_relation_def cmachine_state_relation_def
cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"]
typ_heap_simps h_t_valid_clift_Some_iff)
apply clarsimp
done
(* Executing setCTE ptr cte from \<sigma> reaches a state still related
   (rf_sr) to a C state s' whose heap differs from s only by a heap_update
   of a related C cte (ccte_relation cte cte') at ptr, with all other
   globals unchanged (the t_hrs_'-erased globals of s' and s coincide). *)
lemma setCTE_rf_sr:
"\<lbrakk> (\<sigma>, s) \<in> rf_sr; ctes_of \<sigma> ptr = Some cte'';
t_hrs_' (globals s') = hrs_mem_update
(heap_update (cte_Ptr ptr) cte')
(t_hrs_' (globals s));
ccte_relation cte cte';
(globals s')\<lparr> t_hrs_' := undefined \<rparr>
= (globals s)\<lparr> t_hrs_' := undefined \<rparr> \<rbrakk>
\<Longrightarrow>
\<exists>x\<in>fst (setCTE ptr cte \<sigma>).
(snd x, s') \<in> rf_sr"
apply (rule fst_setCTE[OF ctes_of_cte_at], assumption)
apply (erule rev_bexI)
apply clarsimp
apply (frule(1) rf_sr_ctes_of_clift)
(* Reduce the globals equation to: only the heap component changed. *)
apply (subgoal_tac "\<exists>hrs. globals s' = globals s
\<lparr> t_hrs_' := hrs \<rparr>")
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def
typ_heap_simps' cpspace_relation_def)
apply (rule conjI)
apply (erule(2) cmap_relation_updI, simp)
apply (erule_tac t = s'a in ssubst)
apply (simp add: heap_to_user_data_def)
apply (rule conjI)
apply (erule(1) setCTE_tcb_case)
apply (simp add: carch_state_relation_def cmachine_state_relation_def
cvariable_array_map_const_add_map_option[where f="tcb_no_ctes_proj"]
typ_heap_simps' h_t_valid_clift_Some_iff)
apply (cases "globals s", cases "globals s'")
apply simp
done
(* Variant of setCTE_rf_sr for the read-modify-write pattern
   getCTE ptr >>= setCTE ptr \<circ> f: the C heap update with a cte'
   related to f cte preserves rf_sr.  Proved by reduction to setCTE_rf_sr. *)
lemma getCTE_setCTE_rf_sr:
"\<lbrakk> (\<sigma>, s) \<in> rf_sr; ctes_of \<sigma> ptr = Some cte;
t_hrs_' (globals s') = hrs_mem_update
(heap_update (cte_Ptr ptr) cte')
(t_hrs_' (globals s));
ccte_relation (f cte) cte';
(globals s')\<lparr> t_hrs_' := undefined \<rparr>
= (globals s)\<lparr> t_hrs_' := undefined \<rparr> \<rbrakk>
\<Longrightarrow>
\<exists>x\<in>fst ((do cte \<leftarrow> getCTE ptr;
setCTE ptr (f cte)
od)
\<sigma>).
(snd x, s') \<in> rf_sr"
apply (drule setCTE_rf_sr, assumption+)
apply (clarsimp simp: Bex_def in_bind_split in_getCTE2 cte_wp_at_ctes_of)
done
(* Unfold ccte_relation into its two components: ccap_relation on the
   contained capability, and agreement of the (lifted) C MDB node with the
   abstract cteMDBNode.  Useful for rewriting cte goals into cap goals. *)
lemma ccte_relation_eq_ccap_relation:
notes option.case_cong_weak [cong]
shows
"ccte_relation cte ccte
= (ccap_relation (cteCap cte) (cte_C.cap_C ccte)
\<and> mdb_node_to_H (mdb_node_lift (cteMDBNode_C ccte))
= (cteMDBNode cte))"
apply (simp add: ccte_relation_def map_option_Some_eq2 cte_lift_def
ccap_relation_def)
apply (simp add: cte_to_H_def split: option.split)
apply (cases cte, clarsimp simp: c_valid_cte_def conj_comms)
done
(* The C helper cap_reply_cap_ptr_new_np, writing a reply cap directly into
   the cap field of the cte at ptr, corresponds to
   updateCap ptr (ReplyCap thread m).  The C arguments carry the TCB pointer
   and the master flag (from_bool m). *)
lemma cap_reply_cap_ptr_new_np_updateCap_ccorres:
"ccorres dc xfdc
(cte_at' ptr and tcb_at' thread)
(UNIV \<inter> {s. cap_ptr_' s = cap_Ptr &(cte_Ptr ptr \<rightarrow> [''cap_C''])}
\<inter> {s. capTCBPtr___unsigned_long_' s = ptr_val (tcb_ptr_to_ctcb_ptr thread)}
\<inter> {s. capReplyMaster___unsigned_long_' s = from_bool m}) []
(updateCap ptr (ReplyCap thread m))
(Call cap_reply_cap_ptr_new_np_'proc)"
apply (rule ccorres_from_vcg, rule allI)
apply (rule conseqPre, vcg)
apply (clarsimp simp: cte_wp_at_ctes_of word_sle_def)
apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+)
apply (clarsimp simp: updateCap_def split_def typ_heap_simps'
word_sless_def word_sle_def)
(* Reuse the read-modify-write rf_sr preservation lemma proved above. *)
apply (erule(1) getCTE_setCTE_rf_sr, simp_all add: typ_heap_simps')
apply (clarsimp simp: ccte_relation_eq_ccap_relation
ccap_relation_def c_valid_cap_def)
apply (frule is_aligned_tcb_ptr_to_ctcb_ptr)
apply (rule ssubst[OF cap_lift_reply_cap])
apply (simp add: cap_get_tag_def cap_reply_cap_def
mask_def word_ao_dist
limited_and_simps
limited_and_simps1[OF lshift_limited_and, OF limited_and_from_bool])
apply (simp add: cap_to_H_simps word_ao_dist cl_valid_cap_def
limited_and_simps cap_reply_cap_def
limited_and_simps1[OF lshift_limited_and, OF limited_and_from_bool]
shiftr_over_or_dist word_bw_assocs)
done
(* The C function fastpath_copy_mrs corresponds to copying the first len
   message registers from TCB src to TCB dest (getRegister from src, then
   setRegister into dest, for each of the first len msgRegisters).
   Requires len \<le> length ARM_H.msgRegisters; the C length argument
   must equal len.  The core is a mapM_x/while loop correspondence. *)
lemma fastpath_copy_mrs_ccorres:
notes nat_min_simps [simp del]
shows
"ccorres dc xfdc (\<top> and (\<lambda>_. len <= length ARM_H.msgRegisters))
(UNIV \<inter> {s. unat (length___unsigned_long_' s) = len}
\<inter> {s. src_' s = tcb_ptr_to_ctcb_ptr src}
\<inter> {s. dest_' s = tcb_ptr_to_ctcb_ptr dest}) []
(forM_x (take len ARM_H.msgRegisters)
(\<lambda>r. do v \<leftarrow> asUser src (getRegister r);
asUser dest (setRegister r v) od))
(Call fastpath_copy_mrs_'proc)"
apply (rule ccorres_gen_asm)
apply (cinit' lift: length___unsigned_long_' src_' dest_' simp: word_sle_def word_sless_def)
apply (unfold whileAnno_def)
apply (rule ccorres_rel_imp)
(* Match the abstract mapM_x with the C while loop, one register per step. *)
apply (rule_tac F="K \<top>" in ccorres_mapM_x_while)
apply clarsimp
apply (rule ccorres_guard_imp2)
apply (rule ccorres_rhs_assoc)+
apply (rule_tac xf'="i_'" in ccorres_abstract, ceqv)
apply (rule ccorres_Guard_Seq)+
apply csymbr
apply (ctac(no_vcg))
apply ctac
apply wp
apply (clarsimp simp: rf_sr_ksCurThread)
apply (simp add: msgRegisters_ccorres[symmetric] length_msgRegisters)
apply (simp add: n_msgRegisters_def msgRegisters_unfold)
apply (drule(1) order_less_le_trans)
apply (clarsimp simp: "StrictC'_register_defs" msgRegistersC_def fupdate_def
| drule nat_less_cases' | erule disjE)+
apply (simp add: min.absorb2)
apply (rule allI, rule conseqPre, vcg)
apply (simp)
apply (simp add: length_msgRegisters n_msgRegisters_def
word_bits_def hoare_TrueI)+
done
(* Hoare rule: after switchToThread t, ksCurThread is t, so any property
   P t established beforehand holds of ksCurThread afterwards. *)
lemma switchToThread_ksCurThread:
"\<lbrace>\<lambda>s. P t\<rbrace> switchToThread t \<lbrace>\<lambda>rv s. P (ksCurThread s)\<rbrace>"
apply (simp add: switchToThread_def setCurThread_def)
apply (wp | simp)+
done
(* updateCap only replaces the capability of a cte, so any cte_wp_at'
   property that depends solely on the MDB node is preserved. *)
lemma updateCap_cte_wp_at_cteMDBNode:
"\<lbrace>cte_wp_at' (\<lambda>cte. P (cteMDBNode cte)) p\<rbrace>
updateCap ptr cap
\<lbrace>\<lambda>rv. cte_wp_at' (\<lambda>cte. P (cteMDBNode cte)) p\<rbrace>"
apply (wp updateCap_cte_wp_at_cases)
apply (simp add: o_def)
done
(* When ctes_of yields Some cte at p, cte_wp_at' P p collapses to the
   plain predicate application P cte. *)
lemma ctes_of_Some_cte_wp_at:
"ctes_of s p = Some cte \<Longrightarrow> cte_wp_at' P p s = P cte"
by (clarsimp simp: cte_wp_at_ctes_of)
(* Weakest-precondition rule for reading a user register: the
   postcondition Q must hold for whatever value register r has in the
   TCB's (architecture) context at t. *)
lemma user_getreg_wp:
"\<lbrace>\<lambda>s. tcb_at' t s \<and> (\<forall>rv. obj_at' (\<lambda>tcb. (atcbContextGet o tcbArch) tcb r = rv) t s \<longrightarrow> Q rv s)\<rbrace>
asUser t (getRegister r) \<lbrace>Q\<rbrace>"
apply (rule_tac Q="\<lambda>rv s. \<exists>rv'. rv' = rv \<and> Q rv' s" in hoare_post_imp)
apply simp
apply (rule hoare_pre, wp hoare_vcg_ex_lift user_getreg_rv)
apply (clarsimp simp: obj_at'_def)
done
(* VCG specification for the C getter: when the cap's tag is
   cap_page_directory_cap, the return value is the capPDBasePtr field of
   the lifted capability.  (A second, unconditional-precondition variant
   of the getter spec, hence the 2 suffix.) *)
lemma cap_page_directory_cap_get_capPDBasePtr_spec2:
"\<forall>s. \<Gamma>\<turnstile> \<lbrace>s. True\<rbrace>
Call cap_page_directory_cap_get_capPDBasePtr_'proc
\<lbrace>cap_get_tag \<^bsup>s\<^esup>cap = scast cap_page_directory_cap
\<longrightarrow> \<acute>ret__unsigned = capPDBasePtr_CL (cap_page_directory_cap_lift \<^bsup>s\<^esup>cap)\<rbrace>"
apply (hoare_rule HoarePartial.ProcNoRec1)
apply vcg
apply (clarsimp simp: word_sle_def word_sless_def
cap_page_directory_cap_lift_def
cap_lift_page_directory_cap)
done
(* Two directly nested guards in front of a statement (followed by d) can
   be swapped without affecting a ccorres correspondence: both orders
   check the same two guard conditions before running c. *)
lemma ccorres_flip_Guard2:
assumes cc: "ccorres_underlying sr \<Gamma> r xf arrel axf A C hs a (Guard F S (Guard F1 S1 c) ;; d)"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf A C hs a (Guard F1 S1 (Guard F S c) ;; d)"
apply (rule ccorres_name_pre_C)
using cc
(* Split on whether both guards pass; otherwise one of them faults. *)
apply (case_tac "s \<in> (S1 \<inter> S)")
apply (clarsimp simp: ccorres_underlying_def)
apply (erule exec_handlers.cases;
fastforce elim!: exec_Normal_elim_cases intro: exec_handlers.intros exec.Guard exec.Seq)
apply (clarsimp simp: ccorres_underlying_def)
apply (case_tac "s \<in> S")
apply (fastforce intro: exec.Guard exec.GuardFault exec_handlers.intros exec.Seq)
apply (fastforce intro: exec.Guard exec.GuardFault exec_handlers.intros exec.Seq)
done
(* Abstract over the C-side current-thread pointer: given a ceqv fact
   abstracting ksCurThread_' out of d, a correspondence that holds for
   every abstract current thread ct (with the C pointer fixed to
   tcb_ptr_to_ctcb_ptr ct) yields the correspondence for d itself, with
   guards phrased in terms of the current thread on each side. *)
lemma ccorres_abstract_ksCurThread:
assumes ceqv: "\<And>rv' t t'. ceqv \<Gamma> (\<lambda>s. ksCurThread_' (globals s)) rv' t t' d (d' rv')"
and cc: "\<And>ct. ccorres_underlying rf_sr \<Gamma> r xf arrel axf (G ct) (G' ct) hs a (d' (tcb_ptr_to_ctcb_ptr ct))"
shows "ccorres_underlying rf_sr \<Gamma> r xf arrel axf (\<lambda>s. G (ksCurThread s) s)
{s. s \<in> G' (ctcb_ptr_to_tcb_ptr (ksCurThread_' (globals s)))} hs a d"
apply (rule ccorres_guard_imp)
prefer 2
apply assumption
apply (rule ccorres_abstract[OF ceqv, where G'="\<lambda>ct. \<lbrace>ct = \<acute>ksCurThread\<rbrace> \<inter> G' (ctcb_ptr_to_tcb_ptr ct)"])
(* Every C TCB pointer is the image of some abstract TCB pointer. *)
apply (subgoal_tac "\<exists>t. rv' = tcb_ptr_to_ctcb_ptr t")
apply clarsimp
apply (rule ccorres_guard_imp2)
apply (rule cc)
apply (clarsimp simp: rf_sr_ksCurThread)
apply (metis tcb_ptr_to_tcb_ptr)
apply simp
done
(* Rewrite the numeric cte_C size (as a word32) back into its symbolic
   form, so C-side cte offsets can be folded rather than left as literals. *)
lemmas cte_C_numeral_fold = cte_C_size[THEN meta_eq_to_obj_eq,
THEN arg_cong[where f="of_nat :: _ \<Rightarrow> word32"], simplified, symmetric]
(* Variant of the tcb-cte guard-moving rules with cte_C sizes folded,
   matching the literal offsets that appear in the fastpath C code. *)
lemmas ccorres_move_c_guard_tcb_ctes2
= ccorres_move_c_guard_tcb_ctes[unfolded cte_C_numeral_fold]
(* setUntypedCapAsFull only acts on untyped caps, so with a (non-master)
   ReplyCap as the new cap it is a no-op.  Declared [simp] so it vanishes
   from cteInsert unfoldings in the fastpath proofs. *)
lemma setUntypedCapAsFull_replyCap[simp]:
"setUntypedCapAsFull cap (ReplyCap curThread False) slot = return ()"
by (clarsimp simp:setUntypedCapAsFull_def isCap_simps)
end
context kernel_m begin
(* From obj_at' P t plus validity and rf_sr, obtain the TCB at t, its
   related C TCB, the NULL-iff-None agreement of the bound-notification
   pointer, and — when a notification is bound — the notification object
   on both the abstract and C sides with cnotification_relation. *)
lemma obj_at_bound_tcb_grandD:
"\<lbrakk> obj_at' P t s; valid_objs' s; no_0_obj' s; (s, s') \<in> rf_sr \<rbrakk>
\<Longrightarrow> \<exists>tcb tcb' ntfn ntfn'. ko_at' tcb t s \<and> P tcb
\<and> cslift s' (tcb_ptr_to_ctcb_ptr t) = Some tcb'
\<and> ctcb_relation tcb tcb'
\<and> ((tcbBoundNotification_C tcb' = NULL) = (tcbBoundNotification tcb = None))
\<and> (tcbBoundNotification tcb \<noteq> None \<longrightarrow> ko_at' ntfn (the (tcbBoundNotification tcb)) s)
\<and> (tcbBoundNotification tcb \<noteq> None \<longrightarrow> cslift s' (tcbBoundNotification_C tcb') = Some ntfn')
\<and> (tcbBoundNotification tcb \<noteq> None \<longrightarrow> cnotification_relation (cslift s') ntfn ntfn')"
apply (clarsimp simp: pred_tcb_at'_def)
apply (drule(1) obj_at_cslift_tcb, clarsimp)
apply (rule exI, rule conjI, assumption)
apply (clarsimp simp: ctcb_relation_def
option_to_ptr_def option_to_0_def)
apply (simp add: return_def split: option.split_asm)
apply (drule_tac s="ntfn_Ptr x"for x in sym)
(* valid_tcb' guarantees the bound notification pointer is a real ntfn. *)
apply (drule(1) ko_at_valid_objs', clarsimp simp: projectKOs)
apply (clarsimp simp: projectKOs valid_obj'_def valid_tcb'_def)
apply (drule obj_at_ko_at', clarsimp)
apply (rule conjI, clarsimp)
apply (rule cmap_relationE1[OF cmap_relation_ntfn], assumption, erule ko_at_projectKO_opt)
apply auto
done
(* Under cnotification_relation, the C notification state equals
   NtfnState_Active exactly when the abstract notification isActive. *)
lemma cnotification_relation_isActive:
"cnotification_relation tcbs ntfn ntfn'
\<Longrightarrow> (notification_CL.state_CL (notification_lift ntfn') = scast NtfnState_Active)
= EndpointDecls_H.isActive ntfn"
apply (clarsimp simp: cnotification_relation_def Let_def)
apply (cases ntfn, simp)
apply (rename_tac ntfna ooeuoue)
apply (case_tac ntfna, simp_all add: notification_state_defs isActive_def)
done
(* wp rule for the optional-notification read pattern: with x = None the
   result is the default v; with x = Some ptr it is f applied to the
   notification at ptr (via liftM f \<dollar> getNotification). *)
lemma option_case_liftM_getNotification_wp:
"\<lbrace>\<lambda>s. \<forall>rv. (case x of None \<Rightarrow> rv = v | Some p \<Rightarrow> obj_at' (\<lambda>ntfn. f ntfn = rv) p s)
\<longrightarrow> Q rv s\<rbrace> case x of None \<Rightarrow> return v | Some ptr \<Rightarrow> liftM f $ getNotification ptr \<lbrace> Q \<rbrace>"
apply (rule hoare_pre, (wpc; wp getNotification_wp))
apply (auto simp: obj_at'_def)
done
(* threadSet f t preserves st_tcb_at' P p, with the precondition split on
   whether p is the updated thread: for p = t, P must hold of the state of
   the TRANSFORMED tcb (f tcb); otherwise the existing st_tcb_at' suffices. *)
lemma threadSet_st_tcb_at_state:
"\<lbrace>\<lambda>s. tcb_at' t s \<longrightarrow> (if p = t
then obj_at' (\<lambda>tcb. P (tcbState (f tcb))) t s
else st_tcb_at' P p s)\<rbrace>
threadSet f t \<lbrace>\<lambda>_. st_tcb_at' P p\<rbrace>"
apply (rule hoare_chain)
apply (rule threadSet_obj_at'_really_strongest)
prefer 2
apply (simp add: st_tcb_at'_def)
apply (clarsimp split: if_splits simp: st_tcb_at'_def o_def)
done
lemma fastpath_call_ccorres:
notes hoare_TrueI[simp]
shows "ccorres dc xfdc
(\<lambda>s. invs' s \<and> ct_in_state' (op = Running) s
\<and> obj_at' (\<lambda>tcb. (atcbContextGet o tcbArch) tcb ARM_H.capRegister = cptr
\<and> (atcbContextGet o tcbArch) tcb ARM_H.msgInfoRegister = msginfo)
(ksCurThread s) s)
(UNIV \<inter> {s. cptr_' s = cptr} \<inter> {s. msgInfo_' s = msginfo}) []
(fastpaths SysCall) (Call fastpath_call_'proc)"
proof -
have [simp]: "scast Kernel_C.tcbCaller = tcbCallerSlot"
by (simp add:Kernel_C.tcbCaller_def tcbCallerSlot_def)
have [simp]: "scast Kernel_C.tcbVTable = tcbVTableSlot"
by (simp add:Kernel_C.tcbVTable_def tcbVTableSlot_def)
have tcbs_of_cte_wp_at_vtable:
"\<And>s tcb ptr. tcbs_of s ptr = Some tcb \<Longrightarrow>
cte_wp_at' \<top> (ptr + 0x10 * tcbVTableSlot) s"
apply (clarsimp simp:tcbs_of_def cte_at'_obj_at'
split:if_splits)
apply (drule_tac x = "0x10 * tcbVTableSlot" in bspec)
apply (simp add:tcb_cte_cases_def tcbVTableSlot_def)
apply simp
done
have tcbs_of_cte_wp_at_caller:
"\<And>s tcb ptr. tcbs_of s ptr = Some tcb \<Longrightarrow>
cte_wp_at' \<top> (ptr + 0x10 * tcbCallerSlot) s"
apply (clarsimp simp:tcbs_of_def cte_at'_obj_at'
split:if_splits)
apply (drule_tac x = "0x10 * tcbCallerSlot" in bspec)
apply (simp add:tcb_cte_cases_def tcbCallerSlot_def)
apply simp
done
have tcbs_of_aligned':
"\<And>s ptr tcb. \<lbrakk>tcbs_of s ptr = Some tcb;pspace_aligned' s\<rbrakk> \<Longrightarrow> is_aligned ptr 9"
apply (clarsimp simp:tcbs_of_def obj_at'_def split:if_splits)
apply (drule pspace_alignedD')
apply simp+
apply (simp add:projectKO_opt_tcb objBitsKO_def
split: Structures_H.kernel_object.splits)
done
show ?thesis
apply (cinit lift: cptr_' msgInfo_')
apply (simp add: catch_liftE_bindE unlessE_throw_catch_If
unifyFailure_catch_If catch_liftE
getMessageInfo_def alternative_bind
cong: if_cong call_ignore_cong del: Collect_const)
apply (rule ccorres_pre_getCurThread)
apply (rename_tac curThread)
apply (rule ccorres_symb_exec_l3[OF _ user_getreg_inv' _ empty_fail_user_getreg])+
apply (rename_tac msginfo' cptr')
apply (rule_tac P="msginfo' = msginfo \<and> cptr' = cptr" in ccorres_gen_asm)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (simp only: )
apply (csymbr, csymbr)
apply (rule_tac r'="\<lambda>ft ft'. (ft' = scast seL4_Fault_NullFault) = (ft = None)"
and xf'="fault_type_'" in ccorres_split_nothrow)
apply (rule_tac P="cur_tcb' and (\<lambda>s. curThread = ksCurThread s)"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread)
apply (drule(1) obj_at_cslift_tcb, clarsimp)
apply (clarsimp simp: typ_heap_simps' ctcb_relation_def cfault_rel_def)
apply (rule rev_bexI, erule threadGet_eq)
apply (clarsimp simp: seL4_Fault_lift_def Let_def split: if_split_asm)
apply ceqv
apply csymbr
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply (rule ccorres_alternative2)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres)
apply simp
apply simp
apply (vcg exspec=slowpath_noreturn_spec)
apply (rule ccorres_alternative1)
apply (rule ccorres_if_lhs[rotated])
apply (rule ccorres_inst[where P=\<top> and P'=UNIV])
apply simp
apply (simp del: Collect_const cong: call_ignore_cong)
apply (elim conjE)
apply (rule ccorres_abstract_ksCurThread, ceqv)
apply (simp add: getThreadCSpaceRoot_def locateSlot_conv
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_pre_getCTE2)
apply (rule ccorres_move_array_assertion_tcb_ctes
ccorres_move_c_guard_tcb_ctes2
ccorres_move_const_guard
ccorres_rhs_assoc)+
apply (simp only: )
apply (ctac add: lookup_fp_ccorres)
apply (rename_tac luRet ep_cap)
apply (rule ccorres_abstract_ksCurThread, ceqv)
apply (rule ccorres_move_array_assertion_tcb_ctes
| simp del: Collect_const cong: call_ignore_cong)+
apply (csymbr, csymbr)
apply (simp add: ccap_relation_case_sum_Null_endpoint
of_bl_from_bool from_bool_0
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply (simp add: from_bool_0 if_1_0_0 cong: if_cong)
apply (rule ccorres_cond_true_seq)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (rule ccorres_rhs_assoc)+
apply csymbr+
apply (simp add: if_1_0_0 isRight_case_sum
del: Collect_const cong: call_ignore_cong)
apply (elim conjE)
apply (frule(1) cap_get_tag_isCap[THEN iffD2])
apply (simp add: ccap_relation_ep_helpers from_bool_0
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (csymbr, csymbr)
apply (simp add: ccap_relation_ep_helpers
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2)
apply (rule_tac xf'="\<lambda>s. (dest_' s, ret__unsigned_' s)"
and r'="\<lambda>ep v. snd v = scast EPState_Recv = isRecvEP ep
\<and> (isRecvEP ep \<longrightarrow> epQueue ep \<noteq> []
\<and> fst v = tcb_ptr_to_ctcb_ptr (hd (epQueue ep)))"
in ccorres_split_nothrow)
apply (rule ccorres_add_return2)
apply (rule ccorres_pre_getEndpoint, rename_tac ep)
apply (rule_tac P="ko_at' ep (capEPPtr (theRight luRet)) and valid_objs'"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: return_def)
apply (erule cmap_relationE1[OF cmap_relation_ep], erule ko_at_projectKO_opt)
apply (frule(1) ko_at_valid_ep')
apply (clarsimp simp: typ_heap_simps')
apply (simp add: cendpoint_relation_def Let_def isRecvEP_def
endpoint_state_defs valid_ep'_def
split: endpoint.split_asm)
apply (clarsimp simp: tcb_queue_relation'_def neq_Nil_conv)
apply (rule ceqv_tuple2)
apply ceqv
apply ceqv
apply (rename_tac send_ep send_ep_c)
apply (rule_tac P="ko_at' send_ep (capEPPtr (theRight luRet))
and valid_objs'" in ccorres_cross_over_guard)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp add: getThreadVSpaceRoot_def locateSlot_conv
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_move_c_guard_tcb_ctes2
ccorres_move_array_assertion_tcb_ctes
ccorres_move_const_guard)+
apply (rule_tac var="newVTable_'" and var_update="newVTable_'_update"
in getCTE_h_val_ccorres_split[where P=\<top>])
apply simp
apply ceqv
apply (rename_tac pd_cap pd_cap_c)
apply (rule ccorres_symb_exec_r)
apply (rule_tac xf'=ret__unsigned_' in ccorres_abstract, ceqv)
apply (rename_tac pd_cap_c_ptr_maybe)
apply csymbr+
apply (simp add: isValidVTableRoot_conv from_bool_0
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (drule isValidVTableRootD)
apply (rule_tac P="pd_cap_c_ptr_maybe = capUntypedPtr (cteCap pd_cap)"
in ccorres_gen_asm2)
apply (simp add: ccap_relation_pd_helper ptr_add_assertion_positive
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_move_array_assertion_pd
| (rule ccorres_flip_Guard ccorres_flip_Guard2,
rule ccorres_move_array_assertion_pd)
| rule ccorres_flip_Guard2, rule ccorres_Guard_True_Seq)+
apply (rule stored_hw_asid_get_ccorres_split[where P=\<top>], ceqv)
apply (rule ccorres_abstract_ksCurThread, ceqv)
apply (rename_tac ksCurThread_x)
apply (rule ccorres_move_c_guard_tcb ccorres_move_const_guard)+
apply (rule ccorres_symb_exec_l3[OF _ threadGet_inv _ empty_fail_threadGet])
apply (rule ccorres_symb_exec_l3[OF _ threadGet_inv _ empty_fail_threadGet])
apply (rename_tac curPrio destPrio)
apply (rule ccorres_seq_cond_raise[THEN iffD2])
apply (rule_tac R="obj_at' (op = curPrio \<circ> tcbPriority) curThread
and obj_at' (op = destPrio \<circ> tcbPriority)
(hd (epQueue send_ep))
and (\<lambda>s. ksCurThread s = curThread)
and (\<lambda>s. ksCurThread s = ksCurThread_x)"
in ccorres_cond2')
apply clarsimp
apply (drule(1) obj_at_cslift_tcb)+
apply (clarsimp simp: typ_heap_simps' rf_sr_ksCurThread)
apply (simp add: ctcb_relation_unat_tcbPriority_C
word_less_nat_alt linorder_not_le)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply csymbr+
apply (simp add: if_1_0_0 ccap_relation_ep_helpers from_bool_0
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp add: ccap_relation_pd_helper cap_get_tag_isCap_ArchObject2
del: Collect_const Word_Lib.ptr_add_def cong: call_ignore_cong)
apply csymbr
apply (rule ccorres_symb_exec_l3[OF _ gets_inv _ empty_fail_gets])
apply (rename_tac asidMap)
apply (rule_tac P="asid_map_pd_to_hwasids asidMap (capPDBasePtr (capCap ((cteCap pd_cap))))
= set_option (pde_stored_asid shw_asid)" in ccorres_gen_asm)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def
to_bool_def
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_move_c_guard_tcb ccorres_move_const_guard)+
apply (rule ccorres_symb_exec_l3[OF _ curDomain_inv _])
prefer 3
apply (simp only: curDomain_def, rule empty_fail_gets)
apply (rule ccorres_symb_exec_l3[OF _ threadGet_inv _ empty_fail_threadGet])
apply (rename_tac curDom destDom)
apply (rule ccorres_seq_cond_raise[THEN iffD2])
apply (rule_tac R="obj_at' (op = destDom \<circ> tcbDomain)
(hd (epQueue send_ep))
and (\<lambda>s. ksCurDomain s = curDom)"
in ccorres_cond2')
apply clarsimp
apply (drule(1) obj_at_cslift_tcb)+
apply (clarsimp simp: typ_heap_simps' rf_sr_ksCurDomain)
apply (drule ctcb_relation_tcbDomain[symmetric])
apply (clarsimp simp: up_ucast_inj_eq[symmetric] maxDom_def)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_rhs_assoc2)
apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow)
apply (simp only: ucast_id tl_drop_1 One_nat_def)
apply (rule fastpath_dequeue_ccorres)
apply simp
apply ceqv
apply csymbr
apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow)
apply (rule_tac P="cur_tcb' and (\<lambda>s. ksCurThread s = curThread)"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread)
apply (drule(1) obj_at_cslift_tcb)
apply (clarsimp simp: typ_heap_simps')
apply (rule rev_bexI, erule threadSet_eq)
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def)
apply (rule conjI)
apply (clarsimp simp: cpspace_relation_def typ_heap_simps'
update_tcb_map_tos map_to_tcbs_upd)
apply (subst map_to_ctes_upd_tcb_no_ctes, assumption)
apply (rule ball_tcb_cte_casesI, simp_all)[1]
apply (simp add: cep_relations_drop_fun_upd)
apply (erule cmap_relation_updI, erule ko_at_projectKO_opt)
apply (simp add: ctcb_relation_def cthread_state_relation_def)
apply simp
apply (rule conjI, erule cready_queues_relation_not_queue_ptrs)
apply (rule ext, simp split: if_split add: typ_heap_simps')
apply (rule ext, simp split: if_split add: typ_heap_simps')
apply (simp add: carch_state_relation_def cmachine_state_relation_def
typ_heap_simps' map_comp_update projectKO_opt_tcb
cvariable_relation_upd_const ko_at_projectKO_opt)
apply ceqv
apply (rule ccorres_abstract_ksCurThread, ceqv)
apply (rule ccorres_move_c_guard_tcb_ctes
ccorres_move_array_assertion_tcb_ctes
ccorres_move_const_guard)+
apply (simp add: getThreadReplySlot_def getThreadCallerSlot_def
locateSlot_conv
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_symb_exec_r)
apply (rule_tac xf'="replySlot_'" in ccorres_abstract, ceqv)
apply (rename_tac replySlot,
rule_tac P="replySlot = cte_Ptr (curThread
+ (tcbReplySlot << cte_level_bits))"
in ccorres_gen_asm2)
apply (rule ccorres_move_const_guard
ccorres_move_array_assertion_tcb_ctes
ccorres_move_c_guard_tcb_ctes)+
apply csymbr
apply (simp add: cteInsert_def bind_assoc dc_def[symmetric]
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_pre_getCTE2 ccorres_assert2)+
apply (rename_tac curThreadReplyCTE curThreadReplyCTE2
destCallerCTE)
apply (rule_tac P="curThreadReplyCTE2 = curThreadReplyCTE"
in ccorres_gen_asm)
apply (rule ccorres_move_c_guard_tcb_ctes2)
apply (ctac add: cap_reply_cap_ptr_new_np_updateCap_ccorres)
apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow)
apply (rule_tac P="cte_wp_at' (\<lambda>cte. cteMDBNode cte = nullMDBNode)
(hd (epQueue send_ep)
+ (tcbCallerSlot << cte_level_bits))
and cte_wp_at' (op = curThreadReplyCTE) (curThread
+ (tcbReplySlot << cte_level_bits))
and tcb_at' curThread and (no_0 o ctes_of)
and tcb_at' (hd (epQueue send_ep))"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: cte_wp_at_ctes_of size_of_def
tcb_cnode_index_defs tcbCallerSlot_def
tcbReplySlot_def cte_level_bits_def
valid_mdb'_def valid_mdb_ctes_def)
apply (subst aligned_add_aligned, erule tcb_aligned',
simp add: is_aligned_def, simp add: word_bits_def, simp)
apply (rule_tac x="hd (epQueue send_ep) + v" for v
in cmap_relationE1[OF cmap_relation_cte], assumption+)
apply (clarsimp simp: typ_heap_simps' updateMDB_def Let_def)
apply (subst if_not_P)
apply clarsimp
apply (simp add: split_def)
apply (rule getCTE_setCTE_rf_sr, simp_all)[1]
apply (case_tac destCallerCTE, case_tac curThreadReplyCTE,
case_tac "cteMDBNode curThreadReplyCTE")
apply (clarsimp simp add: ccte_relation_eq_ccap_relation)
apply (clarsimp simp: nullMDBNode_def)
apply ceqv
apply (rule ccorres_move_c_guard_cte)
apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow)
apply (rule_tac P="cte_at' (hd (epQueue send_ep)
+ (tcbCallerSlot << cte_level_bits))
and cte_wp_at' (op = curThreadReplyCTE) (curThread
+ (tcbReplySlot << cte_level_bits))
and tcb_at' (hd (epQueue send_ep))
and (no_0 o ctes_of)"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: cte_wp_at_ctes_of size_of_def
tcb_cnode_index_defs tcbCallerSlot_def
tcbReplySlot_def cte_level_bits_def)
apply (subst aligned_add_aligned, erule tcb_aligned',
simp add: is_aligned_def, simp add: word_bits_def, simp)
apply (rule_tac x="curThread + 0x20" in cmap_relationE1[OF cmap_relation_cte],
assumption+)
apply (clarsimp simp: typ_heap_simps' updateMDB_def Let_def)
apply (subst if_not_P)
apply clarsimp
apply (simp add: split_def)
apply (rule getCTE_setCTE_rf_sr, simp_all)[1]
apply (simp add: ccte_relation_eq_ccap_relation)
apply (case_tac curThreadReplyCTE,
case_tac "cteMDBNode curThreadReplyCTE",
simp)
apply ceqv
apply (simp add: updateMDB_def
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_split_nothrow_dc)
apply simp
apply (ctac add: fastpath_copy_mrs_ccorres[unfolded forM_x_def])
apply (rule ccorres_move_c_guard_tcb)
apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow)
apply (simp add: setThreadState_runnable_simp)
apply (rule_tac P=\<top> in threadSet_ccorres_lemma2, vcg)
apply (clarsimp simp: typ_heap_simps' rf_sr_def
cstate_relation_def Let_def)
apply (rule conjI)
apply (clarsimp simp: cpspace_relation_def typ_heap_simps'
update_tcb_map_tos map_to_tcbs_upd)
apply (subst map_to_ctes_upd_tcb_no_ctes, assumption)
apply (rule ball_tcb_cte_casesI, simp_all)[1]
apply (simp add: cep_relations_drop_fun_upd)
apply (erule cmap_relation_updI, erule ko_at_projectKO_opt)
apply (simp add: ctcb_relation_def cthread_state_relation_def)
apply simp
apply (rule conjI, erule cready_queues_relation_not_queue_ptrs)
apply (rule ext, simp split: if_split)
apply (rule ext, simp split: if_split)
apply (simp add: carch_state_relation_def cmachine_state_relation_def
typ_heap_simps' map_comp_update projectKO_opt_tcb
cvariable_relation_upd_const ko_at_projectKO_opt)
apply ceqv
apply (simp only: bind_assoc[symmetric])
apply (rule ccorres_split_nothrow_novcg_dc)
apply simp
apply (rule ccorres_call,
rule_tac v=shw_asid and pd="capUntypedPtr (cteCap pd_cap)"
in switchToThread_fp_ccorres,
simp+)[1]
apply (rule_tac P="\<lambda>s. ksCurThread s = hd (epQueue send_ep)"
in ccorres_cross_over_guard)
apply csymbr
apply csymbr
apply (rule ccorres_call_hSkip)
apply (fold dc_def)[1]
apply (rule fastpath_restore_ccorres)
apply simp
apply simp
apply (simp add: setCurThread_def)
apply wp
apply (rule_tac P=\<top> in hoare_triv, simp)
apply (simp add: imp_conjL rf_sr_ksCurThread del: all_imp_to_ex)
apply (clarsimp simp: ccap_relation_ep_helpers guard_is_UNIV_def
mi_from_H_def)
apply (simp add: pd_has_hwasid_def)
apply (wp sts_ct_in_state_neq' sts_valid_objs')
apply (simp del: Collect_const)
apply (vcg exspec=thread_state_ptr_set_tsType_np_modifies)
apply (simp add: pred_conj_def)
apply (rule mapM_x_wp'[OF hoare_weaken_pre])
apply wp
apply clarsimp
apply simp
apply (vcg exspec=fastpath_copy_mrs_modifies)
apply (simp add: valid_tcb_state'_def)
apply wp
apply (wp updateMDB_weak_cte_wp_at)
apply simp
apply (vcg exspec=mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_modifies)
apply (simp add: o_def)
apply (wp | simp
| wp_once updateMDB_weak_cte_wp_at
| wp_once updateMDB_cte_wp_at_other)+
apply (vcg exspec=mdb_node_ptr_set_mdbPrev_np_modifies)
apply (wp updateCap_cte_wp_at_cteMDBNode
updateCap_cte_wp_at_cases
updateCap_no_0 | simp)+
apply (vcg exspec=cap_reply_cap_ptr_new_np_modifies)
apply (simp add: word_sle_def)
apply vcg
apply (rule conseqPre, vcg, clarsimp)
apply (simp add: cte_level_bits_def field_simps shiftl_t2n
ctes_of_Some_cte_wp_at
del: all_imp_to_ex)
apply (wp hoare_vcg_all_lift threadSet_ctes_of
hoare_vcg_imp_lift threadSet_valid_objs'
threadSet_st_tcb_at_state threadSet_cte_wp_at'
threadSet_cur
| simp add: cur_tcb'_def[symmetric])+
apply (vcg exspec=thread_state_ptr_set_tsType_np_modifies)
apply (simp only: imp_conv_disj[symmetric])
apply simp
apply (simp add: valid_tcb'_def tcb_cte_cases_def
valid_tcb_state'_def)
apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift
set_ep_valid_objs'
setObject_no_0_obj'[where 'a=endpoint, folded setEndpoint_def])
apply (simp del: Collect_const)
apply (vcg exspec=endpoint_ptr_mset_epQueue_tail_state_modifies
exspec=endpoint_ptr_set_epQueue_head_np_modifies)
apply simp
apply (rule threadGet_wp)
apply simp
apply wp[1]
apply simp
apply wp[1]
apply simp
apply (rule threadGet_wp)
apply simp
apply (rule threadGet_wp)
apply (simp del: Collect_const)
apply (vcg exspec=cap_page_directory_cap_get_capPDBasePtr_spec2)
apply (rule conseqPre,
vcg exspec=cap_page_directory_cap_get_capPDBasePtr_spec2,
clarsimp)
apply simp
apply (rule getEndpoint_wp)
apply (simp del: Collect_const)
apply (vcg exspec=endpoint_ptr_get_epQueue_head_modifies
exspec=endpoint_ptr_get_state_modifies)
apply (simp add: if_1_0_0 getSlotCap_def)
apply (rule valid_isRight_theRight_split)
apply simp
apply (wp getCTE_wp')
apply (rule validE_R_abstract_rv)
apply wp
apply (simp add: if_1_0_0 del: Collect_const)
apply (vcg exspec=lookup_fp_modifies)
apply simp
apply (rule threadGet_wp)
apply (simp del: Collect_const)
apply vcg
apply simp
apply (rule user_getreg_wp)
apply simp
apply (rule user_getreg_wp)
apply (rule conjI)
apply (clarsimp simp: obj_at_tcbs_of ct_in_state'_def st_tcb_at_tcbs_of
invs_cur' invs_valid_objs' ctes_of_valid'
word_sle_def
tcb_ptr_to_ctcb_ptr_mask[OF tcb_at_invs'])
apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp)
apply (clarsimp simp: isCap_simps valid_cap'_def maskCapRights_def)
apply (clarsimp simp add:obj_at'_def projectKO_eq)
apply (frule invs_valid_objs')
apply (erule valid_objsE')
apply simp
apply (clarsimp simp:projectKO_opt_ep split:Structures_H.kernel_object.splits)
apply (clarsimp simp:isRecvEP_def valid_obj'_def valid_ep'_def
split:Structures_H.endpoint.split_asm)
apply (erule not_NilE)
apply (drule_tac x = x in bspec)
apply fastforce
apply (clarsimp simp:obj_at_tcbs_of)
apply (frule_tac ptr2 = x in tcbs_of_aligned')
apply (simp add:invs_pspace_aligned')
apply (frule_tac ptr2 = x in tcbs_of_cte_wp_at_vtable)
apply (clarsimp simp:size_of_def field_simps word_sless_def word_sle_def
dest!:ptr_val_tcb_ptr_mask2[unfolded mask_def, simplified])
apply (frule_tac p="x + offs" for offs in ctes_of_valid', clarsimp)
apply (clarsimp simp: isCap_simps valid_cap'_def invs_valid_pde_mappings'
dest!: isValidVTableRootD)
apply (clarsimp simp: invs_sym' tcbCallerSlot_def
tcbVTableSlot_def tcbReplySlot_def
conj_comms tcb_cnode_index_defs field_simps
obj_at_tcbs_of)
apply (clarsimp simp: cte_level_bits_def isValidVTableRoot_def
ARM_H.isValidVTableRoot_def cte_wp_at_ctes_of
capAligned_def objBits_simps)
apply (simp cong: conj_cong)
apply (frule invs_mdb', clarsimp simp: valid_mdb'_def valid_mdb_ctes_def)
apply (case_tac xb, clarsimp, drule(1) nullcapsD')
apply (clarsimp simp: pde_stored_asid_def to_bool_def
length_msgRegisters word_le_nat_alt[symmetric])
apply (frule tcb_aligned'[OF obj_at_tcbs_of[THEN iffD2], OF exI, simplified])
apply clarsimp
apply (safe del: notI)[1]
apply (rule not_sym, clarsimp)
apply (drule aligned_sub_aligned[where x="x + 0x10" and y=x for x])
apply (erule tcbs_of_aligned')
apply (simp add:invs_pspace_aligned')
apply simp
apply (simp add:is_aligned_def dvd_def)
apply (clarsimp simp:tcbs_of_def obj_at'_def projectKO_opt_tcb
split:if_splits Structures_H.kernel_object.splits)
apply (drule pspace_distinctD')
apply (simp add:invs_pspace_distinct')
apply (simp add:objBits_simps)
apply (clarsimp simp: obj_at_tcbs_of split: list.split)
apply (erule_tac x = v0 in valid_objsE'[OF invs_valid_objs',rotated])
apply (clarsimp simp: valid_obj'_def valid_ep'_def isRecvEP_def neq_Nil_conv size_of_def
split: Structures_H.endpoint.split_asm
cong: list.case_cong)
apply (simp add:obj_at_tcbs_of)
apply simp
apply (clarsimp simp: syscall_from_H_def[split_simps syscall.split]
word_sle_def word_sless_def rf_sr_ksCurThread
ptr_val_tcb_ptr_mask' size_of_def cte_level_bits_def
tcb_cnode_index_defs tcbCTableSlot_def tcbVTableSlot_def
tcbReplySlot_def tcbCallerSlot_def
simp del: Collect_const split del: if_split)
apply (drule(1) obj_at_cslift_tcb)
apply (clarsimp simp: ccte_relation_eq_ccap_relation of_bl_from_bool from_bool_0
if_1_0_0 ccap_relation_case_sum_Null_endpoint
isRight_case_sum typ_heap_simps')
apply (frule(1) cap_get_tag_isCap[THEN iffD2])
apply (clarsimp simp: typ_heap_simps' ccap_relation_ep_helpers)
apply (erule cmap_relationE1[OF cmap_relation_ep],
erule ko_at_projectKO_opt)
apply (frule(1) ko_at_valid_ep')
apply (clarsimp simp: cendpoint_relation_def Let_def
isRecvEP_endpoint_case neq_Nil_conv
tcb_queue_relation'_def valid_ep'_def
mi_from_H_def)
apply (clarsimp simp: ccap_relation_ep_helpers from_bool_0
isValidVTableRoot_conv
ptr_add_assertion_positive
pdBits_def pageBits_def
cap_get_tag_isCap_ArchObject2
ccap_relation_pd_helper)
apply (clarsimp simp: isCap_simps dest!: isValidVTableRootD)
done
qed
(* The fastpath's C-level test on the raw capability word -- masking out the
   low five bits of word 0 and comparing against the cap_reply_cap tag --
   is equivalent to the abstract-level check that the capability is a reply
   cap whose master flag is clear.  The proof splits the word equality on
   "mask 4" (tag bits vs. the rest) and discharges the two halves
   separately; the shift tricks at the end recover the flag bit. *)
lemma isMasterReplyCap_fp_conv:
"ccap_relation cap cap' \<Longrightarrow>
(index (cap_C.words_C cap') 0 && 0x1F = scast cap_reply_cap)
= (isReplyCap cap \<and> \<not> capReplyMaster cap)"
apply (rule trans)
(* split the 5-bit comparison into the 4 tag bits and the remaining bit *)
apply (rule_tac m="mask 4" in split_word_eq_on_mask)
apply (simp add: cap_get_tag_isCap[symmetric])
apply (rule conj_cong)
(* tag-bits half: reduces to the cap_get_tag test *)
apply (simp add: mask_def word_bw_assocs cap_get_tag_eq_x
cap_reply_cap_def split: if_split)
(* remaining half: unfold the lifted reply cap to expose the master flag *)
apply (clarsimp simp: cap_lift_reply_cap cap_to_H_simps
isCap_simps
elim!: ccap_relationE)
apply (simp add: mask_def cap_reply_cap_def word_bw_assocs
to_bool_def)
apply (thin_tac "P" for P)
apply (rule iffI)
(* both directions via congruence under shifting the flag bit in and out *)
apply (drule_tac f="\<lambda>v. v >> 4" in arg_cong)
apply (simp add: shiftr_over_and_dist)
apply (drule_tac f="\<lambda>v. v << 4" in arg_cong)
apply (simp add: shiftl_over_and_dist shiftr_shiftl1 mask_def
word_bw_assocs)
done
(* For a reply cap related across the C/abstract boundary, the capTCBPtr
   field of the lifted C capability is exactly the (C pointer value of the)
   converted TCB pointer of the abstract cap.  Used to rewrite C-side
   accesses to the reply cap's TCB pointer into abstract terms. *)
lemma ccap_relation_reply_helper:
"\<lbrakk> ccap_relation cap cap'; isReplyCap cap \<rbrakk>
\<Longrightarrow> cap_reply_cap_CL.capTCBPtr_CL (cap_reply_cap_lift cap')
= ptr_val (tcb_ptr_to_ctcb_ptr (capTCBPtr cap))"
by (clarsimp simp: cap_get_tag_isCap[symmetric]
cap_lift_reply_cap cap_to_H_simps
cap_reply_cap_lift_def
elim!: ccap_relationE)
(* Lifting rule: any function that preserves TCB typ_at' at every address
   also preserves validity of a fixed endpoint value.  Justified by case
   analysis on the endpoint -- valid_ep' only constrains the queued thread
   pointers, which are typ_at' facts. *)
lemma valid_ep_typ_at_lift':
"\<lbrakk> \<And>p. \<lbrace>typ_at' TCBT p\<rbrace> f \<lbrace>\<lambda>rv. typ_at' TCBT p\<rbrace> \<rbrakk>
\<Longrightarrow> \<lbrace>\<lambda>s. valid_ep' ep s\<rbrace> f \<lbrace>\<lambda>rv s. valid_ep' ep s\<rbrace>"
apply (cases ep, simp_all add: valid_ep'_def)
apply (wp hoare_vcg_const_Ball_lift typ_at_lifts | assumption)+
done
(* Overwriting a thread's state with a valid thread state preserves
   valid_objs'.  The side condition from threadSet_valid_objs' -- that the
   updated TCB stays valid -- is discharged because tcbState is not one of
   the tcb_cte_cases fields. *)
lemma threadSet_tcbState_valid_objs:
"\<lbrace>valid_tcb_state' st and valid_objs'\<rbrace>
threadSet (tcbState_update (\<lambda>_. st)) t
\<lbrace>\<lambda>rv. valid_objs'\<rbrace>"
apply (wp threadSet_valid_objs')
apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def)
done
(* Specialisation of the array-assertion rule: instantiate the abstract tcb
   with a state-dependent Ptr so it matches the form produced by the C
   parser in the goals below.  NOTE(review): this rebinds the same name as
   the general rule it is derived from, shadowing it from here on. *)
lemmas array_assertion_abs_tcb_ctes_add
= array_assertion_abs_tcb_ctes_add[where
tcb="\<lambda>s. Ptr (tcb' s)" for tcb', simplified]
(* Bundle of guard-movement rules (tagged [corres_pre] so they fire as
   preprocessing steps) for hoisting tcb-cte array assertions out of the C
   program: the two base forms, plus Guard_Seq/Guard variants built from
   the _add rule above.  The first is re-instantiated with a
   state-dependent Ptr tcb, matching goals from the C parser. *)
lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre]
= ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)[where
tcb="\<lambda>s. Ptr (tcb' s)" for tcb', simplified]]
ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)]
ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add]
ccorres_move_Guard[OF array_assertion_abs_tcb_ctes_add]
(* Movement rules for C pointer-validity guards on tcb cte arrays, again
   instantiated with a state-dependent Ptr tcb; cte_C_numeral_fold is
   unfolded so the rules match the numeral offsets appearing in goals. *)
lemmas ccorres_move_c_guard_tcb_ctes3
= ccorres_move_c_guards [OF c_guard_abs_tcb_ctes[where
tcb="\<lambda>s. Ptr (tcb' s)" for tcb', simplified],
unfolded cte_C_numeral_fold]
(* Correspondence for the C helper fastpath_reply_cap_check: against the
   trivial abstract program (return ()), its result equals
   from_bool (isReplyCap cap \<and> \<not> capReplyMaster cap) for every abstract
   cap related to the C argument.  The heavy lifting is
   isMasterReplyCap_fp_conv above, which equates the C bit test with the
   abstract predicate. *)
lemma fastpath_reply_cap_check_ccorres:
"ccorres (\<lambda>rv rv'. \<forall>cap. ccap_relation cap ccap
\<longrightarrow> rv' = from_bool (isReplyCap cap \<and> \<not> capReplyMaster cap)) ret__int_'
\<top> ({s. cap_' s = ccap}) []
(return ()) (Call fastpath_reply_cap_check_'proc)"
apply (rule ccorres_from_vcg)
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: extra_sle_sless_unfolds isMasterReplyCap_fp_conv
from_bool_def return_def)
apply (simp split: bool.split if_split)
done
lemma fastpath_reply_recv_ccorres:
notes hoare_TrueI[simp]
shows "ccorres dc xfdc
(\<lambda>s. invs' s \<and> ct_in_state' (op = Running) s
\<and> obj_at' (\<lambda>tcb. (atcbContextGet o tcbArch) tcb capRegister = cptr
\<and> (atcbContextGet o tcbArch) tcb msgInfoRegister = msginfo)
(ksCurThread s) s)
(UNIV \<inter> {s. cptr_' s = cptr} \<inter> {s. msgInfo_' s = msginfo}) []
(fastpaths SysReplyRecv) (Call fastpath_reply_recv_'proc)"
proof -
have [simp]: "Kernel_C.tcbCaller = scast tcbCallerSlot"
by (simp add:Kernel_C.tcbCaller_def tcbCallerSlot_def)
have [simp]: "Kernel_C.tcbVTable = scast tcbVTableSlot"
by (simp add:Kernel_C.tcbVTable_def tcbVTableSlot_def)
have tcbs_of_cte_wp_at_vtable:
"\<And>s tcb ptr. tcbs_of s ptr = Some tcb \<Longrightarrow>
cte_wp_at' \<top> (ptr + 0x10 * tcbVTableSlot) s"
apply (clarsimp simp:tcbs_of_def cte_at'_obj_at'
split:if_splits)
apply (drule_tac x = "0x10 * tcbVTableSlot" in bspec)
apply (simp add:tcb_cte_cases_def tcbVTableSlot_def)
apply simp
done
have tcbs_of_cte_wp_at_caller:
"\<And>s tcb ptr. tcbs_of s ptr = Some tcb \<Longrightarrow>
cte_wp_at' \<top> (ptr + 0x10 * tcbCallerSlot) s"
apply (clarsimp simp:tcbs_of_def cte_at'_obj_at'
split:if_splits)
apply (drule_tac x = "0x10 * tcbCallerSlot" in bspec)
apply (simp add:tcb_cte_cases_def tcbCallerSlot_def)
apply simp
done
have tcbs_of_aligned':
"\<And>s ptr tcb. \<lbrakk>tcbs_of s ptr = Some tcb;pspace_aligned' s\<rbrakk> \<Longrightarrow> is_aligned ptr 9"
apply (clarsimp simp:tcbs_of_def obj_at'_def split:if_splits)
apply (drule pspace_alignedD')
apply simp+
apply (simp add:projectKO_opt_tcb objBitsKO_def
split: Structures_H.kernel_object.splits)
done
show ?thesis
using [[goals_limit = 1]]
apply (cinit lift: cptr_' msgInfo_')
apply (simp add: catch_liftE_bindE unlessE_throw_catch_If
unifyFailure_catch_If catch_liftE
getMessageInfo_def alternative_bind
cong: if_cong call_ignore_cong del: Collect_const)
apply (rule ccorres_pre_getCurThread)
apply (rename_tac curThread)
apply (rule ccorres_symb_exec_l3[OF _ user_getreg_inv' _ empty_fail_user_getreg])+
apply (rename_tac msginfo' cptr')
apply (rule_tac P="msginfo' = msginfo \<and> cptr' = cptr" in ccorres_gen_asm)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (simp only:)
apply (csymbr, csymbr)
apply (rule_tac r'="\<lambda>ft ft'. (ft' = scast seL4_Fault_NullFault) = (ft = None)"
and xf'="fault_type_'" in ccorres_split_nothrow)
apply (rule_tac P="cur_tcb' and (\<lambda>s. curThread = ksCurThread s)"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: cur_tcb'_def rf_sr_ksCurThread)
apply (drule(1) obj_at_cslift_tcb, clarsimp)
apply (clarsimp simp: typ_heap_simps' ctcb_relation_def cfault_rel_def)
apply (rule rev_bexI, erule threadGet_eq)
apply (clarsimp simp: seL4_Fault_lift_def Let_def split: if_split_asm)
apply ceqv
apply csymbr
apply (simp only:)
apply (rule ccorres_Cond_rhs_Seq)
apply (rule ccorres_alternative2)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres)
apply simp
apply simp
apply (vcg exspec=slowpath_noreturn_spec)
apply (rule ccorres_alternative1)
apply (rule ccorres_if_lhs[rotated])
apply (rule ccorres_inst[where P=\<top> and P'=UNIV])
apply simp
apply (simp del: Collect_const cong: call_ignore_cong)
apply (elim conjE)
apply (simp add: getThreadCSpaceRoot_def locateSlot_conv
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_pre_getCTE2)
apply (rule ccorres_abstract_ksCurThread, ceqv)
apply (rule ccorres_move_array_assertion_tcb_ctes
ccorres_move_c_guard_tcb_ctes3
ccorres_move_const_guard
ccorres_rhs_assoc)+
apply (ctac add: lookup_fp_ccorres)
apply (rename_tac luRet ep_cap)
apply (rule ccorres_abstract_ksCurThread, ceqv)
apply (rule ccorres_move_array_assertion_tcb_ctes
| simp del: Collect_const cong: call_ignore_cong)+
apply (csymbr, csymbr)
apply (simp add: ccap_relation_case_sum_Null_endpoint
of_bl_from_bool from_bool_0
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply (simp add: if_1_0_0 cong: if_cong)
apply (rule ccorres_cond_true_seq)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres)
apply simp
apply simp
apply (vcg exspec=slowpath_noreturn_spec)
apply (rule ccorres_rhs_assoc)+
apply csymbr+
apply (simp add: if_1_0_0 isRight_case_sum
del: Collect_const cong: call_ignore_cong)
apply (elim conjE)
apply (frule(1) cap_get_tag_isCap[THEN iffD2])
apply (simp add: ccap_relation_ep_helpers from_bool_0
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres)
apply simp
apply simp
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_pre_getBoundNotification)
apply (rule ccorres_rhs_assoc2)
apply (rule_tac xf'=ret__int_' and r'="\<lambda>rv rv'. rv' = from_bool rv"
in ccorres_split_nothrow)
apply (rule_tac P="bound_tcb_at' (op = bound_ntfn) curThread
and valid_objs' and no_0_obj'
and (\<lambda>s. curThread = ksCurThread s)" in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: rf_sr_ksCurThread pred_tcb_at'_def)
apply (drule(3) obj_at_bound_tcb_grandD, clarsimp simp: typ_heap_simps if_1_0_0 return_def)
apply (simp add: in_liftM Bex_def getNotification_def getObject_return objBits_simps
return_def cnotification_relation_isActive
trans [OF eq_commute from_bool_eq_if])
apply ceqv
apply (simp only: from_bool_0)
apply (rule ccorres_Cond_rhs_Seq)
apply (rule ccorres_split_throws)
apply simp
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (csymbr, csymbr)
apply (simp add: ccap_relation_ep_helpers
del: Collect_const cong: call_ignore_cong)
apply (rule_tac xf'="ret__unsigned_'"
and r'="\<lambda>ep v. (v = scast EPState_Send) = isSendEP ep"
in ccorres_split_nothrow)
apply (rule ccorres_add_return2)
apply (rule ccorres_pre_getEndpoint, rename_tac ep)
apply (rule_tac P="ko_at' ep (capEPPtr (theRight luRet)) and valid_objs'"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: return_def)
apply (erule cmap_relationE1[OF cmap_relation_ep], erule ko_at_projectKO_opt)
apply (clarsimp simp: typ_heap_simps')
apply (simp add: cendpoint_relation_def Let_def isSendEP_def
endpoint_state_defs
split: endpoint.split_asm)
apply ceqv
apply (rename_tac send_ep send_ep_is_send)
apply (rule_tac P="ko_at' send_ep (capEPPtr (theRight luRet))
and valid_objs'" in ccorres_cross_over_guard)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply (simp del: Collect_const not_None_eq)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp add: getThreadVSpaceRoot_def locateSlot_conv
getThreadCallerSlot_def
del: Collect_const cong: if_cong call_ignore_cong)
apply (rule ccorres_abstract_ksCurThread, ceqv)
apply (rename_tac ksCurThread_y)
apply (rule ccorres_move_const_guard
ccorres_move_c_guard_tcb_ctes2
ccorres_move_array_assertion_tcb_ctes)+
apply (rule_tac xf'="ksCurThread_' \<circ> globals"
and val="tcb_ptr_to_ctcb_ptr curThread"
in ccorres_abstract_known)
apply (rule Seq_weak_ceqv, rule Basic_ceqv)
apply (rule rewrite_xfI, clarsimp simp only: o_def)
apply (rule refl)
apply csymbr
apply (rule ccorres_move_c_guard_cte)
apply (rule_tac var="callerCap_'" and var_update="callerCap_'_update"
in getCTE_h_val_ccorres_split[where P=\<top>])
apply simp
apply ceqv
apply (rename_tac caller_cap caller_cap_c)
apply (rule_tac P="\<lambda>_. capAligned (cteCap caller_cap)"
in ccorres_cross_over_guard)
apply (rule ccorres_add_return, ctac add: fastpath_reply_cap_check_ccorres)
apply (drule spec, drule_tac P="ccap_relation cp caller_cap_c" for cp in mp, assumption)
apply (simp add: from_bool_0
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply (simp cong: conj_cong)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (csymbr, csymbr)
apply (rule_tac r'="\<lambda>ft ft'. (ft' = scast seL4_Fault_NullFault) = (ft = None)"
and xf'="fault_type_'" in ccorres_split_nothrow)
apply (rule threadGet_vcg_corres)
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: obj_at_tcbs_of)
apply (clarsimp simp: typ_heap_simps' ctcb_relation_def cfault_rel_def
ccap_relation_reply_helper)
apply (clarsimp simp: seL4_Fault_lift_def Let_def split: if_split_asm)
apply ceqv
apply (simp del: Collect_const not_None_eq cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply (simp del: Collect_const not_None_eq)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_move_c_guard_tcb_ctes3 ccorres_move_const_guards
ccorres_move_array_assertion_tcb_ctes)+
apply (rule_tac var="newVTable_'" and var_update="newVTable_'_update"
in getCTE_h_val_ccorres_split[where P=\<top>])
apply simp
apply ceqv
apply (rename_tac pd_cap pd_cap_c)
apply (rule ccorres_symb_exec_r)
apply (rule_tac xf'=ret__unsigned_' in ccorres_abstract, ceqv)
apply (rename_tac pd_cap_c_ptr_maybe)
apply csymbr+
apply (simp add: isValidVTableRoot_conv from_bool_0
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (drule isValidVTableRootD)
apply (rule_tac P="pd_cap_c_ptr_maybe = capUntypedPtr (cteCap pd_cap)"
in ccorres_gen_asm2)
apply (simp add: ccap_relation_pd_helper cap_get_tag_isCap_ArchObject2
ccap_relation_reply_helper
ptr_add_assertion_positive
del: Collect_const Word_Lib.ptr_add_def cong: call_ignore_cong)
apply (rule ccorres_move_array_assertion_pd
| (rule ccorres_flip_Guard ccorres_flip_Guard2,
rule ccorres_move_array_assertion_pd)
| rule ccorres_flip_Guard2, rule ccorres_Guard_True_Seq)+
apply (rule stored_hw_asid_get_ccorres_split[where P=\<top>], ceqv)
apply (rule ccorres_abstract_ksCurThread, ceqv)
apply (rename_tac ksCurThread_x)
apply (rule_tac P="ksCurThread_y = ksCurThread_x" in ccorres_gen_asm)
apply (rule ccorres_move_c_guard_tcb
ccorres_move_const_guard)+
apply (rule ccorres_symb_exec_l3[OF _ threadGet_inv _ empty_fail_threadGet])
apply (rule ccorres_symb_exec_l3[OF _ threadGet_inv _ empty_fail_threadGet])
apply (rename_tac curPrio destPrio)
apply (rule ccorres_seq_cond_raise[THEN iffD2])
apply (rule_tac R="obj_at' (op = curPrio \<circ> tcbPriority) curThread
and obj_at' (op = destPrio \<circ> tcbPriority)
(capTCBPtr (cteCap caller_cap))
and (\<lambda>s. ksCurThread s = curThread)
and (\<lambda>s. ksCurThread s = ksCurThread_x)"
in ccorres_cond2')
apply clarsimp
apply (drule(1) obj_at_cslift_tcb)+
apply (clarsimp simp: typ_heap_simps' rf_sr_ksCurThread)
apply (simp add: ctcb_relation_unat_tcbPriority_C
word_less_nat_alt linorder_not_le)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp del: Collect_const cong: call_ignore_cong)
apply csymbr+
apply (rule ccorres_symb_exec_l3[OF _ gets_inv _ empty_fail_gets])
apply (rename_tac asidMap)
apply (rule_tac P="asid_map_pd_to_hwasids asidMap (capPDBasePtr (capCap ((cteCap pd_cap))))
= set_option (pde_stored_asid shw_asid)" in ccorres_gen_asm)
apply (simp del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_Cond_rhs_Seq)
apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def)
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def
to_bool_def
del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_move_c_guard_tcb ccorres_move_const_guard)+
apply (rule ccorres_symb_exec_l3[OF _ curDomain_inv _])
prefer 3
apply (simp only: curDomain_def, rule empty_fail_gets)
apply (rule ccorres_symb_exec_l3[OF _ threadGet_inv _ empty_fail_threadGet])
apply (rename_tac curDom destDom)
apply (rule ccorres_seq_cond_raise[THEN iffD2])
apply (rule_tac R="obj_at' (op = destDom \<circ> tcbDomain)
(capTCBPtr (cteCap caller_cap))
and (\<lambda>s. ksCurDomain s = curDom)"
in ccorres_cond2')
apply clarsimp
apply (drule(1) obj_at_cslift_tcb)+
apply (clarsimp simp: typ_heap_simps' rf_sr_ksCurDomain)
apply (drule ctcb_relation_tcbDomain[symmetric])
apply (clarsimp simp: up_ucast_inj_eq[symmetric] maxDom_def)
apply simp
apply (rule ccorres_split_throws)
apply (fold dc_def)[1]
apply (rule ccorres_call_hSkip)
apply (rule slowpath_ccorres, simp+)
apply (vcg exspec=slowpath_noreturn_spec)
apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def
to_bool_def
del: Collect_const cong: call_ignore_cong)
apply simp
apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow)
apply (rule_tac P="capAligned (theRight luRet)" in ccorres_gen_asm)
apply (rule_tac P=\<top> and P'="\<lambda>s. ksCurThread s = curThread"
in threadSet_ccorres_lemma3)
apply vcg
apply (clarsimp simp: rf_sr_ksCurThread typ_heap_simps'
h_t_valid_clift_Some_iff)
apply (clarsimp simp: capAligned_def isCap_simps objBits_simps
"StrictC'_thread_state_defs" mask_def)
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def
typ_heap_simps')
apply (rule conjI)
apply (clarsimp simp: cpspace_relation_def typ_heap_simps'
update_tcb_map_tos map_to_tcbs_upd)
apply (subst map_to_ctes_upd_tcb_no_ctes, assumption)
apply (rule ball_tcb_cte_casesI, simp_all)[1]
apply (simp add: cep_relations_drop_fun_upd)
apply (erule cmap_relation_updI, erule ko_at_projectKO_opt)
apply (simp add: ctcb_relation_def cthread_state_relation_def
"StrictC'_thread_state_defs" from_bool_0
to_bool_def if_1_0_0)
apply simp
apply (rule conjI, erule cready_queues_relation_not_queue_ptrs)
apply (rule ext, simp split: if_split)
apply (rule ext, simp split: if_split)
apply (simp add: carch_state_relation_def cmachine_state_relation_def
typ_heap_simps' map_comp_update projectKO_opt_tcb
cvariable_relation_upd_const ko_at_projectKO_opt)
apply ceqv
apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2)
apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow)
apply (rule fastpath_enqueue_ccorres[unfolded o_def,simplified])
apply simp
apply ceqv
apply (simp add: liftM_def del: Collect_const cong: call_ignore_cong)
apply (rule ccorres_move_c_guard_tcb_ctes3)
apply (rule_tac r'="\<lambda>rv rv'. rv' = mdbPrev (cteMDBNode rv)"
and xf'=ret__unsigned_' in ccorres_split_nothrow)
apply (rule_tac P="tcb_at' curThread
and K (curThread = ksCurThread_x)
and (\<lambda>s. ksCurThread s = ksCurThread_x)"
in getCTE_ccorres_helper[where P'=UNIV])
apply (rule conseqPre, vcg)
apply (clarsimp simp: typ_heap_simps' cte_level_bits_def
tcbCallerSlot_def size_of_def
tcb_cnode_index_defs tcb_ptr_to_ctcb_ptr_mask)
apply (clarsimp simp: ccte_relation_def map_option_Some_eq2)
apply ceqv
apply (rule ccorres_assert)
apply (rename_tac mdbPrev_cte mdbPrev_cte_c)
apply (rule ccorres_split_nothrow_dc)
apply (simp add: updateMDB_def Let_def
del: Collect_const cong: if_cong)
apply (rule_tac P="cte_wp_at' (op = mdbPrev_cte)
(curThread + (tcbCallerSlot << cte_level_bits))
and valid_mdb'"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (drule(2) valid_mdb_ctes_of_prev[rotated])
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+)
apply (clarsimp simp: typ_heap_simps' split_def)
apply (rule getCTE_setCTE_rf_sr, simp_all)[1]
apply (clarsimp simp: ccte_relation_def map_option_Some_eq2
cte_to_H_def mdb_node_to_H_def
c_valid_cte_def)
apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2,
rule ccorres_rhs_assoc2)
apply (rule ccorres_split_nothrow_dc)
apply (rule_tac P="cte_at' (curThread + (tcbCallerSlot << cte_level_bits))
and tcb_at' curThread
and K (curThread = ksCurThread_x)"
in ccorres_from_vcg[where P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: cte_wp_at_ctes_of)
apply (rule cmap_relationE1[OF cmap_relation_cte], assumption+)
apply (clarsimp simp: typ_heap_simps' split_def tcbCallerSlot_def
tcb_cnode_index_defs tcb_ptr_to_ctcb_ptr_mask
cte_level_bits_def size_of_def
packed_heap_update_collapse_hrs)
apply (rule setCTE_rf_sr, simp_all add: typ_heap_simps')[1]
apply (clarsimp simp: ccte_relation_eq_ccap_relation makeObject_cte
mdb_node_to_H_def nullMDBNode_def
ccap_relation_NullCap_iff)
apply csymbr
apply (ctac add: fastpath_copy_mrs_ccorres[unfolded forM_x_def])
apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow)
apply (simp add: setThreadState_runnable_simp)
apply (rule_tac P=\<top> in threadSet_ccorres_lemma2, vcg)
apply (clarsimp simp: typ_heap_simps' rf_sr_def
cstate_relation_def Let_def)
apply (rule conjI)
apply (clarsimp simp: cpspace_relation_def typ_heap_simps'
update_tcb_map_tos map_to_tcbs_upd)
apply (subst map_to_ctes_upd_tcb_no_ctes, assumption)
apply (rule ball_tcb_cte_casesI, simp_all)[1]
apply (simp add: cep_relations_drop_fun_upd)
apply (erule cmap_relation_updI, erule ko_at_projectKO_opt)
apply (simp add: ctcb_relation_def cthread_state_relation_def)
apply simp
apply (rule conjI, erule cready_queues_relation_not_queue_ptrs)
apply (rule ext, simp split: if_split)
apply (rule ext, simp split: if_split)
apply (simp add: carch_state_relation_def cmachine_state_relation_def
typ_heap_simps' map_comp_update projectKO_opt_tcb
cvariable_relation_upd_const ko_at_projectKO_opt)
apply ceqv
apply (simp only: bind_assoc[symmetric])
apply (rule ccorres_split_nothrow_novcg_dc)
apply (rule ccorres_call,
rule_tac v=shw_asid and pd="capUntypedPtr (cteCap pd_cap)"
in switchToThread_fp_ccorres,
simp+)[1]
apply (rule_tac P="\<lambda>s. ksCurThread s = capTCBPtr (cteCap caller_cap)"
in ccorres_cross_over_guard)
apply csymbr
apply csymbr
apply (rule ccorres_call_hSkip)
apply (fold dc_def)[1]
apply (rule fastpath_restore_ccorres)
apply simp
apply simp
apply (simp add: setCurThread_def)
apply wp
apply (rule_tac P=\<top> in hoare_triv, simp)
apply (simp add: imp_conjL rf_sr_ksCurThread del: all_imp_to_ex)
apply (clarsimp simp: ccap_relation_ep_helpers guard_is_UNIV_def
mi_from_H_def)
apply (simp add: pd_has_hwasid_def)
apply (wp sts_ct_in_state_neq' sts_valid_objs')
apply (simp del: Collect_const)
apply (vcg exspec=thread_state_ptr_set_tsType_np_modifies)
apply simp
apply (rule mapM_x_wp'[OF hoare_weaken_pre], wp)
apply clarsimp
apply simp
apply (vcg exspec=fastpath_copy_mrs_modifies)
apply (simp add: valid_tcb_state'_def)
apply wp
apply (wp setCTE_cte_wp_at_other)
apply (simp del: Collect_const)
apply vcg
apply (simp add: o_def)
apply (wp | simp
| wp_once updateMDB_weak_cte_wp_at
| wp_once updateMDB_cte_wp_at_other)+
apply (vcg exspec=mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_modifies)
apply simp
apply (wp getCTE_wp')
apply simp
apply vcg
apply (simp add: shiftl_t2n)
apply (wp hoare_drop_imps setEndpoint_valid_mdb' set_ep_valid_objs'
setObject_no_0_obj'[where 'a=endpoint, folded setEndpoint_def])
apply simp
apply (vcg exspec=endpoint_ptr_mset_epQueue_tail_state_modifies
exspec=endpoint_ptr_set_epQueue_head_np_modifies
exspec=endpoint_ptr_get_epQueue_tail_modifies)
apply (simp add: valid_pspace'_def pred_conj_def conj_comms
valid_mdb'_def)
apply (wp threadSet_cur threadSet_tcbState_valid_objs
threadSet_state_refs_of' threadSet_ctes_of
valid_ep_typ_at_lift' threadSet_cte_wp_at'
| simp)+
apply (vcg exspec=thread_state_ptr_mset_blockingObject_tsType_modifies)
apply simp
apply (rule threadGet_wp)
apply simp
apply wp[1]
apply simp
apply wp
apply (simp cong: if_cong)
apply (rule threadGet_wp)
apply (simp cong: if_cong)
apply (rule threadGet_wp)
apply (simp add: syscall_from_H_def del: Collect_const)
apply (vcg exspec=cap_page_directory_cap_get_capPDBasePtr_spec2)
apply (rule conseqPre,
vcg exspec=cap_page_directory_cap_get_capPDBasePtr_spec2,
clarsimp)
apply (simp add:ccap_relation_reply_helper cong:if_cong)
apply (rule threadGet_wp)
apply (simp add: syscall_from_H_def ccap_relation_reply_helper)
apply (vcg exspec=seL4_Fault_get_seL4_FaultType_modifies)
apply simp
apply wp
apply simp
apply (vcg exspec=fastpath_reply_cap_check_modifies)
apply simp
apply (rule getEndpoint_wp)
apply (simp add: syscall_from_H_def ccap_relation_reply_helper)
apply (vcg exspec=endpoint_ptr_get_state_modifies)
apply simp
apply (wp option_case_liftM_getNotification_wp[unfolded fun_app_def])
apply (simp del: Collect_const)
apply vcg
apply (simp add: if_1_0_0 getSlotCap_def)
apply (rule valid_isRight_theRight_split)
apply (wp getCTE_wp')
apply (rule validE_R_abstract_rv)
apply wp
apply (simp del: Collect_const)
apply (vcg exspec=lookup_fp_modifies)
apply simp
apply (rule threadGet_wp)
apply (simp del: Collect_const)
apply vcg
apply simp
apply (rule user_getreg_wp)
apply simp
apply (rule user_getreg_wp)
apply (rule conjI)
apply (clarsimp simp: ct_in_state'_def obj_at_tcbs_of word_sle_def)
apply (frule tcbs_of_aligned')
apply (simp add:invs_pspace_aligned')
apply (frule tcbs_of_cte_wp_at_caller)
apply (clarsimp simp:size_of_def field_simps
dest!:ptr_val_tcb_ptr_mask2[unfolded mask_def])
apply (frule st_tcb_at_state_refs_ofD')
apply (clarsimp simp: obj_at_tcbs_of ct_in_state'_def st_tcb_at_tcbs_of
invs_cur' invs_valid_objs' ctes_of_valid'
fun_upd_def[symmetric] fun_upd_idem pred_tcb_at'_def invs_no_0_obj')
apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp)
apply (clarsimp simp: isCap_simps valid_cap'_def[split_simps capability.split]
maskCapRights_def cte_wp_at_ctes_of cte_level_bits_def)
apply (frule_tac p = a in ctes_of_valid',clarsimp)
apply (simp add:valid_cap_simps')
apply (clarsimp simp:cte_level_bits_def)
apply (frule_tac p="p + tcbCallerSlot * 0x10"for p in ctes_of_valid',clarsimp)
apply (clarsimp simp: valid_capAligned)
apply (frule_tac ptr2 = v0a in tcbs_of_cte_wp_at_vtable)
apply (frule_tac ptr2 = v0a in tcbs_of_aligned')
apply (simp add:invs_pspace_aligned')
apply (clarsimp simp:size_of_def field_simps cte_wp_at_ctes_of
word_sle_def word_sless_def
dest!:ptr_val_tcb_ptr_mask2[unfolded mask_def])
apply (clarsimp simp: valid_cap_simps' obj_at_tcbs_of)
apply (frule_tac p="p + tcbVTableSlot * 0x10" for p in ctes_of_valid', clarsimp)
apply (clarsimp simp: isCap_simps valid_cap_simps' capAligned_def
invs_valid_pde_mappings' obj_at_tcbs_of
dest!: isValidVTableRootD)
apply (frule invs_mdb')
apply (clarsimp simp: cte_wp_at_ctes_of tcbSlots cte_level_bits_def
makeObject_cte isValidVTableRoot_def
ARM_H.isValidVTableRoot_def
pde_stored_asid_def to_bool_def
valid_mdb'_def valid_tcb_state'_def
word_le_nat_alt[symmetric] length_msgRegisters)
apply (frule ko_at_valid_ep', clarsimp)
apply (safe del: notI)[1]
apply (simp add: isSendEP_def valid_ep'_def tcb_at_invs'
split: Structures_H.endpoint.split_asm)
apply (rule subst[OF epQueue.simps(1)],
erule st_tcb_at_not_in_ep_queue[where P="op = Running", rotated],
clarsimp+)
apply (simp add: obj_at_tcbs_of st_tcb_at_tcbs_of)
apply (drule invs_sym')
apply (erule_tac P=sym_refs in subst[rotated])
apply (rule fun_upd_idem[symmetric])
apply (clarsimp simp: tcb_bound_refs'_def)
apply (case_tac ntfnptr, simp_all)[1]
apply (clarsimp simp: set_eq_subset)
apply (clarsimp simp: field_simps)
apply (clarsimp simp: syscall_from_H_def[split_simps syscall.split]
word_sle_def word_sless_def rf_sr_ksCurThread
ptr_val_tcb_ptr_mask' size_of_def cte_level_bits_def
tcb_cnode_index_defs tcbSlots
simp del: Collect_const)
apply (frule obj_at_bound_tcb_grandD, clarsimp, clarsimp, simp)
apply (clarsimp simp: typ_heap_simps if_1_0_0)
apply (clarsimp simp: ccte_relation_eq_ccap_relation
if_1_0_0 ccap_relation_case_sum_Null_endpoint
isRight_case_sum typ_heap_simps'
pdBits_def pageBits_def
cap_get_tag_isCap mi_from_H_def)
apply (auto simp: isCap_simps capAligned_def objBits_simps ccap_relation_pd_helper
cap_get_tag_isCap_ArchObject2
dest!: ptr_val_tcb_ptr_mask2[unfolded mask_def] isValidVTableRootD)
done
qed
end
(* Crunch: Arch.switchToThread preserves tcb_at' t.
   ARM.clearExMonitor is listed as ignored, so the tool does not descend into it. *)
crunch tcb2[wp]: "Arch.switchToThread" "tcb_at' t"
  (ignore: ARM.clearExMonitor)
context kernel_m begin
(* If Q holds of every possible slot beforehand, then resolveAddressBits
   (on the non-exception path, hence the \<lbrace>Q\<rbrace>,- validE_R form)
   establishes Q for whichever slot it returns. *)
lemma resolveAddressBits_points_somewhere:
  "\<lbrace>\<lambda>s. \<forall>slot. Q slot s\<rbrace> resolveAddressBits cp cptr bits \<lbrace>Q\<rbrace>,-"
  apply (rule_tac Q'="\<lambda>rv s. \<forall>rv. Q rv s" in hoare_post_imp_R)
   apply wp
  apply clarsimp
  done
(* wp rule for reading a list of user registers: asUser t (mapM getRegister regs)
   returns exactly the register values stored in t's arch TCB context,
   so Q applied to that map suffices as precondition. *)
lemma user_getregs_wp:
  "\<lbrace>\<lambda>s. tcb_at' t s \<and> (\<forall>tcb. ko_at' tcb t s \<longrightarrow> Q (map ((atcbContextGet o tcbArch) tcb) regs) s)\<rbrace>
      asUser t (mapM getRegister regs) \<lbrace>Q\<rbrace>"
  apply (rule hoare_strengthen_post)
   apply (rule hoare_vcg_conj_lift)
    apply (rule asUser_get_registers)
   (* the read is state-invariant, so the second conjunct carries Q through *)
   apply (rule asUser_inv)
   apply (wp mapM_wp' getRegister_inv)
  apply clarsimp
  apply (drule obj_at_ko_at', clarsimp)
  done
(* Folding copy_register_tsrs over a register list rs collapses to a single
   functional update of y: its context takes the rs entries from x and keeps
   the rest; the thread state of y is unchanged. *)
lemma foldr_copy_register_tsrs:
  "foldr (\<lambda>r . copy_register_tsrs x y r r (\<lambda>x. x)) rs s
       = (s (y := TCBStateRegs (tsrState (s y))
                       (\<lambda>r. if r \<in> set rs then tsrContext (s x) r
                                 else tsrContext (s y) r)))"
  apply (induct rs)
   apply simp
  apply (simp add: copy_register_tsrs_def fun_eq_iff
            split: if_split)
  done
(* Strip an identical leading computation lu from both sides of a
   monadic_rewrite: if lu preserves every predicate, is empty_fail and
   no_fail under Q, then a rewrite with lu prefixed on both sides yields
   the rewrite of the continuations under the strengthened precondition
   P and Q. *)
lemma monadic_rewrite_add_lookup_both_sides:
  assumes inv: "\<And>P. \<lbrace>P\<rbrace> lu \<lbrace>\<lambda>r. P\<rbrace>"
  and ef: "empty_fail lu"
  and nf: "no_fail Q lu"
  shows
  "monadic_rewrite E F P (do lu; f od) (do lu; g od)
    \<Longrightarrow> monadic_rewrite E F (P and Q) f g"
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_trans[rotated])
    apply (rule monadic_rewrite_symb_exec_l'[where m=lu], (wp inv ef nf impI)+)
    apply (rule monadic_rewrite_refl, wp)
   apply (simp; erule monadic_rewrite_trans[rotated])
   apply (rule monadic_rewrite_transverse[OF _ monadic_rewrite_refl])
   apply (rule monadic_rewrite_symb_exec_l'[where m=lu], (wp inv ef nf impI)+)
   apply (rule monadic_rewrite_refl, wp)
  apply simp
  done
lemmas cteInsert_obj_at'_not_queued = cteInsert_obj_at'_queued[of "\<lambda>a. \<not> a"]
(* Introduce an existentially-determined value v into a monadic_rewrite
   precondition: if the rewrite holds for every v under Q v, it holds under
   "some v satisfies P, and every such v satisfies Q". *)
lemma monadic_rewrite_exists_v:
  "[| !! v. monadic_rewrite E F (Q v) f g |]
     ==> monadic_rewrite E F (%x. (EX v. P v x) & (ALL v. P v x --> Q v x)) f g"
  apply (rule monadic_rewrite_name_pre)
  apply clarsimp
  apply (erule_tac x=v in meta_allE)
  apply (erule monadic_rewrite_imp)
  apply clarsimp
  done
(* When the IPC buffer of tcb t is known to be v, the read
   "threadGet tcbIPCBuffer t" can be rewritten to "return v". *)
lemma monadic_rewrite_threadGet_tcbIPCBuffer:
  "monadic_rewrite E F (obj_at' (%tcb. tcbIPCBuffer tcb = v) t)
    (threadGet tcbIPCBuffer t) (return v)"
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_trans[rotated])
    apply (rule monadic_rewrite_gets_known)
   (* expose the underlying getObject so its result can be fixed *)
   apply (unfold threadGet_def liftM_def fun_app_def)
   apply (rule monadic_rewrite_symb_exec_l' | wp | rule empty_fail_getObject getObject_inv)+
     apply (clarsimp; rule no_fail_getObject_tcb)
    apply (simp only: exec_gets)
    apply (rule_tac P = "(\<lambda>s. (tcbIPCBuffer rv)=v) and tcb_at' t" in monadic_rewrite_refl3)
    apply (simp add:)
   apply (wp OMG_getObject_tcb | wpc)+
  apply (auto intro: obj_tcb_at')
  done
(* setCTE never changes a TCB's IPC buffer field, so obj_at' facts about
   tcbIPCBuffer are preserved. *)
lemma setCTE_obj_at'_tcbIPCBuffer:
  "\<lbrace>obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t\<rbrace> setCTE p v \<lbrace>\<lambda>rv. obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t\<rbrace>"
  unfolding setCTE_def
  by (rule setObject_cte_obj_at_tcb', simp+)
(* Crunch: cteInsert and asUser also leave tcbIPCBuffer untouched. *)
crunch obj_at'_tcbIPCBuffer[wp]: cteInsert, asUser "obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t"
  (wp: setCTE_obj_at'_queued crunch_wps threadSet_obj_at'_really_strongest)
(* Central fastpath-call correspondence on the Haskell/abstract side:
   under invariants, with the current thread Running and the scheduler action
   ResumeCurrentThread, the full kernel entry "callKernel (SyscallEvent SysCall)"
   can be monadically rewritten to the fastpath specification
   "fastpaths SysCall".  The proof proceeds by unfolding the slowpath call
   handling, symbolically executing the fastpath's guard checks (message info,
   cap lookup, endpoint, VTable root, priorities, domains), rewriting the IPC
   transfer, caller-cap setup, thread-state changes and schedule into their
   simplified forms, and finally isolating the per-thread register actions.
   NOTE(review): the apply-script below is highly order-sensitive; the phase
   markers are descriptive only. *)
lemma fastpath_callKernel_SysCall_corres:
  "monadic_rewrite True False
         (invs' and ct_in_state' (op = Running)
                and (\<lambda>s. ksSchedulerAction s = ResumeCurrentThread))
     (callKernel (SyscallEvent SysCall)) (fastpaths SysCall)"
  apply (rule monadic_rewrite_introduce_alternative)
   apply (simp add: callKernel_def)
  apply (rule monadic_rewrite_imp)
   apply (simp add: handleEvent_def handleCall_def
                    handleInvocation_def liftE_bindE_handle
                    bind_assoc getMessageInfo_def)
   apply (simp add: catch_liftE_bindE unlessE_throw_catch_If
                    unifyFailure_catch_If catch_liftE
                    getMessageInfo_def alternative_bind
                    fastpaths_def
              cong: if_cong)
   (* peel off the read-only prefix: message info, cap register, fault *)
   apply (rule monadic_rewrite_rdonly_bind_l, wp)
   apply (rule monadic_rewrite_bind_tail)
    apply (rule monadic_rewrite_rdonly_bind_l, wp)
    apply (rule monadic_rewrite_bind_tail)
     apply (rename_tac msgInfo)
     apply (rule monadic_rewrite_rdonly_bind_l, wp)
     apply (rule monadic_rewrite_bind_tail)
      apply (rule monadic_rewrite_symb_exec_r
                     [OF threadGet_inv no_fail_threadGet])
       apply (rename_tac thread msgInfo ptr tcbFault)
       apply (rule monadic_rewrite_alternative_rhs[rotated])
        apply (rule monadic_rewrite_alternative_l)
       apply (rule monadic_rewrite_if_rhs[rotated])
        apply (rule monadic_rewrite_alternative_l)
       apply (simp add: split_def Syscall_H.syscall_def
                        liftE_bindE_handle bind_assoc
                        capFaultOnFailure_def)
       apply (simp only: bindE_bind_linearise[where f="rethrowFailure fn f'" for fn f']
                         bind_case_sum_rethrow)
       (* unfold the cap/slot lookup for the invoked capability *)
       apply (simp add: lookupCapAndSlot_def lookupSlotForThread_def
                        lookupSlotForThread_def bindE_assoc
                        liftE_bind_return_bindE_returnOk split_def
                        getThreadCSpaceRoot_def locateSlot_conv
                        returnOk_liftE[symmetric] const_def
                        getSlotCap_def)
       apply (simp only: liftE_bindE_assoc)
       apply (rule monadic_rewrite_rdonly_bind_l, wp)
       apply (rule monadic_rewrite_bind_tail)
        apply (rule monadic_rewrite_rdonly_bind_l)
         apply (wp | simp)+
        apply (rule_tac fn="case_sum Inl (Inr \<circ> fst)" in monadic_rewrite_split_fn)
          apply (simp add: liftME_liftM[symmetric] liftME_def bindE_assoc)
          apply (rule monadic_rewrite_refl)
         apply (rule monadic_rewrite_if_rhs[rotated])
          apply (rule monadic_rewrite_alternative_l)
         apply (simp add: isRight_right_map isRight_case_sum)
         apply (rule monadic_rewrite_if_rhs[rotated])
          apply (rule monadic_rewrite_alternative_l)
         apply (rule monadic_rewrite_rdonly_bind_l[OF lookupIPC_inv])
         apply (rule monadic_rewrite_symb_exec_l[OF lookupIPC_inv empty_fail_lookupIPCBuffer])
          (* decode/perform collapse into the send-IPC path *)
          apply (simp add: lookupExtraCaps_null returnOk_bind liftE_bindE_handle
                           bind_assoc liftE_bindE_assoc
                           decodeInvocation_def Let_def from_bool_0
                           performInvocation_def liftE_handle
                           liftE_bind)
          apply (rule monadic_rewrite_symb_exec_r [OF getEndpoint_inv no_fail_getEndpoint])
           apply (rename_tac "send_ep")
           apply (rule monadic_rewrite_if_rhs[rotated])
            apply (rule monadic_rewrite_alternative_l)
           apply (simp add: getThreadVSpaceRoot_def locateSlot_conv)
           apply (rule monadic_rewrite_symb_exec_r [OF getCTE_inv no_fail_getCTE])
            apply (rename_tac "pdCapCTE")
            apply (rule monadic_rewrite_if_rhs[rotated])
             apply (rule monadic_rewrite_alternative_l)
            apply (rule monadic_rewrite_symb_exec_r [OF threadGet_inv no_fail_threadGet])+
              apply (rename_tac "curPrio" "destPrio")
              apply (rule monadic_rewrite_if_rhs[rotated])
               apply (rule monadic_rewrite_alternative_l)
              apply (rule monadic_rewrite_if_rhs[rotated])
               apply (rule monadic_rewrite_alternative_l)
              apply (simp add: isRight_case_sum)
              apply (rule monadic_rewrite_symb_exec_r [OF gets_inv non_fail_gets])
               apply (rule monadic_rewrite_if_rhs[rotated])
                apply (rule monadic_rewrite_alternative_l)
               apply (rule monadic_rewrite_symb_exec_r[OF curDomain_inv],
                      simp only: curDomain_def, rule non_fail_gets)
                apply (rename_tac "curDom")
                apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet])
                 apply (rename_tac "destDom")
                 apply (rule monadic_rewrite_if_rhs[rotated])
                  apply (rule monadic_rewrite_alternative_l)
                 (* all fastpath guards passed: commit to the first alternative *)
                 apply (rule monadic_rewrite_trans,
                        rule monadic_rewrite_pick_alternative_1)
                 apply (rule monadic_rewrite_symb_exec_l[OF get_mrs_inv' empty_fail_getMRs])
                  apply (rule monadic_rewrite_trans)
                   apply (rule_tac F=True and E=True in monadic_rewrite_weaken)
                   apply simp
                   apply (rule monadic_rewrite_bind_tail)
                    apply (rule_tac x=thread in monadic_rewrite_symb_exec,
                           (wp empty_fail_getCurThread)+)
                    apply (simp add: sendIPC_def bind_assoc)
                    apply (rule_tac x=send_ep in monadic_rewrite_symb_exec,
                           (wp empty_fail_getEndpoint getEndpoint_obj_at')+)
                    apply (rule_tac P="epQueue send_ep \<noteq> []" in monadic_rewrite_gen_asm)
                    apply (simp add: isRecvEP_endpoint_case list_case_helper bind_assoc)
                    apply (rule monadic_rewrite_bind_tail)
                     apply (elim conjE)
                     apply (match premises in "isEndpointCap ep" for ep \<Rightarrow>
                              \<open>rule monadic_rewrite_symb_exec[where x="BlockedOnReceive (capEPPtr ep)"]\<close>,
                            (wp empty_fail_getThreadState)+)
                     apply (rule monadic_rewrite_symb_exec2, (wp | simp)+)
                     apply (rule monadic_rewrite_bind)
                       apply (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite)
                      apply (rule monadic_rewrite_bind_tail)
                       apply (rule monadic_rewrite_bind)
                         apply (rule_tac curPrio=curPrio and destPrio=destPrio
                                   and curDom=curDom and destDom=destDom and thread=thread
                                   in attemptSwitchTo_rewrite)
                        apply (rule monadic_rewrite_symb_exec2, (wp empty_fail_threadGet)+)
                        apply (rule monadic_rewrite_bind)
                          apply (rule monadic_rewrite_trans)
                           apply (rule setupCallerCap_rewrite)
                          apply (rule monadic_rewrite_bind_head)
                          apply (rule setThreadState_blocked_rewrite, simp)
                         apply (rule monadic_rewrite_trans)
                          apply (rule_tac x=BlockedOnReply in monadic_rewrite_symb_exec,
                                 (wp empty_fail_getThreadState)+)
                          apply simp
                          apply (rule monadic_rewrite_refl)
                         apply (rule monadic_rewrite_trans)
                          apply (rule monadic_rewrite_bind_head)
                          apply (rule_tac t="hd (epQueue send_ep)" in schedule_rewrite_ct_not_runnable')
                         apply (simp add: bind_assoc)
                         apply (rule monadic_rewrite_bind_tail)
                          apply (rule monadic_rewrite_bind)
                            apply (rule switchToThread_rewrite)
                           apply (rule activateThread_simple_rewrite)
                          apply wp_once
                         apply wp_once
                          apply (wp_once setCurThread_ct_in_state)
                         apply ((rule Arch_switchToThread_pred_tcb'
                                 | simp only: st_tcb_at'_def[symmetric])+)[1]
                        apply (wp, clarsimp simp: cur_tcb'_def ct_in_state'_def)
                       apply (simp add: getThreadCallerSlot_def getThreadReplySlot_def
                                        locateSlot_conv ct_in_state'_def cur_tcb'_def)
                       apply ((wp assert_inv threadSet_pred_tcb_at_state cteInsert_obj_at'_not_queued | wps)+)[1]
                      apply (simp add: setSchedulerAction_def)
                      apply wp
                     apply (simp cong: if_cong conj_cong add: if_bool_simps)
                     apply (simp_all only:)[5]
                     apply ((wp setThreadState_oa_queued[of _ "\<lambda>a _ _. \<not> a"]
                                setThreadState_obj_at_unchanged
                                asUser_obj_at_unchanged mapM_x_wp'
                                sts_st_tcb_at'_cases
                                setThreadState_no_sch_change
                                setEndpoint_obj_at_tcb'
                                | simp add: setMessageInfo_def)+)
                   apply (simp add: setThreadState_runnable_simp
                                    getThreadCallerSlot_def getThreadReplySlot_def
                                    locateSlot_conv bind_assoc)
                   (* fix the destination thread's IPC buffer value v *)
                   apply (rule_tac P="\<lambda>v.  obj_at' (%tcb. tcbIPCBuffer tcb = v) (hd (epQueue send_ep))"
                            in monadic_rewrite_exists_v)
                   apply (rename_tac ipcBuffer)
                   apply (simp add: ARM_H.switchToThread_def bind_assoc)
                   apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse])
                     apply (rule_tac v=ipcBuffer in monadic_rewrite_threadGet_tcbIPCBuffer | rule monadic_rewrite_bind monadic_rewrite_refl)+
                             apply (wp mapM_x_wp' getObject_inv | wpc | simp add:
                                     | wp_once hoare_drop_imps )+
                    apply (rule_tac v=ipcBuffer in monadic_rewrite_threadGet_tcbIPCBuffer | rule monadic_rewrite_bind monadic_rewrite_refl)+
                           apply (wp mapM_x_wp' getObject_inv | wpc | simp add:
                                   | wp_once hoare_drop_imps )+
                   (* isolate the register/thread actions into one transaction *)
                   apply (rule_tac P="inj (case_bool thread (hd (epQueue send_ep)))"
                            in monadic_rewrite_gen_asm)
                   apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse])
                     apply (rule monadic_rewrite_weaken[where F=False and E=True], simp)
                     apply (rule isolate_thread_actions_rewrite_bind
                                 fastpath_isolate_rewrites fastpath_isolatables
                                 bool.simps setRegister_simple
                                 setVMRoot_isolatable[THEN thread_actions_isolatableD] setVMRoot_isolatable
                                 doMachineOp_isolatable[THEN thread_actions_isolatableD] doMachineOp_isolatable
                                 zipWithM_setRegister_simple
                                 thread_actions_isolatable_bind
                              | assumption
                              | wp assert_inv)+
                   apply (rule_tac P="\<lambda>s. ksSchedulerAction s = ResumeCurrentThread
                                         \<and> tcb_at' thread s"
                              and F=True and E=False in monadic_rewrite_weaken)
                   apply simp
                   apply (rule monadic_rewrite_isolate_final)
                     apply (simp add: isRight_case_sum cong: list.case_cong)
                    apply (clarsimp simp: fun_eq_iff if_flip
                                    cong: if_cong)
                    apply (drule obj_at_ko_at', clarsimp)
                    apply (frule get_tcb_state_regs_ko_at')
                    apply (clarsimp simp: zip_map2 zip_same_conv_map foldl_map
                                          foldl_fun_upd
                                          foldr_copy_register_tsrs
                                          isRight_case_sum
                                    cong: if_cong)
                    apply (simp add: upto_enum_def fromEnum_def
                                     enum_register  toEnum_def
                                     msgRegisters_unfold
                               cong: if_cong)
                    apply (clarsimp split: if_split)
                    apply (rule ext)
                    apply (simp add: badgeRegister_def msgInfoRegister_def
                                     ARM.badgeRegister_def
                                     ARM.msgInfoRegister_def
                              split: if_split)
                   apply simp
                  (* discharge the accumulated wp side conditions *)
                  apply (wp | simp cong: if_cong bool.case_cong
                            | rule getCTE_wp' gts_wp' threadGet_wp
                                   getEndpoint_wp)+
        apply (rule validE_cases_valid)
        apply (simp add: isRight_def getSlotCap_def)
        apply (wp getCTE_wp')
       apply (rule resolveAddressBits_points_somewhere)
      apply (simp cong: if_cong bool.case_cong)
      apply wp
     apply simp
     apply (wp user_getreg_wp user_getregs_wp threadGet_wp)+
  (* final invariant reasoning establishing the fastpath preconditions *)
  apply (clarsimp simp: ct_in_state'_def pred_tcb_at')
  apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+)
  apply (clarsimp simp: isCap_simps valid_cap'_def maskCapRights_def)
  apply (frule ko_at_valid_ep', clarsimp)
  apply (frule sym_refs_ko_atD'[where 'a=endpoint], clarsimp)
  apply (clarsimp simp: valid_ep'_def isRecvEP_endpoint_case neq_Nil_conv
                        tcbVTableSlot_def cte_level_bits_def
                        cte_at_tcb_at_16' length_msgRegisters
                        n_msgRegisters_def order_less_imp_le
                        ep_q_refs_of'_def st_tcb_at_refs_of_rev'
                  cong: if_cong)
  apply (rename_tac blockedThread ys tcba tcbb v tcbc)
  apply (frule invs_mdb')
  apply (thin_tac "Ball S P" for S P)+
  apply (clarsimp simp: invs'_def valid_state'_def)
  apply (frule_tac t="blockedThread" in valid_queues_not_runnable_not_queued)
    apply (simp)
  apply (clarsimp simp: st_tcb_at'_def obj_at'_def objBits_simps projectKOs
                        valid_mdb'_def valid_mdb_ctes_def inj_case_bool
                 split: bool.split)+
   apply (simp(no_asm) add: eq_commute)
  apply (clarsimp simp: sch_act_simple_def)
  done
(* Assemble the C-level fastpath-call ccorres with the Haskell-side
   monadic rewrite above into a ccorres result against callKernel. *)
lemmas fastpath_call_ccorres_callKernel
    = monadic_rewrite_ccorres_assemble[OF fastpath_call_ccorres fastpath_callKernel_SysCall_corres]
(* Rewrite a capability case split that only distinguishes NullCap and
   ReplyCap into nested if-tests on isReplyCap / isNullCap. *)
lemma capability_case_Null_ReplyCap:
  "(case cap of NullCap \<Rightarrow> f | ReplyCap t b \<Rightarrow> g t b | _ \<Rightarrow> h)
     = (if isReplyCap cap then g (capTCBPtr cap) (capReplyMaster cap)
        else if isNullCap cap then f else h)"
  by (simp add: isCap_simps split: capability.split)
(* getCTE can return a result at slot in SOME state iff the slot address is
   cte_level_bits-aligned; the reverse direction constructs a witness state
   containing a single CTE at that slot. *)
lemma in_getCTE_slot:
  "(\<exists>s. (rv, s) \<in> fst (getCTE slot s)) = (is_aligned slot cte_level_bits)"
  apply (simp add: getCTE_assert_opt exec_gets assert_opt_member)
  apply (rule iffI)
   apply clarsimp
   apply (subgoal_tac "cte_wp_at' (op = rv) slot s")
    apply (simp add: cte_wp_at_cases')
    apply (erule disjE)
     apply simp
    apply clarsimp
    apply (drule(1) tcb_cte_cases_aligned[where cte=rv])
    apply (simp add: objBits_simps cte_level_bits_def)
   apply (simp add: cte_wp_at_ctes_of)
  (* witness: an otherwise-arbitrary state whose heap maps only slot to rv *)
  apply (rule_tac x="undefined \<lparr> ksPSpace := empty (slot \<mapsto> KOCTE rv) \<rparr>" in exI)
  apply (simp add: map_to_ctes_def Let_def objBits_simps cte_level_bits_def)
  done
end
context begin interpretation Arch . (*FIXME: arch_split*)
(* Two assert_opt applications agree exactly when the option values agree
   and, unless both are None, the states agree too. *)
lemma inj2_assert_opt:
  "(assert_opt v s = assert_opt v' s') = (v = v' \<and> (v' = None \<or> s = s'))"
  by (simp add: assert_opt_def return_def fail_def split: option.split)
(* gets_the is injective as a function from partial state projections to monads. *)
lemma gets_the_inj:
  "inj gets_the"
  apply (rule injI)
  apply (clarsimp simp: gets_the_def fun_eq_iff exec_gets inj2_assert_opt)
  done
lemmas gets_the_eq = inj_eq[OF gets_the_inj]
(* Pointwise version of gets_the equality at (possibly different) states. *)
lemma gets_the_eq2:
  "(gets_the f s = gets_the g s') = (f s = g s' \<and> (g s' = None \<or> s = s'))"
  by (simp add: gets_the_def exec_gets inj2_assert_opt)
(* return is the gets_the of a constant Some projection. *)
lemma return_gets_the:
  "return x = gets_the (\<lambda>_. Some x)"
  by (simp add: gets_the_def assert_opt_def)
(* Catching an injection-handled computation is the same as catching the
   original with the handler pre-composed with the injection function. *)
lemma injection_handler_catch:
  "catch (injection_handler f x) y
     = catch x (y o f)"
  apply (simp add: injection_handler_def catch_def handleE'_def
                   bind_assoc)
  apply (rule bind_cong[OF refl])
  apply (simp add: throwError_bind split: sum.split)
  done
(* When the receiver has no pending fault, doReplyTransfer reduces to the
   straight-line sequence: check the reply state and MDB shape, do the IPC
   transfer, delete the reply cap slot, make the receiver Runnable and
   attempt to switch to it (the fault-handling branch is dead). *)
lemma doReplyTransfer_simple:
  "monadic_rewrite True False
     (obj_at' (\<lambda>tcb. tcbFault tcb = None) receiver)
     (doReplyTransfer sender receiver slot)
     (do state \<leftarrow> getThreadState receiver;
         assert (isReply state);
         cte \<leftarrow> getCTE slot;
         mdbnode \<leftarrow> return $ cteMDBNode cte;
         assert (mdbPrev mdbnode \<noteq> 0 \<and> mdbNext mdbnode = 0);
         parentCTE \<leftarrow> getCTE (mdbPrev mdbnode);
         assert (isReplyCap (cteCap parentCTE) \<and> capReplyMaster (cteCap parentCTE));
         doIPCTransfer sender Nothing 0 True receiver;
         cteDeleteOne slot;
         setThreadState Running receiver;
         attemptSwitchTo receiver
         od)"
  apply (simp add: doReplyTransfer_def liftM_def nullPointer_def getSlotCap_def)
  apply (rule monadic_rewrite_bind_tail)+
        apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_threadGet)+)
        (* rv is the receiver's tcbFault; the precondition forces it to None *)
        apply (rule_tac P="rv = None" in monadic_rewrite_gen_asm, simp)
        apply (rule monadic_rewrite_refl)
       apply (wp threadGet_const gts_wp' getCTE_wp')+
  apply (simp add: o_def)
  done
(* Replace an if-condition by a value X it is known (by precondition) to equal. *)
lemma monadic_rewrite_if_known:
  "monadic_rewrite F E ((\<lambda>s. C = X) and \<top>) (if C then f else g) (if X then f else g)"
  apply (rule monadic_rewrite_gen_asm)
  apply (simp split del: if_split)
  apply (rule monadic_rewrite_refl)
  done
end
context kernel_m begin
(* receiveIPC on an endpoint with no senders, when any bound notification is
   inactive, simplifies to: block the thread on the endpoint and append it to
   the endpoint's receive queue. *)
lemma receiveIPC_simple_rewrite:
  "monadic_rewrite True False
     ((\<lambda>_. isEndpointCap ep_cap \<and> \<not> isSendEP ep) and (ko_at' ep (capEPPtr ep_cap) and
      (\<lambda>s. \<forall>ntfnptr. bound_tcb_at' (op = (Some ntfnptr)) thread s \<longrightarrow> obj_at' (Not \<circ> isActive) ntfnptr s)))
     (receiveIPC thread ep_cap True)
     (do
       setThreadState (BlockedOnReceive (capEPPtr ep_cap)) thread;
       setEndpoint (capEPPtr ep_cap) (RecvEP (case ep of RecvEP q \<Rightarrow> (q @ [thread]) | _ \<Rightarrow> [thread]))
      od)"
  apply (rule monadic_rewrite_gen_asm)
  apply (simp add: receiveIPC_def)
  apply (rule monadic_rewrite_imp)
   apply (rule_tac rv=ep in monadic_rewrite_symb_exec_l_known,
          (wp empty_fail_getEndpoint)+)
    apply (rule monadic_rewrite_symb_exec_l, (wp | simp add: getBoundNotification_def)+)
     apply (rule monadic_rewrite_symb_exec_l)
       apply (rule hoare_pre, wpc, wp+, simp)
      apply (simp split: option.split)
     (* the "completeSignal" branch is dead since no bound ntfn is active *)
     apply (rule monadic_rewrite_trans, rule monadic_rewrite_if_known[where X=False], simp)
     apply (rule monadic_rewrite_refl3[where P=\<top>])
     apply (cases ep, simp_all add: isSendEP_def)[1]
    apply (wp getNotification_wp gbn_wp' getEndpoint_wp | wpc)+
  apply (clarsimp simp: obj_at'_def projectKOs pred_tcb_at'_def)
  done
(* isFinalCapability never returns an empty result set. *)
lemma empty_fail_isFinalCapability:
  "empty_fail (isFinalCapability cte)"
  by (simp add: isFinalCapability_def Let_def split: if_split)
(* Deleting a reply capability has no finalisation work, so cteDeleteOne
   collapses to emptySlot. *)
lemma cteDeleteOne_replycap_rewrite:
  "monadic_rewrite True False
     (cte_wp_at' (\<lambda>cte. isReplyCap (cteCap cte)) slot)
     (cteDeleteOne slot)
     (emptySlot slot None)"
  apply (simp add: cteDeleteOne_def)
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+)
    apply (rule_tac P="cteCap rv \<noteq> NullCap \<and> isReplyCap (cteCap rv)
                          \<and> \<not> isEndpointCap (cteCap rv)
                          \<and> \<not> isNotificationCap (cteCap rv)"
             in monadic_rewrite_gen_asm)
    apply (simp add: finaliseCapTrue_standin_def
                     capRemovable_def)
    (* the finality check is irrelevant for reply caps; execute it away *)
    apply (rule monadic_rewrite_symb_exec_l,
           (wp isFinalCapability_inv empty_fail_isFinalCapability)+)
     apply (rule monadic_rewrite_refl)
    apply (wp getCTE_wp')+
  apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps)
  done
(* Deleting an already-empty slot is a no-op. *)
lemma cteDeleteOne_nullcap_rewrite:
  "monadic_rewrite True False
     (cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) slot)
     (cteDeleteOne slot)
     (return ())"
  apply (simp add: cteDeleteOne_def)
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+)
    apply (rule_tac P="cteCap rv = NullCap" in monadic_rewrite_gen_asm)
    apply simp
    apply (rule monadic_rewrite_refl)
   apply (wp getCTE_wp')
  apply (clarsimp simp: cte_wp_at_ctes_of)
  done
(* deleteCallerCap is a no-op when the thread's caller slot already holds
   NullCap; reduces to the previous lemma after resolving the slot address. *)
lemma deleteCallerCap_nullcap_rewrite:
  "monadic_rewrite True False
     (cte_wp_at' (\<lambda>cte. cteCap cte = NullCap) (thread + 2 ^ cte_level_bits * tcbCallerSlot))
     (deleteCallerCap thread)
     (return ())"
  apply (simp add: deleteCallerCap_def getThreadCallerSlot_def locateSlot_conv
                   getSlotCap_def)
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+)
    apply (rule monadic_rewrite_assert)
    apply (rule cteDeleteOne_nullcap_rewrite)
   apply (wp getCTE_wp)
  apply (clarsimp simp: cte_wp_at_ctes_of)
  done
end
(* emptySlot does not change the set of CNode caps, provided the cleared
   slot does not itself hold a CNode cap. *)
lemma emptySlot_cnode_caps:
  "\<lbrace>\<lambda>s. P (only_cnode_caps (ctes_of s)) \<and> cte_wp_at' (\<lambda>cte. \<not> isCNodeCap (cteCap cte)) slot s\<rbrace>
     emptySlot slot None
   \<lbrace>\<lambda>rv s. P (only_cnode_caps (ctes_of s))\<rbrace>"
  apply (simp add: only_cnode_caps_def map_option_comp2
                   o_assoc[symmetric] cteCaps_of_def[symmetric])
  apply (wp emptySlot_cteCaps_of)
  apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of
                 elim!: rsubst[where P=P] intro!: ext
                 split: if_split)
  done
(* cteDeleteOne preserves the CNode-cap projection of the CTE heap
   unconditionally (finalisation never produces or removes a CNode cap here). *)
lemma cteDeleteOne_cnode_caps:
  "\<lbrace>\<lambda>s. P (only_cnode_caps (ctes_of s))\<rbrace>
     cteDeleteOne slot
   \<lbrace>\<lambda>rv s. P (only_cnode_caps (ctes_of s))\<rbrace>"
  apply (simp add: only_cnode_caps_def map_option_comp2
                   o_assoc[symmetric] cteCaps_of_def[symmetric])
  apply (wp cteDeleteOne_cteCaps_of)
  apply clarsimp
  apply (erule rsubst[where P=P], rule ext)
  apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of isCap_simps)
  apply (rule_tac x="cteCap cte" in exI)
  apply (clarsimp simp: finaliseCap_def finaliseCapTrue_standin_def isCap_simps)
  done
(* asUser only touches TCB register state, so endpoint obj_at' facts survive. *)
lemma asUser_obj_at_ep[wp]:
  "\<lbrace>obj_at' P p\<rbrace> asUser t m \<lbrace>\<lambda>rv. obj_at' (P :: endpoint \<Rightarrow> bool) p\<rbrace>"
  apply (simp add: asUser_def split_def)
  apply (wp hoare_drop_imps | simp)+
  done
(* setCTE cannot alter an endpoint object, so endpoint obj_at' is preserved. *)
lemma setCTE_obj_at_ep[wp]:
  "\<lbrace>obj_at' (P :: endpoint \<Rightarrow> bool) p\<rbrace> setCTE ptr cte \<lbrace>\<lambda>rv. obj_at' P p\<rbrace>"
  unfolding setCTE_def
  apply (rule obj_at_setObject2)
  apply (clarsimp simp: updateObject_cte typeError_def in_monad
                 split: Structures_H.kernel_object.split_asm
                        if_split_asm)
  done
(* Same as setCTE_obj_at_ep, for notification objects. *)
lemma setCTE_obj_at_ntfn[wp]:
  "\<lbrace>obj_at' (P :: Structures_H.notification \<Rightarrow> bool) p\<rbrace> setCTE ptr cte \<lbrace>\<lambda>rv. obj_at' P p\<rbrace>"
  unfolding setCTE_def
  apply (rule obj_at_setObject2)
  apply (clarsimp simp: updateObject_cte typeError_def in_monad
                 split: Structures_H.kernel_object.split_asm
                        if_split_asm)
  done
(* Batch of crunch-generated preservation lemmas used by the fastpath proofs:
   emptySlot keeps endpoint obj_at' and the scheduler action; emptySlot/asUser
   keep gsCNodes; attemptSwitchTo keeps ctes_of, cte_wp_at' and TCB register
   contexts; doFaultTransfer keeps the CNode-cap projection. *)
crunch obj_at_ep[wp]: emptySlot "obj_at' (P :: endpoint \<Rightarrow> bool) p"
crunch nosch[wp]: emptySlot "\<lambda>s. P (ksSchedulerAction s)"
crunch gsCNodes[wp]: emptySlot, asUser "\<lambda>s. P (gsCNodes s)"
  (wp: crunch_wps)
crunch ctes_of[wp]: attemptSwitchTo "\<lambda>s. P (ctes_of s)"
  (wp: crunch_wps)
crunch cte_wp_at'[wp]: attemptSwitchTo "cte_wp_at' P p"
crunch tcbContext[wp]: attemptSwitchTo "obj_at' (\<lambda>tcb. P ( (atcbContextGet o tcbArch) tcb)) t"
  (wp: crunch_wps simp_del: comp_apply)
crunch only_cnode_caps[wp]: doFaultTransfer "\<lambda>s. P (only_cnode_caps (ctes_of s))"
  (wp: crunch_wps simp: crunch_simps)
context kernel_m begin
(* Dequeuing a thread that is not in a ready queue is a no-op: the queued
   flag read inside tcbSchedDequeue guards the only effectful branch. *)
lemma tcbSchedDequeue_rewrite_not_queued: "monadic_rewrite True False (tcb_at' t and obj_at' (Not \<circ> tcbQueued) t) (tcbSchedDequeue t) (return ())"
  apply (simp add: tcbSchedDequeue_def)
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_trans)
    apply (rule monadic_rewrite_bind_tail)
     apply (rule_tac P="\<not> queued" in monadic_rewrite_gen_asm)
     apply (simp add: when_def)
     apply (rule monadic_rewrite_refl)
    apply (wp threadGet_const)
   apply (rule monadic_rewrite_symb_exec_l)
      apply wp+
    apply (rule monadic_rewrite_refl)
   apply (wp)
  apply (clarsimp simp: o_def obj_at'_def)
  done
(* When the scheduler action is already SwitchToThread t, the target is not
   queued, and the current thread is not runnable, the full schedule function
   reduces to: arch-switch to t, set t current, reset the action.
   The dequeue inside switchToThread is eliminated via
   tcbSchedDequeue_rewrite_not_queued; the reschedule checks are dead. *)
lemma schedule_known_rewrite:
  "monadic_rewrite True False
     (\<lambda>s. ksSchedulerAction s = SwitchToThread t
              \<and> tcb_at' t s
              \<and> obj_at' (Not \<circ> tcbQueued) t s
              \<and> ksCurThread s = t'
              \<and> st_tcb_at' (Not \<circ> runnable') t' s)
     (schedule)
     (do Arch.switchToThread t;
         setCurThread t;
         setSchedulerAction ResumeCurrentThread od)"
  apply (simp add: schedule_def)
  apply (simp only: switchToThread_def)
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_trans)
    apply (rule monadic_rewrite_bind_tail)
     apply (rule monadic_rewrite_bind_tail)
      apply (rule_tac P="action = SwitchToThread t" in monadic_rewrite_gen_asm,simp)
      apply (rule monadic_rewrite_bind_tail)
       apply (rule_tac P="\<not> curRunnable \<and> action = SwitchToThread t" in monadic_rewrite_gen_asm,simp)
       apply (simp add: bind_assoc)
       apply (rule monadic_rewrite_bind_tail)
        apply (rule monadic_rewrite_bind)
          apply (rule monadic_rewrite_trans)
           apply (rule tcbSchedDequeue_rewrite_not_queued)
          apply (rule monadic_rewrite_refl)
         apply (rule monadic_rewrite_bind_tail)
          apply (rule monadic_rewrite_refl)
         apply ((wp Arch_switchToThread_obj_at_pre)+, simp, wp+)
   apply (rule monadic_rewrite_trans)
    apply (rule monadic_rewrite_symb_exec_l)
       apply (wp)
     apply simp
     apply (rule monadic_rewrite_symb_exec_l)
        apply wp
       apply (simp add: getSchedulerAction_def)
      apply (rule monadic_rewrite_symb_exec_l)
         apply (wp)
       apply (simp add: isRunnable_def)
       apply (rule monadic_rewrite_bind_tail)
        apply (rule monadic_rewrite_symb_exec_l)
           apply (wp, simp)
         apply (rule monadic_rewrite_bind_tail)
          apply (rule monadic_rewrite_refl)
         apply wp+
   apply (rule monadic_rewrite_refl)
  apply (clarsimp simp: st_tcb_at'_def o_def obj_at'_def)
  done
(* When the scheduler action is already set (not ResumeCurrentThread),
   setThreadState's reschedule check is a no-op, so it reduces to the plain
   threadSet of the state field. *)
lemma setThreadState_schact_set:
  "monadic_rewrite True False
     (\<lambda>s. ksSchedulerAction s \<noteq> ResumeCurrentThread)
     (setThreadState st t)
     (threadSet (tcbState_update (\<lambda>_. st)) t)"
  apply (simp add: setThreadState_def)
  apply (rule monadic_rewrite_imp)
   apply (subst bind_return[symmetric], rule monadic_rewrite_bind_tail)
    apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_isRunnable)+)
     apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCurThread)+)
      apply (rule monadic_rewrite_symb_exec_l, wp)
        apply (simp add: getSchedulerAction_def)
       apply (rename_tac sa)
       apply (rule_tac P="sa \<noteq> ResumeCurrentThread" in monadic_rewrite_gen_asm)
       apply (simp add: when_def)
       apply (rule monadic_rewrite_refl)
      apply (wp | simp)+
  done
(* A valid tcb_cte_cases offset inside a TCB gives a CTE at that address. *)
lemma tcb_at_cte_at_offset:
  "\<lbrakk> tcb_at' t s; 2 ^ cte_level_bits * off \<in> dom tcb_cte_cases \<rbrakk>
    \<Longrightarrow> cte_at' (t + 2 ^ cte_level_bits * off) s"
  apply (clarsimp simp: obj_at'_def projectKOs objBits_simps)
  apply (erule(2) cte_wp_at_tcbI')
   apply fastforce
  apply simp
  done
(* Variant of attemptSwitchTo_rewrite with the precondition phrased in terms
   of the current thread ct: when the target's priority dominates and the
   domains agree, attemptSwitchTo just records SwitchToThread t. *)
lemma attemptSwitchTo_rewrite2:
  "monadic_rewrite True True
     (\<lambda>s. obj_at' (\<lambda>tcb. tcbPriority tcb = curPrio) ct s
              \<and> obj_at' (\<lambda>tcb. tcbPriority tcb = destPrio \<and> tcbDomain tcb = destDom) t s
              \<and> curPrio \<le> destPrio \<and> ct = ksCurThread s
              \<and> ksSchedulerAction s = ResumeCurrentThread
              \<and> curDom = ksCurDomain s \<and> destDom = curDom)
     (attemptSwitchTo t) (setSchedulerAction (SwitchToThread t))"
  apply (rule monadic_rewrite_imp,
         rule attemptSwitchTo_rewrite[where thread=ct and curPrio=curPrio and destPrio=destPrio
             and curDom=curDom and destDom=destDom])
  apply clarsimp
  done
(* cte_wp_at' on the capability is preserved by emptySlot: the emptied slot
   itself ends up holding NullCap, every other slot is untouched. *)
lemma emptySlot_cte_wp_at_cteCap:
  "\<lbrace>\<lambda>s. (p = p' \<longrightarrow> P NullCap) \<and> (p \<noteq> p' \<longrightarrow> cte_wp_at' (\<lambda>cte. P (cteCap cte)) p s)\<rbrace>
      emptySlot p' irqopt
   \<lbrace>\<lambda>rv s. cte_wp_at' (\<lambda>cte. P (cteCap cte)) p s\<rbrace>"
  apply (simp add: tree_cte_cteCap_eq[unfolded o_def])
  apply (wp emptySlot_cteCaps_of)
  apply (clarsimp split: if_split)
  done
(* A real (CNode) CTE address can never alias a TCB-internal CTE slot:
   real CTEs and TCBs occupy disjoint object footprints. *)
lemma real_cte_at_tcbs_of_neq:
  "[| real_cte_at' p s; tcbs_of s t = Some tcb;
        2 ^ cte_level_bits * offs : dom tcb_cte_cases |]
      ==> p ~= t + 2 ^ cte_level_bits * offs"
  apply (clarsimp simp: tcbs_of_def obj_at'_def projectKOs objBits_simps
                 split: if_split_asm)
  apply (erule notE[rotated], erule(2) tcb_ctes_clear[rotated])
  apply fastforce
  done
(* Commutation ("pivot") lemma: a getCTE read may be moved in front of a
   setEndpoint write, since endpoints and CTEs live in disjoint objects. *)
lemma setEndpoint_getCTE_pivot[unfolded K_bind_def]:
  "do setEndpoint p val; v <- getCTE slot; f v od
     = do v <- getCTE slot; setEndpoint p val; f v od"
  apply (simp add: getCTE_assert_opt setEndpoint_def
                   setObject_modify_assert
                   fun_eq_iff bind_assoc)
  apply (simp add: exec_gets assert_def assert_opt_def
                   exec_modify update_ep_map_tos
            split: if_split option.split)
  done
(* Pivot lemma for two writes: setEndpoint and setCTE commute.  Proved by
   snapshotting the relevant typ_at facts (ep_at', real_cte_at', the TCB case
   for tcb-internal slots) on both sides and showing both orders produce the
   same state update. *)
lemma setEndpoint_setCTE_pivot[unfolded K_bind_def]:
  "do setEndpoint p val; setCTE slot cte; f od =
     do setCTE slot cte; setEndpoint p val; f od"
  apply (rule monadic_rewrite_to_eq)
  apply simp
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_trans,
          rule_tac f="ep_at' p" in monadic_rewrite_add_gets)
   apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets,
          rule monadic_rewrite_bind_tail)
    apply (rename_tac epat)
    apply (rule monadic_rewrite_transverse)
     apply (rule monadic_rewrite_bind_tail)
      apply (simp add: setEndpoint_def setObject_modify_assert bind_assoc)
      apply (rule_tac rv=epat in monadic_rewrite_gets_known)
     apply (wp setCTE_typ_at'[where T="koType TYPE(endpoint)", unfolded typ_at_to_obj_at']
               | simp)+
    apply (simp add: setCTE_assert_modify bind_assoc)
    apply (rule monadic_rewrite_trans, rule monadic_rewrite_add_gets,
           rule monadic_rewrite_bind_tail)+
      apply (rename_tac cteat tcbat)
      apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_tail)
        apply (rule monadic_rewrite_trans)
         apply (rule_tac rv=cteat in monadic_rewrite_gets_known)
        apply (rule_tac rv=tcbat in monadic_rewrite_gets_known)
       apply (wp setEndpoint_typ_at'[where T="koType TYPE(tcb)", unfolded typ_at_to_obj_at']
                 setEndpoint_typ_at'[where T="koType TYPE(cte)", unfolded typ_at_to_obj_at']
                 | simp)+
      (* both orders yield the same heap update, given the snapshots agree *)
      apply (rule_tac P="\<lambda>s. epat = ep_at' p s \<and> cteat = real_cte_at' slot s
          \<and> tcbat = (tcb_at' (slot && ~~ mask 9) and (%y. slot && mask 9 : dom tcb_cte_cases)) s"
            in monadic_rewrite_refl3)
      apply (simp add: setEndpoint_def setObject_modify_assert bind_assoc
                       exec_gets assert_def exec_modify
                split: if_split)
      apply (auto split: if_split simp: obj_at'_def projectKOs
                 intro!: arg_cong[where f=f] ext kernel_state.fold_congs)[1]
     apply wp+
  apply simp
  done
(* Further pivot lemmas, all derived from the getCTE/setCTE pivots above:
   setEndpoint commutes with updateMDB, updateCap, an endpoint-independent
   modify, and clearUntypedFreeIndex. *)

(* updateMDB = getCTE + setCTE, so it commutes with setEndpoint. *)
lemma setEndpoint_updateMDB_pivot[unfolded K_bind_def]:
  "do setEndpoint p val; updateMDB slot mf; f od =
     do updateMDB slot mf; setEndpoint p val; f od"
  by (clarsimp simp: updateMDB_def bind_assoc
                     setEndpoint_getCTE_pivot
                     setEndpoint_setCTE_pivot
              split: if_split)

(* Same argument for updateCap. *)
lemma setEndpoint_updateCap_pivot[unfolded K_bind_def]:
  "do setEndpoint p val; updateCap slot mf; f od =
     do updateCap slot mf; setEndpoint p val; f od"
  by (clarsimp simp: updateCap_def bind_assoc
                     setEndpoint_getCTE_pivot
                     setEndpoint_setCTE_pivot)

(* A modify whose state function commutes with ksPSpace updates commutes with
   setEndpoint; the side condition guarantees ep_at' is unchanged by sf. *)
lemma modify_setEndpoint_pivot[unfolded K_bind_def]:
  "\<lbrakk> \<And>ksf s. ksPSpace_update ksf (sf s) = sf (ksPSpace_update ksf s) \<rbrakk>
      \<Longrightarrow> (do modify sf; setEndpoint p val; f od) =
          (do setEndpoint p val; modify sf; f od)"
  apply (subgoal_tac "\<forall>s. ep_at' p (sf s) = ep_at' p s")
   apply (simp add: setEndpoint_def setObject_modify_assert
                    bind_assoc fun_eq_iff
                    exec_gets exec_modify assert_def
             split: if_split)
  apply atomize
  apply clarsimp
  apply (drule_tac x="\<lambda>_. ksPSpace s" in spec)
  apply (drule_tac x="s" in spec)
  apply (drule_tac f="ksPSpace" in arg_cong)
  apply simp
  apply (metis obj_at'_pspaceI)
  done

(* clearUntypedFreeIndex only reads a CTE and does CTE-local updates, so it
   commutes with setEndpoint as well. *)
lemma setEndpoint_clearUntypedFreeIndex_pivot[unfolded K_bind_def]:
  "do setEndpoint p val; v <- clearUntypedFreeIndex slot; f od
     = do v <- clearUntypedFreeIndex slot; setEndpoint p val; f od"
  by (simp add: clearUntypedFreeIndex_def bind_assoc
                getSlotCap_def
                setEndpoint_getCTE_pivot
                updateTrackedFreeIndex_def
                modify_setEndpoint_pivot
         split: capability.split
         | rule bind_cong[OF refl] allI impI
                bind_apply_cong[OF refl])+
(* Top-level pivot: emptySlot (with no IRQ) and setEndpoint commute, by
   unfolding emptySlot and applying the component pivot lemmas above. *)
lemma emptySlot_setEndpoint_pivot[unfolded K_bind_def]:
  "(do emptySlot slot None; setEndpoint p val; f od) =
     (do setEndpoint p val; emptySlot slot None; f od)"
  apply (rule ext)
  apply (simp add: emptySlot_def bind_assoc
                   setEndpoint_getCTE_pivot
                   setEndpoint_updateCap_pivot
                   setEndpoint_updateMDB_pivot
                   case_Null_If
                   setEndpoint_clearUntypedFreeIndex_pivot
            split: if_split
            | rule bind_apply_cong[OF refl])+
  done
(* Reading a CTE immediately after writing it returns the written value, so
   the read can be removed. *)
lemma set_getCTE[unfolded K_bind_def]:
  "do setCTE p cte; v <- getCTE p; f v od
     = do setCTE p cte; f cte od"
  apply simp
  apply (rule monadic_rewrite_to_eq)
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_bind_tail)
    apply (simp add: getCTE_assert_opt bind_assoc)
    apply (rule monadic_rewrite_trans,
           rule_tac rv="Some cte" in monadic_rewrite_gets_known)
    apply (simp add: assert_opt_def)
    apply (rule monadic_rewrite_refl)
   apply wp
  apply simp
  done
(* Two consecutive writes to the same CTE collapse to the second write.
   The proof snapshots the real-CTE / TCB-slot case split, then shows the two
   heap updates compose to the same single update (using that the TCB setter
   functions compose). *)
lemma set_setCTE[unfolded K_bind_def]:
  "do setCTE p val; setCTE p val' od = setCTE p val'"
  apply simp
  apply (rule monadic_rewrite_to_eq)
  apply (rule monadic_rewrite_imp)
   apply (rule monadic_rewrite_trans,
          rule_tac f="real_cte_at' p" in monadic_rewrite_add_gets)
   apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets,
          rule monadic_rewrite_bind_tail)
    apply (rule monadic_rewrite_trans,
           rule_tac f="tcb_at' (p && ~~ mask 9) and K (p && mask 9 \<in> dom tcb_cte_cases)"
                  in monadic_rewrite_add_gets)
    apply (rule monadic_rewrite_transverse, rule monadic_rewrite_add_gets,
           rule monadic_rewrite_bind_tail)
     apply (rename_tac cteat tcbat)
     apply (rule monadic_rewrite_trans)
      apply (rule monadic_rewrite_bind_tail)
       apply (simp add: setCTE_assert_modify)
       apply (rule monadic_rewrite_trans, rule_tac rv=cteat in monadic_rewrite_gets_known)
       apply (rule_tac rv=tcbat in monadic_rewrite_gets_known)
      apply (wp setCTE_typ_at'[where T="koType TYPE(tcb)", unfolded typ_at_to_obj_at']
                setCTE_typ_at'[where T="koType TYPE(cte)", unfolded typ_at_to_obj_at']
                | simp)+
     apply (simp add: setCTE_assert_modify bind_assoc)
     apply (rule monadic_rewrite_bind_tail)+
       (* key fact: the TCB field setter composes, so two writes = one write *)
       apply (rule_tac P="c = cteat \<and> t = tcbat
                  \<and> (tcbat \<longrightarrow>
                       (\<exists> getF setF. tcb_cte_cases (p && mask 9) = Some (getF, setF)
                                       \<and> (\<forall> f g tcb. setF f (setF g tcb) = setF (f o g) tcb)))"
                   in monadic_rewrite_gen_asm)
       apply (rule monadic_rewrite_refl2)
       apply (simp add: exec_modify split: if_split)
       apply (auto simp: simpler_modify_def projectKO_opt_tcb
                  intro!: kernel_state.fold_congs ext
                   split: if_split)[1]
      apply wp+
  apply (clarsimp intro!: all_tcbI)
  apply (auto simp: tcb_cte_cases_def split: if_split_asm)
  done
(* A full CTE write decomposes into updateCap followed by updateMDB (for a
   non-null slot), using the collapse lemmas set_getCTE / set_setCTE above. *)
lemma setCTE_updateCapMDB:
  "p \<noteq> 0 \<Longrightarrow>
     setCTE p cte = do updateCap p (cteCap cte); updateMDB p (const (cteMDBNode cte)) od"
  apply (simp add: updateCap_def updateMDB_def bind_assoc set_getCTE
                   cte_overwrite set_setCTE)
  apply (simp add: getCTE_assert_opt setCTE_assert_modify bind_assoc)
  apply (rule ext, simp add: exec_gets assert_opt_def exec_modify
                      split: if_split option.split)
  apply (cut_tac P=\<top> and p=p and s=x in cte_wp_at_ctes_of)
  apply (cases cte)
  apply (simp add: cte_wp_at_obj_cases')
  apply (auto simp: mask_out_sub_mask)
  done
(* When the slot does not hold an untyped capability, clearUntypedFreeIndex
   is a no-op and can be rewritten to return (). *)
lemma clearUntypedFreeIndex_simple_rewrite:
  "monadic_rewrite True False
      (cte_wp_at' (Not o isUntypedCap o cteCap) slot)
      (clearUntypedFreeIndex slot) (return ())"
  apply (simp add: clearUntypedFreeIndex_def getSlotCap_def)
  apply (rule monadic_rewrite_name_pre)
  apply (clarsimp simp: cte_wp_at_ctes_of)
  apply (rule monadic_rewrite_imp)
   apply (rule_tac rv=cte in monadic_rewrite_symb_exec_l_known, wp+)
    (* every non-Untyped capability branch is `return ()` *)
    apply (simp split: capability.split,
           strengthen monadic_rewrite_refl, simp)
   apply clarsimp
   apply (wp getCTE_wp')
  apply (clarsimp simp: cte_wp_at_ctes_of)
  done
(* Emptying the slot of a reply cap whose only MDB neighbour is the reply
   master reduces to: fix up the master's MDB node, then overwrite the slot
   with the default (null) CTE.  The reply_masters_rvk_fb invariant supplies
   the firstBadged/revocable facts needed for the MDB update to be exact. *)
lemma emptySlot_replymaster_rewrite[OF refl]:
  "mdbn = cteMDBNode cte \<Longrightarrow>
   monadic_rewrite True False
     ((\<lambda>_. mdbNext mdbn = 0 \<and> mdbPrev mdbn \<noteq> 0)
           and ((\<lambda>_. cteCap cte \<noteq> NullCap)
           and (cte_wp_at' (op = cte) slot
           and cte_wp_at' (\<lambda>cte. isReplyCap (cteCap cte)) slot
           and cte_wp_at' (\<lambda>cte. isReplyCap (cteCap cte) \<and> capReplyMaster (cteCap cte))
                   (mdbPrev mdbn)
           and (\<lambda>s. reply_masters_rvk_fb (ctes_of s))
           and (\<lambda>s. no_0 (ctes_of s)))))
     (emptySlot slot None)
     (do updateMDB (mdbPrev mdbn) (mdbNext_update (K 0) o mdbFirstBadged_update (K True)
                                       o mdbRevocable_update (K True));
         setCTE slot makeObject
      od)"
  apply (rule monadic_rewrite_gen_asm)+
  apply (rule monadic_rewrite_imp)
   apply (rule_tac P="slot \<noteq> 0" in monadic_rewrite_gen_asm)
   apply (clarsimp simp: emptySlot_def setCTE_updateCapMDB)
   apply (rule monadic_rewrite_trans)
    apply (rule monadic_rewrite_bind_head)
    (* the slot holds a reply cap, so the untyped free-index update vanishes *)
    apply (rule clearUntypedFreeIndex_simple_rewrite)
   apply simp
   apply (rule_tac rv=cte in monadic_rewrite_symb_exec_l_known, (wp empty_fail_getCTE)+)
    apply (simp add: updateMDB_def Let_def bind_assoc makeObject_cte case_Null_If)
    apply (rule monadic_rewrite_bind_tail)
     apply (rule monadic_rewrite_bind)
       apply (rule_tac P="mdbFirstBadged (cteMDBNode ctea) \<and> mdbRevocable (cteMDBNode ctea)"
                    in monadic_rewrite_gen_asm)
       apply (rule monadic_rewrite_refl2)
       apply (case_tac ctea, rename_tac mdbnode, case_tac mdbnode)
       apply simp
      apply (rule monadic_rewrite_refl)
     apply (wp getCTE_wp')+
  apply (clarsimp simp: cte_wp_at_ctes_of reply_masters_rvk_fb_def)
  apply (fastforce simp: isCap_simps)
  done
(* FIXME: Move *)
(* asUser only touches the user register context, so queue flags survive it. *)
lemma asUser_obj_at_not_queued[wp]:
  "\<lbrace>obj_at' (\<lambda>tcb. \<not> tcbQueued tcb) p\<rbrace> asUser t m \<lbrace>\<lambda>rv. obj_at' (\<lambda>tcb. \<not> tcbQueued tcb) p\<rbrace>"
  apply (simp add: asUser_def split_def)
  apply (wp hoare_drop_imps | simp)+
  done

(* Not being in any ready queue (inQ) implies the tcbQueued flag is clear. *)
lemma all_prio_not_inQ_not_tcbQueued: "\<lbrakk> obj_at' (\<lambda>a. (\<forall>d p. \<not> inQ d p a)) t s \<rbrakk> \<Longrightarrow> obj_at' (\<lambda>a. \<not> tcbQueued a) t s"
  apply (clarsimp simp: obj_at'_def inQ_def)
  done

(* Notification objects are untouched by these thread/slot operations. *)
crunch ntfn_obj_at[wp]: setThreadState, emptySlot, asUser "obj_at' (P::(Structures_H.notification \<Rightarrow> bool)) ntfnptr"
  (ignore: getObject setObject wp: obj_at_setObject2 crunch_wps
   simp: crunch_simps updateObject_default_def in_monad)

(* A thread blocked on a reply is not runnable, hence (by valid_queues) not
   in any ready queue, hence not marked queued. *)
lemma st_tcb_at_is_Reply_imp_not_tcbQueued: "\<And>s t.\<lbrakk> invs' s; st_tcb_at' isReply t s\<rbrakk> \<Longrightarrow> obj_at' (\<lambda>a. \<not> tcbQueued a) t s"
  apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def st_tcb_at'_def valid_queues_no_bitmap_def)
  apply (rule all_prio_not_inQ_not_tcbQueued)
  apply (clarsimp simp: obj_at'_def)
  apply (erule_tac x="d" in allE)
  apply (erule_tac x="p" in allE)
  apply (erule conjE)
  apply (erule_tac x="t" in ballE)
   apply (clarsimp simp: obj_at'_def runnable'_def isReply_def)
   apply (case_tac "tcbState obj")
          apply ((clarsimp simp: inQ_def)+)[8]
  apply (clarsimp simp: valid_queues'_def obj_at'_def)
  done

(* valid_objs' guarantees a bound notification pointer is a real ntfn. *)
lemma valid_objs_ntfn_at_tcbBoundNotification:
  "ko_at' tcb t s \<Longrightarrow> valid_objs' s \<Longrightarrow> tcbBoundNotification tcb \<noteq> None
    \<Longrightarrow> ntfn_at' (the (tcbBoundNotification tcb)) s"
  apply (drule(1) ko_at_valid_objs', simp add: projectKOs)
  apply (simp add: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def)
  apply clarsimp
  done
(* Preservation / lifting lemmas used by the big fastpath proof below. *)

(* setThreadState does not change bound-notification facts. *)
crunch bound_tcb_at'_Q[wp]: setThreadState "\<lambda>s. Q (bound_tcb_at' P t s)"
  (wp: threadSet_pred_tcb_no_state crunch_wps simp: unless_def)

lemmas emptySlot_pred_tcb_at'_Q[wp] = lift_neg_pred_tcb_at'[OF emptySlot_typ_at' emptySlot_pred_tcb_at']

(* tcb_at' is a typ_at fact and so survives emptySlot. *)
lemma emptySlot_tcb_at'[wp]:
  "\<lbrace>\<lambda>s. Q (tcb_at' t s)\<rbrace> emptySlot a b \<lbrace>\<lambda>_ s. Q (tcb_at' t s)\<rbrace>"
  by (simp add: tcb_at_typ_at', wp)

lemmas cnode_caps_gsCNodes_lift
    = hoare_lift_Pf2[where P="\<lambda>gs s. cnode_caps_gsCNodes (f s) gs" and f=gsCNodes for f]
      hoare_lift_Pf2[where P="\<lambda>gs s. Q s \<longrightarrow> cnode_caps_gsCNodes (f s) gs" and f=gsCNodes for f Q]

(* Case split on an option value inside a monadic_rewrite precondition. *)
lemma monadic_rewrite_option_cases:
  "\<lbrakk> v = None \<Longrightarrow> monadic_rewrite F E Q a b; \<And>x. v = Some x \<Longrightarrow> monadic_rewrite F E (R x) a b \<rbrakk>
      \<Longrightarrow> monadic_rewrite F E (\<lambda>s. (v = None \<longrightarrow> Q s) \<and> (\<forall>x. v = Some x \<longrightarrow> R x s)) a b"
  by (cases v, simp_all)

(* resolveAddressBits agrees with its pure function form when the cap is read
   from a (possibly state-dependent) slot. *)
lemma resolveAddressBitsFn_eq_name_slot:
  "monadic_rewrite F E (\<lambda>s. (isCNodeCap cap \<longrightarrow> cte_wp_at' (\<lambda>cte. cteCap cte = cap) (slot s) s)
          \<and> valid_objs' s \<and> cnode_caps_gsCNodes' s)
     (resolveAddressBits cap capptr bits)
     (gets (resolveAddressBitsFn cap capptr bits o only_cnode_caps o ctes_of))"
  apply (rule monadic_rewrite_imp, rule resolveAddressBitsFn_eq)
  apply auto
  done

(* asUser does not change bound-notification or tcb_at facts. *)
crunch bound_tcb_at'_Q[wp]: asUser "\<lambda>s. Q (bound_tcb_at' P t s)"
  (simp: crunch_simps wp: threadSet_pred_tcb_no_state crunch_wps)

lemma asUser_tcb_at'_Q[wp]:
  "\<lbrace>\<lambda>s. Q (tcb_at' t s)\<rbrace> asUser a b \<lbrace>\<lambda>_ s. Q (tcb_at' t s)\<rbrace>"
  by (simp add: tcb_at_typ_at', wp)
(* wp rule for the fastpath's "is there an active bound notification" check:
   it returns True iff the thread has a bound, active notification. *)
lemma active_ntfn_check_wp:
  "\<lbrace>\<lambda>s. Q (\<exists>ntfnptr. bound_tcb_at' (op = (Some ntfnptr)) thread s
      \<and> \<not> obj_at' (Not o isActive) ntfnptr s) s \<rbrace> do bound_ntfn \<leftarrow> getBoundNotification thread;
      case bound_ntfn of None \<Rightarrow> return False
        | Some ntfnptr \<Rightarrow> liftM EndpointDecls_H.isActive $ getNotification ntfnptr
    od \<lbrace>Q\<rbrace>"
  apply (rule hoare_pre)
   apply (wp getNotification_wp gbn_wp' | wpc)+
  apply (auto simp: pred_tcb_at'_def obj_at'_def projectKOs)
  done

(* Enqueueing any thread leaves every thread's IPC buffer field unchanged. *)
lemma tcbSchedEnqueue_tcbIPCBuffer:
  "\<lbrace>obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t\<rbrace>
    tcbSchedEnqueue t'
   \<lbrace>\<lambda>_. obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t\<rbrace>"
  apply (simp add: tcbSchedEnqueue_def unless_when)
  apply (wp threadSet_obj_at' hoare_drop_imps threadGet_wp
        |simp split: if_split)+
  done
(* The tcbIPCBuffer field is untouched by all of these kernel operations;
   the fastpath proof relies on this to pin the buffer pointer once. *)
crunch obj_at'_tcbIPCBuffer[wp]: rescheduleRequired "obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t"
  (wp: crunch_wps tcbSchedEnqueue_tcbIPCBuffer simp: rescheduleRequired_def)

crunch obj_at'_tcbIPCBuffer[wp]: setThreadState "obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t"
  (wp: crunch_wps threadSet_obj_at'_really_strongest)

crunch obj_at'_tcbIPCBuffer[wp]: getCTE "obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t"
  (wp: setCTE_obj_at'_queued crunch_wps threadSet_obj_at'_really_strongest)

crunch obj_at'_tcbIPCBuffer[wp]: emptySlot "obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t"
  (wp: crunch_wps)

crunch obj_at'_tcbIPCBuffer[wp]: transferCapsToSlots "obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t"
  (wp: crunch_wps transferCapsToSlots_pres1 simp: crunch_simps ignore: constOnFailure)

crunch obj_at'_tcbIPCBuffer[wp]: asUser "obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t"
  (wp: crunch_wps)

crunch obj_at'_tcbIPCBuffer[wp]: handleFault "obj_at' (\<lambda>tcb. P (tcbIPCBuffer tcb)) t"
  (wp: crunch_wps constOnFailure_wp tcbSchedEnqueue_tcbIPCBuffer threadSet_obj_at'_really_strongest
   simp: zipWithM_x_mapM ignore: sequenceE mapME getObject setObject)
(* Main fastpath equivalence for the SysReplyRecv syscall: under the kernel
   invariants, a Running current thread and an unset scheduler action, the
   full kernel entry (callKernel) may be rewritten to the optimised fastpath.
   Proof outline: symbolically execute the slow path, prune each non-fastpath
   alternative behind the fastpath's guard checks, rewrite the reply transfer,
   thread switch and receive into their simple forms, and finally isolate the
   thread actions so both sides perform the same combined state update. *)
lemma fastpath_callKernel_SysReplyRecv_corres:
  "monadic_rewrite True False
     (invs' and ct_in_state' (op = Running) and (\<lambda>s. ksSchedulerAction s = ResumeCurrentThread)
         and cnode_caps_gsCNodes')
     (callKernel (SyscallEvent SysReplyRecv)) (fastpaths SysReplyRecv)"
  including no_pre
  apply (rule monadic_rewrite_introduce_alternative)
   apply ( simp add: callKernel_def)
  apply (rule monadic_rewrite_imp)
   apply (simp add: handleEvent_def handleReply_def
                    handleRecv_def liftE_bindE_handle liftE_handle
                    bind_assoc getMessageInfo_def liftE_bind)
   apply (simp add: catch_liftE_bindE unlessE_throw_catch_If
                    unifyFailure_catch_If catch_liftE
                    getMessageInfo_def alternative_bind
                    fastpaths_def getThreadCallerSlot_def
                    locateSlot_conv capability_case_Null_ReplyCap
                    getThreadCSpaceRoot_def
              cong: if_cong)
   (* execute the reads preceding the first fastpath guard *)
   apply (rule monadic_rewrite_rdonly_bind_l, wp)
   apply (rule monadic_rewrite_bind_tail)
    apply (rule monadic_rewrite_symb_exec_r, wp+)
     apply (rename_tac thread msgInfo)
     apply (rule monadic_rewrite_symb_exec_r, wp+)
      apply (rename_tac cptr)
      apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet])
       apply (rename_tac tcbFault)
       (* guard: no pending fault, else fall back to the slow path *)
       apply (rule monadic_rewrite_alternative_rhs[rotated])
        apply (rule monadic_rewrite_alternative_l)
       apply (rule monadic_rewrite_if_rhs[rotated])
        apply (rule monadic_rewrite_alternative_l)
       apply (simp add: lookupCap_def liftME_def lookupCapAndSlot_def
                        lookupSlotForThread_def bindE_assoc
                        split_def getThreadCSpaceRoot_def
                        locateSlot_conv liftE_bindE bindE_bind_linearise
                        capFaultOnFailure_def rethrowFailure_injection
                        injection_handler_catch bind_bindE_assoc
                        getThreadCallerSlot_def bind_assoc
                        getSlotCap_def
                        case_bool_If o_def
                        isRight_def[where x="Inr v" for v]
                        isRight_def[where x="Inl v" for v]
                  cong: if_cong)
       apply (rule monadic_rewrite_symb_exec_r, wp+)
        apply (rename_tac "cTableCTE")
        (* replace the capability lookup by its pure function form *)
        apply (rule monadic_rewrite_transverse,
               rule monadic_rewrite_bind_head,
               rule resolveAddressBitsFn_eq)
        apply (rule monadic_rewrite_symb_exec_r, (wp | simp)+)
         apply (rename_tac "rab_ret")
         (* guard: the lookup must succeed *)
         apply (rule_tac P="isRight rab_ret" in monadic_rewrite_cases[rotated])
          apply (case_tac rab_ret, simp_all add: isRight_def)[1]
          apply (rule monadic_rewrite_alternative_l)
         apply clarsimp
         apply (simp add: isRight_case_sum liftE_bind
                          isRight_def[where x="Inr v" for v])
         apply (rule monadic_rewrite_symb_exec_r, wp+)
          apply (rename_tac ep_cap)
          (* guard: the looked-up cap must be a suitable endpoint cap *)
          apply (rule monadic_rewrite_if_rhs[rotated])
           apply (rule monadic_rewrite_alternative_l)
          apply (rule monadic_rewrite_symb_exec_r[OF _ _ _ active_ntfn_check_wp, unfolded bind_assoc fun_app_def])
            apply (rule hoare_pre, (wp | wpc | simp)+)[1]
           apply (unfold getBoundNotification_def)[1]
           apply (wp threadGet_wp)
          apply (rename_tac ep)
          (* guard: no active bound notification may preempt the receive *)
          apply (rule monadic_rewrite_if_rhs[rotated])
           apply (rule monadic_rewrite_alternative_l)
          apply (rule monadic_rewrite_symb_exec_r, wp+)
           apply (rename_tac ep)
           apply (rule monadic_rewrite_if_rhs[rotated])
            apply (rule monadic_rewrite_alternative_l)
           apply (rule monadic_rewrite_rdonly_bind_l, wp)
           apply (rule monadic_rewrite_bind_tail)
            apply (rename_tac replyCTE)
            (* guard: the caller slot must hold a reply cap *)
            apply (rule monadic_rewrite_if_rhs[rotated])
             apply (rule monadic_rewrite_alternative_l)
            apply (simp add: bind_assoc)
            apply (rule monadic_rewrite_rdonly_bind_l, wp assert_inv)
            apply (rule monadic_rewrite_assert)
            apply (rule monadic_rewrite_symb_exec_r, wp+)
             apply (rename_tac callerFault)
             (* guard: the caller must be fault-free *)
             apply (rule monadic_rewrite_if_rhs[rotated])
              apply (rule monadic_rewrite_alternative_l)
             apply (simp add: getThreadVSpaceRoot_def locateSlot_conv)
             apply (rule monadic_rewrite_symb_exec_r, wp+)
              apply (rename_tac vTableCTE)
              (* guard: valid VSpace root for the direct switch *)
              apply (rule monadic_rewrite_if_rhs[rotated])
               apply (rule monadic_rewrite_alternative_l)
              apply (rule monadic_rewrite_symb_exec_r, wp+)+
                apply (rename_tac curPrio callerPrio)
                (* guard: priorities allow switching straight to the caller *)
                apply (rule monadic_rewrite_if_rhs[rotated])
                 apply (rule monadic_rewrite_alternative_l)
                apply (rule monadic_rewrite_symb_exec_r, wp+)
                 apply (rule monadic_rewrite_if_rhs[rotated])
                  apply (rule monadic_rewrite_alternative_l)
                 apply (rule monadic_rewrite_symb_exec_r[OF curDomain_inv],
                        simp only: curDomain_def, rule non_fail_gets)
                  apply (rename_tac "curDom")
                  apply (rule monadic_rewrite_symb_exec_r[OF threadGet_inv no_fail_threadGet])
                   apply (rename_tac "callerDom")
                   (* guard: same scheduling domain *)
                   apply (rule monadic_rewrite_if_rhs[rotated])
                    apply (rule monadic_rewrite_alternative_l)
                   apply (rule monadic_rewrite_trans,
                          rule monadic_rewrite_pick_alternative_1)
                   (* pin the caller's IPC buffer pointer for the whole tail *)
                   apply (rule_tac P="\<lambda>v. obj_at' (%tcb. tcbIPCBuffer tcb = v) (capTCBPtr (cteCap replyCTE))"
                            in monadic_rewrite_exists_v)
                   apply (rename_tac ipcBuffer)
                   apply (simp add: ARM_H.switchToThread_def bind_assoc)
                   apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse])
                     apply (rule_tac v=ipcBuffer in monadic_rewrite_threadGet_tcbIPCBuffer | rule monadic_rewrite_bind monadic_rewrite_refl)+
                          apply (wp mapM_x_wp' getObject_inv | wpc | simp add:
                                 | wp_once hoare_drop_imps )+
                    apply (rule_tac v=ipcBuffer in monadic_rewrite_threadGet_tcbIPCBuffer | rule monadic_rewrite_bind monadic_rewrite_refl)+
                          apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv getCTE_obj_at'_tcbIPCBuffer mapM_x_wp' getObject_inv | wpc | simp add:
                                 | wp_once hoare_drop_imps )+
                   (* rewrite doReplyTransfer / IPC transfer / switch to their
                      simple forms *)
                   apply (rule monadic_rewrite_trans)
                    apply (rule monadic_rewrite_trans)
                     apply (rule monadic_rewrite_bind_head)
                     apply (rule monadic_rewrite_trans)
                      apply (rule doReplyTransfer_simple)
                     apply simp
                     apply (((rule monadic_rewrite_weaken2,
                             (rule_tac msgInfo=msgInfo in doIPCTransfer_simple_rewrite
                              | rule_tac curPrio=curPrio and destPrio=callerPrio
                                     and curDom=curDom and destDom=callerDom
                                     and ct=thread in attemptSwitchTo_rewrite2))
                            | rule cteDeleteOne_replycap_rewrite
                            | rule monadic_rewrite_bind monadic_rewrite_refl
                            | wp assert_inv mapM_x_wp'
                                 setThreadState_obj_at_unchanged
                                 asUser_obj_at_unchanged
                                 hoare_strengthen_post[OF _ obj_at_conj'[simplified atomize_conjL], rotated]
                            | simp add: setMessageInfo_def setThreadState_runnable_simp)+)[1]
                    apply (simp add: setMessageInfo_def)
                    apply (rule monadic_rewrite_bind_tail)
                     apply (rule_tac rv=thread in monadic_rewrite_symb_exec_l_known,
                            (wp empty_fail_getCurThread)+)
                      apply (rule_tac rv=cptr in monadic_rewrite_symb_exec_l_known,
                             (wp empty_fail_asUser empty_fail_getRegister)+)
                       apply (rule monadic_rewrite_bind)
                         apply (rule monadic_rewrite_catch[OF _ monadic_rewrite_refl True_E_E])
                         apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+)
                          apply (rename_tac cTableCTE2,
                                 rule_tac P="cteCap cTableCTE2 = cteCap cTableCTE"
                                     in monadic_rewrite_gen_asm)
                          apply simp
                          apply (rule monadic_rewrite_trans,
                                 rule monadic_rewrite_bindE[OF _ monadic_rewrite_refl])
                            apply (rule_tac slot="\<lambda>s. ksCurThread s + 2 ^ cte_level_bits * tcbCTableSlot"
                                     in resolveAddressBitsFn_eq_name_slot)
                           apply wp
                          apply (rule monadic_rewrite_trans)
                           apply (rule_tac rv=rab_ret
                                    in monadic_rewrite_gets_known[where m="NonDetMonad.lift f"
                                            for f, folded bindE_def])
                          apply (simp add: NonDetMonad.lift_def isRight_case_sum)
                          apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+)
                           apply (rename_tac ep_cap2)
                           apply (rule_tac P="cteCap ep_cap2 = cteCap ep_cap" in monadic_rewrite_gen_asm)
                           apply (simp add: cap_case_EndpointCap_NotificationCap)
                           apply (rule monadic_rewrite_liftE)
                           apply (rule monadic_rewrite_trans)
                            apply (rule monadic_rewrite_bind)
                              apply (rule deleteCallerCap_nullcap_rewrite)
                             apply (rule_tac ep=ep in receiveIPC_simple_rewrite)
                            apply (wp, simp)
                           apply (rule monadic_rewrite_bind_head)
                           apply (rule setThreadState_schact_set)
                          apply (wp getCTE_known_cap)+
                        apply (rule monadic_rewrite_bind)
                          apply (rule_tac t="capTCBPtr (cteCap replyCTE)"
                                      and t'=thread
                                   in schedule_known_rewrite)
                         apply (rule monadic_rewrite_weaken[where E=True and F=True], simp)
                         apply (rule activateThread_simple_rewrite)
                        apply wp
                         apply (simp add: ct_in_state'_def)
                        apply ((wp setCurThread_ct_in_state[folded st_tcb_at'_def]
                                   Arch_switchToThread_pred_tcb')+)[2]
                       apply (simp add: catch_liftE)
                       apply (wp setEndpoint_obj_at_tcb' threadSet_pred_tcb_at_state[unfolded if_bool_eq_conj])
                      apply (simp cong: rev_conj_cong)
                      apply (strengthen imp_consequent[where Q="tcb_at' t s" for t s])
                      apply (unfold setSchedulerAction_def)[3]
                      apply ((wp setThreadState_oa_queued user_getreg_rv setThreadState_no_sch_change
                                 setThreadState_obj_at_unchanged
                                 sts_st_tcb_at'_cases sts_bound_tcb_at'
                                 emptySlot_obj_at'_not_queued
                                 emptySlot_cte_wp_at_cteCap
                                 emptySlot_cnode_caps
                                 user_getreg_inv asUser_typ_ats
                                 asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp'
                                 static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift
                                 static_imp_wp cnode_caps_gsCNodes_lift
                                 hoare_vcg_ex_lift
                             | simp del: comp_apply
                             | clarsimp simp: obj_at'_weakenE[OF _ TrueI])+)
                     apply (wp getCTE_wp' gts_imp')+
                   apply (simp add: ARM_H.switchToThread_def bind_assoc)
                   apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse])
                     apply (rule_tac v=ipcBuffer in monadic_rewrite_threadGet_tcbIPCBuffer | rule monadic_rewrite_bind monadic_rewrite_refl)+
                          apply (wp mapM_x_wp' handleFault_obj_at'_tcbIPCBuffer getObject_inv | wpc | simp add:
                                 | wp_once hoare_drop_imps )+
                    apply (rule_tac v=ipcBuffer in monadic_rewrite_threadGet_tcbIPCBuffer | rule monadic_rewrite_bind monadic_rewrite_refl)+
                          apply (wp setCTE_obj_at'_tcbIPCBuffer assert_inv getCTE_obj_at'_tcbIPCBuffer mapM_x_wp' getObject_inv | wpc | simp add:
                                 | wp_once hoare_drop_imps )+
                   apply (simp add: bind_assoc catch_liftE
                                    receiveIPC_def Let_def liftM_def
                                    setThreadState_runnable_simp)
                   apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getThreadState)+)
                    apply (rule monadic_rewrite_assert)
                    apply (rule_tac P="inj (case_bool thread (capTCBPtr (cteCap replyCTE)))"
                             in monadic_rewrite_gen_asm)
                    (* isolate the thread actions so both sides match up *)
                    apply (rule monadic_rewrite_trans[OF _ monadic_rewrite_transverse])
                      apply (rule monadic_rewrite_weaken[where F=False and E=True], simp)
                      apply (rule isolate_thread_actions_rewrite_bind
                                  fastpath_isolate_rewrites fastpath_isolatables
                                  bool.simps setRegister_simple
                                  zipWithM_setRegister_simple
                                  thread_actions_isolatable_bind
                                  thread_actions_isolatableD[OF setCTE_isolatable]
                                  setCTE_isolatable
                                  setVMRoot_isolatable[THEN thread_actions_isolatableD] setVMRoot_isolatable
                                  doMachineOp_isolatable[THEN thread_actions_isolatableD] doMachineOp_isolatable
                             | assumption
                             | wp assert_inv)+
                    apply (simp only: )
                    apply (rule_tac P="(\<lambda>s. ksSchedulerAction s = ResumeCurrentThread)
                                        and tcb_at' thread
                                        and (cte_wp_at' (\<lambda>cte. isReplyCap (cteCap cte))
                                                 (thread + 2 ^ cte_level_bits * tcbCallerSlot)
                                        and (\<lambda>s. \<forall>x. tcb_at' (case_bool thread (capTCBPtr (cteCap replyCTE)) x) s)
                                        and valid_mdb')"
                                and F=True and E=False in monadic_rewrite_weaken)
                    apply (rule monadic_rewrite_isolate_final2)
                       apply simp
                       apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+)
                        apply (rename_tac callerCTE)
                        apply (rule monadic_rewrite_assert)
                        apply (rule monadic_rewrite_symb_exec_l, (wp empty_fail_getCTE)+)
                         apply (rule monadic_rewrite_assert)
                         apply (simp add: emptySlot_setEndpoint_pivot)
                         apply (rule monadic_rewrite_bind)
                           apply (rule monadic_rewrite_refl2)
                           apply (clarsimp simp: isSendEP_def split: Structures_H.endpoint.split)
                          apply (rule_tac Q="\<lambda>rv. (\<lambda>_. rv = callerCTE) and Q'" for Q'
                                   in monadic_rewrite_symb_exec_r, wp+)
                           apply (rule monadic_rewrite_gen_asm, simp)
                           (* the slot-emptying collapses via the reply-master
                              rewrite proved above *)
                           apply (rule monadic_rewrite_trans, rule monadic_rewrite_bind_head,
                                  rule_tac cte=callerCTE in emptySlot_replymaster_rewrite)
                            apply (simp add: bind_assoc o_def)
                            apply (rule monadic_rewrite_refl)
                           apply (simp add: cte_wp_at_ctes_of pred_conj_def)
                          apply (wp getCTE_ctes_wp)+
                      (* remaining goals: state equalities and side conditions *)
                      apply (clarsimp simp: fun_eq_iff if_flip
                                      cong: if_cong)
                      apply (drule obj_at_ko_at', clarsimp)
                      apply (frule get_tcb_state_regs_ko_at')
                      apply (clarsimp simp: zip_map2 zip_same_conv_map foldl_map
                                            foldl_fun_upd
                                            foldr_copy_register_tsrs
                                            isRight_case_sum
                                      cong: if_cong)
                      apply (simp add: upto_enum_def fromEnum_def
                                       enum_register toEnum_def
                                       msgRegisters_unfold
                                 cong: if_cong)
                      apply (clarsimp split: if_split)
                      apply (rule ext)
                      apply (simp add: badgeRegister_def msgInfoRegister_def
                                       ARM.msgInfoRegister_def
                                       ARM.badgeRegister_def
                                 cong: if_cong
                                split: if_split)
                     apply simp
                    apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps
                                          map_to_ctes_partial_overwrite)
                    apply (simp add: valid_mdb'_def valid_mdb_ctes_def)
                   apply simp
                  apply (simp cong: if_cong bool.case_cong
                         | rule getCTE_wp' gts_wp' threadGet_wp
                                getEndpoint_wp gets_wp
                                user_getreg_wp user_getregs_wp
                                gets_the_wp gct_wp getNotification_wp
                                return_wp liftM_wp gbn_wp'
                         | (simp only: curDomain_def, wp)[1])+
  (* discharge the accumulated preconditions from the invariants *)
  apply (clarsimp simp: ct_in_state'_def pred_tcb_at')
  apply (subst tcb_at_cte_at_offset,
         erule obj_at'_weakenE[OF _ TrueI],
         simp add: tcb_cte_cases_def cte_level_bits_def tcbSlots)
  apply (clarsimp simp: valid_objs_ntfn_at_tcbBoundNotification
                        invs_valid_objs' if_apply_def2)
  apply (rule conjI[rotated])
   apply (fastforce elim: cte_wp_at_weakenE')
  apply (clarsimp simp: isRight_def)
  apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+)
  apply (frule resolveAddressBitsFn_real_cte_at',
    (clarsimp | erule cte_wp_at_weakenE')+)
  apply (frule real_cte_at', clarsimp)
  apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp,
    clarsimp simp: isCap_simps, simp add: valid_cap_simps')
  apply (clarsimp simp: maskCapRights_def isCap_simps)
  apply (frule_tac p="p' + 2 ^ cte_level_bits * tcbCallerSlot" for p'
              in cte_wp_at_valid_objs_valid_cap', clarsimp+)
  apply (clarsimp simp: valid_cap_simps')
  apply (subst tcb_at_cte_at_offset,
         assumption, simp add: tcb_cte_cases_def cte_level_bits_def tcbSlots)
  apply (clarsimp simp: inj_case_bool cte_wp_at_ctes_of
                        length_msgRegisters
                        n_msgRegisters_def order_less_imp_le
                        tcb_at_invs' invs_mdb'
                 split: bool.split)
  apply (clarsimp simp: obj_at_tcbs_of tcbSlots
                        cte_level_bits_def)
  apply (frule(1) st_tcb_at_is_Reply_imp_not_tcbQueued)
  apply (auto simp: obj_at_tcbs_of tcbSlots
                    cte_level_bits_def)
  done
(* Compose the C-level ccorres result with the rewrite above to obtain the
   end-to-end correspondence for the ReplyRecv fastpath. *)
lemmas fastpath_reply_recv_ccorres_callKernel
    = monadic_rewrite_ccorres_assemble[OF fastpath_reply_recv_ccorres fastpath_callKernel_SysReplyRecv_corres]

(* The abstract-level valid_objs invariant transfers the gsCNodes bookkeeping
   across the state relation. *)
lemma cnode_caps_gsCNodes_from_sr:
  "valid_objs s \<Longrightarrow> (s, s') \<in> state_relation
    \<Longrightarrow> cnode_caps_gsCNodes' s'"
  apply (clarsimp simp: cnode_caps_gsCNodes_def only_cnode_caps_def
                        o_def ran_map_option)
  apply (safe, simp_all)
  apply (clarsimp elim!: ranE)
  apply (frule(1) pspace_relation_cte_wp_atI[rotated])
   apply clarsimp
  apply (clarsimp simp: is_cap_simps)
  apply (frule(1) cte_wp_at_valid_objs_valid_cap)
  apply (clarsimp simp: valid_cap_simps cap_table_at_gsCNodes_eq)
  done
end
end
|
{"author": "SEL4PROJ", "repo": "jormungand", "sha": "bad97f9817b4034cd705cd295a1f86af880a7631", "save_path": "github-repos/isabelle/SEL4PROJ-jormungand", "path": "github-repos/isabelle/SEL4PROJ-jormungand/jormungand-bad97f9817b4034cd705cd295a1f86af880a7631/case_study/l4v/proof/crefine/Fastpath_C.thy"}
|
Require Import FunctionalExtensionality.
Require Import ProofIrrelevance.
Require Import Program.
Require Import Category.
Require Import Isomorphism.
Require Import Coq.
Require Import Co.
Set Universe Polymorphism.
(* A terminal object of a category: every object [o] has a morphism into
   [terminal], and that morphism is unique — any [f : o → terminal] equals
   the canonical [receivesAll o]. *)
Class Terminal `{Category} := {
  terminal : object;
  receivesAll o : o → terminal;
  receivesAllUnique {o f} : receivesAll o = f
}.
(* The opposite (dual) category: same objects, morphisms reversed,
   composition flipped.  The category laws follow from the originals with
   left/right identity swapped and associativity mirrored. *)
Instance co `(Category) : Category := {|
  object := object;
  morphism a b := morphism b a;
  id a := id;
  composition a b c f g := g ∘ f
|}.
Proof.
  - intros.
    apply Category.rightId.
  - intros.
    apply Category.leftId.
  - intros.
    symmetry.
    apply Category.assoc.
Defined.
Definition Initial {C} := @Terminal (co C).
(* Empty_set is initial in the category of Coq types: the unique map out of
   Empty_set is its recursor, and uniqueness holds by functional
   extensionality since there is nothing to apply the functions to. *)
Definition coqInitialEmptySet : @Initial Coq.
  refine {|
    terminal := Empty_set:@object (co Coq);
    receivesAll o := Empty_set_rect (const o)
  |}.
Proof.
  compute.
  intros.
  extensionality a.
  destruct a.
Defined.
(* unit is terminal in the category of Coq types: the constant [tt] map is
   the only map into unit, since every inhabitant of unit is [tt]. *)
Instance coqTerminalUnit : @Terminal Coq.
  refine {|
    terminal := unit:@object Coq;
    receivesAll o := fun _ => tt
  |}.
Proof.
  compute.
  intros.
  extensionality a.
  destruct (f a).
  reflexivity.
Defined.
(* The proposition True is also terminal: uniqueness of the map into True
   follows from proof irrelevance rather than case analysis. *)
Instance coqTerminalTrue : @Terminal Coq.
  refine {|
    terminal := True:@object Coq;
    receivesAll o := fun _ => I
  |}.
Proof.
  compute.
  intros.
  apply proof_irrelevance.
Defined.
(* A tiny two-object category over bool: exactly one morphism everywhere
   except none from true to false.  Identity and composition are forced
   (every hom-set is unit or empty), built with the $( ... )$ tactic-term
   syntax; the laws hold by computation on the finite cases. *)
Instance BoolCat : Category := {|
  object := bool:Type;
  morphism a b := match a,b with true,false => Empty_set | _,_ => unit end:Type;
  id := $( intros []; exact tt )$;
  composition := $( intros [] [] [] [] []; exact tt)$
|}.
Proof.
  all: repeat intros []; reflexivity.
Defined.
(* true is terminal in BoolCat: both hom-sets into true are unit. *)
Instance boolTerminal : @Terminal BoolCat := {|
  terminal := true:@object BoolCat;
  receivesAll := $( intros []; exact tt )$
|}.
Proof.
  repeat intros []; reflexivity.
Defined.
(* false is initial in BoolCat, i.e. terminal in the opposite category:
   both hom-sets out of false are unit. *)
Definition boolInitial : @Initial BoolCat.
  refine {|
    terminal := false:@object (co BoolCat);
    receivesAll := $( intros []; exact tt )$
  |}.
Proof.
  repeat intros []; reflexivity.
Defined.
(* Any two terminal objects are isomorphic: the two canonical morphisms
   between them compose to something that, by uniqueness of maps into a
   terminal object, must equal the identity (shown in both directions). *)
Instance allTerminalsIso `{C:Category} (t t':Terminal) : @terminal _ t ≈ @terminal _ t'.
  refine {|
    f := receivesAll terminal;
    g := receivesAll terminal
  |}.
  - match goal with |- ?H = _ => generalize H; intros h end.
    specialize (@receivesAllUnique _ t _ id).
    specialize (@receivesAllUnique _ t _ h).
    congruence.
  - match goal with |- ?H = _ => generalize H; intros h end.
    specialize (@receivesAllUnique _ t' _ id).
    specialize (@receivesAllUnique _ t' _ h).
    congruence.
Defined.
(* dualityPrinciple, should be formalized *)
(* Any two initial objects are isomorphic: an Initial is a Terminal of the
   opposite category, so the terminal-object result applies directly. *)
Instance allInitialsIso `{C:Category} (t t':Initial) : @terminal _ t ≈ @terminal _ t'.
  apply allTerminalsIso.
Defined.
(* The isomorphism between two terminal objects is itself unique: both of
   its component morphisms are maps into a terminal object, so they are
   forced by receivesAllUnique; the proof components are then equal by
   proof irrelevance. *)
Lemma allTerminalsIsoUnique `{C:Category} (t t':@Terminal C)
    (i:@terminal _ t ≈ @terminal _ t')
    (j:@terminal _ t ≈ @terminal _ t') : i = j.
  destruct i as [fi gi].
  destruct j as [fj gj].
  f_equal.
  - intros; subst.
    f_equal; apply proof_irrelevance.
  - specialize (@receivesAllUnique _ _ _ fj).
    specialize (@receivesAllUnique _ _ _ fi).
    congruence.
  - specialize (@receivesAllUnique _ _ _ gj).
    specialize (@receivesAllUnique _ _ _ gi).
    congruence.
Qed.
|
{"author": "konne88", "repo": "category-theory", "sha": "883c4edd35ad47c82300315d1cd5c7f9238bede6", "save_path": "github-repos/coq/konne88-category-theory", "path": "github-repos/coq/konne88-category-theory/category-theory-883c4edd35ad47c82300315d1cd5c7f9238bede6/Construction/Terminal.v"}
|
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing
from time import sleep
import pickle
import utils
import search.csp as csp
import search.viz as viz
# Direction / wildcard labels used in constraint tuples (value, direction).
# An earlier integer encoding is kept below for reference only.
#LEFT = 0
#RIGHT = 1
#UP = 2
#DOWN = 3
#ANY = -1
LEFT = 'left'
RIGHT = 'right'
UP = 'up'
DOWN = 'down'
ANY = 'any'
class Problem(csp.CspProblem):
    """Grid CSP: each cell (i, j) must take a value whose neighbour
    constraints are satisfied.

    ``constraints`` maps a cell value to a list of ``(required_value,
    direction)`` pairs, where ``required_value`` may be the ``ANY``
    wildcard. ``domain`` is a 2-D object array of candidate-value lists.
    """

    def __init__(self, state: np.ndarray, domain, constraints: dict):
        self.state = state
        self.constraints = constraints
        self.domain = domain
        self.rows, self.cols = state.shape

    def iterate_variables(self):
        """Yield every cell coordinate (i, j) in row-major order."""
        for i in range(self.rows):
            for j in range(self.cols):
                yield i, j

    def domain_values(self, var) -> list:
        """Return the list of candidate values for cell *var*."""
        i, j = var
        return self.domain[i, j]

    def remove_from_domain(self, var, val):
        """Delete *val* from the candidate list of cell *var* (in place)."""
        #print('Removing: {} from {}'.format(val, var))
        i, j = var
        self.domain[i, j].remove(val)

    def get_constraints(self):
        # Not used by the solver; constraints are consulted directly.
        pass

    def iterate_neighbours(self, var) -> list:
        """Yield the 4-connected neighbours of *var* that are in bounds."""
        i, j = var
        if i > 0:
            yield i-1, j
        if i < self.rows - 1:
            yield i+1, j
        if j > 0:
            yield i, j-1
        if j < self.cols - 1:
            yield i, j+1

    def count_conflicts(self, var, val) -> int:
        """Count constraints of *val* violated by the current neighbours
        of *var*; ANY-wildcard constraints never conflict."""
        i, j = var
        conflicts = 0
        for v, direction in self.constraints[val]:
            if v == ANY:
                continue
            if i > 0:
                if direction == UP and self.state[i - 1, j] != v:
                    conflicts += 1
            if i < self.state.shape[0] - 1:
                if direction == DOWN and self.state[i + 1, j] != v:
                    conflicts += 1
            if j > 0:
                if direction == LEFT and self.state[i, j - 1] != v:
                    conflicts += 1
            if j < self.state.shape[1] - 1:
                if direction == RIGHT and self.state[i, j + 1] != v:
                    conflicts += 1
        return conflicts

    def values_conflicting(self, var1, val1, var2, val2) -> bool:
        """Return True when assigning *val1*/*val2* to the adjacent cells
        *var1*/*var2* satisfies none of val1's constraints in that
        direction. Cells are assumed 4-adjacent (|dx| + |dy| == 1)."""
        i1, j1 = var1
        i2, j2 = var2
        dx = j2 - j1
        dy = i2 - i1
        # 0 acts as a "no direction" sentinel; it never equals a string label.
        v_dir = 0
        if dx == 1:
            v_dir = RIGHT
        if dx == -1:
            v_dir = LEFT
        if dy == 1:
            v_dir = DOWN
        if dy == -1:
            v_dir = UP
        # For every constraint for var1
        for c_val, c_dir in self.constraints[val1]:
            # Constrains are defined as VAL, DIRECTION
            # So in order to satisfy constraint the val2 should be equal to constraint val
            if v_dir == c_dir and (val2 == c_val or c_val == ANY):
                # No conflict, at least one constraint satisfied
                return False
        return True

    def get_value(self, var):
        """Return the value currently assigned to cell *var*."""
        i, j = var
        return self.state[i, j]

    def set_value(self, var, val):
        """Assign *val* to cell *var*."""
        i, j = var
        self.state[i, j] = val
def get_possible_dimensions(size: int) -> list:
    """Return every (height, width) pair of positive integers whose
    product equals *size*, in increasing order of the first factor."""
    return [(d, size // d) for d in range(1, size + 1) if size % d == 0]
def get_possible_shapes(ingredients: int, max_slice: int) -> list:
    """Return the dimensions of every rectangular slice whose area lies in
    the closed range [2 * ingredients, max_slice]."""
    smallest_area = ingredients * 2
    shapes = []
    for area in range(smallest_area, max_slice + 1):
        shapes.extend(get_possible_dimensions(area))
    return shapes
def create_state(rows, cols):
    """Return a (rows, cols) object array in which every cell holds its
    own freshly-created empty list."""
    grid = np.empty((rows, cols), dtype=object)
    for cell in np.ndindex(rows, cols):
        grid[cell] = []
    return grid
def analyze_shape(pizza: np.ndarray,
                  min_ingredients: int,
                  max_slice: int,
                  offset: int,
                  shape_w: int,
                  shape_h: int):
    """Scan the pizza with one rectangular slice shape.

    For every top-left anchor (i, j) where a (shape_h x shape_w) slice is
    valid (per ``is_valid_slice`` — defined elsewhere; assumed to check
    bounds, ingredient minimum and max size), increment a per-cell count
    and record, for every covered cell, the slice-local flat id shifted
    by *offset*.

    Returns:
        (entropy, domain, offset): per-cell valid-slice counts, per-cell
        candidate-id lists, and the unchanged *offset* for bookkeeping.
    """
    rows, cols = pizza.shape
    # uint16 matches the accumulator dtype used in analyze(); uint8 would
    # silently wrap once more than 255 candidate slices cover one cell.
    entropy = np.zeros(pizza.shape, dtype=np.uint16)
    domain = create_state(rows, cols)
    for i in range(rows):
        for j in range(cols):
            r1 = i
            r2 = i + shape_h
            c1 = j
            c2 = j + shape_w
            if is_valid_slice(pizza, r1, r2, c1, c2, min_ingredients, max_slice):
                entropy[r1:r2, c1:c2] += 1
                # Mark all the cells inside the slice: each could
                # potentially be covered by this slice placement.
                for i2 in range(r1, r2):
                    for j2 in range(c1, c2):
                        i_local = i2 - r1
                        j_local = j2 - c1
                        domain[i2, j2].append(offset + i_local * shape_w + j_local)
    return entropy, domain, offset
def analyze(pizza: np.ndarray, min_ingredients: int, max_slice: int, pool_class=multiprocessing.Pool):
    """Fan out analyze_shape over every valid slice shape and merge results.

    Each shape gets a disjoint id range starting at its running ``offset``.
    Returns (entropy, domain): per-cell counts of covering candidate
    slices and per-cell candidate-id lists.

    NOTE(review): with a process Pool the per-shape object arrays are
    pickled back to the parent; callers here pass ThreadPool — confirm
    process pools also work before switching.
    """
    valid_shapes = get_possible_shapes(min_ingredients, max_slice)
    for shape_idx, shape in enumerate(valid_shapes):
        print('Shape {0}: {1}'.format(shape_idx, shape))
    rows, cols = pizza.shape
    domain = create_state(rows, cols)
    # uint16 accumulator: analyze_shape returns per-shape counts.
    entropy = np.zeros(pizza.shape, dtype=np.uint16)
    pool = pool_class()
    results = []
    offset = 0
    for shape_idx in range(len(valid_shapes)):
        w, h = valid_shapes[shape_idx]
        r = pool.apply_async(analyze_shape, kwds={
            'pizza': pizza,
            'min_ingredients': min_ingredients,
            'max_slice': max_slice,
            'offset': offset,
            'shape_w': w,
            'shape_h': h
        })
        # Reserve w*h candidate ids for this shape before the next one.
        offset += w * h
        results.append(r)
    pool.close()
    pool.join()
    for r in results:
        # NOTE(review): the third returned value is the shape's id offset,
        # not a shape index — the name below is a leftover misnomer.
        local_entropy, local_state, local_shape_idx = r.get()
        entropy += local_entropy
        # State aggregation
        for i in range(rows):
            for j in range(cols):
                assert local_entropy[i, j] == len(local_state[i, j])
                domain[i, j] += local_state[i, j]
        """
        for rect_idx in local_state[i, j]:
            state[i, j].append((rect_idx, local_shape_idx))
        """
    return entropy, domain
def get_index(i, j, cols, offset=0):
    """Map 2-D cell (i, j) of a *cols*-wide grid to a row-major flat
    index, shifted by *offset*."""
    return offset + cols * i + j
def solve(pizza: np.ndarray, min_ingredients: int, max_slice: int):
    """Attempt to tile the pizza with valid slices, modeled as a grid CSP.

    Pipeline: analyze() builds per-cell candidate ids, constraints are
    generated per shape (each cell of a shape requires its neighbours to
    be the adjacent cell of the same placement, with ANY wildcards at the
    shape border), AC-3 prunes the domains, then a min-conflicts local
    search runs with live visualization.

    NOTE(review): the first `while True` loop below never terminates, so
    the hand-rolled min-conflicts loop after it appears unreachable —
    confirm intended structure against the original indentation.
    """
    rows, cols = pizza.shape
    state = np.empty(pizza.shape, dtype=np.int16)
    entropy, domain = analyze(pizza, min_ingredients, max_slice, ThreadPool)
    shapes = get_possible_shapes(ingredients=min_ingredients, max_slice=max_slice)
    constraints = []
    """ Build constraints """
    """
    offset = 0
    for shape in shapes:
        n = shape[0] * shape[1]
        print(np.reshape(np.arange(n) + offset, shape))
        offset += n
    any_val = list(range(offset))
    """
    offset = 0
    for shape in shapes:
        shape_rows, shape_cols = shape
        for i in range(shape_rows):
            for j in range(shape_cols):
                # Flat candidate ids of this cell and its shape-local
                # neighbours; `right`/`down` both name the current cell.
                left = get_index(i, j - 1, cols=shape_cols, offset=offset)
                right = get_index(i, j, cols=shape_cols, offset=offset)
                up = get_index(i - 1, j, cols=shape_cols, offset=offset)
                down = get_index(i, j, cols=shape_cols, offset=offset)
                if j > 0:
                    constraints.append((right, left, LEFT))
                    constraints.append((left, right, RIGHT))
                if i > 0:
                    constraints.append((down, up, UP))
                    constraints.append((up, down, DOWN))
                # Shape borders accept any neighbour (wildcard).
                if i == 0:
                    constraints.append((down, ANY, UP))
                if i == shape_rows - 1:
                    constraints.append((down, ANY, DOWN))
                if j == 0:
                    constraints.append((right, ANY, LEFT))
                if j == shape_cols - 1:
                    constraints.append((right, ANY, RIGHT))
        offset += shape[0] * shape[1]
    # Index constraints by source value: value -> [(target, direction)].
    constraints_dict = defaultdict(list)
    for src, tgt, direction in constraints:
        constraints_dict[src].append((tgt, direction))
    for k, v in constraints_dict.items():
        print(k, v)
    offset = 0
    for shape in shapes:
        n = shape[0] * shape[1]
        print(np.reshape(np.arange(n) + offset, shape))
        offset += n
    # AC-3 arc consistency (domain reduction)
    #csp.reduce_domain_ac3(domain, constraints_dict)
    problem = Problem(state, domain, constraints_dict)
    csp.arc_consistency3(problem)
    # Random initial state
    for i in range(rows):
        for j in range(cols):
            state[i, j] = np.random.choice(domain[i, j])
    # Count initial conflicts
    # NOTE(review): uint8 saturating at 255 conflicts per cell — presumed
    # sufficient, confirm against the maximum constraint fan-out.
    conflicts = np.zeros(pizza.shape, dtype=np.uint8)
    for i in range(rows):
        for j in range(cols):
            conflicts[i, j] = count_conflicts(state[i, j], i, j, state, constraints_dict)
    """ Visualization """
    wnd = viz.Window(1300, 610)
    pizza_drawer = viz.ImgPlotDrawer(viz.Rect(10, 10, 400, 600), 'Pizza')
    state_drawer = viz.ImgPlotDrawer(viz.Rect(420, 10, 400, 600), 'State')
    conflicts_drawer = viz.ImgPlotDrawer(viz.Rect(830, 10, 400, 600), 'Conflicts')
    wnd.add_drawer(state_drawer)
    wnd.add_drawer(pizza_drawer)
    wnd.add_drawer(conflicts_drawer)
    pizza_drawer.set_value(pizza)
    conflicts_drawer.set_value(conflicts)
    state_drawer.set_value(state)
    wnd.draw()
    # Library-driven min-conflicts loop (runs indefinitely, redrawing).
    while True:
        csp.min_conflicts_step(problem)
        for i, j in problem.iterate_variables():
            conflicts[i, j] = problem.count_conflicts((i, j), problem.get_value((i, j)))
        conflicts_drawer.set_value(conflicts)
        state_drawer.set_value(state)
        wnd.draw()
    # Hand-rolled min-conflicts search (see NOTE in the docstring).
    while np.sum(conflicts) > 0:
        # Randomly chosen variable from the set of conflicted variables
        indices = np.argwhere(conflicts > 0)
        i, j = indices[np.random.choice(len(indices))]
        prev_conf = conflicts[i, j]
        """
        vals = domain[i, j]
        confs = [count_conflicts(v, i, j, state, constraints_dict) for v in vals]
        p = np.exp(1.0 - np.array(confs) / np.max(confs))
        idx = np.random.choice(len(vals), p=p / p.sum())
        if confs[idx] <= prev_conf:
            state[i, j] = vals[idx]
            conflicts[i, j] = confs[idx]
        """
        # TODO: RANDOM OVER MIN CONFLICTS
        #"""
        min_conf_val = None
        min_conf = 99999
        for v in domain[i, j]:
            conf = count_conflicts(v, i, j, state, constraints_dict)
            if conf <= min_conf:
                min_conf = conf
                min_conf_val = v
        if min_conf_val is None:
            print('WHOA')
            break
        if min_conf <= prev_conf:
            state[i, j] = min_conf_val
            conflicts[i, j] = min_conf
        #"""
        #"""
        # Full conflict recount each iteration (the incremental variants
        # below are disabled).
        for _i in range(rows):
            for _j in range(cols):
                conflicts[_i, _j] = count_conflicts(state[_i, _j], _i, _j, state, constraints_dict)
        #"""
        """
        if i > 0:
            conflicts[i-1, j] = count_conflicts(state[i-1, j], i-1, j, state, constraints_dict)
        if i < rows - 1:
            conflicts[i+1, j] = count_conflicts(state[i+1, j], i+1, j, state, constraints_dict)
        if j > 0:
            conflicts[i, j-1] = count_conflicts(state[i, j-1], i, j-1, state, constraints_dict)
        if j < cols - 1:
            conflicts[i, j+1] = count_conflicts(state[i, j+1], i, j+1, state, constraints_dict)
        """
        """
        if i > 0 and is_conflicting(state[i - 1, j], i-1, j, state, constraints_dict):
            conflicting[i - 1, j] = 1
        if i < rows - 1 and is_conflicting(state[i + 1, j], i+1, j, state, constraints_dict):
            conflicting[i + 1, j] = 1
        if j > 0 and is_conflicting(state[i, j - 1], i, j - 1, state, constraints_dict):
            conflicting[i, j - 1] = 1
        if j > cols - 1 and is_conflicting(state[i, j - 1], i, j + 1, state, constraints_dict):
            conflicting[i, j + 1] = 1
        """
        state_drawer.set_value(state)
        conflicts_drawer.set_value(conflicts)
        wnd.draw()
    print('DONW')
    print(state)
def is_conflicting(val, i, j, state, constraints):
    """Return True if value *val* at cell (i, j) violates any of its
    neighbour constraints in *state*.

    Fixes: the loop variable used to shadow the *val* parameter, and
    ANY-wildcard targets were compared literally (they can never match a
    real neighbour value) — both now mirror Problem.count_conflicts.
    """
    for required, direction in constraints[val]:
        if required == ANY:
            # Wildcard constraints are satisfied by any neighbour.
            continue
        if i > 0:
            if direction == UP and state[i - 1, j] != required:
                return True
        if i < state.shape[0] - 1:
            if direction == DOWN and state[i + 1, j] != required:
                return True
        if j > 0:
            if direction == LEFT and state[i, j - 1] != required:
                return True
        if j < state.shape[1] - 1:
            if direction == RIGHT and state[i, j + 1] != required:
                return True
    return False
def count_conflicts(val, i, j, state, constraints):
    """Count the neighbour constraints of value *val* at cell (i, j) that
    are violated in *state*.

    Fix: skip ANY-wildcard targets, matching Problem.count_conflicts —
    previously a wildcard constraint was compared literally against the
    neighbour value and therefore always counted as a conflict.
    """
    conflicts = 0
    for v, direction in constraints[val]:
        if v == ANY:
            # Wildcard: any neighbour satisfies this constraint.
            continue
        if i > 0:
            if direction == UP and state[i - 1, j] != v:
                conflicts += 1
        if i < state.shape[0] - 1:
            if direction == DOWN and state[i + 1, j] != v:
                conflicts += 1
        if j > 0:
            if direction == LEFT and state[i, j - 1] != v:
                conflicts += 1
        if j < state.shape[1] - 1:
            if direction == RIGHT and state[i, j + 1] != v:
                conflicts += 1
    return conflicts
|
{"hexsha": "d8949458bd93e20c4665d5eb27f0c488da5c9513", "size": 13278, "ext": "py", "lang": "Python", "max_stars_repo_path": "practice/attempt3.py", "max_stars_repo_name": "bshishov/HashCode2019", "max_stars_repo_head_hexsha": "026ab14fd22d269deec6d809d4426e78a9417677", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "practice/attempt3.py", "max_issues_repo_name": "bshishov/HashCode2019", "max_issues_repo_head_hexsha": "026ab14fd22d269deec6d809d4426e78a9417677", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "practice/attempt3.py", "max_forks_repo_name": "bshishov/HashCode2019", "max_forks_repo_head_hexsha": "026ab14fd22d269deec6d809d4426e78a9417677", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8790697674, "max_line_length": 102, "alphanum_fraction": 0.5388612743, "include": true, "reason": "import numpy", "num_tokens": 3412}
|
"""
    require(package::Symbol; [fun_name]::String="", [explanation]::String="")

Helper method to check for optional packages and print an error message.

### Input

- `package`     -- symbol (the package name)
- `fun_name`    -- (optional; default: `""`) name of the function that requires
                   the package
- `explanation` -- (optional; default: `""`) additional explanation in the error
                   message

### Output

If the package is loaded, this function has no effect.
Otherwise it prints an error message.

### Algorithm

This function uses `@assert` and hence loses its ability to print an error
message if assertions are deactivated.
"""
function require(package::Symbol; fun_name::String="", explanation::String="")
    loaded = isdefined(@__MODULE__, package)
    suffix = if fun_name == ""
        ""
    else
        " (it is required for executing `$fun_name`" *
            (explanation == "" ? "" : " " * explanation) * ")"
    end
    @assert loaded "package '$package' not loaded" * suffix
end
"""
    require(packages::Vector{Symbol}; [fun_name]::String="",
            [explanation]::String="")

Helper method to check for optional packages and print an error message.

### Input

- `packages`    -- list of symbols (the package names)
- `fun_name`    -- (optional; default: `""`) name of the function that requires
                   the package
- `explanation` -- (optional; default: `""`) additional explanation in the error
                   message

### Output

If at least one of the packages is loaded, this function has no effect.
Otherwise it prints an error message.

### Algorithm

This function uses `@assert` and hence loses its ability to print an error
message if assertions are deactivated.
"""
function require(packages::Vector{Symbol}; fun_name::String="",
                 explanation::String="")
    loaded = any(isdefined(@__MODULE__, pkg) for pkg in packages)
    suffix = if fun_name == ""
        ""
    else
        " (one of them is required for executing `$fun_name`" *
            (explanation == "" ? "" : " " * explanation) * ")"
    end
    @assert loaded "no package from '$packages' loaded" * suffix
end
|
{"hexsha": "93d57018579e0979709ad83f6eb3dcb8ba0e2e92", "size": 2180, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Utils/require.jl", "max_stars_repo_name": "nablabits/LazySets.jl", "max_stars_repo_head_hexsha": "e839322ae970e5b61271b709f8a865184b32c8e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Utils/require.jl", "max_issues_repo_name": "nablabits/LazySets.jl", "max_issues_repo_head_hexsha": "e839322ae970e5b61271b709f8a865184b32c8e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Utils/require.jl", "max_forks_repo_name": "nablabits/LazySets.jl", "max_forks_repo_head_hexsha": "e839322ae970e5b61271b709f8a865184b32c8e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0625, "max_line_length": 80, "alphanum_fraction": 0.6183486239, "num_tokens": 483}
|
from pythonequipmentdrivers import Scpi_Instrument
import numpy as np
from typing import Union, Tuple
from pathlib import Path
class Lecroy_WR8xxx(Scpi_Instrument):
"""
Lecroy_WR8xxx(address)
address : str, address of the connected oscilloscope
object for accessing basic functionallity of the Lecroy_WR8xxx
Family of Oscilloscopes
Addtional information on the remote control capabilities of the scope can
be accessed at:
http://cdn.teledynelecroy.com/files/manuals/maui-remote-control-and-automation-manual.pdf
"""
valid_trigger_states = ['AUTO', 'NORM', 'SINGLE', 'STOP']
def __init__(self, address: str, **kwargs) -> None:
    """
    Connect to the oscilloscope at *address*, clear the remote interface,
    and switch query responses to the short communication-header format
    (so replies parse as 'CMD value' tokens throughout this class).
    """
    super().__init__(address, **kwargs)
    self.instrument.clear()
    self.set_comm_header('short')
def select_channel(self, channel: int, state: bool) -> None:
    """
    select_channel(channel, state)

    Toggles the visibility of a channel's trace on the front panel
    display.

    Args:
        channel (int): Channel number to select
        state (bool): Whether or not the respective channel is
            selected/visable on the screen.
    """
    trace = 'ON' if state else 'OFF'
    self.instrument.write(f"C{int(channel)}:TRACE {trace}")
def set_channel_scale(self, channel: int, scale: float) -> None:
    """
    set_channel_scale(channel, scale)

    Sets the vertical scale (amplitude per division) for the specified
    channel.

    Args:
        channel (int): channel number to configure
        scale (float): scale of the channel amplitude across one
            vertical division on the display.
    """
    command = f'C{int(channel)}:VDIV {float(scale)}'
    self.instrument.write(command)
def get_channel_scale(self, channel: int) -> float:
    """
    get_channel_scale(channel)

    Retrieves the vertical scale (amplitude per division) of the
    specified channel.

    Args:
        channel (int): channel number to query information on

    Returns:
        (float): vertical scale
    """
    # Response format is 'CMD value'; the value is the second token.
    return float(self.instrument.query(f'C{int(channel)}:VDIV?').split()[1])
def set_channel_offset(self, channel: int, off: float, **kwargs) -> None:
    """
    set_channel_offset(channel, off, **kwargs)

    Sets the vertical offset for the display of the specified channel.

    Args:
        channel (int): Channel number to query
        off (float): vertical/amplitude offset

    Kwargs:
        use_divisions (bool, optional): Whether or not the offset is
            treated as a number of vertical divisions instead of an
            amplitude. Defaults to False.
    """
    offset = float(off)
    if kwargs.get('use_divisions', False):
        # Convert a division count into an amplitude via the current scale.
        offset = offset * self.get_channel_scale(int(channel))
    self.instrument.write(f"C{int(channel)}:OFFSET {offset}")
def get_channel_offset(self, channel: int) -> float:
    """
    get_channel_offset(channel)

    Retrieves the vertical offset for the display of the specified
    channel.

    Args:
        channel (int): Channel number to query

    Returns:
        float: vertical/amplitude offset
    """
    reply = self.instrument.query(f"C{int(channel)}:OFFSET?")
    return float(reply.split()[1])
def set_channel_coupling(self, channel: int, coupling: str) -> None:
    """
    set_channel_coupling(channel, coupling)

    Sets the coupling used on the specified channel. For this
    oscilloscope the following coupling options are supported:
    "dc" == "dc_1meg", "dc_50", "ac" == "ac_1meg", and "gnd".

    Args:
        channel (int): channel number to adjust
        coupling (str): coupling mode

    Raises:
        ValueError: if *coupling* is not one of the supported modes.
    """
    coupling_map = {'dc_1meg': 'D1M', "dc": 'D1M',
                    'dc_50': 'D50',
                    'ac_1meg': 'A1M', 'ac': 'A1M',
                    'gnd': 'gnd'}
    coupling = str(coupling).lower()
    if coupling not in coupling_map:
        # Fixed typo ("Suuport") and dict_keys() repr noise in the
        # user-facing error message.
        raise ValueError(f"Invalid Coupling option: {coupling}. "
                         f"Supported options are: {tuple(coupling_map)}")
    self.instrument.write(f"C{int(channel)}:COUPLING {coupling_map[coupling]}")
def get_channel_coupling(self, channel: int) -> str:
    """
    get_channel_coupling(channel)

    Retrieves the coupling used on the specified channel. For this
    oscilloscope the following coupling options are supported:
    "dc", "dc_50", "ac", and "gnd".

    Args:
        channel (int): channel number to query

    Returns:
        str: coupling mode
    """
    decode = {'D1M': 'dc', 'D50': 'dc_50',
              'A1M': 'ac', 'gnd': 'gnd'}
    reply = self.instrument.query(f"C{int(channel)}:COUPLING?")
    return decode[reply.split()[-1]]
def set_horizontal_scale(self, scale: float) -> None:
    """
    set_horizontal_scale(scale)

    Sets the horizontal (time) scale per division, shared by all
    channels, to the specified value in seconds.

    Args:
        scale (float): time scale across one horizontal division on the
            display in seconds.
    """
    command = f'TIME_DIV {float(scale)}'
    self.instrument.write(command)
def get_horizontal_scale(self) -> float:
    """
    get_horizontal_scale()

    Retrieves the horizontal scale used to acquire waveform data.

    Returns:
        float: horizontal scale in seconds per division.
    """
    return float(self.instrument.query('TIME_DIV?').split()[1])
def set_measure_config(self, channel, meas_type, meas_idx,
                       source_type='channel'):
    """
    Configures measurement slot *meas_idx* to measure *meas_type* on the
    given source. Supported measurement types include:

    AMPL, AREA, BASE, DLY, DUTY, FALL, FALL82, FREQ, MAX, MEAN, MIN, NULL,
    OVSN, OVSP, PKPK, PER, PHASE, RISE, RISE28, RMS, SDEV, TOP, WID, WIDN,
    AVG, CYCL, DDLY, DTRIG, DUR, FRST, FWHM, HAMPL, HBASE, HMEAN, HMEDI,
    HRMS, HTOP, LAST, LOW, MAXP, MEDI, MODE, NCYCLE, PKS, PNTS, RANGE,
    SIGMA, TOTP, XMAX, XMIN, XAPK, TOP
    """
    # Source prefixes: C = channel, F = math function, Z = zoom trace.
    prefix = {'channel': 'C', 'math': 'F', 'zoom': 'Z'}[source_type.lower()]
    self.instrument.write(f'PACU {meas_idx},{meas_type},{prefix}{channel}')
    return None
def get_measure_config(self, meas_idx):
    """Query measurement slot *meas_idx*; returns a dict with keys
    'index', 'type', 'source', and 'status'."""
    reply = self.instrument.query(f'PACU? {meas_idx}')
    fields = ('index', 'type', 'source', 'status')
    return dict(zip(fields, reply.split()[-1].split(',')))
def get_measure_data(self, *meas_idx: int) -> Union[float, tuple]:
    """
    get_measure_data(*meas_idx)

    Returns the current value of the requested measurement(s) referenced
    by the provided index(es).

    Args:
        meas_idx (int): measurement index(es) for the measurement(s) to
            query. Can be a single index or an arbitrary sequence of
            indices.

    Returns:
        float: Current value of the requested measurement. If no value
            has been assigned to the measurement yet the returned value
            is nan.
    """
    values = []
    for index in meas_idx:
        query = f"VBS? 'return=app.Measure.P{int(index)}.Out.Result.Value' "
        reply = self.instrument.query(query)
        try:
            values.append(float(reply.split()[-1]))
        except ValueError:
            # Measurement slot has no numeric result yet.
            values.append(float('nan'))
    return values[0] if len(values) == 1 else tuple(values)
def get_measure_statistics(self, meas_idx, stat_filter=''):
    """
    Retrieves accumulated statistics for measurement slot *meas_idx*.

    Args:
        meas_idx: measurement slot to query.
        stat_filter (str, optional): if non-empty, return only this
            statistic ('mean', 'max', 'min', 'last', 'stdev', or 'n');
            case-insensitive.

    Returns:
        dict | float | None: mapping of statistic name to value (None for
        UNDEF entries), or the single requested statistic.

    Raises:
        ValueError: if *stat_filter* names an unknown statistic.
    """
    query_str = f'PAST? CUST,,P{meas_idx}'
    response = self.instrument.query(query_str)
    # strip out header info about measurement
    data = response[response.index(',') + 1:].strip().split(',')
    data = data[3:]
    keys = map(str.lower, data[::2])
    vals = data[1::2]
    rename_map = {'avg': 'mean', 'high': 'max', 'low': 'min',
                  'last': 'last', 'sigma': 'stdev', 'sweeps': 'n'}
    stats = {}
    for k, v in zip(keys, vals):
        if v != 'UNDEF':
            value = float(v.split()[0])  # remove units
        else:
            value = None
        stats[rename_map[k]] = value
    if stat_filter != '':
        # Fix: keys are lower-cased above, but the membership test used
        # the raw argument while the lookup lower-cased it — a mixed-case
        # filter (e.g. 'Mean') incorrectly raised. Normalize once.
        stat_filter = stat_filter.lower()
        if stat_filter not in stats:
            raise ValueError(f'Invalid option stat_filter = {stat_filter},'
                             f' valid options are: {tuple(stats.keys())}')
        return stats[stat_filter]
    return stats
def enable_measure_statistics(self, histogram=False):
    """Turn on statistics accumulation for custom measurements; with
    histogram=True also enable the histogram ('BOTH')."""
    if histogram:
        self.instrument.write('PARM CUST,BOTH')
    else:
        self.instrument.write('PARM CUST,STAT')
    return None

def disable_measure_statistics(self):
    """Turn off statistics accumulation for custom measurements."""
    self.instrument.write('PARM CUST,OFF')
    return None

def reset_measure_statistics(self):
    """
    reset_measure_statistics()

    resets the accumlated measurements used to calculate statistics
    """
    self.instrument.write("VBS 'app.ClearSweeps' ")
    return None

def clear_all_measure(self):
    """Clear (remove) all configured measurements."""
    self.instrument.write('PACL')
    return None
def trigger_run(self) -> None:
    """
    trigger_run()

    sets the state of the oscilloscopes acquision mode to acquire new
    data.
    """
    # Arm acquisition first, then select NORMAL trigger mode.
    self.instrument.write("ARM")
    self.instrument.write("TRMD NORM")

def trigger_single(self) -> None:
    """
    trigger_single()

    arms the oscilloscope to capture a single trigger event.
    """
    self.instrument.write("ARM")
    self.instrument.write("TRMD SINGLE")

def trigger_stop(self) -> None:
    """
    trigger_stop()

    sets the state of the oscilloscopes acquision mode to stop
    acquiring new data. equivalent to set_trigger_acquire_state(0).
    """
    self.instrument.write('STOP')

def trigger_force(self) -> None:
    """
    trigger_force()

    forces a trigger event to occur
    """
    self.instrument.write("ARM")
    self.instrument.write("FRTR")

def trigger_auto(self) -> None:
    """
    trigger_auto()

    Sets the state of the oscilloscopes acquision mode to acquire new
    data automatically.
    """
    self.instrument.write("ARM")
    self.instrument.write("TRMD AUTO")
def get_trigger_mode(self) -> str:
    """
    get_trigger_mode()

    Gets the mode of the trigger used for data acquisition.

    Returns:
        str: trigger mode.
    """
    return self.instrument.query('TRMD?').split()[-1].lower()
def set_trigger_source(self, channel: int) -> None:
    """
    set_trigger_source(channel)

    Sets the trigger source to the indicated source channel.

    Args:
        channel (int): channel number to configure
    """
    config = self.instrument.query('TRSE?')  # current trigger config
    # Locate the source field ("SR,<source>,") inside the response and
    # splice in the new channel, leaving the rest of the config intact.
    start = config.index('SR,') + 3
    end = config.index(',', start)
    self.instrument.write(config[:start] + f'C{int(channel)}' + config[end:])
def get_trigger_source(self) -> int:
    """
    get_trigger_source()

    Gets the channel number for the trigger source.

    Returns:
        int: channel number used for the trigger source
    """
    config = self.instrument.query('TRSE?')  # current trigger config
    # Extract the source field ("SR,C<n>,") and drop the 'C' prefix.
    start = config.index('SR,') + 3
    end = config.index(',', start)
    return int(config[start:end].lstrip('C'))
def set_trigger_acquire_state(self, state):
    """
    set_trigger_acquire_state(state)

    state: (str) trigger state, valid arguements are listed in
        self.valid_trigger_states

    sets the state of the oscilloscopes trigger, whether its
    currently acquiring new data or not and which method is used for
    triggering additional acquision events.
    """
    state = state.upper()
    if state in self.valid_trigger_states:
        self.instrument.write(f"TRMD {state}")
    else:
        raise ValueError("invalid option for arg 'state'")
    return None

def get_trigger_acquire_state(self):
    """
    get_trigger_acquire_state()

    returns:
        state: (str) acquire state, valid arguements are listed in
            self.valid_trigger_states

    returns the state of the oscilloscopes trigger, whether its
    currently acquiring new data or not and which method is used for
    triggering additional acquision events.
    """
    response = self.instrument.query('TRMD?')
    response = response.strip().split()[-1]  # strip newline and CMD name
    return response
def set_trigger_level(self, level: float, **kwargs) -> None:
    """
    set_trigger_level(level, **kwargs)

    Sets the vertical position of the trigger point in the units of the
    triggering waveform.

    Args:
        level (float): vertical position of the trigger, units depend on
            the signal being triggered on.

    Kwargs:
        source (int): channel number to set the trigger level for. If not
            provided the default behavior is to use whichever channel is
            currently being used for triggering.
    """
    source = kwargs.get('source', self.get_trigger_source())
    command = f'C{int(source)}:TRLV {float(level)}\n'
    self.instrument.write(command)
def get_trigger_level(self, **kwargs) -> float:
    """
    get_trigger_level(**kwargs)

    Returns the vertical position of the trigger level in the units of
    the triggering waveform.

    Kwargs:
        source (int): channel number to get the trigger level for. If not
            provided the default behavior is to use whichever channel is
            currently being used for triggering.

    Returns:
        float: vertical position of the trigger, units depend on
            the signal being triggered on
    """
    source = kwargs.get('source', self.get_trigger_source())
    read_cmd = f'C{int(source)}:TRLV'
    response = self.instrument.query(f'{read_cmd}?')
    # Fix: str.lstrip(read_cmd) strips *characters from a set*, not the
    # prefix string — it could eat leading digits/signs of the value.
    # Remove the echoed command prefix explicitly instead.
    if response.startswith(read_cmd):
        response = response[len(read_cmd):]
    return float(response.split()[0])
def set_trigger_slope(self, slope: str, **kwargs) -> None:
    """
    set_trigger_slope(slope, **kwargs)

    Sets the edge polarity of the acquistion trigger. Valid options for
    this oscilloscope are 'rise'/'pos' or 'fall'/'neg'.

    Args:
        slope (str): trigger edge polarity.

    Kwargs:
        source (int): channel number to set the trigger slope for. If not
            provided the default behavior is to use whichever channel is
            currently being used for triggering.

    Raises:
        ValueError: if *slope* is not a recognized polarity.
    """
    valid_options = {'POS': 'POS', 'RISE': 'POS',
                     'NEG': 'NEG', 'FALL': 'NEG'}
    source = kwargs.get('source', self.get_trigger_source())
    slope = str(slope).upper()
    if slope not in valid_options:
        # Fixed grammar ("Valid option are") and dict_keys() repr noise
        # in the user-facing error message.
        raise ValueError('Invalid option for Arg "slope".'
                         f' Valid options are {tuple(valid_options)}')
    self.instrument.write(f'C{int(source)}:TRSL {valid_options[slope]}')
def get_trigger_slope(self, **kwargs) -> str:
    """
    get_trigger_slope(**kwargs)

    Retrieves the edge polarity of the acquistion trigger. Valid options
    for this oscilloscope are 'rise'/'pos' or 'fall'/'neg'.

    Kwargs:
        source (int): channel number to query the trigger slope for. If
            not provided the default behavior is to use whichever channel
            is currently being used for triggering.

    Returns:
        str: trigger edge polarity
    """
    source = kwargs.get('source', self.get_trigger_source())
    reply = self.instrument.query(f'C{source}:TRSL?')
    return reply.split()[-1].lower()
def set_trigger_position(self, offset: float, **kwargs) -> None:
    """
    set_trigger_position(offset)

    Sets the horizontal position of the trigger point which represents
    the t=0 point of the data capture.

    Args:
        offset (float): Horizontal position of the trigger. Represents a
            time offset in seconds. If use_divisions=True then it is
            interpreted as a number of horizontal divisions in the
            capture window.

    Kwargs:
        use_divisions (bool, optional): Whether to interpret offset as a
            time (False) or a number of horizontal divisions (True).
            Defaults to False.
    """
    if kwargs.get('use_divisions', False):
        # Convert a division count into seconds via the current timebase.
        multiplier = self.get_horizontal_scale()
    else:
        multiplier = 1
    self.instrument.write(f'TRDL {float(offset)*multiplier}')
def get_trigger_position(self) -> float:
    """
    get_trigger_position()

    Retrieves the horizontal position of the trigger point which
    represents the t=0 point of the data capture.

    Returns:
        float: Horizontal position of the trigger. Represents a time
            offset in seconds
    """
    reply = self.instrument.query('TRDL?')
    return float(reply.split()[1].lower())
def get_image(self, image_title: Union[str, Path], **kwargs) -> None:
    """
    get_image(image_title, **kwargs)

    Saves the screen image to the location specified by "image_title".
    "image_title" can contain path information to the desired save
    directory. Specifying an extension is not nessary, a file extension
    will be automatically be added based on the image format used
    (default: PNG)

    Args:
        image_title (Union[str, Path]): Path name of image, file extension
            will be added automatically

    Kwargs:
        image_format (str, optional): File extention to save the image
            with, valid options are png, and 'tiff'. Defaults to png.
        image_orientation (str, optional): Orientation of the resulting
            image, valid options are 'portrait' and 'landscape'. Defaults
            to landscape'.
        bg_color (str, optional): Grid background color to use for saving
            the image, valid options are 'black' and 'white'. Defaults to
            black.
        screen_area (str, optional): Area of the screen to capture as an
            image, valid options are 'dsowindow', 'gridareaonly', and
            'fullscreen'. Defaults to dsowindow.

    Raises:
        ValueError: on an unsupported format/orientation/color/area, or
            if image_title is neither str nor Path.
    """
    # valid image settings
    valid_formats = ('BMP', 'JPEG', 'PNG', 'TIFF')
    valid_orientations = ('PORTRAIT', 'LANDSCAPE')
    valid_bg_colors = ('BLACK', 'WHITE')
    valid_screen_areas = ('DSOWINDOW', 'GRIDAREAONLY', 'FULLSCREEN')
    # handle kwargs
    xfer_ext = str(kwargs.get('image_format', 'PNG')).upper()
    image_orient = kwargs.get('image_orientation', 'LANDSCAPE').upper()
    bg_color = kwargs.get('bg_color', 'BLACK').upper()
    screen_area = kwargs.get('screen_area', 'DSOWINDOW').upper()
    port = 'NET'  # no support for others atm
    if xfer_ext not in valid_formats:
        raise ValueError('Invalid option for kwarg "image_format"')
    elif image_orient not in valid_orientations:
        raise ValueError('Invalid option for kwarg "image_orientation"')
    elif bg_color not in valid_bg_colors:
        raise ValueError('Invalid option for kwarg "bg_color"')
    elif screen_area not in valid_screen_areas:
        raise ValueError('Invalid option for kwarg "screen_area"')
    # add file extension to final filepath
    write_ext = xfer_ext.lower() if xfer_ext != 'JPEG' else 'jpg'
    if isinstance(image_title, Path):
        f_name = f'{image_title.name}.{write_ext}'
        file_path = image_title.parent.joinpath(f_name)
    elif isinstance(image_title, str):
        file_path = f"{image_title}.{write_ext}"
    else:
        raise ValueError('image_title must be a str or path-like object')
    # initiate transfer
    template = (r'HARDCOPY_SETUP DEV, {}, FORMAT, {}, '
                r'BCKG, {}, AREA, {}, PORT, {}')
    write_cmd = template.format(xfer_ext, image_orient, bg_color,
                                screen_area, port)
    self.instrument.write(write_cmd)
    self.instrument.write('SCREEN_DUMP')
    # read back raw image data
    screen_data = self.instrument.read_raw()
    # save to file
    with open(file_path, 'wb+') as file:
        file.write(screen_data)
def get_waveform_description(self, channel):
    """Query the WAVEDESC metadata block for *channel* and return it as a
    dict of lower-cased field names to parsed values (ints/floats where
    numeric, strings otherwise)."""
    response = self.instrument.query(f'C{channel}:INSP? "WAVEDESC"')
    description = {}
    # First two lines and the last line are framing, not fields.
    for line in response.splitlines()[2:-1]:
        sep = line.index(':')
        key = line[:sep].strip().lower()
        value = line[sep + 1:].strip().lower()
        try:
            value = float(value)
            if value.is_integer():
                value = int(value)
        except ValueError:
            pass  # non-numeric field stays a string
        description[key] = value
    return description
    def get_channel_data(self, *channels: int,
                         **kwargs) -> Union[Tuple[np.ndarray], np.ndarray]:
        """
        get_channel_data(*channels, return_time=True, dtype=np.float32)

        Retrieves waveform data from the oscilloscope on the specified
        channel(s). A sparse representation of each waveform can be returned
        by setting the sparsing factor, "sparsing", to a value > 1.

        Args:
            *channels: (int, Iterable[int]) or sequence of ints, channel
                number(s) of the waveform(s) to be transferred.

        Kwargs:
            sparsing (int, optional): sparsing factor, every n-th point of the
                waveform will be returned.
            return_time (bool, optional): Whether or not to return the time
                array with the rest of the waveform data. Defaults to True.
            dtype (type, optional): data type to be used for waveform data.
                Defaults to float32.

        Returns:
            Union[tuple, numpy.array]: waveform data. if len(channels) > 1 or
                or if return_time is true this is a tuple of numpy arrays. If
                time information is returned it will always be the first value,
                any additional waveforms will be returned in the same order
                they were passed. In the case of len(channels) == 1 and
                return_time is False a single numpy array is returned
        """

        # formatting info
        sparsing = int(kwargs.get('sparsing', 1))
        dtype = kwargs.get('dtype', np.float32)

        # set up scope for data transfer
        # format: (sparsing, num_points, first_point, seg_num)
        self.instrument.write(f'WAVEFORM_SETUP SP,{sparsing},NP,0,FP,0,SN,0')
        # for now only sparsing is supported (defaults to no sparsing)

        waves = []
        for channel in channels:
            # get waveform metadata (vertical scaling/offset per channel)
            desc = self.get_waveform_description(channel)
            y_offset = desc['vertical_offset']
            y_scale = desc['vertical_gain']

            # get raw data, strip header
            # NOTE(review): assumes a fixed 22-byte response header and a
            # single trailing byte — confirm against the instrument manual
            self.instrument.write(f'C{channel}:WF? DAT1')
            raw_data = self.instrument.read_raw()[22:-1]

            data = np.frombuffer(raw_data, np.byte, count=len(raw_data))

            # decode into measured value using waveform metadata
            wave = data*y_scale - y_offset
            waves.append(wave.astype(dtype))

        if kwargs.get('return_time', True):
            # time base comes from the *last* queried channel's metadata
            t_div, t_off = desc['horiz_interval'], desc['horiz_offset']
            # all waveforms assumed to have same duration (just use last)
            t = np.arange(len(wave), dtype=dtype)*t_div*sparsing + t_off
            return (t, *waves)
        else:
            if len(waves) == 1:
                return waves[0]
            return tuple(waves)
def set_channel_label(self, channel: int, label: str) -> None:
"""
set_channel_label(channel, label)
updates the text label on a channel specified by "channel" with the
value given in "label".
Args:
channel (int): channel number to update label of.
label (str): text label to assign to the specified
"""
q_str = f"""vbs 'app.acquisition.C{channel}.LabelsText = "{label}" '"""
self.instrument.write(q_str)
def get_channel_label(self, channel: int) -> str:
"""
get_channel_label(channel)
Queries the text label of the channel specified by "channel".
Args:
channel (int): channel number to query label of.
Returns:
(str): text label to assigned to the specified channel
"""
q_str = f"""vbs? 'return = app.acquisition.C{channel}.LabelsText'"""
response = self.instrument.query(q_str)
return ' '.join(response.strip().split()[1:])
def set_channel_display(self, channel, mode):
# mode = "true" or "false"
q_str = f"""vbs 'app.acquisition.C{channel}.View = {mode} '"""
self.instrument.write(q_str)
return None
def set_persistence_state(self, state):
if state:
self.instrument.write('PERSIST ON')
else:
self.instrument.write('PERSIST OFF')
return None
def get_persistence_state(self):
response = self.instrument.query('PERSIST?')
response = response.split()[1]
if response == 'ON':
return True
return False
def set_persistence_time(self, duration: Union[float, str]) -> None:
"""
set_persistence_time(duration)
sets the persistence of the waveform buffers to include all captures
within a time window specified by "duration"
Args:
duration (Union[float, str]): The exposure time for the
oscilloscope capture display in seconds. Valid values are
positive numbers or "inf" to set the maximum exposure time
"""
valid_durs = (0.5, 1, 2, 5, 1, 20, 'inf')
if isinstance(duration, str):
duration = duration.lower()
if duration in valid_durs:
self.instrument.write(f'PESU {duration},ALL')
else:
raise ValueError('Invalid duration, valid times (s): ' +
', '.join(map(str, valid_durs)))
def get_persistence_time(self) -> Union[float, str]:
"""
get_persistence_time()
Retrives the persistence time set for the waveform buffers.
Returns:
(Union[float, str]): persistence time
"""
response = self.instrument.query('PESU?')
dur = response.split()[1].split(',')[0]
if response.isnumeric():
return float(dur)
return 'inf'
def set_comm_header(self, header: str) -> None:
"""
set_comm_header(header)
Sets the header type used in the response of query commands. Valid
options are 'long', 'short', and 'off'. An example of each is noted
below.
Query : C1:OFFSET? # returns the vertical offset used by channel 1
response with 'long': 'C1:OFFSET -50 V\n'
response with 'short': 'C1:OFST -50 V\n'
response with 'off': '-50\n'
This class was written assuming that the short or long formats
are in use. Therefore by default it is set to short. Changes to this
format may result in issues with other commands.
Args:
header (str): query header format name. valid values are 'long',
'short', and 'off'
"""
header = str(header).upper()
if header not in ('OFF', 'SHORT', 'LONG'):
raise ValueError('Invalid option for arg "header"')
self.instrument.write(f'CHDR {header}')
def get_comm_header(self) -> str:
"""
get_comm_header()
Rreturns the header type used in the response of query commands.
Response is either 'long', 'short', and 'off'. An example of each is
noted below.
Query : C1:OFFSET? # returns the vertical offset used by channel 1
response with 'long': 'C1:OFFSET -50 V\n'
response with 'short': 'C1:OFST -50 V\n'
response with 'off': '-50\n'
Returns:
header (str): query header format name. valid values are 'long',
'short', and 'off'
"""
response = self.instrument.query('CHDR?')
if ' ' in response:
header = response.split()[-1]
else:
header = response
header = header.strip().lower()
return header
if __name__ == '__main__':
    # driver library module: nothing to execute when run directly
    pass
|
{"hexsha": "689ce5191c667ba849a3126c97144e9a0ed98e83", "size": 30083, "ext": "py", "lang": "Python", "max_stars_repo_path": "pythonequipmentdrivers/oscilloscope/Lecroy_WR8xxx.py", "max_stars_repo_name": "admleman/PythonEquipmentDrivers", "max_stars_repo_head_hexsha": "1e1fbf96ae372757ad90339af5863ab64daef2a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-02-01T14:23:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T12:26:07.000Z", "max_issues_repo_path": "pythonequipmentdrivers/oscilloscope/Lecroy_WR8xxx.py", "max_issues_repo_name": "admleman/PythonEquipmentDrivers", "max_issues_repo_head_hexsha": "1e1fbf96ae372757ad90339af5863ab64daef2a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-08-17T12:59:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T02:19:31.000Z", "max_forks_repo_path": "pythonequipmentdrivers/oscilloscope/Lecroy_WR8xxx.py", "max_forks_repo_name": "TedKus/PythonEquipmentDrivers", "max_forks_repo_head_hexsha": "d758a6fb0f7d505d46b24fcd55170e90d2a06cd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-12-30T17:25:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-27T13:52:44.000Z", "avg_line_length": 34.0305429864, "max_line_length": 93, "alphanum_fraction": 0.5849483097, "include": true, "reason": "import numpy", "num_tokens": 6524}
|
import numpy as np
import collections
from .penalized_regression import PenalizedRegression as PLR
from . import elbo as elbo_py
from ..models.normal_means_ash_scaled import NormalMeansASHScaled
# Field names of the result record returned by ebfit / ebfit_old below.
RES_FIELDS = ['theta', 'coef', 'prior', 'residual_var', 'intercept', 'elbo_path', 'outer_elbo_path', 'obj_path', 'niter']
class ResInfo(collections.namedtuple('_ResInfo', RES_FIELDS)):
    # Immutable result container; empty __slots__ keeps instances lightweight.
    __slots__ = ()
def ws_one_step(X, y, b, winit, s2init, sk, dj):
    """Single EM update of the mixture weights and residual variance.

    Evaluates the scaled normal-means posterior at the current coefficients,
    then returns the posterior-mean coefficients ``bbar``, updated mixture
    weights ``wnew``, updated residual variance ``s2new`` and the ELBO at
    the new parameter values.
    """
    n, p = X.shape

    # posterior of the normal-means problem at btilde = b + X'r / dj
    resid = y - np.dot(X, b)
    btilde = b + np.dot(X.T, resid) / dj
    nm = NormalMeansASHScaled(btilde, np.sqrt(s2init), winit, sk, d = dj, debug = False)
    phijk, mujk, varjk = nm.posterior()

    # new mixture weights: mean responsibility of each component
    wnew = np.sum(phijk, axis = 0) / p

    # new residual variance from the posterior-mean coefficients
    bbar = np.sum(phijk * mujk, axis = 1)
    a1 = np.sum(dj * bbar * btilde)
    varobj = np.dot(resid, resid) - np.dot(np.square(b), dj) + a1
    s2new = (varobj + p * (1 - wnew[0]) * s2init) / (n + p * (1 - wnew[0]))

    # ELBO evaluated at the freshly updated parameters
    elbo = elbo_py.scalemix(X, y, sk, bbar, wnew, s2new,
                            dj = dj, phijk = phijk, mujk = mujk, varjk = varjk, eps = 1e-8)
    return bbar, wnew, s2new, elbo
def ebfit(X, y, sk,
          binit = None, winit = None, s2init = None,
          maxiter = 400, qb_maxiter = 50,
          calculate_elbo = True,
          epstol = 1e-8,
          unshrink_method = 'heuristic',
          is_prior_scaled = True,
          display_progress = False,
          debug = False,
          plr_debug = False
         ):
    """EM fit of the mr.ash penalized regression model.

    Alternates between (a) an inner penalized-regression solve for the
    coefficients via PLR and (b) a closed-form empirical-Bayes update of the
    mixture weights and residual variance (ws_one_step). Iterates until the
    outer ELBO improves by less than `epstol` or `maxiter` is reached.
    Returns a ResInfo namedtuple.
    """
    n, p = X.shape
    k = sk.shape[0]
    # center y; the intercept is reported separately in the result
    intercept = np.mean(y)
    y = y - intercept
    # dj = column-wise sum of squares of X
    dj = np.sum(np.square(X), axis = 0)
    niter = 0
    wk = winit
    s2 = s2init
    bbar = binit
    elbo = np.inf
    theta = np.zeros(p)
    elbo_path = list()
    obj_path = list()
    outer_elbo_path = list()
    # inner solver is constructed once and re-fit every outer iteration
    plr = PLR(method = 'L-BFGS-B', optimize_w = False, optimize_s = False,
              debug = debug,
              display_progress = display_progress,
              calculate_elbo = calculate_elbo,
              maxiter = qb_maxiter,
              unshrink_method = unshrink_method,
              prior_optim_method = 'softmax',
              call_from_em = True)
    for itr in range(maxiter):
        '''
        New coefficients
        '''
        # on the first pass the warm start is binit (coefficients);
        # afterwards we warm-start from the previous theta
        is_step_one = True if itr == 0 else False
        bold = binit if is_step_one else theta
        plr.fit(X, y, sk, binit = bold, winit = wk, s2init = s2, inv_binit = theta, is_binit_coef = is_step_one)
        theta = plr.theta
        if calculate_elbo:
            elbo_path += plr.elbo_path
        obj_path += plr.obj_path
        '''
        Empirical Bayes update for wk and s2, also advances coef one step
        but we drop that advance
        '''
        bbar, wk, s2, elbo = ws_one_step(X, y, plr.coef, plr.prior, plr.residual_var, sk, dj)
        outer_elbo_path.append(elbo)
        '''
        Termination criteria
        '''
        # elboold is set at the end of the previous iteration, so the
        # comparison is only valid from the second iteration on
        if (itr > 0) and (elboold - elbo < epstol): break
        elboold = elbo.copy()
    print (f"mr.ash.pen (EM) terminated at iteration {itr + 1}.")
    res = ResInfo(theta = theta,
                  coef = bbar,
                  prior = wk,
                  residual_var = s2,
                  intercept = intercept,
                  elbo_path = elbo_path,
                  outer_elbo_path = outer_elbo_path,
                  obj_path = obj_path,
                  niter = len(obj_path))
    return res
def ebfit_old(X, y, sk, wk, binit = None, s2init = 1,
              maxiter = 1000, qb_maxiter = 100,
              calculate_elbo = True,
              epstol = 1e-8):
    """Older EM loop kept for reference.

    Same structure as ebfit, but re-creates the PLR solver each iteration,
    performs the w / s2 updates inline instead of calling ws_one_step, and
    uses the 'mixsqp' prior optimizer. Returns a ResInfo namedtuple.
    """
    n, p = X.shape
    k = sk.shape[0]
    # center y; intercept is reported separately
    intercept = np.mean(y)
    y = y - intercept
    dj = np.sum(np.square(X), axis = 0)
    niter = 0
    w = wk
    s2 = s2init
    b = binit
    elbo_path = list()
    obj_path = list()
    elbo = np.inf
    outer_elbo_path = list()
    for it in range(maxiter):
        ### Remember old parameters
        bold = b.copy() if b is not None else b
        wold = w.copy()
        s2old = s2
        elboold = elbo
        ### Update b (inner penalized-regression solve, fresh solver each pass)
        plr = PLR(method = 'L-BFGS-B', optimize_w = False, optimize_s = False, is_prior_scaled = True,
                  debug = False, display_progress = False, calculate_elbo = calculate_elbo, maxiter = qb_maxiter,
                  call_from_em = True, unshrink_method = 'heuristic', prior_optim_method = 'mixsqp')
        plr.fit(X, y, sk, binit = bold, winit = wold, s2init = s2old)
        b = plr.coef
        theta = plr.theta
        r = y - np.dot(X, b)
        elbo_path += plr.elbo_path
        obj_path += plr.obj_path
        ### calculate ELBO before updating w and s2
        btilde = b + np.dot(X.T, r) / dj
        nmash = NormalMeansASHScaled(btilde, np.sqrt(s2), w, sk, d = dj, debug = False)
        phijk, mujk, varjk = nmash.posterior()
        #elbo = elbo_py.scalemix(X, y, sk, b, w, s2,
        #                        dj = dj, phijk = phijk, mujk = mujk, varjk = varjk, eps = 1e-8)
        #outer_elbo_path.append(elbo)
        ### Update w: mean responsibility of each mixture component
        w = np.sum(phijk, axis = 0) / p
        ### Update s2 from the posterior-mean coefficients
        bbar = np.sum(phijk * mujk, axis = 1)
        a1 = np.sum(dj * bbar * btilde)
        varobj = np.dot(r, r) - np.dot(np.square(b), dj) + a1
        s2 = (varobj + p * (1 - w[0]) * s2old) / (n + p * (1 - w[0]))
        ### Update ELBO / new b
        b = bbar.copy()
        elbo = elbo_py.scalemix(X, y, sk, b, w, s2,
                                dj = dj, phijk = phijk, mujk = mujk, varjk = varjk, eps = 1e-8)
        outer_elbo_path.append(elbo)
        ### Convergence
        ### No elbo in history before one iteration so cannot compare
        if (it > 0) and (elboold - elbo < epstol):
            break
    print (f"mr.ash.pen (EM) terminated at iteration {it + 1}.")
    res = ResInfo(theta = theta,
                  coef = b,
                  prior = w,
                  residual_var = s2,
                  intercept = intercept,
                  elbo_path = elbo_path,
                  outer_elbo_path = outer_elbo_path,
                  obj_path = obj_path,
                  niter = len(obj_path))
    return res
|
{"hexsha": "b2c088fdab7b05823959ad5d81a8b1ff896bfc4a", "size": 6280, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mrashpen/inference/ebfit.py", "max_stars_repo_name": "banskt/mr-ash-pen", "max_stars_repo_head_hexsha": "a9e574f66ce64265bff22cf0661d23a5706e4515", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mrashpen/inference/ebfit.py", "max_issues_repo_name": "banskt/mr-ash-pen", "max_issues_repo_head_hexsha": "a9e574f66ce64265bff22cf0661d23a5706e4515", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mrashpen/inference/ebfit.py", "max_forks_repo_name": "banskt/mr-ash-pen", "max_forks_repo_head_hexsha": "a9e574f66ce64265bff22cf0661d23a5706e4515", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3711340206, "max_line_length": 121, "alphanum_fraction": 0.528343949, "include": true, "reason": "import numpy", "num_tokens": 1905}
|
"""
Class and function definitions for word-based modifications
"""
import json
import torch
import numpy as np
from tqdm import tqdm
class WordBasedModifications():
    """Applies word-level (token-id) transformations to tokenized datasets,
    currently vocabulary permutation driven by a JSON id-to-id mapping."""

    def __init__(self, data_args):
        self.data_args = data_args

        # Function for modifying string json to integer json
        # https://stackoverflow.com/questions/1450957/pythons-json-module-converts-int-dictionary-keys-to-strings
        # NOTE(review): this also converts *string values* to int, not just
        # keys — confirm the mapping file only contains id -> id pairs
        def jsonKV2int(x):
            if isinstance(x, dict):
                return {int(k):(int(v) if isinstance(v, str) else v) for k,v in x.items()}
            return x

        # Load the the vocabulary file (only needed when permutation is on)
        if self.data_args.permute_vocabulary:
            with open(self.data_args.vocab_permutation_file, 'r') as fp:
                self.vocab_mapping = json.load(fp, object_hook=jsonKV2int)

    def modify_inputs_permute(self, inputs):
        """Map each token id of inputs['input_ids'] through vocab_mapping.

        When vocab_modification == 'random', the mapping is skipped with
        probability 0.5 and the inputs are returned unchanged.
        """
        # Information about inputs:
        # inputs['input_ids'].device is cpu
        # inputs['input_ids'] is torch.Tensor
        # TODO: Optimize this code. Currently using for loops

        # Check if all the inputs need to be modified
        if self.data_args.vocab_modification == 'random':
            # With a 50% probability, just return the original inputs
            if np.random.uniform() < 0.5:
                return inputs

        # for i in range(inputs['input_ids'].shape[0]):
        #     for j in range(inputs['input_ids'].shape[1]):
        #         inputs['input_ids'][i,j] = self.vocab_mapping[inputs['input_ids'][i,j].item()]
        #         # if inputs['labels'][i,j] >= 0:
        #         #     inputs['labels'][i,j] = self.vocab_mapping[inputs['labels'][i,j].item()]

        # in-place replacement of every token id with its mapped id
        for i in range(len(inputs['input_ids'])):
            inputs['input_ids'][i] = self.vocab_mapping[inputs['input_ids'][i]]

        return inputs

    def modify_inputs_permute_all(self, train_dataset):
        """
        Modify all the inputs in the dataset
        """
        length_of_dataset = len(train_dataset)

        # Print statement
        print("Word-based transformation: Permuting the vocabulary")

        # Loop over all the sentences
        for i in tqdm(range(len(train_dataset))):
            modified_inputs = self.modify_inputs_permute(train_dataset[i])
            train_dataset[i]['input_ids'] = modified_inputs['input_ids']
            # train_dataset[i]['labels'] = modified_inputs['labels']

        return train_dataset
|
{"hexsha": "2c2f05400ce28fbd8a5ea58be8fe612e04ae65e1", "size": 2477, "ext": "py", "lang": "Python", "max_stars_repo_path": "archive/synthetic_language_modifications_utils.py", "max_stars_repo_name": "princeton-nlp/MultilingualAnalysis", "max_stars_repo_head_hexsha": "b0d61c93c0c020a698a06264897dde14c9db471c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-11-03T05:10:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T20:28:06.000Z", "max_issues_repo_path": "archive/synthetic_language_modifications_utils.py", "max_issues_repo_name": "princeton-nlp/MultilingualAnalysis", "max_issues_repo_head_hexsha": "b0d61c93c0c020a698a06264897dde14c9db471c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "archive/synthetic_language_modifications_utils.py", "max_forks_repo_name": "princeton-nlp/MultilingualAnalysis", "max_forks_repo_head_hexsha": "b0d61c93c0c020a698a06264897dde14c9db471c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-24T15:07:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T15:07:35.000Z", "avg_line_length": 37.5303030303, "max_line_length": 113, "alphanum_fraction": 0.6108195398, "include": true, "reason": "import numpy", "num_tokens": 546}
|
# -*- coding: UTF-8 -*-
# !/usr/bin/python3
"""
Model for SEMI-CLDC task
"""
# ************************************************************
# Imported Libraries
# ************************************************************
import math
import numpy as np
import sympy
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from nn_model.xlingva import XlingVA
from nn_model.inferer import Inferer
from nn_model.mlp.cldc_classifier import CLDCClassifier
from utils.logpdfs import multi_diag_normal, cal_kl_gau1, cal_kl_gau2, cal_kl_gau1_fb, \
cal_kl_gau2_fb
import pdb
# high-precision log(2*pi), used in the Gaussian entropy terms below
const = np.float128((sympy.log(2 * sympy.pi)).evalf(64))

# free-bits ("fb") thresholds for the KL terms:
# l_* apply to the labelled (L) objective, u_* to the unlabelled (U) one
l_z1_fb = 1.0
l_z2_fb = 1.0
u_z1_fb = 0.1
u_z2_fb = 0.1
class SEMICLDCModel(nn.Module):
    def __init__(self, params, data_list, model_dict=None):
        """Build the semi-supervised CLDC model.

        Strategy methods (forward_*, get_z1_*, get_z1y_*, get_z2y_*, kl_z2_*)
        are bound by name via getattr from params.cldc_train_mode and
        params.semicldc_cond_type, so the concrete implementations live in
        the correspondingly named methods of this class.
        """
        super(SEMICLDCModel, self).__init__()
        # label size
        self.label_size = params.cldc_label_size
        # per-label batch size for the unlabelled (U) data
        self.bs_u = params.semicldc_U_bs // self.label_size
        # weight of the classifier loss in the total objective
        self.semicldc_classifier_alpha = params.semicldc_classifier_alpha
        # log prior over labels y
        self.yprior = self.get_yprior(params, data_list)
        # cross-lingual VAE backbone
        self.xlingva = XlingVA(params, data_list, model_dict=model_dict)
        self.init_semicldc_cond = getattr(self, 'init_semicldc_cond_{}'.format(params.semicldc_cond_type))
        # initialize the setting of how to combine x & y and z & y
        self.init_semicldc_cond(params)
        # cldc MLP
        self.cldc_classifier = CLDCClassifier(params, params.cldc_classifier_config)
        # bind strategy functions by train mode / conditioning type
        self.forward = getattr(self, 'forward_{}'.format(params.cldc_train_mode))
        self.get_z1 = getattr(self, 'get_z1_{}'.format(params.cldc_train_mode))
        # x & y
        self.get_z1y = getattr(self, 'get_z1y_{}'.format(params.semicldc_cond_type))
        # z & y
        self.get_z2y = getattr(self, 'get_z2y_{}'.format(params.semicldc_cond_type))
        # calculate kl of z2
        self.kl_z2 = getattr(self, 'kl_z2_{}'.format(params.semicldc_cond_type))
        # global step counter, advanced once per forward pass
        self.step = 0
        self.anneal_warm_up = params.semicldc_anneal_warm_up
        # warm up stage: when True, forward_trainenc only trains the classifier
        self.warm_up = False
        self.use_cuda = params.cuda
        if self.use_cuda:
            self.cuda()
def init_model(self, model_dict):
if model_dict is None:
return
else:
# 3. load the new state dict
# parameter names need to be exactly the same
self.load_state_dict(model_dict)
    def train_classifier(self, lang, batch_in, batch_lens, batch_lb, training=True):
        """Encode a labelled batch to z1 and run the CLDC classifier on it.

        When training=True, backpropagates the classifier loss (plus the
        adversarial discriminator/encoder losses when self.adv_training is
        True) and returns (loss dict, predictions); otherwise returns the
        raw (loss, prediction probabilities, predictions) for evaluation.
        """
        # x -> hid_x -> mu1, logva1 -> z1
        mu1, logvar1, z1, x, loss_dis, loss_enc = self.get_z1(lang, batch_in, batch_lens)
        # cldc loss for training
        cldc_loss, pred_p, pred = self.cldc_classifier(z1, y=batch_lb, training=training)
        if training:
            L_dict = defaultdict(float)
            if not hasattr(self, 'adv_training') or self.adv_training is False:
                # plain classifier training: only the CLDC loss is optimized
                cldc_loss.mean().backward()
                L_dict['L_dis_loss'] = loss_dis
                L_dict['L_enc_loss'] = loss_enc
            elif self.adv_training is True:
                # adversarial training: jointly optimize all three losses
                (cldc_loss.mean() +
                 loss_dis.mean() +
                 loss_enc.mean()).backward()
                L_dict['L_dis_loss'] = loss_dis.mean().item()
                L_dict['L_enc_loss'] = loss_enc.mean().item()
            L_dict['L_cldc_loss'] = cldc_loss.mean().item()
            return L_dict, pred
        else:
            return cldc_loss, pred_p, pred
    def get_yprior(self, params, data_list):
        """Return the log prior over labels, either uniform or matching the
        labelled training-set proportions, as a (label_size,) tensor."""
        if params.semicldc_yprior_type == 'uniform':
            # prior scores of y
            yprior_score = torch.ones(self.label_size)
            # log-softmax of a constant vector = log(1 / label_size) each
            m = nn.LogSoftmax(dim=-1)
            yprior = m(yprior_score)
        elif params.semicldc_yprior_type == 'train_prop':
            # same distribution as lablled training data
            train_prop = data_list[params.lang_dict[params.cldc_langs[0]]].train_prop
            # epsilon guards against log(0) for unseen labels
            yprior = torch.log(torch.tensor(train_prop + 1e-32, dtype=torch.float, requires_grad=False))
        if params.cuda:
            yprior = yprior.cuda()
        return yprior
    def init_semicldc_cond_concat(self, channel=None, params=None):
        ...
    def init_semicldc_cond_transconcat(self, params):
        """Conditioning setup: project one-hot y to a dense vector, then
        concatenate it with z before inference."""
        # trans one hot y to dense y, then concat z1 and y
        self.yohtoy = nn.Linear(self.label_size, params.z_dim)
        self.z1y_z2 = Inferer(params, in_dim=params.z_dim + params.z_dim)
        self.z2y_z1 = Inferer(params, in_dim=params.z_dim + params.z_dim)
    def init_semicldc_cond_transadd(self, params):
        """Conditioning setup: project one-hot y to a dense vector and add it
        to z (separate projections + batch norms for the z1 and z2 paths)."""
        # trans one_hot_y to dense_y, then add dense_y to z1
        self.leakyrelu = nn.LeakyReLU()
        self.yohtoy_toz2 = nn.Linear(self.label_size, params.z_dim)
        self.hbn_z1y = nn.BatchNorm1d(params.z_dim)
        self.yohtoy_toz1 = nn.Linear(self.label_size, params.z_dim)
        self.hbn_z2y = nn.BatchNorm1d(params.z_dim)
        self.z1y_z2 = Inferer(params, in_dim=params.z_dim)
        self.z2y_z1 = Inferer(params, in_dim=params.z_dim)
    def init_semicldc_cond_gmix(self, params):
        """Conditioning setup: Gaussian-mixture variant with concatenation —
        q(z2|z1,y) via concat, plus explicit p(z2|y) and p(z1|z2) inferers."""
        # concat
        # concat z1 and y
        self.z1y_z2 = Inferer(params, in_dim=params.z_dim + self.label_size)
        # p(z2 | y)
        self.y_z2 = Inferer(params, in_dim=self.label_size)
        # p(z1 | z2)
        self.z2y_z1 = Inferer(params, in_dim=params.z_dim)
        '''
        # transadd
        # trans one_hot_y to dense_y, then add dense_y to z1
        self.yohtoy = nn.Linear(self.label_size, params.z_dim)
        self.z1y_z2 = Inferer(params, in_dim = params.z_dim)
        self.y2z2 = Inferer(params, in_dim = params.z_dim)
        self.z2y_z1 = Inferer(params, in_dim = params.z_dim)
        '''
    def init_semicldc_cond_gmix_transadd(self, params):
        """Conditioning setup: Gaussian-mixture variant where one-hot y is
        projected to a dense vector and added to z1 before inferring z2."""
        # trans one_hot_y to dense_y, then add dense_y to z1
        self.leakyrelu = nn.LeakyReLU()
        self.yohtoy_toz2 = nn.Linear(self.label_size, params.z_dim)
        self.hbn_z1y = nn.BatchNorm1d(params.z_dim)
        self.z1y_z2 = Inferer(params, in_dim=params.z_dim)
        self.y_z2 = Inferer(params, in_dim=params.cldc_label_size)
        self.z2y_z1 = Inferer(params, in_dim=params.z_dim)
    def forward_trainenc(self, lang, batch_in, batch_lens, batch_lb, batch_ohlb, batch_uin,
                         batch_ulens, batch_ulb, batch_uohlb):
        """One training step with a trainable encoder: labelled (L) loss +
        classifier loss, then unlabelled (U) loss; returns the merged loss
        dict and the labelled predictions. During warm-up only the
        classifier is trained.
        """
        # warm up
        if self.warm_up:
            return self.train_classifier(lang, batch_in, batch_lens, batch_lb)
        # active-units (au) accumulators, filled during the forward passes
        self.L_mu1, self.U_mu1 = [], []
        self.L_mu2, self.U_mu2 = [], []
        # z1, z2 distance
        self.L_l2dist, self.L_cosdist, self.L_z1z2kld = .0, .0, .0
        self.U_l2dist, self.U_cosdist, self.U_z1z2kld = .0, .0, .0
        # calculate L loss and classfication loss
        L_dict, L_pred = self.forward_L_trainenc(lang, batch_in, batch_lens, batch_lb, batch_ohlb,
                                                 cls_alpha=self.semicldc_classifier_alpha)
        # calculate U loss
        U_dict = self.forward_U_trainenc(lang, batch_uin, batch_ulens, batch_ulb, batch_uohlb)
        # merge two dicts
        loss_dict = {**L_dict, **U_dict}
        self.step += 1
        # z1, z2 distance
        loss_dict['L_l2_dist'], loss_dict['L_cosdist'], loss_dict[
            'L_z1z2kld'] = self.L_l2dist, self.L_cosdist, self.L_z1z2kld
        loss_dict['U_l2_dist'], loss_dict['U_cosdist'], loss_dict[
            'U_z1z2kld'] = self.U_l2dist, self.U_cosdist, self.U_z1z2kld
        # total MEAN loss
        loss_dict['total_loss'] = loss_dict['L_loss_trainenc'] + loss_dict[
            'U_loss_trainenc'] + self.semicldc_classifier_alpha * loss_dict['L_cldc_loss']
        # au: number of active latent units
        # NOTE(review): calc_au is not defined in this file — presumably a
        # module-level helper defined elsewhere; confirm
        loss_dict['L_mu1'] = calc_au(self.L_mu1)[0]
        loss_dict['L_mu2'] = calc_au(self.L_mu2)[0]
        loss_dict['U_mu1'] = calc_au(self.U_mu1)[0]
        loss_dict['U_mu2'] = calc_au(self.U_mu2)[0]
        # au
        print()
        print('L_mu1: {}'.format(loss_dict['L_mu1']))
        print('L_mu2: {}'.format(loss_dict['L_mu2']))
        print('U_mu1: {}'.format(loss_dict['U_mu1']))
        print('U_mu2: {}'.format(loss_dict['U_mu2']))
        return loss_dict, L_pred
def forward_L_trainenc(self, lang, batch_in, batch_lens, batch_lb, batch_ohlb, le=1.0,
cls_alpha=0.1):
L_dict, L_pred = self.forward_L_trainenc_batch(lang, batch_in, batch_lens, batch_lb, batch_ohlb)
# calculate all necessary losses
L_dict = self.cal_L_trainenc(L_dict)
# backward
L_dict = self.backward_L_trainenc(L_dict, le, cls_alpha)
return L_dict, L_pred
    def forward_L_trainenc_batch(self, lang, batch_in, batch_lens, batch_lb, batch_ohlb):
        """Forward a labelled batch and collect the trainable-encoder loss
        terms on top of the fixed-encoder ones: reconstruction NLL, the
        entropy of q(z1|x) and the z1 KL (plus its free-bits variant)."""
        L_dict, L_pred, mu1, logvar1, z1, rec_mu1, rec_logvar1 = self.forward_L_fixenc_batch(lang,
                                                                                             batch_in,
                                                                                             batch_lens,
                                                                                             batch_lb,
                                                                                             batch_ohlb)
        # nll_loss
        L_dict['L_nll'] = self.xlingva.decoder(lang, z1, batch_in, reduction=None)
        # H(q(z1|x))
        # k/2 + k/2 log(2pi) + 1/2 log(|covariance|)
        # L_dict['L_Hz1'] = -multi_diag_normal(z1, mu1, logvar1)
        L_dict['L_Hz1'] = mu1.shape[1] / 2.0 * (1 + const) + 1 / 2.0 * logvar1.sum(dim=-1)
        # regroup: KL between q(z1|x) and the reconstructed prior
        L_dict['L_z1kld'] = cal_kl_gau2(mu1, logvar1, rec_mu1, rec_logvar1)
        # fb: same KL with the free-bits floor l_z1_fb
        L_dict['L_z1kld_fb'] = cal_kl_gau2_fb(mu1, logvar1, rec_mu1, rec_logvar1, l_z1_fb)
        return L_dict, L_pred
    def cal_L_trainenc(self, L_dict):
        """Assemble the labelled trainable-encoder loss: NLL + weighted z2 KL
        + weighted z1 KL - label prior. KL weights are currently fixed at
        1.0; the commented blocks are alternative annealing / free-bits
        schedules kept for experimentation."""
        # currently unused fixed-KL targets (see commented alternative below)
        lkld_fix = 5.0
        lz1kld_fix = 5.0
        L_dict = self.cal_L_fixenc(L_dict)
        # L_dict['L_loss_trainenc'] = L_dict['L_loss'] + lnll * L_dict['L_nll'] - lz1kl * L_dict['L_Hz1']
        # regroup
        '''
        kl_weight_z1 = get_cyclic_weight(self.step, self.cyclic_period)
        print()
        print(kl_weight_z1)
        kl_weight_z2 = get_cyclic_weight(self.step, self.cyclic_period)
        '''
        '''
        kl_weight_z1 = min(1.0, self.step / self.anneal_warm_up)
        kl_weight_z2 = min(1.0, self.step / self.anneal_warm_up)
        '''
        kl_weight_z1 = 1.0
        kl_weight_z2 = 1.0
        L_dict['L_loss_trainenc'] = L_dict['L_nll'] + kl_weight_z2 * L_dict['L_kld'] + kl_weight_z1 * \
                                    L_dict['L_z1kld'] - L_dict['L_yprior']
        # L_dict['L_loss_trainenc'] = L_dict['L_nll'] + torch.abs(lkld_fix - L_dict['L_kld']) + torch.abs(lz1kld_fix - L_dict['L_z1kld']) - L_dict['L_yprior']
        # total KL for monitoring
        L_dict['L_TKL'] = L_dict['L_kld'] + L_dict['L_z1kld'] - L_dict['L_yprior']
        # fb
        '''
        kl_weight_nll = min(1.0, self.step / self.anneal_warm_up)
        kl_weight_z1 = get_cyclic_weight(self.step, self.cyclic_period)
        print()
        print(kl_weight_z1)
        kl_weight_z2 = get_cyclic_weight(self.step, self.cyclic_period)
        kl_weight_nll = 1.0
        kl_weight_z1 = 1.0
        kl_weight_z2 = 1.0
        L_dict['L_loss_trainenc'] = kl_weight_nll * L_dict['L_nll'] + kl_weight_z2 * L_dict['L_kld_fb'] + kl_weight_z1 * L_dict['L_z1kld_fb'] - L_dict['L_yprior']
        '''
        # autoencoding wo KL
        # L_dict['L_loss_trainenc'] = L_dict['L_nll']
        return L_dict
    def backward_L_trainenc(self, L_dict, e, alpha):
        """Backpropagate the labelled loss (scaled by e) plus alpha times the
        classifier loss — and the adversarial losses when self.adv_training
        is True — then reduce every dict entry to a plain float mean."""
        '''
        # annealing
        total_step = 5000 * 2
        alpha = self.semicldc_classifier_alpha - (self.semicldc_classifier_alpha - 0.1) * (
            self.step / total_step)
        print()
        print(alpha)

        # cyclic annealing
        # number of steps for increasing
        total_step = 100 * 2
        cur_step = self.step % total_step
        alpha = self.semicldc_classifier_alpha - (self.semicldc_classifier_alpha - 0.1) * (
            cur_step / total_step)
        print()
        print(alpha)
        '''
        if not hasattr(self, 'adv_training') or self.adv_training is False:
            (e * (L_dict['L_loss_trainenc'].mean())
             + alpha * L_dict['L_cldc_loss'].mean()).backward()
        elif self.adv_training is True:
            (e * (L_dict['L_loss_trainenc'].mean()) +
             alpha * L_dict['L_cldc_loss'].mean() +
             L_dict['L_dis_loss'].mean() +
             L_dict['L_enc_loss'].mean()
             ).backward()
        # autoencoding wo KL
        # (L_dict['L_loss_trainenc'].mean()).backward()
        # get mean().item(), reduce memory
        L_dict = {k: (v.mean().item() if torch.is_tensor(v) else float(v))
                  for k, v in L_dict.items()}
        return L_dict
    def forward_U_trainenc(self, lang, batch_uin, batch_ulens, batch_ulb, batch_uohlb, ue=1.0):
        """Process the unlabelled batch in sub-batches of self.bs_u inputs
        (each replicated label_size times in batch_ulb/batch_uohlb), doing
        forward + loss + backward per sub-batch, and return the per-example
        averaged loss dict."""
        U_dict = defaultdict(float)
        cur_bs = batch_uin.shape[0]
        n_bs = math.ceil(cur_bs / self.bs_u)
        for i in range(n_bs):
            # label tensors are label_size times longer than the inputs, so
            # they are sliced with a stride scaled by self.label_size
            U_dict_batch, U_pred_p = self.forward_U_trainenc_batch(lang,
                                                                   batch_uin[
                                                                       i * self.bs_u: (i + 1) * self.bs_u],
                                                                   batch_ulens[
                                                                       i * self.bs_u: (i + 1) * self.bs_u],
                                                                   batch_ulb[
                                                                       i * self.bs_u * self.label_size:
                                                                       (i + 1) * self.bs_u * self.label_size],
                                                                   batch_uohlb[
                                                                       i * self.bs_u * self.label_size:
                                                                       (i + 1) * self.bs_u * self.label_size])
            # calculate all necessary losses
            U_dict_batch = self.cal_U_trainenc(U_dict_batch, U_pred_p)
            # backward
            U_dict_batch = self.backward_U_trainenc(U_dict_batch, cur_bs, ue)
            U_dict = {k: (U_dict[k] + v) for k, v in U_dict_batch.items()}
        # z1, z2 distance: average the per-sub-batch accumulators
        self.U_l2dist /= n_bs
        self.U_cosdist /= n_bs
        self.U_z1z2kld /= n_bs
        U_dict = {k: v / cur_bs for k, v in U_dict.items()}
        return U_dict
    def forward_U_trainenc_batch(self, lang, batch_uin, batch_ulens, batch_ulb, batch_uohlb):
        """Forward one unlabelled sub-batch and add the trainable-encoder
        loss terms (reconstruction NLL, H(q(z1|x)), z1 KL and its free-bits
        variant) on top of the fixed-encoder ones."""
        U_dict, U_pred_p, mu1, logvar1, z1, dup_mu1, dup_logvar1, rec_mu1, rec_logvar1 = self.forward_U_fixenc_batch(
            lang, batch_uin, batch_ulens, batch_ulb, batch_uohlb)
        U_dict['U_nll'] = self.xlingva.decoder(lang, z1, batch_uin, reduction=None)
        # H(q(z1|x))
        # k/2 + k/2 log(2pi) + 1/2 log(|covariance|)
        # U_dict['U_Hz1'] = -multi_diag_normal(z1, mu1, logvar1)
        U_dict['U_Hz1'] = mu1.shape[1] / 2.0 * (1 + const) + 1 / 2.0 * logvar1.sum(dim=-1)
        # regroup: KL on the label-duplicated posterior vs the reconstruction
        U_dict['U_z1kld'] = cal_kl_gau2(dup_mu1, dup_logvar1, rec_mu1, rec_logvar1)
        # fb: same KL with the free-bits floor u_z1_fb
        U_dict['U_z1kld_fb'] = cal_kl_gau2_fb(dup_mu1, dup_logvar1, rec_mu1, rec_logvar1, u_z1_fb)
        return U_dict, U_pred_p
    def cal_U_trainenc(self, U_dict, U_pred_p):
        """Assemble the unlabelled trainable-encoder loss: the z1 KL is first
        marginalized over the predicted label distribution U_pred_p, then
        NLL + weighted KLs + KL(q(y|x) || p(y)) are summed. KL weights are
        currently fixed at 1.0; commented blocks are alternative schedules."""
        # currently unused fixed-KL targets (see commented alternative below)
        ukld_fix = 5.0
        uz1kld_fix = 5.0
        U_dict = self.cal_U_fixenc(U_dict, U_pred_p)
        # U_dict['U_loss_trainenc'] = U_dict['U_loss'] + unll * U_dict['U_nll'] - uz1kl * U_dict['U_Hz1']
        # regroup: expectation of the z1 KL under q(y|x)
        U_dict['U_z1kld'] = torch.sum((U_dict['U_z1kld'] * U_pred_p).view(-1, self.label_size), dim=1)
        '''
        kl_weight_z1 = get_cyclic_weight(self.step, self.cyclic_period)
        kl_weight_z2 = get_cyclic_weight(self.step, self.cyclic_period)
        '''
        '''
        kl_weight_z1 = min(1.0, self.step / self.anneal_warm_up)
        kl_weight_z2 = min(1.0, self.step / self.anneal_warm_up)
        '''
        kl_weight_z1 = 1.0
        kl_weight_z2 = 1.0
        U_dict['U_loss_trainenc'] = U_dict['U_nll'] + kl_weight_z2 * U_dict['U_kld'] + kl_weight_z1 * \
                                    U_dict['U_z1kld'] + U_dict['kldy']
        # U_dict['U_loss_trainenc'] = U_dict['U_nll'] + torch.abs(ukld_fix - U_dict['U_kld']) + torch.abs(uz1kld_fix - U_dict['U_z1kld']) + U_dict['kldy']
        # total KL for monitoring
        U_dict['U_TKL'] = U_dict['U_kld'] + U_dict['U_z1kld'] + U_dict['kldy']
        '''
        # fb
        U_dict['U_z1kld_fb'] = torch.sum((U_dict['U_z1kld_fb'] * U_pred_p).view(-1, self.label_size), dim = 1)
        #kl_weight_nll = min(1.0, self.step / self.anneal_warm_up)
        #kl_weight_z1 = get_cyclic_weight(self.step, self.cyclic_period)
        #kl_weight_z2 = get_cyclic_weight(self.step, self.cyclic_period)
        kl_weight_nll = 1.0
        kl_weight_z1 = 1.0
        kl_weight_z2 = 1.0
        kl_weight_y = 1.0
        U_dict['U_loss_trainenc'] = kl_weight_nll * U_dict['U_nll'] + kl_weight_z2 * U_dict['U_kld_fb'] + kl_weight_z1 * U_dict['U_z1kld_fb'] + kl_weight_y * U_dict['kldy']
        '''
        # autoencoding wo KL
        # U_dict['U_loss_trainenc'] = U_dict['U_nll']
        return U_dict
def backward_U_trainenc(self, U_dict, cur_bs, e):
# backward
if not hasattr(self, 'adv_training') or self.adv_training is False:
(e * U_dict['U_loss_trainenc'].sum() / cur_bs).backward()
elif self.adv_training is True:
(e * U_dict['U_loss_trainenc'].sum() / cur_bs +
U_dict['U_dis_loss'].sum() / cur_bs +
U_dict['U_enc_loss'].sum() / cur_bs).backward()
# get sum().item()
U_dict = {k: (v.sum().item() if torch.is_tensor(v) else float(v))
for k, v in U_dict.items()}
return U_dict
    def forward_fixenc(self, lang, batch_in, batch_lens, batch_lb, batch_ohlb, batch_uin, batch_ulens,
                       batch_ulb, batch_uohlb):
        """Run one semi-supervised step with the shared encoder held fixed.

        Processes the labeled batch (batch_in/.../batch_ohlb) and the
        unlabeled batch (batch_uin/.../batch_uohlb), then merges both loss
        dicts.  NOTE: the sub-calls perform backward() as a side effect, so
        this is a training step, not a pure forward.
        Returns (loss_dict, L_pred) where L_pred are classifier predictions
        for the labeled batch.
        """
        # au: reset the per-step posterior-mean buffers used by calc_au.
        self.L_mu1, self.U_mu1 = [], []
        self.L_mu2, self.U_mu2 = [], []
        # z1, z2 distance accumulators (filled by the *_batch sub-calls)
        self.L_l2dist, self.L_cosdist, self.L_z1z2kld = .0, .0, .0
        self.U_l2dist, self.U_cosdist, self.U_z1z2kld = .0, .0, .0
        # calculate L loss and classfication loss
        L_dict, L_pred = self.forward_L_fixenc(lang, batch_in, batch_lens, batch_lb, batch_ohlb)
        # calculate U loss
        U_dict = self.forward_U_fixenc(lang, batch_uin, batch_ulens, batch_ulb, batch_uohlb)
        # merge two dicts
        loss_dict = {**L_dict, **U_dict}
        self.step += 1
        # z1, z2 distance diagnostics copied out of the accumulators
        loss_dict['L_l2_dist'], loss_dict['L_cosdist'], loss_dict[
            'L_z1z2kld'] = self.L_l2dist, self.L_cosdist, self.L_z1z2kld
        loss_dict['U_l2_dist'], loss_dict['U_cosdist'], loss_dict[
            'U_z1z2kld'] = self.U_l2dist, self.U_cosdist, self.U_z1z2kld
        # total MEAN loss
        # total_loss = L_loss.mean()
        # total_loss = 0.1 * L_cldc_loss.mean()
        # total_loss = L_loss.mean() + 0.1 * L_cldc_loss.mean()
        # total_loss = L_loss + U_loss
        # reported objective: labeled + unlabeled ELBO plus the weighted
        # classifier loss (values are already floats after backward).
        loss_dict['total_loss'] = loss_dict['L_loss'] + loss_dict[
            'U_loss'] + self.semicldc_classifier_alpha * loss_dict['L_cldc_loss']
        # au: number of active units among the collected posterior means
        loss_dict['L_mu1'] = calc_au(self.L_mu1)[0]
        loss_dict['L_mu2'] = calc_au(self.L_mu2)[0]
        loss_dict['U_mu1'] = calc_au(self.U_mu1)[0]
        loss_dict['U_mu2'] = calc_au(self.U_mu2)[0]
        return loss_dict, L_pred
def forward_L_fixenc(self, lang, batch_in, batch_lens, batch_lb, batch_ohlb):
L_dict, L_pred, _, _, _, _, _ = self.forward_L_fixenc_batch(lang, batch_in, batch_lens,
batch_lb, batch_ohlb)
# calculate all necessary losses
L_dict = self.cal_L_fixenc(L_dict)
# backward
L_dict = self.backward_L_fixenc(L_dict)
return L_dict, L_pred
    def forward_L_fixenc_batch(self, lang, batch_in, batch_lens, batch_lb, batch_ohlb):
        """Forward one labeled batch: encode to z1, classify, lift to z2, and
        collect per-example loss terms (no reduction, no backward here)."""
        # x -> hid_x -> mu1, logva1 -> z1
        mu1, logvar1, z1, hid, loss_dis, loss_enc = self.get_z1(lang, batch_in, batch_lens)
        # cldc loss for training (gold labels available on the labeled side)
        cldc_loss, _, pred = self.cldc_classifier(z1, y=batch_lb, training=True)
        # z1y -> z2
        mu2, logvar2, z2 = self.get_z2(z1, batch_ohlb)
        # au: stash posterior means for the active-unit statistics
        self.L_mu1.append(mu1)
        self.L_mu2.append(mu2)
        # z1, z2 distance diagnostics: Gaussian KL, L2 and cosine
        self.L_z1z2kld += 0.5 * torch.mean(
            torch.sum(logvar1 - logvar2 - 1 + ((mu2 - mu1).pow(2) + logvar2.exp()) / logvar1.exp(),
                      dim=1)).item()
        self.L_l2dist += torch.mean(torch.sqrt(torch.sum(((z1 - z2) ** 2), dim=1))).item()
        self.L_cosdist += torch.mean(F.cosine_similarity(z1, z2)).item()
        # reconstruct z1 from z2 (negative log-likelihood of the true z1)
        rec_loss, rec_mu1, rec_logvar1 = self.z2_rec_z1(z1, z2, batch_ohlb)
        # kl divergence of z2
        kld = self.kl_z2(mu2, logvar2, batch_ohlb)
        # get y prior: log p(y) looked up via the one-hot labels
        yprior = batch_ohlb @ self.yprior
        # fb: free-bits KL.  NOTE(review): `l_z2_fb` is not defined in this
        # method — presumably a module-level constant; confirm it exists.
        kld_fb = cal_kl_gau1_fb(mu2, logvar2, l_z2_fb)
        # fb
        L_dict = {
            'L_rec': rec_loss,
            'L_kld': kld,
            'L_yprior': yprior,
            'L_cldc_loss': cldc_loss,
            'L_kld_fb': kld_fb,
            'L_dis_loss': loss_dis,
            'L_enc_loss': loss_enc,
        }
        return L_dict, pred, mu1, logvar1, z1, rec_mu1, rec_logvar1
def kl_z2_gmix(self, mu_post, logvar_post, batch_ohlb):
# concat
mu_prior, logvar_prior = self.y2z2(batch_ohlb)
kld = 0.5 * torch.sum(logvar_prior - logvar_post - 1 + (
(mu_post - mu_prior).pow(2) + logvar_post.exp()) / logvar_prior.exp(), dim=1)
'''
# transadd
y = self.yohtoy(batch_ohlb)
mu_prior, logvar_prior = self.y2z2(y)
kld = 0.5 * torch.sum(logvar_prior - logvar_post - 1 + ((mu_post - mu_prior).pow(2) + logvar_post.exp()) / logvar_prior.exp(), dim = 1)
'''
return kld
def kl_z2_gmix_transadd(self, mu_post, logvar_post, batch_ohlb):
# transadd
mu_prior, logvar_prior = self.y_z2(batch_ohlb)
kld = 0.5 * torch.sum(logvar_prior - logvar_post - 1 + (
(mu_post - mu_prior).pow(2) + logvar_post.exp()) / logvar_prior.exp(), dim=1)
return kld
def kl_z2_general(self, mu_post, logvar_post, batch_ohlb):
kld = -0.5 * torch.sum(1 + logvar_post - mu_post.pow(2) - logvar_post.exp(), dim=1)
return kld
    def kl_z2_transadd(self, mu_post, logvar_post, batch_ohlb):
        # Under the "transadd" label-conditioning scheme the z2 prior is the
        # standard normal, so this simply delegates to kl_z2_general.
        return self.kl_z2_general(mu_post, logvar_post, batch_ohlb)
def cal_L_fixenc(self, L_dict):
# L
L_dict['L_loss'] = L_dict['L_rec'] + L_dict['L_kld'] - L_dict['L_yprior']
# L_dict['L_loss'] = lrec * L_dict['L_rec'] + lkld * torch.abs(lkld_fix - L_dict['L_kld']) - L_dict['L_yprior']
# L_loss = L_rec + min(1.0, self.step / 1000) * L_kld - L_yprior
return L_dict
def backward_L_fixenc(self, L_dict):
# backprop
(L_dict['L_loss'].mean() + self.semicldc_classifier_alpha * L_dict[
'L_cldc_loss'].mean()).backward()
# get mean().item(), reduce memory
L_dict = {k: v.mean().item() for k, v in L_dict.items()}
return L_dict
def forward_U_fixenc(self, lang, batch_uin, batch_ulens, batch_ulb, batch_uohlb):
U_dict = defaultdict(float)
cur_bs = batch_uin.shape[0]
n_bs = math.ceil(cur_bs / self.bs_u)
for i in range(n_bs):
U_dict_batch, U_pred_p, _, _, _, _, _, _, _ = self.forward_U_fixenc_batch(lang,
batch_uin[
i * self.bs_u: (
i + 1) * self.bs_u],
batch_ulens[
i * self.bs_u: (
i + 1) * self.bs_u],
batch_ulb[
i * self.bs_u * self.label_size: (
i + 1) * self.bs_u * self.label_size],
batch_uohlb[
i * self.bs_u * self.label_size: (
i + 1) * self.bs_u * self.label_size])
# calculate all necessary losses
U_dict_batch = self.cal_U_fixenc(U_dict_batch, U_pred_p)
# backward
U_dict_batch = self.backward_U_fixenc(U_dict_batch, cur_bs)
U_dict = {k: (U_dict[k] + v) for k, v in U_dict_batch.items()}
U_dict = {k: v / cur_bs for k, v in U_dict.items()}
return U_dict
    def forward_U_fixenc_batch(self, lang, batch_uin, batch_ulens, batch_ulb, batch_uohlb):
        """Forward one unlabeled sub-batch.

        Labels are unknown, so every example is duplicated once per label
        (enumerate_label) and per-(x, y) loss terms are returned; cal_U_fixenc
        later turns them into expectations under the classifier q(y|x).
        """
        mu1, logvar1, z1, hid, loss_dis, loss_enc = self.get_z1(lang, batch_uin, batch_ulens)
        # use classifier to infer q(y|x); no gold labels here (y=None)
        _, pred_p, _ = self.cldc_classifier(z1, y=None, training=True)
        # gumbel softmax
        # duplicate z1, enumerate y
        # bs * label_size, z_dim
        dup_z1 = self.enumerate_label(z1, batch_uohlb)
        dup_mu1 = self.enumerate_label(mu1, batch_uohlb)
        dup_logvar1 = self.enumerate_label(logvar1, batch_uohlb)
        # z1y -> z2
        mu2, logvar2, z2 = self.get_z2(dup_z1, batch_uohlb)
        # au: stash posterior means for the active-unit statistics
        self.U_mu1.append(dup_mu1)
        self.U_mu2.append(mu2)
        # z1, z2 distance diagnostics: Gaussian KL, L2 and cosine
        self.U_z1z2kld += 0.5 * torch.mean(torch.sum(
            dup_logvar1 - logvar2 - 1 + ((mu2 - dup_mu1).pow(2) + logvar2.exp()) / dup_logvar1.exp(),
            dim=1)).item()
        self.U_l2dist += torch.mean(torch.sqrt(torch.sum(((dup_z1 - z2) ** 2), dim=1))).item()
        self.U_cosdist += torch.mean(F.cosine_similarity(dup_z1, z2)).item()
        # reconstruct z1 from z2
        rec_loss, rec_mu1, rec_logvar1 = self.z2_rec_z1(dup_z1, z2, batch_uohlb)
        # kl divergence of z2
        kld = self.kl_z2(mu2, logvar2, batch_uohlb)
        # get y prior
        yprior = batch_uohlb @ self.yprior
        # calculate H(q(y | x )); epsilon guards log(0)
        H = -torch.sum(torch.mul(pred_p, torch.log(pred_p + 1e-32)), dim=1)
        # flatten to bs * label_size to align with the duplicated rows
        pred_p = pred_p.view(-1)
        # fb: free-bits KL.  NOTE(review): `u_z2_fb` is not defined in this
        # method — presumably a module-level constant; confirm it exists.
        kld_fb = cal_kl_gau1_fb(mu2, logvar2, u_z2_fb)
        # fb
        U_dict = {
            'U_rec': rec_loss,
            'U_kld': kld,
            'U_yprior': yprior,
            'H': H,
            'U_kld_fb': kld_fb,
            'U_dis_loss': loss_dis,
            'U_enc_loss': loss_enc,
        }
        return U_dict, pred_p, mu1, logvar1, z1, dup_mu1, dup_logvar1, rec_mu1, rec_logvar1
def cal_U_fixenc(self, U_dict, U_pred_p):
# L for U
UL_rec, UL_kld, UL_yprior = U_dict['U_rec'], U_dict['U_kld'], U_dict['U_yprior']
UL_loss = UL_rec + UL_kld - UL_yprior
# UL_loss = urec * UL_rec + ukld * torch.abs(ukld_fix - UL_kld) - uyp * UL_yprior
# U_L_loss = U_rec + min(1.0, self.step / 1000) * U_kld - U_yprior
# expectation of UL_loss
U_dict['UL_mean_loss'] = torch.sum((U_pred_p * UL_loss).view(-1, self.label_size), dim=-1)
# Total U
# U_loss = U_L_mean_loss - H
U_dict['U_loss'] = U_dict['UL_mean_loss'] - U_dict['H']
# calculate expectations for each term
U_dict['U_rec'] = torch.sum((U_pred_p * UL_rec).view(-1, self.label_size), dim=-1)
U_dict['U_kld'] = torch.sum((U_pred_p * UL_kld).view(-1, self.label_size), dim=-1)
U_dict['U_yprior'] = torch.sum((U_pred_p * UL_yprior).view(-1, self.label_size), dim=-1)
# fb
U_dict['U_kld_fb'] = torch.sum((U_pred_p * U_dict['U_kld_fb']).view(-1, self.label_size),
dim=-1)
# KL(q(y|x) || p(y)) for U
# bs, label_size
U_pred_p_rv = U_pred_p.view(-1, self.label_size)
U_dict['kldy'] = (U_pred_p_rv * (torch.log(U_pred_p_rv + 1e-32) - self.yprior)).mean(dim=1)
# U_dict['U_loss'] += - U_dict['kldy'] + torch.abs(U_dict['kldy'] - 0.3)
return U_dict
def backward_U_fixenc(self, U_dict, cur_bs):
# backward
(U_dict['U_loss'].sum() / cur_bs).backward()
# get sum().item()
U_dict = {k: v.sum().item() for k, v in U_dict.items()}
return U_dict
    def get_z1_fixenc(self, lang, batch_in, batch_lens):
        """Encode a batch into z1 with the shared encoder frozen.

        Everything runs under torch.no_grad(), so no gradients flow into
        self.xlingva; only the inferer's sampling mode follows self.training.
        """
        with torch.no_grad():
            # extract feature: mu1, logvar1, eval mode
            self.xlingva.eval()
            mu1, logvar1, hid, loss_dis, loss_enc = self.xlingva.get_gaus(lang, batch_in, batch_lens)
            # stochastic sampling, z for training, mu for eval
            if self.training:
                self.xlingva.inferer.train()
            else:
                self.xlingva.inferer.eval()
            z1 = self.xlingva.inferer.reparameterize(mu1, logvar1)
        # mu debug
        # z1 = mu1
        # mu debug
        return mu1, logvar1, z1, hid, loss_dis, loss_enc
def get_z1_trainenc(self, lang, batch_in, batch_lens):
mu1, logvar1, hid, loss_dis, loss_enc = self.xlingva.get_gaus(lang, batch_in, batch_lens)
z1 = self.xlingva.inferer.reparameterize(mu1, logvar1)
return mu1, logvar1, z1, hid, loss_dis, loss_enc
def get_z2(self, z1, batch_ohlb):
z1y = self.get_z1y(z1, batch_ohlb)
# z1y -> z2
mu2, logvar2 = self.z1y_z2(z1y)
z2 = self.z1y_z2.reparameterize(mu2, logvar2)
# mu debug
# z2 = mu2
# mu debug
return mu2, logvar2, z2
def get_z1y_concat(self, z1, batch_ohlb):
# z1y -> z2
z1y = torch.cat([z1, batch_ohlb], dim=-1)
return z1y
def get_z1y_transconcat(self, z1, batch_ohlb):
# z1y -> z2
batch_lb = self.yohtoy(batch_ohlb)
z1y = torch.cat([z1, batch_lb], dim=-1)
return z1y
def get_z1y_transadd(self, z1, batch_ohlb):
# z1y -> z2
batch_lb = self.yohtoy_toz2(batch_ohlb)
z1y = z1 + batch_lb
if z1y.shape[-1] > 1:
z1y = self.hbn_z1y(z1y)
z1y = self.leakyrelu(z1y)
return z1y
def get_z1y_gmix(self, z1, batch_ohlb):
# z1y -> z2
# concat
z1y = torch.cat([z1, batch_ohlb], dim=-1)
'''
# transadd
y = self.yohtoy(batch_ohlb)
z1y = z1 + y
'''
return z1y
def get_z1y_gmix_transadd(self, z1, batch_ohlb):
# z1y -> z2
batch_lb = self.yohtoy_toz2(batch_ohlb)
z1y = z1 + batch_lb
if z1y.shape[-1] > 1:
z1y = self.hbn_z1y(z1y)
z1y = self.leakyrelu(z1y)
return z1y
def z2_rec_z1(self, z1, z2, batch_ohlb):
z2y = self.get_z2y(z2, batch_ohlb)
# z2y -> z1
rec_mu1, rec_logvar1 = self.z2y_z1(z2y)
rec_z1 = self.z2y_z1.reparameterize(rec_mu1, rec_logvar1)
logpz1 = multi_diag_normal(z1, rec_mu1, rec_logvar1)
return -logpz1, rec_mu1, rec_logvar1
def get_z2y_concat(self, z2, batch_ohlb):
# reconstruction
z2y = torch.cat([z2, batch_ohlb], dim=-1)
return z2y
def get_z2y_transconcat(self, z2, batch_ohlb):
batch_lb = self.yohtoy(batch_ohlb)
z2y = torch.cat([z2, batch_lb], dim=-1)
return z2y
def get_z2y_transadd(self, z2, batch_ohlb):
batch_lb = self.yohtoy_toz1(batch_ohlb)
z2y = z2 + batch_lb
if z2y.shape[-1] > 1:
z2y = self.hbn_z2y(z2y)
z2y = self.leakyrelu(z2y)
return z2y
    def get_z2y_gmix(self, z2, batch_ohlb):
        # Under the Gaussian-mixture prior the reconstruction path is not
        # label-conditioned: z2 passes through unchanged (batch_ohlb is
        # intentionally unused, kept for interface parity).
        return z2
    def get_z2y_gmix_transadd(self, z2, batch_ohlb):
        # Same as get_z2y_gmix: no label conditioning on the reconstruction
        # path for the mixture-prior "transadd" variant.
        return z2
def enumerate_label(self, batch_uin, batch_uohlb):
# batch_dup_ulens = np.repeat(batch_ulens, repeats = batch_uohlb.shape[1], axis = 0)
batch_dup_uin = batch_uin.unsqueeze(1).repeat(1, batch_uohlb.shape[1], 1).view(-1,
batch_uin.shape[
1])
return batch_dup_uin
def calc_au(mus, delta=0.01):
    """Count active latent units.

    A unit is active when the variance of its posterior mean across all
    examples exceeds ``delta``. ``mus`` is a list of (batch, nz) tensors.

    Returns
    -------
    (int, Tensor)
        Number of active units and the per-unit variance; ``(0, 0)`` when
        ``mus`` is empty.
    """
    if not mus:
        return 0, 0
    total = sum(m.size(0) for m in mus)
    # First pass: mean of the means over all examples, shape (1, nz).
    means_sum = mus[0].sum(dim=0, keepdim=True)
    for m in mus[1:]:
        means_sum = means_sum + m.sum(dim=0, keepdim=True)
    mean_mean = means_sum / total
    # Second pass: unbiased variance of the means, shape (nz,).
    var_sum = ((mus[0] - mean_mean) ** 2).sum(dim=0)
    for m in mus[1:]:
        var_sum = var_sum + ((m - mean_mean) ** 2).sum(dim=0)
    au_var = var_sum / (total - 1)
    return (au_var >= delta).sum().item(), au_var
def get_cyclic_weight(step, step_period):
    """Cyclic annealing weight: ramps linearly from 0 to 1 over one period,
    holds at 1.0 for the next period, then repeats."""
    phase, offset = divmod(step, step_period)
    if phase % 2 == 0:
        # Increasing half of the cycle.
        return offset / step_period
    # Constant half of the cycle.
    return 1.0
|
{"hexsha": "58e791d5e78b4286e54417a7c1c77c5ef1361621", "size": 31648, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn_model/semicldc_model.py", "max_stars_repo_name": "onlyrico/mling_sdgms", "max_stars_repo_head_hexsha": "ef6015d1a815a317f16fa1e42cbb048e4fe443f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-06-01T02:06:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T02:14:07.000Z", "max_issues_repo_path": "nn_model/semicldc_model.py", "max_issues_repo_name": "onlyrico/mling_sdgms", "max_issues_repo_head_hexsha": "ef6015d1a815a317f16fa1e42cbb048e4fe443f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn_model/semicldc_model.py", "max_forks_repo_name": "onlyrico/mling_sdgms", "max_forks_repo_head_hexsha": "ef6015d1a815a317f16fa1e42cbb048e4fe443f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-28T05:48:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T11:59:13.000Z", "avg_line_length": 36.6720741599, "max_line_length": 168, "alphanum_fraction": 0.6054095046, "include": true, "reason": "import numpy,import sympy", "num_tokens": 9959}
|
from __future__ import division
from .checks import *
from pymc3 import *
from numpy import array, inf
from nose.tools import raises
class DistTest(Continuous):
    """Minimal concrete ``Continuous`` distribution used as a test fixture:
    stores two parameters ``a`` and ``b`` and has a flat log density."""
    def __init__(self, a, b, *args, **kwargs):
        super(DistTest, self).__init__(*args, **kwargs)
        self.a = a
        self.b = b
    def logp(self, v):
        # Constant density — these tests only exercise default test-value
        # resolution, never the likelihood itself.
        return 0
@raises(AttributeError)
def test_default_nan_fail():
    # A NaN parameter listed in `defaults` cannot provide a test value, so
    # resolution must fail with AttributeError.
    with Model() as model:
        x = DistTest('x', np.nan, 2, defaults=['a'])
@raises(AttributeError)
def test_default_empty_fail():
    # With no defaults and no explicit testval, there is nothing to resolve:
    # AttributeError is expected.
    with Model() as model:
        x = DistTest('x', 1, 2, defaults=[])
def test_default_testval():
    # An explicit testval wins even when the defaults list is empty.
    with Model() as model:
        x = DistTest('x', 1, 2, testval=5, defaults=[])
        assert x.tag.test_value == 5
def test_default_testval_nan():
    # An explicit NaN testval is accepted verbatim (it still beats defaults).
    # NOTE(review): relies on assert_almost_equal treating two NaNs as equal.
    with Model() as model:
        x = DistTest('x', 1,2, testval=np.nan, defaults=['a'])
        np.testing.assert_almost_equal(x.tag.test_value, np.nan)
def test_default_a():
    # With defaults=['a'], the finite parameter `a` supplies the test value.
    with Model() as model:
        x = DistTest('x', 1, 2, defaults=['a'])
        assert x.tag.test_value == 1
def test_default_b():
    # Falls back to the second default ('b') when 'a' is NaN.
    # NOTE(review): a later function in this module re-defines the name
    # `test_default_b`, so this test is shadowed and never collected —
    # one of the two should be renamed.
    with Model() as model:
        x = DistTest('x', np.nan, 2, defaults=['a', 'b'])
        assert x.tag.test_value == 2
def test_default_c():
    """A parameter that is itself a model variable contributes its own test
    value through the defaults chain.

    Renamed from ``test_default_b``: the module defined two functions with
    that name, so this one silently shadowed the earlier test and only one
    of the two ever ran.
    """
    with Model() as model:
        y = DistTest('y', 7, 8, testval=94)
        x = DistTest('x', y, 2, defaults=['a', 'b'])
        assert x.tag.test_value == 94
|
{"hexsha": "b3cd04d244f633c5515da3c26c97372bcaf75764", "size": 1412, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymc3/tests/test_distribution_defaults.py", "max_stars_repo_name": "MichielCottaar/pymc3", "max_stars_repo_head_hexsha": "f37198653e7d09881e7bc411cbd10fffbab442c2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-16T22:03:21.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-16T22:03:21.000Z", "max_issues_repo_path": "pymc3/tests/test_distribution_defaults.py", "max_issues_repo_name": "MichielCottaar/pymc3", "max_issues_repo_head_hexsha": "f37198653e7d09881e7bc411cbd10fffbab442c2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pymc3/tests/test_distribution_defaults.py", "max_forks_repo_name": "MichielCottaar/pymc3", "max_forks_repo_head_hexsha": "f37198653e7d09881e7bc411cbd10fffbab442c2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1481481481, "max_line_length": 64, "alphanum_fraction": 0.604815864, "include": true, "reason": "from numpy,from pymc3", "num_tokens": 404}
|
[STATEMENT]
lemma F_base_aux: "{l. length l=n \<and> valid l} = {replicate n B}" if "n > 0" "n < m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {l. length l = n \<and> local.valid l} = {replicate n B}
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
0 < n
n < m
goal (1 subgoal):
1. {l. length l = n \<and> local.valid l} = {replicate n B}
[PROOF STEP]
proof (induction n)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>0 < 0; 0 < m\<rbrakk> \<Longrightarrow> {l. length l = 0 \<and> local.valid l} = {replicate 0 B}
2. \<And>n. \<lbrakk>\<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}; 0 < Suc n; Suc n < m\<rbrakk> \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
case 0
[PROOF STATE]
proof (state)
this:
0 < 0
0 < m
goal (2 subgoals):
1. \<lbrakk>0 < 0; 0 < m\<rbrakk> \<Longrightarrow> {l. length l = 0 \<and> local.valid l} = {replicate 0 B}
2. \<And>n. \<lbrakk>\<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}; 0 < Suc n; Suc n < m\<rbrakk> \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < 0
0 < m
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
0 < 0
0 < m
goal (1 subgoal):
1. {l. length l = 0 \<and> local.valid l} = {replicate 0 B}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
{l. length l = 0 \<and> local.valid l} = {replicate 0 B}
goal (1 subgoal):
1. \<And>n. \<lbrakk>\<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}; 0 < Suc n; Suc n < m\<rbrakk> \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n. \<lbrakk>\<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}; 0 < Suc n; Suc n < m\<rbrakk> \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
case (Suc n)
[PROOF STATE]
proof (state)
this:
\<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}
0 < Suc n
Suc n < m
goal (1 subgoal):
1. \<And>n. \<lbrakk>\<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}; 0 < Suc n; Suc n < m\<rbrakk> \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
proof (cases "n = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. n = 0 \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
2. n \<noteq> 0 \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
n = 0
goal (2 subgoals):
1. n = 0 \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
2. n \<noteq> 0 \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
with Suc.prems
[PROOF STATE]
proof (chain)
picking this:
0 < Suc n
Suc n < m
n = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
0 < Suc n
Suc n < m
n = 0
goal (1 subgoal):
1. {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
by (auto intro: valid.intros elim: valid.cases)
[PROOF STATE]
proof (state)
this:
{l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
goal (1 subgoal):
1. n \<noteq> 0 \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. n \<noteq> 0 \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
n \<noteq> 0
goal (1 subgoal):
1. n \<noteq> 0 \<Longrightarrow> {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
with Suc.prems
[PROOF STATE]
proof (chain)
picking this:
0 < Suc n
Suc n < m
n \<noteq> 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
0 < Suc n
Suc n < m
n \<noteq> 0
goal (1 subgoal):
1. {l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
[PROOF STEP]
apply safe
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; x \<notin> {}; length x = Suc n; local.valid x\<rbrakk> \<Longrightarrow> x = replicate (Suc n) B
2. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n\<rbrakk> \<Longrightarrow> length (replicate (Suc n) B) = Suc n
3. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n\<rbrakk> \<Longrightarrow> local.valid (replicate (Suc n) B)
[PROOF STEP]
using Suc.IH
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}
goal (3 subgoals):
1. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; x \<notin> {}; length x = Suc n; local.valid x\<rbrakk> \<Longrightarrow> x = replicate (Suc n) B
2. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n\<rbrakk> \<Longrightarrow> length (replicate (Suc n) B) = Suc n
3. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n\<rbrakk> \<Longrightarrow> local.valid (replicate (Suc n) B)
[PROOF STEP]
apply -
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; x \<notin> {}; length x = Suc n; local.valid x; \<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}\<rbrakk> \<Longrightarrow> x = replicate (Suc n) B
2. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; \<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}\<rbrakk> \<Longrightarrow> length (replicate (Suc n) B) = Suc n
3. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; \<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}\<rbrakk> \<Longrightarrow> local.valid (replicate (Suc n) B)
[PROOF STEP]
apply (erule valid.cases)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; x \<notin> {}; length x = Suc n; \<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}; x = []\<rbrakk> \<Longrightarrow> x = replicate (Suc n) B
2. \<And>x xs. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; x \<notin> {}; length x = Suc n; \<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}; x = B # xs; local.valid xs\<rbrakk> \<Longrightarrow> x = replicate (Suc n) B
3. \<And>x xs n. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; x \<notin> {}; length x = Suc n; \<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}; x = replicate n R @ xs; local.valid xs; m \<le> n\<rbrakk> \<Longrightarrow> x = replicate (Suc n) B
4. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; \<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}\<rbrakk> \<Longrightarrow> length (replicate (Suc n) B) = Suc n
5. \<And>x. \<lbrakk>0 < Suc n; Suc n < m; 0 < n; \<lbrakk>0 < n; n < m\<rbrakk> \<Longrightarrow> {l. length l = n \<and> local.valid l} = {replicate n B}\<rbrakk> \<Longrightarrow> local.valid (replicate (Suc n) B)
[PROOF STEP]
apply (auto intro: valid.intros elim: valid.cases)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
{l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
{l. length l = Suc n \<and> local.valid l} = {replicate (Suc n) B}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3541, "file": "Monad_Memo_DP_example_Counting_Tiles", "length": 26}
|
! Smoke test: element-wise vector addition inside an OpenMP PARALLEL DO.
! Initializes A = B = 1, computes A = A + B in parallel, and reports any
! element that is not exactly 2 afterwards.
program add_real
       implicit none
       INTEGER, PARAMETER :: np=100
       REAL, DIMENSION(np) :: A, B
       INTEGER:: i
       ! Initialize both operands to 1.
       DO i=1, np
          A(i)=1
          B(i)=1
       END DO
       ! NOTE(review): each iteration writes a distinct element of A, so an
       ! array REDUCTION is not strictly required here — presumably this test
       ! deliberately exercises the compiler's array-reduction codepath.
!$OMP PARALLEL DO REDUCTION(+:A)
       DO i=1, np
          A(i)=A(i)+B(i)
       END DO
!$OMP END PARALLEL DO
       ! Verify: every element must be exactly 2 (1 + 1 is exact in REAL).
       DO i=1, np
          IF (A(i) .NE. 2) THEN
             WRITE(*, *) "ERROR AT INDEX ", i, "EXPECT 2 BUT RECEIVED", A(i)
          ENDIF
       END DO
end program add_real
|
{"hexsha": "762cfbf6ff384379590eae3c091c840359c63e69", "size": 490, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/smoke/flang-272285/flang-272285.f90", "max_stars_repo_name": "raramakr/aomp", "max_stars_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 106, "max_stars_repo_stars_event_min_datetime": "2019-02-05T13:07:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T11:15:03.000Z", "max_issues_repo_path": "test/smoke/flang-272285/flang-272285.f90", "max_issues_repo_name": "raramakr/aomp", "max_issues_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 195, "max_issues_repo_issues_event_min_datetime": "2019-02-26T23:42:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T10:08:19.000Z", "max_forks_repo_path": "test/smoke/flang-272285/flang-272285.f90", "max_forks_repo_name": "raramakr/aomp", "max_forks_repo_head_hexsha": "9a224fe01ca8eff4209b8b79aa1fa15a18da65db", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2019-05-17T10:33:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T16:17:48.000Z", "avg_line_length": 19.6, "max_line_length": 74, "alphanum_fraction": 0.4673469388, "num_tokens": 162}
|
import os
import json
import numpy as np
import torch
import cv2
from PIL import Image
import torch.utils.data as data
from torch.utils import data
from matplotlib.image import imread
from pycocotools.coco import COCO
from effdet.data.parsers import create_parser
class VdotDataset(data.Dataset):
    """Detection dataset for drain/inlet images.

    Parameters
    ----------
    image_dir : str
        Directory containing the image files.
    ann_file : str
        Path to a JSON annotation file mapping ids to
        ``{'filename': ..., 'assets': {label_name: [x1, y1, x2, y2], ...}}``.
    transform : callable, optional
        Applied as ``img, labels = transform(img, labels)`` in __getitem__.
    """

    def __init__(self, image_dir, ann_file, transform=None):
        # BUG FIX: the original wrote `super(...).__init__` without calling
        # it, so the base-class initializer never actually ran.
        super(VdotDataset, self).__init__()
        self.transform = transform
        self._transform = transform  # kept: original stored both names
        self.image_dir = image_dir
        self.ann_file = ann_file
        self.coco = None  # placeholder; the COCO API is not used directly
        with open(self.ann_file) as ann:
            self.data_json = json.load(ann)
        # Boxes are stored/returned in yxyx order (model convention).
        self.yxyx = True
        # NOTE(review): __len__ counts files in image_dir, while targets come
        # from the annotation file — confirm the two always agree.
        self.total_num_images = len(os.listdir(self.image_dir))
        self.imgs_list, self.annot_list = self.parse_labels(self.data_json)
        # BUG FIX: the original literal repeated the 'id'/'name' keys inside a
        # single dict, silently dropping the first category.
        self.cat_dicts = [{'id': 1, 'name': 'storm_drain'},
                          {'id': 2, 'name': 'drop_inlet'}]

    def parse_labels(self, ann_file):
        """Build per-image target dicts from ``self.data_json``.

        The ``ann_file`` argument is unused (kept for interface
        compatibility); annotations are read from ``self.data_json``.

        Returns
        -------
        (list[str], list[dict])
            Filenames and targets, each target being
            ``{'bbox': float32 (n, 4) yxyx, 'cls': int64 (n,),
               'img_size': (512, 512)}``.  Only 'drop_inlet' assets are kept,
            matching the original behaviour; other labels are ignored.
        """
        filenames = []
        targets = []
        for entry in self.data_json.values():
            filenames.append(entry['filename'])
            bboxes, classes = [], []
            for label_name, box in entry['assets'].items():
                if label_name != 'drop_inlet':
                    continue
                # Annotations are xyxy; the model expects yxyx.
                ymin, xmin, ymax, xmax = box[1], box[0], box[3], box[2]
                bboxes.append([ymin, xmin, ymax, xmax])
                classes.append(1)
            targets.append({
                'bbox': np.array(bboxes, dtype=np.float32),
                'cls': np.array(classes, dtype=np.int64),
                'img_size': (512, 512),
            })
        return filenames, targets

    def __len__(self):
        """Number of images found in image_dir."""
        return self.total_num_images

    def __getitem__(self, index):
        """Return ``(img, labels)`` for ``index``; labels gains 'img_id'."""
        self.image_name = self.imgs_list[index]
        labels = self.annot_list[index]
        labels['img_id'] = int(index)
        img = Image.open(os.path.join(self.image_dir, self.image_name)).convert('RGB')
        if self.transform is not None:
            img, labels = self.transform(img, labels)
        return img, labels
{"hexsha": "757232ac11cb525f233d0c04568bde8d3f9167a6", "size": 4586, "ext": "py", "lang": "Python", "max_stars_repo_path": "vdot.py", "max_stars_repo_name": "Ekta246/efficientdet-pytorch", "max_stars_repo_head_hexsha": "f284a465a9050c26723c268de71fb31bf080048a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vdot.py", "max_issues_repo_name": "Ekta246/efficientdet-pytorch", "max_issues_repo_head_hexsha": "f284a465a9050c26723c268de71fb31bf080048a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vdot.py", "max_forks_repo_name": "Ekta246/efficientdet-pytorch", "max_forks_repo_head_hexsha": "f284a465a9050c26723c268de71fb31bf080048a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-19T16:15:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-19T16:15:00.000Z", "avg_line_length": 34.4812030075, "max_line_length": 133, "alphanum_fraction": 0.5573484518, "include": true, "reason": "import numpy", "num_tokens": 1155}
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer.functions import convolution_2d
from chainer.functions import deformable_convolution_2d_sampler
from chainer import utils
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
    'params': [
        (1, 1, 1, 1, 1, 1),
        (2, 2, 2, 2, 2, 2),
        (1, 2, 2, 1, 1, 2),
        (1, 2, 3, 4, 1, 2),
        (1, 2, 3, 4, 4, 5),
        (3, 3, 2, 2, 1, 1),
    ],
    'use_cudnn': ['always', 'never']
}))
class TestDeformableConvolution2DSamplerFunctionZeroOffset(unittest.TestCase):
    """With an all-zero offset field, the deformable sampler must reproduce
    plain convolution_2d exactly."""

    def setUp(self):
        # params = (kh, kw, sy, sx, ph, pw)
        kh, kw, sy, sx, ph, pw = self.params
        self.stride = (sy, sx)
        self.pad = (ph, pw)
        n_in, n_out, batch, height, width = 3, 2, 2, 9, 9
        self.W = numpy.random.normal(
            size=(n_out, n_in, kh, kw)).astype(numpy.float32)
        self.b = numpy.random.uniform(
            size=(n_out,)).astype(numpy.float32)
        self.x = numpy.random.uniform(
            size=(batch, n_in, height, width)).astype(numpy.float32)
        out_h = utils.conv.get_conv_outsize(height, kh, sy, ph)
        out_w = utils.conv.get_conv_outsize(width, kw, sx, pw)
        # One (dy, dx) pair per kernel element and output location, all zero.
        self.offset = numpy.zeros(
            (batch, 2 * kh * kw, out_h, out_w), dtype=numpy.float32)

    def check_forward(self, x, offset, W, b, stride, pad):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            x = chainer.Variable(x)
            offset = chainer.Variable(offset)
            actual = deformable_convolution_2d_sampler(
                x, offset, W, b, stride, pad).data
            expected = convolution_2d(x, W, b, stride, pad).data
            testing.assert_allclose(actual, expected)

    def test_forward_cpu(self):
        self.check_forward(
            self.x, self.offset, self.W, self.b, self.stride, self.pad)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.offset),
            cuda.to_gpu(self.W), cuda.to_gpu(self.b),
            self.stride, self.pad)
@testing.parameterize(*testing.product({
    'params': [
        # (kh, kw, sy, sx, ph, pw): kernel size, stride and padding.
        (1, 1, 1, 1, 1, 1),
        (2, 2, 2, 2, 2, 2),
        (1, 2, 2, 1, 1, 2),
        (1, 2, 3, 4, 1, 2),
        (1, 2, 3, 4, 4, 5),
        (3, 3, 2, 2, 1, 1),
    ],
    'use_cudnn': ['always', 'never']
}))
class TestDeformableConvolution2DSamplerFunctionLeftBottomOffset(
        unittest.TestCase):
    """Forward check with a constant one-stride shift applied to every
    sampling location.

    The sampler output is expected to match a plain convolution of the
    same input computed with enlarged padding and then cropped, i.e. a
    rigidly shifted version of the regular convolution result.
    """

    def setUp(self):
        in_channels = 3
        out_channels = 2
        batch_size = 2
        h = 9
        w = 9
        kh, kw, sy, sx, ph, pw = self.params
        self.stride = (sy, sx)
        self.pad = (ph, pw)
        self.W = numpy.random.normal(
            size=(out_channels, in_channels, kh, kw)).astype(numpy.float32)
        self.b = numpy.random.uniform(
            size=(out_channels,)).astype(numpy.float32)
        self.x = numpy.random.uniform(
            size=(batch_size, in_channels, h, w)).astype(numpy.float32)
        out_h = utils.conv.get_conv_outsize(h, kh, sy, ph)
        out_w = utils.conv.get_conv_outsize(w, kw, sx, pw)
        # Offsets start at zero; check_forward overwrites them in place.
        self.offset = numpy.zeros(
            (batch_size, 2 * kh * kw, out_h, out_w), dtype=numpy.float32)

    def check_forward(self, x, offset, W, b, stride, pad):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            _, _, h, w = x.shape
            _, _, kh, kw = W.shape
            # Constant shift per location: first kh*kw offset channels get
            # -stride[1], the rest +stride[0].
            # NOTE(review): which half of the channels is x- vs y-offset is
            # defined by the sampler, not visible here -- confirm against
            # the deformable_convolution_2d_sampler docs.
            offset[:, :kh * kw] = -1 * stride[1]
            offset[:, kh * kw:] = 1 * stride[0]
            x = chainer.Variable(x)
            offset = chainer.Variable(offset)
            out = deformable_convolution_2d_sampler(
                x, offset, W, b, stride, pad).data
            # Reference: same convolution with padding grown by one stride
            # in each direction, then cropped to undo the shift.
            pad = (pad[0] + 1 * stride[0], pad[1] + 1 * stride[1])
            expeceted = convolution_2d(
                x, W, b, stride, pad).data
            # NOTE(review): the fixed 2-element crop looks tied to the
            # one-stride shift above -- verify it holds for all strides.
            expeceted = expeceted[:, :, 2:, :-2]
            testing.assert_allclose(out, expeceted)

    def test_forward_cpu(self):
        self.check_forward(
            self.x, self.offset, self.W, self.b, self.stride, self.pad)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(
            cuda.to_gpu(self.x),
            cuda.to_gpu(self.offset),
            cuda.to_gpu(self.W),
            cuda.to_gpu(self.b),
            self.stride, self.pad)
# Register the parameterized tests above with Chainer's test runner.
testing.run_module(__name__, __file__)
|
{"hexsha": "a3147c76d61ed100a0f25d5ceb95e8daee484906", "size": 4512, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/chainer_tests/functions_tests/connection_tests/test_deformable_convolution_2d_sampler.py", "max_stars_repo_name": "zaltoprofen/chainer", "max_stars_repo_head_hexsha": "3b03f9afc80fd67f65d5e0395ef199e9506b6ee1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3705, "max_stars_repo_stars_event_min_datetime": "2017-06-01T07:36:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:46:15.000Z", "max_issues_repo_path": "tests/chainer_tests/functions_tests/connection_tests/test_deformable_convolution_2d_sampler.py", "max_issues_repo_name": "zaltoprofen/chainer", "max_issues_repo_head_hexsha": "3b03f9afc80fd67f65d5e0395ef199e9506b6ee1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5998, "max_issues_repo_issues_event_min_datetime": "2017-06-01T06:40:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T01:42:44.000Z", "max_forks_repo_path": "tests/chainer_tests/functions_tests/connection_tests/test_deformable_convolution_2d_sampler.py", "max_forks_repo_name": "zaltoprofen/chainer", "max_forks_repo_head_hexsha": "3b03f9afc80fd67f65d5e0395ef199e9506b6ee1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1150, "max_forks_repo_forks_event_min_datetime": "2017-06-02T03:39:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T02:29:32.000Z", "avg_line_length": 30.4864864865, "max_line_length": 78, "alphanum_fraction": 0.5567375887, "include": true, "reason": "import numpy", "num_tokens": 1300}
|
# -*- coding: utf-8 -*-
"""
Transport example using GSTools.
Plotting the plumes at t=15d and calculating the breakthrough curves at
the observation wells.
Authors: Alraune Zech and Sebastian Müller
"""
import os
import numpy as np
from ogs5py.reader import readtec_polyline
import meshio as mio
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
def dashes(i=1, max_n=6, width=1):
    """Return a matplotlib dash pattern (on/off lengths).

    The pattern is *i* short dashes followed by one long dash, padded so
    that every pattern of the family (i = 1..max_n) has the same period.
    """
    short_dashes = [width, width] * i
    long_dash = 2 * width * (max_n - i)
    return short_dashes + [long_dash, width]
# --- configuration: result locations and well/connectivity layout ---------
out_path = os.path.join("..", "results", "tracer_test_conn")
ow_file_name = "model_ply_ow{0}_t{0}_MASS_TRANSPORT.tec"
connectivity = ["mean", "low", "high"]
conn_count = len(connectivity)
ow_pos = [2.5, 5.5, 8.5]
ow_count = len(ow_pos)
# NOTE(review): ow_label is built but never used below.
ow_label = ", ".join([f"ow{j+1}" for j in range(ow_count)])
sub_factor = 2
# --- figure layout: one field row per connectivity case, one bottom row of
# breakthrough-curve axes, and two narrow colorbar columns on the right ----
fig = plt.figure(constrained_layout=False, figsize=[8, 9])
gs = fig.add_gridspec(conn_count + 1, ow_count * sub_factor, wspace=0)
ax_fld = {}
for i, conn in enumerate(connectivity):
    # All field rows share the x-axis of the first ("mean") row.
    ax_shr = None if i == 0 else ax_fld["mean"]
    ax_fld[conn] = fig.add_subplot(gs[i, :-2], sharex=ax_shr)
    plt.setp(ax_fld[conn].get_xticklabels(), visible=(i == conn_count - 1))
    ax_fld[conn].margins(x=0)
# Bottom row: one axis per observation well, sharing both axes.
ax = [fig.add_subplot(gs[-1, 0:sub_factor])]
for i in range(1, ow_count):
    ax.append(
        fig.add_subplot(
            gs[-1, sub_factor * i : sub_factor * (i + 1)],
            sharey=ax[0],
            sharex=ax[0],
        )
    )
    plt.setp(ax[-1].get_yticklabels(), visible=False)
# Invisible host axes for the two colorbars.
cax1 = fig.add_subplot(gs[:-1, -2])
cax1.axis("off")
cax2 = fig.add_subplot(gs[:-1, -1])
cax2.axis("off")
# --- load fields and gather global color limits ---------------------------
fld_t = {}
fld_c = {}
bnd_t = [np.inf, -np.inf]
max_c = -np.inf
for conn in connectivity:
    # transmissivity
    fld = mio.read(os.path.join(out_path, f"trans_field_{conn}.vtk"))
    fld_t[conn] = np.log10(fld.cell_data["transmissivity"][0].reshape(40, 400))
    bnd_t[0] = min(bnd_t[0], fld_t[conn].min())
    bnd_t[1] = max(bnd_t[1], fld_t[conn].max())
    # concentration at t=15d (step 180)
    fld = mio.read(os.path.join(out_path, f"out_{conn}", "model0180.vtk"))
    x, z = fld.points[:, [0, 2]].T
    c = fld.point_data["CONCENTRATION1"][:, 0]
    fld_c[conn] = [x, z, c]
    max_c = max(max_c, c.max())
# low-cut for concentration at 0.001
lvl_c = np.linspace(0.001, max_c, 16)
# Shared imshow kwargs so all rows use identical color scaling.
kw_t = dict(vmin=bnd_t[0], vmax=bnd_t[1], interpolation="bicubic")
# --- draw transmissivity fields, source, wells, and plumes ----------------
for conn in connectivity:
    im = ax_fld[conn].imshow(
        fld_t[conn], origin="lower", extent=[-1, 9, -5, -1], **kw_t
    )
    ax_fld[conn].set_ylabel("z / m")
    ax_fld[conn].set_title(f"Connected region: '{conn}' transmissivity")
    # plot source line (label only once so the legend has a single entry)
    lbl = "source" if conn == "mean" else None
    ax_fld[conn].plot(
        [0, 0], [-2, -4], color="k", alpha=0.7, linewidth=3, label=lbl
    )
    # plot observation wells, each with a distinct dash pattern
    for i, ow in enumerate(ow_pos, start=1):
        lbl = f"ow{i}"
        ax_fld[conn].axvline(
            ow, color="k", alpha=0.7, linewidth=2, dashes=dashes(i), label=lbl
        )
    x, z, c = fld_c[conn]
    tri = ax_fld[conn].tricontourf(
        x, z, c, levels=lvl_c, cmap="Reds", alpha=0.7
    )
ax_fld["high"].set_xlabel("x / m")
handles, labels = ax_fld["mean"].get_legend_handles_labels()
fig.legend(
    handles, labels, loc="upper right", ncol=2, title="Plumes for t=15d"
)
# --- colorbars ------------------------------------------------------------
c1 = fig.colorbar(im, ax=cax1, use_gridspec=False, pad=0, fraction=0.25)
c2 = fig.colorbar(tri, ax=cax2, use_gridspec=False, pad=0, fraction=0.25)
c1.ax.set_ylabel(r"transmissivity / $\frac{m^2}{s}$")
c1.ax.yaxis.set_label_position("left")
c2.ax.set_ylabel(r"concentration")
c2.ax.yaxis.set_label_position("left")
c1.ax.margins(x=0)
c2.ax.margins(x=0)
# to actually determine the labels, draw the figure
fig.canvas.draw()
# Relabel the log10 transmissivity ticks as powers of ten.
lbl_t = c1.ax.get_yticklabels()
lbl_t = [f"$10^{{{lbl.get_text()}}}$" for lbl in lbl_t]
c1.ax.set_yticklabels(lbl_t)
c2.ax.yaxis.set_major_formatter(FormatStrFormatter("%.3f"))
# --- breakthrough curves at the observation wells -------------------------
for i, conn in enumerate(connectivity):
    for j in range(ow_count):
        name = os.path.join(f"out_{conn}", ow_file_name.format(j + 1))
        out = readtec_polyline(single_file=os.path.join(out_path, name))
        t = out["TIME"] / 3600 / 24  # convert secs to days
        # mean concentration along observation well
        c = np.trapz(out["CONCENTRATION1"], x=out["DIST"]) / out["DIST"][:, -1]
        ax[j].plot(np.cumsum(c), t, label=f"{conn}")
        ax[j].set_xticks([])
        ax[j].invert_yaxis()
        ax[j].set_xlabel(f"breakthrough curves: ow{j+1}")
ax[-1].legend(loc="upper right")
for j in range(ow_count):
    # Mark the t=15d snapshot time shown in the field plots above.
    ax[j].axhline(15, color="k", alpha=0.7, linestyle=":")
ax[0].set_ylabel("time / d")
fig.tight_layout()
fig.savefig(os.path.join("..", "results", "comparison.pdf"), dpi=300)
|
{"hexsha": "748d72d3dab4459993a2e18ddf0fd2aae919b7d1", "size": 4745, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/01_plot.py", "max_stars_repo_name": "GeoStat-Examples/gstools-connectivity-and-transport", "max_stars_repo_head_hexsha": "64229f989ff04ad1b822db1369f334353df206ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/01_plot.py", "max_issues_repo_name": "GeoStat-Examples/gstools-connectivity-and-transport", "max_issues_repo_head_hexsha": "64229f989ff04ad1b822db1369f334353df206ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/01_plot.py", "max_forks_repo_name": "GeoStat-Examples/gstools-connectivity-and-transport", "max_forks_repo_head_hexsha": "64229f989ff04ad1b822db1369f334353df206ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T04:55:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T04:55:32.000Z", "avg_line_length": 33.6524822695, "max_line_length": 79, "alphanum_fraction": 0.643203372, "include": true, "reason": "import numpy", "num_tokens": 1549}
|
import numpy as np
import loupe
def test_expc():
    """expc(a) must equal the elementwise complex exponential exp(1j*a)."""
    arr = loupe.rand(size=(10, 10))
    expected = np.exp(1j * arr.data)
    assert np.array_equal(loupe.expc(arr), expected)
def test_expc_backward():
    """Backprop through expc: with an all-ones upstream gradient the
    gradient on the input should equal imag(conj(expc(a)))."""
    arr = loupe.rand(size=(10, 10), requires_grad=True)
    res = loupe.expc(arr)
    res.backward(grad=np.ones((10, 10)))
    expected = np.imag(np.conj(res))
    assert np.allclose(arr.grad, expected)
|
{"hexsha": "d6e6f8ad1994d7ffff4f05c2194a4b492359690b", "size": 355, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/numeric/test_expc.py", "max_stars_repo_name": "andykee/loupe", "max_stars_repo_head_hexsha": "8b10781598973aac7c129e190209acad7e5a9559", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-31T03:42:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:42:39.000Z", "max_issues_repo_path": "tests/numeric/test_expc.py", "max_issues_repo_name": "andykee/loupe", "max_issues_repo_head_hexsha": "8b10781598973aac7c129e190209acad7e5a9559", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-04-17T16:21:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-21T21:23:32.000Z", "max_forks_repo_path": "tests/numeric/test_expc.py", "max_forks_repo_name": "andykee/loupe", "max_forks_repo_head_hexsha": "8b10781598973aac7c129e190209acad7e5a9559", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3571428571, "max_line_length": 53, "alphanum_fraction": 0.6563380282, "include": true, "reason": "import numpy", "num_tokens": 107}
|
from __future__ import division
from __future__ import absolute_import
from builtins import range
from past.utils import old_div
import numpy as np
import cv2
import matplotlib.pyplot as plt
from .tesisfunctions import hist_cdf,findminima,threshold
import glob
def brightness(img):
    """Return the brightness plane of a BGR image.

    Uses the V (value) channel of the HSV representation. An alternative
    perceptual formula (sqrt of weighted squared channels) is described at
    http://alienryderflex.com/hsp.html but is not used here.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    return hsv[:, :, 2]
def stem(x, y, color):
    """Draw a stem plot of *y* over *x*, recoloring stems and markers."""
    marker_line, stem_lines, _baseline = plt.stem(
        x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
    # Override the default blue with the caller-supplied color.
    plt.setp(stem_lines, linewidth=1, color=color)
    plt.setp(marker_line, 'markerfacecolor', color)
def otsuthresh(hist):
    """Return Otsu's threshold (a bin index) for a histogram.

    Exhaustive search minimizing the weighted within-class variance, after
    the OpenCV thresholding tutorial
    (http://docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html).

    Parameters
    ----------
    hist : numpy.ndarray
        Histogram of bin counts (any shape; it is raveled).

    Returns
    -------
    int
        Bin index minimizing the within-class variance, or -1 when no
        valid split exists.
    """
    # Normalized histogram and its cumulative distribution function.
    # (Normalizing by max instead of sum only rescales fn, so the argmin
    # is unchanged.)
    hist_norm = hist.astype("float").ravel() / hist.max()
    Q = hist_norm.cumsum()
    bins = np.arange(len(hist_norm))
    fn_min = np.inf
    thresh = -1
    for i in range(1, len(hist_norm)):
        p1, p2 = np.hsplit(hist_norm, [i])  # probabilities
        q1, q2 = Q[i], Q[len(hist_norm) - 1] - Q[i]  # cum sum of classes
        # Skip degenerate splits with an empty class: they would yield
        # 0/0 = nan below (and numpy RuntimeWarnings); such splits were
        # never selected anyway since nan < fn_min is False.
        if q1 <= 0 or q2 <= 0:
            continue
        b1, b2 = np.hsplit(bins, [i])  # weights
        # Class means and variances (plain / is float division here;
        # replaces the legacy past.utils.old_div calls).
        m1, m2 = np.sum(p1 * b1) / q1, np.sum(p2 * b2) / q2
        v1 = np.sum(((b1 - m1) ** 2) * p1) / q1
        v2 = np.sum(((b2 - m2) ** 2) * p2) / q2
        # Weighted within-class variance to minimize.
        fn = v1 * q1 + v2 * q2
        if fn < fn_min:
            fn_min = fn
            thresh = i
    return thresh
# --- batch-threshold every matching image and plot image/histogram/mask ---
imlist= glob.glob("im*.jpg")
imlist.extend(glob.glob("good_*.jpg"))
dpi = 100
grapths = True # True to plot images
proposed = False # True to use proposed method
ishull = False # True to apply convex hull
save = False # True to save figure
show = True # True to show figure
shape = (20,5*len(imlist))
fig = plt.figure("threshs",figsize=shape) # window name
# make title
title = "Normal brightness - "
if proposed: title += "Proposed thresh"
else: title += "Otsu thresh"
if ishull: title += " with convex hull"
for i,fn in enumerate(imlist):
    ## get data
    img =cv2.imread(fn) # read image
    P = brightness(img) # get brightness
    hist,cdf = hist_cdf(P)
    if proposed:
        # Proposed method: combine three threshold candidates on a
        # smoothed histogram, then snap to the nearest local minimum.
        fhist,fcdf = hist_cdf(P,2) # get filtered histogram
        hist_shift = len(fhist)-256
        th1 = otsuthresh(fhist)-hist_shift #np.min(np.where(cdf.max()*0.5<=cdf)) # user criteria
        th2 = np.max(np.where(fhist.max()==fhist))-hist_shift # max value
        th3 = np.min(np.where(np.mean(fcdf)<=fcdf))-hist_shift # mean of cdf
        thresh=findminima(fhist,np.mean([th1,th2,th3]))
        #thresh = np.mean([th1,th2,th3,th4])
        #thresh2 = findminima(fhist,len(fhist)-10)-hist_shift
        #th = cv2.inRange(P, thresh, thresh2)
        th = threshold(P,thresh,255,0)
    else:
        # Baseline: OpenCV's built-in Otsu binarization.
        #thresh = getOtsuThresh(hist)
        #th = threshold(P,thresh,255,0)
        thresh,th = cv2.threshold(P,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    if ishull:
        # Fill the convex hull of all detected contours into the mask.
        # NOTE(review): cv2.findContours returns 3 values in OpenCV 3.x;
        # this 2-value unpacking works only on OpenCV 2.x / 4.x.
        contours,hierarchy = cv2.findContours(th,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        # NOTE(review): passing a generator to np.vstack is deprecated in
        # NumPy >= 1.16; a list comprehension would be needed there.
        allcontours = np.vstack(contours[i] for i in np.arange(len(contours)))
        hull = cv2.convexHull(allcontours)
        cv2.drawContours(th,[hull],-1,255,-1)
    ## plot image
    if grapths:
        plt.subplot(len(imlist),3,i*3+1),plt.imshow(cv2.resize(P,(300,300)),'gray')
        plt.title(fn)
        plt.xticks([]),plt.yticks([])
    ## plot thresh
    if grapths:
        plt.subplot(len(imlist),3,i*3+3),plt.imshow(cv2.resize(th,(300,300)),'gray')
        plt.title("thresh="+str(thresh))
        plt.xticks([]),plt.yticks([])
    ## plot data
    if grapths: plt.subplot(len(imlist),3,i*3+2)
    else: plt.subplot(len(imlist),1,i+1)
    if i==0: plt.title(title)
    plt.plot(hist, color = 'r') # plot histogram
    plt.plot(cdf, color = 'b') # plot cumulative distribution function
    # colors: b: blue, g: green, r: red, c: cyan, m: magenta, y: yellow, k: black, w: white
    x = np.arange(len(hist)) # get x axis
    if proposed:
        plt.plot(x[th1],hist[th1], "o",color="g") # plot user criteria
        plt.plot(x[th2],hist[th2], "o",color="c") # plot max value
        plt.plot(x[th3],cdf[th3], "o",color="y") # plot mean of cdf
        #plt.plot(x[thresh2], hist[thresh2], "ro",color="orangered") # plot selected threshold
    plt.plot(x[thresh], hist[thresh], "ro",color="r") # plot selected threshold
    plt.xlim([0,len(hist)]) # space the x axis
    #if len(fns)>1 and i != len(fns)-1: pass#plt.xticks([])
    #plt.legend(('histogram','cdf'), loc = 'upper left')
    #plt.legend(('histogram','cdf'),bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    #saving
    if save and i == len(imlist)-1:
        fileName = title+"_th"+str(thresh)
        if len(imlist)==1:
            fileName +="_"+fn.split(".")[0]
        fileName += ".jpg"
        fig.savefig(fileName,dpi=dpi)
if show: plt.show()
|
{"hexsha": "caec59851d589e9da5017946dfce466afe58ef4c", "size": 5066, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/hypothesis2.py", "max_stars_repo_name": "davtoh/RRTools", "max_stars_repo_head_hexsha": "6dde2d4622719d9031bf21ffbf7723231a0e2003", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-16T03:54:22.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-16T03:54:22.000Z", "max_issues_repo_path": "tests/hypothesis2.py", "max_issues_repo_name": "davtoh/RRTools", "max_issues_repo_head_hexsha": "6dde2d4622719d9031bf21ffbf7723231a0e2003", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/hypothesis2.py", "max_forks_repo_name": "davtoh/RRTools", "max_forks_repo_head_hexsha": "6dde2d4622719d9031bf21ffbf7723231a0e2003", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-09T02:49:06.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-09T02:49:06.000Z", "avg_line_length": 37.25, "max_line_length": 96, "alphanum_fraction": 0.6285037505, "include": true, "reason": "import numpy", "num_tokens": 1546}
|
import time
import numpy as np
import pandas as pd
import os
class SpotifyDataExtractor:
    """Pull an artist's album tracks and audio features from the Spotify
    API (via a spotipy-style client) and export them to CSV and SQL.

    Attributes populated in __init__: album_names/album_uris (parallel
    lists of the artist's albums), spotify_albums (per-album dict of
    parallel track-attribute lists), album_count (albums processed so far).
    """

    def __init__(self, sp, artist_uri, artist_name, conn):
        # sp: Spotify API client; conn: SQL connection for the final dump.
        self.sp = sp
        self.artist_uri = artist_uri
        self.artist_name = artist_name
        self.conn = conn
        # Fetch the artist's albums once and cache names/URIs in parallel.
        # NOTE(review): artist_albums is paginated -- only the first page
        # of albums is collected here.
        self.sp_albums = self.sp.artist_albums(self.artist_uri, album_type='album')
        self.album_names = []
        self.album_uris = []
        for i in range(len(self.sp_albums['items'])):
            self.album_names.append(self.sp_albums['items'][i]['name'])
            self.album_uris.append(self.sp_albums['items'][i]['uri'])
        self.spotify_albums = {}
        self.album_count = 0
    def _extract_album_songs(self, uri):
        """Fetch one album's tracks and store their basic attributes as
        parallel lists under ``self.spotify_albums[uri]``.

        Relies on ``self.album_count`` pointing at the current album's
        index in ``self.album_names``.
        """
        album = uri
        self.spotify_albums[album] = {}
        self.spotify_albums[album]['album'] = []
        self.spotify_albums[album]['track_number'] = []
        self.spotify_albums[album]['id'] = []
        self.spotify_albums[album]['name'] = []
        self.spotify_albums[album]['uri'] = []
        tracks = self.sp.album_tracks(album)
        for i in range(len(tracks['items'])):
            self.spotify_albums[album]['album'].append(self.album_names[self.album_count])
            self.spotify_albums[album]['track_number'].append(tracks['items'][i]['track_number'])
            self.spotify_albums[album]['id'].append(tracks['items'][i]['id'])
            self.spotify_albums[album]['name'].append(tracks['items'][i]['name'])
            self.spotify_albums[album]['uri'].append(tracks['items'][i]['uri'])
    def _extract_audio_features(self, album):
        """Fetch per-track audio features (and popularity) for every track
        already collected for ``album``, appending to parallel lists.

        Two API calls per track: audio_features + track (for popularity).
        """
        self.spotify_albums[album]['acousticness'] = []
        self.spotify_albums[album]['danceability'] = []
        self.spotify_albums[album]['energy'] = []
        self.spotify_albums[album]['instrumentalness'] = []
        self.spotify_albums[album]['liveness'] = []
        self.spotify_albums[album]['loudness'] = []
        self.spotify_albums[album]['speechiness'] = []
        self.spotify_albums[album]['tempo'] = []
        self.spotify_albums[album]['valence'] = []
        self.spotify_albums[album]['popularity'] = []
        # NOTE(review): track_count is incremented but never read.
        track_count = 0
        for track in self.spotify_albums[album]['uri']:
            features = self.sp.audio_features(track)
            self.spotify_albums[album]['acousticness'].append(features[0]['acousticness'])
            self.spotify_albums[album]['danceability'].append(features[0]['danceability'])
            self.spotify_albums[album]['energy'].append(features[0]['energy'])
            self.spotify_albums[album]['instrumentalness'].append(features[0]['instrumentalness'])
            self.spotify_albums[album]['liveness'].append(features[0]['liveness'])
            self.spotify_albums[album]['loudness'].append(features[0]['loudness'])
            self.spotify_albums[album]['speechiness'].append(features[0]['speechiness'])
            self.spotify_albums[album]['tempo'].append(features[0]['tempo'])
            self.spotify_albums[album]['valence'].append(features[0]['valence'])
            #popularity is stored elsewhere
            pop = self.sp.track(track)
            self.spotify_albums[album]['popularity'].append(pop['popularity'])
            track_count+=1
    def build_dataframe(self):
        """Collect all albums' tracks and features, then export the result.

        Side effects: writes a timestamped CSV in the working directory and
        appends to the WKR_SPOTIFY_DATA table on ``self.conn``. Random
        sleeps every 5 albums keep the API request rate polite.
        """
        sleep_min = 2
        sleep_max = 5
        start_time = time.time()
        request_count = 0
        for uri in self.album_uris:
            self._extract_album_songs(uri)
            print("Album " + str(self.album_names[self.album_count]) + " songs has been added to spotify_albums dictionary")
            self.album_count+=1
        for album in self.spotify_albums:
            self._extract_audio_features(album)
            request_count+=1
            if request_count % 5 == 0:
                print(str(request_count) + " playlists completed")
                time.sleep(np.random.uniform(sleep_min, sleep_max))
                print('Loop #: {}'.format(request_count))
                print('Elapsed Time: {} seconds'.format(time.time() - start_time))
        # Flatten the per-album parallel lists into one dict of columns.
        genre_dict = {}
        genre_dict['album'] = []
        genre_dict['track_number'] = []
        genre_dict['id'] = []
        genre_dict['name'] = []
        genre_dict['uri'] = []
        genre_dict['acousticness'] = []
        genre_dict['danceability'] = []
        genre_dict['energy'] = []
        genre_dict['instrumentalness'] = []
        genre_dict['liveness'] = []
        genre_dict['loudness'] = []
        genre_dict['speechiness'] = []
        genre_dict['tempo'] = []
        genre_dict['valence'] = []
        genre_dict['popularity'] = []
        for album in self.spotify_albums:
            for feature in self.spotify_albums[album]:
                genre_dict[feature].extend(self.spotify_albums[album][feature])
        genre_df = pd.DataFrame(genre_dict)
        genre_df['artist'] = self.artist_name
        # Keep only the most popular copy of tracks that appear on
        # multiple albums, preserving the original row order.
        genre_df = genre_df.sort_values('popularity', ascending=False).drop_duplicates('name').sort_index()
        genre_df.to_csv('{}_data.csv'.format(str(time.time())), index=False)
        print('Dataframe exported to csv')
        start_time = time.time()
        print('Data transfer to sql table started...')
        print(genre_df.shape)
        genre_df.to_sql('WKR_SPOTIFY_DATA', con=self.conn, if_exists='append', index=False, method='multi', chunksize=100)
        print('Data dumped to the working sql table in {} seconds.'.format(time.time()-start_time))
|
{"hexsha": "ab318e8aed17367900e3d71f8c678b4aa15554e4", "size": 5559, "ext": "py", "lang": "Python", "max_stars_repo_path": "AI/spotifydataextractor.py", "max_stars_repo_name": "pradeepsalunke/muser-data-analysis", "max_stars_repo_head_hexsha": "08ea051d422431b29e6e32841d8e349e136c6f14", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AI/spotifydataextractor.py", "max_issues_repo_name": "pradeepsalunke/muser-data-analysis", "max_issues_repo_head_hexsha": "08ea051d422431b29e6e32841d8e349e136c6f14", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AI/spotifydataextractor.py", "max_forks_repo_name": "pradeepsalunke/muser-data-analysis", "max_forks_repo_head_hexsha": "08ea051d422431b29e6e32841d8e349e136c6f14", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1951219512, "max_line_length": 124, "alphanum_fraction": 0.5984889369, "include": true, "reason": "import numpy", "num_tokens": 1253}
|
#importing the libraries
import tensorflow as tf
import numpy as np
import pandas as pd
import json
import nltk
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import Input, Embedding, LSTM , Dense,GlobalMaxPooling1D,Flatten
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
#importing the dataset
# Expected schema: {"intents": [{"tag": ..., "input": [...], "responses": [...]}, ...]}
with open('content.json') as content:
  data1 = json.load(content)
#getting all the data to lists
tags = []
inputs = []
responses={}
for intent in data1['intents']:
  responses[intent['tag']]=intent['responses']
  for lines in intent['input']:
    inputs.append(lines)
    tags.append(intent['tag'])
#converting to dataframe
data = pd.DataFrame({"inputs":inputs,
                     "tags":tags})
# Shuffle the rows before training.
data = data.sample(frac=1)
#removing punctuations
import string
data['inputs'] = data['inputs'].apply(lambda wrd:[ltrs.lower() for ltrs in wrd if ltrs not in string.punctuation])
data['inputs'] = data['inputs'].apply(lambda wrd: ''.join(wrd))
data
#tokenize the data
# NOTE(review): Tokenizer was already imported at the top of the file.
from tensorflow.keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer(num_words=2000)
tokenizer.fit_on_texts(data['inputs'])
train = tokenizer.texts_to_sequences(data['inputs'])
#apply padding
from tensorflow.keras.preprocessing.sequence import pad_sequences
x_train = pad_sequences(train)
#encoding the outputs
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train = le.fit_transform(data['tags'])
# Model geometry is derived from the padded data and the label set.
input_shape = x_train.shape[1]
print(input_shape)
#define vocabulary
vocabulary = len(tokenizer.word_index)
print("number of unique words : ",vocabulary)
output_length = le.classes_.shape[0]
print("output length: ",output_length)
#creating the model
# Embedding -> LSTM -> Dense softmax over the intent tags.
i = Input(shape=(input_shape,))
x = Embedding(vocabulary+1,10)(i)
x = LSTM(10,return_sequences=True)(x)
x = Flatten()(x)
x = Dense(output_length,activation="softmax")(x)
model = Model(i,x)
#compiling the model
model.compile(loss="sparse_categorical_crossentropy",optimizer='adam',metrics=['accuracy'])
#training the model
train = model.fit(x_train,y_train,epochs=200)
#plotting model accuracy
plt.plot(train.history['accuracy'],label='training set accuracy')
plt.plot(train.history['loss'],label='training set loss')
plt.legend()
#chatting
# Interactive loop: preprocess the user's line exactly like the training
# data, predict an intent tag, and answer with a random canned response.
import random
while True:
  texts_p = []
  prediction_input = input('You : ')
  #removing punctuation and converting to lowercase
  prediction_input = [letters.lower() for letters in prediction_input if letters not in string.punctuation]
  prediction_input = ''.join(prediction_input)
  texts_p.append(prediction_input)
  #tokenizing and padding
  prediction_input = tokenizer.texts_to_sequences(texts_p)
  prediction_input = np.array(prediction_input).reshape(-1)
  prediction_input = pad_sequences([prediction_input],input_shape)
  #getting output from model
  output = model.predict(prediction_input)
  output = output.argmax()
  #finding the right tag and predicting
  response_tag = le.inverse_transform([output])[0]
  print("Dew : ",random.choice(responses[response_tag]))
  if response_tag == "goodbye":
    break
|
{"hexsha": "33bdb85416dc22da20f39bdeca277af1fe2141e2", "size": 3092, "ext": "py", "lang": "Python", "max_stars_repo_path": "DEW.py", "max_stars_repo_name": "Eeman1113/DEW", "max_stars_repo_head_hexsha": "1b970bf0c8cf83b79d483c3836a6a50349d9ab41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-20T03:36:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T03:36:00.000Z", "max_issues_repo_path": "DEW.py", "max_issues_repo_name": "Eeman1113/DEW", "max_issues_repo_head_hexsha": "1b970bf0c8cf83b79d483c3836a6a50349d9ab41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DEW.py", "max_forks_repo_name": "Eeman1113/DEW", "max_forks_repo_head_hexsha": "1b970bf0c8cf83b79d483c3836a6a50349d9ab41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8558558559, "max_line_length": 114, "alphanum_fraction": 0.7593790427, "include": true, "reason": "import numpy", "num_tokens": 713}
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# Public batchify helpers exported by this module.
__all__ = ['Stack', 'Pad', 'Tuple', 'Dict']
class Stack(object):
    """
    Batchify callable that stacks equally-shaped samples into one array.

    Args:
        axis (int, optional): Axis of the result along which the samples
            are stacked. Default: 0.
        dtype (str|numpy.dtype, optional): Output value type. When None,
            the input data type is kept. Default: None.
    """

    def __init__(self, axis=0, dtype=None):
        self._axis = axis
        self._dtype = dtype

    def __call__(self, data):
        """
        Stack the input samples into a single batch array.

        Args:
            data (list[numpy.ndarray|list]): Samples of identical
                shape/length.

        Returns:
            numpy.ndarray: The stacked batch.

        Example:
            .. code-block:: python

                from paddlenlp.data import Stack
                a = [1, 2, 3, 4]
                b = [3, 4, 5, 6]
                c = [5, 6, 7, 8]
                result = Stack()([a, b, c])
                '''
                [[1, 2, 3, 4],
                 [3, 4, 5, 6],
                 [5, 6, 7, 8]]
                '''
        """
        stacked = np.stack(data, axis=self._axis)
        if self._dtype:
            stacked = stacked.astype(self._dtype)
        return stacked
class Pad(object):
    """
    Pads the input data samples to the largest length at `axis`.

    Args:
        pad_val (float|int, optional): The padding value. Default: 0.
        axis (int, optional): Axis along which samples are padded to the
            largest length. E.g. for inputs shaped (10, 8, 5), (6, 8, 5),
            (3, 8, 5) and axis 0, each sample is padded to (10, 8, 5) and
            the stacked output has shape (3, 10, 8, 5). Default: 0.
        ret_length (bool|numpy.dtype, optional): If bool, whether to also
            return the valid lengths (as int32 when True). If a
            numpy.dtype, the dtype of the returned lengths. Default: None.
        dtype (numpy.dtype, optional): Output value type. When None, the
            input data type is kept. Default: None.
        pad_right (bool, optional): Pad on the right side when True,
            otherwise on the left. Default: True.
    """

    def __init__(self,
                 pad_val=0,
                 axis=0,
                 ret_length=None,
                 dtype=None,
                 pad_right=True):
        self._pad_val = pad_val
        self._axis = axis
        self._ret_length = ret_length
        self._dtype = dtype
        self._pad_right = pad_right

    def __call__(self, data):
        """
        Pad every sample to the longest length at `axis` and stack them.

        Args:
            data (list[numpy.ndarray|list]): The input samples.

        Returns:
            numpy.ndarray|tuple[numpy.ndarray]: The padded batch shaped
            (N, ...). When `ret_length` is set, a tuple of the batch and a
            (N,)-shaped array of the original lengths at `axis`.

        Example:
            .. code-block:: python

                from paddlenlp.data import Pad
                a = [1, 2, 3, 4]
                b = [5, 6, 7]
                c = [8, 9]
                result = Pad(pad_val=0)([a, b, c])
                '''
                [[1, 2, 3, 4],
                 [5, 6, 7, 0],
                 [8, 9, 0, 0]]
                '''
        """
        samples = [np.asarray(sample) for sample in data]
        lengths = [sample.shape[self._axis] for sample in samples]
        target = max(lengths)
        # Each padded sample keeps its shape except along the padded axis.
        padded_shape = list(samples[0].shape)
        padded_shape[self._axis] = target
        batch = np.full(
            shape=(len(samples),) + tuple(padded_shape),
            fill_value=self._pad_val,
            dtype=samples[0].dtype if self._dtype is None else self._dtype)
        for idx, sample in enumerate(samples):
            length = sample.shape[self._axis]
            if length == target:
                batch[idx] = sample
                continue
            if length == 0:
                # Nothing to copy; the row already holds only pad_val.
                continue
            window = [slice(None)] * sample.ndim
            if self._pad_right:
                window[self._axis] = slice(0, length)
            else:
                window[self._axis] = slice(target - length, target)
            # Prepend the batch index so the sample broadcasts into row idx.
            batch[(slice(idx, idx + 1),) + tuple(window)] = sample
        if not self._ret_length:
            return batch
        if self._ret_length == True:  # noqa: E712 -- bool True selects int32
            return batch, np.asarray(lengths, dtype="int32")
        return batch, np.asarray(lengths, self._ret_length)
class Tuple(object):
    """Combine several batchify callables, one per sample field.

    Each sample is a list/tuple of fields; the i-th wrapped callable is
    applied to the list made of every sample's i-th field. For example,
    ``Tuple(DataBatchify, LabelBatchify)`` batchifies ``(nd_data, label)``
    samples field by field.

    Args:
        fn (callable|list[callable]|tuple[callable]): either a single
            list/tuple holding all batchify callables, or the first of
            several passed positionally.
        args (tuple[callable]): additional batchify callables when they
            are passed positionally.
    """

    def __init__(self, fn, *args):
        if isinstance(fn, (list, tuple)):
            # A sequence form must not be mixed with extra positional fns.
            assert len(args) == 0, 'Input pattern not understood. The input of Tuple can be ' \
                'Tuple(A, B, C) or Tuple([A, B, C]) or Tuple((A, B, C)). ' \
                'Received fn=%s, args=%s' % (str(fn), str(args))
            self._fn = fn
        else:
            self._fn = (fn, ) + args
        for idx, batchify_fn in enumerate(self._fn):
            assert callable(
                batchify_fn
            ), 'Batchify functions must be callable! type(fn[%d]) = %s' % (
                idx, str(type(batchify_fn)))

    def __call__(self, data):
        """Apply each wrapped callable to its corresponding field column.

        Args:
            data (list|tuple): samples, each holding as many fields as
                there are wrapped callables.

        Returns:
            tuple: all batchified results; a callable returning a
            tuple/list contributes several elements to the output.
        """
        assert len(data[0]) == len(self._fn), \
            'The number of attributes in each data sample should contain' \
            ' {} elements'.format(len(self._fn))
        batched = []
        for idx, batchify_fn in enumerate(self._fn):
            field_column = [sample[idx] for sample in data]
            outcome = batchify_fn(field_column)
            if isinstance(outcome, (tuple, list)):
                batched.extend(outcome)
            else:
                batched.append(outcome)
        return tuple(batched)
class Dict(object):
    """Combine several batchify callables keyed by sample field name.

    Each sample is a dict with multiple fields; the batchify callable
    stored under a given key is applied to the list made of every
    sample's value for that key. For example, with samples like
    ``{'tokens': tokens, 'labels': labels}``, use
    ``Dict({'tokens': DataBatchify, 'labels': LabelBatchify})``.

    Args:
        fn (dict): maps input column name to a callable batchify function.
    """

    def __init__(self, fn):
        assert isinstance(fn, (dict)), 'Input pattern not understood. The input of Dict must be a dict with key of input column name and value of collate_fn ' \
            'Received fn=%s' % (str(fn))
        self._fn = fn
        for col_name, ele_fn in self._fn.items():
            # Bug fix: col_name is a string key, so the placeholder must be
            # %s, not %d — the old %d raised a TypeError while building the
            # message instead of reporting the non-callable entry.
            assert callable(
                ele_fn
            ), 'Batchify functions must be callable! type(fn[%s]) = %s' % (
                col_name, str(type(ele_fn)))

    def __call__(self, data):
        """Batchify samples by applying each function to its keyed field.

        Args:
            data (list[dict]|tuple[dict]): samples, each a dict containing
                at least the keys registered in this ``Dict``.

        Returns:
            tuple: all batchified results, in the registration order of
            the batchify functions; a function returning a tuple/list
            contributes several elements to the output.
        """
        ret = []
        for col_name, ele_fn in self._fn.items():
            result = ele_fn([ele[col_name] for ele in data])
            # e.g. Pad(ret_length=True) yields (batch, lengths): flatten it.
            if isinstance(result, (tuple, list)):
                ret.extend(result)
            else:
                ret.append(result)
        return tuple(ret)
|
{"hexsha": "6a56ff75465b9609e3452617eb4fc91b8dc6263a", "size": 12061, "ext": "py", "lang": "Python", "max_stars_repo_path": "paddlenlp/data/collate.py", "max_stars_repo_name": "JunnYu/ConvBERT-Prod", "max_stars_repo_head_hexsha": "a1351e1e7f9400cb8c71d0a15d23629b4cb055d4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2022-01-06T07:39:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T06:18:40.000Z", "max_issues_repo_path": "paddlenlp/data/collate.py", "max_issues_repo_name": "JunnYu/ConvBERT-Prod", "max_issues_repo_head_hexsha": "a1351e1e7f9400cb8c71d0a15d23629b4cb055d4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paddlenlp/data/collate.py", "max_forks_repo_name": "JunnYu/ConvBERT-Prod", "max_forks_repo_head_hexsha": "a1351e1e7f9400cb8c71d0a15d23629b4cb055d4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5732087227, "max_line_length": 160, "alphanum_fraction": 0.5351131747, "include": true, "reason": "import numpy", "num_tokens": 2819}
|
#!/usr/bin/env python3
#
# (C) 2014-2020 Ed Bueler
#
# Generate a structured triangular mesh on the unit square and write it in
# PETSc binary format: node coordinates as a Vec (.vec) and element triples,
# boundary flags, and a placeholder Neumann segment list as ISs (.is).

import sys, argparse
import numpy as np
import PetscBinaryIO  # may use link

petsc = PetscBinaryIO.PetscBinaryIO()

parser = argparse.ArgumentParser(description='Generate a structured grid on the unit square in PETSc binary format (.vec,.is), readable by ch9/unfem.')
# positional args; both required
parser.add_argument('root', metavar='NAMEROOT',
                    help='output file name root')
parser.add_argument('M', type=int,
                    help='mesh has N = MxM points')
parser.add_argument('-debug', default=False, action='store_true',
                    help='print vectors when generated')
args = parser.parse_args()

vecname = args.root + '.vec'
isname = args.root + '.is'

N = args.M * args.M
# grid spacing (idiomatic float(), not the C-style "(float)(x)" cast)
h = 1.0 / (float(args.M) - 1.0)

def n(i, j):
    """Global node index of grid point (i, j), row-major ordering."""
    return j*args.M + i

# write node locations to .vec file
print(' creating N=%d node locations ...' % N)
xy = np.zeros(2*N)
for j in range(args.M):
    for i in range(args.M):
        k = n(i, j)  # compute the node index once per point
        xy[2*k:2*(k+1)] = [float(i) * h, float(j) * h]
if args.debug:
    print(xy)
print(' writing node locations as PETSc Vec to %s ...' % vecname)
petsc.writeBinaryFile(vecname,[xy.view(PetscBinaryIO.Vec),])

# create element triples: each square cell splits into two triangles
K = (args.M-1)*(args.M-1) * 2  # two triangles per square cell
print(' creating K=%d element triples ...' % K)
e = np.zeros(3*K, dtype=int)
for j in range(args.M-1):
    for i in range(args.M-1):
        # first (lower-left) triangle in cell
        k = 2*(j*(args.M-1) + i)
        A = n(i, j)
        B = A + 1
        C = n(i, j+1)
        e[3*k:3*(k+1)] = [A, B, C]
        # second (upper-right) triangle in cell
        k += 1
        A1 = B
        B1 = C + 1
        C1 = C
        e[3*k:3*(k+1)] = [A1, B1, C1]
if args.debug:
    print(e)

# create boundary flags: nodes on any edge of the square get flag 2
# NOTE(review): confirm the meaning of flag value 2 against the unfem reader
print(' creating N=%d boundary flags ...' % N)
bf = np.zeros(N, dtype=int)
for j in range(args.M):
    for i in range(args.M):
        if (i == 0) or (j == 0) or (i == args.M-1) or (j == args.M-1):
            bf[n(i, j)] = 2
if args.debug:
    print(bf)

# create bogus negative Neumann boundary segment
# FIXME this kluge caused by inability to check if binary file is empty; see FIXME in UMReadISs()
ns = np.array([-1, -1], dtype=int)

# write ISs
print(' writing element triple and boundary flags as PETSc IS to %s ...' % isname)
IS = PetscBinaryIO.IS
petsc.writeBinaryFile(isname,[e.view(IS),bf.view(IS),ns.view(IS)])
|
{"hexsha": "ff98442a7130057e2c720294822b270354018dd9", "size": 2462, "ext": "py", "lang": "Python", "max_stars_repo_path": "c/ch10/genstructured.py", "max_stars_repo_name": "thw1021/p4pdes", "max_stars_repo_head_hexsha": "421fd3d809b1e23e5a6f3c3e51252cb275a76140", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 115, "max_stars_repo_stars_event_min_datetime": "2015-03-13T04:35:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T23:12:02.000Z", "max_issues_repo_path": "c/ch10/genstructured.py", "max_issues_repo_name": "thw1021/p4pdes", "max_issues_repo_head_hexsha": "421fd3d809b1e23e5a6f3c3e51252cb275a76140", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2015-09-24T17:42:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-29T12:36:20.000Z", "max_forks_repo_path": "c/ch10/genstructured.py", "max_forks_repo_name": "thw1021/p4pdes", "max_forks_repo_head_hexsha": "421fd3d809b1e23e5a6f3c3e51252cb275a76140", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 46, "max_forks_repo_forks_event_min_datetime": "2016-07-23T09:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T07:43:17.000Z", "avg_line_length": 31.164556962, "max_line_length": 151, "alphanum_fraction": 0.6076360682, "include": true, "reason": "import numpy", "num_tokens": 753}
|
import os
import sys
from glob import glob
import numpy as np
from setuptools import Extension, find_packages, setup

here = os.path.abspath(os.path.dirname(__file__))
# PyPI long description comes straight from the README.
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()

# MSVC compiler has different flags; assume that's what we are using on Windows
if os.name == "nt":
    # Enable extra warnings except implicit cast, which throws a few
    # see https://bugzilla.mozilla.org/show_bug.cgi?id=857863 for justification
    extra_compile_args = ["/WX", "/wd4244"]
else:
    extra_compile_args = ["-Wextra", "-Werror"]

# NOTE(review): "--debug" is only inspected here, not removed from sys.argv,
# so setuptools also sees it — confirm that is intentional.
if "--debug" in sys.argv:
    extra_compile_args.extend(["-g", "-UNDEBUG", "-O0"])

# C extension module; sources/headers are globbed from pantab/src.
pantab_module = Extension(
    "libpantab",
    include_dirs=[np.get_include()],
    # NOTE(review): macro value "0" — presumably opts into numpy's
    # deprecation checking; confirm the intended API-version value.
    define_macros=[("NPY_NO_DEPRECATED_API", "0")],
    sources=list(glob("pantab/src/*.c")),
    depends=list(glob("pantab/src/*.h")),  # headers only trigger rebuilds
    extra_compile_args=extra_compile_args,
)

setup(
    name="pantab",
    version="2.1.0",
    description="Converts pandas DataFrames into Tableau Hyper Extracts and back",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/WillAyd/pantab",
    author="Will Ayd",
    author_email="william.ayd@icloud.com",
    license="BSD",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Office/Business",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    keywords="tableau visualization pandas dataframe",
    packages=find_packages(),
    package_data={"": ["*.h"], "pantab.tests": ["data/*"]},
    data_files=[("", ["LICENSE.txt", "README.md"])],
    python_requires=">=3.7",
    install_requires=["pandas", "tableauhyperapi", "numpy"],
    extras_require={"dev": ["pytest"]},
    ext_modules=[pantab_module],
)
|
{"hexsha": "7548e10dee63bb8dce13b37a12d169bb829b12e3", "size": 2044, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "duynguyenhoang/pantab", "max_stars_repo_head_hexsha": "d6d44a1a03ab50adfb8f8d850fc6ba98195b6056", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 56, "max_stars_repo_stars_event_min_datetime": "2019-11-01T16:56:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T05:37:54.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "duynguyenhoang/pantab", "max_issues_repo_head_hexsha": "d6d44a1a03ab50adfb8f8d850fc6ba98195b6056", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 107, "max_issues_repo_issues_event_min_datetime": "2019-10-15T15:37:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T21:39:38.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "duynguyenhoang/pantab", "max_forks_repo_head_hexsha": "d6d44a1a03ab50adfb8f8d850fc6ba98195b6056", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2019-11-28T22:24:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T21:00:29.000Z", "avg_line_length": 32.4444444444, "max_line_length": 82, "alphanum_fraction": 0.6590019569, "include": true, "reason": "import numpy", "num_tokens": 519}
|
import numpy as np
from .transform import sph2vec, vec2sph
def angle_between(ang1, ang2, sign=True):
    """Angular difference ang1 - ang2 wrapped into [-pi, pi).

    When ``sign`` is False the absolute difference is returned instead.
    """
    wrapped = np.mod(ang1 - ang2 + np.pi, 2 * np.pi) - np.pi
    return wrapped if sign else np.abs(wrapped)
def angdist(v1, v2, zenith=True):
    """Angular distance between two directions.

    Inputs with leading dimension 2 are treated as spherical coordinates
    and converted to Cartesian vectors first; otherwise they are used as
    Cartesian vectors directly. Supports batches laid out column-wise
    (shape (3, n)).

    Fixes over the previous version: the normalisation no longer mutates
    the caller's arrays in place, and the dot product is clipped to
    [-1, 1] so floating-point drift cannot make arccos return NaN.
    """
    if v1.shape[0] == 2:
        v1 = sph2vec(v1, zenith=zenith)
    if v2.shape[0] == 2:
        v2 = sph2vec(v2, zenith=zenith)
    # normalise copies — the caller's arrays must stay untouched
    v1 = v1 / np.linalg.norm(v1, axis=0)
    v2 = v2 / np.linalg.norm(v2, axis=0)
    if v1.ndim > 1 or v2.ndim > 1:
        # column-wise dot product for batched input
        d = np.einsum('ij,ij->j', v1, v2)
    else:
        d = np.dot(v1.T, v2)
    # clamp to the valid arccos domain; rounding can push |d| above 1
    return np.absolute(np.arccos(np.clip(d, -1.0, 1.0)))
def eledist(v1, v2, zenith=True):
    """Absolute elevation difference between two directions.

    Cartesian inputs (leading dimension 3) are converted to spherical
    coordinates first; the first spherical component is compared after
    wrapping into [-pi, pi).
    """
    if v1.shape[0] == 3:
        v1 = vec2sph(v1, zenith=zenith)
    if v2.shape[0] == 3:
        v2 = vec2sph(v2, zenith=zenith)
    diff = np.mod(v1[0] - v2[0] + np.pi, 2 * np.pi) - np.pi
    return np.abs(diff)
def azidist(v1, v2, zenith=True):
    """Absolute azimuth difference between two directions.

    Cartesian inputs (leading dimension 3) are converted to spherical
    coordinates first; the second spherical component is compared after
    wrapping into [-pi, pi).
    """
    if v1.shape[0] == 3:
        v1 = vec2sph(v1, zenith=zenith)
    if v2.shape[0] == 3:
        v2 = vec2sph(v2, zenith=zenith)
    diff = np.mod(v1[1] - v2[1] + np.pi, 2 * np.pi) - np.pi
    return np.abs(diff)
|
{"hexsha": "790ec48bd68710ab73361d201cd9d2a9e9505382", "size": 1142, "ext": "py", "lang": "Python", "max_stars_repo_path": "sphere/distance.py", "max_stars_repo_name": "jannsta1/insectvision", "max_stars_repo_head_hexsha": "d98a7acbcde1d5faf00131485fa85c706f313814", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sphere/distance.py", "max_issues_repo_name": "jannsta1/insectvision", "max_issues_repo_head_hexsha": "d98a7acbcde1d5faf00131485fa85c706f313814", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sphere/distance.py", "max_forks_repo_name": "jannsta1/insectvision", "max_forks_repo_head_hexsha": "d98a7acbcde1d5faf00131485fa85c706f313814", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-21T08:14:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T08:14:41.000Z", "avg_line_length": 24.8260869565, "max_line_length": 53, "alphanum_fraction": 0.5350262697, "include": true, "reason": "import numpy", "num_tokens": 453}
|
import unittest
import numpy as np
import scipy.sparse
from sklearn.datasets import load_boston, load_iris, load_wine
from flaml import AutoML
from flaml.data import get_output_from_log
from flaml.model import SKLearnEstimator
from rgf.sklearn import RGFClassifier, RGFRegressor
from flaml import tune
class MyRegularizedGreedyForest(SKLearnEstimator):
    """FLAML learner wrapping the Regularized Greedy Forest estimators."""

    def __init__(self, task = 'binary:logistic', n_jobs = 1, max_leaf = 4,
        n_iter = 1, n_tree_search = 1, opt_interval = 1, learning_rate = 1.0,
        min_samples_leaf = 1, **params):
        super().__init__(task, **params)
        # choose regressor vs classifier from the task string
        self.estimator_class = (RGFRegressor if 'regression' in task
                                else RGFClassifier)
        # integer hyperparameters may arrive as floats from the search
        # space, so round them before handing them to RGF
        self.params = dict(
            n_jobs=n_jobs,
            max_leaf=int(round(max_leaf)),
            n_iter=int(round(n_iter)),
            n_tree_search=int(round(n_tree_search)),
            opt_interval=int(round(opt_interval)),
            learning_rate=learning_rate,
            min_samples_leaf=int(round(min_samples_leaf)),
        )

    @classmethod
    def search_space(cls, data_size, task):
        """Hyperparameter search space for FLAML's tuner."""
        def qlog(lo, hi, init=None):
            # quantized log-uniform integer range, with optional start value
            entry = {'domain': tune.qloguniform(lower=lo, upper=hi, q=1)}
            if init is not None:
                entry['init_value'] = init
            return entry
        return {
            'max_leaf': qlog(4, data_size, 4),
            'n_iter': qlog(1, data_size, 1),
            'n_tree_search': qlog(1, 32768, 1),
            'opt_interval': qlog(1, 10000, 100),
            'learning_rate': {'domain': tune.loguniform(lower=0.01, upper=20.0)},
            'min_samples_leaf': qlog(1, 20, 20),
        }

    @classmethod
    def size(cls, config):
        """Estimated model size in bytes for the given config."""
        leaves = int(round(config['max_leaf']))
        trees = int(round(config['n_iter']))
        return (leaves * 3 + (leaves - 1) * 4 + 1.0) * trees * 8

    @classmethod
    def cost_relative2lgbm(cls):
        """Per-iteration cost relative to LightGBM (used by FLAML)."""
        return 1.0
def custom_metric(X_test, y_test, estimator, labels, X_train, y_train,
                  weight_test=None, weight_train=None):
    """Custom FLAML metric: test log-loss penalised by the train/test gap.

    Returns the objective ``(1 + alpha) * test_loss - alpha * train_loss``
    together with ``[test_loss, train_loss]`` for logging.
    """
    from sklearn.metrics import log_loss
    test_loss = log_loss(y_test, estimator.predict_proba(X_test),
                         labels=labels, sample_weight=weight_test)
    train_loss = log_loss(y_train, estimator.predict_proba(X_train),
                          labels=labels, sample_weight=weight_train)
    alpha = 0.5
    objective = test_loss * (1 + alpha) - alpha * train_loss
    return objective, [test_loss, train_loss]
class TestAutoML(unittest.TestCase):
    """End-to-end tests for the flaml AutoML API: custom learners and
    metrics, ensembling, dense and sparse inputs, and log replay."""

    def test_custom_learner(self):
        # Register the RGF wrapper defined above and fit it together with
        # built-in learners on the wine dataset.
        automl = AutoML()
        automl.add_learner(learner_name = 'RGF',
                           learner_class = MyRegularizedGreedyForest)
        X_train, y_train = load_wine(return_X_y=True)
        settings = {
            "time_budget": 10,  # total running time in seconds
            "estimator_list": ['RGF', 'lgbm', 'rf', 'xgboost'],
            "task": 'classification',  # task type
            "sample": True,  # whether to subsample training data
            "log_file_name": "test/wine.log",
            "log_training_metric": True,  # whether to log training metric
            "n_jobs": 1,
        }
        '''The main flaml automl API'''
        automl.fit(X_train = X_train, y_train = y_train, **settings)

    def test_ensemble(self):
        # Same setup as test_custom_learner, but with stacked ensembling on.
        automl = AutoML()
        automl.add_learner(learner_name = 'RGF',
                           learner_class = MyRegularizedGreedyForest)
        X_train, y_train = load_wine(return_X_y=True)
        settings = {
            "time_budget": 10,  # total running time in seconds
            # "estimator_list": ['lgbm', 'xgboost'],
            "estimator_list": ['RGF', 'lgbm', 'rf', 'xgboost'],
            "task": 'classification',  # task type
            "sample": True,  # whether to subsample training data
            "log_file_name": "test/wine.log",
            "log_training_metric": True,  # whether to log training metric
            "ensemble": True,
            "n_jobs": 1,
        }
        '''The main flaml automl API'''
        automl.fit(X_train = X_train, y_train = y_train, **settings)

    def test_dataframe(self):
        # Re-run the classification test with pandas DataFrame input.
        self.test_classification(True)

    def test_custom_metric(self):
        # Fit using the module-level custom_metric, then reload the best
        # estimator and the optimization history from the log file.
        X_train, y_train = load_iris(return_X_y=True)
        automl_experiment = AutoML()
        automl_settings = {
            "time_budget": 10,
            'eval_method': 'holdout',
            "metric": custom_metric,
            "task": 'classification',
            "log_file_name": "test/iris_custom.log",
            "log_training_metric": True,
            'log_type': 'all',
            "n_jobs": 1,
            "model_history": True,
            "sample_weight": np.ones(len(y_train)),  # uniform sample weights
        }
        automl_experiment.fit(X_train=X_train, y_train=y_train,
                              **automl_settings)
        print(automl_experiment.classes_)
        print(automl_experiment.predict_proba(X_train))
        print(automl_experiment.model)
        print(automl_experiment.config_history)
        print(automl_experiment.model_history)
        print(automl_experiment.best_iteration)
        print(automl_experiment.best_estimator)
        # replay: recover the first logged estimator from the log file
        automl_experiment = AutoML()
        estimator = automl_experiment.get_estimator_from_log(
            automl_settings["log_file_name"], record_id=0,
            task='multi')
        print(estimator)
        time_history, best_valid_loss_history, valid_loss_history, \
            config_history, train_loss_history = get_output_from_log(
                filename=automl_settings['log_file_name'], time_budget=6)
        print(train_loss_history)

    def test_classification(self, as_frame=False):
        # Basic classification run on iris, then retrain the first logged
        # config from the log file with the full training data.
        automl_experiment = AutoML()
        automl_settings = {
            "time_budget": 4,
            "metric": 'accuracy',
            "task": 'classification',
            "log_file_name": "test/iris.log",
            "log_training_metric": True,
            "n_jobs": 1,
            "model_history": True
        }
        X_train, y_train = load_iris(return_X_y=True, as_frame=as_frame)
        automl_experiment.fit(X_train=X_train, y_train=y_train,
                              **automl_settings)
        print(automl_experiment.classes_)
        print(automl_experiment.predict_proba(X_train)[:5])
        print(automl_experiment.model)
        print(automl_experiment.config_history)
        print(automl_experiment.model_history)
        print(automl_experiment.best_iteration)
        print(automl_experiment.best_estimator)
        # retrain_from_log does not accept these keys: drop before reuse
        del automl_settings["metric"]
        del automl_settings["model_history"]
        del automl_settings["log_training_metric"]
        automl_experiment = AutoML()
        duration = automl_experiment.retrain_from_log(
            log_file_name=automl_settings["log_file_name"],
            X_train=X_train, y_train=y_train,
            train_full=True, record_id=0)
        print(duration)
        print(automl_experiment.model)
        print(automl_experiment.predict_proba(X_train)[:5])

    def test_regression(self):
        # Regression on boston with an explicit 90/10 holdout split.
        automl_experiment = AutoML()
        automl_settings = {
            "time_budget": 2,
            "metric": 'mse',
            "task": 'regression',
            "log_file_name": "test/boston.log",
            "log_training_metric": True,
            "n_jobs": 1,
            "model_history": True
        }
        X_train, y_train = load_boston(return_X_y=True)
        n = int(len(y_train)*9//10)
        automl_experiment.fit(X_train=X_train[:n], y_train=y_train[:n],
                              X_val=X_train[n:], y_val=y_train[n:],
                              **automl_settings)
        # passing X_val/y_val should force holdout evaluation
        assert automl_experiment._state.eval_method == 'holdout'
        print(automl_experiment.predict(X_train))
        print(automl_experiment.model)
        print(automl_experiment.config_history)
        print(automl_experiment.model_history)
        print(automl_experiment.best_iteration)
        print(automl_experiment.best_estimator)
        print(get_output_from_log(automl_settings["log_file_name"], 1))

    def test_sparse_matrix_classification(self):
        # Classification on random scipy sparse input with uniform split.
        automl_experiment = AutoML()
        automl_settings = {
            "time_budget": 2,
            "metric": 'auto',
            "task": 'classification',
            "log_file_name": "test/sparse_classification.log",
            "split_type": "uniform",
            "n_jobs": 1,
            "model_history": True
        }
        X_train = scipy.sparse.random(1554, 21, dtype=int)
        y_train = np.random.randint(3, size=1554)
        automl_experiment.fit(X_train=X_train, y_train=y_train,
                              **automl_settings)
        print(automl_experiment.classes_)
        print(automl_experiment.predict_proba(X_train))
        print(automl_experiment.model)
        print(automl_experiment.config_history)
        print(automl_experiment.model_history)
        print(automl_experiment.best_iteration)
        print(automl_experiment.best_estimator)

    def test_sparse_matrix_regression(self):
        # Regression on very sparse random input with an explicit
        # validation set; checks X_val is kept as provided.
        automl_experiment = AutoML()
        automl_settings = {
            "time_budget": 2,
            "metric": 'mae',
            "task": 'regression',
            "log_file_name": "test/sparse_regression.log",
            "n_jobs": 1,
            "model_history": True
        }
        X_train = scipy.sparse.random(300, 900, density=0.0001)
        y_train = np.random.uniform(size=300)
        X_val = scipy.sparse.random(100, 900, density=0.0001)
        y_val = np.random.uniform(size=100)
        automl_experiment.fit(X_train=X_train, y_train=y_train,
                              X_val=X_val, y_val=y_val,
                              **automl_settings)
        assert automl_experiment._state.X_val.shape == X_val.shape
        print(automl_experiment.predict(X_train))
        print(automl_experiment.model)
        print(automl_experiment.config_history)
        print(automl_experiment.model_history)
        print(automl_experiment.best_iteration)
        print(automl_experiment.best_estimator)
        print(automl_experiment.best_config)
        print(automl_experiment.best_loss)
        print(automl_experiment.best_config_train_time)

    def test_sparse_matrix_xgboost(self):
        # Large sparse identity input restricted to the xgboost learner.
        automl_experiment = AutoML()
        automl_settings = {
            "time_budget": 2,
            "metric": 'ap',
            "task": 'classification',
            "log_file_name": "test/sparse_classification.log",
            "estimator_list": ["xgboost"],
            "log_type": "all",
            "n_jobs": 1,
        }
        X_train = scipy.sparse.eye(900000)
        y_train = np.random.randint(2, size=900000)
        automl_experiment.fit(X_train=X_train, y_train=y_train,
                              **automl_settings)
        print(automl_experiment.predict(X_train))
        print(automl_experiment.model)
        print(automl_experiment.config_history)
        print(automl_experiment.model_history)
        print(automl_experiment.best_iteration)
        print(automl_experiment.best_estimator)

    def test_sparse_matrix_lr(self):
        # Sparse input restricted to the L1/L2 logistic regression learners.
        automl_experiment = AutoML()
        automl_settings = {
            "time_budget": 2,
            "metric": 'f1',
            "task": 'classification',
            "log_file_name": "test/sparse_classification.log",
            "estimator_list": ["lrl1", "lrl2"],
            "log_type": "all",
            "n_jobs": 1,
        }
        X_train = scipy.sparse.random(3000, 900, density=0.1)
        y_train = np.random.randint(2, size=3000)
        automl_experiment.fit(X_train=X_train, y_train=y_train,
                              **automl_settings)
        print(automl_experiment.predict(X_train))
        print(automl_experiment.model)
        print(automl_experiment.config_history)
        print(automl_experiment.model_history)
        print(automl_experiment.best_iteration)
        print(automl_experiment.best_estimator)

    def test_sparse_matrix_regression_cv(self):
        # Regression on sparse input using cross-validation evaluation.
        automl_experiment = AutoML()
        automl_settings = {
            "time_budget": 2,
            'eval_method': 'cv',
            "task": 'regression',
            "log_file_name": "test/sparse_regression.log",
            "n_jobs": 1,
            "model_history": True
        }
        X_train = scipy.sparse.random(100, 100)
        y_train = np.random.uniform(size=100)
        automl_experiment.fit(X_train=X_train, y_train=y_train,
                              **automl_settings)
        print(automl_experiment.predict(X_train))
        print(automl_experiment.model)
        print(automl_experiment.config_history)
        print(automl_experiment.model_history)
        print(automl_experiment.best_iteration)
        print(automl_experiment.best_estimator)
if __name__ == "__main__":
    # Allow running this test module directly: python test_automl.py
    unittest.main()
|
{"hexsha": "d502cb056ce0fb28434fa8fe07f4b0924470faf1", "size": 13120, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_automl.py", "max_stars_repo_name": "dan0nchik/FLAML", "max_stars_repo_head_hexsha": "9d661759b49de6e403d9288af7a015606528fe7e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_automl.py", "max_issues_repo_name": "dan0nchik/FLAML", "max_issues_repo_head_hexsha": "9d661759b49de6e403d9288af7a015606528fe7e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_automl.py", "max_forks_repo_name": "dan0nchik/FLAML", "max_forks_repo_head_hexsha": "9d661759b49de6e403d9288af7a015606528fe7e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1395348837, "max_line_length": 80, "alphanum_fraction": 0.6116615854, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3028}
|
## Open a dataset
```python
import pandas as pd
fn = "../data/benchmarks/diffeq/predpreyfrac_clean.csv"
df = pd.read_csv(fn, skipinitialspace=True)
print df.columns
```
Index([u'T', u'x', u'y', u'dx', u'dy'], dtype='object')
## Graph the data
```python
# visualization libraries
import matplotlib.pyplot as plt
# plot the visuals in ipython
%matplotlib inline
df[["x","y"]].plot()
df[["dx","dy"]].plot()
print ""
```
```python
from pypge import (evaluate, model)
import sympy
X_train = df[["x", "y"]].as_matrix().T
Y_train = df[["dx"]].as_matrix().reshape(5000,)
print X_train.shape, Y_train.shape
xs = sympy.symbols("x y")
eqn_str = '-0.2*x + 0.001*x*y'
# eqn_str = "C*x + C*x*y"
eqn = sympy.sympify(eqn_str)
modl = model.Model(eqn, xs)
y_pred = evaluate.Eval(modl, xs, X_train)
print y_pred.shape, Y_train.shape
mae, err = evaluate.Score(Y_train, y_pred, "rmse")
print mae, err
```
(2, 5000) (5000,)
(5000,) (5000,)
0.0198915007578 None
|
{"hexsha": "af5e0ed93e10ec79c93769dcb0ed86c18aa5544b", "size": 68956, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/Dissertation/experiments/diffeq_clean.ipynb", "max_stars_repo_name": "verdverm/pypge", "max_stars_repo_head_hexsha": "7f94595735c08e147bd17056f15d944da61eec6d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2015-09-09T21:22:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-04T08:15:10.000Z", "max_issues_repo_path": "notebooks/Dissertation/experiments/diffeq_clean.ipynb", "max_issues_repo_name": "verdverm/pypge", "max_issues_repo_head_hexsha": "7f94595735c08e147bd17056f15d944da61eec6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2016-03-31T21:54:06.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-26T22:40:32.000Z", "max_forks_repo_path": "notebooks/Dissertation/experiments/diffeq_clean.ipynb", "max_forks_repo_name": "verdverm/pypge", "max_forks_repo_head_hexsha": "7f94595735c08e147bd17056f15d944da61eec6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2016-06-13T16:14:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-26T14:26:42.000Z", "avg_line_length": 456.6622516556, "max_line_length": 35932, "alphanum_fraction": 0.9333052961, "converted": true, "num_tokens": 331}
|
[STATEMENT]
lemma split_two_block_non_interfering:
assumes "split_block (two_block_non_interfering A B) (dim_row A) (dim_col A) = (Q1, Q2, Q3, Q4)"
shows "Q1 = A" "Q4 = B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Q1 = A &&& Q4 = B
[PROOF STEP]
using split_four_block_dual_fst_lst[of A _ _ B Q1 Q2 Q3 Q4]
assms
[PROOF STATE]
proof (prove)
using this:
split_block (four_block_mat A ?B ?C B) (dim_row A) (dim_col A) = (Q1, Q2, Q3, Q4) \<Longrightarrow> Q1 = A
split_block (four_block_mat A ?B ?C B) (dim_row A) (dim_col A) = (Q1, Q2, Q3, Q4) \<Longrightarrow> Q4 = B
split_block (two_block_non_interfering A B) (dim_row A) (dim_col A) = (Q1, Q2, Q3, Q4)
goal (1 subgoal):
1. Q1 = A &&& Q4 = B
[PROOF STEP]
by auto
|
{"llama_tokens": 349, "file": "Linear_Programming_LP_Preliminaries", "length": 2}
|
#' Unit Testing script for NMF package: NMF utility functions.
#'
#' @author Renaud Gaujoux
#' @creation 10 Aug 2010
#' Unit test for rmatrix: random matrix generation
test.rmatrix <- function(){
    # Each check seeds the RNG twice with the same seed: once to build the
    # expected matrix M by hand, once before calling rmatrix(), so both
    # draws consume the identical random stream and must be identical.

    n <- 100; p <- 20
    A <- matrix(1, n, p)

    # square matrix if y is missing
    set.seed(123456); M <- matrix(runif(n*n), n, n)
    set.seed(123456); checkIdentical(M, rmatrix(n), "Square matrix if 'y' is missing")
    set.seed(123456); checkIdentical(M, rmatrix(matrix(NA, nrow(M), ncol(M))), "Correct if 'x' is a matrix")

    # from NMF model: rmatrix(model) should be fitted(model) + noise
    model <- rnmf(3, A)
    set.seed(123456); M <- fitted(model) + matrix(runif(n*p), n, p)
    set.seed(123456); checkIdentical(M, rmatrix(model), "Correct if 'x' is an NMF model")
    set.seed(123456); M <- fitted(model) + matrix(rnorm(n*p), n, p)
    set.seed(123456); checkIdentical(M, rmatrix(model, dist=rnorm), "dist is passed correctly if 'x' is an NMF model")

    # default dist is uniform
    set.seed(123456); M <- matrix(runif(n*p), n, p)
    set.seed(123456); checkIdentical(M, rmatrix(n, p), "Default correctly to 'runif'")
    set.seed(123456); checkIdentical(M, rmatrix(A), "Default correctly to 'runif' (arg: matrix)")

    # argument byrow is correctly passed
    set.seed(123456); M <- matrix(runif(n*p), n, p, byrow=TRUE)
    set.seed(123456); checkIdentical(M, rmatrix(n, p, byrow=TRUE), "argument byrow is correctly passed")
    set.seed(123456); checkIdentical(M, rmatrix(A, byrow=TRUE), "argument byrow is correctly passed (arg: matrix)")

    # argument dimnames is correctly passed
    dims <- list(rep('a',n), rep('b',p))
    set.seed(123456); M <- matrix(runif(n*p), n, p, dimnames=dims)
    set.seed(123456); checkIdentical(M, rmatrix(n, p, dimnames=dims), "argument dimnames is correctly passed")
    set.seed(123456); checkIdentical(M, rmatrix(A, dimnames=dims), "argument dimnames is correctly passed (arg: matrix)")

    # can pass distribution function
    set.seed(123456); M <- matrix(rnorm(n*p), n, p)
    set.seed(123456); checkIdentical(M, rmatrix(n, p, dist=rnorm), "argument dist is correctly passed")
    set.seed(123456); checkIdentical(M, rmatrix(A, dist=rnorm), "argument dist is correctly passed (arg: matrix)")

    # can pass distribution functions as third argument (positionally)
    set.seed(123456); M <- matrix(rnorm(n*p), n, p)
    set.seed(123456); checkIdentical(M, rmatrix(n, p, rnorm), "argument dist is the third argument")
    set.seed(123456); checkIdentical(M, rmatrix(A, rnorm), "argument dist is the second argument (arg: matrix)")

    # can pass extra arguments to distribution function (here: mean=20)
    set.seed(123456); M <- matrix(rnorm(n*p, 20), n, p)
    set.seed(123456); checkIdentical(M, rmatrix(n, p, rnorm, mean=20), "extra arguments are passed to the distribution function")
    set.seed(123456); checkIdentical(M, rmatrix(A, rnorm, mean=20), "extra arguments are passed to the distribution function (arg: matrix)")
}
#test.ptr_neq_constraints <- function(){
#
# .do_constrain <- function(...){
#
# }
#
# .check <- function(c, msg){
# .msg <- function(...) paste(msg, ':', ...)
# x <- rmatrix(20,3)
# y <- NMF:::neq.constraints.inplace(x, copy=TRUE)
# checkIdentical(max.col(y[1:9,]), c(rep(1,3), rep(2,3), rep(3,3)), .msg("Max are ok"))
# checkIdentical(y[-(1:9,], , .msg("Non constrained rows are identical"))
# }
#
# #.check(list(1:3,4:6,7:9), "")
#
#
#
#}
# Unit tests for nmfWrapper(): a factory that returns an nmf() wrapper with
# some arguments fixed.  Verifies that (a) the wrapped algorithm is used,
# (b) defaults fixed at wrapper-creation time (maxIter, seed) are applied,
# (c) those defaults can be overridden per call, and (d) the fixed 'method'
# argument cannot be overridden (a warning is raised and it is discarded).
test.nmfWrapper <- function(){
    .msg <- NULL
    # Prefix every assertion message with the current test-section label.
    msg <- function(...) paste(.msg, ': ', ..., sep='')
    # Plain wrapper: only the method is fixed.
    f <- nmfWrapper('lee')
    x <- rmatrix(20, 10)
    # NB: res is assigned inside the checkTrue() call and reused below.
    checkTrue( isNMFfit(res <- f(x, 3)), msg('result is an NMFfit object') )
    checkIdentical(nbasis(res), 3L, msg('result was computed using the correct rank') )
    checkIdentical(algorithm(res), 'lee', msg('result was computed using the correct algorithm') )
    # Wrapper that also fixes default values for maxIter and seed.
    .msg <- 'with default maxIter and seed value'
    f <- nmfWrapper('nsNMF', maxIter=3, seed='nndsvd')
    checkTrue( isNMFfit(res <- f(x, 2)), msg('result is an NMFfit object' ))
    checkIdentical(nbasis(res), 2L, msg('result was computed using the correct rank') )
    checkIdentical(algorithm(res), 'nsNMF', msg('result was computed using the correct algorithm') )
    checkIdentical(niter(res), 3L, msg('result was computed using the correct number of iterations') )
    checkIdentical(seeding(res), 'nndsvd', msg('result was computed using the correct seed') )
    # overwrite default in call
    .msg <- 'overwriting defaults in call'
    checkTrue( isNMFfit(res <- f(x, 4, seed='random')), msg('result is an NMFfit object' ))
    checkIdentical(nbasis(res), 4L, msg('result was computed using the correct rank') )
    checkIdentical(algorithm(res), 'nsNMF', msg('result was computed using the correct algorithm') )
    checkIdentical(niter(res), 3L, msg('result was computed using the correct number of iterations') )
    checkIdentical(seeding(res), 'random', msg('result was computed using the correct seed') )
    # pass method as well: fixed arguments may not be overridden
    .msg <- 'overwriting defaults in call + try overwrite method'
    checkWarning(res <- f(x, 4, method='lee', seed='random'), 'Discarding fixed arguments.*', msg('a warning is thrown'))
    checkTrue( isNMFfit(res), msg('result is an NMFfit object' ))
    checkIdentical(algorithm(res), 'nsNMF', msg('result was still computed using the correct algorithm defined in nmfWrapper') )
}
|
{"hexsha": "d393e2ce559b5da3957050af16034fe19dee8020", "size": 5168, "ext": "r", "lang": "R", "max_stars_repo_path": "packrat/lib/x86_64-pc-linux-gnu/3.2.5/NMF/tests/runit.utils.r", "max_stars_repo_name": "Chicago-R-User-Group/2017-n3-Meetup-RStudio", "max_stars_repo_head_hexsha": "71a3204412c7573af2d233208147780d313430af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "packrat/lib/x86_64-pc-linux-gnu/3.2.5/NMF/tests/runit.utils.r", "max_issues_repo_name": "Chicago-R-User-Group/2017-n3-Meetup-RStudio", "max_issues_repo_head_hexsha": "71a3204412c7573af2d233208147780d313430af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-12T14:06:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-10T23:26:27.000Z", "max_forks_repo_path": "packrat/lib/x86_64-pc-linux-gnu/3.2.5/NMF/tests/runit.utils.r", "max_forks_repo_name": "Chicago-R-User-Group/2017-n3-Meetup-RStudio", "max_forks_repo_head_hexsha": "71a3204412c7573af2d233208147780d313430af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.9818181818, "max_line_length": 137, "alphanum_fraction": 0.6900154799, "num_tokens": 1530}
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import logging
import pickle
import numpy as np
# from allennlp.commands.elmo import ElmoEmbedder
from nlp_architect.common.cdc.mention_data import MentionDataLight
from nlp_architect.utils.embedding import ELMoEmbedderTFHUB
logger = logging.getLogger(__name__)
class ElmoEmbedding(object):
    """On-line ELMo embedder backed by the TF-Hub ELMo module.

    Embeddings are computed on demand and memoized per sentence string in
    ``self.cache``, so repeated requests for the same sentence are embedded
    only once.
    """

    def __init__(self):
        logger.info('Loading Elmo Embedding module')
        self.embeder = ELMoEmbedderTFHUB()
        # sentence string -> averaged ELMo vector
        self.cache = dict()
        # Fixed typo in log message ("lead" -> "loaded").
        logger.info('Elmo Embedding module loaded successfully')

    def get_feature_vector(self, mention: MentionDataLight):
        """Return the averaged ELMo vector for *mention*.

        Prefers the mention's full context when it is non-empty, otherwise
        falls back to the mention token string.
        """
        if mention.mention_context:
            sentence = mention.mention_context
        else:
            sentence = mention.tokens_str
        return self.apply_get_from_cache(sentence)

    def apply_get_from_cache(self, sentence):
        """Return the cached embedding for *sentence*, computing it on a miss."""
        if sentence in self.cache:
            elmo_avg = self.cache[sentence]
        else:
            # Tokenize on whitespace before embedding.
            elmo_avg = self.get_elmo_avg(sentence.split())
            self.cache[sentence] = elmo_avg
        return elmo_avg

    def get_avrg_feature_vector(self, tokens_str):
        """Return the averaged ELMo vector for *tokens_str*, or None for None input."""
        if tokens_str is not None:
            return self.apply_get_from_cache(tokens_str)
        return None

    def get_elmo_avg(self, sentence):
        """Embed a tokenized *sentence* (list of tokens) and average over axis 0."""
        sentence_embedding = self.embeder.get_vector(sentence)
        return np.mean(sentence_embedding, axis=0)
class ElmoEmbeddingOffline(object):
    """Offline ELMo embedder reading precomputed vectors from a pickle dump.

    The dump is loaded once at construction; lookups for strings missing
    from the dump return None.
    """

    def __init__(self, dump_file):
        logger.info('Loading Elmo Offline Embedding module')
        # NOTE(review): pickle.load executes arbitrary code on malicious
        # input -- only load dumps from trusted sources.
        with open(dump_file, 'rb') as dump_fh:  # renamed from misleading 'out'
            self.embeder = pickle.load(dump_fh)
        # Fixed typo in log message ("lead" -> "loaded").
        logger.info('Elmo Offline Embedding module loaded successfully')

    def get_feature_vector(self, mention: MentionDataLight):
        """Return the precomputed vector for the mention's token string, or None."""
        embed = None
        ment_str = mention.tokens_str
        if ment_str in self.embeder:
            embed = self.embeder[ment_str]
        return embed

    def get_avrg_feature_vector(self, tokens_str):
        """Return the precomputed vector for *tokens_str*, or None if absent."""
        embed = None
        if tokens_str in self.embeder:
            embed = self.embeder[tokens_str]
        return embed
|
{"hexsha": "8bc717b875565a49b08f5346c4077b04d6a70e03", "size": 3491, "ext": "py", "lang": "Python", "max_stars_repo_path": "nlp_architect/data/cdc_resources/embedding/embed_elmo.py", "max_stars_repo_name": "maheshwarigagan/nlp-architect", "max_stars_repo_head_hexsha": "f6466edfd3ec6fe7d3682ec54306a1c65980d288", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nlp_architect/data/cdc_resources/embedding/embed_elmo.py", "max_issues_repo_name": "maheshwarigagan/nlp-architect", "max_issues_repo_head_hexsha": "f6466edfd3ec6fe7d3682ec54306a1c65980d288", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nlp_architect/data/cdc_resources/embedding/embed_elmo.py", "max_forks_repo_name": "maheshwarigagan/nlp-architect", "max_forks_repo_head_hexsha": "f6466edfd3ec6fe7d3682ec54306a1c65980d288", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-02-21T08:41:04.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-21T08:53:11.000Z", "avg_line_length": 36.7473684211, "max_line_length": 89, "alphanum_fraction": 0.6614150673, "include": true, "reason": "import numpy", "num_tokens": 750}
|
import investor_simulator as invsim
import os
import random  # FIX: random.gauss was used below without this import (NameError)
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker  # FIX: ticker.MultipleLocator was used without this import
from matplotlib.dates import DateFormatter
from datetime import datetime
# Handle date time conversions between pandas and matplotlib
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()

# set dates and budget
start_date = datetime(2016, 9, 1)
end_date = datetime(2021, 1, 1)

# Build 500 portfolios per investor profile; each investor starts with a
# budget drawn from N(20000, 5000), floored at zero.
defensive_group = [invsim.Portfolio(invsim.Investor(invsim.defensive, max(random.gauss(20000, 5000), 0)),
                                    start_date=start_date, end_date=end_date) for _ in range(500)]
aggressive_group = [invsim.Portfolio(invsim.Investor(invsim.aggressive, max(random.gauss(20000, 5000), 0)),
                                     start_date=start_date, end_date=end_date) for _ in range(500)]
mixed_group = [invsim.Portfolio(invsim.Investor(invsim.mixed, max(random.gauss(20000, 5000), 0)),
                                start_date=start_date, end_date=end_date) for _ in range(500)]
groups = [defensive_group, aggressive_group, mixed_group]
names = ['defensive', 'aggressive', 'mixed']

# Render the return/volatility summary table as an image (axes hidden).
results = invsim.return_and_vol_on_portfolios(groups, names)
fig, ax = plt.subplots(figsize=(8, 2))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
tabla = pd.plotting.table(ax, results, loc='upper right', colWidths=[0.2]*len(results.columns))
tabla.auto_set_font_size(False)
tabla.set_fontsize(13)
tabla.scale(1.2, 1.5)
plt.savefig('../Results/pb_portfolios_returns_and_vol.png', transparent=True)

# Mean monthly portfolio value per group.
means = invsim.mean_monthly_value_on_portfolios(groups, names)
sns.set_theme()
sns.set_context("paper", font_scale=1.5)
fig, ax = plt.subplots(figsize=(15, 10))
g = sns.lineplot(data=means.reset_index(), x='Date', y="Value", hue="Portfolio_group", linewidth=2.5)
g.set(xlabel='Years', ylabel='Price', title='Portfolio Value')
date_form = DateFormatter("%m-%Y")
ax.yaxis.set_major_locator(ticker.MultipleLocator(500))
ax.xaxis.set_major_formatter(date_form)
plt.savefig(os.path.abspath('../Results/pb_portfolios_monthly_price_plot.png'), dpi=800)

# Mean yearly return per group, restricted to full years from 2017 on.
results = invsim.mean_yearly_return_on_portfolios(groups, names)
sns.set_theme()
sns.set_context("paper", font_scale=1.5)
fig, ax = plt.subplots(figsize=(15, 10))
g = sns.barplot(data=results[results.Date >= '2017'].reset_index(), x='Date', y="Value",
                hue="Portfolio_group", linewidth=2.5)
g.set(xlabel='Years', ylabel='Annual return', title='Portfolio Returns')
labels = [2017, 2018, 2019, 2020]
x = np.arange(len(labels))
ax.set_xticks(x)
ax.set_xticklabels(labels)
plt.savefig(os.path.abspath('../Results/pb_portfolios_annual_return_plot.png'), dpi=800)

# What is the best stock.
return_on_stocks = pd.DataFrame([invsim.Stocks(x, start_date=datetime(2016, 12, 31),
                                               end_date=datetime(2017, 12, 31)).return_on_stock(datetime(2017, 12, 31))
                                 for x in invsim.tickers], index=invsim.tickers,
                                columns=['Return']).sort_values('Return', ascending=False)
sns.set_theme()
sns.set_context("paper", font_scale=1.5)
fig, ax = plt.subplots(figsize=(15, 10))
g = sns.barplot(data=return_on_stocks.reset_index(), x='Return', y="index",
                linewidth=2.5)
g.set(xlabel='Returns', ylabel='Stocks', title='Return on Stocks - 2017')
sns.despine(left=True, bottom=True)
plt.savefig(os.path.abspath('../Results/pb_2017_stocks_returns.png'), dpi=800)
|
{"hexsha": "b89c02337b7151aee7f608cfc0eb1814497204ea", "size": 3532, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/Simulation_bonus.py", "max_stars_repo_name": "caiomts/financial-programming", "max_stars_repo_head_hexsha": "ad23c091b6d7238e3dffdf748eedd0b8a2e41874", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-07T10:10:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-07T10:10:54.000Z", "max_issues_repo_path": "Code/Simulation_bonus.py", "max_issues_repo_name": "caiomts/financial-programming", "max_issues_repo_head_hexsha": "ad23c091b6d7238e3dffdf748eedd0b8a2e41874", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/Simulation_bonus.py", "max_forks_repo_name": "caiomts/financial-programming", "max_forks_repo_head_hexsha": "ad23c091b6d7238e3dffdf748eedd0b8a2e41874", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2444444444, "max_line_length": 107, "alphanum_fraction": 0.7163080408, "include": true, "reason": "import numpy", "num_tokens": 921}
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
from torch.optim.lr_scheduler import ExponentialLR
from probabilistic_forecast.utils.torch_utils import get_device
from probabilistic_forecast.utils.plot_utils import plot_training_curve, plot_regression, plot_classification
class LSTM_MC():
    """LSTM with Monte-Carlo dropout for probabilistic forecasting.

    Wraps the two-stage LSTM ``Network`` and provides training (optionally
    with FGSM-style adversarial perturbations) plus evaluation that draws
    ``n_samples`` stochastic forward passes to form a predictive
    distribution.  Two tasks are supported: ``regression`` (Gaussian NLL on
    a predicted mean/variance pair) and ``classification`` (BCE on a
    predicted probability).
    """

    def __init__(self, input_dim, output_dim, args):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Hidden sizes of the two stacked LSTM stages.
        self.net_arch = [128, 32]
        self.task = args.task
        self.device = get_device()
        self.network = Network(self.input_dim, self.output_dim, self.net_arch, self.task, self.device)
        self.network.to(self.device)
        if self.task == "regression":
            # full=True includes the constant term of the Gaussian NLL.
            self.criterion = torch.nn.GaussianNLLLoss(full=True, reduction='sum')
        elif self.task == "classification":
            self.criterion = nn.BCELoss(reduction='sum')

    def get_loss(self, output, target):
        """Compute the task loss.

        For regression, ``output`` is a (mean, variance) pair as returned by
        ``Network.forward``; for classification it is a probability tensor.
        """
        if self.task == "regression":
            return self.criterion(output[0], target, output[1])
        elif self.task == "classification":
            return self.criterion(output, target)

    def train(self, train_loader, n_epochs, batch_size, stats, pre_trained_dir, Nbatches, adversarial_training=True):
        """Train the network and save weights plus a training-curve plot.

        With ``adversarial_training`` enabled, each batch is revisited
        ``n_repeats`` times with an accumulated FGSM perturbation ``delta``
        added to the input (clamped to the training-data value range), and
        the epoch budget is divided by ``n_repeats`` to keep total work
        comparable.  ``Nbatches`` is accepted but not used here.
        """
        print('Training {} model {} adversarial training. Task: {}'.format(
            type(self).__name__, 'with' if adversarial_training else 'without', self.task))
        learning_rate = 1e-2
        weight_decay = 1e-3
        optimizer = torch.optim.Adam(self.network.parameters(), lr=learning_rate, weight_decay=weight_decay)
        lr_scheduler = ExponentialLR(optimizer, gamma=0.999)
        if adversarial_training:
            # Persistent perturbation buffer, one slot per sample position
            # in a (max-size) batch.
            delta = torch.zeros([batch_size, stats['historical_sequence_length'], self.input_dim]).to(self.device)
            X_train_max = torch.tensor(stats['X_train_max'])
            X_train_max = X_train_max.to(self.device)
            X_train_min = torch.tensor(stats['X_train_min'])
            X_train_min = X_train_min.to(self.device)
            # Perturbation budget / step scaled by the data magnitude;
            # classification uses a smaller budget than regression.
            if self.task == 'regression':
                clip_eps = 1.0 / stats['X_train_max'].max()
                fgsm_step = 1.0 / stats['X_train_max'].max()
            elif self.task == 'classification':
                clip_eps = 0.5 / stats['X_train_max'].max()
                fgsm_step = 0.5 / stats['X_train_max'].max()
            n_repeats = 4
            n_epochs = int(n_epochs / n_repeats)
        self.network.train()
        loss_history, lr_history = [], []
        for epoch in range(1, n_epochs+1):
            epoch_loss = []
            for _, (features, target) in enumerate(train_loader):
                features = features.to(self.device)
                target = target.to(self.device)
                if adversarial_training:
                    for _ in range(n_repeats):
                        # Slice delta to the actual batch size (last batch
                        # may be smaller).
                        delta_batch = delta[0:features.size(0)]
                        delta_batch.requires_grad = True
                        adv_features = features + delta_batch
                        # Keep adversarial inputs inside the observed data range.
                        adv_features.clamp_(X_train_min, X_train_max)
                        output = self.network(adv_features)
                        loss = self.get_loss(output, target)
                        loss.backward()
                        optimizer.step()
                        optimizer.zero_grad()
                        # FGSM update of the perturbation using the sign of
                        # its gradient, then clamp to the epsilon ball.
                        pert = fgsm_step * torch.sign(delta_batch.grad)
                        delta[0:features.size(0)] += pert.data
                        delta.clamp_(-clip_eps, clip_eps)
                        epoch_loss.append(loss.item())
                else:
                    output = self.network(features)
                    loss = self.get_loss(output, target)
                    loss.backward()
                    optimizer.step()
                    optimizer.zero_grad()
                    epoch_loss.append(loss.item())
            # Record the lr actually used this epoch, then decay it.
            lr_history.append(optimizer.param_groups[0]['lr'])
            lr_scheduler.step()
            loss_history.append(np.mean(epoch_loss))
            if epoch % 10 == 0:
                print("Epoch: {0:0.3g}, NLL: {1:0.3g}, lr: {2:0.3g}".format(epoch, loss_history[-1], lr_history[-1]), end='\r')
        pre_trained_dir = os.path.join(pre_trained_dir, type(self).__name__)
        os.makedirs(pre_trained_dir, exist_ok=True)
        model_save_name = pre_trained_dir + '/trained_network_' + self.task + ('_adv.pt' if adversarial_training else '.pt')
        fig_save_name = pre_trained_dir + '/training_curve_' + self.task + ('_adv.pdf' if adversarial_training else '.pdf')
        torch.save(self.network.state_dict(), model_save_name)
        plot_training_curve(loss_history, lr_history, fig_save_name)

    def evaluate(self, test_loader, n_samples, pre_trained_dir, adversarial_training=True):
        """Load saved weights and produce MC-dropout predictive statistics.

        The network's forward pass keeps dropout active (see ``Network``),
        so repeating the pass ``n_samples`` times yields stochastic samples.
        Regression returns (targets, mixture mean, mixture variance) where
        the mixture variance follows the law of total variance:
        E[var] + E[mean^2] - (E[mean])^2.  Classification returns
        (targets, per-sample probability array).
        """
        print('Evaluating a pretrained {} model {} adversarial training. Task: {}'.format(
            type(self).__name__, 'with' if adversarial_training else 'without', self.task))
        pre_trained_dir = os.path.join(pre_trained_dir, type(self).__name__)
        model_save_name = pre_trained_dir + '/trained_network_' + self.task + ('_adv.pt' if adversarial_training else '.pt')
        self.network.load_state_dict(torch.load(model_save_name))
        self.network.eval()
        if self.task == 'regression':
            samples_mean, samples_var = [], []
            for _ in range(n_samples):
                pred_mean_set, pred_var_set = [], []
                for _, (features, _) in enumerate(test_loader):
                    features = features.to(self.device)
                    pred_mean, pred_var = self.network(features)
                    pred_mean_set.append(pred_mean.detach().cpu().numpy())
                    pred_var_set.append(pred_var.detach().cpu().numpy())
                pred_mean_i, pred_var_i = np.concatenate(pred_mean_set, axis=0), np.concatenate(pred_var_set, axis=0)
                samples_mean.append(pred_mean_i)
                samples_var.append(pred_var_i)
            samples_mean = np.array(samples_mean)
            samples_var = np.array(samples_var)
            # Mixture-of-Gaussians moments across the MC-dropout samples.
            mixture_mean = np.mean(samples_mean, axis=0)
            mixture_var = np.mean(samples_var + np.square(samples_mean), axis=0) - np.square(mixture_mean)
            target_test = self.get_target_test(test_loader)
            return target_test, mixture_mean, mixture_var
        elif self.task == 'classification':
            samples = []
            for _ in range(n_samples):
                pred = []
                for _, (features, _) in enumerate(test_loader):
                    features = features.to(self.device)
                    output = self.network(features)
                    pred.append(output.detach().cpu().numpy())
                pred_i = np.concatenate(pred, axis=0)
                samples.append(pred_i)
            samples = np.array(samples)
            target_test = self.get_target_test(test_loader)
            return target_test, samples

    def get_target_test(self, test_loader):
        """Collect all targets from *test_loader* into one numpy array."""
        target_set = []
        for _, (_, target) in enumerate(test_loader):
            target_set.append(target.numpy())
        return np.concatenate(target_set, axis=0)
class Network(nn.Module):
    """Two-stage stacked-LSTM network with always-on dropout.

    ``F.dropout(..., training=True)`` in ``forward`` keeps dropout
    stochastic even after ``model.eval()``, which is what enables the
    Monte-Carlo dropout sampling performed by ``LSTM_MC.evaluate``.
    The regression head outputs a (mean, variance) pair; the
    classification head outputs a sigmoid probability.
    """

    def __init__(self, input_dim, output_dim, net_arch, task, device):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dropout_probability = 0.5
        self.task = task
        self.device = device
        self.hidden_size_1 = net_arch[0]
        self.hidden_size_2 = net_arch[1]
        # Each LSTM module is itself 2 layers deep.
        self.stacked_layers = 2
        self.lstm1 = nn.LSTM(self.input_dim,
                             self.hidden_size_1,
                             num_layers=self.stacked_layers,
                             batch_first=True)
        self.lstm2 = nn.LSTM(self.hidden_size_1,
                             self.hidden_size_2,
                             num_layers=self.stacked_layers,
                             batch_first=True)
        if self.task == 'regression':
            # Twice the output width: first half mean, second half
            # pre-softplus variance (split in forward()).
            self.fc = nn.Linear(self.hidden_size_2, 2*self.output_dim)
            self.Softplus = nn.Softplus()
        elif self.task == 'classification':
            self.fc = nn.Linear(self.hidden_size_2, self.output_dim)
            self.Sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Run the network on a (batch, seq_len, input_dim) tensor.

        Returns (mean, variance) for regression or a probability tensor
        for classification, computed from the last time step only.
        """
        batch_size, seq_len, _ = x.size()
        hidden = self.init_hidden1(batch_size)
        output, _ = self.lstm1(x, hidden)
        # training=True: dropout stays active in eval mode (MC dropout).
        output = F.dropout(output, p=self.dropout_probability, training=True)
        state = self.init_hidden2(batch_size)
        output, state = self.lstm2(output, state)
        output = F.dropout(output, p=self.dropout_probability, training=True)
        # Keep only the final time step of the sequence output.
        output = output[:, -1, :]
        out = self.fc(output)
        if self.task == 'regression':
            mean = out[:, :self.output_dim]
            # Softplus keeps the variance positive; the epsilon avoids exact
            # zeros that would be invalid for the Gaussian NLL.
            variance = self.Softplus(out[:, self.output_dim:]) + 1e-06
            return mean, variance
        elif self.task == 'classification':
            prob = self.Sigmoid(out)
            return prob

    def init_hidden1(self, batch_size):
        # Zero-initialized (h, c) pair for lstm1 on the configured device.
        hidden_state = Variable(torch.zeros(self.stacked_layers, batch_size, self.hidden_size_1)).to(self.device)
        cell_state = Variable(torch.zeros(self.stacked_layers, batch_size, self.hidden_size_1)).to(self.device)
        return hidden_state, cell_state

    def init_hidden2(self, batch_size):
        # Zero-initialized (h, c) pair for lstm2 on the configured device.
        hidden_state = Variable(torch.zeros(self.stacked_layers, batch_size, self.hidden_size_2)).to(self.device)
        cell_state = Variable(torch.zeros(self.stacked_layers, batch_size, self.hidden_size_2)).to(self.device)
        return hidden_state, cell_state
|
{"hexsha": "a8b18f8791797976a6ecc092baf95976ee4389ab", "size": 10017, "ext": "py", "lang": "Python", "max_stars_repo_path": "probabilistic_forecast/lstm_mc.py", "max_stars_repo_name": "Abdulmajid-Murad/deep_probabilistic_forecast", "max_stars_repo_head_hexsha": "399846381af4bb789021c9f63f121dd69fa0125d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-11-25T12:05:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T04:02:54.000Z", "max_issues_repo_path": "probabilistic_forecast/lstm_mc.py", "max_issues_repo_name": "Abdulmajid-Murad/deep_probabilistic_forecast", "max_issues_repo_head_hexsha": "399846381af4bb789021c9f63f121dd69fa0125d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "probabilistic_forecast/lstm_mc.py", "max_forks_repo_name": "Abdulmajid-Murad/deep_probabilistic_forecast", "max_forks_repo_head_hexsha": "399846381af4bb789021c9f63f121dd69fa0125d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.127753304, "max_line_length": 127, "alphanum_fraction": 0.5944893681, "include": true, "reason": "import numpy", "num_tokens": 2111}
|
import pandas as pd
import numpy as np
from os import listdir, makedirs
from os.path import join, exists
import gc
import argparse

# Extract per-patient LSTM input features for one imputation batch:
# imputed variable values, binary real-observation indicators derived from
# cumulative observation counts, and an endpoint-status-history feature.
# Output is one HDF5 file per batch with one key per patient.
parser = argparse.ArgumentParser()
parser.add_argument('-index_batch', type=int)
parser.add_argument('--data_split', default='temporal_5')
args = parser.parse_args()
index_batch = args.index_batch
data_split = args.data_split

data_version = '180918'
bern_path = '/cluster/work/grlab/clinical/Inselspital/DataReleases/01-19-2017/InselSpital/'
impute_path = join(bern_path, '5_imputed', 'imputed_'+data_version, 'reduced', data_split)
label_path = join(bern_path, '7_ml_input', data_version, 'reduced', data_split, 'AllLabels_0.0_8.0', 'y')
output_path = join(bern_path, '7_ml_input', 'lstm_%s'%data_version, 'reduced', data_split, 'all_signals', 'unnormalized')
ep_path = join(bern_path, '3a_endpoints', 'v6b', 'reduced')
if not exists(output_path):
    makedirs(output_path)

# Read zero rows just to obtain the column layout of the imputed tables.
df_tmp = pd.read_hdf(join(impute_path, listdir(impute_path)[0]), start=0, stop=0, mode='r')
info_cols = ['PatientID', 'AbsDatetime', 'RelDatetime']
variables = [col for col in df_tmp.columns if col not in info_cols and 'CUM' not in col and 'TIME_TO' not in col]
variables.sort()
# Sort the derived column groups by their variable-id prefix.
obs_cnt_cumsum = np.array([col for col in df_tmp.columns if 'CUM' in col])
obs_cnt_cumsum = obs_cnt_cumsum[np.argsort([col.split('_')[0] for col in obs_cnt_cumsum])]
obs_time_to = np.array([col for col in df_tmp.columns if 'TIME_TO' in col])
obs_time_to = obs_time_to[np.argsort([col.split('_')[0] for col in obs_time_to])]
non_pharma_vars = [col for col in variables if 'vm' in col]
pharma_vars = [col for col in variables if 'pm' in col]
print('# columns in imputed dataframe:', len(df_tmp.columns))
print('# columns for cumulative cnt of observations:', len(obs_cnt_cumsum))
# BUG FIX: this line previously printed len(obs_cnt_cumsum) again instead
# of the size of the TIME_TO column group.
print('# columns for time to last observations:', len(obs_time_to))
print('# columns for true observed values:', len(variables))
print('# non-pharma variables:', len(non_pharma_vars))
print('# pharma variables:', len(pharma_vars))
del df_tmp
gc.collect()

cnt_pid_no_data = 0
columns = np.concatenate((info_cols, variables, obs_cnt_cumsum))
# Rename cumulative-count columns to '<var>_obs'; after differencing below
# they become binary indicators of a real (non-imputed) observation.
rename_dict = {col: col.split('_')[0]+'_obs' for col in obs_cnt_cumsum}
obs_true = [col.split('_')[0]+'_obs' for col in obs_cnt_cumsum]
ep_files = listdir(ep_path)
df_impute = pd.read_hdf(join(impute_path, 'batch_%d.h5'%index_batch), columns=columns).rename(columns=rename_dict)
f_ep = [f for f in ep_files if '_%d_'%index_batch in f][0]
pids = df_impute.PatientID.unique()
print('# patients in batch %d:'%index_batch, len(pids))
for n, pid in enumerate(pids):
    # Skip patients without any valid prediction sample.
    df_label = pd.read_hdf(join(label_path, 'batch_%d.h5'%index_batch), str(pid))
    if len(df_label) == 0 or np.sum(df_label['SampleStatus_WorseStateFromZero0.0To8.0Hours']=='VALID') == 0:
        print('patient %s does not have valid data'%pid)
        cnt_pid_no_data += 1
        continue
    del df_label
    # Use real observation values as features
    df = df_impute[df_impute.PatientID==pid].copy()
    # Use features showing whether a variable value is imputed or real,
    # 0 for imputed value and 1 for real value (diff of cumulative counts).
    for col in obs_true:
        df.loc[df.index[1:], col] = np.diff(df[col])
        df[col] = (df[col].values > 0).astype(int)
    # Use the history of non-interpolated endpoint status as a feature,
    # coded 0 (other), 1 (event), 2 (probably not), 3 (maybe).
    df_ep = pd.read_hdf(join(ep_path, f_ep), where='PatientID=%d'%pid,
                        columns=['PatientID', 'Datetime', 'endpoint_status_nointerp'])
    df_ep['ep_category'] = 0
    idx_category_1 = df_ep.index[df_ep.endpoint_status_nointerp.isin(['event 1', 'event 2', 'event 3'])]
    idx_category_2 = df_ep.index[df_ep.endpoint_status_nointerp.isin(['probably not 1', 'probably not 2', 'probably not 3'])]
    idx_category_3 = df_ep.index[df_ep.endpoint_status_nointerp.isin(['maybe 1', 'maybe 2', 'maybe 3'])]
    df_ep.loc[idx_category_1, 'ep_category'] = 1
    df_ep.loc[idx_category_2, 'ep_category'] = 2
    df_ep.loc[idx_category_3, 'ep_category'] = 3
    # Align the absolute time of the endpoint dataframe with the imputed
    # dataframe (match the first imputed timestamp within 2.5 minutes).
    dt_ref = df.iloc[0].AbsDatetime
    idx_dt_ref_in_ep = np.where(np.abs(df_ep.Datetime - dt_ref) / np.timedelta64(1,'s') <= 2.5*60)[0][0]
    df_ep['AbsDatetime'] = (df_ep.Datetime - df_ep.iloc[idx_dt_ref_in_ep].Datetime) + dt_ref
    df_ep.drop(['PatientID', 'Datetime', 'endpoint_status_nointerp'], inplace=True, axis=1)
    # Merge the imputed dataframe with the endpoint feature dataframe
    df_ep.set_index('AbsDatetime', inplace=True)
    df.set_index('AbsDatetime', inplace=True)
    df = df.merge(df_ep, how='left', left_index=True, right_index=True)
    df.reset_index(inplace=True)
    # Fill in the nan values for endpoint features with 0
    df['ep_category'] = df.ep_category.fillna(0)
    # Check that there are no other nan values in the feature matrix
    assert(df.isnull().sum().sum()==0)
    # Output data to file
    df.to_hdf(join(output_path, 'batch_%d.h5'%index_batch), 'p'+str(pid),
              complib='blosc:lz4', complevel=5)
    print('patient %s written to disk.'%pid)
    gc.collect()
|
{"hexsha": "1603b805350c2810080360348310bd5dd221bb11", "size": 5115, "ext": "py", "lang": "Python", "max_stars_repo_path": "lstm/data_processing/signal_extraction.py", "max_stars_repo_name": "ratschlab/circEWS", "max_stars_repo_head_hexsha": "b2b1f00dac4f5d46856a2c7abe2ca4f12d4c612d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2020-03-17T16:42:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T15:53:24.000Z", "max_issues_repo_path": "lstm/data_processing/signal_extraction.py", "max_issues_repo_name": "ranxiao/circEWS", "max_issues_repo_head_hexsha": "1e52880c268f8f763bbc16763131634ffc217153", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-07-30T22:37:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-10T00:02:30.000Z", "max_forks_repo_path": "lstm/data_processing/signal_extraction.py", "max_forks_repo_name": "ranxiao/circEWS", "max_forks_repo_head_hexsha": "1e52880c268f8f763bbc16763131634ffc217153", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-04-22T01:13:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-27T20:23:41.000Z", "avg_line_length": 44.0948275862, "max_line_length": 125, "alphanum_fraction": 0.7106549365, "include": true, "reason": "import numpy", "num_tokens": 1445}
|
import pytest
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from flowket.callbacks.exact import ExactLocalEnergy
from flowket.callbacks.monte_carlo import LocalEnergyStats
from flowket.evaluation import evaluate, exact_evaluate
from flowket.operators import Heisenberg, NetketOperatorWrapper
from flowket.optimization import ExactVariational, VariationalMonteCarlo, loss_for_energy_minimization
from flowket.samplers import ExactSampler, Sampler
from .simple_models import complex_values_linear_1d_model, real_values_1d_model
# Shared TF (v1-style) default graph; every test re-enters it so all ops
# live in one graph.
DEFAULT_TF_GRAPH = tf.get_default_graph()
# 7-site periodic Heisenberg chain reused by the parametrized tests below.
ONE_DIM_OPERATOR = Heisenberg(hilbert_state_shape=[7], pbc=True)
def test_monte_carlo_update_unbalanced_local_energy():
    """The unbalanced-local-connections code path must agree with the
    balanced path on a hand-built example in which some (connection,
    sample) pairs are masked out via ``all_use_conn``."""
    with DEFAULT_TF_GRAPH.as_default():
        model = complex_values_linear_1d_model()
        # Fixed batch of three spin configurations of length 7.
        sample = np.array([[1, 1, 1, -1, -1, -1, -1],
                           [1, 1, 1, -1, 1, -1, -1],
                           [1, -1, 1, 1, -1, -1, -1]])
        # 5 candidate connected configurations per sample; the first
        # candidate is set to the sample itself.
        local_connections = np.random.choice([-1, 1], size=(5, 3, 7))
        local_connections[0, ...] = sample
        # Complex Hamiltonian matrix elements (transposed to connections-first).
        hamiltonian_values = np.array([[2.0, 7j + 8, 0.0, 0.0, 3],
                                       [0.0, 0.0, 0.0, 0.0, -1.0],
                                       [5.0, 3j, 0.0, -2, 9]]).T
        # Mask of which connections actually contribute per sample.
        all_use_conn = np.array([[True, True, False, False, True],
                                 [True, False, False, False, True],
                                 [True, True, False, True, True]]).T

        class SimpleSampler(Sampler):
            # Deterministic sampler that always yields the fixed batch above.
            def __init__(self):
                super(SimpleSampler, self).__init__((7,), 3)

            def __next__(self):
                return sample

        variational_monte_carlo = VariationalMonteCarlo(model, Heisenberg(hilbert_state_shape=(7, )), SimpleSampler())
        unbalanced_local_energy = np.mean(variational_monte_carlo.energy_observable.local_values_optimized_for_unbalanced_local_connections(
            variational_monte_carlo.wave_function, local_connections, hamiltonian_values, all_use_conn))
        balanced_local_energy = np.mean(variational_monte_carlo.energy_observable.local_values_optimized_for_balanced_local_connections(
            variational_monte_carlo.wave_function, local_connections, hamiltonian_values))
        assert np.allclose(balanced_local_energy, unbalanced_local_energy)
@pytest.mark.parametrize('model_builder, operator, batch_size, num_of_mc_iterations', [
    (real_values_1d_model, ONE_DIM_OPERATOR, 2 ** 10, 1000),
    (complex_values_linear_1d_model, ONE_DIM_OPERATOR, 2 ** 10, 1000),
])
def test_exact_and_monte_carlo_agree(model_builder, operator, batch_size, num_of_mc_iterations):
    """The Monte-Carlo energy estimate must agree with the exact summation
    within the Monte-Carlo standard error of the mean."""
    with DEFAULT_TF_GRAPH.as_default():
        model = model_builder()
        exact_variational = ExactVariational(model, operator, batch_size)
        # Train briefly first so the local-energy variance (and thus the MC
        # error bar used as the tolerance) is small.
        reduce_variance(exact_variational, model)
        sampler = ExactSampler(exact_variational, batch_size)
        variational_monte_carlo = VariationalMonteCarlo(model, operator, sampler)
        exact_logs = exact_evaluate(exact_variational,
                                    [ExactLocalEnergy(exact_variational)])
        exact_energy = exact_logs['energy/energy']
        monte_carlo_energy = evaluate(variational_monte_carlo, num_of_mc_iterations,
                                      [LocalEnergyStats(variational_monte_carlo)])['energy/energy']
        # Standard error of the mean over batch_size * iterations samples.
        monte_carlo_std = np.sqrt(exact_logs['energy/local_energy_variance'] / (batch_size * num_of_mc_iterations))
        assert monte_carlo_energy == pytest.approx(exact_energy, monte_carlo_std)
def reduce_variance(exact_variational, model):
    """Briefly fit *model* on the exact-variational generator.

    Used by the agreement tests to shrink the local-energy variance before
    comparing exact and Monte-Carlo estimates.
    """
    adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999)
    model.compile(optimizer=adam, loss=loss_for_energy_minimization)
    generator = exact_variational.to_generator()
    model.fit_generator(generator, steps_per_epoch=1000, epochs=1,
                        max_queue_size=0, workers=0)
def test_monte_carlo_and_netket_agree(netket):
    """FlowKet's exact energy must match netket's sampled estimate.

    Builds the same machine in both frameworks (a fully connected net on a
    7-site 1-D Heisenberg chain), copies the FlowKet weights into the
    netket machine, then compares netket's averaged VMC energy against
    FlowKet's exact evaluation within one estimated standard error.
    """
    input_size = 7
    batch_size = 1000
    num_of_mc_iterations = 1000
    g = netket.graph.Hypercube(length=input_size, n_dim=1)
    hi = netket.hilbert.Spin(s=0.5, graph=g)
    ha = netket.operator.Heisenberg(hilbert=hi)
    layers = (
        netket.layer.FullyConnected(
            input_size=input_size,
            output_size=1),
    )
    ma = netket.machine.FFNN(hi, layers)
    sa = netket.sampler.ExactSampler(machine=ma)
    # Zero learning rate: the VMC loop below only samples; the parameters
    # never move, so the energy estimate is for the fixed initial weights.
    op = netket.optimizer.Sgd(learning_rate=0.00)
    flowket_model = complex_values_linear_1d_model()
    exact_variational = ExactVariational(flowket_model,
        NetketOperatorWrapper(ha, (input_size,)), batch_size)
    exact_logs = exact_evaluate(exact_variational,
        [ExactLocalEnergy(exact_variational)])
    real_weights, imag_weights = flowket_model.get_weights()
    # NOTE(review): weights are transferred as real - 1j*imag; presumably
    # this matches netket's sign/conjugation convention -- confirm.
    ma.parameters = (real_weights + imag_weights * -1j).flatten()
    gs = netket.variational.Vmc(
        hamiltonian=ha,
        sampler=sa,
        optimizer=op,
        method='Gd',
        n_samples=batch_size,
        diag_shift=0.01)
    netket_energy = np.zeros((num_of_mc_iterations,))
    for i in range(num_of_mc_iterations):
        gs.advance(1)
        netket_energy[i] = gs.get_observable_stats()['Energy']['Mean']
    netket_energy_mean = np.mean(netket_energy)
    exact_energy = exact_logs['energy/energy']
    # Standard error of the mean over batch_size * iterations samples.
    monte_carlo_std = np.sqrt(exact_logs['energy/local_energy_variance'] / (batch_size * num_of_mc_iterations))
    assert netket_energy_mean == pytest.approx(exact_energy, monte_carlo_std)
|
{"hexsha": "b78b499c69dccf3702024011451e3fdd24f47744", "size": 5578, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_variational.py", "max_stars_repo_name": "vigsterkr/FlowKet", "max_stars_repo_head_hexsha": "0d8f301b5f51a1bab83021f10f65cfb5f2751079", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2019-11-19T13:59:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T10:26:30.000Z", "max_issues_repo_path": "tests/test_variational.py", "max_issues_repo_name": "HUJI-Deep/PyKet", "max_issues_repo_head_hexsha": "61238afd3fe1488d35c57d280675f544c559bd01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-11-15T12:07:28.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-07T18:12:18.000Z", "max_forks_repo_path": "tests/test_variational.py", "max_forks_repo_name": "HUJI-Deep/PyKet", "max_forks_repo_head_hexsha": "61238afd3fe1488d35c57d280675f544c559bd01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-12-09T22:51:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T22:05:41.000Z", "avg_line_length": 48.5043478261, "max_line_length": 140, "alphanum_fraction": 0.6835783435, "include": true, "reason": "import numpy", "num_tokens": 1401}
|
"""Created on 25 mars 2019.
@author: Aurele Durand
"""
import datetime, decimal
import pandas as pd
import numpy as np
from _collections_abc import dict_keys
from flask.json import JSONEncoder
from sqlalchemy.exc import OperationalError
class AlphaJSONEncoder(JSONEncoder):
    """Flask JSON encoder with conversion rules for extra types.

    Handles numpy scalars, pandas DataFrames, datetimes/timedeltas,
    Decimal, bytes, dict-keys views, classes, and any object exposing a
    ``to_json()`` method; anything iterable falls back to ``list(...)``.
    """

    # Type -> converter mapping.  NOTE: this is a class-level dict, so the
    # registrations below are shared by every encoder instance
    # (re-registering on each __init__ is harmless).
    rules = {}

    def __init__(self, *args, **kwargs):
        super(AlphaJSONEncoder, self).__init__(*args, **kwargs)
        self.rules[np.int64] = int
        # BUGFIX: was `lambda o: o is True`, an identity test against the
        # builtin True singleton.  A numpy bool_ is never that object, so
        # every numpy boolean serialized as False.  Use bool() instead.
        self.rules[np.bool_] = bool
        self.rules[datetime.datetime] = lambda o: str(o.strftime("%Y-%m-%dT%H:%M:%S")) if 'T' in str(o) else str(o.strftime("%Y-%m-%d %H:%M:%S"))
        self.rules[pd.DataFrame] = lambda o: o.to_json(orient='index')
        self.rules[bytes] = lambda o: o.decode('utf-8')
        self.rules[dict_keys] = lambda o: list(o)
        self.rules[datetime.timedelta] = lambda o: str(o)
        self.rules[decimal.Decimal] = lambda o: str(o)
        self.rules[type] = lambda o: str(o)

    def default(self, o):  # pylint: disable=E0202
        """Fallback serializer invoked for types json cannot handle.

        Tries, in order: the object's own to_json(), a registered rule,
        then generic iteration; otherwise defers to JSONEncoder.default.
        Returns None when a to_json() call itself fails (best-effort).
        """
        try:
            if hasattr(o, 'to_json'):
                try:
                    return o.to_json()
                except Exception as ex:
                    # Deliberate best-effort: report and serialize as null.
                    print(ex)
                    return None
            for key_type, fct in self.rules.items():
                if isinstance(o, key_type):
                    return fct(o)
            # No rule matched: attempt generic iteration (sets, generators...).
            iterable = iter(o)
        except TypeError as ex:
            print('Cannot convert %s: %s' % (o, ex))
        else:
            return list(iterable)
        return JSONEncoder.default(self, o=o)
"""results_json = {}
if hasattr(model,"schema"):
schema = model.get_schema()
structures = schema(many=True) if not first else schema()
results_json = structures.dump(results)
else:
self.log.error('Missing schema for model <%s>'%str(model.__name__))"""
|
{"hexsha": "9a7fc7c87c940cadec790a63805b1cb3a1c432db", "size": 2255, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/json/_converters.py", "max_stars_repo_name": "ZAurele/alpha-py", "max_stars_repo_head_hexsha": "b6330f1e714d07a2010ebe500d5ccdf4cc637998", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/json/_converters.py", "max_issues_repo_name": "ZAurele/alpha-py", "max_issues_repo_head_hexsha": "b6330f1e714d07a2010ebe500d5ccdf4cc637998", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/json/_converters.py", "max_forks_repo_name": "ZAurele/alpha-py", "max_forks_repo_head_hexsha": "b6330f1e714d07a2010ebe500d5ccdf4cc637998", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7936507937, "max_line_length": 145, "alphanum_fraction": 0.5711751663, "include": true, "reason": "import numpy", "num_tokens": 530}
|
[STATEMENT]
lemma ListAif1: "bval b s \<Longrightarrow> preList upds (IF b THEN C1 ELSE C2) l s = preList upds C1 l s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bval b s \<Longrightarrow> preList upds (IF b THEN C1 ELSE C2) l s = preList upds C1 l s
[PROOF STEP]
apply(induct upds)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. bval b s \<Longrightarrow> preList [] (IF b THEN C1 ELSE C2) l s = preList [] C1 l s
2. \<And>a upds. \<lbrakk>bval b s \<Longrightarrow> preList upds (IF b THEN C1 ELSE C2) l s = preList upds C1 l s; bval b s\<rbrakk> \<Longrightarrow> preList (a # upds) (IF b THEN C1 ELSE C2) l s = preList (a # upds) C1 l s
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a upds. \<lbrakk>bval b s \<Longrightarrow> preList upds (IF b THEN C1 ELSE C2) l s = preList upds C1 l s; bval b s\<rbrakk> \<Longrightarrow> preList (a # upds) (IF b THEN C1 ELSE C2) l s = preList (a # upds) C1 l s
[PROOF STEP]
by force
|
{"llama_tokens": 404, "file": "Hoare_Time_Nielson_VCG", "length": 3}
|
# ======================================================================
# Copyright (c) 2010, G. Fiori, University of Pisa
#
# This file is released under the BSD license.
# See the file "license.txt" for information on usage and
# redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
# ======================================================================
from numpy import *
import os
import sys
if sys.version > '3':
import subprocess;
else:
import subprocess
def _gnuplot_show(title):
    # Display the temporary file "newplot" with gnuplot's splot and block
    # until the user presses Enter in the terminal.
    fp = os.popen("gnuplot", "w")
    fp.write("set title '%s' \n" % title)
    fp.write("set mouse \n")
    fp.write("splot 'newplot' w l \n")
    fp.flush()
    # BUGFIX: was eval(input(...)) -- a 2to3 conversion artifact.  Eval'ing
    # raw keyboard input is unsafe and raises SyntaxError on an empty line.
    input("waiting ...... Press any key to continue")
    fp.close()

def section(slicedir, quantity, coordslice, grid):
    """Plot a 2-D cut of the 3-D field `quantity` with gnuplot.

    Parameters
    ----------
    slicedir : str
        "x", "y" or "z": the axis normal to the section plane.
    quantity : 1-D array
        Field values indexed as i + j*nx + k*nx*ny (x fastest).
    coordslice : float
        Coordinate along `slicedir`; the closest grid plane is plotted.
    grid : object
        Must provide gridx/gridy/gridz coordinate arrays and nx/ny/nz.

    Writes the section to the temporary file "newplot" and blocks until
    the user acknowledges the gnuplot window.
    """
    if slicedir == "x":
        # Index of the grid plane closest to the requested coordinate.
        # NOTE(review): only the x branch adds +1 here -- confirm intended.
        index = nonzero(abs(grid.gridx - coordslice) ==
                        min(abs(grid.gridx - coordslice)))[0][0] + 1
        # The x branch re-sorts a possibly unordered grid before writing.
        swapx = argsort(grid.gridx)
        swapy = argsort(grid.gridy)
        swapz = argsort(grid.gridz)
        swapx3D = meshgrid(meshgrid(swapx, swapy)[0].flatten(), swapz)[0].flatten()
        swapy3D = meshgrid(meshgrid(swapx, swapy)[1].flatten(), swapz)[0].flatten()
        swapz3D = meshgrid(meshgrid(swapx, swapy)[1].flatten(), swapz)[1].flatten()
        indexsort = swapx3D + swapy3D * grid.nx + swapz3D * grid.nx * grid.ny
        quantitys = quantity[indexsort]
        fp = open("newplot", "w")
        for k in range(0, grid.nz):
            for j in range(0, grid.ny):
                ix = index + j * grid.nx + k * grid.nx * grid.ny
                fp.write("%s %s %s \n" % (grid.gridy[swapy[j]], grid.gridz[swapz[k]],
                                          quantitys[ix]))
            fp.write("\n")
        fp.close()
        _gnuplot_show("SECTIONX")
    elif slicedir == "y":
        # Index of the grid plane closest to the requested coordinate.
        index = nonzero(abs(grid.gridy - coordslice) ==
                        min(abs(grid.gridy - coordslice)))[0][0]
        fp = open("newplot", "w")
        for k in range(0, grid.nz):
            for i in range(0, grid.nx):
                ix = i + index * grid.nx + k * grid.nx * grid.ny
                fp.write("%s %s %s \n" % (grid.gridx[i], grid.gridz[k],
                                          quantity[ix]))
            fp.write("\n")
        fp.close()
        _gnuplot_show("SECTIONY")
    elif slicedir == "z":
        # Index of the grid plane closest to the requested coordinate.
        index = nonzero(abs(grid.gridz - coordslice) ==
                        min(abs(grid.gridz - coordslice)))[0][0]
        fp = open("newplot", "w")
        for j in range(0, grid.ny):
            for i in range(0, grid.nx):
                ix = i + j * grid.nx + index * grid.nx * grid.ny
                fp.write("%s %s %s \n" % (grid.gridx[i], grid.gridy[j],
                                          quantity[ix]))
            fp.write("\n")
        fp.close()
        _gnuplot_show("SECTIONZ")
    return
def plot2D(quantity, grid, filename):
    """Write a 2-D field to `filename` in gnuplot splot format and show it.

    `quantity` is indexed as i + j*nx; blocks until the user presses
    Enter in the terminal.
    """
    fp = open(filename, "w")
    for j in range(0, grid.ny):
        for i in range(0, grid.nx):
            fp.write("%s %s %s \n" % (grid.gridx[i], grid.gridy[j],
                                      quantity[i + j * grid.nx]))
        # Blank line separates scanlines for gnuplot's grid format.
        fp.write("\n")
    fp.close()
    fp = os.popen("gnuplot", "w")
    fp.write("set mouse \n")
    string = "splot '%s' w l \n" % filename
    print(string)
    fp.write(string)
    fp.flush()
    # BUGFIX: was eval(input(...)) -- a 2to3 conversion artifact.  Eval'ing
    # raw keyboard input is unsafe and raises SyntaxError on an empty line.
    input("waiting ...... Press any key to continue")
    fp.close()
|
{"hexsha": "e7ed367b26169c48713f315052f85ad6fa10092f", "size": 4370, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/section.py", "max_stars_repo_name": "aravindhk/Vides", "max_stars_repo_head_hexsha": "65d9ea9764ddf5f6ef40e869bd31387d0e3e378f", "max_stars_repo_licenses": ["BSD-4-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-03T17:24:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T06:06:50.000Z", "max_issues_repo_path": "src/section.py", "max_issues_repo_name": "aravindhk/Vides", "max_issues_repo_head_hexsha": "65d9ea9764ddf5f6ef40e869bd31387d0e3e378f", "max_issues_repo_licenses": ["BSD-4-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/section.py", "max_forks_repo_name": "aravindhk/Vides", "max_forks_repo_head_hexsha": "65d9ea9764ddf5f6ef40e869bd31387d0e3e378f", "max_forks_repo_licenses": ["BSD-4-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5284552846, "max_line_length": 84, "alphanum_fraction": 0.5100686499, "include": true, "reason": "from numpy", "num_tokens": 1149}
|
import numpy as np
import random
from rl.core import Env
class MultiInputTestEnv(Env):
    """Dummy environment emitting random observations of a fixed shape.

    When ``observation_shape`` is a list of shapes, every observation is
    a list with one random array per shape; otherwise it is a single
    random array.  Rewards and termination flags are random too -- the
    class exists purely to exercise multi-input agents in tests.
    """

    def __init__(self, observation_shape):
        self.observation_shape = observation_shape

    def step(self, action):
        reward = random.choice([0, 1])
        done = random.choice([True, False])
        return self._get_obs(), reward, done, {}

    def reset(self):
        return self._get_obs()

    def _get_obs(self):
        shape = self.observation_shape
        # A plain list (exactly ``list``) signals multiple observation inputs.
        if type(shape) is list:
            return [np.random.random(s) for s in shape]
        return np.random.random(shape)

    def __del__(self):
        pass
|
{"hexsha": "5d8cee49375c0a894cb8c20f0732e6ec340536ae", "size": 612, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/rl/util.py", "max_stars_repo_name": "stefanbschneider/keras-rl", "max_stars_repo_head_hexsha": "216c3145f3dc4d17877be26ca2185ce7db462bad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3350, "max_stars_repo_stars_event_min_datetime": "2018-03-07T09:46:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T11:25:35.000Z", "max_issues_repo_path": "tests/rl/util.py", "max_issues_repo_name": "stefanbschneider/keras-rl", "max_issues_repo_head_hexsha": "216c3145f3dc4d17877be26ca2185ce7db462bad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 223, "max_issues_repo_issues_event_min_datetime": "2018-03-11T00:07:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T13:26:01.000Z", "max_forks_repo_path": "tests/rl/util.py", "max_forks_repo_name": "stefanbschneider/keras-rl", "max_forks_repo_head_hexsha": "216c3145f3dc4d17877be26ca2185ce7db462bad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1007, "max_forks_repo_forks_event_min_datetime": "2018-03-08T11:26:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T05:19:34.000Z", "avg_line_length": 24.48, "max_line_length": 87, "alphanum_fraction": 0.6535947712, "include": true, "reason": "import numpy", "num_tokens": 140}
|
###### Content provided under a Creative Commons Attribution license, CC-BY 4.0; code under MIT License. (c)2014 [David I. Ketcheson](http://davidketcheson.info)
# An illustrated guide to limiters
## Or: how to interpolate non-smooth data without creating wiggles
Many interesting wave phenomena -- like fluid dynamics, lasers, and water waves -- are described by nonlinear hyperbolic partial differential equations. The solutions of these problems are discontinuous. So-called **limiters** (sometimes referred to as *slope limiters* or *flux limiters*) are one of the key ingredients in approximating these discontinuous solutions.
##Table of contents
- [Motivation: interpolation and wiggles](#Interpolation-and-wiggles)
- [The simplest limiter: Minmod](#The-simplest-limiter:-Minmod)
- [Other TVD limiters](#TVD-limiters)
- [WENO](#Higher-order-interpolation:-WENO)
```python
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
matplotlib.rcParams.update({'font.size': 22})
```
```python
import mpld3 # Skip this cell if you don't have mpld3 installed
mpld3.enable_notebook() # or just go and do it now: pip install mpld3
# This allows you to zoom and pan the plots.
```
##Interpolation and wiggles
Suppose you're given a set of data samples:
```python
k = 5
x=np.arange(-k+1,k)
y=np.sin(x/2.)+1.
width = 12
size = (width,4)
plt.figure(figsize=size)
plt.plot(x,y,'or',markersize=10,alpha=0.5)
plt.axis( (-k, k, -0.1, 2.1) );
```
Now what you really want to know is, what is the state of the system at the points halfway between your samples? And to figure that out, you need to guess at what's going on in the times in-between those samples. The simplest approximation would be to assume that the system just jumps from one value to the next somewhere in-between:
```python
def piecewise_constant_interp(x, y, xx):
    """Nearest-neighbour (piecewise-constant) interpolation.

    For every query point in xx, return the y-value of the closest
    sample in x (ties go to the earlier sample).
    """
    # Pairwise distances: rows are query points, columns are samples.
    dist = np.abs(xx[:, None] - x[None, :])
    nearest = dist.argmin(axis=1)
    return y[nearest]
xx = np.linspace(-k+1,k-1,1000)
yy = piecewise_constant_interp(x,y,xx)
plt.figure(figsize=size)
plt.plot(xx,yy,'-k',lw=2)
# plt.hold(True) removed: deprecated in Matplotlib 2.0 and removed in 3.0;
# repeated plot calls overlay on the current axes by default.
plt.plot(x,y,'or',markersize=10,alpha=0.5)
plt.axis( (-k, k, -0.1, 2.1) );
plt.title('Piecewise-constant approximation',fontsize=20);
For this set of data, you don't really believe that's what's happening, do you? But our goal is to deal with systems that exhibit non-smooth (possibly discontinuous) behavior, so we need to at least admit the possibility of sudden jumps. That's why we won't simply "connect the dots" to get a continuous approximation.
Instead, we can try to approximate the slope around each of our sample points. The simplest way to do so is using finite differences. If we let $\sigma_i$ denote our approximation of the slope at $x_i$, then three common approximations are:
- Forward difference: $\sigma_i = \frac{y_{i+1}-y_i}{x_{i+1}-x_i}$
- Backward difference: $\sigma_i = \frac{y_i - y_{i-1}}{x_i - x_{i-1}}$
- Centered difference: $\sigma_i = \frac{y_{i+1}-y_{i-1}}{x_{i+1}-x_{i-1}}$
Here's what each of these approximations looks like for our data:
```python
def piecewise_linear_interp(x, y, xx, fd='centered'):
    """Piecewise-linear interpolation with finite-difference slopes.

    Around each sample the interpolant is a line through (x_i, y_i)
    whose slope comes from the chosen finite difference: 'centered',
    'forward' or 'backward'.  Slopes that cannot be formed at the ends
    of the data are left at zero.
    """
    dist = np.abs(xx[:, None] - x[None, :])
    nearest = np.argmin(dist, axis=1)
    slope = np.zeros_like(y)
    one_sided = np.diff(y) / np.diff(x)
    if fd == 'centered':
        slope[1:-1] = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
    elif fd == 'forward':
        slope[:-1] = one_sided
    elif fd == 'backward':
        slope[1:] = one_sided
    return y[nearest] + slope[nearest] * (xx - x[nearest])
def compare_fd(x, y, xx, axis=(-4, 4, -0.1, 2.1)):
    """Plot centered/forward/backward piecewise-linear interpolants.

    One stacked subplot per finite-difference choice; `axis` sets the
    shared view limits.  Relies on the notebook global `width`.
    """
    fig, ax = plt.subplots(3, 1, figsize=(width, 8))
    for i, fd in enumerate(('centered', 'forward', 'backward')):
        yy = piecewise_linear_interp(x, y, xx, fd=fd)
        ax[i].plot(xx, yy, '-k', lw=2)
        # ax[i].hold(True) removed: Axes.hold was deprecated in Matplotlib
        # 2.0 and removed in 3.0; overplotting is the default behaviour.
        ax[i].plot(x, y, 'or', markersize=10, alpha=0.5)
        ax[i].axis(axis)
        ax[i].text(.5, .9, fd,
                   horizontalalignment='center',
                   transform=ax[i].transAxes, fontsize=20)
```
I've used $\sigma=0$ for the points at the edges where we don't have enough data to compute the appropriate slope.
## The problem: overshoots!
Looking closely, you can see that each of these approximations adds little jumps (called "overshoots") in some region where the data itself was monotone. Worse still, each of them generates negative values, whereas the original values were non-negative! If our data represent concentrations or probabilities, then we have no way to make sense of negative values.
Things look even worse if we take data samples from a function that is in fact discontinuous:
```python
y = np.sin(x/2.)+1. + 2.*(x>0)
compare_fd(x,y,xx,axis=(-4,4,-0.5,4.8))
```
Now all three approaches have large, obvious overshoots. This becomes even more problematic when actually solving a hyperbolic PDE; see [Lesson 3 of my HyperPython course](http://nbviewer.ipython.org/github/ketch/HyperPython/blob/master/Lesson_03_High-resolution_methods.ipynb) for details.
Is there a better way?
## The simplest limiter: Minmod
We'd like to avoid those overshoots and ensure that monotone regions of the data give monotone interpolations. We can do that by choosing the slope $\sigma_i$ small enough that the interpolant near $x_i$ stays bounded between the neighboring averages $(y_{i-1}+y_i)/2$ and $(y_i+y_{i+1})/2$. There's an easy way to do that: just compute the forward and backward differences (like we did above), and then use *whichever is smaller* in absolute value. If $y_i$ is an extremum, then to avoid increasing the overall range of the data we always choose $\sigma_i=0$.
Here's what that looks like:
```python
def pw_minmod(x, y, xx):
    """Piecewise-linear interpolation with minmod-limited slopes.

    At each sample the slope is zero when the one-sided differences
    disagree in sign (a local extremum), otherwise the smaller of the
    two in magnitude -- so monotone data yield a monotone interpolant.
    """
    dist = np.abs(xx[:, None] - x[None, :])
    nearest = np.argmin(dist, axis=1)
    fwd = np.zeros_like(y)
    bwd = np.zeros_like(y)
    one_sided = (y[1:] - y[:-1]) / (x[1:] - x[:-1])
    fwd[:-1] = one_sided
    bwd[1:] = one_sided
    # Minmod: zero on sign disagreement, else the smaller magnitude.
    sigma = 0.5 * (np.sign(fwd) + np.sign(bwd)) * np.minimum(np.abs(fwd), np.abs(bwd))
    return y[nearest] + sigma[nearest] * (xx - x[nearest])
```
```python
yy = pw_minmod(x,y,xx)
plt.figure(figsize=size)
plt.plot(xx,yy,'-k',lw=2)
plt.hold(True)
plt.plot(x,y,'or',markersize=10,alpha=0.5)
plt.axis( (-4,4,-0.5,4.8) );
plt.title('Minmod approximation',fontsize=20);
```
Let's apply minmod to a monotone sequence of values, to illustrate the average-boundedness property:
```python
from matplotlib.patches import Rectangle
y = np.exp(x/3.)
yy = pw_minmod(x,y,xx)
plt.figure(figsize=(width,6))
plt.plot(xx,yy,'-k',lw=2)
plt.hold(True)
plt.plot(x,y,'or',markersize=10,alpha=0.5)
plt.axis( (-4,4,-0.1,4.1) );
plt.title('minmod approximation',fontsize=20);
for i in range(len(y)-1):
if 1<=i<len(y):
x_avgs = [(x[i]+x[i-1])/2.,(x[i]+x[i+1])/2.]
y_avgs = [(y[i]+y[i-1])/2.,(y[i]+y[i+1])/2.]
currentAxis = plt.gca()
currentAxis.add_patch(Rectangle((x_avgs[0], y_avgs[0]),
x_avgs[1]-x_avgs[0], y_avgs[1]-y_avgs[0],
facecolor="grey",alpha=0.2))
```
The grey regions show the average of each value and its neighbors; the Minmod interpolant is guaranteed to stay within these bounds, so that the right edge of one interpolated region doesn't "overshoot" the leftmost value of the next.
##Total variation
In fact, the minmod limiter guarantees an important mathematical property: it does not increase the total variation (TV) of the data. For discrete data, TV is defined as
$$ TV(y) = \sum_i |y_i - y_{i-1}|.$$
Essentially, TV is a measure of how much a function wiggles! This *total variation diminishing* (TVD) property of minmod is nice, because the true solutions of scalar hyperbolic PDEs have the same property. Because of this, in the 1970s-80s, researchers developed many other limiters that enforce the same property; they are referred to as
## TVD limiters
Just for fun, let's look at 3 more such limiters. If we define $F_i, B_i$ as forward- and backward-difference slope approximations at point $i$, and $\theta_i$ as the ratio of the two, then each of these limiters can be described in terms of a function $\phi(\theta)$. The slope to be used is given by
$$\sigma_i = \phi(\theta_i) B_i.$$
The limiter names and functions are:
- Monotonized centered difference (**MC**): $$\phi(\theta) = \max(0,\min((1+\theta)/2,2,\theta))$$
- **van Leer**: $$\phi(\theta) = \frac{\theta + |\theta|}{1+ |\theta|}$$
- **Superbee**: $$\phi(\theta) = \max(0,\min(1,2\theta),\min(2,\theta))$$
Here's what each of them looks like for our data:
```python
def phi(theta, limiter):
    """Evaluate the TVD limiter function phi(theta).

    `theta` is the ratio of forward to backward slope; `limiter` names
    one of 'minmod', 'vanleer', 'MC' or 'superbee'.
    """
    if limiter == 'minmod':
        value = (1 + np.sign(theta)) / 2. * np.minimum(1, theta)
    elif limiter == 'vanleer':
        value = (theta + np.abs(theta)) / (1 + np.abs(theta))
    elif limiter == 'MC':
        value = np.maximum(0, np.minimum((1. + theta) / 2., np.minimum(2., theta)))
    elif limiter == 'superbee':
        value = np.maximum(0, np.maximum(np.minimum(1., 2 * theta), np.minimum(2., theta)))
    return value
def pw_limited(x,y,xx,limiter='minmod'):
    """From samples (x,y) generate a piecewise-linear function sampled at
    points xx, with slopes limited by the named TVD limiter
    ('minmod', 'vanleer', 'MC' or 'superbee')."""
    # Nearest sample index for every evaluation point.
    diff = np.abs(x.reshape(1,-1) - xx.reshape(-1,1))
    closest = np.argmin(diff,axis=1)
    forward = np.zeros_like(y)
    backward = np.zeros_like(y)
    theta = np.zeros_like(y)
    forward[:-1] = (y[1:]-y[:-1])/(x[1:]-x[:-1])
    backward[1:] = (y[1:]-y[:-1])/(x[1:]-x[:-1])
    # Slope ratio at interior points; boundary thetas stay 0 (zero slope).
    # NOTE(review): a zero backward difference divides by zero here -- the
    # notebook's data avoid that case; confirm before reusing elsewhere.
    theta[1:-1] = forward[1:-1]/backward[1:-1]
    sigma = phi(theta,limiter) * backward
    return y[closest] + sigma[closest]*(xx-x[closest])
```
```python
y = np.sin(x/2.)+1. + 2.*(x>0)
fig, ax = plt.subplots(4,1,figsize=(width,10))
for i, limiter in enumerate( ('minmod', 'vanleer','superbee','MC') ):
yy = pw_limited(x,y,xx,limiter=limiter)
ax[i].plot(xx,yy,'-k',lw=2)
ax[i].hold(True)
ax[i].plot(x,y,'or',markersize=10,alpha=0.5)
ax[i].axis( (-4,4,-0.1,4.4) );
ax[i].text(.8,.2,limiter,
horizontalalignment='center',
transform=ax[i].transAxes,fontsize=20)
```
Compare these with [the finite difference approximations above](#The-problem:-overshoots!).
If you look closely (or zoom in) you'll notice that -- except for minmod -- all the limiters *do* produce some overshoot near the discontinuity. What gives? Well, these limiters are used within a larger algorithm for solving hyperbolic PDEs, and it turns out that if the overshoots are small enough, they'll go away in a full step of the algorithm. These limiters produce "small enough" overshoots so that no oscillations appear in the PDE solution.
#Interactive comparison
In each region, these limiter take three data points and give back a linear interpolant. It's illuminating to compare their behavior on a single set of 3 points. Note that the interactive plot below doesn't work on nbviewer; you'll need to download and run the notebook yourself.
```python
from IPython.html.widgets import interact, FloatSlider, RadioButtons
from IPython.display import display
%matplotlib inline
```
```python
xx = np.linspace(-0.5,0.5)
def compare_limiters(y1, y3):
    """Plot the limited slope chosen by each TVD limiter on one stencil.

    The stencil is the three points (-1, y1), (0, 0), (1, y3); the shaded
    wedge spans the two one-sided slopes (the second-order-accurate
    range).  Relies on the notebook globals `xx` and `width`.
    Returns the matplotlib figure.
    """
    fig, ax = plt.subplots(figsize=(width, 4))
    x = np.array((-1., 0., 1.))
    y = np.array((y1, 0., y3))
    ax.set_xlim(-1.1, 1.1)
    ax.set_ylim(-1.1, 1.1)
    # theta = forward slope / backward slope at the middle point; guard
    # against a zero backward slope.
    if y1 == 0:
        theta = y3
    else:
        theta = y3 / (-y1)
    forward_slope = y3
    backward_slope = -y1
    # ax.hold(...) calls removed: Axes.hold was removed in Matplotlib 3.0;
    # overplotting is the default behaviour.
    plt.fill_between(xx, xx * forward_slope, xx * backward_slope,
                     color='k', alpha=0.2, zorder=0)
    for limiter in ('minmod', 'vanleer', 'superbee', 'MC'):
        sigma = phi(np.array(theta), limiter) * (-y1)
        ax.plot(xx, sigma * xx, alpha=0.5, lw=2)
    ax.legend(('minmod', 'vanleer', 'superbee', 'MC'), loc='best')
    ax.plot(x, y, 'ok', markersize=15, alpha=0.5)
    return fig
interact(compare_limiters,y1=FloatSlider(min=-1., max=1., step=0.1, value=-0.3,description='$y_{i-1}$',labelcolor='k'),#,orientation='vertical'),
y3=FloatSlider(min=-1., max=1., step=0.1, value=0.8,description='$y_{i+1}$'));#,orientation='vertical'));
```
The shaded region in the plot above shows the range of slopes that would give at least 2nd-order accuracy. Play with the sliders and answer the following questions:
- Which limiter usually chooses the flattest approximation? Does it always?
- Which limiter usually chooses the steepest approximation? Does it always?
- In which situations do all the limiters give the same slope? Why?
# Higher-order interpolation: WENO
If we want to get higher-order accuracy (in smooth regions), then we have to give up the TVD property -- at least, in the sense defined above. The most common approach for higher order non-oscillatory piecewise interpolation is known as weighted essentially non-oscillatory (WENO) interpolation.
WENO is a very effective technique for interpolating or reconstructing functions that contain discontinuities without introducing oscillations. We'll focus on 5th-order WENO interpolation, which is the most commonly used.
Let's generate some function values to interpolate:
```python
# Note: uses PyWENO v. 0.11.2
import sympy
from pyweno import symbolic
```
```python
import mpld3 # Skip this cell if you don't have mpld3 installed
mpld3.enable_notebook() # or just go and do it now: pip install mpld3
# This allows you to zoom and pan the plots.
matplotlib.rcParams.update({'font.size': 18})
colors = 'brg'
```
```python
weno_order = 5 # must be odd
# BUGFIX: use integer division -- on Python 3, (5+1)/2 is the float 3.0,
# and k is later used as a count/index (range(k), np.empty(k)), which
# raises TypeError for floats.
k = (weno_order+1)//2
size = (width,4)
plt.figure(figsize=size)
x=np.arange(-k+1,k)
y=np.random.rand(len(x))
#y = np.array((1.,1.,1.,0.,0.))
plt.plot(x,y,'ok')
plt.axis((-(k-.5),k-.5,-0.5,2.1));
```
```python
def stencil_interpolant(x, y, n, offset):
    """Degree-(n-1) polynomial through the n points starting at `offset`.

    Fits (x_j, y_j) for offset <= j <= offset+n-1 and returns the result
    as a callable np.poly1d.
    """
    xs = x[offset:offset + n]
    ys = y[offset:offset + n]
    coeffs = np.polyfit(xs, ys, n - 1)
    return np.poly1d(coeffs)
def plot_interpolants(x, y, interpolants, axis=None, color='kbrg'):
    """Plot interpolants over the full stencil, highlighting [-1/2, 1/2].

    Each callable in `interpolants` is drawn thin over the whole stencil
    and thick/translucent over the central interval where it would be
    used.  Relies on the notebook globals `size` and `k`.

    Parameters
    ----------
    x, y : sample points (plotted as black dots)
    interpolants : sequence of callables, e.g. np.poly1d objects
    axis : Axes to draw on; a new figure is created when None
    color : one single-character colour code per interpolant
    """
    if axis is None:
        fig, axis = plt.subplots(figsize=size)
    xc = np.linspace(-0.5, 0.5)
    xx = np.linspace(-(k-1), k-1)
    # plt.hold / Axes.hold calls removed: deprecated in Matplotlib 2.0 and
    # removed in 3.0; repeated plot calls overlay by default.
    for i, interpolant in enumerate(interpolants):
        axis.plot(xx, interpolant(xx), '-'+color[i])
        axis.plot(xc, interpolant(xc), '-'+color[i], linewidth=5, alpha=0.5)
    axis.plot(x, y, 'ok')
    axis.axis((-(k-.5), k-.5, -0.5, 2.1));
```
Ordinary polynomial interpolation yields an oscillatory polynomial that also exceeds the bounds of the data:
For application to hyperbolic conservation laws, our main interest is in getting values of the function at the half-integer points (interfaces). Let's suppose we're trying to interpolate around $x=0$, at $x=\pm 1/2$. Instead of using all 5 points, we could just use three points, which might give us a less oscillatory interpolant, at least in that interval. Using the 5 points we're given, there are three natural choices of interpolation stencil: the leftmost three, the middle three, or the rightmost three. Let's see what each of these quadratic interpolants looks like.
```python
p_opt = stencil_interpolant(x,y,5,0)
plot_interpolants(x,y,[p_opt])
plt.title('Quartic interpolant');
```
```python
fig, ax = plt.subplots(3,1,figsize=(width,10))
names = ['left','right','center']
p = []
for i in range(k):
p.append(stencil_interpolant(x,y,k,i))
plot_interpolants(x,y,[p[i]],axis=ax[i],color=[colors[i]])
ax[i].set_title(names[i]+' interpolant')
```
Here are all three quadratic interpolants together with the quartic interpolant for comparison:
```python
plot_interpolants(x,y,p+[p_opt],color='brgk')
```
The quadratic interpolants look less oscillatory, but they're also less accurate. The WENO idea is to use the high-order interpolant (with all 5 points) if the data is smooth, but to use one of the lower-order interpolants (or a combination of them) if the data is not smooth. This is achieved by computing point values of the interpolant as weighted averages of the point values of the candidate polynomials, e.g.
$$y_{x_{i-1/2}} = w_{1,-1/2} p_\text{left}(x_{i-1/2}) + w_{2,-1/2} p_\text{center}(x_{i-1/2}) + w_{3,-1/2} p_\text{right}(x_{i-1/2}).$$
Of course, there is some particular set of weights that gives the quartic interpolant:
$$y_{x_{i-1/2}} = \gamma_{1,-1/2} p_\text{left}(x_{i-1/2}) + \gamma_{2,-1/2} p_\text{center}(x_{i-1/2}) + \gamma_{3,-1/2} p_\text{right}(x_{i-1/2}).$$
We will want to have $w_{j,-1/2} \approx \gamma_{j,-1/2}$ for smooth data.
```python
def compute_opt_weights(k,xi):
    """
    Get the optimal (linear) WENO weights gamma at the points xi.

    Parameters
    ----------
    k : int
        Stencil size; there are k candidate polynomials.
    xi : float or iterable of floats
        Evaluation points -- presumably on PyWENO's reference interval
        (e.g. -1 and 1 for the cell edges); confirm against PyWENO docs.

    Returns
    -------
    dict mapping each xi value to a length-k array of weights.
    """
    # Accept a single scalar point as well as an iterable of points.
    if not hasattr(xi,'__iter__'): xi = [xi]
    opt_weights = symbolic.optimal_weights(k,xi)
    gamma = {}
    for i, xi_val in enumerate(xi):
        gamma[xi_val] = np.empty(k)
        for j in range(k):
            # PyWENO returns a structure indexed by (point, stencil) pairs.
            gamma[xi_val][j] = opt_weights[0][(i,j)]
    return gamma
gamma = compute_opt_weights(k,(-1,0.5,1))
# BUGFIX: converted Python 2 print statements to the print() function used
# everywhere else in this notebook (the old form is a SyntaxError on py3).
print("$\gamma_{j,-1/2}$:", gamma[-1])
print("$\gamma_{j,+1/2}$:", gamma[1])
```
How does one determine if a polynomial is non-oscillatory? There are several ways proposed in the literature, but the original and most widely used is the weighted Sobolev norm:
$$\beta = \sum_{l=1}^k \Delta x^{2l-1} \int_{x_{i-1/2}}^{x_{i+1/2}} \left(\frac{d^l}{dx^l}p(x)\right)^2 dx.$$
Put simply, $\beta$ is a scaled sum of the square $L^2$ norms of all the derivatives of the polynomial over the interval where it will be used. The scaling is chosen to make the "smoothness indicator" $\beta$ independent of the choice of $\Delta x$ (note that $\Delta x = 1$ in our example data).
As each of the interpolants above is a linear function of the values $y_i$, the smoothness indicators are quadratic functions of the $y_i$ and can be expressed in the generic form
$$\beta = \sum_{m=-2}^{2} \sum_{n=-2}^{m}
C_{m,n} y_{i-k+m} y_{i-k+n}$$
Of course, the coefficients $C_{m,n}$ will be different for each of the candidate polynomials $p_\text{left},p_\text{center},p_\text{right}$. We can use the Python package PyWeno to automatically compute these coefficients and then apply them to our data.
```python
def compute_smoothness_indicators(y,k):
    """Jiang-Shu smoothness indicators beta_r for the k candidate stencils.

    Larger beta means a less smooth candidate polynomial.  Each indicator
    is a quadratic form in the samples:
    beta_r = sum_{m,n} C[r,n,m] * y[r+m] * y[r+n].
    Returns a (k, 1) array.
    """
    # Quadratic-form coefficients from PyWENO (they depend only on k).
    C = symbolic.jiang_shu_smoothness_coefficients(k)
    beta = np.zeros((k,1))
    for m in range(k):
        for n in range(m+1):
            for r in range(len(beta)):
                beta[r] = beta[r] + C[(r,n,m)] * y[r+m] * y[r+n]
    return beta
beta = compute_smoothness_indicators(y,k)
# BUGFIX: Python 2 print statement -> print() function (py3 SyntaxError).
print(beta)
```
Next we use these smoothness indicators to determine a weighting for the candidate polynomials. Observe that a large smoothness indicator means a polynomial has large derivatives, so we will want to give it less weight (perhaps they should be called non-smoothness indicators).
$$\tilde{w}_j = \frac{\gamma_j}{(\epsilon + \beta_j)^2}$$
Here $\epsilon$ is a small number used to avoid division by zero. We also normalize the weights so that they sum to unity:
$$w_j = \frac{\tilde{w}_j}{\sum_j\tilde{w}_j}$$
```python
def compute_weights(gamma, beta, epsilon=1.e-6):
    """Return the normalized WENO weights w_j = (gamma_j/(eps+beta_j)^2) / sum.

    Parameters
    ----------
    gamma : array-like of length k
        Optimal (linear) weights for the candidate stencils.
    beta : array-like of length k (may be shaped (k,1))
        Smoothness indicators; large beta means an oscillatory stencil.
    epsilon : float, optional
        Small regularizer that avoids division by zero when beta_j = 0.

    Returns
    -------
    numpy.ndarray of shape (k,) summing to 1.
    """
    # Flatten both inputs so a (k,1)-shaped beta (as produced by
    # compute_smoothness_indicators) does not broadcast to a (k,k) matrix.
    g = np.asarray(gamma, dtype=float).ravel()
    b = np.asarray(beta, dtype=float).ravel()
    w = g / (epsilon + b) ** 2
    return w / w.sum()
```
```python
# Weighted WENO reconstruction at the two cell edges
# (xi = -1 -> x_{i-1/2}, xi = +1 -> x_{i+1/2}).
q = {}
for xi in (-1,1):
    w = compute_weights(gamma[xi],beta)
    q[xi] = w[0]*p[0](xi/2.) + w[1]*p[1](xi/2.) + w[2]*p[2](xi/2.)
```
Here are the final reconstructed values given by WENO (indicated by the large grey circles):
```python
# Plot the candidate interpolants plus the optimal polynomial, and mark
# the WENO-reconstructed edge values with large grey circles.
plot_interpolants(x,y,p+[p_opt],color=['b','r','g','k'])
plt.hold(True)
plt.plot(-0.5,q[-1],'ok',alpha=0.3,markersize=15)
plt.plot(0.5,q[1],'ok',alpha=0.3,markersize=15)
plt.axis((-(k-.5),k-.5,-0.5,2.1));
```
Here's some code to plot everything for some given $(x,y)$ values.
```python
# Shared plotting configuration: line color per stencil, figure size, font size.
styles = { 'left' : 'b', 'center' : 'r', 'right' : 'g'}
size = (16,4); fs = 20
def WENO_visualization(x, y, xi=(-1, 1)):
    """Visualize the WENO reconstruction for the data (x, y).

    (x,y): data to interpolate
    xi: points at which to evaluate the interpolant (w.r.t. the (-1,1)
        reference interval). Must contain -1 and 1, since the weight bar
        charts read w[-1] and w[1].

    Draws three panels: the interpolants (left) and the normalized WENO
    weights at the left/right cell edges (two bar charts, right).
    """
    xx = np.linspace(np.min(x), np.max(x))
    color = ['b', 'r', 'g']
    plt.figure(figsize=size)
    plt.hold(True)
    ax1 = plt.subplot2grid((1, 8), (0, 0), colspan=6)
    ax2 = plt.subplot2grid((1, 8), (0, 6))
    ax3 = plt.subplot2grid((1, 8), (0, 7))
    K = len(y)
    k = (K + 1) // 2  # number of candidate stencils (explicit integer division)
    assert len(x) == K
    # Degree K-1 polynomial through all the data: the "optimal" interpolant.
    p_opt = np.poly1d(np.polyfit(x, y, K - 1))
    p = {}
    # NOTE(review): this pairing gives 'right' -> offset 1 and 'center' -> offset 2;
    # confirm stencil_interpolant's offset convention before relying on the labels.
    for name, offset in zip(('left', 'right', 'center'), range(k)):
        p[name] = stencil_interpolant(x, y, k, offset)
    gamma = compute_opt_weights(k, xi)
    beta = compute_smoothness_indicators(y, k)
    w = {}
    q = {}
    for loc in xi:
        w[loc] = compute_weights(gamma[loc], beta)
        q[loc] = w[loc][0]*p['left'](loc/2.) \
               + w[loc][1]*p['center'](loc/2.) \
               + w[loc][2]*p['right'](loc/2.)
    ax2.bar(range(3), w[-1], color=color, align='center');
    ax2.set_title(r'$w_{i-1/2}$', fontsize=fs)
    ax3.bar(range(3), w[1], color=color, align='center')
    ax3.set_title(r'$w_{i+1/2}$', fontsize=fs)
    for ax in (ax2, ax3):
        ax.set_xticks(range(3));
        ax.set_xticklabels(('left', 'center', 'right'))
        ax.set_ylim(0, 1); ax.set_yticks((0, 1))
    for name, interpolant in p.iteritems():
        ax1.plot(xx, interpolant(xx), styles[name])
        # Emphasize each interpolant over the central cell where it is used.
        xc = np.linspace(-0.5, 0.5)
        ax1.plot(xc, interpolant(xc), styles[name], linewidth=5)
    ax1.plot(x, y, 'ok')
    ax1.hold(True)
    ax1.plot(xx, p_opt(xx), '-k', x, y, 'ok', linewidth=2)
    # Mark the reconstructed edge values (was plotted twice; duplicate removed).
    for loc in xi:
        ax1.plot(loc/2., q[loc], 'ok', alpha=0.3, markersize=15)
    ax1.axis((-(k-0.8), k-0.8, -0.5, 2.1));
```
```python
%matplotlib inline
# Random data: rerun this cell to see how the weights adapt to the data.
y=np.random.rand(len(x))
WENO_visualization(x,y)
```
The bar charts on the right show the relative weight given to each of the quadratic interpolants when computing the left and right interpolated values.
Try running the box above a few times, or insert your own $y$ values. What happens if you use a step function for $y$? Let's see:
```python
# Step-function data: WENO should select the flat (non-oscillatory) stencil.
y=np.array( (1,1,1,0,0) )
WENO_visualization(x,y)
```
For a perfect step function, WENO picks the flat interpolant, just like any TVD limiter would!
## Comparison of several limiters for advection
In practice, all of these limiters are used as components of numerical solvers for hyperbolic PDEs. The simplest hyperbolic PDE is the advection equation:
$$ q_t + a q_x = 0.$$
The solution $q$ simply translates at velocity $a$; if you're not familiar with this, take a look at my [HyperPython lesson on advection](http://nbviewer.ipython.org/github/ketch/HyperPython/blob/master/Lesson_01_Advection.ipynb) and then the [lesson on high-resolution methods](http://nbviewer.ipython.org/github/ketch/HyperPython/blob/master/Lesson_03_High-resolution_methods.ipynb).
The cells below solve the advection equation using several of the limiters we've discussed. To run this part, you need to [install PyClaw](http://www.clawpack.org/installing.html) and Visclaw, which can be most easily accomplished via
pip install clawpack
```python
from clawpack import pyclaw
from clawpack import riemann
import matplotlib
from matplotlib import animation
from clawpack.visclaw.JSAnimation import IPython_display
def setup(scheme='minmod',cfl_max=0.9,IC='gauss_square',mx=100):
    """Build a PyClaw Controller for 1D advection with the given limiter.

    scheme: one of the TVD limiters ('minmod','superbee','MC','vanleer'),
        'Lax-Wendroff' (no limiting), 'first-order', or 'wenoN' (N = order).
    cfl_max: maximum allowed CFL number (desired CFL is 0.9*cfl_max).
    IC: initial condition, 'gauss_square' or 'wavepacket'.
    mx: number of grid cells on the unit interval.

    Returns an un-run pyclaw.Controller with keep_copy enabled so the
    solution frames stay in memory for plotting.
    """
    # WENO schemes need the SharpClaw (method-of-lines) solver; the classic
    # ClawSolver handles the TVD-limiter, Lax-Wendroff and first-order cases.
    if 'weno' in scheme:
        solver = pyclaw.SharpClawSolver1D(riemann.advection_1D)
    else:
        solver = pyclaw.ClawSolver1D(riemann.advection_1D)
    solver.bc_lower[0] = pyclaw.BC.periodic
    solver.bc_upper[0] = pyclaw.BC.periodic
    if scheme in ('minmod','superbee','MC','vanleer'):
        solver.limiters = getattr(pyclaw.limiters.tvd,scheme)
    #elif scheme == 'CT':
        #solver.limiters = pyclaw.limiters.tvd.cada_torrilhon_limiter
    elif scheme == 'Lax-Wendroff':
        solver.limiters = 0  # second-order centered, no limiting
    elif scheme == 'first-order':
        solver.order = 1
    elif 'weno' in scheme:
        solver.weno_order = int(scheme[4:]) #weno5, weno7, ...
    else:
        raise Exception('Unrecognized limiter')
    solver.cfl_max = cfl_max
    solver.cfl_desired = cfl_max*0.9
    x = pyclaw.Dimension(0.0,1.0,mx)
    domain = pyclaw.Domain(x)
    num_eqn = 1
    state = pyclaw.State(domain,num_eqn)
    state.problem_data['u']=1.  # advection velocity
    grid = state.grid
    xc = grid.x.centers
    if IC=='gauss_square':
        # Gaussian pulse plus a square pulse on (0.6, 0.8).
        beta=200.; x0=0.3
        state.q[0,:] = np.exp(-beta * (xc-x0)**2) + (xc>0.6)*(xc<0.8)
    elif IC=='wavepacket':
        beta=100.; x0=0.5
        state.q[0,:] = np.exp(-beta * (xc-x0)**2) * np.sin(80.*xc)
    else:
        raise Exception('Unrecognized initial condition.')
    claw = pyclaw.Controller()
    claw.solution = pyclaw.Solution(state,domain)
    claw.solver = solver
    claw.keep_copy = True      # keep solution frames in memory
    claw.output_format = None  # no on-disk output
    claw.tfinal =10.0
    return claw
```
```python
#This cell may take a few seconds to run
# Run the same advection test once per scheme and keep each run's frames.
results = []
schemes = ('first-order','Lax-Wendroff','minmod','superbee','MC','vanleer','weno5','weno7','weno9')
for scheme in schemes:
    claw = setup(scheme=scheme)
    claw.verbosity = 0
    claw.run()
    results.append(claw.frames)
def animate(results, ymin=-0.1):
    """Animate the solutions in `results` in a grid of subplots, one per scheme.

    results: list of frame lists (one list per scheme, as built in the cell
        above); all runs are assumed to have the same number of frames.
    ymin: lower y-axis limit for every subplot.

    Returns a matplotlib FuncAnimation. Subplot titles come from the global
    `schemes` tuple, and the initial condition is drawn in grey for reference.
    """
    # NOTE(review): relies on a global `width` for the figure size — confirm
    # it is defined earlier in the notebook.
    fig = plt.figure(figsize=(width, 8))
    N = len(results)
    n = int(np.ceil(np.sqrt(N)))  # smallest n x n grid that holds N plots
    axes = []
    gs1 = matplotlib.gridspec.GridSpec(n, n)
    gs1.update(wspace=0., hspace=0.)
    for i in range(n):
        for j in range(n):
            k = n*i + j
            if k < N:
                axes.append(plt.subplot(gs1[i, j]));
                # Hide tick labels except on the outer-left/bottom edges.
                if i < n-1:
                    axes[-1].xaxis.set_ticklabels(())
                if j > 0:
                    axes[-1].yaxis.set_ticklabels(())
    lines = [0]*len(schemes)
    for i in range(len(lines)):
        lines[i], = axes[i].plot([], [], lw=2)
    xc = results[0][0].p_centers[0]
    for i, ax in enumerate(axes):
        ax.set_xlim(0, 1); ax.set_ylim(ymin, 1.3)
        #ax.grid()
        ax.set_title(schemes[i], x = 0.5, y=0.85 )
        # Initial condition in grey for reference.
        ax.plot(xc, results[i][0].q[0,:], color='k', alpha=0.3)
    def fplot(frame_number):
        fig.suptitle('Solution after %s cycles' % frame_number, fontsize=20)
        for i, line in enumerate(lines):
            line.set_data(xc, results[i][frame_number].q[0,:])
        return lines,
    # Fix: derive the frame count from `results` itself rather than from the
    # `claw` variable leaked by the loop in the previous cell (which raised
    # NameError if this function was called standalone).
    return matplotlib.animation.FuncAnimation(fig, fplot,
                                              frames=len(results[0]),
                                              interval=30)
animate(results)
```
In the plot above, the solution advects across the full domain once between each frame of the animation (the boundary is periodic). By stepping through the animation, you can see how each limiter modifies the shape of the solution over time. The Lax-Wendroff method is based on a centered-difference approximation with no limiting; notice that it creates oscillations and is also less accurate than the limiter-based methods. For the advection equation, oscillations and overshoots are not a serious problem, but in the context of fluid dynamics or water wave simulations, they can be catastrophic.
```python
```
|
{"hexsha": "92f8bbeaeafef5c456f0b31a54f5e1ff1807b234", "size": 39615, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Guide_to_limiters.ipynb", "max_stars_repo_name": "nemethedr/HyperPython", "max_stars_repo_head_hexsha": "ce3d8ccd898fcb3d54f04af283d92b2436ba3eaa", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2015-02-16T17:36:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T11:40:54.000Z", "max_issues_repo_path": "Guide_to_limiters.ipynb", "max_issues_repo_name": "volpatto/HyperPython", "max_issues_repo_head_hexsha": "ce3d8ccd898fcb3d54f04af283d92b2436ba3eaa", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Guide_to_limiters.ipynb", "max_forks_repo_name": "volpatto/HyperPython", "max_forks_repo_head_hexsha": "ce3d8ccd898fcb3d54f04af283d92b2436ba3eaa", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2015-02-16T17:36:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T21:09:44.000Z", "avg_line_length": 36.0464058235, "max_line_length": 607, "alphanum_fraction": 0.5594093147, "converted": true, "num_tokens": 8156}
|
################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016
##
################################################################################
########################################################################
#
# Date: 2014 Authors: Michel Sanner
#
# sanner@scripps.edu
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: Michel Sanner and TSRI
#
#########################################################################
#
# $Header: /mnt/raid/services/cvs/DejaVu2/Qt/Viewer.py,v 1.1.1.1.4.1 2017/07/13 22:25:45 annao Exp $
#
# $Id: Viewer.py,v 1.1.1.1.4.1 2017/07/13 22:25:45 annao Exp $
#
import threading
from PySide import QtGui, QtCore
import DejaVu2
from DejaVu2.Qt.Camera import Camera
from DejaVu2.Viewer import ViewerBase
import numpy, math
#class Viewer(QtGui.QDockWidget, ViewerBase):
#class Viewer(QtGui.QMainWindow, ViewerBase):
#class Viewer(QtGui.QWidget, ViewerBase):
#class Viewer(QtGui.QWidget, ViewerBase):
class Viewer(ViewerBase):
    """Qt (PySide) front end for a DejaVu2 viewer.

    Subclasses ViewerBase and drives redraws through a QTimer
    (self.redrawTimer) rather than a Tk event loop.  Much of the original
    Tk/Qt-widget machinery is kept here as commented-out code.
    """

    def __init__(self, nogui=0,
                 guiMaster=None,
                 showViewerGUI=True,
                 autoRedraw=True,
                 cnf={}, **kw):
        """Viewer's constructor.

        Sets a few viewer-level defaults (trackball target, spinning mode,
        lighting flag) and then delegates to ViewerBase.__init__.
        Note: nogui, guiMaster and autoRedraw are NOT forwarded as given --
        ViewerBase.__init__ is called with nogui=0, guiMaster=None,
        autoRedraw=True regardless of the arguments received.
        """
        if guiMaster is not None:
            self.ownMaster = True
        #print 'VIEWER MASTER', id(master)
        #QtGui.QDockWidget.__init__(self, master)
        #QtGui.QMainWindow.__init__(self, master)
        #self.setWindowTitle('DejaVu2_Viewer_Qt')
        #QtGui.QWidget.__init__(self, master)
        #self.fileMenu = self.menuBar().addMenu(self.tr("&File"))
        #self.fileMenu.hide()
        #self.toolBar = self.addToolBar(self.tr("Focus"))
        #self.redrawTimer = QtCore.QTimer()
        #self.connect(self.redrawTimer, QtCore.SIGNAL("timeout()"),
        #             self.ReallyRedraw)

        # string var used to decide what the trackball is moving
        self.Xform = 'Object'
        self.contourTk = False
        self.spinVar = DejaVu2.defaultSpinningMode
        self.spinCallBack = None

        # Decides if the call to enable GL_LIGHTNING will be considered
        self.OverAllLightingIsOn = 1

        # not sure about this but if it is not there I have 100x3 black box in upper left corner
        #mainLayout = QtGui.QGridLayout()
        #self.setLayout(mainLayout)

        ViewerBase.__init__(self, nogui=0, #screenName=None,
                            guiMaster=None, #classCamera=None,
                            autoRedraw=True,
                            #verbose=True,
                            cnf=cnf, **kw)

        #objects = QtGui.QDockWidget('ObjectTree', self)
        #from tree import ObjectTree
        #self.objTree = ObjectTree(self, parent=objects)
        #objects.setWidget(self.objTree)
        #from dashboard import Dashboard
        #self.objTree = Dashboard(parent=objects)
        #objects.setWidget(self.objTree)
        #mainLayout.addWidget(objects)
        #self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, objects)
        #mainLayout.addWidget(self.cameras[0].master)

        # create the material editor
        self.materialEditor = None
        ## self.materialEditor = MaterialEditor(None, self)
        ## self.materialEditor.dismissTk.grid_forget()
        ## self.materialEditor.dismiss()

        ## # register Object that have their own OpenGL context but need to have
        ## # the same lighting model and lights
        ## for l in self.lights:
        ##     l.applyTo = [self.materialEditor]
        ## self.lightModel.applyTo = [self.materialEditor]

        ## # create the ViewerGui
        ## if showViewerGUI:
        ##     self.GUI = ViewerGUI(self, self.maxLights, self.maxClipP,
        ##                          nogui=nogui, master=guiMaster)
        ##     #self.GUI.CameraBackgroundColor = self.CurrentCameraBackgroundColor
        ##     #self.GUI.LightColor = self.CurrentLightColor
        ##     #self.GUI.ClipColor = self.CurrentClipColor
        ##     #self.GUI.ObjectFrontColor = self.CurrentObjectFrontColor
        ##     #self.GUI.ObjectBackColor = self.CurrentObjectBackColor
        ##     #self.GUI.LightModelColor = self.LMColor
        ##     self.GUI.addObject(self.rootObject, None)
        ##     self.GUI.bindResetButton( self.Reset_cb)
        ##     self.GUI.bindNormalizeButton( self.Normalize_cb)
        ##     self.GUI.bindCenterButton( self.Center_cb)
        ##     self.GUI.bindDeleteButton( self.Delete_cb)
        ##     ## self.GUI.Exit = self.__del__
        ##     if nogui and isinstance(self.GUI.root, Tkinter.Toplevel):
        ##         self.GUI.withdraw()
        #self.GUI.addObject(self.pickVerticesSpheres, None)
        #self.GUI.showPickedVertex.set(self.showPickedVertex)

    def FocusOnBox(self, mini, maxi):
        """Moves camera to provided as [mini, maxi] bounding box.

        Resets the root object's scale, recenters the scene on the box
        center, and moves the camera back far enough (given the current
        field of view) to frame the whole box, then redraws.
        """
        g = numpy.sum( (mini, maxi),0 ) * .5 # center of BoundingBox
        self.rootObject.Set(scale=(1.,1.,1.))
        lBox = maxi - mini
        # half of the box's largest edge; guard against a degenerate box
        self.lHalfObject = max(lBox)/2.
        if self.lHalfObject == 0.:
            self.lHalfObject = 1.
        cam = self.currentCamera
        self.oldLookFrom = cam.lookFrom[2]
        self.diff_fovy = cam.fovyNeutral - cam.fovy
        cam.lookAt = numpy.array((0.,0.,0.))
        Rmini, Rmaxi = self.rootObject.ComputeBB(self.currentCamera)
        Rg = numpy.sum( (Rmini, Rmaxi), 0) * .5
        self.diffVect = -g-self.rootObject.translation
        if not (g-Rg).any():
            self.diffVect = -Rg
        cam.Set(fov=cam.fovy + self.diff_fovy)
        self.rootObject.ConcatTranslation( self.diffVect[:3])
        # distance so the half-box subtends half the vertical field of view
        lNewDist = self.lHalfObject / math.tan(cam.fovy/2*math.pi/180.0)
        newDist = cam.nearDefault+lNewDist+self.lHalfObject
        dist = self.oldLookFrom + newDist
        cam.lookFrom = numpy.array( ( 0., 0., dist ) )
        cam.direction = cam.lookAt - cam.lookFrom
        # temporarily make the root object current so CenterCurrentObject
        # operates on it, then restore the previous current object
        currentObject = self.currentObject
        self.currentObject = self.rootObject
        self.CenterCurrentObject()
        self.currentObject = currentObject
        self.currentCamera.AutoDepthCue(object=self.rootObject)
        self.OneRedraw()
        g = ( (0.5*(mini[0]+maxi[0])), (0.5*(mini[1]+maxi[1])),
              (0.5*(mini[2]+maxi[2])) )
        self.rootObject.SetPivot(g)
        self.OneRedraw()

    def postNextRedraw(self):
        """Schedule a redraw 100 ms from now, restarting the timer if it
        is already running (only when auto-redraw is enabled)."""
        if self.autoRedraw:
            if self.redrawTimer.isActive():
                self.redrawTimer.stop()
            self.redrawTimer.start(100)

    def startAutoRedraw(self):
        """Enable auto-redraw and start the redraw timer (100 ms)."""
        self.autoRedraw = True
        self.redrawTimer.start(100)

    def stopAutoRedraw(self):
        """Stop the redraw timer (if running) and disable auto-redraw."""
        if self.redrawTimer.isActive():
            self.redrawTimer.stop()
        self.autoRedraw = False

    def checkIfRedrawIsNeeded(self):
        """Return True when a redraw should actually be performed.

        Returns False (and, in the first two cases, re-arms the timer) when
        redraw is suspended, nothing needs redrawing, the caller is not the
        main thread, or auto-redraw is on but the timer is not active.
        """
        if self.suspendRedraw:
            self.redrawTimer.start(1000)
            #print 'NO REDRAW SUSPEND'
            return False
        if not self.needsRedraw:
            self.redrawTimer.start(100)
            #print 'NO REDRAW no needs'
            return False
        if threading.currentThread().getName()!='MainThread':
            print 'NO REDRAW not main thread'
            #self.redrawTimer.start(100)
            return False
        if self.autoRedraw and not self.redrawTimer.isActive():
            #print 'NO REDRAW no active timer'
            return False
        return True

    def closeEvent(self, event):
        """Qt close handler: ask for confirmation before closing."""
        reply = QtGui.QMessageBox.question(
            self, "Confirmation",
            "Are you sure you want to quit?",
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()

    def AddCamera(self, master=None, screenName=None, classCamera=None,
                  stereo='none', num=None, verbose=True, cnf={}, **kw):
        """Add one more camera to this viewer.

        Creates a camera of `classCamera` (default: Camera), appends it to
        self.cameras and returns it.  For the FIRST camera only, this also
        queries OpenGL for clipping-plane and light limits, creates the
        lights and clipping planes, checks stereo/VBO support and starts the
        auto-redraw timer.
        """
        if num is None:
            num = len(self.cameras)
        if classCamera is None:
            classCamera = Camera
            name = 'camera '+str(num)
        else:
            name = classCamera.__name__+str(num)
        # if no master widget was given the camera owns its own master
        cameraOwnsMaster = False
        if not master:
            cameraOwnsMaster = True
        kw['stereo'] = stereo
        c = classCamera(master, screenName, self, num, check=1, cnf=cnf, **kw)
        c.show()
        c.fog.Set(enabled=True)
        #if hasattr(c.frame.master,"protocol"):
        #    c.frame.master.protocol("WM_DELETE_WINDOW",self.closeEvent)
        ## c.eventManager.AddCallback('<KeyPress>', self.modifierDown)
        ## c.eventManager.AddCallback('<KeyRelease>', self.modifierUp)
        ## c.eventManager.AddCallback('R', self.Reset_cb_arg)
        ## c.eventManager.AddCallback('r', self.Reset_cb_arg)
        ## c.eventManager.AddCallback('A', self.AutoDepthCue)
        ## c.eventManager.AddCallback('a', self.AutoDepthCue)
        ## c.eventManager.AddCallback('N', self.Normalize_cb_arg)
        ## c.eventManager.AddCallback('n', self.Normalize_cb_arg)
        ## c.eventManager.AddCallback('C', self.Center_cb_arg)
        ## c.eventManager.AddCallback('c', self.Center_cb_arg)
        ## c.eventManager.AddCallback('D', self.Depth_cb_arg)
        ## c.eventManager.AddCallback('d', self.Depth_cb_arg)
        ## c.eventManager.AddCallback('T', self.toggleTransformRootOnly)
        ## c.eventManager.AddCallback('t', self.toggleTransformRootOnly)
        ## c.eventManager.AddCallback('L', self.toggleOpenglLighting)
        ## c.eventManager.AddCallback('l', self.toggleOpenglLighting)
        ## c.eventManager.AddCallback('O', self.SSAO_cb_arg)
        ## c.eventManager.AddCallback('o', self.SSAO_cb_arg)
        c.ownMaster = cameraOwnsMaster
        ## if self.GUI is not None:
        ##     self.GUI.bindModifersToTransformInfo(master)
        self.cameras.append( c )
        #c.frame.config( background = "#900000" )
        # make the trackball transform the current object
        #if self.rootObject:
        #    self.BindTrackballToObject(self.rootObject)
        c.firstRedraw = True
        c.Activate()
        # one-time OpenGL/viewer initialization for the first camera
        if len(self.cameras)==1:
            from opengltk.OpenGL import GL
            from DejaVu2.Clip import ClippingPlane
            from DejaVu2.Light import Light
            self.currentCamera = c
            c.hasBeenCurrent = 1
            self.SetCurrentObject(self.rootObject)
            #self.GLextensions = string.split(GL.glGetString(GL.GL_EXTENSIONS))
            #self.GLversion = GL.glGetString(GL.GL_VERSION)
            #self.GLvendor = GL.glGetString(GL.GL_VENDOR)
            if verbose:
                print 'OpenGL-based graphics'
                print ' GL_VENDOR', GL.glGetString(GL.GL_VENDOR)
                print ' GL_RENDERER', GL.glGetString(GL.GL_RENDERER)
                print ' GL_VERSION', GL.glGetString(GL.GL_VERSION)
            #self.hasOffsetExt = "GL_EXT_polygon_offset" in self.GLextensions

            # Find out how many clipping planes OpenGL defines
            maxClipP = 0
            for i in range(6):
                try:
                    cpn = getattr(GL, "GL_CLIP_PLANE%d"%i)
                    ClippingPlane.clipPlaneNames.append(cpn)
                    maxClipP +=1
                except AttributeError:
                    break
            self.maxClipP = maxClipP
            # Find out how many clipping planes OpenGL "thinks" it supports
            #maxClipP = min(int(GL.glGetDoublev(GL.GL_MAX_CLIP_PLANES)[0]), 6)

            # Find out how many light sources
            maxLights = int(GL.glGetDoublev(GL.GL_MAX_LIGHTS)[0])
            if maxLights > 8:
                print 'WARNING: Reducing number of light sources from %d to 8' % \
                      maxLights
                maxLights = 8
            self.maxLights = maxLights

            # create the light sources
            for i in range(self.maxLights):
                l = Light(i, self)
                self.lights.append( l )
                l.viewer = self
            self.InitLighting(c)

            self.rootObject.clipSide = [1]*self.maxClipP
            for i in range(self.maxClipP):
                #from DejaVu2.csgClip import CsgClippingPlane
                #cp = CsgClippingPlane(self.rootObject, i, self)
                cp = ClippingPlane(self.rootObject, i, self)
                self.clipP.append(cp)
            if self.maxClipP:
                self.currentClip = self.clipP[0]
            else:
                self.currentClip = None

            # see if it PolygonOffset actually implemented
            if self.hasOffsetExt:
                try:
                    self.polyOffset(1.0, 1.0)
                except ValueError:
                    # NOTE(review): `self.viewer` looks suspicious here --
                    # probably meant `self.hasOffsetExt`; confirm.
                    self.viewer.hasOffsetExt = False

            ## if cnf.has_key("addScenarioButton"):
            ##     self.addScenarioButton = cnf["addScenarioButton"]
            ## else:
            ##     self.addScenarioButton = True

            # this line causes networks wih viewers not to appear completely
            # (missing connections, which appear only after a window takes focus
            # MS march 26, 03
            #self.master.wait_visibility()

            #print "DejaVu2.enableStereo", DejaVu2.enableStereo
            if DejaVu2.enableStereo is True:
                self.activeStereoSupport = self.currentCamera.activeStereoSupport()
            else:
                self.activeStereoSupport = False
            #print "self.activeStereoSupport", self.activeStereoSupport

            #make sure that at the end of the init the current context is the one of the current camera
            self.currentCamera.Redraw()
            #self.currentCamera.Activate()

            # create fake geometry to find out if VBO are supported
            from DejaVu2.IndexedPolygons import IndexedPolygons
            fake = IndexedPolygons('test', vertexArrayFlag=True,
                                   vertices=((0,0,0),))
            if verbose:
                print 'Enable VBO:', DejaVu2.enableVBO

            self.suspendRedraw = False # set to True to Prevent redrawing

            if self.autoRedraw:
                self.pendingAutoRedrawID = self.redrawTimer.start(10)

        return c

    ## def AddObject(self, obj, parent=None, redraw=True, redo=True):
    ##     ViewerBase.AddObject(self, obj, parent=parent, redraw=redraw, redo=redo)
    ##     self.objTree.addObject(obj.parent, obj, obj.name, str(obj))

    ## def _DeleteCamera(self, camera):
    ##     """Remove the given camera in the right order
    ##     """
    ##     #print 'Viewer._DeleteCamera ', camera
    ##     # Check if this camera shareCTX with other camera.
    ##     if hasattr(camera, 'shareCTXWith'):
    ##         while len(camera.shareCTXWith):
    ##             cam = camera.shareCTXWith[0]
    ##             self._DeleteCamera(cam)
    ##     camera.destroy()
    ##     if camera.ownMaster:
    ##         camera.frame.master.destroy()
    ##     else:
    ##         camera.frame.destroy()
    ##     self.cameras.remove(camera)
    ##     for c in self.cameras:
    ##         if hasattr(c, 'shareCTXWith'):
    ##             c.shareCTXWith.remove(camera)

    ## def DeleteCamera(self, camera):
    ##     """
    ##     Remove the given camera from the viewer and takes care
    ##     of the dpyList if camera is cameras[0]
    ##     """
    ##     # #delete NPR rendering toplevel
    ##     # if camera.imCanvastop:
    ##     #     camera.imCanvastop.destroy()
    ##     #     camera.imCanvas = None
    ##     #     camera.imCanvas1 = None
    ##     #     camera.imCanvas2 = None
    ##     #     camera.imCanvas3 = None
    ##     # Remove the camera from the list of cameras associated to
    ##     # the viewer.
    ##     camIndex = self.cameras.index(camera)
    ##     # the current openGL context has been destroyed so
    ##     # the dpyList need to be destroyed only if the CTX is not
    ##     # shared by any other camera.
    ##     if camIndex == 0:
    ##         self.objectsNeedingRedo = {}
    ##         for g in self.rootObject.AllObjects():
    ##             g.deleteOpenglList()
    ##             self.objectsNeedingRedo[g] = None
    ##     self._DeleteCamera(camera)
    ##     # If this camera is the current camera
    ##     if self.currentCamera == camera:
    ##         if len(self.cameras) == 0:
    ##             # There is no more cameras then set currentCamera to None
    ##             self.currentCamera = None
    ##         else:
    ##             # Set the current Camera to be the first camera of the list.
    ##             self.currentCamera = self.cameras[0]
|
{"hexsha": "4590c0b79bd42361d4a03a813d3960db493b7918", "size": 17719, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/ADFRsuite/CCSBpckgs/DejaVu2/Qt/Viewer.py", "max_stars_repo_name": "AngelRuizMoreno/Jupyter_Dock_devel", "max_stars_repo_head_hexsha": "6d23bc174d5294d1e9909a0a1f9da0713042339e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/ADFRsuite/CCSBpckgs/DejaVu2/Qt/Viewer.py", "max_issues_repo_name": "AngelRuizMoreno/Jupyter_Dock_devel", "max_issues_repo_head_hexsha": "6d23bc174d5294d1e9909a0a1f9da0713042339e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/ADFRsuite/CCSBpckgs/DejaVu2/Qt/Viewer.py", "max_forks_repo_name": "AngelRuizMoreno/Jupyter_Dock_devel", "max_forks_repo_head_hexsha": "6d23bc174d5294d1e9909a0a1f9da0713042339e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-04T21:48:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T21:48:14.000Z", "avg_line_length": 37.8611111111, "max_line_length": 103, "alphanum_fraction": 0.5799424347, "include": true, "reason": "import numpy", "num_tokens": 4060}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.