text stringlengths 0 27.1M | meta dict |
|---|---|
subroutine setaux(mbc,mx,my,xlower,ylower,dx,dy,maux,aux)
    ! Set auxiliary arrays aux(1:maux, 1-mbc:mx+mbc, 1-mbc:my+mbc) for a
    ! grid patch.  Called at the start of the computation before qinit and,
    ! under AMR, whenever a new patch is created.  Ghost cells may need
    ! values if the aux arrays are used by the Riemann solver(s).
    !
    ! This default version does nothing unless adjoint-flagging is active,
    ! in which case it zeroes the inner product in cells of freshly
    ! created grids that were never flagged.
    use amr_module, only: NEEDS_TO_BE_SET
    use adjoint_module, only: adjoint_flagging, innerprod_index

    implicit none

    integer, intent(in) :: mbc, mx, my, maux
    real(kind=8), intent(in) :: xlower, ylower, dx, dy
    real(kind=8), intent(inout) :: aux(maux, 1-mbc:mx+mbc, 1-mbc:my+mbc)

    integer :: i, j

    if (adjoint_flagging) then
        ! Cells still carrying the NEEDS_TO_BE_SET sentinel belong to a
        ! new grid that was not flagged: zero their inner product entry.
        do j = 1-mbc, my+mbc
            do i = 1-mbc, mx+mbc
                if (aux(1,i,j) .eq. NEEDS_TO_BE_SET) then
                    aux(innerprod_index,i,j) = 0.d0
                end if
            end do
        end do
    end if
end subroutine setaux
| {
"alphanum_fraction": 0.6365187713,
"author": null,
"avg_line_length": 34.4705882353,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "6468d6eb469e77a5924daa2bd31c0ac7491197a4",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 28,
"max_forks_repo_forks_event_max_datetime": "2022-02-11T23:52:34.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-10T00:03:56.000Z",
"max_forks_repo_head_hexsha": "0e0d37dda52b8c813f7fc4bd7e61c5fdb33b0ada",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "mjberger/ho_amrclaw_amrcart",
"max_forks_repo_path": "src/2d/setaux.f90",
"max_issues_count": 107,
"max_issues_repo_head_hexsha": "0e0d37dda52b8c813f7fc4bd7e61c5fdb33b0ada",
"max_issues_repo_issues_event_max_datetime": "2021-11-24T03:35:32.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-02T19:51:43.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "mjberger/ho_amrclaw_amrcart",
"max_issues_repo_path": "src/2d/setaux.f90",
"max_line_length": 75,
"max_stars_count": 16,
"max_stars_repo_head_hexsha": "d42540468d09ecc902438cdc4ae13db5dc1dc969",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "ClimateImpactLab/amrclaw",
"max_stars_repo_path": "src/2d/setaux.f90",
"max_stars_repo_stars_event_max_datetime": "2022-01-21T06:36:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-05-27T08:16:09.000Z",
"num_tokens": 367,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1172
} |
import pygimli as pg
import numpy as np
from settings import fpm

# Load the inversion mesh and the conventionally inverted models.
pd = pg.load("paraDomain_1.bms")
resinv = np.loadtxt("res_conventional.dat")
vest = np.loadtxt("vel_conventional.dat")

# Run the four-phase model on resistivity and velocity estimates.
fae, fie, fwe, maske = fpm.all(resinv, vest)
print(np.min(fwe), np.max(fwe))

# Persist velocity, resistivity, phase fractions and mask together.
results = {
    "vel": np.array(vest),
    "rho": np.array(resinv),
    "fa": fae,
    "fi": fie,
    "fw": fwe,
    "mask": maske,
}
np.savez("conventional.npz", **results)
| {
"alphanum_fraction": 0.709762533,
"author": null,
"avg_line_length": 29.1538461538,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "28f0a36bcf106a153708f4ec25ed18179b50bbfc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2022-03-20T22:14:02.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-05-21T11:04:16.000Z",
"max_forks_repo_head_hexsha": "8a8c7ca3b9905e167aba5b9faa2b7e6658948da4",
"max_forks_repo_licenses": [
"CC-BY-4.0",
"BSD-3-Clause"
],
"max_forks_repo_name": "florian-wagner/four-phase-inversion",
"max_forks_repo_path": "code/scripts/field_case/4_apply_4PM.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "8a8c7ca3b9905e167aba5b9faa2b7e6658948da4",
"max_issues_repo_issues_event_max_datetime": "2021-07-25T10:51:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-01-21T19:42:31.000Z",
"max_issues_repo_licenses": [
"CC-BY-4.0",
"BSD-3-Clause"
],
"max_issues_repo_name": "florian-wagner/four-phase-inversion",
"max_issues_repo_path": "code/scripts/field_case/4_apply_4PM.py",
"max_line_length": 78,
"max_stars_count": 14,
"max_stars_repo_head_hexsha": "8a8c7ca3b9905e167aba5b9faa2b7e6658948da4",
"max_stars_repo_licenses": [
"CC-BY-4.0",
"BSD-3-Clause"
],
"max_stars_repo_name": "florian-wagner/four-phase-inversion",
"max_stars_repo_path": "code/scripts/field_case/4_apply_4PM.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-16T10:34:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-11-19T02:44:26.000Z",
"num_tokens": 122,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 379
} |
import Teams
import collections
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from constants import _Const
import dataLoader

CONST = _Const()

# Accumulate per-team goal records from every loaded fixture.
teams = {}
fixtures = dataLoader.LoadAllData(dt.datetime.now())
output = open(CONST.OUTPUT_PATH + 'Parameters.txt', 'w')
for key in fixtures:
    fix = fixtures.get(key)
    home = fix.hometeam.name
    away = fix.awayteam.name
    if home not in teams:
        teams[home] = Teams.Team(name=home)
    if away not in teams:
        teams[away] = Teams.Team(name=away)
    teams[home].scored(fix.hometeam.goals_scored)
    teams[away].scored(fix.awayteam.goals_scored)

# For each team, fit a Poisson curve to the goal-count frequencies and
# save a plot of the empirical distribution with the fitted curve.
for team in teams.keys():
    goal_list = teams[team].goals
    goal_list.sort()
    counted = collections.Counter(goal_list)
    no_of_occurencies = list(counted.values())
    no_of_goals = list(counted.keys())

    goal_no = []
    prob = []
    prob_err = []
    for goals, occurrences in zip(no_of_goals, no_of_occurencies):
        goal_no.append(goals)
        # Empirical probability of this goal count, with a sqrt(N) error.
        p = float(occurrences) / len(goal_list)
        prob.append(p)
        prob_err.append(p / np.sqrt(occurrences))

    # Fit is against raw occurrence counts (as in the original wiring).
    parameters, _cov = curve_fit(Teams.poisson, no_of_goals, no_of_occurencies)
    x_data = np.linspace(0, 8, 1000)
    output.write(team + str(parameters) + '\n')

    plt.plot(goal_no, prob, '*', label = 'Data points')
    plt.plot(x_data, Teams.poisson(x_data, *parameters), 'r-', lw=3, label = 'Poisson fit')
    plt.errorbar(goal_no, prob, yerr = prob_err, fmt='', linestyle = 'none')
    plt.title('Probability distribution of number of goals in a match for ' + team)
    plt.xlabel('Number of goals')
    plt.ylabel('Probability')
    plt.xlim(-1, (max(no_of_goals)+1))
    plt.legend()
    plt.savefig(CONST.OUTPUT_PATH + team + '_psn.jpg')
    plt.clf()
output.close() | {
"alphanum_fraction": 0.674863388,
"author": null,
"avg_line_length": 30.9295774648,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "beb02ae7c1910ea0b8c9a11cbdc62c1260811004",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "76588c4406ea9c8eeba04caa9eb4dd5f07b5382e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "githubLewis/Football-data-analysis",
"max_forks_repo_path": "fda/Poisson.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "76588c4406ea9c8eeba04caa9eb4dd5f07b5382e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "githubLewis/Football-data-analysis",
"max_issues_repo_path": "fda/Poisson.py",
"max_line_length": 91,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "76588c4406ea9c8eeba04caa9eb4dd5f07b5382e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "githubLewis/Football-data-analysis",
"max_stars_repo_path": "fda/Poisson.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 579,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2196
} |
import sys
sys.path.append('../')
import pytest
import LMR_utils as Utils
import numpy as np
@pytest.mark.parametrize("doc", [
    """This is class docstring""",
    None])
def test_class_doc_inherit(doc):
    # The parent carries the docstring (possibly None); class_docs_fixer
    # should propagate it onto the undocumented subclass.
    class parent:
        __doc__ = doc

    @Utils.class_docs_fixer
    class child(parent):
        pass

    assert child.__doc__ == doc
@pytest.mark.parametrize("doc", [
    """This is func docstring""",
    None])
def test_function_doc_inherit(doc):
    # A docstring set on the parent's method should be inherited by the
    # overriding method of the decorated subclass.
    class parent:
        def lol(self):
            pass

    parent.lol.__func__.__doc__ = doc

    @Utils.class_docs_fixer
    class child(parent):
        def lol(self):
            pass

    assert child.lol.__func__.__doc__ == doc
def test_function_doc_augment():
    # A child method marked with %%aug%% should end up with the parent's
    # docstring prepended and the marker stripped out.
    parent_doc = """This is the parents lol docstr"""
    child_doc = """%%aug%%
    The childs doc is here
    """

    class parent:
        def lol(self):
            pass

    parent.lol.__func__.__doc__ = parent_doc

    @Utils.class_docs_fixer
    class child(parent):
        @Utils.augment_docstr
        def lol(self):
            """%%aug%%
    The childs doc is here
    """
            pass

    assert child.lol.__func__.__doc__ == (parent_doc +
                                          child_doc.replace('%%aug%%', ''))
def test_generate_latlon_bnd_limits():
    # TODO: could be parametrized input
    # Defaults should succeed.
    Utils.generate_latlon(5, 5)

    # These latitude bounds are rejected.
    for bad_lat in [(-100, 45), (-45, 91)]:
        with pytest.raises(ValueError):
            Utils.generate_latlon(5, 5, lat_bnd=bad_lat)

    # This longitude range is accepted ...
    Utils.generate_latlon(5, 5, lon_bnd=(-90, 270))
    # ... but these are rejected.
    for bad_lon in [(-180, 181), (-181, 40), (14, 361)]:
        with pytest.raises(ValueError):
            Utils.generate_latlon(5, 5, lon_bnd=bad_lon)
def test_generate_latlon_output_shp():
    # Center grids are 2-D (nlats x nlons); each corner vector has one
    # extra point along its axis.
    nlats, nlons = 4, 5
    lats, lons, clats, clons = Utils.generate_latlon(nlats, nlons)
    assert lats.shape == (nlats, nlons)
    assert lons.shape == (nlats, nlons)
    assert clats.shape == (nlats + 1,)
    assert clons.shape == (nlons + 1,)
def test_generate_latlon_center_corner():
    # 4x5 grid over [-45, 45] x [0, 180]: centers sit mid-cell, corner
    # values land on the cell edges.
    lats, lons, clats, clons = Utils.generate_latlon(
        4, 5, lat_bnd=(-45, 45), lon_bnd=(0, 180))
    np.testing.assert_equal(lats[:, 0], [-33.75, -11.25, 11.25, 33.75])
    np.testing.assert_equal(lons[0], [0, 36, 72, 108, 144])
    np.testing.assert_equal(clats, [-45, -22.5, 0, 22.5, 45])
    np.testing.assert_equal(clons, [-18, 18, 54, 90, 126, 162])
def test_generate_latlon_include_lat_endpts():
    # With include_endpts=True the latitude centers reach the poles.
    lats, lons, clats, clons = Utils.generate_latlon(3, 5, include_endpts=True)
    np.testing.assert_equal(lats[:, 0], [-90, 0, 90])
    assert clats[0] == -90
    assert clats[-1] == 90

    lats, lons, clats, clons = Utils.generate_latlon(4, 5, include_endpts=True)
    np.testing.assert_equal(lats[:, 0], [-90, -30, 30, 90])
def test_calc_latlon_bnd_1d_input():
    # calculate_latlon_bnds rejects non-1-D coordinate arrays.
    vec = np.linspace(10, 50, 5)
    with pytest.raises(ValueError):
        _ = Utils.calculate_latlon_bnds(vec[:, None], vec)
    with pytest.raises(ValueError):
        _ = Utils.calculate_latlon_bnds(vec, vec[:, None])
def test_calc_latlon_bnd_monotonic():
    # Decreasing coordinate vectors are rejected in either position.
    increasing = np.linspace(0, 10, 11)
    with pytest.raises(ValueError):
        _ = Utils.calculate_latlon_bnds(increasing[::-1], increasing)
    with pytest.raises(ValueError):
        _ = Utils.calculate_latlon_bnds(increasing, increasing[::-1])
def test_calc_latlon_bnd_regular_grid():
    # Regular spacing gives half-step midpoint bounds; irregular spacing
    # gives neighbour midpoints with extrapolated end bounds.  Either
    # argument position should get the same treatment.
    irregular = np.array([1, 2, 3, 5, 8, 13, 21], dtype=np.float32)
    regular = np.arange(10)
    irregular_expected = [0.5, 1.5, 2.5, 4, 6.5, 10.5, 17, 25]
    regular_expected = np.arange(11) - 0.5

    lat_bnds, lon_bnds = Utils.calculate_latlon_bnds(regular, irregular)
    np.testing.assert_equal(lat_bnds, regular_expected)
    np.testing.assert_equal(lon_bnds, irregular_expected)

    lat_bnds, lon_bnds = Utils.calculate_latlon_bnds(irregular, regular)
    np.testing.assert_equal(lat_bnds, irregular_expected)
    np.testing.assert_equal(lon_bnds, regular_expected)
def test_calc_latlon_bnd_bounds():
    # Bounds for evenly spaced centers recover the original cell edges.
    centers_lat = np.array([-33.75, -11.25, 11.25, 33.75])
    centers_lon = np.array([18, 54, 90, 126, 162])
    lat_bnds, lon_bnds = Utils.calculate_latlon_bnds(centers_lat, centers_lon)
    np.testing.assert_equal(lat_bnds, [-45, -22.5, 0, 22.5, 45])
    np.testing.assert_equal(lon_bnds, [0, 36, 72, 108, 144, 180])
def test_calc_latlon_bnd_bounds_half_shift():
    # Latitude bounds are clipped at the poles; longitude bounds are
    # shifted half a cell relative to the centers.
    centers_lat = np.array([-90, -60, -30, 0, 30, 60, 90])
    centers_lon = np.array([0, 90, 180, 270])
    lat_bnds, lon_bnds = Utils.calculate_latlon_bnds(centers_lat, centers_lon)
    np.testing.assert_equal(lat_bnds, [-90, -75, -45, -15, 15, 45, 75, 90])
    np.testing.assert_equal(lon_bnds, [-45, 45, 135, 225, 315])
| {
"alphanum_fraction": 0.6276234259,
"author": null,
"avg_line_length": 29.2573099415,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "71db5e744515b5e689ccb0374d96d8e0faac0702",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 11,
"max_forks_repo_forks_event_max_datetime": "2021-08-17T12:08:11.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-10-11T19:35:34.000Z",
"max_forks_repo_head_hexsha": "4c00d3f9db96447e69bd3f426d59524f7b5f3ef5",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "mingsongli/LMR",
"max_forks_repo_path": "tests/test_lmr_utils.py",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "4c00d3f9db96447e69bd3f426d59524f7b5f3ef5",
"max_issues_repo_issues_event_max_datetime": "2019-04-26T11:45:58.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-10-15T22:13:27.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "mingsongli/LMR",
"max_issues_repo_path": "tests/test_lmr_utils.py",
"max_line_length": 82,
"max_stars_count": 17,
"max_stars_repo_head_hexsha": "4c00d3f9db96447e69bd3f426d59524f7b5f3ef5",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "frodre/LMR",
"max_stars_repo_path": "tests/test_lmr_utils.py",
"max_stars_repo_stars_event_max_datetime": "2021-03-17T22:48:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-08-27T18:50:36.000Z",
"num_tokens": 1498,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5003
} |
      program opkdemo7
c-----------------------------------------------------------------------
c Demonstration program for the DLSODI package.
c This is the version of 14 June 2001.
c
c This version is in double precision.
c
C this program solves a semi-discretized form of the Burgers equation,
c
c              u  = -(u*u/2)  + eta * u
c               t           x        xx
c
c for a = -1 .le. x .le. 1 = b, t .ge. 0.
c Here eta = 0.05.
c Boundary conditions: u(-1,t) = u(1,t) = 0.
c Initial profile: square wave
c u(0,x) = 0 for 1/2 .lt. abs(x) .le. 1
c u(0,x) = 1/2 for abs(x) = 1/2
c u(0,x) = 1 for 0 .le. abs(x) .lt. 1/2
c
c An ODE system is generated by a simplified Galerkin treatment
c of the spatial variable x.
c
c Reference:
c R. C. Y. Chin, G. W. Hedstrom, and K. E. Karlsson,
c A Simplified Galerkin Method for Hyperbolic Equations,
c Math. Comp., vol. 33, no. 146 (April 1979), pp. 647-658.
c
c The problem is run with the DLSODI package with a 10-point mesh
c and a 100-point mesh. In each case, it is run with two tolerances
c and for various appropriate values of the method flag mf.
c Output is on unit lout, set to 6 in a data statement below.
c-----------------------------------------------------------------------
c External procedures supplied elsewhere: res (residual routine),
c addafl/addabd (add matrix A, full/banded), jacfl/jacbd (Jacobian,
c full/banded), and the error-lookup function elkup.
      external res, addabd, addafl, jacbd, jacfl
      integer i, io, istate, itol, iwork, j,
     1   lout, liw, lrw, meth, miter, mf, ml, mu,
     2   n, nout, npts, nerr,
     3   nptsm1, n14, n34, n14m1, n14p1, n34m1, n34p1
      integer nm1
      double precision a, b, eta, delta,
     1   zero, fourth, half, one, hun,
     2   t, tout, tlast, tinit, errfac,
     3   atol, rtol, rwork, y, ydoti, elkup
      double precision eodsq, r4d
      dimension n(1)
      dimension y(99), ydoti(99), tout(4), atol(2), rtol(2)
      dimension rwork(2002), iwork(125)
c Pass problem parameters in the Common block test1.
      common /test1/ r4d, eodsq, nm1
c
c Set problem parameters and run parameters
      data eta/0.05d0/, a/-1.0d0/, b/1.0d0/
      data zero/0.0d0/, fourth/0.25d0/, half/.5d0/, one/1.0d0/,
     1   hun/100.0d0/
      data tinit/0.0d0/, tlast/0.4d0/
      data tout/.10d0,.20d0,.30d0,.40d0/
      data ml/1/, mu/1/, lout/6/
      data nout/4/, lrw/2002/, liw/125/
      data itol/1/, rtol/1.0d-3, 1.0d-6/, atol/1.0d-3, 1.0d-6/
c
c Half-bandwidths for the banded solvers; nerr counts failed runs.
      iwork(1) = ml
      iwork(2) = mu
      nerr = 0
c
c Loop over two values of npts.
      do 300 npts = 10, 100, 90
c
c Compute the mesh width delta and other parameters.
      delta = (b - a)/npts
      r4d = fourth/delta
      eodsq = eta/delta**2
      nptsm1 = npts - 1
      n14 = npts/4
      n34 = 3 * n14
      n14m1 = n14 - 1
      n14p1 = n14m1 + 2
      n34m1 = n34 - 1
      n34p1 = n34m1 + 2
      n(1) = nptsm1
      nm1 = n(1) - 1
c
c Set the initial profile (for output purposes only).
c
      do 10 i = 1,n14m1
   10 y(i) = zero
      y(n14) = half
      do 20 i = n14p1,n34m1
   20 y(i) = one
      y(n34) = half
      do 30 i = n34p1,nptsm1
   30 y(i) = zero
c
      if (npts .gt. 10) write (lout,1010)
      write (lout,1000)
      write (lout,1100) eta,a,b,tinit,tlast,ml,mu,n(1)
      write (lout,1200) zero, (y(i), i=1,n(1)), zero
c
c The j loop is over error tolerances.
c
      do 200 j = 1,2
c
c Loop over method flag loop (for demonstration).
c
      do 100 meth = 1,2
      do 100 miter = 1,5
c Skip combinations not exercised here: miter 3 is unused, the full
c linear solvers (miter 1,2) only run on the small mesh, and miter 5
c (banded, difference-quotient Jacobian) only on the large mesh.
      if (miter .eq. 3) go to 100
      if (miter .le. 2 .and. npts .gt. 10) go to 100
      if (miter .eq. 5 .and. npts .lt. 100) go to 100
c Method flag: tens digit = integration method, units digit = miter.
      mf = 10*meth + miter
c
c Set the initial profile.
c
      do 40 i = 1,n14m1
   40 y(i) = zero
      y(n14) = half
      do 50 i = n14p1,n34m1
   50 y(i) = one
      y(n34) = half
      do 60 i = n34p1,nptsm1
   60 y(i) = zero
c
      t = tinit
      istate = 0
c
      write (lout,1500) rtol(j), atol(j), mf, npts
c
c Output loop for each case
c
      do 80 io = 1,nout
c
c call DLSODI
      if (miter .le. 2) call dlsodi (res, addafl, jacfl, n, y,
     1   ydoti, t, tout(io), itol, rtol(j), atol(j),
     2   1, istate, 0, rwork, lrw, iwork, liw, mf)
      if (miter .ge. 4) call dlsodi (res, addabd, jacbd, n, y,
     1   ydoti, t, tout(io), itol, rtol(j), atol(j),
     2   1, istate, 0, rwork, lrw, iwork, liw, mf)
      write (lout,2000) t, rwork(11), iwork(14),(y(i), i=1,n(1))
c
c If istate is not 2 on return, print message and loop.
      if (istate .ne. 2) then
      write (lout,4000) mf, t, istate
      nerr = nerr + 1
      go to 100
      endif
c
   80 continue
c
      write (lout,3000) mf, iwork(11), iwork(12), iwork(13),
     1   iwork(17), iwork(18)
c
c Estimate final error and print result.
c Note: itemp is implicitly typed (integer by the default I-N rule);
c this legacy program does not use implicit none.
      itemp = n(1)
      errfac = elkup( itemp, y, rwork(21), itol, rtol(j), atol(j) )
      if (errfac .gt. hun) then
      write (lout,5001) errfac
      nerr = nerr + 1
      else
      write (lout,5000) errfac
      endif
  100 continue
  200 continue
  300 continue
c
      write (lout,6000) nerr
c stop
c
 1000 format(20x,' Demonstration Problem for DLSODI')
 1010 format(///80('*')///)
 1100 format(/10x,' Simplified Galerkin Solution of Burgers Equation'//
     1   13x,'Diffusion coefficient is eta =',d10.2/
     2   13x,'Uniform mesh on interval',d12.3,' to ',d12.3/
     3   13x,'Zero boundary conditions'/
     4   13x,'Time limits: t0 = ',d12.5,' tlast = ',d12.5/
     5   13x,'Half-bandwidths ml = ',i2,' mu = ',i2/
     6   13x,'System size neq = ',i3/)
c
 1200 format('Initial profile:'/17(6d12.4/))
c
 1500 format(///80('-')///'Run with rtol =',d12.2,' atol =',d12.2,
     1   ' mf =',i3,' npts =',i4,':'//)
c
 2000 format('Output for time t = ',d12.5,' current h =',
     1   d12.5,' current order =',i2,':'/17(6d12.4/))
c
 3000 format(//'Final statistics for mf = ',i2,':'/
     1   i4,' steps,',i5,' res,',i4,' Jacobians,',
     2   ' rwork size =',i6,', iwork size =',i6)
c
 4000 format(///80('*')//20x,'Final time reached for mf = ',i2,
     1   ' was t = ',d12.5/25x,'at which istate = ',i2////80('*'))
 5000 format(' Final output is correct to within ',d8.1,
     1   ' times local error tolerance')
 5001 format(' Final output is wrong by ',d8.1,
     1   ' times local error tolerance')
 6000 format(//80('*')//
     1   'Run completed. Number of errors encountered =',i3)
c
c end of main program for the DLSODI demonstration problem.
      end
| {
"alphanum_fraction": 0.5435811327,
"author": null,
"avg_line_length": 32.592039801,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "5cf007c9de780f24f3e4f1c8640b83fb8d041eb0",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f6cf2591ceef8a3a80e04da9b414cdf60a25a90f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sbwhitecap/clocc-hg",
"max_forks_repo_path": "src/f2cl/packages/odepack/demo7/opkdemo7.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f6cf2591ceef8a3a80e04da9b414cdf60a25a90f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sbwhitecap/clocc-hg",
"max_issues_repo_path": "src/f2cl/packages/odepack/demo7/opkdemo7.f",
"max_line_length": 72,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f6cf2591ceef8a3a80e04da9b414cdf60a25a90f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sbwhitecap/clocc-hg",
"max_stars_repo_path": "src/f2cl/packages/odepack/demo7/opkdemo7.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2520,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 6551
} |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import os
import sys
from lib.utils import pc_util
DUMP_CONF_THRESH = 0.5 # Dump boxes with obj prob larger than that.
def softmax(x):
    '''Numerically stable softmax over the last axis of a numpy array.'''
    last_axis = x.ndim - 1
    # Subtract the per-row max before exponentiating to avoid overflow.
    shifted = x - np.max(x, axis=last_axis, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=last_axis, keepdims=True)
def dump_results(end_points, dump_dir, config, inference_switch=False):
    ''' Dump network inputs, predictions and (optionally) groundtruth
    to PLY/OBJ/txt files for visualization.

    Args:
        end_points: dict
            {..., pred_mask}
            pred_mask is a binary mask array of size (batch_size, num_proposal) computed by running NMS and empty box removal
        dump_dir: str, output directory (created if missing)
        config: object whose param2obb converts (center, heading class/residual,
            size class/residual) into an oriented bounding box
        inference_switch: bool, if True skip dumping groundtruth
    Returns:
        None
    '''
    if not os.path.exists(dump_dir):
        os.system('mkdir %s'%(dump_dir))

    # INPUT
    point_clouds = end_points['point_clouds'].cpu().numpy()
    batch_size = point_clouds.shape[0]

    # NETWORK OUTPUTS
    seed_xyz = end_points['seed_xyz'].detach().cpu().numpy() # (B,num_seed,3)
    if 'vote_xyz' in end_points:
        # Fix: the original fetched aggregated_vote_xyz twice; once suffices.
        aggregated_vote_xyz = end_points['aggregated_vote_xyz'].detach().cpu().numpy()
    objectness_scores = end_points['objectness_scores'].detach().cpu().numpy() # (B,K,2)
    pred_center = end_points['center'].detach().cpu().numpy() # (B,K,3)
    pred_heading_class = torch.argmax(end_points['heading_scores'], -1) # B,num_proposal
    pred_heading_residual = torch.gather(end_points['heading_residuals'], 2, pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
    pred_heading_class = pred_heading_class.detach().cpu().numpy() # B,num_proposal
    pred_heading_residual = pred_heading_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal
    pred_size_class = torch.argmax(end_points['size_scores'], -1) # B,num_proposal
    pred_size_residual = torch.gather(end_points['size_residuals'], 2, pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,3)) # B,num_proposal,1,3
    pred_size_residual = pred_size_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal,3

    # OTHERS
    pred_mask = end_points['pred_mask'] # B,num_proposal
    idx_beg = 0

    for i in range(batch_size):
        pc = point_clouds[i,:,:]
        objectness_prob = softmax(objectness_scores[i,:,:])[:,1] # (K,)

        # Dump various point clouds
        pc_util.write_ply(pc, os.path.join(dump_dir, '%06d_pc.ply'%(idx_beg+i)))
        pc_util.write_ply(seed_xyz[i,:,:], os.path.join(dump_dir, '%06d_seed_pc.ply'%(idx_beg+i)))
        if 'vote_xyz' in end_points:
            pc_util.write_ply(end_points['vote_xyz'][i,:,:], os.path.join(dump_dir, '%06d_vgen_pc.ply'%(idx_beg+i)))
            # Fix: the original wrote this identical file twice in a row.
            pc_util.write_ply(aggregated_vote_xyz[i,:,:], os.path.join(dump_dir, '%06d_aggregated_vote_pc.ply'%(idx_beg+i)))
        pc_util.write_ply(pred_center[i,:,0:3], os.path.join(dump_dir, '%06d_proposal_pc.ply'%(idx_beg+i)))
        if np.sum(objectness_prob>DUMP_CONF_THRESH)>0:
            pc_util.write_ply(pred_center[i,objectness_prob>DUMP_CONF_THRESH,0:3], os.path.join(dump_dir, '%06d_confident_proposal_pc.ply'%(idx_beg+i)))

        # Dump predicted bounding boxes
        if np.sum(objectness_prob>DUMP_CONF_THRESH)>0:
            num_proposal = pred_center.shape[1]
            obbs = []
            for j in range(num_proposal):
                obb = config.param2obb(pred_center[i,j,0:3], pred_heading_class[i,j], pred_heading_residual[i,j],
                                       pred_size_class[i,j], pred_size_residual[i,j])
                obbs.append(obb)
            if len(obbs)>0:
                obbs = np.vstack(tuple(obbs)) # (num_proposal, 7)
                pc_util.write_oriented_bbox(obbs[objectness_prob>DUMP_CONF_THRESH,:], os.path.join(dump_dir, '%06d_pred_confident_bbox.ply'%(idx_beg+i)))
                pc_util.write_oriented_bbox(obbs[np.logical_and(objectness_prob>DUMP_CONF_THRESH, pred_mask[i,:]==1),:], os.path.join(dump_dir, '%06d_pred_confident_nms_bbox.ply'%(idx_beg+i)))
                pc_util.write_oriented_bbox(obbs[pred_mask[i,:]==1,:], os.path.join(dump_dir, '%06d_pred_nms_bbox.ply'%(idx_beg+i)))
                pc_util.write_oriented_bbox(obbs, os.path.join(dump_dir, '%06d_pred_bbox.ply'%(idx_beg+i)))

    # Return if it is at inference time. No dumping of groundtruths
    if inference_switch:
        return

    # LABELS
    gt_center = end_points['center_label'].cpu().numpy() # (B,MAX_NUM_OBJ,3)
    gt_mask = end_points['box_label_mask'].cpu().numpy() # B,K2
    gt_heading_class = end_points['heading_class_label'].cpu().numpy() # B,K2
    gt_heading_residual = end_points['heading_residual_label'].cpu().numpy() # B,K2
    gt_size_class = end_points['size_class_label'].cpu().numpy() # B,K2
    gt_size_residual = end_points['size_residual_label'].cpu().numpy() # B,K2,3
    objectness_label = end_points['objectness_label'].detach().cpu().numpy() # (B,K,)
    objectness_mask = end_points['objectness_mask'].detach().cpu().numpy() # (B,K,)

    for i in range(batch_size):
        if np.sum(objectness_label[i,:])>0:
            pc_util.write_ply(pred_center[i,objectness_label[i,:]>0,0:3], os.path.join(dump_dir, '%06d_gt_positive_proposal_pc.ply'%(idx_beg+i)))
        if np.sum(objectness_mask[i,:])>0:
            pc_util.write_ply(pred_center[i,objectness_mask[i,:]>0,0:3], os.path.join(dump_dir, '%06d_gt_mask_proposal_pc.ply'%(idx_beg+i)))
        pc_util.write_ply(gt_center[i,:,0:3], os.path.join(dump_dir, '%06d_gt_centroid_pc.ply'%(idx_beg+i)))
        pc_util.write_ply_color(pred_center[i,:,0:3], objectness_label[i,:], os.path.join(dump_dir, '%06d_proposal_pc_objectness_label.obj'%(idx_beg+i)))

        # Dump GT bounding boxes
        obbs = []
        for j in range(gt_center.shape[1]):
            if gt_mask[i,j] == 0: continue
            obb = config.param2obb(gt_center[i,j,0:3], gt_heading_class[i,j], gt_heading_residual[i,j],
                                   gt_size_class[i,j], gt_size_residual[i,j])
            obbs.append(obb)
        if len(obbs)>0:
            obbs = np.vstack(tuple(obbs)) # (num_gt_objects, 7)
            pc_util.write_oriented_bbox(obbs, os.path.join(dump_dir, '%06d_gt_bbox.ply'%(idx_beg+i)))

    # OPTIONALLY, also dump prediction and gt details
    if 'batch_pred_map_cls' in end_points:
        for ii in range(batch_size):
            fout = open(os.path.join(dump_dir, '%06d_pred_map_cls.txt'%(ii)), 'w')
            for t in end_points['batch_pred_map_cls'][ii]:
                fout.write(str(t[0])+' ')
                fout.write(",".join([str(x) for x in list(t[1].flatten())]))
                fout.write(' '+str(t[2]))
                fout.write('\n')
            fout.close()
    if 'batch_gt_map_cls' in end_points:
        for ii in range(batch_size):
            fout = open(os.path.join(dump_dir, '%06d_gt_map_cls.txt'%(ii)), 'w')
            for t in end_points['batch_gt_map_cls'][ii]:
                fout.write(str(t[0])+' ')
                fout.write(",".join([str(x) for x in list(t[1].flatten())]))
                fout.write('\n')
            fout.close()
| {
"alphanum_fraction": 0.6495245748,
"author": null,
"avg_line_length": 53.7194244604,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8a1bb7882ac47f0c13ead5b3bc500c0348d54b91",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 33,
"max_forks_repo_forks_event_max_datetime": "2022-03-26T06:18:12.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-09T07:55:06.000Z",
"max_forks_repo_head_hexsha": "df32ccb93f953d371ff1f4c5a5cd25e2c7f6f067",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "raviteja-kvns/PointContrast",
"max_forks_repo_path": "downstream/votenet_det_new/models/dump_helper.py",
"max_issues_count": 27,
"max_issues_repo_head_hexsha": "df32ccb93f953d371ff1f4c5a5cd25e2c7f6f067",
"max_issues_repo_issues_event_max_datetime": "2022-03-10T08:52:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-11-20T13:19:36.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "raviteja-kvns/PointContrast",
"max_issues_repo_path": "downstream/votenet_det_new/models/dump_helper.py",
"max_line_length": 192,
"max_stars_count": 244,
"max_stars_repo_head_hexsha": "47e868281d35b26e186ca41a9b6d1906b50dbcfa",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mbanani/PointContrast",
"max_stars_repo_path": "downstream/votenet_det_new/models/dump_helper.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T18:49:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-09T02:45:20.000Z",
"num_tokens": 1992,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7467
} |
import os
import random
from typing import List
import numpy as np
import torch
from nobos_commons.data_structures.constants.dataset_part import DatasetPart
from nobos_commons.data_structures.dimension import ImageSize
from nobos_torch_lib.configs.training_configs.training_config_base import TrainingConfigBase
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import EhpiDataset, RemoveJointsOutsideImgEhpi, \
ScaleEhpi, TranslateEhpi, FlipEhpi, NormalizeEhpi, RemoveJointsEhpi
from nobos_torch_lib.datasets.samplers.imbalanced_dataset_sampler import ImbalancedDatasetSampler
from nobos_torch_lib.learning_rate_schedulers.learning_rate_scheduler_stepwise import \
LearningRateSchedulerStepwise
from nobos_torch_lib.models.detection_models.shufflenet_v2 import ShuffleNetV2
from torch.utils.data import ConcatDataset, DataLoader
from torchvision.transforms import transforms
from ehpi_action_recognition.config import ehpi_dataset_path, models_dir
from ehpi_action_recognition.trainer_ehpi import TrainerEhpi
# Joint indexes passed to RemoveJointsEhpi during augmentation below.
# NOTE(review): names suggest these are the feet and knee joints of the
# 15-joint EHPI skeleton — confirm against the EHPI joint ordering.
foot_indexes: List[int] = [11, 14]
knee_indexes: List[int] = [10, 13]
def get_sim_pose_algo_only(dataset_path: str, image_size: ImageSize):
    """Concatenated simulation datasets (pose-algorithm variants) wrapped
    in the standard EHPI augmentation pipeline."""
    num_joints = 15
    left_indexes: List[int] = [3, 4, 5, 9, 10, 11]
    right_indexes: List[int] = [6, 7, 8, 12, 13, 14]

    def make_transform():
        # A fresh Compose per dataset, same chain as the other loaders.
        return transforms.Compose([
            RemoveJointsOutsideImgEhpi(image_size),
            RemoveJointsEhpi(indexes_to_remove=foot_indexes,
                             indexes_to_remove_2=knee_indexes,
                             probability=0.25),
            ScaleEhpi(image_size),
            TranslateEhpi(image_size),
            FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
            NormalizeEhpi(image_size)
        ])

    datasets: List[EhpiDataset] = [
        EhpiDataset(os.path.join(dataset_path, name),
                    transform=make_transform(), num_joints=num_joints)
        for name in ("ofp_sim_pose_algo_equal_30fps",
                     "ofp_from_mocap_pose_algo_30fps")
    ]
    for dataset in datasets:
        dataset.print_label_statistics()
    return ConcatDataset(datasets)
def get_sim_gt_only(dataset_path: str, image_size: ImageSize):
    """Concatenated simulation datasets (ground-truth variants) wrapped
    in the standard EHPI augmentation pipeline."""
    num_joints = 15
    left_indexes: List[int] = [3, 4, 5, 9, 10, 11]
    right_indexes: List[int] = [6, 7, 8, 12, 13, 14]

    def make_transform():
        # A fresh Compose per dataset, same chain as the other loaders.
        return transforms.Compose([
            RemoveJointsOutsideImgEhpi(image_size),
            RemoveJointsEhpi(indexes_to_remove=foot_indexes,
                             indexes_to_remove_2=knee_indexes,
                             probability=0.25),
            ScaleEhpi(image_size),
            TranslateEhpi(image_size),
            FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
            NormalizeEhpi(image_size)
        ])

    datasets: List[EhpiDataset] = [
        EhpiDataset(os.path.join(dataset_path, name),
                    transform=make_transform(), num_joints=num_joints)
        for name in ("ofp_sim_gt_equal_30fps",
                     "ofp_from_mocap_gt_30fps")
    ]
    for dataset in datasets:
        dataset.print_label_statistics()
    return ConcatDataset(datasets)
def get_sim(dataset_path: str, image_size: ImageSize):
    """
    Build the combined simulated training set (pose-algorithm and
    ground-truth variants of the simulated and mocap-derived data).

    Fix: the ``__main__`` driver dispatches every dataset factory as
    ``get_dataset(use_case_dataset_path, image_size=...)``, but this one
    previously accepted only ``image_size`` (raising a TypeError) and
    hard-coded absolute ``/media/disks/beta/datasets/ehpi`` paths. It now
    takes ``dataset_path`` like its sibling ``get_*`` factories.

    Args:
        dataset_path: Root directory that contains the dataset sub-folders.
        image_size: Source image resolution used by the augmentations.

    Returns:
        ConcatDataset over the configured EhpiDataset instances.
    """
    num_joints = 15
    left_indexes: List[int] = [3, 4, 5, 9, 10, 11]
    right_indexes: List[int] = [6, 7, 8, 12, 13, 14]

    def _augmentations() -> transforms.Compose:
        # Shared augmentation pipeline; foot_indexes / knee_indexes are
        # module-level constants defined earlier in this file.
        return transforms.Compose([
            RemoveJointsOutsideImgEhpi(image_size),
            RemoveJointsEhpi(indexes_to_remove=foot_indexes,
                             indexes_to_remove_2=knee_indexes,
                             probability=0.25),
            ScaleEhpi(image_size),
            TranslateEhpi(image_size),
            FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
            NormalizeEhpi(image_size)
        ])

    folders = (
        "ofp_sim_pose_algo_equal_30fps",
        "ofp_from_mocap_pose_algo_30fps",
        "ofp_sim_gt_equal_30fps",
        "ofp_from_mocap_gt_30fps",
    )
    datasets: List[EhpiDataset] = [
        EhpiDataset(os.path.join(dataset_path, folder),
                    transform=_augmentations(), num_joints=num_joints)
        for folder in folders
    ]
    for dataset in datasets:
        dataset.print_label_statistics()
    return ConcatDataset(datasets)
def get_full(dataset_path: str, image_size: ImageSize):
    """
    Build the full training set: real recordings plus simulated data.

    The real sub-sets use the plain augmentation pipeline; the simulated
    sub-sets additionally remove foot/knee joints with probability 0.25.

    Args:
        dataset_path: Root directory that contains the dataset sub-folders.
        image_size: Source image resolution used by the augmentations.

    Returns:
        ConcatDataset over all configured EhpiDataset instances.
    """
    num_joints = 15
    left_indexes: List[int] = [3, 4, 5, 9, 10, 11]
    right_indexes: List[int] = [6, 7, 8, 12, 13, 14]

    def _augmentations(drop_leg_joints: bool) -> transforms.Compose:
        # Common pipeline; simulated data additionally drops foot/knee
        # joints (foot_indexes / knee_indexes are module-level constants).
        steps = [RemoveJointsOutsideImgEhpi(image_size)]
        if drop_leg_joints:
            steps.append(RemoveJointsEhpi(indexes_to_remove=foot_indexes,
                                          indexes_to_remove_2=knee_indexes,
                                          probability=0.25))
        steps.extend([
            ScaleEhpi(image_size),
            TranslateEhpi(image_size),
            FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
            NormalizeEhpi(image_size)
        ])
        return transforms.Compose(steps)

    datasets: List[EhpiDataset] = [
        # Real
        EhpiDataset(os.path.join(dataset_path, "ofp_webcam"),
                    transform=_augmentations(False), num_joints=num_joints),
        EhpiDataset(os.path.join(dataset_path, "ofp_record_2019_03_11_30FPS"),
                    transform=_augmentations(False), num_joints=num_joints),
        EhpiDataset(os.path.join(dataset_path, "ofp_record_2019_03_11_HSRT_30FPS"),
                    transform=_augmentations(False), num_joints=num_joints,
                    dataset_part=DatasetPart.TEST),
        EhpiDataset(os.path.join(dataset_path, "ofp_record_2019_03_11_HELLA_30FPS"),
                    transform=_augmentations(False), num_joints=num_joints,
                    dataset_part=DatasetPart.TRAIN),
        # Freilichtmuseum
        EhpiDataset(os.path.join(dataset_path, "2019_03_13_Freilichtmuseum_30FPS"),
                    transform=_augmentations(False), num_joints=num_joints,
                    dataset_part=DatasetPart.TRAIN),
        # Simulated
        EhpiDataset(os.path.join(dataset_path, "ofp_from_mocap_30fps/"),
                    transform=_augmentations(True), num_joints=num_joints),
        EhpiDataset(os.path.join(dataset_path, "ofp_sim_pose_algo_equal_30fps"),
                    transform=_augmentations(True), num_joints=num_joints),
        EhpiDataset(os.path.join(dataset_path, "ofp_sim_gt_equal_30fps"),
                    transform=_augmentations(True), num_joints=num_joints),
        EhpiDataset(os.path.join(dataset_path, "ofp_from_mocap_gt_30fps"),
                    transform=_augmentations(True), num_joints=num_joints),
    ]
    for dataset in datasets:
        dataset.print_label_statistics()
    return ConcatDataset(datasets)
def get_set_wo_sim(dataset_path: str, image_size: ImageSize):
    """
    Build the training set of real recordings only (no simulated data).

    Args:
        dataset_path: Root directory that contains the dataset sub-folders.
        image_size: Source image resolution used by the augmentations.

    Returns:
        ConcatDataset over the configured EhpiDataset instances.
    """
    num_joints = 15
    left_indexes: List[int] = [3, 4, 5, 9, 10, 11]
    right_indexes: List[int] = [6, 7, 8, 12, 13, 14]

    def _augmentations() -> transforms.Compose:
        # Shared augmentation pipeline for all real sub-sets.
        return transforms.Compose([
            RemoveJointsOutsideImgEhpi(image_size),
            ScaleEhpi(image_size),
            TranslateEhpi(image_size),
            FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
            NormalizeEhpi(image_size)
        ])

    datasets: List[EhpiDataset] = [
        EhpiDataset(os.path.join(dataset_path, "ofp_webcam"),
                    transform=_augmentations(), num_joints=num_joints),
        EhpiDataset(os.path.join(dataset_path, "ofp_record_2019_03_11_30FPS"),
                    transform=_augmentations(), num_joints=num_joints),
        EhpiDataset(os.path.join(dataset_path, "ofp_record_2019_03_11_HSRT_30FPS"),
                    transform=_augmentations(), num_joints=num_joints,
                    dataset_part=DatasetPart.TEST),
        EhpiDataset(os.path.join(dataset_path, "ofp_record_2019_03_11_HELLA_30FPS"),
                    transform=_augmentations(), num_joints=num_joints,
                    dataset_part=DatasetPart.TRAIN),
        # Freilichtmuseum
        EhpiDataset(os.path.join(dataset_path, "2019_03_13_Freilichtmuseum_30FPS"),
                    transform=_augmentations(), num_joints=num_joints,
                    dataset_part=DatasetPart.TRAIN),
    ]
    for dataset in datasets:
        dataset.print_label_statistics()
    return ConcatDataset(datasets)
def set_seed(seed):
    """
    Seed all relevant RNGs (Python, PyTorch CPU/CUDA, NumPy) for
    reproducible training runs.

    Args:
        seed: Integer seed applied to every RNG.
    """
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Bug fix: previously np.random.seed(0) -- NumPy was always seeded with
    # the constant 0, so NumPy-driven augmentation did not vary with `seed`.
    np.random.seed(seed)
if __name__ == '__main__':
    batch_size = 128
    # Five fixed seeds -> five independent training runs per dataset variant.
    seeds = [0, 104, 123, 142, 200]
    # Experiment name -> dataset factory used to build its training set.
    datasets = {
        "sim_pose_algo_only": get_sim_pose_algo_only,
        "sim_gt_only": get_sim_gt_only,
        "wo_sim": get_set_wo_sim,
        "sim": get_sim,
        "full": get_full
    }
    for seed in seeds:
        # ehpi_dataset_path is a module-level setting defined earlier in this file.
        use_case_dataset_path = os.path.join(ehpi_dataset_path, "use_case")
        for dataset_name, get_dataset in datasets.items():
            # Train set
            set_seed(seed)
            # NOTE(review): every factory is invoked as f(path, image_size=...);
            # get_sim must accept a dataset_path argument as well -- verify.
            train_set = get_dataset(use_case_dataset_path, image_size=ImageSize(1280, 720))
            # Oversample rare action classes so each batch is roughly balanced.
            sampler = ImbalancedDatasetSampler(train_set, dataset_type=EhpiDataset)
            train_loader = DataLoader(train_set, batch_size=batch_size, sampler=sampler, num_workers=8)
            # config
            train_config = TrainingConfigBase("itsc2019_{}_seed_{}".format(dataset_name, seed),
                                              os.path.join(models_dir, "train_use_case"))
            # LR decays by 10x every 50 epochs, starting from 0.05.
            train_config.learning_rate_scheduler = LearningRateSchedulerStepwise(lr_decay=0.1, lr_decay_epoch=50)
            train_config.learning_rate = 0.05
            train_config.weight_decay = 5e-4
            train_config.num_epochs = 140
            trainer = TrainerEhpi()
            trainer.train(train_loader, train_config, model=ShuffleNetV2(3))
| {
"alphanum_fraction": 0.591103307,
"author": null,
"avg_line_length": 51.1526946108,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a1fc74720ade7c49bc0d5ff8a98f49c7e45e0c19",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 21,
"max_forks_repo_forks_event_max_datetime": "2022-03-05T05:51:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-05-08T03:29:12.000Z",
"max_forks_repo_head_hexsha": "4318e82e541c9b42bf0af7976815229ed6261c39",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "steuwe/ehpi_action_recognition",
"max_forks_repo_path": "ehpi_action_recognition/paper_reproduction_code/trainings/ehpi/train_ehpi_itsc_2019_ofp.py",
"max_issues_count": 15,
"max_issues_repo_head_hexsha": "4318e82e541c9b42bf0af7976815229ed6261c39",
"max_issues_repo_issues_event_max_datetime": "2022-02-17T12:16:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-06-14T13:30:12.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "steuwe/ehpi_action_recognition",
"max_issues_repo_path": "ehpi_action_recognition/paper_reproduction_code/trainings/ehpi/train_ehpi_itsc_2019_ofp.py",
"max_line_length": 120,
"max_stars_count": 100,
"max_stars_repo_head_hexsha": "4318e82e541c9b42bf0af7976815229ed6261c39",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "steuwe/ehpi_action_recognition",
"max_stars_repo_path": "ehpi_action_recognition/paper_reproduction_code/trainings/ehpi/train_ehpi_itsc_2019_ofp.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-23T08:59:51.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-16T17:18:02.000Z",
"num_tokens": 3625,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 17085
} |
"""
Octree geometry handler
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import h5py
import numpy as np
import string, re, gc, time
from yt.extern.six.moves import cPickle
from yt.extern.six.moves import zip as izip
import weakref
from itertools import chain
from yt.funcs import *
from yt.utilities.logger import ytLogger as mylog
from yt.arraytypes import blankRecordArray
from yt.config import ytcfg
from yt.fields.field_info_container import NullFunc
from yt.geometry.geometry_handler import Index, YTDataChunk
from yt.utilities.definitions import MAXLEVEL
from yt.utilities.io_handler import io_registry
from yt.utilities.parallel_tools.parallel_analysis_interface import \
ParallelAnalysisInterface
from yt.data_objects.data_containers import data_object_registry
class OctreeIndex(Index):
    """The Index subclass for oct AMR datasets"""

    def _setup_geometry(self):
        # Build the oct handler that backs all geometric queries.
        mylog.debug("Initializing Octree Geometry Handler.")
        self._initialize_oct_handler()

    def get_smallest_dx(self):
        """
        Returns (in code units) the smallest cell size in the simulation.
        """
        finest_dims = self.dataset.domain_dimensions * 2**(self.max_level)
        cell_widths = self.dataset.domain_width / finest_dims
        return cell_widths.min()

    def convert(self, unit):
        # Delegate unit conversion to the dataset's conversion table.
        return self.dataset.conversion_factors[unit]
| {
"alphanum_fraction": 0.6805896806,
"author": null,
"avg_line_length": 30.1481481481,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7b27e471f181ac1b4af50a5718d9e57036437443",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-12-05T05:51:09.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-12-05T05:51:09.000Z",
"max_forks_repo_head_hexsha": "5f91d2fb8721c4c5da0af543a6256ed979cd9fc9",
"max_forks_repo_licenses": [
"BSD-3-Clause-Clear"
],
"max_forks_repo_name": "danielgrassinger/yt_new_frontend",
"max_forks_repo_path": "yt/geometry/oct_geometry_handler.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "5f91d2fb8721c4c5da0af543a6256ed979cd9fc9",
"max_issues_repo_issues_event_max_datetime": "2016-04-05T22:30:14.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-04-05T22:30:14.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause-Clear"
],
"max_issues_repo_name": "danielgrassinger/yt_new_frontend",
"max_issues_repo_path": "yt/geometry/oct_geometry_handler.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5f91d2fb8721c4c5da0af543a6256ed979cd9fc9",
"max_stars_repo_licenses": [
"BSD-3-Clause-Clear"
],
"max_stars_repo_name": "danielgrassinger/yt_new_frontend",
"max_stars_repo_path": "yt/geometry/oct_geometry_handler.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 314,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1628
} |
import config as cf
import numpy as np
import pygame as pg
class Circle:
    """Times-table style pattern: points and chords drawn on a circle."""

    def __init__(self, points):
        # Number of points of the pattern; radius comes from the config module.
        self.points = points
        self.radius = cf.RADIUS

    def draw(self, screen, factor, pt, lines, circle):
        """Draw the pattern: `pt` point pairs on the circle, a moving dot on
        each chord (phase controlled by `factor`), plus optional chord lines
        and the outline circle itself."""
        center_x = cf.SCREEN_WIDTH / 2
        center_y = cf.SCREEN_HEIGHT / 2
        if circle:
            # Outline of the base circle, 1 px wide.
            pg.draw.circle(
                screen,
                cf.WHITE,
                (int(center_x), int(center_y)),
                self.radius,
                1,
            )
        for k in range(pt):
            theta = np.pi * k / pt
            start = (
                int(self.radius * np.cos(theta) + center_x),
                int(self.radius * np.sin(theta) + center_y),
            )
            opposite = np.pi * k / pt + np.pi
            end = (
                int(self.radius * np.cos(opposite) + center_x),
                int(self.radius * np.sin(opposite) + center_y),
            )
            # Blend factor in [0, 1] oscillating with `factor` moves the dot
            # back and forth along the chord from `start` to `end`.
            blend = 0.5 + np.cos(theta + factor) / 2
            dot_x = (end[0] - start[0]) * blend + start[0]
            dot_y = (end[1] - start[1]) * blend + start[1]
            pg.draw.circle(screen, cf.WHITE, (dot_x, dot_y), 4, 0)
            if lines:
                pg.draw.line(screen, cf.RED, start, end, 1)
| {
"alphanum_fraction": 0.4928270042,
"author": null,
"avg_line_length": 34.8529411765,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1012fbac3c8905e8af44df58d58006ea92c3501a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1b35c7dfc3b5ac3d105226db2be9ce6434da0a11",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "engeir/maths_snack",
"max_forks_repo_path": "src/maths_snack/programs/circle_illusion/circle.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "1b35c7dfc3b5ac3d105226db2be9ce6434da0a11",
"max_issues_repo_issues_event_max_datetime": "2022-03-12T01:08:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-03-12T01:08:36.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "engeir/maths_snack",
"max_issues_repo_path": "src/maths_snack/programs/circle_illusion/circle.py",
"max_line_length": 73,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "1b35c7dfc3b5ac3d105226db2be9ce6434da0a11",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "engeir/maths_snack",
"max_stars_repo_path": "src/maths_snack/programs/circle_illusion/circle.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 337,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1185
} |
function [all_bboxes_out] = localize_bboxes_of_all_imgs(...
model, image_paths, all_bboxes_in, dst_directory, image_set_name, varargin)
% localize_bboxes_of_all_imgs: given a bounding box localization model and
% a set of images with their input candidate bounding boxes, for each
% image it predicts new bounding box coordinates such that the new boxes
% will be close (i.e. better localize) the objects of interest.
%
% INPUTS:
% 1) model: (type struct) the bounding box localization model
% 2) image_paths: NI x 1 cell array with the image paths of the set in the
% form of strings. NI is the number of images.
% 3) all_bboxes_in: a cell array with the candidate bounding
% boxes that are going to be processed. It can be of two forms:
% a) (the flag is_per_class must being set to false) a NI x 1 cell array
% where NI is the number of images. The all_bboxes_in{i} element
% contains the candidate bounding boxes of the i-th image in the form of a
% NB_i x 4 array where NB_i is the number of candidate bounding boxes of
% the i-th image. The 4 columns of this array contain the bounding box
% coordinates in the form of [x0,y0,x1,y1] (where the (x0,y0) and (x1,y1)
% are the top-left and bottom-right corners).
% b) (the flag is_per_class must being set to true) a C x 1 cell array
% where C is the number of categories. The all_bboxes_in{j} element
% in this case is NI x 1 cell array, where NI is the number of images,
% with the candidate bounding boxes of the j-th category for each image.
% The element all_bboxes_in{j}{i} is a NB_{i,j} x 4 array, where
% NB_{i,j} is the number candidate bounding box of the i-th
% image for the j-th category. The 4 columns of this array are the
% bounding boxes in the form of [x0,y0,x1,y1] (where the (x0,y0) and (x1,y1)
% are the top-left and bottom-right corners).
% 4) dst_directory: string with the path of the destination directory where
% the results are going to be cached.
% 5) image_set_name: string with the name of the image set that is being
% processed. It will be used on the name of the file where the results will
% be saved.
% 6) The rest input arguments are given in the form of Name,Value pair
% arguments and are:
% 'is_per_class': boolean value that if set to false, then the 3.a) form
% of the all_bboxes_in input parameter will be expected; otherwise, if set to
% true then the 3.b) form of the all_bboxes_in input parameter will be
% expected. Default value: true (matches the inputParser default below)
% 'suffix': string with a suffix that it will be used on the name of the
% file where the results will be saved. Default value: ''
% 'all_bbox_gt': (optional) a NI cell array with the ground truth bounding
% boxes of each image; it will be used (if given) for printing the mAP on
% regular (see param 'checkpoint_step') step during processing the image set
% 'checkpoint_step': scalar value; during processing the NI images, after each
% checkpoint_step number of images the results are cached and the mAP
% results are printed. Default value: 500
%
% This file is part of the code that implements the following paper:
% Title : "LocNet: Improving Localization Accuracy for Object Detection"
% Authors : Spyros Gidaris, Nikos Komodakis
% Institution: Universite Paris Est, Ecole des Ponts ParisTech
% ArXiv link : http://arxiv.org/abs/1511.07763
% code : https://github.com/gidariss/LocNet
%
% AUTORIGHTS
% --------------------------------------------------------
% Copyright (c) 2016 Spyros Gidaris
%
% Title : "LocNet: Improving Localization Accuracy for Object Detection"
% ArXiv link: http://arxiv.org/abs/1511.07763
% Licensed under The MIT License [see LICENSE for details]
% ---------------------------------------------------------
% Parse the optional Name/Value arguments.
ip = inputParser;
ip.addParamValue('is_per_class', true, @islogical);
ip.addParamValue('suffix', '', @ischar);
ip.addParamValue('all_bbox_gt', {}, @iscell);
ip.addParamValue('checkpoint_step', 500, @isnumeric);
ip.parse(varargin{:});
opts = ip.Results;
% Destination paths: final cached results, in-progress checkpoint, and a
% timestamped log file for this run.
mkdir_if_missing(dst_directory);
filepath_results = [dst_directory, filesep, 'loc_boxes_', opts.suffix, image_set_name, '.mat'];
in_progress_filepath = [dst_directory, filesep, 'loc_boxes_', opts.suffix, image_set_name, '_in_progress.mat'];
timestamp = datestr(datevec(now()), 'yyyymmdd_HHMMSS');
log_file = fullfile(dst_directory, 'output', ...
['log_file_localization',opts.suffix, image_set_name, '_', timestamp, '.txt']);
mkdir_if_missing(fileparts(log_file));
t_start = tic();
% If the results were already computed and cached, just load them.
% Otherwise (the load throws) process all images with console output
% captured in the log file, cache the result, and drop the checkpoint.
try
all_bboxes_out = load_bboxes_scores(filepath_results);
catch
diary(log_file);
all_bboxes_out = process_images(...
model, image_paths, all_bboxes_in, opts.all_bbox_gt, ...
opts.is_per_class, in_progress_filepath, opts.checkpoint_step);
save_bboxes_scores(filepath_results, all_bboxes_out);
delete(in_progress_filepath);
diary off;
end
fprintf('Localize bounding box proposals in %.4f minutes.\n', toc(t_start)/60);
end
function save_bboxes_scores(filename, bboxes_scores)
% Cache the localized boxes/scores to disk; -v7.3 supports variables >2GB.
save(filename, 'bboxes_scores', '-v7.3');
end
function bboxes_scores = load_bboxes_scores(filename)
% Load the cached localized boxes/scores saved by save_bboxes_scores.
% Idiom fix: load into an output struct instead of "poofing" the variable
% into the function workspace, which is fragile and defeats static analysis.
ld = load(filename, 'bboxes_scores');
bboxes_scores = ld.bboxes_scores;
end
function [all_bboxes_out] = process_images(...
model, image_paths, all_bboxes_in, all_bbox_gt, is_per_class, ...
in_progress_filepath, checkpoint_step)
% Run the localization model on every image, post-process the candidate
% detections (NMS), periodically checkpoint progress, and (if ground truth
% is available) report mAP along the way.
num_imgs = length(image_paths);
% IoU threshold for non-max-suppression of the candidate detections.
nms_iou_thrs = 0.3;
% Cap on the total number of detections kept across the whole set, and
% per single image.
max_per_set = 5 * num_imgs;
max_per_image = 100;
num_classes = length(model.classes);
% Backup checkpoint file used if a crash corrupts the primary checkpoint.
in_progress_filepath_prev = [in_progress_filepath, '.prev'];
try
% load (if any) cached subset of results from images that have being
% already processed from a previous run of the script that was stopped
% before finish processing all the images.
try
[fist_img_idx, aboxes, all_bboxes_out, thresh] = load_progress(in_progress_filepath);
catch
[fist_img_idx, aboxes, all_bboxes_out, thresh] = load_progress(in_progress_filepath_prev);
end
mAP = check_progress_on_mAP(aboxes, all_bbox_gt, fist_img_idx-1, model.classes);
mAP_i = fist_img_idx-1;
catch exception
% No usable checkpoint: start from scratch with empty accumulators.
fprintf('Exception message %s\n', getReport(exception));
aboxes = cell(num_classes, 1);
for i = 1:num_classes, aboxes{i} = cell(num_imgs, 1); end
if is_per_class
all_bboxes_out = cell(num_classes,1);
for i = 1:num_classes, all_bboxes_out{i} = cell(num_imgs, 1); end
else
all_bboxes_out = cell(num_imgs,1);
end
% Initial per-class score threshold; raised later by keep_top_k.
thresh = -2.5 * ones(num_classes, 1);
fist_img_idx = 1; mAP = 0; mAP_i = 0;
end
model.max_rois_num_in_gpu = 500; %find_max_rois_num_in_gpu(model);
num_chars = 0;
total_el_time = 0;
for i = fist_img_idx:num_imgs
th = tic;
% get the bounding box proposals of this image
[bbox_proposals, bbox_scores] = get_this_img_bbox_proposals(all_bboxes_in, i, is_per_class);
image = get_image(image_paths{i}); % read the image
% refine the location of each bounding box with the localization model
bbox_refined = localize_bboxes_of_image(model, image, bbox_proposals);
localization_time = toc(th);
th = tic;
% prepare the candidate boundng box detections of this image
bbox_cand_dets = prepare_bbox_cand_dets(bbox_refined, bbox_scores, num_classes, is_per_class);
% prepare the output for this image
all_bboxes_out = prepare_this_img_output(all_bboxes_out, i, is_per_class, bbox_cand_dets);
% perform the post-processing step of non-max-suppresion on the
% candidate boundng box detections of this image
bbox_detections = post_process_candidate_detections(bbox_cand_dets, ...
'thresholds',thresh, 'nms_iou_thrs',nms_iou_thrs,'use_gpu',true,...
'max_per_image',max_per_image);
for j = 1:num_classes, aboxes{j}{i} = bbox_detections{j}; end
postprocessing_time = toc(th);
if mod(i, checkpoint_step) == 0
% Checkpoint: prune to the set-wide top-K detections, save progress,
% and (optionally) report mAP on the images processed so far.
th = tic;
for j = 1:num_classes, [aboxes{j}, thresh(j)] = keep_top_k(aboxes{j}, i, max_per_set, thresh(j)); end
% save the till now progress
save_progress(aboxes, all_bboxes_out, thresh, i, in_progress_filepath, in_progress_filepath_prev);
% evaluate the mAP of the images 1 till i
mAP = check_progress_on_mAP(aboxes, all_bbox_gt, i, model.classes);
mAP_i = i;
diary; diary; % flush diary
%disp(thresh(:)');
checkpoint_time = toc(th);
fprintf('check_point time %.2fs\n',checkpoint_time)
num_chars = 0;
end
elapsed_time = localization_time + postprocessing_time;
[total_el_time, avg_time, est_rem_time] = timing_process(elapsed_time, total_el_time, fist_img_idx, i, num_imgs);
% Overwrite the previous progress line in-place using backspaces.
fprintf(repmat('\b',[1, num_chars]));
num_chars = fprintf('%s: bbox loc %d/%d:| ET %.3fs + %.3fs | AT: %.3fs | TT %.4fmin | ERT %.4fmin | mAP[%d/%d] = %.4f\n', ...
procid(), i, num_imgs, localization_time, postprocessing_time, ...
avg_time, total_el_time/60, est_rem_time/60, mAP_i, num_imgs, mAP);
end
delete(in_progress_filepath_prev);
end
function [total_el_time, ave_time, est_rem_time] = timing_process(...
    elapsed_time, total_el_time, fist_img_idx, i, num_imgs)
% Update running timing statistics for the image-processing loop:
% accumulate the elapsed time of this iteration, derive the average time
% per processed image and an estimate of the remaining time.
num_processed = i - fist_img_idx + 1;
total_el_time = total_el_time + elapsed_time;
ave_time = total_el_time / num_processed;
est_rem_time = (num_imgs - i) * ave_time;
end
function [bbox_proposals, bbox_scores] = get_this_img_bbox_proposals(all_bboxes_in, img_id, is_per_class)
% Extract the candidate boxes (and their scores) of image img_id.
% Returns bbox_proposals as a NB x 5 array [x0,y0,x1,y1,class_idx] and
% bbox_scores with the remaining columns (5:end) of the input rows.
if is_per_class
% Per-class input: concatenate the boxes of all classes, tagging each
% row with its class index in column 5.
bbox_proposals_per_class = cellfun(@(x) x{img_id}(:,1:4), all_bboxes_in, 'UniformOutput', false);
class_indices = [];
for c = 1:length(all_bboxes_in)
class_indices = [class_indices; ones(size(all_bboxes_in{c}{img_id},1),1,'single')*c];
end
bbox_scores_per_class = cellfun(@(x) x{img_id}(:,5:end), all_bboxes_in, 'UniformOutput', false);
% Drop empty per-class cells before cell2mat (empty cells contribute no
% rows to class_indices either, so the row counts stay consistent).
num_bbox_per_class = cellfun(@(x) size(x,1), bbox_proposals_per_class, 'UniformOutput', true);
bbox_proposals = cell2mat(bbox_proposals_per_class(num_bbox_per_class>0));
bbox_scores = cell2mat(bbox_scores_per_class(num_bbox_per_class>0));
bbox_proposals = [bbox_proposals, class_indices];
else
% Class-agnostic input: all boxes get class index 1.
class_indices = ones(size(all_bboxes_in{img_id},1),1,'single');
bbox_proposals = [all_bboxes_in{img_id}(:,1:4),class_indices];
bbox_scores = all_bboxes_in{img_id}(:,5:end);
end
end
function bbox_cand_dets = prepare_bbox_cand_dets(bbox_refined, bbox_scores, num_classes, is_per_class)
% Pair the refined boxes with their scores and, in the per-class case,
% split them into a num_classes x 1 cell array keyed by the class index
% stored in column 5 of bbox_refined.
if is_per_class
    labels = bbox_refined(:,5);
    boxes_with_scores = single([bbox_refined(:,1:4), bbox_scores]);
    bbox_cand_dets = cell(num_classes, 1);
    for class_idx = 1:num_classes
        this_class = boxes_with_scores(labels == class_idx, :);
        % Normalize empties to a 0x5 single array for downstream code.
        if isempty(this_class), this_class = zeros(0, 5, 'single'); end
        bbox_cand_dets{class_idx} = this_class;
    end
else
    % Class-agnostic path: all rows must carry class index 1.
    assert(size(bbox_refined,2) == 5);
    assert(all(bbox_refined(:,5) == 1));
    bbox_cand_dets = single([bbox_refined(:,1:4), bbox_scores]);
end
end
function all_bboxes_out = prepare_this_img_output(all_bboxes_out, img_idx, is_per_class, bbox_this_img)
% Store this image's candidate detections in the accumulated output
% structure (per-class cell-of-cells, or flat per-image cell array).
if ~is_per_class
    all_bboxes_out{img_idx} = bbox_this_img;
    return;
end
for class_idx = 1:numel(bbox_this_img)
    all_bboxes_out{class_idx}{img_idx} = bbox_this_img{class_idx};
end
end
function [fist_img_idx, aboxes, abbox_scores, thresh] = load_progress(in_progress_filepath)
% Restore the checkpoint written by save_progress. Processing resumes at
% the image after the last completed one.
ld = load(in_progress_filepath);
fist_img_idx = ld.progress_state.img_idx + 1;
aboxes = ld.progress_state.aboxes;
abbox_scores = ld.progress_state.abbox_scores;
thresh = ld.progress_state.thresh;
end
function save_progress(aboxes, abbox_scores, thresh, img_idx, in_progress_filepath, in_progress_filepath_prev)
% Checkpoint the processing state so a stopped run can resume later.
% The existing checkpoint is first copied aside so that a crash while
% writing the new file does not destroy all progress.
progress_state = struct;
progress_state.img_idx = img_idx;
progress_state.aboxes = aboxes;
progress_state.abbox_scores = abbox_scores;
progress_state.thresh = thresh;
if exist(in_progress_filepath, 'file')
% in case it crash during updating in_progress_filepath
copyfile(in_progress_filepath, in_progress_filepath_prev);
end
save(in_progress_filepath, 'progress_state', '-v7.3');
end
function mAP = check_progress_on_mAP(aboxes, all_bbox_gt, img_idx, classes)
% Evaluate PASCAL-style average precision on images 1..img_idx and return
% the mean AP over classes. Returns 0 when no ground truth was supplied.
mAP = 0;
if ~isempty(all_bbox_gt)
% keep only the detections of the first img_idx images per class
aboxes = cellfun(@(x) x(1:img_idx), aboxes, 'UniformOutput', false);
mAP_result = evaluate_average_precision_pascal( all_bbox_gt(1:img_idx), aboxes, classes );
printAPResults(classes, mAP_result);
mAP = mean([mAP_result(:).ap]');
end
end
function [boxes, thresh] = keep_top_k(boxes, end_at, top_k, thresh)
% ------------------------------------------------------------------------
% Keep top K
% Keep at most top_k detections (ranked by score, the last column) across
% images 1..end_at: compute the score of the top_k-th detection and prune
% every image's box list to boxes scoring >= that threshold. If no boxes
% exist yet, the input thresh is returned unchanged.
X = cat(1, boxes{1:end_at});
if isempty(X), return; end
scores = sort(X(:,end), 'descend');
thresh = scores(min(length(scores), top_k));
for image_index = 1:end_at
bbox = boxes{image_index};
keep = find(bbox(:,end) >= thresh);
boxes{image_index} = bbox(keep,:);
end
end | {
"alphanum_fraction": null,
"author": "gidariss",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/gidariss-LocNet/LocNet-a4678b87d9e63dcea07d9afd978d1223174d8be3/code/object_localization/localize_bboxes_of_all_imgs.m",
"reason": null,
"repo": "LocNet",
"save_path": "github-repos/MATLAB/gidariss-LocNet",
"sha": "a4678b87d9e63dcea07d9afd978d1223174d8be3",
"size": null
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: ./datprint <img>
Options:
-h --help Show this message
"""
from __future__ import print_function
from dat2bmp import read
if __name__ == '__main__':
    import numpy as np
    from docopt import docopt
    # Parse the CLI described by the module docstring (Usage: ./datprint <img>).
    args = docopt(__doc__)
    # Load the image via dat2bmp.read and dump it as text, one row per line.
    img = np.array(read(args['<img>']))
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            # Values are printed back-to-back with no separator; output is
            # unambiguous only if each value is a single digit (e.g. 0/1) --
            # presumably a binary image. TODO confirm against dat2bmp.read.
            print('%d' % img[i, j], end="")
        print()
| {
"alphanum_fraction": 0.5801687764,
"author": null,
"avg_line_length": 16.9285714286,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5310f498d7ee282ec72a2fb7d207f3c1e2bca4ed",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ebb91adbd33291e80e6c49fa140f87085fac9e5a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "fpeder/pnpmat",
"max_forks_repo_path": "src/script/datprint.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ebb91adbd33291e80e6c49fa140f87085fac9e5a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "fpeder/pnpmat",
"max_issues_repo_path": "src/script/datprint.py",
"max_line_length": 43,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "ebb91adbd33291e80e6c49fa140f87085fac9e5a",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "fpeder/pnpmat",
"max_stars_repo_path": "src/script/datprint.py",
"max_stars_repo_stars_event_max_datetime": "2019-05-28T09:40:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-26T16:03:06.000Z",
"num_tokens": 129,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 474
} |
# Introduction to Geometric Algebra
Below I will try to motivate the case for Geometric Algebra (GA) using rotation of vectors in various spaces and compare to the other most common approaches.
(Source: https://commons.wikimedia.org/wiki/File:Counterclockwise_rotation.png)
Prerequisites
- Rotation matrices: good to know and to understand the motivation but not absolutely necessary
- Complex numbers: used to introduce Geometric Algebra rotors
TODO List
- Add graphics
- Add practical examples for PGA
## Rotating 2D vectors
Given a 2D vector $v = \begin{bmatrix} x \\ y \end{bmatrix} = x e_x + y e_y$ how do we rotate the vector given a rotation angle $\phi$? Let's look at some different solutions to this problem.
### Using 2x2 Rotation Matrices
A common solution is to use rotation matrices. In the 2D case it will be a 2x2 matrix of the following form
\begin{equation}
R(\phi) =
\begin{bmatrix}
cos(\phi) & -sin(\phi)\\
sin(\phi) & cos(\phi)
\end{bmatrix}
\end{equation}
Multiplying the vector and the matrix together we arrive at
\begin{equation}
v' = R(\phi) v = (x cos(\phi) - y sin(\phi)) e_x + (x sin(\phi) + y cos(\phi)) e_y
\end{equation}
Rotations can also be composed by multiplying two or more matrices together to obtain a single matrix
\begin{equation}
v' = R_a R_b v = R v
\end{equation}
The downside of this method is that we need to calculate the `2x2` matrix which contains four numbers which seems more than actually necessary. Also while rotation matrices
do exist for other dimensions it is usually not easy to come up with them. They are often either memorized or require a lookup.
### Using Complex Numbers
Another common tool for 2D rotation are the complex numbers where we introduce an imaginary unit $i$ that squares to $-1$. First note that if we multiply a complex number with the imaginary unit $i$ the complex number will get rotated by 90°. To verify this we multiply $1 + i$ by $i$ the result will be $(1 + i) i = -1 + i$.
TODO: Add image of 90° rotation
By multiplying by integer powers of $i$ we can rotate by integer multiples of 90°, for example multiplying by $i^3$ will rotate by 270°.
TODO: Add image of 90° 180° 270° 360° rotations
However in order to create arbitrary rotations phi, we need to form the exponential $e^{i \phi}$ and multiply by it.
The resulting complex number of the exponential in terms of real and imaginary parts is given by [Euler's formula](https://en.wikipedia.org/wiki/Euler%27s_formula)
\begin{equation}
e^{i \phi} = cos(\phi) + i sin(\phi)
\end{equation}
Now in order to achieve what we wanted, we need to represent our vector $v$ as a complex number $c = x + i y$ (ie. the x coordinate becomes the real part and the y coordinate becomes the imaginary part).
Then we can apply the formula from above to rotate by an arbitrary angle $\phi$:
\begin{equation}
c' = c e^{i \phi} = (x + i y) e^{i \phi} = (x + i y) (cos(\phi) + i sin(\phi)) = (x cos(\phi) - y sin(\phi)) + i (x sin(\phi) + y cos(\phi))
\end{equation}
TODO: Show image of complex plane general rotation
If we compare this to what we arrived at using rotation matrices the result is identical. Similarly we can also compose rotations by multiplying two or more complex numbers together
to arrive at a single complex number that does both rotations. We now also only need to store two numbers for a rotation. Nevertheless the approach still leaves some questions. The conversion between vectors and complex numbers seems arbitrary. Furthermore this approach only works in 2 dimensions and it's not clear how to generalize it yet.
### Using 2D Geometric Algebra
Usually introductions to GA begin by defining various rules and going over derivations before doing anything useful with them. I will also define some rules but try to get to the interesting stuff more quickly.
Like for the standard 2D vector algebra in geometric algebra we have two basis vectors $e_x, e_y$ using which arbitrary vectors $v = x e_x + y e_y$ can be formed. Multiplying two same basis vectors together will result in $+1$ if they are the same (ie. $e_x e_x = 1, e_y e_y = 1$) similarly to how the dot product in standard vector algebra works.
What is new is that we can also multiply two different basis vectors and the result will not be zero, but can't be simplified further $e_x e_y = e_{xy}$. $e_{xy}$ here is not something new, it just stands for the two basis vectors multiplied together as a shorthand way of writing. These elements made up of two basis vectors are called bivectors.
Importantly the order of the product matters. A rule is that when you swap the factors of a product of basis vectors you pick up a minus sign, for example. $e_{xy} = e_x e_y = -e_y e_x = -e_{yx}$.
This kind of product is called the Geometric Product and it is the central idea of Geometric Algebra. Let's now use these three rules and see what some results are when we use them
\begin{aligned}
e_x e_y e_x & = & \text{(rewrite as shorthand)} \\
e_{xyx} & = & \text{(Swap neighbours on the right, pick up minus sign)} \\
-e_{xxy} & = & \text{(Multiplying same basis vectors results in 1, e_xx = e_x e_x = 1)} \\
-e_y &
\end{aligned}
Hopefully this gives some idea of how the rules work and how to use them. Now for something more interesting, let's see what happens if we multiply one of these bivectors by itself, that is, square it:
\begin{equation}
e_{xy}^2 = e_{xy} e_{xy} = e_{xyxy} = -e_{xyyx} = -e_{xx} = -1
\end{equation}
We can see the square of the bivector $e_{xy}$ is $-1$, the same as the square of the imaginary unit of the complex numbers.
Back to the rotation problem, to resolve the disconnect of vectors and complex numbers we make use of the things we just learnt. Vectors are represented as before as $v = x e_x + y e_y$. As mentioned before $e_{xy}$ can be identified as the imaginary unit $i$ hence we present complex numbers as $a + b e_{xy}$ and a rotation by an arbitrary angle $\phi$ can be performed in the same way as before using Euler's formula
\begin{equation}
R(\phi) = e^{\phi e_{xy}} = cos(\phi) + e_{xy} sin(\phi)
\end{equation}
The entity $R$ you get after exponentiating is called a rotor (because it rotates when you multiply by it, d'uh). With this we don't need to perform the arbitrary conversion between vectors and complex numbers, we can simply multiply the vector and the rotor together as is
\begin{equation}
v R(\phi) = (x e_x + y e_y) (cos(\phi) + e_{xy} sin(\phi)) = e_x (x cos(\phi) - y sin(\phi)) + e_y (x sin(\phi) + y cos(\phi))
\end{equation}
This result is the same as the rotation formula we got for the rotation matrix and the procedure is pretty much identical to what we did with complex numbers. We can also see that the imaginary unit is in fact a rotation in the XY plane as the bivector $e_{xy}$ is formed by multiplying the two basis vectors together, so in some sense it represents the XY plane.
So far it seems that at best we gained some clarity, which is good, but might not justify using it over the well known complex numbers and vectors yet. The big advantage of this approach as we will see in the next
sections is that this generalizes to any dimension and does not require any memorization besides the basic geometric algebra rules we learnt above.
## Rotating 3D vectors
Now we go up one dimension to three dimensional space. We want to rotate a 3D vector $v = x e_x + y e_y + z e_z$ by the angles $\alpha, \beta, \gamma$
around the three axes (or planes) of rotation. As we will see things will get a bit more complicated in the usual approaches.
### Using 3x3 Rotation Matrices
As in the 2D case we can look up the 3D rotation matrix formula on Wikipedia or some other place
\begin{equation}
R(\alpha, \beta, \gamma) =
\begin{bmatrix}
cos(\alpha) cos(\beta) & cos(\alpha) sin(\beta) sin(\gamma) - sin(\alpha) cos(\gamma) & cos(\alpha) sin(\beta) cos(\gamma) + sin(\alpha) sin(\gamma) \\
sin(\alpha) cos(\beta) & sin(\alpha) sin(\beta) sin(\gamma) + cos(\alpha) cos(\gamma) & sin(\alpha) sin(\beta) cos(\gamma) - cos(\alpha) sin(\gamma) \\
-sin(\beta) & cos(\beta) sin(\gamma) & cos(\beta) cos(\gamma)
\end{bmatrix}
\end{equation}
Multiplying a 3D vector with it will give us the desired result $v' = R(\alpha, \beta, \gamma) v$. Some people might be able to memorize this formula but I think it is obvious to see that the rotation matrices for higher dimensions
will be even harder to come up with. Additionally the `3x3` matrix now contains nine numbers which is way more than the original three we started with.
### Using 3D Geometric Algebra
We now have three basis vectors $e_x, e_y, e_z$ and as a result three independent bivectors $e_{xy}, e_{xz}, e_{yz}$.
Let's see what the bivectors square to using the rules from before:
\begin{aligned}
e_{xy}^2 = e_{xyxy} = -e_{xyyx} = -1 \\
e_{xz}^2 = e_{xzxz} = -e_{xzzx} = -1 \\
e_{yz}^2 = e_{yzyz} = -e_{yzzy} = -1
\end{aligned}
All three of our bivectors square to $-1$. Some readers might be familiar with quaternions which have the same property for their three imaginary units $i, j, k$ and are like the 3D versions of the 2D complex numbers. Previously we demonstrated that $e_{xy}$ can be used to perform a rotation in the XY plane, so one might already suspect what $e_{xz}$ and $e_{yz}$ can be used for.
One might expect that the rotor $R_{xz} = e^{\frac{\pi}{2} e_{xz}}$ would produce a rotation by 90° ($\frac{\pi}{2}$ in radians) in the XZ plane when multiplying a 3D vector by it.
This is almost correct. It turns out that the 2D case was special and the general formula for applying rotors requires a two sided product, also called the sandwich product
\begin{equation}
v' = R v \widetilde{R}
\end{equation}
where $\widetilde{R}$ means reversing the order of all basis vectors appearing in it (eg. $\widetilde{\phi e_{xz}} = \phi e_{zx} = -\phi e_{xz})$.
Another result of this is that the angle in the exponent needs to be half of the desired angle as we are now multiplying by it twice. As stated before the 2D case was special and one could then verify that this sandwich product yields the same results as the one sided product in 2D.
So, to put this all together, a 3D rotation in the XZ plane can be generated using
\begin{equation}
R_{xz} = e^{\frac{\phi}{2} e_{xz}} = cos(\frac{\phi}{2}) + e_{xz} sin(\frac{\phi}{2})
\end{equation}
by sandwiching a 3D vector with it
\begin{equation}
v' = R_{xz} v \widetilde{R}_{xz} = (cos(\frac{\phi}{2}) + e_{xz} sin(\frac{\phi}{2})) v \widetilde{(cos(\frac{\phi}{2}) + e_{xz} sin(\frac{\phi}{2}))} = (cos(\frac{\phi}{2}) + e_{xz} sin(\frac{\phi}{2})) x (cos(\frac{\phi}{2}) - e_{xz} sin(\frac{\phi}{2}))
\end{equation}
Now to perform arbitrary 3D rotations in multiple planes we can just apply multiple of these rotors, for example a rotation in XY plane then the XZ plane then YZ plane
by angles $\alpha, \beta, \gamma$ respectively can be performed using three sandwich products in the following way:
\begin{equation}
v' = R_{yz}(\gamma) R_{xz}(\beta) R_{xy}(\alpha) v \widetilde{R}_{xy}(\alpha) \widetilde{R}_{xz}(\beta) \widetilde{R}_{yz}(\gamma)
\end{equation}
The three rotors $R_{xy}(\alpha), R_{xz}(\beta), R_{yz}(\gamma)$ can also be combined into a single rotor $R(\alpha, \beta, \gamma)$ by multiplying them together. The same rotation around three axes can then be achieved
using a single sandwich product
\begin{equation}
v' = R(\alpha, \beta, \gamma) v \widetilde{R}(\alpha, \beta, \gamma)
\end{equation}
As we have three rotors and each rotor contains a unique bivector and a scalar part it turns out the combined rotor will contain four numbers, much less than the nine of the `3x3` rotation matrix.
Hopefully it is now possible to see that the Geometric Algebra approach will generalize to any dimension. For example in 4D we would have six planes of rotation and thus six bivectors, and one can generate rotations in them using exactly the same approach with the exponential and sandwich product.
## Translating and rotating 2D vectors
Besides rotation another common geometric operation is translation. In the following sections we will see where Geometric Algebra becomes really useful and provides more than just being insightful, minimal and generalizing well.
We're going back to two dimensions to keep things less verbose. The goal now is to translate a vector $v = x e_x + y e_y$ by an offset $T = t_x e_x + t_y e_y$ and also rotate it as before by an angle $\phi$.
### Using Standard Vectors and Addition
The most straightforward way of achieving translation is simply adding the translation vector to the original vector
\begin{equation}
v' = v + T = (x + t_x) e_x + (y + t_y) e_y
\end{equation}
Now we also want to rotate the vector to arrive at the result. We can use the 2x2 rotation matrix from before to do this
\begin{equation}
v'' = R(\phi) v'
\end{equation}
This works, but has several issues. For one, we are using two different operations to represent translations and rotations. Furthermore it also makes combining these transformations (combined rotation and translation) harder. We can't just compose multiple transforms into a single one as we could do for rotation (eg. by multiplying two rotation matrices together we could get a single rotation matrix).
### Using Standard Vectors with homogenous coordinates and 3x3 matrices
To fix the issue of translation and rotation being represented in different ways using different operations (vector addition for translation, matrix multiplication for rotation) we want a way to unify them. For this the traditional solution is to use homogenous coordinates. Here we simply pad the 2D vector using a constant $1$ to get a vector with three elements
\begin{equation}
v_h = \begin{bmatrix} v \\ 1 \end{bmatrix} = \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}
\end{equation}
Translation can then be done using `3x3` matrices as the factors in the third column will add a term proportional to the constant $1$
\begin{aligned}
x' = R_{11} x + R_{12} y + R_{13} 1 \\
y' = R_{21} x + R_{22} y + R_{23} 1
\end{aligned}
The last row of the `3x3` matrix is usually chosen so that the resulting vector will also have a $1$ in the fourth position (ie. $R_{31} = R_{32} = 0, R_{33} = 1$).
Unlike the vector addition solution, we can now compose the transformations by multiplying `3x3` matrices together to get a single `3x3` matrix that does the combined rotation and translation. However now we have even more numbers.
A `3x3` matrix will contain nine numbers, far more than the minimal amount of three (two for translation, one for rotation).
### Using 2D Projective Geometric Algebra
In addition to the previous two basis vectors $e_x, e_y$ we now add a third basis vector $e_0$. The interesting thing about this basis vector is that instead of squaring to $+1$ it will square to $0$. This might seem strange at first but this allows performing translations in the same way we performed rotations using the exponential function and rotors. Remember, when we started with rotations we multiplied by $i$ (or the bivector $e_{xy}$ in GA) which generated a rotation by 90° and later we introduced the exponential with Euler's formula to produce a rotation of an arbitrary angle. Here we will do the same for translation.
Another change now is that previously we used $e_x, e_y$ as the two basis vectors to represent our points. We now use the bivectors $e_{y0}, e_{0x}$ instead and we will see that this seemingly unnatural choice will help us achieve the translation we wanted. We also add a third bivector $e_{xy}$ to the point representation with coefficient $1$. A point with coordinates $x, y$ is now represented as
\begin{equation}
P = x e_{y0} + y e_{0x} + e_{xy}
\end{equation}
Now to do the equivalent of the 90° rotation for translation, let's notice that right-multiplying the point with $(1 + e_{0x})$ generates a translation of $1$ in the x direction (ie. increments the coefficient of $e_{y0}$ by $1$)
\begin{aligned}
& (x e_{y0} + y e_{0x} + e_{xy}) (1 + e_{0x}) & = \\
& (x e_{y0} + y e_{0x} + e_{xy}) + e_{xy0x} & = \\
& (x e_{y0} + y e_{0x} + e_{xy}) + e_{y0} & = \\
& (x + 1) e_{y0} + y e_{0x} + e_{xy}
\end{aligned}
So now we can translate by $1$ in both the x and the y direction (a translation in the y direction would be done by right-multiplying with $(1 + e_{y0})$). As with rotation, to generate a translation by an arbitrary amount we now
form the exponential to get a rotor $R = e^{\frac{t_x}{2} e_{0x} + \frac{t_y}{2} e_{y0}}$. It turns out here we can't use Euler's formula to get the result of the exponential as it only applies for values that square to $-1$ such as our bivector (or the imaginary unit $i$). Instead the solution is a much simpler formula
\begin{equation}
e^{\frac{t_x}{2} e_{0x} + \frac{t_y}{2} e_{y0}} = 1 + (\frac{t_x}{2} e_{0x} + \frac{t_y}{2} e_{y0})
\end{equation}
so we just add a scalar $1$. This is also why there was a one plus in our x-translation by $1$ earlier as $e^{e_{0x}} = 1 + e_{0x}$. Now by multiplying a rotor that does rotation and a rotor that does translation, we can have
more general rotors that do both rotation and translation. These are often also called motors. In general they are applied as before using the sandwich product. A 2D motor contains four numbers (a scalar, two translation bivectors $e_{y0}, e_{0x}$ and a rotation bivector $e_{xy}$), very close to the minimum of three required for 2D translation and rotation.
## Applications of (Projective) Geometric Algebra
### Interpolating motors
A cool thing we can now do that we couldn't easily do before is interpolating between two motors.
Given two $m_1$ and $m_2$ we can interpolate them using the logarithm. Taking the logarithm of a motor allows us to get the term that was in the exponent before we took the exponential. Then we can linearly interpolate the two exponents and afterwards take the exponential again which should give us a nice and smooth interpolation that blends both translation and rotation. Putting those thoughts into a formula we arrive at
\begin{equation}
interpolate(m_1, m_2, \alpha) = e^{\alpha log(m_1) + (1 - \alpha) log(m_2)}
\end{equation}
with interpolation factor $\alpha \in [0, 1]$. This requires us to compute the logarithm of a motor. Fortunately for the 2D case there is a simple formula
\begin{equation}
log(m) = \frac{m}{||m||} = \frac{m}{\sqrt{m \widetilde{m}}}
\end{equation}
While in other dimensions we have [a way of computing the exponential of (most interesting) GA multivectors](https://www.euclideanspace.com/maths/algebra/clifford/algebra/functions/exponent/index.htm) the logarithm unfortunately doesn't generalize as easily. Formulas for the most important cases are known though including the 3D case.
## Other important things that were not explicitly mentioned
- Basis vectors can square to $[-1, 0, +1]$
- Geometric product can be decomposed into an inner and an exterior product $a b = a \cdot b + a \wedge b$
## Further study
- [bivector.net](https://bivector.net/): Contains GA tools and links to various resources
- [CoffeeShop](https://enkimute.github.io/ganja.js/examples/coffeeshop.html): Interactive PGA examples
- [Geometric Algebra talk at SIGGRAPH2019](https://youtu.be/tX4H_ctggYo): Great introduction to PGA with many motivating examples
| {
"alphanum_fraction": 0.6490641145,
"author": null,
"avg_line_length": 60.7043701799,
"converted": true,
"ext": "ipynb",
"file": null,
"hexsha": "ad2563ce43b6a3792ba01c70e94489651063bd7d",
"include": null,
"lang": "Jupyter Notebook",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e2d62d63e1c1e1bb16454efec3d43aedc5bd4029",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "RobinKa/typed-ga",
"max_forks_repo_path": "intro.ipynb",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e2d62d63e1c1e1bb16454efec3d43aedc5bd4029",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "RobinKa/typed-ga",
"max_issues_repo_path": "intro.ipynb",
"max_line_length": 641,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "e2d62d63e1c1e1bb16454efec3d43aedc5bd4029",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "RobinKa/typed-ga",
"max_stars_repo_path": "intro.ipynb",
"max_stars_repo_stars_event_max_datetime": "2020-07-08T13:46:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-08T13:31:46.000Z",
"num_tokens": 5245,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 23614
} |
import cv2 as cv
import sys
import math
import numpy as np
from time import time
import image_utils as imutils
import Condensation as cons
#OpenCV default trackers begin
class boostingTracking:
    """Thin wrapper around OpenCV's Boosting tracker."""

    def __init__(self, frame, bbox):
        # Create the underlying OpenCV tracker and seed it with the
        # initial bounding box on the first frame.
        self.tracker = cv.TrackerBoosting_create()
        self.tracker.init(frame, bbox)

    def track(self, frame):
        # Delegate to OpenCV; update() already returns (ok, bbox).
        return self.tracker.update(frame)
class milTracking:
    """Thin wrapper around OpenCV's MIL tracker."""

    def __init__(self, frame, bbox):
        # Create the underlying OpenCV tracker and seed it with the
        # initial bounding box on the first frame.
        self.tracker = cv.TrackerMIL_create()
        self.tracker.init(frame, bbox)

    def track(self, frame):
        # Delegate to OpenCV; update() already returns (ok, bbox).
        return self.tracker.update(frame)
class kcfTracking:
    """Thin wrapper around OpenCV's KCF tracker."""

    def __init__(self, frame, bbox):
        # Create the underlying OpenCV tracker and seed it with the
        # initial bounding box on the first frame.
        self.tracker = cv.TrackerKCF_create()
        self.tracker.init(frame, bbox)

    def track(self, frame):
        # Delegate to OpenCV; update() already returns (ok, bbox).
        return self.tracker.update(frame)
class tldTracking:
    """Thin wrapper around OpenCV's TLD tracker."""

    def __init__(self, frame, bbox):
        # Create the underlying OpenCV tracker and seed it with the
        # initial bounding box on the first frame.
        self.tracker = cv.TrackerTLD_create()
        self.tracker.init(frame, bbox)

    def track(self, frame):
        # Delegate to OpenCV; update() already returns (ok, bbox).
        return self.tracker.update(frame)
class medianFlowTracking:
    """Thin wrapper around OpenCV's MedianFlow tracker."""

    def __init__(self, frame, bbox):
        # Create the underlying OpenCV tracker and seed it with the
        # initial bounding box on the first frame.
        self.tracker = cv.TrackerMedianFlow_create()
        self.tracker.init(frame, bbox)

    def track(self, frame):
        # Delegate to OpenCV; update() already returns (ok, bbox).
        return self.tracker.update(frame)
class goturnTracking:
    """Thin wrapper around OpenCV's GOTURN (deep-learning) tracker."""

    def __init__(self, frame, bbox):
        # Create the underlying OpenCV tracker and seed it with the
        # initial bounding box on the first frame.
        self.tracker = cv.TrackerGOTURN_create()
        self.tracker.init(frame, bbox)

    def track(self, frame):
        # Delegate to OpenCV; update() already returns (ok, bbox).
        return self.tracker.update(frame)
#OpenCV default trackers end
#Meanshift tracking
class MeanShiftTracking:
    """Mean-shift tracker driven by a hue histogram of the initial ROI.

    The target model is a 1-D hue histogram of the initial box (masked
    to reasonably saturated/bright pixels). Each new frame is
    back-projected against that histogram and cv.meanShift shifts the
    window toward the densest probability region.
    """

    def __init__(self, frame, bbox):
        # bbox is (x, y, w, h); build the color model from that ROI.
        self._build_model(frame, bbox)

    def restart(self, frame, bbox):
        # Re-initialize the tracker on a new frame/box; identical to
        # construction (previously this body was duplicated verbatim).
        self._build_model(frame, bbox)

    def _build_model(self, frame, bbox):
        # Shared setup used by both __init__ and restart.
        self.x, self.y, self.w, self.h = bbox
        self.track_window = bbox
        self.roi = imutils.getROI(frame, self.x, self.y, self.w, self.h)
        self.hsv_roi = cv.cvtColor(self.roi, cv.COLOR_BGR2HSV)
        # Mask out dim / washed-out pixels so the hue histogram is built
        # only from reliably colored pixels.
        self.mask = cv.inRange(self.hsv_roi,
                               np.array((0., 60., 32.)),
                               np.array((180., 255., 255.)))
        self.roi_hist = cv.calcHist([self.hsv_roi], [0], self.mask, [180], [0, 180])
        cv.normalize(self.roi_hist, self.roi_hist, 0, 255, cv.NORM_MINMAX)
        # Setup the termination criteria, either 10 iteration or move by atleast 1 pt
        self.term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)

    def track(self, frame):
        """Advance the tracker one frame; returns (ret, (x, y, w, h))."""
        hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        dst = cv.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)
        # apply meanshift to get the new location
        ret, self.track_window = cv.meanShift(dst, tuple(self.track_window), self.term_crit)
        return ret, self.track_window
# camshfit tracking
class CAMShiftTracking:
    """CAMShift tracker driven by a hue histogram of the initial ROI.

    Like mean-shift, but cv.CamShift additionally adapts window size and
    orientation: the first return value is a rotated rect
    ((cx, cy), (w, h), angle), the second the axis-aligned window.
    """

    def __init__(self, frame, bbox):
        # bbox is (x, y, w, h); build the color model from that ROI.
        self._build_model(frame, bbox)

    def restart(self, frame, bbox):
        # Re-initialize the tracker on a new frame/box; identical to
        # construction (previously this body was duplicated verbatim).
        self._build_model(frame, bbox)

    def _build_model(self, frame, bbox):
        # Shared setup used by both __init__ and restart.
        self.x, self.y, self.w, self.h = bbox
        self.track_window = bbox
        self.roi = imutils.getROI(frame, self.x, self.y, self.w, self.h)
        self.hsv_roi = cv.cvtColor(self.roi, cv.COLOR_BGR2HSV)
        # Mask out dim / washed-out pixels so the hue histogram is built
        # only from reliably colored pixels.
        self.mask = cv.inRange(self.hsv_roi,
                               np.array((0., 60., 32.)),
                               np.array((180., 255., 255.)))
        self.roi_hist = cv.calcHist([self.hsv_roi], [0], self.mask, [180], [0, 180])
        cv.normalize(self.roi_hist, self.roi_hist, 0, 255, cv.NORM_MINMAX)
        # Setup the termination criteria, either 10 iteration or move by atleast 1 pt
        self.term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)

    def track(self, frame):
        """Advance one frame; returns (rotated_rect, (x, y, w, h))."""
        hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        dst = cv.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)
        # apply camshift to get the new location
        # ret = ((x,y),(w,h),angle)
        ret, self.track_window = cv.CamShift(dst, tuple(self.track_window), self.term_crit)
        return ret, self.track_window
# optical flow tracking using shi-tomasi corner features
class OpticalFlowTracking:
    """Pyramidal Lucas-Kanade tracker seeded with Shi-Tomasi corners.

    Corners are detected inside the initial bounding box and then
    propagated frame-to-frame; the tracked window is the bounding
    rectangle of the propagated points.

    Note: the original track() also built an unused overlay image
    (cv.add), ran a no-op drawing loop, and re-detected corners whose
    result was immediately overwritten by the LK-propagated points; that
    dead work has been removed without changing the returned values or
    the retained state.
    """

    def __init__(self, frame, bbox):
        self.x, self.y, self.w, self.h = bbox
        # params for ShiTomasi corner detection
        self.feature_params = dict(maxCorners=100,
                                   qualityLevel=0.3,
                                   minDistance=7,
                                   blockSize=7)
        # Parameters for lucas kanade optical flow
        self.lk_params = dict(winSize=(40, 40),
                              maxLevel=3,
                              criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
        self.old_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        # Restrict corner detection to the initial box via a mask.
        self.mask = imutils.getMask(self.old_gray, self.x, self.y, self.w, self.h)
        self.p0 = cv.goodFeaturesToTrack(self.old_gray, mask=self.mask, **self.feature_params)

    def track(self, frame):
        """Propagate points into `frame`; returns (err, (x, y, w, h))
        where err is the per-point LK error array."""
        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        p1, st, err = cv.calcOpticalFlowPyrLK(self.old_gray, frame_gray, self.p0, None, **self.lk_params)
        # Keep only the points LK reports as successfully tracked.
        good_new = p1[st == 1]
        # New window is the bounding rectangle of ALL propagated points
        # (matches the original behavior, which used p1, not good_new).
        x, y, w, h = cv.boundingRect(p1)
        track_window = x, y, w, h
        self.old_gray = frame_gray.copy()
        # Carry the surviving points forward as the next frame's seeds.
        self.p0 = good_new.reshape(-1, 1, 2)
        return err, track_window
#optical flow tracking using ORB features
class OpticalFlowTrackingORB:
    """Lucas-Kanade optical-flow tracker seeded with ORB keypoints.

    Keypoints are detected inside the initial bounding box with ORB and
    then propagated frame-to-frame with pyramidal Lucas-Kanade flow.
    """
    def __init__(self, frame, bbox):
        # bbox is (x, y, w, h) in image coordinates.
        self.x,self.y,self.w,self.h = bbox
        # Parameters for lucas kanade optical flow
        self.lk_params = dict( winSize  = (40,40),
                  maxLevel = 3,
                  criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
        self.old_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        self.roi = imutils.getROI(self.old_gray,self.x,self.y,self.w,self.h)
        # Initiate STAR detector
        self.orb = cv.ORB_create()
        # find the keypoints with ORB
        self.kp = self.orb.detect(self.roi,None)
        # compute the descriptors with ORB
        self.kp, self.des = self.orb.compute(self.roi, self.kp)
        # Shift ROI-local keypoints into full-image coordinates.
        # NOTE(review): pt[0] (keypoint x) is offset by self.y and pt[1]
        # (keypoint y) by self.x — looks like a swapped x/y offset; confirm.
        self.p0=[]
        for keypoint in self.kp:
            #print(keypoint.pt)
            self.pty=keypoint.pt[0]+self.y
            self.ptx=keypoint.pt[1]+self.x
            self.p0.append([[self.ptx, self.pty]])
        # calcOpticalFlowPyrLK requires float32 points of shape (N, 1, 2).
        self.p0=np.asfarray(self.p0, dtype=np.float32)
    def track(self, frame):
        # Propagates the tracked points into `frame`.
        # Returns (err, track_window): err is the per-point LK error
        # array (or the scalar 1 on the no-points path) and track_window
        # an (x, y, w, h) tuple (a dummy (1,1,1,1) on that path).
        print(len(self.p0))
        print("+++++++++")
        # Without surviving points LK cannot run; bail out with dummies.
        if(len(self.p0)==0):
            err = 1,1,1,1
            return 1,err
        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        mask2 = np.zeros_like(frame)
        p1, st, err = cv.calcOpticalFlowPyrLK(self.old_gray, frame_gray, self.p0, None, **self.lk_params)
        # Select good points
        good_new = p1[st==1]
        good_old = self.p0[st==1]
        # draw the tracks (drawing calls were removed; loop is a no-op)
        for i,(new,old) in enumerate(zip(good_new,good_old)):
            a,b = new.ravel()
            c,d = old.ravel()
        img = cv.add(frame,mask2)
        # New window is the bounding rectangle of all propagated points.
        x,y,w,h = cv.boundingRect(p1)
        print(x,y,w,h)
        print("=============")
        # Re-detect ORB keypoints inside the new window to refresh the
        # point set; skipped for tiny / near-edge windows.
        if (x>5 and y>5 and w>5 and h>5):
            roi = imutils.getROI(frame,x,y,w,h)
            # find the keypoints with ORB
            kp = self.orb.detect(roi,None)
            # compute the descriptors with ORB
            self.kp, self.des = self.orb.compute(roi, kp)
            self.p0=[]
            for keypoint in self.kp:
                pty=keypoint.pt[0]+y
                ptx=keypoint.pt[1]+x
                self.p0.append([[ptx, pty]])
            self.p0=np.asfarray(self.p0, dtype=np.float32)
        track_window = x,y,w,h
        self.old_gray = frame_gray.copy()
        # NOTE(review): this overwrites the freshly re-detected points
        # above with the LK-propagated ones — confirm which is intended.
        self.p0 = good_new.reshape(-1,1,2)
        return err,track_window
#this class uses only camshift as sensor for the kalman filter
class KalmanTrackingCshift:
    """Kalman filter with CAMShift as its only measurement source.

    State is 4-D (x, y, vx, vy) with a 2-D position measurement; the
    constant-velocity transition matrix assumes a unit time step.
    """
    def __init__(self, frame, bbox):
        self.kalman = cv.KalmanFilter(4,2)
        # Measurement matrix: observe position only.
        self.kalman.measurementMatrix = np.array([[1,0,0,0],
                                                  [0,1,0,0]],np.float32)#H
        # Constant-velocity model with dt = 1.
        self.kalman.transitionMatrix = np.array([[1,0,1,0],
                                                 [0,1,0,1],
                                                 [0,0,1,0],
                                                 [0,0,0,1]],np.float32)#F
        self.kalman.processNoiseCov = np.array([[1,0,0,0],
                                                [0,1,0,0],
                                                [0,0,1,0],
                                                [0,0,0,1]],np.float32) * 0.03#P
        # NOTE(review): np.array((2,1), ...) builds the 1-D array [2, 1],
        # not a 2x1 zero vector — confirm intent; this attribute is never
        # read in this class.
        self.measurement = np.array((2,1), np.float32)
        self.prediction = np.zeros((2,1), np.float32)
        # CAMShift provides the position observations.
        self.camshift = CAMShiftTracking(frame,bbox)
        # Previous predicted corner, kept for (commented-out) velocity debug.
        self.prevx = 0
        self.prevy = 0
    def restart(self, frame, bbox):
        # Re-seed only the CAMShift sensor; the Kalman state is kept.
        self.camshift = CAMShiftTracking(frame,bbox)
    def track(self, frame):
        # Advance one frame: CAMShift observes, Kalman corrects+predicts.
        # Returns (ok, (x, y, w, h)) where the box is centered on the
        # Kalman position prediction with CAMShift's window size.
        ret, track_window = self.camshift.track(frame)
        #print("ret")
        #print(ret)
        # draw observation on image
        x,y,w,h = track_window;
        #frame = cv.rectangle(frame, (x,y), (x+w,y+h), (0,255,0),2);
        # extract centre of this observation as points
        # ret is CAMShift's rotated rect; boxPoints gives its 4 corners.
        pts = cv.boxPoints(ret)
        #print("Box Points:",pts)
        pts = np.int0(pts)
        #print(pts)
        #frame = cv.circle(frame,(pts[0][0],pts[0][1]), 5, (0,0,255), -1)
        #frame = cv.circle(frame,(pts[1][0],pts[1][1]), 5, (0,0,255), -1)
        #frame = cv.circle(frame,(pts[2][0],pts[2][1]), 5, (0,0,255), -1)
        #frame = cv.circle(frame,(pts[3][0],pts[3][1]), 5, (0,0,255), -1)
        # (cx, cy), radius = cv2.minEnclosingCircle(pts)
        # use to correct kalman filter
        self.kalman.correct(imutils.center(pts));
        # get new kalman filter prediction
        prediction = self.kalman.predict();
        #print("kf prediction:", prediction)
        #print("predictions")
        #print(prediction)
        # Convert predicted center back to a top-left + size box.
        x = prediction[0]-(0.5*w)
        y = prediction[1]-(0.5*h)
        #print("prevx and prev y =", self.prevx, self.prevy)
        #print("x and y=", x, y)
        #print("velocities: velx=",(x-self.prevx)/1,"vely=",(y-self.prevy)/1)
        w = prediction[0]+(0.5*w)-x
        h = prediction[1]+(0.5*h)-y
        self.prevx = x
        self.prevy = y
        prediction_box = x,y,w,h
        #print(prediction_box)
        #draw prediction in image
        #frame = cv.rectangle(frame, (prediction[0]-(0.5*w),prediction[1]-(0.5*h)), (prediction[0]+(0.5*w),prediction[1]+(0.5*h)), (0,255,0),2);
        # Always reports success; CAMShift has no failure signal here.
        ok = 1
        return ok, prediction_box
# This class uses CAMShift and optical flow as the sensors for the Kalman filter.
class KalmanTrackingCshiftOflow:
    """Tracker fusing two measurement sources -- CAMShift and optical flow --
    through a single constant-velocity Kalman filter (two corrections per
    frame, one per sensor)."""
    def __init__(self, frame, bbox):
        # 4 state variables (x, y, vx, vy), 2 measured variables (x, y).
        self.kalman = cv.KalmanFilter(4,2)
        # Measurement matrix H: observe position only.
        self.kalman.measurementMatrix = np.array([[1,0,0,0],
                                                  [0,1,0,0]],np.float32)
        # Transition matrix F: constant-velocity motion model.
        self.kalman.transitionMatrix = np.array([[1,0,1,0],
                                                 [0,1,0,1],
                                                 [0,0,1,0],
                                                 [0,0,0,1]],np.float32)
        # Process noise covariance Q.
        self.kalman.processNoiseCov = np.array([[1,0,0,0],
                                                [0,1,0,0],
                                                [0,0,1,0],
                                                [0,0,0,1]],np.float32) * 0.03
        # NOTE(review): np.array((2,1), ...) builds the 1-D array [2., 1.],
        # not a 2x1 buffer; the attribute is unused below.
        self.measurement = np.array((2,1), np.float32)
        self.prediction = np.zeros((2,1), np.float32)
        self.camshift = CAMShiftTracking(frame,bbox)
        self.opticalflow = OpticalFlowTracking(frame,bbox)
    def track(self, frame):
        """Run both sensors, correct the Kalman filter with each centre,
        and return (ret, predicted window as (x, y, w, h)).
        NOTE(review): unlike KalmanTrackingCshift.track, the first return
        value here is the CAMShift rotated rect, not a success flag --
        confirm what callers expect."""
        ret, track_window = self.camshift.track(frame)
        ret2, track_window2 = self.opticalflow.track(frame)
        # Build a rotated-rect for the optical-flow box, reusing the
        # CAMShift angle so cv.boxPoints can be applied to it.
        ret2 = ((track_window2[0],track_window2[1]),(track_window2[2],track_window2[3]),ret[2])
        draw_frame = frame.copy()
        #print("ret")
        #print(ret)
        # draw observation on image
        # camshift observation is green
        x,y,w,h = track_window;
        draw_frame = cv.rectangle(draw_frame, (x,y), (x+w,y+h), (0,255,0),2);
        # opticalflow observation is red
        x,y,w,h = track_window2;
        draw_frame = cv.rectangle(draw_frame, (x,y), (x+w,y+h), (0,0,255),2);
        # extract centre of camshift observation as points
        pts = cv.boxPoints(ret)
        pts = np.int0(pts)
        # use to correct kalman filter
        self.kalman.correct(imutils.center(pts));
        # extract centre of optical flow observation as points
        pts = cv.boxPoints(ret2)
        pts = np.int0(pts)
        # use to correct kalman filter
        self.kalman.correct(imutils.center(pts));
        # get new kalman filter prediction
        prediction = self.kalman.predict();
        #print("predictions")
        #print(prediction)
        # Convert the predicted centre back to a top-left-anchored box
        # (w/h are those of the optical-flow window at this point).
        x = prediction[0]-(0.5*w)
        y = prediction[1]-(0.5*h)
        w = prediction[0]+(0.5*w)-x
        h = prediction[1]+(0.5*h)-y
        prediction_box = x,y,w,h
        #print(prediction_box)
        #draw prediction in image
        #frame = cv.rectangle(frame, (prediction[0]-(0.5*w),prediction[1]-(0.5*h)), (prediction[0]+(0.5*w),prediction[1]+(0.5*h)), (0,255,0),2);
        return ret, prediction_box
class ParticleFilterTrackingCshift:
    """Particle-filter (condensation) tracker that uses the CAMShift window
    centre as the observation for weighting particles."""
    def __init__(self, w, h, nparticles, frame, bbox):
        # History of estimated (mean) positions, appended per frame.
        self.calculated = []
        self.initialise_defaults(w,h, nparticles)
        self.camshift = CAMShiftTracking(frame,bbox)
    def initialise_defaults(self, w, h, nparticles):
        """ This routine fills in the data structures with default constant
            values. It could be enhanced by reading information from the
            command line. """
        dim = 2
        nParticles = nparticles
        self.xRange = w
        self.yRange = h
        # Particle state bounds: [0, frame width] x [0, frame height].
        LB = [0.0, 0.0]
        UB = [self.xRange, self.yRange]
        self.model = cons.Condensation(dim, dim, nParticles)
        self.model.cvConDensInitSampleSet(LB, UB)
        # Identity dynamics: particles drift only by process noise.
        self.model.DynamMatr = [[1.0, 0.0],
                                [0.0, 1.0]]
    def restart(self, frame, bbox):
        # Re-seed the CAMShift sensor; the particle set is retained.
        self.camshift = CAMShiftTracking(frame,bbox)
    def track(self, frame, draw=True):
        """One tracking step: weight particles by inverse distance to the
        CAMShift centre, resample, and return (1, estimated box)."""
        ret, track_window = self.camshift.track(frame)
        x,y,w,h = track_window;
        #pts = imutils.center(track_window)
        center_x = int(x + w/2.0)
        center_y = int(y + h/2.0)
        pts = np.array([center_x,center_y])
        for z in range(self.model.SamplesNum):
            # Calculate the confidence based on the observations:
            # inverse normalized Euclidean distance to the observed centre.
            diffX = (pts[0] - self.model.flSamples[z][0])/self.xRange
            diffY = (pts[1] - self.model.flSamples[z][1])/self.yRange
            self.model.flConfidence[z] = 1.0/(np.sqrt(np.power(diffX,2) + \
                np.power(diffY,2)))
        # Updates
        self.model.cvConDensUpdateByTime()
        meanPos = self.update_after_iterating(frame,x,y,draw)
        x2 = int(meanPos[0] - w/2)
        y2 = int(meanPos[1] - h/2)
        tracker_box = x2, y2, w, h
        return 1, tracker_box
    def drawCross(self, img, center, color, d):
        """Draw an X of half-diagonal d at `center` on `img`."""
        #On error change cv2.CV_AA for cv2.LINE_AA
        # (for differents versions of OpenCV)
        cv.line(img, (center[0] - d, center[1] - d), \
            (center[0] + d, center[1] + d), color, 2, cv.LINE_AA, 0)
        cv.line(img, (center[0] + d, center[1] - d), \
            (center[0]- d, center[1] + d), color, 2, cv.LINE_AA, 0)
    def update_after_iterating(self, img, x, y, draw=True):
        """Return the integer mean particle state, optionally drawing the
        particle cloud and the mean on `img`."""
        mean = self.model.State
        meanInt = [int(s) for s in mean]
        # NOTE(review): this loop's result (posNew) is discarded -- it
        # appears to be dead code left from debugging.
        for j in range(len(self.model.flSamples)):
            posNew = [int(s) for s in self.model.flSamples[j]]
        if draw == True:
            for j in range(len(self.model.flSamples)):
                posNew = [int(s) for s in self.model.flSamples[j]]
                self.drawCross(img, posNew, (255, 255, 0), 2)
        else:
            # NOTE(review): this branch computes posNew but never uses it.
            for j in range(len(self.model.flSamples)):
                posNew = [int(s) for s in self.model.flSamples[j]]
        self.calculated.append(meanInt)
        # Trajectory segments between successive estimates (drawing is
        # currently commented out, so p1/p2 are unused).
        for z in range(len(self.calculated)-1):
            p1 = (self.calculated[z][0], self.calculated[z][1])
            p2 = (self.calculated[z+1][0], self.calculated[z+1][1])
            #cv2.line(img, p1, p2, (255,255,255), 1)
        #print ("Mean: ", (meanInt[0], meanInt[1]))
        #print ("Real: ", (x, y))
        #print ('+++++++++++++++')
        if draw == True:
            self.drawCross(img, meanInt, (255, 0, 255), 2)
        return meanInt
# Module-level factory to instantiate the selected tracker class.
def selectTracker(tracker_type, frame, bbox, frame_w, frame_h):
    """Instantiate and return the tracker implementation named by tracker_type.

    Parameters:
        tracker_type: one of 'boosting', 'mil', 'kcf', 'tld', 'medianFlow',
            'goturn', 'meanshift', 'camshift', 'optical_flow',
            'optical_flow_orb', 'kalman_camshift',
            'kalman_camshift_opticalflow', 'particle_filter_camshift'.
        frame: initial frame handed to the tracker constructor.
        bbox: initial bounding box (x, y, w, h).
        frame_w, frame_h: frame dimensions (used by the particle filter only).

    Raises:
        ValueError: if tracker_type is not a recognized name. (Previously an
            unknown name crashed with UnboundLocalError on the return line.)
    """
    # Particle count for the particle-filter tracker.
    nparticles = 100
    if tracker_type == 'boosting':
        tracker = boostingTracking(frame,bbox)
    elif tracker_type == 'mil':
        tracker = milTracking(frame,bbox)
    elif tracker_type == 'kcf':
        tracker = kcfTracking(frame,bbox)
    elif tracker_type == 'tld':
        tracker = tldTracking(frame,bbox)
    elif tracker_type == 'medianFlow':
        tracker = medianFlowTracking(frame,bbox)
    elif tracker_type == 'goturn':
        tracker = goturnTracking(frame,bbox)
    elif tracker_type == 'meanshift':
        tracker = MeanShiftTracking(frame,bbox)
    elif tracker_type == 'camshift':
        tracker = CAMShiftTracking(frame,bbox)
    elif tracker_type == 'optical_flow':
        tracker = OpticalFlowTracking(frame,bbox)
    elif tracker_type == 'optical_flow_orb':
        tracker = OpticalFlowTrackingORB(frame,bbox)
    elif tracker_type == 'kalman_camshift':
        tracker = KalmanTrackingCshift(frame,bbox)
    elif tracker_type == 'kalman_camshift_opticalflow':
        tracker = KalmanTrackingCshiftOflow(frame,bbox)
    elif tracker_type == 'particle_filter_camshift':
        tracker = ParticleFilterTrackingCshift(frame_w, frame_h, nparticles, frame, bbox)
    else:
        raise ValueError("Unknown tracker_type: {!r}".format(tracker_type))
    return tracker
| {
"alphanum_fraction": 0.5790378876,
"author": null,
"avg_line_length": 36.6092592593,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0fd507ae5dd03912901e58a412480c2741f96a10",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f1f6f098a98f168b586e4bf2241e23f5c306786d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "gabrielpeixoto-cvai/handarch",
"max_forks_repo_path": "handarch/hand_tracking.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f1f6f098a98f168b586e4bf2241e23f5c306786d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "gabrielpeixoto-cvai/handarch",
"max_issues_repo_path": "handarch/hand_tracking.py",
"max_line_length": 144,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f1f6f098a98f168b586e4bf2241e23f5c306786d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "gabrielpeixoto-cvai/handarch",
"max_stars_repo_path": "handarch/hand_tracking.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5383,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 19769
} |
"""Module contains Methods used for figuring out which/how many factors to retain
"""
import numpy as np
from fa_kit.broken_stick import BrokenStick
def retain_top_n(vals, num_keep):
    """Return the indices of the num_keep components with largest magnitude."""
    if num_keep < 1:
        raise ValueError(
            "Must select num_keep >= 1 when using 'top_n' retention "
            "criterion. Currently, num_keep = {}".format(num_keep))
    # Sort indices by descending absolute value, then truncate.
    order_desc = np.argsort(-np.abs(vals))
    return order_desc[:num_keep]
def retain_top_pct(vals, pct_keep):
    """
    Return indices of the largest-magnitude components, taken in descending
    order until their cumulative absolute value reaches `pct_keep`.
    """
    if pct_keep > 1 or pct_keep <= 0:
        raise ValueError(
            "Must set pct_keep between 0 and 1 be when using "
            "'retain_top_pct' retention criterion. "
            "Currently, pct_keep = {}".format(pct_keep))
    order_desc = np.argsort(-np.abs(vals))
    retained = []
    running_total = 0.0
    for idx in order_desc:
        # Stop once the already-retained mass covers the requested fraction.
        if running_total >= pct_keep:
            break
        retained.append(idx)
        running_total += np.abs(vals[idx])
    return retained
def retain_kaiser(vals, data_dim):
    """
    Apply Kaiser's retention criterion: keep every component whose absolute
    value exceeds 1/data_dim.
    Normally this means 'keep anything with more than (1/dim)% of the total
    variance', but zero eigenvalues may have been cropped out, so the caller
    must supply the original dimensionality explicitly.
    """
    if data_dim is None or data_dim < len(vals):
        raise ValueError(
            "data_dim is missing or improperly specified "
            "for Kaiser criterion. Current value {}".format(data_dim)
        )
    threshold = 1.0 / data_dim
    kept = []
    for position, component in enumerate(vals):
        if np.abs(component) > threshold:
            kept.append(position)
    return kept
def retain_broken_stick(vals, broken_stick):
    """
    Decide how many components to keep by comparing the value distribution
    (sorted in descending order) against a broken-stick distribution and
    returning the indices where the observed values exceed expectation.
    """
    descending_vals = np.array(sorted(vals, reverse=True))
    return broken_stick.find_where_target_exceeds(descending_vals)
| {
"alphanum_fraction": 0.6446099912,
"author": null,
"avg_line_length": 26.2298850575,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6e6b7756b05a1e30c86e196d5881963d5c42d1c6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2021-11-20T11:43:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-09-14T00:52:13.000Z",
"max_forks_repo_head_hexsha": "148700747720211007307434eb398c1d4ce60990",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bmcmenamin/fa_k",
"max_forks_repo_path": "fa_kit/retention.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "148700747720211007307434eb398c1d4ce60990",
"max_issues_repo_issues_event_max_datetime": "2018-05-14T01:39:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-09-20T23:24:40.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bmcmenamin/fa_k",
"max_issues_repo_path": "fa_kit/retention.py",
"max_line_length": 81,
"max_stars_count": 22,
"max_stars_repo_head_hexsha": "148700747720211007307434eb398c1d4ce60990",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bmcmenamin/fa_k",
"max_stars_repo_path": "fa_kit/retention.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-20T11:52:58.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-09-13T23:53:46.000Z",
"num_tokens": 535,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2282
} |
# This file is part of the OLD Sage notebook and is NOT actively developed,
# maintained, or supported. As of Sage v4.1.2, all notebook development has
# moved to the separate Sage Notebook project:
#
# http://nb.sagemath.org/
#
# The new notebook is installed in Sage as an spkg (e.g., sagenb-0.3.spkg).
#
# Please visit the project's home page for more information, including directions on
# obtaining the latest source code. For notebook-related development and support,
# please consult the sage-notebook discussion group:
#
# http://groups.google.com/group/sage-notebook
"""
A Cell.
A cell is a single input/output block. Worksheets are built out of
a list of cells.
"""
###########################################################################
# Copyright (C) 2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
###########################################################################
# Maximum number of characters allowed in output. This is
# needed to avoid overloading the web browser. For example, it
# should be possible to gracefully survive:
# while True:
# print "hello world"
# On the other hand, we don't want to lose the output of big matrices
# and numbers, so don't make this too small.
MAX_OUTPUT = 32000
MAX_OUTPUT_LINES = 120
TRACEBACK = 'Traceback (most recent call last):'
import re
# This regexp matches "cell://blah..." in a non-greedy way (the ?), so
# we don't get several of these combined in one.
re_cell = re.compile('"cell://.*?"')
re_cell_2 = re.compile("'cell://.*?'") # same, but with single quotes
import os, shutil
from sage.misc.misc import word_wrap, SAGE_DOC
from sage.misc.html import math_parse
from sage.misc.preparser import strip_string_literals
from sage.misc.package import is_package_installed
from cgi import escape
if is_package_installed("tinyMCE"):
JEDITABLE_TINYMCE = True
else:
JEDITABLE_TINYMCE = False
# Introspection. The cache directory is a module-scope variable set
# in the first call to Cell.set_introspect_html().
Sphinx = None
_SAGE_INTROSPECT = None
class Cell_generic:
    """Abstract base class for notebook cells.

    Concrete subclasses (TextCell, Cell) must override delete_output; the
    default is_interactive_cell applies to non-compute cells.
    """
    def is_interactive_cell(self):
        """
        Returns True if this cell contains the use of interact either as a
        function call or a decorator.
        EXAMPLES::
            sage: from sage.server.notebook.cell import Cell_generic
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: Cell_generic.is_interactive_cell(C)
            False
        """
        return False
    def delete_output(self):
        """
        Delete all output in this cell. This is not executed - it is an
        abstract function that must be overwritten in a derived class.
        EXAMPLES: This function just raises a NotImplementedError, since it
        most be defined in derived class.
        ::
            sage: C = sage.server.notebook.cell.Cell_generic()
            sage: C.delete_output()
            Traceback (most recent call last):
            ...
            NotImplementedError
        """
        raise NotImplementedError
class TextCell(Cell_generic):
    """A non-compute cell holding HTML/text content within a worksheet.

    Stores an integer id, the raw text, and a reference to the owning
    worksheet. Text cells have no output.
    """
    def __init__(self, id, text, worksheet):
        """
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C == loads(dumps(C))
            True
        """
        self.__id = int(id)
        self.__text = text
        self.__worksheet = worksheet
    def __repr__(self):
        """
        String representation of this text cell.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C.__repr__()
            'TextCell 0: 2+3'
        """
        return "TextCell %s: %s"%(self.__id, self.__text)
    def delete_output(self):
        """
        Delete all output in this cell. This does nothing since text cells
        have no output.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C
            TextCell 0: 2+3
            sage: C.delete_output()
            sage: C
            TextCell 0: 2+3
        """
        pass # nothing to do -- text cells have no output
    def set_input_text(self, input_text):
        """
        Sets the input text of self to be input_text.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C
            TextCell 0: 2+3
            sage: C.set_input_text("3+2")
            sage: C
            TextCell 0: 3+2
        """
        self.__text = input_text
    def set_worksheet(self, worksheet, id=None):
        """
        Sets the worksheet object of self to be worksheet and optionally
        changes the id of self.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: W = "worksheet object"
            sage: C.set_worksheet(W)
            sage: C.worksheet()
            'worksheet object'
            sage: C.set_worksheet(None, id=2)
            sage: C.id()
            2
        """
        self.__worksheet = worksheet
        if id is not None:
            self.__id = id
    def worksheet(self):
        """
        Returns the worksheet object associated to self.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', 'worksheet object')
            sage: C.worksheet()
            'worksheet object'
        """
        return self.__worksheet
    def html(self, ncols=0, do_print=False, do_math_parse=True, editing=False):
        """
        Returns an HTML version of self as a string.
        INPUT:
        - ``do_math_parse`` - bool (default: True)
          If True, call math_parse (defined in cell.py)
          on the html.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C.html()
            '<div class="text_cell" id="cell_text_0">2+3...'
            sage: C.set_input_text("$2+3$")
            sage: C.html(do_math_parse=True)
            '<div class="text_cell" id="cell_text_0"><span class="math">2+3</span>...'
        """
        s = """<div class="text_cell" id="cell_text_%s">%s</div>"""%(self.__id,self.html_inner(ncols=ncols, do_print=do_print, do_math_parse=do_math_parse, editing=editing))
        # Attach a jEditable/TinyMCE in-place editor unless the worksheet is
        # published or in the documentation browser.
        # NOTE(review): the str.decode/encode chain below is Python-2-only.
        if JEDITABLE_TINYMCE and hasattr(self.worksheet(),'is_published') and not self.worksheet().is_published() and not self.worksheet().docbrowser():
            s += """<script>$("#cell_text_%s").unbind('dblclick').editable(function(value,settings) {
evaluate_text_cell_input(%s,value,settings);
return(value);
}, {
      tooltip   : "",
      placeholder : "",
//      type   : 'textarea',
      type   : 'mce',
      onblur : 'ignore',
      select : false,
      submit : 'Save changes',
      cancel : 'Cancel changes',
      event  : "dblclick",
      style  : "inherit",
      data   : %r
});
</script>"""%(self.__id,self.__id,((self.__text).decode('utf-8')).encode('ascii', 'xmlcharrefreplace'))
        if editing:
            s += """<script>$("#cell_text_%s").trigger('dblclick');</script>"""%self.__id
        return s
    def html_inner(self,ncols=0, do_print=False, do_math_parse=True, editing=False):
        """
        Returns an HTML version of the content of self as a string.
        INPUT:
        - ``do_math_parse`` - bool (default: True)
          If True, call math_parse (defined in cell.py)
          on the html.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C.html_inner()
            '2+3...'
            sage: C.set_input_text("$2+3$")
            sage: C.html_inner(do_math_parse=True)
            '<span class="math">2+3</span>...'
        """
        t = self.__text
        if do_math_parse:
            # Do dollar sign math parsing
            t = math_parse(t)
        s = """%s"""%t
        return s
    def plain_text(self, prompts=False):
        """
        Returns a plain text version of self.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C.plain_text()
            '2+3'
        """
        return self.__text
    def edit_text(self):
        """
        Returns the text to be displayed in the Edit window.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C.edit_text()
            '2+3'
        """
        return self.__text
    def id(self):
        """
        Returns self's ID.
        OUTPUT:
        - int -- self's ID.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C.id()
            0
        """
        return self.__id
    def is_auto_cell(self):
        """
        Returns True if self is automatically evaluated.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C.is_auto_cell()
            False
        """
        return False
    def __cmp__(self, right):
        """
        Compares cells by ID.
        EXAMPLES::
            sage: C1 = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C2 = sage.server.notebook.cell.TextCell(0, '3+2', None)
            sage: C3 = sage.server.notebook.cell.TextCell(1, '2+3', None)
            sage: C1 == C1
            True
            sage: C1 == C2
            True
            sage: C1 == C3
            False
        """
        # NOTE(review): cmp() and __cmp__ are Python-2-only.
        return cmp(self.id(), right.id())
    def set_cell_output_type(self, typ='wrap'):
        """
        This does nothing for TextCells.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.TextCell(0, '2+3', None)
            sage: C.set_cell_output_type("wrap")
        """
        pass # ignored
class Cell(Cell_generic):
    def __init__(self, id, input, out, worksheet):
        """
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C == loads(dumps(C))
            True
        """
        self.__id = int(id)
        # Normalize line endings in the stored output.
        self.__out = str(out).replace('\r','')
        self.__worksheet = worksheet
        self.__interrupted = False
        self.__completions = False
        self.has_new_output = False
        # NOTE(review): __init__ sets __no_output_cell, but is_no_output()/
        # set_no_output() read and write __no_output -- confirm which name
        # is intended.
        self.__no_output_cell = False
        self.__asap = False
        self.__version = -1
        self.set_input_text(str(input).replace('\r',''))
    def set_asap(self, asap):
        """
        Set whether this cell is evaluated as soon as possible.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.is_asap()
            False
            sage: C.set_asap(True)
            sage: C.is_asap()
            True
        """
        # Coerce to a genuine bool so any truthy value is stored as True.
        self.__asap = bool(asap)
    def is_asap(self):
        """
        Return True if this is an asap cell, i.e., evaluation of it is done
        as soon as possible.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.is_asap()
            False
            sage: C.set_asap(True)
            sage: C.is_asap()
            True
        """
        try:
            return self.__asap
        except AttributeError:
            # Presumably a fallback for cells unpickled from before the
            # __asap attribute existed -- default to False.
            self.__asap = False
            return self.__asap
    def delete_output(self):
        """
        Delete all output in this cell.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None); C
            Cell 0; in=2+3, out=5
            sage: C.delete_output()
            sage: C
            Cell 0; in=2+3, out=
        """
        # Clear both the raw output and its HTML rendering, and mark the
        # cell as not evaluated.
        self.__out = ''
        self.__out_html = ''
        self.__evaluated = False
    def evaluated(self):
        r"""
        Return True if this cell has been successfully evaluated in a
        currently running session.
        This is not about whether the output of the cell is valid given the
        input.
        OUTPUT:
        - ``bool`` - whether or not this cell has been
          evaluated in this session
        EXAMPLES: We create a worksheet with a cell that has wrong output::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: W.edit_save('Sage\n{{{\n2+3\n///\n20\n}}}')
            sage: C = W.cell_list()[0]
            sage: C
            Cell 0; in=2+3, out=
            20
        We re-evaluate that input cell::
            sage: C.evaluate()
            sage: W.check_comp(wait=9999)
            ('d', Cell 0; in=2+3, out=
            5
            )
        Now the output is right::
            sage: C
            Cell 0; in=2+3, out=
            5
        And the cell is considered to have been evaluated.
        ::
            sage: C.evaluated()
            True
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        # Cells are never considered evaluated in a new session.
        if not self.worksheet().compute_process_has_been_started():
            self.__evaluated = False
            return False
        # Figure out if the worksheet is using the same sage
        # session as this cell. (I'm not sure when this would
        # be False.)
        same_session = self.worksheet().sage() is self.sage()
        try:
            # Always not evaluated if sessions are different.
            if not same_session:
                self.__evaluated = False
                return False
            # __evaluated may not exist yet, hence the try/except.
            return self.__evaluated
        except AttributeError:
            # Default assumption is that cell has not been evaluated.
            self.__evaluated = False
            return False
    def set_no_output(self, no_output):
        """
        Sets whether or not this is an no_output cell, i.e., a cell for
        which we don't care at all about the output.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.is_no_output()
            False
            sage: C.set_no_output(True)
            sage: C.is_no_output()
            True
        """
        # NOTE(review): writes __no_output, while __init__ initializes
        # __no_output_cell -- confirm which attribute name is intended.
        self.__no_output = bool(no_output)
    def is_no_output(self):
        """
        Return True if this is an no_output cell, i.e., a cell for which
        we don't care at all about the output.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.is_no_output()
            False
            sage: C.set_no_output(True)
            sage: C.is_no_output()
            True
        """
        try:
            return self.__no_output
        except AttributeError:
            # __no_output is only created by set_no_output; default False.
            self.__no_output = False
            return self.__no_output
    def set_cell_output_type(self, typ='wrap'):
        """
        Sets the cell output type.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.cell_output_type()
            'wrap'
            sage: C.set_cell_output_type('nowrap')
            sage: C.cell_output_type()
            'nowrap'
        """
        # Stored as-is; the value is not validated here.
        self.__type = typ
    def cell_output_type(self):
        """
        Returns the cell output type.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.cell_output_type()
            'wrap'
            sage: C.set_cell_output_type('nowrap')
            sage: C.cell_output_type()
            'nowrap'
        """
        try:
            return self.__type
        except AttributeError:
            # Lazily default to 'wrap' if the type was never set.
            self.__type = 'wrap'
            return self.__type
    def set_worksheet(self, worksheet, id=None):
        """
        Sets the worksheet object of self to be worksheet and optionally
        changes the id of self.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: W = "worksheet object"
            sage: C.set_worksheet(W)
            sage: C.worksheet()
            'worksheet object'
            sage: C.set_worksheet(None, id=2)
            sage: C.id()
            2
        """
        self.__worksheet = worksheet
        # Unlike TextCell.set_worksheet, this delegates to set_id().
        if id is not None:
            self.set_id(id)
    def worksheet(self):
        """
        Returns the worksheet object associated to self.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', 'worksheet object')
            sage: C.worksheet()
            'worksheet object'
        """
        # NOTE(review): a second, functionally identical definition of
        # worksheet() appears later in this class and overrides this one.
        return self.__worksheet
    def update_html_output(self, output=''):
        """
        Update the list of files with html-style links or embeddings for
        this cell.
        For interactive cells the html output section is always empty,
        mainly because there is no good way to distinguish content (e.g.,
        images in the current directory) that goes into the interactive
        template and content that would go here.
        EXAMPLES::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = sage.server.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
            sage: C.evaluate()
            sage: W.check_comp(wait=9999)
            ('d', Cell 0; in=plot(sin(x),0,5), out=
            <html><font color='black'><img src='cell://sage0.png'></font></html>
            <BLANKLINE>
            )
            sage: C.update_html_output()
            sage: C.output_html()
            '<img src="/home/sage/0/cells/0/sage0.png?...">'
        """
        if self.is_interactive_cell():
            self.__out_html = ""
        else:
            # Regenerate the HTML wrapper for this cell's output files.
            self.__out_html = self.files_html(output)
    def id(self):
        """
        Returns the id of self.
        OUTPUT: int -- the cell id set at construction or via set_id().
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.id()
            0
        """
        return self.__id
    def set_id(self, id):
        """
        Sets the id of self to id.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.set_id(2)
            sage: C.id()
            2
        """
        # Coerced to int, matching the constructor's handling of id.
        self.__id = int(id)
    def worksheet(self):
        """
        Returns the worksheet associated to self.
        EXAMPLES::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
            sage: C.worksheet() is W
            True
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        # NOTE(review): duplicate of the earlier worksheet() definition in
        # this class; this later one is the one that takes effect.
        return self.__worksheet
    def worksheet_filename(self):
        """
        Returns the filename of the worksheet associated to self.
        EXAMPLES::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
            sage: C.worksheet_filename()
            'sage/0'
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        # Delegates to the owning worksheet; raises if worksheet is None.
        return self.__worksheet.filename()
    def notebook(self):
        """
        Returns the notebook object associated to self.
        EXAMPLES::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
            sage: C.notebook() is nb
            True
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        # Reached through the owning worksheet.
        return self.__worksheet.notebook()
    def directory(self):
        """
        Returns the directory associated to self. If the directory doesn't
        already exist, then this method creates it.
        EXAMPLES::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
            sage: C.directory()
            '.../worksheets/sage/0/cells/0'
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        # Note: local name 'dir' shadows the builtin within this method.
        dir = self._directory_name()
        if not os.path.exists(dir):
            os.makedirs(dir)
        return dir
    def _directory_name(self):
        """
        Returns a string of the directory associated to self.
        EXAMPLES::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
            sage: C._directory_name()
            '.../worksheets/sage/0/cells/0'
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        # <worksheet dir>/cells/<cell id> -- does not create the directory.
        return '%s/cells/%s'%(self.__worksheet.directory(), self.id())
    def __cmp__(self, right):
        """
        Compares cells by their IDs.
        EXAMPLES::
            sage: C1 = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C2 = sage.server.notebook.cell.Cell(0, '3+2', '5', None)
            sage: C3 = sage.server.notebook.cell.Cell(1, '2+3', '5', None)
            sage: C1 == C1
            True
            sage: C1 == C2
            True
            sage: C1 == C3
            False
        """
        # NOTE(review): cmp() and __cmp__ are Python-2-only.
        return cmp(self.id(), right.id())
    def __repr__(self):
        """
        Returns a string representation of self.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None); C
            Cell 0; in=2+3, out=5
        """
        # Shows id, input, and raw output inline.
        return 'Cell %s; in=%s, out=%s'%(self.__id, self.__in, self.__out)
    def word_wrap_cols(self):
        """
        Returns the number of columns for word wrapping. This defaults to
        70, but the default setting for a notebook is 72.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.word_wrap_cols()
            70
        ::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
            sage: C.word_wrap_cols()
            72
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        try:
            return self.notebook().conf()['word_wrap_cols']
        except AttributeError:
            # No notebook available (e.g. worksheet is None): use 70.
            return 70
    def plain_text(self, ncols=0, prompts=True, max_out=None):
        r"""
        Returns the plain text version of self.
        INPUT:
        - ``ncols`` - int (default: 0); wrap width; 0 means use
          word_wrap_cols().
        - ``prompts`` - bool (default: True); if True, render input with
          doctest-style 'sage: '/'...' prompts and output with a leading
          traceback ellipsis where applicable.
        - ``max_out`` - int or None; truncate output to this many characters.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: len(C.plain_text())
            11
        """
        if ncols == 0:
            ncols = self.word_wrap_cols()
        s = ''
        input_lines = self.__in
        pr = 'sage: '
        if prompts:
            input_lines = input_lines.splitlines()
            has_prompt = False
            if pr == 'sage: ':
                # Detect whether the input already carries doctest prompts.
                for v in input_lines:
                    w = v.lstrip()
                    if w[:5] == 'sage:' or w[:3] == '>>>' or w[:3] == '...':
                        has_prompt = True
                        break
            else:
                # discard first line since it sets the prompt
                input_lines = input_lines[1:]
            if has_prompt:
                s += '\n'.join(input_lines) + '\n'
            else:
                # Synthesize prompts: continuation lines (indented or
                # 'else:') get '... ', everything else gets 'sage: '.
                in_loop = False
                for v in input_lines:
                    if len(v) == 0:
                        pass
                    elif len(v.lstrip()) != len(v): # starts with white space
                        in_loop = True
                        s += '... ' + v + '\n'
                    elif v[:5] == 'else:':
                        in_loop = True
                        s += '... ' + v + '\n'
                    else:
                        if in_loop:
                            s += '...\n'
                            in_loop = False
                        s += pr + v + '\n'
        else:
            s += self.__in
        if prompts:
            msg = TRACEBACK
            if self.__out.strip().startswith(msg):
                # Collapse traceback bodies to 'Traceback ... <last lines>'.
                v = self.__out.strip().splitlines()
                w = [msg, '...']
                for i in range(1,len(v)):
                    if not (len(v[i]) > 0 and v[i][0] == ' '):
                        w = w + v[i:]
                        break
                out = '\n'.join(w)
            else:
                out = self.output_text(ncols, raw=True, html=False)
        else:
            out = self.output_text(ncols, raw=True, html=False, allow_interact=False)
            # '///' separates input from output in the edit format.
            out = '///\n' + out
        if not max_out is None and len(out) > max_out:
            out = out[:max_out] + '...'
        # Get rid of spurious carriage returns
        s = s.strip('\n')
        out = out.strip('\n').strip('\r').strip('\r\n')
        s = s + '\n' + out
        if not prompts:
            s = s.rstrip('\n')
        return s
    def edit_text(self, ncols=0, prompts=False, max_out=None):
        r"""
        Returns the text displayed in the Edit window.
        EXAMPLES::
            sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
            sage: C.edit_text()
            '{{{id=0|\n2+3\n///\n5\n}}}'
        """
        # Wrap the plain-text form in the '{{{id=N|...}}}' edit markup.
        s = self.plain_text(ncols, prompts, max_out)
        return '{{{id=%s|\n%s\n}}}'%(self.id(), s)
    def is_last(self):
        """
        Returns True if self is the last cell in the worksheet.
        EXAMPLES::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = W.new_cell_after(0, "2^2"); C
            Cell 1; in=2^2, out=
            sage: C.is_last()
            True
            sage: C = W.get_cell_with_id(0)
            sage: C.is_last()
            False
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        # Equality here is by cell id (see __cmp__).
        return self.__worksheet.cell_list()[-1] == self
    def next_id(self):
        """
        Returns the id of the next cell in the worksheet associated to
        self. If self is not in the worksheet or self is the last cell in
        the cell_list, then the id of the first cell is returned.
        EXAMPLES::
            sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
            sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
            sage: W = nb.create_new_worksheet('Test', 'sage')
            sage: C = W.new_cell_after(0, "2^2")
            sage: C = W.get_cell_with_id(0)
            sage: C.next_id()
            1
            sage: C = W.get_cell_with_id(1)
            sage: C.next_id()
            0
        ::
            sage: import shutil; shutil.rmtree(nb.directory())
        """
        L = self.__worksheet.cell_list()
        try:
            k = L.index(self)
        except ValueError:
            # NOTE(review): Python-2 print statement.
            print "Warning -- cell %s no longer exists"%self.id()
            return L[0].id()
        try:
            return L[k+1].id()
        except IndexError:
            # Last cell: wrap around to the first cell's id.
            return L[0].id()
def interrupt(self):
    """
    Record that the calculation running in this cell was
    interrupted: the cell is marked interrupted and no longer
    counts as evaluated.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = W.new_cell_after(0, "2^2")
        sage: C.interrupt()
        sage: C.interrupted()
        True
        sage: C.evaluated()
        False

    ::

        sage: import shutil; shutil.rmtree(nb.directory())
    """
    self.__evaluated = False
    self.__interrupted = True
def interrupted(self):
    """
    Return True when the most recent evaluation of this cell was
    interrupted.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = W.new_cell_after(0, "2^2")
        sage: C.interrupt()
        sage: C.interrupted()
        True

    ::

        sage: import shutil; shutil.rmtree(nb.directory())
    """
    return self.__interrupted
def computing(self):
    """
    Return True when this cell is currently waiting in (or being
    processed from) its worksheet's compute queue.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = W.new_cell_after(0, "2^2")
        sage: C.computing()
        False

    ::

        sage: import shutil; shutil.rmtree(nb.directory())
    """
    queue = self.__worksheet.queue()
    return self in queue
def is_interactive_cell(self):
    r"""
    Return True if this cell's input uses ``interact``, either
    called as a function or applied as a decorator.  String
    literals are stripped first so a mention of interact inside a
    string does not count.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = W.new_cell_after(0, "@interact\ndef f(a=slider(0,10,1,5):\n    print a^2")
        sage: C.is_interactive_cell()
        True
        sage: C = W.new_cell_after(C.id(), "2+2")
        sage: C.is_interactive_cell()
        False

    ::

        sage: import shutil; shutil.rmtree(nb.directory())
    """
    # Deliberately not cached: the input text can change at any time.
    src = strip_string_literals(self.input_text())[0]
    if re.search('(?<!\w)interact\s*\(.*\).*', src):
        return True
    return bool(re.search('\s*@\s*interact\s*\n', src))
def is_interacting(self):
    r"""
    Return True while this cell is in the middle of an interact
    session with the user (i.e. interact state has been stored on
    the cell).

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = W.new_cell_after(0, "@interact\ndef f(a=slider(0,10,1,5):\n    print a^2")
        sage: C.is_interacting()
        False
    """
    return hasattr(self, 'interact')
def stop_interacting(self):
    """
    Terminate any in-progress interaction by discarding the cell's
    stored interact state.  A no-op when the cell is not
    interacting.

    TODO: Add doctests for :meth:`stop_interacting`.
    """
    if not self.is_interacting():
        return
    del self.interact
def set_input_text(self, input):
    """
    Sets the input text of self to be the string input.

    Bumps the cell's version and marks it as not evaluated, since
    the stored output no longer matches the new input.  Inputs
    starting with the special marker ``%__sage_interact__`` are
    treated as interact control updates, not ordinary input.

    TODO: Add doctests for the code dealing with interact.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = W.new_cell_after(0, "2^2")
        sage: C.evaluate()
        sage: W.check_comp(wait=9999)
        ('d', Cell 1; in=2^2, out=
        4
        )
        sage: C.version()
        0

    ::

        sage: C.set_input_text('3+3')
        sage: C.input_text()
        '3+3'
        sage: C.evaluated()
        False
        sage: C.version()
        1

    ::

        sage: import shutil; shutil.rmtree(nb.directory())
    """
    # Stuff to deal with interact
    if input.startswith('%__sage_interact__'):
        # Interact update from the browser: store the new control
        # state (which also makes is_interacting() True), bump the
        # version, and leave the real input text untouched.
        self.interact = input[len('%__sage_interact__')+1:]
        self.__version = self.version() + 1
        return
    elif self.is_interacting():
        # Ordinary input arrived while interacting: drop any stale
        # interact state and cached interact output.
        try:
            del self.interact
            del self._interact_output
        except AttributeError:
            pass
    # We have updated the input text so the cell can't have
    # been evaluated.
    self.__evaluated = False
    self.__version = self.version() + 1
    self.__in = input
    # Cached rendered HTML is now stale.
    if hasattr(self, '_html_cache'):
        del self._html_cache
    #Run get the input text with all of the percent
    #directives parsed
    self._cleaned_input = self.parse_percent_directives()
def input_text(self):
    """
    Return this cell's raw input text, exactly as entered.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.input_text()
        '2+3'
    """
    return self.__in
def cleaned_input_text(self):
    r"""
    Return the input text with every leading percent directive
    removed.  While the cell is interacting, the interact text is
    returned instead.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '%hide\n%maxima\n2+3', '5', None)
        sage: C.cleaned_input_text()
        '2+3'
    """
    if self.is_interacting():
        return self.interact
    return self._cleaned_input
def parse_percent_directives(self):
    r"""
    Returns a string which consists of the input text of this cell
    with the percent directives at the top removed.  As it's doing
    this, it computes a list of all the directives and which
    system (if any) the cell should be run under.

    Side effects: sets ``self._system`` (the evaluation system
    named by the last non-reserved directive, or None) and
    ``self._percent_directives`` (every directive seen, without
    the leading ``%``).

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '%hide\n%maxima\n2+3', '5', None)
        sage: C.parse_percent_directives()
        '2+3'
        sage: C.percent_directives()
        ['hide', 'maxima']
    """
    self._system = None
    text = self.input_text().split('\n')
    directives = []
    for i, line in enumerate(text):
        if not line.startswith('%'):
            #Handle the #auto case here for now
            # '#auto' is treated as a directive line; any other
            # non-percent line ends the directive block.
            if line == "#auto":
                pass
            else:
                break
        elif line in ['%auto', '%hide', '%hideall', '%save_server', "%time", "%timeit"]:
            #We do not consider any of the above percent
            #directives as specifying a system.
            pass
        else:
            # Any other directive names the system (e.g. '%maxima');
            # the last one seen wins.
            self._system = line[1:]
        directives.append(line[1:])
    self._percent_directives = directives
    # 'i' is the index of the first non-directive line (the break
    # target).  NOTE(review): if every line is a directive, the loop
    # ends without breaking and the final directive line is included
    # in the returned text -- presumably harmless in practice, but
    # worth confirming.
    return "\n".join(text[i:]).strip()
def percent_directives(self):
    r"""
    Return the list of percent directives that appear at the top
    of this cell's input (without their leading ``%``).

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '%hide\n%maxima\n2+3', '5', None)
        sage: C.percent_directives()
        ['hide', 'maxima']
    """
    return self._percent_directives
def system(self):
    r"""
    Return the system used to evaluate this cell, as named by a
    percent directive such as ``%maxima`` at the top of the cell.

    When no system is explicitly specified, None is returned,
    which tells the notebook to use the worksheet's default
    system.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '%maxima\n2+3', '5', None)
        sage: C.system()
        'maxima'
        sage: prefixes = ['%hide', '%time', '']
        sage: cells = [sage.server.notebook.cell.Cell(0, '%s\n2+3'%prefix, '5', None) for prefix in prefixes]
        sage: [(C, C.system()) for C in cells if C.system() is not None]
        []
    """
    return self._system
def is_auto_cell(self):
    r"""
    Return True if this is an auto cell, i.e. one that is
    evaluated automatically when the worksheet starts up
    (marked with ``#auto`` or ``%auto``).

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.is_auto_cell()
        False
        sage: C = sage.server.notebook.cell.Cell(0, '#auto\n2+3', '5', None)
        sage: C.is_auto_cell()
        True
    """
    directives = self.percent_directives()
    return 'auto' in directives
def changed_input_text(self):
    """
    Return the pending changed input text for the cell, or the
    empty string when there is none.  Any stored changed input is
    cleared (reset to '') before this method returns.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.changed_input_text()
        ''
        sage: C.set_changed_input_text('3+3')
        sage: C.input_text()
        '3+3'
        sage: C.changed_input_text()
        '3+3'
        sage: C.changed_input_text()
        ''
        sage: C.version()
        0
    """
    try:
        pending = self.__changed_input
    except AttributeError:
        return ''
    # Consume the pending value so the next call returns ''.
    del self.__changed_input
    return pending
def set_changed_input_text(self, new_text):
    """
    Set both the cell's input and its pending "changed input" to
    ``new_text``.  Note that this does not update the version of
    the cell.  This is typically used for things like tab
    completion.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.set_changed_input_text('3+3')
        sage: C.input_text()
        '3+3'
        sage: C.changed_input_text()
        '3+3'
    """
    self.__in = new_text
    self.__changed_input = new_text
def set_output_text(self, output, html, sage=None):
    r"""
    Sets the output text for self.

    INPUT:

    - ``output`` -- a string, the plain-text output
    - ``html`` -- a string, the HTML output
    - ``sage`` -- optional object stored alongside the output
      (retrieved by :meth:`sage`)

    Very long output (more than MAX_OUTPUT characters or
    MAX_OUTPUT_LINES lines) is truncated to its first and last
    halves; when possible, the full output is written to
    ``full_output.txt`` in the cell's directory and a link to it
    is appended to the HTML.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: len(C.plain_text())
        11
        sage: C.set_output_text('10', '10')
        sage: len(C.plain_text())
        12
    """
    if output.count('<?__SAGE__TEXT>') > 1:
        # More than one interact template in a single cell is not
        # supported; replace the output with a warning.
        html = '<h3><font color="red">WARNING: multiple @interacts in one cell disabled (not yet implemented).</font></h3>'
        output = ''
    # In interacting mode, we just save the computed output
    # (do not overwrite).
    if self.is_interacting():
        self._interact_output = (output, html)
        return
    if hasattr(self, '_html_cache'):
        del self._html_cache
    output = output.replace('\r','')
    # We do not truncate if "notruncate" or "Output truncated!" already
    # appears in the output. This notruncate tag is used right now
    # in sage.server.support.help.
    if 'notruncate' not in output and 'Output truncated!' not in output and \
       (len(output) > MAX_OUTPUT or output.count('\n') > MAX_OUTPUT_LINES):
        url = ""
        if not self.computing():
            # Save the untruncated output to a file and link to it.
            # NOTE(review): the file handle is never explicitly
            # closed; CPython closes it at garbage collection.
            file = "%s/full_output.txt"%self.directory()
            open(file,"w").write(output)
            url = "<a target='_new' href='%s/full_output.txt' class='file_link'>full_output.txt</a>"%(
                self.url_to_self())
            html+="<br>" + url
        lines = output.splitlines()
        # Keep the first and last halves, limited both by line count
        # and by character count.
        start = '\n'.join(lines[:MAX_OUTPUT_LINES/2])[:MAX_OUTPUT/2]
        end = '\n'.join(lines[-MAX_OUTPUT_LINES/2:])[-MAX_OUTPUT/2:]
        warning = 'WARNING: Output truncated!  '
        if url:
            # make the link to the full output appear at the top too.
            warning += '\n<html>%s</html>\n'%url
        output = warning + '\n\n' + start + '\n\n...\n\n' + end
    self.__out = output
    if not self.is_interactive_cell():
        self.__out_html = html
    self.__sage = sage
def sage(self):
    """
    Return the object stored by :meth:`set_output_text` via its
    ``sage`` argument, or None if none was ever stored.

    TODO: Figure out what exactly this does.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.sage() is None
        True
    """
    try:
        stored = self.__sage
    except AttributeError:
        stored = None
    return stored
def output_html(self):
    """
    Return the HTML for this cell's output, initializing it to the
    empty string the first time it is accessed.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.output_html()
        ''
        sage: C.set_output_text('5', '<strong>5</strong>')
        sage: C.output_html()
        '<strong>5</strong>'
    """
    try:
        html = self.__out_html
    except AttributeError:
        # Lazily initialize so subsequent calls take the fast path.
        html = self.__out_html = ''
    return html
def process_cell_urls(self, urls):
    """
    Replace every ``cell://...`` URL in ``urls`` by the path to
    this cell, with the cell's version number appended as a query
    string.

    INPUT:

    - ``urls`` - a string

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
        sage: C.process_cell_urls('"cell://foobar"')
        '/home/sage/0/cells/0/foobar?0"'
    """
    prefix = self.url_to_self()
    suffix = '?%d"'%self.version()
    matches = re_cell.findall(urls) + re_cell_2.findall(urls)
    for match in matches:
        # match[7:-1] strips the leading 'cell://' and trailing quote.
        urls = urls.replace(match, prefix + match[7:-1] + suffix)
    return urls
def output_text(self, ncols=0, html=True, raw=False, allow_interact=True):
    """
    Returns the text for self's output.

    INPUT:

    - ``ncols`` -- maximum number of columns
    - ``html`` -- boolean stating whether to output HTML
    - ``raw`` -- boolean stating whether to output raw text
      (takes precedence over HTML)
    - ``allow_interact`` -- boolean stating whether to allow interaction

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
        sage: C.output_text()
        '<pre class="shrunk">5</pre>'
        sage: C.output_text(html=False)
        '<pre class="shrunk">5</pre>'
        sage: C.output_text(raw=True)
        '5'
    """
    if allow_interact and hasattr(self, '_interact_output'):
        # Get the input template (recurse with interaction disabled
        # so we render the stored cell output itself).
        z = self.output_text(ncols, html, raw, allow_interact=False)
        if not '<?__SAGE__TEXT>' in z or not '<?__SAGE__HTML>' in z:
            return z
        if ncols:
            # Get the output template
            try:
                # Fill in the output template with the most recently
                # computed interact output.
                output,html = self._interact_output
                output = self.parse_html(output, ncols)
                z = z.replace('<?__SAGE__TEXT>', output)
                z = z.replace('<?__SAGE__HTML>', html)
                return z
            except (ValueError, AttributeError), msg:
                print msg
                pass
        else:
            # Get rid of the interact div to avoid updating the wrong output location
            # during interact.
            return ''
    is_interact = self.is_interactive_cell()
    if is_interact and ncols == 0:
        if 'Traceback (most recent call last)' in self.__out:
            # The interact raised an error; show the traceback rather
            # than the interact placeholder.
            s = self.__out.replace('cell-interact','')
            is_interact=False
        else:
            return '<h2>Click to the left again to hide and once more to show the dynamic interactive window</h2>'
    else:
        s = self.__out
    if raw:
        return s
    if html:
        s = self.parse_html(s, ncols)
    # Wrap plain (non-interact, non-%html) output in a <pre> block.
    if not is_interact and not self.is_html() and len(s.strip()) > 0:
        s = '<pre class="shrunk">' + s.strip('\n') + '</pre>'
    return s.strip('\n')
def parse_html(self, s, ncols):
    r"""
    Parse HTML for output: escape and word-wrap everything except
    spans explicitly wrapped in ``<html> ... </html>``, which pass
    through with only their ``cell://`` URLs rewritten.

    INPUT:

    - ``s`` -- the input string containing HTML
    - ``ncols`` -- maximum number of columns

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
        sage: C.parse_html('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">\n<html><head></head><body>Test</body></html>', 80)
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0...Test</body>'
    """
    def format(x):
        # Escape and word-wrap text that is not explicit HTML.
        return word_wrap(escape(x), ncols=ncols)
    def format_html(x):
        # Rewrite cell:// URLs to point at this cell's directory.
        return self.process_cell_urls(x)
    # if there is an error in the output,
    # specially format it.
    if not self.is_interactive_cell():
        s = format_exception(format_html(s), ncols)
    # Everything not wrapped in <html> ... </html> should be
    # escaped and word wrapped.
    t = ''
    while len(s) > 0:
        i = s.find('<html>')
        if i == -1:
            # No more <html> spans: format the rest and stop.
            t += format(s)
            break
        j = s.find('</html>')
        if j == -1:
            # Unterminated <html>: format only the text before it.
            t += format(s[:i])
            break
        # len('<html>') == 6 and len('</html>') == 7.
        t += format(s[:i]) + format_html(s[i+6:j])
        s = s[j+7:]
    t = t.replace('</html>','')
    # Get rid of the <script> tags, since we do not want them to
    # be evaluated twice. They are only evaluated in the wrapped
    # version of the output.
    if ncols == 0:
        while True:
            i = t.lower().find('<script>')
            if i == -1: break
            j = t[i:].lower().find('</script>')
            if j == -1: break
            # j is relative to i, so this removes <script>..</script>.
            t = t[:i] + t[i+j+len('</script>'):]
    return t
def has_output(self):
    """
    Return True when this cell has any non-whitespace output.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.has_output()
        True
        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '', None)
        sage: C.has_output()
        False
    """
    return bool(self.__out.strip())
def is_html(self):
    r"""
    Return True if this is an HTML cell.  An HTML cell is one
    whose system is 'html', typically specified by ``%html``, or
    one explicitly flagged via :meth:`set_is_html`.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, "%html\nTest HTML", None, None)
        sage: C.system()
        'html'
        sage: C.is_html()
        True
        sage: C = sage.server.notebook.cell.Cell(0, "Test HTML", None, None)
        sage: C.is_html()
        False
    """
    try:
        flag = self.__is_html
    except AttributeError:
        # Fall back to the percent-directive system.
        flag = (self.system() == 'html')
    return flag
def set_is_html(self, v):
    """
    Explicitly flag whether or not this cell is an HTML cell.
    This is called by check_for_system_switching in worksheet.py.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.is_html()
        False
        sage: C.set_is_html(True)
        sage: C.is_html()
        True
    """
    self.__is_html = v
#################
# Introspection #
#################
def set_introspect_html(self, html, completing=False, verbose=False):
    """
    Render and store the HTML shown for introspection (docstrings,
    tab completion, etc.).  Plain text is wrapped in a <pre>
    block; ReST-looking text is rendered with Sphinx, with results
    cached on disk and guarded by a file lock.

    If ``verbose`` is True, print verbose output about notebook
    introspection to the command-line.  However, the argument
    ``verbose`` is not easily accessible now -- if you need to
    debug, you have to edit this file, changing its value to True,
    and run 'sage -b'.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, 'sage?', '', W)
        sage: C.introspect()
        False
        sage: C.evaluate(username='sage')
        sage: W.check_comp(9999)
        ('d', Cell 0; in=sage?, out=)
        sage: C.set_introspect_html('foobar')
        sage: C.introspect_html()
        '<div class="docstring"><pre>foobar</pre></div>'
        sage: C.set_introspect_html('`foobar`')
        sage: C.introspect_html()
        '<div class="docstring">...<span class="math">foobar</span>...</div>'
    """
    if html == "" or completing:
        # Completion lists (and empty text) are stored verbatim.
        self.__introspect_html = html
    elif html.find("`") == -1 and html.find("::") == -1:
        # html doesn't seem to be in ReST format so use docutils
        # to process the preamble ("**File:**" etc.) and put
        # everything else in a <pre> block.
        i = html.find("**Docstring:**")
        if i != -1:
            # len("**Docstring:**") == 14.
            preamble = html[:i+14]
            from docutils.core import publish_parts
            preamble = publish_parts(html[:i+14], writer_name='html')['body']
            html = html[i+14:]
        else:
            preamble = ""
        self.__introspect_html = '<div class="docstring">' + preamble + '<pre>' + html + '</pre></div>'
    else:
        # html is in ReST format, so use Sphinx to process it

        # Set the location of the introspection cache, "permanent"
        # or temporary.  The former, DOT_SAGE/sage_notebook/doc,
        # can pool queries from multiple worksheet processes.  The
        # latter is exclusive to a worksheet's process.  The Sage
        # cleaner should delete the temporary directory (or
        # directories) after the notebook server exits.
        import errno, hashlib
        global Sphinx, _SAGE_INTROSPECT
        if not Sphinx:
            from sphinx.application import Sphinx
        if _SAGE_INTROSPECT is None:
            from sage.misc.misc import DOT_SAGE, tmp_dir
            # It's important to use os.path.join, instead of +,
            # because Sphinx counts on normalized paths.  It's also
            # more portable.
            std_doc_dir = os.path.join(DOT_SAGE, 'sage_notebook/doc')
            try:
                os.makedirs(std_doc_dir)
                _SAGE_INTROSPECT = std_doc_dir
            except OSError, error:
                if error.errno == errno.EEXIST:
                    # Directory already exists; use it only if we
                    # have full access, otherwise fall back to a
                    # temporary directory.
                    if os.access(std_doc_dir, os.R_OK | os.W_OK | os.X_OK):
                        _SAGE_INTROSPECT = std_doc_dir
                    else:
                        _SAGE_INTROSPECT = tmp_dir()
                else:
                    _SAGE_INTROSPECT = tmp_dir()
        if verbose:
            print 'Introspection cache: ', _SAGE_INTROSPECT
        # We get a quick checksum of the input.  MD5 admits
        # collisions, but we're not concerned about such security
        # issues here.  Of course, if performance permits, we can
        # choose a more robust hash function.
        hash = hashlib.md5(html).hexdigest()
        base_name = os.path.join(_SAGE_INTROSPECT, hash)
        html_name = base_name + '.html'
        # Multiple processes might try to read/write the target
        # HTML file simultaneously.  We use a file-based lock.
        # Since we care only about the target's contents, and
        # we've configured Sphinx accordingly, we allow multiple
        # simultaneous instances of Sphinx, as long as their
        # targets are different.  Systems which don't properly
        # implement os.O_EXCL may require coarser locking.
        # The Pythonic cross-platform file lock below is adapted
        # from
        # http://www.evanfosmark.com/2009/01/cross-platform-file-locking-support-in-python/
        lock_name = base_name + '.lock'
        # Try to acquire the lock, periodically.  If we time out,
        # we fall back to plainly formatted documentation.
        timeout = 0.5
        delay = 0.05
        start_time = time.time()
        while True:
            try:
                # This operation is atomic on platforms which
                # properly implement os.O_EXCL:
                fd_lock = os.open(lock_name, os.O_CREAT | os.O_EXCL | os.O_RDWR)
                break;
            except OSError, err:
                if (err.errno != errno.EEXIST) or (time.time() - start_time >= timeout):
                    # Lock unobtainable: render escaped plain text
                    # instead of waiting forever.
                    plain_html = escape(html).strip()
                    self.__introspect_html = '<pre class="introspection">' + plain_html + '</pre>'
                    return
                time.sleep(delay)
        # We've acquired the lock.  Use cached HTML or run Sphinx.
        try:
            open(html_name, 'r')
            if verbose:
                print 'Found: %s' % html_name
        except IOError:
            # Cache miss: write the ReST source and build it.
            html = html.replace('\\\\', '\\')
            rst_name = base_name + '.rst'
            fd_rst = open(rst_name, 'w')
            fd_rst.write(html)
            fd_rst.close()
            # Sphinx setup.  The constructor is Sphinx(srcdir,
            # confdir, outdir, doctreedir, buildername,
            # confoverrides, status, warning, freshenv).
            srcdir = os.path.normpath(_SAGE_INTROSPECT)
            # Note: It's crucial that confdir* contains a
            # customized conf.py and layout.html.  In particular,
            # we've disabled index generation and told Sphinx to
            # output almost exactly the HTML we display.  Sphinx
            # also pickles its environment in doctreedir, but we
            # force Sphinx never to load this pickle with
            # freshenv=True.
            confdir = os.path.join(SAGE_DOC, 'en/introspect')
            doctreedir = os.path.normpath(base_name)
            confoverrides = {'html_context' : {}, 'master_doc' : hash}
            # To suppress output, use this:
            if verbose:
                import sys
                sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, 'html', confoverrides, sys.stdout, sys.stderr, True)
            else:
                sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, 'html', confoverrides, None, None, True)
            # Run Sphinx.  The first argument corresponds to
            # sphinx-build's "write all files" -a flag, which we
            # set to None.
            sphinx_app.build(None, [rst_name])
            # We delete .rst files, so future Sphinx runs don't
            # keep track of them.  We also delete doctrees.
            try:
                os.unlink(rst_name)
            except OSError:
                pass
            try:
                shutil.rmtree(doctreedir)
                os.unlink(doctreedir)
            except OSError:
                pass
            if verbose:
                print 'Built: %s' % html_name
        finally:
            # Contents should be flushed on close().
            fd_html = open(html_name, 'r')
            new_html = fd_html.read()
            fd_html.close()
            # We release the lock and delete the lock file.
            os.close(fd_lock)
            os.unlink(lock_name)
        new_html = new_html.replace('<pre>', '<pre class="literal-block">')
        # Translate URLs for media from something like
        #    "../../media/...path.../blah.png"
        # or
        #    "/media/...path.../blah.png"
        # to
        #    "/doc/static/reference/media/...path.../blah.png"
        new_html = re.sub("""src=['"](/?\.\.)*/?media/([^"']*)['"]""",
                          'src="/doc/static/reference/media/\\2"',
                          new_html)
        self.__introspect_html = new_html
        return
def introspect_html(self):
    """
    Return the HTML stored for introspection, or '' when this cell
    is not introspecting.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, 'sage?', '', W)
        sage: C.introspect()
        False
        sage: C.evaluate(username='sage')
        sage: W.check_comp(9999)
        ('d', Cell 0; in=sage?, out=)
        sage: C.introspect_html()
        u'<div class="docstring">...</pre></div>'
    """
    if not self.introspect():
        return ''
    try:
        html = self.__introspect_html
    except AttributeError:
        # Lazily initialize for subsequent calls.
        html = self.__introspect_html = ''
    return html
def introspect(self):
    """
    Return this cell's introspection text (a pair of strings
    around the cursor), or False if the cell is not introspecting.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, 'sage?', '', W)
        sage: C.introspect()
        False
        sage: C.evaluate(username='sage')
        sage: W.check_comp(9999)
        ('d', Cell 0; in=sage?, out=)
        sage: C.introspect()
        ['sage?', '']
    """
    try:
        state = self.__introspect
    except AttributeError:
        state = False
    return state
def unset_introspect(self):
    """
    Clear this cell's introspection text, so :meth:`introspect`
    returns False.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, 'sage?', '', W)
        sage: C.evaluate(username='sage')
        sage: W.check_comp(9999)
        ('d', Cell 0; in=sage?, out=)
        sage: C.introspect()
        ['sage?', '']
        sage: C.unset_introspect()
        sage: C.introspect()
        False
    """
    self.__introspect = False
def set_introspect(self, before_prompt, after_prompt):
    """
    Store this cell's introspection text as the pair of strings
    before and after the cursor.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.set_introspect("a", "b")
        sage: C.introspect()
        ['a', 'b']
    """
    self.__introspect = [before_prompt, after_prompt]
def evaluate(self, introspect=False, time=None, username=None):
    r"""
    Queue this cell for evaluation by its worksheet and clear out
    all files left over from previous evaluations.

    INPUT:

    - ``username`` - name of user doing the evaluation
    - ``time`` - if True return time computation takes
    - ``introspect`` - either False or a pair
      [before_cursor, after_cursor] of strings.

    EXAMPLES:

    We create a notebook, worksheet, and cell and evaluate it
    in order to compute `3^5`::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: W.edit_save('Sage\n{{{\n3^5\n}}}')
        sage: C = W.cell_list()[0]; C
        Cell 0; in=3^5, out=
        sage: C.evaluate(username='sage')
        sage: W.check_comp(wait=9999)
        ('d', Cell 0; in=3^5, out=
        243
        )
        sage: C
        Cell 0; in=3^5, out=
        243

    ::

        sage: import shutil; shutil.rmtree(nb.directory())
    """
    # A fresh evaluation is not interrupted; mark it evaluated and
    # record the requested timing/introspection state.
    self.__interrupted = False
    self.__evaluated = True
    if time is not None:
        self.__time = time
    self.__introspect = introspect
    # Hand the cell over to the worksheet's compute queue.
    self.__worksheet.enqueue(self, username=username)
    self.__type = 'wrap'
    # Remove all files (and subdirectories) produced by previous
    # evaluations of this cell.
    dir = self.directory()
    for D in os.listdir(dir):
        F = dir + '/' + D
        try:
            os.unlink(F)
        except OSError:
            # F is probably a directory; try removing it recursively.
            try:
                shutil.rmtree(F)
            except StandardError:
                pass
def version(self):
    """
    Return the version number of this cell, initializing it to 0
    the first time it is requested.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.version()
        0
        sage: C.set_input_text('2+3')
        sage: C.version()
        1
    """
    try:
        v = self.__version
    except AttributeError:
        # Lazily initialize so subsequent calls take the fast path.
        v = self.__version = 0
    return v
def time(self):
    r"""
    Returns True if the time it takes to evaluate this cell should
    be printed.  That is the case when the cell carries a
    ``%time`` or ``%timeit`` percent directive, or when a time
    flag was passed to :meth:`evaluate`.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: C.time()
        False
        sage: C = sage.server.notebook.cell.Cell(0, '%time\n2+3', '5', None)
        sage: C.time()
        True
    """
    directives = self.percent_directives()
    if 'time' in directives or 'timeit' in directives:
        return True
    # evaluate() stores its time flag as self.__time, which name
    # mangling turns into the attribute '_Cell__time'.  The previous
    # code did getattr(self, '__time', False): the literal name
    # '__time' never exists on the instance, so the flag set by
    # evaluate(time=True) was silently ignored.
    return bool(getattr(self, '_Cell__time', False))
def doc_html(self, wrap=None, div_wrap=True, do_print=False):
    """
    Modified version of ``self.html`` for the doc browser.
    This is a hack and needs to be improved. The problem is how to get
    the documentation html to display nicely between the example cells.
    The type setting (jsMath formatting) needs attention too.

    TODO: Remove this hack (:meth:`doc_html`)
    """
    # Queue the cell for evaluation so doc examples get computed.
    self.evaluate()
    if wrap is None:
        wrap = self.notebook().conf()['word_wrap_cols']
    # CSS class reflecting whether the cell has been evaluated.
    if self.evaluated():
        cls = 'cell_evaluated'
    else:
        cls = 'cell_not_evaluated'
    # Only the cell's full HTML output is rendered here.  The
    # original code also computed self.html_in(...) and an
    # introspection div but never used either value; that dead code
    # has been removed.
    s = self.html()
    if div_wrap:
        s = '\n\n<div id="cell_outer_%s" class="cell_visible"><div id="cell_%s" class="%s">'%(self.id(), self.id(), cls) + s + '</div></div>'
    return s
def html(self, wrap=None, div_wrap=True, do_print=False):
    r"""
    Returns the HTML for self: input area, introspection div and
    output, optionally wrapped in the outer cell divs.

    INPUT:

    - ``wrap`` - a boolean stating whether to wrap lines. Defaults to
      configuration if not given.
    - ``div_wrap`` - a boolean stating whether to wrap ``div``.
    - ``do_print`` - a boolean stating whether the HTML is for
      print or not.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
        sage: C.html()
        '\n\n<div id="cell_outer_0" cl...</div>'
    """
    if do_print:
        wrap = 68
        div_wrap = 68  # NOTE(review): 68 is only used as a truthy value here; True was presumably intended
    # Cache key; caching itself is currently disabled (see the
    # commented-out assignment near the bottom).
    key = (wrap,div_wrap,do_print)
    if wrap is None:
        wrap = self.notebook().conf()['word_wrap_cols']
    evaluated = self.evaluated()
    # Printed cells are always styled as evaluated.
    if evaluated or do_print:
        cls = 'cell_evaluated'
    else:
        cls = 'cell_not_evaluated'
    html_in = self.html_in(do_print=do_print)
    introspect = "<div id='introspect_div_%s' class='introspection'></div>"%self.id()
    html_out = self.html_out(wrap, do_print=do_print)
    if 'hideall' in self.percent_directives():
        # %hideall: suppress the input; show only the output.
        s = html_out
    else:
        s = html_in + introspect + html_out
    if div_wrap:
        s = '\n\n<div id="cell_outer_%s" class="cell_visible"><div id="cell_%s" class="%s">'%(self.id(), self.id(), cls) + s + '</div></div>'
    #self._html_cache[key] = s
    return s
def html_in(self, do_print=False, ncols=80):
    """
    Returns the HTML code for the input area of this cell: either
    a printable div (``do_print=True``) or an editable textarea
    with an evaluate link.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: print C.html_in()
        <div class="insert_new_cell" id="insert_new_cell_0"...</a>
    """
    s = ''
    id = self.__id
    t = self.__in.rstrip()
    # Hidden input cells get a collapsed CSS class.
    cls = "cell_input_hide" if 'hide' in self.percent_directives() else "cell_input"
    if not do_print:
        # Clickable strip for inserting a new cell above this one.
        s += self.html_new_cell_before()
    # Number of textarea rows needed to show the input.
    r = max(1, number_of_rows(t.strip(), ncols))
    if do_print:
        if 'hide' in self.percent_directives():
            return ''
        # NOTE(review): the ' ' arguments below look like '&nbsp;'
        # entities garbled by text extraction -- confirm against the
        # original source.
        tt = escape(t).replace('\n','<br>').replace(' ',' ') + ' '
        s += '<div class="cell_input_print">%s</div>'%tt
    else:
        s += """
<textarea class="%s" rows=%s cols=%s
id = 'cell_input_%s'
onKeyPress = 'return input_keypress(%s,event);'
onKeyDown = 'return input_keydown(%s,event);'
onKeyUp = 'return input_keyup(%s, event);'
onBlur = 'cell_blur(%s); return true;'
onFocus = 'cell_focused(this,%s); return true;'
>%s</textarea>
"""%(cls, r, ncols, id, id, id, id, id, id, t)
    if not do_print:
        s+= '<a href="javascript:evaluate_cell(%s,0)" class="eval_button" id="eval_button%s" alt="Click here or press shift-return to evaluate">evaluate</a>'%(id,id)
    # NOTE(review): this assignment appears to be dead code -- t is
    # not used after this point.
    t = escape(t)+" "
    return s
def html_new_cell_before(self):
    """
    Returns the HTML code for inserting a new cell before self.

    The %(id)s placeholders are filled with this cell's id; the
    script wires a plain click to insert a compute cell and a
    shift-click to insert a text cell.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: print C.html_new_cell_before()
        <div class="insert_new_cell" id="insert_new_cell_0">...
    """
    return """<div class="insert_new_cell" id="insert_new_cell_%(id)s">
</div>
<script type="text/javascript">
$("#insert_new_cell_%(id)s").plainclick(function(e) {insert_new_cell_before(%(id)s);});
$("#insert_new_cell_%(id)s").shiftclick(function(e) {insert_new_text_cell_before(%(id)s);});
</script>"""%{'id': self.id()}
def html_new_cell_after(self):
    """
    Returns the HTML code for inserting a new cell after self.

    The %(id)s placeholders are filled with this cell's id; the
    script wires a plain click to insert a compute cell and a
    shift-click to insert a text cell.

    EXAMPLES::

        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', None)
        sage: print C.html_new_cell_after()
        <div class="insert_new_cell" id="insert_new_cell_0">...
    """
    return """<div class="insert_new_cell" id="insert_new_cell_%(id)s">
</div>
<script type="text/javascript">
$("#insert_new_cell_%(id)s").plainclick(function(e) {insert_new_cell_after(%(id)s);});
$("#insert_new_cell_%(id)s").shiftclick(function(e) {insert_new_text_cell_after(%(id)s);});
</script>"""%{'id': self.id()}
def url_to_self(self):
    """
    Return the notebook URL for this cell, caching it on first
    use.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, '2+3', '5', W)
        sage: C.url_to_self()
        '/home/sage/0/cells/0'
    """
    try:
        return self.__url_to_self
    except AttributeError:
        pass
    # Compute once and cache for subsequent calls.
    self.__url_to_self = '/home/%s/cells/%s'%(self.worksheet_filename(), self.id())
    return self.__url_to_self
def files(self):
    """
    Return the list of names of all files in this cell's
    directory.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
        sage: C.evaluate()
        sage: W.check_comp(wait=9999)
        ('d', Cell 0; in=plot(sin(x),0,5), out=
        <html><font color='black'><img src='cell://sage0.png'></font></html>
        <BLANKLINE>
        )
        sage: C.files()
        ['sage0.png']

    ::

        sage: import shutil; shutil.rmtree(nb.directory())
    """
    return os.listdir(self.directory())
def delete_files(self):
    """
    Delete every file associated with this cell by removing the
    cell's directory, if it exists.

    EXAMPLES::

        sage: nb = sage.server.notebook.notebook.Notebook(tmp_dir())
        sage: nb.add_user('sage','sage','sage@sagemath.org',force=True)
        sage: W = nb.create_new_worksheet('Test', 'sage')
        sage: C = sage.server.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
        sage: C.evaluate()
        sage: W.check_comp(wait=9999)
        ('d', Cell 0; in=plot(sin(x),0,5), out=
        <html><font color='black'><img src='cell://sage0.png'></font></html>
        <BLANKLINE>
        )
        sage: C.files()
        ['sage0.png']
        sage: C.delete_files()
        sage: C.files()
        []
    """
    try:
        cell_dir = self._directory_name()
    except AttributeError:
        # No directory name available, hence nothing to delete.
        return
    if os.path.exists(cell_dir):
        shutil.rmtree(cell_dir, ignore_errors=True)
def files_html(self, out):
    """
    Returns HTML to display the files in self's directory.

    INPUT:

    - ``out`` - string to exclude files.
      Format: To exclude bar, foo, ... ``'cell://bar cell://foo ...'``

    OUTPUT: a string of HTML -- image-like files rendered inline
    (joined by ``<br>``), followed by plain download links for
    everything else.
    """
    import time
    D = self.files()
    D.sort()
    if len(D) == 0:
        return ''
    images = []
    files = []
    # The question mark trick here is so that images will be reloaded when
    # the async request requests the output text for a computation.
    # This is inspired by http://www.irt.org/script/416.htm/.
    for F in D:
        # Skip files explicitly excluded via the ``out`` string.
        if 'cell://%s'%F in out:
            continue
        url = "%s/%s"%(self.url_to_self(), F)
        if F.endswith('.png') or F.endswith('.bmp') or \
           F.endswith('.jpg') or F.endswith('.gif'):
            # Raster images: embed with a cache-busting timestamp query.
            images.append('<img src="%s?%d">'%(url, time.time()))
        elif F.endswith('.obj'):
            # 3-D object: render a link that launches the sage3d viewer.
            images.append("""<a href="javascript:sage3d_show('%s', '%s_%s', '%s');">Click for interactive view.</a>"""%(url, self.__id, F, F[:-4]))
        elif F.endswith('.mtl') or F.endswith(".objmeta"):
            pass # obj data
        elif F.endswith('.svg'):
            images.append('<embed src="%s" type="image/svg+xml" name="emap">'%url)
        elif F.endswith('.jmol'):
            # If F ends in -size500.jmol then we make the viewer applet with size 500.
            i = F.rfind('-size')
            if i != -1:
                # Strip the trailing '.jmol' (5 chars) to get the size digits.
                size = F[i+5:-5]
            else:
                size = 500
            if self.worksheet().docbrowser():
                # In the doc browser, rewrite the script's defaultdirectory
                # in place so jmol resolves data relative to this cell's URL.
                jmol_name = os.path.join(self.directory(), F)
                jmol_file = open(jmol_name, 'r')
                jmol_script = jmol_file.read()
                jmol_file.close()
                jmol_script = jmol_script.replace('defaultdirectory "', 'defaultdirectory "' + self.url_to_self() + '/')
                jmol_file = open(jmol_name, 'w')
                jmol_file.write(jmol_script)
                jmol_file.close()
            #popup = """<br><a href="javascript:jmol_popup('%s');">Enlarge</a>"""%url
            #script = '<script>jmol_applet(%s, "%s");</script>%s' % (size, url, popup)
            #script = '<script>jmol_popup("%s");</script>' % (url)
            script = '<div><script>jmol_applet(%s, "%s?%d");</script></div>' % (size, url, time.time())
            images.append(script)
        elif F.endswith('.jmol.zip'):
            pass # jmol data
        elif F.endswith('.canvas3d'):
            script = '<div><script>canvas3d.viewer("%s");</script></div>' % url
            images.append(script)
        else:
            # Anything unrecognized becomes a download link; very long
            # names are elided in the middle.
            link_text = str(F)
            if len(link_text) > 40:
                link_text = link_text[:10] + '...' + link_text[-20:]
            files.append('<a target="_new" href="%s" class="file_link">%s</a>'%(url, link_text))
    if len(images) == 0:
        images = ''
    else:
        images = "%s"%'<br>'.join(images)
    if len(files) == 0:
        files = ''
    else:
        files = (' '*3).join(files)
    return images + files
def html_out(self, ncols=0, do_print=False):
    r"""
    Returns the HTML for self's output.

    INPUT:

    - ``do_print`` -- a boolean stating whether to output HTML
      for print

    - ``ncols`` -- the number of columns

    OUTPUT: a string of HTML: a table holding the cell number column
    and three stacked output divs (wrapped text, unwrapped text, and
    html output), wrapped in a ``cell_output_div``.
    """
    # Hidden output prints as an empty block.
    if do_print and self.cell_output_type() == 'hidden':
        return '<pre>\n</pre>'
    out_nowrap = self.output_text(0, html=True)
    out_html = self.output_html()
    # During introspection the wrapped view mirrors the unwrapped text.
    if self.introspect():
        out_wrap = out_nowrap
    else:
        out_wrap = self.output_text(ncols, html=True)
    typ = self.cell_output_type()
    # CSS class reflects computing state, else the output type.
    if self.computing():
        cls = "cell_div_output_running"
    else:
        cls = 'cell_div_output_' + typ
    top = '<div class="%s" id="cell_div_output_%s">'%(
        cls, self.__id)
    # Print view uses distinct 'print_'-prefixed CSS classes.
    if do_print:
        prnt = "print_"
    else:
        prnt = ""
    out_wrap = '<div class="cell_output_%s%s" id="cell_output_%s">%s</div>'%(
        prnt, typ,self.__id, out_wrap)
    out_nowrap = '<div class="cell_output_%snowrap_%s" id="cell_output_nowrap_%s">%s</div>'%(
        prnt, typ, self.__id, out_nowrap)
    out_html = '<div class="cell_output_html_%s" id="cell_output_html_%s">%s </div>'%(
        typ, self.__id, out_html)
    out = "%s%s%s"%(out_wrap, out_nowrap, out_html)
    s = top + out + '</div>'
    # Pad the cell-number column text to a fixed width of 7 characters.
    r = ''
    r += ' '*(7-len(r))
    tbl = """
<div class="cell_output_div">
<table class="cell_output_box"><tr>
<td class="cell_number" id="cell_number_%s" onClick="cycle_cell_output_type(%s);">
%s
</td>
<td class="output_cell">%s</td></tr></table></div>"""%(
        self.__id, self.__id, r, s)
    return tbl
########
def format_exception(s0, ncols):
    r"""
    Shrink a traceback so it does not appear expanded by default.

    INPUT:

    - ``s0`` - string

    - ``ncols`` - integer

    OUTPUT: string

    If ``s0`` contains the substring ``'notracebacks'``, or contains no
    traceback at all, ``s0`` is returned unchanged.
    """
    stripped = s0.lstrip()
    # Add a notracebacks option -- if it is in the string then tracebacks aren't shrunk.
    # This is currently used by the sage.server.support.help command.
    if TRACEBACK not in stripped or 'notracebacks' in stripped:
        return s0
    if ncols <= 0:
        # No column limit: just strip the exec-compile wrapper noise.
        stripped = stripped.replace("exec compile(ur'","")
        stripped = stripped.replace("' + '\\n', '', 'single')", "")
        return stripped
    # Collapse everything from the traceback header to the final line.
    lines = stripped.strip().splitlines()
    head = 0
    for head in range(len(lines)):
        if TRACEBACK in lines[head]:
            break
    return '\n'.join(lines[:head]) + '\nTraceback (click to the left for traceback)' + '\n...\n' + lines[-1]
# NOTE(review): alias kept so code referring to ``ComputeCell`` continues to
# work; presumably a backwards-compatibility name -- confirm before removing.
ComputeCell=Cell
def number_of_rows(txt, ncols):
    r"""
    Return how many display rows are needed to show ``txt`` when at most
    ``ncols`` characters fit on a single row.

    Every line of ``txt`` takes at least one row, plus one extra row for
    each additional full block of ``ncols`` characters.

    EXAMPLES::

        sage: from sage.server.notebook.cell import number_of_rows
        sage: s = "asdfasdf\nasdfasdf\n"
        sage: number_of_rows(s, 8)
        2
        sage: number_of_rows(s, 5)
        4
        sage: number_of_rows(s, 4)
        4
    """
    lines = txt.splitlines()
    total = len(lines)
    for line in lines:
        # int() truncation keeps the historical row count for short lines.
        total += int((len(line) - 1) / ncols)
    return total
| {
"alphanum_fraction": 0.5203117144,
"author": null,
"avg_line_length": 32.7677100494,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5e62e452d4a4e16af58f6a31f526f74cb51e0153",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9d85b34956ca2edd55af307f99c5d3859acd30bf",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "bopopescu/sage-5",
"max_forks_repo_path": "src/sage/server/notebook/cell.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9d85b34956ca2edd55af307f99c5d3859acd30bf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "bopopescu/sage-5",
"max_issues_repo_path": "src/sage/server/notebook/cell.py",
"max_line_length": 173,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "9d85b34956ca2edd55af307f99c5d3859acd30bf",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "bopopescu/sage-5",
"max_stars_repo_path": "src/sage/server/notebook/cell.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-17T10:54:00.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-20T00:30:35.000Z",
"num_tokens": 19001,
"path": null,
"reason": "from sage",
"repo": null,
"save_path": null,
"sha": null,
"size": 79560
} |
#! /usr/bin/env python
import os
import argparse
import json
import cv2
from utils.utils import get_yolo_boxes
from utils.colors import get_color
from keras.models import load_model
import numpy as np
import glob
from lxml import etree as ET
def _main_(args):
    """
    Run YOLO inference over every ``.png`` image in the configured test
    folder, writing one XML file of predicted bounding boxes per image
    (and, optionally, annotated copies of the images) to the configured
    output folder.

    Parameters
    ----------
    args : argparse.Namespace
        Must carry ``conf``, the path to the JSON configuration file.
    """
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    print_out = config['predict']['print']
    image_out = config['predict']['image']
    tiny = config['model']['tiny']

    # load variables from config file
    net_h, net_w = config['predict']['net_h'], config['predict']['net_w'] # a multiple of 32, the smaller the faster
    obj_thresh, nms_thresh = config['predict']['obj_thresh'], config['predict']['nms_thresh']
    labels = config['model']['labels']
    anchors = config['model']['anchors']

    # check for CPU or GPU(s)
    os.environ['CUDA_VISIBLE_DEVICES'] = config['model']['gpus']

    # load model weights
    infer_model = load_model(config['train']['saved_weights_name'])

    # gather file paths
    output_path = config['predict']['output_folder']
    test_image_path = config['predict']['test_images']
    image_paths = glob.glob(test_image_path + '*.png')

    # loop through the images
    for image_path in image_paths:
        image = cv2.imread(image_path)
        # BUGFIX: use os.path.basename instead of splitting on '\\' so the
        # script also works with POSIX path separators.
        image_name = os.path.basename(image_path)
        image_title = image_name.split('.')[0]
        boxes = get_yolo_boxes(infer_model, [image], net_h, net_w, anchors, obj_thresh, nms_thresh)[0]
        if print_out:
            print('\n', image_name, " 40ft Model")
        # create an etree
        if tiny:
            dataset = ET.Element('dataset', name=config['model']['name'], comment="yolov3_tiny")
        else:
            dataset = ET.Element('dataset', name=config['model']['name'], comment="yolov3")
        image_container = ET.SubElement(dataset, 'image', Name=image_name)
        # loop through the predictions
        for box in boxes:
            label = -1
            score = ""
            # keep the last label whose class score clears the threshold
            for i in range(len(labels)):
                if box.classes[i] > obj_thresh:
                    score = str(round(box.get_score() * 100, 2)) + '%'
                    label = i
            if label >= 0:
                ymin_s, xmin_s, ymax_s, xmax_s = str(box.ymin), str(box.xmin), str(box.ymax), str(box.xmax)
                # SubElement already attaches the node to image_container; the
                # previous extra append() call was redundant and was removed.
                ET.SubElement(image_container, 'bounding_box', object=labels[label], score=score,
                              Ymin=ymin_s, Xmin=xmin_s, Ymax=ymax_s, Xmax=xmax_s)
                if print_out:
                    print(labels[label]+' '+score+' Ymin='+ymin_s+' Xmin='+xmin_s+' Ymax='+ymax_s+' Xmax='+xmax_s)
                if image_out:
                    cv2.rectangle(img=image, pt1=(box.xmin, box.ymin), pt2=(box.xmax, box.ymax), color=get_color(label), thickness=1)
        tree = ET.ElementTree(dataset)
        tree.write(output_path + image_title + '.xml', pretty_print=True, xml_declaration=True, encoding="ISO-8859-1")
        if image_out:
            cv2.imwrite(output_path + image_name, np.uint8(image))
if __name__ == '__main__':
    # Parse the configuration path from the command line, then run inference.
    parser = argparse.ArgumentParser(description='Predict with a trained yolo model')
    parser.add_argument('-c', '--conf', help='path to configuration file')
    _main_(parser.parse_args())
| {
"alphanum_fraction": 0.6092472512,
"author": null,
"avg_line_length": 37.3368421053,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b794303b7cedcd61e4c48ac4e8a2412bc3d49e1c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "abc0ad0488e727b3d997a924b57356ace2caf444",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "LachlanMares/keras-yolo3-generic",
"max_forks_repo_path": "predict.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "abc0ad0488e727b3d997a924b57356ace2caf444",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "LachlanMares/keras-yolo3-generic",
"max_issues_repo_path": "predict.py",
"max_line_length": 161,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "abc0ad0488e727b3d997a924b57356ace2caf444",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "LachlanMares/keras-yolo3-generic",
"max_stars_repo_path": "predict.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 827,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3547
} |
""" Collection of novelty-based metrics """
from collections import defaultdict
import os
import matplotlib.pyplot as plt
import numpy as np
from reclist.current import current
def gini_index_at_k(y_preds, candidate_list, k: int=10, debug: bool=False) -> float:
    """
    Gini index of the top-k recommendation distribution over the catalogue.

    Values near 0 mean recommendations are spread evenly across items;
    values near 1 mean a handful of items dominate.

    :param y_preds: per-user lists of recommended item ids
    :param candidate_list: the full catalogue of recommendable items
    :param k: how many top recommendations per user to consider
    :param debug: unused, kept for interface compatibility
    """
    catalogue_size = len(set(candidate_list))
    top_k = []
    for preds in y_preds:
        top_k.extend(preds[:k])
    _, counts = np.unique(np.asarray(top_k), return_counts=True)
    counts.sort()
    n_recommended = counts.shape[0]
    n_slots = len(y_preds) * k
    # Ranks of the recommended items within the full (sorted) catalogue,
    # treating never-recommended items as zero-count entries at the bottom.
    ranks = np.arange(catalogue_size - n_recommended + 1, catalogue_size + 1)
    gini = np.sum((2 * ranks - catalogue_size - 1) * counts) / n_slots
    return gini / catalogue_size
def gini_index_at_k_user_differential(y_preds, y_test, candidate_list, k=10,
                                      debug=False, user_feature='age_range', **kwargs):
    """
    Gini index at k computed separately per value of ``user_feature``.

    When ``debug`` is true, a bar-chart breakdown is written to the
    current report's plots directory.
    """
    per_group = _breakdown_preds_by_user_feature(y_test, y_preds,
                                                 user_feature=user_feature)
    results = _apply_func_to_breakdown(gini_index_at_k, per_group, candidate_list,
                                       k=k, debug=debug)
    if debug:
        plot_results_breakdown(results, f"GiniIndex@{k}_by_{user_feature}.pdf", f"GiniIndex@{k}",
                               xmin=0, xmax=1)
    return results
def shannon_entropy_at_k(y_preds, k: int=10, debug: bool=False) -> float:
    """
    Shannon entropy (natural log) of the top-k recommendation distribution.

    Higher entropy means recommendations are spread more evenly over items.

    :param y_preds: per-user lists of recommended item ids
    :param k: how many top recommendations per user to consider
    :param debug: unused, kept for interface compatibility
    """
    top_k = []
    for preds in y_preds:
        top_k.extend(preds[:k])
    _, counts = np.unique(np.asarray(top_k), return_counts=True)
    probs = counts / (len(y_preds) * k)
    return (-probs * np.log(probs)).sum()
def shannon_entropy_at_k_user_differential(y_test, y_preds, k=10,
                                           debug=False, user_feature='gender', **kwargs):
    """
    Shannon entropy at k computed separately per value of ``user_feature``.

    NOTE(review): parameter order here is (y_test, y_preds), the reverse of
    gini_index_at_k_user_differential -- kept as-is for caller compatibility.
    """
    per_group = _breakdown_preds_by_user_feature(y_test, y_preds,
                                                 user_feature=user_feature)
    results = _apply_func_to_breakdown(shannon_entropy_at_k, per_group,
                                       k=k, debug=debug)
    if debug:
        plot_results_breakdown(results, f"ShannonEntropy@{k}_by_{user_feature}.pdf", f"ShannonEntropy@{k}")
    return results
def novelty_at_k(y_preds, x_train, k=10, debug=False):
    """
    Mean self-information (novelty) of users' top-k recommendations.

    Item popularity is estimated from the training interactions; an item
    never seen in training contributes as if it had a count of 1.

    :param y_preds: per-user lists of recommended item ids
    :param x_train: per-user lists of interaction dicts with a 'resourceId' key
    :param k: how many top recommendations per user to consider
    :param debug: unused, kept for interface compatibility
    """
    interactions = []
    for user in x_train:
        for interaction in user:
            interactions.append(interaction['resourceId'])
    items, counts = np.unique(np.asarray(interactions), return_counts=True)
    popularity = dict(zip(items, counts))
    n_users = len(x_train)  # each entry is a user
    mean_self_info = []
    for preds in y_preds:
        self_information = 0
        for item in preds[:k]:
            try:
                self_information += np.sum(-np.log2(popularity[item] / n_users))
            except KeyError:
                # Unseen item: fall back to the rarest possible popularity.
                self_information += np.sum(-np.log2(1 / n_users))
        mean_self_info.append(self_information / k)
    return sum(mean_self_info) / len(y_preds)
def novelty_at_k_user_differential(x_train, y_test, y_preds, k=10,
                                   debug=False, user_feature='gender', **kwargs):
    """
    Novelty at k computed separately per value of ``user_feature``.

    When ``debug`` is true, a bar-chart breakdown is written to the
    current report's plots directory.
    """
    per_group = _breakdown_preds_by_user_feature(y_test, y_preds,
                                                 user_feature=user_feature)
    results = _apply_func_to_breakdown(novelty_at_k, per_group, x_train, k=k, debug=debug)
    if debug:
        plot_results_breakdown(results, f"Novelty@{k}_by_{user_feature}.pdf", f"Novelty@{k}")
    return results
def personalisation_at_k(y_preds, k=10, debug=False):
    """
    Personalisation: one minus the mean pairwise overlap of top-k lists.

    1.0 means no two users share a recommended item; 0.0 means every user
    receives an identical top-k list.
    """
    import itertools
    import tqdm
    # Assign every item a dense integer id (insertion order).
    item_ids = {}
    next_id = 0
    for preds in y_preds:
        for item in preds:
            if item not in item_ids:
                item_ids[item] = next_id
                next_id += 1
    no_users = len(y_preds)
    no_combinations = no_users * (no_users - 1) / 2
    cache = {}

    def top_k_ids(cache, user_idx, k, item_ids, y_preds):
        # Lazily build and cache each user's top-k item-id set.
        try:
            return cache[user_idx]
        except KeyError:
            ids = set([item_ids[i] for i in y_preds[user_idx][:k]])
            cache[user_idx] = ids
            return ids

    # Make this speedy by using a.b/|a|/|b|, and caching a,b throughout
    # a, b are len(no_items), all zeros apart from indices of the top k items, which are ==1
    # therefore a.b = number of items in both a, b
    # As only k recommendations, |a| == |b|, |a|^2 = k, which factorises out
    # so compute (a & b) for all unique a,b pairs
    # divide by k
    # divide by number of pairs
    # n.b. doing it the full way (numpy vectors) for 10k users took 2hrs, this takes 30s
    total_similarity = 0
    for id1, id2 in tqdm.tqdm(itertools.combinations(range(no_users), 2),
                              total=no_combinations):
        set1 = top_k_ids(cache, id1, k, item_ids, y_preds)
        set2 = top_k_ids(cache, id2, k, item_ids, y_preds)
        total_similarity += len(set1 & set2)
    return 1 - (total_similarity / no_combinations / k)
def _breakdown_preds_by_user_feature(y_test, y_preds, user_feature='gender'):
breakdown = defaultdict(list)
for _t, _p in zip(y_test, y_preds):
target_user_feature = _t[0][user_feature]
if not target_user_feature:
target_user_feature = 'unknown'
breakdown[target_user_feature].append(_p)
return breakdown
def _apply_func_to_breakdown(func, breakdown, *args, **kwargs):
return {key: func(val, *args, **kwargs)
for key, val in sorted(breakdown.items(),
key=lambda x:x[0])}
def plot_results_breakdown(results, fname, metric, xmin=None, xmax=None):
    """
    Save a horizontal bar chart of a per-group metric breakdown.

    Parameters
    ----------
    results : dict
        Mapping of group name -> metric value; one bar per key.
    fname : str
        File name for the figure, written under the current report's
        'plots' directory.
    metric : str
        Label for the x axis.
    xmin, xmax : float or None
        Optional explicit x-axis limits (e.g. 0..1 for a Gini index).
    """
    plt.figure()
    plt.barh(list(results.keys()), list(results.values()))
    plt.xlabel(metric)
    if xmin is not None:
        plt.xlim(left=xmin)
    if xmax is not None:
        plt.xlim(right=xmax)
    plt.grid(alpha=0.5, color='gray', ls=':')
    plt.savefig(os.path.join(current.report_path, 'plots', fname))
    plt.close()
| {
"alphanum_fraction": 0.6286554485,
"author": null,
"avg_line_length": 37.7839506173,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ae2cc565f697a3b61c185d45857a294e4d44aeb8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "42ea321856e02e46cca8e3b032b3b088ff328f57",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bbc/datalab-reclist",
"max_forks_repo_path": "reclist/metrics/novelty.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "42ea321856e02e46cca8e3b032b3b088ff328f57",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bbc/datalab-reclist",
"max_issues_repo_path": "reclist/metrics/novelty.py",
"max_line_length": 106,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "42ea321856e02e46cca8e3b032b3b088ff328f57",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bbc/datalab-reclist",
"max_stars_repo_path": "reclist/metrics/novelty.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1541,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6121
} |
# !/usr/bin/python3
# -*-coding utf-8 -*-
# @Time : 2021/06/11 10:59
# @Author : Francisco Muñoz
# @Project : evolution_of_species
# @File : validators.py
# @Software : PyCharm
from abc import ABC, abstractmethod
import numpy as np
# Module-wide defaults for MatrixValidator copy semantics: store values as
# given on set, but hand back a defensive copy on get so callers cannot
# mutate the validated state in place.
_SET_A_COPY = False
_GET_A_COPY = True
def validate_index(item):
    """
    Validates if an index represents a row or a component.

    A valid index is either an ``int`` (a whole row) or a 2-tuple of
    ``int`` (a single component).

    Raises
    ------
    TypeError
        If ``item`` is neither an int nor a tuple.
    ValueError
        If a tuple ``item`` does not have exactly two int components.
    """
    if not isinstance(item, (int, tuple)):
        raise TypeError(f"'item' must be an 'int' or 'tuple'."
                        + f" Currently is '{type(item).__name__}'.")
    if isinstance(item, tuple) and len(item) != 2:
        raise ValueError("'item' must be length 2."
                         + f" Currently is {len(item)}.")
    # BUGFIX: previously used 'or', which only rejected tuples where
    # *neither* component was an int; both components must be ints, as
    # the error message states.
    if isinstance(item, tuple) and not (isinstance(item[0], int) and isinstance(item[1], int)):
        raise ValueError("The components of 'item' must be 'int'.")
def validate_nth_row(row, row_to_replace):
    """
    Validates that ``row`` is a numpy array whose shape matches the row
    it is meant to replace.
    """
    if not isinstance(row, np.ndarray):
        raise TypeError(f"'row' is not an 'array'."
                        + f" Currently is '{type(row).__name__}'.")
    if row.shape != row_to_replace.shape:
        raise ValueError(f"Dimensions does not fits."
                         + f" Currently is {row.shape}, it must be {row_to_replace.shape}.")
class Validator(ABC):
    """
    Abstract Class for General Validators

    Data-descriptor base for validated, write-once ('final') attributes:
    subclasses implement ``validate``; the descriptor stores the value
    under a protected name and raises on any second assignment.
    """

    def __set_name__(self, owner, name):
        self.public_name = name
        self.protected_name = "_" + name
        # To make this variable final: a class-level flag records whether
        # the attribute was ever set; __set__ shadows it per instance.
        setattr(owner, self.protected_name + "_attr_set", False)

    def __get__(self, obj, obj_type=None):
        return getattr(obj, self.protected_name)

    def __set__(self, obj, value):
        # Ask if the variable is not set yet
        if not getattr(obj, self.protected_name + "_attr_set"):
            self.validate(obj, value)
            setattr(obj, self.protected_name, value)
            setattr(obj, self.protected_name + "_attr_set", True)
        # If it was, raise an error
        else:
            raise AttributeError(f"Attribute {self.public_name} was already set.")

    @abstractmethod
    def validate(self, obj, value):
        """
        To use the template pattern.

        Parameters
        ----------
        obj : Object
            An instance of the current object
        value : object
            Value to validate.
        """
        pass
class Boundary(Validator):
    """
    Validator for 2-tuples of numeric bounds, like ``(0.1, 10.5)``.
    """

    def validate(self, obj, value):
        if not isinstance(value, tuple):
            raise TypeError(f"'{self.public_name}' must be a 'tuple'."
                            + f" Currently is '{type(value).__name__}'.")
        if len(value) != 2:
            raise ValueError(f"'{self.public_name}' must be a length 2."
                             + f" Currently is {len(value)}.")
        lower, upper = value
        if not (isinstance(lower, (int, float)) and isinstance(upper, (int, float))):
            raise ValueError(f"The values of '{self.public_name}' must be 'int' or 'float'.")
class Integer(Validator):
    """
    Validator for non-negative integers, like ``10``.
    """

    def validate(self, obj, value):
        if not isinstance(value, int):
            raise TypeError(f"'{self.public_name}' must be an 'int'."
                            + f" Currently is '{type(value).__name__}'")
        if value < 0:
            raise ValueError(f"'{self.public_name}' must be greater or equals to 0.")
class Float(Validator):
    """
    Validator for floats within optional bounds, like ``3.1415``.

    Each bound is a ``(value, inclusive)`` pair; a ``None`` value means
    unbounded on that side and a ``None`` flag defaults to inclusive.
    """

    def __init__(self, lower_bound=(None, None), upper_bound=(None, None)):
        self.lower_bound = -float("inf") if lower_bound[0] is None else lower_bound[0]
        self.lower_bound_eq = True if lower_bound[1] is None else lower_bound[1]
        self.upper_bound = float("inf") if upper_bound[0] is None else upper_bound[0]
        self.upper_bound_eq = True if upper_bound[1] is None else upper_bound[1]
        str_low_bound = "[" if self.lower_bound_eq else "("
        # BUGFIX: the closing bracket must reflect the *upper* bound's
        # inclusivity (was mistakenly using lower_bound_eq).
        str_upp_bound = "]" if self.upper_bound_eq else ")"
        # Human-readable interval used in validation error messages.
        self.str_bounds = f"{str_low_bound}{self.lower_bound}, {self.upper_bound}{str_upp_bound}"

    def validate(self, obj, value):
        if not isinstance(value, (float, int)):
            raise TypeError(f"'{self.public_name}' must be a 'float'."
                            + f" Currently is '{type(value).__name__}'.")
        satisfied_lower_bound = value > self.lower_bound or (self.lower_bound_eq and value == self.lower_bound)
        satisfied_upper_bound = value < self.upper_bound or (self.upper_bound_eq and value == self.upper_bound)
        if not (satisfied_lower_bound and satisfied_upper_bound):
            raise ValueError(f"'{self.public_name}' must be in {self.str_bounds}.")
class MatrixValidator(Validator, ABC):
    """
    Abstract Class for General Matrix Validators

    Extends Validator with array-specific behaviour: ``None`` is accepted
    without validation, values may be transformed before storage (see
    ``transform_discrete_function``), and get/set can work on copies
    depending on the class flags below.
    """
    SET_A_COPY = _SET_A_COPY
    GET_A_COPY = _GET_A_COPY

    def __get__(self, obj, obj_type=None):
        # Returns a copy of the attribute
        # return getattr(obj, self.protected_name)
        if self.GET_A_COPY:
            return np.copy(getattr(obj, self.protected_name))
        else:
            return getattr(obj, self.protected_name)

    def __set__(self, obj, value):
        if value is None:
            # None is stored as-is; the 'already set' flag stays False, so
            # a real value may still be assigned later.
            setattr(obj, self.protected_name, value)
        # Ask if the variable is not set yet
        elif not getattr(obj, self.protected_name + "_attr_set"):
            self.validate(obj, value)
            value_transformed = self.transform_discrete_function(obj, value)
            if self.SET_A_COPY:
                value_transformed = np.copy(value_transformed)
            setattr(obj, self.protected_name, value_transformed)
            setattr(obj, self.protected_name + "_attr_set", True)
        # If it was, raise an error
        else:
            raise AttributeError(f"Attribute {self.public_name} was already set.")

    @abstractmethod
    def transform_discrete_function(self, obj, value):
        """
        To use template pattern.

        Subclasses convert ``value`` (array or callable) into the stored
        array representation.
        """
        pass
# TODO: Use the template pattern here to generalize these behaviors
class DiscreteFunctionValidator(MatrixValidator):
    """
    Validates (and discretizes) a function defined over one or two grid
    axes of the owner object.

    ``x`` and ``y`` name axes on the owner ('x', 'y', 'z' or 't'); the
    expected grid sizes are looked up from the owner during validation.
    Callables are sampled onto the owner's grid; arrays are stored as-is.
    """

    def __init__(self, x=None, y=None):
        self.x = x
        self.y = y
        # Populated by validate(); maps axis name -> expected grid size.
        self.dict_obj = {}

    def transform_discrete_function(self, obj, value):
        # NOTE(review): relies on validate() having filled dict_obj first;
        # the descriptor __set__ calls validate before this transform.
        if self.x is None and self.y is None:
            return value
        if isinstance(value, np.ndarray):
            return value
        x = np.linspace(*obj.x.lims, self.dict_obj[self.x])
        if self.dict_obj[self.y] is None:
            # 1-D case: sample the callable along the x axis only.
            func_to_return = np.array([value(x_) for x_ in x])
        else:
            # 2-D case: sample on the full (x, y) grid.
            y = np.linspace(*obj.y.lims, self.dict_obj[self.y])
            func_to_return = np.array([[value(x_, y_) for y_ in y] for x_ in x])
        return func_to_return

    def validate(self, obj, value):
        if self.x is None and self.y is None:
            return None
        # Dictionary to save the current setting
        self.dict_obj = {"x": obj.x.N + 2, "y": obj.y.M + 2, "z": obj.z.O + 2, "t": obj.t.T + 1, None: None}
        # Make a tuple to compare
        x_size, y_size = self.dict_obj[self.x], self.dict_obj[self.y]
        tuple_to_compare = (x_size, y_size) if y_size is not None else (x_size,)
        size = len(tuple_to_compare)
        # Check if value is an array
        is_ndarray = isinstance(value, np.ndarray)
        # Case not is an array or a function
        if not (is_ndarray or callable(value)):
            raise TypeError(f"'{self.public_name}' must be an 'array' or a 'function'."
                            + f" Currently is {type(value).__name__}.")
        # If it is an array, make an instance to ensure the operations
        if is_ndarray:
            value = np.asarray(value)
            # Case shape does not fit
            if len(value.shape) != size:
                raise ValueError(f"The dimensions of '{self.public_name}' must be {size}."
                                 + f" Currently is {len(value.shape)}.")
            # Case dimensions does not fit
            if value.shape != tuple_to_compare:
                raise ValueError(f"The dimensions of '{self.public_name}' must be equals to {tuple_to_compare}."
                                 + f" Currently is {value.shape}.")
class InitialDiscreteFunctionValidator(DiscreteFunctionValidator):
    """
    Like DiscreteFunctionValidator, but the supplied value defines only
    the initial row (index 0 along ``x``) of an otherwise zero-filled
    matrix.
    """

    def __init__(self, x=None, y=None):
        super().__init__(x, y)
        # Populated by validate(); maps axis name -> (lower, upper) limits.
        self.dict_bnd_obj = None

    def transform_discrete_function(self, obj, value):
        if self.x is None and self.y is None:
            return value
        # Allocate the full (x, y) matrix; only row 0 gets real data.
        func_to_return = np.zeros((self.dict_obj[self.x], self.dict_obj[self.y]))
        if isinstance(value, np.ndarray):
            func_to_return[0] = value
            return func_to_return
        # Callable: sample it along the y axis for the initial row.
        y = np.linspace(*self.dict_bnd_obj[self.y], self.dict_obj[self.y])
        func_to_return[0] = np.array([value(y_) for y_ in y])
        return func_to_return

    def validate(self, obj, value):
        if self.x is None and self.y is None:
            return None
        # Dictionary to save the current setting
        self.dict_obj = {"x": obj.x.N + 2, "y": obj.y.M + 2, "z": obj.z.O + 2, "t": obj.t.T + 1, None: None}
        self.dict_bnd_obj = {"x": obj.x.lims, "y": obj.y.lims, "z": obj.z.lims, "t": obj.t.lims, None: None}
        # Make a tuple to compare: only the initial row's length matters.
        y_size = self.dict_obj[self.y]
        tuple_to_compare = (y_size,)
        size = len(tuple_to_compare)
        # Check if value is an array
        is_ndarray = isinstance(value, np.ndarray)
        # Case not is an array or a function
        if not (is_ndarray or callable(value)):
            raise TypeError(f"'{self.public_name}' must be an 'array' or a 'function'."
                            + f" Currently is {type(value).__name__}.")
        # If it is an array, make an instance to ensure the operations
        if is_ndarray:
            value = np.asarray(value)
            # Case shape does not fit
            if len(value.shape) != size:
                raise ValueError(f"The dimensions of '{self.public_name}' must be {size}."
                                 + f" Currently is {len(value.shape)}.")
            # Case dimensions does not fit
            if value.shape != tuple_to_compare:
                raise ValueError(f"The dimensions of '{self.public_name}' must be equals to {tuple_to_compare}."
                                 + f" Currently is {value.shape}.")
| {
"alphanum_fraction": 0.5955522051,
"author": null,
"avg_line_length": 40.196969697,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2679a8227077ec84fdbe1fdab85b4b53b3b6a887",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "bdf9b78a976730b641d8cd9ff699698b0f5d31bb",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "PanchitoMunoz/evolution_of_species",
"max_forks_repo_path": "perthame_pde/utils/validators.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bdf9b78a976730b641d8cd9ff699698b0f5d31bb",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "PanchitoMunoz/evolution_of_species",
"max_issues_repo_path": "perthame_pde/utils/validators.py",
"max_line_length": 112,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "bdf9b78a976730b641d8cd9ff699698b0f5d31bb",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "PanchitoMunoz/evolution_of_species",
"max_stars_repo_path": "perthame_pde/utils/validators.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2502,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10612
} |
import os
import json
import numpy as np
from keras.utils import Sequence
from .utils import get_datetime_now
from .utils import string2shape
from .utils import shape2string
from .utils import timesequence_generator
from .utils import create_one_hots_from_labels
from .utils import one_hot_encode
from .waves import Wave
from .waves import MixedWave
class MixedSignal:
    def __init__(self,
                 sigs_coeffs,
                 *features,
                 batch_size=1,
                 window_size=0,
                 window_type='sliding',
                 network_type='TCN',
                 sequence_type='many2many',
                 stateful=False,
                 run_label=None,
                 name='Mixed'):
        """
        Build a mixed-signal dataset description from per-wave coefficient
        dicts.

        Parameters
        ----------
        sigs_coeffs : list of dict
            One coefficient dict per wave; at most one entry may carry
            name 'mixed_wave', which groups all waves lacking their own
            'time' entry under a MixedWave.
        features : str
            Feature names per timestep; defaults to ('x',).
        batch_size : int
            Training batch size; forced to 1 unless ``stateful``.
        window_size : int
            Samples per window; values < 1 mean one window spanning all
            timestamps (resolved later in generate()).
        window_type : str
            One of 'sliding', 'boxcar', 'random'.
        network_type : str
            One of 'MLP', 'RNN', 'LSTM', 'PLSTM', 'TCN'.
        sequence_type : str
            One of 'one2one', 'one2many', 'many2one', 'many2many'.
        stateful : bool
            Whether the downstream RNN is stateful.
        run_label : str or None
            Output-directory label; defaults to a timestamp.
        name : str
            Display name for this signal collection.
        """
        self.features = features or ('x',)
        self.n_features = len(self.features)
        # Stateful RNNs need a fixed positive batch size; otherwise the
        # batch size is irrelevant here and pinned to 1.
        if stateful:
            assert batch_size > 0
        else:
            batch_size = 1
        self.stateful = stateful
        self.batch_size = batch_size
        # Should these be properties?
        self.name = name
        self.inputs = None
        self.labels = None
        self.one_hots = None
        self.mixed_signal = None
        self.n_timestamps = None
        # window_size < 1 means "whole signal": the real size is assigned
        # in generate() once n_timestamps is known.
        if window_size < 1:
            self._window_size = None
            self._n_samples = 1
        else:
            self._window_size = window_size
            self._n_samples = None
        self.window_type = window_type.lower()
        assert self.window_type in ('sliding', 'boxcar', 'random')
        self.network_type = network_type
        assert self.network_type in ('MLP', 'RNN', 'LSTM', 'PLSTM', 'TCN')
        self.sequence_type = sequence_type
        self._sequence_code = None
        assert self.sequence_type in ('one2one', 'one2many', 'many2one', 'many2many')
        # NOTE(review): sigs_coeffs is a list here, so this membership test
        # only fires if the literal string 'time' is an element of the
        # list -- possibly intended as a dict lookup; confirm.
        if 'time' in sigs_coeffs:
            self.sequence_generator = timesequence_generator(**sigs_coeffs['time'])
        # Partition the wave configs: entries with their own 'time' stand
        # alone; the rest are grouped under an optional MixedWave.
        mwave_indexes = []
        n_mixed_waves = 0
        mwave_idx = None
        has_time = []
        for i, coeffs in enumerate(sigs_coeffs):
            if 'time' in coeffs:
                has_time.append(i)
            else:
                mwave_indexes.append(i)
            if 'name' in coeffs and coeffs['name'].lower() == 'mixed_wave':
                n_mixed_waves += 1
                mwave_idx = i
        assert n_mixed_waves in (0, 1), print(f'only zero or one mixed waves allowed, found {n_mixed_waves}')
        if n_mixed_waves:
            assert mwave_idx in has_time, print('mixed-wave must have keyword, time.')
            assert len(sigs_coeffs) > len(has_time)
            assert len(mwave_indexes) > 1, print('Need more than one wave for a mixed-wave')
            mwave_coeffs = sigs_coeffs.pop(mwave_idx)
            # Re-index the grouped waves after popping the mixed-wave
            # config out of the list.
            mwave_indexes = [i if i < mwave_idx else i - 1 for i in mwave_indexes]
            self.mixed_wave = MixedWave(classes=mwave_indexes, mwave_coeffs=mwave_coeffs)
        else:
            assert len(sigs_coeffs) == len(has_time)
            self.mixed_wave = None
        self.waves = [Wave(*self.features, label=i, **coeffs) for i, coeffs in enumerate(sigs_coeffs)]
        self.n_classes = len(self.waves)
        self.classification_type = 'binary' if self.n_classes == 1 else 'categorical'
        run_label = run_label or get_datetime_now(fmt='%Y_%m%d_%H%M')
        self.data_config = {
            'run_label': run_label,
            'window_size': self._window_size,
            'window_type': self.window_type,
            'network_type': self.network_type,
            'sequence_type': self.sequence_type,
            'sigs_coeffs': sigs_coeffs,
        }
        # TODO: What's the appropriate way to assign the out_dir (regular functionality, unit tests, etc.)
        # TODO: Relative to the root directory of this project?
        # TODO: Relative to the directory of the calling script?
        # TODO: Relative to the directory of this module?
        self.out_dir = os.path.join(os.getcwd(), 'out', run_label)
        self.model_filename = os.path.join(self.out_dir, 'model.h5')
        self.data_config_filename = os.path.join(self.out_dir, 'data_config.json')
        self.model_weights_filename = os.path.join(self.out_dir, 'model_weights.h5')
        self.training_stats_filename = os.path.join(self.out_dir, 'training_stats.csv')
def generate(self, sequence_code=None):
""" Generate waves from property values."""
if self.mixed_wave:
self.mixed_wave.generate()
# generate new waves.
timestamps = []
labels = []
inputs = []
for i, wave in enumerate(self.waves):
if self.mixed_wave and i in self.mixed_wave.classes:
indices = np.where(self.mixed_wave.labels == i)[0]
wave.generate(self.mixed_wave.timestamps, indices=indices, **self.mixed_wave.props)
else:
wave.generate()
timestamps.append(wave.timestamps)
labels.append(wave.labels)
inputs.append(wave.inputs)
timestamps = np.hstack(timestamps)
labels = np.hstack(labels)
inputs = np.vstack(inputs)
window_size = self.window_size or 1
# clip data from the left so that it divides batch_size evenly.
if self.window_type == 'boxcar':
assert len(timestamps) >= window_size * self.batch_size
chop_index = len(timestamps) % (window_size * self.batch_size)
else: # ('sliding' and 'random')
chop_index = (len(timestamps) - window_size + 1) % self.batch_size
# Sort the labels and mixed_signal chronologically.
sorted_indices = np.argsort(timestamps)[chop_index:]
self.timestamps = timestamps[sorted_indices]
self.labels = labels[sorted_indices]
self.inputs = inputs[sorted_indices]
self.n_timestamps = len(self.timestamps)
if self._window_size is None:
self.window_size = self.n_timestamps
# self.one_hots = create_one_hots_from_labels(self.labels, self.n_classes)
self.one_hots = one_hot_encode(self.labels, self.n_classes)
self.mixed_signal = self.inputs[..., 0]
# Sanity check
assert len(self.timestamps) == len(self.mixed_signal) == len(self.labels) == len(self.inputs) == len(self.one_hots)
# window_type, window_size
# sliding, ws < 0 -> raise ValueError('window_size must be non negative')
# boxcar , ws < 0 -> raise ValueError('window_size must be non negative')
# random , ws < 0 -> raise ValueError('window_size must be non negative')
# sliding, ws = 0 -> (sliding, n_timestamps)
# boxcar , ws = 0 -> (boxcar, n_timestamps)
# random , ws = 0 -> (random, n_timestamps)
# sliding, ws = 1 -> valid same as (boxcar, 1)
# boxcar , ws = 1 -> valid same as (sliding, 1)
# random , ws = 1 -> raise NotImplementedError
# sliding, 1 < ws < n_timestamps -> valid
# boxcar , 1 < ws < n_timestamps -> valid
# random , 1 < ws < n_timestamps -> valid
# sliding, n_timestamps = ws -> valid same as boxcar and random
# boxcar , n_timestamps = ws -> valid same as sliding and random
# random , n_timestamps = ws -> valid same as sliding and boxcar
# sliding, n_timestamps < ws -> raise ValueError('window_size must be <= n_timestamps')
# boxcar , n_timestamps < ws -> raise ValueError('window_size must be <= n_timestamps')
# random , n_timestamps < ws -> raise ValueError('window_size must be <= n_timestamps')
if self.window_type == 'sliding':
return self.generate_sliding(sequence_code=sequence_code)
elif self.window_type == 'boxcar':
return self.generate_boxcar()
elif self.window_type == 'random':
return self.generate_sliding(sequence_code=sequence_code)
else:
raise ValueError('Invalid window_type: {}. Use "sliding", "boxcar" or "random"')
@property
def n_samples(self):
if self._n_samples is None:
if self.n_timestamps is None: # then window_size will become n_timestamps. Delayed.
self._n_samples = None
else:
if self.window_type == 'boxcar':
assert self.n_timestamps % self.window_size == 0
self._n_samples = self.n_timestamps // self.window_size
else:
self._n_samples = self.n_timestamps - self.window_size + 1
return self._n_samples
@property
def window_size(self):
if self._window_size is None:
if self.n_timestamps is None:
self._window_size = None
else:
if self.window_type == 'boxcar':
assert self.n_timestamps % self.window_size == 0
self._window_size = self.n_timestamps // self.n_samples
else:
self._window_size = self.n_timestamps - self.n_samples + 1
return self._window_size
@window_size.setter
def window_size(self, val):
if val == 0:
val = self.n_timestamps
if self.window_type == 'boxcar' and self.n_timestamps % val != 0:
raise ValueError('window_size must divide n_timestamps evenly when using boxcar')
if val < 0 or val > self.n_timestamps:
raise ValueError('window_size must be in the range [0, n_timestamps]')
if 1 <= val <= self.n_timestamps:
self._window_size = val
self._n_samples = None
def in_out_shape_encoder(self, in_shape, out_shape):
# sequence_types
# t -> n_[t]imestamps
# x -> n_samples (or number of sub-samples)
# w -> [w]indow_size
# f -> n_[f]eatures
# c -> n_[c]lasses
# shape[0] -> number of sequences, number of samples
# shape[1] -> number of timesteps, sample length, sequence length, window_size
# shape[2] -> number of features (input), number of classes (output)
# (01tx, 01tw, 01fc)
# examples:
# t = 8
# x = 6 (sliding), x = 4 (boxcar)
# w = 3 (sliding), w = 2 (boxcar)
# f = 9
# c = 5
seq_bits = {
'1': 1,
't': self.n_timestamps,
'x': self.n_samples,
'w': self.window_size,
'f': self.n_features,
'c': self.n_classes,
}
ic = shape2string(in_shape, seq_bits)
oc = shape2string(out_shape, seq_bits)
in_out_code = '_'.join([ic, oc])
return in_out_code
def in_out_shape_decoder(self, in_out_code):
# one2one t00_t00 (8, ) (8, )
# one2one t01_t00 (8, 1) (8, )
# one2one t0f_t00 (8, 9) (8, )
# one2one t10_t00 (8 1, ) (8, )
# one2one t11_t00 (8, 1, 1) (8, )
# one2one t1f_t00 (8, 1, 9) (8, )
# one2one t00_t01 (8, ) (8, 1)
# one2one t01_t01 (8, 1) (8, 1)
# one2one t0f_t01 (8, 9) (8, 1)
# one2one t10_t01 (8 1, ) (8, 1)
# one2one t11_t01 (8, 1, 1) (8, 1)
# one2one t1f_t01 (8, 1, 9) (8, 1)
# one2one t00_t0c (8, ) (8, 5)
# one2one t01_t0c (8, 1) (8, 5)
# one2one t0f_t0c (8, 9) (8, 5)
# one2one t10_t0c (8 1, ) (8, 5)
# one2one t11_t0c (8, 1, 1) (8, 5)
# one2one t1f_t0c (8, 1, 9) (8, 5)
# one2one t00_t10 (8, ) (8, 1, )
# one2one t01_t10 (8, 1) (8, 1, )
# one2one t0f_t10 (8, 9) (8, 1, )
# one2one t10_t10 (8 1, ) (8, 1, )
# one2one t11_t10 (8, 1, 1) (8, 1, )
# one2one t1f_t10 (8, 1, 9) (8, 1, )
# one2one t00_t11 (8, ) (8, 1, 1)
# one2one t01_t11 (8, 1) (8, 1, 1)
# one2one t0f_t11 (8, 9) (8, 1, 1)
# one2one t10_t11 (8 1, ) (8, 1, 1)
# one2one t11_t11 (8, 1, 1) (8, 1, 1) 6
# one2one t1f_t11 (8, 1, 9) (8, 1, 1) 6
# one2one t00_t1c (8, ) (8, 1, 5)
# one2one t01_t1c (8, 1) (8, 1, 5)
# one2one t0f_t1c (8, 9) (8, 1, 5)
# one2one t10_t1c (8 1, ) (8, 1, 5)
# one2one t11_t1c (8, 1, 1) (8, 1, 5) 6
# one2one t1f_t1c (8, 1, 9) (8, 1, 5) 6
# many2one xw0_x00 (6, 3, ) (6, )
# many2one xw1_x00 (6, 3, 1) (6, )
# many2one xwf_x00 (6, 3, 9) (6, )
# many2one xw0_x01 (6, 3, ) (6, 1)
# many2one xw1_x01 (6, 3, 1) (6, 1)
# many2one xwf_x01 (6, 3, 9) (6, 1)
# many2one xw0_x0c (6, 3, ) (6, 5)
# many2one xw1_x0c (6, 3, 1) (6, 5)
# many2one xwf_x0c (6, 3, 9) (6, 5)
# many2one xw0_x10 (6, 3, ) (6, 1, )
# many2one xw1_x10 (6, 3, 1) (6, 1, )
# many2one xwf_x10 (6, 3, 9) (6, 1, )
# many2one xw0_x11 (6, 3, ) (6, 1, 1)
# many2one xw1_x11 (6, 3, 1) (6, 1, 1) 6
# many2one xwf_x11 (6, 3, 9) (6, 1, 1) 6
# many2one xw0_x1c (6, 3, ) (6, 1, 5)
# many2one xw1_x1c (6, 3, 1) (6, 1, 5) 6
# many2one xwf_x1c (6, 3, 9) (6, 1, 5) 6
# one2many x00_xw0 (6, ) (6, 3, )
# one2many x01_xw0 (6, 1) (6, 3, )
# one2many x0f_xw0 (6, 9) (6, 3, )
# one2many x00_xw1 (6, ) (6, 3, 1)
# one2many x01_xw1 (6, 1) (6, 3, 1)
# one2many x0f_xw1 (6, 9) (6, 3, 1)
# one2many x00_xwc (6, ) (6, 3, 5)
# one2many x01_xwc (6, 1) (6, 3, 5)
# one2many x0f_xwc (6, 9) (6, 3, 5)
# one2many x10_xw0 (6, 1, ) (6, 3, )
# one2many x11_xw0 (6, 1, 1) (6, 3, )
# one2many x1f_xw0 (6, 1, 9) (6, 3, )
# one2many x10_xw1 (6, 1, ) (6, 3, 1)
# one2many x11_xw1 (6, 1, 1) (6, 3, 1) 6
# one2many x1f_xw1 (6, 1, 9) (6, 3, 1) 6
# one2many x10_xwc (6, 1, ) (6, 3, 5)
# one2many x11_xwc (6, 1, 1) (6, 3, 5) 6
# one2many x1f_xwc (6, 1, 9) (6, 3, 5) 6
# many2many xw0_xw0 (6, 3 ) (6, 3, )
# many2many xw1_xw0 (6, 3, 1) (6, 3, )
# many2many xwf_xw0 (6, 3, 9) (6, 3, )
# many2many xw0_xw1 (6, 3 ) (6, 3, 1)
# many2many xw1_xw1 (6, 3, 1) (6, 3, 1) 6
# many2many xwf_xw1 (6, 3, 9) (6, 3, 1) 6
# many2many xw0_xwc (6, 3 ) (6, 3, 5)
# many2many xw1_xwc (6, 3, 1) (6, 3, 5) 6
# many2many xwf_xwc (6, 3, 9) (6, 3, 5) 6
# many2many 0t0_0t0 ( 8, ) ( 8, )
# many2many 0t1_0t0 ( 8, 1) ( 8, )
# many2many 0tf_0t0 ( 8, 9) ( 8, )
# many2many 1t0_0t0 (1, 8 ) ( 8, )
# many2many 1t1_0t0 (1, 8, 1) ( 8, )
# many2many 1tf_0t0 (1, 8, 9) ( 8, )
# many2many 0t0_0t1 ( 8, ) ( 8, 1)
# many2many 0t1_0t1 ( 8, 1) ( 8, 1)
# many2many 0tf_0t1 ( 8, 9) ( 8, 1)
# many2many 1t0_0t1 (1, 8 ) ( 8, 1)
# many2many 1t1_0t1 (1, 8, 1) ( 8, 1)
# many2many 1tf_0t1 (1, 8, 9) ( 8, 1)
# many2many 0t0_0tc ( 8, ) ( 8, 5)
# many2many 0t1_0tc ( 8, 1) ( 8, 5)
# many2many 0tf_0tc ( 8, 9) ( 8, 5)
# many2many 1t0_0tc (1, 8 ) ( 8, 5)
# many2many 1t1_0tc (1, 8, 1) ( 8, 5)
# many2many 1tf_0tc (1, 8, 9) ( 8, 5)
# many2many 0t0_1t0 ( 8, ) (1, 8, )
# many2many 0t1_1t0 ( 8, 1) (1, 8, )
# many2many 0tf_1t0 ( 8, 9) (1, 8, )
# many2many 1t0_1t0 (1, 8 ) (1, 8, )
# many2many 1t1_1t0 (1, 8, 1) (1, 8, )
# many2many 1tf_1t0 (1, 8, 9) (1, 8, )
# many2many 0t0_1t1 ( 8, ) (1, 8, 1)
# many2many 0t1_1t1 ( 8, 1) (1, 8, 1)
# many2many 0tf_1t1 ( 8, 9) (1, 8, 1)
# many2many 1t0_1t1 (1, 8 ) (1, 8, 1)
# many2many 1t1_1t1 (1, 8, 1) (1, 8, 1) 6
# many2many 1tf_1t1 (1, 8, 9) (1, 8, 1) 6
# many2many 0t0_1tc ( 8, ) (1, 8, 5)
# many2many 0t1_1tc ( 8, 1) (1, 8, 5)
# many2many 0tf_1tc ( 8, 9) (1, 8, 5)
# many2many 1t0_1tc (1, 8 ) (1, 8, 5)
# many2many 1t1_1tc (1, 8, 1) (1, 8, 5) 6
# many2many 1tf_1tc (1, 8, 9) (1, 8, 5) 6
seq_bits = {
'1': 1,
't': self.n_timestamps,
'x': self.n_samples,
'w': self.window_size,
'f': self.n_features,
'c': self.n_classes,
}
ic, oc = in_out_code.split('_')
in_shape = string2shape(ic, seq_bits)
out_shape = string2shape(oc, seq_bits)
return in_shape, out_shape
    @property
    def sequence_code(self):
        """Lazily derive the 'IN_OUT' shape code from sequence_type,
        window_size, n_features and n_classes.

        Code letters: 't' -> n_timestamps, 'x' -> n_samples,
        'w' -> window_size, 'f' -> n_features, 'c' -> n_classes,
        '1' -> literal one; '0' is a placeholder axis stripped before the
        final code is assembled.
        """
        # TODO: unit tests to make sure all these pathways are correct.
        if self._sequence_code is None:
            in_seq, out_seq = self.sequence_type.split('2')
            # Candidate base codes for each sequence side ('one'/'many').
            st = {'one': {'t0', 'x0'}, 'many': {'0t', 'xw'}}
            # Candidate base codes compatible with the window size.
            if self.window_size in (0, None, self.n_timestamps):
                ws = {'0t'}
            elif self.window_size == 1:
                ws = {'t0'}
            else:
                ws = {'x0', 'xw'}
            # A base code must satisfy both the sequence side and window size.
            in_base = st[in_seq] & ws
            out_base = st[out_seq] & ws
            if (in_seq == 'many' or out_seq == 'many') and self.window_size == 1:
                raise ValueError('Only with one2one can you use a window_size == 1')
            # Feature axis: '1' for a single feature, 'f' otherwise.
            if self.n_features == 1:
                n_feats = ('1',)
            elif self.n_features >= 2:
                n_feats = ('f',)
            else:
                raise ValueError('n_features cannot be negative or zero')
            in_codes = [ib + nf for ib in in_base for nf in n_feats]
            # Class axis: '1' for binary output, 'c' for categorical.
            if self.n_classes == 1:
                n_class = ('1',)
            elif self.n_classes >= 2:
                n_class = ('c',)
            else:
                raise ValueError('n_classes cannot be negative or zero')
            out_codes = [ob + nc for ob in out_base for nc in n_class]
            # Strip the '0' placeholders; exactly one candidate must remain
            # on each side, otherwise the configuration is ambiguous.
            in_codes1 = list(set([ic.replace('0', '') for ic in in_codes]))
            out_codes1 = list(set([oc.replace('0', '') for oc in out_codes]))
            if len(in_codes1) == 0:
                raise ValueError('unable to determine input shape')
            if len(in_codes1) > 1:
                raise ValueError('more than one possible input shape detected.')
            if len(out_codes1) == 0:
                raise ValueError('unable to determine output shape')
            if len(out_codes1) > 1:
                raise ValueError('more than one possible output shape detected.')
            self._sequence_code = '_'.join([in_codes1[0], out_codes1[0]])
        return self._sequence_code
    def generate_sliding(self, sequence_code=None):
        """Window the generated signal with overlapping (sliding) windows.

        Returns (X, y) shaped according to the sequence code.  The special
        sequence_type 'many2one+time' builds two-channel PLSTM-style input
        instead: channel 0 holds signal values, channel 1 holds timestamp
        differences (presumably time offsets within the window — TODO
        confirm against the PLSTM consumer).
        """
        if self.sequence_type == 'many2one+time':
            # PLSTM: many2one (1088, 100, 2) -> (1088, 3)
            X = np.zeros((self.n_samples, self.window_size, 2))
            for j in range(self.window_size):
                X[:, j, 0] = self.mixed_signal[j:j + self.n_samples]
                X[:, j, 1] = self.timestamps[j + self.n_samples - 1] - self.timestamps[j:j + self.n_samples]
            y = self.one_hots[self.window_size - 1:]
            return X, y
        sequence_code = sequence_code or self.sequence_code
        X_code, y_code = sequence_code.split('_')
        # Inputs: full sequence ('t*'), one row per window end ('x*'),
        # or explicit windows ('xw*').
        if X_code in ('t1', 'tf'):
            X = self.inputs
        elif X_code in ('x1', 'xf'):
            X = self.inputs[self.window_size - 1:]
        elif X_code in ('xw1', 'xwf'):
            X = np.zeros((self.n_samples, self.window_size, self.n_features))
            for j in range(self.window_size):
                X[:, j] = self.inputs[j:j + self.n_samples]
        else:
            raise NotImplementedError(X_code)
        # Targets: raw labels ('*1') or one-hot classes ('*c'), with the
        # same sequence/window variants as the inputs.
        if y_code in ('t1',):
            y = self.labels[..., None]
        elif y_code in ('x1',):
            y = self.labels[self.window_size - 1:, None]
        elif y_code == 'xw1':
            y = np.zeros((self.n_samples, self.window_size, 1))
            for j in range(self.window_size):
                y[:, j, 0] = self.labels[j:j + self.n_samples]
        elif y_code in ('tc',):
            y = self.one_hots
        elif y_code in ('xc',):
            y = self.one_hots[self.window_size - 1:]
        elif y_code == 'xwc':
            y = np.zeros((self.n_samples, self.window_size, self.n_classes))
            for j in range(self.window_size):
                y[:, j] = self.one_hots[j:j + self.n_samples]
        else:
            raise NotImplementedError(y_code)
        return X, y
def generate_boxcar(self, sequence_code=None):
# TODO: Need unit tests to make sure all these pathways are correct.
# sequence_types
# t -> n_`t`imestamps
# x -> n_samples (e`x`amples, sub-samples, etc.)
# w -> `w`indow_size
# c -> n_`c`lasses
# f -> n_`f`eatures
# one2one t1_t1 (1088, 1) (1088, 1) <- binary outputs
# one2one t1_tc (1088, 1) (1088, 3)
# many2one xw_xc (1088, 100) (1088, 3)
# many2one xw1_xc (1088, 100, 1) (1088, 3)
# many2many xw_xwc (1088, 100) (1088, 100, 3)
# many2many xw1_xwc (1088, 100, 1) (1088, 100, 3)
sequence_code = sequence_code or self.sequence_code
X_code, y_code = sequence_code.split('_')
if X_code in ('t1',):
X = self.inputs
elif X_code in ('x1',):
X = self.inputs.reshape((self.n_samples, self.window_size, self.n_features))
X = X[:, -1, :]
X = X.reshape(self.n_samples, self.n_features)
elif X_code in ('xw1',):
X = self.inputs.reshape((self.n_samples, self.window_size, self.n_features))
elif X_code in ('tf',):
X = self.inputs
elif X_code in ('xf',):
X = self.inputs.reshape((self.n_samples, self.window_size, self.n_features))
X = X[:, -1, :]
X = X.reshape(self.n_samples, self.n_features)
elif X_code in ('xwf',):
X = self.inputs.reshape((self.n_samples, self.window_size, self.n_features))
else:
raise NotImplementedError(X_code)
if y_code in ('t1',):
y = self.labels[..., None]
elif y_code in ('x1',):
y = self.labels.reshape((self.n_samples, self.window_size, 1))
y = y[:, -1, :]
y = y.reshape(self.n_samples, 1)
elif y_code == 'xw1':
y = self.labels.reshape((self.n_samples, self.window_size, 1))
elif y_code in ('tc',):
y = self.one_hots
elif y_code in ('xc',):
y = self.one_hots.reshape((self.n_samples, self.window_size, self.n_classes))
y = y[:, -1, :]
y = y.reshape(self.n_samples, self.n_classes)
elif y_code == 'xwc':
y = self.one_hots.reshape((self.n_samples, self.window_size, self.n_classes))
else:
raise NotImplementedError(y_code)
return X, y
def generate_samples(self, n_samples, sequence_code=None):
# This is best suited for generating data where
# window_size == n_timestamps.
# and when using TCN's.
# The shape of Xi and yi should be,
# Xi.shape == (n_timestamps, n_features)
# yi.shape == (n_timestamps, n_classes)
# and the final output shape should be,
# X.shape == (n_samples, n_timestamps, n_features)
# y.shape == (n_samples, n_timestamps, n_classes)
if n_samples < 1:
raise ValueError('n_samples must be >= 1')
X = []
y = []
for i in range(n_samples):
Xi, yi = self.generate(sequence_code=sequence_code)
X.append(Xi)
y.append(yi)
return np.stack(X), np.stack(y)
def save_config(self):
os.makedirs(self.out_dir, exist_ok=True)
with open(self.data_config_filename, 'w') as ofs:
json.dump(self.data_config, ofs, indent=4)
class SignalGenerator(Sequence):
    """Keras-style batch source that draws freshly generated data from a
    mixed-signal object on every item request.
    """

    def __init__(self,
                 n_samples,
                 batch_size,
                 msig,
                 inout_shape_code='tf_tc'):
        """Store sizes and derive per-batch array shapes from msig.

        :param n_samples: total samples available per epoch
        :param batch_size: samples per batch
        :param msig: object exposing in_out_shape_decoder() and generate()
        :param inout_shape_code: 'IN_OUT' code describing array shapes
        """
        self.n_samples = n_samples
        self.batch_size = batch_size
        self.inout_shape_code = inout_shape_code
        in_shape, out_shape = msig.in_out_shape_decoder(inout_shape_code)
        self.in_batch_shape = (batch_size,) + in_shape
        self.out_batch_shape = (batch_size,) + out_shape
        self.generate = msig.generate
        self.indexes = np.arange(n_samples)
        # self.on_epoch_end()

    def __len__(self):
        """Number of whole batches per epoch (remainder is dropped)."""
        return self.n_samples // self.batch_size

    def __getitem__(self, index):
        """Produce batch number `index`."""
        lo = index * self.batch_size
        batch_indexes = self.indexes[lo:lo + self.batch_size]
        return self._batch_generator(batch_indexes)

    def on_epoch_end(self):
        """Reset the index array after each epoch."""
        self.indexes = np.arange(self.n_samples)

    def _batch_generator(self, indexes):
        """Fill one batch by calling the underlying generator once per slot.

        Note: only the count of `indexes` matters — each slot receives
        newly generated data rather than a lookup by index.
        """
        X = np.empty(self.in_batch_shape)
        y = np.empty(self.out_batch_shape, dtype=int)
        for slot, _ in enumerate(indexes):
            X[slot], y[slot] = self.generate(sequence_code=self.inout_shape_code)
        return X, y
| {
"alphanum_fraction": 0.523393802,
"author": null,
"avg_line_length": 38.7976539589,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3569d8ab8e1c1bb81bf7bb5cf91cb436de66222d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "718a9ebb599d64d698c98de58c993032b5937ae0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "WillieMaddox/DiscRimNN",
"max_forks_repo_path": "mixsig/mixed.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "718a9ebb599d64d698c98de58c993032b5937ae0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "WillieMaddox/DiscRimNN",
"max_issues_repo_path": "mixsig/mixed.py",
"max_line_length": 123,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "718a9ebb599d64d698c98de58c993032b5937ae0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "WillieMaddox/DiscRimNN",
"max_stars_repo_path": "mixsig/mixed.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8470,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 26460
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import os
import shutil
import tqdm
import numpy as np
from pycocotools.coco import COCO
from coco.label import coco91_labels
def coco91_to_yolo_id(yolo_names_file, coco_labels=None):
    """Map the 91 COCO category labels onto YOLO class ids.

    :param yolo_names_file: path to a text file with one YOLO class name
        per line
    :param coco_labels: sequence of COCO label strings; defaults to
        ``coco91_labels()``
    :return: (yolo_names, mapping) where ``mapping[i]`` is the YOLO id of
        COCO label ``i`` or ``None`` when that label has no YOLO
        counterpart
    """
    # BUG FIX: the default was `coco_labels=coco91_labels()`, which runs
    # at import time and freezes one shared list; evaluate lazily instead.
    if coco_labels is None:
        coco_labels = coco91_labels()
    coco_names = np.array(coco_labels)
    # BUG FIX: np.loadtxt(..., delimiter="\n") is rejected by NumPy >= 1.23
    # (newline is not a valid delimiter); read one stripped name per line.
    with open(yolo_names_file) as names_file:
        yolo_names = np.array([line.strip() for line in names_file if line.strip()], dtype=str)
    mapping = [list(coco_names[i] == yolo_names).index(True)
               if any(coco_names[i] == yolo_names) else None
               for i in range(coco_names.size)]
    return yolo_names.tolist(), mapping
def coco2yolo(coco_img_dir, coco_ann_file, yolo_names_file, output_dir,
              output_name, output_img_prefix=None):
    """Convert COCO annotations into darknet/YOLO dataset layout.

    For every requested category the matching images are copied into
    ``<output_dir>/<output_name>/``, a darknet ``.txt`` annotation file is
    written next to each copied image, and each image name is appended to
    ``<output_dir>/<output_name>.txt``.

    :param coco_img_dir: directory holding the COCO images
    :param coco_ann_file: COCO instances_*.json annotation file
    :param yolo_names_file: text file with one desired class name per line
    :param output_dir: root directory of the generated YOLO dataset
    :param output_name: split name used for the image dir and list file
    :param output_img_prefix: optional prefix written before each image
        name in the image-list file
    """
    path = os.path
    output_txt = path.join(output_dir, output_name + ".txt")
    output_img_dir = path.join(output_dir, output_name)
    os.makedirs(output_img_dir, exist_ok=True)
    cat_names, coco_to_yolo_id = coco91_to_yolo_id(yolo_names_file)
    coco = COCO(coco_ann_file)
    cat_ids = coco.getCatIds(catNms=cat_names)
    # BUG FIX (robustness): the image-list file was opened without a context
    # manager and leaked whenever conversion raised part-way through.
    with open(output_txt, "w") as output_txt_file:
        for cat_id in cat_ids:
            yolo_id = coco_to_yolo_id[cat_id-1]  # COCO category ids are 1-based
            img_ids = coco.getImgIds(catIds=cat_id)
            print(f"\nCategory: {yolo_id}={cat_names[yolo_id]}, imgs: {len(img_ids)}")
            cat_anns_n = 0
            for img_id in tqdm.tqdm(img_ids):
                img = coco.loadImgs(ids=img_id)
                img_name = img[0]["file_name"]
                image_width = img[0]["width"]
                image_height = img[0]["height"]
                output_txt_file.write(img_name if output_img_prefix is None
                                      else output_img_prefix + img_name)
                # NOTE(review): os.linesep in text mode yields "\r\r\n" on
                # Windows; plain "\n" is likely intended -- confirm before
                # changing the output format.
                output_txt_file.write(os.linesep)
                img_src = path.join(coco_img_dir, img_name)
                img_dst = path.join(output_img_dir, img_name)
                img_dst_txt = path.splitext(img_dst)[0] + ".txt"
                shutil.copy(img_src, img_dst)
                with open(img_dst_txt, "w") as txt:
                    ann_ids = coco.getAnnIds(imgIds=img_id, catIds=cat_id)
                    anns = coco.loadAnns(ann_ids)
                    cat_anns_n += len(anns)
                    for ann in anns:
                        x_top_left = ann["bbox"][0]
                        y_top_left = ann["bbox"][1]
                        bbox_width = ann["bbox"][2]
                        bbox_height = ann["bbox"][3]
                        x_center = x_top_left + bbox_width / 2
                        y_center = y_top_left + bbox_height / 2
                        # darknet annotation format, normalized to [0, 1]:
                        # <object-class> <x_center> <y_center> <width> <height>
                        a = x_center / image_width
                        b = y_center / image_height
                        c = bbox_width / image_width
                        d = bbox_height / image_height
                        print(f"{yolo_id} {a:.6f} {b:.6f} {c:.6f} {d:.6f}", file=txt)
            print(f"Category: {yolo_id}={cat_names[yolo_id]}, anns: {cat_anns_n}, COMPLETED")
def _parse_args():
    """Build the command-line interface, parse it, and echo the values."""
    import argparse
    home = os.environ['HOME']
    arg_parser = argparse.ArgumentParser(usage="python scripts/coco2yolo.py <options>")
    arg_parser.add_argument("--coco_img_dir", type=str,
                            default=f"{home}/Codes/devel/datasets/coco2017/train2017/",
                            help="coco image dir, default: %(default)s")
    arg_parser.add_argument("--coco_ann_file", type=str,
                            default=f"{home}/Codes/devel/datasets/coco2017/annotations/instances_train2017.json",
                            help="coco annotation file, default: %(default)s")
    arg_parser.add_argument("--yolo_names_file", type=str,
                            default="./cfg/coco/coco.names",
                            help="coco desired objects, default: %(default)s")
    arg_parser.add_argument("--output_dir", type=str,
                            default=f"{home}/yolov4/coco2017/",
                            help="output dir for yolo datasets, default: %(default)s")
    arg_parser.add_argument("--output_name", type=str,
                            default="train",
                            help="output name for img txt and dir, default: %(default)s")
    arg_parser.add_argument("--output_img_prefix", type=str,
                            help="output img prefix before img name, default: %(default)s")
    args = arg_parser.parse_args()
    print("Args")
    print(f"  coco_img_dir: {args.coco_img_dir}")
    print(f"  coco_ann_file: {args.coco_ann_file}")
    print(f"  yolo_names_file: {args.yolo_names_file}")
    print(f"  output_dir: {args.output_dir}")
    print(f"  output_name: {args.output_name}")
    print(f"  output_img_prefix: {args.output_img_prefix}")
    return args
if __name__ == "__main__":
    # Entry point: read CLI options and convert the chosen COCO split.
    args = _parse_args()
    coco2yolo(args.coco_img_dir, args.coco_ann_file, args.yolo_names_file,
              args.output_dir, args.output_name, args.output_img_prefix)
# https://gist.github.com/kaancolak/c66ba49540bbf075fbd46bd98100b544
# https://github.com/ultralytics/JSON2YOLO
| {
"alphanum_fraction": 0.6824592331,
"author": null,
"avg_line_length": 34.641221374,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "85597fd3d033f2c18b68228b69bffeae7277b536",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2022-03-29T13:54:45.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-09-24T02:36:39.000Z",
"max_forks_repo_head_hexsha": "a07a60fed551ae0cef774ab72550ad5e7bed6a29",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ikuokuo/start-yolov4",
"max_forks_repo_path": "scripts/coco2yolo.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a07a60fed551ae0cef774ab72550ad5e7bed6a29",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ikuokuo/start-yolov4",
"max_issues_repo_path": "scripts/coco2yolo.py",
"max_line_length": 155,
"max_stars_count": 10,
"max_stars_repo_head_hexsha": "a07a60fed551ae0cef774ab72550ad5e7bed6a29",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ikuokuo/start-yolov4",
"max_stars_repo_path": "scripts/coco2yolo.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-03T10:22:42.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-24T02:32:30.000Z",
"num_tokens": 1279,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4538
} |
using Yao
using QuDiffEq
using LinearAlgebra
using BitBasis
using Random
using Test
using YaoBlocks
using OrdinaryDiffEq
#Linear Diff Equation Unitary M
# In-place right-hand side of the nonlinear ODE system:
#   u1' = -3*u1^2 + u2
#   u2' = -u2^2 - u1*u2
# Signature matches the OrdinaryDiffEq convention f(du, u, p, t);
# p and t are unused.
function f(du, u, p, t)
    du[1] = u[2] - 3 * u[1]^2
    du[2] = -u[2]^2 - u[1] * u[2]
end
# Exercises the quantum nonlinear differential-equation solver (QuNLDE):
# first a single function-transform step, then a full Euler-step trajectory
# compared against OrdinaryDiffEq's classical Euler integration of f.
@testset "QuNLDE_Test" begin
    Random.seed!(4)
    N = 2
    k = 3
    siz = nextpow(2, N + 1)
    x = normalize!(rand(N))
    # Sparse complex matrix encoding the quadratic right-hand side f above
    # (entries presumably follow QuLDEProblem's tensor layout — TODO confirm).
    A = zeros(ComplexF32,2^(siz),2^(siz))
    A[1,1] = ComplexF32(1)
    A[5,3] = ComplexF32(1)
    A[5,6] = ComplexF32(-3)
    A[9,11] = ComplexF32(-1)
    A[9,7] = ComplexF32(-1)
    tspan = (0.0,0.4)
    qprob = QuLDEProblem(A, x, tspan)
    # One function-transform application should reproduce f(x), up to the
    # sqrt(2) scaling applied in the assertion below.
    r, N = func_transform(qprob.A, qprob.b, k)
    out = N*vec(state(r))
    r_out = zero(x)
    f(r_out, x,1,1)
    @test isapprox.(r_out, out[2:3]*sqrt(2), atol = 1e-3) |> all
    # Full trajectory: quantum solver vs. classical fixed-step Euler.
    prob = ODEProblem(f, x, tspan)
    sol = solve(prob, Euler(), dt = 0.1, adaptive = false)
    r_out = transpose(hcat(sol.u...))
    out = solve(qprob, QuNLDE(3), dt = 0.1)
    @test isapprox.(r_out,real(out), atol = 1e-3) |> all
end
| {
"alphanum_fraction": 0.5835777126,
"author": null,
"avg_line_length": 24.3571428571,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "62d3aeb321b262cf80c444cba7ecc0e664ee6f52",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2021-06-29T03:40:10.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-07-25T03:31:06.000Z",
"max_forks_repo_head_hexsha": "c79f27d80875c1f1536142598947749d0c6074ec",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "yezhengkai/QuDiffEq.jl",
"max_forks_repo_path": "test/QuNLDE_tests.jl",
"max_issues_count": 19,
"max_issues_repo_head_hexsha": "c79f27d80875c1f1536142598947749d0c6074ec",
"max_issues_repo_issues_event_max_datetime": "2021-06-29T06:33:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-05-30T01:19:42.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "yezhengkai/QuDiffEq.jl",
"max_issues_repo_path": "test/QuNLDE_tests.jl",
"max_line_length": 64,
"max_stars_count": 30,
"max_stars_repo_head_hexsha": "c79f27d80875c1f1536142598947749d0c6074ec",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "yezhengkai/QuDiffEq.jl",
"max_stars_repo_path": "test/QuNLDE_tests.jl",
"max_stars_repo_stars_event_max_datetime": "2021-12-16T04:30:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-05-25T11:34:11.000Z",
"num_tokens": 418,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1023
} |
      SUBROUTINE MF_INTROE
c***********************************************************************
c     Interpolates the elements of the magneto-ionic reflection matrix
c     ROE at the current eigenangle THETA using a third-order series in
c     THETA for the logarithm of each matrix element.  The series
c     coefficients (R0, DRDT, D2RDT2, D3RDT3, expanded about THETA0) are
c     transferred in from RLIST for the current search rectangle KB; the
c     companion entry INIT OE determines those coefficients when this
c     kind of interpolation can be used (otherwise LENSET is set in the
c     call to E MTRX so the rectangle is reduced or LAGRNG is used).
c     When IDERIV=1 the derivative DROE w.r.t. THETA is also formed.
c     Additional details are found in section V of NOSC TR 1143.
c
c  Change History:
c     26 Oct 95     Changed to get the LOG unit from LWPC_LUN.CMN.
c*******************!***************************************************
      implicit complex (a-h,o-z)
c     LWPC parameters
      include 'lwpc_lun.cmn'
      parameter (mxboxes=200)
      character*200 error_msg
      real adjmsh,wdchng,
     &     unitr,boxlft,boxrht,
     &     uniti,boxtop,boxbot,
     &     omega,wavenr,
     &     thresh,rlnmax
      common/mf_boxs/adjmsh,wdchng,
     &       unitr,nur(mxboxes),boxlft(mxboxes),boxrht(mxboxes),
     &       uniti,nui(mxboxes),boxtop(mxboxes),boxbot(mxboxes),
     &       nrboxs,kb
     &      /mf_mode/theta,c,s,csq,ssq,omega,wavenr,ideriv
     &      /mf_rntg/r(4),dr(4),drdh(4),drdcdh(4),
     &              roe(4),droe(4),rnd(5)
     &      /mf_roes/rlist(22,mxboxes)
     &      /mf_rseq/r0(4),drdt(4),d2rdt2(4),d3rdt3(4),
     &              zetap(2),gammap(2),theta0,extra
      dimension rseq(22)
      equivalence (rseq,r0)
c     THRESH only triggers a diagnostic message; RLNMAX guards the
c     argument passed to EXP; NRLIST is the coefficient-list length.
      data rlnmax/35./,nrlist/22/,thresh/23./
c     Retrieve interpolation parameters for rectangle KB.
      call MF_XFER (rlist(1,kb),rseq,nrlist)
c     Initialization for given value of THETA
c     (0.01745329252 = pi/180 converts degrees to radians).
      c=COS(theta*0.01745329252)
      s=SIN(theta*0.01745329252)
      csq=c**2
      ssq=s**2
      dcdt=-s*0.01745329252
      dt=1./dcdt
c     An interpolated value of each element of the magneto-ionic
c     reflection matrix is found using a third order series for the
c     log of each element, expanded about THETA0.
      delt=theta-theta0
      do k=1,4
         alnroe=((d3rdt3(k)*delt/6.+d2rdt2(k)*.5)*delt
     &          +drdt(k))*delt+r0(k)
         if (REAL(alnroe) .gt. thresh)
     &      write(lwpcLOG_lun,
     &            '(''MF_INTROE: '',
     &              ''Log of ROE('',i1,'')='',2f8.3)')
     &            k,alnroe
c        Abort before EXP overflows.
         if (REAL(alnroe) .gt. rlnmax) then
            write(error_msg,
     &            '(''[MF_INTROE]: '',
     &              ''Interpolated ROE out of range'')')
            call LWPC_ERROR ('ERROR',error_msg)
         end if
         roe(k)=EXP(alnroe)
      end do
c     Optionally form DROE = d(ROE)/d(THETA) from the series derivative
c     of the log, converted through DT = d(THETA)/d(C).
      if (ideriv .eq. 1) then
         do k=1,4
            dlnrdt=(d3rdt3(k)*delt*.5+d2rdt2(k))*delt+drdt(k)
            droe(k)=dlnrdt*roe(k)*dt
         end do
      end if
c     The magneto-ionic reflection matrix is transformed to
c     perpendicular/parallel form and then referred to the reference
c     height D.
      call MF_RMTRX
      call MF_FSINTG

      RETURN
      END ! MF_INTROE
| {
"alphanum_fraction": 0.5386081983,
"author": null,
"avg_line_length": 31.47,
"converted": null,
"ext": "for",
"file": null,
"hexsha": "db4f8753b8b78723345c732a32700009d5b43462",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2021-10-01T03:31:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-06-22T01:48:48.000Z",
"max_forks_repo_head_hexsha": "6144eac3b1ac1322d0ee363ec689bf8123bdeebd",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "spinorkit/LWPC",
"max_forks_repo_path": "LWPCv21/lib/mf_introe.for",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "6144eac3b1ac1322d0ee363ec689bf8123bdeebd",
"max_issues_repo_issues_event_max_datetime": "2021-07-12T11:01:31.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-07-07T10:52:53.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "spinorkit/LWPC",
"max_issues_repo_path": "LWPCv21/lib/mf_introe.for",
"max_line_length": 72,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "6144eac3b1ac1322d0ee363ec689bf8123bdeebd",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "spinorkit/LWPC",
"max_stars_repo_path": "LWPCv21/lib/mf_introe.for",
"max_stars_repo_stars_event_max_datetime": "2021-10-01T03:29:27.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-10-17T17:13:28.000Z",
"num_tokens": 959,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3147
} |
# Import only
import pandas as pd
import pdb
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
from sklearn.externals import joblib
import seaborn as sns
from a_detection import AnomalyDetection
from feature_analysis import FeatureAnalysis
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import chi2, f_regression
from sklearn.linear_model import LassoLarsCV, Ridge, RidgeCV, LassoCV, Lasso, LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn import metrics
from skgarden import MondrianForestRegressor, ExtraTreesRegressor, ExtraTreesQuantileRegressor, RandomForestRegressor, RandomForestQuantileRegressor
from settings import OBJECT_TYPES, MODEL_FOLDER, IMAGE_FOLDER
import logging
import xgboost as xgb
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def gen_subplots(fig, x, y):
    """Yield x*y axes added to `fig` as an x-by-y grid, enabling the grid
    on each before yielding it."""
    for slot in range(1, x * y + 1):
        axis = fig.add_subplot(x, y, slot)
        plt.grid()
        yield axis
class SupervisedLearning(object):
    """Wraps a real-estate ads DataFrame: cleanup, plotting, one-hot encoding
    and model fitting/persistence.  ``self.data`` holds the (cleaned) frame,
    ``self.dv`` the fitted DictVectorizer used for one-hot encoding.
    """

    # NOTE(review): mutable default for drop_cols — shared across calls if
    # ever mutated; harmless here because it is only read.
    def __init__(self, dataset, cleanup=True, drop_cols=['id', 'long', 'lat', 'price_brutto']):
        if cleanup:
            self.data = self.prepare_dataset(dataset, drop_cols)
        else:
            self.data = dataset
        self.dv = DictVectorizer(sparse=True)

    def prepare_dataset(self, dataset, drop_cols):
        """ remove unwanted columns and clean up NaN values or insert
        data in NaN values
        """
        # Fill missing values in place (mutates the caller's DataFrame).
        dataset.floor.fillna(0, inplace=True) # Add floor 0 if not present
        dataset.num_rooms.fillna(dataset.num_rooms.median(), inplace=True)
        # Drop columns we are not interested in
        dataset = dataset.drop(drop_cols, axis=1)
        return dataset.dropna() # Drop remaining rows with NaN values

    def plot_info(self, ads, save=False):
        """Plot price-per-m2 against several categorical/numeric features.

        With save=True each figure is written to IMAGE_FOLDER and closed;
        otherwise the figures stay open for interactive inspection.
        """
        fig, (ax1, ax2, ax3, ax4) = plt.subplots(4)
        # We have to convert canton_id and floor to integer to get the right order.
        data = ads.copy()
        data['canton_id'] = ads['canton_id'].astype(int)
        data['floor'] = ads['floor'].astype(int)
        # Canton_id
        sns.barplot(x="canton_id", y="price_brutto_m2", data=data, ax=ax1)
        # Num rooms
        sns.barplot(x="num_rooms", y="price_brutto_m2", data=data, ax=ax2)
        # Floor
        sns.barplot(x="floor", y="price_brutto_m2", data=data, ax=ax3)
        # Object type (rotated tick labels because type names are long)
        g = sns.barplot(x="otype", y="price_brutto_m2", data=data, ax=ax4)
        g.set_xticklabels(g.get_xticklabels(), rotation=90)
        if save:
            plt.savefig("{}/Analytical_data.png".format(IMAGE_FOLDER))
            plt.close()
        sns.lmplot(x="avg_room_area", y="price_brutto_m2", data=data)
        if save:
            plt.savefig("{}/avg_room_area.png".format(IMAGE_FOLDER))
            plt.close()
        sns.lmplot(x="build_year", y="price_brutto_m2", data=data)
        if save:
            plt.savefig("{}/build_year.png".format(IMAGE_FOLDER))
            plt.close()
        # Pairwise scatterplot of the main numeric features
        sns.set()
        cols = ['price_brutto_m2', 'build_year', 'num_rooms', 'avg_room_area', 'last_construction']
        sns.pairplot(data[cols], size = 2.5)
        if save:
            plt.savefig("{}/pairplot.png".format(IMAGE_FOLDER))
            plt.close()
        # Price distribution plot
        sns.distplot(data['price_brutto_m2'], kde=True, bins=100, hist_kws={'alpha': 0.6})
        if save:
            plt.savefig("{}/Dist_Price_plot.png".format(IMAGE_FOLDER))
            plt.close()

    def one_hot(self, X, y):
        """One-hot-encode the DataFrame X via DictVectorizer; return (X, y)
        as (sparse matrix, ndarray).  Fits self.dv as a side effect.
        """
        # Prepare transformation from pandas dataframe to matrix
        # One Hot Encoding for string
        # Otype municiplaiy ogroup
        # - - - - - - - - - - - - - - - -
        print("Size of data {}".format(X.shape))
        self.dv.fit(X.T.to_dict().values()) # Learn a list of feature name Important Price is present here
        print("Len of features after one hot: {}".format(len(self.dv.feature_names_)))
        # Transform feature -> value dicts to array or sparse matrix
        X = self.dv.transform(X.T.to_dict().values())
        y = y.values
        return X, y

    def generate_matrix(self, df, goal):
        """Split df into features X (goal column dropped) and target y
        (cast to int).
        """
        X = df.drop([goal], axis=1)
        y = df[goal].astype(int)
        return X, y

    def generate_test_train(self, X, y):
        # 50/50 split; returns X_train, X_test, y_train, y_test.
        return train_test_split(X, y, test_size=0.5)

    def get_col_name(self, idx, show_id=False):
        """ return the name of the feature from the dv
        """
        if show_id:
            return "{} ({})".format(self.dv.get_feature_names()[idx], idx)
        return "{}".format(self.dv.get_feature_names()[idx])

    def convert_to_df(self, X):
        """Densify the sparse matrix X back into a DataFrame whose columns
        are the fitted DictVectorizer feature names.
        """
        # Drop some features
        # ads_with_out_anomaly
        return pd.DataFrame(X.todense(), columns=self.dv.feature_names_)

    def fit(self, X_train, y_train, X_test, y_test, models, store=True):
        """Fit (or reload) each model in *models* and print its train/test score.

        store=True: fit and persist the model to MODEL_FOLDER via joblib;
        store=False: load a previously persisted model instead of fitting.
        """
        for name, model in models.items():
            print("Fit model: {}".format(name))
            # if name == 'lasso':
            #     pdb.set_trace()
            #     model.fit(X_train.todense(), y_train)
            # else:
            if store:
                model.fit(X_train, y_train)
                joblib.dump(model, '{}/{}.pkl'.format(MODEL_FOLDER, name))
            else:
                model = joblib.load('{}/{}.pkl'.format(MODEL_FOLDER, name))
            print("MODEL: {} scores:".format(name))
            print(model.score(X_train, y_train), model.score(X_test, y_test))
def main():
    """Full pipeline: load ads, plot overview, remove anomalies, analyse and
    select features, then write the cleaned feature set to clean_all.csv.

    NOTE(review): the function returns right after writing clean_all.csv;
    the model-fitting code below the ``return`` is currently unreachable
    (apparently kept intentionally for later experiments).
    """
    ads = pd.read_csv('all.csv', index_col=0, engine='c', dtype=OBJECT_TYPES)
    sl = SupervisedLearning(ads)
    sl.plot_info(ads, save=True)
    # Anomaly detection - remove weird/outlier ads
    # - - - - - - - - - - - - - - - - - -
    anomaly_detection = AnomalyDetection(sl.data)
    # (feature_name, contamination) pairs fed to the isolation forest
    features = [('floor', 0.03),
                ('build_year', 0.055),
                ('num_rooms', 0.03),
                ('avg_room_area', 0.03),
                ('last_construction', 0.045)]
    # Plot grids per feature: x-axis spans the feature range, y-axis the price
    meshgrid = {
        'floor': np.meshgrid(np.linspace(-1, 40, 400), np.linspace(0, 60000, 1000)),
        'build_year': np.meshgrid(np.linspace(0, 2025, 400), np.linspace(0, 60000, 1000)),
        'num_rooms': np.meshgrid(np.linspace(-1, 100, 400), np.linspace(0, 60000, 1000)),
        'avg_room_area': np.meshgrid(np.linspace(0, 500, 400), np.linspace(0, 60000, 1000)),
        'last_construction': np.meshgrid(np.linspace(0, 800, 400), np.linspace(0, 60000, 1000))
    }
    # for feature in features:
    #     print("Analyse feature {}".format(feature))
    #     # Remove wired ads
    #     # fig, *ax = plt.subplots(11)
    #     # ax = ax[0]
    #     # ax[0].hist(np.sort(sl.data.avg_room_area))
    #     data_frames = []
    #     std = []
    #     std_percent = []
    #     data_frames.append(pd.DataFrame({feature: sl.data[feature].astype(int), 'percent': 0}))
    #     std.append(np.std(sl.data[feature].astype(int)))
    #     std_percent.append(0)
    data = anomaly_detection.isolation_forest(features, meshgrid, 'price_brutto_m2', show_plot=True)
    # Compare distributions before (sl.data) and after (data) cleanup
    fig = plt.figure(figsize=(10, 15)) # width x, height y
    subplots = gen_subplots(fig, 10, 2) # num y num x
    for feature in features:
        name = feature[0]
        ax = next(subplots)
        sns.distplot(sl.data[name].astype(int), hist=False, kde_kws={'cumulative': True }, ax=ax)
        ax = next(subplots)
        sns.distplot(sl.data[name].astype(int), ax=ax)
        ax = next(subplots)
        sns.distplot(data[name].astype(int), hist=False, kde_kws={'cumulative': True }, ax=ax)
        ax = next(subplots)
        sns.distplot(data[name].astype(int), ax=ax)
    plt.savefig("{}/anomaly_detection_cleanup.png".format(IMAGE_FOLDER))
    plt.close()
    # for i in np.arange(0.5, 11, 0.5):
    #     data_frames.append(pd.DataFrame({feature: data[feature].astype(int), 'percent': i}))
    #     std.append(np.std(data[feature].astype(int)))
    #     std_percent.append((1-std[-1]/std[0])*100)
    # # sl.data has the new values
    # p_df = pd.concat(data_frames)
    # g = sns.FacetGrid(p_df, row="percent")
    # g.map(sns.distplot, feature)
    # plt.savefig('{}/{}_dist.png'.format(IMAGE_FOLDER, feature))
    # plt.close()
    # sns.distplot(sl.data[feature].astype(int), hist=False, kde_kws={'cumulative': True })
    # plt.savefig('{}/{}_cum.png'.format(IMAGE_FOLDER, feature))
    # plt.close()
    # fig, ax1 = plt.subplots()
    # #ax1.plot(std)
    # #ax1.set_ylabel('STD deviation')
    # ax1.set_title('{}'.format(feature))
    # ax1.plot(std_percent, list(np.arange(0, 11, 0.5)), c='r')
    # ax1.set_ylabel('Reduction of STD deviation in %')
    # ax1.set_xlabel('% removed of {}'.format(feature))
    # plt.savefig('{}/{}_std.png'.format(IMAGE_FOLDER, feature))
    # plt.close()
    # Analyse features
    #- - - - - - - - -
    selected_features = set(['price_brutto_m2']) # Add y value
    feature_analysis = FeatureAnalysis(data)
    # set data
    sl.data = data
    # Make one hot coding
    X, y = sl.generate_matrix(feature_analysis.data, 'price_brutto_m2')
    X, y = sl.one_hot(X, y)
    X_train, X_test, y_train, y_test = sl.generate_test_train(X, y)
    df = sl.convert_to_df(X)
    df['price_brutto_m2'] = y
    feature_analysis.show_correlation(df, goal='price_brutto_m2', save=True)
    # Print important features
    # min_percent how many percent a feature should support
    features = feature_analysis.tree_classifier(X_train, y_train,sl,
                                                min_percent=0.01, store_model=True,
                                                plot_feature=True)
    for f in features:
        selected_features.add(f[0])
        print("{}".format(f))
    # the smaller C the fewer features selected
    features = feature_analysis.linear_svc(X_train, y_train, C=0.1, penalty='l1', dual=False, store=True)
    print("Most {} important features in L1".format(len(features)))
    for feature in features:
        selected_features.add(sl.dv.get_feature_names()[feature])
        print("Feature: {} ".format(sl.dv.get_feature_names()[feature]))
    #model, features_chi2 = feature_analysis.select_KBest(sl, chi2, 40, name='chi2')
    #for feature in features_chi2:
    #    print("Feature: {} {}".format(sl.dv.get_feature_names()[feature], model.scores_[feature]))
    #model, features_chi2 = feature_analysis.select_KBest(sl, f_regression, 40, name='f_regression', True)
    #for feature in features_chi2:
    #    print("Feature: {} {}".format(sl.dv.get_feature_names()[feature], model.scores_[feature]))
    # Keep only the selected features and persist them for the model scripts
    df = df[list(selected_features)]
    df.to_csv('clean_all.csv', header=True, encoding='utf-8')
    # Early exit: everything below is dead code (see docstring).
    return
    X, y = sl.generate_matrix(df, 'price_brutto_m2')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    models = {'random forest': RandomForestRegressor(),
              'ridge': Ridge(),
              'lasso': LassoLarsCV(),
              #'linearSVC_fit': LinearSVC(),
              #'svc': SVC(),
              'logistic': LogisticRegression(),
              'gauss': GaussianNB()}
    pdb.set_trace()
    sl.fit(X_train, y_train, X_test, y_test, models)
def ape(y_true, y_pred):
    """Element-wise absolute percentage error: |actual - predicted| / actual."""
    abs_error = np.abs(y_true - y_pred)
    return abs_error / y_true
def mape(y_true, y_pred):
    """Mean absolute percentage error over all samples."""
    errors = ape(y_true, y_pred)
    return errors.mean()
def mdape(y_true, y_pred):
    """Median absolute percentage error (robust to outliers, unlike MAPE)."""
    pct_errors = ape(y_true, y_pred)
    return np.median(pct_errors)
def plot(y_test, y_pred, show=False, plot_name=""):
    """Diagnostic plots for a regression run: actual vs. predicted, residuals
    over predicted value, and an APE histogram with mean/median markers.

    show=True displays interactively; otherwise figures are saved to
    IMAGE_FOLDER, suffixed with plot_name.  ("prpability" in the file name
    is a typo kept for compatibility with existing output files.)
    """
    # sort both arrays by y_test
    y_test, y_pred = zip(*sorted(zip(y_test, y_pred)))
    y_test = np.asarray(y_test)
    y_pred = np.asarray(y_pred)
    markersize = 1
    fig = plt.figure(figsize=(10, 10))
    subplots = gen_subplots(fig, 3, 1)
    ax = next(subplots)
    ax.set_xlabel('Actual vs. Predicted values')
    ax.plot(y_test, 'bo', markersize=markersize, label="Actual")
    ax.plot(y_pred, 'ro', markersize=markersize, label="Predicted")
    ax = next(subplots)
    ax.set_xlabel('Predicted value')
    ax.set_ylabel('Residuals')
    ax.plot(y_pred, y_test - y_pred, 'bo', markersize=markersize)
    # Zero-residual reference line
    ax.plot((np.min(y_pred),np.max(y_pred)), (0,0), 'r-')
    if show:
        plt.show()
    else:
        plt.savefig("{}/cumulative_prpability_{}.png".format(IMAGE_FOLDER, plot_name))
    fig = plt.figure(figsize=(5, 5))
    subplots = gen_subplots(fig, 1, 1)
    ax = next(subplots)
    ax.set_xlabel('APE')
    my_ape = ape(y_test, y_pred)
    ax.hist(my_ape, 100)
    # NOTE(review): these locals shadow the module-level mape/mdape functions
    # for the rest of this function body.
    mape = ape(y_test, y_pred).mean()
    ax.plot((mape, mape), (0,400), 'r-')
    mdape = np.median(ape(y_test, y_pred))
    ax.plot((mdape, mdape), (0,400), 'y-')
    # Render x ticks as percentages
    ax.set_xticklabels(['{:.2%}'.format(x) for x in ax.get_xticks()])
    if show:
        plt.show()
    else:
        plt.savefig("{}/verteilung_der_fehler_{}.png".format(IMAGE_FOLDER, plot_name))
def plot2(y_test, y_pred, show=False, plot_name=""):
    """Variant of plot(): same diagnostic figures but without sorting by
    y_test and with residuals plotted against sample index instead of the
    predicted value.  Writes to the same file names as plot().
    """
    # sort both array by y_test
    #y_test, y_pred = zip(*sorted(zip([x for x in y_test], [x for x in y_pred])))
    y_test = np.asarray(y_test)
    y_pred = np.asarray(y_pred)
    fig = plt.figure(figsize=(10, 10))
    subplots = gen_subplots(fig, 3, 1)
    ax = next(subplots)
    ax.set_xlabel('Actual vs. Predicted values')
    ax.plot(y_test, 'bo', markersize=1, label="Actual")
    ax.plot(y_pred, 'ro', markersize=1, label="Predicted")
    ax = next(subplots)
    ax.set_xlabel('Residuals')
    ax.plot(y_test - y_pred, 'bo', markersize=1)
    # Zero-residual reference line across all samples
    ax.plot((0,len(y_test)), (0,0), 'r-')
    if show:
        plt.show()
    else:
        plt.savefig("{}/cumulative_prpability_{}.png".format(IMAGE_FOLDER, plot_name))
    # - - - - - - -
    # APE histogram with mean (red) and median (yellow) markers
    fig = plt.figure(figsize=(5, 5))
    subplots = gen_subplots(fig, 1, 1)
    ax = next(subplots)
    ax.set_xlabel('APE')
    my_ape = ape(y_test, y_pred)
    ax.hist(my_ape, 100)
    m = ape(y_test, y_pred).mean()
    ax.plot((m, m), (0,6000), 'r-')
    mm = np.median(ape(y_test, y_pred))
    ax.plot((mm, mm), (0,6000), 'y-')
    # Render x ticks as percentages
    ax.set_xticklabels(['{:.2%}'.format(x) for x in ax.get_xticks()])
    if show:
        plt.show()
    else:
        plt.savefig("{}/verteilung_der_fehler_{}.png".format(IMAGE_FOLDER, plot_name))
def statistics(y, pred):
    """Log a battery of regression metrics for actual y vs. predicted pred,
    plus the cumulative share of samples with APE below 5%, 10%, ... 95%.
    """
    diff = np.fabs(y - pred)
    logger.info(" R²-Score: {:10n}".format(metrics.r2_score(y, pred)))
    logger.info(" MAPE: {:.3%}".format(mape(y, pred)))
    logger.info(" MdAPE: {:.3%}".format(mdape(y, pred)))
    logger.info(" Min error: {:10n}".format(np.amin(diff)))
    logger.info(" Max error: {:10n}".format(np.amax(diff)))
    logger.info(" Mean absolute error: {:10n}".format(metrics.mean_absolute_error(y, pred)))
    logger.info("Median absolute error: {:10n}".format(metrics.median_absolute_error(y, pred)))
    logger.info(" Mean squared error: {:10n}".format(metrics.mean_squared_error(y, pred)))
    num_elements = len(pred)
    apes = ape(y, pred)
    # Fraction of predictions whose APE is below each 5% threshold
    for i in np.arange(5, 100, 5):
        logger.info("I {}: {}".format(i, len(np.where(apes < i/100)[0])/num_elements))
def randomForest():
    """Train a RandomForestRegressor on the cleaned feature set
    (clean_all.csv) and report statistics and diagnostic plots on a
    50% hold-out split.
    """
    ads = pd.read_csv('clean_all.csv', index_col=0, engine='c', dtype=OBJECT_TYPES)
    sl = SupervisedLearning(ads, cleanup=False)
    X, y = sl.generate_matrix(ads, 'price_brutto_m2')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    #model = RandomForestRegressor()
    # Hyperparameters chosen manually (no CV visible here)
    model = RandomForestRegressor(n_estimators=800, max_features="auto",
                                  n_jobs=4, min_samples_leaf=1,
                                  max_depth=None)
    print("FIT")
    model.fit(X_train, y_train)
    print("predict ")
    y_predicted = model.predict(X_test)
    statistics(y_test, y_predicted)
    plot(y_test, y_predicted, show=False, plot_name="random_forest")
def adaBoost():
    """Experiment: AdaBoost on the cleaned feature set.

    NOTE(review): this uses AdaBoostClassifier / DecisionTreeClassifier on
    the (integer-cast) price target, i.e. it treats each price value as a
    class — presumably an experiment; an AdaBoostRegressor would be the
    usual choice for this target.  Confirm intent before relying on it.
    """
    from sklearn.datasets import make_gaussian_quantiles
    from sklearn.ensemble import AdaBoostClassifier
    from sklearn.metrics import accuracy_score
    from sklearn.tree import DecisionTreeClassifier
    ads = pd.read_csv('clean_all.csv', index_col=0, engine='c', dtype=OBJECT_TYPES)
    sl = SupervisedLearning(ads, cleanup=False)
    X, y = sl.generate_matrix(ads, 'price_brutto_m2')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    # SAMME.R (default) booster
    bdt_real = AdaBoostClassifier(
        DecisionTreeClassifier(max_depth=2),
        n_estimators=600,
        learning_rate=1)
    # Discrete SAMME booster (currently not fitted — see commented lines)
    bdt_discrete = AdaBoostClassifier(
        DecisionTreeClassifier(max_depth=2),
        n_estimators=600,
        learning_rate=1.5,
        algorithm="SAMME")
    bdt_real.fit(X_train, y_train)
    #bdt_discrete.fit(X_train, y_train)
    y_real_predict = bdt_real.predict(X_test)
    # y_discrete_predict = bdt_discrete.predict(X_test)
    # Debug breakpoint left in on purpose? Remove for unattended runs.
    pdb.set_trace()
    print("REAL MAPE: {:.3%}, MDAPE: {:.3%}".format(mape(y_test, y_real_predict), mdape(y_test, y_real_predict)))
    plot(y_test, y_real_predict)
    #print("DISCRETE: MAPE: {:.3%}, MDAPE: {:.3%}".format(mape(y_test, y_discrete_predict), mdape(y_test, #y_discrete_predict)))
    #plot(y_test, y_discrete_predict)
def linearRegression():
    """
    Doc about linear regression
    Simple linear regression (ordinary least squares) baseline on the
    cleaned feature set, evaluated on a 50% hold-out split.
    """
    #import warnings
    #warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
    ads = pd.read_csv('clean_all.csv', index_col=0, engine='c', dtype=OBJECT_TYPES)
    sl = SupervisedLearning(ads, cleanup=False)
    X, y = sl.generate_matrix(ads, 'price_brutto_m2')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    # normalize=True scales features before fitting
    linreg = LinearRegression(normalize=True)
    print("Fitting data")
    linreg.fit(X_train, y_train)
    print("Predicting Data")
    y_pred = linreg.predict(X_test)
    statistics(y_test, y_pred)
    plot(y_test, y_pred, show=False, plot_name="simple_linear")
def ridge():
    """
    Ridge regression:
    Ridge regression uses L2 regularization: it adds an extra penalty term
    to prevent overfitting.  The ridge algorithm keeps all features, so it
    is important to have done a feature analysis beforehand — it cannot
    eliminate individual features on its own.
    Its advantage is that it shrinks the coefficients and thereby reduces
    model complexity.  It can also handle matrices whose columns are highly
    correlated.
    Formula: RSS + a * (sum of squares of the coefficients)
    a = 0:   plain linear regression — same coefficients
    a = Inf: all coefficients become 0, because the penalty term gets
             infinite weight via Inf * (sum of squares)
    0 < a < Inf: the magnitude of a controls the weight of the penalty;
    the larger a, the stronger the shrinkage (and the less exact the fit)
    """
    ads = pd.read_csv('clean_all.csv', index_col=0, engine='c', dtype=OBJECT_TYPES)
    sl = SupervisedLearning(ads, cleanup=False)
    X, y = sl.generate_matrix(ads, 'price_brutto_m2')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    # Coarse alpha search ...
    ridge = RidgeCV(alphas = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
    ridge.fit(X_train, y_train)
    alpha = ridge.alpha_
    logger.info("Try again for more precision with alphas centered around " + str(alpha))
    # ... then a finer search around the coarse optimum, with 10-fold CV
    ridge = RidgeCV(alphas = [alpha * .6, alpha * .65, alpha * .7, alpha * .75, alpha * .8, alpha * .85,
                              alpha * .9, alpha * .95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15,
                              alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4],
                    cv = 10)
    ridge.fit(X_train, y_train)
    alpha = ridge.alpha_
    logger.info("Best alpha: {}".format(alpha))
    # Refit a plain Ridge with the tuned alpha and evaluate on the hold-out
    ridgereg = Ridge(alpha=alpha, normalize=True)
    ridgereg.fit(X_train, y_train)
    y_pred = ridgereg.predict(X_test)
    statistics(y_test, y_pred)
    plot(y_test, y_pred, show=False, plot_name="ridge")
def lasso():
    """Lasso (L1) regression with a two-stage alpha search (coarse grid,
    then a finer grid around the coarse optimum with 10-fold CV), evaluated
    on a 50% hold-out split.  Mirrors the structure of ridge().
    """
    ads = pd.read_csv('clean_all.csv', index_col=0, engine='c', dtype=OBJECT_TYPES)
    sl = SupervisedLearning(ads, cleanup=False)
    X, y = sl.generate_matrix(ads, 'price_brutto_m2')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    lasso = LassoCV(alphas = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
    lasso.fit(X_train, y_train)
    alpha = lasso.alpha_
    logger.info("Try again for more precision with alphas centered around " + str(alpha))
    lasso = LassoCV(alphas = [alpha * .6, alpha * .65, alpha * .7, alpha * .75, alpha * .8, alpha * .85,
                              alpha * .9, alpha * .95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15,
                              alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4],
                    cv = 10)
    lasso.fit(X_train, y_train)
    alpha = lasso.alpha_
    logger.info("Best alpha: {}".format(alpha))
    # Refit with the tuned alpha; high max_iter for convergence
    lassoreg = Lasso(alpha=alpha, normalize=True, max_iter=1e5)
    lassoreg.fit(X_train, y_train)
    y_pred = lassoreg.predict(X_test)
    statistics(y_test, y_pred)
    plot(y_test, y_pred, show=False, plot_name="lasso")
def xgboost():
    """XGBoost regression on the cleaned feature set: first runs xgb.cv to
    visualize train/test RMSE over boosting rounds, then fits an
    XGBRegressor with hand-tuned parameters and evaluates on the hold-out.
    """
    ads = pd.read_csv('clean_all.csv', index_col=0, engine='c', dtype=OBJECT_TYPES)
    sl = SupervisedLearning(ads, cleanup=False)
    X, y = sl.generate_matrix(ads, 'price_brutto_m2')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    dtrain = xgb.DMatrix(X_train, label=y_train)
    dtest = xgb.DMatrix(X_test)
    params = {"max_depth":100, "eta":0.1}
    model = xgb.cv(params, dtrain, num_boost_round=500, early_stopping_rounds=100)
    # Plot the CV learning curves (blocks until the window is closed)
    model.loc[:,["test-rmse-mean", "train-rmse-mean"]].plot()
    plt.show()
    model_xgb = xgb.XGBRegressor(n_estimators=350, max_depth=100, learning_rate=0.1) #the params were tuned using xgb.cv
    model_xgb.fit(X_train, y_train)
    y_pred = model_xgb.predict(X_test)
    statistics(y_test, y_pred)
    plot(y_test, y_pred, show=False, plot_name="xgboost")
def garden():
    """Mondrian forest (scikit-garden) regression on the cleaned feature
    set, with statistics and an interactive diagnostic plot.
    """
    ads = pd.read_csv('clean_all.csv', index_col=0, engine='c', dtype=OBJECT_TYPES)
    sl = SupervisedLearning(ads, cleanup=False)
    X, y = sl.generate_matrix(ads, 'price_brutto_m2')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    # Alternatives tried: ExtraTreesRegressor, RandomForestRegressor, RandomForestQuantileRegressor
    mfr = MondrianForestRegressor(n_estimators=100)
    mfr.fit(X_train, y_train)
    y_pred = mfr.predict(X_test)
    statistics(y_test, y_pred)
    plot(y_test, y_pred, show=True, plot_name="MondrianForest")
def readCSV(file):
    """Load an ads CSV file (first column as index, dtypes from
    OBJECT_TYPES) and return the resulting DataFrame.

    Previously the loaded frame was discarded; returning it makes the
    function usable by callers while remaining backward-compatible
    (existing callers that ignore the return value are unaffected).

    :param file: path to the CSV file to load
    :return: the loaded pandas DataFrame
    """
    ads = pd.read_csv(file, index_col=0, engine='c', dtype=OBJECT_TYPES)
    print("READED")
    return ads
if __name__ == "__main__":
    # Script entry point: configure file logging, then run whichever
    # pipeline stages are currently un-commented below.
    logger.setLevel(logging.INFO)
    # NOTE(review): this runs before the file handler is attached, so its
    # output is not captured in jupyter.log.
    readCSV('all_transformed.csv')
    # create a file handler
    handler = logging.FileHandler('jupyter.log')
    handler.setLevel(logging.INFO)
    # create a logging format
    formatter = logging.Formatter('%(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(handler)
    # Pipeline stages are toggled by (un)commenting the calls below:
    #main()
    #feature()
    # logger.info("="*20)
    # logger.info("Running Linear Regression")
    # linearRegression()
    #logger.info("="*20)
    #logger.info("Running Linear Regression with Ridge")
    #ridge()
    # logger.info("="*20)
    # logger.info("Running Linear Regression with Lasso")
    # lasso()
    # logger.info("="*20)
    # logger.info("Running Linear Regression with RandomForest")
    # randomForest()
    #xgboost()
    #
    #garden()
| {
"alphanum_fraction": 0.6327712056,
"author": null,
"avg_line_length": 38.2632423756,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3155d259ee5b87b790435d8cd2078a3ded9aeaea",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9b2cda72a7c5cfc7fb95596f629aebef9eaa2e98",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bhzunami/Immo",
"max_forks_repo_path": "immo/scikit/scripts/jupyter.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9b2cda72a7c5cfc7fb95596f629aebef9eaa2e98",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bhzunami/Immo",
"max_issues_repo_path": "immo/scikit/scripts/jupyter.py",
"max_line_length": 148,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "9b2cda72a7c5cfc7fb95596f629aebef9eaa2e98",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bhzunami/Immo",
"max_stars_repo_path": "immo/scikit/scripts/jupyter.py",
"max_stars_repo_stars_event_max_datetime": "2019-06-11T17:06:11.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-01-17T09:07:24.000Z",
"num_tokens": 6517,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 23838
} |
from __future__ import division
import mxnet as mx
import numpy as np
import cv2
# Public names exported by ``from ... import *``.
__all__ = ['FaceGenderage', 'genderage_v1', 'get_genderage']
class FaceGenderage:
    """MXNet-based gender/age predictor for aligned 112x112 face crops.

    Typical use: construct, call prepare(ctx_id) once to load and bind the
    network, then get(img) per face image.
    """

    def __init__(self, name, download, param_file):
        # name: model identifier; param_file: path to the mxnet
        # "<prefix>-<epoch>.params" checkpoint (required when download=True)
        self.name = name
        self.download = download
        self.param_file = param_file
        # Expected input resolution (H, W) for get()
        self.image_size = (112, 112)
        if download:
            assert param_file

    def prepare(self, ctx_id):
        """Load the checkpoint, bind the module on GPU ctx_id (CPU if
        ctx_id < 0) and run one dummy forward pass as warm-up.  No-op when
        no param_file was supplied.
        """
        if self.param_file:
            # Parse "<prefix>-<epoch>.params" into prefix and epoch number
            pos = self.param_file.rfind('-')
            prefix = self.param_file[0:pos]
            pos2 = self.param_file.rfind('.')
            epoch = int(self.param_file[pos + 1:pos2])
            sym, arg_params, aux_params = mx.model.load_checkpoint(
                prefix, epoch)
            # Truncate the graph at the fc1 output layer
            all_layers = sym.get_internals()
            sym = all_layers['fc1_output']
            if ctx_id >= 0:
                ctx = mx.gpu(ctx_id)
            else:
                ctx = mx.cpu()
            model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
            data_shape = (1, 3) + self.image_size
            model.bind(data_shapes=[('data', data_shape)])
            model.set_params(arg_params, aux_params)
            #warmup
            data = mx.nd.zeros(shape=data_shape)
            db = mx.io.DataBatch(data=(data, ))
            model.forward(db, is_train=False)
            embedding = model.get_outputs()[0].asnumpy()
            self.model = model
        else:
            pass

    def get(self, img):
        """Predict (gender, age) for a BGR 112x112x3 face image.

        :return: (gender, age) where gender is the argmax over the first
            two network outputs and age is derived from outputs 2..201,
            read as 100 binary pairs whose per-pair argmax values are summed.
        """
        assert self.param_file and self.model
        assert img.shape[2] == 3 and img.shape[0:2] == self.image_size
        # Network expects RGB, channel-first, with a batch dimension
        data = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        data = np.transpose(data, (2, 0, 1))
        data = np.expand_dims(data, axis=0)
        data = mx.nd.array(data)
        db = mx.io.DataBatch(data=(data, ))
        self.model.forward(db, is_train=False)
        ret = self.model.get_outputs()[0].asnumpy()
        # First two outputs: gender logits
        g = ret[:, 0:2].flatten()
        gender = np.argmax(g)
        # Next 200 outputs: 100 binary "older than i?" pairs; summing the
        # per-pair argmax gives the age estimate
        a = ret[:, 2:202].reshape((100, 2))
        a = np.argmax(a, axis=1)
        age = int(sum(a))
        return gender, age
def get_genderage(name, download=True, root='~/.insightface/models', **kwargs):
    """Build a FaceGenderage model, optionally downloading its parameter
    file into *root* via the model store."""
    if download:
        from .model_store import get_model_file
        param_path = get_model_file("genderage_%s" % name, root=root)
        return FaceGenderage(name, True, param_path)
    return FaceGenderage(name, False, None)
def genderage_v1(**kwargs):
    """Convenience constructor for the "v1" gender/age model (always
    downloads the parameter file)."""
    model_name = "v1"
    return get_genderage(model_name, download=True, **kwargs)
| {
"alphanum_fraction": 0.5777426993,
"author": null,
"avg_line_length": 34.2432432432,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "69f56e12696dd8cbc87bc66538c933bbe1794142",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2019-09-11T06:10:39.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-08-29T06:41:19.000Z",
"max_forks_repo_head_hexsha": "ea172e4921c3960c0684404afff6d0d862447eae",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dwhite54/insightface",
"max_forks_repo_path": "python-package/insightface/model_zoo/face_genderage.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "ea172e4921c3960c0684404afff6d0d862447eae",
"max_issues_repo_issues_event_max_datetime": "2020-07-24T03:33:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-24T03:33:21.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dwhite54/insightface",
"max_issues_repo_path": "python-package/insightface/model_zoo/face_genderage.py",
"max_line_length": 79,
"max_stars_count": 18,
"max_stars_repo_head_hexsha": "ea172e4921c3960c0684404afff6d0d862447eae",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dwhite54/insightface",
"max_stars_repo_path": "python-package/insightface/model_zoo/face_genderage.py",
"max_stars_repo_stars_event_max_datetime": "2020-10-12T01:37:52.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-04-13T02:55:03.000Z",
"num_tokens": 628,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2534
} |
import os
import numpy as np
import pytest
from napari_svg import napari_get_writer
from napari.layers import Image, Labels, Points, Shapes, Vectors
@pytest.fixture
def layer_data_and_types():
    """Build one napari layer of each type from seeded random data and
    return (layer_data, layer_types), where layer_data is a list of
    (data, meta, type) tuples and layer_types the corresponding type names.
    """
    # Seed so layer contents are reproducible across test runs
    np.random.seed(0)
    layers = [
        Image(np.random.rand(20, 20)),
        # NOTE(review): Labels built from a (20, 2) int array — presumably
        # just minimal fixture data, not a realistic labels image.
        Labels(np.random.randint(10, size=(20, 2))),
        Points(np.random.rand(20, 2)),
        Shapes(np.random.rand(10, 2, 2)),
        Vectors(np.random.rand(10, 2, 2)),
    ]
    layer_data = [l.as_layer_data_tuple() for l in layers]
    layer_types = [ld[2] for ld in layer_data]
    return layer_data, layer_types
def test_get_writer(tmpdir, layer_data_and_types):
    """Test writing layers data."""
    layer_data, layer_types = layer_data_and_types
    path = os.path.join(tmpdir, 'layers_file.svg')
    # A writer must be offered for an .svg path with supported layer types
    writer = napari_get_writer(path, layer_types)
    assert writer is not None
    # Check file does not exist
    assert not os.path.isfile(path)
    # Write data; the writer returns the path it actually wrote to
    return_path = writer(path, layer_data)
    assert return_path == path
    # Check file now exists
    assert os.path.isfile(path)
def test_get_writer_no_extension(tmpdir, layer_data_and_types):
    """Test writing layers data with no extension."""
    layer_data, layer_types = layer_data_and_types
    path = os.path.join(tmpdir, 'layers_file')
    writer = napari_get_writer(path, layer_types)
    assert writer is not None
    # Check file does not exist
    assert not os.path.isfile(path)
    # Write data; the writer appends '.svg' when the path has no extension
    return_path = writer(path, layer_data)
    assert return_path == path + '.svg'
    # Check file now exists
    assert os.path.isfile(path + '.svg')
def test_get_writer_bad_extension(tmpdir, layer_data_and_types):
    """Test not writing layers data with bad extension."""
    layer_data, layer_types = layer_data_and_types
    path = os.path.join(tmpdir, 'layers_file.csv')
    # A non-.svg extension must be refused (no writer returned)
    writer = napari_get_writer(path, layer_types)
    assert writer is None
    # Check file does not exist
    assert not os.path.isfile(path)
def test_get_writer_bad_layer_types(tmpdir):
    """Test that no writer is offered when the layer types are unsupported."""
    bad_types = ['image', 'points', 'bad_type']
    target = os.path.join(tmpdir, 'layers_file.svg')
    # An unsupported layer type must cause the writer to be refused
    assert napari_get_writer(target, bad_types) is None
    # ... and nothing should have been written to disk
    assert not os.path.isfile(target)
| {
"alphanum_fraction": 0.6821963394,
"author": null,
"avg_line_length": 26.4175824176,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f4f8ac48bcc18b69231760309200fe77cd389fd6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2022-01-25T16:39:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-04-19T21:47:37.000Z",
"max_forks_repo_head_hexsha": "0e222857c3a61befe1ea4b4e97fd48b285877d02",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "nclack/napari-svg",
"max_forks_repo_path": "napari_svg/_tests/test_get_writer.py",
"max_issues_count": 13,
"max_issues_repo_head_hexsha": "0e222857c3a61befe1ea4b4e97fd48b285877d02",
"max_issues_repo_issues_event_max_datetime": "2021-12-17T16:56:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-26T04:27:12.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "nclack/napari-svg",
"max_issues_repo_path": "napari_svg/_tests/test_get_writer.py",
"max_line_length": 64,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "0e222857c3a61befe1ea4b4e97fd48b285877d02",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "nclack/napari-svg",
"max_stars_repo_path": "napari_svg/_tests/test_get_writer.py",
"max_stars_repo_stars_event_max_datetime": "2020-04-13T12:20:00.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-04-13T12:20:00.000Z",
"num_tokens": 576,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2404
} |
/*=============================================================================
Copyright (c) 2009 Hartmut Kaiser
Copyright (c) 2014 Joel de Guzman
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#if !defined(SPIRIT_X3_BOOL_SEP_29_2009_0709AM)
#define SPIRIT_X3_BOOL_SEP_29_2009_0709AM
#include <boost/spirit/home/x3/core/parser.hpp>
#include <boost/spirit/home/x3/core/skip_over.hpp>
#include <boost/spirit/home/x3/numeric/bool_policies.hpp>
namespace boost { namespace spirit { namespace x3
{
    // Parser for boolean values ("true"/"false" per BoolPolicies), producing
    // an attribute of type T.  Encoding selects the character set and the
    // case-compare behaviour taken from the parse context.
    template <typename T, typename Encoding, typename BoolPolicies = bool_policies<T>>
    struct bool_parser : parser<bool_parser<T, Encoding, BoolPolicies>>
    {
        typedef Encoding encoding;
        typedef T attribute_type;
        static bool const has_attribute = true;

        bool_parser()
            : policies() {}

        bool_parser(BoolPolicies const& policies)
            : policies(policies) {}

        // Attribute is exactly T: skip leading whitespace, then try the
        // "true" branch first and the "false" branch second.
        template <typename Iterator, typename Context>
        bool parse(Iterator& first, Iterator const& last
          , Context const& context, unused_type, T& attr) const
        {
            x3::skip_over(first, last, context);
            return policies.parse_true(first, last, attr, get_case_compare<encoding>(context))
                || policies.parse_false(first, last, attr, get_case_compare<encoding>(context));
        }

        // Attribute is some other type: parse into a temporary T and move
        // the result into the caller's attribute on success.
        template <typename Iterator, typename Context, typename Attribute>
        bool parse(Iterator& first, Iterator const& last
          , Context const& context, unused_type, Attribute& attr_param) const
        {
            // this case is called when Attribute is not T
            T attr_;
            if (parse(first, last, context, unused, attr_))
            {
                traits::move_to(attr_, attr_param);
                return true;
            }
            return false;
        }

        BoolPolicies policies;
    };
    // Parser that only accepts one specific boolean literal: the stored
    // value n_ decides whether only the "true" spelling or only the "false"
    // spelling matches.  Used to implement true_ and false_ below.
    template <typename T, typename Encoding, typename BoolPolicies = bool_policies<T>>
    struct literal_bool_parser : parser<bool_parser<T, Encoding, BoolPolicies>>
    {
        typedef Encoding encoding;
        typedef T attribute_type;
        static bool const has_attribute = true;

        template <typename Value>
        literal_bool_parser(Value const& n)
            : policies(), n_(n) {}

        template <typename Value>
        literal_bool_parser(Value const& n, BoolPolicies const& policies)
            : policies(policies), n_(n) {}

        // Core: skip whitespace, then match only the branch selected by n_.
        template <typename Iterator, typename Context>
        bool parse_main(Iterator& first, Iterator const& last
          , Context& context, T& attr) const
        {
            x3::skip_over(first, last, context);
            return (n_ && policies.parse_true(first, last, attr, get_case_compare<encoding>(context)))
                || (!n_ && policies.parse_false(first, last, attr, get_case_compare<encoding>(context)));
        }

        // Attribute is exactly T: delegate to parse_main.
        template <typename Iterator, typename Context>
        bool parse(Iterator& first, Iterator const& last
          , Context& context, unused_type, T& attr) const
        {
            return parse_main(first, last, context, attr);
        }

        // Attribute is some other type: parse into a temporary T and move
        // the result into the caller's attribute on success.
        template <typename Iterator, typename Context, typename Attribute>
        bool parse(Iterator& first, Iterator const& last
          , Context const& context, unused_type, Attribute& attr_param) const
        {
            // this case is called when Attribute is not T
            T attr_;
            if (parse_main(first, last, context, attr_))
            {
                traits::move_to(attr_, attr_param);
                return true;
            }
            return false;
        }

        BoolPolicies policies;
        T n_;
    };
    // Pre-built parser instances per character encoding: bool_ accepts
    // either literal, true_/false_ accept exactly one of them.
    namespace standard
    {
        typedef bool_parser<bool, char_encoding::standard> bool_type;
        bool_type const bool_ = {};

        typedef literal_bool_parser<bool, char_encoding::standard> true_type;
        true_type const true_ = { true };

        typedef literal_bool_parser<bool, char_encoding::standard> false_type;
        false_type const false_ = { false };
    }

#ifndef BOOST_SPIRIT_NO_STANDARD_WIDE
    namespace standard_wide
    {
        typedef bool_parser<bool, char_encoding::standard_wide> bool_type;
        bool_type const bool_ = {};

        typedef literal_bool_parser<bool, char_encoding::standard_wide> true_type;
        true_type const true_ = { true };

        typedef literal_bool_parser<bool, char_encoding::standard_wide> false_type;
        false_type const false_ = { false };
    }
#endif

    namespace ascii
    {
        typedef bool_parser<bool, char_encoding::ascii> bool_type;
        bool_type const bool_ = {};

        typedef literal_bool_parser<bool, char_encoding::ascii> true_type;
        true_type const true_ = { true };

        typedef literal_bool_parser<bool, char_encoding::ascii> false_type;
        false_type const false_ = { false };
    }

    namespace iso8859_1
    {
        typedef bool_parser<bool, char_encoding::iso8859_1> bool_type;
        bool_type const bool_ = {};

        typedef literal_bool_parser<bool, char_encoding::iso8859_1> true_type;
        true_type const true_ = { true };

        typedef literal_bool_parser<bool, char_encoding::iso8859_1> false_type;
        false_type const false_ = { false };
    }

    // Default (narrow standard encoding) parsers exposed at x3 scope.
    using standard::bool_;
    using standard::true_;
    using standard::false_;
}}}
#endif
| {
"alphanum_fraction": 0.6033674709,
"author": null,
"avg_line_length": 35.5617283951,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "ba4edf1ccb98e0b7c64a516a8bc52d561b42fcf4",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2019-07-13T13:36:36.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-02-28T01:38:22.000Z",
"max_forks_repo_head_hexsha": "5a69382ac9104d47383c1af0aaa0bc8a336c9744",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Mattlk13/innoextract-android",
"max_forks_repo_path": "Service/jni/boost/x86_64/include/boost-1_65_1/boost/spirit/home/x3/numeric/bool.hpp",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "5a69382ac9104d47383c1af0aaa0bc8a336c9744",
"max_issues_repo_issues_event_max_datetime": "2022-01-11T15:39:01.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-12-11T00:36:35.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Mattlk13/innoextract-android",
"max_issues_repo_path": "Service/jni/boost/x86_64/include/boost-1_65_1/boost/spirit/home/x3/numeric/bool.hpp",
"max_line_length": 106,
"max_stars_count": 16,
"max_stars_repo_head_hexsha": "5a69382ac9104d47383c1af0aaa0bc8a336c9744",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Mattlk13/innoextract-android",
"max_stars_repo_path": "Service/jni/boost/x86_64/include/boost-1_65_1/boost/spirit/home/x3/numeric/bool.hpp",
"max_stars_repo_stars_event_max_datetime": "2022-01-05T01:52:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-04-27T00:12:56.000Z",
"num_tokens": 1216,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5761
} |
# -*- coding: utf-8 -*-
# @version : Python3.6
# @Time : 2017/4/30 15:48
# @Author : Jianyang-Hu
# @contact : jianyang1993@163.com
# @File : optfit_0430.py
# @Software: PyCharm
from matplotlib.finance import _quotes_historical_yahoo
import numpy as np
import matplotlib.pyplot as plt
from scipy import fftpack
from scipy import signal
from matplotlib.dates import DateFormatter
from matplotlib.dates import DayLocator
from matplotlib.dates import MonthLocator
from scipy import optimize
# Simulate a sine wave fit on historical quote data.
# NOTE(review): matplotlib.finance was removed in matplotlib 2.x; this
# script requires an old matplotlib (or mpl_finance) to run -- confirm.
start = (2010, 7, 25)
end = (2011, 7, 25)
quotes = _quotes_historical_yahoo("QQQ", start, end)
quotes = np.array(quotes)
# column 0 holds the date numbers, column 4 the close prices
dates = quotes.T[0]
qqq = quotes.T[4]
# remove the linear trend so the periodic component stands out
y = signal.detrend(qqq)
alldays = DayLocator()
months = MonthLocator()
month_formatter = DateFormatter("%b %Y")
fig = plt.figure()
fig.subplots_adjust(hspace=.3)
ax = fig.add_subplot(211)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(month_formatter)
ax.tick_params(axis='both', which='major', labelsize='x-large')
# frequency-domain amplitudes; zero out everything below the maximum so
# only the dominant component survives
amps = np.abs(fftpack.fftshift(fftpack.rfft(y)))
amps[amps < amps.max()] = 0
# Sine wave model
def residuals(p, y, x):
    """Residuals of the sine model y = A*sin(2*pi*k*x + theta) + b.

    p     -- parameter vector (A, k, theta, b)
    y     -- observed values
    x     -- sample positions (dates)
    Returns the elementwise difference between observation and model.
    """
    A, k, theta, b = p
    # Subtract the full model, including the offset b.  The original code
    # ADDED b, which biases the least-squares fit of the offset term
    # (the fit at the bottom of the script plots A*sin(...) + b).
    err = y - A * np.sin(2 * np.pi * k * x + theta) - b
    return err
# Transform the filtered signal back to the time domain
filtered = -fftpack.irfft(fftpack.ifftshift(amps))
# Initial guesses for the parameters (A, k, theta, b)
N = len(qqq)
f = np.linspace(-N/2, N/2, N)
p0 = [filtered.max(), f[amps.argmax()]/(2*N), 0, 0]
print ("P0", p0)
# least-squares fit of the sine model to the filtered signal
plsq = optimize.leastsq(residuals, p0, args=(filtered, dates))
p = plsq[0]
print ("P", p)
plt.plot(dates, y, 'o', label="detrended")
plt.plot(dates, filtered, label="filtered")
plt.plot(dates, p[0] * np.sin(2 * np.pi * dates * p[1] + p[2]) + p[3], '^', label="fit")
fig.autofmt_xdate()
plt.legend(prop={'size':'x-large'})
ax2 = fig.add_subplot(212)
ax2.tick_params(axis='both', which='major', labelsize='x-large')
plt.plot(f, amps, label="transformed")
plt.legend(prop={'size':'x-large'})
plt.show()
| {
"alphanum_fraction": 0.6947477899,
"author": null,
"avg_line_length": 23.7407407407,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6eef6d0531f2e07224b71e2241dbd281f74df2a6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Jianyang-Hu/numpypractice",
"max_forks_repo_path": "optfit_0430.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Jianyang-Hu/numpypractice",
"max_issues_repo_path": "optfit_0430.py",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f4d4a3e28f5dd10f9722f83b1ac66f0f2ccef8b9",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Jianyang-Hu/numpypractice",
"max_stars_repo_path": "optfit_0430.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 643,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1923
} |
import math
import typing
import mesh_tensorflow as mtf
import numpy as np
import tensorflow as tf
from .backend import normal_var, orthogonal_var
from .. import tf_wrapper as tfw
from ..dataclass import BlockArgs, ModelParameter
from ..mtf_wrapper import einsum, reshape, multiply, zeros_like
from ..utils_core import random_name, scoped
from ..utils_mtf import DIM_LIST, SHAPE, linear_shapes, shape_size
def _multi_dim_range_tf(params: ModelParameter, dims: DIM_LIST) -> mtf.Tensor:
    """Build a tensor of flat (row-major over `dims`) indices.

    Each dimension contributes a strided range broadcast along its own
    axis; summing the broadcast ranges yields the linear index of every
    position, cast to the model's activation dtype.
    """
    strides = np.cumprod([1] + [d.size for d in dims[:-1]])
    broadcast_ranges = []
    for axis, (dim, stride) in enumerate(zip(dims, strides)):
        target_shape = [1] * axis + [dim.size] + [1] * (len(dims) - axis - 1)
        broadcast_ranges.append(tfw.reshape(tfw.tf_range(0, dim.size * stride, stride),
                                            target_shape))
    total = broadcast_ranges[0]
    for extra in broadcast_ranges[1:]:
        total += extra
    return tfw.cast(total, params.variable_dtype.activation_dtype)
def squeeze(tensor: tf.Tensor, removed_dims: typing.List[int]):
    """Drop the listed axes from `tensor` via reshape."""
    dims = tensor.shape.as_list()
    # delete in ascending order, compensating for earlier removals
    for already_removed, axis in enumerate(sorted(removed_dims)):
        del dims[axis - already_removed]
    return tf.reshape(tensor, dims)
def unsqueeze(tensor: tf.Tensor, added_dims: typing.List[int]):
    """Insert size-1 axes at the listed positions via reshape."""
    dims = tensor.shape.as_list()
    for axis in sorted(added_dims):
        dims.insert(axis, 1)
    return tf.reshape(tensor, dims)
class ScatterAdd(mtf.Operation):
    """Scatter-add `gradient` rows into `out` at positions given by `indices`.

    Used as the backward op for `Gather` (see `Gather.gradient`), so only
    the gathered rows of the table receive gradient.  `squeeze_dims`
    lists dimensions that must be dropped from the per-slice tensors
    before `tf.tensor_scatter_nd_add` can be applied.
    """
    def __init__(self, out: mtf.Tensor, indices: mtf.Tensor, gradient: mtf.Tensor,
                 squeeze_dims: typing.Optional[SHAPE]):
        super().__init__([out, indices, gradient], out.mesh, random_name("sparse_assign"))
        if isinstance(squeeze_dims, mtf.Shape):
            squeeze_dims = squeeze_dims.dims
        if squeeze_dims is None:
            squeeze_dims = []
        self.squeeze_dims = squeeze_dims
        # per-operand positions of the dims to squeeze away in lower()
        self.index_dims = [indices.shape.dims.index(dim) for dim in squeeze_dims if dim in indices.shape.dims]
        self.embed_dims = [out.shape.dims.index(dim) for dim in squeeze_dims if dim in out.shape.dims]
        self.grad_dims = [gradient.shape.dims.index(dim) for dim in squeeze_dims if dim in gradient.shape.dims]
        self.indices = indices
        self.grad = gradient
        self._outputs = [mtf.Tensor(self, out.shape, out.dtype)]
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        flattened_dims = 0
        def assign_fn(val: tf.Tensor, indices: tf.Tensor, gradient: tf.Tensor) -> tf.Tensor:
            # Val: [1 (Heads), Keys, Features]
            # Indices: [Batch, Sequence, 1 (Heads), 1 (Keys)]
            # Gradient: [Batch, Sequence, 1 (Heads), Features]
            shape = val.shape
            val = squeeze(val, self.embed_dims)
            indices = squeeze(indices, self.index_dims)
            gradient = squeeze(gradient, self.grad_dims)
            # scatter_nd expects an explicit trailing index-depth axis
            indices = tf.reshape(indices, indices.shape.as_list() + [1])
            # collapse the shared trailing dims into one flat axis
            val = tf.reshape(val, val.shape.as_list()[:-flattened_dims] + [-1])
            gradient = tf.cast(tf.reshape(gradient, gradient.shape.as_list()[:-flattened_dims] + [-1]), val.dtype)
            return tf.reshape(tf.tensor_scatter_nd_add(val, indices, gradient), shape)
        out, indices, gradients = self.inputs
        # count how many trailing dims `out` and `gradients` share; those
        # are the ones flattened inside assign_fn (closure over the loop var)
        for flattened_dims, (dim0, dim1) in enumerate(zip((out.shape - self.squeeze_dims).dims[::-1],
                                                          (gradients.shape - self.squeeze_dims).dims[::-1]), 0):
            if dim0 != dim1:
                break
        flattened_dims = max(flattened_dims, 1)
        y = mesh_impl.slicewise(assign_fn, lowering.tensors[out], lowering.tensors[indices],
                                lowering.tensors[gradients])
        lowering.set_tensor_lowering(self.outputs[0], y)
def scatter_add(out: mtf.Tensor, indices: mtf.Tensor, gradient: mtf.Tensor, squeeze_dims: typing.Optional[SHAPE] = None
                ) -> mtf.Tensor:
    """Functional wrapper around the ScatterAdd operation."""
    op = ScatterAdd(out, indices, gradient, squeeze_dims)
    return op.outputs[0]
class Gather(mtf.Operation):
    """Embedding lookup: gather rows of `embedding` indexed by `args.tensor`.

    The backward pass routes gradients through `scatter_add`, so only the
    rows that were actually gathered receive gradient.
    """
    def __init__(self, args: BlockArgs, embedding: mtf.Tensor, squeeze_dims: typing.Optional[SHAPE]):
        super().__init__([args.tensor, embedding], args.params.mesh, name=random_name("gather"))
        if isinstance(squeeze_dims, mtf.Shape):
            squeeze_dims = squeeze_dims.dims
        if squeeze_dims is None:
            squeeze_dims = []
        self.squeeze_dims = squeeze_dims
        # positions of the squeezed dims within each operand's shape
        self.squeezed_index_dims = [args.tensor.shape.dims.index(dim) for dim in squeeze_dims
                                    if dim in args.tensor.shape.dims]
        self.squeezed_embed_dims = [embedding.shape.dims.index(dim) for dim in squeeze_dims
                                    if dim in embedding.shape.dims]
        # output shape: index shape (minus squeezed dims) + embedding row dims
        out_shape = args.tensor.shape - squeeze_dims + embedding.shape.dims[1:]
        self.args = args
        self.unsqueezed_dims = [out_shape.dims.index(dim) for dim in squeeze_dims if dim in out_shape.dims]
        self._outputs = [mtf.Tensor(self, out_shape,
                                    args.params.variable_dtype.activation_dtype)]
    def gradient(self, grad_ys: typing.List[mtf.Tensor]) -> typing.Tuple[None, mtf.Tensor]:
        # No gradient w.r.t. the indices; scatter the incoming gradient
        # into a zero tensor shaped like the embedding table.
        indices, embedding = self.inputs
        return None, scatter_add(zeros_like(embedding), indices, grad_ys[0], self.squeeze_dims)
    def lower(self, lowering: mtf.Lowering):
        mesh_impl: mtf.simd_mesh_impl.SimdMeshImpl = lowering.mesh_impl(self)
        indices, embeddings = self.inputs
        def slicewise_fn(idx: tf.Tensor, embd: tf.Tensor) -> tf.Tensor:
            idx = squeeze(idx, self.squeezed_index_dims)
            embd = squeeze(embd, self.squeezed_embed_dims)
            out = tf.gather(embd, idx, axis=0)
            return unsqueeze(out, self.unsqueezed_dims)
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[indices], lowering.tensors[embeddings])
        lowering.set_tensor_lowering(self.outputs[0], y)
class RelativeEmbeddingForward(mtf.Operation):
    """Produce a fixed (non-trainable) sinusoidal position embedding of `shape`.

    The output is sin(position x frequency) scaled by
    `params.embedding_stddev`; `has_gradient` returns False, so it never
    receives gradient.
    """
    def __init__(self, args: BlockArgs, shape: SHAPE):
        super().__init__([], args.params.mesh, name=random_name("rel_embed"))
        if isinstance(shape, list):
            shape = mtf.Shape(shape)
        self.args = args
        self.shape = shape
        self._outputs = [mtf.Tensor(self, shape, args.params.variable_dtype.activation_dtype)]
    def has_gradient(self):
        # fixed embedding: nothing to train
        return False
    def lower(self, lowering: mtf.Lowering):
        mesh_impl: mtf.simd_mesh_impl.SimdMeshImpl = lowering.mesh_impl(self)
        params = self.args.params
        shape = self.shape
        # split the target shape into position dims and feature dims
        position_dims: SHAPE = (shape - params.feature_dims) - params.intermediate
        feature_dims = linear_shapes(self.args).old
        position_count = shape_size(position_dims)
        cosine = 'cosine' in params.position_embedding
        # one einsum letter per output dim; outer-product positions x features
        shape_formula = ''.join(chr(ord('a') + i) for i in range(shape.ndims))
        position_formula = ''.join(shape_formula[shape.dims.index(d)] for d in position_dims)
        feature_formula = ''.join(shape_formula[shape.dims.index(d)] for d in feature_dims)
        positions = _multi_dim_range_tf(params, position_dims)
        features = _multi_dim_range_tf(params, feature_dims)
        additive = 0
        feature_count = shape_size(feature_dims)
        if cosine:
            # pair up feature indices: odd ones get an extra pi term.
            # NOTE(review): `additive` is added AFTER exp but before the
            # einsum with positions, so it offsets the frequency rather
            # than the phase -- confirm this is intended.
            additive = tfw.mod(features, 2)
            features = (features - additive) / 2
            additive = additive * math.pi
            feature_count /= 2
        features += 4 / feature_count
        features -= math.log(position_count / 2 / math.pi)
        features = tfw.exp(features) + additive
        out = tfw.einsum(f'{position_formula},{feature_formula}->{shape_formula}', positions, features)
        out = multiply(tfw.sin(out), params.embedding_stddev)
        lowering.set_tensor_lowering(self.outputs[0], mesh_impl.import_tf_tensor(self.outputs[0], out))
def _embed_var(args: BlockArgs, shape: SHAPE) -> mtf.Tensor:
    """Create a learned embedding variable: orthogonal when the 'orthogonal'
    flag is present in args, otherwise normal with the configured stddev."""
    if 'orthogonal' not in args:
        return normal_var(args, shape, args.params.embedding_stddev)
    return orthogonal_var(args, shape)
def _embed(args: BlockArgs, shape: SHAPE) -> mtf.Tensor:
    """Build a position/feature embedding of `shape`.

    The flavor is selected by flags present in `args`:
      * 'absolute' -- one learned variable covering the full shape.
      * 'axial'    -- factorize each position dim into `splits` parts
                      (first digit flag in args, default 2) and combine
                      the per-part variables with an einsum.
      * 'relative' -- fixed sinusoidal embedding
                      (RelativeEmbeddingForward); with 'learned' it is
                      additionally scaled by a learned per-feature variable.
    Raises ValueError if none of the flags is present.
    """
    if isinstance(shape, (list, tuple)):
        shape = mtf.Shape(shape)
    # (removed an unused `variables = []` here; the axial branch below
    # initializes its own list)
    position_dims: mtf.Shape = (shape - args.params.feature_dims) - args.params.intermediate
    feature_dims = linear_shapes(args).old
    if 'absolute' in args:
        out = _embed_var(args, shape)
    elif 'axial' in args:
        # number of factors per position dim: first numeric flag, default 2
        splits = 2
        for a in args:
            if a.isdigit():
                splits = int(a)
                break
        tmp_dims = []
        variables = []
        def _new_part(size: int):
            # one factor dimension plus its learned variable
            tmp = mtf.Dimension(f'_{len(tmp_dims)}', size)
            tmp_dims.append(tmp)
            variables.append(_embed_var(args, [tmp] + feature_dims))
        for dim in position_dims:
            # largest integer base with base**(splits-1) dividing dim.size
            base = int(dim.size ** (1 / splits))
            while dim.size % base != 0:
                base -= 1
            final = dim.size // base ** (splits - 1)
            _new_part(final)
            for _ in range(1, splits):
                _new_part(base)
        out = reshape(einsum(variables, output_shape=tmp_dims + feature_dims), shape)
    elif 'relative' in args:
        out = RelativeEmbeddingForward(args, shape).outputs[0]
        if 'learned' in args:
            out = multiply(out, _embed_var(args, feature_dims))
    else:
        raise ValueError("The following embeddings are supported:"
                         " relative(-learned) or absolute(-split) or axial(-split) are supported")
    return out
def embed(args: BlockArgs, shape: SHAPE) -> mtf.Tensor:
    """Build an embedding of `shape` inside an 'embed' variable scope (see `_embed`)."""
    return scoped('embed', _embed, args, shape)
def gather_embed(args: BlockArgs, shape: SHAPE, squeezed_dims: typing.Optional[SHAPE] = None) -> mtf.Tensor:
    """Build an embedding table of `shape` and gather rows from it using
    `args.tensor` as indices (see `Gather`)."""
    embedding_table = scoped("gather", embed, args, shape)
    return Gather(args, embedding_table, squeezed_dims).outputs[0]
| {
"alphanum_fraction": 0.6373693291,
"author": null,
"avg_line_length": 42.4698275862,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "754a793ce62ab078ccaa0bf890c93e8f36715305",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-01-14T15:50:07.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-14T15:44:59.000Z",
"max_forks_repo_head_hexsha": "e31f460616d8bc29931f069843e4f94b7f38e260",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "Ziems/OBST",
"max_forks_repo_path": "src/model/embedding.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e31f460616d8bc29931f069843e4f94b7f38e260",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "Ziems/OBST",
"max_issues_repo_path": "src/model/embedding.py",
"max_line_length": 119,
"max_stars_count": 15,
"max_stars_repo_head_hexsha": "e31f460616d8bc29931f069843e4f94b7f38e260",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "Ziems/OBST",
"max_stars_repo_path": "src/model/embedding.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-04T12:56:36.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-26T10:03:07.000Z",
"num_tokens": 2319,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9853
} |
import os
import sys
import shutil
import numpy as np
import time, datetime
import torch
import glob
import random
import logging
import argparse
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import joblib
import pdb
import pickle
from collections import defaultdict
from config import config
import copy
def broadcast(args, obj, src, group=torch.distributed.group.WORLD, async_op=False):
    """Broadcast `obj` (anything np.array accepts) from rank `src` to all
    ranks in `group`; returns the received value as a numpy array.
    Logs the value before and after for debugging."""
    print('local_rank:{}, obj:{}'.format(args.local_rank, obj))
    payload = torch.from_numpy(np.array(obj)).cuda()
    torch.distributed.broadcast(payload, src, group, async_op)
    obj = payload.cpu().numpy()
    print('local_rank:{}, tensor:{}'.format(args.local_rank, obj))
    return obj
class CrossEntropyLabelSmooth(nn.Module):
    """Cross-entropy with label smoothing: the one-hot target is mixed
    with a uniform distribution, weight (1 - epsilon) vs epsilon/classes."""
    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)
    def forward(self, inputs, targets):
        # inputs: (batch, num_classes) logits; targets: (batch,) class ids
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        return (-smoothed * log_probs).mean(0).sum()
def get_optimizer_schedule(model, args, total_iters):
    """Build an SGD optimizer (weight decay applied only to conv weights
    and the classifier head) plus a linear-decay LR scheduler that reaches
    args.min_lr after total_iters steps."""
    decayed = []
    for pname, p in model.named_parameters():
        # 4-D tensors are conv kernels; the classifier head is matched by name
        if p.ndimension() == 4 or 'classifier.0.weight' in pname or 'classifier.0.bias' in pname:
            decayed.append(p)
    decayed_ids = set(map(id, decayed))
    undecayed = [p for p in model.parameters() if id(p) not in decayed_ids]
    optimizer = torch.optim.SGD(
        [{'params': undecayed},
         {'params': decayed, 'weight_decay': args.weight_decay}],
        args.learning_rate,
        momentum=args.momentum,
    )
    # stretch the schedule so lr hits min_lr (not zero) at total_iters
    delta_iters = total_iters / (1. - args.min_lr / args.learning_rate)
    print('delta_iters={}'.format(delta_iters))
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: (1.0 - step / delta_iters), last_epoch=-1)
    return optimizer, scheduler
def get_location(s, key):
    """Return all indices at which `key` occurs in sequence `s` (possibly empty)."""
    positions = defaultdict(list)
    for pos, item in enumerate(s):
        positions[item].append(pos)
    return positions[key]
def list_substract(list1, list2):
    """Return the elements of list1 not present in list2, order preserved."""
    excluded = set(list2)
    return [item for item in list1 if item not in excluded]
def check_cand(cand, operations):
    """Repair a candidate cell so each node keeps the required number of
    zero-ops.

    `cand` is a flat list of per-edge op ids, reshaped to one row per
    cell of `config.edges` entries; `operations[i]` lists the op ids
    available on edge i.  Node j (j = 0..3) has j+2 incoming edges and
    must have exactly j of them set to 0 (presumably the 'none' op --
    confirm): surplus non-zero edges are randomly zeroed, and surplus
    zero edges are randomly reassigned to another available op.

    NOTE(review): only cand[0] (the first cell) is repaired and returned,
    even when `cand` contains several cells -- confirm callers expect that.
    """
    cand = np.reshape(cand, [-1, config.edges])
    offset, cell_cand = 0, cand[0]
    for j in range(4):
        edges = cell_cand[offset:offset+j+2]
        edges_ops = operations[offset:offset+j+2]
        none_idxs = get_location(edges, 0)
        if len(none_idxs) < j:
            # too few zero edges: zero out randomly chosen non-zero ones
            general_idxs = list_substract(range(j+2), none_idxs)
            num = min(j-len(none_idxs), len(general_idxs))
            general_idxs = np.random.choice(general_idxs, size=num, replace=False, p=None)
            for k in general_idxs:
                edges[k] = 0
        elif len(none_idxs) > j:
            # too many zero edges: reassign the surplus to a random
            # non-first op from that edge's option list
            none_idxs = np.random.choice(none_idxs, size=len(none_idxs)-j, replace=False, p=None)
            for k in none_idxs:
                if len(edges_ops[k]) > 1:
                    l = np.random.randint(len(edges_ops[k])-1)
                    edges[k] = edges_ops[k][l+1]
        offset += len(edges)
    return cell_cand.tolist()
class AvgrageMeter(object):
    """Track a running sum, count and average of a scalar statistic."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Zero out all accumulated statistics."""
        self.avg, self.sum, self.cnt = 0, 0, 0
    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the average."""
        self.cnt += n
        self.sum += val * n
        self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy percentages.

    output -- (batch, classes) scores; target -- (batch,) class indices;
    topk   -- tuple of k values.  Returns a list of 0-dim tensors, one
    percentage per requested k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): slices of `correct` can be
        # non-contiguous on newer PyTorch releases, where view() raises.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def save_checkpoint(state, save):
    """Serialize `state` (a dict with at least an 'epoch' key) to
    <save>/checkpoint_epoch_<epoch+1>.pth.tar, creating the directory
    if needed."""
    if not os.path.exists(save):
        os.makedirs(save)
    target = os.path.join(save, 'checkpoint_epoch_{}.pth.tar'.format(state['epoch'] + 1))
    torch.save(state, target)
    print('Save CheckPoint....')
def save(model, save, suffix):
    """Save the wrapped model's weights to `save`, then copy them to
    weight_<suffix>.pt in the current working directory.
    """
    # model.module: assumes model is wrapped (e.g. nn.DataParallel) -- confirm
    torch.save(model.module.state_dict(), save)
    shutil.copyfile(save, 'weight_{}.pt'.format(suffix))
def create_exp_dir(path, scripts_to_save=None):
    """Create an experiment directory and optionally snapshot the given
    script files into a <path>/scripts subdirectory."""
    if not os.path.exists(path):
        os.mkdir(path)
    print('Experiment dir : {}'.format(path))
    script_path = os.path.join(path, 'scripts')
    if scripts_to_save is not None and not os.path.exists(script_path):
        os.mkdir(script_path)
        for script in scripts_to_save:
            shutil.copyfile(script, os.path.join(script_path, os.path.basename(script)))
def merge_ops(rngs):
    """Flatten a list of op-id groups into one list with -1 separators
    between consecutive groups."""
    merged = []
    for group in rngs:
        merged.extend(group)
        merged.append(-1)
    # drop the trailing separator (no-op for empty input)
    return merged[:-1]
def split_ops(cand):
    """Inverse of merge_ops: split a flat op-id list on -1 separators.

    Returns a list of groups; an empty input yields [[]].
    (The original kept an unused `cell` counter, removed here.)
    """
    groups = [[]]
    for c in cand:
        if c == -1:
            groups.append([])
        else:
            groups[-1].append(c)
    return groups
def get_search_space_size(operations):
    """Number of distinct candidates: product of per-edge option counts."""
    total = 1
    for edge_ops in operations:
        total *= len(edge_ops)
    return total
def count_parameters_in_MB(model):
    """Total number of non-auxiliary model parameters, in millions.

    Uses the builtin sum: calling np.sum on a generator is deprecated in
    NumPy and only falls back to Python-level summation anyway.
    """
    return sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name) / 1e6
class Cutout(object):
    """Cutout augmentation: zero a random `length` x `length` square
    (clipped at the borders) of a CHW image tensor, in place."""
    def __init__(self, length):
        self.length = length
    def __call__(self, img):
        height, width = img.size(1), img.size(2)
        mask = np.ones((height, width), np.float32)
        # random square centre; box edges are clipped to the image
        center_y = np.random.randint(height)
        center_x = np.random.randint(width)
        top = np.clip(center_y - self.length // 2, 0, height)
        bottom = np.clip(center_y + self.length // 2, 0, height)
        left = np.clip(center_x - self.length // 2, 0, width)
        right = np.clip(center_x + self.length // 2, 0, width)
        mask[top: bottom, left: right] = 0.
        mask = torch.from_numpy(mask).expand_as(img)
        img *= mask
        return img
def _data_transforms_cifar10(args):
    """Build the CIFAR-10 train/valid torchvision pipelines.

    Training adds random crop + horizontal flip (and Cutout when
    args.cutout is set); both pipelines normalize with the CIFAR-10
    channel statistics.
    """
    mean = [0.49139968, 0.48215827, 0.44653124]
    std = [0.24703233, 0.24348505, 0.26158768]
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, valid_transform
class Cifar10RandomLabels(dset.CIFAR10):
    """CIFAR10 dataset, with support for randomly corrupt labels.
    Params
    ------
    rand_seed: int
      Default 0. numpy random seed.
    num_classes: int
      Default 10. The number of classes in the dataset.
    """
    def __init__(self, rand_seed=0, num_classes=10, **kwargs):
        super(Cifar10RandomLabels, self).__init__(**kwargs)
        self.n_classes = num_classes
        self.rand_seed = rand_seed
        # every target is replaced immediately (see random_labels)
        self.random_labels()
    def random_labels(self):
        """Overwrite ALL dataset targets with seeded uniform-random labels."""
        labels = np.array(self.targets)
        print('num_classes:{}, random labels num:{}, random seed:{}'.format(self.n_classes, len(labels), self.rand_seed))
        # seeding here makes the corruption reproducible across runs
        np.random.seed(self.rand_seed)
        rnd_labels = np.random.randint(0, self.n_classes, len(labels))
        # we need to explicitly cast the labels from npy.int64 to
        # builtin int type, otherwise pytorch will fail...
        labels = [int(x) for x in rnd_labels]
        self.targets = labels
"alphanum_fraction": 0.6633445509,
"author": null,
"avg_line_length": 31.406504065,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d59ef3a6a3c46bc77cdb98a982e4ec2181c1c26e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2021-10-18T02:41:34.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-28T00:04:20.000Z",
"max_forks_repo_head_hexsha": "a7e2ef9debcd06a93b075181a027b806b737b106",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "megvii-model/RLNAS",
"max_forks_repo_path": "darts_search_space/cifar10/rlnas/train_supernet/utils.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "a7e2ef9debcd06a93b075181a027b806b737b106",
"max_issues_repo_issues_event_max_datetime": "2022-02-05T10:15:31.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-07-09T05:14:29.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "megvii-model/RLNAS",
"max_issues_repo_path": "darts_search_space/cifar10/rlnas/train_supernet/utils.py",
"max_line_length": 117,
"max_stars_count": 17,
"max_stars_repo_head_hexsha": "a7e2ef9debcd06a93b075181a027b806b737b106",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "megvii-model/RLNAS",
"max_stars_repo_path": "darts_search_space/cifar10/rlnas/train_supernet/utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-23T09:59:02.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-05-17T04:54:17.000Z",
"num_tokens": 2030,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7726
} |
-- `p → ¬q` says exactly "not both p and q".
theorem impNot {p q : Prop} : p → ¬ q ↔ ¬ (p ∧ q) :=
⟨ λ hpq h => hpq h.1 h.2, λ h hp hq => h <| And.intro hp hq ⟩
-- Pointwise version of `impNot` under an existential quantifier.
theorem Exists.impNot {p q : α → Prop} : (∃ x, p x → ¬ q x) ↔ ∃ x, ¬ (p x ∧ q x) := by
apply Iff.intro
intro h
cases h with | intro x hx =>
{ exact ⟨ x, λ hs => hx hs.1 hs.2 ⟩ }
intro h
cases h with | intro x hx =>
{ exact ⟨ x, λ hpx hqx => hx <| And.intro hpx hqx ⟩ }
namespace Classical
-- From ¬q → ¬p conclude p → q (needs excluded middle).
theorem contrapositive {p q : Prop} : (¬ q → ¬ p) → p → q :=
λ hqp hp => match em q with
| Or.inl h => h
| Or.inr h => False.elim <| hqp h hp
-- Double-negation elimination, stated as an iff.
theorem notNot {p : Prop} : ¬ ¬ p ↔ p := by
apply Iff.intro
{ intro hp; cases em p with
| inl => assumption
| inr h => exact False.elim <| hp h }
{ exact λ hp hnp => False.elim <| hnp hp }
-- A failed universal yields a counterexample (classical).
theorem notForall {p : α → Prop} : (¬ ∀ x, p x) → ∃ x, ¬ p x := by
{ apply contrapositive; intro hx; rw notNot; intro x;
cases em (p x); { assumption }
{ apply False.elim <| hx <| Exists.intro x _; assumption } }
-- p ∧ ¬q is precisely the failure of p → q.
theorem notAnd {p q : Prop} : p ∧ ¬ q ↔ ¬ (p → q) := by
apply Iff.intro
{ exact λ h himp => h.2 <| himp h.1 }
{ intro h; apply And.intro;
{ revert h; apply contrapositive; rw notNot;
exact λ hnp hp => False.elim <| hnp hp }
{ exact λ hq => h <| λ _ => hq } }
-- Existential version of `notAnd`.
theorem Exists.notAnd {p q : α → Prop} :
(∃ x, p x ∧ ¬ q x) ↔ ∃ x, ¬ (p x → q x) := by
apply Iff.intro
{ intro h;
let ⟨ x, ⟨ hp, hnq ⟩ ⟩ := h;
exact Exists.intro x λ h => hnq <| h hp }
{ intro h;
let ⟨ x, hx ⟩ := h;
apply Exists.intro x;
apply And.intro;
{ revert hx; apply contrapositive;
exact λ hpx hpq => hpq λ hp => False.elim <| hpx hp }
{ intro foo;
apply hx;
intro bar;
assumption; } }
end Classical
-- A set over α is represented by its membership predicate.
def Set (α : Type u) := α → Prop
def setOf (p : α → Prop) : Set α := p
namespace Set
instance : EmptyCollection (Set α) := ⟨ λ x => False ⟩
variable {zzz : Type u}
variable {α : Type u}
variable {s : Set α}
def mem (a : α) (s : Set α) := s a
infix:55 "∈" => Set.mem
notation:55 x "∉" s => ¬ x ∈ s
-- A set coerces to the type of its elements (as a subtype).
instance : CoeSort (Set α) (Type u) where
coe s := Subtype s
-- Extensionality: sets with the same members are equal.
theorem ext {s t : Set α} (h : ∀ x, x ∈ s ↔ x ∈ t) : s = t :=
funext <| λ x => propext <| h x
-- Declaring the index category
declare_syntax_cat index
syntax ident : index
syntax ident ":" term : index
syntax ident "∈" term : index
-- Notation for sets
syntax "{" index "|" term "}" : term
-- NOTE(review): the `{x ∈ s | p}` rule expands to an implication
-- (x ∈ s → p), not a conjunction, so it also contains every x ∉ s
-- vacuously — confirm this is intended.
macro_rules
| `({ $x:ident : $t | $p }) => `(setOf (λ ($x:ident : $t) => $p))
| `({ $x:ident | $p }) => `(setOf (λ ($x:ident) => $p))
| `({ $x:ident ∈ $s | $p }) => `(setOf (λ $x => $x ∈ $s → $p))
def union (s t : Set α) : Set α := { x : α | x ∈ s ∨ x ∈ t }
def inter (s t : Set α) : Set α := { x : α | x ∈ s ∧ x ∈ t }
theorem unionDef (s t : Set α) : union s t = λ x => s x ∨ t x := rfl
theorem interDef (s t : Set α) : inter s t = λ x => s x ∧ t x := rfl
infix:60 "∪" => Set.union
infix:60 "∩" => Set.inter
-- NOTE(review): `Union` uses `t ∈ s → t x`; the usual definition uses
-- `t ∈ s ∧ t x` — as written the existential is satisfiable vacuously
-- by any t ∉ s.  Confirm intended.
def Union (s : Set (Set α)) : Set α := { x : α | ∃ t : Set α, t ∈ s → t x }
def Inter (s : Set (Set α)) : Set α := { x : α | ∀ t : Set α, t ∈ s → t x }
def UnionDef (s : Set (Set α)) : Union s = λ x => ∃ t : Set α, t ∈ s → t x := rfl
def InterDef (s : Set (Set α)) : Inter s = λ x => ∀ t : Set α, t ∈ s → t x := rfl
syntax "⋃" index "," term : term
syntax "⋂" index "," term : term
macro_rules
| `(⋃ $s:ident ∈ $c, $s) => `(Union $c)
| `(⋂ $s:ident ∈ $c, $s) => `(Inter $c)
-- variables {s : Set (Set α)}
-- #check ⋂ t ∈ s, t
-- Notation for ∀ x ∈ s, p and ∃ x ∈ s, p
syntax "∀" index "," term : term
syntax "∃" index "," term : term
macro_rules
| `(∀ $x:ident ∈ $s, $p) => `(∀ $x:ident, $x ∈ $s → $p)
| `(∃ $x:ident ∈ $s, $p) => `(∃ $x:ident, $x ∈ $s ∧ $p)
def Subset (s t : Set α) := ∀ x ∈ s, x ∈ t
infix:50 "⊆" => Subset
theorem Subset.def {s t : Set α} : s ⊆ t ↔ ∀ x ∈ s, x ∈ t := Iff.rfl
namespace Subset
theorem refl {s : Set α} : s ⊆ s := λ _ hx => hx
-- NOTE(review): the conclusion is `s ⊆ t`, which is already a hypothesis;
-- transitivity should conclude `s ⊆ v` (proof: λ x hx => htv x (hst x hx)).
-- Confirm and fix upstream.
theorem trans {s t v : Set α} (hst : s ⊆ t) (htv : t ⊆ v) : s ⊆ t :=
λ x hx => hst x hx
theorem antisymm {s t : Set α} (hst : s ⊆ t) (hts : t ⊆ s) : s = t :=
Set.ext λ x => ⟨ λ hx => hst x hx, λ hx => hts x hx ⟩
theorem antisymmIff {s t : Set α} : s = t ↔ s ⊆ t ∧ t ⊆ s :=
⟨ by { intro hst; subst hst; exact ⟨ refl, refl ⟩ },
λ ⟨ hst, hts ⟩ => antisymm hst hts ⟩
-- ↓ Uses classical logic
theorem notSubset : ¬ s ⊆ t ↔ ∃ x ∈ s, x ∉ t := by
apply Iff.intro;
{ intro hst;
rw Classical.Exists.notAnd;
apply Classical.notForall;
exact λ h => hst λ x hx => h x hx }
{ intro h hst;
let ⟨ x, ⟨ hxs, hxt ⟩ ⟩ := h;
exact hxt <| hst x hxs }
end Subset
theorem memEmptySet {x : α} (h : x ∈ ∅) : False := h
@[simp] theorem memEmptySetIff : (∃ (x : α), x ∈ ∅) ↔ False :=
Iff.intro (λ h => h.2) False.elim
@[simp] theorem setOfFalse : { a : α | False } = ∅ := rfl
-- The universal set: every element of α belongs to it.
def univ : Set α := { x | True }
@[simp] theorem memUniv (x : α) : x ∈ univ := True.intro
theorem Subset.subsetUniv {s : Set α} : s ⊆ univ := λ x _ => memUniv x
theorem Subset.univSubsetIff {s : Set α} : univ ⊆ s ↔ univ = s := by
apply Iff.intro λ hs => Subset.antisymm hs Subset.subsetUniv
{ intro h; subst h; exact Subset.refl }
theorem eqUnivIff {s : Set α} : s = univ ↔ ∀ x, x ∈ s := by
apply Iff.intro
{ intro h x; subst h; exact memUniv x }
{ exact λ h => ext λ x => Iff.intro (λ _ => memUniv _) λ _ => h x }
/-! ### Unions and Intersections -/
-- Tactic shorthand: ext, intro the element, split the iff.
macro "extia" x:term : tactic => `(tactic| apply ext; intro $x; apply Iff.intro)
theorem unionSelf {s : Set α} : s ∪ s = s := by
extia x
{ intro hx; cases hx; assumption; assumption }
{ exact Or.inl }
theorem unionEmpty {s : Set α} : s ∪ ∅ = s := by
extia x
{ intro hx; cases hx with
| inl => assumption
| inr h => exact False.elim <| memEmptySet h }
{ exact Or.inl }
theorem unionSymm {s t : Set α} : s ∪ t = t ∪ s := by
extia x
allGoals { intro hx; cases hx with
| inl hx => exact Or.inr hx
| inr hx => exact Or.inl hx }
theorem emptyUnion {s : Set α} : ∅ ∪ s = s := by
rw unionSymm; exact unionEmpty
theorem unionAssoc {s t w : Set α} : s ∪ t ∪ w = s ∪ (t ∪ w) := by
extia x
{ intro hx; cases hx with
| inr hx => exact Or.inr <| Or.inr hx
| inl hx => cases hx with
| inr hx => exact Or.inr <| Or.inl hx
| inl hx => exact Or.inl hx }
{ intro hx; cases hx with
| inl hx => exact Or.inl <| Or.inl hx
| inr hx => cases hx with
| inr hx => exact Or.inr hx
| inl hx => exact Or.inl <| Or.inr hx }
end Set
"alphanum_fraction": null,
"author": "kbuzzard",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/kbuzzard-lean4-filters/lean4-filters-29f90055b7a2341c86d924954463c439bd128fb7/other_peoples_work/jason_set.lean",
"reason": null,
"repo": "lean4-filters",
"save_path": "github-repos/lean/kbuzzard-lean4-filters",
"sha": "29f90055b7a2341c86d924954463c439bd128fb7",
"size": null
} |
# Possible operators to add if we use block object
# On(x,y) means block x is on top of block y
# OnTable(x) --- block x is on the table
# Clear(x) --- nothing is on top of block x
# Holding(x) --- robot arm is holding block x
# ArmEmpty() --- robot arm/hand is not holding anything (block in this world)
# How complex are these blocks going to get? If they're going to have more than one
# property (e.g. color and letter) then they need to be objects
#!/usr/bin/env python
#coding: utf-8
'''
Created on Feb 1, 2018
@author: cs540 team 1
'''
# Python 2/3 compatibility: make builtins behave like Python 3 everywhere below.
from __future__ import (print_function, absolute_import,
                        division, unicode_literals)
try:
    from future_builtins import * # @UnusedWildImport
    import codecs # python-2
    # Rebind builtins so py2 matches py3 semantics (unicode open, lazy range).
    open = codecs.open # @ReservedAssignment
    input = raw_input # @ReservedAssignment
    range = xrange # @ReservedAssignment
except ImportError:
    pass # python-3
import numpy as np
# Print full arrays, never elided with "..." (used by BlockWorld.__str__).
np.set_printoptions(threshold=np.inf)
from datetime import datetime
import sys, logging, traceback
import random
import math
# Every world operation appends a debug record to this log file.
logging.basicConfig(filename='BlockWorldRun.log',level=logging.DEBUG)
# World dimensions: x and z span [-50, 50] (101 cells), y spans [0, 50] (51 cells).
Xsize = 101
Ysize = 51
Zsize = 101
class BlockWorld:
    """Simulated 3D block world with a single drone.

    World coordinates: x, z in [-50, 50], y (height) in [0, 50].  Cell
    contents are stored as small integers in a numpy array (0 = empty,
    1-4 = red/green/blue/yellow blocks, 9 = drone); the i2a/a2i dicts
    translate between integer codes and color names.  The drone moves one
    cell at a time, can attach to the block directly beneath it, carry it,
    and detach it onto the lowest empty cell of the column below.  All
    operations are logged via the module-level logging configuration.
    """

    def __init__(self):
        self.arr = np.zeros((Xsize, Ysize, Zsize), dtype=int)
        self.dronePos = [None, None, None]  # [x, y, z] in world coordinates
        self.attached = None                # color name of carried block, or None
        self.maxY = 0                       # highest occupied level (for printouts)
        self.i2a = {1: 'red', 2: 'green', 3: 'blue',
                    4: 'yellow', 9: 'drone', 0: 0}
        self.a2i = {'red': 1, 'green': 2, 'blue': 3,
                    'yellow': 4, 'drone': 9, 0: 0}
        logging.debug('')
        logging.debug('')
        initString = 'Created block world object at ' + \
            datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n'
        logging.debug(initString)

    def _validMove(self, newX, newY, newZ):
        """Raise ValueError if the drone (plus any attached block) cannot
        occupy (newX, newY, newZ); return True otherwise."""
        if max(abs(newX), newY, abs(newZ)) > 50:
            raise ValueError('position out of bounds')
        elif newY < 0 or (newY == 0 and self.attached):
            # a carried block hangs at newY-1, so newY == 0 is too low
            raise ValueError('Y value too low')
        elif self._rd(newX, newY, newZ) != 0:
            raise ValueError('new drone position occupied')
        elif self.attached and self._rd(newX, newY-1, newZ) != 0:
            raise ValueError('new attached block position occupied')
        return True

    def _wr(self, x, y, z, val):
        """Write a cell value at world coordinates (offset into the array);
        writing the drone code also records its position."""
        self.arr[x+Xsize // 2, y, z+Zsize // 2] = val
        if val == self.a2i['drone']:
            self.dronePos = [x, y, z]

    def _rd(self, x, y, z):
        """Read the cell at world coordinates, translated to a color name
        ('red', ..., 'drone') or the integer 0 for an empty cell."""
        return self.i2a[self.arr[x+Xsize // 2, y, z+Zsize // 2]]

    def move(self, dx, dy, dz):
        """Move the drone (and any attached block) by at most one cell per
        axis.  Raises ValueError for longer moves or invalid destinations."""
        if max(abs(dx), abs(dy), abs(dz)) > 1:
            raise ValueError('drone only moves to adjacent blocks')
        origX, origY, origZ = origDronePos = self.dronePos
        newX, newY, newZ = origX+dx, origY+dy, origZ+dz
        if self._validMove(newX, newY, newZ):
            if self.attached:
                self._wr(origX, origY-1, origZ, 0)  # clear old attach position
            self._wr(origX, origY, origZ, 0)        # clear old drone position
            self._wr(newX, newY, newZ, self.a2i['drone'])  # new drone position
            self.maxY = max(newY, self.maxY)
            if self.attached:                       # write new attach position
                self._wr(newX, newY-1, newZ, self.a2i[self.attached])
            moveString = 'Drone with attachment ' + str(self.attached) + \
                ' moved from ' + str(origDronePos) + ' to ' + str(self.dronePos) + '\n'
        else:
            # Defensive only: _validMove currently raises instead of
            # returning False, so this branch is unreachable.
            moveString = '**** INVALID MOVE ****'
        logging.debug(moveString)

    def attach(self):
        """Attach to the block directly beneath the drone.

        Raises ValueError when the cell below is empty.  (The original
        assignment of a failure string after the raise was unreachable
        and has been removed.)
        """
        x, y, z = self.dronePos
        if self._rd(x, y-1, z) == 0:
            raise ValueError('cannot attach to drone at ' + str(self.dronePos))
        self.attached = self._rd(x, y-1, z)
        attachString = 'Attached to ' + str(self.attached) + '\n'
        logging.debug(attachString)

    def detach(self):
        """Drop the carried block into the lowest empty cell in the column
        below the drone.  Raises ValueError if nothing is attached; logs
        (without raising) a failure string if the column has no free cell.
        """
        detachString = 'FAILED Detach'
        x, y, z = self.dronePos
        if self.attached is None:
            raise ValueError('cannot detach if already detached')
        for newY in range(y):
            if self._rd(x, newY, z) == 0:
                self._wr(x, y-1, z, 0)  # clear the carried-block cell
                self._wr(x, newY, z, self.a2i[self.attached])
                detachString = 'detached into position' + str((x, newY, z)) + '\n'
                self.attached = None
                break
        logging.debug(detachString)

    def speak(self, strng='placeholder'):
        """Log and return the given string (placeholder speech action)."""
        speakString = 'Speak String: ' + strng
        logging.debug(speakString)
        return strng

    def _verifyStates(self, blockStates):
        """Validate a {(x,y,z): name} dict: positions in range, known block
        types, exactly one drone, no floating blocks, no block resting on
        the drone.  Raises ValueError on any violation; returns True."""
        droneCount = 0
        for (x, y, z), block in blockStates.items():
            if max(abs(x), y, abs(z)) > 50 or y < 0:
                raise ValueError('invalid block position: ', (x, y, z))
            if block == 'drone':
                self.dronePos = [x, y, z]
                droneCount += 1
            elif block not in ['red', 'green', 'blue', 'yellow']:
                raise ValueError('invalid block type: ', block)
            elif y > 0:
                # every elevated block must rest on another block
                if (x, y-1, z) not in blockStates:
                    raise ValueError('floating block not allowed')
                elif blockStates[(x, y-1, z)] == 'drone':
                    raise ValueError('block on drone not allowed')
        if droneCount != 1:
            raise ValueError('only one drone allowed')
        return True

    def initialize(self, filename):
        """Reset the world and load it from a CSV file of "x,y,z,block"
        lines.  Raises ValueError on duplicate positions or invalid states.
        """
        self.arr = np.zeros((Xsize, Ysize, Zsize), dtype=int)
        blockStates = {}
        with open(filename, 'r', encoding='utf-8') as infile:
            for line in infile:
                x, y, z, block = line.split(',')
                coords = (int(x), int(y), int(z))
                if coords in blockStates:
                    raise ValueError('duplicate position: ', coords)
                blockStates[coords] = block.replace('\n', '')
        if self._verifyStates(blockStates):
            for (x, y, z), block in blockStates.items():
                self._wr(x, y, z, self.a2i[block])
                if block != 'drone':  # drone height is irrelevant to printouts
                    self.maxY = max(y, self.maxY)
            initializeString = 'Initialized from file ' + str(filename) + '\n'
        else:
            # Defensive: _verifyStates raises rather than returning False.
            # (Fixed typo: original log string read 'initalize'.)
            initializeString = 'Failed to initialize' + '\n'
        logging.debug(initializeString)

    def state(self):
        """Return {(x, y, z): name} for every non-empty cell at or below
        max(drone height, highest block level)."""
        dic = {}
        yDrone = self.dronePos[1]
        yStart = max(yDrone, self.maxY)
        for y in range(yStart, -1, -1):
            for (x, z), val in np.ndenumerate(self.arr[:, y, :]):
                if val > 0:
                    dic[(x - Xsize//2, y, z - Zsize//2)] = self.i2a[val]
        logging.debug('generated State\n')
        return dic

    def _saveState(self, state, fileName='savedStates.txt'):
        """Write a state dict (as produced by state()) to fileName, one
        "x,y,z,block" line per cell.  The with-statement closes the file;
        the original's extra outfile.close() was redundant and removed."""
        with open(fileName, 'w') as outfile:
            for (x, y, z), block in state.items():
                strng = str(x) + ',' + str(y) + ',' + str(z) + ',' + block + '\n'
                outfile.write(strng)
        saveString = 'saved State to file ' + str(fileName) + '\n'
        print(saveString)
        logging.debug(saveString)

    def printSingleLevel(self, levelNum):
        """Print the ASCII rendering of exactly one horizontal level."""
        print(self.__str__(levelNum, levelNum-1))

    def __str__(self, start=None, end=None):
        """Render levels start down to end+1 (top first) as ASCII grids:
        '.' empty, R/G/B/Y blocks, D drone.  Defaults cover maxY..0."""
        if start is None:
            start = self.maxY
        if end is None:
            end = -1
        s = ''
        for level in range(start, end, -1):
            s += '\n\nLevel ' + str(level) + ':\n'
            # Reformat numpy's printed matrix into a compact symbol grid.
            s += str(self.arr[:, level, :]).replace(' ', '').replace('\n', '') \
                .replace(']', '\n').replace('0', '.').replace('[', '') \
                .replace('1', 'R').replace('2', 'G').replace('3', 'B') \
                .replace('4', 'Y').replace('9', 'D')
        return s
# Z is across, top-left corner is -50,-50
if __name__ == '__main__':
    # Demo run: load a world from config1.txt, carry a block a few cells,
    # drop it, save the resulting state.  Errors are logged, not printed.
    try:
        bw = BlockWorld()
        #bw.createInitialStates()
        #bw._saveState(bw.state())
        #print("Saved states")
        bw.initialize('config1.txt')  # requires config1.txt in the cwd
        bw.move(0, 1, 0)
        bw.move(0, 1, 1)
        bw.attach()  # grab the block directly beneath the drone
        bw.move(0, 0, 1)
        bw.move(0, 0, 1)
        bw.move(0, 0, 1)
        bw.move(1, 0, 1)
        bw.detach()  # drop the block into the column below
        bw._saveState(bw.state())
        bw.speak('testing speak')
        bw.move(1, 1, 1)
        #print(bw)
        #bw.printSingleLevel(0)
    except Exception as e:
        # Log the full traceback to BlockWorldRun.log before exiting.
        logging.error(traceback.format_exc())
        sys.exit()
    else:
        print('\n ****** finished running without errors')
| {
"alphanum_fraction": 0.5225634771,
"author": null,
"avg_line_length": 38.0548523207,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "19b423c27a796bcbd06dd408fd0763b96cd234df",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2a1dda69681b4f9f380f9921831f5a6ae3e1dddf",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "suttondp/CS530",
"max_forks_repo_path": "BlockWorldSim.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2a1dda69681b4f9f380f9921831f5a6ae3e1dddf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "suttondp/CS530",
"max_issues_repo_path": "BlockWorldSim.py",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2a1dda69681b4f9f380f9921831f5a6ae3e1dddf",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "suttondp/CS530",
"max_stars_repo_path": "BlockWorldSim.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2334,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9019
} |
[STATEMENT]
lemma quotient_map_imp_continuous_closed:
assumes T: "f ` S \<subseteq> T"
and ope: "\<And>U. U \<subseteq> T
\<Longrightarrow> (closedin (top_of_set S) (S \<inter> f -` U) \<longleftrightarrow>
closedin (top_of_set T) U)"
shows "continuous_on S f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on S f
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. continuous_on S f
[PROOF STEP]
have [simp]: "S \<inter> f -` f ` S = S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. S \<inter> f -` f ` S = S
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
S \<inter> f -` f ` S = S
goal (1 subgoal):
1. continuous_on S f
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on S f
[PROOF STEP]
by (meson T closedin_imp_subset continuous_on_closed_gen ope)
[PROOF STATE]
proof (state)
this:
continuous_on S f
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 6,
"llama_tokens": 407,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
[STATEMENT]
lemma infer_v_g_weakening:
fixes e::e and \<Gamma>'::\<Gamma> and v::v
assumes "\<Theta>; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> \<tau>" and "toSet \<Gamma> \<subseteq> toSet \<Gamma>'" and "\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'"
shows "\<Theta>; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> \<tau>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> \<tau>
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> \<tau>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> \<tau>
[PROOF STEP]
proof(nominal_induct avoiding: \<Gamma>' rule: infer_v.strong_induct)
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. \<And>\<Theta> \<B> \<Gamma> b c x z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma> ; Some (b, c) = lookup \<Gamma> x; atom z \<sharp> x; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ x ]\<^sup>v \<Rightarrow> \<lbrace> z : b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ x ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
2. \<And>\<Theta> \<B> \<Gamma> l \<tau> \<Gamma>'. \<lbrakk> \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma> ; \<turnstile> l \<Rightarrow> \<tau>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ l ]\<^sup>v \<Rightarrow> \<tau>
3. \<And>z v1 v2 \<Theta> \<B> \<Gamma> t1 t2 \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; atom z \<sharp> v1; atom z \<sharp> v2; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v1 \<Rightarrow> t1; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v2 \<Rightarrow> t2; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
4. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
5. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
case (infer_v_varI \<Theta> \<B> \<Gamma> b c x' z)
[PROOF STATE]
proof (state)
this:
atom z \<sharp> \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
Some (b, c) = lookup \<Gamma> x'
atom z \<sharp> x'
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (5 subgoals):
1. \<And>\<Theta> \<B> \<Gamma> b c x z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma> ; Some (b, c) = lookup \<Gamma> x; atom z \<sharp> x; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ x ]\<^sup>v \<Rightarrow> \<lbrace> z : b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ x ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
2. \<And>\<Theta> \<B> \<Gamma> l \<tau> \<Gamma>'. \<lbrakk> \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma> ; \<turnstile> l \<Rightarrow> \<tau>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ l ]\<^sup>v \<Rightarrow> \<tau>
3. \<And>z v1 v2 \<Theta> \<B> \<Gamma> t1 t2 \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; atom z \<sharp> v1; atom z \<sharp> v2; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v1 \<Rightarrow> t1; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v2 \<Rightarrow> t2; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
4. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
5. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ x' ]\<^sup>v \<Rightarrow> \<lbrace> z : b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ x' ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
2. Some (b, ?c) = lookup \<Gamma>' x'
3. atom z \<sharp> x'
4. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open> \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
[PROOF STEP]
using infer_v_varI
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
Some (b, c) = lookup \<Gamma> x'
atom z \<sharp> x'
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (3 subgoals):
1. Some (b, ?c) = lookup \<Gamma>' x'
2. atom z \<sharp> x'
3. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open>Some (b, c) = lookup \<Gamma>' x'\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Some (b, c) = lookup \<Gamma>' x'
[PROOF STEP]
using infer_v_varI lookup_weakening
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
Some (b, c) = lookup \<Gamma> x'
atom z \<sharp> x'
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
\<lbrakk>Some (?b, ?c) = lookup ?\<Gamma> ?x; toSet ?\<Gamma> \<subseteq> toSet ?\<Gamma>'; ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma>' ; ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> \<rbrakk> \<Longrightarrow> Some (?b, ?c) = lookup ?\<Gamma>' ?x
goal (1 subgoal):
1. Some (b, c) = lookup \<Gamma>' x'
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
Some (b, c) = lookup \<Gamma>' x'
goal (2 subgoals):
1. atom z \<sharp> x'
2. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open>atom z \<sharp> x'\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atom z \<sharp> x'
[PROOF STEP]
using infer_v_varI
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
Some (b, c) = lookup \<Gamma> x'
atom z \<sharp> x'
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. atom z \<sharp> x'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
atom z \<sharp> x'
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open>atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
using infer_v_varI
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
Some (b, c) = lookup \<Gamma> x'
atom z \<sharp> x'
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ x' ]\<^sup>v \<Rightarrow> \<lbrace> z : b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ x' ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
goal (4 subgoals):
1. \<And>\<Theta> \<B> \<Gamma> l \<tau> \<Gamma>'. \<lbrakk> \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma> ; \<turnstile> l \<Rightarrow> \<tau>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ l ]\<^sup>v \<Rightarrow> \<tau>
2. \<And>z v1 v2 \<Theta> \<B> \<Gamma> t1 t2 \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; atom z \<sharp> v1; atom z \<sharp> v2; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v1 \<Rightarrow> t1; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v2 \<Rightarrow> t2; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
3. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
4. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>\<Theta> \<B> \<Gamma> l \<tau> \<Gamma>'. \<lbrakk> \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma> ; \<turnstile> l \<Rightarrow> \<tau>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ l ]\<^sup>v \<Rightarrow> \<tau>
2. \<And>z v1 v2 \<Theta> \<B> \<Gamma> t1 t2 \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; atom z \<sharp> v1; atom z \<sharp> v2; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v1 \<Rightarrow> t1; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v2 \<Rightarrow> t2; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
3. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
4. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
case (infer_v_litI \<Theta> \<B> \<Gamma> l \<tau>)
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
\<turnstile> l \<Rightarrow> \<tau>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (4 subgoals):
1. \<And>\<Theta> \<B> \<Gamma> l \<tau> \<Gamma>'. \<lbrakk> \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma> ; \<turnstile> l \<Rightarrow> \<tau>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ l ]\<^sup>v \<Rightarrow> \<tau>
2. \<And>z v1 v2 \<Theta> \<B> \<Gamma> t1 t2 \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; atom z \<sharp> v1; atom z \<sharp> v2; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v1 \<Rightarrow> t1; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v2 \<Rightarrow> t2; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
3. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
4. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
\<turnstile> l \<Rightarrow> \<tau>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
\<turnstile> l \<Rightarrow> \<tau>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ l ]\<^sup>v \<Rightarrow> \<tau>
[PROOF STEP]
using infer_v.intros
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>
\<turnstile> l \<Rightarrow> \<tau>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
\<lbrakk> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> ; Some (?b, ?c) = lookup ?\<Gamma> ?x; atom ?z \<sharp> ?x; atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>)\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> [ ?x ]\<^sup>v \<Rightarrow> \<lbrace> ?z : ?b | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ ?x ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
\<lbrakk> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> ; \<turnstile> ?l \<Rightarrow> ?\<tau>\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> [ ?l ]\<^sup>v \<Rightarrow> ?\<tau>
\<lbrakk>atom ?z \<sharp> (?v1.0, ?v2.0); atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>); ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v1.0 \<Rightarrow> ?t1.0; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v2.0 \<Rightarrow> ?t2.0\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> [ ?v1.0 , ?v2.0 ]\<^sup>v \<Rightarrow> \<lbrace> ?z : [ b_of ?t1.0 , b_of ?t2.0 ]\<^sup>b | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ ?v1.0 , ?v2.0 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
\<lbrakk>AF_typedef ?s ?dclist \<in> set ?\<Theta>; (?dc, ?tc) \<in> set ?dclist; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v \<Rightarrow> ?tv; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?tv \<lesssim> ?tc; atom ?z \<sharp> ?v; atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>)\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> V_cons ?s ?dc ?v \<Rightarrow> \<lbrace> ?z : B_id ?s | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons ?s ?dc ?v ]\<^sup>c\<^sup>e \<rbrace>
\<lbrakk>AF_typedef_poly ?s ?bv ?dclist \<in> set ?\<Theta>; (?dc, ?tc) \<in> set ?dclist; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v \<Rightarrow> ?tv; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?tv \<lesssim> ?tc[?bv::=?b]\<^sub>\<tau>\<^sub>b; atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>, ?v, ?b); atom ?bv \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>, ?v, ?b); ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> V_consp ?s ?dc ?b ?v \<Rightarrow> \<lbrace> ?z : B_app ?s ?b | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp ?s ?dc ?b ?v ]\<^sup>c\<^sup>e \<rbrace>
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ l ]\<^sup>v \<Rightarrow> \<tau>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ l ]\<^sup>v \<Rightarrow> \<tau>
goal (3 subgoals):
1. \<And>z v1 v2 \<Theta> \<B> \<Gamma> t1 t2 \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; atom z \<sharp> v1; atom z \<sharp> v2; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v1 \<Rightarrow> t1; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v2 \<Rightarrow> t2; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
2. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
3. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>z v1 v2 \<Theta> \<B> \<Gamma> t1 t2 \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; atom z \<sharp> v1; atom z \<sharp> v2; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v1 \<Rightarrow> t1; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v2 \<Rightarrow> t2; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
2. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
3. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
case (infer_v_pairI z v1 v2 \<Theta> \<B> \<Gamma> t1 t2)
[PROOF STATE]
proof (state)
this:
atom z \<sharp> \<Gamma>'
atom z \<sharp> v1
atom z \<sharp> v2
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v1 \<Rightarrow> t1
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v2 \<Rightarrow> t2
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (3 subgoals):
1. \<And>z v1 v2 \<Theta> \<B> \<Gamma> t1 t2 \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; atom z \<sharp> v1; atom z \<sharp> v2; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v1 \<Rightarrow> t1; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v2 \<Rightarrow> t2; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
2. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
3. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
atom z \<sharp> \<Gamma>'
atom z \<sharp> v1
atom z \<sharp> v2
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v1 \<Rightarrow> t1
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v2 \<Rightarrow> t2
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
atom z \<sharp> v1
atom z \<sharp> v2
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v1 \<Rightarrow> t1
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v2 \<Rightarrow> t2
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
using infer_v.intros
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
atom z \<sharp> v1
atom z \<sharp> v2
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v1 \<Rightarrow> t1
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v1 \<Rightarrow> t1
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v2 \<Rightarrow> t2
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v2 \<Rightarrow> t2
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
\<lbrakk> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> ; Some (?b, ?c) = lookup ?\<Gamma> ?x; atom ?z \<sharp> ?x; atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>)\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> [ ?x ]\<^sup>v \<Rightarrow> \<lbrace> ?z : ?b | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ ?x ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
\<lbrakk> ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma> ; \<turnstile> ?l \<Rightarrow> ?\<tau>\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> [ ?l ]\<^sup>v \<Rightarrow> ?\<tau>
\<lbrakk>atom ?z \<sharp> (?v1.0, ?v2.0); atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>); ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v1.0 \<Rightarrow> ?t1.0; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v2.0 \<Rightarrow> ?t2.0\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> [ ?v1.0 , ?v2.0 ]\<^sup>v \<Rightarrow> \<lbrace> ?z : [ b_of ?t1.0 , b_of ?t2.0 ]\<^sup>b | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ ?v1.0 , ?v2.0 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
\<lbrakk>AF_typedef ?s ?dclist \<in> set ?\<Theta>; (?dc, ?tc) \<in> set ?dclist; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v \<Rightarrow> ?tv; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?tv \<lesssim> ?tc; atom ?z \<sharp> ?v; atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>)\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> V_cons ?s ?dc ?v \<Rightarrow> \<lbrace> ?z : B_id ?s | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons ?s ?dc ?v ]\<^sup>c\<^sup>e \<rbrace>
\<lbrakk>AF_typedef_poly ?s ?bv ?dclist \<in> set ?\<Theta>; (?dc, ?tc) \<in> set ?dclist; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v \<Rightarrow> ?tv; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?tv \<lesssim> ?tc[?bv::=?b]\<^sub>\<tau>\<^sub>b; atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>, ?v, ?b); atom ?bv \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>, ?v, ?b); ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> V_consp ?s ?dc ?b ?v \<Rightarrow> \<lbrace> ?z : B_app ?s ?b | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp ?s ?dc ?b ?v ]\<^sup>c\<^sup>e \<rbrace>
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> [ v1 , v2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of t1 , b_of t2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v1 , v2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
goal (2 subgoals):
1. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
2. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
2. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
case (infer_v_consI s dclist \<Theta> dc tc \<B> \<Gamma> v tv z)
[PROOF STATE]
proof (state)
this:
atom z \<sharp> \<Gamma>'
AF_typedef s dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc
atom z \<sharp> v
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (2 subgoals):
1. \<And>s dclist \<Theta> dc tc \<B> \<Gamma> v tv z \<Gamma>'. \<lbrakk>atom z \<sharp> \<Gamma>'; AF_typedef s dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc; atom z \<sharp> v; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
2. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (6 subgoals):
1. AF_typedef s ?dclist \<in> set \<Theta>
2. (dc, ?tc) \<in> set ?dclist
3. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> ?tv
4. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> ?tv \<lesssim> ?tc
5. atom z \<sharp> v
6. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open>AF_typedef s dclist \<in> set \<Theta>\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. AF_typedef s dclist \<in> set \<Theta>
[PROOF STEP]
using infer_v_consI
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
AF_typedef s dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc
atom z \<sharp> v
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. AF_typedef s dclist \<in> set \<Theta>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
AF_typedef s dclist \<in> set \<Theta>
goal (5 subgoals):
1. (dc, ?tc) \<in> set dclist
2. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> ?tv
3. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> ?tv \<lesssim> ?tc
4. atom z \<sharp> v
5. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open>(dc, tc) \<in> set dclist\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (dc, tc) \<in> set dclist
[PROOF STEP]
using infer_v_consI
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
AF_typedef s dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc
atom z \<sharp> v
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. (dc, tc) \<in> set dclist
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(dc, tc) \<in> set dclist
goal (4 subgoals):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> ?tv
2. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> ?tv \<lesssim> tc
3. atom z \<sharp> v
4. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open> \<Theta>; \<B>; \<Gamma>' \<turnstile> v \<Rightarrow> tv\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> tv
[PROOF STEP]
using infer_v_consI
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
AF_typedef s dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc
atom z \<sharp> v
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> tv
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> tv
goal (3 subgoals):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> tv \<lesssim> tc
2. atom z \<sharp> v
3. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open>\<Theta>; \<B>; \<Gamma>' \<turnstile> tv \<lesssim> tc\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> tv \<lesssim> tc
[PROOF STEP]
using infer_v_consI subtype_weakening
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
AF_typedef s dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc
atom z \<sharp> v
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
\<lbrakk>?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?\<tau>1.0 \<lesssim> ?\<tau>2.0; toSet ?\<Gamma> \<subseteq> toSet ?\<Gamma>'; ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma>' \<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma>' \<turnstile> ?\<tau>1.0 \<lesssim> ?\<tau>2.0
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> tv \<lesssim> tc
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> tv \<lesssim> tc
goal (2 subgoals):
1. atom z \<sharp> v
2. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open>atom z \<sharp> v\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atom z \<sharp> v
[PROOF STEP]
using infer_v_consI
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
AF_typedef s dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc
atom z \<sharp> v
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. atom z \<sharp> v
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
atom z \<sharp> v
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
show \<open>atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
using infer_v_consI
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> \<Gamma>'
AF_typedef s dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc
atom z \<sharp> v
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
atom z \<sharp> (\<Theta>, \<B>, \<Gamma>')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_cons s dc v \<Rightarrow> \<lbrace> z : B_id s | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_cons s dc v ]\<^sup>c\<^sup>e \<rbrace>
goal (1 subgoal):
1. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
case (infer_v_conspI s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z)
[PROOF STATE]
proof (state)
this:
atom bv \<sharp> \<Gamma>'
atom z \<sharp> \<Gamma>'
AF_typedef_poly s bv dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
atom z \<sharp> v
atom z \<sharp> b
atom bv \<sharp> \<Theta>
atom bv \<sharp> \<B>
atom bv \<sharp> \<Gamma>
atom bv \<sharp> v
atom bv \<sharp> b
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. \<And>s bv dclist \<Theta> dc tc \<B> \<Gamma> v tv b z \<Gamma>'. \<lbrakk>atom bv \<sharp> \<Gamma>'; atom z \<sharp> \<Gamma>'; AF_typedef_poly s bv dclist \<in> set \<Theta>; (dc, tc) \<in> set dclist; \<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv; \<And>b. \<lbrakk>toSet \<Gamma> \<subseteq> toSet b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; b \<turnstile> v \<Rightarrow> tv; \<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b; atom z \<sharp> \<Theta>; atom z \<sharp> \<B>; atom z \<sharp> \<Gamma>; atom z \<sharp> v; atom z \<sharp> b; atom bv \<sharp> \<Theta>; atom bv \<sharp> \<B>; atom bv \<sharp> \<Gamma>; atom bv \<sharp> v; atom bv \<sharp> b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b ; toSet \<Gamma> \<subseteq> toSet \<Gamma>'; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>' \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. AF_typedef_poly s ?bv ?dclist \<in> set \<Theta>
2. (dc, ?tc) \<in> set ?dclist
3. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> ?tv
4. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> ?tv \<lesssim> ?tc[?bv::=b]\<^sub>\<tau>\<^sub>b
5. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
6. atom ?bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
7. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
show \<open>AF_typedef_poly s bv dclist \<in> set \<Theta>\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. AF_typedef_poly s bv dclist \<in> set \<Theta>
[PROOF STEP]
using infer_v_conspI
[PROOF STATE]
proof (prove)
using this:
atom bv \<sharp> \<Gamma>'
atom z \<sharp> \<Gamma>'
AF_typedef_poly s bv dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
atom z \<sharp> v
atom z \<sharp> b
atom bv \<sharp> \<Theta>
atom bv \<sharp> \<B>
atom bv \<sharp> \<Gamma>
atom bv \<sharp> v
atom bv \<sharp> b
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. AF_typedef_poly s bv dclist \<in> set \<Theta>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
AF_typedef_poly s bv dclist \<in> set \<Theta>
goal (6 subgoals):
1. (dc, ?tc) \<in> set dclist
2. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> ?tv
3. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> ?tv \<lesssim> ?tc[bv::=b]\<^sub>\<tau>\<^sub>b
4. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
5. atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
6. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
show \<open>(dc, tc) \<in> set dclist\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (dc, tc) \<in> set dclist
[PROOF STEP]
using infer_v_conspI
[PROOF STATE]
proof (prove)
using this:
atom bv \<sharp> \<Gamma>'
atom z \<sharp> \<Gamma>'
AF_typedef_poly s bv dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
atom z \<sharp> v
atom z \<sharp> b
atom bv \<sharp> \<Theta>
atom bv \<sharp> \<B>
atom bv \<sharp> \<Gamma>
atom bv \<sharp> v
atom bv \<sharp> b
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. (dc, tc) \<in> set dclist
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(dc, tc) \<in> set dclist
goal (5 subgoals):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> ?tv
2. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> ?tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
3. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
4. atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
5. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
show \<open> \<Theta>; \<B>; \<Gamma>' \<turnstile> v \<Rightarrow> tv\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> tv
[PROOF STEP]
using infer_v_conspI
[PROOF STATE]
proof (prove)
using this:
atom bv \<sharp> \<Gamma>'
atom z \<sharp> \<Gamma>'
AF_typedef_poly s bv dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
atom z \<sharp> v
atom z \<sharp> b
atom bv \<sharp> \<Theta>
atom bv \<sharp> \<B>
atom bv \<sharp> \<Gamma>
atom bv \<sharp> v
atom bv \<sharp> b
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> tv
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> v \<Rightarrow> tv
goal (4 subgoals):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
2. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
3. atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
4. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
show \<open>\<Theta>; \<B>; \<Gamma>' \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
[PROOF STEP]
using infer_v_conspI subtype_weakening
[PROOF STATE]
proof (prove)
using this:
atom bv \<sharp> \<Gamma>'
atom z \<sharp> \<Gamma>'
AF_typedef_poly s bv dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
atom z \<sharp> v
atom z \<sharp> b
atom bv \<sharp> \<Theta>
atom bv \<sharp> \<B>
atom bv \<sharp> \<Gamma>
atom bv \<sharp> v
atom bv \<sharp> b
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
\<lbrakk>?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?\<tau>1.0 \<lesssim> ?\<tau>2.0; toSet ?\<Gamma> \<subseteq> toSet ?\<Gamma>'; ?\<Theta> ; ?\<B> \<turnstile>\<^sub>w\<^sub>f ?\<Gamma>' \<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma>' \<turnstile> ?\<tau>1.0 \<lesssim> ?\<tau>2.0
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma>' \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
goal (3 subgoals):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
2. atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
3. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
show \<open>atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
[PROOF STEP]
using infer_v_conspI
[PROOF STATE]
proof (prove)
using this:
atom bv \<sharp> \<Gamma>'
atom z \<sharp> \<Gamma>'
AF_typedef_poly s bv dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
atom z \<sharp> v
atom z \<sharp> b
atom bv \<sharp> \<Theta>
atom bv \<sharp> \<B>
atom bv \<sharp> \<Gamma>
atom bv \<sharp> v
atom bv \<sharp> b
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
atom z \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
goal (2 subgoals):
1. atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
2. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
show \<open>atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
[PROOF STEP]
using infer_v_conspI
[PROOF STATE]
proof (prove)
using this:
atom bv \<sharp> \<Gamma>'
atom z \<sharp> \<Gamma>'
AF_typedef_poly s bv dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
atom z \<sharp> v
atom z \<sharp> b
atom bv \<sharp> \<Theta>
atom bv \<sharp> \<B>
atom bv \<sharp> \<Gamma>
atom bv \<sharp> v
atom bv \<sharp> b
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
atom bv \<sharp> (\<Theta>, \<B>, \<Gamma>', v, b)
goal (1 subgoal):
1. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
show \<open> \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b \<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
using infer_v_conspI
[PROOF STATE]
proof (prove)
using this:
atom bv \<sharp> \<Gamma>'
atom z \<sharp> \<Gamma>'
AF_typedef_poly s bv dclist \<in> set \<Theta>
(dc, tc) \<in> set dclist
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v \<Rightarrow> tv
\<lbrakk>toSet \<Gamma> \<subseteq> toSet ?b; \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f ?b \<rbrakk> \<Longrightarrow> \<Theta> ; \<B> ; ?b \<turnstile> v \<Rightarrow> tv
\<Theta> ; \<B> ; \<Gamma> \<turnstile> tv \<lesssim> tc[bv::=b]\<^sub>\<tau>\<^sub>b
atom z \<sharp> \<Theta>
atom z \<sharp> \<B>
atom z \<sharp> \<Gamma>
atom z \<sharp> v
atom z \<sharp> b
atom bv \<sharp> \<Theta>
atom bv \<sharp> \<B>
atom bv \<sharp> \<Gamma>
atom bv \<sharp> v
atom bv \<sharp> b
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
toSet \<Gamma> \<subseteq> toSet \<Gamma>'
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f \<Gamma>'
goal (1 subgoal):
1. \<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> \<turnstile>\<^sub>w\<^sub>f b
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma>' \<turnstile> V_consp s dc b v \<Rightarrow> \<lbrace> z : B_app s b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ V_consp s dc b v ]\<^sup>c\<^sup>e \<rbrace>
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "MiniSail_TypingL",
"hexsha": null,
"include": null,
"lang": null,
"length": 80,
"llama_tokens": 30640,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
##############
#
# This example demonstrates the basics of using `equinox.jitf` and `equinox.gradf`.
#
# Here we'll use them to facilitate training a simple MLP: to automatically take gradients and jit with respect to
# all the jnp.arrays constituting the parameters. (But not with respect to anything else, like the choice of activation
# function -- as that isn't something we can differentiate/JIT anyway!)
#
#############
import functools as ft
import jax
import jax.numpy as jnp
import jax.random as jrandom
import optax
import equinox as eqx
# Toy data
def get_data(dataset_size, *, key):
    """Sample a toy linear-regression dataset.

    Draws `dataset_size` inputs x ~ N(0, 1) of shape (dataset_size, 1)
    and returns (x, y) with y = 5x - 2.
    """
    x = jrandom.normal(key, (dataset_size, 1))
    return x, 5 * x - 2
# Simple dataloader
def dataloader(arrays, key, batch_size):
    """Endless generator of shuffled mini-batches drawn from `arrays`.

    All arrays must share the same leading dimension. Each pass uses a
    fresh random permutation. Note the strict `<` bound below: a batch
    whose end index lands exactly on the dataset size is dropped, so only
    batches strictly inside the dataset are yielded.
    """
    n = arrays[0].shape[0]
    assert all(a.shape[0] == n for a in arrays)
    order = jnp.arange(n)
    while True:
        perm = jrandom.permutation(key, order)
        (key,) = jrandom.split(key, 1)
        lo, hi = 0, batch_size
        while hi < n:
            sel = perm[lo:hi]
            yield tuple(a[sel] for a in arrays)
            lo, hi = hi, hi + batch_size
def main(
    dataset_size=10000,
    batch_size=256,
    learning_rate=3e-3,
    steps=1000,
    width_size=8,
    depth=1,
    seed=5678,
):
    """Train a small MLP on y = 5x - 2, JIT-compiling and differentiating
    only the floating-point arrays of the model PyTree. Returns the final
    training loss."""
    key_data, key_loader, key_model = jrandom.split(jrandom.PRNGKey(seed), 3)
    arrays = get_data(dataset_size, key=key_data)
    batches = dataloader(arrays, batch_size=batch_size, key=key_loader)

    # Any PyTree of parameters would do here; the Equinox MLP is just a
    # convenient one. (See the `build_model.py` example for details.)
    model = eqx.nn.MLP(
        in_size=1, out_size=1, width_size=width_size, depth=depth, key=key_model
    )

    # `jitf` / `value_and_grad_f` flatten the input PyTrees and apply
    # JIT/autodiff only to leaves passing `filter_fn` -- here, inexact
    # (floating-point) arrays, i.e. the model parameters. Everything else
    # (activation functions, flags) is treated statically.
    @ft.partial(eqx.jitf, filter_fn=eqx.is_inexact_array)
    @ft.partial(eqx.value_and_grad_f, filter_fn=eqx.is_inexact_array)
    def loss(model, x, y):
        pred_y = jax.vmap(model)(x)
        return jnp.mean((y - pred_y) ** 2)

    optim = optax.sgd(learning_rate)
    opt_state = optim.init(model)
    for step, (x, y) in zip(range(steps), batches):
        value, grads = loss(model, x, y)
        updates, opt_state = optim.update(grads, opt_state)
        # Like optax.apply_updates, but leaves with a `None` gradient
        # (everything filtered out above) are left untouched.
        model = eqx.apply_updates(model, updates)
        print(step, value)
    return value  # Final loss


if __name__ == "__main__":
    main()
| {
"alphanum_fraction": 0.676739062,
"author": null,
"avg_line_length": 35.6966292135,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0c67cb21221dc8493263e86fdb59bca698563e60",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "556c5be7919f331761ce3409b74af8f5beffd816",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "jrfinkbeiner/equinox",
"max_forks_repo_path": "examples/train_mlp.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "556c5be7919f331761ce3409b74af8f5beffd816",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "jrfinkbeiner/equinox",
"max_issues_repo_path": "examples/train_mlp.py",
"max_line_length": 119,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "556c5be7919f331761ce3409b74af8f5beffd816",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "jrfinkbeiner/equinox",
"max_stars_repo_path": "examples/train_mlp.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 839,
"path": null,
"reason": "import jax",
"repo": null,
"save_path": null,
"sha": null,
"size": 3177
} |
import operator
import logging
import numpy as np
from ..core.slot import SlotDescriptor
from .module import TableModule
from ..core.bitmap import bitmap
from .table import Table
logger = logging.getLogger(__name__)
# Maps an operator symbol to its binary implementation. The public
# `operator` names (lt, and_, ...) are the same function objects as the
# dunder aliases (__lt__, __and__, ...).
ops = {
    "<": operator.lt,
    "<=": operator.le,
    ">": operator.gt,
    ">=": operator.ge,
    "and": operator.and_,
    "or": operator.or_,
    "xor": operator.xor,
    "==": operator.eq,
    "!=": operator.ne,
}
class CmpQueryLast(TableModule):
    """Filter module: compares each processed row of the 'table' input
    against the *last* row of the 'cmp' input, column by column.

    Per-column comparison results (operator ``op``) are merged with the
    ``combine`` operator into a bitmap of selected row ids, exposed via
    the optional 'select' output. Both operators are looked up in the
    module-level ``ops`` table.
    """
    inputs = [SlotDescriptor('table', type=Table, required=True),
              SlotDescriptor('cmp', type=Table, required=True)]
    outputs = [SlotDescriptor('select', type=bitmap, required=False)]

    def __init__(self, op="<", combine="and", **kwds):
        # op: per-column comparison; combine: how per-column result
        # bitmaps are folded together ("and", "or", "xor", ...).
        super(CmpQueryLast, self).__init__(**kwds)
        self.default_step_size = 1000
        self.op = op
        self._op = ops[op]
        self.combine = combine
        self._combine = ops[combine]
        # Bitmap of currently selected row ids; None until first run.
        self._bitmap = None

    def get_data(self, name):
        """Return the 'select' bitmap; otherwise defer to the parent."""
        if name == 'select':
            return self._bitmap
        if name == 'table':
            # NOTE(review): the result of data() is discarded and control
            # falls through to super() -- looks like a missing `return`;
            # confirm intended behavior against the slot API.
            self.get_input_slot('table').data()
        return super(CmpQueryLast, self).get_data(name)

    def run_step(self, run_number, step_size, howlong):
        """Process up to ``step_size`` created/updated rows of 'table',
        comparing them against the last row of 'cmp', and fold the
        matches into ``self._bitmap``."""
        table_slot = self.get_input_slot('table')
        # table_slot.update(run_number)
        table_data = table_slot.data()
        cmp_slot = self.get_input_slot('cmp')
        # cmp_slot.update(run_number)
        cmp_slot.clear_buffers()
        cmp_data = cmp_slot.data()
        if table_data is None \
           or len(table_data) == 0 \
           or cmp_data is None \
           or len(cmp_data) == 0:
            # nothing to do if no filter is specified
            self._bitmap = None
            return self._return_run_step(self.state_blocked, steps_run=1)
        if table_slot.deleted.any() or cmp_slot.deleted.any():
            # Deletions invalidate the incremental state: restart from scratch.
            table_slot.reset()
            self._bitmap = None
            table_slot.update(run_number)
            cmp_slot.update(run_number)
        # Work set = newly created rows plus updated rows, capped at step_size;
        # the unconsumed remainder is pushed back for the next step.
        cr = table_slot.created.next(as_slice=False)
        if cr is None:
            cr = bitmap()
        up = table_slot.updated.next(as_slice=False)
        work = cr | up
        ids = work.pop(step_size)
        if cr:
            table_slot.created.push(cr - ids)
        if up:
            table_slot.updated.push(up - ids)
        steps = len(ids)
        ids = np.asarray(ids, dtype=np.int64)
        indices = table_data.id_to_index(ids)
        # Compare against the most recent row of the cmp table.
        last = cmp_data.last()
        results = None
        for colname in last:
            if colname in table_data:
                arg1 = table_data._column(colname)
                arg2 = last[colname]
                # Boolean mask over the processed rows, mapped back to row ids.
                res = self._op(arg1[indices], arg2)
                res = ids[res]
                if results is None:
                    results = bitmap(res)
                else:
                    results = self._combine(results, bitmap(res))
        if self._bitmap is None:
            self._bitmap = results
        else:
            # Drop stale entries for the processed rows, then add fresh matches.
            # NOTE(review): this subtracts `indices` (positions) while
            # `results` holds `ids`; verify that ids == indices here or
            # whether bitmap(ids) was intended.
            self._bitmap -= bitmap(indices)
            self._bitmap |= results
        return self._return_run_step(self.next_state(table_slot),
                                     steps_run=steps)
| {
"alphanum_fraction": 0.5683388405,
"author": null,
"avg_line_length": 32.0096153846,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5094c52c672d621252685e462e594bd5989cb50c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2020-01-22T06:26:51.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-11-14T20:20:56.000Z",
"max_forks_repo_head_hexsha": "3bc79ce229cd628ef0aa4663136a674743697b47",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "jdfekete/progressivis",
"max_forks_repo_path": "progressivis/table/cmp_query.py",
"max_issues_count": 10,
"max_issues_repo_head_hexsha": "3bc79ce229cd628ef0aa4663136a674743697b47",
"max_issues_repo_issues_event_max_datetime": "2022-01-19T07:36:43.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-11-15T15:10:05.000Z",
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "jdfekete/progressivis",
"max_issues_repo_path": "progressivis/table/cmp_query.py",
"max_line_length": 73,
"max_stars_count": 51,
"max_stars_repo_head_hexsha": "3bc79ce229cd628ef0aa4663136a674743697b47",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "jdfekete/progressivis",
"max_stars_repo_path": "progressivis/table/cmp_query.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-12T17:56:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-09-14T16:31:02.000Z",
"num_tokens": 746,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3329
} |
import torch
import torch.nn as nn
from attention import Attention, NewAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
import torch.nn.functional as F
import torchvision as tv
import utils
import numpy as np
def l2norm(X):
    """L2-normalize the rows of X: divide each row by its Euclidean norm.

    Note: a zero-norm row yields NaN/inf, same as the original behavior.
    """
    row_norms = (X * X).sum(dim=1, keepdim=True).sqrt()
    return X / row_norms.expand_as(X)
class vgg16_modified(nn.Module):
    """Thin wrapper around a pretrained VGG-16 exposing only its
    convolutional feature extractor (the classifier head is discarded)."""

    def __init__(self):
        super(vgg16_modified, self).__init__()
        backbone = tv.models.vgg16(pretrained=True)
        self.vgg_features = backbone.features

    def rep_size(self):
        """Representation size reported by this wrapper."""
        return 1024

    def base_size(self):
        """Number of channels in the final VGG feature map."""
        return 512

    def forward(self, x):
        """Return the conv feature map of the input image batch."""
        return self.vgg_features(x)
class TopDown(nn.Module):
    """Top-down attention module: encodes the question with a BiLSTM and
    attends over image regions using the question embedding."""

    def __init__(self,
                 embed_hidden=300,
                 mlp_hidden=512):
        super(TopDown, self).__init__()
        self.q_emb = nn.LSTM(embed_hidden, mlp_hidden,
                             batch_first=True, bidirectional=True)
        # NOTE(review): q_prep and verb_transform are registered as
        # submodules but never used in forward() -- kept for state_dict
        # compatibility.
        self.q_prep = FCNet([mlp_hidden, mlp_hidden])
        self.lstm_proj = nn.Linear(mlp_hidden * 2, mlp_hidden)
        self.verb_transform = nn.Linear(embed_hidden, mlp_hidden)
        self.v_att = Attention(mlp_hidden, mlp_hidden, mlp_hidden)

    def forward(self, img, q):
        """Return (question embedding, attended visual embedding)."""
        n = q.size(0)
        _, (hidden, _) = self.q_emb(q)
        # Concatenate the final forward/backward hidden states per example
        # and project back down to mlp_hidden.
        question = self.lstm_proj(
            hidden.permute(1, 0, 2).contiguous().view(n, -1))
        weights = self.v_att(img, question)
        visual = (weights * img).sum(1)  # [batch, v_dim]
        return question, visual
class RoleQHandler(nn.Module):
    """Delegates role-question answering to a wrapped TopDown VQA model."""

    def __init__(self):
        super(RoleQHandler, self).__init__()
        self.vqa_model = TopDown()

    def forward(self, img, q):
        """Return (question embedding, visual embedding) from TopDown."""
        return self.vqa_model(img, q)
class ImSituationHandler(nn.Module):
    """Predicts a label for every semantic role of a verb by answering
    per-role questions against image features, and additionally
    reconstructs an image embedding from a pairwise relational network
    over the per-role joint representations."""

    def __init__(self,
                 encoder,
                 qword_embeddings,
                 vocab_size,
                 gpu_mode,
                 mlp_hidden=512):
        super(ImSituationHandler, self).__init__()
        self.encoder = encoder
        self.qword_embeddings = qword_embeddings
        self.vocab_size = vocab_size
        self.gpu_mode = gpu_mode
        self.role_handler = RoleQHandler()
        self.q_net = FCNet([mlp_hidden, mlp_hidden])
        self.v_net = FCNet([mlp_hidden, mlp_hidden])
        self.classifier = SimpleClassifier(
            mlp_hidden, 2 * mlp_hidden, self.vocab_size, 0.5)
        self.mlp_hidden = mlp_hidden
        # g: relation network applied to every ordered pair of role
        # representations (inputs are concatenated pairs, 2*mlp_hidden wide).
        self.g = nn.Sequential(
            nn.Linear(mlp_hidden*2, mlp_hidden),
            nn.ReLU(),
            nn.Linear(mlp_hidden, mlp_hidden),
            nn.ReLU(),
            nn.Linear(mlp_hidden, mlp_hidden),
            nn.ReLU(),
            nn.Linear(mlp_hidden, mlp_hidden),
            nn.ReLU(),
        )
        # f: projects the summed pairwise relation vector to the final
        # reconstructed-image embedding (before L2 normalization).
        self.f = nn.Sequential(
            nn.Linear(mlp_hidden, mlp_hidden),
            nn.ReLU(),
            nn.Linear(mlp_hidden, mlp_hidden)
        )

    def forward(self, img, verb):
        """Return (role logits, reconstructed image embedding).

        img: tiled region features, one copy per role (see BaseModel.forward).
        verb: batch of verb ids used to look up role questions.
        """
        batch_size = verb.size(0)
        role_qs, _ = self.encoder.get_role_questions_batch(verb)
        max_role_count = role_qs.size(1)
        #roles = self.encoder.get_role_ids_batch(verb)
        if self.gpu_mode >= 0:
            role_qs = role_qs.to(torch.device('cuda'))
            #roles = roles.to(torch.device('cuda'))
        # Flatten (batch, roles, q_len) -> (batch*roles, q_len) so every
        # role question is answered independently.
        role_qs = role_qs.view(-1, role_qs.size(-1))
        q_emb, v_emb = self.role_handler(img, self.qword_embeddings(role_qs))
        q_repr = self.q_net(q_emb)
        v_repr = self.v_net(v_emb)
        # Element-wise fusion of question and visual representations.
        joint_repr = q_repr * v_repr
        logits = self.classifier(joint_repr)
        #get a relational module to get relationship among 6 roles and get the final rep, assumkng it should equal back to the image
        reshaped_joint_rep = joint_repr.contiguous().view(batch_size, -1, self.mlp_hidden)
        # Build all ordered role pairs via broadcasting-style expansion.
        rolerep1 = reshaped_joint_rep.unsqueeze(1).expand(batch_size, max_role_count, max_role_count, self.mlp_hidden)
        rolerep2 = reshaped_joint_rep.unsqueeze(2).expand(batch_size, max_role_count, max_role_count, self.mlp_hidden)
        rolerep1 = rolerep1.contiguous().view(-1, max_role_count * max_role_count, self.mlp_hidden)
        rolerep2 = rolerep2.contiguous().view(-1, max_role_count * max_role_count, self.mlp_hidden)
        concat_vec = torch.cat([rolerep1, rolerep2], 2).view(-1, self.mlp_hidden*2)
        g = self.g(concat_vec)
        # Sum relation vectors over all role pairs, then project and normalize.
        g = g.view(-1, max_role_count * max_role_count, self.mlp_hidden).sum(1).squeeze()
        recreated_img = l2norm(self.f(g))
        return logits, recreated_img
class BaseModel(nn.Module):
def __init__(self, encoder,
             gpu_mode,
             embed_hidden=300,
             mlp_hidden=512):
    """Assemble the full model: image transforms, VGG backbone, image
    serializer, embedding tables, and the role-question handler.

    encoder: dataset encoder providing vocab/role sizes and questions.
    gpu_mode: device index; >= 0 enables CUDA placement downstream.
    """
    super(BaseModel, self).__init__()
    # Standard ImageNet normalization matching the pretrained VGG backbone.
    self.normalize = tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # Augmented pipeline for training images.
    self.train_transform = tv.transforms.Compose([
        tv.transforms.RandomRotation(10),
        tv.transforms.RandomResizedCrop(224),
        tv.transforms.RandomHorizontalFlip(),
        tv.transforms.ToTensor(),
        self.normalize,
    ])
    # Deterministic pipeline for validation/test images.
    self.dev_transform = tv.transforms.Compose([
        tv.transforms.Resize(224),
        tv.transforms.CenterCrop(224),
        tv.transforms.ToTensor(),
        self.normalize,
    ])
    self.encoder = encoder
    self.gpu_mode = gpu_mode
    # Dataset-dependent sizes reported by the encoder.
    self.n_roles = self.encoder.get_num_roles()
    self.n_verbs = self.encoder.get_num_verbs()
    self.vocab_size = self.encoder.get_num_labels()
    self.max_role_count = self.encoder.get_max_role_count()
    self.n_role_q_vocab = len(self.encoder.question_words)
    self.conv = vgg16_modified()
    # Flattens the 512x7x7 conv map into a single mlp_hidden vector.
    self.img_serialize = nn.Sequential(
        nn.Linear(mlp_hidden*7*7, mlp_hidden*2),
        nn.BatchNorm1d(mlp_hidden*2),
        nn.ReLU(),
        nn.Linear(mlp_hidden*2, mlp_hidden)
    )
    # Embedding tables; the extra final index of each is the padding slot.
    self.role_lookup = nn.Embedding(self.n_roles +1, embed_hidden, padding_idx=self.n_roles)
    self.ans_lookup = nn.Embedding(self.vocab_size + 1, embed_hidden, padding_idx=self.vocab_size)
    self.w_emb = nn.Embedding(self.n_role_q_vocab + 1, embed_hidden, padding_idx=self.n_role_q_vocab)
    self.vsrl_model = ImSituationHandler(self.encoder, self.w_emb, self.vocab_size,
                                         self.gpu_mode)
    self.conv_hidden = self.conv.base_size()
    self.mlp_hidden = mlp_hidden
    self.embed_hidden = embed_hidden
def train_preprocess(self):
return self.train_transform
def dev_preprocess(self):
return self.dev_transform
def forward(self, img, verb):
img_features = self.conv(img)
ser_img = l2norm(self.img_serialize(img_features.view(-1, 512*7*7)))
batch_size, n_channel, conv_h, conv_w = img_features.size()
img = img_features.view(batch_size, n_channel, -1)
img = img.permute(0, 2, 1)
img = img.expand(self.max_role_count,img.size(0), img.size(1), img.size(2))
img = img.transpose(0,1)
img = img.contiguous().view(batch_size* self.max_role_count, -1, self.mlp_hidden)
role_pred, recreated_img = self.vsrl_model(img, verb)
role_pred = role_pred.contiguous().view(batch_size, -1, self.vocab_size)
#print('ans sizes :', verb_pred.size(), role_pred.size())
return role_pred, ser_img, recreated_img
def calculate_loss(self, gt_verbs, role_label_pred, gt_labels,args):
batch_size = role_label_pred.size()[0]
if args.train_all:
loss = 0
for i in range(batch_size):
for index in range(gt_labels.size()[1]):
frame_loss = 0
#verb_loss = utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])
#frame_loss = criterion(role_label_pred[i], gt_labels[i,index])
for j in range(0, self.max_role_count):
frame_loss += utils.cross_entropy_loss(role_label_pred[i][j], gt_labels[i,index,j] ,self.vocab_size)
frame_loss = frame_loss/len(self.encoder.verb2_role_dict[self.encoder.verb_list[gt_verbs[i]]])
#print('frame loss', frame_loss, 'verb loss', verb_loss)
loss += frame_loss
else:
#verb from pre-trained
loss = 0
for i in range(batch_size):
for index in range(gt_labels.size()[1]):
frame_loss = 0
#verb_loss = utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])
#frame_loss = criterion(role_label_pred[i], gt_labels[i,index])
for j in range(0, self.max_role_count):
frame_loss += utils.cross_entropy_loss(role_label_pred[i][j], gt_labels[i,index,j] ,self.vocab_size)
frame_loss = frame_loss/len(self.encoder.verb2_role_dict[self.encoder.verb_list[gt_verbs[i]]])
#print('frame loss', frame_loss, 'verb loss', verb_loss)
loss += frame_loss
final_loss = loss/batch_size
#print('loss :', final_loss)
return final_loss
class ContrastiveLoss(nn.Module):
    """
    Compute contrastive (hinge ranking) loss between two sets of
    embeddings whose matching pairs lie on the diagonal of the
    pairwise similarity matrix.

    For single-modal retrieval, emb1=query, emb2=data.
    For cross-modal retrieval, emb1=query in source domain, emb2=data
    in target domain.
    """
    def __init__(self, margin=0, measure=False, max_violation=True):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        # 'order' selects order-embedding similarity; anything else
        # (including the default False) selects cosine similarity.
        if measure == 'order':
            self.sim = self.order_sim
        else:
            self.sim = self.cosine_sim
        self.max_violation = max_violation

    def cosine_sim(self, im, s):
        """Cosine similarity between all the image and sentence pairs
        (inputs are assumed L2-normalized already — confirm at callers)."""
        return im.mm(s.t())

    def order_sim(self, im, s):
        """Order embeddings similarity measure $max(0, s-im)$"""
        YmX = (s.unsqueeze(1).expand(s.size(0), im.size(0), s.size(1))
               - im.unsqueeze(0).expand(s.size(0), im.size(0), s.size(1)))
        score = -YmX.clamp(min=0).pow(2).sum(2).sqrt().t()
        return score

    def forward(self, emb1, emb2):
        """Return the summed hinge loss over both retrieval directions."""
        # compute pairwise score matrix; matching pairs on the diagonal
        scores = self.sim(emb1, emb2)
        diagonal = scores.diag().view(emb1.size(0), 1)
        d1 = diagonal.expand_as(scores)
        d2 = diagonal.t().expand_as(scores)
        # compare every diagonal score to scores in its column (caption retrieval)
        cost_s = (self.margin + scores - d1).clamp(min=0)
        # compare every diagonal score to scores in its row (image retrieval)
        cost_im = (self.margin + scores - d2).clamp(min=0)
        # Clear diagonals. Build the mask on the same device as `scores`:
        # previously the mask was moved to CUDA whenever a GPU was merely
        # available, which crashed masked_fill_ for CPU tensors on CUDA
        # machines.
        I = torch.eye(scores.size(0), device=scores.device) > .5
        cost_s = cost_s.masked_fill_(I, 0)
        cost_im = cost_im.masked_fill_(I, 0)
        # keep only the maximum violating negative for each query
        if self.max_violation:
            cost_s = cost_s.max(1)[0]
            cost_im = cost_im.max(0)[0]
        return cost_s.sum() + cost_im.sum()
| {
"alphanum_fraction": 0.6105371019,
"author": null,
"avg_line_length": 36.2569659443,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "23a50de2d5ab6e4d22d51c0859a596cc6e93d534",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0e4bf3f7f301570b652490f697758361c866f3c1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "thilinicooray/mac-network-pytorch",
"max_forks_repo_path": "model_new_basic.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0e4bf3f7f301570b652490f697758361c866f3c1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "thilinicooray/mac-network-pytorch",
"max_issues_repo_path": "model_new_basic.py",
"max_line_length": 132,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0e4bf3f7f301570b652490f697758361c866f3c1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "thilinicooray/mac-network-pytorch",
"max_stars_repo_path": "model_new_basic.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2859,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11711
} |
import numpy as np
import tensorflow as tf
import cv2
def get_result(img):
    """Run the saved MNIST model on a grayscale digit image and return
    the predicted class index (length-1 numpy array from argmax)."""
    mnist_model = tf.saved_model.load(r'model\mnist')
    # Inspect the serving signature (debug output, kept intentionally).
    signature = mnist_model.signatures['serving_default']
    print(signature.inputs)
    print(signature.output_dtypes)
    print(signature.output_shapes)
    # For multi-input models, each named input would be passed to the
    # signature function as a keyword argument instead.
    # Resize to 28x28, invert (dark digit -> bright), scale to [0, 1].
    image = 1-(cv2.resize(img,(28,28))/255.0)
    import matplotlib.pyplot as plt
    plt.imshow(image, cmap='gray', interpolation='bicubic')
    # Add trailing channel axis, convert, then add leading batch axis:
    # (28, 28) -> (1, 28, 28, 1).
    channelled = image[..., np.newaxis]
    input_tensor = tf.convert_to_tensor(channelled, dtype=tf.float32)
    input_tensor = input_tensor[tf.newaxis, ...]
    # Run inference through the serving signature.
    output_dict = signature(input_1=input_tensor)
    return np.argmax(output_dict['dense_1'], axis=1)
"alphanum_fraction": 0.6790557509,
"author": null,
"avg_line_length": 31.6031746032,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "af441d059fdbebce1ba6bd6a72c77cf425e0d839",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cdb509af2de4d5f7d0cc9bd3c562bb660c17163a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "ljq2278/models",
"max_forks_repo_path": "my_test/tf2_predict_saved_model.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cdb509af2de4d5f7d0cc9bd3c562bb660c17163a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ljq2278/models",
"max_issues_repo_path": "my_test/tf2_predict_saved_model.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cdb509af2de4d5f7d0cc9bd3c562bb660c17163a",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ljq2278/models",
"max_stars_repo_path": "my_test/tf2_predict_saved_model.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 553,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1991
} |
from PyQt5.QtGui import QImage, QColor
import sympy as sp
def tand(x):
    """Tangent of x, with x given in degrees (symbolic, via sympy)."""
    radians = x * sp.pi / 180
    return sp.tan(radians)
def sind(x):
    """Sine of x, with x given in degrees (symbolic, via sympy)."""
    radians = x * sp.pi / 180
    return sp.sin(radians)
def cosd(x):
    """Cosine of x, with x given in degrees (symbolic, via sympy)."""
    radians = x * sp.pi / 180
    return sp.cos(radians)
def invert(qImage):
    """Return a copy of qImage with every RGB channel inverted."""
    inverted = qImage.copy()
    for row in range(qImage.height()):
        for col in range(qImage.width()):
            src = QColor.fromRgb(qImage.pixel(col, row))
            flipped = QColor(255 - src.red(), 255 - src.green(), 255 - src.blue())
            inverted.setPixel(col, row, flipped.rgba())
    return inverted
def flip_horizontal(qImage):
    """Return a copy of qImage mirrored around its vertical axis."""
    mirrored = qImage.copy()
    width = qImage.width()
    for row in range(qImage.height()):
        for col in range(width):
            color = QColor.fromRgb(qImage.pixel(col, row))
            mirrored.setPixel(width - 1 - col, row, color.rgba())
    return mirrored
def interlace(qImage):
    """Return a copy containing only every other row of qImage; the
    skipped rows stay at the fill value 0 (black)."""
    out = qImage.copy()
    out.fill(0)
    for row in range(0, qImage.height(), 2):
        for col in range(qImage.width()):
            color = QColor.fromRgb(qImage.pixel(col, row))
            out.setPixel(col, row, color.rgba())
    return out
def gray(qImage):
    """Return a grayscale copy using Rec.601 luma weights
    (0.299 R + 0.587 G + 0.114 B, truncated to int)."""
    out = qImage.copy()
    for row in range(qImage.height()):
        for col in range(qImage.width()):
            color = QColor.fromRgb(qImage.pixel(col, row))
            luma = int((color.red() * 0.299) + (color.green() * 0.587) + (color.blue() * 0.114))
            out.setPixel(col, row, QColor(luma, luma, luma).rgba())
    return out
def reduce(qImage):
    """Write a half-size version of the image into (the top-right
    quadrant of) a copy: source pixel (x, y) goes to
    (width//2 + x//2, y//2).  Within each 2x2 source group the
    last-written pixel wins, matching the original scan order."""
    out = qImage.copy()
    width = qImage.width()
    base = int(width / 2)
    for row in range(qImage.height()):
        for col in range(width):
            color = QColor.fromRgb(qImage.pixel(col, row))
            out.setPixel(base + col // 2, row // 2, color.rgba())
    return out
def mosaic(qImage, pixelSize):
    """Pixelate: every pixel takes the color of the top-left anchor of
    its pixelSize x pixelSize cell.  Anchors are read from the copy
    being filled, exactly as the original running-anchor version did
    (anchor pixels are written with their own value first)."""
    out = qImage.copy()
    for row in range(qImage.height()):
        anchor_row = (row // pixelSize) * pixelSize
        for col in range(qImage.width()):
            anchor_col = (col // pixelSize) * pixelSize
            color = QColor.fromRgb(out.pixel(anchor_col, anchor_row))
            out.setPixel(col, row, color.rgba())
    return out
def rotate(qImage, angle):
    """Sparsely rotate qImage by `angle` degrees using inverse mapping,
    sampling every 5th output pixel; unmapped pixels stay black."""
    out = qImage.copy()
    out.fill(0)
    width = qImage.width()
    height = qImage.height()
    for row in range(1, height, 5):
        for col in range(1, width, 5):
            # Inverse-map the output pixel back into the source image
            # (rotation about the origin; coordinates stay symbolic
            # because cosd/sind return sympy expressions).
            src_row = row * cosd(angle) - col * sind(angle)
            src_col = row * sind(angle) + col * cosd(angle)
            if src_row >= 0 and src_row < height and src_col >= 0 and src_col < width:
                color = QColor.fromRgb(qImage.pixel(src_col, src_row))
                out.setPixel(col, row, color.rgba())
    return out
| {
"alphanum_fraction": 0.5056241427,
"author": null,
"avg_line_length": 24.4630872483,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f1614cb030bd58eaffe84b1f4a46a5c0afe4f3ff",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-07-22T17:57:04.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-22T17:57:04.000Z",
"max_forks_repo_head_hexsha": "8bc6531483f08982b19c08a4cdb1a0c9dbd03737",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "ThomasDerZweifler/pyPro",
"max_forks_repo_path": "thomas/imageProcessing/operations/qtImageProcessing.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8bc6531483f08982b19c08a4cdb1a0c9dbd03737",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ThomasDerZweifler/pyPro",
"max_issues_repo_path": "thomas/imageProcessing/operations/qtImageProcessing.py",
"max_line_length": 90,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8bc6531483f08982b19c08a4cdb1a0c9dbd03737",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ThomasDerZweifler/pyPro",
"max_stars_repo_path": "thomas/imageProcessing/operations/qtImageProcessing.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1054,
"path": null,
"reason": "import sympy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3645
} |
from kinematicEnv import KinematicEnv
from QL import QL
from upDDPG import DDPG as uDDPG
import tensorflow as tf
from bottomDDPG import DDPG as bDDPG
import numpy as np
# Shared kinematic environment; its dimensions configure all agents.
env = KinematicEnv()
s_dim = env.state_dim
a_dim = env.action_dim
a_bound = env.action_bound
# Each model lives in its own TF graph + session so their variables and
# checkpoints do not collide.
g1 = tf.Graph()
isess1 = tf.Session(graph=g1)
with g1.as_default():
    # NOTE(review): the initializer runs before uDDPG builds its
    # variables; presumably restore() loads all weights afterwards --
    # confirm against the DDPG implementation.
    isess1.run(tf.global_variables_initializer())
    uddpg = uDDPG(a_dim, s_dim, a_bound)
    uddpg.restore()
g2 = tf.Graph()
isess2 = tf.Session(graph=g2)
with g2.as_default():
    isess2.run(tf.global_variables_initializer())
    bddpg = bDDPG(a_dim, s_dim, a_bound)
    bddpg.restore()
# Third graph holds the Q-network selector trained below (2 actions).
g3 = tf.Graph()
isess3 = tf.Session(graph=g3)
with g3.as_default():
    isess3.run(tf.global_variables_initializer())
    Q = QL(2, s_dim)
def initial():
    """Reset the environment into one of its three start configurations
    (up / down / on), chosen uniformly at random."""
    starters = (env.initialUp, env.initialDown, env.initialOn)
    return starters[np.random.randint(0, 3)]()
def train():
    """Train the Q-network selector that arbitrates between the 'up'
    and 'down' DDPG controllers, using a hand-crafted reward.

    NOTE(review): `s` is never advanced to `s_` inside the episode, so
    Q always chooses from the initial state -- confirm whether a
    missing `s = s_` is intentional.
    """
    step = 0
    for i_episode in range(6000):
        s = initial()
        j = 0  # steps where the selector agreed with the reward rule
        for i in range(300):
            #env.render()
            a0 = Q.choose_action(s)
            # Delegate the low-level action to the selected DDPG policy.
            if a0 == 0:
                k = uddpg.choose_action(s)
                s_, _, _ = env.stepUp(k)
            else:
                k = bddpg.choose_action(s)
                s_, _, _ = env.stepDown(k)
            #rewardReset
            # Reward shaping from selected state components.
            # NOTE(review): when abs(label1) >= 20 AND abs(label2) >= 20
            # no branch assigns `reward`, so the previous iteration's
            # value is reused (or a NameError occurs on the very first
            # step) -- confirm this case is unreachable.
            label1, label2, label3 = s[0], s[8], s[9] - s[1]
            if -20.<label1<20. and -20.<label2<20.:
                if label3 < 150.:
                    if a0 == 0: reward = 1
                    else: reward = -1
                else:
                    if a0 == 0: reward = -1
                    else: reward = 1
            elif -20.<label1<20. and abs(label2) >= 20.:
                if a0 == 0: reward = 1
                else: reward = -2
            elif abs(label1) >= 20. and -20.<label2<20.:
                if a0 == 0: reward = -2
                else: reward = 1
            Q.store_transition(s, a0, reward, s_)
            # Warm-up for 300 steps, then learn every 50th step.
            if step > 300 and step % 50 == 0:
                Q.learn()
            step+=1
            if reward == 1:
                j += 1
            # Episode ends on a strongly wrong choice or at the step cap.
            if reward == -2 or i == 299:
                print('Ep: %i | accuracy: %.2f | step: %i' % (i_episode, 1.*j/(i+1)*100, i))
                break
    # Save the selector inside its own graph's context.
    with g3.as_default():
        Q.save()
# TODO (translated): when training with multiple computation graphs,
# how should each model be saved separately?
train()
"alphanum_fraction": 0.5060542797,
"author": null,
"avg_line_length": 28.1764705882,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "698c3a55f3e93af01a220eadcc30ea0f28d5392f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-04-07T12:07:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-04-07T12:07:15.000Z",
"max_forks_repo_head_hexsha": "966441e7d08cf2ff067bcbcfb9305565e257ebfd",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "aaronworry/two-arms-climb",
"max_forks_repo_path": "trainQ.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "966441e7d08cf2ff067bcbcfb9305565e257ebfd",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "aaronworry/two-arms-climb",
"max_issues_repo_path": "trainQ.py",
"max_line_length": 93,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "966441e7d08cf2ff067bcbcfb9305565e257ebfd",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "aaronworry/two-arms-climb",
"max_stars_repo_path": "trainQ.py",
"max_stars_repo_stars_event_max_datetime": "2020-12-22T07:03:22.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-01-22T08:38:47.000Z",
"num_tokens": 722,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2395
} |
c bad-flag specified spectral regions in the array
c blankoutsky2 takes out the B-band as well as 5577 and the A-band
c could also blank locations of possible strong em lines, mostly
c 3727 and 5007, if we are using this for abs line fits. Or use
c selectabsregions to exclude those.
c blankoutlines is directly from blankoutsky2, but it also
c needs to have the z as input so it can blank in restframe
c blankoutlines: blank out (set to badset) observed-frame windows
c around common emission lines, for every spectrum in specarray.
c   specarray(nspmax,nwave) - spectra; blanked pixels set to badset
c   nspec  - number of spectra (rows) actually in use
c   nspmax - declared leading dimension of specarray
c   nwave  - number of wavelength pixels
c   wave   - wavelength array, assumed monotonically increasing
c   z      - redshift; the restframe windows are scaled by (1+z)
      subroutine blankoutlines(specarray,nspec,nspmax,nwave,wave,z)
      real specarray(nspmax,nwave)
      real wave(nwave)
c NTOBLANK restframe (min,max) wavelength windows to blank
      parameter (NTOBLANK=11)
      real blankreg(2*NTOBLANK)
c      data blankreg /5574.,5580.,7590.,7650./
c      data blankreg /5574.,5580.,6855.,6870.,7590.,7650./
c These might need to be in vacuum instead of air
c 3726+3729,4101,4340,4861,4959,5007,6548,6563,6583,6717,6731
cc medium size regions
c      data blankreg /3717.,3738.,4095.,4107.,4333.,4347.,4851.,4871.,
c     $ 4954.,4964.,4997.,5017.,6545.,6551.,6550.,6576.,
c     $ 6578.,6588.,6712.,6722.,6727.,6735./
c large size regions
      data blankreg /3707.,3747.,4091.,4111.,4325.,4355.,4841.,4881.,
     $     4949.,4969.,4977.,5037.,6540.,6556.,6523.,6603.,
     $     6563.,6603.,6707.,6727.,6721.,6741./
c badset is presumably declared in pcredshift.h - confirm
      include 'pcredshift.h'
      nblank=NTOBLANK
c I wrote the regions in air, so need to shift to vacuum?
c vacuum wavelengths are longer.
c Only do this if the spectrum was not converted to air on reading in.
c      do ii=1,nblank
c         blankreg(ii) = blankreg(ii) * 1.00029
c      end do
      do ii=1,nblank
c shift this restframe window to the observed frame
         wmin = blankreg(2*ii-1) * (1.0+z)
         wmax = blankreg(2*ii) * (1.0+z)
c find the indexes corresponding to the region of interest
c wave array is assumed monotonically increasing
c imin = first pixel with wave > wmin (nwave+1 if none)
         i=1
 100     continue
         if (i .gt. nwave .or. wave(i) .gt. wmin) then
            imin=i
            go to 110
         end if
         i=i+1
         go to 100
 110     continue
c imax = last pixel with wave < wmax (0 if none; empty loop below)
         i=nwave
 150     continue
         if (i .lt. 1 .or. wave(i) .lt. wmax) then
            imax=i
            go to 160
         end if
         i=i-1
         go to 150
 160     continue
c flag the window in every spectrum
         do j=1,nspec
            do i=imin,imax
               specarray(j,i) = badset
            end do
         end do
c end the do ii loop
      end do
      return
      end
| {
"alphanum_fraction": 0.6304541407,
"author": null,
"avg_line_length": 26.7380952381,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "d99d79b92d835db68337a3282be8a6113b101d47",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d203035ba30260e3272c7d93066fb3ab9de88d3f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bjweiner/DEEP2-redshift",
"max_forks_repo_path": "blankoutlines.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d203035ba30260e3272c7d93066fb3ab9de88d3f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bjweiner/DEEP2-redshift",
"max_issues_repo_path": "blankoutlines.f",
"max_line_length": 70,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d203035ba30260e3272c7d93066fb3ab9de88d3f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bjweiner/DEEP2-redshift",
"max_stars_repo_path": "blankoutlines.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 769,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2246
} |
"""
examples/pong.py
Copyright (c) 2020, Nir Aides - nir@winpdb.org
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
* pong sound is by [freesound](https://freesound.org/people/NoiseCollector/sounds/4359/).
* Commodore 64 font is by [KreativeKorp](https://www.kreativekorp.com/software/fonts/c64.shtml).
"""
import math
import time
import sys
import os
import PIL.Image
import numpy as np
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import jupylet.color
from jupylet.env import get_app_mode
from jupylet.app import App
from jupylet.state import State
from jupylet.label import Label
from jupylet.sprite import Sprite
from jupylet.audio.sample import Sample
import moderngl_window.timers.clock as _clock
# Run windowed when executed directly; hidden (headless) when imported,
# e.g. for programmatic control through step()/reset() below.
if __name__ == '__main__':
    mode = 'window'
else:
    mode = 'hidden'
app = App(mode=mode)
# Commodore-64-style two-color palette.
background = '#3e32a2'
foreground = '#7c71da'
# White bitmaps: ball (32x32), paddles (128x16), playing field (90% of window).
a0 = np.ones((32, 32)) * 255
a1 = np.ones((128, 16)) * 255
a2 = np.ones((app.height * 9 // 10, app.width * 9 // 10, 3)) * 255
ball = Sprite(a0, y=app.height/2, x=app.width/2)
padl = Sprite(a1, y=app.height/2, x=48)            # left paddle
padr = Sprite(a1, y=app.height/2, x=app.width-48)  # right paddle
field = Sprite(a2, y=app.height/2, x=app.width/2, color=background)
pong_sound = Sample('sounds/pong-blip.wav', amp=0.2).load()
# Score labels anchored near the left/right edges.
scorel = Label(
    '0', font_size=42, color=foreground,
    x=64, y=app.height/2,
    anchor_y='center', anchor_x='left',
    font_path='fonts/PetMe64.ttf'
)
scorer = Label(
    '0', font_size=42, color=foreground,
    x=app.width-64, y=app.height/2,
    anchor_y='center', anchor_x='right',
    font_path='fonts/PetMe64.ttf'
)
@app.event
def render(ct, dt):
    """Draw one frame: clear to the frame color, then paint the field,
    score labels, ball and paddles back-to-front."""
    app.window.clear(color=foreground)
    for drawable in (field, scorel, scorer, ball, padl, padr):
        drawable.draw()
# Mutable game state (persisted by save()/load() below).
state = State(
    sl = 0,              # left player's score
    sr = 0,              # right player's score
    bvx = 192,           # ball x velocity
    bvy = 192,           # ball y velocity
    vyl = 0,             # left paddle velocity
    pyl = app.height/2,  # left paddle target y
    vyr = 0,             # right paddle velocity
    pyr = app.height/2,  # right paddle target y
    left = False,        # LEFT arrow held (moves right paddle)
    right = False,       # RIGHT arrow held (moves right paddle)
    key_a = False,       # 'A' key held (moves left paddle)
    key_d = False,       # 'D' key held (moves left paddle)
)
@app.event
def key_event(key, action, modifiers):
    """Track pressed/released state of the four control keys; other
    actions (e.g. key repeat) are ignored."""
    keys = app.window.keys
    if action == keys.ACTION_PRESS:
        down = True
    elif action == keys.ACTION_RELEASE:
        down = False
    else:
        return
    if key == keys.LEFT:
        state.left = down
    elif key == keys.RIGHT:
        state.right = down
    elif key == keys.A:
        state.key_a = down
    elif key == keys.D:
        state.key_d = down
@app.run_me_every(1/120)
def update_pads(ct, dt):
    """Move paddle targets from held keys, then integrate each paddle
    toward its target with a damped spring; runs at 120 Hz."""
    # Shift the target positions according to the held keys.
    if state.right:
        state.pyr = min(app.height, state.pyr + dt * 512)
    if state.left:
        state.pyr = max(0, state.pyr - dt * 512)
    if state.key_a:
        state.pyl = min(app.height, state.pyl + dt * 512)
    if state.key_d:
        state.pyl = max(0, state.pyl - dt * 512)
    # Spring toward the target with per-tick velocity decay (0.9).
    accel_l = 200 * (state.pyl - padl.y)
    accel_r = 200 * (state.pyr - padr.y)
    state.vyl = state.vyl * 0.9 + (accel_l * dt)
    state.vyr = state.vyr * 0.9 + (accel_r * dt)
    padl.y += state.vyl * dt
    padr.y += state.vyr * dt
    padr.clip_position(app.width, app.height)
    padl.clip_position(app.width, app.height)
@app.run_me_every(1/60)
def update_ball(ct, dt):
    """Advance the ball; handle wall/paddle bounces and scoring; 60 Hz."""
    # Squared speed before this tick; used at the end to restore speed
    # lost in paddle collisions (score events reset it to 0 to skip that).
    bs0 = state.bvx ** 2 + state.bvy ** 2
    ball.angle += 200 * dt
    ball.x += state.bvx * dt
    ball.y += state.bvy * dt
    # Top wall bounce (sound pan tracks the ball's horizontal position).
    if ball.top >= app.height:
        pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)
        ball.y -= ball.top - app.height
        state.bvy = -state.bvy
    # Bottom wall bounce.
    if ball.bottom <= 0:
        pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)
        ball.y -= ball.bottom
        state.bvy = -state.bvy
    # Right wall: left player scores; relaunch at base speed.
    if ball.right >= app.width:
        pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)
        ball.x -= ball.right - app.width
        state.bvx = -192
        state.bvy = 192 * np.sign(state.bvy)
        bs0 = 0
        state.sl += 1
        scorel.text = str(state.sl)
    # Left wall: right player scores; relaunch at base speed.
    if ball.left <= 0:
        pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)
        ball.x -= ball.left
        state.bvx = 192
        state.bvy = 192 * np.sign(state.bvy)
        bs0 = 0
        state.sr += 1
        scorer.text = str(state.sr)
    # Right paddle collision: reflect x and add half the paddle velocity.
    if state.bvx > 0 and ball.top >= padr.bottom and padr.top >= ball.bottom:
        if 0 < ball.right - padr.left < 10:
            pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)
            ball.x -= ball.right - padr.left
            state.bvx = -state.bvx
            state.bvy += state.vyr / 2
    # Left paddle collision.
    if state.bvx < 0 and ball.top >= padl.bottom and padl.top >= ball.bottom:
        if 0 < padl.right - ball.left < 10:
            pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)
            # NOTE(review): ball.left < padl.right here, so this moves
            # the ball further LEFT, while the mirrored right-paddle
            # branch subtracts the overlap -- confirm intended.
            ball.x += ball.left - padl.right
            state.bvx = -state.bvx
            state.bvy += state.vyl / 2
    # Re-normalize speed if a paddle hit slowed the ball noticeably.
    bs1 = state.bvx ** 2 + state.bvy ** 2
    if bs1 < 0.9 * bs0:
        state.bvx = (bs0 - state.bvy ** 2) ** 0.5 * np.sign(state.bvx)
    ball.wrap_position(app.width, app.height)
@app.run_me()
def highlights(ct, dt):
    """Coroutine: flash a score label white when its score changes,
    then fade it back to its baseline color."""
    sl0 = state.sl
    sr0 = state.sr
    # Baseline colors captured at startup; the fade converges to these.
    slc = np.array(scorel.color)
    src = np.array(scorer.color)
    while True:
        # Rescheduled ~24 times per second by the app.
        ct, dt = yield 1/24
        # Frame-rate-independent exponential fade factor.
        r0 = 0.9 ** (120 * dt)
        scorel.color = np.array(scorel.color) * r0 + (1 - r0) * slc
        scorer.color = np.array(scorer.color) * r0 + (1 - r0) * src
        # On a score change, snap the label to white (fades back above).
        if sl0 != state.sl:
            sl0 = state.sl
            scorel.color = 'white'
        if sr0 != state.sr:
            sr0 = state.sr
            scorer.color = 'white'
def step(player0=(0, 0, 0, 0, 0), player1=(0, 0, 0, 0, 0), n=1):
    """Advance the game programmatically (RL-style interface).

    player0/player1: control vectors; only the first two entries are
    used -- they become the (A, D) key states for player0 and the
    (LEFT, RIGHT) key states for player1.  Defaults are now immutable
    tuples instead of shared mutable list instances (classic Python
    mutable-default hazard); slicing behavior is unchanged.
    n: number of frames to step (applied only in 'hidden' mode).

    Returns a dict with the current screen and each player's score and
    reward, where reward is player0's score change minus player1's.
    """
    state.key_a, state.key_d = player0[:2]
    state.left, state.right = player1[:2]
    sl0 = state.sl
    sr0 = state.sr
    if app.mode == 'hidden':
        app.step(n)
    reward = (state.sl - sl0) - (state.sr - sr0)
    return {
        'screen0': app.observe(),
        'player0': {'score': state.sl, 'reward': reward},
        'player1': {'score': state.sr, 'reward': -reward},
    }
# Path of the saved initial game state used by reset().
START = 'pong-start.state'
def reset():
    """Restore the bundled start state (see load/save below)."""
    return load(START)
def load(path):
    """Load game state and object positions from `path` via the app."""
    return app.load_state(path, state, ball, padl, padr, scorel, scorer)
def save(path=None):
    """Snapshot game state and object positions under the name 'pong'."""
    app.save_state('pong', path, state, ball, padl, padr, scorel, scorer)
if __name__ == '__main__':
    app.run()
| {
"alphanum_fraction": 0.5796319018,
"author": null,
"avg_line_length": 24.8475609756,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5d2835d5758a8546991302240270141d4e4e0502",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5b373a1f135f50ea14086790443e7dc7a76d86a4",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "ofer1992/jupylet",
"max_forks_repo_path": "examples/pong.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5b373a1f135f50ea14086790443e7dc7a76d86a4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "ofer1992/jupylet",
"max_issues_repo_path": "examples/pong.py",
"max_line_length": 100,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5b373a1f135f50ea14086790443e7dc7a76d86a4",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "ofer1992/jupylet",
"max_stars_repo_path": "examples/pong.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2312,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8150
} |
C
C $Id: mstrms.f,v 1.4 2008-07-27 00:17:31 haley Exp $
C
C Copyright (C) 2000
C University Corporation for Atmospheric Research
C All Rights Reserved
C
C The use of this Software is governed by a License Agreement.
C
SUBROUTINE MSTRMS (DIAG,SDIAG,SIGMA,DEL)
C
C ---------------------------------------------------------------------
C Note: This routine comes from a proprietary package called FITPACK.
C It is used in the NCAR graphics package by permission of the author,
C Alan Cline.
C ---------------------------------------------------------------------
C
C CODED BY ALAN KAYLOR CLINE
C FROM FITPACK -- JUNE 22, 1986
C A CURVE AND SURFACE FITTING PACKAGE
C A PRODUCT OF PLEASANT VALLEY SOFTWARE
C 8603 ALTUS COVE, AUSTIN, TEXAS 78759, USA
C
C ---------------------------------------------------------------------
C
C THIS SUBROUTINE COMPUTES THE DIAGONAL AND SUPERDIAGONAL
C TERMS OF THE TRIDIAGONAL LINEAR SYSTEM ASSOCIATED WITH
C SPLINE UNDER TENSION INTERPOLATION.
C
C ON INPUT--
C
C SIGMA CONTAINS THE TENSION FACTOR.
C
C DEL CONTAINS THE STEP SIZE.
C
C ON OUTPUT--
C
C SIGMA*DEL*COSH(SIGMA*DEL) - SINH(SIGMA*DEL)
C DIAG = DEL*--------------------------------------------.
C (SIGMA*DEL)**2 * SINH(SIGMA*DEL)
C
C SINH(SIGMA*DEL) - SIGMA*DEL
C SDIAG = DEL*----------------------------------.
C (SIGMA*DEL)**2 * SINH(SIGMA*DEL)
C
C SIGMA AND DEL ARE UNALTERED.
C
C THIS SUBROUTINE REFERENCES PACKAGE MODULE MSSHCH.
C
C-----------------------------------------------------------
C
IF (SIGMA .NE. 0.) GO TO 1
DIAG = DEL/3.
SDIAG = DEL/6.
RETURN
1 SIGDEL = SIGMA*DEL
CALL MSSHCH (SINHM,COSHM,SIGDEL,0)
DENOM = SIGMA*SIGDEL*(1.+SINHM)
DIAG = (COSHM-SINHM)/DENOM
SDIAG = SINHM/DENOM
RETURN
END
| {
"alphanum_fraction": 0.4741784038,
"author": null,
"avg_line_length": 33.8095238095,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "b93a712c659b15e32f935123d452c7f8dd2f73f2",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 58,
"max_forks_repo_forks_event_max_datetime": "2022-03-15T09:13:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-12-14T00:15:22.000Z",
"max_forks_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "tenomoto/ncl",
"max_forks_repo_path": "ncarg2d/src/libncarg/support/mstrms.f",
"max_issues_count": 156,
"max_issues_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T07:02:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-09-22T09:56:48.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "tenomoto/ncl",
"max_issues_repo_path": "ncarg2d/src/libncarg/support/mstrms.f",
"max_line_length": 71,
"max_stars_count": 210,
"max_stars_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "tenomoto/ncl",
"max_stars_repo_path": "ncarg2d/src/libncarg/support/mstrms.f",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T19:15:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-11-24T09:05:08.000Z",
"num_tokens": 546,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2130
} |
[STATEMENT]
lemma COND_terms_hf:
assumes "hf_valid ainfo uinfo hfs hf" and "HVF hf \<in> ik" and "no_oracle ainfo uinfo" and "hf \<in> set hfs"
shows "\<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo' . (ainfo, hfs) \<in> auth_seg2 uinfo')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (ainfo, hfs) \<in> auth_seg2 uinfo')
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
hf_valid ainfo uinfo hfs hf
HVF hf \<in> ik
True
hf \<in> set hfs
goal (1 subgoal):
1. \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (ainfo, hfs) \<in> auth_seg2 uinfo')
[PROOF STEP]
apply(auto 3 4 simp add: hf_valid_invert ik_hfs_simp ik_def dest: ahi_eq)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>PoC_i_expire hfa hfsa PoC_i_expirea. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; maccontents (AHI hfa) hfsa PoC_i_expirea = maccontents (AHI hf) hfs PoC_i_expire; HVF hf = Hash (maccontents (AHI hf) hfs PoC_i_expire); hfa \<in> set hfsa; (Num PoC_i_expirea, hfsa) \<in> auth_seg2 \<epsilon>; HVF hfa = Hash (maccontents (AHI hf) hfs PoC_i_expire)\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
2. \<And>PoC_i_expire. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; HVF hf = Hash (maccontents (AHI hf) hfs PoC_i_expire); Hash (maccontents (AHI hf) hfs PoC_i_expire) \<in> ik_add\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
[PROOF STEP]
using assms(1) assms(2)
[PROOF STATE]
proof (prove)
using this:
hf_valid ainfo uinfo hfs hf
HVF hf \<in> ik
goal (2 subgoals):
1. \<And>PoC_i_expire hfa hfsa PoC_i_expirea. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; maccontents (AHI hfa) hfsa PoC_i_expirea = maccontents (AHI hf) hfs PoC_i_expire; HVF hf = Hash (maccontents (AHI hf) hfs PoC_i_expire); hfa \<in> set hfsa; (Num PoC_i_expirea, hfsa) \<in> auth_seg2 \<epsilon>; HVF hfa = Hash (maccontents (AHI hf) hfs PoC_i_expire)\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
2. \<And>PoC_i_expire. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; HVF hf = Hash (maccontents (AHI hf) hfs PoC_i_expire); Hash (maccontents (AHI hf) hfs PoC_i_expire) \<in> ik_add\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
[PROOF STEP]
apply(auto simp add: maccontents_def)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>PoC_i_expire hfa hfsa. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; HVF hf = Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle>; hfa \<in> set hfsa; (Num PoC_i_expire, hfsa) \<in> auth_seg2 \<epsilon>; HVF hfa = Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle>; hf_valid (Num PoC_i_expire) \<epsilon> hfs hf; Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle> \<in> ik; sntag (AHI hfa) = sntag (AHI hf); fullpath hfsa = fullpath hfs\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
2. \<And>PoC_i_expire. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; HVF hf = Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle>; Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle> \<in> ik_add; hf_valid (Num PoC_i_expire) \<epsilon> hfs hf; Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle> \<in> ik\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
[PROOF STEP]
apply(frule sntag_eq)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>PoC_i_expire hfa hfsa. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; HVF hf = Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle>; hfa \<in> set hfsa; (Num PoC_i_expire, hfsa) \<in> auth_seg2 \<epsilon>; HVF hfa = Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle>; hf_valid (Num PoC_i_expire) \<epsilon> hfs hf; Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle> \<in> ik; sntag (AHI hfa) = sntag (AHI hf); fullpath hfsa = fullpath hfs; AHI hfa = AHI hf\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
2. \<And>PoC_i_expire. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; HVF hf = Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle>; Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle> \<in> ik_add; hf_valid (Num PoC_i_expire) \<epsilon> hfs hf; Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle> \<in> ik\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
[PROOF STEP]
apply(auto simp add: ik_def ik_hfs_simp dest: ik_add_form)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>PoC_i_expire hfa hfsa hfaa hfsaa ainfoa uinfoa. \<lbrakk>hf \<in> set hfs; ainfo = Num PoC_i_expire; uinfo = \<epsilon>; HVF hf = HVF hfaa; hfa \<in> set hfsa; (Num PoC_i_expire, hfsa) \<in> auth_seg2 \<epsilon>; HVF hfa = HVF hfaa; hf_valid (Num PoC_i_expire) \<epsilon> hfs hf; fullpath hfsa = fullpath hfs; AHI hfa = AHI hf; Mac[Mac[sntag (AHI hf)] \<langle>fullpath hfs, Num PoC_i_expire\<rangle>] \<langle>Num 0, Hash (fullpath hfs)\<rangle> = HVF hfaa; hfaa \<in> set hfsaa; (ainfoa, hfsaa) \<in> auth_seg2 uinfoa; hf_valid ainfoa uinfoa hfsaa hfaa\<rbrakk> \<Longrightarrow> \<exists>hfs. hf \<in> set hfs \<and> (\<exists>uinfo'. (Num PoC_i_expire, hfs) \<in> auth_seg2 uinfo')
[PROOF STEP]
by (metis info_hvf(1) info_hvf(2)) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "IsaNet_instances_ICING",
"hexsha": null,
"include": null,
"lang": null,
"length": 7,
"llama_tokens": 2984,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
Require Import List Arith.
Import ListNotations.
Require Import core.
Notation eqdec := eq_nat_dec.
Require Import Lia.
Module renSubst.
(* [upT vs] pushes a fresh binder slot (gap 0) on the front of a trace;
   used to index terms under one additional lambda. *)
Notation upT vs := (0 :: vs).
(* Inductive fin : list nat -> Type := *)
(* | finO : fin [0] *)
(* | finS : forall l, fin l -> fin (List.map S l) *)
(* | finU : forall l, fin l -> fin (upT l). *)
(* Arguments finS {l}. *)
(* Arguments finU {l}. *)
(* should be able to represent any number in an ordered list *)
(* wait that does not really work *)
(* Check finO. *)
(* Check finS finO : fin [1]. *)
(* Check finU (finS finO) : fin [0;2]. *)
(* A "traced" finite type: [fin l] selects one position in the gap list [l].
   [finH n] stops at the head (denoting [n]); [finT n x] skips past the head
   (a jump of [n]) and selects a later position in the tail. *)
Inductive fin : list nat -> Type :=
| finH : forall n l, fin (n::l) (* the position in the list *)
| finT : forall n l, fin l -> fin (n::l). (* how big the jump is to the next number *)
Arguments finH _ {l}.
Arguments finT _ {l}.
(* Interpret a [fin l] as the natural number it denotes: the sum of all
   jumps taken, plus one per [finT] step (gaps are stored off-by-one). *)
Fixpoint denote {l: list nat} (x: fin l) : nat :=
match x with
| finH n => n
| finT n x' => n + S (denote x')
end.
(* flatten [0;1;2] -> [0;0;0]
* flatten [0;2;3] -> [0;1;0]
* flatten [1;2;3] -> [1;0;0]
* flatten [0;3;5] -> [0;2;1] *)
(* Convert an increasing list of absolute indices into a list of gaps:
   each output entry is the distance of an index from (the successor of)
   its predecessor, e.g. flatten [0;2;3] 0 = [0;1;0]. *)
Fixpoint flatten (positions: list nat) (base: nat) : list nat :=
  match positions with
  | [] => []
  | p :: rest => (p - base) :: flatten rest (S p)
  end.
(* Entry point: gaps measured from 0. *)
Definition fl (positions: list nat) : list nat := flatten positions 0.
(* Sanity checks: gap encodings of a few index lists (see comment above fl). *)
Compute (fl [0]).
Compute (fl [0;1;2]).
Compute (fl [0;2;3]).
Compute (fl [1;2;3]).
Compute (fl [0;3;5]).
(* Example inhabitants of [fin] over the trace fl [0;2;3] = [0;1;0];
   the Computes below confirm the claimed denotations 0, 0, 2, 3. *)
Definition f0 : fin (fl [0]) := finH 0.
Definition f1 : fin (fl [0;2;3]) := finH 0. (* denotes 0 *)
Definition f2 : fin (fl [0;2;3]) := finT 0 (finH 1). (* denotes 2 *)
Definition f3 : fin (fl [0;2;3]) := finT 0 (finT 1 (finH 0)). (* denotes 3 *)
Compute (denote f0).
Compute (denote f1).
Compute (denote f2).
Compute (denote f3).
(* Untyped lambda terms whose free variables are indexed by the trace [vs];
   [lam] binds one variable, so its body lives over [upT vs]. *)
Inductive tm (vs : list nat) : Type :=
| var_tm : fin vs -> tm vs
| app : tm vs -> tm vs -> tm vs
| lam : tm (upT vs) -> tm vs.
Arguments var_tm {vs}.
Arguments app {vs}.
Arguments lam {vs}.
(* Congruence of [app] in both arguments (Autosubst-style helper). *)
Lemma congr_app {vs : list nat} {s0 : tm vs} {s1 : tm vs} {t0 : tm vs}
{t1 : tm vs} (H0 : s0 = t0) (H1 : s1 = t1) :
app s0 s1 = app t0 t1.
Proof.
exact (eq_trans (eq_trans eq_refl (ap (fun x => app x s1) H0))
(ap (fun x => app t0 x) H1)).
Qed.
(* Congruence of [lam] in its body. *)
Lemma congr_lam {vs : list nat} {s0 : tm (upT vs)} {t0 : tm (upT vs)}
(H0 : s0 = t0) : lam s0 = lam t0.
Proof.
exact (eq_trans eq_refl (ap (fun x => lam x) H0)).
Qed.
(* Inductive finList3 : list nat -> Type := *)
(* list0 : forall l, finList3 (0::l) *)
(* | listC : forall (v w:nat) (vs: list nat), finList3 (v::vs) -> finList3 (S v :: List.map S vs) *)
(* | listT : forall v vs, finList3 vs -> finList3 (v::vs). *)
Open Scope type.
(* Stream cons: extend a function on [fin vs] to [fin (upT vs)] by sending
   the new zero index to [x].  Defined by tactics because matching on
   [fin (upT vs)] directly needs the equation [vs' = upT vs] to be
   generalized first (dependent inversion). *)
Definition scons {X : Type} {vs : list nat} (x : X) (f : fin vs -> X) (m : fin (upT vs)) : X.
Proof.
revert m.
enough (H: forall vs' (Heq: vs' = upT vs), fin vs' -> X).
- intros m. exact (H (upT vs) eq_refl m).
- intros vs' Heq m.
destruct m.
+ exact x.
+ injection Heq.
intros -> _.
exact (f m).
Defined.
(* Note: f1/f2/f3 live in fin [0;1;0] and upT (fl [1;2]) = [0;1;0], so the
   types line up; the Computes exercise both branches of scons. *)
Compute (@scons nat (fl [1;2]) 1 (fun _ => 6) f1).
Compute (@scons nat (fl [1;2]) 1 (fun _ => 6) f2).
Compute (@scons nat (fl [1;2]) 1 (fun _ => 6) f3).
(* The freshly bound variable (index 0) and the weakening injection. *)
Definition var_zero {vs: list nat} : fin (upT vs) := finH 0.
Definition shift {vs: list nat} : fin vs -> fin (upT vs) := finT 0.
(* Lift a renaming under one binder: 0 |-> 0, (S x) |-> S (xi x). *)
Definition up_ren_traced {vs0 vs1 : list nat} (xi: fin vs0 -> fin vs1) : fin (upT vs0) -> fin (upT vs1) :=
scons var_zero (funcomp shift xi).
(* Apply a renaming [xi_tm] to a term, lifting it under each lambda. *)
Fixpoint ren_tm {vs0 vs1: list nat} (xi_tm : fin vs0 -> fin vs1) (s: tm vs0) {struct s} : tm vs1.
Proof.
refine (match s with
| var_tm x => _
| app s0 s1 => _
| lam s0 => _
end).
- exact (var_tm (xi_tm x)).
- exact (app (ren_tm _ _ xi_tm s0) (ren_tm _ _ xi_tm s1)).
- exact (lam (ren_tm _ _ (up_ren_traced xi_tm) s0)).
Defined.
(* Expect "Closed under the global context" (no axioms). *)
Print Assumptions ren_tm.
(* Lift a substitution under one binder: 0 |-> var 0, (S x) |-> shift (sigma x).
   (Stated as a Lemma but used computationally — it ends in Defined.) *)
Lemma up_tm_tm {vs0 : list nat} {vs1 : list nat} (sigma : fin vs0 -> tm vs1) :
fin (upT vs0) -> tm (upT vs1).
Proof.
exact (scons (@var_tm (upT vs1) var_zero) (funcomp (ren_tm shift) sigma)).
Defined.
(* Capture-avoiding parallel substitution. *)
Fixpoint subst_tm {vs0 : list nat} {vs1 : list nat} (sigma_tm : fin vs0 -> tm vs1) (s : tm vs0) {struct s}
: tm vs1.
Proof.
refine (match s with
| var_tm x => _
| app s0 s1 => _
| lam s0 => _
end).
- exact (sigma_tm x).
- exact (app (subst_tm _ _ sigma_tm s0) (subst_tm _ _ sigma_tm s1)).
- exact (lam (subst_tm _ _ (up_tm_tm sigma_tm) s0)).
Defined.
(* Expect "Closed under the global context" (no axioms). *)
Print Assumptions subst_tm.
(* Worked example term over one free variable (w):
   lambda x y . x w (lambda z . w z y), i.e. in de Bruijn form
   lambda . lambda . 1 2 (lambda . 3 0 1).
   Built interactively since the fin indices over traces need explicit help. *)
(* lambda x y . x w (lambda z . w z y) *)
(* lambda . lambda . 1 2 (lambda . 3 0 1) *)
Definition testtm : tm [1].
Proof.
refine (lam (lam (app (app (* x w *) _ _)
(lam (app (app (* w z *) _ _) (* y *) _))))).
- (* x *)
cbv.
(* apparently here Coq's unification is too weak *)
refine (@var_tm [0;0;1] var_zero).
- (* outer w *)
refine (@var_tm [0;0;1] (shift var_zero)).
- (* inner w *)
refine (@var_tm [0;0;0;1] (shift (shift var_zero))).
- (* z *)
exact (@var_tm (fl [0;1;2;4]) var_zero).
- (* y *)
refine (@var_tm (fl [0;1;2;4]) (shift (shift var_zero))).
Defined.
Print Assumptions testtm.
Print testtm.
(* Smoke tests: renaming with shift and the identity substitution. *)
Compute (ren_tm shift testtm).
Compute (subst_tm var_tm testtm).
(* Applicative action of a function on an equality proof.
   NOTE(review): [ap] was already used above (congr_app), so a prior [ap]
   presumably comes from the [core] import; this definition shadows it —
   confirm both agree. *)
Definition ap {X Y} (f : X -> Y) {x y : X} (p : x = y) : f x = f y :=
match p with eq_refl => eq_refl end.
(* Transport along an equality. *)
Definition cast {X} {x y: X} {p: X -> Type}
: x = y -> p x -> p y
:= fun e a => match e with eq_refl => a end.
(* Lemma In_0 : forall vs (H: InN 0 (upT vs)), H = InNCons. *)
Require Import Eqdep_dec.
(* list nat has decidable equality, hence UIP holds on it (Eqdep_dec). *)
Module DecList <: DecidableType.
Definition U := list nat.
Definition eq_dec (x y: list nat) :=
list_eq_dec Nat.eq_dec x y.
End DecList.
Module UIPList := DecidableEqDep DecList.
(* Uniqueness of reflexivity proofs on list nat; used to kill the
   [Heq : vs' = upT vs] equations produced by inverting fin. *)
Lemma UIP_list (l : list nat) (e: l = l) : e = eq_refl.
Proof.
apply (UIPList.UIP_refl l e).
Qed.
(* Lemma up_helper (vs: list nat) (F: forall vs, fin vs -> Prop) *)
(* (H0: F (upT vs) (finH 0)) *)
(* (H1: forall x, F (upT vs) (finT 0 x)) *)
(* : forall (x: fin (upT vs)), F _ x. *)
(* Proof. *)
(* enough (He: forall vs' (Heq: vs' = upT vs) (x: fin vs'), F vs' x). *)
(* - intros x. apply (He (upT vs) eq_refl x). *)
(* - intros vs' Heq x. *)
(* destruct x. *)
(* + injection Heq. *)
(* intros -> ->. *)
(* apply H0. *)
(* + injection Heq. *)
(* intros -> ->. *)
(* apply H1. *)
(* Qed. *)
(* Lifting the identity-like substitution stays identity-like.  The proof
   generalizes the trace equation and discharges it with UIP_list so that
   [scons] reduces in each branch. *)
Lemma upId_tm_tm {vs : list nat} (sigma : fin vs -> tm vs)
(Eq : forall x, sigma x = var_tm x) :
forall x, up_tm_tm sigma x = @var_tm (upT vs) x.
Proof.
unfold up_tm_tm.
enough (He: forall vs' (Heq: vs' = upT vs) (x : fin vs'), scons (var_tm var_zero) (funcomp (ren_tm shift) sigma) (cast Heq x) = var_tm (cast Heq x)).
- apply (He (upT vs) eq_refl).
- intros vs' Heq x.
destruct x.
+ injection Heq.
intros -> ->.
rewrite (UIP_list _ Heq).
reflexivity.
+ injection Heq.
intros -> ->.
rewrite (UIP_list _ Heq).
cbn.
exact (ap (ren_tm shift) (Eq x)).
Qed.
(* Substituting with a pointwise-identity substitution is the identity. *)
Fixpoint idSubst_tm {vs : list nat} (sigma_tm : fin vs -> tm vs)
(Eq_tm : forall x, sigma_tm x = var_tm x) (s : tm vs) {struct s} :
subst_tm sigma_tm s = s :=
match s as s0 return subst_tm sigma_tm s0 = s0
with
| var_tm s0 => Eq_tm s0
| app s0 s1 => congr_app (idSubst_tm sigma_tm Eq_tm s0) (idSubst_tm sigma_tm Eq_tm s1)
| lam s0 => congr_lam (idSubst_tm (up_tm_tm sigma_tm) (upId_tm_tm sigma_tm Eq_tm) s0)
end.
(* NOTE(review): [Check 1] succeeds, so [Fail Check 1.] itself errors —
   compilation stops here.  Everything from this point to [End renSubst]
   looks like Autosubst-generated output for the plain nat-indexed
   representation, kept as a porting reference: it redefines names already
   defined above (up_tm_tm, subst_tm, upId_tm_tm, idSubst_tm) and uses
   helpers never defined in this file (upRen_tm_tm, scons_p, shift_p,
   zero_p, ...) — confirm it is intentionally dead. *)
Fail Check 1.
(* Lemma upRen_tm_tm {m : nat} {n : nat} (xi : fin m -> fin n) : *)
(* fin (S m) -> fin (S n). *)
(* Proof. *)
(* exact (up_ren xi). *)
(* Defined. *)
(* Fixpoint ren_tm {m_tm : nat} {n_tm : nat} (xi_tm : fin m_tm -> fin n_tm) *)
(* (s : tm m_tm) {struct s} : tm n_tm := *)
(* match s with *)
(* | var_tm _ s0 => var_tm n_tm (xi_tm s0) *)
(* | app _ s0 s1 => app n_tm (ren_tm xi_tm s0) (ren_tm xi_tm s1) *)
(* | lam _ s0 => lam n_tm (ren_tm (upRen_tm_tm xi_tm) s0) *)
(* end. *)
Lemma up_tm_tm {m : nat} {n_tm : nat} (sigma : fin m -> tm n_tm) :
fin (S m) -> tm (S n_tm).
Proof.
exact (scons (var_tm (S n_tm) var_zero) (funcomp (ren_tm shift) sigma)).
Defined.
Lemma up_list_tm_tm (p : nat) {m : nat} {n_tm : nat}
(sigma : fin m -> tm n_tm) : fin (plus p m) -> tm (plus p n_tm).
Proof.
exact (scons_p p (funcomp (var_tm (plus p n_tm)) (zero_p p))
(funcomp (ren_tm (shift_p p)) sigma)).
Defined.
Fixpoint subst_tm {m_tm : nat} {n_tm : nat} (sigma_tm : fin m_tm -> tm n_tm)
(s : tm m_tm) {struct s} : tm n_tm :=
match s with
| var_tm _ s0 => sigma_tm s0
| app _ s0 s1 => app n_tm (subst_tm sigma_tm s0) (subst_tm sigma_tm s1)
| lam _ s0 => lam n_tm (subst_tm (up_tm_tm sigma_tm) s0)
end.
Lemma upId_tm_tm {m_tm : nat} (sigma : fin m_tm -> tm m_tm)
(Eq : forall x, sigma x = var_tm m_tm x) :
forall x, up_tm_tm sigma x = var_tm (S m_tm) x.
Proof.
exact (fun n =>
match n with
| Some fin_n => ap (ren_tm shift) (Eq fin_n)
| None => eq_refl
end).
Qed.
Lemma upId_list_tm_tm {p : nat} {m_tm : nat} (sigma : fin m_tm -> tm m_tm)
(Eq : forall x, sigma x = var_tm m_tm x) :
forall x, up_list_tm_tm p sigma x = var_tm (plus p m_tm) x.
Proof.
exact (fun n =>
scons_p_eta (var_tm (plus p m_tm))
(fun n => ap (ren_tm (shift_p p)) (Eq n)) (fun n => eq_refl)).
Qed.
Fixpoint idSubst_tm {m_tm : nat} (sigma_tm : fin m_tm -> tm m_tm)
(Eq_tm : forall x, sigma_tm x = var_tm m_tm x) (s : tm m_tm) {struct s} :
subst_tm sigma_tm s = s :=
match s with
| var_tm _ s0 => Eq_tm s0
| app _ s0 s1 =>
congr_app (idSubst_tm sigma_tm Eq_tm s0) (idSubst_tm sigma_tm Eq_tm s1)
| lam _ s0 =>
congr_lam (idSubst_tm (up_tm_tm sigma_tm) (upId_tm_tm _ Eq_tm) s0)
end.
(* Extensionality lemmas (legacy nat-indexed region, see note above):
   pointwise-equal renamings/substitutions act identically on terms. *)
Lemma upExtRen_tm_tm {m : nat} {n : nat} (xi : fin m -> fin n)
(zeta : fin m -> fin n) (Eq : forall x, xi x = zeta x) :
forall x, upRen_tm_tm xi x = upRen_tm_tm zeta x.
Proof.
exact (fun n =>
match n with
| Some fin_n => ap shift (Eq fin_n)
| None => eq_refl
end).
Qed.
Lemma upExtRen_list_tm_tm {p : nat} {m : nat} {n : nat} (xi : fin m -> fin n)
(zeta : fin m -> fin n) (Eq : forall x, xi x = zeta x) :
forall x, upRen_list_tm_tm p xi x = upRen_list_tm_tm p zeta x.
Proof.
exact (fun n =>
scons_p_congr (fun n => eq_refl) (fun n => ap (shift_p p) (Eq n))).
Qed.
Fixpoint extRen_tm {m_tm : nat} {n_tm : nat} (xi_tm : fin m_tm -> fin n_tm)
(zeta_tm : fin m_tm -> fin n_tm) (Eq_tm : forall x, xi_tm x = zeta_tm x)
(s : tm m_tm) {struct s} : ren_tm xi_tm s = ren_tm zeta_tm s :=
match s with
| var_tm _ s0 => ap (var_tm n_tm) (Eq_tm s0)
| app _ s0 s1 =>
congr_app (extRen_tm xi_tm zeta_tm Eq_tm s0)
(extRen_tm xi_tm zeta_tm Eq_tm s1)
| lam _ s0 =>
congr_lam
(extRen_tm (upRen_tm_tm xi_tm) (upRen_tm_tm zeta_tm)
(upExtRen_tm_tm _ _ Eq_tm) s0)
end.
Lemma upExt_tm_tm {m : nat} {n_tm : nat} (sigma : fin m -> tm n_tm)
(tau : fin m -> tm n_tm) (Eq : forall x, sigma x = tau x) :
forall x, up_tm_tm sigma x = up_tm_tm tau x.
Proof.
exact (fun n =>
match n with
| Some fin_n => ap (ren_tm shift) (Eq fin_n)
| None => eq_refl
end).
Qed.
Lemma upExt_list_tm_tm {p : nat} {m : nat} {n_tm : nat}
(sigma : fin m -> tm n_tm) (tau : fin m -> tm n_tm)
(Eq : forall x, sigma x = tau x) :
forall x, up_list_tm_tm p sigma x = up_list_tm_tm p tau x.
Proof.
exact (fun n =>
scons_p_congr (fun n => eq_refl)
(fun n => ap (ren_tm (shift_p p)) (Eq n))).
Qed.
Fixpoint ext_tm {m_tm : nat} {n_tm : nat} (sigma_tm : fin m_tm -> tm n_tm)
(tau_tm : fin m_tm -> tm n_tm) (Eq_tm : forall x, sigma_tm x = tau_tm x)
(s : tm m_tm) {struct s} : subst_tm sigma_tm s = subst_tm tau_tm s :=
match s with
| var_tm _ s0 => Eq_tm s0
| app _ s0 s1 =>
congr_app (ext_tm sigma_tm tau_tm Eq_tm s0)
(ext_tm sigma_tm tau_tm Eq_tm s1)
| lam _ s0 =>
congr_lam
(ext_tm (up_tm_tm sigma_tm) (up_tm_tm tau_tm) (upExt_tm_tm _ _ Eq_tm)
s0)
end.
(* Composition lemmas, ren-after-ren and subst-after-ren
   (legacy nat-indexed region, see note above). *)
Lemma up_ren_ren_tm_tm {k : nat} {l : nat} {m : nat} (xi : fin k -> fin l)
(zeta : fin l -> fin m) (rho : fin k -> fin m)
(Eq : forall x, funcomp zeta xi x = rho x) :
forall x, funcomp (upRen_tm_tm zeta) (upRen_tm_tm xi) x = upRen_tm_tm rho x.
Proof.
exact (up_ren_ren xi zeta rho Eq).
Qed.
Lemma up_ren_ren_list_tm_tm {p : nat} {k : nat} {l : nat} {m : nat}
(xi : fin k -> fin l) (zeta : fin l -> fin m) (rho : fin k -> fin m)
(Eq : forall x, funcomp zeta xi x = rho x) :
forall x,
funcomp (upRen_list_tm_tm p zeta) (upRen_list_tm_tm p xi) x =
upRen_list_tm_tm p rho x.
Proof.
exact (up_ren_ren_p Eq).
Qed.
Fixpoint compRenRen_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
(xi_tm : fin m_tm -> fin k_tm) (zeta_tm : fin k_tm -> fin l_tm)
(rho_tm : fin m_tm -> fin l_tm)
(Eq_tm : forall x, funcomp zeta_tm xi_tm x = rho_tm x) (s : tm m_tm) {struct
s} : ren_tm zeta_tm (ren_tm xi_tm s) = ren_tm rho_tm s :=
match s with
| var_tm _ s0 => ap (var_tm l_tm) (Eq_tm s0)
| app _ s0 s1 =>
congr_app (compRenRen_tm xi_tm zeta_tm rho_tm Eq_tm s0)
(compRenRen_tm xi_tm zeta_tm rho_tm Eq_tm s1)
| lam _ s0 =>
congr_lam
(compRenRen_tm (upRen_tm_tm xi_tm) (upRen_tm_tm zeta_tm)
(upRen_tm_tm rho_tm) (up_ren_ren _ _ _ Eq_tm) s0)
end.
Lemma up_ren_subst_tm_tm {k : nat} {l : nat} {m_tm : nat}
(xi : fin k -> fin l) (tau : fin l -> tm m_tm) (theta : fin k -> tm m_tm)
(Eq : forall x, funcomp tau xi x = theta x) :
forall x, funcomp (up_tm_tm tau) (upRen_tm_tm xi) x = up_tm_tm theta x.
Proof.
exact (fun n =>
match n with
| Some fin_n => ap (ren_tm shift) (Eq fin_n)
| None => eq_refl
end).
Qed.
Lemma up_ren_subst_list_tm_tm {p : nat} {k : nat} {l : nat} {m_tm : nat}
(xi : fin k -> fin l) (tau : fin l -> tm m_tm) (theta : fin k -> tm m_tm)
(Eq : forall x, funcomp tau xi x = theta x) :
forall x,
funcomp (up_list_tm_tm p tau) (upRen_list_tm_tm p xi) x =
up_list_tm_tm p theta x.
Proof.
exact (fun n =>
eq_trans (scons_p_comp' _ _ _ n)
(scons_p_congr (fun z => scons_p_head' _ _ z)
(fun z =>
eq_trans (scons_p_tail' _ _ (xi z))
(ap (ren_tm (shift_p p)) (Eq z))))).
Qed.
Fixpoint compRenSubst_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
(xi_tm : fin m_tm -> fin k_tm) (tau_tm : fin k_tm -> tm l_tm)
(theta_tm : fin m_tm -> tm l_tm)
(Eq_tm : forall x, funcomp tau_tm xi_tm x = theta_tm x) (s : tm m_tm) {struct
s} : subst_tm tau_tm (ren_tm xi_tm s) = subst_tm theta_tm s :=
match s with
| var_tm _ s0 => Eq_tm s0
| app _ s0 s1 =>
congr_app (compRenSubst_tm xi_tm tau_tm theta_tm Eq_tm s0)
(compRenSubst_tm xi_tm tau_tm theta_tm Eq_tm s1)
| lam _ s0 =>
congr_lam
(compRenSubst_tm (upRen_tm_tm xi_tm) (up_tm_tm tau_tm)
(up_tm_tm theta_tm) (up_ren_subst_tm_tm _ _ _ Eq_tm) s0)
end.
(* Composition lemmas, ren-after-subst (legacy nat-indexed region). *)
Lemma up_subst_ren_tm_tm {k : nat} {l_tm : nat} {m_tm : nat}
(sigma : fin k -> tm l_tm) (zeta_tm : fin l_tm -> fin m_tm)
(theta : fin k -> tm m_tm)
(Eq : forall x, funcomp (ren_tm zeta_tm) sigma x = theta x) :
forall x,
funcomp (ren_tm (upRen_tm_tm zeta_tm)) (up_tm_tm sigma) x =
up_tm_tm theta x.
Proof.
exact (fun n =>
match n with
| Some fin_n =>
eq_trans
(compRenRen_tm shift (upRen_tm_tm zeta_tm)
(funcomp shift zeta_tm) (fun x => eq_refl) (sigma fin_n))
(eq_trans
(eq_sym
(compRenRen_tm zeta_tm shift (funcomp shift zeta_tm)
(fun x => eq_refl) (sigma fin_n)))
(ap (ren_tm shift) (Eq fin_n)))
| None => eq_refl
end).
Qed.
Lemma up_subst_ren_list_tm_tm {p : nat} {k : nat} {l_tm : nat} {m_tm : nat}
(sigma : fin k -> tm l_tm) (zeta_tm : fin l_tm -> fin m_tm)
(theta : fin k -> tm m_tm)
(Eq : forall x, funcomp (ren_tm zeta_tm) sigma x = theta x) :
forall x,
funcomp (ren_tm (upRen_list_tm_tm p zeta_tm)) (up_list_tm_tm p sigma) x =
up_list_tm_tm p theta x.
Proof.
exact (fun n =>
eq_trans (scons_p_comp' _ _ _ n)
(scons_p_congr
(fun x => ap (var_tm (plus p m_tm)) (scons_p_head' _ _ x))
(fun n =>
eq_trans
(compRenRen_tm (shift_p p) (upRen_list_tm_tm p zeta_tm)
(funcomp (shift_p p) zeta_tm)
(fun x => scons_p_tail' _ _ x) (sigma n))
(eq_trans
(eq_sym
(compRenRen_tm zeta_tm (shift_p p)
(funcomp (shift_p p) zeta_tm) (fun x => eq_refl)
(sigma n))) (ap (ren_tm (shift_p p)) (Eq n)))))).
Qed.
Fixpoint compSubstRen_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
(sigma_tm : fin m_tm -> tm k_tm) (zeta_tm : fin k_tm -> fin l_tm)
(theta_tm : fin m_tm -> tm l_tm)
(Eq_tm : forall x, funcomp (ren_tm zeta_tm) sigma_tm x = theta_tm x)
(s : tm m_tm) {struct s} :
ren_tm zeta_tm (subst_tm sigma_tm s) = subst_tm theta_tm s :=
match s with
| var_tm _ s0 => Eq_tm s0
| app _ s0 s1 =>
congr_app (compSubstRen_tm sigma_tm zeta_tm theta_tm Eq_tm s0)
(compSubstRen_tm sigma_tm zeta_tm theta_tm Eq_tm s1)
| lam _ s0 =>
congr_lam
(compSubstRen_tm (up_tm_tm sigma_tm) (upRen_tm_tm zeta_tm)
(up_tm_tm theta_tm) (up_subst_ren_tm_tm _ _ _ Eq_tm) s0)
end.
(* Composition lemmas, subst-after-subst (legacy nat-indexed region). *)
Lemma up_subst_subst_tm_tm {k : nat} {l_tm : nat} {m_tm : nat}
(sigma : fin k -> tm l_tm) (tau_tm : fin l_tm -> tm m_tm)
(theta : fin k -> tm m_tm)
(Eq : forall x, funcomp (subst_tm tau_tm) sigma x = theta x) :
forall x,
funcomp (subst_tm (up_tm_tm tau_tm)) (up_tm_tm sigma) x = up_tm_tm theta x.
Proof.
exact (fun n =>
match n with
| Some fin_n =>
eq_trans
(compRenSubst_tm shift (up_tm_tm tau_tm)
(funcomp (up_tm_tm tau_tm) shift) (fun x => eq_refl)
(sigma fin_n))
(eq_trans
(eq_sym
(compSubstRen_tm tau_tm shift
(funcomp (ren_tm shift) tau_tm) (fun x => eq_refl)
(sigma fin_n))) (ap (ren_tm shift) (Eq fin_n)))
| None => eq_refl
end).
Qed.
Lemma up_subst_subst_list_tm_tm {p : nat} {k : nat} {l_tm : nat} {m_tm : nat}
(sigma : fin k -> tm l_tm) (tau_tm : fin l_tm -> tm m_tm)
(theta : fin k -> tm m_tm)
(Eq : forall x, funcomp (subst_tm tau_tm) sigma x = theta x) :
forall x,
funcomp (subst_tm (up_list_tm_tm p tau_tm)) (up_list_tm_tm p sigma) x =
up_list_tm_tm p theta x.
Proof.
exact (fun n =>
eq_trans
(scons_p_comp' (funcomp (var_tm (plus p l_tm)) (zero_p p)) _ _ n)
(scons_p_congr
(fun x => scons_p_head' _ (fun z => ren_tm (shift_p p) _) x)
(fun n =>
eq_trans
(compRenSubst_tm (shift_p p) (up_list_tm_tm p tau_tm)
(funcomp (up_list_tm_tm p tau_tm) (shift_p p))
(fun x => eq_refl) (sigma n))
(eq_trans
(eq_sym
(compSubstRen_tm tau_tm (shift_p p) _
(fun x => eq_sym (scons_p_tail' _ _ x)) (sigma n)))
(ap (ren_tm (shift_p p)) (Eq n)))))).
Qed.
Fixpoint compSubstSubst_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
(sigma_tm : fin m_tm -> tm k_tm) (tau_tm : fin k_tm -> tm l_tm)
(theta_tm : fin m_tm -> tm l_tm)
(Eq_tm : forall x, funcomp (subst_tm tau_tm) sigma_tm x = theta_tm x)
(s : tm m_tm) {struct s} :
subst_tm tau_tm (subst_tm sigma_tm s) = subst_tm theta_tm s :=
match s with
| var_tm _ s0 => Eq_tm s0
| app _ s0 s1 =>
congr_app (compSubstSubst_tm sigma_tm tau_tm theta_tm Eq_tm s0)
(compSubstSubst_tm sigma_tm tau_tm theta_tm Eq_tm s1)
| lam _ s0 =>
congr_lam
(compSubstSubst_tm (up_tm_tm sigma_tm) (up_tm_tm tau_tm)
(up_tm_tm theta_tm) (up_subst_subst_tm_tm _ _ _ Eq_tm) s0)
end.
(* Renaming coincides with substitution by variables
   (legacy nat-indexed region). *)
Lemma rinstInst_up_tm_tm {m : nat} {n_tm : nat} (xi : fin m -> fin n_tm)
(sigma : fin m -> tm n_tm)
(Eq : forall x, funcomp (var_tm n_tm) xi x = sigma x) :
forall x, funcomp (var_tm (S n_tm)) (upRen_tm_tm xi) x = up_tm_tm sigma x.
Proof.
exact (fun n =>
match n with
| Some fin_n => ap (ren_tm shift) (Eq fin_n)
| None => eq_refl
end).
Qed.
Lemma rinstInst_up_list_tm_tm {p : nat} {m : nat} {n_tm : nat}
(xi : fin m -> fin n_tm) (sigma : fin m -> tm n_tm)
(Eq : forall x, funcomp (var_tm n_tm) xi x = sigma x) :
forall x,
funcomp (var_tm (plus p n_tm)) (upRen_list_tm_tm p xi) x =
up_list_tm_tm p sigma x.
Proof.
exact (fun n =>
eq_trans (scons_p_comp' _ _ (var_tm (plus p n_tm)) n)
(scons_p_congr (fun z => eq_refl)
(fun n => ap (ren_tm (shift_p p)) (Eq n)))).
Qed.
Fixpoint rinst_inst_tm {m_tm : nat} {n_tm : nat}
(xi_tm : fin m_tm -> fin n_tm) (sigma_tm : fin m_tm -> tm n_tm)
(Eq_tm : forall x, funcomp (var_tm n_tm) xi_tm x = sigma_tm x) (s : tm m_tm)
{struct s} : ren_tm xi_tm s = subst_tm sigma_tm s :=
match s with
| var_tm _ s0 => Eq_tm s0
| app _ s0 s1 =>
congr_app (rinst_inst_tm xi_tm sigma_tm Eq_tm s0)
(rinst_inst_tm xi_tm sigma_tm Eq_tm s1)
| lam _ s0 =>
congr_lam
(rinst_inst_tm (upRen_tm_tm xi_tm) (up_tm_tm sigma_tm)
(rinstInst_up_tm_tm _ _ Eq_tm) s0)
end.
(* Corollaries of the composition lemmas, plus pointwise variants used by
   setoid rewriting in asimpl (legacy nat-indexed region). *)
Lemma renRen_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
(xi_tm : fin m_tm -> fin k_tm) (zeta_tm : fin k_tm -> fin l_tm)
(s : tm m_tm) :
ren_tm zeta_tm (ren_tm xi_tm s) = ren_tm (funcomp zeta_tm xi_tm) s.
Proof.
exact (compRenRen_tm xi_tm zeta_tm _ (fun n => eq_refl) s).
Qed.
Lemma renRen'_tm_pointwise {k_tm : nat} {l_tm : nat} {m_tm : nat}
(xi_tm : fin m_tm -> fin k_tm) (zeta_tm : fin k_tm -> fin l_tm) :
pointwise_relation _ eq (funcomp (ren_tm zeta_tm) (ren_tm xi_tm))
(ren_tm (funcomp zeta_tm xi_tm)).
Proof.
exact (fun s => compRenRen_tm xi_tm zeta_tm _ (fun n => eq_refl) s).
Qed.
Lemma substRen_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
(sigma_tm : fin m_tm -> tm k_tm) (zeta_tm : fin k_tm -> fin l_tm)
(s : tm m_tm) :
ren_tm zeta_tm (subst_tm sigma_tm s) =
subst_tm (funcomp (ren_tm zeta_tm) sigma_tm) s.
Proof.
exact (compSubstRen_tm sigma_tm zeta_tm _ (fun n => eq_refl) s).
Qed.
Lemma substRen_tm_pointwise {k_tm : nat} {l_tm : nat} {m_tm : nat}
(sigma_tm : fin m_tm -> tm k_tm) (zeta_tm : fin k_tm -> fin l_tm) :
pointwise_relation _ eq (funcomp (ren_tm zeta_tm) (subst_tm sigma_tm))
(subst_tm (funcomp (ren_tm zeta_tm) sigma_tm)).
Proof.
exact (fun s => compSubstRen_tm sigma_tm zeta_tm _ (fun n => eq_refl) s).
Qed.
Lemma renSubst_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
(xi_tm : fin m_tm -> fin k_tm) (tau_tm : fin k_tm -> tm l_tm) (s : tm m_tm)
: subst_tm tau_tm (ren_tm xi_tm s) = subst_tm (funcomp tau_tm xi_tm) s.
Proof.
exact (compRenSubst_tm xi_tm tau_tm _ (fun n => eq_refl) s).
Qed.
Lemma renSubst_tm_pointwise {k_tm : nat} {l_tm : nat} {m_tm : nat}
(xi_tm : fin m_tm -> fin k_tm) (tau_tm : fin k_tm -> tm l_tm) :
pointwise_relation _ eq (funcomp (subst_tm tau_tm) (ren_tm xi_tm))
(subst_tm (funcomp tau_tm xi_tm)).
Proof.
exact (fun s => compRenSubst_tm xi_tm tau_tm _ (fun n => eq_refl) s).
Qed.
Lemma substSubst_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
(sigma_tm : fin m_tm -> tm k_tm) (tau_tm : fin k_tm -> tm l_tm)
(s : tm m_tm) :
subst_tm tau_tm (subst_tm sigma_tm s) =
subst_tm (funcomp (subst_tm tau_tm) sigma_tm) s.
Proof.
exact (compSubstSubst_tm sigma_tm tau_tm _ (fun n => eq_refl) s).
Qed.
Lemma substSubst_tm_pointwise {k_tm : nat} {l_tm : nat} {m_tm : nat}
(sigma_tm : fin m_tm -> tm k_tm) (tau_tm : fin k_tm -> tm l_tm) :
pointwise_relation _ eq (funcomp (subst_tm tau_tm) (subst_tm sigma_tm))
(subst_tm (funcomp (subst_tm tau_tm) sigma_tm)).
Proof.
exact (fun s => compSubstSubst_tm sigma_tm tau_tm _ (fun n => eq_refl) s).
Qed.
Lemma rinstInst'_tm {m_tm : nat} {n_tm : nat} (xi_tm : fin m_tm -> fin n_tm)
(s : tm m_tm) : ren_tm xi_tm s = subst_tm (funcomp (var_tm n_tm) xi_tm) s.
Proof.
exact (rinst_inst_tm xi_tm _ (fun n => eq_refl) s).
Qed.
Lemma rinstInst'_tm_pointwise {m_tm : nat} {n_tm : nat}
(xi_tm : fin m_tm -> fin n_tm) :
pointwise_relation _ eq (ren_tm xi_tm)
(subst_tm (funcomp (var_tm n_tm) xi_tm)).
Proof.
exact (fun s => rinst_inst_tm xi_tm _ (fun n => eq_refl) s).
Qed.
Lemma instId'_tm {m_tm : nat} (s : tm m_tm) : subst_tm (var_tm m_tm) s = s.
Proof.
exact (idSubst_tm (var_tm m_tm) (fun n => eq_refl) s).
Qed.
Lemma instId'_tm_pointwise {m_tm : nat} :
pointwise_relation _ eq (subst_tm (var_tm m_tm)) id.
Proof.
exact (fun s => idSubst_tm (var_tm m_tm) (fun n => eq_refl) s).
Qed.
Lemma rinstId'_tm {m_tm : nat} (s : tm m_tm) : ren_tm id s = s.
Proof.
exact (eq_ind_r (fun t => t = s) (instId'_tm s) (rinstInst'_tm id s)).
Qed.
Lemma rinstId'_tm_pointwise {m_tm : nat} :
pointwise_relation _ eq (@ren_tm m_tm m_tm id) id.
Proof.
exact (fun s => eq_ind_r (fun t => t = s) (instId'_tm s) (rinstInst'_tm id s)).
Qed.
Lemma varL'_tm {m_tm : nat} {n_tm : nat} (sigma_tm : fin m_tm -> tm n_tm)
(x : fin m_tm) : subst_tm sigma_tm (var_tm m_tm x) = sigma_tm x.
Proof.
exact (eq_refl).
Qed.
Lemma varL'_tm_pointwise {m_tm : nat} {n_tm : nat}
(sigma_tm : fin m_tm -> tm n_tm) :
pointwise_relation _ eq (funcomp (subst_tm sigma_tm) (var_tm m_tm))
sigma_tm.
Proof.
exact (fun x => eq_refl).
Qed.
Lemma varLRen'_tm {m_tm : nat} {n_tm : nat} (xi_tm : fin m_tm -> fin n_tm)
(x : fin m_tm) : ren_tm xi_tm (var_tm m_tm x) = var_tm n_tm (xi_tm x).
Proof.
exact (eq_refl).
Qed.
Lemma varLRen'_tm_pointwise {m_tm : nat} {n_tm : nat}
(xi_tm : fin m_tm -> fin n_tm) :
pointwise_relation _ eq (funcomp (ren_tm xi_tm) (var_tm m_tm))
(funcomp (var_tm n_tm) xi_tm).
Proof.
exact (fun x => eq_refl).
Qed.
(* Typeclass instances, printing notations, and Proper morphisms that let
   setoid_rewrite work up to pointwise equality
   (legacy nat-indexed region, see note above). *)
Class Up_tm X Y :=
up_tm : X -> Y.
Instance Subst_tm {m_tm n_tm : nat}: (Subst1 _ _ _) := (@subst_tm m_tm n_tm).
Instance Up_tm_tm {m n_tm : nat}: (Up_tm _ _) := (@up_tm_tm m n_tm).
Instance Ren_tm {m_tm n_tm : nat}: (Ren1 _ _ _) := (@ren_tm m_tm n_tm).
Instance VarInstance_tm {n_tm : nat}: (Var _ _) := (@var_tm n_tm).
Notation "[ sigma_tm ]" := (subst_tm sigma_tm)
( at level 1, left associativity, only printing) : fscope.
Notation "s [ sigma_tm ]" := (subst_tm sigma_tm s)
( at level 7, left associativity, only printing) : subst_scope.
Notation "↑__tm" := up_tm (only printing) : subst_scope.
Notation "↑__tm" := up_tm_tm (only printing) : subst_scope.
Notation "⟨ xi_tm ⟩" := (ren_tm xi_tm)
( at level 1, left associativity, only printing) : fscope.
Notation "s ⟨ xi_tm ⟩" := (ren_tm xi_tm s)
( at level 7, left associativity, only printing) : subst_scope.
Notation "'var'" := var_tm ( at level 1, only printing) : subst_scope.
Notation "x '__tm'" := (@ids _ _ VarInstance_tm x)
( at level 5, format "x __tm", only printing) : subst_scope.
Notation "x '__tm'" := (var_tm x) ( at level 5, format "x __tm") :
subst_scope.
Instance subst_tm_morphism {m_tm : nat} {n_tm : nat}:
(Proper (respectful (pointwise_relation _ eq) (respectful eq eq))
(@subst_tm m_tm n_tm)).
Proof.
exact (fun f_tm g_tm Eq_tm s t Eq_st =>
eq_ind s (fun t' => subst_tm f_tm s = subst_tm g_tm t')
(ext_tm f_tm g_tm Eq_tm s) t Eq_st).
Qed.
Instance ren_tm_morphism {m_tm : nat} {n_tm : nat}:
(Proper (respectful (pointwise_relation _ eq) (respectful eq eq))
(@ren_tm m_tm n_tm)).
Proof.
exact (fun f_tm g_tm Eq_tm s t Eq_st =>
eq_ind s (fun t' => ren_tm f_tm s = ren_tm g_tm t')
(extRen_tm f_tm g_tm Eq_tm s) t Eq_st).
Qed.
(* Autosubst tactic suite: auto_unfold exposes the raw operations behind
   the typeclass wrappers; asimpl normalizes substitution expressions by
   rewriting with the lemmas above; substify/renamify convert between the
   renaming and substitution presentations (legacy nat-indexed region). *)
Ltac auto_unfold := repeat
unfold VarInstance_tm, Var, ids, Ren_tm, Ren1, ren1,
Up_tm_tm, Up_tm, up_tm, Subst_tm, Subst1, subst1.
Tactic Notation "auto_unfold" "in" "*" := repeat
unfold VarInstance_tm, Var, ids,
Ren_tm, Ren1, ren1, Up_tm_tm,
Up_tm, up_tm, Subst_tm, Subst1,
subst1 in *.
Ltac asimpl' := repeat (first
[ progress setoid_rewrite substSubst_tm_pointwise
| progress setoid_rewrite substSubst_tm
| progress setoid_rewrite renSubst_tm_pointwise
| progress setoid_rewrite renSubst_tm
| progress setoid_rewrite substRen_tm_pointwise
| progress setoid_rewrite substRen_tm
| progress setoid_rewrite renRen'_tm_pointwise
| progress setoid_rewrite renRen_tm
| progress setoid_rewrite varLRen'_tm_pointwise
| progress setoid_rewrite varLRen'_tm
| progress setoid_rewrite varL'_tm_pointwise
| progress setoid_rewrite varL'_tm
| progress setoid_rewrite rinstId'_tm_pointwise
| progress setoid_rewrite rinstId'_tm
| progress setoid_rewrite instId'_tm_pointwise
| progress setoid_rewrite instId'_tm
| progress
unfold up_list_tm_tm, up_tm_tm, upRen_list_tm_tm,
upRen_tm_tm, up_ren
| progress cbn[subst_tm ren_tm]
| progress fsimpl ]).
Ltac asimpl := check_no_evars;
repeat
unfold VarInstance_tm, Var, ids, Ren_tm, Ren1, ren1,
Up_tm_tm, Up_tm, up_tm, Subst_tm, Subst1, subst1 in *;
asimpl'; minimize.
Tactic Notation "asimpl" "in" hyp(J) := revert J; asimpl; intros J.
Tactic Notation "auto_case" := auto_case ltac:(asimpl; cbn; eauto).
Ltac substify := auto_unfold; try setoid_rewrite rinstInst'_tm_pointwise;
try setoid_rewrite rinstInst'_tm.
Ltac renamify := auto_unfold; try setoid_rewrite_left rinstInst'_tm_pointwise;
try setoid_rewrite_left rinstInst'_tm.
End renSubst.
(* Functional-extensionality versions of the pointwise lemmas from
   renSubst: each equation is stated between functions rather than
   between applications, using functional_extensionality. *)
Module fext.

Import renSubst.

(* Renaming equals substitution with the corresponding variable map. *)
Lemma rinstInst_tm {m_tm : nat} {n_tm : nat} (xi_tm : fin m_tm -> fin n_tm) :
  ren_tm xi_tm = subst_tm (funcomp (var_tm n_tm) xi_tm).
Proof.
exact (FunctionalExtensionality.functional_extensionality _ _
         (fun x => rinst_inst_tm xi_tm _ (fun n => eq_refl) x)).
Qed.

(* Substituting variables for variables is the identity. *)
Lemma instId_tm {m_tm : nat} : subst_tm (var_tm m_tm) = id.
Proof.
exact (FunctionalExtensionality.functional_extensionality _ _
         (fun x => idSubst_tm (var_tm m_tm) (fun n => eq_refl) (id x))).
Qed.

(* Renaming with the identity map is the identity. *)
Lemma rinstId_tm {m_tm : nat} : @ren_tm m_tm m_tm id = id.
Proof.
exact (eq_trans (rinstInst_tm (id _)) instId_tm).
Qed.

(* Substitution applied to a variable gives back the substitution. *)
Lemma varL_tm {m_tm : nat} {n_tm : nat} (sigma_tm : fin m_tm -> tm n_tm) :
  funcomp (subst_tm sigma_tm) (var_tm m_tm) = sigma_tm.
Proof.
exact (FunctionalExtensionality.functional_extensionality _ _
         (fun x => eq_refl)).
Qed.

(* Renaming applied to a variable is the renamed variable. *)
Lemma varLRen_tm {m_tm : nat} {n_tm : nat} (xi_tm : fin m_tm -> fin n_tm) :
  funcomp (ren_tm xi_tm) (var_tm m_tm) = funcomp (var_tm n_tm) xi_tm.
Proof.
exact (FunctionalExtensionality.functional_extensionality _ _
         (fun x => eq_refl)).
Qed.

(* Composition laws, stated as function equalities. *)
Lemma renRen'_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
  (xi_tm : fin m_tm -> fin k_tm) (zeta_tm : fin k_tm -> fin l_tm) :
  funcomp (ren_tm zeta_tm) (ren_tm xi_tm) = ren_tm (funcomp zeta_tm xi_tm).
Proof.
exact (FunctionalExtensionality.functional_extensionality _ _
         (fun n => renRen_tm xi_tm zeta_tm n)).
Qed.

Lemma substRen'_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
  (sigma_tm : fin m_tm -> tm k_tm) (zeta_tm : fin k_tm -> fin l_tm) :
  funcomp (ren_tm zeta_tm) (subst_tm sigma_tm) =
  subst_tm (funcomp (ren_tm zeta_tm) sigma_tm).
Proof.
exact (FunctionalExtensionality.functional_extensionality _ _
         (fun n => substRen_tm sigma_tm zeta_tm n)).
Qed.

Lemma renSubst'_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
  (xi_tm : fin m_tm -> fin k_tm) (tau_tm : fin k_tm -> tm l_tm) :
  funcomp (subst_tm tau_tm) (ren_tm xi_tm) = subst_tm (funcomp tau_tm xi_tm).
Proof.
exact (FunctionalExtensionality.functional_extensionality _ _
         (fun n => renSubst_tm xi_tm tau_tm n)).
Qed.

Lemma substSubst'_tm {k_tm : nat} {l_tm : nat} {m_tm : nat}
  (sigma_tm : fin m_tm -> tm k_tm) (tau_tm : fin k_tm -> tm l_tm) :
  funcomp (subst_tm tau_tm) (subst_tm sigma_tm) =
  subst_tm (funcomp (subst_tm tau_tm) sigma_tm).
Proof.
exact (FunctionalExtensionality.functional_extensionality _ _
         (fun n => substSubst_tm sigma_tm tau_tm n)).
Qed.

(* Simplification loop that also uses the primed (funext) equations. *)
Ltac asimpl_fext' := repeat (first
                      [ progress setoid_rewrite substSubst_tm_pointwise
                      | progress setoid_rewrite substSubst_tm
                      | progress setoid_rewrite renSubst_tm_pointwise
                      | progress setoid_rewrite renSubst_tm
                      | progress setoid_rewrite substRen_tm_pointwise
                      | progress setoid_rewrite substRen_tm
                      | progress setoid_rewrite renRen'_tm_pointwise
                      | progress setoid_rewrite renRen_tm
                      | progress setoid_rewrite substSubst'_tm
                      | progress setoid_rewrite renSubst'_tm
                      | progress setoid_rewrite substRen'_tm
                      | progress setoid_rewrite renRen'_tm
                      | progress setoid_rewrite varLRen_tm
                      | progress setoid_rewrite varL_tm
                      | progress setoid_rewrite rinstId_tm
                      | progress setoid_rewrite instId_tm
                      | progress
                         unfold up_list_tm_tm, up_tm_tm, upRen_list_tm_tm,
                          upRen_tm_tm, up_ren
                      | progress cbn[subst_tm ren_tm]
                      | fsimpl_fext ]).
Ltac asimpl_fext := check_no_evars; repeat try unfold_funcomp;
                     repeat
                      unfold VarInstance_tm, Var, ids, Ren_tm, Ren1, ren1,
                       Up_tm_tm, Up_tm, up_tm, Subst_tm, Subst1, subst1
                      in *; asimpl_fext'; repeat try unfold_funcomp.
Tactic Notation "asimpl_fext" "in" hyp(J) := revert J; asimpl_fext; intros J.
Ltac substify_fext := auto_unfold; try repeat erewrite ?rinstInst_tm.
Ltac renamify_fext := auto_unfold; try repeat erewrite <- ?rinstInst_tm.
End fext.
(* Public interface: re-export both the pointwise (renSubst) and the
   funext-based (fext) lemmas, and set up implicit arguments / hints. *)
Module interface.
Export renSubst.
Export fext.
(* Make the scope index implicit for the term constructors. *)
Arguments var_tm {n_tm}.
Arguments lam {n_tm}.
Arguments app {n_tm}.
(* Keep subst_tm/ren_tm opaque for the 'rewrite' hint database. *)
Hint Opaque subst_tm: rewrite.
Hint Opaque ren_tm: rewrite.
End interface.
Export interface.
| {
"alphanum_fraction": null,
"author": "addap",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/addap-autosubst-ocaml/autosubst-ocaml-f820bde3c51299b5f54ef21af39ac4654854d124/traced/utlc_traced_ind.v",
"reason": null,
"repo": "autosubst-ocaml",
"save_path": "github-repos/coq/addap-autosubst-ocaml",
"sha": "f820bde3c51299b5f54ef21af39ac4654854d124",
"size": null
} |
// Standard library
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>
// Boost UUID generation
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include <boost/uuid/uuid_io.hpp>
// Project headers
#include <EditorWindow.hpp>
#include <ApplicationController.hpp>
#include <CRDTManager.hpp>
#include <VersionVector.hpp>
#include <CustomMessageException.hpp>
std::string generateUniqueId();
/**
 * Entry point for the collaborative text editor client.
 *
 * Usage:
 *   ./collaborative_text_editor <client_port> <no_of_peers> [<peer_ip> <peer_port> ...]
 *
 * Builds the peer list from the command line, generates a unique site id,
 * wires the editor window, CRDT state, version vector and UDP client
 * together, and runs the application loop until the editor session ends.
 */
int main(int argc, char **argv) {
    // argv[2] is only read when argc >= 3 (the || short-circuits first).
    // Exactly two extra arguments (<peer_ip> <peer_port>) are required per peer.
    if (argc < 3 || argc != (3 + 2 * strtoul(argv[2], nullptr, 10))) {
        std::cerr
                << "Error: Not enough or invalid arguments, please enter in this format ./collaborative_text_editor "
                << "<client_port> <no_of_peers> [<peer_ip> <peer_port> ...]"
                << std::endl;
        return EXIT_FAILURE;  // portable failure status (was -1)
    }
    // Peer addresses taken from the console, two arguments at a time.
    std::vector<PeerAddress> peers;
    for (int i = 3; i < argc; i = i + 2) {
        peers.push_back({sf::IpAddress(argv[i]), (unsigned short) (strtoul(argv[i + 1], nullptr, 10))});
    }
    // Unique site identifier for this editor instance (used by the CRDT).
    std::string unique_id = generateUniqueId();
    EditorWindow window(sf::Color(47, 50, 47), unique_id);
    CRDTManager crdt_manager(unique_id);
    VersionVector version_vector(unique_id);
    unsigned int port = strtoul(argv[1], nullptr, 10);
    UDPClient udp_client(port, peers);
    ApplicationController controller(window, crdt_manager, udp_client, version_vector);
    controller.Go();  // blocks until the window / session closes
    controller.Shutdown();
    udp_client.ShutdownClient();
    return EXIT_SUCCESS;
}
std::string generateUniqueId() {
boost::uuids::random_generator generator;
boost::uuids::uuid uuid = generator();
return boost::uuids::to_string(uuid);
}
| {
"alphanum_fraction": 0.6966442953,
"author": null,
"avg_line_length": 32.3913043478,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "7b6cf76510bbccc728235300e8980d2c26ff33d2",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-04-30T17:51:44.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-04-30T17:51:44.000Z",
"max_forks_repo_head_hexsha": "457fee0c9aebf874e0ee3ac0a505220a7b493189",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "psx95/collaborative-text-editor",
"max_forks_repo_path": "src/main.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "457fee0c9aebf874e0ee3ac0a505220a7b493189",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "psx95/collaborative-text-editor",
"max_issues_repo_path": "src/main.cpp",
"max_line_length": 109,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "457fee0c9aebf874e0ee3ac0a505220a7b493189",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "psx95/collaborative-text-editor",
"max_stars_repo_path": "src/main.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 394,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1490
} |
\chapter{B Chapter}
\lipsum[1] Figures \ref{fig:frogger} and \ref{fig:froggerr}.
\begin{figure}
\centering
\caption{\label{fig:frogger}Another instance of the frog.}
\includegraphics[width=0.3\textwidth]{frog.jpg}
\end{figure}
\begin{figure}
\centering
\caption{\label{fig:froggerr}And a third one.}
\includegraphics[width=0.3\textwidth]{frog.jpg}
\end{figure}
\section{A Section}
\lipsum[2]
\section{B Section}
\subsection{A Subsection}
\lipsum[3]
\subsubsection{A Subsubsection}
\lipsum[4]
\subsection{B Subsection}
\lipsum[5]
\section{C Section}
\lipsum[6-8] \ref{tab:widgetss} \cite{One, Two, Three}.
\begin{table}
\centering
\caption{\label{tab:widgetss}An example table.}
\begin{tabular}{l|r}
Item & Quantity \\\hline
Widgets & 42 \\
Gadgets & 13
\end{tabular}
\end{table}
\begin{enumerate}
\item first,
\item second.
\end{enumerate}
\dots and bullet points \dots
\begin{itemize}
\item one bullet,
\item two bullets.
\end{itemize}
Let $X_1, X_2, \ldots, X_n$ be a sequence of independent and identically distributed random variables with $\text{E}[X_i] = \mu$ and $\text{Var}[X_i] = \sigma^2 < \infty$, and let
\[S_n = \frac{X_1 + X_2 + \cdots + X_n}{n}
= \frac{1}{n}\sum_{i=1}^{n} X_i\]
denote their mean. Then as $n$ approaches infinity, the random variables $\sqrt{n}(S_n - \mu)$ converge in distribution to a normal $\mathcal{N}(0, \sigma^2)$.
\begin{quote}
\lipsum[10]
\end{quote} | {
"alphanum_fraction": 0.6741957563,
"author": null,
"avg_line_length": 24.7627118644,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "7b826b67d41e98d85a90c65b6730fe86f4de5243",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d36628203822e618c8a446975dcca0800a004ccf",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "uderbashi/GTU_Thesis_LaTeX_Template",
"max_forks_repo_path": "Body/Mainmatter/C2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d36628203822e618c8a446975dcca0800a004ccf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "uderbashi/GTU_Thesis_LaTeX_Template",
"max_issues_repo_path": "Body/Mainmatter/C2.tex",
"max_line_length": 179,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d36628203822e618c8a446975dcca0800a004ccf",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "uderbashi/GTU_Thesis_LaTeX_Template",
"max_stars_repo_path": "Body/Mainmatter/C2.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 506,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1461
} |
Network Graphs in R
We are using the well-known social network of Zachary's karate club; the network is read from a GML-format file (karate.gml).
read graph file and update layout
library(igraph)
library(plotly)
G <- read.graph("karate.gml", format = c("gml"))
L <- layout.circle(G)
vertices and edges for the graph
vs <- V(G)
es <- as.data.frame(get.edgelist(G))
count of vertices and edges
Nv <- length(vs)
Ne <- length(es[1]$V1)
node positions
Xn <- L[,1]
Yn <- L[,2]
draw network nodes
network <- plot_ly(type = "scatter", x = Xn, y = Yn, mode = "markers", text = vs$label, hoverinfo = "text")
creates shapes for edges
edge_shapes <- list()
for(i in 1:Ne) {
v0 <- es[i,]$V1
v1 <- es[i,]$V2
edge_shape = list(
type = "line",
line = list(color = "#030303", width = 0.3),
x0 = Xn[v0],
y0 = Yn[v0],
x1 = Xn[v1],
y1 = Yn[v1]
)
edge_shapes[[i]] <- edge_shape
}
add edges to the network
network <- layout(
network,
title = 'Karate Network',
shapes = edge_shapes,
xaxis = list(title = "", showgrid = FALSE, showticklabels = FALSE, zeroline = FALSE),
yaxis = list(title = "", showgrid = FALSE, showticklabels = FALSE, zeroline = FALSE)
)
network
| {
"alphanum_fraction": 0.6137096774,
"author": null,
"avg_line_length": 22.1428571429,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "2aff5b8ce3a21486144a0d9f7c853add20e6d1ba",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7025e8bc03c40d65e71bcab783d2162be828ace7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mstrickland256/Basics",
"max_forks_repo_path": "Basic network graphs.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7025e8bc03c40d65e71bcab783d2162be828ace7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mstrickland256/Basics",
"max_issues_repo_path": "Basic network graphs.r",
"max_line_length": 113,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7025e8bc03c40d65e71bcab783d2162be828ace7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mstrickland256/Basics",
"max_stars_repo_path": "Basic network graphs.r",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 369,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1240
} |
'''
---------------------------
Licensing and Distribution
---------------------------
Program name: Q2DTor
Version : 2020.1
License : MIT/x11
Copyright (c) 2020, David Ferro Costas (david.ferro@usc.es) and
Antonio Fernandez Ramos (qf.ramos@usc.es)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
---------------------------
*----------------------------------*
| Module : modq2dtor |
| Sub-module : classes |
| Last Update: 2020/02/11 (Y/M/D) |
| Main Author: David Ferro-Costas |
*----------------------------------*
This module constains different Classes
'''
#------------------------------------------------#
# >> Importation section << #
#------------------------------------------------#
import math, cmath
import sys
import time
#------------------------------------------------#
import numpy as np
import random
from scipy.optimize import curve_fit
from scipy.optimize import newton
#random.seed(1111)
#------------------------------------------------#
import modq2dtor.constants as cons
import modq2dtor.helpfns as hf
from modq2dtor.gtsfile import read_gtsfile
# >>>>>>>>>>>>>>>>>>>>> ## <<<<<<<<<<<<<<<<<<<<< #
#>>>>>>>>>>>>>>>>>>*
# CLASS: InfoStr *
#>>>>>>>>>>>>>>>>>>*
class InfoStr:
    """Mutable text accumulator used to build up report/output strings.

    Text is appended through add()/blanklines(); str(instance) yields
    the accumulated buffer.
    """

    def __init__(self):
        self.text = ""

    def add(self, string, n=0, b=0):
        """Append `string`, indented by `b` spaces and followed by `n` newlines."""
        self.text += " " * b + string + "\n" * n

    def __str__(self):
        return self.text

    def blanklines(self, n=1):
        """Append `n` blank lines."""
        self.text += "\n" * n

    def endinblank(self):
        """Guarantee the buffer ends with exactly at least one newline."""
        if not self.text.endswith("\n"):
            self.text += "\n"

    def clear(self):
        """Empty the buffer."""
        self.text = ""

    def removeblank(self):
        """Collapse a trailing blank line (drop one of two final newlines)."""
        if self.text.endswith("\n\n"):
            self.text = self.text[:-1]
#>>>>>>>>>>>>>>>>>>*
# CLASS: Logger *
#>>>>>>>>>>>>>>>>>>*
class Logger(object):
    '''
    Tee-style replacement for sys.stdout: echoes every message to the
    real terminal and/or appends it to a log file.

    Requirements:
       sys library
    Use:
       sys.stdout = Logger(f_out, tprint)
    '''

    def __init__(self, output=None, mode="w", bool_print=True):
        # Keep a handle on the real stdout so printing still works
        # after sys.stdout has been replaced by this instance.
        self.terminal = sys.__stdout__
        self.bool_print = bool_print
        self.file = output
        if output is None:
            self.bool_write = False
        else:
            self.log = open(self.file, mode)
            self.bool_write = True

    def write(self, message):
        """Forward `message` to the terminal and/or the log file."""
        if self.bool_print:
            self.terminal.write(message)
        if self.bool_write:
            self.log.write(message)

    def flush(self):
        # Needed for Python 3 compatibility; intentionally a no-op.
        pass
#>>>>>>>>>>>>>>>>>>*
# CLASS: Fourier2D *
#>>>>>>>>>>>>>>>>>>*
class Fourier2D(object):
    '''
    Two-dimensional Fourier series V(Phi1, Phi2) = sum_k c_k * f_k.

    Each basis term f_k is described by a tuple (kind, idx1, idx2) with
    kind in {'const','cos','sin','coscos','sincos','sinsin','cossin'};
    idx1/idx2 are the integer multiples of Phi1/Phi2, or '-' when the
    term does not depend on that angle.
    '''

    def __init__(self, terms, coefs=None, imag=True):
        # With imag=False, drop the terms containing a lone sine factor
        # in either angle ('sin', 'sincos', 'cossin'); 'sinsin' is kept.
        if imag:
            self._terms = list(terms)
        else:
            self._terms = []
            for term, idx1, idx2 in terms:
                if term == "sin": continue
                if term == "sincos": continue
                if term == "cossin": continue
                self._terms.append((term, idx1, idx2))
        # Number of fitting parameters (one coefficient per kept term)
        self.NP = len(self._terms)
        self._coefs = coefs

    def set_coefs(self, coefs):
        self._coefs = coefs

    def __call__(self, Phi1, Phi2):
        return self.value(Phi1, Phi2)

    def value(self, Phi1, Phi2):
        '''Value of the series at the point (Phi1, Phi2).'''
        value = 0.0
        for (term_type, idx1, idx2), coef in zip(self._terms, self._coefs):
            if term_type == "const":
                value += coef
            elif term_type == "cos" and coef != 0.0:
                # One-dimensional cosine in whichever angle is active
                if idx1 == "-": value += coef * np.cos(idx2 * Phi2)
                if idx2 == "-": value += coef * np.cos(idx1 * Phi1)
            elif term_type == "sin" and coef != 0.0:
                if idx1 == "-": value += coef * np.sin(idx2 * Phi2)
                if idx2 == "-": value += coef * np.sin(idx1 * Phi1)
            elif term_type == "coscos" and coef != 0.0:
                value += coef * np.cos(idx1 * Phi1) * np.cos(idx2 * Phi2)
            elif term_type == "sincos" and coef != 0.0:
                value += coef * np.sin(idx1 * Phi1) * np.cos(idx2 * Phi2)
            elif term_type == "sinsin" and coef != 0.0:
                value += coef * np.sin(idx1 * Phi1) * np.sin(idx2 * Phi2)
            elif term_type == "cossin" and coef != 0.0:
                value += coef * np.cos(idx1 * Phi1) * np.sin(idx2 * Phi2)
        return value

    def derphi1(self, Phi1, Phi2):
        '''Analytical partial derivative dV/dPhi1 at (Phi1, Phi2).'''
        value = 0.0
        for (term_type, idx1, idx2), coef in zip(self._terms, self._coefs):
            if term_type == "cos" and coef != 0.0:
                if idx2 == "-": value += -idx1 * coef * np.sin(idx1 * Phi1)
            elif term_type == "sin" and coef != 0.0:
                if idx2 == "-": value += idx1 * coef * np.cos(idx1 * Phi1)
            elif term_type == "coscos" and coef != 0.0:
                value += -idx1 * coef * np.sin(idx1 * Phi1) * np.cos(idx2 * Phi2)
            elif term_type == "sincos" and coef != 0.0:
                value += idx1 * coef * np.cos(idx1 * Phi1) * np.cos(idx2 * Phi2)
            elif term_type == "sinsin" and coef != 0.0:
                value += idx1 * coef * np.cos(idx1 * Phi1) * np.sin(idx2 * Phi2)
            elif term_type == "cossin" and coef != 0.0:
                value += -idx1 * coef * np.sin(idx1 * Phi1) * np.sin(idx2 * Phi2)
        return value

    def derphi2(self, Phi1, Phi2):
        '''Analytical partial derivative dV/dPhi2 at (Phi1, Phi2).'''
        value = 0.0
        for (term_type, idx1, idx2), coef in zip(self._terms, self._coefs):
            if term_type == "const":
                continue
            elif term_type == "cos" and coef != 0.0:
                if idx1 == "-": value += -idx2 * coef * np.sin(idx2 * Phi2)
            elif term_type == "sin" and coef != 0.0:
                if idx1 == "-": value += idx2 * coef * np.cos(idx2 * Phi2)
            elif term_type == "coscos" and coef != 0.0:
                value += -idx2 * coef * np.cos(idx1 * Phi1) * np.sin(idx2 * Phi2)
            elif term_type == "sincos" and coef != 0.0:
                value += -idx2 * coef * np.sin(idx1 * Phi1) * np.sin(idx2 * Phi2)
            elif term_type == "sinsin" and coef != 0.0:
                value += idx2 * coef * np.sin(idx1 * Phi1) * np.cos(idx2 * Phi2)
            elif term_type == "cossin" and coef != 0.0:
                value += idx2 * coef * np.cos(idx1 * Phi1) * np.cos(idx2 * Phi2)
        return value

    def value2(self, XY_array, *coefs):
        '''Array wrapper with the call signature curve_fit expects.

        Side effect: stores `coefs` as the current coefficients.
        '''
        self._coefs = coefs
        return np.array([self.value(Phi1, Phi2) for (Phi1, Phi2) in XY_array])

    def fit(self, xdata, ydata, weight=0.0, guess=None):
        '''
        Least-squares fit of the coefficients to (xdata, ydata).

        weight : if non-zero, points are weighted by (y - y_min + 0.01)**weight
        guess  : initial coefficients; defaults to [mean(ydata), 0, 0, ...]
        Returns (terms, popt, (r^2, avg|err|, avg|err| over y < mean), time).
        '''
        t0 = time.time()
        # Min, average and max values in ydata
        y_min = min(ydata)
        y_mean = sum(ydata) / len(ydata)
        y_max = max(ydata)
        # Optional weighting (larger sigma = less weight in curve_fit)
        if weight in [0.0, 0, None, False]:
            weights = None
        else:
            weights = [(y - y_min + 0.01) ** float(weight) for y in ydata]
        # Default initial guess: constant term at the mean, rest zero
        if guess in [None, False, []]:
            guess = [y_mean] + [0.0] * (self.NP - 1)
        # Only the constant term is bounded (between min and max of ydata)
        low_bound = [y_min] + [-np.inf] * (self.NP - 1)
        upp_bound = [y_max] + [+np.inf] * (self.NP - 1)
        try:
            popt, pcov = curve_fit(self.value2, xdata, ydata, p0=guess,
                                   sigma=weights, bounds=(low_bound, upp_bound))
        except Exception:
            # BUGFIX: was a bare 'except:' (also swallowed KeyboardInterrupt).
            # Retry without bounds for SciPy versions that do not support them.
            popt, pcov = curve_fit(self.value2, xdata, ydata, p0=guess, sigma=weights)
        self.set_coefs(popt)
        # Fit-quality statistics
        ydata_fit = self.value2(xdata, *popt)
        averAbsErr = 0.0
        averAbsErr_small = 0.0
        n_small = 0.0
        SStot = 0.0
        SSres = 0.0
        for y, yf in zip(ydata, ydata_fit):
            SSres += (y - yf) ** 2
            SStot += (y - y_mean) ** 2
            averAbsErr += abs(y - yf)
            if y < y_mean:
                n_small += 1
                averAbsErr_small += abs(y - yf)
        # Guard degenerate data (all y identical) against division by zero
        rsquare = 1.0 - SSres / SStot if SStot > 0.0 else 1.0
        averAbsErr /= len(ydata)
        if n_small > 0:
            averAbsErr_small /= n_small
        fitting_time = time.time() - t0
        return self._terms, popt, (rsquare, averAbsErr, averAbsErr_small), fitting_time
#>>>>>>>>>>>>>>>>>>*
# CLASS: splineVaG *
#>>>>>>>>>>>>>>>>>>*
class SplineVaG:
    '''
    Piecewise-spline representation of an adiabatic potential V(s) along
    a reaction coordinate, built from tabulated points (xx, yy).
    Tracks the path end points, the saddle-point value (at s == 0.0) and
    the global maximum (smax, VAG).
    '''
    def __init__(self,xx,yy):
        self.ZERO = 1e-6
        self.xx = xx
        self.yy = yy
        # End-point coordinates/values of the tabulated path
        self.s_alpha = xx[0]
        self.V_alpha = yy[0]
        self.s_omega = xx[-1]
        self.V_omega = yy[-1]
        # Value at the saddle point; assumes some point has s exactly 0.0,
        # otherwise V_saddle is never set (NOTE(review): confirm callers
        # guarantee a 0.0 entry in xx).
        for idx in range(len(xx)):
            if xx[idx] == 0.0: self.V_saddle = yy[idx]
        # a) Obtain maxima inside xx
        full_maxdata = hf.obtain_extremum(xx,yy,xtr="max",full=True)
        # Data of "global" maximum
        self.smax = full_maxdata[0][0]
        self.VAG = full_maxdata[0][1]
        # b) Remove coincident ranges in maximum splines!
        #    Each candidate range (si, sj) is shrunk so it does not
        #    overlap ranges already accepted in `ranges`.
        self.splines = []
        ranges = []
        for xxx,xxx,the_spline,si,sj in full_maxdata:
            for sa_prime,sb_prime in ranges:
                if abs(si - sb_prime) < self.ZERO: continue
                if abs(sj - sa_prime) < self.ZERO: continue
                if si + self.ZERO > sa_prime and sb_prime + self.ZERO > si: si = sb_prime
                if sj + self.ZERO > sa_prime and sb_prime + self.ZERO > sj: sj = sa_prime
                if abs(sj-si) < self.ZERO: break
            if abs(sj-si) < self.ZERO: continue
            if sj < si: continue
            self.splines += [ (si,sj,the_spline) ]
            ranges.append( (si,sj) )
        self.splines.sort()
        ranges.sort()
        # c) Add new splines (if no coincidence in ranges): local splines
        #    over each tabulated interval not already covered above.
        for idx in range(0,len(xx)-1):
            si = xx[idx]
            sj = xx[idx+1]
            for sa_prime,sb_prime in ranges:
                if abs(si - sb_prime) < self.ZERO: continue
                if abs(sj - sa_prime) < self.ZERO: continue
                if si + self.ZERO > sa_prime and sb_prime + self.ZERO > si: si = sb_prime
                if sj + self.ZERO > sa_prime and sb_prime + self.ZERO > sj: sj = sa_prime
                if abs(sj-si) < self.ZERO: break
            if abs(sj-si) < self.ZERO: continue
            if sj < si: continue
            the_spline = hf.localspline(xx,yy,idx,nps=(2,2),spl=3)
            self.splines.append( (si, sj, the_spline) )
            ranges.append( (si,sj) )
        self.splines.sort()
        ranges.sort()
    def __call__(self, svalue):
        return self.interpolate(svalue)
    def get_bwvalues(self):
        # (s, V) at the backward end of the path
        return self.s_alpha, self.V_alpha
    def get_fwvalues(self):
        # (s, V) at the forward end of the path
        return self.s_omega, self.V_omega
    def get_saddle(self): return self.V_saddle
    def get_max(self):
        # Position and value of the global maximum (VAG)
        return self.smax, self.VAG
    def interpolate(self,si,E=0.0):
        '''
        Interpolated V(si) - E.  Values outside the tabulated range are
        clamped to the end-point values; points (numerically) coinciding
        with a tabulated abscissa return the tabulated ordinate.
        '''
        if si <= self.s_alpha: return self.V_alpha - E
        if si >= self.s_omega: return self.V_omega - E
        for idx in range(len(self.xx)):
            if abs(self.xx[idx]-si) < self.ZERO: return self.yy[idx] - E
        for s1, s2, spline in self.splines:
            if s1 < si and si < s2: return spline(si) - E
    def reduceMEP(self):
        # Temporarily trim the path end points to the second/penultimate
        # tabulated points; undone by undoreduceMEP().
        self.s_alpha = self.xx[ 1]
        self.V_alpha = self.yy[ 1]
        self.s_omega = self.xx[-2]
        self.V_omega = self.yy[-2]
    def undoreduceMEP(self):
        self.s_alpha = self.xx[ 0]
        self.V_alpha = self.yy[ 0]
        self.s_omega = self.xx[-1]
        self.V_omega = self.yy[-1]
    def returnpoints(self,E,ds):
        '''
        Locate turning points V(s) = E by scanning the path with step ds
        and refining each bracketed crossing with Newton's method.
        Returns a list of (s, type): '+' on rising, '-' on falling sections.
        '''
        s_current = self.s_alpha
        rtn_points = []
        while s_current <= self.s_omega:
            V1 = self.interpolate(s_current)
            V2 = self.interpolate( min(s_current+ds,self.s_omega) )
            if V1 > V2: rpoint_type = "-"
            elif V1 < V2: rpoint_type = "+"
            else: s_current = s_current+ds; continue
            if (V1 <= E and E < V2) or (V1 >= E and E > V2):
                s_guess = s_current + 0.5*ds
                rpoint = newton(self.interpolate,s_guess,args=(E,))
                rtn_points.append( (rpoint,rpoint_type) )
            s_current = s_current+ds
        # NOTE(review): V2 and rpoint_type from the last loop iteration are
        # reused here; if the loop body never assigned them this would raise.
        if abs(E - V2) < 1e-6: rtn_points.append( (self.s_omega,rpoint_type) )
        return rtn_points
#>>>>>>>>>>>>>>>>>>*
# CLASS: Freq *
#>>>>>>>>>>>>>>>>>>*
class Freq:
    '''Container for a single harmonic frequency (real or imaginary) plus
    derived magnitudes (ZPE, vibrational temperature, turning point).'''
    def __init__(self , scaling=1.0, mu=1.0/cons.amu, rmode=1):
        '''
        scaling: multiplicative factor applied to the angular frequency
        mu     : reduced mass used to convert eigenvalue <-> frequency
        if rmode == 1: for imag frequencies, pfn = 1 and its derivates are 0
        if rmode == 2: for imag frequencies, pfn = 1E10 and its derivates are 0
        '''
        self.scaling = scaling
        self.mu = mu
        # Basic ones
        self.evalue = None
        self.angfreq = None
        self.wavenum = None
        self.evector = None
        # Derivated ones
        self.vibT = None
        self.zpe = None
        self.tpoint = None
        self.real = None
        # rmode
        self._rmode = rmode
    def __str__(self):
        return self.str(f="%6.1f")
    def __float__(self):
        # Real frequency -> its wavenumber; imaginary -> minus its magnitude.
        if self.real: return self.wavenum.real
        else: return -self.wavenum.imag
    #####################
    # Setting variables #
    #####################
    def set_scaling(self,scaling):
        self.scaling = scaling
    def set_mu(self,mu):
        self.mu = mu
    def set_evalue(self , evalue):
        # Store the force-constant eigenvalue and derive angfreq/wavenum.
        self.evalue = evalue
        # a) Calculate angular frequency (complex sqrt handles evalue < 0)
        self.angfreq = cmath.sqrt(self.evalue/self.mu)
        self.angfreq *= self.scaling
        # If real, keep only real part
        if self.angfreq.imag == 0.0:
            self.angfreq = self.angfreq.real
            self.real = True
        else:
            self.real = False
        # b) Calculate wavenum
        self.wavenum = self.angfreq / (2.0*math.pi*cons.c0)
    def set_wavenum(self , wavenum):
        # A negative real input is reinterpreted as an imaginary frequency;
        # a mixed real+imaginary input is rejected.
        if wavenum.imag == 0.0:
            if wavenum.real < 0.0:
                self.wavenum = complex(0,abs(wavenum.real))
                self.real = False
            else:
                self.wavenum = wavenum.real
                self.real = True
        else:
            if wavenum.real == 0.0:
                self.wavenum = wavenum
                self.real = False
            else:
                sys.exit("This frequency has both real and imaginary terms...")
        # a) Calculate angular frequency
        self.angfreq = self.wavenum * 2.0 * math.pi * cons.c0
        # b) Calculate eigenvalue (undo the scaling first)
        self.evalue = (self.angfreq / self.scaling)**2 * self.mu
        if self.evalue.imag == 0.0: self.evalue = self.evalue.real
    def set_evector(self , evector):
        self.evector = evector
    def str(self,f="%4i"):
        # Wavenumber formatted in cm^-1 units; pure-imaginary values get an
        # 'i' suffix.  Note: the mixed branch uses a fixed "%4i%+4ii" format
        # regardless of f.
        real = self.wavenum.real / cons.cm
        imag = self.wavenum.imag / cons.cm
        if imag == 0.0: string = f%real+" "
        elif real == 0.0: string = f%imag+"i"
        else: string = "%4i%+4ii"%(real,imag)
        return string
    ###############################
    # Calculate rest of variables #
    ###############################
    def calc_derivated_magnitudes(self):
        # (a) The frequency is imaginary
        if not self.real:
            self.zpe = 0.0
            self.vibT = None
            self.tpoint = 1e10
        # (b) The frequency is real
        else:
            self.zpe = cons.hbar * self.angfreq / 2.0
            self.vibT = cons.hbar * self.angfreq / cons.kB
            self.tpoint = math.sqrt( cons.hbar / self.angfreq / self.mu )
    ###############
    # Return data #
    ###############
    def copy(self):
        # Deep-ish copy: rebuilds from the eigenvalue so all derived
        # members are reconstructed consistently.
        copy_freq = Freq(scaling = self.scaling, mu = self.mu, rmode= self._rmode)
        copy_freq.set_evalue(self.evalue)
        copy_freq.set_evector(self.evector)
        if self.zpe is not None: copy_freq.calc_derivated_magnitudes()
        return copy_freq
    def isItImag(self): return not self.real
    def get(self,variable):
        # Generic accessor by attribute name (returns None for unknown keys).
        if variable == "wavenum": return self.wavenum
        if variable == "angfreq": return self.angfreq
        if variable == "mu" : return self.mu
        if variable == "evalue" : return self.evalue
        if variable == "evector": return self.evector
        if variable == "zpe" : return self.zpe
        if variable == "vibT" : return self.vibT
        if variable == "tpoint" : return self.tpoint
    def get_qvib(self,T):
        '''
        Calculates vibrational partition function at temperature T,
        taking as zero the energy of level n=0
        (i.e. the zero point energy); returns (qvib, zpe).
        Imaginary frequencies follow the rmode convention (see __init__).
        '''
        if (not self.real) and (self._rmode == 1): return 1.0 , self.zpe
        if (not self.real) and (self._rmode == 2): return 1E10, self.zpe
        exp = np.exp(-self.vibT/T)
        qvib = 1.0/(1.0-exp)
        return qvib, self.zpe
    def get_fdln(self,T):
        '''
        First derivative of ln(qvib) with respect to beta = 1/(kB*T);
        zero for imaginary frequencies.
        '''
        if not self.real: return 0.0
        hw = cons.hbar * self.angfreq
        bhw = (1.0/cons.kB/T) * hw
        exp = np.exp(-bhw)
        fdln = - hw * exp / (1.0-exp)
        return fdln
    def get_sdln(self,T):
        '''
        Second derivative of ln(qvib) with respect to beta = 1/(kB*T);
        zero for imaginary frequencies.
        '''
        if not self.real: return 0.0
        hw = cons.hbar * self.angfreq
        bhw = (1.0/cons.kB/T) * hw
        # Exponential part
        exp_part = 1.0 / ((np.exp(bhw)-1.0)*(1.0-np.exp(-bhw)))
        # second derivative of natural log
        sdln = hw*hw * exp_part
        return sdln
#>>>>>>>>>>>>>>>>>>*
# CLASS: Queue *
#>>>>>>>>>>>>>>>>>>*
class Queue:
    """Minimal FIFO queue backed by a Python list."""

    def __init__(self):
        self._items = []

    def __len__(self):
        return len(self._items)

    def __iter__(self):
        # Iterate oldest-to-newest (insertion order).
        return iter(self._items)

    def __str__(self):
        return str(self._items)

    def enqueue(self, item):
        """Append `item` at the back of the queue."""
        self._items.append(item)

    def dequeue(self):
        """Remove and return the oldest item."""
        return self._items.pop(0)

    def clear(self):
        """Discard all items."""
        self._items = []
#>>>>>>>>>>>>>>>>>>*
# CLASS: Stack *
#>>>>>>>>>>>>>>>>>>*
class Stack:
    """Minimal LIFO stack; the most recently pushed item sits at index 0."""

    def __init__(self):
        self._items = []

    def __len__(self):
        return len(self._items)

    def __iter__(self):
        # Iterate newest-to-oldest (front of the list is the stack top).
        return iter(self._items)

    def __str__(self):
        return str(self._items)

    def push(self, item):
        """Place `item` on top of the stack (front of the list)."""
        self._items.insert(0, item)

    def pop(self):
        """Remove and return the most recently pushed item."""
        return self._items.pop(0)

    def clear(self):
        """Discard all items."""
        self._items = []
#>>>>>>>>>>>>>>>>>>*
# CLASS: UGRAPH *
#>>>>>>>>>>>>>>>>>>*
class UGRAPH:
"""
A simple implementation of a undirected graph
"""
    def __init__(self):
        # Adjacency list: node -> set of neighbouring nodes.
        self._ugdict = {}
    def __str__(self):
        # Short summary "(n,e)=(num_nodes,num_edges)".
        num_nodes = self.get_nnodes()
        num_edges = self.get_nedges()
        return "(n,e)=(%i,%i)"%(num_nodes,num_edges)
    def get_nnodes(self):
        '''
        Returns number of nodes in the ugraph
        '''
        return len(self._ugdict.keys())
    def get_edges(self):
        '''
        Returns the edges in the ugraph as a set of canonical
        (sorted) node pairs, so each undirected edge appears once.
        '''
        edges = set([])
        for node1 in self._ugdict.keys():
            for node2 in self._ugdict[node1]:
                edge = tuple(sorted((node1,node2)))
                edges.add(edge)
        return edges
def get_nedges(self):
'''
Returns number of edges in the ugraph
'''
edges = set([])
for node1 in self._ugdict.keys():
for node2 in self._ugdict[node1]:
edge = tuple(sorted((node1,node2)))
edges.add(edge)
return len(edges)
    def neighbors(self,node):
        # Return a copy so callers cannot mutate the internal adjacency set.
        return self._ugdict[node].copy()
    #----------------------------#
    # Add/Remove nodes and edges #
    #----------------------------#
    def add_node(self,node):
        # Idempotent: an already-present node keeps its adjacency set.
        if node not in self._ugdict.keys():
            self._ugdict[node] = set([])
    def add_edge(self,node1,node2):
        # Ensure both endpoints exist, then link them in both directions
        # (undirected edge).
        self.add_node(node1)
        self.add_node(node2)
        self._ugdict[node1].add(node2)
        self._ugdict[node2].add(node1)
    def remove_node(self,node1):
        # Remove node
        self._ugdict.pop(node1)
        # Remove edges with that node (discard(): no error if absent)
        for node2 in self._ugdict.keys():
            self._ugdict[node2].discard(node1)
    def remove_edge(self,node1,node2):
        # discard() keeps this a no-op when the edge does not exist.
        self._ugdict[node1].discard(node2)
        self._ugdict[node2].discard(node1)
#----------------------------#
def bfsearch(self,start_idx):
'''
Breadth First Search for undirected graph
Input:
* graph_dict: a dict of the graph representing the
adjacency list
- key : integer
- value: list of integers
* start_idx : the index where to start the BFS
'''
# Initialize queue
queue = Queue()
visited = [start_idx]
queue.enqueue(start_idx)
# Start BFS
while len(queue) != 0:
# Take node out of queue
target_idx = queue.dequeue()
# Get neighbors
neighbors = self._ugdict[target_idx]
# Visit neighbors
for neighbor in neighbors:
if neighbor in visited: continue
visited.append(neighbor)
queue.enqueue(neighbor)
return visited
def dfsearch(self,start_idx):
'''
Depth First Search
Breadth First Search for undirected graph
Input:
* graph_dict: a dict of the graph representing the
adjacency list
- key : integer
- value: list of integers
* start_idx : the index where to start the BFS
'''
# Initialize queue
stack = Stack()
visited = [start_idx]
stack.push(start_idx)
# Start BFS
while len(stack) != 0:
# Take node out of queue
target_idx = stack.pop()
# Get neighbors
neighbors = self._ugdict[target_idx]
# Visit neighbors
for neighbor in neighbors:
if neighbor in visited: continue
visited.append(neighbor)
stack.push(neighbor)
return visited
def bfsearch1d(self,idx1,idx2):
'''
Using a BFS algorithm, goes through
the graph.
However, it does it in the idx1-->idx2
directions.
'''
# Initialize queue
queue = Queue()
neighbors1 = self._ugdict[idx1]
old2 = None
# idx1 and idx2 are not bonded, there is a node in the middle (idx1--idxJ--idx2)
if idx2 not in neighbors1:
neighbors2 = self._ugdict[idx2]
idxJ = list(neighbors1.intersection(neighbors2))
if idxJ == []:
return None
else:
old2 = idx2
idx2 = idxJ[0]
visited = [idx2]
queue.enqueue(idx2)
# Start BFS
while len(queue) != 0:
# Take node out of queue
target_idx = queue.dequeue()
# Get neighbors
neighbors = list(self._ugdict[target_idx])
if target_idx == idx2:
neighbors.remove(idx1)
# Visit neighbors
for neighbor in neighbors:
if neighbor in visited: continue
visited.append(neighbor)
queue.enqueue(neighbor)
visited.remove(idx2)
if old2 is not None: visited.remove(old2)
return visited
def get_fragments(self):
fragments = []
nodes = self._ugdict.keys()
visited_nodes = set([])
for node in nodes:
if node in visited_nodes: continue
fragment = self.bfsearch(node)
visited_nodes = visited_nodes.union(fragment)
fragments.append(fragment)
return fragments
def longest_path(self,start_idx,visited=[]):
'''
Naive algorithm to explore the graph, starting at start_idx,
and return the longest path
Limitations:
* start_idx has to be a terminal node
if not, part of the path may be ommitted
'''
# Get neighbors, excluding previously visited ones
neighbors = [node for node in self._ugdict[start_idx] if node not in visited]
if len(neighbors) == 0: return [start_idx]
#if visited == []: assert len(neighbors) == 1
# Get longest from non-visited neighbors
length = - float("inf")
for neighbor in neighbors:
visited_i = visited + [start_idx,neighbor]
path_i = self.longest_path(neighbor,visited=visited_i)
if len(path_i) > length:
length = len(path_i)
the_path = path_i
return [start_idx] + the_path
def get_layers(self,center):
'''
returns a list of layers for the node center
* 1st layer: neighbors of node center
* 2nd layer: neighbors of neighbors of center (excluding repetitions of previous layers)
'''
layers = [set([center])]
current = [center]
visited = set([center])
nnodes = len(self._ugdict.keys())
while len(visited) != nnodes:
layer = []
for node in current:
neighbors = self._ugdict[node]
layer = layer + list(neighbors)
layer = set(layer).difference(visited)
visited = visited.union(layer)
layers.append(layer)
current = list(layer)
return layers
#----------------------------#
# Get matrix representations #
#----------------------------#
def gen_laplacian(self):
num_nodes = self.get_nnodes()
laplacian = np.zeros((num_nodes,num_nodes))
for node in self._ugdict.keys():
neighbors = self._ugdict[node]
for neighbor in neighbors:
laplacian[node,node] = laplacian[node,node] + 1
laplacian[node,neighbor] = -1
# Eigenvalues
vals, vecs = np.linalg.eigh(laplacian)
# Degenerancies?
degs = [0]*len(vals)
for i in range(len(vals)):
val_i = vals[i]
for j in range(len(vals)):
val_j = vals[j]
if abs(val_i-val_j) < 1e-3: degs[i] = degs[i]+1
# Data for each node
dict_vecs = {}
for node in self._ugdict.keys():
vector = [ abs(float(vecs[node,idx])) for idx in range(len(vals)) if degs[idx] == 1]
dict_vecs[node] = vector
return dict_vecs
#>>>>>>>>>>>>>>>>>>*
# CLASS: Struct *
#>>>>>>>>>>>>>>>>>>*
class Struct(object):
'''
The Struct class - includes graph theory implementations
Mandatory variables:
* name
a string to give a name to the structure
* x_cc
the non-scaled cartesian coordinates;
both 3Nx1 and Nx3 formats are allowed
* atonums_or_symbols
a list of atomic numbers or a list of atomic symbols
Non-mandatory variables:
* masslist
a list of atomic masses or a list of (atom index,mass) tuples
if None, masses are taken from a dictionary in cons module
* stype
0 for a stationary point with 0 imaginary frequencies
1 for a stationary point with 1 imaginary frequencies
2 for a stationary point with 2 imaginary frequencies
...
-1 for a non-stationary point
'''
    def __init__(self, name, x_cc, atonums_or_symbols, masslist=None, stype=0):
        '''
        See the class docstring for the meaning of each argument.
        Stores the geometry (as a 3N vector), resolves atomic numbers
        and symbols, builds the mass list (with optional isotopic
        substitutions), classifies the molecule (atom/diatomic/linear/
        non-linear), and initializes every derived attribute to None.
        '''
        self._name = name
        self._type = stype
        self._natoms = len(atonums_or_symbols)
        self._xcc = hf.xvecformat(x_cc,self._natoms,'3Nx1')
        #---------------------------#
        # Deal with atonums/symbols #
        #---------------------------#
        # atonums_or_symbols is atonums
        try:
            self._atonums = [int(atonum) for atonum in atonums_or_symbols]
            self._symbols = [cons.dict_z2symbol[atonum].strip() for atonum in self._atonums]
        # atonums_or_symbols is symbols
        # NOTE(review): the bare except is the deliberate fallback path
        # when int() fails on symbol strings
        except:
            self._symbols = [symbol.strip() for symbol in atonums_or_symbols]
            self._atonums = [cons.dict_symbol2z[symbol] for symbol in self._symbols]
        # Get molecular formula
        self._molformula = hf.get_molformula(self._symbols)
        #-----------------------------------------------#
        # Deal with masslist and isotopic modifications #
        #-----------------------------------------------#
        self._masslist = [cons.dict_atomasses[atonum] for atonum in self._atonums]
        if masslist is not None:
            # masslist is a list of (idx,mass) tuples
            try:
                for idx,mass in masslist:
                    # mass may be a string, like "2H"
                    if mass in cons.dict_isomasses:
                        mass = cons.dict_isomasses[mass]
                    else:
                        mass = float(mass)
                    self._masslist[idx] = float(mass)
            # masslist is a list of masses
            # NOTE(review): bare except is the fallback when unpacking
            # (idx,mass) fails on a plain list of masses
            except:
                self._masslist = masslist
        self._totmass = sum(self._masslist)
        self._masslist = np.array(self._masslist)
        #---------------------------#
        # Generate undirected graph #
        #---------------------------#
        # One graph node per atom; bonds are added later
        # (e.g. via graph_autoconnect)
        self._ugraph = UGRAPH()
        for idx in range(self._natoms): self._ugraph.add_node(idx)
        #-----------------------------#
        # Basic analysis of structure #
        #-----------------------------#
        # Counts of translational/rotational/vibrational dofs
        self._linear = hf.isitlinear(self._xcc)
        if self._natoms == 1:
            self._ntra = 3
            self._nrot = 0
            self._nvib = 0
            self._kind = "Atom"
        elif self._natoms == 2:
            self._ntra = 3
            self._nrot = 2
            self._nvib = 1
            self._kind = "Diatomic Molecule"
        elif self._linear:
            self._ntra = 3
            self._nrot = 2
            self._nvib = 3*self._natoms - self._ntra - self._nrot
            self._kind = "Linear Molecule"
        else:
            self._ntra = 3
            self._nrot = 3
            self._nvib = 3*self._natoms - self._ntra - self._nrot
            self._kind = "Non-linear Molecule"
        #--------------------------------#
        # Initializing rest of variables #
        #--------------------------------#
        # These are filled later via set(), basic_setups(),
        # calc_ccfreqs(), calc_icfreqs(), calc_Vadi(), ...
        self._freqscal = 1.0
        self._rmode = 1
        self._mu = 1.0 / cons.amu
        self._ch = None
        self._mtp = None
        self._elstates = None
        self._gcc = None
        self._Fcc = None
        self._Etot = None
        self._pgroup = None
        self._rotsigma = None
        self._imoments = None
        self._xms = None
        self._gms = None
        self._Fms = None
        self._ccfreqs = None
        self._icfreqs = None
        self._zpe = None
        self._Vadi = None
        self._rotT = None
        self._vibT = None
        self._v0 = None # in mass-scaled
        self._v1 = None # in mass-scaled
        self._meppoint = None
def __str__(self):
return self._molformula
def __repr__(self):
return "struct"
#-----------------------------------#
# Set/Get variable #
#-----------------------------------#
    def set(self,variables,values):
        '''
        Generic setter.
        Input:
           * variables: a variable name (str) or a list of names
           * values   : the matching value or list of values
        A single string is promoted to a one-element list.
        Exits via sys.exit for unknown variable names.
        '''
        if type(variables) == type(""):
            variables = [variables]
            values = [values]
        for variable,value in zip(variables,values):
            if variable == "freqscal" : self._freqscal = value
            # masslist also refreshes the cached total mass
            elif variable == "masslist" : self._masslist = value; self._totmass = sum(self._masslist)
            elif variable == "mu" : self._mu = value
            # gradient/hessian are normalized to the internal formats
            elif variable == "gcc" : self._gcc = hf.xvecformat(value,self._natoms,'3Nx1')
            elif variable == "Fcc" :
                if value is not None : self._Fcc = hf.hessianformat(value,self._natoms)
            elif variable == "ch" : self._ch = float(value)
            elif variable == "mtp" : self._mtp = float(value)
            elif variable == "Etot" : self._Etot = float(value)
            elif variable == "pgroup" : self._pgroup = str(value)
            elif variable == "rotsigma" : self._rotsigma = int(value)
            elif variable == "elstates" : self._elstates = value
            elif variable == "meppoint" : self._meppoint = value
            elif variable == "v0" : self._v0 = value
            elif variable == "v1" : self._v1 = value
            else : sys.exit("Trying to set an unknown variable (%s)..."%variable)
def get(self,variable):
# Variables defined in __init__
if variable == "name" : return self._name
elif variable == "type" : return self._type
elif variable == "xcc" :
if self._xcc is None : return None
else : return np.array(self._xcc,copy=True)
elif variable == "natoms" : return self._natoms
elif variable == "symbols" :
if self._symbols is None: return None
else : return list(self._symbols)
elif variable == "atonums" :
if self._atonums is None: return None
else : return list(self._atonums)
elif variable == "molformula": return str(self._molformula)
elif variable == "masslist" : return list(self._masslist)
elif variable == "totmass" : return float(self._totmass)
#elif variable == "ugraph" : return self._ugraph
elif variable == "lineal" : return self._linear
elif variable == "ntra" : return self._ntra
elif variable == "nrot" : return self._nrot
elif variable == "nvib" : return self._nvib
elif variable == "kind" : return self._kind
# Variables that can be defined through set function
elif variable == "freqscal" : return float(self._freqscal)
elif variable == "mu" : return float(self._mu)
elif variable == "gcc" :
if self._gcc is None : return None
else : return np.array(self._gcc,copy=True)
elif variable == "Fcc" :
if self._Fcc is None : return None
else : return np.matrix(self._Fcc,copy=True)
elif variable == "ch" : return int(self._ch)
elif variable == "mtp" : return int(self._mtp)
elif variable == "Etot" : return float(self._Etot)
elif variable == "pgroup" : return str(self._pgroup)
elif variable == "rotsigma" : return int(self._rotsigma)
elif variable == "elstates" : return list(self._elstates)
elif variable == "meppoint" : return self._meppoint
elif variable == "v0" :
if self._v0 is None : return None
else : return np.array(self._v0,copy=True)
elif variable == "v1" :
if self._v1 is None : return None
else : return np.array(self._v1,copy=True)
# Derivated variables that cannot be defined using set function
elif variable == "imoments" : return self._imoments
elif variable == "xms" :
if self._xms is None : return None
else : return np.array(self._xms,copy=True)
elif variable == "gms" :
if self._gms is None : return None
else : return np.array(self._gms,copy=True)
elif variable == "Fms" :
if self._Fcc is None : return None
else : return np.matrix(self._Fms,copy=True)
elif variable == "ccfreqs" :
if self._ccfreqs is None: return None
else : return list(self._ccfreqs)
elif variable == "icfreqs" : return list(self._icfreqs)
elif variable == "zpe" : return float(self._zpe)
elif variable == "Vadi" : return float(self._Vadi)
elif variable == "rotT" : return list(self._rotT)
elif variable == "vibT" : return list(self._vibT)
else : sys.exit("Requesting an unknown variable (%s)..."%variable)
def get_ifreq(self):
if self._ccfreqs is None: return None
for freq in self._ccfreqs:
if freq.isItImag(): return freq.copy()
#-----------------------------------#
# Basic setup of structure #
#-----------------------------------#
    def basic_setups(self,setup="all"):
        '''
        Perform the basic setups of the structure, selected by integer:
           0: shift origin to the center of mass
           1: point group and rotational sigma (if not already set)
           2: default electronic states (ground state only)
           3: mass-scale geometry/gradient/hessian
           4: moments of inertia and rotational temperatures
        setup may be a single integer, a list of integers, or "all".
        NOTE: steps have side effects and later steps use the results
        of earlier ones (e.g. step 4 assumes the center-of-mass origin
        of step 0), so the order matters.
        '''
        if type(setup) == type(1): setup = [setup]
        if setup == "all" : setup = range(5)
        for stype in setup:
            # Origin in center of mass
            if stype == 0:
                self._xcc = hf.shift2cm(self._xcc,self._masslist)
            # Point Group and rotsigma
            if stype == 1 and self._rotsigma is None:
                self._pgroup, self._rotsigma = hf.get_pgs(self._atonums,self._masslist,self._xcc)
            # elstates
            if stype == 2 and self._elstates is None:
                self._elstates = [ (0.0,self._mtp) ]
            # Mass-scale
            if stype == 3:
                self._xms = hf.cc2ms_x(self._xcc,self._masslist,self._mu)
                if self._gcc is not None:
                    self._gms = hf.cc2ms_g(self._gcc,self._masslist,self._mu)
                if self._Fcc is not None:
                    self._Fms = hf.cc2ms_F(self._Fcc,self._masslist,self._mu)
            # Rotational temperatures
            if stype == 4 and self._natoms > 1:
                # (a) Calculation of the inertia tensor (a.u.)
                inertia = np.zeros((3,3))
                for i in range(self._natoms):
                    # Diagonal elements
                    inertia[0][0] += self._masslist[i] * (self._xcc[i*3+1]**2 + self._xcc[i*3+2]**2)
                    inertia[1][1] += self._masslist[i] * (self._xcc[i*3+0]**2 + self._xcc[i*3+2]**2)
                    inertia[2][2] += self._masslist[i] * (self._xcc[i*3+0]**2 + self._xcc[i*3+1]**2)
                    # Offdiagonal elements
                    inertia[0][1] -= self._masslist[i] * self._xcc[i*3+0] * self._xcc[i*3+1]
                    inertia[0][2] -= self._masslist[i] * self._xcc[i*3+0] * self._xcc[i*3+2]
                    inertia[1][2] -= self._masslist[i] * self._xcc[i*3+1] * self._xcc[i*3+2]
                # Symmetric tensor: mirror the off-diagonal elements
                inertia[1][0] = inertia[0][1]; inertia[2][0] = inertia[0][2]; inertia[2][1] = inertia[1][2]
                # (b) Get its eigenvalues
                evalsI, evecsI = np.linalg.eigh(inertia)
                self._imoments = []
                if self._nrot == 2:
                    # Linear molecule: the two non-zero moments must match
                    # (smallest eigenvalue is ~0 along the molecular axis)
                    evalsI = np.sort(evalsI)
                    cocient = evalsI[1] / evalsI[2]
                    if abs(cocient - 1.0) < cons.ZERO: self._imoments = [evalsI[1]]
                    else: sys.exit("ERROR! Molecule is not linear!")
                else:
                    self._imoments = evalsI
                # (c) Get rotational Temperatures
                self._rotT = [cons.hbar**2 / (2*I_i*cons.kB) for I_i in self._imoments]
#-----------------------------------#
# Cartesian-Coordinate FREQUENCIES #
#-----------------------------------#
    def calc_ccfreqs(self):
        '''
        Calculate the Cartesian-coordinate vibrational frequencies by
        projecting translation/rotation (and, for MEP points, the
        reaction coordinate) out of the mass-scaled hessian.
        Fills self._ccfreqs with Freq instances; does nothing for atoms
        or when the frequencies were already calculated.
        info about projection matrix: Appendix D of JChemPhys 1988, 88, 922-935
        '''
        if self._natoms < 2 : return None
        if self._ccfreqs is not None: return None
        #-----------------------#
        # Get projection matrix #
        #-----------------------#
        # (a) Translation vectors (b1,b2,b3)
        T_vecs = []
        for i in range(3):
            T = np.zeros(3*self._natoms)
            for j in range(self._natoms):
                T[3*j+i] = math.sqrt(self._masslist[j])
            T = T / np.linalg.norm(T)
            T_vecs.append(T)
        # (b) Rotation vectors (b4,b5,b6)
        R1 = np.zeros(3*self._natoms)
        R2 = np.zeros(3*self._natoms)
        R3 = np.zeros(3*self._natoms)
        for i in range(self._natoms):
            R1[3*i + 1] = math.sqrt(self._masslist[i]) * self._xcc[3*i + 2]
            R1[3*i + 2] = - math.sqrt(self._masslist[i]) * self._xcc[3*i + 1]
            R2[3*i + 0] = - math.sqrt(self._masslist[i]) * self._xcc[3*i + 2]
            R2[3*i + 2] = math.sqrt(self._masslist[i]) * self._xcc[3*i + 0]
            R3[3*i + 0] = math.sqrt(self._masslist[i]) * self._xcc[3*i + 1]
            R3[3*i + 1] = - math.sqrt(self._masslist[i]) * self._xcc[3*i + 0]
        R_vecs = []
        for R in (R1,R2,R3):
            # Linear molecules have one (near-)null rotation vector; skip it
            normR = np.linalg.norm(R)
            if normR > 1e-7: R = R / normR; R_vecs.append(R)
        # (c) Apply Gram Schmidt method (v1 to v6 vectors)
        X = np.matrix(T_vecs+R_vecs).transpose() # each column is a vector
        X_gs, R = np.linalg.qr(X)
        # (d) Get Translation-Rotation matrix
        R_matrix = X_gs * X_gs.H
        # (e) Get projection matrix (eq D1)
        # For a MEP point (type == -1) also project out v0 (the
        # mass-scaled reaction-coordinate vector)
        if self._type == -1: proj_matrix = R_matrix + np.matrix(self._v0).transpose() * np.matrix(self._v0)
        else : proj_matrix = R_matrix
        #-----------------------#
        # Get projected hessian #
        #-----------------------#
        # Get identity matrix
        I = np.identity(3*self._natoms)
        # Calculate projected hessian matrix
        proj_F = (I - proj_matrix) * self._Fms * (I - proj_matrix)
        # Diagonalize
        evalsF, evecsF = np.linalg.eigh(proj_F)
        # Remove translation and rotation degrees of freedom
        # (and the reaction coordinate for MEP points): repeatedly drop
        # the eigenpair with the smallest |eigenvalue|
        removals = self._ntra+self._nrot
        if self._type == -1: removals += 1
        for removal in range(removals):
            ref_value = float("inf")
            for idx in range(len(evalsF)):
                absval = abs(evalsF[idx])
                if absval < ref_value:
                    ref_value = absval
                    target = idx
            evalsF = np.delete(evalsF,target)
            evecsF = np.delete(evecsF,target,1)
        # Generate list of cc vibrational frequencies
        self._ccfreqs = []
        for idx in range(len(evalsF)):
            FreqInst = Freq(scaling=self._freqscal , mu=self._mu, rmode= self._rmode)
            FreqInst.set_evalue( evalsF[idx] )
            # Get evector as a numpy array
            evector = np.array(evecsF[:,idx].transpose().tolist()[0])
            FreqInst.set_evector( evector )
            FreqInst.calc_derivated_magnitudes()
            self._ccfreqs.append( FreqInst )
#-----------------------------------#
# Internal-Coordinate FREQUENCIES #
#-----------------------------------#
def get_Dmatrix(self,nricoords):
'''
Get the D matrix for two torsions
Last two torsions are the selected torsions
'''
B_wilson, C_wilson, sortedICs = hf.get_B_and_C(self._xcc, self._natoms, nricoords)
mass_array = []
for m in self._masslist: mass_array += [m,m,m]
u = [ 1.0/mass for mass in mass_array]
u = np.diag(u)
# Calculate h matrix (h* in [3], cursive G in [4])
h = B_wilson * u * B_wilson.transpose()
# Calculate D matrix
Dmatrix = np.linalg.inv(h)
# Units of each element of Dmatrix is [distance]*[mass]^2 (in a.u.)
Dmatrix = Dmatrix[-2:,-2:]
return Dmatrix
    def icproj_hessian(self,icoords):
        '''
        Transform the Cartesian gradient/hessian to (redundant)
        internal coordinates defined by icoords.
        For a MEP point (type == -1) the hessian is corrected with the
        gradient/curvature (C tensor) terms.
        Returns: sortedICs, g_ric, F_ric, B_wilson, C_wilson,
                 mass_array, u, h, h_inv, A
        '''
        #---------------------------#
        # Get B matrix and C tensor #
        #---------------------------#
        B_wilson, C_wilson, sortedICs = hf.get_B_and_C(self._xcc, self._natoms, icoords)
        num_ric, num_3N = B_wilson.shape
        num_nric = self._nvib
        #--------------------------#
        # Calculate h*, h^-1 and A #
        #--------------------------#
        # Diagonal inverse-mass matrix (each atom mass repeated 3 times)
        mass_array = []
        for m in self._masslist: mass_array += [m,m,m]
        u = [ 1.0/mass for mass in mass_array]
        u = np.diag(u)
        # Calculate h matrix (h* in [3], cursive G in [4])
        h = B_wilson * u * B_wilson.transpose()
        # Calculate inverse of h*
        if num_ric <= num_nric:
            h_inv = np.linalg.inv(h)
        else:
            # Redundant set: h is singular, build a generalized inverse
            # from its non-zero eigenpairs
            h_evals, h_evecs = np.linalg.eigh(h)
            K , Kprime = [], []
            Gamma = []
            Gamma_zeros = []
            for idx in range(len(h_evals)):
                h_eval = h_evals[idx]
                h_evec = [float(i) for i in h_evecs[:,idx]]
                # Non-zero eigenvalue
                if abs(h_eval) > 1e-15:
                    K.append( h_evec )
                    Gamma.append( h_eval )
                # Zero eigenvalue - save as zero
                else:
                    Kprime.append( h_evec )
                    Gamma_zeros.append( 0.0 )
            h_evecs_sorted = np.matrix(K + Kprime)
            h_evecs_sorted = h_evecs_sorted.transpose()
            h_evals_inv = [1.0/evalue for evalue in Gamma] + Gamma_zeros
            h_evals_inv = np.diag(h_evals_inv)
            h_inv = h_evecs_sorted * h_evals_inv * h_evecs_sorted.transpose()
        # Get A matrix
        A = u * B_wilson.transpose() * h_inv
        #--------------------------------------------------------#
        # Gradiend and hessian in redundant internal coordinates #
        #--------------------------------------------------------#
        F_ric = A.transpose() * self._Fcc * A
        # A MEP point is being analyzed
        if self._type == -1:
            g_cc = np.array(self._gcc,copy=True)
            g_cc = np.matrix(g_cc).transpose() # save as column matrix
            g_ric = A.transpose() * g_cc
            # Gradient correction to the internal-coordinate hessian
            for idx in range(g_ric.shape[0]):
                F_ric -= float(g_ric[idx]) * A.transpose() * C_wilson[idx] * A
        # A critical point of the PES is being analyzed
        else:
            # Zero "gradient" placeholder (stationary point)
            g_cc = np.zeros((3*self._natoms,3*self._natoms))
            g_ric = A.transpose() * g_cc
        return sortedICs, g_ric, F_ric, B_wilson, C_wilson, mass_array, u, h, h_inv, A
    def caca(self,icoords):
        '''
        Calculate internal-coordinate frequencies starting from the
        output of icproj_hessian.
        At some point, this will substitute the calc_icfreqs function
        (its second half currently duplicates calc_icfreqs).
        Fills self._icfreqs (sorted by eigenvalue).
        '''
        if self._type == -1: proj_rc = True
        else : proj_rc = False
        sortedICs, g_ric, F_ric, B_wilson, C_wilson, mass_array, u, h, h_inv, A = self.icproj_hessian(icoords)
        num_ric, num_3N = B_wilson.shape
        num_nric = self._nvib
        #------------------------------------------------------------#
        # Gradiend and hessian in non-redundant internal coordinates #
        # and with the reaction coordinate projected out             #
        #------------------------------------------------------------#
        # Project from ric to non-redundant internal coordinates (nric)
        P = h * h_inv
        f_nric = P * F_ric * P
        # Project out the reaction coordinate
        if proj_rc:
            g_nric = P * g_ric
            p = g_nric * g_nric.transpose() / (g_nric.transpose() * h * g_nric)
            proj_F = (np.identity(num_ric) - p*h) * f_nric * (np.identity(num_ric)-h*p)
        else:
            proj_F = f_nric
        # Get eigenvalues and eigenvectors
        # PS: as h*proj_F isn't symmetric, pF_evals and pF_evecs have imaginary components
        pF_evals, pF_evecs = np.linalg.eig(h*proj_F)
        #---------------------------------#
        # Get indices of zero eigenvalues #
        #---------------------------------#
        # Zero modes are detected via their wavenumber magnitude
        indices_zero = []
        for idx in range(len(pF_evals)):
            F_evalue = pF_evals[idx]
            F_evalue = F_evalue*self._mu
            freq = Freq(scaling=self._freqscal,mu=self._mu,rmode= self._rmode)
            freq.set_evalue(F_evalue)
            if abs(freq.get("wavenum")) < cons.ZERO_wavenum: indices_zero.append(idx)
        #--------------------------------------------------------------#
        # Get eigenvectors in mass-scaled cartesian coordinates        #
        # PS: single value decomposition (svd) used to get pF_evecs^-1 #
        #--------------------------------------------------------------#
        u_svd, s_svd, v_svd = np.linalg.svd(pF_evecs,full_matrices=True,compute_uv=True)
        # Pseudoinverse of s_svd
        s_svd_inv = []
        for idx in range(len(s_svd)):
            if abs(s_svd[idx]) < cons.ZERO2: s_svd_inv.append( s_svd[idx] )
            else: s_svd_inv.append( s_svd[idx]**-1.0 )
        pF_evecs_inv = np.dot(v_svd.transpose(), np.dot(np.diag(s_svd_inv),u_svd.transpose()))
        # Get normalized vectors (but only those of non-zero eigenvalue)
        pF_norm_evecs = []
        nrows, ncols = pF_evecs.shape
        zero_evec = [0.0] * nrows
        C = pF_evecs_inv * h * pF_evecs_inv.transpose()
        for idx in range(len(pF_evals)):
            # Only normalize those of non-zero evalue
            if abs(pF_evals[idx].real) > cons.ZERO2:
                # remove imag part of C element
                if abs( C[idx,idx].imag ) < cons.ZERO: Cii = float(C[idx,idx].real)
                else: Cii = C[idx,idx]
                # Get normalized L vector
                norm_L = pF_evecs[:,idx] * np.sqrt(Cii)
                pF_norm_evecs.append( norm_L.transpose().tolist()[0] )
            else:
                pF_norm_evecs.append( zero_evec )
        pF_norm_evecs = np.matrix(pF_norm_evecs).transpose()
        # Get chi matrix
        chi_matrix = A * pF_norm_evecs
        # Get normal-mode eigenvectors in mass-scaled cartesian
        self._icfreqs = []
        for j in range(num_ric):
            if j in indices_zero: continue
            F_evalue = pF_evals[j]
            F_evector = chi_matrix[:,j]
            # mass-scalde eigenvalue
            F_evalue = F_evalue*self._mu
            # mass-scaled eigenvector
            for i in range(3*self._natoms):
                m_i = mass_array[i]
                F_evector[i] = np.sqrt(m_i) * F_evector[i]
            if np.linalg.norm(F_evector) != 0.0: F_evector = F_evector / np.linalg.norm(F_evector)
            # Evector (remove zero imaginary component)
            F_evector = np.array([float(Fi.real) for Fi in F_evector.transpose().tolist()[0]])
            # Generate frequency
            freq = Freq(scaling=self._freqscal,mu=self._mu,rmode= self._rmode)
            freq.set_evalue(F_evalue)
            freq.set_evector(F_evector)
            freq.calc_derivated_magnitudes()
            self._icfreqs.append( (F_evalue,freq) )
        # Sort freqs
        self._icfreqs.sort()
        self._icfreqs = [j for (i,j) in self._icfreqs]
    def calc_icfreqs(self,icoords):
        '''
        Calculate vibrational frequencies in (redundant) internal
        coordinates defined by icoords.
        For a MEP point (type == -1) the reaction coordinate is
        projected out; for a critical point a zero gradient is used.
        Fills self._icfreqs (sorted by eigenvalue).
        '''
        self._icfreqs = []
        #---------------------------#
        # Get B matrix and C tensor #
        #---------------------------#
        B_wilson, C_wilson, sortedICs = hf.get_B_and_C(self._xcc, self._natoms, icoords)
        num_ric, num_3N = B_wilson.shape
        num_nric = self._nvib
        #--------------------------#
        # Calculate h*, h^-1 and A #
        #--------------------------#
        # Diagonal inverse-mass matrix (each atom mass repeated 3 times)
        mass_array = []
        for m in self._masslist: mass_array += [m,m,m]
        u = [ 1.0/mass for mass in mass_array]
        u = np.diag(u)
        # Calculate h matrix (h* in [3], cursive G in [4])
        h = B_wilson * u * B_wilson.transpose()
        # Calculate inverse of h*
        if num_ric <= num_nric:
            h_inv = np.linalg.inv(h)
        else:
            # Redundant set: h is singular, build a generalized inverse
            # from its non-zero eigenpairs
            h_evals, h_evecs = np.linalg.eigh(h)
            K , Kprime = [], []
            Gamma = []
            Gamma_zeros = []
            for idx in range(len(h_evals)):
                h_eval = h_evals[idx]
                h_evec = [float(i) for i in h_evecs[:,idx]]
                # Non-zero eigenvalue
                if abs(h_eval) > 1e-15:
                    K.append( h_evec )
                    Gamma.append( h_eval )
                # Zero eigenvalue - save as zero
                else:
                    Kprime.append( h_evec )
                    Gamma_zeros.append( 0.0 )
            h_evecs_sorted = np.matrix(K + Kprime)
            h_evecs_sorted = h_evecs_sorted.transpose()
            h_evals_inv = [1.0/evalue for evalue in Gamma] + Gamma_zeros
            h_evals_inv = np.diag(h_evals_inv)
            h_inv = h_evecs_sorted * h_evals_inv * h_evecs_sorted.transpose()
        # Get A matrix
        A = u * B_wilson.transpose() * h_inv
        #--------------------------------------------------------#
        # Gradiend and hessian in redundant internal coordinates #
        #--------------------------------------------------------#
        F_ric = A.transpose() * self._Fcc * A
        # A MEP point is being analyzed
        if self._type == -1:
            g_cc = np.array(self._gcc,copy=True)
            g_cc = np.matrix(g_cc).transpose() # save as column matrix
            g_ric = A.transpose() * g_cc
            # Gradient correction to the internal-coordinate hessian
            for idx in range(g_ric.shape[0]):
                F_ric -= float(g_ric[idx]) * A.transpose() * C_wilson[idx] * A
            proj_rc = True
        # A critical point of the PES is being analyzed
        else:
            # Zero "gradient" placeholder (stationary point)
            g_cc = np.zeros((3*self._natoms,3*self._natoms))
            g_ric = A.transpose() * g_cc
            proj_rc = False
        #------------------------------------------------------------#
        # Gradiend and hessian in non-redundant internal coordinates #
        # and with the reaction coordinate projected out             #
        #------------------------------------------------------------#
        # Project from ric to non-redundant internal coordinates (nric)
        P = h * h_inv
        f_nric = P * F_ric * P
        # Project out the reaction coordinate
        if proj_rc:
            g_nric = P * g_ric
            p = g_nric * g_nric.transpose() / (g_nric.transpose() * h * g_nric)
            proj_F = (np.identity(num_ric) - p*h) * f_nric * (np.identity(num_ric)-h*p)
        else:
            proj_F = f_nric
        # Get eigenvalues and eigenvectors
        # PS: as h*proj_F isn't symmetric, pF_evals and pF_evecs have imaginary components
        pF_evals, pF_evecs = np.linalg.eig(h*proj_F)
        #---------------------------------#
        # Get indices of zero eigenvalues #
        #---------------------------------#
        # Zero modes are detected via their wavenumber magnitude
        indices_zero = []
        for idx in range(len(pF_evals)):
            F_evalue = pF_evals[idx]
            F_evalue = F_evalue*self._mu
            freq = Freq(scaling=self._freqscal,mu=self._mu,rmode= self._rmode)
            freq.set_evalue(F_evalue)
            if abs(freq.get("wavenum")) < cons.ZERO_wavenum: indices_zero.append(idx)
        #--------------------------------------------------------------#
        # Get eigenvectors in mass-scaled cartesian coordinates        #
        # PS: single value decomposition (svd) used to get pF_evecs^-1 #
        #--------------------------------------------------------------#
        u_svd, s_svd, v_svd = np.linalg.svd(pF_evecs,full_matrices=True,compute_uv=True)
        # Pseudoinverse of s_svd
        s_svd_inv = []
        for idx in range(len(s_svd)):
            if abs(s_svd[idx]) < cons.ZERO2: s_svd_inv.append( s_svd[idx] )
            else: s_svd_inv.append( s_svd[idx]**-1.0 )
        pF_evecs_inv = np.dot(v_svd.transpose(), np.dot(np.diag(s_svd_inv),u_svd.transpose()))
        # Get normalized vectors (but only those of non-zero eigenvalue)
        pF_norm_evecs = []
        nrows, ncols = pF_evecs.shape
        zero_evec = [0.0] * nrows
        C = pF_evecs_inv * h * pF_evecs_inv.transpose()
        for idx in range(len(pF_evals)):
            # Only normalize those of non-zero evalue
            if abs(pF_evals[idx].real) > cons.ZERO2:
                # remove imag part of C element
                if abs( C[idx,idx].imag ) < cons.ZERO: Cii = float(C[idx,idx].real)
                else: Cii = C[idx,idx]
                # Get normalized L vector
                norm_L = pF_evecs[:,idx] * np.sqrt(Cii)
                pF_norm_evecs.append( norm_L.transpose().tolist()[0] )
            else:
                pF_norm_evecs.append( zero_evec )
        pF_norm_evecs = np.matrix(pF_norm_evecs).transpose()
        # Get chi matrix
        chi_matrix = A * pF_norm_evecs
        # Get normal-mode eigenvectors in mass-scaled cartesian
        self._icfreqs = []
        for j in range(num_ric):
            if j in indices_zero: continue
            F_evalue = pF_evals[j]
            F_evector = chi_matrix[:,j]
            # mass-scalde eigenvalue
            F_evalue = F_evalue*self._mu
            # mass-scaled eigenvector
            for i in range(3*self._natoms):
                m_i = mass_array[i]
                F_evector[i] = np.sqrt(m_i) * F_evector[i]
            if np.linalg.norm(F_evector) != 0.0: F_evector = F_evector / np.linalg.norm(F_evector)
            # Evector (remove zero imaginary component)
            F_evector = np.array([float(Fi.real) for Fi in F_evector.transpose().tolist()[0]])
            # Generate frequency
            freq = Freq(scaling=self._freqscal,mu=self._mu,rmode= self._rmode)
            freq.set_evalue(F_evalue)
            freq.set_evector(F_evector)
            freq.calc_derivated_magnitudes()
            self._icfreqs.append( (F_evalue,freq) )
        # Sort freqs
        self._icfreqs.sort()
        self._icfreqs = [j for (i,j) in self._icfreqs]
def check_icoords(self,icoords):
'''
Compare frequencies obtained with
cartesian coordinates and with icoords
'''
# cc-freqs
ccfreqs = self.get("ccfreqs")
if ccfreqs is None or ccfreqs == []:
print("ERROR: cc-freqs are not calculated!")
sys.exit()
# ic-freqs
self.calc_icfreqs(icoords)
icfreqs = self.get("icfreqs")
# Compare number of freqs
if len(ccfreqs) != len(icfreqs): return False
# Compare frequency value
for idx in range(len(ccfreqs)):
ccfreq = ccfreqs[idx].get("wavenum")/cons.cm
icfreq = icfreqs[idx].get("wavenum")/cons.cm
diff = abs(ccfreq-icfreq)
if diff > 0.5: return False
return True
#-----------------------------------#
# Adiabatic potential #
#-----------------------------------#
def calc_Vadi(self,k="cc",nimag=None):
if nimag is None: nimag = self._type
if nimag < 0.0: nimag = 0
self._zpe = 0.0
if k == "cc" and self._natoms>1:
for ccfreq in self._ccfreqs[nimag:]:
self._zpe += ccfreq.get("zpe")
if k == "ic" and self._natoms>1:
for icfreq in self._icfreqs[nimag:]:
self._zpe += icfreq.get("zpe")
self._Vadi = self._Etot + self._zpe
#-----------------------------------#
# Partition Functions #
#-----------------------------------#
def get_partition_functions(self,T,k="cc"):
'''
Input:
* T: the temperature, in Kelvin degrees
* k: to indicate if Cartesian-coordinate frequencies (cc)
or internal coordinate frequencies are to be used
Returns:
* ptra (phi_tra) : per volume unit (bohr^3)
* Qrot
* Qvib
* Qele
* Vadi - the reference energy in hartree
'''
# Number of vibrational freqs to consider
if self._type >= 0: nimag = self._type
else: nimag = 0
# Sort elstates
self._elstates.sort()
# In case of asking for zero kelvin
if T == 0.0:
ptra = 0.0
qrot = 0.0
qvib = 1.0
qele = self._elstates[0][1]
if k == "cc": zpe = sum([freq.get("zpe") for freq in self._ccfreqs[nimag:]])
if k == "ic": zpe = sum([freq.get("zpe") for freq in self._icfreqs[nimag:]])
# At other temperature
else:
beta = 1.0 / (cons.kB * T)
# (a) Translational partition function
ptra = ( 2 * np.pi * self._totmass * cons.kB * T )**(3./2.) / (cons.h**3)
# (b) Rotational partition function
qrot = 1.0
if self._nrot == 2:
qrot = 1.0 * (T / self._rotT[0])
if self._nrot == 3:
product = self._rotT[0] * self._rotT[1] * self._rotT[2]
qrot = math.sqrt(np.pi * T**3 / product)
qrot = qrot/self._rotsigma
# (c) Vibrational partition function
qvib = 1.0
zpe = 0.0
if self._nvib != 0:
if k=="cc":
for freq in self._ccfreqs[nimag:]:
qvib_i, zpe_i = freq.get_qvib(T)
qvib *= qvib_i
zpe += zpe_i
if k=="ic":
for freq in self._icfreqs[nimag:]:
qvib_i, zpe_i = freq.get_qvib(T)
qvib *= qvib_i
zpe += zpe_i
# (d) Electronic partition function
qele = sum( [mtp*np.exp(-beta*relE) for relE,mtp in self._elstates] )
# Reference for molecule and external reference
Vadi = self._Etot + zpe
return ptra, qrot, qvib, qele, Vadi
def get_pfns(self,T,k="cc"):
# Single temperature or a list of them
singleT = (type(T) == type(0)) or (type(T) == type(0.0))
# Return data for single T
if singleT:
return self.get_partition_functions(T,k)
# Return data for list of T
else:
vec_ptra = []
vec_qrot = []
vec_qvib = []
vec_qele = []
for idx in range(len(T)):
ptra, qrot, qvib, qele, Vadi = self.get_partition_functions(T[idx],k)
vec_ptra.append(ptra)
vec_qrot.append(qrot)
vec_qvib.append(qvib)
vec_qele.append(qele)
vec_ptra = np.array(vec_ptra)
vec_qrot = np.array(vec_qrot)
vec_qvib = np.array(vec_qvib)
vec_qele = np.array(vec_qele)
return vec_ptra,vec_qrot,vec_qvib,vec_qele, Vadi
#-----------------------------------#
# Functions related to the atoms #
#-----------------------------------#
def atom_xcc(self,idx):
return np.array(self._xcc[3*idx:3*idx+3],copy=True)
#-----------------------------------#
# Distances/angles in the molecule #
#-----------------------------------#
def icvalue(self,indices):
# Asking for distance
if len(indices) == 2:
idx1, idx2 = indices
x1 = self.atom_xcc(idx1)
x2 = self.atom_xcc(idx2)
return float(hf.calc_distance(x1,x2))
# Asking for angle
if len(indices) == 3:
idx1, idx2, idx3 = indices
x1 = self.atom_xcc(idx1)
x2 = self.atom_xcc(idx2)
x3 = self.atom_xcc(idx3)
return float(hf.calc_angle(x1,x2,x3))
# Asking for dihedral
if len(indices) == 4:
idx1, idx2, idx3, idx4 = indices
x1 = self.atom_xcc(idx1)
x2 = self.atom_xcc(idx2)
x3 = self.atom_xcc(idx3)
x4 = self.atom_xcc(idx4)
return float(hf.calc_dihedral(x1,x2,x3,x4))
def dist_between_frags(self, fragmentA, fragmentB):
min_distance = + float("inf")
for atom_i in fragmentA:
for atom_j in fragmentB:
d_ij = self.icvalue( (atom_i,atom_j) )
if d_ij < min_distance:
min_distance = d_ij
idxA, idxB = atom_i, atom_j
return min_distance, idxA, idxB
#-----------------------------------#
# Functions related to Graph Theory #
#-----------------------------------#
    def graph_getbonds(self):
        '''Return the list of bonds (edges) of the molecular graph.'''
        bonds = self._ugraph.get_edges()
        return bonds
    def graph_addbond(self, idx1, idx2):
        '''Add a bond (edge) between atoms idx1 and idx2.'''
        self._ugraph.add_edge(idx1,idx2)
    def graph_removebond(self, idx1, idx2):
        '''Remove the bond (edge) between atoms idx1 and idx2.'''
        self._ugraph.remove_edge(idx1,idx2)
    def graph_nbonds(self):
        '''Return the number of bonds (edges) in the molecular graph.'''
        return self._ugraph.get_nedges()
    def graph_autoconnect(self,lengthfactor=1.3,bonds=[]):
        '''
        Add bonds to the molecular graph automatically: two atoms are
        considered bonded when their distance is below lengthfactor times
        the sum of their covalent radii, or when the pair appears in the
        user-given list of bonds (in either atom order).
        NOTE(review): bonds=[] is a mutable default, but it is only read
        here (never mutated), so it is safe.
        '''
        for idx1 in range(self._natoms):
            atonum1 = self._atonums[idx1]
            for idx2 in range(idx1+1,self._natoms):
                atonum2 = self._atonums[idx2]
                # user-imposed bond?
                if (idx1,idx2) in bonds or (idx2,idx1) in bonds:
                    connected = True
                else:
                    # distance criterion based on covalent radii
                    dist = self.icvalue( (idx1,idx2) )
                    dref = cons.dict_covradii[atonum1] + cons.dict_covradii[atonum2]
                    dref = lengthfactor*dref
                    if dist <= dref: connected = True
                    else: connected = False
                # Connected atoms
                if connected: self.graph_addbond(idx1,idx2)
    def graph_fragconnect(self,maxfrags=1):
        '''
        Connect the disconnected fragments of the molecular graph, adding
        a bond between the closest pair of atoms of the closest fragments,
        until at most maxfrags fragments remain.
        Returns the list of added edges ([] if nothing had to be added).
        NOTE(review): if the candidate list is exhausted before reaching
        maxfrags, the function returns None implicitly — confirm callers
        handle that case.
        '''
        fragments = self._ugraph.get_fragments()
        nf = len(fragments)
        if nf <= maxfrags: return []
        # Calculate min distances between fragments
        distances = []
        for fA in range(nf):
            fragmentA = fragments[fA]
            for fB in range(fA+1,nf):
                fragmentB = fragments[fB]
                # Get distance
                dist_AB, idxA, idxB = self.dist_between_frags(fragmentA, fragmentB)
                distances.append( (dist_AB,fA,fB,idxA,idxB) )
        # Sort by distances
        distances.sort()
        # Create bonds
        added_edges = []
        connections = []
        for dist_AB,fA,fB,idx1,idx2 in distances:
            connect = True
            append = True
            # track which fragments already belong to the same connected group
            for connection in connections:
                if fA in connection and fB in connection:
                    connect = False
                    append = False
                elif fA in connection and fB not in connection:
                    connection += [fB]
                    append = False
                elif fA not in connection and fB in connection:
                    connection += [fA]
                    append = False
            if append: connections.append( [fA,fB] )
            if connect:
                self.graph_addbond(idx1,idx2)
                added_edges.append( (idx1,idx2) )
                nf -= 1
            if nf <= maxfrags: return added_edges
    def graph_neighbors(self,idx):
        '''Return the list of atoms bonded to atom idx.'''
        return list(self._ugraph.neighbors(idx))
    def graph_valence(self,idx):
        '''Return the number of atoms bonded to atom idx.'''
        return len(self._ugraph.neighbors(idx))
def graph_atostretchings(self,idx):
neighbors = self.graph_neighbors(idx)
neighbors.sort()
N = len(neighbors)
stretchings = []
for neighbor in neighbors:
if idx > neighbor: stretchings.append( (neighbor,idx) )
else : stretchings.append( (idx,neighbor) )
return stretchings
def graph_atobendings(self,idx):
neighbors = self.graph_neighbors(idx)
neighbors.sort()
N = len(neighbors)
bendings = []
if N > 1:
for idx1 in range(N):
IDX1 = neighbors[idx1]
for idx2 in range(idx1+1,N):
IDX2 = neighbors[idx2]
bending = (IDX1,idx,IDX2)
bendings.append(bending)
return bendings
def graph_atowaggings(self,idx):
neighbors = self.graph_neighbors(idx)
neighbors.sort()
N = len(neighbors)
waggings = []
if N > 2:
for idx1 in range(N):
IDX1 = neighbors[idx1]
for idx2 in range(idx1+1,N):
IDX2 = neighbors[idx2]
for idx3 in range(idx2+1,N):
IDX3 = neighbors[idx3]
wagging = (IDX1,IDX2,IDX3,idx)
waggings.append(wagging)
return waggings
    def graph_nolinealangle(self,M_idx,N_idx,visited=[]):
        '''
        Given the selected bond in the molecule (M-N)
        this functions returns the first angle that,
        starting at M and going in the direction of N,
        presents a non-lineal value
        ...-M-N-->...
        * Returns a tuple of three indices starting at M_idx
        * Returns None if no angle was found
        NOTE(review): visited=[] is a mutable default, but it is rebound
        (visited = visited + [...]) before any append, so the default
        list itself is never mutated.
        '''
        # Get nodes and corresponding neighbors
        M_neighbors = self.graph_neighbors(M_idx)
        N_neighbors = self.graph_neighbors(N_idx)
        # Remove M from N neighbors and vice versa
        if M_idx in N_neighbors: N_neighbors.remove(M_idx)
        if N_idx in M_neighbors: M_neighbors.remove(N_idx)
        # Remove common nodes, if any
        common_nodes = list(set(M_neighbors).intersection(set(N_neighbors)))
        for node in common_nodes:
            M_neighbors.remove(node)
            N_neighbors.remove(node)
        # Sort neighbors of N according to connectivity and to atomic number
        sorted_Nneighbors = []
        for neighbor in N_neighbors:
            atonum = self._atonums[neighbor]
            valence = self.graph_valence(neighbor)
            sorted_Nneighbors += [(valence,atonum,neighbor)]
        sorted_Nneighbors.sort(reverse=True)
        N_neighbors = [neighbor for nbonds,atonum,neighbor in sorted_Nneighbors]
        # Define all visited neighbors
        visited = visited + [M_idx,N_idx] + M_neighbors + common_nodes
        # No neighbors to visit??
        if N_neighbors == []: return None
        next_N = None
        for L_idx in N_neighbors:
            if L_idx in visited: continue
            else: visited.append(L_idx)
            next_N = L_idx
            # return the first angle below the linearity threshold
            theta = self.icvalue( (M_idx,N_idx,L_idx) )
            if theta < cons.LINEAR: return (M_idx,N_idx,L_idx)
        # all candidate angles were linear: advance along the chain (recursion)
        if next_N is None: return None
        return self.graph_nolinealangle(M_idx,next_N,visited=visited)
    def graph_torsion(self,bond):
        '''
        Returns the atoms involved in the
        torsion coordinate around the bond
        * bond: a (M_idx,N_idx) pair of bonded atoms
        * returns a 4-tuple of atom indices, or None if no valid
          (non-linear) torsion can be built around the bond
        '''
        M_idx, N_idx = bond
        # Valid L neighbors of M
        NML = self.graph_nolinealangle(N_idx,M_idx)
        # Valid O neighbors of N
        MNO = self.graph_nolinealangle(M_idx,N_idx)
        if NML is None: return None
        if MNO is None: return None
        L, M, N, O = NML[2],NML[1],MNO[1],MNO[2]
        # orientation chosen so the larger terminal index goes first
        if L > O: return (L,M,N,O)
        elif L < O: return (O,N,M,L)
        else : return None
    def graph_irotation(self,bonds,thetas):
        '''
        To generate a structure after internal rotations around bonds
        * bonds: a list of bonds to rotate around
        * thetas: the corresponding angle of rotation around each bond
        Generates the rotation around each bond
        * returns the rotated Cartesian coordinates as a numpy array, or
          None if any bond does not split the molecule into two different
          fragments (e.g. bonds inside a ring)
        '''
        # accept a single bond + angle (not wrapped in lists)
        if type(thetas) != type(list()) and type(thetas) != type(tuple()):
            bonds = [bonds]
            thetas = [thetas]
        # The geometry
        xvector = np.array(self._xcc,copy=True)
        failed = False
        for bond , theta in zip(bonds,thetas):
            # Get axis vector and origin
            idxA, idxB = bond
            # Get two fragments
            A_frag = set(self._ugraph.bfsearch1d(bond[1],bond[0]))
            B_frag = set(self._ugraph.bfsearch1d(bond[0],bond[1]))
            # Compare fragments. They may be equal in case of cyclic systems
            if (B_frag is None) or (A_frag is None) or (B_frag == A_frag):
                failed = True
                break
            # Choose smaller fragment
            if len(A_frag) > len(B_frag):
                # if B_frag, rotation around A-->B
                x0 = xvector[3*idxA:3*idxA+3]
                axis = xvector[3*idxB:3*idxB+3] - x0
                target_fragment = B_frag.copy()
            else:
                # if A_frag, rotation around B-->A
                x0 = xvector[3*idxB:3*idxB+3]
                axis = xvector[3*idxA:3*idxA+3] - x0
                target_fragment = A_frag.copy()
            axis = axis / np.linalg.norm(axis)
            # Remove indices of the bond
            target_fragment.discard(idxA)
            target_fragment.discard(idxB)
            # Get rotation matrix
            R, rot_theta = hf.gen_rotmatrix(axis,theta)
            # Rotate atoms in fragment
            rotated_xyz = []
            for idx in range(self._natoms):
                xyz = xvector[3*idx:3*idx+3]
                symbol = self._symbols[idx]
                if idx in target_fragment:
                    # translate to the rotation origin, rotate, translate back
                    xyz = xyz - x0
                    xyz = R * np.matrix(xyz).transpose()
                    xyz = np.array((xyz.transpose()).tolist()[0])
                    xyz = xyz + x0
                rotated_xyz += xyz.tolist()
            xvector = np.array(rotated_xyz)
        if failed: return None
        return xvector
# def graph_nricoords(self, utorsions = []):
# '''
# utorsions: a list of torsions defined by the user
# '''
# def helper(dihedral,deftorsions,waggings_k):
# '''
# dihedral: i-j-k-l, l being the target
# '''
# l = dihedral[3]
# sent = "torsion"
# dihedral = tuple(dihedral)
# for deftorsion in deftorsions:
# same_bond = sorted(list(deftorsion[1:3])) == sorted(list(dihedral[1:3]))
# same_torsion = sorted(list(deftorsion)) == sorted(list(dihedral))
# # The dihedral is one of the defined by the user
# if same_torsion:
# dihedral = tuple(utorsion)
# break
# # The dihedral cannot be used and has to be included as a wagging
# elif same_bond:
# twag = None
# for wag in range(len(waggings_k)):
# if l in waggings_k[wag]:
# twag = wag
# break
# if twag is None:
# dihedral = None
# break
# else: dihedral = tuple(waggings_k.pop(twag))
# sent = "wagging"
# else:
# continue
# return dihedral, waggings_k, sent
#
# utorsions = [tuple(utorsion) for utorsion in utorsions]
#
# stretchings, bendings, waggings = [] , [] , []
# torsions = list(utorsions)
#
# #---------------------#
# # (a) Find main chain #
# #---------------------#
# max_length = - float("inf")
# main_chain = None
# # Start at terminal atoms
# for i in range(self._natoms):
# if self.graph_valence(i) == 1:
# chain = self._ugraph.longest_path(i)
# if len(chain) > max_length:
# max_length = len(chain)
# main_chain = chain
# # In case of having no terminal atoms
# if main_chain is None:
# main_chain = self._ugraph.longest_path(0)
#
# #----------------------------------#
# # (b) A dictionary of waggings #
# # and a dictionary of valences #
# #----------------------------------#
# dict_waggings = {}
# dict_valences = {}
# for atom in range(self._natoms):
# dict_valences[atom] = self.graph_valence(atom)
# dict_waggings[atom] = self.graph_atowaggings(atom)
#
# #----------------------#
# # (c) Visit main chain #
# #----------------------#
# ramified_nodes = []
# for idx in range(1,len(main_chain)):
#
# atom_i = main_chain[idx]
#
# # Add nodes to ramified_nodes for next step
# if dict_valences[atom_i] > 2: ramified_nodes += [atom_i]
#
# # Define streching with previous atom
# atom_h = main_chain[idx-1]
# bond = (atom_h,atom_i)
# stretchings.append(bond)
# if idx == 1: continue
#
# # Define bending with previous atoms
# atom_g = main_chain[idx-2]
# angle = (atom_g,atom_h,atom_i)
# bendings.append(angle)
# if idx == 2: continue
#
# # Define dihedral with previous atoms
# atom_f = main_chain[idx-3]
# dihedral = (atom_f,atom_g,atom_h,atom_i)
# dihedral, dict_waggings[atom_h], sent = helper(dihedral,utorsions,dict_waggings[atom_h])
# if dihedral is None: continue
# if sent == "torsion": torsions.append(dihedral)
# if sent == "wagging": waggings.append(dihedral)
#
# #-------------------------#
# # (d) Visit ramifications #
# #-------------------------#
# visited = set(main_chain)
# while ramified_nodes != []:
# atom_i = ramified_nodes.pop(0)
# wagg_list = dict_waggings[atom_i]
# neighbors_i = set(self.graph_neighbors(atom_i))
# for node in visited: neighbors_i.discard(node)
#
# for atom_j in neighbors_i:
#
# # Add atom to ramified_nodes?
# neighbors_j = set(self.graph_neighbors(atom_j))
# for node in visited: neighbors_j.discard(node)
# if len(neighbors_j) > 0:
# ramified_nodes.append(atom_j)
#
# # The stretching
# stretching = (atom_i,atom_j)
#
# # The bending
# for icoord in stretchings:
# a,b = icoord
# if a == atom_i: bending = (atom_j,a,b)
# if b == atom_i: bending = (atom_j,b,a)
#
# # The torsion
# # a) was atom_j included previously in a (user-defined) dihedral?
# needtorsion = True
# for utorsion in utorsions:
# if utorsion[0] == atom_j or utorsion[3] == atom_j:
# needtorsion = False
# break
# # b) Find a torsion for this atom
# if needtorsion:
# for icoord in bendings:
# a,b,c = icoord
# if a == atom_i: dihedral = (c,b,a,atom_j)
# if c == atom_i: dihedral = (a,b,c,atom_j)
# dihedral, dict_waggings[atom_i], sent = helper(dihedral,torsions,dict_waggings[atom_i])
# else:
# dihedral = None
#
# # Add icoords
# stretchings.append( stretching )
# bendings.append( bending )
# if dihedral is None : continue
# if sent == "torsion": torsions.append( dihedral )
# if sent == "wagging": waggings.append( dihedral )
# visited.add(atom_j)
#
# #-------------------------------------------#
# # (e) Defined torsions should go at the end #
# #-------------------------------------------#
# final_torsions = []
# for torsion in list(set(torsions)):
# if torsion in utorsions: continue
# final_torsions.append(torsion)
# final_torsions = final_torsions + utorsions
#
# return stretchings, bendings, list(set(waggings)), final_torsions
    def gen_ricoords(self,torsions=[],check=False,cleaned=True):
        '''
        Generate a set of redundant internal coordinates (stretchings,
        bendings, linear bendings, waggings and torsions) from the
        molecular graph.
        * torsions: user-defined torsions; they are normalized in place so
          that the first atom index is smaller than the last one
        * check   : if True, ic-frequencies calculated with the generated
          set are compared with cc-frequencies; the program stops if they
          differ by more than 0.5 cm^-1
        * cleaned : if True, redundancies are reduced (one wagging per
          atom, bendings covered by waggings removed, torsions sharing a
          central bond reduced)
        Returns (ricoords, nics): ricoords is a list of (kind,atoms)
        pairs, kind "1"=stretching, "2"=bending, "3"=linear bending,
        "4"=wagging/torsion; nics counts each linear bending twice.
        NOTE(review): torsions=[] is a mutable default AND the passed list
        is modified in place (normalization) — confirm callers expect it.
        NOTE(review): nics does not include the torsions associated to
        linear bendings (ics_ltors) even though they are appended to
        ricoords — confirm this is intended.
        '''
        if torsions != []:
            for idx in range(len(torsions)):
                torsion = torsions[idx]
                # normalize orientation: smaller terminal index first
                if torsion[0] > torsion[3]:
                    torsions[idx] = tuple(torsion[::-1])
                else:
                    torsions[idx] = tuple(torsion)
        # angle thresholds (in radians) used to classify bendings
        linear = 178.0 * cons.D2R
        angular = 168.0 * cons.D2R
        #-----------------#
        # Get stretchings #
        #-----------------#
        ics_stretch = self.graph_getbonds()
        ics_stretch = [ set(bond) for bond in ics_stretch]
        #--------------#
        # Get bendings #
        #--------------#
        ics_abend = []
        ics_lbend = []
        for idxA in range(len(ics_stretch)):
            for idxB in range(idxA+1,len(ics_stretch)):
                bondA = ics_stretch[idxA]
                bondB = ics_stretch[idxB]
                central = bondA.intersection( bondB )
                # two bonds sharing exactly one atom define a bending
                if len(central) == 1:
                    at1 = bondA.difference(central)
                    at3 = bondB.difference(central)
                    if list(at1)[0] < list(at3)[0]: bending = list(at1) + list(central) + list(at3)
                    else : bending = list(at3) + list(central) + list(at1)
                    angle = self.icvalue(bending)
                    # classify: (quasi-)linear, angular, or both (transition zone)
                    if angle > linear:
                        ics_lbend.append( tuple(bending) )
                    elif angle < angular:
                        ics_abend.append( tuple(bending) )
                    else:
                        ics_lbend.append( tuple(bending) )
                        ics_abend.append( tuple(bending) )
        #--------------------------------------#
        # Get torsions associated to lbendings #
        #--------------------------------------#
        ics_ltors = []
        for lbend in ics_lbend:
            atI = lbend[0]
            atJ = lbend[1]
            atK = lbend[2]
            # atoms bonded to I
            bondedI = self.graph_neighbors(atI)
            bondedI = [at for at in bondedI if at != atJ]
            if len(bondedI) == 0: continue
            # atoms bonded to K
            bondedK = self.graph_neighbors(atK)
            bondedK = [at for at in bondedK if at != atJ]
            if len(bondedK) == 0: continue
            # Angles H-I-J
            partHI = None
            while len(bondedI) > 0:
                atH = bondedI.pop()
                angleHIJ = self.icvalue([atH,atI,atJ])
                if angleHIJ < linear:
                    partHI = [atH,atI]
                    break
            if partHI is None: continue
            # Angles J-K-L
            partKL = None
            while len(bondedK) > 0:
                atL = bondedK.pop()
                angleJKL = self.icvalue([atJ,atK,atL])
                if angleJKL < linear:
                    partKL = [atK,atL]
                    break
            if partKL is None: continue
            # Save data
            ltorsion = partHI+partKL
            ics_ltors.append(ltorsion)
        #-----------------------#
        # Get waggings/torsions #
        #-----------------------#
        ics_wagg = []
        ics_tors = []
        for idxA in range(len(ics_abend)):
            for idxB in range(idxA+1,len(ics_abend)):
                angleA = ics_abend[idxA]
                angleB = ics_abend[idxB]
                common = set(angleA).intersection(set(angleB))
                if len(common) == 2:
                    # Wagging:
                    if angleA[1] == angleB[1]:
                        around = sorted(list(set([angleA[0],angleA[2],angleB[0],angleB[2]])))
                        wagging = around + [angleA[1]]
                        if wagging not in ics_wagg:
                            ics_wagg.append(wagging)
                    # Torsion:
                    else:
                        # detect three-membered rings (the two angles close a cycle)
                        cycle1 = set([angleA[0],angleA[2]]) == set([angleB[1],angleB[2]])
                        cycle2 = set([angleA[0],angleA[2]]) == set([angleB[0],angleB[1]])
                        cycle3 = set([angleB[0],angleB[2]]) == set([angleA[1],angleA[2]])
                        cycle4 = set([angleB[0],angleB[2]]) == set([angleA[0],angleA[1]])
                        cycle5 = set([angleA[0],angleA[2]]) == set([angleB[0],angleB[2]])
                        # cycle?
                        if cycle1 or cycle2 or cycle3 or cycle4 or cycle5:
                            if cycle1: torsion = [angleB[0]]+list(angleA)
                            if cycle2: torsion = [angleB[2]]+list(angleA)
                            if cycle3: torsion = [angleA[0]]+list(angleB)
                            if cycle4: torsion = [angleA[2]]+list(angleB)
                            if cycle5: torsion = [angleB[1]]+list(angleA)
                            if torsion[0] > torsion[3]: torsion = torsion[::-1]
                        # Normal torsion
                        else:
                            non_common = sorted(list((set(angleA).union(set(angleB))).difference(common)))
                            left = non_common[0]
                            if left == angleA[0]: left = [left , angleA[1]]
                            elif left == angleA[2]: left = [left , angleA[1]]
                            elif left == angleB[0]: left = [left , angleB[1]]
                            elif left == angleB[2]: left = [left , angleB[1]]
                            right = non_common[1]
                            if right == angleA[0]: right = [angleA[1], right]
                            elif right == angleA[2]: right = [angleA[1], right]
                            elif right == angleB[0]: right = [angleB[1], right]
                            elif right == angleB[2]: right = [angleB[1], right]
                            torsion = left+right
                        if torsion not in ics_tors: ics_tors.append(torsion)
        # normalize all coordinates to tuples
        ics_stretch = [tuple(sorted(list(ic))) for ic in ics_stretch]
        ics_abend = [tuple(ic) for ic in ics_abend ]
        ics_lbend = [tuple(ic) for ic in ics_lbend ]
        ics_ltors = [tuple(ic) for ic in ics_ltors ]
        ics_wagg = [tuple(ic) for ic in ics_wagg ]
        ics_tors = [tuple(ic) for ic in ics_tors ]
        if cleaned:
            #--------------------------------#
            # Keep only one wagging per atom #
            #--------------------------------#
            toremove = []
            newlist = []
            for atom in range(self._natoms):
                waggs = [ic for ic in ics_wagg if ic[3] == atom]
                if waggs != []:
                    # NOTE(review): wagging is chosen at random, so the
                    # generated set is not deterministic between runs
                    ic = random.choice(waggs)
                    newlist = newlist + [ic]
                    # Bending to remove
                    bending = (ic[0],ic[3],ic[1])
                    toremove.append(bending)
            ics_wagg = newlist
            #--------------------------------------#
            # Remove bendings included in waggings #
            #--------------------------------------#
            ics_abend = [ic for ic in ics_abend if ic not in toremove]
            #---------------------------#
            # Remove redundant torsions #
            #---------------------------#
            newlist = torsions
            included = [sorted(torsion[1:3]) for torsion in newlist]
            for torsion in ics_tors:
                bond = sorted(torsion[1:3])
                if torsion in newlist:
                    included.append(bond)
                elif bond not in included:
                    included.append(bond)
                    newlist = [torsion] + newlist
            ics_tors = newlist
        #------------------#
        # Put all together #
        #------------------#
        nics = len(ics_stretch)+len(ics_abend)+2*len(ics_lbend)+len(ics_wagg)+len(ics_tors)
        ricoords = []
        for stretching in ics_stretch: ricoords.append( ("1",stretching) )
        for abending in ics_abend : ricoords.append( ("2",abending ) )
        for lbending in ics_lbend : ricoords.append( ("3",lbending ) )
        for ltorsion in ics_ltors : ricoords.append( ("4",ltorsion ) )
        for wagging in ics_wagg : ricoords.append( ("4",wagging ) )
        for torsion in ics_tors : ricoords.append( ("4",torsion ) )
        if check:
            ccfreqs = self.get("ccfreqs")
            if ccfreqs is None or ccfreqs == []:
                print("ERROR: cc-freqs are not calculated!")
                sys.exit()
            self.calc_icfreqs(ricoords)
            icfreqs = self.get("icfreqs")
            if len(ccfreqs) != len(icfreqs):
                print("ERROR: redundant internal coordinates are not adequate!")
                sys.exit()
            else:
                # compare frequencies one by one (in cm^-1)
                for idx in range(len(ccfreqs)):
                    ccfreq = ccfreqs[idx].get("wavenum")/cons.cm
                    icfreq = icfreqs[idx].get("wavenum")/cons.cm
                    diff = abs(ccfreq-icfreq)
                    if diff > 0.5:
                        print("ERROR: cc-freqs and ic-freqs differ more than 0.5 cm^-1")
                        sys.exit()
        return ricoords, nics
    def purify_ricoords(self,ricoords,torsions=[],show=False,rbonds=False):
        '''
        Reduce a redundant set of internal coordinates: coordinates are
        removed one at a time (in random order) as long as the ic-freqs
        still reproduce the cc-freqs within 0.5 cm^-1.
        * ricoords: list of (kind,atoms) internal coordinates
        * torsions: torsions that must be kept (never removed)
        * show    : if True, print each removed coordinate
        * rbonds  : if False, stretchings (kind "1") are never removed
        Returns (ics, nics) with the purified set and its size (linear
        bendings, kind "3", count twice).
        '''
        def get_nics(icoords):
            # size of a coordinate set; a linear bending counts as 2
            nics = 0
            for kind,ic in icoords:
                if kind == "3": nics += 2
                else : nics += 1
            return nics
        nn = get_nics(ricoords)
        if nn < self._nvib:
            print("ERROR: System requires %i internal coordinates, but only %i are given"%(self._nvib,nn))
            sys.exit()
        ccfreqs = self.get("ccfreqs")
        if ccfreqs is None or ccfreqs == []:
            print("ERROR: cc-freqs are not calculated!")
            sys.exit()
        # Coordinates that can't be removed
        fixed = []
        if not rbonds:
            fixed += [(kind,tuple(ic)) for kind,ic in ricoords if kind=="1"]
        for torsion in torsions:
            fixed += [("4",tuple(torsion))]
        # Save initial set
        ics = list(ricoords)
        # Remove redundancies
        targets = [(k,i) for k,i in ics if (k,i) not in fixed ]
        random.shuffle(targets)
        for target in targets:
            kind,ic = target
            # remove target
            ics2 = list(ics)
            ics2.remove(target)
            # calculate ic-freqs
            # NOTE(review): bare except deliberately skips candidates for
            # which the ic-freq calculation fails
            try : self.calc_icfreqs(ics2)
            except: continue
            icfreqs = self.get("icfreqs")
            # compare cc and ic freqs
            if len(icfreqs) != len(ccfreqs): continue
            for ccfreq,icfreq in zip(ccfreqs,icfreqs):
                diff = abs((ccfreq.get("wavenum")-icfreq.get("wavenum"))/cons.cm)
                if diff > 0.5: break
            # diff from the loop above: > 0.5 means some freq degraded
            if diff > 0.5: continue
            # Accept removal
            ics = list(ics2)
            if show and kind != "3": print(" removing IC %s..."%("-".join( [str(atom+1) for atom in ic])))
            if show and kind == "3": print(" removing IC %s..."%("=".join( [str(atom+1) for atom in ic])))
            # Finished?
            if get_nics(ics) == self._nvib: break
        return ics, get_nics(ics)
#-----------------------------------#
# Functions related to the MEP #
#-----------------------------------#
    def imag_dir(self,icoords=None):
        '''
        Get the pair of atoms whose distance
        varies the most when following the
        imaginary frequency
        * icoords: optional list of (kind,atoms) internal coordinates to
          scan; if None, all interatomic distances are considered
        Returns (coordinate, sign) where sign is "+"/"-" depending on the
        direction of the variation.
        NOTE(review): assumes at least one imaginary frequency exists
        (otherwise ievec/idir are unbound) — guaranteed by the assert on
        self._type == 1? confirm.
        '''
        assert self._type == 1, "Problems in imag_dir"
        for freq in self._ccfreqs:
            if freq.isItImag():
                ievec = freq.get("evector")
                break
        if icoords is None:
            coordinates = [(i,j) for i in range(self._natoms) for j in range(i+1,self._natoms)]
        else:
            # linear bendings (kind "3") are excluded
            coordinates = [ic for ictype,ic in icoords if ictype != "3"]
        # Initial geom and new geom
        x_initial = np.array(self._xms,copy=True)
        x_final = hf.ms2cc_x(x_initial + ievec,self._masslist,self._mu)
        newstrut = Struct("", x_final, self._atonums, masslist=self._masslist)
        maxvar = -float("inf")
        for coord in coordinates:
            value_initial = self.icvalue(coord)
            value_final = newstrut.icvalue(coord)
            diff = (value_final-value_initial)
            if abs(diff) > maxvar:
                maxvar = abs(diff)
                if diff < 0.0: idir = (coord , "-")
                if diff > 0.0: idir = (coord , "+")
        return idir
def v0dir_ifreq(self):
for freq in self._ccfreqs:
if freq.isItImag():
self._v0 = np.array(freq.get("evector"),copy=True)
ifreq = freq.copy()
break
return ifreq
def v0dir_grad(self):
assert self._gms is not None, "ERROR: gms is None"
self._v0 = - self._gms / np.linalg.norm(self._gms)
    def v0dir_check(self,dir_v0):
        '''
        Check that following self._v0 changes the reference internal
        coordinate in the expected direction.
        * dir_v0: (coordinate, effect) pair as returned by imag_dir,
          effect being "+" (increase) or "-" (decrease)
        Returns True if v0 points in the expected direction, False
        otherwise.
        '''
        coord, effect = dir_v0
        # Prepare geometries
        x_initial = np.array(self._xms,copy=True)
        x_final = hf.ms2cc_x(x_initial + self._v0,self._masslist,self._mu)
        newstrut = Struct("", x_final, self._atonums, masslist=self._masslist)
        # Calculate difference
        value_initial = self.icvalue(coord)
        value_final = newstrut.icvalue(coord)
        diff = (value_final-value_initial)
        # Is it correct?
        if diff > 0.0 and effect == "-": return False
        if diff < 0.0 and effect == "+": return False
        return True
def v0dir_invert(self):
if (self._v0 is not None): self._v0 = - self._v0
if (self._v1 is not None): self._v1 = - self._v1
    def v1dir_hess(self):
        '''
        Compute the curvature vector v1 from the mass-scaled Hessian:
        v1 = (F*v0 - (v0^T F v0) v0) / |g|, i.e. the component of F*v0
        orthogonal to v0, normalized by the gradient norm.
        NOTE(review): relies on np.matrix, which is deprecated in modern
        NumPy — consider migrating to plain arrays with @ products.
        '''
        v0Fv0 = float( np.matrix(self._v0) * self._Fms * np.matrix(self._v0).transpose() )
        component_A = np.array(self._Fms * np.matrix(self._v0).transpose()).transpose()[0]
        component_B = v0Fv0*self._v0
        self._v1 = (component_A - component_B) / np.linalg.norm(self._gms)
    def nextTaylor(self,ds,bw=False,qt=False):
        '''
        * Uses the quadratic Taylor expansion of the path:
          x(s0+ds) = x(s0) + v0*ds + 0.5 *v1 * ds^2
        * If bw is True, it uses -v^(0) and -v^(1)
        * If qt is False, the quadratic term is omitted:
          x(s0+ds) = x(s0) + v0*ds
        Returns (x_next_cc, x_next): the next geometry in Cartesian and
        in mass-scaled coordinates.
        '''
        ds = abs(ds)
        v0 = self.get("v0")
        v1 = self.get("v1")
        if bw: v0 = -v0
        # NOTE(review): v1 is only negated when qt is also True (it is
        # unused otherwise)
        if bw and qt: v1 = -v1
        x_next = self._xms + v0 * ds
        if qt: x_next += 0.5 * v1 * (ds**2)
        # Get it also in non-scaled coordinates
        x_next_cc = hf.ms2cc_x(x_next,self._masslist,self._mu)
        return x_next_cc, x_next
def write_molden(self,moldenfile):
molden = open(moldenfile,'w')
molden.write("[Molden Format]\n")
molden.write("[FR-COORD] # Coordinates in bohr\n")
for idx in range(self._natoms):
symbol = self._symbols[idx]
x, y, z = self._xcc[3*idx:3*idx+3]
molden.write(" %2s %+11.6f %+11.6f %+11.6f \n"%(symbol,x,y,z))
molden.write("[FREQ] # Frequencies in cm^-1\n")
evecs = []
for vibfreq in self._ccfreqs:
f, v = vibfreq.get("wavenum"), vibfreq.get("evector")
if f.imag != 0.0: f = - f.imag
f = f/cons.cm
molden.write(" %9.4f\n"%f)
# As they are displacements, ms2cc_x has to be used
if v is not None:
evec = hf.ms2cc_x(v,self._masslist,self._mu)
evec = evec / np.linalg.norm(evec)
evecs.append( evec )
molden.write("[FR-NORM-COORD] # Displacements in bohr\n")
nv = 1
for evector in evecs:
molden.write("vibration %i\n"%nv )
for j in range(0,len(evector),3):
vx, vy, vz = evector[j:j+3]
molden.write(" %+9.3f %+9.3f %+9.3f\n"%(vx,vy,vz))
nv += 1
molden.close()
class MEP():
    def __init__(self):
        '''Initialize all MEP attributes to empty defaults (None/{}); they
        are filled later through set()/set_TS()/calculate_*() calls.'''
        # Basic about TS
        self._gts = None
        self._tsname = None
        self._ts = None
        # Basic information
        self._masses = None
        self._totmass = None
        self._atonums = None
        self._symbols = None
        # MEP variables
        self._path = None
        self._mu = None
        self._ds = None
        self._hsteps = None
        self._cubic = None
        self._sbw = None
        self._sfw = None
        self._bsst = None
        self._idir = None
        self._epse = None
        self._epsg = None
        # More for MEP
        self._rstbw = None
        self._rstfw = None
        self._mep = {}
        self._mepl1 = None
        self._mepl2 = None
        self._tvals = {}
        self._dirtmp = None
        self._dirbw = None
        self._dirfw = None
        # For Vadi
        self._internal= None
        self._ics = None
        # Energies
        self._eref = None
        self._e0 = None
        # for calculations (name-mangled to _MEP__spc/_MEP__template)
        self.__spc = None
        self.__template = None
        # Correction factors
        self._Tlist = None
        self._sct = None
        self._cvt = None
        self._cvt_s = None
        self._cagtst = None
        self._cagcvt = None
def get(self,which):
if which == "gts" : return self._gts
if which == "masslist": return self._masses
if which == "totmass" : return self._totmass
if which == "atonums" : return self._atonums
if which == "symbols" : return self._symbols
if which == "ts" : return self._ts
if which == "rstbw" : return self._rstbw
if which == "rstfw" : return self._rstfw
if which == "mep" : return self._mep
if which == "tvals" : return self._tvals
    def set(self,which,value):
        '''
        Set the attribute associated to keyword which; unknown keywords
        are silently ignored.
        NOTE(review): "spc" and "template" are stored in private
        (name-mangled) attributes _MEP__spc/_MEP__template.
        '''
        if which == "Tlist" : self._Tlist = value
        if which == "ics" : self._ics = value
        if which == "rstbw" : self._rstbw = value
        if which == "rstfw" : self._rstfw = value
        if which == "dirtmp" : self._dirtmp = value
        if which == "dirbw" : self._dirbw = value
        if which == "dirfw" : self._dirfw = value
        if which == "path" : self._path = value
        if which == "mu" : self._mu = value
        if which == "ds" : self._ds = value
        if which == "cubic" : self._cubic = value
        if which == "hsteps" : self._hsteps = value
        if which == "sbw" : self._sbw = value
        if which == "sfw" : self._sfw = value
        if which == "idir" : self._idir = value
        if which == "bsst" : self._bsst = value
        if which == "epse" : self._epse = value
        if which == "epsg" : self._epsg = value
        if which == "internal" : self._internal = value
        if which == "eref" : self._eref = value
        if which == "e0" : self._e0 = value
        if which == "spc" : self.__spc = value
        if which == "template": self.__template = value
    def set_TS(self,gtsfile,name,masslist=None,freqscal=1.0):
        '''
        Read the transition-state gts file and build the corresponding
        Struct instance (self._ts); also cache masses, atomic numbers,
        symbols and total mass.
        * gtsfile : path to the gts file of the TS
        * name    : name assigned to the TS structure
        * masslist: optional list of (isotopic) masses; when given, the
          point group and rotational sigma read from the file are skipped
        * freqscal: scaling factor for the frequencies
        '''
        self._gts = gtsfile
        self._tsname = name
        xyz_list , atonum_list , ch, mtp, Etot, pgroup, rotsigma, gcc , Fcc, freqs = read_gtsfile(self._gts)
        variables = ["ch","mtp","Etot","freqscal"]
        values = [ ch , mtp , Etot , freqscal ]
        # gradient/Hessian are only set when present in the file
        if masslist is None: variables += ["pgroup","rotsigma"]; values += [pgroup , rotsigma ]
        if gcc is not None: variables += ["gcc"] ; values += [gcc]
        if Fcc is not None: variables += ["Fcc"] ; values += [Fcc]
        structure = Struct(name,xyz_list,atonum_list,masslist=masslist,stype=1)
        structure.set(variables,values)
        self._ts = structure
        self._masses = self._ts.get("masslist")
        self._atonums = self._ts.get("atonums")
        self._symbols = self._ts.get("symbols")
        self._totmass = self._ts.get("totmass")
    def mod_masslist(self,masslist):
        '''Replace the masses of the TS structure and refresh the cached
        mass list and total mass.'''
        self._ts.set("masslist",masslist)
        self._masses = masslist
        self._totmass = self._ts.get("totmass")
    def prepare_ts(self):
        '''Run the basic setups of the TS structure with options [2,3]
        (see Struct.basic_setups for their meaning).'''
        self._ts.basic_setups([2,3])
    def calculate_mep(self,pprint=False):
        '''
        Calculate the minimum energy path using modq2dtor.mep.get_mep and
        store the results: self._mep (dict of structures), self._tvals,
        self._mepl1 (all sorted (s,label) points) and self._mepl2 (only
        points with a Hessian).  self._eref defaults to the energy of the
        first Hessian point if not set before.
        * pprint: forwarded to get_mep (verbose printing)
        '''
        from modq2dtor.mep import get_mep
        tuple_path = self._path, self._mu, self._ds, self._cubic, self._hsteps, self._sbw, self._sfw, self._idir, self._bsst, self._epse, self._epsg
        tuple_spc = (self.__spc,self.__template)
        dirbw = self._dirtmp+'bw_%s/'%self._tsname
        dirfw = self._dirtmp+'fw_%s/'%self._tsname
        tuple_ffs = (self._rstbw, self._rstfw, dirbw, dirfw)
        input_mep = (self._ts, tuple_path, tuple_spc, tuple_ffs, self._mep, self._tvals, pprint)
        self._mep, self._tvals, infobw, infofw = get_mep(*input_mep)
        self._mepl1 = sorted([mep_struct.get("meppoint") for mep_struct in self._mep.values()])
        self._mepl2 = [mp for mp in self._mepl1 if self._mep[mp[1]].get("Fcc") is not None]
        if self._eref is None:
            self._eref = self._mep[self._mepl2[0][1]].get("Etot")
        # Define how imaginary frequencies are treated
        for value in self._mep.values(): value._rmode = 2
def string_mep(self,points="hessian"):
if points == "hessian": lpoints = self._mepl2
if points == "all" : lpoints = self._mepl1
string = " s (bohr) | E (kcal/mol) \n"
string += "-------------------------\n"
for s,l in lpoints:
Etot = (self._mep[l].get("Etot") - self._eref)*cons.kcalmol
string += " %+8.4f | %12.3f \n"%(s,Etot)
string += "-------------------------\n"
return string
    def calculate_vadi(self):
        '''
        Compute the adiabatic potential Vadi(s) = V_MEP(s) + ZPE(s) along
        the MEP (Hessian points only), build the spline self._Vadi, find
        its maximum (self._sAG, self._VAG) and, if undefined, set self._e0
        from the endpoint values.  When self._internal == "yes", the ZPE
        from internal-coordinate frequencies is used and compared against
        the Cartesian one (warnings/errors if they disagree).
        Returns the summary string (tables of ZPE/Vadi and frequencies).
        '''
        warning = " Warning: ZPE in ic is smaller than in cc for s = %+.4f bohr (%.3f kcal/mol of difference)"
        error1 = " ERROR: ZPE in ic differs by %.3f kcal/mol from that of cc for saddle point"
        error2 = " ERROR: the set of internal coordinates is not appropiate..."
        string_warnings = ""
        numerr = 0.1 # in kcal/mol
        # Independent variable
        data_x = [mep_s for mep_s, mep_label in self._mepl2]
        # Dependent variable
        data_y = []
        list1_cc = []
        list1_ic = []
        list2_cc = []
        list2_ic = []
        for mep_s, mep_label in self._mepl2:
            structure = self._mep[mep_label]
            # Prepare structure
            structure.basic_setups([0,2,3,4])
            # For MEP points, set v0
            if mep_s != 0.0: structure.v0dir_grad()
            # Calculate ZPE at each point (cc-hessian)
            structure.calc_ccfreqs()
            ccfreqs = [freq for freq in structure._ccfreqs]
            # at the saddle point (s=0) the imaginary frequency is skipped
            if mep_s == 0.0: ccfreqs = ccfreqs[1:]
            cc_zpe = sum([freq.get("zpe") for freq in ccfreqs])
            list1_cc.append(cc_zpe)
            list2_cc.append( [str(freq) for freq in ccfreqs] )
            # Get Vadi
            Vadi = structure.get("Etot") + cc_zpe - self._eref
            # Calculate ZPE at each point (ci-hessian)
            # NOTE(review): compared with the string "yes" here, but used
            # as a plain boolean elsewhere (string_vadi_table,
            # calculate_sct) — confirm consistency
            if self._internal == "yes":
                structure.calc_icfreqs(self._ics)
                icfreqs = [freq for freq in structure._icfreqs]
                if mep_s == 0.0:
                    icfreqs = icfreqs[1:]
                    if len(icfreqs) != len(ccfreqs): print(error2); sys.exit()
                ic_zpe = sum([freq.get("zpe") for freq in icfreqs])
                list1_ic.append(ic_zpe)
                list2_ic.append( [str(freq) for freq in icfreqs] )
                # Check them
                diff = (ic_zpe-cc_zpe)*cons.kcalmol
                if diff < -numerr:
                    string_warnings += warning%(mep_s,diff)+"\n"
                if abs(diff) > numerr and mep_s == 0.0:
                    string_warnings += error1%diff+"\n"
                    sys.exit(string_warnings)
                Vadi = structure.get("Etot") + ic_zpe - self._eref
            # Append data
            data_y.append(Vadi)
        # Get spline and find maximum
        self._Vadi = SplineVaG(data_x,data_y)
        self._sAG, self._VAG = self._Vadi.get_max()
        # value of E0
        if self._e0 is None:
            E0_bw = self._Vadi(self._mepl2[ 0][0])
            E0_fw = self._Vadi(self._mepl2[-1][0])
            self._e0 = max(E0_bw,E0_fw) + self._eref
        # Get string
        STRING1 = self.string_vadi_table(list1_cc,list1_ic)
        STRING2 = self.string_vadi_freqs(list2_cc,list2_ic)
        return STRING1+string_warnings+"\n"+STRING2
    def string_vadi_table(self,list1_cc,list1_ic):
        '''
        Build the table (string) with the internal coordinates and the
        V_MEP/ZPE/V_adi values (kcal/mol, relative to self._eref) along
        the MEP, plus the location of the Vadi maximum.
        * list1_cc/list1_ic: ZPEs per Hessian point (cc and ic sets)
        NOTE(review): self._internal is used here as a boolean, while
        calculate_vadi compares it with the string "yes"; for
        self._internal == "no" the ic header would still be printed —
        confirm intended.
        '''
        STRING = ""
        if self._internal:
            STRING += "Internal coordinates (ic):\n"
            # print the coordinates in rows of (at most) nepl entries
            nepl = 4
            for idx in range(0,len(self._ics),nepl):
                line = ""
                for xx in range(nepl):
                    pos = idx + xx
                    if pos > len(self._ics)-1: continue
                    tt, ic = self._ics[pos]
                    # linear bendings use "=" as separator, the rest "-"
                    if tt=="3": ic = "=".join("%i"%(a+1) for a in ic)
                    else : ic = "-".join("%i"%(a+1) for a in ic)
                    line = line + " %11s "%ic
                STRING += " %s\n"%line
            STRING += "\n"
        STRING += "ZPE and adiabatic potential:\n"
        STRING += " Reference energy: %.6f hartree\n"%(self._eref)
        STRING += " --------------------------------------------------\n"
        STRING += " s (bohr) | V_MEP | ZPE(cc) | ZPE(ic) | V_adi \n"
        STRING += " --------------------------------------------------\n"
        for idx in range(len(self._mepl2)):
            mep_s = self._mepl2[idx][0]
            mep_label = self._mepl2[idx][1]
            mep_E = (self._mep[mep_label].get("Etot")-self._eref)*cons.kcalmol
            zpecc = "%7.3f"%(list1_cc[idx]*cons.kcalmol)
            Vadi = self._Vadi(mep_s) * cons.kcalmol
            if len(list1_ic) != 0: zpeic = "%7.3f"%(list1_ic[idx]*cons.kcalmol)
            else : zpeic = " - "
            STRING += " %+7.3f | %+7.3f | %s | %s | %+7.3f \n"%(mep_s,mep_E,zpecc,zpeic,Vadi)
        STRING += " --------------------------------------------------\n"
        # Maximum
        STRING += " %+7.3f | (maximum in Vadi) | %+7.3f \n"%(self._sAG, self._VAG*cons.kcalmol)
        STRING += " --------------------------------------------------\n"
        STRING += "\n"
        return STRING
def string_vadi_freqs(self,list2_cc,list2_ic):
STRING = ""
# Print frequencies
nepl = 7
for freqs_list,ftype in [(list2_cc,"cc"),(list2_ic,"ic")]:
if len(freqs_list) == 0: continue
STRING += "Summary of vibrational frequencies (%2s):\n"%ftype
list_s = [s for s,l in self._mepl2]
for idxi in range(0,len(list_s),nepl):
s_range = "|".join([" %+8.4f "%ss for ss in list_s[idxi:idxi+nepl]])
STRING += " "*5+"-"*len(s_range)+"\n"
STRING += " "*5+s_range+"\n"
STRING += " "*5+"-"*len(s_range)+"\n"
for nf in range(len(freqs_list[0])):
freq_range = "|".join( [ " "+ff[nf]+" " for ff in freqs_list[idxi:idxi+nepl]])
STRING += " "*5+freq_range+"\n"
STRING += "\n"
return STRING
def calculate_sct(self,sctbmf,sbw=None,sfw=None):
'''
bmf = "hess"
bmf = "grad1"
bmf = "grad2"
'''
from modq2dtor.sct import get_SCTkappa
if sbw is None: sbw = self._mepl1[ 0][0]
if sfw is None: sfw = self._mepl1[-1][0]
mepl1 = [(s,l) for (s,l) in list(self._mepl1) if sbw<=s<=sfw]
mepl2 = [(s,l) for (s,l) in list(self._mepl2) if sbw<=s<=sfw]
# Get gradients
grads = []
for mep_s, mep_label in mepl1:
structure = self._mep[mep_label]
grad = structure.get("gms")
grads.append(grad)
# Get analytic v1 vectors
dict_hpts = {}
for mep_s, mep_label in mepl2:
structure = self._mep[mep_label]
if not self._internal: freqs = structure.get("ccfreqs")
if self._internal: freqs = structure.get("icfreqs")
if mep_s!= 0.0: structure.v1dir_hess()
anv1 = structure.get("v1")
dict_hpts[mep_label] = [mep_s,freqs,anv1]
# Get kappaSCT
lists = (list(mepl1),list(mepl2))
E0=self._e0-self._eref
self._SCTkappa, STRING = get_SCTkappa(self._Vadi,dict_hpts,lists,self._Tlist,\
grads=grads,E0=E0,mu=self._mu,bmfcalc=sctbmf)
return self._SCTkappa, STRING
    def calculate_sct_CONV(self,sctbmf,sctsteps,sctvar):
        # Converge kappa_SCT with respect to the MEP length: starting from a
        # path shortened at both ends, the limits are extended stepwise (with
        # kappa monitored at the first temperature only) until the relative
        # change falls below sctvar (in percent) or sctsteps is exhausted.
        #   sctbmf   : bmf mode forwarded to calculate_sct
        #   sctsteps : maximum number of extension steps (1 disables the loop)
        #   sctvar   : convergence threshold in percent
        # Side effects: updates self._sbw / self._sfw and (inside the loop)
        # recomputes the MEP and Vadi on the extended range.
        if sctsteps == 1: sctsteps = 0
        self._sbw = self._mepl1[ 0][0]
        self._sfw = self._mepl1[-1][0]
        # Calculation with reduced MEP
        if sctsteps > 1:
            # Temporarily restrict to the first temperature for the loop
            copy_Tlist = list(self._Tlist)
            print(" Convergence at %7.2f K:"%self._Tlist[0])
            print(" -----------------------------------")
            print(" s_bw | s_fw | kappa_SCT ")
            print(" -----------------------------------")
            self._Tlist = self._Tlist[0:1]
            kappas = []
            # Start from a path shortened by hsteps*ds at both ends
            self._sbw += self._ds*self._hsteps
            self._sfw -= self._ds*self._hsteps
            # Increase MEP until convergence
            for step in range(sctsteps):
                SCT_T0, STRING = self.calculate_sct(sctbmf,self._sbw,self._sfw)
                kappas.append( (SCT_T0[0], self._sbw , self._sfw ) )
                print(" %+8.4f | %+8.4f | %11.4E "%(self._sbw,self._sfw,SCT_T0[0]))
                # Converged? (relative change vs previous step, in percent)
                if len(kappas) > 1 :
                    diff = 100.0 * abs( (kappas[-1][0] - kappas[-2][0]) / kappas[-1][0] )
                    if diff < sctvar: break
                # Set increase of MEP: extend one side (or both) depending on
                # the energy difference between the two path end points
                lbw = self._mepl2[ 0][1]
                lfw = self._mepl2[-1][1]
                mep_diffE = self._mep[lfw].get("Etot") - self._mep[lbw].get("Etot")
                if mep_diffE*cons.kcalmol > +1.0:
                    self._sfw += self._ds*self._hsteps
                elif mep_diffE*cons.kcalmol < -1.0:
                    self._sbw -= self._ds*self._hsteps
                else:
                    self._sbw -= self._ds*self._hsteps
                    self._sfw += self._ds*self._hsteps
                # Recalculate MEP and Vadi on the new range before next pass
                self.calculate_mep(False)
                self.calculate_vadi()
            print(" -----------------------------------")
            print("")
            self._Tlist = copy_Tlist
        # Calculate kappa for all T using the (converged) path limits
        SCT_kappas, STRING = self.calculate_sct(sctbmf,self._sbw,self._sfw)
        return SCT_kappas, STRING
def calculate_cagtst(self):
from modq2dtor.cag import get_CAGTSTkappa
self._cagtst = get_CAGTSTkappa(self._Vadi, self._Tlist)
STRING = self.string_cag(which="tst")
return self._cagtst, STRING
def calculate_cagcvt(self):
from modq2dtor.cag import get_CAGCVTkappa
self._cagcvt = get_CAGCVTkappa(self._Vadi, self._Tlist, self._cvt_s)
STRING = self.string_cag(which="cvt")
return self._cagcvt, STRING
def string_cag(self,which="tst"):
STRING = ""
STRING += " -----------------------\n"
STRING += " T (K) | CAG_%3s \n"%(which.upper())
STRING += " -----------------------\n"
for idx in range(len(self._Tlist)):
T = self._Tlist[idx]
if which == "tst": cag = "%11.4E"%(self._cagtst[idx])
if which == "cvt": cag = "%11.4E"%(self._cagcvt[idx])
STRING += " %7.2f | %s \n"%(T, cag)
STRING += " -----------------------\n"
return STRING
def calculate_cvt(self,smin=-1.00,smax=+1.00):
'''
Only between [smin,smax]
'''
if self._internal: k = "ic"
else : k = "cc"
Tlist = np.array(self._Tlist)
# Select data
mepl2 = []
for s,l in self._mepl2:
if s < smin : continue
if s > smax : continue
mepl2.append( (s,l) )
# Locate s = 0.0 (if not, just half)
idx_ts = len(mepl2) / 2
for idx in range(len(mepl2)):
s,l = mepl2[idx]
if s == 0.0: idx_ts = idx
# Initialize matrix with values
nrows = len(mepl2)
ncols = len(Tlist)
gibbs_matrix = np.zeros( (nrows,ncols) )
# Set values in matrix
data = []
for row in range(nrows):
mep_s, mep_l = mepl2[row]
structure = self._mep[mep_l]
# Calculate Qtot
phi_tr, pf_rot, pf_vib, pf_ele, Vadi = structure.get_pfns(Tlist,k=k)
qtot = phi_tr * pf_rot * pf_vib * pf_ele
data.append( (Vadi,qtot) )
if mep_s == 0.0:
ts_qtot = qtot
ts_Vadi = Vadi
for row in range(nrows):
Vadi, qtot = data[row]
# Save data as log to avoid big numbers!
exp = np.exp(-(Vadi-ts_Vadi)/cons.kB/Tlist)
Kc = qtot/ts_qtot * exp
gibbs = -cons.kB * Tlist * np.log(Kc)
# Save data in kcal/mol
gibbs_matrix[row,:] = gibbs * cons.kcalmol
# For each temperature, find maximum gibbs
x_values = [s for s,l in mepl2]
self._cvt = []
self._cvt_s = []
for col in range(ncols):
y_values = gibbs_matrix[:,col]
CVT_s, CVT_gibbs = hf.obtain_extremum(x_values,y_values,xtr="max")
CVT_gibbs = CVT_gibbs / cons.kcalmol
# CVT values
CVT_s = float(CVT_s)
CVT_gamma = np.exp( -(CVT_gibbs/cons.kB/Tlist[col]))
# Correct value, just in case
if CVT_gamma > 1.0: CVT_gamma = 1.0
# Save data
self._cvt.append( float(CVT_gamma) )
self._cvt_s.append( float(CVT_s) )
STRING = self.string_cvt(mepl2,gibbs_matrix)
# Get string
return self._cvt, STRING
    def string_cvt(self,sl_list,mgibbs):
        # Format the CVT results:
        #   sl_list : list of (s, label) MEP points (one per row of mgibbs)
        #   mgibbs  : matrix of Gibbs free-energy differences [kcal/mol];
        #             rows follow s, columns follow self._Tlist
        # Returns the Gibbs matrix plus the Gamma_CVT table as one string.
        list_CVT = self._cvt  # NOTE(review): kept but unused below
        STRING = ""
        STRING += "* Matrix of gibbs free energy (MEP point - TS) [kcal/mol]\n"
        STRING += " rows: s coordinate (%i)\n"%(len(sl_list))
        STRING += " cols: temperatures (%i)\n"%(len(self._Tlist))
        STRING += " shape: %i x %i\n"%(mgibbs.shape)
        STRING += "\n"
        nrows, ncols = mgibbs.shape
        head_line = " s value | "
        matrix_string = ""
        for row in range(nrows):
            line = ""
            for col in range(ncols):
                # With >= 9 temperature columns only the leading and trailing
                # ones are printed; an ellipsis marks the omitted block.
                if (ncols < 9) or (col < 4 or col > ncols-4):
                    # header labels are built only on the first row pass
                    if row == 0: head_line = head_line + " %7.2f K |"%(self._Tlist[col])
                    line = line + " %7.2f |"%(mgibbs[row][col])
                elif (col == ncols-4):
                    if row == 0: head_line = head_line + "...|"
                    line = line + "...|"
                else:
                    continue
            matrix_string = matrix_string + " %+8.4f | "%sl_list[row][0] + line + "\n"
        STRING += head_line + "\n"
        STRING += matrix_string + "\n"
        # Print correction factor
        STRING += "* CVT correction factor and associated change in \n"
        STRING += " Gibbs free energy of activation (DDeltaG_CVT, kcal/mol)\n"
        STRING += "\n"
        STRING += "-----------------------------------------------\n"
        STRING += " T (K) | s_CVT | Gamma_CVT | DDeltaG_CVT \n"
        STRING += "-----------------------------------------------\n"
        for idx in range(len(self._Tlist)):
            T = self._Tlist[idx]
            s_CVT = self._cvt_s[idx]
            gamma_CVT = self._cvt[idx]
            # DDeltaG_CVT = -RT ln(Gamma_CVT), converted to kcal/mol
            gibbs = - cons.R * T * np.log(gamma_CVT) * cons.kcal
            STRING += " %7.2f | %+7.4f | %11.4E | %9.4f \n"%(T, s_CVT,gamma_CVT,gibbs)
        STRING += "-----------------------------------------------\n"
        return STRING
#--------------------------------------#
# Some functions associated to classes #
#--------------------------------------#
def basic2Struct(name,xcc,atonums,ch,mtp,Etot,gcc,Fcc,stype=-1,masslist=None):
    """Build a Struct from basic molecular data.

    Charge, multiplicity and total energy are always set; the gradient
    (gcc) and Hessian (Fcc) are attached only when not None.
    """
    pairs = [("ch", ch), ("mtp", mtp), ("Etot", Etot)]
    if gcc is not None: pairs.append(("gcc", gcc))
    if Fcc is not None: pairs.append(("Fcc", Fcc))
    structure = Struct(name, xcc, atonums, masslist=masslist, stype=stype)
    structure.set([key for key, _val in pairs], [val for _key, val in pairs])
    return structure
#--------------------------------------#
def xyz2Struct(xyzfile,name="",stype=0):
    """Read an xyz file and wrap it in a Struct.

    Coordinates are divided by cons.angstrom before building the Struct.
    Returns (structure, masslist) as read from the file.
    """
    coords, symbols, masses = hf.read_xyz(xyzfile)
    structure = Struct(name, coords / cons.angstrom, symbols,
                       masslist=masses, stype=stype)
    return structure, masses
#--------------------------------------#
def gts2Struct(gtsfile,name="",masslist=None,stype=0):
    """Build a Struct from a gts file.

    Point group and rotational sigma are only taken from the file when no
    masslist is supplied. If the file has no Hessian but does list
    frequencies, Freq objects are attached directly to structure._ccfreqs.
    """
    (xyz_list, atonum_list, ch, mtp, Etot,
     pgroup, rotsigma, gcc, Fcc, freqs) = read_gtsfile(gtsfile)
    pairs = [("ch", ch), ("mtp", mtp), ("Etot", Etot)]
    if masslist is None:
        pairs += [("pgroup", pgroup), ("rotsigma", rotsigma)]
    if gcc is not None: pairs.append(("gcc", gcc))
    if Fcc is not None: pairs.append(("Fcc", Fcc))
    structure = Struct(name, xyz_list, atonum_list, masslist=masslist, stype=stype)
    structure.set([key for key, _val in pairs], [val for _key, val in pairs])
    # No Hessian but frequencies available in the file
    if Fcc is None and freqs is not None:
        ccfreqs = []
        for wavenum in freqs:
            fobj = Freq()
            fobj.set_wavenum(wavenum * cons.cm)
            fobj.calc_derivated_magnitudes()
            ccfreqs.append(fobj)
        structure._ccfreqs = ccfreqs
    return structure
#--------------------------------------#
| {
"alphanum_fraction": 0.5001033664,
"author": null,
"avg_line_length": 38.4389701208,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "da53fa7d3f422fecd4fc6831e3aa2556fad623db",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "abb7e8a229ed469323b508404c5f9cb717dd4798",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "cathedralpkg/Q2DTor",
"max_forks_repo_path": "src/modq2dtor/classes.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "abb7e8a229ed469323b508404c5f9cb717dd4798",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "cathedralpkg/Q2DTor",
"max_issues_repo_path": "src/modq2dtor/classes.py",
"max_line_length": 148,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "abb7e8a229ed469323b508404c5f9cb717dd4798",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "cathedralpkg/Q2DTor",
"max_stars_repo_path": "src/modq2dtor/classes.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 31463,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 120929
} |
#!/usr/bin/env python3
# encoding: utf-8
"""
Copyright (c) 2021, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Copyright 2020 Salesforce Research (Weiran Wang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""End-to-end speech recognition model decoding script."""
import configargparse
import logging
import os
import random
import sys
import numpy as np
def get_parser():
    """Get default arguments.

    Builds the configargparse parser for the decoding script. Up to three
    config files may be layered (--config < --config2 < --config3);
    command-line flags override values from all of them.
    """
    parser = configargparse.ArgumentParser(
        description='Transcribe text from speech using a speech recognition model on one CPU or GPU',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    # general configuration
    parser.add('--config', is_config_file=True,
               help='Config file path')
    parser.add('--config2', is_config_file=True,
               help='Second config file path that overwrites the settings in `--config`')
    parser.add('--config3', is_config_file=True,
               help='Third config file path that overwrites the settings in `--config` and `--config2`')
    parser.add_argument('--ngpu', type=int, default=0,
                        help='Number of GPUs')
    parser.add_argument('--backend', type=str, default='pytorch',
                        choices=['pytorch'], help='Backend library')
    parser.add_argument('--debugmode', type=int, default=1,
                        help='Debugmode')
    parser.add_argument('--seed', type=int, default=1,
                        help='Random seed')
    parser.add_argument('--verbose', '-V', type=int, default=1,
                        help='Verbose option')
    parser.add_argument('--batchsize', type=int, default=1,
                        help='Batch size for beam search (0: means no batch processing)')
    parser.add_argument('--preprocess-conf', type=str, default=None,
                        help='The configuration file for the pre-processing')
    # task and dataloader related
    parser.add_argument('--recog-sets', type=str, default=None,
                        help='Recognition datasets')
    parser.add_argument('--precomputed-feats-type', type=str, default=None,
                        help='Audio feature type (e.g., fbank_pitch)')
    parser.add_argument('--spmodel', type=str, default=None,
                        help='Filename of the sentencepiece model for converting text to tokens')
    parser.add_argument('--text-filename', type=str, default=None,
                        help='File containing normalized text for training (to be processed by spmodel)')
    parser.add_argument('--loader-num-worker', type=int, default=0,
                        help='Number of workers for each data loader')
    parser.add_argument('--loader-cache-mb', type=int, default=2048,
                        help='Cache size (in mb) used by each loader worker')
    parser.add_argument('--num_replicas', type=int, default=1,
                        help='Total number of CPU jobs for parallel decoding')
    parser.add_argument('--jobid', type=int, default=1,
                        help='The jobid for current CPU decoding')
    parser.add_argument('--result-label', type=str, required=True,
                        help='Filename of result label data (json)')
    # model (parameter) related
    parser.add_argument('--model', type=str, required=True,
                        help='Model file parameters to read')
    parser.add_argument('--model-conf', type=str, default=None,
                        help='Model config file')
    parser.add_argument('--num-spkrs', type=int, default=1,
                        choices=[1],
                        help='Number of speakers in the speech')
    parser.add_argument('--num-encs', default=1, type=int,
                        help='Number of encoders in the model.')
    # search related
    parser.add_argument('--nbest', type=int, default=10,
                        help='Output N-best hypotheses')
    parser.add_argument('--nbest-copy-input', action='store_true', default=False,
                        help='Copy input to output json')
    parser.add_argument('--nbest-compute-wer', action='store_true', default=False,
                        help='Calculate wer for partial hypothesis of the nbest list')
    parser.add_argument('--beam-size', type=int, default=1,
                        help='Beam size')
    parser.add_argument('--penalty', type=float, default=0.0,
                        help='Insertion penalty')
    parser.add_argument('--maxlenratio', type=float, default=0.0,
                        help="""Input length ratio to obtain max output length.
                        If maxlenratio=0.0 (default), it uses an end-detect function
                        to automatically find maximum hypothesis lengths""")
    parser.add_argument('--minlenratio', type=float, default=0.0,
                        help='Input length ratio to obtain min output length')
    parser.add_argument('--ctc-weight', type=float, default=0.0,
                        help='CTC weight in joint decoding')
    parser.add_argument('--weights-ctc-dec', type=float, action='append',
                        help='ctc weight assigned to each encoder during decoding.[in multi-encoder mode only]')
    parser.add_argument('--ctc-window-margin', type=int, default=0,
                        help="""Use CTC window with margin parameter to accelerate
                        CTC/attention decoding especially on GPU. Smaller margin
                        makes decoding faster, but may increase search errors.
                        If margin=0 (default), this function is disabled""")
    # context for input (low frame rate)
    parser.add_argument('--input-context', default=0, type=int,
                        help='Context to use at input.')
    parser.add_argument('--input-skiprate', default=1, type=int,
                        help='Skiprate to use at input.')
    # rnnlm related
    parser.add_argument('--rnnlm', type=str, default=None,
                        help='RNNLM model file to read')
    parser.add_argument('--rnnlm-conf', type=str, default=None,
                        help='RNNLM model config file to read')
    parser.add_argument('--word-rnnlm', type=str, default=None,
                        help='Word RNNLM model file to read')
    parser.add_argument('--word-rnnlm-conf', type=str, default=None,
                        help='Word RNNLM model config file to read')
    parser.add_argument('--word-dict', type=str, default=None,
                        help='Word list to read')
    parser.add_argument('--lexicon-dict', type=str, default=None,
                        help='Lexicon dict to read')
    parser.add_argument('--lm-weight', type=float, default=0.1,
                        help='RNNLM weight')
    parser.add_argument('--sublm-weight', type=float, default=0.8,
                        help='Weight of subword LM in multi-level LM')
    parser.add_argument('--word-bonus', type=float, default=0.0,
                        help='Word bonus score')
    parser.add_argument('--truth_file', type=str, default=None,
                        help='file containing ground truth text')
    # realigner related
    parser.add_argument('--realigner-num-steps', default=-1, type=int,
                        help='Number of steps for iterative re-alignment')
    return parser
def main(args):
    """Run the main decoding function."""
    opts = get_parser().parse_args(args)

    # Map the verbosity flag onto a logging level (default: warnings only)
    log_fmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    if opts.verbose == 1:
        logging.basicConfig(level=logging.INFO, format=log_fmt)
    elif opts.verbose == 2:
        logging.basicConfig(level=logging.DEBUG, format=log_fmt)
    else:
        logging.basicConfig(level=logging.WARN, format=log_fmt)
        logging.warning("Skip DEBUG/INFO messages")

    # check CUDA_VISIBLE_DEVICES against the requested GPU count
    if opts.ngpu > 0:
        cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cvd is None:
            logging.warning("CUDA_VISIBLE_DEVICES is not set.")
        elif opts.ngpu != len(cvd.split(",")):
            logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
            sys.exit(1)

    # TODO(mn5k): support of multiple GPUs
    if opts.ngpu > 1:
        logging.error("The program only supports ngpu=1.")
        sys.exit(1)

    # display PYTHONPATH
    logging.info('python path = ' + os.environ.get('PYTHONPATH', '(None)'))

    # seed both RNGs for reproducibility
    random.seed(opts.seed)
    np.random.seed(opts.seed)
    logging.info('set random seed = %d' % opts.seed)

    # validate rnn options
    if opts.rnnlm is not None and opts.word_rnnlm is not None:
        logging.warning("Both --rnnlm and --word-rnnlm are specified, using multi-level RNNLM.")

    # recog (imported lazily to keep CLI start-up light)
    from espnet.asr.pytorch_backend.asr_dsl import recog
    recog(opts)
if __name__ == '__main__':
    # CLI entry point: strip the program name and hand argv to main().
    main(sys.argv[1:])
| {
"alphanum_fraction": 0.6107835781,
"author": null,
"avg_line_length": 47.3553299492,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "98c6cd158a74690322254efed260ea2143833985",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1de0c16573bc6f47c0b16d3f07af0f670db8f30f",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "salesforce/TransformerASR",
"max_forks_repo_path": "espnet/bin/asr_recog.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1de0c16573bc6f47c0b16d3f07af0f670db8f30f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "salesforce/TransformerASR",
"max_issues_repo_path": "espnet/bin/asr_recog.py",
"max_line_length": 112,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "1de0c16573bc6f47c0b16d3f07af0f670db8f30f",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "salesforce/TransformerASR",
"max_stars_repo_path": "espnet/bin/asr_recog.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-25T08:26:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-10-19T16:53:40.000Z",
"num_tokens": 1996,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9329
} |
"""The geodesic datasets in this folder provide even coverage over a sphere.
Sometimes a more dense coverage is required in some part of the sphere.
In such a case a composite file with extracts from several of the
geodesic data files can be made.
For example, suppose more dense coverage is required in a cone around the
x axis. Or more dense coverage is required near the three primary planes.
The sphere data available in pyradi/data/plotspherical are:
=============== =========== ============== ===============
Filename Resolution Number Number
. (degrees) points triangles
=============== =========== ============== ===============
vertexsphere_0_12 63.4 12 20
vertexsphere_1_42 33.9 42 80
vertexsphere_2_162 17.2 162 320
vertexsphere_3_642 8.6 642 1280
vertexsphere_4_2562 4.32 2562 5120
vertexsphere_5_10242 2.16 10242 20480
vertexsphere_6_40962 1.08 40962 81920
vertexsphere_7_163842 0.54 163842 327680
The geodesic vertex vectors are assumed to have length of 1.
"""
import numpy as np

# Resolution tiers: low-res everywhere, medium-res near the primary planes,
# high-res inside a cone around the +x axis.
filelores = 'vertexsphere_2_162.txt'
filemeres = 'vertexsphere_3_642.txt'
filehires = 'vertexsphere_4_2562.txt'
filedataset = 'compositesphere.txt'

xAxisConeApexDeg = 20
xPlaneHalfAngle = 1
yPlaneHalfAngle = 1
zPlaneHalfAngle = 1


def _append_plane_band(base, points, column, label, half_angle_deg):
    """Append the vertices of `points` lying within +/- tan(half angle)
    of the plane <label> == 0 to `base` and report the running count."""
    band = np.tan(half_angle_deg * np.pi / 180)
    print(f'Selecting all vertices with {label}<{band} and {label}>{-band}')
    mask = (points[:, column] < band) & (points[:, column] > -band)
    combined = np.vstack((base, points[mask]))
    print(f'now {combined.shape} vertices')
    return combined


dataset = np.loadtxt(filelores, delimiter=' ')
meres = np.loadtxt(filemeres, delimiter=' ')
hires = np.loadtxt(filehires, delimiter=' ')
print(f'now {dataset.shape} vertices')

# hires vertices inside the cone around the +x axis (x >= cos(apex angle))
xCone = np.cos(xAxisConeApexDeg * np.pi / 180)
print(f'Selecting all vertices with x>{xCone}')
dataset = np.vstack((dataset, hires[hires[:, 0] >= xCone]))
print(f'now {dataset.shape} vertices')

# medium-res vertices near each of the three primary planes
dataset = _append_plane_band(dataset, meres, 0, 'x', xPlaneHalfAngle)
dataset = _append_plane_band(dataset, meres, 1, 'y', yPlaneHalfAngle)
dataset = _append_plane_band(dataset, meres, 2, 'z', zPlaneHalfAngle)

np.savetxt(filedataset, dataset, delimiter=' ')
| {
"alphanum_fraction": 0.6494252874,
"author": null,
"avg_line_length": 35.5909090909,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f8a885e5aad084cf5f333bd5d4656b25a54d4439",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 28,
"max_forks_repo_forks_event_max_datetime": "2021-08-19T08:59:30.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-03-18T09:08:13.000Z",
"max_forks_repo_head_hexsha": "2069f3b9a046888f449980de7408ecbc59931b13",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "grosskc/pyradi",
"max_forks_repo_path": "pyradi/data/plotspherical/composite-geodesic.py",
"max_issues_count": 14,
"max_issues_repo_head_hexsha": "2069f3b9a046888f449980de7408ecbc59931b13",
"max_issues_repo_issues_event_max_datetime": "2021-07-13T21:54:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-02-06T04:55:13.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "grosskc/pyradi",
"max_issues_repo_path": "pyradi/data/plotspherical/composite-geodesic.py",
"max_line_length": 76,
"max_stars_count": 43,
"max_stars_repo_head_hexsha": "2069f3b9a046888f449980de7408ecbc59931b13",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "grosskc/pyradi",
"max_stars_repo_path": "pyradi/data/plotspherical/composite-geodesic.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-27T16:31:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-03-18T09:11:53.000Z",
"num_tokens": 879,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3132
} |
#!/usr/bin/env python
"""Compute summary statistics of data files.
Computes summary statistics of data files such as the number of samples or the
mean and variance of output variables.
Examples
--------
.. code:: bash
dcpg_data_stats.py
./data/*.h5
"""
from __future__ import print_function
from __future__ import division
from collections import OrderedDict
import os
import sys
import argparse
import logging
import numpy as np
import pandas as pd
import seaborn as sns
import six
from deepcpg import data as dat
from deepcpg.data import hdf
def get_output_stats(output):
    """Summary statistics for one output vector.

    Entries equal to dat.CPG_NAN are masked out before computing the mean
    and variance. Returns an OrderedDict with total count, observed count,
    observed fraction, mean and variance.
    """
    masked = np.ma.masked_values(output, dat.CPG_NAN)
    nb_tot = len(masked)
    nb_obs = np.sum(masked != dat.CPG_NAN)
    stats = OrderedDict()
    stats['nb_tot'] = nb_tot
    stats['nb_obs'] = nb_obs
    stats['frac_obs'] = nb_obs / nb_tot
    stats['mean'] = float(np.mean(masked))
    stats['var'] = float(np.var(masked))
    return stats
def plot_stats(stats):
    """Bar-plot every metric column of the stats frame, one facet per metric.

    Rows are sorted by observed fraction (descending). Returns the seaborn
    FacetGrid so the caller can save the figure.
    """
    ordered = stats.sort_values('frac_obs', ascending=False)
    molten = pd.melt(ordered, id_vars=['output'], var_name='metric')
    grid = sns.FacetGrid(data=molten, col='metric', sharex=False)
    grid.map(sns.barplot, 'value', 'output')
    # strip per-axis labels; facet titles already carry the metric name
    for axis in grid.axes.ravel():
        axis.set(xlabel='', ylabel='')
    return grid
class App(object):
    """Command-line application computing summary statistics of DeepCpG
    data files (counts, observed fraction, mean, variance per output)."""

    def run(self, args):
        # args is the full argv vector; args[0] supplies the program name.
        name = os.path.basename(args[0])
        parser = self.create_parser(name)
        opts = parser.parse_args(args[1:])
        return self.main(name, opts)

    def create_parser(self, name):
        """Build the argument parser for this command."""
        p = argparse.ArgumentParser(
            prog=name,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description='Computes data statistics')
        p.add_argument(
            'data_files',
            nargs='+',
            help='Data files')
        p.add_argument(
            '-o', '--out_tsv',
            help='Write statistics to tsv file')
        p.add_argument(
            '-f', '--out_fig',
            help='Create output figure')
        p.add_argument(
            '--output_names',
            help='List of regex to filter outputs',
            nargs='+')
        p.add_argument(
            '--nb_sample',
            help='Maximum number of samples',
            type=int)
        p.add_argument(
            '--verbose',
            help='More detailed log messages',
            action='store_true')
        p.add_argument(
            '--log_file',
            help='Write log messages to file')
        return p

    def main(self, name, opts):
        """Compute per-output statistics; optionally write tsv and figure."""
        logging.basicConfig(filename=opts.log_file,
                            format='%(levelname)s (%(asctime)s): %(message)s')
        log = logging.getLogger(name)
        if opts.verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)
        log.debug(opts)

        # Output names may be filtered by the user-provided regex list
        output_names = dat.get_output_names(opts.data_files[0],
                                            regex=opts.output_names)
        stats = OrderedDict()
        # NOTE(review): the loop variable shadows the `name` parameter used
        # for the logger above.
        for name in output_names:
            output = hdf.read(opts.data_files, 'outputs/%s' % name,
                              nb_sample=opts.nb_sample)
            output = list(output.values())[0]
            stats[name] = get_output_stats(output)
        # Collect one row per output into a single DataFrame
        tmp = []
        for key, value in six.iteritems(stats):
            tmp.append(pd.DataFrame(value, index=[key]))
        stats = pd.concat(tmp)
        stats.index.name = 'output'
        stats.reset_index(inplace=True)

        print(stats.to_string())
        if opts.out_tsv:
            stats.to_csv(opts.out_tsv, sep='\t', index=False)
        if opts.out_fig:
            plot_stats(stats).savefig(opts.out_fig)

        return 0
if __name__ == '__main__':
    # CLI entry point: delegate to App with the full argv vector.
    app = App()
    app.run(sys.argv)
| {
"alphanum_fraction": 0.5888580792,
"author": null,
"avg_line_length": 28.9562043796,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5c17cab2dc462fdee6d07f64d8f00b8a8fdc4405",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 70,
"max_forks_repo_forks_event_max_datetime": "2021-08-30T02:28:33.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-05-31T14:25:14.000Z",
"max_forks_repo_head_hexsha": "7f58da5423121168edabb27202c234df0f0e460d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cangermueller/deepcpg2",
"max_forks_repo_path": "scripts/dcpg_data_stats.py",
"max_issues_count": 41,
"max_issues_repo_head_hexsha": "7f58da5423121168edabb27202c234df0f0e460d",
"max_issues_repo_issues_event_max_datetime": "2021-05-31T10:15:18.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-08-02T17:18:00.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cangermueller/deepcpg2",
"max_issues_repo_path": "scripts/dcpg_data_stats.py",
"max_line_length": 78,
"max_stars_count": 151,
"max_stars_repo_head_hexsha": "7f58da5423121168edabb27202c234df0f0e460d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cangermueller/deepcpg2",
"max_stars_repo_path": "scripts/dcpg_data_stats.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-28T08:23:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-05-30T07:05:11.000Z",
"num_tokens": 868,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3967
} |
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 14:23:18 2020
@author: Thaileng_GIC
"""
#import csv
import pandas as pd
#import numpy as np
season = ['-dry-2017', '-wet-2017', '-dry-2018', '-wet-2018']
for b in season:
for a in range (1,2):
# --- Daily rainfall extraction for one WRF parameterization run (a, b) ---
# Each CSV holds one row per station per output time; the 19 stations appear
# in a fixed order, so the row for station STATIONS[k] on day x sits at
# index k + 19*x.  (Replaces 19 hand-copied per-station lists/loops with a
# single data-driven pass; the resulting `df` is identical.)
STATIONS = ['Attapeu', 'Borkeo', 'Louangnumtha', 'Louangphabang', 'Mkham',
            'Oudomxai', 'Pakse', 'Paksong', 'Paksan', 'Phonhong', 'Salavanh',
            'Savannakhet', 'Sayabouli', 'Thakhek', 'Sekong', 'Phongsaly',
            'Samnuea', 'Vientiane', 'Xiengkhouang']

data1=pd.read_csv('../Extract_CSV/Parameterization Results/wrf-para-'+str(a)+'/wrf-para-'+str(a)+str(b)+'-rainc.csv', sep=',')
data2=pd.read_csv('../Extract_CSV/Parameterization Results/wrf-para-'+str(a)+'/wrf-para-'+str(a)+str(b)+'-rainnc.csv', sep=',')

# NOTE(review): both files are read through the 'RAINNC (mm)' column; if the
# rainc file actually labels its column 'RAINC (mm)' this silently reads the
# wrong series -- confirm against the CSV headers. (These two variables were
# unused downstream in the original as well; kept for compatibility.)
rainc = data1['RAINNC (mm)']
rainnc = data2['RAINNC (mm)']

# Total accumulated rainfall = convective (rainc file) + non-convective
# (rainnc file), summed row-wise.
rainfall = data1['RAINNC (mm)'] + data2['RAINNC (mm)']
utc = data1['TIME']

# One timestamp per day: rows come in groups of 19 (one per station), so the
# first row of each group carries that day's time tag.
days = int(len(utc) / 19)
timestamps = [utc[19 * x] for x in range(days)]

# Accumulated series per station, then de-accumulate into daily totals:
# day 0 keeps the raw accumulation, later days are successive differences.
# Empty series (days == 0) yield empty columns instead of an IndexError.
accumulated = {s: [rainfall[k + x * 19] for x in range(days)]
               for k, s in enumerate(STATIONS)}
daily = {s: ([v[0]] + [v[y] - v[y - 1] for y in range(1, len(v))]) if v else []
         for s, v in accumulated.items()}

# Final frame: one 'Date' column followed by one column per station, in the
# same order as the original hand-written construction.
df = pd.DataFrame(list(zip(timestamps, *(daily[s] for s in STATIONS))),
                  columns=['Date'] + STATIONS)
#df.to_csv('../Extract_CSV/wrf_para_'+str(a)+str(b)+'.csv')
# Station names in the same order as the DataFrame columns built above.
station = ['Attapeu','Borkeo','Louangnumtha','Louangphabang','Mkham','Oudomxai','Pakse','Paksong','Paksan','Phonhong','Salavanh','Savannakhet','Sayabouli','Thakhek','Sekong','Phongsaly','Samnuea','Vientiane','Xiengkhouang']
# NOTE(review): DataFrame.append returns a NEW frame and the result is
# discarded here, so this loop has no effect (and .append is deprecated in
# modern pandas) -- confirm intent before removing.
for st in station:
    df.append(df[st])
# NOTE(review): the next line is syntactically invalid Python (appears
# garbled, perhaps during text extraction). It looks like an attempted dict
# of per-station columns, e.g. {'Attapeu': df['Attapeu'], ...}. Left as
# found; must be repaired against the original source before this runs.
dfs = ['Attapeu':df['Attapeu'],'Borkeo':df'Borkeo'omxai','Pakse','Paksong','Paksan','Phonhong','Salavanh','Savannakhet','Sayabouli','Thakhek','Sekong','Phongsaly','Samnuea','Vientiane','Xiengkhouang']
| {
"alphanum_fraction": 0.5539443684,
"author": null,
"avg_line_length": 41.5340909091,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "edf9689dfc49eed900a9c07dfccef04c49d76884",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3f9ff6249e621b596e62a208937acd1108a59c87",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "thailengthol/WRF_PostPrcocessing",
"max_forks_repo_path": "code/graph&stat2.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3f9ff6249e621b596e62a208937acd1108a59c87",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "thailengthol/WRF_PostPrcocessing",
"max_issues_repo_path": "code/graph&stat2.py",
"max_line_length": 738,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "3f9ff6249e621b596e62a208937acd1108a59c87",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "thailengthol/WRF_PostPrcocessing",
"max_stars_repo_path": "code/graph&stat2.py",
"max_stars_repo_stars_event_max_datetime": "2021-07-29T10:30:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-29T10:30:37.000Z",
"num_tokens": 2804,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10965
} |
# Batch-mode smoke test for the unexported readline_nomask() helper.
# Guarded so it only runs non-interactively (e.g. under R CMD check),
# where stdin can be fed; in an interactive session it would block.
if (!interactive()) {
  suppressMessages(
    getPass:::readline_nomask("asdf", noblank = FALSE)
  )
}
| {
"alphanum_fraction": 0.7204301075,
"author": null,
"avg_line_length": 18.6,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "2191a5571a347923ff2871bfcf2bd457d7819445",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2017-03-06T12:50:28.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-02-21T01:36:07.000Z",
"max_forks_repo_head_hexsha": "cbfefb6b77b701c9413f0961a6812c7266b3386f",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "snoweye/getPass",
"max_forks_repo_path": "tests/readline_nomask.r",
"max_issues_count": 12,
"max_issues_repo_head_hexsha": "cbfefb6b77b701c9413f0961a6812c7266b3386f",
"max_issues_repo_issues_event_max_datetime": "2020-09-10T17:10:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-02-19T22:47:38.000Z",
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "snoweye/getPass",
"max_issues_repo_path": "tests/readline_nomask.r",
"max_line_length": 68,
"max_stars_count": 47,
"max_stars_repo_head_hexsha": "cbfefb6b77b701c9413f0961a6812c7266b3386f",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "snoweye/getPass",
"max_stars_repo_path": "tests/readline_nomask.r",
"max_stars_repo_stars_event_max_datetime": "2021-08-25T12:12:23.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-02-19T22:58:21.000Z",
"num_tokens": 26,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 93
} |
module multibox

!> Couples three radially adjacent flux-tube domains ("multibox" runs):
!> the edge domains (jobs 0 and njobs-1) supply boundary data that is
!> copied / Krook-damped into the boundary regions of the middle domain
!> (job 1).

  use fft_work, only: fft_type

  implicit none

  public :: read_multibox_parameters
  public :: init_multibox
  public :: finish_multibox
  public :: multibox_communicate
  public :: init_mb_get_phi
  public :: mb_get_phi
  public :: communicate_multibox_parameters
  public :: add_multibox_krook
  public :: boundary_size
  public :: bs_fullgrid
  public :: xL, xR
  public :: rhoL, rhoR
  public :: kx0_L, kx0_R
  public :: RK_step, comm_at_init
  public :: include_multibox_krook
  public :: phi_buffer0, phi_buffer1

  private

  ! boundary-exchange buffers: *0 holds data from the left domain (job 0),
  ! *1 from the right domain (job njobs-1); g_* carry the distribution
  ! function, phi_* the potential
  complex, dimension (:), allocatable :: g_buffer0, g_buffer1, phi_buffer0, phi_buffer1
  ! flux-surface-averaged zonal component, used by the no_fsa option
  complex, dimension (:), allocatable :: fsa_x
  ! radial masks over the boundary cells: copy masks select cells that are
  ! overwritten outright, krook masks weight the relaxation term
  real, dimension (:), allocatable :: copy_mask_left, copy_mask_right
  real, dimension (:), allocatable :: krook_mask_left, krook_mask_right
  ! per-ky scaling of the Krook damping, (ky/ky_min)**krook_exponent
  real, dimension (:), allocatable :: krook_fac
  ! adiabatic-response diagonal used by the mb_get_phi solve
  real, dimension (:), allocatable :: b_mat
  ! physical radial grid of this domain and its rho mappings
  real, dimension(:), allocatable :: x_mb, rho_mb, rho_mb_clamped
  real :: dx_mb

  ! scratch arrays for the mixed (kx,ky) <-> (x,y) transforms
  complex, dimension (:,:), allocatable :: fft_kxky, fft_xky
  real, dimension (:,:), allocatable :: fft_xy

  ! for the unpadded FFTs
  type (fft_type) :: yf_fft, yb_fft
  type (fft_type) :: xf_fft, xb_fft

  complex, dimension (:), allocatable :: fft_x_k, fft_x_x
  complex, dimension (:), allocatable :: fft_y_k
  real, dimension (:), allocatable :: fft_y_y

  logical :: mb_transforms_initialized = .false.
  logical :: get_phi_initialized = .false.
  ! communication counter, used to pace the debug dumps
  integer :: temp_ind = 0
  integer :: bs_fullgrid
  integer :: mb_debug_step
  ! radial size of the real-space grid used for boundary exchange
  integer :: x_fft_size
  integer :: phi_bound, phi_pow
  ! physical locations (x and rho) of the left/right matching boundaries
  real :: xL = 0., xR = 0.
  real :: rhoL = 0., rhoR = 0.
  ! lowest nonzero kx of the edge domains, exchanged at init
  real :: kx0_L, kx0_R
  !real :: efac_l, efacp_l

  integer :: boundary_size, krook_size
  real :: nu_krook_mb, krook_exponent
  logical :: smooth_ZFs
  logical :: RK_step, include_multibox_krook, comm_at_init
  integer :: krook_option_switch
  integer, parameter:: krook_option_default = 0, &
                       krook_option_linear = 0, &
                       krook_option_exp = 1, &
                       krook_option_exp_rev = 2
  integer:: mb_zf_option_switch
  integer, parameter :: mb_zf_option_default = 0, &
                        mb_zf_option_no_ky0 = 1, &
                        mb_zf_option_no_fsa = 2
  integer :: LR_debug_switch
  integer, parameter:: LR_debug_option_default = 0, &
                       LR_debug_option_L = 1, &
                       LR_debug_option_R = 2

contains
subroutine read_multibox_parameters

  !> Read the multibox_parameters namelist on proc0, resolve the text
  !> options, broadcast the results to all processors, then exchange grid
  !> sizes across the three domains so the middle domain (job 1) can size
  !> its real-space exchange grid to the physical extent of the edges.

  use file_utils, only: input_unit_exist, error_unit
  use file_utils, only: runtype_option_switch, runtype_multibox
  use text_options, only: text_option, get_option_value
  use mp, only: broadcast, proc0
  use kt_grids, only: nx, nakx
  use job_manage, only: njobs
  use mp, only: scope, crossdomprocs, subprocs, &
                send, receive, job

  implicit none

  integer :: in_file, ierr
  integer :: nakxl, nxl, nakxr, nxr, fac
  logical :: exist

  type (text_option), dimension (4), parameter :: krook_opts = &
    (/ text_option('default', krook_option_default), &
       text_option('linear', krook_option_linear) , &
       text_option('exp', krook_option_exp) , &
       text_option('exp_reverse', krook_option_exp_rev)/)
  type (text_option), dimension (3), parameter :: mb_zf_opts = &
    (/ text_option('default', mb_zf_option_default), &
       text_option('no_ky0', mb_zf_option_no_ky0) , &
       text_option('no_fsa', mb_zf_option_no_fsa)/)
  type (text_option), dimension (3), parameter :: LR_db_opts = &
    (/ text_option('default', LR_debug_option_default), &
       text_option('L', LR_debug_option_L) , &
       text_option('R', LR_debug_option_R)/)
  character(30) :: zf_option, krook_option, LR_debug_option

  namelist /multibox_parameters/ boundary_size, krook_size, &
           smooth_ZFs, zf_option, LR_debug_option, &
           krook_option, RK_step, nu_krook_mb, &
           mb_debug_step, krook_exponent, comm_at_init, &
           phi_bound, phi_pow

  ! multibox coupling only applies to multibox runs
  if(runtype_option_switch /= runtype_multibox) return

  ! defaults, overridden by the namelist (read on proc0 only)
  boundary_size = 4
  krook_size = 0
  phi_bound = 0
  phi_pow = 0
  krook_exponent = 0.0
  nu_krook_mb = 0.0
  mb_debug_step = 1000
  smooth_ZFs = .false.
  comm_at_init = .false.
  RK_step = .false.
  zf_option = 'default'
  krook_option = 'default'
  LR_debug_option = 'default'

  if (proc0) then
     in_file = input_unit_exist("multibox_parameters", exist)
     if (exist) read (in_file, nml=multibox_parameters)

     ierr = error_unit()
     call get_option_value &
          (krook_option, krook_opts, krook_option_switch, &
           ierr, "krook_option in multibox_parameters")
     call get_option_value &
          (zf_option, mb_zf_opts, mb_zf_option_switch, &
           ierr, "zf_option in multibox_parameters")
     call get_option_value &
          (LR_debug_option, LR_db_opts, LR_debug_switch, &
           ierr, "LR_debug_option in multibox_parameters")

     ! the Krook-damped region cannot extend past the copy boundary
     if(krook_size > boundary_size) krook_size = boundary_size
  endif

  call broadcast(boundary_size)
  call broadcast(krook_size)
  call broadcast(nu_krook_mb)
  call broadcast(smooth_ZFs)
  call broadcast(mb_zf_option_switch)
  call broadcast(krook_option_switch)
  call broadcast(krook_exponent)
  call broadcast(LR_debug_switch)
  call broadcast(RK_step)
  call broadcast(mb_debug_step)
  call broadcast(comm_at_init)
  call broadcast(phi_bound)
  call broadcast(phi_pow)

  call scope(crossdomprocs)
  if(job==1) then
     ! receive grid sizes from both edge domains; all four receives must
     ! stay to match the sends issued by jobs 0 and njobs-1 below.
     ! NOTE(review): nakxr/nxr are received but unused -- presumably the
     ! right domain is assumed to match the left; confirm.
     call receive(nakxl,0)
     call receive(nxl ,0)
     call receive(nakxr,njobs-1)
     call receive(nxr ,njobs-1)

     ! the following assumes nx in the center domain is some
     ! integer multiple of nx in the left or right domain.
     ! Also assumes dx is the same in every domain, which should
     ! be the case
     fac=nx/nxl
     x_fft_size=nakxl*fac
  else
     call send(nakx,1)
     call send(nx,1)
     x_fft_size = nakx
  endif
  call scope(subprocs)

  ! set explicitly in both cases: the module-level flag carries no default
  ! initializer, so leaving it unset when nu_krook_mb == 0 was undefined
  include_multibox_krook = abs(nu_krook_mb) > epsilon(0.0)

end subroutine read_multibox_parameters
subroutine init_multibox

  !> Allocate the boundary-exchange buffers and masks, build the physical
  !> x and rho grids for this domain, and (on the middle domain, job 1)
  !> locate the matching boundaries xL/xR and the clamped rho grids used
  !> by the radially global solve.
  !>
  !> Fix vs. original: the local allocatable x_mb_clamped was allocated
  !> but never deallocated (memory leak); it is now freed with the others.

  use constants, only: pi
  use stella_layouts, only: vmu_lo
  use stella_geometry, only: geo_surf, q_as_x, get_x_to_rho
  use stella_geometry, only: drhodpsi, dxdXcoord
  use zgrid, only: nzgrid, ntubes
  use kt_grids, only: nakx,naky, akx, aky, nx,x, x_d, x0
  use kt_grids, only: centered_in_rho, rho_clamped, rho_d_clamped
  use kt_grids, only: periodic_variation
  use file_utils, only: runtype_option_switch, runtype_multibox
  use job_manage, only: njobs
  use physics_parameters, only: rhostar
  use mp, only: scope, crossdomprocs, subprocs, &
                send, receive, job

  implicit none

  integer :: g_buff_size
  integer :: phi_buff_size
  integer :: i, pfac
  real, dimension (:), allocatable :: x_clamped, x_d_clamped, x_mb_clamped
  real :: db, x_shift, dqdrho

  if(runtype_option_switch /= runtype_multibox) return

  bs_fullgrid = nint((3.0*boundary_size)/2.0)

  ! with periodic variation there are boundary regions at both ends of
  ! each half-domain, doubling the buffer footprint
  pfac = 1
  if (periodic_variation) pfac = 2
  phi_buff_size = pfac*boundary_size*naky*ntubes*(2*nzgrid+1)
  g_buff_size = phi_buff_size*(vmu_lo%ulim_alloc-vmu_lo%llim_proc+1)

  if (.not.allocated(g_buffer0)) allocate(g_buffer0(g_buff_size))
  if (.not.allocated(g_buffer1)) allocate(g_buffer1(g_buff_size))
  if (.not.allocated(phi_buffer0)) allocate(phi_buffer0(phi_buff_size))
  if (.not.allocated(phi_buffer1)) allocate(phi_buffer1(phi_buff_size))
  if (.not.allocated(fsa_x) .and. (mb_zf_option_switch.eq.mb_zf_option_no_fsa)) then
    allocate(fsa_x(nakx)); fsa_x=0.0
  endif

  ! default masks: copy everywhere in the boundary, no Krook damping
  if (.not.allocated(copy_mask_left)) allocate(copy_mask_left(pfac*boundary_size)); copy_mask_left =1.0
  if (.not.allocated(copy_mask_right)) allocate(copy_mask_right(pfac*boundary_size)); copy_mask_right=1.0
  if (.not.allocated(krook_mask_left)) allocate(krook_mask_left(pfac*boundary_size)); krook_mask_left =0.0
  if (.not.allocated(krook_mask_right)) allocate(krook_mask_right(pfac*boundary_size)); krook_mask_right=0.0

  ! replace the first krook_size copy cells with a Krook ramp whose shape
  ! depends on the chosen option; cells under the ramp are not copied
  if (krook_size .gt. 0) then
    select case (krook_option_switch)
    case (krook_option_linear)
      db = 1.0/krook_size
      do i = 1, krook_size
        krook_mask_right(i) = i*db
        copy_mask_right(i) = 0.0
      enddo
    case (krook_option_exp)
      db = 3.0/krook_size
      do i = 1, krook_size
        krook_mask_right(i) = 1.0-(1.0-exp(-(krook_size-i)*db))/(1.0-exp(-3.0))
        copy_mask_right(i) = 0.0
      enddo
    case (krook_option_exp_rev)
      db = 3.0/krook_size
      do i = 1, krook_size
        krook_mask_right(i) = (1.0-exp(-i*db))/(1.0-exp(-3.0))
        copy_mask_right(i) = 0.0
      enddo
    end select
  end if

  ! the left masks mirror the right ones; with periodic variation the
  ! second half of each mask is the reflection of the first
  if (periodic_variation) then
    do i = 1, boundary_size
      copy_mask_right(i+boundary_size) = copy_mask_right(boundary_size - i + 1)
      krook_mask_right(i+boundary_size) = krook_mask_right(boundary_size - i + 1)
    enddo
    copy_mask_left = copy_mask_right
    krook_mask_left = krook_mask_right
  else
    do i = 1, boundary_size
      copy_mask_left(i) = copy_mask_right(boundary_size - i + 1)
      krook_mask_left(i) = krook_mask_right(boundary_size - i + 1)
    enddo
  endif

  ! per-ky Krook weighting relative to the smallest nonzero ky
  if(.not.allocated(krook_fac)) allocate (krook_fac(naky))
  krook_fac = 1.0
  do i = 2, naky
    krook_fac(i) = (aky(i)/aky(2))**krook_exponent
  enddo

#ifdef MPI
  call scope(crossdomprocs)

  if(.not.allocated(x_mb)) allocate(x_mb(x_fft_size))
  if(.not.allocated(rho_mb)) allocate(rho_mb(x_fft_size))
  if(.not.allocated(rho_mb_clamped)) allocate(rho_mb_clamped(x_fft_size))
  dx_mb = (2*pi*x0)/x_fft_size

  if(job==1) then
    ! center the middle domain in x; with centered_in_rho the shift is
    ! corrected for the nonlinear x -> rho mapping (q-as-x or psi forms)
    x_shift = pi*x0
    if(centered_in_rho) then
      if(q_as_x) then
        dqdrho = geo_surf%shat*geo_surf%qinp/geo_surf%rhoc
        x_shift = pi*x0*(1.0 &
                - 0.5*rhostar*pi*x0*geo_surf%d2qdr2/(pfac*dqdrho**2*dxdXcoord))
      else
        x_shift = pi*x0*(1.0 &
                - 0.5*rhostar*pi*x0*geo_surf%d2psidr2*drhodpsi**2/(pfac*dxdXcoord))
      endif
    endif

    do i = 1, x_fft_size
      if (periodic_variation) then
        if (i.le.(x_fft_size/2)) then
          x_mb(i) = (i-1)*dx_mb - 0.5*x_shift
        else
          x_mb(i) = x_mb(x_fft_size-i+1)
        endif
      else
        x_mb(i) = (i-0.5)*dx_mb - x_shift
      endif
    enddo
    call get_x_to_rho(1, x_mb, rho_mb)

    ! boundary locations: last cell of the left boundary region and first
    ! cell of the right one
    xL = x_mb(boundary_size)
    xR = x_mb(x_fft_size/pfac-boundary_size+1)
    rhoL = rho_mb(boundary_size)
    rhoR = rho_mb(x_fft_size/pfac-boundary_size+1)

    allocate(x_clamped(nx))
    allocate(x_d_clamped(nakx))
    allocate(x_mb_clamped(x_fft_size))

    if(LR_debug_switch == LR_debug_option_L) then
      x_clamped = xL
      x_d_clamped = xL
      x_mb_clamped = xL
    else if(LR_debug_switch == LR_debug_option_R) then
      x_clamped = xR
      x_d_clamped = xR
      x_mb_clamped = xR
    else
      ! clamp all grids to [xL, xR] so profile variation is frozen inside
      ! the boundary regions
      x_clamped = x
      x_d_clamped = x_d
      x_mb_clamped = x_mb
      do i = 1, nx
        if(x_clamped(i) < xL) x_clamped(i) = xL
        if(x_clamped(i) > xR) x_clamped(i) = xR
      enddo
      do i = 1, x_fft_size
        if(x_mb_clamped(i) < xL) x_mb_clamped(i) = xL
        if(x_mb_clamped(i) > xR) x_mb_clamped(i) = xR
      enddo
      do i = 1, nakx
        if(x_d_clamped(i) < xL) x_d_clamped(i) = xL
        if(x_d_clamped(i) > xR) x_d_clamped(i) = xR
      enddo
    endif

    call get_x_to_rho(1, x_clamped, rho_clamped)
    call get_x_to_rho(1, x_d_clamped, rho_d_clamped)
    call get_x_to_rho(1, x_mb_clamped, rho_mb_clamped)

    ! x_mb_clamped was previously leaked here
    deallocate (x_clamped, x_d_clamped, x_mb_clamped)

  elseif(job==0) then
    do i = 1, x_fft_size
      x_mb(i) = (i-1)*dx_mb
    enddo
    ! paired with the sends/receives in communicate_multibox_parameters
    call receive(xL,1)
    call send(akx(2),1)
  elseif(job==njobs-1) then
    do i = 1, x_fft_size
      x_mb(i) = (i-1)*dx_mb
    enddo
    call receive(xR,1)
    call send(akx(2),1)
  endif

  call scope(subprocs)
#endif

  call init_mb_transforms

end subroutine init_multibox
subroutine communicate_multibox_parameters

  !> Exchange boundary metadata between the middle domain and the edges:
  !> job 1 sends its boundary locations xL/xR to jobs 0 and njobs-1 and
  !> receives each edge's smallest nonzero kx (kx0_L, kx0_R) in return.
  !> The matching receive(xL/xR)/send(akx(2)) calls on the edge domains
  !> live in init_multibox; only job 1 participates here.

  use job_manage, only: njobs
  use mp, only: scope, crossdomprocs, subprocs, &
                send, receive, job

  implicit none

#ifdef MPI
  if(job==1) then
    call scope(crossdomprocs)

    call send(xL ,0)
    call send(xR ,njobs-1)

    call receive(kx0_L,0)
    call receive(kx0_R,njobs-1)

    call scope(subprocs)
  endif
#endif

end subroutine communicate_multibox_parameters
subroutine finish_multibox

  !> Release all module-level allocatables set up by init_multibox and
  !> init_mb_get_phi. The unpadded-FFT scratch arrays (fft_x_*, fft_y_*)
  !> are freed separately by finish_mb_transforms.

  implicit none

  if (allocated(g_buffer0)) deallocate (g_buffer0)
  if (allocated(g_buffer1)) deallocate (g_buffer1)
  if (allocated(phi_buffer0)) deallocate (phi_buffer0)
  if (allocated(phi_buffer1)) deallocate (phi_buffer1)
  if (allocated(fsa_x)) deallocate (fsa_x)
  if (allocated(copy_mask_left)) deallocate (copy_mask_left)
  if (allocated(copy_mask_right)) deallocate (copy_mask_right)
  if (allocated(krook_mask_left)) deallocate (krook_mask_left)
  if (allocated(krook_mask_right)) deallocate (krook_mask_right)
  if (allocated(krook_fac)) deallocate (krook_fac)
  ! b_mat is allocated in init_mb_get_phi and was previously never freed
  ! (leaked across init/finish cycles)
  if (allocated(b_mat)) deallocate (b_mat)
  if (allocated(fft_kxky)) deallocate (fft_kxky)
  if (allocated(fft_xky)) deallocate (fft_xky)
  if (allocated(fft_xy)) deallocate (fft_xy)
  if (allocated(x_mb)) deallocate (x_mb)
  if (allocated(rho_mb)) deallocate (rho_mb)
  if (allocated(rho_mb_clamped)) deallocate (rho_mb_clamped)

  call finish_mb_transforms

end subroutine finish_multibox
subroutine multibox_communicate (gin)

  !> Exchange boundary data between the three domains. The edge domains
  !> (jobs 0 and njobs-1) pack their boundary columns of g and phi in
  !> real space x and send them to the middle domain (job 1), which
  !> blends them into its own boundary regions through the copy masks.
  !> The element order of the packed buffers here must match the
  !> unpacking in this routine, add_multibox_krook, and mb_get_phi.

  use constants, only: zi
  use kt_grids, only: nakx,naky,naky_all, akx, aky, nx,ny,dx,dy, zonal_mode
  use kt_grids, only: periodic_variation
  use file_utils, only: runtype_option_switch, runtype_multibox
  use file_utils, only: get_unused_unit
  use fields_arrays, only: phi, phi_corr_QN, shift_state
  use job_manage, only: njobs
  use physics_flags, only: radial_variation,prp_shear_enabled, hammett_flow_shear
  use physics_parameters, only: g_exb, g_exbfac
  use stella_layouts, only: vmu_lo
  use stella_geometry, only: dl_over_b
  use zgrid, only: nzgrid
  use mp, only: job, scope, mp_abort, &
                crossdomprocs, subprocs, allprocs, &
                send, receive, proc0

  implicit none

  integer :: num,ia, ix,iix,iL,iR, iky,iz,it,iv
  integer :: offset, offsetL, offsetR
  integer :: ii,jj, temp_unit, pfac
  real :: afacx, afacy
  complex :: dzm,dzp
  character(len=512) :: filename
  complex, dimension (:,:), allocatable :: prefac

  complex, dimension (:,:,-nzgrid:,:,vmu_lo%llim_proc:), intent (inout) :: gin

#ifndef MPI
  return
#else
  if(runtype_option_switch /= runtype_multibox) return
  if(LR_debug_switch /= LR_debug_option_default) return
  if(njobs /= 3) call mp_abort("Multibox only supports 3 domains at the moment.")

  ! phase factor undoing the Hammett flow-shear shift so domains exchange
  ! data in a common frame
  allocate (prefac(naky,x_fft_size)); prefac = 1.0

  pfac = 1
  if (periodic_variation) pfac = 2

  if(prp_shear_enabled.and.hammett_flow_shear) then
    prefac = exp(-zi*g_exb*g_exbfac*spread(x_mb,1,naky)*spread(aky*shift_state,2,x_fft_size))
  endif

  ! periodic debug dump of phi(z=0, tube 1) in real space, one binary
  ! stream file per domain every mb_debug_step communications
  if(mod(temp_ind,mb_debug_step)==0 .and. proc0) then
    ! call get_unused_unit(temp_unit)
    temp_unit=3023+job
    afacx = real(nx)/real(x_fft_size)
    afacy = real(ny)/real(2*naky-1)
    fft_kxky=phi(:,:,0,1)
    if(radial_variation) then
      fft_kxky = fft_kxky + phi_corr_QN(:,:,0,1)
    endif
    call transform_kx2x(fft_kxky,fft_xky)
    fft_xky=fft_xky*prefac
    call transform_ky2y(fft_xky,fft_xy)
    write (filename,"(A,I1,A,I0.6)") "phiout",job,"_",temp_ind
    open (unit=temp_unit, file=filename, status="replace",&
          action="write",form="unformatted",access="stream")
    write (temp_unit) real(x_fft_size,4)
    do ii=1,x_fft_size
      write(temp_unit) real(afacx*dx*(ii-1),4)
    enddo
    do ii=1,naky_all
      write (temp_unit) real(afacy*dy*(ii-1),4)
      do jj=1,x_fft_size
        write (temp_unit) real(fft_xy(ii,jj),4)
      enddo
    enddo
    close (unit=temp_unit)
  endif

  ! DSO - change communicator
  call scope(crossdomprocs)

  ia=1

  if(job==0 .or. job==(njobs-1)) then
    ! sender branch: pack the boundary cells of this edge domain
    if (periodic_variation) then
      offset=-boundary_size
    else
      offset=0;
      ! DSO the next line might seem backwards, but this makes it easier to stitch together imaages
      ! FLAG DSO - might do something weird with magnetic shear
      if(job==njobs-1) offset=nakx-boundary_size
    endif

    !first g
    ! packing order: iky fastest, then ix, iz, it, iv -- the receiver
    ! below and add_multibox_krook walk the buffer in the same order
    num=1
    do iv = vmu_lo%llim_proc, vmu_lo%ulim_proc
      do it = 1, vmu_lo%ntubes
        !this is where the FSA goes
        if(zonal_mode(1) .and. mb_zf_option_switch .eq. mb_zf_option_no_fsa) then
          do ix= 1,nakx
            fsa_x(ix) = sum(dl_over_b(ia,:)*gin(1,ix,:,it,iv))
          enddo
        endif
        do iz = -vmu_lo%nzgrid, vmu_lo%nzgrid
          fft_kxky = gin(:,:,iz,it,iv)
          ! optionally strip the zonal component (entirely or just its
          ! flux-surface average) before handing it to the middle domain
          if(zonal_mode(1)) then
            if( mb_zf_option_switch .eq. mb_zf_option_no_ky0) then
              fft_kxky(1,:) = 0.0
            elseif(mb_zf_option_switch .eq. mb_zf_option_no_fsa) then
              fft_kxky(1,:) = fft_kxky(1,:) - fsa_x
            endif
          endif
          call transform_kx2x(fft_kxky,fft_xky)
          fft_xky = fft_xky*prefac
          do ix=1,pfac*boundary_size
            iix=ix + offset
            if(iix.le.0) iix = iix + nakx
            do iky=1,naky
              !DSO if in the future the grids can have different naky, one will
              !have to divide by naky here, and multiply on the receiving end
              g_buffer0(num) = fft_xky(iky,iix)
              num=num+1
            enddo
          enddo
        enddo
      enddo
    enddo
    ! message tags 43/143 distinguish g from phi; + job distinguishes
    ! the left and right senders
    call send(g_buffer0,1,43 + job)

    !now phi
    ! NOTE: here ix is the fastest index (iky outer), the opposite of the
    ! g packing above -- mb_get_phi unpacks in this same order
    num=1
    do it = 1, vmu_lo%ntubes
      !this is where the FSA goes
      if(zonal_mode(1) .and. mb_zf_option_switch .eq. mb_zf_option_no_fsa) then
        do ix= 1,nakx
          fsa_x(ix) = sum(dl_over_b(ia,:)*phi(1,ix,:,it))
        enddo
      endif
      do iz = -vmu_lo%nzgrid, vmu_lo%nzgrid
        fft_kxky = spread((zi*akx)**phi_pow,1,naky)*phi(:,:,iz,it)
        if(zonal_mode(1)) then
          if( mb_zf_option_switch .eq. mb_zf_option_no_ky0) then
            fft_kxky(1,:) = 0.0
          elseif(mb_zf_option_switch .eq. mb_zf_option_no_fsa) then
            fft_kxky(1,:) = fft_kxky(1,:) - fsa_x
          endif
        endif
        call transform_kx2x(fft_kxky,fft_xky)
        fft_xky = fft_xky*prefac
        do iky=1,naky
          do ix=1,pfac*boundary_size
            iix=ix + offset
            if(iix.le.0) iix = iix + nakx
            !DSO if in the future the grids can have different naky, one will
            !have to divide by naky here, and multiply on the receiving end
            phi_buffer0(num) = fft_xky(iky,iix)
            num=num+1
          enddo
        enddo
      enddo
    enddo
    ! DSO - send data
    call send(phi_buffer0,1,143 + job)
  else
    ! receiver branch (middle domain): blend the edge data into this
    ! domain's boundary cells through the copy masks
    offsetL = 0
    offsetR = x_fft_size - boundary_size
    if (periodic_variation) then
      offsetL = -boundary_size
      offsetR = x_fft_size/2 - boundary_size + 1
    endif

    ! DSO - receive the data

    ! left
    call receive(g_buffer0,0, 43)
    call receive(phi_buffer0,0, 143)
    ! right
    call receive(g_buffer1,njobs-1, 43+njobs-1)
    call receive(phi_buffer1,njobs-1, 143+njobs-1)

    num=1
    do iv = vmu_lo%llim_proc, vmu_lo%ulim_proc
      do it = 1, vmu_lo%ntubes
        do iz = -vmu_lo%nzgrid, vmu_lo%nzgrid
          call transform_kx2x(gin(:,:,iz,it,iv),fft_xky)
          do ix=1,pfac*boundary_size
            iL = ix + offsetL
            iR = ix + offsetR
            if (iL.le.0) iL = iL + x_fft_size
            if (iR.le.0) iR = iR + x_fft_size
            do iky=1,naky
              ! copy_mask = 1: overwrite with edge data;
              ! copy_mask = 0: leave the local value (Krook region)
              fft_xky(iky,iL) = fft_xky(iky,iL)*(1-copy_mask_left(ix)) &
                              + g_buffer0(num)*copy_mask_left(ix)

              fft_xky(iky,iR) = fft_xky(iky,iR)*(1-copy_mask_right(ix)) &
                              + g_buffer1(num)*copy_mask_right(ix)
              num=num+1
            enddo
          enddo
          ! optionally remove the zonal-flow jump introduced at the copy
          ! boundaries by shifting the boundary cells by the edge mismatch
          if(smooth_ZFs) then
            dzm = fft_xky(1,boundary_size+1) - fft_xky(1,boundary_size)
            dzp = fft_xky(1,x_fft_size-boundary_size+1) - fft_xky(1,x_fft_size-boundary_size)
            do ix=1,pfac*boundary_size
              iL = ix + offsetL
              iR = ix + offsetR
              if (iL.le.0) iL = iL + x_fft_size
              if (iR.le.0) iR = iR + x_fft_size
              fft_xky(1,iL) = fft_xky(1,iL) + dzm
              fft_xky(1,iR) = fft_xky(1,iR) - dzp
            enddo
          endif
          ! enforce reality of the zonal (ky=0) component
          if(zonal_mode(1)) fft_xky(1,:) = real(fft_xky(1,:))
          call transform_x2kx(fft_xky,gin(:,:,iz,it,iv))
        enddo
      enddo
    enddo
  endif

  ! DSO - change communicator
  call scope(subprocs)

  temp_ind=temp_ind+1

  deallocate (prefac)
#endif
end subroutine multibox_communicate
subroutine add_multibox_krook (g, rhs)

  !> Add the Krook relaxation term to the RHS of the middle domain
  !> (job 1): rhs <- rhs - dt * nu_krook * krook_fac(ky) * mask * (g - g_edge),
  !> where g_edge is the edge-domain data stored in g_buffer0/g_buffer1 by
  !> the last multibox_communicate. The running counter num must walk the
  !> buffers in exactly the order they were packed (iky fastest, then ix,
  !> iz, it, ivmu).

  use stella_time, only: code_dt
  use stella_layouts, only: vmu_lo
  use kt_grids, only: nakx, naky, periodic_variation
  use zgrid, only: nzgrid, ntubes
  use mp, only: job

  implicit none

  integer :: iky, ix, iL, iR, iz, it, ivmu, num, offsetL, offsetR
  integer :: pfac

  complex, dimension (:,:,-nzgrid:,:,vmu_lo%llim_proc:), intent (in) :: g
  complex, dimension (:,:,-nzgrid:,:,vmu_lo%llim_proc:), intent (in out) :: rhs
  complex, allocatable, dimension (:,:) :: g0x, g0k

  ! only the middle domain is damped toward the edge data
  if(job /= 1) return

  allocate (g0k(naky,nakx))
  allocate (g0x(naky,x_fft_size))

  ! boundary-cell offsets, same convention as multibox_communicate
  if (periodic_variation) then
    offsetL = -boundary_size
    offsetR = x_fft_size/2 - boundary_size + 1
  else
    offsetL = 0
    offsetR = x_fft_size - boundary_size
  endif

  pfac = 1
  if (periodic_variation) pfac = 2

  num=1
  do ivmu = vmu_lo%llim_proc, vmu_lo%ulim_proc
    do it = 1, ntubes
      do iz = -nzgrid, nzgrid
        g0x = 0.0
        ! module-level fft_xky is used as transform scratch here
        call transform_kx2x(g(:,:,iz,it,ivmu),fft_xky)
        do ix=1,pfac*boundary_size
          iL = ix + offsetL
          iR = ix + offsetR
          if (iL.le.0) iL = iL + x_fft_size
          if (iR.le.0) iR = iR + x_fft_size
          do iky=1,naky
            ! deviation from the edge data, weighted by the Krook masks
            g0x(iky,iL) = (fft_xky(iky,iL) - g_buffer0(num))*krook_mask_left(ix)
            g0x(iky,iR) = (fft_xky(iky,iR) - g_buffer1(num))*krook_mask_right(ix)
            num=num+1
          enddo
        enddo
        call transform_x2kx(g0x,g0k)
        rhs(:,:,iz,it,ivmu) = rhs(:,:,iz,it,ivmu) &
                            - code_dt*nu_krook_mb*spread(krook_fac,2,nakx)*g0k
      enddo
    enddo
  enddo

  deallocate(g0k,g0x)

end subroutine add_multibox_krook
!!
!!>DSO - The following subroutines solve for phi in the _physical_ region of space
!! It is done here because the radial grid may include an extra point
subroutine init_mb_get_phi(has_elec, adiabatic_elec,efac,efacp)

  !> Build the LU-factorized radially-global quasineutrality operators
  !> used by mb_get_phi (radial_variation runs only). For each ky up to
  !> ky_solve_radial and each z, the real-space matrix representing
  !> gamtot + rho*dgamtotdr is assembled column-by-column by applying the
  !> operator to unit vectors, restricted to the interior region
  !> (b_solve cells trimmed from each side), then LU-decomposed.
  !> For adiabatic electrons the flux-surface-averaged response matrix
  !> I - <A^-1>B is additionally built and factorized (phizf_solve).

  use kt_grids, only: nakx, naky
  use zgrid, only: nzgrid
  use physics_flags, only: radial_variation
  use stella_geometry, only: dl_over_b, d_dl_over_b_drho
  use run_parameters, only: ky_solve_radial
  use fields_arrays, only: phi_solve, phizf_solve, gamtot, dgamtotdr
  use linear_solve, only: lu_decomposition, lu_inverse

  implicit none

  logical, intent (in) :: has_elec, adiabatic_elec
  real, intent (in) :: efac, efacp

  integer :: ia, iz, iky, ikx, b_solve
  real :: dum
  complex, dimension (:,:), allocatable :: g0k, g1k, g0x, a_inv, a_fsa

  if(.not.radial_variation) return

  !this does not depend on the timestep, so only do once
  if(get_phi_initialized) return

  get_phi_initialized = .true.

  ! NOTE(review): self-assignment typo in this dead code -- presumably
  ! meant "efacp_l = efacp"; confirm before re-enabling.
  !efac_l = efac
  !efacp_l = efacp_l

  ia = 1
  ! number of boundary cells excluded from the solve on each side
  b_solve = boundary_size - phi_bound

  allocate (g0k(1,nakx))
  allocate (g1k(1,nakx))
  allocate (g0x(1,x_fft_size))

  if(.not.allocated(phi_solve)) allocate(phi_solve(min(ky_solve_radial,naky),-nzgrid:nzgrid))
  do iky = 1, min(ky_solve_radial,naky)
    do iz = -nzgrid, nzgrid
      if(.not.associated(phi_solve(iky,iz)%zloc)) &
        allocate(phi_solve(iky,iz)%zloc(x_fft_size-2*b_solve,x_fft_size-2*b_solve))
      if(.not.associated(phi_solve(iky,iz)%idx)) &
        allocate(phi_solve(iky,iz)%idx(x_fft_size-2*b_solve))

      phi_solve(iky,iz)%zloc = 0.0
      phi_solve(iky,iz)%idx = 0
      ! column ikx-b_solve of the matrix = operator applied to the unit
      ! vector at radial cell ikx, evaluated on the interior cells
      do ikx = 1+b_solve, x_fft_size-b_solve
        g0x(1,:) = 0.0
        g0x(1,ikx) = 1.0
        call transform_x2kx(g0x,g0k)

        ! diagonal-in-kx part: gamtot
        g1k(1,:) = g0k(1,:)*gamtot(iky,:,iz)
        call transform_kx2x (g1k,g0x)
        !row column
        phi_solve(iky,iz)%zloc(:,ikx-b_solve) = g0x(1,(1+b_solve):(x_fft_size-b_solve))

        ! radial-variation correction: rho * dgamtot/drho
        g1k(1,:) = g0k(1,:)*dgamtotdr(iky,:,iz)
        call transform_kx2x (g1k,g0x)
        g0x(1,:) = rho_mb_clamped*g0x(1,:)

        phi_solve(iky,iz)%zloc(:,ikx-b_solve) = phi_solve(iky,iz)%zloc(:,ikx-b_solve) &
                                              + g0x(1,(1+b_solve):(x_fft_size-b_solve))
      enddo

      call lu_decomposition(phi_solve(iky,iz)%zloc, phi_solve(iky,iz)%idx, dum)
      ! call zgetrf(nakx,nakx,phi_solve(iky,iz)%zloc,nakx,phi_solve(iky,iz)%idx,info)
    enddo
  enddo

  ! adiabatic-response diagonal, radially varying: efac + efacp*rho
  if (.not.has_elec) then
    if(.not.allocated(b_mat)) allocate(b_mat(x_fft_size-2*b_solve)); b_mat = 0.0
    do ikx = 1+b_solve, x_fft_size-b_solve
      !row column
      b_mat(ikx-b_solve) = efac + efacp*rho_mb_clamped(ikx)
    enddo
  endif

  if (adiabatic_elec) then
    allocate(a_inv(x_fft_size-2*b_solve,x_fft_size-2*b_solve))
    allocate(a_fsa(x_fft_size-2*b_solve,x_fft_size-2*b_solve)); a_fsa = 0.0
    if(.not.associated(phizf_solve%zloc)) &
      allocate(phizf_solve%zloc(x_fft_size-2*b_solve,x_fft_size-2*b_solve));
    phizf_solve%zloc = 0.0

    if(.not.associated(phizf_solve%idx)) allocate(phizf_solve%idx(x_fft_size-2*b_solve));

    !get inverse of A
    do iz = -nzgrid, nzgrid
      call lu_inverse(phi_solve(1,iz)%zloc, phi_solve(1,iz)%idx, a_inv)

      !flux surface average it
      do ikx = 1, x_fft_size-2*b_solve
        a_fsa(:,ikx) = a_fsa(:,ikx) + (dl_over_b(ia,iz) + d_dl_over_b_drho(ia,iz) &
                     *rho_mb_clamped((1+b_solve):(x_fft_size-b_solve)))*a_inv(:,ikx)
      enddo
    enddo

    ! calculate I - <A^-1>B
    do ikx = 1, x_fft_size-2*b_solve
      phizf_solve%zloc(ikx,:) = -a_fsa(ikx,:)*b_mat
      phizf_solve%zloc(ikx,ikx) = 1.0 + phizf_solve%zloc(ikx,ikx)
    enddo

    call lu_decomposition(phizf_solve%zloc,phizf_solve%idx, dum)

    deallocate(a_inv,a_fsa)
  endif

  deallocate(g0k,g1k,g0x)

end subroutine init_mb_get_phi
subroutine mb_get_phi(phi,has_elec,adiabatic_elec)

  !> Solve for the electrostatic potential in the radially global (multibox)
  !> scheme, enforcing the boundary values communicated from the neighbouring
  !> domains (phi_buffer0 / phi_buffer1) and using the LU factors prepared in
  !> init_mb_get_phi (phi_solve, and phizf_solve for the zonal correction).
  !>
  !>   phi            - on entry: field-equation right-hand side;
  !>                    on exit: solved potential (overwritten in place)
  !>   has_elec       - kinetic electrons included in the field equation
  !>   adiabatic_elec - apply the flux-surface-averaged adiabatic-electron
  !>                    correction to the zonal (iky = 1) mode

  use constants, only: zi
  use kt_grids, only: akx, nakx, naky, zonal_mode
  use zgrid, only: nzgrid, ntubes
  use stella_geometry, only: dl_over_b, d_dl_over_b_drho
  use run_parameters, only: ky_solve_radial
  use fields_arrays, only: gamtot, dgamtotdr, phi_solve, phizf_solve
  use linear_solve, only: lu_back_substitution

  implicit none

  complex, dimension (:,:,-nzgrid:,:), intent (in out) :: phi
  logical, intent (in) :: has_elec, adiabatic_elec

  integer :: ia, it, iz, ix, iky, ind, b_solve
  complex, dimension (:,:), allocatable :: g0k, g1k, g0x, g1x, g0z
  complex, dimension (:), allocatable :: g_fsa, pb_fsa
  real :: tmp

  ia = 1
  ! number of boundary cells that take part in the solve
  b_solve = boundary_size - phi_bound

  ! scratch arrays: *k holds kx-space data, *x holds real-space data
  allocate (g0k(1,nakx))
  allocate (g1k(1,nakx))
  allocate (g0x(1,x_fft_size))
  allocate (g1x(1,x_fft_size))

  ! extra scratch needed only for the zonal adiabatic-electron correction
  if(adiabatic_elec.and.zonal_mode(1)) then
    allocate (g0z(x_fft_size,-nzgrid:nzgrid))
    allocate (g_fsa(x_fft_size-2*b_solve))
    allocate (pb_fsa(x_fft_size-2*b_solve))
  endif

  do it = 1, ntubes
    if(adiabatic_elec.and.zonal_mode(1)) pb_fsa = 0.0
    do iz = -nzgrid, nzgrid
      do iky = 1, naky
        if(iky > ky_solve_radial) then
          ! modes above ky_solve_radial use the simple local quasineutrality
          phi(iky,:,iz,it) = phi(iky,:,iz,it)/gamtot(iky,:,iz)
        else
          g0x = 0.0
          tmp = 0
          ! offset into the flattened communication buffers for this
          ! (iky, iz, it) slice
          ind = boundary_size*(iky-1 + naky*(iz+nzgrid + (2*nzgrid+1)*(it-1)))
          ! load the left/right boundary potentials into the edge cells
          do ix=1,b_solve
            g0x(1,ix) = phi_buffer0(ind+ix)
            g0x(1,x_fft_size+1-ix) = phi_buffer1(ind+boundary_size+1-ix)
          enddo
          if(iky.eq.1) tmp = sum(real(g0x(1,:)))
          call transform_x2kx(g0x,g0k)
          ! phi_pow /= 0: solve for a kx-weighted potential (zeroing the
          ! ill-defined kx = 0 component of the zonal mode)
          if(phi_pow.ne.0) then
            g0k(1,:) = g0k(1,:)/((zi*akx)**phi_pow)
            if(iky.eq.1) g0k(1,1) = 0.0
          endif
          g0x = 0.0
          if(.not.has_elec.and.phi_pow.ne.0) then
            ! boundary contribution from the adiabatic-response matrix b_mat
            g1k(1,:) = g0k(1,:)
            call transform_kx2x (g1k,g0x)
            g0x(1,(b_solve+1):(x_fft_size-b_solve)) = &
               - g0x(1,(b_solve+1):(x_fft_size-b_solve))*b_mat
            if(adiabatic_elec.and.iky.eq.1) then
              ! accumulate the flux-surface average of this boundary term
              pb_fsa = pb_fsa + (dl_over_b(ia,iz) + d_dl_over_b_drho(ia,iz) &
                 *rho_mb_clamped((1+b_solve):(x_fft_size-b_solve))) &
                 *g0x(1,(1+b_solve):(x_fft_size-b_solve))
            endif
          endif
          ! subtract the boundary terms A_b.phi_b from the right-hand side:
          ! gamtot piece plus the radial-variation (dgamtotdr) piece
          g1k(1,:) = g0k(1,:)*gamtot(iky,:,iz)
          call transform_kx2x (g1k,g1x)
          g0x = g0x + g1x
          g1k(1,:) = g0k(1,:)*dgamtotdr(iky,:,iz)
          call transform_kx2x (g1k,g1x)
          g1x(1,:) = rho_mb_clamped*g1x(1,:) + g0x(1,:)
          g0k(1,:) = phi(iky,:,iz,it)
          call transform_kx2x (g0k,g0x)
          g0x = g0x - g1x
          ! solve A_p.phi = (g - A_b.phi_b) on the interior cells
          call lu_back_substitution(phi_solve(iky,iz)%zloc, phi_solve(iky,iz)%idx, &
             g0x(1,(b_solve+1):(x_fft_size-b_solve)))
          if(iky.eq.1) then
            ! NOTE(review): tmp averages boundary + interior sums over
            ! 2*b_solve cells; it is only used in the commented-out "- tmp"
            ! offsets below
            tmp = (tmp + &
               sum(real(g0x(1,(b_solve+1):(x_fft_size-b_solve))))) &
               /(2*b_solve)
          endif
          if(phi_pow.ne.0) then
            ! undo the kx weighting: zero the edges, multiply by (i kx)^phi_pow
            do ix=1,b_solve
              g0x(1,ix) = 0.0
              g0x(1,x_fft_size+1-ix) = 0.0
            enddo
            call transform_x2kx (g0x,g0k)
            g0k(1,:) = g0k(1,:)*(zi*akx)**phi_pow
            call transform_kx2x (g0k,g0x)
          endif
          ! re-impose the communicated boundary values exactly
          do ix=1,b_solve
            g0x(1,ix) = phi_buffer0(ind+ix) !- tmp
            g0x(1,x_fft_size+1-ix) = phi_buffer1(ind+boundary_size+1-ix) !- tmp
          enddo
          call transform_x2kx(g0x,g0k)
          if(phi_pow.ne.0) then
            g0k(1,:) = g0k(1,:)/(zi*akx)**phi_pow
            if(iky.eq.1) g0k(1,1) = 0.
          endif
          phi(iky,:,iz,it) = g0k(1,:)
        endif
      enddo
    enddo
    ! kill the (1,1) mode when it was never solved and gamtot would divide by ~0
    if(ky_solve_radial.eq.0.and.any(gamtot(1,1,:).lt.epsilon(0.))) phi(1,1,:,it) = 0.0
    if(adiabatic_elec.and.zonal_mode(1)) then
      !get A_p^-1.(g - A_b.phi_b) in real space
      do iz = -nzgrid, nzgrid
        g0k(1,:) = phi(1,:,iz,it)
        call transform_kx2x(g0k,g0x)
        g0z(:,iz) = g0x(1,:)
        if(phi_pow.ne.0) then
          ! NOTE(review): lu_back_substitution presumably overwrites pb_fsa
          ! in place, so repeated application across iz mutates it each
          ! iteration -- confirm this is intended
          call lu_back_substitution(phi_solve(1,iz)%zloc, phi_solve(1,iz)%idx,pb_fsa)
          g0z((1+b_solve):(x_fft_size-b_solve),iz) = &
             g0z((1+b_solve):(x_fft_size-b_solve),iz) + pb_fsa
        endif
      enddo
      ! get <A_p^-1.(g- - A_b.phi_b)>_psi
      g_fsa = 0.0
      do iz = -nzgrid, nzgrid
        g_fsa = g_fsa + (dl_over_b(ia,iz) + d_dl_over_b_drho(ia,iz) &
           * rho_mb_clamped((1+b_solve):(x_fft_size-b_solve))) &
           *g0z((1+b_solve):(x_fft_size-b_solve),iz)
      enddo
      ! solve (I - <A^-1>B).phi_fsa = <...> and fold the correction back in
      call lu_back_substitution(phizf_solve%zloc,phizf_solve%idx, g_fsa)
      g1x(1,(1+b_solve):(x_fft_size-b_solve)) = b_mat*g_fsa
      do iz = -nzgrid, nzgrid
        g_fsa = g1x(1,(1+b_solve):(x_fft_size-b_solve))
        call lu_back_substitution(phi_solve(1,iz)%zloc,phi_solve(1,iz)%idx, g_fsa)
        g0z((1+b_solve):(x_fft_size-b_solve),iz) = &
           g0z((1+b_solve):(x_fft_size-b_solve),iz) + g_fsa
        g0x(1,:) = g0z(:,iz)
        call transform_x2kx(g0x,g0k)
        phi(1,:,iz,it) = g0k(1,:)
      enddo
    end if
  enddo

  deallocate (g0k,g1k,g0x,g1x)
  if(allocated(g0z)) deallocate(g0z)
  if(allocated(g_fsa)) deallocate(g_fsa)
  if(allocated(pb_fsa)) deallocate(pb_fsa)

end subroutine mb_get_phi
!!>DSO - The following subroutines are the _ analogues of the ones found in
! stella_transforms.f90.
! The ones uses here ensure that the grid spacing in real space is consistent between
! domains (since we do not keep the checkboard mode)
subroutine init_mb_transforms

  !> Allocate the module-level transform buffers and create the FFTW plans
  !> used by the multibox transform routines. Idempotent: guarded by
  !> mb_transforms_initialized.

  use stella_layouts, only: init_stella_layouts
  use kt_grids, only: nakx, naky, naky_all

  implicit none

  if (mb_transforms_initialized) return
  mb_transforms_initialized = .true.

  ! staging buffers for (kx,ky) -> (x,ky) -> (x,y) transform chains
  if (.not.allocated(fft_kxky)) allocate (fft_kxky(naky ,nakx))
  if (.not.allocated(fft_xky)) allocate (fft_xky(naky ,x_fft_size))
  if (.not.allocated(fft_xy)) allocate (fft_xy(naky_all,x_fft_size))

  ! plans must be created after the 1d work arrays they reference exist
  call init_x_fft
  call init_y_fft

end subroutine init_mb_transforms
subroutine init_x_fft

  !> Allocate the 1d complex work arrays for the x-direction transforms and
  !> create the forward (kx->x) and backward (x->kx) complex-to-complex
  !> FFTW plans over x_fft_size points.

  use fft_work, only: init_ccfftw, FFT_BACKWARD, FFT_FORWARD

  implicit none

  if (.not.allocated(fft_x_k)) allocate (fft_x_k(x_fft_size))
  if (.not.allocated(fft_x_x)) allocate (fft_x_x(x_fft_size))

  ! xf_fft: spectral -> real space; xb_fft: real -> spectral space
  call init_ccfftw (xf_fft, FFT_BACKWARD, x_fft_size, fft_x_k, fft_x_x)
  call init_ccfftw (xb_fft, FFT_FORWARD , x_fft_size, fft_x_x, fft_x_k)

end subroutine init_x_fft
subroutine init_y_fft

  !> Allocate the y-direction work arrays (complex spectral buffer of length
  !> naky, real-space buffer of length naky_all) and create the
  !> complex-to-real / real-to-complex FFTW plans.

  use kt_grids, only: naky, naky_all
  use fft_work, only: init_crfftw, init_rcfftw, FFT_BACKWARD, FFT_FORWARD

  implicit none

  if (.not.allocated(fft_y_k)) allocate (fft_y_k(naky))
  if (.not.allocated(fft_y_y)) allocate (fft_y_y(naky_all))

  ! yf_fft: ky -> y (c2r); yb_fft: y -> ky (r2c)
  call init_crfftw (yf_fft, FFT_BACKWARD, naky_all, fft_y_k, fft_y_y)
  call init_rcfftw (yb_fft, FFT_FORWARD , naky_all, fft_y_y, fft_y_k)

end subroutine init_y_fft
!
!> transform routines start here
!
subroutine transform_kx2x (gkx, gx)

  !> Transform from kx space to real x space, row by row.
  !>
  !> The nakx spectral coefficients are unpacked into the x_fft_size-point
  !> FFT buffer: the first ikx_max entries are the non-negative kx modes,
  !> the remainder are the negative modes placed at the top of the buffer,
  !> with the intervening entries zero-padded (the dropped modes include the
  !> checkerboard mode -- see the module comment above these routines).

  use kt_grids, only: ikx_max

  implicit none

  complex, dimension (:,:), intent (in) :: gkx
  complex, dimension (:,:), intent (out) :: gx

  integer :: iy

  do iy = 1, size(gkx,1)
    fft_x_k = 0.
    fft_x_k(:ikx_max) = gkx(iy,:ikx_max)
    fft_x_k((x_fft_size-ikx_max+2):) = gkx(iy,ikx_max+1:)
    call dfftw_execute_dft(xf_fft%plan, fft_x_k, fft_x_x)
    ! apply the plan's normalisation factor
    gx(iy,:) = fft_x_x*xf_fft%scale
  end do

end subroutine transform_kx2x
subroutine transform_x2kx (gx, gkx)

  !> Transform from real x space back to kx space, row by row.
  !>
  !> Inverse of transform_kx2x: after the forward FFT the non-negative kx
  !> modes are read from the bottom of the buffer and the negative modes
  !> from the top; the padded (dropped) modes in between are discarded.

  use kt_grids, only: ikx_max

  implicit none

  complex, dimension (:,:), intent (in) :: gx
  complex, dimension (:,:), intent (out) :: gkx

  integer :: iy

  do iy = 1, size(gx,1)
    fft_x_x = gx(iy,:)
    call dfftw_execute_dft(xb_fft%plan, fft_x_x, fft_x_k)
    ! repack into the nakx-element spectral layout, applying normalisation
    gkx(iy,:ikx_max) = fft_x_k(:ikx_max)*xb_fft%scale
    gkx(iy,ikx_max+1:) = fft_x_k((x_fft_size-ikx_max+2):)*xb_fft%scale
  end do

end subroutine transform_x2kx
subroutine transform_ky2y (gky, gy)

  !> Complex-to-real transform from ky space (naky modes) to real y space
  !> (naky_all points), one x/kx column at a time.

  implicit none

  complex, dimension (:,:), intent (in) :: gky
  real, dimension (:,:), intent (out) :: gy

  integer :: ikx

  do ikx = 1, size(gky,2)
    fft_y_k = gky(:,ikx)
    call dfftw_execute_dft_c2r(yf_fft%plan, fft_y_k, fft_y_y)
    ! apply the plan's normalisation factor
    gy(:,ikx) = fft_y_y*yf_fft%scale
  end do

end subroutine transform_ky2y
! subroutine transform_y2ky (gy, gky)
!
!   implicit none
!
!   real, dimension (:,:), intent (in out) :: gy
!   complex, dimension (:,:), intent (out) :: gky
!
!   integer :: ikx
!
!   ! NOTE(review): the earlier draft of this routine loaded gy into fft_y_k
!   ! (the complex spectral buffer) and read the result from fft_y_y; for the
!   ! r2c forward transform the real input belongs in fft_y_y and the complex
!   ! output comes back in fft_y_k, as written below.
!   do ikx = 1, size(gy,2)
!     fft_y_y = gy(:,ikx)
!     call dfftw_execute_dft_r2c(yb_fft%plan, fft_y_y, fft_y_k)
!     gky(:,ikx) = fft_y_k*yb_fft%scale
!   end do
!
! end subroutine transform_y2ky
subroutine finish_mb_transforms

  !> Tear down everything created by init_mb_transforms: destroy the FFTW
  !> plans, release all module-level transform buffers, and clear the
  !> initialisation flag so the transforms can be re-initialised.

  implicit none

  call dfftw_destroy_plan (yf_fft%plan)
  call dfftw_destroy_plan (yb_fft%plan)
  call dfftw_destroy_plan (xf_fft%plan)
  call dfftw_destroy_plan (xb_fft%plan)

  if (allocated(fft_y_k)) deallocate (fft_y_k)
  if (allocated(fft_y_y)) deallocate (fft_y_y)
  if (allocated(fft_x_k)) deallocate (fft_x_k)
  if (allocated(fft_x_x)) deallocate (fft_x_x)
  ! fft_kxky is allocated in init_mb_transforms but was previously never
  ! freed here, leaking it across finish/init cycles
  if (allocated(fft_kxky)) deallocate (fft_kxky)
  if (allocated(fft_xky)) deallocate (fft_xky)
  if (allocated(fft_xy)) deallocate (fft_xy)

  mb_transforms_initialized = .false.

end subroutine finish_mb_transforms
end module multibox
| {
"alphanum_fraction": 0.6126653307,
"author": null,
"avg_line_length": 32.0145423439,
"converted": null,
"ext": "fpp",
"file": null,
"hexsha": "4dfd1e9777416840338690c34ecdadff96858a6e",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "104556a07b9736e7c28e6f1bf2f799384732f38b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "SStroteich/stella-1",
"max_forks_repo_path": "src/multibox.fpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "104556a07b9736e7c28e6f1bf2f799384732f38b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "SStroteich/stella-1",
"max_issues_repo_path": "src/multibox.fpp",
"max_line_length": 110,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "104556a07b9736e7c28e6f1bf2f799384732f38b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "SStroteich/stella-1",
"max_stars_repo_path": "src/multibox.fpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 11956,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 37425
} |
from astropy import units as u
from astropy.coordinates import FK5, Galactic
def fk52gal(ra, dec):
    """Convert FK5 (J2000) equatorial coordinates to Galactic coordinates.

    Inputs are in degrees; returns ``(l, b)`` in degrees.
    """
    equatorial = FK5(ra * u.deg, dec * u.deg)
    galactic = equatorial.transform_to(Galactic())
    return galactic.l.degree, galactic.b.degree
def gal2fk5(l, b):
    """Convert Galactic coordinates to FK5 (J2000) equatorial coordinates.

    Inputs are in degrees; returns ``(ra, dec)`` in degrees.
    """
    galactic = Galactic(l * u.deg, b * u.deg)
    equatorial = galactic.transform_to(FK5())
    return equatorial.ra.degree, equatorial.dec.degree
def radec2glon(ra, dec):
    """
    Compute galactic longitude from right ascension and declination.
    """
    glon, _glat = fk52gal(ra, dec)
    return glon
def radec2glat(ra, dec):
    """
    Compute galactic latitude from right ascension and declination.
    """
    _glon, glat = fk52gal(ra, dec)
    return glat
def lb2ra(lon, lat):
    """
    Compute right ascension from galactic longitude and latitude.
    """
    ra, _dec = gal2fk5(lon, lat)
    return ra
def lb2dec(lon, lat):
    """
    Compute declination from galactic longitude and latitude.
    """
    _ra, dec = gal2fk5(lon, lat)
    return dec
| {
"alphanum_fraction": 0.645232816,
"author": null,
"avg_line_length": 20.976744186,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6cf09dfa22047f2e765fd471a26ead5187da1818",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 142,
"max_forks_repo_forks_event_max_datetime": "2022-03-18T13:25:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-08T13:08:00.000Z",
"max_forks_repo_head_hexsha": "1843787ccb4de852dfe103ff58473da13faccf5f",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "HPLegion/glue",
"max_forks_repo_path": "glue/plugins/coordinate_helpers/deprecated.py",
"max_issues_count": 1362,
"max_issues_repo_head_hexsha": "1843787ccb4de852dfe103ff58473da13faccf5f",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T13:23:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-03T19:15:52.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "HPLegion/glue",
"max_issues_repo_path": "glue/plugins/coordinate_helpers/deprecated.py",
"max_line_length": 68,
"max_stars_count": 550,
"max_stars_repo_head_hexsha": "1843787ccb4de852dfe103ff58473da13faccf5f",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "HPLegion/glue",
"max_stars_repo_path": "glue/plugins/coordinate_helpers/deprecated.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T11:54:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-08T13:51:06.000Z",
"num_tokens": 251,
"path": null,
"reason": "from astropy",
"repo": null,
"save_path": null,
"sha": null,
"size": 902
} |
# ---
# title: 480. Sliding Window Median
# id: problem480
# author: Indigo
# date: 2021-02-03
# difficulty: Hard
# categories: Sliding Window
# link: <https://leetcode.com/problems/sliding-window-median/description/>
# hidden: true
# ---
#
# Median is the middle value in an ordered integer list. If the size of the list
# is even, there is no middle value. So the median is the mean of the two middle
# values.
#
# Examples:
#
# `[2,3,4]` , the median is `3`
#
# `[2,3]`, the median is `(2 + 3) / 2 = 2.5`
#
# Given an array _nums_ , there is a sliding window of size _k_ which is moving
# from the very left of the array to the very right. You can only see the _k_
# numbers in the window. Each time the sliding window moves right by one
# position. Your job is to output the median array for each window in the
# original array.
#
# For example,
# Given _nums_ = `[1,3,-1,-3,5,3,6,7]`, and _k_ = 3.
#
#
#
# Window position Median
# --------------- -----
# [1 3 -1] -3 5 3 6 7 1
# 1 [3 -1 -3] 5 3 6 7 -1
# 1 3 [-1 -3 5] 3 6 7 -1
# 1 3 -1 [-3 5 3] 6 7 3
# 1 3 -1 -3 [5 3 6] 7 5
# 1 3 -1 -3 5 [3 6 7] 6
#
#
# Therefore, return the median sliding window as `[1,-1,-1,3,5,6]`.
#
# **Note:**
# You may assume `k` is always valid, ie: `k` is always smaller than input
# array's size for non-empty array.
# Answers within `10^-5` of the actual value will be accepted as correct.
#
#
## @lc code=start
using LeetCode
function median_sliding_window(nums::Vector{Int}, k::Int)
    ## Order-statistic AVL tree keyed by (value, index) so equal values
    ## remain distinct entries; tree[j] gives the j-th smallest pair.
    window = AVLTree{Tuple{Int, Int}}()
    medians = Float64[]
    for (idx, val) in enumerate(nums)
        push!(window, (val, idx))
        if idx >= k
            upper = window[k ÷ 2 + 1][1]
            lower = window[(k - 1) ÷ 2 + 1][1]
            push!(medians, (upper + lower) / 2)
            ## evict the element that slides out of the window
            delete!(window, (nums[idx - k + 1], idx - k + 1))
        end
    end
    return medians
end
## @lc code=end
| {
"alphanum_fraction": 0.5522540984,
"author": null,
"avg_line_length": 29.1343283582,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "33b16ce07cfb557ad2acce8ec32badaa10dd2036",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 20,
"max_forks_repo_forks_event_max_datetime": "2022-02-13T10:35:11.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-10-30T11:52:04.000Z",
"max_forks_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jmmshn/LeetCode.jl",
"max_forks_repo_path": "src/problems/480.sliding-window-median.jl",
"max_issues_count": 57,
"max_issues_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e",
"max_issues_repo_issues_event_max_datetime": "2022-03-19T11:57:53.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-11-01T07:26:04.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jmmshn/LeetCode.jl",
"max_issues_repo_path": "src/problems/480.sliding-window-median.jl",
"max_line_length": 80,
"max_stars_count": 74,
"max_stars_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jmmshn/LeetCode.jl",
"max_stars_repo_path": "src/problems/480.sliding-window-median.jl",
"max_stars_repo_stars_event_max_datetime": "2022-03-21T13:27:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-27T18:58:45.000Z",
"num_tokens": 713,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1952
} |
from ms2ldaviz.celery_tasks import app
from django.db import transaction
import numpy as np
import pandas as pd
from keras.models import load_model
from basicviz.models import Document, FeatureInstance
from annotation.models import SubstituentTerm, SubstituentInstance
@app.task
def predict_substituent_terms(experiment_id):
    """Predict ClassyFire substituent terms for every document in an experiment.

    Bins each document's fragment-feature intensities onto a fixed 1 Da mass
    grid, feeds the log-scaled matrix through a pre-trained Keras model, and
    stores every prediction with probability >= 0.7 as a SubstituentInstance
    marked source="Predicted".

    :param experiment_id: primary key of the experiment whose documents
        should be classified.
    """
    documents = Document.objects.filter(experiment__id=experiment_id)
    # Keep both an ordered name list (DataFrame index) and a name -> Document
    # lookup for when results are written back.
    doc_names = []
    doc_dict = {}
    for doc in documents:
        doc_names.append(doc.name)
        doc_dict[doc.name] = doc
    # Fixed spectral grid: MAX_MASS / BIN_SIZE columns, one row per document.
    MAX_MASS = 1000
    BIN_SIZE = 1
    intensities = pd.DataFrame(0.0, index=doc_names, columns=range((MAX_MASS // BIN_SIZE)), dtype=float)
    for doc in documents:
        feature_instances = FeatureInstance.objects.filter(document__id=doc.id)
        # get the mass and convert to nearest int bin e.g. mass 2.9 will be put in bin 1 (0-999)
        # NOTE(review): a fragment with mass < 1 yields mass_bin = -1, which
        # is outside the declared 0..999 columns -- confirm inputs exclude this.
        for feature_instance in feature_instances:
            # feature names look like "fragment_<mass>"; only fragments are binned
            feature_type = feature_instance.feature.name.split('_')[0]
            if feature_type == 'fragment':
                mass = float(feature_instance.feature.name.split("_")[1])
                if mass < MAX_MASS:
                    mass_bin = (int(mass) // BIN_SIZE) - 1
                    intensity = feature_instance.intensity
                    # Populate the dataframe using each document's name to place data in the correct row.
                    intensities.at[doc.name, mass_bin] = intensity
    np_matrix = intensities.values
    np_index = intensities.index
    # log(1 + x) scaling -- presumably matching the model's training
    # preprocessing; confirm against the training pipeline.
    x_train_spectra = np.log(np_matrix + 1)
    model = load_model('/home/classyfire/model.h5')
    predicted = model.predict(x_train_spectra)
    print len(predicted),len(documents)
    # turn the probabilities into 0 and 1
    # NOTE(review): predicted_substituents is computed but never used below
    # (only by the commented-out block at the end).
    predicted_substituents = np.zeros(predicted.shape)
    predicted_substituents[predicted > 0.8] = 1
    # One class label per model output column.
    legend_all = pd.read_csv("/home/classyfire/legend.txt", names=["name"])
    legends = legend_all["name"].values.tolist()
    # Cache existing terms so the loop below avoids per-hit lookups.
    sub_terms = SubstituentTerm.objects.all()
    sub_term_dict = {s.name: s for s in sub_terms}
    print predicted
    store_thresh = 0.7
    # Write all instances in a single transaction.
    with transaction.atomic():
        for doc_idx,doc_preds in enumerate(predicted):
            for term_idx,probability in enumerate(doc_preds):
                if probability >= store_thresh:
                    term = legends[term_idx]
                    doc_name = np_index[doc_idx]
                    document = doc_dict[doc_name]
                    if term in sub_term_dict:
                        subterm = sub_term_dict[term]
                    else:
                        # Term not seen before: create it and cache it.
                        subterm = SubstituentTerm.objects.get_or_create(name=term)[0]
                        sub_term_dict[term] = subterm
                    subintance = SubstituentInstance.objects.get_or_create(subterm=subterm, document=document, source="Predicted")[0]
                    subintance.probability = probability
                    subintance.save()
    # Leftover earlier implementation, kept for reference:
    # for index, value in enumerate(predicted_substituents):
    #     if value:
    #         print value
    #         doc_name = np_index[index]
    #         term = legends[index]
    #         probability = predicted[index]
    #         document = Document.objects.get(name=doc_name, experiment_id=experiment_id)
    #         subterm = SubstituentTerm.objects.get_or_create(name=term)[0]
    #         subintance = SubstituentInstance.objects.get_or_create(subterm=subterm, document=document)
    #         subintance.probability = probability
    #         subintance.save()
| {
"alphanum_fraction": 0.6399322225,
"author": null,
"avg_line_length": 38.0752688172,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d73324bb7bd5ccfb69c328540279db0b080377fc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "RP0001/ms2ldaviz",
"max_forks_repo_path": "ms2ldaviz/annotation/tasks.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "RP0001/ms2ldaviz",
"max_issues_repo_path": "ms2ldaviz/annotation/tasks.py",
"max_line_length": 133,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "RP0001/ms2ldaviz",
"max_stars_repo_path": "ms2ldaviz/annotation/tasks.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 797,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3541
} |
from src.vehicle import Vehicle
from src.visualiser import Vis
from src.track import TrackHandler
from src.ga import GeneticAlgorithm
from src.ga import IslandGA
from src.lidar import Lidar
from sandbox.game_pad_inputs import GamePad
import numpy as np
from time import sleep
import time
import multiprocessing
from joblib import Parallel, delayed
import copy
# ####################
# NN OUTPUT HANDLING #
# ####################
def update_nn_outputs(i, outputs, nn):
"""
Assumes outputs are [Thr/Brake, Steering]
"""
thr_brake = max(0.0, min(1.0, nn.outputs[0])) * 2 - 1
if thr_brake >= 0:
outputs[0, i] = thr_brake
outputs[1, i] = 0.0
else:
outputs[0, i] = 0.0
outputs[1, i] = thr_brake
outputs[2, i] = (max(0.0, min(1.0, nn.outputs[1])) * 2 - 1) * 360.0
# ###################
# NN INPUT HANDLING #
# ###################
def update_nn_inputs(i, inputs, veh):
"""
Fixes the order of the inputs
Lidar is scaled 0-1 - out of range lidar is set to max distance.
"""
collArr = np.hstack((veh.lidar_front.collision_array, veh.lidar_left.collision_array, veh.lidar_right.collision_array))
collArr[np.where(collArr < 0)[0]] = Lidar._xLidarRange
collArr = collArr / Lidar._xLidarRange
inputs[:, i] = collArr
# ##################
# ALIVE DEFINITION #
# ##################
def check_alive_state(i, is_alive, veh, t_stationary, tSim):
"""
Update the alive state of a living car
"""
# check how long a car has been 'stationary' for
if veh.vVehicle > 2 and t_stationary[i] is not None:
t_stationary[i] = None
elif veh.vVehicle <= 2 and t_stationary[i] is None:
t_stationary[i] = tSim
if t_stationary[i] is None:
bHasStopped = False
else:
if tSim - t_stationary[i] > 5:
bHasStopped = True
else:
bHasStopped = False
if veh.bHasCollided or veh.bMovingBackwards or veh.NLapsComplete > 0 or bHasStopped:
is_alive[i] = False
# ####################
# FITNESS DEFINITION #
# ####################
def calc_fitness(veh, max_steer):
"""
Set the fitness of the vehicle
"""
if max_steer < 10:
return 0.0
else:
return veh.rLapProgress + 1.0 * veh.NLapsComplete
def run_sim(nns, task_rate, track, num_inputs):
"""
Run the simulation for this set of nns
"""
vehs, is_alive, nn_inputs, nn_outputs, t_stationary = init_pop(len(nns), task_rate, track, num_inputs)
tSim = 0.0
max_steer = np.zeros(len(nns)) # log the maximum steering angle applied
while any(is_alive):
for i in range(0,len(is_alive)):
if is_alive[i]:
nns[i].update_network(nn_inputs[:, i])
update_nn_outputs(i, nn_outputs, nns[i])
max_steer[i] = max(max_steer[i], abs(nn_outputs[2, i]))
vehs[i].update(nn_outputs[0, i], nn_outputs[1, i], nn_outputs[2, i])
update_nn_inputs(i, nn_inputs, vehs[i])
check_alive_state(i, is_alive, vehs[i], t_stationary, tSim)
tSim += task_rate
if tSim > 10 * 60.0:
break
# determine and return the fitness
f_list = [calc_fitness(v, max_steer[i]) for i,v in enumerate(vehs)]
return f_list
def init_pop(pop_size, task_rate, track, num_inputs):
vehs = [Vehicle(i, track, 60, 60, 60, auto_reset=False, task_rate=task_rate) for i in range(0, pop_size)]
is_alive = [True for i in range(0, pop_size)]
nn_inputs = np.zeros((num_inputs, pop_size))
nn_outputs = np.zeros((3, pop_size))
t_stationary = [None] * pop_size
return vehs, is_alive, nn_inputs, nn_outputs, t_stationary
# ######
# MAIN #
# ######
def main():
# instantiate the objects
track = TrackHandler('dodec_track')
gp = GamePad()
run_game = True
pop_size = 1200
num_inputs = Lidar._NRays * 3
gen_number = 0
max_gens = 500
task_rate = 0.1
num_car_render = 50
num_cores = multiprocessing.cpu_count()
use_islands = True
use_parallel = True or use_islands # islands need the cpus
if use_islands:
isl_pop_size = int(np.ceil(pop_size / num_cores))
fit_data = np.zeros((max_gens, num_cores))
migr_data = np.zeros(4)
if use_parallel:
sims_per_core = pop_size / num_cores
core_sim_idxs = []
for i in range(0,num_cores):
core_sim_idxs.append((int(np.ceil(sims_per_core * i)), int(np.ceil(sims_per_core * (i + 1)))))
print('Parallel Mode [ENABLED]')
else:
print('Parallel Mode [DISABLED]')
if use_islands:
print('Islands [ENABLED]')
gas = [IslandGA(id=i, max_gens=max_gens, population_size=isl_pop_size, num_inputs=num_inputs, num_outputs=2, hidden_layer_lens=[6, 4]) for i in range(0,num_cores)]
for i,ga in enumerate(gas):
locs = [gas[ii].location for ii in range(0, num_cores) if ii != i]
ids = [gas[ii].id for ii in range(0, num_cores) if ii != i]
ga.set_island_probabilities(locs, ids)
ga.create_population(is_first=True)
else:
ga = GeneticAlgorithm(max_gens=max_gens, population_size=pop_size, num_inputs=num_inputs, num_outputs=2, hidden_layer_lens=[6, 4])
ga.create_population(is_first=True)
tTotal = time.time()
while run_game:
gen_number += 1
tSim = 0
t = time.time()
if use_islands:
f_list = Parallel(n_jobs=num_cores)(delayed(run_sim)(gas[i].pop, task_rate, track, num_inputs) for i in range(0,num_cores))
for i,f in enumerate(f_list):
gas[i].fitness = np.array(f)
else:
if use_parallel:
f_list = Parallel(n_jobs=num_cores)(delayed(run_sim)(ga.pop[x:y], task_rate, track, num_inputs) for x,y in core_sim_idxs)
fitness_list = []
for f in f_list:
fitness_list.extend(f)
else:
vehs, is_alive, nn_inputs, nn_outputs, t_stationary = init_pop(pop_size, task_rate, track, num_inputs)
while any(is_alive):
tSim += task_rate
# update the living populus
for i in range(0, pop_size):
if is_alive[i]:
ga.pop[i].update_network(nn_inputs[:, i])
update_nn_outputs(i,nn_outputs, ga.pop[i])
vehs[i].update(nn_outputs[0, i], nn_outputs[1, i], nn_outputs[2, i])
update_nn_inputs(i, nn_inputs, vehs[i])
check_alive_state(i, is_alive, vehs[i], t_stationary, tSim)
# calculate the fitness
fitness_list = [calc_fitness(v) for v in vehs]
ga.fitness = np.array(fitness_list)
if use_islands:
print('Gen -',gen_number, 'after - {:.3f} s'.format(time.time()-t))
f_string = ''
for i in range(0, num_cores):
if i > 0:
f_string += ' - '
f_string += 'ID {} @ {:.5f}'.format(i, float(max(gas[i].fitness)))
fit_data[gen_number,i] = float(max(gas[i].fitness))
print(f_string)
else:
print('Gen -',gen_number,'Max Fitness {:.5f}'.format(float(max(ga.fitness))),'after - {:.3f} s'.format(time.time()-t))
if gp.quit_requested:
print("Quitting...")
run_game = False
gp.exit_thread()
break
if not run_game:
break
if gen_number >= max_gens:
# just quit the game
run_game = False
else:
if use_islands:
# run any migration
migrants = []
ids = []
fitness = []
# first collect up the migrants - only migrant once they are all collected
for i in range(0, num_cores):
p = np.random.rand()
if p < IslandGA._prob_migration:
id, m, f = gas[i].handle_migration()
print('Island {0} is migrating {1} members to island {2}'.format(i, len(m), id))
migr_data = np.vstack((migr_data, np.array([gen_number, i, id, len(m)])))
migrants.append(m)
fitness.append(f)
ids.append(id)
# perform the immigration
if len(ids) > 0:
for i,id in enumerate(ids):
gas[id].handle_immigration(migrants[i], fitness[i])
# create the new populations
for ga in gas:
ga.create_population()
else:
# prepare the next generation
ga.create_population()
# save the fitessed
if use_islands:
print('Programme completed in {:.3f} s'.format(time.time()-tTotal))
for i in range(0, num_cores):
gas[i].pop[int(np.argmax(gas[i].fitness))].pickle_nn(suffix='id_{}'.format(i))
print('Island {} s with a max fitness of {:.5f}'.format(i, float(max(gas[i].fitness))))
np.savetxt('fit_data.csv', fit_data, delimiter=',')
np.savetxt('migr_data.csv', migr_data, delimiter=',')
else:
ga.pop[int(np.argmax(ga.fitness))].pickle_nn()
print('Programme completed in {:.3f} s with a max fitness of {:.5f}'.format(time.time()-tTotal, float(max(ga.fitness))))
if __name__ == "__main__":
main()
sleep(5)
| {
"alphanum_fraction": 0.5642382731,
"author": null,
"avg_line_length": 37.3488372093,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e57a55a1efa926c7219f22ccb40305b9268487d9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ca738b271fa346da4234c5ffc781abc12a5ac49f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "steviet91/furmulaone_source",
"max_forks_repo_path": "main_ga.py",
"max_issues_count": 24,
"max_issues_repo_head_hexsha": "ca738b271fa346da4234c5ffc781abc12a5ac49f",
"max_issues_repo_issues_event_max_datetime": "2020-04-29T08:18:33.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-14T12:38:07.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "steviet91/furmulaone_source",
"max_issues_repo_path": "main_ga.py",
"max_line_length": 171,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ca738b271fa346da4234c5ffc781abc12a5ac49f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "steviet91/furmulaone_source",
"max_stars_repo_path": "main_ga.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2454,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9636
} |
#include "Cache.hpp"
#include "Sha256.hpp"
#include <fstream>
#include <streambuf>
#include <boost/filesystem/operations.hpp>
// Build the on-disk cache file name "<path>/<targetName>.gen_cache".
// Nothing is read from disk until Load() is called.
SourceCache::SourceCache(std::string const & path, std::string const & targetName)
  : m_cacheFileName(path + "/" + targetName + ".gen_cache")
{
}
// No explicit cleanup: the in-memory cache is only persisted via Save().
SourceCache::~SourceCache()
{
}
// Read the cache file into m_cache. Format: one "key|value" entry per line.
// Lines without a '|', or with an empty key or value, are skipped; any
// trailing text not terminated by '\n' is ignored (matching Save()'s output).
void SourceCache::Load()
{
  std::ifstream input(m_cacheFileName);
  std::string contents((std::istreambuf_iterator<char>(input)), std::istreambuf_iterator<char>());

  // Scan the buffer with indices instead of repeatedly rebuilding the tail.
  std::string::size_type lineStart = 0;
  std::string::size_type newlinePos = contents.find("\n");
  while (newlinePos != std::string::npos)
  {
    std::string const line = contents.substr(lineStart, newlinePos - lineStart);
    std::string::size_type const sepPos = line.find("|");
    if (sepPos != std::string::npos)
    {
      std::string const key = line.substr(0, sepPos);
      std::string const value = line.substr(sepPos + 1);
      if (!key.empty() && !value.empty())
        m_cache[key] = value;
    }
    lineStart = newlinePos + 1;
    newlinePos = contents.find("\n", lineStart);
  }
}
void SourceCache::Save()
{
std::ofstream out(m_cacheFileName, std::fstream::out);
CacheMap::const_iterator it = m_cache.begin();
for (; it != m_cache.end(); ++it)
{
out << it->first << "|" << it->second << std::endl;
}
out.flush();
out.close();
}
void SourceCache::CheckGenerator(std::string const & fileName)
{
std::string absPath = boost::filesystem::canonical(boost::filesystem::path(fileName)).string();
std::string checksum = FileChecksum(absPath);
CacheMap::iterator it = m_cache.find(absPath);
if (it != m_cache.end())
{
if (it->second == checksum)
return; // generator doesn't changed
}
// clear cache, because of new generator
m_cache.clear();
m_cache[absPath] = checksum;
}
bool SourceCache::RequestGenerate(std::string const & fileName)
{
std::string absPath = boost::filesystem::canonical(boost::filesystem::path(fileName)).string();
std::string checksum = FileChecksum(absPath);
CacheMap::iterator it = m_cache.find(absPath);
if (it != m_cache.end())
{
if (it->second == checksum)
return false;
}
m_cache[absPath] = checksum;
return true;
}
std::string SourceCache::FileChecksum(std::string const & fileName)
{
boost::filesystem::path p(fileName);
if (boost::filesystem::exists(p))
{
std::ifstream input(fileName);
std::string str((std::istreambuf_iterator<char>(input)), std::istreambuf_iterator<char>());
std::time_t t = boost::filesystem::last_write_time(p);
return sha256(str + std::ctime(&t));
}
return std::string();
}
| {
"alphanum_fraction": 0.6504657756,
"author": null,
"avg_line_length": 23.7403846154,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "61ddfd12ae25f721151b2402738b043c04eda486",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 23,
"max_forks_repo_forks_event_max_datetime": "2021-12-21T14:43:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-04-18T15:11:56.000Z",
"max_forks_repo_head_hexsha": "8cb79a818d4e4144843b26722ac67bd71515ddaf",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "RuslanKorshunov/sc-machine",
"max_forks_repo_path": "tools/codegen/Parser/Cache.cpp",
"max_issues_count": 19,
"max_issues_repo_head_hexsha": "8cb79a818d4e4144843b26722ac67bd71515ddaf",
"max_issues_repo_issues_event_max_datetime": "2016-10-27T15:59:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-02-01T19:42:52.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "RuslanKorshunov/sc-machine",
"max_issues_repo_path": "tools/codegen/Parser/Cache.cpp",
"max_line_length": 97,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "8cb79a818d4e4144843b26722ac67bd71515ddaf",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "RuslanKorshunov/sc-machine",
"max_stars_repo_path": "tools/codegen/Parser/Cache.cpp",
"max_stars_repo_stars_event_max_datetime": "2015-03-14T11:37:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-02-20T16:48:59.000Z",
"num_tokens": 641,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2469
} |
import pygame
import datetime
import numpy as np
class Display:
    """Pygame front-end for the NeuralNest game.

    Renders the basket and eggs onto a small off-screen surface, scales it
    into the display window above a text strip, and exposes the rendered
    frame as numpy arrays for consumption by a learning agent.
    """

    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)
    RED = (255, 0, 0)
    GREEN = (0, 255, 0)
    DARKGREEN = (0, 155, 0)
    DARKGRAY = (40, 40, 40)
    BGCOLOR = BLACK
    # Height (px) of the text strip rendered above the scaled game surface.
    # (A duplicate `BLACK = (0,0,0)` definition was removed; same value.)
    text_surface_height = 20

    def __init__(self, window_width, window_height, surface_width, surface_height, cell_width):
        """Create the game window and the off-screen drawing surfaces.

        window_width/window_height: size of the on-screen window (game area).
        surface_width/surface_height: size of the low-resolution game surface.
        cell_width: width of a grid cell (stored; used by callers).
        """
        pygame.display.init()
        # Bug fix: pygame.display.init() does not initialise the font module,
        # but display_text_top()/display_text_middle() construct a
        # pygame.font.Font; initialise fonts explicitly.
        pygame.font.init()
        self.window_width = window_width
        self.cell_width = cell_width
        self.surface_width = surface_width
        self.surface_height = surface_height
        self.window_height = window_height
        self.text_surface = pygame.surface.Surface((window_width, self.text_surface_height), 0, 24)
        self.display_surface = pygame.surface.Surface((surface_width, surface_height))
        self.display_window = pygame.display.set_mode((window_width, window_height + self.text_surface_height), 0, 24)
        pygame.display.set_caption('NeuralNest')
        self.display_surface.fill(self.BGCOLOR)
        pygame.display.update()

    def update(self, basket, egg_set):
        """Redraw one frame: clear, draw basket and eggs, show score, flip."""
        self.display_surface.fill(self.BGCOLOR)
        # Draw Basket
        if basket is not None:
            self.draw_box(basket.basket_x, basket.basket_y, basket.cell_width, 1, self.WHITE)
        # Draw EggSet
        if egg_set is not None:
            for egg in egg_set.eggs:
                self.draw_box(egg.eggx, egg.eggy, egg.egg_radius, egg.egg_radius, self.WHITE)
            # Display score (caught:broken).
            # Bug fix: this was previously outside the `is not None` guard and
            # crashed with AttributeError whenever egg_set was None.
            self.display_text_top("{0}:{1}".format(egg_set.total_caught, egg_set.total_broken))
        self.update_display()
        return True

    def update_display(self):
        """Scale the game surface into the window, blit the text strip, flip."""
        self.display_window.blit(pygame.transform.scale(self.display_surface, (self.window_width, self.window_height +
                                                                               self.text_surface_height)), (0, self.text_surface_height))
        self.display_window.blit(self.text_surface, (0, 0))
        pygame.display.update()

    def draw_circle(self, x, y, radius, color, width):
        """Draw a circle on the game surface (width=0 fills it)."""
        pygame.draw.circle(self.display_surface, color, (int(x), int(y)), radius, width)

    def draw_box(self, x, y, width, height, color):
        """Draw a filled rectangle on the game surface."""
        basket_rect = pygame.Rect((x, y), (width, height))
        pygame.draw.rect(self.display_surface, color, basket_rect)

    def draw_checkerboard(self, x, y, width, height, c1, c2):
        """Fill a width x height region with a 1-px checkerboard of c1/c2."""
        for xi in range(x, x + width):
            for yi in range(y, y + height):
                c = c1
                if (xi + yi) % 2 == 0:
                    c = c2
                self.draw_box(xi, yi, 1, 1, c)

    def show_wave_start(self, wave):
        """Announce the start of a wave (or a ready prompt when wave is None)."""
        if wave is None:
            msg = "Ready?"
        else:
            # NOTE(review): wave exposes camelCase attributes (waveName,
            # waveCount) unlike the snake_case used elsewhere — confirm.
            msg = "{0}:{1}".format(wave.waveName, wave.waveCount)
        self.display_text_middle(msg)

    def show_wave_over(self, eggSet):
        """Announce the end of a wave with a summary count."""
        # NOTE(review): the label says "Caught" but the value shown is
        # eggSet.totalDropped — confirm which attribute is intended.
        msg = "Caught: {0}".format(eggSet.totalDropped)
        self.display_text_middle(msg)

    def text_objects(self, text, font):
        """Render text in RED; return the surface and its bounding rect."""
        text_surface = font.render(text, False, self.RED)
        return text_surface, text_surface.get_rect()

    def display_text_top(self, msg):
        """Render msg into the top text strip (no flip; caller updates)."""
        self.text_surface.fill(self.BGCOLOR)
        # NOTE(review): hard-coded macOS font path; fails on other platforms —
        # consider pygame.font.get_default_font().
        score_text = pygame.font.Font("/Library/Fonts/Courier New.ttf", 18)
        scoreSurf, scoreRect = self.text_objects(msg, score_text)
        self.text_surface.blit(scoreSurf, scoreRect)

    def display_text_middle(self, msg):
        """Show msg centered in the text strip, flip, and wait for input."""
        self.text_surface.fill(self.BGCOLOR)
        score_text = pygame.font.Font("/Library/Fonts/Courier New.ttf", 18)
        text_surf, text_rect = self.text_objects(msg, score_text)
        # Horizontal centering; `top = 2` then pins the vertical position.
        text_rect.center = ((self.window_width / 2), 5)
        text_rect.top = 2
        self.text_surface.blit(text_surf, text_rect)
        self.update_display()
        self.wait_for_key()

    def wait_for_key(self):
        """Block until a key press, a QUIT event, or a 3-second timeout."""
        start = datetime.datetime.utcnow()
        while True:
            for e in pygame.event.get():
                # Bug fix: QUIT previously assigned a dead local
                # (`run = False`) with no effect; return instead.
                if e.type == pygame.QUIT:
                    return
                if e.type == pygame.KEYDOWN:
                    return
            if (datetime.datetime.utcnow() - start).total_seconds() > 3:
                return

    def get_surface_array(self):
        """Return the frame as an (width*height, 3) RGB numpy array."""
        self.update_display()
        surface_array = pygame.surfarray.array3d(self.display_surface)
        return np.reshape(surface_array, (self.surface_width * self.surface_height, 3))

    def get_surface_matrix(self):
        """Return the frame as the raw 3-D RGB array from pygame."""
        self.update_display()
        surface_array = pygame.surfarray.array3d(self.display_surface)
        return surface_array

    def get_surface_grayscale_array(self):
        """Return the frame as a flat array of grayscale values in [0, 1]."""
        gray_scale_array = []
        surface_array = pygame.surfarray.array3d(self.display_surface)
        # pygame's array is (x, y, rgb); swap to row-major (y, x, rgb).
        surface_array = surface_array.swapaxes(0, 1)
        new_surface = np.reshape(surface_array, (self.surface_width * self.surface_height, 3))
        for x in new_surface:
            # Mean of the RGB channels, normalised by the maximum 255*3.
            c = ((int(x[0]) + int(x[1]) + int(x[2])) / (255 * 3))
            gray_scale_array.append(c)
        return np.array(gray_scale_array, dtype=float)
| {
"alphanum_fraction": 0.6260404281,
"author": null,
"avg_line_length": 38.8153846154,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "92b42c47c1d93d9d1474a06a17a79426e6ff0d64",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "749376de7395c828d87a6d53be596e2f378c8f17",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "fractalbass/neuralnest",
"max_forks_repo_path": "Display.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "749376de7395c828d87a6d53be596e2f378c8f17",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "fractalbass/neuralnest",
"max_issues_repo_path": "Display.py",
"max_line_length": 137,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "749376de7395c828d87a6d53be596e2f378c8f17",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "fractalbass/neuralnest",
"max_stars_repo_path": "Display.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1193,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5046
} |
'''
Code Author: Siyun WANG
'''
from EMST import * # git repo: https://github.com/ChatCatKatzeNeko/EMST
import numpy as np
import scipy
class EMSTBasedClustering():
    '''
    Base class for clustering built on a euclidean minimum spanning tree
    (EMST): subclasses remove ("cut") edges from the tree and the
    remaining connected components become the clusters.
    '''
    def __init__(self, emst):
        '''
        emst: EMST object
        '''
        self.emst = emst
        self.nbVertices = self.emst.nbVertices
        # an uncut tree is a single cluster
        self.nbClusters = 1
        # edge dict of the tree after cutting; populated by cutEdges
        self.slicedTrees = None

    def cutEdges(self, **kwargs):
        # To be overridden by subclasses: populate self.slicedTrees and
        # self.nbClusters.
        pass

    def getComponents(self):
        '''
        get all components of the emst
        '''
        self.visited = np.zeros(self.nbVertices, dtype=bool)
        self.emst._transformTree(tree=self.slicedTrees)
        isRoot = np.ones(self.nbVertices, dtype=bool)
        # loop through all parent-child tuples, set False in the isRoot list to all child nodes
        for tup in self.slicedTrees.keys():
            isRoot[tup[1]] = False
        self.components = [[] for _ in range(self.nbClusters)]
        c = 0
        for i in range(self.nbVertices):
            if isRoot[i]:
                self.getSubTree(i, self.components[c])
                c += 1
        # label each vertex with the index of its component
        self.labels = np.array([None]*self.nbVertices)
        for i in range(len(self.components)):
            for node in self.components[i]:
                self.labels[node] = i

    def getSubTree(self, root, subTree):
        '''
        Find subtree from the root using the depth first search method
        ======
        INPUTS
        ======
        root: integer, index of the starting node
        subTree: list, store the elements of the tree
        '''
        # Robustness fix: the original recursive DFS hit Python's recursion
        # limit on large / chain-like trees. Use an explicit stack instead.
        # Neighbours are pushed in reverse so nodes are appended to subTree
        # in the same preorder as the recursive version.
        stack = [root]
        while stack:
            node = stack.pop()
            if self.visited[node]:
                continue
            # keep track of node visited
            self.visited[node] = True
            subTree.append(node)
            for neighbour in reversed(list(self.emst.mst_node_neighb[node])):
                if not self.visited[neighbour]:
                    stack.append(neighbour)
        return

    def fit(self, **kwargs):
        '''
        Cut edges (subclass-specific), then derive components and labels.
        '''
        self.cutEdges(**kwargs)
        self.getComponents()
class HEMST(EMSTBasedClustering):
    def cutEdges(self, sig=1):
        '''
        Hierarchical EMST clustering algorithm by Grygorash et al.:
        remove every edge whose length exceeds the mean edge length by
        more than ``sig`` standard deviations.
        sig: strictly positive number, number of standard deviations away
             from the average edge length beyond which an edge is cut
        '''
        edge_lengths = np.array(list(self.emst.mst_pair_weight.values()))
        cutoff = edge_lengths.mean() + sig * edge_lengths.std()
        # Edges longer than the cutoff are removed from the tree.
        self.toBeRemoved = [edge for edge, length in self.emst.mst_pair_weight.items()
                            if length > cutoff]
        # Cutting k edges of a tree yields k + 1 components.
        self.nbClusters = len(self.toBeRemoved) + 1
        self.slicedTrees = self.emst.mst_pair_weight.copy()
        for edge in self.toBeRemoved:
            del self.slicedTrees[edge]
class FAREMST(EMSTBasedClustering):
    '''
    Fused adaptive ridge based on EMST
    The article about the Fused adaptive ridge for segmentation:
    https://hal.archives-ouvertes.fr/tel-02473848/document
    Authors: V. Goepp, G.Nuel
    Class parameters
    ----------------
    emst: EMST object, euclidean minimum spanning tree
    delta: float. tiny positive value that allows to prevent division by 0
    tol: float. convergence criterion
    max_iter: int. convergence criterion (maximum number of iterations)
    Class method(s)
    -------------
    fit(self, lmbda): cut unwanted edges and get labels
    Class attributes
    ----------------
    nbClusters: number of clusters
    labels: label of each data
    '''
    def __init__(self, emst, delta=1e-5, tol=1e-3, max_iter=100):
        '''
        ======
        INPUTS
        ======
        delta: float, tiny positive number to prevent division by zero
        tol: float, tiny positive number, convergence criterion
        max_iter: int, number of maximum iterations allowed if convergence criterion is not reached
        '''
        EMSTBasedClustering.__init__(self, emst=emst)
        self.delta = delta
        self.tol = tol
        self.max_iter = max_iter
        # raw data underlying the tree (one row per vertex)
        self.X = emst.data
    def initialise(self):
        '''
        initialise mu, w, prepare for iterations
        '''
        self.emst._transformTree()
        # NOTE(review): self.mu aliases self.X here (no copy). Safe as long
        # as updateMu rebinds rather than mutates mu — confirm.
        self.mu = self.X
        # one unit ridge weight per MST edge
        self.w = {pair:1 for pair in self.emst.mst_pair_weight.keys()}
        # squared centroid distances per edge; initialised from the edge
        # lengths, then overwritten on each updateW call
        self.pairwise_centroid_dist = np.array(list(self.emst.mst_pair_weight.values()))
    def updateA(self, lmbda):
        # Build the system matrix A = I + lmbda * L_w, where L_w is the
        # weighted graph Laplacian of the MST under the current weights w.
        # create the sparse matrix
        self.A = np.zeros((self.nbVertices, self.nbVertices))
        for k,val in self.emst.mst_node_neighb.items():
            for v in val:
                # edge weights are keyed under one orientation only, so
                # fall back to the reversed key on a miss
                try:
                    self.A[k][v] = self.w[(k,v)]
                except KeyError:
                    self.A[k][v] = self.w[(v,k)]
        # row sums give the degree (diagonal) part of the Laplacian;
        # computed before A is negated below
        sumOfLine = [l.sum() for l in self.A]
        self.A *= -lmbda
        # add diagonal
        self.A += np.eye(self.nbVertices)
        self.A += lmbda*np.diag(sumOfLine)
    def updateMu(self):
        # keep the previous centroids for the convergence test in cutEdges
        self.last_mu = self.mu.copy()
        # solve A mu = X for the fused centroids
        # NOTE(review): the module only does `import scipy`, which does not
        # guarantee scipy.sparse.linalg is loaded — confirm an explicit
        # `import scipy.sparse.linalg` exists somewhere.
        self.mu = scipy.sparse.linalg.spsolve(scipy.sparse.csc_matrix(self.A), self.X)
    def updateW(self):
        # adaptive-ridge reweighting: w_ij = 1 / (||mu_i - mu_j||^2 + delta^2)
        for k,(i,j) in enumerate(self.emst.mst_pair_weight.keys()):
            self.pairwise_centroid_dist[k] = np.linalg.norm(self.mu[i] - self.mu[j])**2
            self.w[(i,j)] = 1/(self.pairwise_centroid_dist[k] + self.delta**2)
    def cutEdges(self, lmbda=5):
        '''
        cut unwanted edges in the EMST
        ======
        INPUTS
        ======
        lmbda: float. penalty coefficient. the larger the penalty, the less the number of clusters
        '''
        # initialisation
        self.initialise()
        # iterations
        for iteration in range(self.max_iter):
            self.updateA(lmbda=lmbda)
            self.updateMu()
            self.updateW()
            # stop once the relative change in the centroids drops below tol
            if np.max(np.linalg.norm(self.mu-self.last_mu)/np.linalg.norm(self.last_mu)) < self.tol:
                # print('Early break, number of iteration: %d' % iteration)
                break
        # remove edges that have little weight
        # (d^2 * w tends to 1 when the two endpoint centroids stay apart)
        self.toBeRemoved = [ key for k,key in enumerate(self.emst.mst_pair_weight.keys()) if self.pairwise_centroid_dist[k] * self.w[key] > 0.99]
        self.slicedTrees = self.emst.mst_pair_weight.copy()
        self.nbClusters = len(self.toBeRemoved) + 1
        for key in self.toBeRemoved:
            del self.slicedTrees[key]
class FAREMST_Label(EMSTBasedClustering):
    '''
    Fused adaptive ridge based on EMST using binary labels
    Class parameters
    ----------------
    emst: EMST object, euclidean minimum spanning tree
    delta: float. tiny positive value that allows to prevent division by 0
    tol: float. convergence criterion
    max_iter: int. convergence criterion (maximum number of iterations)
    Class method(s)
    -------------
    fit(self, lmbda): cut unwanted edges and get labels
    Class attributes
    ----------------
    nbClusters: number of clusters
    labels: label of each data
    '''
    def __init__(self, emst, delta=1e-5, tol=1e-3, max_iter=100):
        '''
        delta: float, tiny positive number to prevent division by zero
        tol: float, tiny positive number, convergence criterion
        max_iter: int, number of maximum iterations allowed if convergence criterion is not reached
        '''
        EMSTBasedClustering.__init__(self, emst=emst)
        self.delta = delta
        self.tol = tol
        self.max_iter = max_iter
        # raw data underlying the tree (one row per vertex)
        self.X = emst.data
    def initialise(self, y):
        '''
        initialise mu, w, prepare for iterations
        ======
        INPUTS
        ======
        y: array of shape [nbData,], binary label of each data
        '''
        self.emst._transformTree()
        # convert binary values to 0.1 and 0.9 (numeric consideration:
        # keeps the logits below finite, avoiding log(0) / log(inf))
        self.y_cp = y * 0.8 + 0.1
        # mu holds the per-vertex log-odds (logit) of the label
        self.mu = np.log(self.y_cp / (1 - self.y_cp))
        # one unit ridge weight per MST edge
        self.w = {pair:1 for pair in self.emst.mst_pair_weight.keys()}
        # |mu_i - mu_j| per edge; initialised from edge lengths and
        # overwritten by updateW
        self.pairwise_diff = np.array(list(self.emst.mst_pair_weight.values()))
    def _updateNegHessian(self, lmbda, logit):
        # Hessian matrix
        # H[i,i] = logit[i] * (logit[i] - 1) - lmbda * sum(w_ij (mu_i - mu_j), for j in {neighbor_i})
        # H[i,j] = 0 if i and j are NOT neighbors
        # = lmbda * w_ij else
        # here the negative hessian is calculated
        # Rk: the Hessian matrix is not semi-definite, so we can't tell the convexity of the function
        H = np.zeros((self.nbVertices,self.nbVertices))
        # diag[i] = logit[i] * (1 - logit[i]) + lmbda * sum(w_ij, for j in {neighbor_i})
        diag = logit * (1 - logit)
        for i in range(self.nbVertices):
            for j in self.emst.mst_node_neighb[i]:
                # edge weights are keyed under a single orientation; fall
                # back to the reversed key on a miss
                try:
                    H[i,j] = -lmbda * self.w[(i,j)]
                    diag[i] += lmbda * self.w[(i,j)]
                except KeyError:
                    H[i,j] = -lmbda * self.w[(j,i)]
                    diag[i] += lmbda * self.w[(j,i)]
        H += np.diag(diag)
        return H
    def _updateGradient(self, y, lmbda, logit):
        # gradient
        # g[i] = y[i] - logit[i] - lmbda * tmp
        # tmp[i] = sum(w_ij (mu_i - mu_j), for j in {neighbor_i})
        tmp = np.zeros_like(y, dtype=float)
        for i in range(self.nbVertices):
            for j in self.emst.mst_node_neighb[i]:
                try:
                    tmp[i] += self.w[(i,j)] * (self.mu[i] - self.mu[j])
                except KeyError:
                    tmp[i] += self.w[(j,i)] * (self.mu[i] - self.mu[j])
        return y - logit - lmbda * tmp
    def updateMu(self, y, lmbda):
        # keep the previous logits for the convergence test in cutEdges
        self.last_mu = self.mu.copy()
        # Newton-Raphson loop
        # 20 iterations seem to be sufficient for now. Can be adjusted if needed.
        for _ in range(20):
            # logit[i] = P(y_i = 1)
            logit = 1 / (1 + np.exp(-self.mu))
            # get ready for the Newton-Raphson's algo: compute the gradient and the Hessian matrix
            grad = self._updateGradient(y, lmbda, logit)
            negHess = self._updateNegHessian(lmbda, logit)
            # update mu until convergence by the Newton's method
            self.mu += scipy.sparse.linalg.spsolve(scipy.sparse.csc_matrix(negHess), grad)
    def updateW(self):
        # adaptive-ridge reweighting: w_ij = 1 / (|mu_i - mu_j|^2 + delta^2)
        for k,(i,j) in enumerate(self.emst.mst_pair_weight.keys()):
            self.pairwise_diff[k] = np.abs(self.mu[i] - self.mu[j])
            self.w[(i,j)] = 1/(self.pairwise_diff[k]**2 + self.delta**2)
    def cutEdges(self, y, lmbda=5):
        '''
        cut unwanted edges in the EMST
        ======
        INPUTS
        ======
        y: array of shape [nbData,], binary label of each data
        lmbda: float. penalty coefficient. the larger the penalty, the less the number of clusters
        '''
        # initialisation
        self.initialise(y)
        # iterations
        for iteration in range(self.max_iter):
            self.updateMu(y=y, lmbda=lmbda)
            self.updateW()
            # stop once the relative change in the logits drops below tol
            if np.max(np.linalg.norm(self.mu-self.last_mu)/np.linalg.norm(self.last_mu)) < self.tol:
                # print('Early break, number of iteration: %d' % (iteration+1))
                break
        # remove edges that have little weight
        # (diff^2 * w tends to 1 when the endpoint logits stay apart)
        self.toBeRemoved = [ key for k,key in enumerate(self.emst.mst_pair_weight.keys()) if self.pairwise_diff[k]**2 * self.w[key] > 0.99]
        self.slicedTrees = self.emst.mst_pair_weight.copy()
        self.nbClusters = len(self.toBeRemoved) + 1
        for key in self.toBeRemoved:
            del self.slicedTrees[key]
| {
"alphanum_fraction": 0.5554908001,
"author": null,
"avg_line_length": 34.2193732194,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a61a8a3933c5e5ac542725818cbe2e052c4847bd",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "39ffbc3affaa1afe1b714b893af7efb4d0f2c3cc",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "ChatCatKatzeNeko/EMST_Based_Clustering",
"max_forks_repo_path": "EMSTBasedClustering.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "39ffbc3affaa1afe1b714b893af7efb4d0f2c3cc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ChatCatKatzeNeko/EMST_Based_Clustering",
"max_issues_repo_path": "EMSTBasedClustering.py",
"max_line_length": 145,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "39ffbc3affaa1afe1b714b893af7efb4d0f2c3cc",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ChatCatKatzeNeko/EMST_Based_Clustering",
"max_stars_repo_path": "EMSTBasedClustering.py",
"max_stars_repo_stars_event_max_datetime": "2021-06-13T06:55:36.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-13T06:55:36.000Z",
"num_tokens": 2924,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12011
} |
import numpy as np
import matplotlib.pyplot as plt
import mplhep as hep
plt.switch_backend("agg")
plt.rcParams.update({"font.size": 16})
plt.style.use(hep.style.CMS)
def plot_part_feats(
    jet_type,
    real_jets,
    gen_jets,
    real_mask=None,
    gen_mask=None,
    coords="polarrel",
    name=None,
    figs_path=None,
    dataset="jetnet",
    num_particles=30,
    losses=None,
    const_ylim=False,
    show=False,
):
    """Plot histograms of the three particle features, real vs generated.

    Args:
        jet_type: jet class ("g", "q", "t", "w", "z"); selects binning.
        real_jets, gen_jets: particle-feature arrays — assumed shape
            (num_jets, num_particles, num_features); TODO confirm.
        real_mask, gen_mask: optional boolean masks selecting valid particles.
        coords: "cartesian", "polarrel" or "polarrelabspt".
        name, figs_path: when both given, save to figs_path + name + ".pdf".
        dataset: "jetnet" or "jets-lagan"; selects labels/binning.
        num_particles: particles per jet; selects jetnet binning.
        losses: optional metrics dict; "w1p" entries shown as panel titles.
        const_ylim: fix y-axis limits (only defined for g/q/w/z jetnet
            binning; silently ignored otherwise).
        show: show the figure interactively instead of closing it.
    """
    # Bug fix: ylims was only bound in the g/q/w/z jetnet branch, so
    # const_ylim=True raised NameError for any other configuration.
    ylims = None
    if coords == "cartesian":
        plabels = ["$p_x$ (GeV)", "$p_y$ (GeV)", "$p_z$ (GeV)"]
        bin = np.arange(-500, 500, 10)
        pbins = [bin, bin, bin]
    elif coords == "polarrel":
        if dataset == "jetnet":
            plabels = ["$\eta^{rel}$", "$\phi^{rel}$", "$p_T^{rel}$"]
            if jet_type == "g" or jet_type == "q" or jet_type == "w" or jet_type == "z":
                if num_particles == 100:
                    pbins = [
                        np.arange(-0.5, 0.5, 0.005),
                        np.arange(-0.5, 0.5, 0.005),
                        np.arange(0, 0.1, 0.001),
                    ]
                else:
                    pbins = [
                        np.linspace(-0.3, 0.3, 100),
                        np.linspace(-0.3, 0.3, 100),
                        np.linspace(0, 0.2, 100),
                    ]
                ylims = [3e5, 3e5, 3e5]
            elif jet_type == "t":
                pbins = [
                    np.linspace(-0.5, 0.5, 100),
                    np.linspace(-0.5, 0.5, 100),
                    np.linspace(0, 0.2, 100),
                ]
        elif dataset == "jets-lagan":
            plabels = ["$\eta^{rel}$", "$\phi^{rel}$", "$p_T^{rel}$"]
            pbins = [
                np.linspace(-1.25, 1.25, 25 + 1),
                np.linspace(-1.25, 1.25, 25 + 1),
                np.linspace(0, 1, 51),
            ]
    elif coords == "polarrelabspt":
        plabels = ["$\eta^{rel}$", "$\phi^{rel}$", "$p_T (GeV)$"]
        pbins = [np.arange(-0.5, 0.5, 0.01), np.arange(-0.5, 0.5, 0.01), np.arange(0, 400, 4)]

    # Flatten jets to a (num_particles_total, num_features) particle list.
    if real_mask is not None:
        parts_real = real_jets[real_mask]
        parts_gen = gen_jets[gen_mask]
    else:
        parts_real = real_jets.reshape(-1, real_jets.shape[2])
        parts_gen = gen_jets.reshape(-1, gen_jets.shape[2])

    fig = plt.figure(figsize=(22, 8))
    for i in range(3):
        fig.add_subplot(1, 3, i + 1)
        plt.ticklabel_format(axis="y", scilimits=(0, 0), useMathText=True)
        _ = plt.hist(parts_real[:, i], pbins[i], histtype="step", label="Real", color="red")
        _ = plt.hist(parts_gen[:, i], pbins[i], histtype="step", label="Generated", color="blue")
        plt.xlabel("Particle " + plabels[i])
        plt.ylabel("Number of Particles")
        if const_ylim and ylims is not None:
            plt.ylim(0, ylims[i])
        if losses is not None and "w1p" in losses:
            plt.title(
                f'$W_1$ = {losses["w1p"][-1][i]:.2e} ± {losses["w1p"][-1][i + len(losses["w1p"][-1]) // 2]:.2e}',
                fontsize=12,
            )
        plt.legend(loc=1, prop={"size": 18})
    # Bug fix: tight_layout(2.0) passed pad positionally; tight_layout's
    # parameters are keyword-only in Matplotlib >= 3.3 (TypeError).
    plt.tight_layout(pad=2.0)
    if figs_path is not None and name is not None:
        plt.savefig(figs_path + name + ".pdf", bbox_inches="tight")
    if show:
        plt.show()
    else:
        plt.close()
def plot_part_feats_jet_mass(
    jet_type,
    real_jets,
    gen_jets,
    real_mask,
    gen_mask,
    real_masses,
    gen_masses,
    num_particles=30,
    coords="polarrel",
    name=None,
    figs_path=None,
    dataset="jetnet",
    losses=None,
    const_ylim=False,
    show=False,
):
    """Plot histograms of particle features + jet mass in one row.

    Args:
        jet_type: jet class ("g", "q", "t", "w", "z"); selects binning.
        real_jets, gen_jets: particle-feature arrays — assumed shape
            (num_jets, num_particles, num_features); TODO confirm.
        real_mask, gen_mask: boolean masks selecting valid particles
            (may be None to use all particles).
        real_masses, gen_masses: per-jet relative masses (m / pT).
        num_particles: particles per jet; selects jetnet binning.
        coords: "cartesian", "polarrel" or "polarrelabspt".
        name, figs_path: when both given, save to figs_path + name + ".pdf".
        dataset: dataset identifier (binning currently keyed on coords only).
        losses: optional metrics dict; "w1p"/"w1m" entries shown as titles.
        const_ylim: accepted for interface parity; currently unused here —
            TODO confirm intended behavior.
        show: show the figure interactively instead of closing it.
    """
    if coords == "cartesian":
        plabels = ["$p_x$ (GeV)", "$p_y$ (GeV)", "$p_z$ (GeV)"]
        bin = np.arange(-500, 500, 10)
        pbins = [bin, bin, bin]
    elif coords == "polarrel":
        plabels = ["$\eta^{rel}$", "$\phi^{rel}$", "$p_T^{rel}$"]
        if jet_type == "g" or jet_type == "q" or jet_type == "w" or jet_type == "z":
            if num_particles == 100:
                pbins = [
                    np.arange(-0.5, 0.5, 0.005),
                    np.arange(-0.5, 0.5, 0.005),
                    np.arange(0, 0.1, 0.001),
                ]
            else:
                pbins = [
                    np.linspace(-0.3, 0.3, 100),
                    np.linspace(-0.3, 0.3, 100),
                    np.linspace(0, 0.2, 100),
                ]
        elif jet_type == "t":
            pbins = [
                np.linspace(-0.5, 0.5, 100),
                np.linspace(-0.5, 0.5, 100),
                np.linspace(0, 0.2, 100),
            ]
    elif coords == "polarrelabspt":
        plabels = ["$\eta^{rel}$", "$\phi^{rel}$", "$p_T (GeV)$"]
        pbins = [np.arange(-0.5, 0.5, 0.01), np.arange(-0.5, 0.5, 0.01), np.arange(0, 400, 4)]

    # Mass binning: top/light jets get the wider range.
    if jet_type == "g" or jet_type == "q" or jet_type == "t":
        mbins = np.linspace(0, 0.225, 51)
    else:
        mbins = np.linspace(0, 0.12, 51)

    # Flatten jets to a (num_particles_total, num_features) particle list.
    if real_mask is not None:
        parts_real = real_jets[real_mask]
        parts_gen = gen_jets[gen_mask]
    else:
        parts_real = real_jets.reshape(-1, real_jets.shape[2])
        parts_gen = gen_jets.reshape(-1, gen_jets.shape[2])

    fig = plt.figure(figsize=(30, 8))
    for i in range(3):
        fig.add_subplot(1, 4, i + 1)
        plt.ticklabel_format(axis="y", scilimits=(0, 0), useMathText=True)
        _ = plt.hist(parts_real[:, i], pbins[i], histtype="step", label="Real", color="red")
        _ = plt.hist(parts_gen[:, i], pbins[i], histtype="step", label="Generated", color="blue")
        plt.xlabel("Particle " + plabels[i])
        plt.ylabel("Number of Particles")
        if losses is not None and "w1p" in losses:
            plt.title(
                f'$W_1$ = {losses["w1p"][-1][i]:.2e} ± {losses["w1p"][-1][i + len(losses["w1p"][-1]) // 2]:.2e}',
                fontsize=12,
            )
        plt.legend(loc=1, prop={"size": 18})

    # Fourth panel: jet relative-mass distribution.
    fig.add_subplot(1, 4, 4)
    plt.ticklabel_format(axis="y", scilimits=(0, 0), useMathText=True)
    _ = plt.hist(real_masses, bins=mbins, histtype="step", label="Real", color="red")
    _ = plt.hist(gen_masses, bins=mbins, histtype="step", label="Generated", color="blue")
    plt.xlabel("Jet $m/p_{T}$")
    plt.ylabel("Jets")
    plt.legend(loc=1, prop={"size": 18})
    if losses is not None and "w1m" in losses:
        plt.title(f'$W_1$ = {losses["w1m"][-1][0]:.2e} ± {losses["w1m"][-1][1]:.2e}', fontsize=12)

    # Bug fix: tight_layout(2.0) passed pad positionally; tight_layout's
    # parameters are keyword-only in Matplotlib >= 3.3 (TypeError).
    plt.tight_layout(pad=2.0)
    if figs_path is not None and name is not None:
        plt.savefig(figs_path + name + ".pdf", bbox_inches="tight")
    if show:
        plt.show()
    else:
        plt.close()
def plot_jet_feats(
    jet_type,
    real_masses,
    gen_masses,
    real_efps,
    gen_efps,
    name=None,
    figs_path=None,
    losses=None,
    show=False,
):
    """Plot the jet relative-mass histogram alongside 5 EFP histograms."""
    # Per-jet-type upper limits for the EFP histograms.
    if jet_type == "g":
        efp_ranges = [0.0013, 0.0004, 0.0004, 0.0004, 0.0004]
    elif jet_type == "q":
        efp_ranges = [0.002, 0.001, 0.001, 0.0005, 0.0005]
    else:
        efp_ranges = [0.0045, 0.0035, 0.004, 0.002, 0.003]
    efp_bins = [np.linspace(0, upper, 101) for upper in efp_ranges]

    # Mass binning: g/q/t jets get the wider range.
    mbins = np.linspace(0, 0.225, 51) if jet_type in ("g", "q", "t") else np.linspace(0, 0.12, 51)

    fig = plt.figure(figsize=(20, 12))

    # First panel: jet relative-mass distribution.
    fig.add_subplot(2, 3, 1)
    plt.ticklabel_format(axis="y", scilimits=(0, 0), useMathText=True)
    plt.hist(real_masses, bins=mbins, histtype="step", label="Real", color="red")
    plt.hist(gen_masses, bins=mbins, histtype="step", label="Generated", color="blue")
    plt.xlabel("Jet $m/p_{T}$")
    plt.ylabel("Jets")
    plt.legend(loc=1, prop={"size": 18})
    if losses is not None and "w1m" in losses:
        plt.title(f'$W_1$ = {losses["w1m"][-1][0]:.2e} ± {losses["w1m"][-1][1]:.2e}', fontsize=12)

    # Remaining panels: one per EFP.
    for idx, bins_idx in enumerate(efp_bins):
        fig.add_subplot(2, 3, idx + 2)
        plt.ticklabel_format(axis="y", scilimits=(0, 0), useMathText=True)
        plt.ticklabel_format(axis="x", scilimits=(0, 0), useMathText=True)
        plt.hist(real_efps[:, idx], bins_idx, histtype="step", label="Real", color="red")
        plt.hist(gen_efps[:, idx], bins_idx, histtype="step", label="Generated", color="blue")
        plt.xlabel("EFP " + str(idx + 1), x=0.7)
        plt.ylabel("Jets")
        plt.legend(loc=1, prop={"size": 18})
        if losses is not None and "w1efp" in losses:
            plt.title(
                f'$W_1$ = {losses["w1efp"][-1][idx]:.2e} ± {losses["w1efp"][-1][idx + len(losses["w1efp"][-1]) // 2]:.2e}',
                fontsize=12,
            )

    plt.tight_layout(pad=0.5)
    if figs_path is not None and name is not None:
        plt.savefig(figs_path + name + ".pdf", bbox_inches="tight")
    if show:
        plt.show()
    else:
        plt.close()
# def plot_jet_mass_pt(realjf, genjf, dataset="jetnet", name=None, figs_path=None, show=False):
# if dataset == "jetnet":
# jlabels = ["Jet Relative Mass", "Jet Relative $p_T$"]
# binsm = np.linspace(0, 0.225, 101)
# binspt = np.linspace(0.5, 1.2, 101)
# elif dataset == "jets-lagan":
# jlabels = ["Jet Mass (GeV)", "Jet $p_T$ (GeV)"]
# binsm = np.linspace(40, 120, 51)
# binspt = np.linspace(220, 340, 51)
#
# fig = plt.figure(figsize=(16, 8))
#
# fig.add_subplot(1, 2, 1)
# plt.ticklabel_format(axis="y", scilimits=(0, 0), useMathText=True)
# # plt.ticklabel_format(axis='x', scilimits=(0, 0), useMathText=True)
# _ = plt.hist(realjf[:, 0], bins=binsm, histtype="step", label="Real", color="red")
# _ = plt.hist(genjf[:, 0], bins=binsm, histtype="step", label="Generated", color="blue")
# plt.xlabel(jlabels[0])
# plt.ylabel("Jets")
# plt.legend(loc=1, prop={"size": 18})
#
# fig.add_subplot(1, 2, 2)
# plt.ticklabel_format(axis="y", scilimits=(0, 0), useMathText=True)
# plt.ticklabel_format(axis="x", scilimits=(0, 0), useMathText=True)
# _ = plt.hist(realjf[:, 1], bins=binspt, histtype="step", label="Real", color="red")
# _ = plt.hist(genjf[:, 1], bins=binspt, histtype="step", label="Generated", color="blue")
# plt.xlabel(jlabels[1])
# plt.ylabel("Jets")
# plt.legend(loc=1, prop={"size": 18})
#
# plt.tight_layout(pad=2)
# if figs_path is not None and name is not None:
# plt.savefig(figs_path + name + ".pdf", bbox_inches="tight")
#
# if show:
# plt.show()
# else:
# plt.close()
def plot_losses(losses, loss="lg", name=None, losses_path=None, show=False):
    """Plot GAN training loss curves for the given loss type."""
    plt.figure()
    if loss == "w":
        # Wasserstein GAN: a single critic loss curve.
        plt.plot(losses["D"], label="Critic loss")
    elif loss in ("og", "ls", "hinge"):
        # Original, least-squares and hinge losses all track the
        # discriminator's real/fake losses plus the generator loss.
        plt.plot(losses["Dr"], label="Discriminitive real loss")
        plt.plot(losses["Df"], label="Discriminitive fake loss")
        plt.plot(losses["G"], label="Generative loss")
    if "gp" in losses:
        plt.plot(losses["gp"], label="Gradient penalty")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    if losses_path is not None and name is not None:
        plt.savefig(losses_path + name + ".pdf", bbox_inches="tight")
    if show:
        plt.show()
    else:
        plt.close()
def plot_eval(
    losses, epoch, save_epochs, coords="polarrel", name=None, losses_path=None, show=False
):
    """Evaluation metric plots per epoch.

    Plots whichever of "w1p", "w1m", "w1efp", "mmd", "coverage" and
    "fpnd" appear in ``losses`` against the epochs at which metrics were
    saved (every ``save_epochs`` epochs).

    Args:
        losses: dict mapping metric name -> per-save-point history.
        epoch: current epoch (inclusive upper bound of the x axis).
        save_epochs: interval in epochs between saved metric values.
        coords: coordinate system; selects the particle feature labels.
        name, losses_path: when both given, save to losses_path + name + ".pdf".
        show: show the figure interactively instead of closing it.
    """
    if coords == "cartesian":
        plabels = ["$p_x$ (GeV)", "$p_y$ (GeV)", "$p_z$ (GeV)"]
    elif coords == "polarrel":
        plabels = ["$\eta^{rel}$", "$\phi^{rel}$", "$p_T^{rel}$"]
    elif coords == "polarrelabspt":
        plabels = ["$\eta^{rel}$", "$\phi^{rel}$", "$p_T (GeV)$"]
    colors = ["blue", "green", "orange", "red", "yellow"]

    # Robustness fix: x was previously always computed from losses["w1p"],
    # raising KeyError when that metric was not tracked. Use the first
    # available metric's history length instead.
    for metric in ("w1p", "w1m", "w1efp", "mmd", "coverage", "fpnd"):
        if metric in losses:
            num_saved = len(losses[metric])
            break
    else:
        num_saved = 0
    x = np.arange(0, epoch + 1, save_epochs)[-num_saved:]

    fig = plt.figure(figsize=(30, 24))

    if "w1p" in losses:
        # One panel per particle feature.
        for i in range(3):
            fig.add_subplot(3, 3, i + 1)
            plt.plot(x, np.array(losses["w1p"])[:, i])
            plt.xlabel("Epoch")
            plt.ylabel("Particle " + plabels[i] + " $W_1$")
            plt.yscale("log")

    if "w1m" in losses:
        fig.add_subplot(3, 3, 4)
        plt.plot(x, np.array(losses["w1m"])[:, 0])
        plt.xlabel("Epoch")
        plt.ylabel("Jet Relative Mass $W_1$")
        plt.yscale("log")

    if "w1efp" in losses:
        fig.add_subplot(3, 3, 5)
        for i in range(5):
            # Bug fix: this panel previously plotted losses["w1p"][:, i],
            # which both mislabelled the curves as EFPs and indexed past
            # w1p's three particle-feature columns.
            plt.plot(x, np.array(losses["w1efp"])[:, i], label="EFP " + str(i + 1), color=colors[i])
        plt.legend(loc=1)
        plt.xlabel("Epoch")
        plt.ylabel("Jet EFPs $W_1$")
        plt.yscale("log")

    if "mmd" in losses and "coverage" in losses:
        metrics = {"mmd": (1, "MMD"), "coverage": (2, "Coverage")}
        for key, (i, label) in metrics.items():
            fig.add_subplot(3, 3, 6 + i)
            plt.plot(x, np.array(losses[key]))
            plt.xlabel("Epoch")
            plt.ylabel(label)
            if key == "mmd":
                plt.yscale("log")

    if "fpnd" in losses:
        fig.add_subplot(3, 3, 9)
        plt.plot(x, np.array(losses["fpnd"]))
        plt.xlabel("Epoch")
        plt.ylabel("FPND")
        plt.yscale("log")

    if losses_path is not None and name is not None:
        plt.savefig(losses_path + name + ".pdf", bbox_inches="tight")
    if show:
        plt.show()
    else:
        plt.close()
| {
"alphanum_fraction": 0.5271223022,
"author": null,
"avg_line_length": 35.0125944584,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "88e38aa829e442f8ca2442e6d77c8e94275a9d82",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8be23da97ece3d173e1a59defc45aa7a1112232d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rkansal47/MPGAN",
"max_forks_repo_path": "plotting.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "8be23da97ece3d173e1a59defc45aa7a1112232d",
"max_issues_repo_issues_event_max_datetime": "2021-12-08T22:44:57.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-12-02T11:38:31.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rkansal47/MPGAN",
"max_issues_repo_path": "plotting.py",
"max_line_length": 119,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "8be23da97ece3d173e1a59defc45aa7a1112232d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rkansal47/MPGAN",
"max_stars_repo_path": "plotting.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-28T00:09:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-29T11:52:45.000Z",
"num_tokens": 4381,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13900
} |
# import numpy as np
# r= [1.0,1.0,1.0,-1.0,1.0,1.0]
# gamma = 0.5
#
# r = np.array(r)
# discounted_r = np.zeros_like(r)
# running_add = 0
# # we go from last reward to first one so we don't have to do exponentiations
# for t in reversed(range(0, r.size)):
# if r[t] != 0:
# running_add = 0 # if the game ended (in Pong), reset the reward sum
# running_add = running_add * gamma + r[t] # the point here is to use Horner's method to compute those rewards efficiently
# discounted_r[t] = running_add
# discounted_r -= np.mean(discounted_r) #normalizing the result
# discounted_r /= np.std(discounted_r) #idem
# print (discounted_r)
#
# print ('{0:02b}'.format(0))
from gym import envs

# Print every registered environment id, alphabetically.
envids = [spec.id for spec in envs.registry.all()]
for envid in sorted(envids):
    print(envid)

import gym

# initializing our environment
env = gym.make('BipedalWalker-v2')
# Bug fix: BipedalWalker has a continuous (Box) action space, which has no
# `.n` attribute (`.n` exists only on Discrete spaces). Fall back to the
# Box dimensionality so nb_actions is defined for both space types.
action_space = env.action_space
nb_actions = action_space.n if hasattr(action_space, "n") else action_space.shape[0]
# beginning of an episode
observation = env.reset()
| {
"alphanum_fraction": 0.6845915202,
"author": null,
"avg_line_length": 27.6285714286,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7db35a55817528e506a33793c2343caa2f6f0d96",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "aec01e9c2d28eda6019fe8bb94804a78f2d7fbc0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "amousist/cartpole",
"max_forks_repo_path": "tests.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "aec01e9c2d28eda6019fe8bb94804a78f2d7fbc0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "amousist/cartpole",
"max_issues_repo_path": "tests.py",
"max_line_length": 126,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "aec01e9c2d28eda6019fe8bb94804a78f2d7fbc0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "amousist/cartpole",
"max_stars_repo_path": "tests.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 297,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 967
} |
[STATEMENT]
lemma l2_inv2_init [iff]:
"init l2 \<subseteq> l2_inv2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. init l2 \<subseteq> l2_inv2
[PROOF STEP]
by (auto simp add: l2_def l2_init_def l2_inv2_def) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Key_Agreement_Strong_Adversaries_pfslvl2",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 99,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 14:17:51 2018
def backward_calc(self,ic,num_compartments,target_conc = 1,target_emiss = 1):
@author: Tim Rodgers
"""
import numpy as np
import pandas as pd
num_compartments = 7
target_emiss = 1
target_conc = 3
#Initialize outputs
col_name = pd.Series(index = range(num_compartments))
for i in range(num_compartments):
col_name[i] = 'f'+str(i+1) #Fugacity for every compartment
#Emissions for the target_emiss compartment
col_name[num_compartments+1] = 'emiss_'+str(target_emiss)
bw_out = pd.DataFrame(index = ic['Compound'],columns = col_name)
#Define target name and check if there is a value for it in the ic dataframe. If not, abort
targ_name = 'targ_' + str(target_conc)
#initialize a matrix of (numc - 1) x numc compartments. This is not necessarily a determined system
D_mat = pd.DataFrame(index = range(num_compartments-1),columns = range(num_compartments))
#initialize a blank dataframe for input vectors, RHS of matrix.
inp_val = pd.DataFrame(index = range(num_compartments-1),columns = ic.Compound)
#Loop over the chemicals, solving for each.
for chem in ic.index: #Index of chemical i starting at 0
#Put the target fugacity into the output
bw_out.iloc[chem,target_conc] = ic.loc[chem,targ_name]
#Double loop to set matrix values
j = 0 #Index to pull values from ic
jj = 0 #Index to fill matrix
while j < num_compartments: #compartment j, index of D_mat
#Skip the target_conc row as we know f(T)
if (j+1) == target_conc:
j += 1
#Define RHS = -Inp(j) - D(Tj)*f(T) for every compartment j using target T
D_val = 'D_' +str(target_conc)+str(j+1) #label compartments from 1
inp_name = 'inp_' + str(j + 1) #must have an input for every compartment, even if it is zero
if D_val in ic.columns: #check if there is a D(Tj) value
if j+1 == target_emiss: #Set -Inp(j) to zero for the targ_emiss row, we will subtract GCb(target_emiss) later
inp_val.iloc[jj,chem] = -ic.loc[chem,D_val] * bw_out.iloc[chem,target_conc]
else:
inp_val.iloc[jj,chem] = -ic.loc[chem,inp_name] - ic.loc[chem,D_val]*bw_out.iloc[chem,target_conc]
else: #If there is no D(Tj) then RHS = -Inp(j), unless it is the target_emiss column again
if j+1 == target_emiss:
inp_val.iloc[jj,chem] = 0
else:
inp_val.iloc[jj,chem] = i-ic.loc[chem,inp_name]
#Set D values across each row
k = 0 #Compartment index
kk = 0 #Index to fill matrix
while k < num_compartments: #compartment k, column of D_mat
if (k+1) == target_conc:
k += 1
if j == k:
DT = 'DT' + str(j + 1)
D_mat.iloc[jj,kk] = -ic.loc[chem,DT]
else:
D_val = 'D_' +str(k+1)+str(j+1) #label compartments from 1
if D_val in ic.columns: #Check if there is transfer between the two compartments
D_mat.iloc[jj,kk] = ic.loc[chem,D_val]
else:
D_mat.iloc[jj,kk] = 0 #If no transfer, set to 0
if k+1 == num_compartments: #Final column is the input to the target_emiss compartment
if (j+1) == target_emiss: #This is 1 for the target_emiss column and 0 everywhere else
D_mat.iloc[jj,kk+1] = 1
else:
D_mat.iloc[jj,kk+1] = 0
k +=1
kk += 1
j += 1
jj += 1
#Solve for fugsinp = D_mat\inp_val, the last value in fugs is the total inputs
lhs = np.array(D_mat,dtype = float)
rhs = np.array(inp_val.iloc[:,chem],dtype = float)
fugsinp = np.linalg.lstsq(lhs,rhs,rcond = None)
bw_out.iloc[chem,:] = fugsinp
| {
"alphanum_fraction": 0.6207799005,
"author": null,
"avg_line_length": 47.1728395062,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "808f60e0521f71399bf1f99d8442640fb335b8b4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cf2f4c773b2b992fcf55aaf1ffc22463335fec85",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tfmrodge/ppLFERMUM",
"max_forks_repo_path": "ppLFER_MUM/back_calc.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cf2f4c773b2b992fcf55aaf1ffc22463335fec85",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tfmrodge/ppLFERMUM",
"max_issues_repo_path": "ppLFER_MUM/back_calc.py",
"max_line_length": 121,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cf2f4c773b2b992fcf55aaf1ffc22463335fec85",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tfmrodge/ppLFERMUM",
"max_stars_repo_path": "ppLFER_MUM/back_calc.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1096,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3821
} |
C
C $Id: pcsetc.f,v 1.15 2008-07-27 00:17:20 haley Exp $
C
C Copyright (C) 2000
C University Corporation for Atmospheric Research
C All Rights Reserved
C
C The use of this Software is governed by a License Agreement.
C
SUBROUTINE PCSETC (WHCH,CVAL)
C
CHARACTER*(*) WHCH,CVAL
C
C The subroutine PCSETC may be used to set PLCHHQ parameters which have
C values of type CHARACTER.
C
C COMMON block declarations.
C
COMMON /PCPRMS/ ADDS,CONS,DSTB,DSTL,DSTR,DSTT,HPIC(3),IBNU,
+ IBXC(3),IBXF,ICEN,IORD,IOUC,IOUF,IPCC,IQUF,
+ ISHC,ISHF,ITEF,JCOD,LSCI(16),NFCC,NODF,RBXL,
+ RBXM,RBXX,RBXY,ROLW,RPLW,RSLW,SHDX,SHDY,SIZA,
+ SSIC,SSPR,SUBS,VPIC(3),WPIC(3),XBEG,XCEN,XEND,
+ XMUL(3),YBEG,YCEN,YEND,YMUL(3),ZINX,ZINY,ZINZ
SAVE /PCPRMS/
C
COMMON /PCFNNO/ LFNO(43),LFNL
SAVE /PCFNNO/
C
COMMON /PCFNNM/ LFNM(43,2)
CHARACTER*18 LFNM
SAVE /PCFNNM/
C
C Do a call forcing a BLOCKDATA to be loaded from a binary library.
C
CALL PCBLDA
C
C Check for an uncleared prior error.
C
IF (ICFELL('PCSETC - UNCLEARED PRIOR ERROR',1).NE.0) RETURN
C
C There are two possibilities: In the case of the function-code
C character, what is really stored is the index of the character
C in the local collating sequence. If the default font name is
C being specified, what is really set is the font number.
C
IF (WHCH(1:2).EQ.'FC'.OR.WHCH(1:2).EQ.'fc') THEN
NFCC=ICHAR(CVAL(1:1))
ELSE IF (WHCH(1:2).EQ.'FN'.OR.WHCH(1:2).EQ.'fn') THEN
DO 101 I=1,LFNL
IF (CVAL.EQ.LFNM(I,1).OR.CVAL.EQ.LFNM(I,2)) THEN
NODF=LFNO(I)
GO TO 102
END IF
101 CONTINUE
CALL SETER ('PCSETC - UNRECOGNIZED FONT NAME',2,1)
RETURN
ELSE
CALL SETER ('PCSETC - UNRECOGNIZED PARAMETER NAME',3,1)
RETURN
END IF
C
C Done.
C
102 RETURN
C
END
| {
"alphanum_fraction": 0.593539055,
"author": null,
"avg_line_length": 30.5,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "d26ae5780cd6628da20d0d7e63cd69576cabe915",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 58,
"max_forks_repo_forks_event_max_datetime": "2022-03-15T09:13:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-12-14T00:15:22.000Z",
"max_forks_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "tenomoto/ncl",
"max_forks_repo_path": "ncarg2d/src/libncarg/plotchar/pcsetc.f",
"max_issues_count": 156,
"max_issues_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T07:02:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-09-22T09:56:48.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "tenomoto/ncl",
"max_issues_repo_path": "ncarg2d/src/libncarg/plotchar/pcsetc.f",
"max_line_length": 71,
"max_stars_count": 210,
"max_stars_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "tenomoto/ncl",
"max_stars_repo_path": "ncarg2d/src/libncarg/plotchar/pcsetc.f",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T19:15:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-11-24T09:05:08.000Z",
"num_tokens": 716,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2074
} |
[STATEMENT]
lemma unit_counit_G:
assumes "C.ide x"
shows "G x = G (\<epsilon>o x) \<cdot>\<^sub>D \<eta>o (G x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. G x = G (\<epsilon>o x) \<cdot>\<^sub>D \<eta>o (G x)
[PROOF STEP]
using assms \<phi>_in_terms_of_\<eta>o \<epsilon>o_def \<phi>_\<psi> \<epsilon>o_in_hom G.preserves_ide D.ide_in_hom
[PROOF STATE]
proof (prove)
using this:
C.ide x
\<lbrakk>D.ide ?y; \<guillemotleft>?f : F ?y \<rightarrow>\<^sub>C ?x\<guillemotright>\<rbrakk> \<Longrightarrow> \<phi> ?y ?f = G ?f \<cdot>\<^sub>D \<eta>o ?y
\<epsilon>o ?x = \<psi> ?x (G ?x)
\<lbrakk>C.ide ?x; \<guillemotleft>?g : ?y \<rightarrow>\<^sub>D G ?x\<guillemotright>\<rbrakk> \<Longrightarrow> \<phi> ?y (\<psi> ?x ?g) = ?g
C.ide ?x \<Longrightarrow> \<guillemotleft>\<epsilon>o ?x : F (G ?x) \<rightarrow>\<^sub>C ?x\<guillemotright>
C.ide ?a \<Longrightarrow> D.ide (G ?a)
D.ide ?a = \<guillemotleft>?a : ?a \<rightarrow>\<^sub>D ?a\<guillemotright>
goal (1 subgoal):
1. G x = G (\<epsilon>o x) \<cdot>\<^sub>D \<eta>o (G x)
[PROOF STEP]
by metis | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Category3_Adjunction",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 467,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
"""Embed text of list of tesxts."""
from typing import List, Union
import more_itertools as mit
# import numpy as np
from logzero import logger
from alive_progress import alive_bar
from fetch_embed import fetch_embed
# fmt: off
def embed_text(
text: Union[str, List[str]],
chunk_size: int = 32,
livepbar: bool = True, # need to turn it off in pytest
) -> List[float]:
"""Embed text or list of texts.
Args:
text: strings or list of string for embedding
chunk_size: default 32
livepbar: default True, shows progress bar
Returns:
embedding of 512-dimensional vectors
"""
# fmt: on
if isinstance(text, str):
text = [text]
tot, rem = divmod(len(text), chunk_size)
res = []
def func_():
try:
_ = fetch_embed(item, livepbar=False)
except Exception as exc:
logger.error(exc)
raise
res.extend(_)
if livepbar:
with alive_bar(tot + bool(rem)) as pbar:
for item in mit.chunked(text, chunk_size):
func_()
pbar()
else:
for item in mit.chunked(text, chunk_size):
func_()
return res
| {
"alphanum_fraction": 0.5933884298,
"author": null,
"avg_line_length": 22.8301886792,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e5994a27a54c126ed3730f0f37ef4afb71dfa2d4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5a38b0634df12a197e50ba21efb017d58e4cd02d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ffreemt/tinybee-aligner",
"max_forks_repo_path": "tinybee/embed_text.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5a38b0634df12a197e50ba21efb017d58e4cd02d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ffreemt/tinybee-aligner",
"max_issues_repo_path": "tinybee/embed_text.py",
"max_line_length": 59,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5a38b0634df12a197e50ba21efb017d58e4cd02d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ffreemt/tinybee-aligner",
"max_stars_repo_path": "tinybee/embed_text.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 295,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1210
} |
import os, sys, time
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
import sys
import os
import pickle
import logging
import coloredlogs
import numpy as np
from codetiming import Timer
from datetime import datetime
import inspect
import json
from ast import literal_eval
import matplotlib.ticker as mtick
from matplotlib.lines import Line2D
import math
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
)
from src.utils.cv import setup_params
from src.utils.settings import config
from src.models.icestupaClass import Icestupa
from src.models.methods.metadata import get_parameter_metadata
def truncate(number, digits) -> float:
stepper = 10.0 ** digits
return math.trunc(stepper * number) / stepper
if __name__ == "__main__":
# Main logger
logger = logging.getLogger(__name__)
coloredlogs.install(
fmt="%(funcName)s %(levelname)s %(message)s",
level=logging.ERROR,
logger=logger,
)
locations = ["gangles21", "guttannen21"]
# locations = ["guttannen21"]
# location = "guttannen21"
# params = ['Z', 'SA_corr', 'DX']
params = ['SA_corr', 'DX']
kind = ['volume', 'area']
# kind = ['volume']
sns.set(style="whitegrid")
fig, ax = plt.subplots(
nrows=len(kind), ncols=len(params), sharey="row", figsize=(18, 8)
)
for obj in kind:
# Creating an empty Dataframe with column names only
dfx = pd.DataFrame(columns=params)
for ctr, location in enumerate(locations):
icestupa = Icestupa(location)
CONSTANTS, SITE, FOLDER = config(location)
icestupa.read_output()
file_path = 'loo-cv-'+obj+'-'
# file_path = 'cv-'+obj+'-'
file_path += '-'.join('{}'.format(key) for key in params)
df = pd.read_csv(FOLDER['sim'] + file_path)
df = df.set_index('rmse').sort_index().reset_index()
df['params'] = df['params'].apply(literal_eval)
num_selected = int(0.1 * df.shape[0])
num_total = df.shape[0]
print()
print("\tObjective %s Site %s" % (obj, location))
print("\tSelected %s out of %s" % (num_selected, num_total))
print("\tRMSE %s upto %s" % (df.rmse[0], df.rmse[num_selected]))
df = df[:num_selected]
df = pd.concat([df.drop(['params'], axis=1), df['params'].apply(pd.Series)], axis=1)
# df = df.round(4)
#df = df.loc[df.DX==0.02]
# df = df.loc[df.SA_corr >= 1.2]
print()
for col in params:
print("\t%s from %s upto %s with percentage %s" % (col, df[col].min(), df[col].max(),
df[col].value_counts(normalize=True)))
df['AIR'] = get_parameter_metadata(location)['shortname']
# df[['Z', 'DX']] *= 1000
df[['DX']] *= 1000
# df['Z'] = pd.to_numeric(df['Z'], downcast='integer')
# df['DX'] = pd.to_numeric(df['DX'], downcast='integer')
dfx = dfx.append(df, ignore_index = True)
print(dfx.head())
print(dfx.tail())
df_fill = []
for k, param_name in enumerate(params):
ylim = get_parameter_metadata(param_name)['ylim']
step = get_parameter_metadata(param_name)['step']
bars = np.arange(ylim[0],ylim[1] + step,step)
for l in bars:
if k == 0:
df_fill.append([l,10, 1, 'IN21'])
if k == 1:
df_fill.append([1,l*1000, 1, 'IN21'])
df_fill = pd.DataFrame(df_fill, columns=['SA_corr', 'DX', 'rmse', 'AIR'])
# print(df_fill)
# bars = str(bars)
# heights = np.ones(len(bars))
# print(bars, heights)
# ax[j,i].bar(bars, heights)
for i,param_name in enumerate(params):
ylim = get_parameter_metadata(param_name)['ylim']
step = get_parameter_metadata(param_name)['step']
bars = np.arange(ylim[0],ylim[1] + step,step)
if param_name == 'DX':
bars = [num*1000 for num in bars]
bars = [round(num, 0) for num in bars]
if param_name == 'SA_corr':
bars = [round(num, 1) for num in bars]
if obj == 'volume':
j=0
sns.countplot( x=param_name, hue ='AIR', palette="Set1", data=dfx,
ax=ax[j,i], order= bars)
if i == 0:
ax[j,i].set_ylabel('Volume Objective')
else:
ax[j,i].set_ylabel('')
ax[j,i].set_xlabel('')
if i != len(params) - 1:
ax[j,i].get_legend().remove()
if obj == 'area':
j=1
sns.countplot( x=param_name, hue ='AIR', palette="Set1", data=dfx,
ax=ax[j,i], order= bars)
if i == 0:
ax[j,i].set_ylabel('Area Objective')
else:
ax[j,i].set_ylabel('')
v = get_parameter_metadata(param_name)
label = v['latex'] + v['units']
ax[j,i].set_xlabel(label)
ax[j,i].get_legend().remove()
ax[j,i].yaxis.set_major_formatter(mtick.PercentFormatter(num_selected))
plt.savefig(
"data/paper1/param_hist.jpg",
dpi=300,
bbox_inches="tight",
)
| {
"alphanum_fraction": 0.5387814138,
"author": null,
"avg_line_length": 34.3540372671,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "76d11a4a3a34b53ed75fbb0b006f37a519af2ebe",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fb6fb02b866a821ee8a83551be78c25bdb7da7c9",
"max_forks_repo_licenses": [
"FTL"
],
"max_forks_repo_name": "gayashiva/air_model",
"max_forks_repo_path": "src/plots/sensitivity-hist.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "fb6fb02b866a821ee8a83551be78c25bdb7da7c9",
"max_issues_repo_issues_event_max_datetime": "2022-01-31T15:59:08.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-01-31T15:59:08.000Z",
"max_issues_repo_licenses": [
"FTL"
],
"max_issues_repo_name": "Gayashiva/AIR_model",
"max_issues_repo_path": "src/plots/sensitivity-hist.py",
"max_line_length": 101,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "fb6fb02b866a821ee8a83551be78c25bdb7da7c9",
"max_stars_repo_licenses": [
"FTL"
],
"max_stars_repo_name": "gayashiva/air_model",
"max_stars_repo_path": "src/plots/sensitivity-hist.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-12T10:48:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-23T18:12:40.000Z",
"num_tokens": 1369,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5531
} |
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Description
# ==============================================================================
#
# Functions to process and print the table title.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""
_print_title(buf::IO, title_tokens::Vector{String}, has_color::Bool, title_crayon::Crayon)
Print the table title to the buffer `buf`.
"""
function _print_title(
buf::IO,
title_tokens::Vector{String},
# Configurations
has_color::Bool,
title_crayon::Crayon
)
num_tokens = length(title_tokens)
num_tokens == 0 && return nothing
has_color && print(buf, title_crayon)
@inbounds for i = 1:num_tokens
print(buf, rstrip(title_tokens[i]))
# In the last line we must not add the new line character
# because we need to reset the crayon first if the display
# supports colors.
i != num_tokens && println(buf)
end
has_color && print(buf, _reset_crayon)
println(buf)
return nothing
end
"""
_tokenize_title(title::AbstractString, display_width::Int, table_width::Int, title_alignment::Symbol, title_autowrap::Bool, title_same_width_as_table::Bool)
Split the table title into tokens considering the line break character.
"""
function _tokenize_title(
title::AbstractString,
display_width::Int,
table_width::Int,
# Configurations
title_alignment::Symbol,
title_autowrap::Bool,
title_same_width_as_table::Bool
)
# Process the title separating the tokens.
title_tokens = String[]
if length(title) > 0
# Compute the title width.
title_width = title_same_width_as_table ? table_width : display_width
# If the title width is not higher than 0, then we should only print the
# title.
if title_width ≤ 0
push!(title_tokens, title)
# Otherwise, we must check for the alignments.
else
title_tokens_raw = string.(split(title, '\n'))
title_autowrap && (title_tokens_raw = _str_autowrap(title_tokens_raw, title_width))
num_tokens = length(title_tokens_raw)
@inbounds for i = 1:num_tokens
token = title_tokens_raw[i]
token_str = _str_aligned(token, title_alignment, title_width)
push!(title_tokens, token_str)
end
end
end
return title_tokens
end
| {
"alphanum_fraction": 0.5943661972,
"author": null,
"avg_line_length": 29.2352941176,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "6127a74af987ee26b9e449fd2a8ba6118b3a39ed",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5aea4f21b5a51ce8e160de88ffe7c12838a71f5f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "waldyrious/PrettyTables.jl",
"max_forks_repo_path": "src/backends/text/title.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5aea4f21b5a51ce8e160de88ffe7c12838a71f5f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "waldyrious/PrettyTables.jl",
"max_issues_repo_path": "src/backends/text/title.jl",
"max_line_length": 160,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5aea4f21b5a51ce8e160de88ffe7c12838a71f5f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "waldyrious/PrettyTables.jl",
"max_stars_repo_path": "src/backends/text/title.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 617,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2485
} |
import numpy as np
import sys
import os
#import faiss
import time
from tsnecuda import TSNE
from sklearn.datasets import make_classification
num_points = int(sys.argv[1])
num_dims = int(sys.argv[2])
sep = int(sys.argv[3])
k = int(sys.argv[4])
option = int(sys.argv[5])
reorder = int(sys.argv[6])
num_clusters = int(sys.argv[7])
x, y = make_classification(n_samples=num_points, n_features=num_dims,
n_redundant=int(num_dims/2), n_informative=int(num_dims/2), class_sep=sep,
n_clusters_per_class=1, scale=10.0,
n_classes=4,shuffle=True,random_state=42)
x = x.astype('float32')
if option == 0:
np.savetxt(str(num_points) + ".data", x, delimiter=" ", fmt='%f')
elif option == 1:
X_emb = TSNE(reorder=reorder,verbose=True).fit_transform(x)
else:
print("Inside faiss branch")
start = time.perf_counter()
res = faiss.StandardGpuResources()
index_flat = faiss.index_factory(num_dims, "IVF4096,PQ64")
#gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)
co = faiss.GpuClonerOptions()
gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat, co)
#index.train(x)
print("train")
start_train = time.perf_counter()
gpu_index_flat.train(x)
print("Train time: " + str(time.perf_counter()-start_train) + "s")
print("add index")
start_add = time.perf_counter()
gpu_index_flat.add(x)
print("Add time: " + str(time.perf_counter()-start_add) + "s")
#gpu_index_flat.search(x[:5], 123)
#nlist=20
#quantizer = faiss.IndexFlatL2(num_dims)
#index = faiss.IndexIVFFlat(quantizer, num_dims, nlist)
#index.nprobe = 20
#index.train(x)
#index.add(x)
#print(index.is_trained)
#print(index.ntotal)
print("Starting search:")
start_search = time.perf_counter()
D, I = gpu_index_flat.search(x, k)
print("Search time: " + str(time.perf_counter()-start_search) + "s")
print (type(I))
#print (D)
print("Total Elapsed time: " + str(time.perf_counter()-start) + "s")
#print (I)
np.savetxt("faiss_res.txt", I, delimiter=" ")
| {
"alphanum_fraction": 0.6937965261,
"author": null,
"avg_line_length": 23.9880952381,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "469d3af2d81c89aaac7d7452028b6a2c56171b50",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3a21a1bcd05fc26aa6d487d6df92879ad6a1fd11",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "dogadikbayir/tsne-cuda",
"max_forks_repo_path": "gen_syn_pts.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3a21a1bcd05fc26aa6d487d6df92879ad6a1fd11",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "dogadikbayir/tsne-cuda",
"max_issues_repo_path": "gen_syn_pts.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3a21a1bcd05fc26aa6d487d6df92879ad6a1fd11",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "dogadikbayir/tsne-cuda",
"max_stars_repo_path": "gen_syn_pts.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 612,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2015
} |
# test neutral_models
context("neutral model functions")
test_that("calculating expected K for a population in a WFIA model works", {
n <- 2000
t <- 20
observed <- expected_k_pop(n,t)
expected <- 81.2528
expect_equal(expected, observed, tolerance=0.1)
}) | {
"alphanum_fraction": 0.6996336996,
"author": null,
"avg_line_length": 19.5,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "003e9d50af972fced25910db02398f9c00ee2e36",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1a7ac5431d87628a32fee0453a7d2a289dd5975b",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "mmadsen/mmadsenr",
"max_forks_repo_path": "tests/testthat/test_neutral_models.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1a7ac5431d87628a32fee0453a7d2a289dd5975b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "mmadsen/mmadsenr",
"max_issues_repo_path": "tests/testthat/test_neutral_models.r",
"max_line_length": 76,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "1a7ac5431d87628a32fee0453a7d2a289dd5975b",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "mmadsen/mmadsenr",
"max_stars_repo_path": "tests/testthat/test_neutral_models.r",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 74,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 273
} |
[STATEMENT]
lemma ivp_solsD:
assumes "X \<in> Sols f U S t\<^sub>0 s"
shows "D X = (\<lambda>t. f t (X t)) on U s" and "X t\<^sub>0 = s"
and "X \<in> U s \<rightarrow> S" and "t\<^sub>0 \<in> U s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ( D X = (\<lambda>t. f t (X t)) on U s &&& X t\<^sub>0 = s) &&& X \<in> U s \<rightarrow> S &&& t\<^sub>0 \<in> U s
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
X \<in> Sols f U S t\<^sub>0 s
goal (1 subgoal):
1. ( D X = (\<lambda>t. f t (X t)) on U s &&& X t\<^sub>0 = s) &&& X \<in> U s \<rightarrow> S &&& t\<^sub>0 \<in> U s
[PROOF STEP]
unfolding ivp_sols_def
[PROOF STATE]
proof (prove)
using this:
X \<in> {X \<in> U s \<rightarrow> S. D X = (\<lambda>t. f t (X t)) on U s \<and> X t\<^sub>0 = s \<and> t\<^sub>0 \<in> U s}
goal (1 subgoal):
1. ( D X = (\<lambda>t. f t (X t)) on U s &&& X t\<^sub>0 = s) &&& X \<in> U s \<rightarrow> S &&& t\<^sub>0 \<in> U s
[PROOF STEP]
by auto | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Hybrid_Systems_VCs_HS_ODEs",
"hexsha": null,
"include": null,
"lang": null,
"length": 3,
"llama_tokens": 454,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
#!/usr/bin/python
from ctwc__common import *
import numpy as np
INITIALIZED = False
PLOT_RAW_FILE = RESULTS_PATH+'plot_raw-{0}.pklz'
PLOT_MAT_RAW_FILE = RESULTS_PATH+'plot_raw_mat-{0}.npz'
PLOT_PNG_FILE = RESULTS_PATH+'plot-{0}.png'
SHOW_ON_SCREEN = False
def plot_mat(mat, xlabel=None, ylabel=None, header=None):
sanitized_hdr = make_camel_from_string(header)
save_to_file((xlabel, ylabel, header), PLOT_RAW_FILE.format(sanitized_hdr), mat, PLOT_MAT_RAW_FILE.format(sanitized_hdr))
__plot_mat(mat, xlabel, ylabel, header)
def __plot_mat(mat, xlabel, ylabel, header):
if not INITIALIZED:
return
import matplotlib.pyplot as plt
plt.matshow(mat)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if header is not None:
plt.title(header)
plt.locator_params(axis='x', nbins=4)
plt.locator_params(axis='y', nbins=10)
plt.xticks(rotation=45)
plt.savefig(PLOT_PNG_FILE.format(make_camel_from_string(header)), dpi=1000)
if SHOW_ON_SCREEN:
plt.draw()
plt.pause(0.001)
def show_plots():
if SHOW_ON_SCREEN:
plt.show()
raw_input("Press [Enter] to close all plots...")
def init():
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ion()
plt.set_cmap('hot')
globals()['INITIALIZED'] = True
except Exception:
globals()['INITIALIZED'] = False
def wait_for_user():
if INITIALIZED:
raw_input("Press Enter to continue...")
def plot_from_file(filename, mat_filename):
pack = load_from_file(filename, True, mat_filename)
if pack is None:
return
metadata, mat = pack
xlabel, ylabel, header = metadata
__plot_mat(mat, xlabel, ylabel, header)
def test():
init()
mat = np.random.rand(1200,18000)
plot_mat(mat, str(18000), str(1200), 'large rectangular matrix')
mat2 = np.random.rand(300, 300)
plot_mat(mat2, header="header")
show_plots()
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
init()
if sys.argv[1] == "all":
import glob
for fn in glob.glob(PLOT_RAW_FILE.format("*")):
tmplt = PLOT_RAW_FILE.format("@")
desc = fn[len(templ.split("@")[0]):-len(tmplt.split("@")[1])]
INFO(desc)
plot_from_file(PLOT_RAW_FILE.format(desc), PLOT_MAT_RAW_FILE.format(desc))
else:
for desc in sys.argv[1:]:
plot_from_file(PLOT_RAW_FILE.format(desc), PLOT_MAT_RAW_FILE.format(desc))
wait_for_user()
exit()
else:
test()
| {
"alphanum_fraction": 0.633432172,
"author": null,
"avg_line_length": 28.4,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "cc089f3db48e2b0f6b64f0e3ee50295f47901682",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0c239919fb63f55d1c169fa6be64ef131ef0a6a6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "zbarzik/ctwc_16s",
"max_forks_repo_path": "ctwc__plot.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0c239919fb63f55d1c169fa6be64ef131ef0a6a6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "zbarzik/ctwc_16s",
"max_issues_repo_path": "ctwc__plot.py",
"max_line_length": 125,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0c239919fb63f55d1c169fa6be64ef131ef0a6a6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "zbarzik/ctwc_16s",
"max_stars_repo_path": "ctwc__plot.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 682,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2698
} |
import numpy as np
from sklearn.manifold import Isomap
from vx.com.py.projection.Projection import *
class LDA(Projection):
    """Projection wrapper that embeds self.X into self.p dimensions.

    NOTE(review): despite the class name, `execute` runs sklearn's Isomap
    embedding, not Linear Discriminant Analysis — confirm the naming upstream.
    """

    def __init__(self, X=None, p=2):
        super().__init__(X, p)

    def execute(self):
        """Return the p-dimensional embedding of self.X as nested lists."""
        data_matrix = np.array(self.X)
        embedding = Isomap(n_components=self.p).fit_transform(data_matrix)
        return embedding.tolist()
| {
"alphanum_fraction": 0.6187845304,
"author": null,
"avg_line_length": 21.2941176471,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "180db58b1752e66628b305b954db390b71238b1e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "704d386734449c8cfe1ee34b744e5dcb17e894c3",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "TrustPaul/feature_importance",
"max_forks_repo_path": "sourcecode/src/vx/com/py/projection/LDA.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "704d386734449c8cfe1ee34b744e5dcb17e894c3",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "TrustPaul/feature_importance",
"max_issues_repo_path": "sourcecode/src/vx/com/py/projection/LDA.py",
"max_line_length": 57,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "704d386734449c8cfe1ee34b744e5dcb17e894c3",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "TrustPaul/feature_importance",
"max_stars_repo_path": "sourcecode/src/vx/com/py/projection/LDA.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 94,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 362
} |
# Gross-Nevue ground state using DMRG
include("Schollwock.jl")
using DelimitedFiles
using Plots
using LsqFit
using DataFrames
using CSV
# Single-site (two-component) Hamiltonian block for the Gross-Neveu model on
# an n-site chain with lattice spacing a = 1/(n+1): mass/Wilson term plus the
# on-site quartic interaction. The keyword ϵ is accepted for interface
# compatibility but currently unused (the magnetic-field term is disabled).
function Hss(m₀::Float64,g₀::Float64,r::Float64,n::Int;ϵ=0.0)
    spacing = 1.0/(n+1)
    raise_op = [0.0 0.0;
                1.0 0.0]
    lower_op = [0.0 1.0;
                0.0 0.0]
    # antisymmetric pair operator a⁺⊗a⁻ − a⁻⊗a⁺
    pair = kron(raise_op, lower_op) - kron(lower_op, raise_op)
    mass_term   = im*(-m₀ - r/spacing)*pair
    interaction = g₀^2/(2*spacing)*pair^2
    return mass_term + interaction
end
# Nearest-neighbour hopping block (four two-level sites, 16×16) with Wilson
# parameter r; the lattice spacing is a = 1/(n+1) for an n-site chain.
function Hnn(r::Float64,n::Int)
    spacing = 1.0/(n+1)
    up = [0.0 0.0;
          1.0 0.0]
    dn = [0.0 1.0;
          0.0 0.0]
    zop = [1.0 0.0;
           0.0 -1.0]
    id2 = [1.0 0.0;
           0.0 1.0]
    # kinetic (hopping) part
    hop = kron(id2,dn,zop,up) - kron(dn,zop,up,id2) -
          kron(id2,up,zop,dn) + kron(up,zop,dn,id2)
    # Wilson-term part, weighted by r below
    wilson = kron(id2,dn,up,id2) - kron(id2,up,dn,id2) +
             kron(up,zop,zop,dn) - kron(dn,zop,zop,up)
    return im/(2*spacing)*(hop + r*wilson)
end
#Making a Plot#
# function InnerOfAdjoiningMPS(Le::Int,n::Int,BD::Int,r::Float64,m0::Float64,g0::Float64) #Le: length of the smaller system; n: Number of differing sites; BD: Starting Bond dimension
# mps_long = DMRG(NNHandSSHtoMPO(Hnn(r,Le+n),Hss(m0,g0,r,Le+n),Le+n),Le+n,D=BD,σ=4,ϵ=1.0e-8,maxiteration=30)
# mps_short_temp = DMRG(NNHandSSHtoMPO(Hnn(r,Le),Hss(m0,g0,r,Le),Le),Le,D=BD,σ=4,ϵ=1.0e-8,maxiteration=30)
# mps_cut = CutEntanglement(mps_long,Le+1,after=false)
# tensors = mps_short_temp.tensors
# for i in Le+1:Le+n
# tensors[i] = mps_cut.tensors[i]
# end
# mps_short = MPS2(tensors, Le+n,mps_short_temp.BD,Le,4)
# norm_of_MPS = sqrt(abs(inner(mps_short,mps_short)))
# mps_short.tensors[Le] = mps_short.tensors[Le]/norm_of_MPS
# return (mps_long.BD,abs(inner(mps_short,mps_long)))
# end
# Absolute overlap |⟨M1|M2⟩| of two MPS of possibly different lengths.
# The shorter state is extended with the entanglement-cut tensors of the
# longer state, renormalized, and contracted against the longer state.
function AbsInnerProductOfMismatchingMPS(M1::MPS2,M2::MPS2)
    # Equal lengths: plain overlap. Take abs() so the result matches the
    # function's name and the mismatching-length branch (the old code
    # returned the raw, possibly complex, inner product here).
    M1.length == M2.length && return abs(inner(M1,M2))
    SM, BM = M1.length < M2.length ? (M1, M2) : (M2, M1)
    l1 = SM.length
    l2 = BM.length
    M_cut = CutEntanglement(BM,l1+1,after=false)
    # Shallow-copy the tensor dictionary so the caller's MPS is not mutated
    # (the old code aliased SM.tensors and wrote the extra sites into SM).
    tensors = copy(SM.tensors)
    for l in l1+1:l2
        tensors[l] = M_cut.tensors[l]
    end
    temp_MPS = MPS2(tensors)
    # normalize the padded state before taking the overlap
    norm_of_MPS = sqrt(abs(inner(temp_MPS,temp_MPS)))
    temp_MPS.tensors[l2] = temp_MPS.tensors[l2]/norm_of_MPS
    return abs(inner(temp_MPS,BM))
end
# Absolute overlap of two MPS where the shorter one is padded with uniform
# (all-ones, bond-dimension-1) site tensors and renormalized. All callers in
# this file push the result into Float64 arrays, so the return value must be
# real — hence abs() on both branches.
function UniformInnerProductOfMPS(M1::MPS2,M2::MPS2)
    # Equal lengths: plain overlap, made real for consistency (the old code
    # returned the raw complex inner product, which would break the Float64
    # push! sites if the equal-length branch were ever taken).
    M1.length == M2.length && return abs(inner(M1,M2))
    SM, BM = M1.length < M2.length ? (M1, M2) : (M2, M1)
    l1 = SM.length
    l2 = BM.length
    σ = SM.σ
    # Shallow-copy so the padding sites are not written into the caller's MPS.
    tensors = copy(SM.tensors)
    for l in l1+1:l2
        tensors[l] = ones(ComplexF64,σ,1,1)
    end
    temp_MPS = normalize(MPS2(tensors))
    return abs(inner(temp_MPS,BM))
end
# Append one random site tensor (bond dimension 1) to the MPS M and
# renormalize so the extended state has unit norm.
# NOTE(review): `tensors = M.tensors` aliases M's own dictionary, so the new
# site is also inserted into M itself — confirm this mutation is intended.
function AddRandomSite(M::MPS2)
    σ = M.σ
    l = M.length
    tensors = M.tensors
    # element type of the existing site tensors (e.g. ComplexF64)
    T = typeof(tensors[1][1])
    temp_tensor = rand(T,(σ,1,1))
    # scaled by the summed |·|² (not the 2-norm); the exact scale does not
    # matter because the state is renormalized just below
    tensors[l+1] = temp_tensor/sum(abs2.(temp_tensor))
    Mt = MPS2(tensors,l+1,M.BD,M.CP,M.σ)
    norm_of_MPS = sqrt(abs(inner(Mt,Mt)))
    Mt.tensors[l+1] = Mt.tensors[l+1]/norm_of_MPS
    return Mt
end
# main1: ground-state overlap study vs. chain length.
# Computes n independent DMRG ground states at length l0, then grows the
# chain one site at a time up to length l; at each length it records the
# maximal overlap over all n×n pairs of old/new ground states, writing
# per-pair and per-length results to text files and a CSV, then plots
# overlap vs. length.
# args layout: [selector, l0, l, r, m0, g0, n, ϵ0, D, DMAX].
function main1(args)
    l0 = parse(Int,args[1+1])
    l = parse(Int,args[2+1])
    r = parse(Float64,args[3+1])
    m0 = parse(Float64,args[4+1])
    g0 = parse(Float64,args[5+1])
    n = parse(Int,args[6+1])
    ϵ0 = parse(Float64,args[7+1])
    D = parse(Int,args[9])
    DMAX = parse(Int,args[10])
    local inners = Array{Float64,1}()
    t = time()
    local SmallGroundStates = Dict{Int,MPS2}()
    local ϵ_Small = zeros(Float64,n)
    local ϵ_Large = zeros(Float64,n)
    local BigGroundStates = Dict{Int,MPS2}()
    local DF_inners = DataFrame(Length = Int[],
                        ϵSmall = Float64[],
                        BD_Small = Int[],
                        ϵLarge = Float64[],
                        BD_Large = Int[],
                        InnerProduct = Float64[],
                        time = Float64[])
    # n independent DMRG runs at the starting length l0
    for k in 1:n
        SmallGroundStates[k],ϵ_Small[k] = DMRG(NNHandSSHtoMPO(Hnn(r,l0),Hss(m0,g0,r,l0),l0),D=D,ϵ=ϵ0,maxiteration=3,DMax=DMAX)
    end
    open("Gross-Neveu_r=$(r)_m0=$(m0)_g0=$(g0)_l=$(l).txt","w") do io
        open("MAX_Gross-Neveu_r=$(r)_m0=$(m0)_g0=$(g0)_l=$(l).txt","w") do ioMAX
            for Le in l0+1:1:l
                t1 = time()
                for k in 1:n
                    BigGroundStates[k],ϵ_Large[k] = DMRG(NNHandSSHtoMPO(Hnn(r,Le),Hss(m0,g0,r,Le),Le),D=SmallGroundStates[k].BD,ϵ=ϵ0,maxiteration=3,DMax=DMAX)
                end
                push!(inners,UniformInnerProductOfMPS(SmallGroundStates[1],BigGroundStates[1]))
                # keep the maximum overlap over all n×n old/new pairs at this length
                for j in 1:n, k in 1:n
                    tmp = UniformInnerProductOfMPS(SmallGroundStates[j],BigGroundStates[k])
                    inners[end] = max(tmp,inners[end])
                    write(io,"$(Le) \t $(round(tmp,digits=5)) \t $(round(time()-t))\n")
                end
                write(ioMAX,"$(Le) \t $(round(inners[end],digits=5)) \t $(round(time()-t))\n")
                push!(DF_inners,(Le,ϵ_Small[1],SmallGroundStates[1].BD,ϵ_Large[1],BigGroundStates[1].BD,inners[end],round(time()-t1,digits=2)))
                # grown states become the reference states for the next length
                SmallGroundStates = deepcopy(BigGroundStates)
                ϵ_Small = deepcopy(ϵ_Large)
            end
            write(ioMAX,"finished computation in $(time()-t).\n")
            CSV.write("Gross-Neveu_r=$(r)_m0=$(m0)_g0=$(g0)_l=$(l).csv",DF_inners)
        end
        write(io,"finished computation in $(time()-t).\n")
    end
    gr()
    p = plot(l0+1:l,inners)
    png("Gross-Neveu_r=$(r)_m0=$(m0)_g0=$(g0)_l=$(l).png")
end
# main2: overlap of ground states for chains of length l and l+1 while the
# mass m is swept linearly from m0 to mf in steps of dm. Results go to a
# text file and a PNG plot.
# args layout: [selector, l, r, m0, mf, dm, g0]. `args` defaults to ARGS so
# the existing zero-argument call in main() keeps working, while the
# signature is now consistent with main1/main3/... (all take the arg list).
function main2(args=ARGS)
    l = parse(Int,args[1+1])
    r = parse(Float64,args[2+1])
    m0 = parse(Float64,args[3+1])
    mf = parse(Float64,args[4+1])
    dm = parse(Float64,args[5+1])
    g0 = parse(Float64,args[6+1])
    D = 25   # fixed initial bond dimension for this study
    local inners = Array{Float64,1}()
    t = time()
    open("Gross-Neveu_Varying-Mass_r=$(r)_l=$(l)_g0=$(g0).txt","w") do io
        for m in m0:dm:mf
            local SmallGroundStates , _ = DMRG(NNHandSSHtoMPO(Hnn(r,l),Hss(m,g0,r,l),l),D=D,ϵ=1.0e-8,maxiteration=5)
            local BigGroundStates , _ = DMRG(NNHandSSHtoMPO(Hnn(r,l+1),Hss(m,g0,r,l+1),l+1),D=SmallGroundStates.BD,ϵ=1.0e-8,maxiteration=5)
            push!(inners,UniformInnerProductOfMPS(SmallGroundStates,BigGroundStates))
            write(io,"$(m) \t $(round(inners[end],digits=5)) \t $(round(time()-t))\n")
        end
        write(io,"finished computation in $(time()-t).\n")
    end
    gr()
    p = plot(m0:dm:mf,inners)
    png("Gross-Neveu_Varying-Mass_r=$(r)_l=$(l)_g0=$(g0).png")
end
# main3: overlap of ground states for lengths l and l+1 while the mass is
# increased geometrically, m ← m·rm, for n+1 values starting at m0.
# Writes a text log, a CSV with precisions/bond dimensions, and a PNG plot.
# args layout: [selector, l, r, m0, rm, n, g0, ϵ0, D, DMAX].
function main3(args)
    l = parse(Int,args[1+1])
    r = parse(Float64,args[2+1])
    m0 = parse(Float64,args[3+1])
    rm = parse(Float64,args[4+1])
    n = parse(Int,args[5+1])
    g0 = parse(Float64,args[6+1])
    ϵ0 = parse(Float64,args[7+1])
    D = parse(Int,args[9])
    DMAX = parse(Int,args[10])
    local inners = Array{Float64,1}()
    local DF_inners = DataFrame(m = Float64[],
                        ϵ_Small = Float64[],
                        BD_Small = Int[],
                        ϵ_Large = Float64[],
                        BD_Large = Int[],
                        Inner = Float64[],
                        time = Int[])
    t = time()
    open("Gross-Neveu_Power-Mass2_r=$(r)_l=$(l)_g0=$(g0).txt","w") do io
        local m = m0
        for i in 0:n
            t1 = time()
            local SmallGroundStates,ϵsmall = DMRG(NNHandSSHtoMPO(Hnn(r,l),Hss(m,g0,r,l),l),D=D,ϵ=ϵ0,maxiteration=3,DMax=DMAX)
            local BigGroundStates,ϵbig = DMRG(NNHandSSHtoMPO(Hnn(r,l+1),Hss(m,g0,r,l+1),l+1),D=SmallGroundStates.BD,ϵ=ϵ0,maxiteration=3,DMax=DMAX)
            push!(inners,UniformInnerProductOfMPS(SmallGroundStates,BigGroundStates))
            write(io,"$(m) \t $(round(inners[end],digits=10)) \t $(round(time()-t))\n")
            push!(DF_inners,(m,ϵsmall,SmallGroundStates.BD,ϵbig,BigGroundStates.BD,inners[end],round(Int,time()-t1)))
            # geometric mass step; rounded to avoid floating-point drift
            m = round(m*rm,digits = 10)
        end
        write(io,"finished computation in $(time()-t).\n")
        CSV.write("Gross-Neveu_Power-Mass2_r=$(r)_l=$(l)_g0=$(g0).csv",DF_inners)
    end
    gr()
    p = plot([m0*rm^i for i in 0:n],inners)
    png("Gross-Neveu_Power-Mass2_r=$(r)_l=$(l)_g0=$(g0).png")
end
# Operator string for an (unnormalized) two-point correlator between sites
# j and k: a lowering-type operator at j, a raising-type operator at k, and
# Jordan-Wigner Z⊗Z strings on all sites strictly between them.
function twopoint(j::Int,k::Int) #unnormalized
    lower = [0.0 0.0;
             1.0 0.0]
    raise = [0.0 1.0;
             0.0 0.0]
    zop = [1.0 0.0;
           0.0 -1.0]
    ops = Dict{Int,Array{Float64,2}}(j => kron(lower, zop),
                                     k => kron(zop, raise))
    for site in j+1:k-1
        ops[site] = kron(zop, zop)
    end
    return ops
end
# main4: two-point correlator ψ1ψ̄j study. For n geometrically increasing
# masses, computes the DMRG ground state at length l, measures the
# two-point correlator from site 1 to every site k, fits an exponential
# decay with an offset, and stores everything in a text log and a CSV.
# args layout: [selector, l, r, m0, rm, n, g0, ϵg, D, DMAX].
function main4(args)
    l = parse(Int,args[1+1])
    r = parse(Float64,args[2+1])
    m0 = parse(Float64,args[3+1])
    rm = parse(Float64,args[4+1]) #multiplicative coefficient for increasing m
    n = parse(Int,args[5+1]) #number of different masses
    g0 = parse(Float64,args[6+1])
    ϵg = parse(Float64,args[7+1]) #precision goal
    D = parse(Int,args[9])
    DMAX = parse(Int,args[10])
    m = m0
    local DF_inners = DataFrame(Distance = collect(0:l-1))
    open("Gross-Neveu_twopoint_l=$(l)_r=$(r)_g0=$(g0)_eps=$(ϵg).txt","w") do io
        t = time()
        for i in 1:n
            exps = Float64[]
            exps2 = ComplexF64[]
            local GS,ϵf = DMRG(NNHandSSHtoMPO(Hnn(r,l),Hss(m,g0,r,l),l),D=D,ϵ=ϵg,maxiteration=3,DMax=DMAX)
            write(io,"precision = $(ϵf)\nBondDimension = $(GS.BD)\nm = $(m)\nTwo point correlators:\n")
            # correlator from site 1 to each site k (scaled by (l-1)*i)
            for k in 1:l
                tp = twopoint(1,k)
                ex = (l-1)*1im*expectation(GS,GS,tp)
                write(io,"$(k):\t\t$(ex)\n")
                append!(exps,abs(ex))
                append!(exps2,ex)
            end
            # one set of columns per mass value
            DF_inners[Symbol("ydata_m=",round(m,digits=4))] = exps
            DF_inners[Symbol("ydata_error_m=",round(m,digits=4))] = 2*sqrt(ϵf)*exps
            DF_inners[Symbol("ActualExpectation_m=",round(m,digits=4))] = exps2
            DF_inners[Symbol("BD_m=",round(m,digits=4))] = repeat([GS.BD],l)
            DF_inners[Symbol("mdata_m=",round(m,digits=4))] = repeat([m],l)
            DF_inners[Symbol("ϵ_m=",round(m,digits=4))] = repeat([ϵf],l)
            # exponential decay with constant offset
            @. model(x,p) = p[3] + p[2]*exp(-x/p[1])
            write(io,"model(x,p) = p[3] + p[2]*exp(-x/p[1])\n")
            try
                fit = curve_fit(model,0:l-1,exps,[1.0,1.0,0.0])
                write(io,"parameters:\n$(fit.param)\n")
                write(io,"Standard Deviations:\n$(stderror(fit))\n")
            catch
                write(io,"LsqFit Failed\n")
            end
            write(io,"============================================================\n")
            m = m*rm
        end
        CSV.write("Gross-Neveu_twopoint_l=$(l)_r=$(r)_g0=$(g0)_eps=$(ϵg).csv",DF_inners)
        write(io,"finished computation in $(time()-t).")
    end
end
# Alternative operator string for an (unnormalized) two-point correlator
# between sites j and k: I⊗(−L⁺) at j, L⁻⊗I at k, and Jordan-Wigner Z⊗Z
# strings on all sites strictly between them.
function twopoint2(j::Int,k::Int) #unnormalized
    lower = [0.0 0.0;
             1.0 0.0]
    raise = [0.0 1.0;
             0.0 0.0]
    zop = [1.0 0.0;
           0.0 -1.0]
    id2 = [1.0 0.0;
           0.0 1.0]
    ops = Dict{Int,Array{Float64,2}}(j => kron(id2, -raise),
                                     k => kron(lower, id2))
    for site in j+1:k-1
        ops[site] = kron(zop, zop)
    end
    return ops
end
# main5: same study as main4 but for the ψ̄1ψj correlator (twopoint2) and
# maxiteration=5. Results go to "..._twopoint2_..." text/CSV files.
# args layout: [selector, l, r, m0, rm, n, g0, ϵg, D, DMAX].
function main5(args)
    l = parse(Int,args[1+1])
    r = parse(Float64,args[2+1])
    m0 = parse(Float64,args[3+1])
    rm = parse(Float64,args[4+1]) #multiplicative coefficient for increasing m
    n = parse(Int,args[5+1]) #number of different masses
    g0 = parse(Float64,args[6+1])
    ϵg = parse(Float64,args[7+1]) #precision goal
    D = parse(Int,args[9])
    DMAX = parse(Int,args[10])
    m = m0
    local DF_inners = DataFrame(Distance = collect(0:l-1))
    open("Gross-Neveu_twopoint2_l=$(l)_r=$(r)_g0=$(g0)_eps=$(ϵg).txt","w") do io
        t = time()
        for i in 1:n
            exps = Float64[]
            exps2 = ComplexF64[]
            local GS,ϵf = DMRG(NNHandSSHtoMPO(Hnn(r,l),Hss(m,g0,r,l),l),D=D,ϵ=ϵg,maxiteration=5,DMax=DMAX)
            write(io,"precision = $(ϵf)\nBondDimension = $(GS.BD)\nm = $(m)\nTwo point correlators 2:\n")
            # correlator from site 1 to each site k (scaled by (l-1)*i)
            for k in 1:l
                tp = twopoint2(1,k)
                ex = (l-1)*1im*expectation(GS,GS,tp)
                write(io,"$(k):\t\t$(ex)\n")
                append!(exps,abs(ex))
                append!(exps2,ex)
            end
            # one set of columns per mass value
            DF_inners[Symbol("ydata_m=",round(m,digits=4))] = exps
            DF_inners[Symbol("ydata_error_m=",round(m,digits=4))] = 2*sqrt(ϵf)*exps
            DF_inners[Symbol("ActualExpectation_m=",round(m,digits=4))] = exps2
            DF_inners[Symbol("BD_m=",round(m,digits=4))] = repeat([GS.BD],l)
            DF_inners[Symbol("mdata_m=",round(m,digits=4))] = repeat([m],l)
            DF_inners[Symbol("ϵ_m=",round(m,digits=4))] = repeat([ϵf],l)
            # exponential decay with constant offset
            @. model(x,p) = p[3] + p[2]*exp(-x/p[1])
            write(io,"model(x,p) = p[3] + p[2]*exp(-x/p[1])\n")
            try
                fit = curve_fit(model,0:l-1,exps,[1.0,1.0,0.0])
                write(io,"parameters:\n$(fit.param)\n")
                write(io,"Standard Deviations:\n$(stderror(fit))\n")
            catch
                write(io,"LsqFit Failed\n")
            end
            write(io,"============================================================\n")
            m = m*rm
        end
        CSV.write("Gross-Neveu_twopoint2_l=$(l)_r=$(r)_g0=$(g0)_eps=$(ϵg).csv",DF_inners)
        write(io,"finished computation in $(time()-t).")
    end
end
# main6: sweeps 2n+1 masses geometrically spanning [m0/κ, m0·κ] and, for
# each, measures the two-point correlator, fits an exponential decay to
# extract a correlation length, records the l-1 vs. l ground-state overlap
# and the leading four singular values of the last site tensor.
# Outputs a text log and two CSVs (raw correlators and fit results).
# args layout: [selector, l, r, m0, κ, n, g0, ϵg, D, DMAX].
function main6(args)
    l = parse(Int,args[1+1])
    r = parse(Float64,args[2+1])
    m0 = parse(Float64,args[3+1]) #The m₀ that we know has a good correlation length
    κ = parse(Float64,args[4+1]) #multiplicative span around m₀ we want to study(2 would be a good value)
    n = parse(Int,args[5+1]) #number of different masses
    g0 = parse(Float64,args[6+1])
    ϵg = parse(Float64,args[7+1]) #precision goal
    D = parse(Int,args[9])
    DMAX = parse(Int,args[10])
    rm = (κ)^(1/n)
    m = m0/κ
    local DF_TP = DataFrame(Distance = collect(0:l-1))
    local DF_fit = DataFrame(m0 = Float64[],
                    Chi = Float64[],
                    b = Float64[],
                    error_Chi = Float64[],
                    error_b = Float64[],
                    inner50 = Float64[],
                    error_inner50 = Float64[],
                    entanglement1 = Float64[],
                    entanglement2 = Float64[],
                    entanglement3 = Float64[],
                    entanglement4 = Float64[])
    open("Gross-Neveu_TwoPoint_l=$(l)_r=$(r)_g0=$(g0)_eps=$(ϵg)_m0=$(m0).txt","w") do io
        t = time()
        for i in 1:2n+1
            exps = Float64[]
            exps2 = ComplexF64[]
            local GS,ϵf = DMRG(NNHandSSHtoMPO(Hnn(r,l),Hss(m,g0,r,l),l),D=D,ϵ=ϵg,maxiteration=3,DMax=DMAX)
            write(io,"precision = $(ϵf)\nBondDimension = $(GS.BD)\nm = $(m)\nTwo point correlators:\n")
            for k in 1:l
                tp = twopoint(1,k)
                ex = (l-1)*1im*expectation(GS,GS,tp)
                write(io,"$(k):\t\t$(ex)\n")
                append!(exps,abs(ex))
                append!(exps2,ex)
            end
            Ms = GS.tensors
            # NOTE(review): this overwrites the CLI-parsed D with the
            # converged bond dimension, so later DMRG calls start from it —
            # confirm this adaptive restart is intended.
            D = GS.BD
            # singular values of the last site tensor (entanglement proxy)
            (σ,j,k) = size(Ms[l])
            F = svd(reshape(permutedims(Ms[l],(2,1,3)),(j,σ*k)))
            local S = F.S
            DF_TP[Symbol("InnerProduct_m=",round(m,digits=4))] = exps
            DF_TP[Symbol("InnerProduct_error_m=",round(m,digits=4))] = 2*sqrt(ϵf)*exps
            DF_TP[Symbol("ActualExpectation_m=",round(m,digits=4))] = exps2
            DF_TP[Symbol("BD_m=",round(m,digits=4))] = repeat([GS.BD],l)
            DF_TP[Symbol("m=",round(m,digits=4))] = repeat([m],l)
            DF_TP[Symbol("ϵ_m=",round(m,digits=4))] = repeat([ϵf],l)
            # overlap between the (l-1)-site and l-site ground states
            local small_GS,small_ϵf = DMRG(NNHandSSHtoMPO(Hnn(r,l-1),Hss(m,g0,r,l-1),l-1),D=GS.BD,ϵ=ϵg,maxiteration=3,DMax=DMAX)
            local inner = UniformInnerProductOfMPS(small_GS,GS)
            # pure exponential decay (no offset); first two points excluded
            @. model(x,p) = p[2]*exp(-x/p[1])
            write(io,"model(x,p) = p[2]*exp(-x/p[1])\n")
            try
                fit = curve_fit(model,2:l-1,exps[3:end],[1.0,1.0])
                write(io,"parameters:\n$(fit.param)\n")
                write(io,"Standard Deviations:\n$(stderror(fit))\n")
                push!(DF_fit,[m,fit.param...,stderror(fit)...,inner,hypot(ϵf,small_ϵf)*inner,S[1:4]...])
            catch e
                write(io,"LsqFit Failed\n")
                @warn e
            end
            write(io,"============================================================\n")
            m = m*rm
        end
        CSV.write("Gross-Neveu_TwoPoint_l=$(l)_r=$(r)_g0=$(g0)_eps=$(ϵg)_m0=$(m0).csv",DF_TP)
        CSV.write("Gross-Neveu_Fitting_l=$(l)_r=$(r)_g0=$(g0)_eps=$(ϵg)_m0=$(m0).csv",DF_fit)
        write(io,"finished computation in $(time()-t).")
    end
end
# Single-site Hamiltonian block, identical to Hss but parameterized by the
# lattice spacing a directly instead of the chain length n. The keyword ϵ is
# kept for interface compatibility (the magnetic-field term is disabled).
function Hss2(m₀::Float64,g₀::Float64,r::Float64,a::Float64;ϵ=0.0)
    raise_op = [0.0 0.0;
                1.0 0.0]
    lower_op = [0.0 1.0;
                0.0 0.0]
    # antisymmetric pair operator a⁺⊗a⁻ − a⁻⊗a⁺
    pair = kron(raise_op, lower_op) - kron(lower_op, raise_op)
    return im*(-m₀ - r/a)*pair + g₀^2/(2*a)*pair^2
end
# Nearest-neighbour hopping block, identical to Hnn but parameterized by the
# lattice spacing a directly instead of the chain length n.
function Hnn2(r::Float64,a::Float64)
    up = [0.0 0.0;
          1.0 0.0]
    dn = [0.0 1.0;
          0.0 0.0]
    zop = [1.0 0.0;
           0.0 -1.0]
    id2 = [1.0 0.0;
           0.0 1.0]
    # kinetic (hopping) part
    hop = kron(id2,dn,zop,up) - kron(dn,zop,up,id2) -
          kron(id2,up,zop,dn) + kron(up,zop,dn,id2)
    # Wilson-term part, weighted by r below
    wilson = kron(id2,dn,up,id2) - kron(id2,up,dn,id2) +
             kron(up,zop,zop,dn) - kron(dn,zop,zop,up)
    return im/(2*a)*(hop + r*wilson)
end
# main7: overlap study vs. chain length at a FIXED lattice spacing
# a = 1/(l+1) (set by the final length l), using the spacing-parameterized
# Hnn2/Hss2 Hamiltonians. Writes a text log and a CSV.
# args layout: [selector, l0, l, r, m0, g0, ϵ0, D, DMAX].
function main7(args)
    l0 = parse(Int,args[2])
    l = parse(Int,args[3])
    r = parse(Float64,args[4])
    m0 = parse(Float64,args[5])
    g0 = parse(Float64,args[6])
    ϵ0 = parse(Float64,args[7])
    D = parse(Int,args[8])
    DMAX = parse(Int,args[9])
    local inners = Array{Float64,1}()
    t = time()
    # spacing fixed by the target length l, not the current length Le
    local a = 1.0/(l+1)
    local DF_inners = DataFrame(Length = Int[],
                        ϵSmall = Float64[],
                        BD_Small = Int[],
                        ϵLarge = Float64[],
                        BD_Large = Int[],
                        InnerProduct = Float64[],
                        time = Float64[])
    local SmallGroundStates,ϵ_Small = DMRG(NNHandSSHtoMPO(Hnn2(r,a),Hss2(m0,g0,r,a),l0),D=D,ϵ=ϵ0,maxiteration=3,DMax=DMAX)
    open("Fixed-Gross-Neveu_r=$(r)_m0=$(m0)_g0=$(g0)_l=$(l).txt","w") do io
        for Le in l0+1:1:l
            t1 = time()
            local BigGroundStates,ϵ_Large = DMRG(NNHandSSHtoMPO(Hnn2(r,a),Hss2(m0,g0,r,a),Le),D=SmallGroundStates.BD,ϵ=ϵ0,maxiteration=3,DMax=DMAX)
            push!(inners,UniformInnerProductOfMPS(SmallGroundStates,BigGroundStates))
            write(io,"$(Le) \t $(round(inners[end],digits=5)) \t $(round(time()-t))\n")
            push!(DF_inners,(Le,ϵ_Small,SmallGroundStates.BD,ϵ_Large,BigGroundStates.BD,inners[end],round(time()-t1,digits=2)))
            # grown state becomes the reference for the next length
            SmallGroundStates = deepcopy(BigGroundStates)
            ϵ_Small = deepcopy(ϵ_Large)
        end
        CSV.write("Fixed-Gross-Neveu_r=$(r)_m0=$(m0)_g0=$(g0)_l=$(l).csv",DF_inners)
        write(io,"finished computation in $(time()-t).\n")
    end
end
# Command-line entry point. ARGS[1] selects the study driver; the remaining
# arguments are forwarded to the chosen driver (layouts listed below).
function main()
    # selector -> zero-argument runner
    runners = Dict{Int,Function}(
        1 => () -> main1(ARGS), # l0,l,r,m0,g0,n,ϵ0,D,DMAX: overlaps vs. length
        2 => () -> main2(),     # l,r,m0,mf,dm,g0: overlaps vs. linearly swept mass
        3 => () -> main3(ARGS), # l,r,m0,rm,n,g0,ϵ0,D,DMAX: overlaps vs. geometric mass
        4 => () -> main4(ARGS), # l,r,m0,rm,n,g0,ϵg,D,DMAX: two-point ψ1ψ̄j
        5 => () -> main5(ARGS), # l,r,m0,rm,n,g0,ϵg,D,DMAX: two-point ψ̄1ψj
        6 => () -> main6(ARGS), # l,r,m0,κ,n,g0,ϵg,D,DMAX: correlation length + overlaps
        7 => () -> main7(ARGS), # l0,l,r,m0,g0,ϵ0,D,DMAX: fixed lattice spacing
    )
    selector = parse(Int,ARGS[1])
    runner = get(runners, selector, nothing)
    # unknown selectors fall through silently, as before
    runner === nothing ? nothing : runner()
end
# Run main() only when this file is executed as a script (not when it is
# include()d from another file or loaded in the REPL).
if basename(PROGRAM_FILE) == basename(@__FILE__)
    main()
end
| {
"alphanum_fraction": 0.535748839,
"author": null,
"avg_line_length": 40.375,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "b6831efa2e90b005c9ba80ef7bf56b3a9b517497",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "17e002e482706ad91e37bfa88e1ad7c76e13237c",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "CNOT/DMRG",
"max_forks_repo_path": "example/Gross-Neveu.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "17e002e482706ad91e37bfa88e1ad7c76e13237c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "CNOT/DMRG",
"max_issues_repo_path": "example/Gross-Neveu.jl",
"max_line_length": 182,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "17e002e482706ad91e37bfa88e1ad7c76e13237c",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "CNOT/DMRG",
"max_stars_repo_path": "example/Gross-Neveu.jl",
"max_stars_repo_stars_event_max_datetime": "2019-11-19T16:42:44.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-11-19T16:42:44.000Z",
"num_tokens": 7614,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 20672
} |
from flask import Blueprint, render_template, redirect, url_for, request, flash, abort, make_response, Response
import os
from .import db
from .models import Iron
from .models import User
from werkzeug.utils import secure_filename
from flask_login import login_required, current_user
import pdfkit
import numpy as np
from PIL import Image
from .feature_extractor import FeatureExtractor
from datetime import datetime
from pathlib import Path
main = Blueprint('main', __name__)
# Absolute directory of this package; used to build upload target paths.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): APP_ROOT2 duplicates APP_ROOT and is not used in this file —
# confirm it is not referenced elsewhere before removing.
APP_ROOT2 = os.path.dirname(os.path.abspath(__file__))
# Load precomputed image features once at import time for the /filter search.
fe = FeatureExtractor()
features = []
img_paths = []
for feature_path in Path("project/static/feature").glob("*.npy"):
    features.append(np.load(feature_path))
    # each .npy feature file corresponds to a same-stem .jpg under f-images
    img_paths.append(Path("/static/f-images") / (feature_path.stem + ".jpg"))
features = np.array(features)
@main.route("/filter", methods = ["GET", "POST"])
def filter():
if request.method == "POST":
file = request.files["query_img"]
# Save query image
img = Image.open(file.stream) # PIL image
uploaded_img_path = "project/static/uploaded/" + datetime.now().isoformat().replace(":", ".") + "_" + file.filename
img.save(uploaded_img_path)
image_f = "static/uploaded/" + datetime.now().isoformat().replace(":", ".") + "_" + file.filename
img_f = str(image_f)
# Run search
query = fe.extract(img)
dists = np.linalg.norm(features-query, axis=1) # L2 distances to features
ids = np.argsort(dists)[:30] # Top 30 results
scores = [(dists[id], img_paths[id]) for id in ids]
return render_template("filter.html", query_path=uploaded_img_path, scores=scores, image_f = img_f)
else:
return render_template("filter.html")
@main.route('/dsng')
def dsgn():
    """List irons, optionally filtered by the `q` query parameter on nombre."""
    term = request.args.get('q')
    irons = Iron.query.filter(Iron.nombre.contains(term)) if term else Iron.query.all()
    return render_template('irons.html', irons=irons)
@main.route('/')
def index():
    """Home page: list irons, optionally filtered by `q` against nombre."""
    term = request.args.get('q')
    irons = Iron.query.filter(Iron.nombre.contains(term)) if term else Iron.query.all()
    return render_template('irons.html', irons=irons)
@main.route('/profile')
@login_required
def profile():
    """Render the logged-in user's profile page."""
    return render_template('profile.html', name=current_user.name)
@main.route('/new')
@login_required
def new_iron():
    """Show the registration form for a new iron."""
    return render_template('create_iron.html')
@main.route('/search')
def search():
    """Render the iron listing template (search UI)."""
    return render_template('irons.html')
def _save_uploads(files, target_dir, prefix, iron_id):
    """Save every uploaded file as <prefix><iron_id>.jpg inside target_dir,
    creating the directory if needed. Note: with multiple files in one
    request every file writes the same name, so only the last one survives
    (same behavior as the original duplicated loops)."""
    # Fix: the old code printed "Couldn't create directory" when the
    # directory already EXISTED; makedirs(exist_ok=True) is the correct idiom.
    os.makedirs(target_dir, exist_ok=True)
    for upload in files:
        destination = "/".join([target_dir, "{}{}.jpg".format(prefix, iron_id)])
        upload.save(destination)


@main.route('/new', methods = ['POST'])
@login_required
def new_iron_post():
    """Create an Iron record from the submitted form, then store its photo
    and the owner's ID-card image under static/ferretes and static/cedulas."""
    nombre = request.form.get('nombre')
    cedula = request.form.get('cedula')
    domicilio = request.form.get('domicilio')
    correo = request.form.get('correo')
    telefono = request.form.get('telefono')
    iron = Iron(nombre=nombre, cedula=cedula, domicilio=domicilio,
                user_id=current_user.id, correo=correo, telefono=telefono)
    # commit first so iron.id exists for the image filenames below
    db.session.add(iron)
    db.session.commit()
    _save_uploads(request.files.getlist("file"),
                  os.path.join(APP_ROOT, 'static/ferretes'),
                  "ferrete", iron.id)
    _save_uploads(request.files.getlist("cedula_img"),
                  os.path.join(APP_ROOT, 'static/cedulas'),
                  "cedula", iron.id)
    return redirect(url_for('main.irons'))
@main.route('/irons')
def irons():
    """List all irons, optionally filtered by `q` against nombre."""
    term = request.args.get('q')
    if term:
        matches = Iron.query.filter(Iron.nombre.contains(term))
    else:
        matches = Iron.query.all()
    return render_template('irons.html', irons=matches)
@main.route("/irons/<int:iron_id>/update", methods=['GET', 'POST'])
@login_required
def update_iron(iron_id):
iron = Iron.query.get_or_404(iron_id)
if request.method == "POST":
iron.nombre = request.form['nombre']
iron.cedula = request.form['cedula']
iron.domicilio = request.form['domicilio']
db.session.commit()
flash('Your post has been updated!')
return redirect(url_for('main.irons'))
return render_template('update_iron.html', iron=iron)
@main.route("/irons/<int:iron_id>", methods=['GET'])
@login_required
def show_iron(iron_id):
iron = Iron.query.get_or_404(iron_id)
if request.method == "POST":
iron.nombre = request.form['nombre']
iron.cedula = request.form['cedula']
iron.domicilio = request.form['domicilio']
url = request.base_url
qr_url = "{{qrcode(\'%s\')}}"%(url)
converted_id = str(iron.id)
x = "/static/ferretes/ferrete" + converted_id +".jpg"
y = "/static/cedulas/cedula" + converted_id +".jpg"
return render_template('iron.html', iron=iron, qr = qr_url, url = url, img = x, cedula = y)
@main.route("/irons/<int:iron_id>/pdf", methods=['GET'])
@login_required
def pdf_iron(iron_id):
iron = Iron.query.get_or_404(iron_id)
url = request.base_url
render = render_template('pdf-doc.html', iron=iron, url = url)
pdf = pdfkit.from_string(render, False)
url = request.base_url
qr_url = "{{qrcode(\'%s\')}}"%(url)
print(url)
response = make_response(pdf)
response.headers['Content-Type'] = 'application/pdf'
response.headers['Content-Dis['] = 'application/pdf'
return response
@main.route('/irons/<int:iron_id>/img')
def get_img(iron_id):
    """Serve the raw image blob stored on the Iron row, or a 404 message."""
    record = Iron.query.filter_by(id=iron_id).first()
    if not record:
        return 'Img Not Found!', 404
    return Response(record.img, mimetype=record.mimetype)
@main.route('/act')
def act():
    """Re-run the offline feature-extraction script and render act.html.

    NOTE(review): exec() of a script file on every request is slow and
    dangerous if the file can be modified; consider importing a function
    from project/offline.py instead.
    """
    print("Running")
    # Context manager closes the file; the old exec(open(...).read()) leaked
    # the file handle returned by open().
    with open("project/offline.py") as script:
        exec(script.read())
    return render_template('act.html')
| {
"alphanum_fraction": 0.6687657431,
"author": null,
"avg_line_length": 26.2479338843,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ebf3e37f1be4d23375c952be758df52d2cbcd0e5",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "604fb312b281b70b0e286907f2eb26472eeb8e2a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hammadyasir/flask-docker-ec2",
"max_forks_repo_path": "flaskapp-docker/flaskapp/main.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "604fb312b281b70b0e286907f2eb26472eeb8e2a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hammadyasir/flask-docker-ec2",
"max_issues_repo_path": "flaskapp-docker/flaskapp/main.py",
"max_line_length": 138,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "604fb312b281b70b0e286907f2eb26472eeb8e2a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hammadyasir/flask-docker-ec2",
"max_stars_repo_path": "flaskapp-docker/flaskapp/main.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1659,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6352
} |
c     =======================================================
      subroutine src2(maxmx,maxmy,meqn,mbc,mx,my,xlower,ylower,
     &                dx,dy,q,maux,aux,t,dt,Rsphere)
c     =======================================================
c
c
c     # Compute source term for Rossby-Haurwitz wave.
c     # The source term models the Coriolis force using a 4-stage RK method
c     # and the projection of the velocity components to the tangent plane.
c
c     # q(2:4,i,j) hold the three Cartesian momentum components; they are
c     # projected onto the sphere's tangent plane both before and after
c     # the Coriolis update below.
c     # aux(14:16,i,j) are read as the components of the outward radial
c     # unit vector at the cell centre -- TODO confirm against setaux.
c     # NOTE(review): maxmx, maxmy, mbc and t are received but never used.
c
      implicit double precision (a-h,o-z)
c     # Arrays are declared without ghost cells; only the interior
c     # (1:mx,1:my) is updated.
      dimension q(meqn, 1:mx, 1:my)
      dimension aux(maux, 1:mx, 1:my)
c     # RK(s,m): stage-s increment of momentum component m (m = 1..3).
      double precision RK(4,3)
cf2py integer intent(in) maxmx
cf2py integer intent(in) maxmy
cf2py integer optional, intent(in) meqn
cf2py integer intent(in) mbc
cf2py integer intent(in) mx
cf2py integer intent(in) my
cf2py double precision intent(in) xlower
cf2py double precision intent(in) ylower
cf2py double precision intent(in) dx
cf2py double precision intent(in) dy
cf2py intent(in,out) q
cf2py integer optional, intent(in) maux
cf2py intent(in) aux
cf2py double precision intent(in) t
cf2py double precision intent(in) dt
cf2py double precision intent(in) Rsphere
c     # Parameters
c     # NOTE(review): a (Earth radius) and Omega (rotation rate) are set
c     # but never used; only df enters the Coriolis term below.
      a = 6.37122d6
      Omega = 7.292d-5
      df=12.600576e0
c     # project momentum components of q onto tangent plane:
      do i=1,mx
        do j=1, my
          erx = aux(14,i,j)
          ery = aux(15,i,j)
          erz = aux(16,i,j)
c         # qn = (q . er) is the radial component of the momentum;
c         # subtracting qn*er leaves only the tangential part.
          qn = erx*q(2,i,j) + ery*q(3,i,j) + erz*q(4,i,j)
          q(2,i,j) = q(2,i,j) - qn*erx
          q(3,i,j) = q(3,i,j) - qn*ery
          q(4,i,j) = q(4,i,j) - qn*erz
        enddo
      enddo
c     # calculate Coriolis term
      do i=1, mx
        xc = xlower + (i-0.5d0)*dx
        do j=1, my
          yc = ylower + (j-0.5d0)*dy
c
c         # map the cell centre to Cartesian coordinates on the sphere;
c         # (erx,ery,erz) is recomputed from the map here.
          call mapc2p(xc,yc,xp,yp,zp,Rsphere)
          erx = xp
          ery = yp
          erz = zp
c
c         # NOTE(review): theta and phi computed below are never used
c         # (dead code); theta is also left undefined when rad <= 1d-4
c         # and yp <= 0.
          rad = dsqrt(xp**2. + yp**2.)
          if (rad.gt.1.d-4) then
            theta = dacos(xp/rad)
          elseif (yp.gt.0.d0) then
            theta = 0.d0
          endif
          if (yp.lt.0.d0) then
            theta = -theta
          endif
c         # compute phi, at north pole: pi/2 at south pool: -pi/2
          if (zp.gt. 0.d0) then
            phi = dacos(rad/Rsphere)
          else
            phi = - dacos(rad/Rsphere)
          endif
c
c         # Coriolis parameter, proportional to the z-component of er.
          fcor = df*erz
c         # Each RK increment is dt*fcor*(er x (hu,hv,hw)), a pure
c         # rotation of the momentum about er.
c         stage 1
          hu = q(2,i,j)
          hv = q(3,i,j)
          hw = q(4,i,j)
          RK(1,1) = fcor*dt*(erz*hv-ery*hw)
          RK(1,2) = dt*fcor*(erx*hw-erz*hu)
          RK(1,3) = dt*fcor*(ery*hu-erx*hv)
c         stage 2
          hu = q(2,i,j) + 0.5d0*RK(1,1)
          hv = q(3,i,j) + 0.5d0*RK(1,2)
          hw = q(4,i,j) + 0.5d0*RK(1,3)
          RK(2,1) = fcor*dt*(erz*hv-ery*hw)
          RK(2,2) = dt*fcor*(erx*hw-erz*hu)
          RK(2,3) = dt*fcor*(ery*hu-erx*hv)
c         stage 3
          hu = q(2,i,j) + 0.5d0*RK(2,1)
          hv = q(3,i,j) + 0.5d0*RK(2,2)
          hw = q(4,i,j) + 0.5d0*RK(2,3)
          RK(3,1) = fcor*dt*(erz*hv-ery*hw)
          RK(3,2) = dt*fcor*(erx*hw-erz*hu)
          RK(3,3) = dt*fcor*(ery*hu-erx*hv)
c         stage 4
c         # NOTE(review): classical RK4 evaluates stage 4 with the FULL
c         # previous increment (hu = q + RK(3,1)); the half step used
c         # here deviates from that -- verify it is intentional.
          hu = q(2,i,j) + 0.5d0*RK(3,1)
          hv = q(3,i,j) + 0.5d0*RK(3,2)
          hw = q(4,i,j) + 0.5d0*RK(3,3)
          RK(4,1) = fcor*dt*(erz*hv-ery*hw)
          RK(4,2) = dt*fcor*(erx*hw-erz*hu)
          RK(4,3) = dt*fcor*(ery*hu-erx*hv)
c         # RK4 combination (k1 + 2 k2 + 2 k3 + k4)/6 applied to the
c         # momentum equations only (m = 2..meqn; RK is indexed m-1, so
c         # this assumes meqn = 4).
          do m=2,meqn
            q(m,i,j) = q(m,i,j)
     &           + (RK(1,m-1) + 2.d0*RK(2,m-1)+
     &           2.d0*RK(3,m-1) + RK(4,m-1))/6.d0
          enddo
        enddo
      enddo
c     # project momentum components of q onto tangent plane:
c     # (repeat of the projection above, removing any radial component
c     # introduced by the discrete update)
      do i=1,mx
        do j=1, my
          erx = aux(14,i,j)
          ery = aux(15,i,j)
          erz = aux(16,i,j)
          qn = erx*q(2,i,j) + ery*q(3,i,j) + erz*q(4,i,j)
          q(2,i,j) = q(2,i,j) - qn*erx
          q(3,i,j) = q(3,i,j) - qn*ery
          q(4,i,j) = q(4,i,j) - qn*erz
        enddo
      enddo
      return
      end
| {
"alphanum_fraction": 0.4527764594,
"author": null,
"avg_line_length": 28.472972973,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "3bf392ad793369b130095976d029bf88e49401f7",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2017-11-15T11:29:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-11-15T11:29:22.000Z",
"max_forks_repo_head_hexsha": "15ad4fb972e7540bde1670f7d924ae16e590237d",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "ketch/pyclaw",
"max_forks_repo_path": "apps/shallow-sphere/src2.f",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "15ad4fb972e7540bde1670f7d924ae16e590237d",
"max_issues_repo_issues_event_max_datetime": "2017-03-07T22:54:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-08-31T07:01:02.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "ketch/pyclaw",
"max_issues_repo_path": "apps/shallow-sphere/src2.f",
"max_line_length": 75,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "15ad4fb972e7540bde1670f7d924ae16e590237d",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "ketch/pyclaw",
"max_stars_repo_path": "test/shallow_sphere/src2.f",
"max_stars_repo_stars_event_max_datetime": "2017-08-18T19:53:22.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-08-18T19:53:22.000Z",
"num_tokens": 1535,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4214
} |
(* Copyright (c) 2012-2015, Robbert Krebbers. *)
(* This file is distributed under the terms of the BSD license. *)
(* Abstract interface of C implementation environments: byte sizes,
alignments, and struct field layout of types.  The [bit_] variants below
are derived by scaling with [char_bits] bits per byte. *)
Require Export types integer_operations.
Local Open Scope ctype_scope.
Local Unset Elimination Schemes.

(* [Env K] extends an integer environment [IntEnv K] with layout data:
- [size_of Γ τ]: size of type [τ] in bytes,
- [align_of Γ τ]: alignment requirement of [τ] in bytes,
- [field_sizes Γ τs]: padded byte sizes of struct fields [τs],
- [alloc_can_fail]: whether memory allocation may fail. *)
Class Env (K : iType) : iType := {
  env_type_env :> IntEnv K;
  size_of : env K → type K → nat;
  align_of : env K → type K → nat;
  field_sizes : env K → list (type K) → list nat;
  alloc_can_fail : bool
}.
(* Keep the layout functions opaque during simplification. *)
Arguments size_of _ _ _ _ : simpl never.
Arguments align_of _ _ _ _ : simpl never.
Arguments field_sizes _ _ _ _ : simpl never.

(* Size of the object a pointer of type [τp] points to; pointer targets
that are not plain object types get a nominal size of 1 byte. *)
Definition ptr_size_of `{Env K} (Γ : env K) (τp : ptr_type K) : nat :=
  match τp with TType τ => size_of Γ τ | _ => 1 end.
(* Byte offset of field [i] of a struct with fields [τs]: the sum of the
(padded) sizes of the preceding fields. *)
Definition offset_of `{Env K} (Γ : env K) (τs : list (type K))
  (i : nat) : nat := sum_list $ take i $ field_sizes Γ τs.
(* Bit-level variants of the above (one byte = [char_bits] bits). *)
Definition bit_size_of `{Env K} (Γ : env K)
  (τ : type K) : nat := size_of Γ τ * char_bits.
Definition bit_align_of `{Env K} (Γ : env K)
  (τ : type K) : nat := align_of Γ τ * char_bits.
Definition ptr_bit_size_of `{Env K} (Γ : env K) (τp : ptr_type K) : nat :=
  match τp with TType τ => bit_size_of Γ τ | _ => char_bits end.
Definition field_bit_sizes `{Env K} (Γ : env K)
  (τs : list (type K)) : list nat :=
  (λ sz, sz * char_bits) <$> field_sizes Γ τs.
(* Padding bits at the end of each struct field: its padded size minus
the actual size of its type. *)
Definition field_bit_padding `{Env K}
  (Γ : env K) (τs : list (type K)) : list nat :=
  zip_with (λ sz τ, sz - bit_size_of Γ τ) (field_bit_sizes Γ τs) τs.
Definition bit_offset_of `{Env K}
  (Γ : env K) (τs : list (type K)) (i : nat) : nat :=
  sum_list $ take i $ field_bit_sizes Γ τs.
(* [EnvSpec K] collects the laws a concrete environment must satisfy:
sizes of base/array/struct/union types are consistent with each other,
alignment divides sizes and field offsets, and all layout data is stable
under environment extension ([Γ1 ⊆ Γ2], the [_weaken] laws). *)
Class EnvSpec (K : iType) `{Env K} := {
  int_env_spec :> IntEnvSpec K;
  size_of_ptr_ne_0 Γ τp : size_of Γ (τp.*) ≠ 0;
  size_of_int Γ τi : size_of Γ (intT τi) = rank_size (rank τi);
  size_of_void_ne_0 Γ : size_of Γ voidT ≠ 0;
  size_of_array Γ τ n : size_of Γ (τ.[n]) = n * size_of Γ τ;
  size_of_struct Γ t τs :
    ✓ Γ → Γ !! t = Some τs →
    size_of Γ (structT t) = sum_list (field_sizes Γ τs);
  (* each field fits within its padded size *)
  size_of_fields Γ τs :
    ✓ Γ → Forall2 (λ τ sz, size_of Γ τ ≤ sz) τs (field_sizes Γ τs);
  (* each union variant fits within the union *)
  size_of_union Γ t τs :
    ✓ Γ → Γ !! t = Some τs →
    Forall (λ τ, size_of Γ τ ≤ size_of Γ (unionT t)) τs;
  align_of_array Γ τ n : (align_of Γ τ | align_of Γ (τ.[n]));
  align_of_compound Γ c t τs i τ :
    ✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
    (align_of Γ τ | align_of Γ (compoundT{c} t));
  align_of_divide Γ τ :
    ✓ Γ → ✓{Γ} τ → (align_of Γ τ | size_of Γ τ);
  align_of_offset_of Γ τs i τ :
    ✓ Γ → ✓{Γ}* τs → τs !! i = Some τ → (align_of Γ τ | offset_of Γ τs i);
  size_of_weaken Γ1 Γ2 τ :
    ✓ Γ1 → ✓{Γ1} τ → Γ1 ⊆ Γ2 → size_of Γ1 τ = size_of Γ2 τ;
  align_of_weaken Γ1 Γ2 τ :
    ✓ Γ1 → ✓{Γ1} τ → Γ1 ⊆ Γ2 → align_of Γ1 τ = align_of Γ2 τ;
  fields_sizes_weaken Γ1 Γ2 τs :
    ✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 → field_sizes Γ1 τs = field_sizes Γ2 τs
}.
Section env_spec.
Context `{EnvSpec K}.
Implicit Types τ σ : type K.
Implicit Types τs σs : list (type K).
Implicit Types Γ : env K.

(* Derived byte-level facts about [size_of] and [field_sizes]. *)
Lemma size_of_char Γ si : size_of Γ (intT (IntType si char_rank)) = 1.
Proof. rewrite size_of_int. by apply rank_size_char. Qed.
Lemma field_sizes_length Γ τs : ✓ Γ → length (field_sizes Γ τs) = length τs.
Proof. symmetry. by eapply Forall2_length, size_of_fields. Qed.
Lemma field_sizes_nil Γ : ✓ Γ → field_sizes Γ [] = [].
Proof. intros. apply nil_length_inv. by rewrite field_sizes_length. Qed.
Lemma size_of_union_lookup Γ t τs i τ :
  ✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
  size_of Γ τ ≤ size_of Γ (unionT t).
Proof.
  intros. assert (Forall (λ τ, size_of Γ τ ≤ size_of Γ (unionT t)) τs) as Hτs
    by eauto using size_of_union; rewrite Forall_lookup in Hτs. eauto.
Qed.
Lemma size_of_struct_lookup Γ t τs i τ :
  ✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
  size_of Γ τ ≤ size_of Γ (structT t).
Proof.
  intros HΓ Ht Hτs. erewrite size_of_struct by eauto. clear Ht. revert i Hτs.
  induction (size_of_fields Γ τs HΓ) as [|σ sz σs szs]; intros [|?] ?;
    simplify_equality'; auto with lia.
  transitivity (sum_list szs); eauto with lia.
Qed.
Lemma size_of_union_singleton Γ t τ :
  ✓ Γ → Γ !! t = Some [τ] → size_of Γ τ ≤ size_of Γ (unionT t).
Proof. intros. by apply (size_of_union_lookup Γ t [τ] 0). Qed.
(* Any predicate over sizes is preserved under environment extension. *)
Lemma sizes_of_weaken P Γ1 Γ2 τs :
  ✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 →
  Forall (λ τ', P (size_of Γ1 τ')) τs → Forall (λ τ', P (size_of Γ2 τ')) τs.
Proof.
  induction 4; decompose_Forall_hyps; constructor; simpl;
    erewrite <-1?size_of_weaken by eauto; eauto.
Qed.

(* The same facts lifted to the bit level. *)
Lemma bit_size_of_weaken Γ1 Γ2 τ :
  ✓ Γ1 → ✓{Γ1} τ → Γ1 ⊆ Γ2 → bit_size_of Γ1 τ = bit_size_of Γ2 τ.
Proof. intros. unfold bit_size_of. f_equal. by apply size_of_weaken. Qed.
Lemma bit_size_of_int Γ τi : bit_size_of Γ (intT τi) = int_width τi.
Proof. unfold bit_size_of. by rewrite size_of_int. Qed.
Lemma bit_size_of_char Γ si :
  bit_size_of Γ (intT (IntType si char_rank)) = char_bits.
Proof. rewrite bit_size_of_int. by apply int_width_char. Qed.
Lemma bit_size_of_int_same_kind Γ τi1 τi2 :
  rank τi1 = rank τi2 → bit_size_of Γ (intT τi1) = bit_size_of Γ (intT τi2).
Proof.
  destruct τi1, τi2; intros; simplify_equality'. by rewrite !bit_size_of_int.
Qed.
Lemma bit_size_of_array Γ τ n : bit_size_of Γ (τ.[n]) = n * bit_size_of Γ τ.
Proof. unfold bit_size_of. by rewrite !size_of_array, Nat.mul_assoc. Qed.
Lemma bit_size_of_struct Γ t τs :
  ✓ Γ → Γ !! t = Some τs →
  bit_size_of Γ (structT t) = sum_list (field_bit_sizes Γ τs).
Proof.
  unfold bit_size_of, field_bit_sizes. intros.
  erewrite size_of_struct by eauto.
  induction (field_sizes Γ τs); csimpl; auto with lia.
Qed.
Lemma bit_size_of_fields Γ τs :
  ✓ Γ → Forall2 (λ τ sz, bit_size_of Γ τ ≤ sz) τs (field_bit_sizes Γ τs).
Proof.
  intros HΓ. unfold bit_size_of, field_bit_sizes.
  induction (size_of_fields Γ τs HΓ);
    simpl; constructor; auto using Nat.mul_le_mono_nonneg_r with lia.
Qed.
Lemma bit_size_of_union Γ t τs :
  ✓ Γ → Γ !! t = Some τs →
  Forall (λ τ, bit_size_of Γ τ ≤ bit_size_of Γ (unionT t)) τs.
Proof.
  intros ? Hτs. apply size_of_union in Hτs; auto. unfold bit_size_of.
  induction Hτs; constructor; auto using Nat.mul_le_mono_nonneg_r with lia.
Qed.
Lemma bit_size_of_union_lookup Γ t τs i τ :
  ✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
  bit_size_of Γ τ ≤ bit_size_of Γ (unionT t).
Proof.
  intros. unfold bit_size_of. apply Nat.mul_le_mono_nonneg_r;
    eauto using size_of_union_lookup with lia.
Qed.
Lemma bit_size_of_union_singleton Γ t τ :
  ✓ Γ → Γ !! t = Some [τ] → bit_size_of Γ τ ≤ bit_size_of Γ (unionT t).
Proof. intros. by apply (bit_size_of_union_lookup Γ t [τ] 0). Qed.
Lemma ptr_bit_size_of_alt Γ τp :
  ptr_bit_size_of Γ τp = ptr_size_of Γ τp * char_bits.
Proof. destruct τp; simpl; unfold bit_size_of; lia. Qed.
Lemma field_bit_sizes_weaken Γ1 Γ2 τs :
  ✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 → field_bit_sizes Γ1 τs = field_bit_sizes Γ2 τs.
Proof. unfold field_bit_sizes. auto using fields_sizes_weaken with f_equal. Qed.
Lemma field_bit_sizes_length Γ τs :
  ✓ Γ → length (field_bit_sizes Γ τs) = length τs.
Proof. symmetry. by eapply Forall2_length, bit_size_of_fields. Qed.
Lemma field_bit_sizes_nil Γ : ✓ Γ → field_bit_sizes Γ [] = [].
Proof. intros. apply nil_length_inv. by rewrite field_bit_sizes_length. Qed.
Lemma field_bit_padding_weaken Γ1 Γ2 τs :
  ✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 →
  field_bit_padding Γ1 τs = field_bit_padding Γ2 τs.
Proof.
  intros HΓ1 Hτs ?. unfold field_bit_padding.
  erewrite <-(field_bit_sizes_weaken Γ1 Γ2) by eauto.
  induction (bit_size_of_fields _ τs HΓ1); decompose_Forall_hyps;
    auto using bit_size_of_weaken with f_equal.
Qed.
Lemma field_bit_padding_length Γ τs :
  ✓ Γ → length (field_bit_padding Γ τs) = length τs.
Proof.
  intros. unfold field_bit_padding.
  rewrite zip_with_length, field_bit_sizes_length by done; lia.
Qed.

(* Facts about field offsets, at the bit level. *)
Lemma bit_offset_of_weaken Γ1 Γ2 τs i :
  ✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 →
  bit_offset_of Γ1 τs i = bit_offset_of Γ2 τs i.
Proof.
  unfold bit_offset_of. eauto using field_bit_sizes_weaken with f_equal.
Qed.
Lemma bit_offset_of_alt Γ τs i :
  bit_offset_of Γ τs i = offset_of Γ τs i * char_bits.
Proof.
  unfold bit_offset_of, offset_of, field_bit_sizes.
  revert i. induction (field_sizes Γ τs) as [|?? IH];
    intros [|i]; simpl; auto with lia.
  by rewrite IH, Nat.mul_add_distr_r.
Qed.
(* Earlier fields end no later than later fields begin... *)
Lemma bit_offset_of_lt Γ τs i j σ :
  ✓ Γ → τs !! i = Some σ → i < j →
  bit_offset_of Γ τs i + bit_size_of Γ σ ≤ bit_offset_of Γ τs j.
Proof.
  intros HΓ. revert i j σ. unfold bit_offset_of.
  induction (bit_size_of_fields _ τs HΓ) as [|τ sz τs szs ?? IH];
    intros [|i] [|j] σ ??; simplify_equality'; try lia.
  specialize (IH i j σ). intuition lia.
Qed.
(* ...and every field lies within the enclosing struct. *)
Lemma bit_offset_of_size Γ t τs i σ :
  ✓ Γ → Γ !! t = Some τs → τs !! i = Some σ →
  bit_offset_of Γ τs i + bit_size_of Γ σ ≤ bit_size_of Γ (structT t).
Proof.
  intros HΓ Ht. erewrite bit_size_of_struct by eauto; clear Ht.
  revert i σ. unfold bit_offset_of. induction (bit_size_of_fields _ τs HΓ)
    as [|τ sz τs szs ?? IH]; intros [|i] σ ?; simplify_equality'; [lia|].
  specialize (IH i σ). intuition lia.
Qed.

(* Alignment facts. *)
Lemma align_of_char Γ si : ✓ Γ → align_of Γ (intT (IntType si char_rank)) = 1.
Proof.
  intros. apply Nat.divide_1_r; rewrite <-(size_of_char Γ si).
  apply align_of_divide; repeat constructor; auto.
Qed.
Lemma bit_align_of_array Γ τ n : (bit_align_of Γ τ | bit_align_of Γ (τ.[n])).
Proof. apply Nat.mul_divide_mono_r, align_of_array. Qed.
Lemma bit_align_of_compound Γ c t τs i τ :
  ✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
  (bit_align_of Γ τ | bit_align_of Γ (compoundT{c} t)).
Proof. eauto using Nat.mul_divide_mono_r, align_of_compound. Qed.
Lemma bit_align_of_divide Γ τ :
  ✓ Γ → ✓{Γ} τ → (bit_align_of Γ τ | bit_size_of Γ τ).
Proof. eauto using Nat.mul_divide_mono_r, align_of_divide. Qed.
Lemma bit_align_of_offset_of Γ τs i τ :
  ✓ Γ → ✓{Γ}* τs → τs !! i = Some τ →
  (bit_align_of Γ τ | bit_offset_of Γ τs i).
Proof.
  rewrite bit_offset_of_alt.
  eauto using Nat.mul_divide_mono_r, align_of_offset_of.
Qed.
Lemma bit_align_of_weaken Γ1 Γ2 τ :
  ✓ Γ1 → ✓{Γ1} τ → Γ1 ⊆ Γ2 → bit_align_of Γ1 τ = bit_align_of Γ2 τ.
Proof. unfold bit_align_of; auto using align_of_weaken, f_equal. Qed.

(* Sizes and alignments of well-formed types are non-zero/positive. *)
Lemma size_of_base_ne_0 Γ τb : size_of Γ (baseT τb) ≠ 0.
Proof.
  destruct τb; auto using size_of_void_ne_0, size_of_ptr_ne_0.
  rewrite size_of_int. apply rank_size_ne_0.
Qed.
Lemma bit_size_of_base_ne_0 Γ τb : bit_size_of Γ (baseT τb) ≠ 0.
Proof. apply Nat.neq_mul_0. auto using char_bits_ne_0, size_of_base_ne_0. Qed.
#[global] Instance: ∀ Γ τb, PropHolds (size_of Γ (baseT τb) ≠ 0).
Proof. apply size_of_base_ne_0. Qed.
#[global] Instance: ∀ Γ τb, PropHolds (bit_size_of Γ (baseT τb) ≠ 0).
Proof. apply bit_size_of_base_ne_0. Qed.
Lemma size_of_ne_0 Γ τ : ✓ Γ → ✓{Γ} τ → size_of Γ τ ≠ 0.
Proof.
  intros HΓ. revert τ. refine (type_env_ind _ HΓ _ _ _ _).
  * auto using size_of_base_ne_0.
  * intros. rewrite size_of_array. by apply Nat.neq_mul_0.
  * intros [] t τs Ht Hτs IH Hlen.
    + erewrite size_of_struct by eauto. clear Ht.
      destruct (size_of_fields Γ τs HΓ); decompose_Forall_hyps; auto with lia.
    + apply size_of_union in Ht; auto.
      destruct Ht; decompose_Forall_hyps; auto with lia.
Qed.
Lemma align_of_ne_0 Γ τ : ✓ Γ → ✓{Γ} τ → align_of Γ τ ≠ 0.
Proof. eauto using Nat_divide_ne_0, size_of_ne_0, align_of_divide. Qed.
Lemma size_of_pos Γ τ : ✓ Γ → ✓{Γ} τ → 0 < size_of Γ τ.
Proof. intros. by apply Nat.neq_0_lt_0, size_of_ne_0. Qed.
Lemma bit_size_of_ne_0 Γ τ : ✓ Γ → ✓{Γ} τ → bit_size_of Γ τ ≠ 0.
Proof. intros. apply Nat.neq_mul_0. auto using char_bits_ne_0,size_of_ne_0. Qed.
Lemma bit_size_of_pos Γ τ : ✓ Γ → ✓{Γ} τ → 0 < bit_size_of Γ τ.
Proof. intros. by apply Nat.neq_0_lt_0, bit_size_of_ne_0. Qed.
End env_spec.
| {
"alphanum_fraction": null,
"author": "robbertkrebbers",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/robbertkrebbers-ch2o/ch2o-1afb3f615db053b741341e9bfd1d5c65bddea641/types/type_environment.v",
"reason": null,
"repo": "ch2o",
"save_path": "github-repos/coq/robbertkrebbers-ch2o",
"sha": "1afb3f615db053b741341e9bfd1d5c65bddea641",
"size": null
} |
!  *********************************************************************
!  *                                                                   *
!  *                         function dcvaa3                           *
!  *                                                                   *
!  *********************************************************************
!  Double Precision Version 1.5
!  Written by Gordon A. Fenton, TUNS, July 17, 1992
!  Latest Update: Jul 2, 2003
!
!  PURPOSE  returns the covariance between two 3-D local averages of equal
!           volume. Used by LAS3G.
!
!  This function evaluates the covariance between two local averages in
!  3-dimensional space. The local averages are assumed to be of equal
!  size, Dx x Dy x Dz, and separated in space by the lags Tx = C1*Dx,
!  Ty = C2*Dy and Tz = C3*Dz
!
!  The covariance is obtained by a 16-pt Gaussian quadrature of a
!  6-D integral collapsed (by assuming the covariance function to be
!  quadrant symmetric) to a 3-D integral.
!  NOTE(review): since version 1.5 the code below actually uses a
!  20-point rule (NG = 20); the "16-pt" above is stale.
!
!  NOTE: if the covariance function is not quadrant symmetric, this
!        routine will return erroneous results. Quadrant symmetry means
!        that cov(X,Y,Z) = cov(-X,Y,Z) = cov(X,-Y,Z), etc, where
!        cov(X,Y,Z) is a function returning the covariance between
!        points separated by lag (X,Y,Z), as discussed next.
!
!  The covariance function is referenced herein using a call of the
!  form
!
!          V = cov( X, Y, Z )
!
!  where X, Y, and Z are the lag distances between the points in the
!  field.
!  Parameters, such as var, pb, dthx, dthy, and dthz are passed to the
!  covariance function via the common block 'dparam'.
!
!  Arguments to this function are as follows
!
!    cov   external function provided by the user. On each invocation,
!          this routine calls cov up to 1000 times (= 8*5^3).
!          NOTE(review): with NG = 20 the non-overlapping branch below
!          makes up to 4*NG**3 calls per invocation; the count above is
!          stale.
!
!    Dx    x-dimension of each local average. (input)
!
!    Dy    y-dimension of each local average. (input)
!
!    Dz    z-dimension of each local average. (input)
!
!    C1    x-direction distance between local average centers is C1*Dx.
!          (input)
!
!    C2    y-direction distance between local average centers is C2*Dy.
!          (input)
!
!    C3    z-direction distance between local average centers is C3*Dz.
!          (input)
!
!  REVISION HISTORY:
!  1.1   now including a Gaussian Quadrature integration option as an
!        alternative to the variance function approach to evaluate
!        covariances between local averages. (Jun 16/00)
!  1.2   use Gauss quadrature unless intervals overlap, eliminated lvarfn
!        (Apr 5/01)
!  1.3   now use Gauss quadrature for everything. (May 8/01)
!  1.4   now use 16 point Gauss quadrature (Mar 21/03)
!  1.5   simplification below only correct for volumes completely overlapping.
!        Now using 20-point Gauss quadrature (Jul 2/03)
!---------------------------------------------------------------------------
      real*8 function dcvaa3( cov, Dx, Dy, Dz, C1, C2, C3 )
!     NG is the quadrature order (implicitly integer under default rules).
      parameter (NG = 20)
      implicit real*8 (a-h,o-z)
!     w/z: 20-point Gauss-Legendre weights and abscissae on [-1,1].
      dimension w(NG), z(NG)
      external cov
!     parameters forwarded to cov (set by the caller via this block).
      common/dparam/ var, pb, dthx, dthy, dthz
      data zero/0.d0/, half/0.5d0/, one/1.d0/, two/2.d0/
!     eighth = 1/8 and sxt4th = 1/64 are the constant Jacobian factors of
!     the two quadrature branches below.
      data eighth/0.125d0/, sxt4th/0.015625d0/
      data w/0.017614007139152118312d0, 0.040601429800386941331d0, &
             0.062672048334109063570d0, 0.083276741576704748725d0, &
             0.101930119817240435037d0, 0.118194531961518417312d0, &
             0.131688638449176626898d0, 0.142096109318382051329d0, &
             0.149172986472603746788d0, 0.152753387130725850698d0, &
             0.152753387130725850698d0, 0.149172986472603746788d0, &
             0.142096109318382051329d0, 0.131688638449176626898d0, &
             0.118194531961518417312d0, 0.101930119817240435037d0, &
             0.083276741576704748725d0, 0.062672048334109063570d0, &
             0.040601429800386941331d0, 0.017614007139152118312d0/
      data z/-.993128599185094924786d0, -.963971927277913791268d0, &
             -.912234428251325905868d0, -.839116971822218823395d0, &
             -.746331906460150792614d0, -.636053680726515025453d0, &
             -.510867001950827098004d0, -.373706088715419560673d0, &
             -.227785851141645078080d0, -.076526521133497333755d0, &
             0.076526521133497333755d0, 0.227785851141645078080d0, &
             0.373706088715419560673d0, 0.510867001950827098004d0, &
             0.636053680726515025453d0, 0.746331906460150792614d0, &
             0.839116971822218823395d0, 0.912234428251325905868d0, &
             0.963971927277913791268d0, 0.993128599185094924786d0/
!     half-dimensions of the averaging volume
      r1 = half*Dx
      r2 = half*Dy
      r3 = half*Dz
!                                       if intervals the same, GQ simplifies
!     Fully-overlapping volumes (zero lag): single 3-D Gauss sum with the
!     triangular weight (1-z) in each direction; cov is called NG**3 times.
      if( (C1.eq.zero) .and. (C2.eq.zero) .and. (C3.eq.zero) ) then
         d1 = zero
         do 30 i = 1, NG
            xi = r1*(one + z(i))
            d2 = zero
            do 20 j = 1, NG
               yj = r2*(one + z(j))
               d3 = zero
               do 10 k = 1, NG
                  zk = r3*(one + z(k))
                  d3 = d3 + w(k)*(one-z(k))*cov(xi,yj,zk)
  10           continue
               d2 = d2 + w(j)*(one-z(j))*d3
  20        continue
            d1 = d1 + w(i)*(one-z(i))*d2
  30     continue
         dcvaa3 = eighth*d1
         return
      endif
!                                       otherwise, partial or non-overlapping
!                                       intervals
!     Shifted limits: in each direction the two sub-integrals are taken
!     over lags (2*C - 1) and (2*C + 1) half-widths apart.
      s1 = two*C1 - one
      s2 = two*C1 + one
      v1 = two*C2 - one
      v2 = two*C2 + one
      u1 = two*C3 - one
      u2 = two*C3 + one
      d1 = zero
      do 60 i = 1, NG
         x1 = r1*(z(i) + s1)
         x2 = r1*(z(i) + s2)
         d21 = zero
         d22 = zero
         do 50 j = 1, NG
            y1 = r2*(z(j) + v1)
            y2 = r2*(z(j) + v2)
            d31 = zero
            d32 = zero
            d33 = zero
            d34 = zero
            do 40 k = 1, NG
               z1 = r3*(z(k) + u1)
               z2 = r3*(z(k) + u2)
!              dp/dm: linear weights (1+z) and (1-z) pairing the two
!              shifted evaluation points in the z-direction.
               dp = one + z(k)
               dm = one - z(k)
               d31 = d31 + w(k)*(dp*cov(x1,y1,z1) + dm*cov(x1,y1,z2))
               d32 = d32 + w(k)*(dp*cov(x1,y2,z1) + dm*cov(x1,y2,z2))
               d33 = d33 + w(k)*(dp*cov(x2,y1,z1) + dm*cov(x2,y1,z2))
               d34 = d34 + w(k)*(dp*cov(x2,y2,z1) + dm*cov(x2,y2,z2))
  40        continue
            dp = one + z(j)
            dm = one - z(j)
            d21 = d21 + w(j)*(dp*d31 + dm*d32)
            d22 = d22 + w(j)*(dp*d33 + dm*d34)
  50     continue
         d1 = d1 + w(i)*((one+z(i))*d21 + (one-z(i))*d22)
  60  continue
      dcvaa3 = sxt4th*d1
      return
      end
| {
"alphanum_fraction": 0.5199188759,
"author": null,
"avg_line_length": 41.3353293413,
"converted": null,
"ext": "f95",
"file": null,
"hexsha": "af46f7bb9f15990bec3d63fde82fe58d6ef31966",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "23be1d3fa3091bcb4ec114a319b402feb5cba7d3",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "leemargetts/PoreFEM",
"max_forks_repo_path": "src/libs/gaf95/dcvaa3.f95",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "23be1d3fa3091bcb4ec114a319b402feb5cba7d3",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "leemargetts/PoreFEM",
"max_issues_repo_path": "src/libs/gaf95/dcvaa3.f95",
"max_line_length": 77,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "23be1d3fa3091bcb4ec114a319b402feb5cba7d3",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "leemargetts/PoreFEM",
"max_stars_repo_path": "src/libs/gaf95/dcvaa3.f95",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2180,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 6903
} |
# Visualize intermediate layer outputs (activations) of a saved convnet.
from keras.models import load_model
import matplotlib.pyplot as plt
from keras import models
import numpy as np
from keras.preprocessing import image
model = load_model('cats_dogs.h5') # load the previously saved convnet model
# Preprocess a single image into a (1, 150, 150, channels) tensor scaled to [0, 1].
img = image.load_img('./cats.jpg', target_size=(150, 150))
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.
layer_outputs = [layer.output for layer in model.layers[:8]] # outputs of the first 8 layers
# Build a Model for visualization: same input as the convnet, one output per layer.
view_model = models.Model(inputs=model.input, outputs=layer_outputs)
# Run the visualization model on the preprocessed image (the original comment
# mentioned the mnist test set, but this script feeds './cats.jpg' above).
views = view_model.predict(img_tensor)
# Visualize every channel of each intermediate activation.
layer_names = []
for layer in model.layers[:8]:
    layer_names.append(layer.name) # remember each layer's name for the plot title
images_per_row = 16 # show 16 channel images per row
for layer_name, layer_view in zip(layer_names, views):
    n_features = layer_view.shape[-1] # number of channels in the feature map
    size = layer_view.shape[1] # feature map has shape (1, size, size, n_features)
    n_rows = n_features // images_per_row # number of grid rows
    display_grid = np.zeros((size * n_rows, images_per_row * size))
    for row in range(n_rows):
        for col in range(images_per_row):
            channel_image = layer_view[0, :, :, row * images_per_row + col]
            # Optional post-processing to make the features look nicer:
            # channel_image -= channel_image.mean()
            # channel_image /= channel_image.std()
            # channel_image *= 64
            # channel_image += 128
            # channel_image = np.clip(channel_image, 0, 255).astype('uint8')
            # Copy this channel into its cell of the display grid.
            display_grid[row * size:(row + 1) * size, col * size:(col + 1) *
                         size] = channel_image
    scale = 1. / size # shrink factor so figure size tracks the grid size
    plt.figure(figsize=(scale * display_grid.shape[1],
                        scale * display_grid.shape[0]))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
plt.show() | {
"alphanum_fraction": 0.6624671916,
"author": null,
"avg_line_length": 33.4210526316,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4c7d1affed73ea50bb56cde2ebe68f4df9c8dfc9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "40cc49733f0051701efa58dccc822ff3bccac041",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Renxiuhu/PythonScripts",
"max_forks_repo_path": "deep learning with python book/9_cnn_view_1.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "40cc49733f0051701efa58dccc822ff3bccac041",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Renxiuhu/PythonScripts",
"max_issues_repo_path": "deep learning with python book/9_cnn_view_1.py",
"max_line_length": 76,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "40cc49733f0051701efa58dccc822ff3bccac041",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Renxiuhu/PythonScripts",
"max_stars_repo_path": "deep learning with python book/9_cnn_view_1.py",
"max_stars_repo_stars_event_max_datetime": "2020-02-24T03:32:31.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-24T03:32:31.000Z",
"num_tokens": 602,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1905
} |
using Gtk, Cairo, ColorTypes
function generategrays(n, screenwidth)
    # Build the pixel colors for n vertical gray bars spanning the screen:
    # one RGB gray per bar, each repeated to cover its share of the width.
    # Gray levels step evenly from black (0x00) up to at most white (0xff).
    barwidth = Int(ceil(screenwidth / n))
    step = Int(floor(0xff / (n - 1)))
    shades = Vector{RGB}()
    for level in 0:step:0xff
        g = level / 255
        append!(shades, fill(RGB(g, g, g), barwidth))
    end
    return shades
end
function drawline(ctx, p1, p2, color, width)
    # Stroke a straight segment from p1 to p2 with the given color and
    # line width.  Source color and line width are context state consumed
    # by stroke(), so setting them before building the path is equivalent.
    set_source(ctx, color)
    set_line_width(ctx, width)
    move_to(ctx, p1.x, p1.y)
    line_to(ctx, p2.x, p2.y)
    stroke(ctx)
end
# Canvas and window hosting the grayscale test pattern.
const can = @GtkCanvas()
const win = GtkWindow(can, "Grayscale bars/Display", 400, 400)
fullscreen(win) # start full screen, then reduce to regular window in 5 seconds.
# Redraw callback: paint four horizontal bands of vertical gray bars
# (8, 16, 32 and 64 levels), alternating ramp direction per band.
draw(can) do widget
    ctx = getgc(can)
    h = height(can)
    w = width(can)
    # top quarter: 8 grays, dark to light
    gpoints = generategrays(8, w)
    for (i, x) in enumerate(0:w-1)
        drawline(ctx, Point(x, 0.25*h), Point(x, 0), gpoints[i], 1)
    end
    # second quarter: 16 grays, reversed (light to dark)
    gpoints = reverse(generategrays(16, w))
    for (i, x) in enumerate(0:w-1)
        drawline(ctx, Point(x, 0.5*h), Point(x, 0.25*h), gpoints[i], 1)
    end
    # third quarter: 32 grays
    gpoints = generategrays(32, w)
    for (i, x) in enumerate(0:w-1)
        drawline(ctx, Point(x, 0.75*h), Point(x, 0.5*h), gpoints[i], 1)
    end
    # bottom quarter: 64 grays, reversed
    gpoints = reverse(generategrays(64, w))
    for (i, x) in enumerate(0:w-1)
        drawline(ctx, Point(x, h), Point(x, 0.75*h), gpoints[i], 1)
    end
end
show(can)
sleep(5)
unfullscreen(win)
# Block the script until the window is destroyed.
const cond = Condition()
endit(w) = notify(cond)
signal_connect(endit, win, :destroy)
wait(cond)
| {
"alphanum_fraction": 0.6156417112,
"author": null,
"avg_line_length": 26.7142857143,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "44c0cbb968c54db94e6841a0c55604b5b27ab8df",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ethansaxenian/RosettaDecode",
"max_forks_repo_path": "lang/Julia/greyscale-bars-display.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ethansaxenian/RosettaDecode",
"max_issues_repo_path": "lang/Julia/greyscale-bars-display.jl",
"max_line_length": 81,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ethansaxenian/RosettaDecode",
"max_stars_repo_path": "lang/Julia/greyscale-bars-display.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 509,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1496
} |
[STATEMENT]
lemma "\<exists>x. f x = g x \<longrightarrow> f = g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>x. f x = g x \<longrightarrow> f = g
[PROOF STEP]
refute [maxsize = 4, expect = none]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>x. f x = g x \<longrightarrow> f = g
[PROOF STEP]
by (auto simp add: ext) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 142,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
[STATEMENT]
lemma mem_val_w32_8_mem_equal:
assumes a1: "mem_equal s1 s2 a"
shows "mem_val_w32 8 a s1 = mem_val_w32 8 a s2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mem_val_w32 8 a s1 = mem_val_w32 8 a s2
[PROOF STEP]
apply (simp add: mem_val_w32_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (let addr' = a AND 68719476732; r0 = mem_val_alt 8 addr' s1; r1 = mem_val_alt 8 (addr' + 1) s1; r2 = mem_val_alt 8 (addr' + 2) s1; r3 = mem_val_alt 8 (addr' + 3) s1 in if r0 = None \<or> r1 = None \<or> r2 = None \<or> r3 = None then None else let byte0 = case r0 of Some v \<Rightarrow> v; byte1 = case r1 of Some v \<Rightarrow> v; byte2 = case r2 of Some v \<Rightarrow> v; byte3 = case r3 of Some v \<Rightarrow> v in Some ((((ucast byte0 << 24) OR (ucast byte1 << 16)) OR (ucast byte2 << 8)) OR ucast byte3)) = (let addr' = a AND 68719476732; r0 = mem_val_alt 8 addr' s2; r1 = mem_val_alt 8 (addr' + 1) s2; r2 = mem_val_alt 8 (addr' + 2) s2; r3 = mem_val_alt 8 (addr' + 3) s2 in if r0 = None \<or> r1 = None \<or> r2 = None \<or> r3 = None then None else let byte0 = case r0 of Some v \<Rightarrow> v; byte1 = case r1 of Some v \<Rightarrow> v; byte2 = case r2 of Some v \<Rightarrow> v; byte3 = case r3 of Some v \<Rightarrow> v in Some ((((ucast byte0 << 24) OR (ucast byte1 << 16)) OR (ucast byte2 << 8)) OR ucast byte3))
[PROOF STEP]
apply (simp add: Let_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (mem_val_alt 8 (a AND 68719476732) s1 = None \<longrightarrow> mem_val_alt 8 (a AND 68719476732) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 1) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 3) s2 = None) \<and> (mem_val_alt 8 ((a AND 68719476732) + 1) s1 = None \<longrightarrow> mem_val_alt 8 (a AND 68719476732) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 1) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 3) s2 = None) \<and> (mem_val_alt 8 ((a AND 68719476732) + 2) s1 = None \<longrightarrow> mem_val_alt 8 (a AND 68719476732) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 1) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 3) s2 = None) \<and> (mem_val_alt 8 ((a AND 68719476732) + 3) s1 = None \<longrightarrow> mem_val_alt 8 (a AND 68719476732) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 1) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 3) s2 = None) \<and> ((\<exists>y. mem_val_alt 8 (a AND 68719476732) s1 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some y) \<longrightarrow> (\<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y) \<and> ((\<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y) \<and> (\<exists>y. 
mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y) \<longrightarrow> (((ucast (case mem_val_alt 8 (a AND 68719476732) s1 of Some v \<Rightarrow> v) << 24) OR (ucast (case mem_val_alt 8 ((a AND 68719476732) + 1) s1 of Some v \<Rightarrow> v) << 16)) OR (ucast (case mem_val_alt 8 ((a AND 68719476732) + 2) s1 of Some v \<Rightarrow> v) << 8)) OR ucast (case mem_val_alt 8 ((a AND 68719476732) + 3) s1 of Some v \<Rightarrow> v) = (((ucast (case mem_val_alt 8 (a AND 68719476732) s2 of Some v \<Rightarrow> v) << 24) OR (ucast (case mem_val_alt 8 ((a AND 68719476732) + 1) s2 of Some v \<Rightarrow> v) << 16)) OR (ucast (case mem_val_alt 8 ((a AND 68719476732) + 2) s2 of Some v \<Rightarrow> v) << 8)) OR ucast (case mem_val_alt 8 ((a AND 68719476732) + 3) s2 of Some v \<Rightarrow> v)))
[PROOF STEP]
using mem_val_alt_8_mem_equal a1
[PROOF STATE]
proof (prove)
using this:
mem_equal ?s1.0 ?s2.0 ?pa \<Longrightarrow> mem_val_alt 8 (?pa AND 68719476732) ?s1.0 = mem_val_alt 8 (?pa AND 68719476732) ?s2.0 \<and> mem_val_alt 8 ((?pa AND 68719476732) + 1) ?s1.0 = mem_val_alt 8 ((?pa AND 68719476732) + 1) ?s2.0 \<and> mem_val_alt 8 ((?pa AND 68719476732) + 2) ?s1.0 = mem_val_alt 8 ((?pa AND 68719476732) + 2) ?s2.0 \<and> mem_val_alt 8 ((?pa AND 68719476732) + 3) ?s1.0 = mem_val_alt 8 ((?pa AND 68719476732) + 3) ?s2.0
mem_equal s1 s2 a
goal (1 subgoal):
1. (mem_val_alt 8 (a AND 68719476732) s1 = None \<longrightarrow> mem_val_alt 8 (a AND 68719476732) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 1) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 3) s2 = None) \<and> (mem_val_alt 8 ((a AND 68719476732) + 1) s1 = None \<longrightarrow> mem_val_alt 8 (a AND 68719476732) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 1) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 3) s2 = None) \<and> (mem_val_alt 8 ((a AND 68719476732) + 2) s1 = None \<longrightarrow> mem_val_alt 8 (a AND 68719476732) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 1) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 3) s2 = None) \<and> (mem_val_alt 8 ((a AND 68719476732) + 3) s1 = None \<longrightarrow> mem_val_alt 8 (a AND 68719476732) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 1) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None \<or> mem_val_alt 8 ((a AND 68719476732) + 3) s2 = None) \<and> ((\<exists>y. mem_val_alt 8 (a AND 68719476732) s1 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some y) \<longrightarrow> (\<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y) \<and> ((\<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y) \<and> (\<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y) \<and> (\<exists>y. 
mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y) \<longrightarrow> (((ucast (case mem_val_alt 8 (a AND 68719476732) s1 of Some v \<Rightarrow> v) << 24) OR (ucast (case mem_val_alt 8 ((a AND 68719476732) + 1) s1 of Some v \<Rightarrow> v) << 16)) OR (ucast (case mem_val_alt 8 ((a AND 68719476732) + 2) s1 of Some v \<Rightarrow> v) << 8)) OR ucast (case mem_val_alt 8 ((a AND 68719476732) + 3) s1 of Some v \<Rightarrow> v) = (((ucast (case mem_val_alt 8 (a AND 68719476732) s2 of Some v \<Rightarrow> v) << 24) OR (ucast (case mem_val_alt 8 ((a AND 68719476732) + 1) s2 of Some v \<Rightarrow> v) << 16)) OR (ucast (case mem_val_alt 8 ((a AND 68719476732) + 2) s2 of Some v \<Rightarrow> v) << 8)) OR ucast (case mem_val_alt 8 ((a AND 68719476732) + 3) s2 of Some v \<Rightarrow> v)))
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (9 subgoals):
1. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
2. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
3. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
4. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
5. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y
6. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y
7. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y
8. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y
9. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
2. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
3. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
4. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y
5. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y
6. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y
7. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y
8. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
2. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
3. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y
4. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y
5. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y
6. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y
7. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. \<And>y ya yb. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = None; mem_val_alt 8 (a AND 68719476732) s2 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yb\<rbrakk> \<Longrightarrow> mem_val_alt 8 ((a AND 68719476732) + 2) s2 = None
2. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y
3. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y
4. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y
5. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y
6. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 (a AND 68719476732) s2 = Some y
2. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y
3. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y
4. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y
5. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some y
2. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y
3. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y
4. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some y
2. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y
3. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>y ya yb yc. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc\<rbrakk> \<Longrightarrow> \<exists>y. mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some y
2. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y ya yb yc yd ye yf yg. \<lbrakk>\<And>s1 s2 pa. mem_equal s1 s2 pa \<Longrightarrow> mem_val_alt 8 (pa AND 68719476732) s1 = mem_val_alt 8 (pa AND 68719476732) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 1) s1 = mem_val_alt 8 ((pa AND 68719476732) + 1) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 2) s1 = mem_val_alt 8 ((pa AND 68719476732) + 2) s2 \<and> mem_val_alt 8 ((pa AND 68719476732) + 3) s1 = mem_val_alt 8 ((pa AND 68719476732) + 3) s2; mem_equal s1 s2 a; mem_val_alt 8 (a AND 68719476732) s1 = Some y; mem_val_alt 8 ((a AND 68719476732) + 1) s1 = Some ya; mem_val_alt 8 ((a AND 68719476732) + 2) s1 = Some yb; mem_val_alt 8 ((a AND 68719476732) + 3) s1 = Some yc; mem_val_alt 8 (a AND 68719476732) s2 = Some yd; mem_val_alt 8 ((a AND 68719476732) + 1) s2 = Some ye; mem_val_alt 8 ((a AND 68719476732) + 2) s2 = Some yf; mem_val_alt 8 ((a AND 68719476732) + 3) s2 = Some yg\<rbrakk> \<Longrightarrow> (((ucast y << 24) OR (ucast ya << 16)) OR (ucast yb << 8)) OR ucast yc = (((ucast yd << 24) OR (ucast ye << 16)) OR (ucast yf << 8)) OR ucast yg
[PROOF STEP]
by fastforce | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "SPARCv8_SparcModel_MMU_Sparc_Properties",
"hexsha": null,
"include": null,
"lang": null,
"length": 13,
"llama_tokens": 26421,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
#include "utf.h"
#define BOOST_UTF8_BEGIN_NAMESPACE namespace utf { namespace detail {
#define BOOST_UTF8_END_NAMESPACE }}
#define BOOST_UTF8_DECL
#include "detail/utf8_codecvt_facet.hpp"
#include "detail/utf8_codecvt_facet.cpp"
#undef BOOST_UTF8_DECL
#undef BOOST_UTF8_END_NAMESPACE
#undef BOOST_UTF8_BEGIN_NAMESPACE
#include <boost/bind.hpp>
namespace utf { namespace detail {
/* Internal function that actually performs the conversion.
The logic in the ucs_from_8_bit and ucs_to_8_bit functions is exactly
the same, except that one calls the 'in' method of codecvt and the other
calls the 'out' method, and that difference makes a straightforward
template implementation impossible.
This function takes a 'fun' argument, which should have the same
parameters and return type as the in/out methods. The actual converting
functions pass functional objects created with boost::bind.
Experiments show that the performance loss is less than 10%.
*/
template<class ToChar, class FromChar, class Fun>
std::basic_string<ToChar>
convert(const std::basic_string<FromChar>& s, Fun fun)
{
    std::basic_string<ToChar> result;
    mbstate_t shift_state = {0};
    const FromChar* src = s.data();
    const FromChar* src_end = s.data() + s.size();
    // codecvt offers no way to ask for the required output size up front,
    // so the input is converted one fixed-size chunk at a time.
    while (src != src_end) {
        // basic_string exposes no writable pointer to its storage, so the
        // facet writes into a stack buffer that is appended afterwards.
        ToChar chunk[32];
        ToChar* out_next = chunk;
        // Named variable: boost::bind cannot bind a reference to an rvalue.
        ToChar* out_end = chunk + sizeof(chunk)/sizeof(chunk[0]);
        // 'src' is passed as both the range start and the "next" reference,
        // so the call advances it past whatever the facet consumed.
        std::codecvt_base::result r =
            fun(shift_state, src, src_end, src, chunk, out_end, out_next);
        if (r == std::codecvt_base::error)
            throw std::logic_error("character conversion failed");
        // 'partial' merely means not all input was consumed.  But if no
        // output was produced either, the remaining input is an incomplete
        // sequence and, with nothing more to feed in, that is an error.
        if (out_next == chunk)
            throw std::logic_error("character conversion failed");
        // Append the characters produced by this round.
        result.append(chunk, out_next);
    }
    return result;
}
}}
namespace utf {
// Convert a narrow (8-bit encoded) string to UCS (wide) using the supplied
// codecvt facet.  boost::bind fixes the facet's 'in' method and object
// pointer; placeholders _1.._7 forward detail::convert's seven call
// arguments (state, from, from_end, from_next, to, to_end, to_next).
std::basic_string<wchar_t>
ucs_from_8_bit(const std::basic_string<char>& s,
const std::codecvt<wchar_t, char, mbstate_t>& cvt)
{
return detail::convert<wchar_t>(
s,
boost::bind(&std::codecvt<wchar_t, char, mbstate_t>::in,
&cvt,
_1, _2, _3, _4, _5, _6, _7));
}
// Convert a UCS (wide) string to a narrow 8-bit encoding using the supplied
// codecvt facet.  Mirror image of ucs_from_8_bit: binds the facet's 'out'
// method; placeholders _1.._7 forward detail::convert's seven call
// arguments (state, from, from_end, from_next, to, to_end, to_next).
std::basic_string<char>
ucs_to_8_bit(const std::basic_string<wchar_t>& s,
const std::codecvt<wchar_t, char, mbstate_t>& cvt)
{
return detail::convert<char>(
s,
boost::bind(&std::codecvt<wchar_t, char, mbstate_t>::out,
&cvt,
_1, _2, _3, _4, _5, _6, _7));
}
namespace {
// Shared UTF-8 codecvt facet used by the utf8_* wrappers below.  Each call
// to detail::convert creates a fresh mbstate_t, so reusing one facet
// instance across calls is safe provided the facet keeps all per-conversion
// state in the mbstate_t (as codecvt facets are expected to).
detail::utf8_codecvt_facet utf8_facet;
}
// Decode a UTF-8 byte string into a wide (UCS) string using the shared
// file-scope UTF-8 facet.
std::basic_string<wchar_t>
utf8_to_ucs(const std::basic_string<char>& s)
{
    std::basic_string<wchar_t> decoded = ucs_from_8_bit(s, utf8_facet);
    return decoded;
}
// Encode a wide (UCS) string as UTF-8 bytes using the shared file-scope
// UTF-8 facet.
std::basic_string<char>
ucs_to_utf8(const std::basic_string<wchar_t>& s)
{
    std::basic_string<char> encoded = ucs_to_8_bit(s, utf8_facet);
    return encoded;
}
// Narrow multibyte -> wide, driven by the codecvt facet of the current
// global locale (whatever encoding that locale implies).
std::basic_string<wchar_t>
mbcs_to_ucs(const std::basic_string<char>& s)
{
    typedef std::codecvt<wchar_t, char, mbstate_t> cvt_t;
    const std::locale current;   // copy of the global locale
    return ucs_from_8_bit(s, BOOST_USE_FACET(cvt_t, current));
}
// Wide -> narrow multibyte, driven by the codecvt facet of the current
// global locale.
std::basic_string<char>
ucs_to_mbcs(const std::basic_string<wchar_t>& s)
{
    typedef std::codecvt<wchar_t, char, mbstate_t> cvt_t;
    const std::locale current;   // copy of the global locale
    return ucs_to_8_bit(s, BOOST_USE_FACET(cvt_t, current));
}
// UTF-8 -> locale multibyte, pivoting through a wide (UCS) intermediate.
std::basic_string<char>
utf8_to_mbcs(const std::basic_string<char>& s)
{
    const std::basic_string<wchar_t> wide = utf8_to_ucs(s);
    return ucs_to_mbcs(wide);
}
// Locale multibyte -> UTF-8, pivoting through a wide (UCS) intermediate.
std::basic_string<char>
mbcs_to_utf8(const std::basic_string<char>& s)
{
    const std::basic_string<wchar_t> wide = mbcs_to_ucs(s);
    return ucs_to_utf8(wide);
}
}
| {
"alphanum_fraction": 0.6235906331,
"author": null,
"avg_line_length": 33.6642335766,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "bf637f71c7abdad60516ce18fd11fba8d7e8d426",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2022-01-19T08:25:58.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-08-31T14:34:23.000Z",
"max_forks_repo_head_hexsha": "e3cea615b06761c2098a05427a5f41c236b71bf7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "shacojx/SourceCodeGameTLBB",
"max_forks_repo_path": "Game/Client/Helper/Helper/utf/utf.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e3cea615b06761c2098a05427a5f41c236b71bf7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "shacojx/SourceCodeGameTLBB",
"max_issues_repo_path": "Game/Client/Helper/Helper/utf/utf.cpp",
"max_line_length": 99,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "b702c9e0a9ca5d86933f3c827abb02a18ffc9a59",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hackerlank/SourceCode",
"max_stars_repo_path": "Game/Client/Helper/Helper/utf/utf.cpp",
"max_stars_repo_stars_event_max_datetime": "2021-11-13T02:55:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-31T13:56:01.000Z",
"num_tokens": 1140,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4612
} |
module caf_co_sum_test
  !! Unit tests for the caf_co_sum collective, which sums a value across all
  !! images and, when result_image is present, deposits the result on that
  !! image only.
  !! Fixes relative to the original: duplicate assert_equals removed from the
  !! only-list; dead locals i_default_kind and scalar removed; mistyped Euler
  !! constant corrected (rounds to the same single-precision value).
  use caffeine_m, only : caf_co_sum, caf_num_images, caf_this_image
  use vegetables, only: result_t, test_item_t, assert_equals, describe, it, assert_that, succeed
  implicit none
  private
  public :: test_caf_co_sum
contains

  !> Assemble the suite: one test item per type/kind/rank/argument combination.
  function test_caf_co_sum() result(tests)
    type(test_item_t) tests
    tests = describe( &
      "The caf_co_sum subroutine", &
      [ it("sums default integer scalars with no optional arguments present", sum_default_integer_scalars) &
       ,it("sums default integer scalars with all arguments present", sum_integers_all_arguments) &
       ,it("sums integer(c_int64_t) scalars with stat argument present", sum_c_int64_scalars) &
       ,it("sums default integer 1D arrays with no optional arguments present", sum_default_integer_1D_array) &
       ,it("sums default integer 15D arrays with stat argument present", sum_default_integer_15D_array) &
       ,it("sums default real scalars with result_image argument present", sum_default_real_scalars) &
       ,it("sums double precision 2D arrays with no optional arguments present", sum_double_precision_2D_array) &
       ,it("sums default complex scalars with stat argument present", sum_default_complex_scalars) &
       ,it("sums double precision 1D complex arrays with no optional arguments present", sum_dble_complex_1D_arrays) &
      ])
  end function

  !> Summing 1 from every image must yield the number of images everywhere.
  function sum_default_integer_scalars() result(result_)
    type(result_t) result_
    integer i
    i = 1
    call caf_co_sum(i)
    result_ = assert_equals(caf_num_images(), i)
  end function

  !> With result_image present, only that image receives the sum; every
  !> other image keeps its original value.  stat and errmsg stay clean.
  function sum_integers_all_arguments() result(result_)
    type(result_t) result_
    integer i, status_, result_image_
    character(len=*), parameter :: whitespace = repeat(" ", ncopies=29)
    character(len=:), allocatable :: error_message
    i = 1
    result_image_ = 1
    status_ = -1
    error_message = whitespace
    ! The associate selector is evaluated on entry, capturing i's value
    ! BEFORE the collective modifies it.
    associate(expected_i => merge(caf_num_images()*i, i, caf_this_image()==result_image_))
      call caf_co_sum(i, result_image_, status_, error_message)
      result_ = assert_equals(expected_i, i) .and. assert_equals(0, status_) .and. assert_equals(whitespace, error_message)
    end associate
  end function

  !> Kind coverage: a 64-bit integer scalar with the stat argument present.
  function sum_c_int64_scalars() result(result_)
    use iso_c_binding, only : c_int64_t
    type(result_t) result_
    integer(c_int64_t) i
    integer status_
    status_ = -1
    i = 2_c_int64_t
    call caf_co_sum(i, stat=status_)
    result_ = assert_equals(2*caf_num_images(), int(i)) .and. assert_equals(0, status_)
  end function

  !> Element-wise behavior: each element of [1..num_images] is summed
  !> independently across images.
  function sum_default_integer_1D_array() result(result_)
    type(result_t) result_
    integer i
    integer, allocatable :: array(:)
    associate(images => caf_num_images())
      associate(sequence_ => [(i,i=1,images)])
        array = sequence_
        call caf_co_sum(array)
        result_ = assert_that(all(array==images*sequence_))
      end associate
    end associate
  end function

  !> Rank coverage: a rank-15 array with the stat argument present.
  function sum_default_integer_15D_array() result(result_)
    type(result_t) result_
    integer array(2,1,1, 1,1,1, 1,1,1, 1,1,1, 1,2,1)
    integer status_
    status_ = -1
    array = 3
    call caf_co_sum(array, stat=status_)
    result_ = assert_that(all(3*caf_num_images() == array)) .and. assert_equals(0, status_)
  end function

  !> Default-real scalar with result_image: non-result images keep e.
  function sum_default_real_scalars() result(result_)
    type(result_t) result_
    real scalar
    ! Euler's number; default real carries only ~7 significant digits.
    real, parameter :: e = 2.718281828459045
    integer result_image_
    result_image_ = 1
    scalar = e
    call caf_co_sum(scalar, result_image=result_image_)
    associate(expected_result => merge(caf_num_images()*e, e, caf_this_image()==result_image_))
      result_ = assert_equals(dble(expected_result), dble(scalar))
    end associate
  end function

  !> Kind/rank coverage: double-precision rank-2 arrays; products of all
  !> elements are compared rather than the arrays themselves.
  function sum_double_precision_2D_array() result(result_)
    type(result_t) result_
    double precision, allocatable :: array(:,:)
    double precision, parameter :: input(*,*) = reshape(-[6,5,4,3,2,1], [3,2])
    array = input
    call caf_co_sum(array)
    result_ = assert_equals(product(caf_num_images()*input), product(array))
  end function

  !> Type coverage: a default complex scalar with the stat argument present.
  function sum_default_complex_scalars() result(result_)
    type(result_t) result_
    complex z
    complex, parameter :: i=(0.,1.)
    integer status_
    status_ = -1
    z = i
    call caf_co_sum(z, stat=status_)
    result_ = assert_equals(dble(abs(i*caf_num_images())), dble(abs(z)) ) .and. assert_equals(0, status_)
  end function

  !> Type/kind coverage: double-precision complex rank-1 arrays.
  function sum_dble_complex_1D_arrays() result(result_)
    type(result_t) result_
    integer, parameter :: dp = kind(1.D0)
    complex(dp), allocatable :: array(:)
    complex(dp), parameter :: input(*) = [(1.D0,1.0D0)]
    array = [(1.D0,1.D0)]
    call caf_co_sum(array)
    result_ = assert_that(all([input*caf_num_images()] == array))
  end function

end module caf_co_sum_test
| {
"alphanum_fraction": 0.6536796537,
"author": null,
"avg_line_length": 37.95,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "8ab48df1495c1df89033dbc3f32527ea37d8adaf",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-03-11T06:13:45.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-13T21:19:15.000Z",
"max_forks_repo_head_hexsha": "a0efb35f3662c8bf611abe4a572e9d9154ce861d",
"max_forks_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_forks_repo_name": "BerkeleyLab/caffeine",
"max_forks_repo_path": "test/caf_co_sum_test.f90",
"max_issues_count": 16,
"max_issues_repo_head_hexsha": "a0efb35f3662c8bf611abe4a572e9d9154ce861d",
"max_issues_repo_issues_event_max_datetime": "2022-03-04T16:42:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-01-13T22:51:53.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_issues_repo_name": "BerkeleyLab/caffeine",
"max_issues_repo_path": "test/caf_co_sum_test.f90",
"max_line_length": 127,
"max_stars_count": 25,
"max_stars_repo_head_hexsha": "a0efb35f3662c8bf611abe4a572e9d9154ce861d",
"max_stars_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_stars_repo_name": "BerkeleyLab/caffeine",
"max_stars_repo_path": "test/caf_co_sum_test.f90",
"max_stars_repo_stars_event_max_datetime": "2022-03-11T06:13:44.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-15T03:13:37.000Z",
"num_tokens": 1298,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5313
} |
{-# language FlexibleContexts #-}
{-# language GADTs #-}
{-# language ScopedTypeVariables #-}
{-# language TypeApplications #-}
module Verify where
import Feldspar
import Feldspar.Software
import Feldspar.Software.Verify
import Feldspar.Array.Vector
import Feldspar.Array.Buffered
import Data.Bits (Bits)
import Data.Complex (Complex)
import Prelude hiding ((==), (/=), (>), (<=), length, div, reverse, sum)
--------------------------------------------------------------------------------
inc :: Software ()
inc = do
  -- Increment one element of a freshly allocated array, at an index read
  -- from stdin.  NOTE(review): the bounds assertion was left disabled by
  -- the original author, so an out-of-range index is unchecked here.
  size :: SExp Length <- fget stdin
  pos :: SExp Index <- fget stdin
  cells :: Arr (SExp Word32) <- newArr size
  -- assert (pos <= length cells) "pos out of bounds"
  prev <- getArr cells pos
  setArr cells pos (prev + 1)
copy :: Software ()
copy = do
  -- Two handles over one array: 'alias' comes from freezing and re-thawing
  -- 'orig', which presumably shares the backing storage (the "unsafe"
  -- freeze/thaw pair suggests no copy) -- confirm against Feldspar docs.
  orig :: Arr (SExp Word32) <- newArr 10
  alias :: Arr (SExp Word32) <- unsafeFreezeArr orig >>= unsafeThawArr
  setArr orig 0 0
  v <- getArr orig 0
  setArr alias 0 v
inplace :: Software ()
inplace = do
  -- Store two vectors through one in-place store.  NOTE(review): the
  -- in-place store presumably reuses its backing memory between 'store'
  -- calls -- confirm against Feldspar.Array.Buffered.
  sp :: Store Software (SExp Word32) <- newInPlaceStore 10
  xs :: Manifest Software (SExp Word32) <- store sp $ (1...10)
  ys <- store sp $ flipped xs
  total <- shareM $ sum ys
  return ()
  where
    -- Push vector pairing index j with index 10-j-1, i.e. a reversal
    -- specialised to length 10.
    flipped :: Manifest Software (SExp Word32) -> Push Software (SExp Word32)
    flipped = pairwise (\j -> (j, 10-j-1))
--------------------------------------------------------------------------------
-- A super-simple verification example.
count :: Software ()
count = do
  printf "Enter a number: "
  n :: SExp Word32 <- fget stdin
  -- Iterate n times from 0, adding 1 each step; the 'hint' records the
  -- loop invariant (accumulator equals the step index) for the verifier.
  let expected = iter n 0 (\step acc -> hint (acc == step) $ step + 1)
  final <- initRef expected >>= unsafeFreezeRef
  assert (final == n) "Count is wrong"
  printf "The count is %d\n" final
--------------------------------------------------------------------------------
rev :: Software ()
rev = do
  -- Copy 'src' into 'dst' in reverse order.  NOTE(review): 'src' elements
  -- are never written before being read -- presumably intentional for a
  -- verification example; confirm intent.
  len <- fget stdin
  src :: IArr (SExp Word32) <- newArr len >>= unsafeFreezeArr
  dst :: Arr (SExp Word32) <- newArr len
  assert (len > 0) "neg"
  for 0 1 (len-1) $ \k -> do
    setArr dst k (src ! (len-k-1))
rev_inplace :: Software ()
rev_inplace = do
  -- Reverse in place by swapping element k with element len-k-1 over the
  -- first half.  'view' comes from an unsafe freeze/thaw of 'buf' and so
  -- presumably aliases it -- reads go through 'view', writes through 'buf'.
  len <- fget stdin
  buf :: Arr (SExp Word32) <- newArr len
  view <- unsafeFreezeArr buf >>= unsafeThawArr
  for 0 1 ((len `div` 2 :: SExp Word32)) $ \k -> do
    lo <- getArr view k
    hi <- getArr view (len-k-1)
    setArr buf k hi
    setArr buf (len-k-1) lo
rev_amazing :: Software ()
rev_amazing = do
  -- Manifest a reversal back into the thawed original: 'pairwise' swaps
  -- index j with len-j-1 over the vector 'reverse' of the frozen array.
  len <- fget stdin
  frozen :: IArr (SExp Word32) <- newArr len >>= unsafeFreezeArr
  thawed <- unsafeThawArr frozen
  out <- manifestArr thawed (pairwise @Software (\j -> (j, len-j-1)) (reverse frozen))
  return ()
------------------------------------------------------------
{-
test_scProd1 = do
n <- fget stdin
printf "result: %.3f\n" $
(scProd (fmap i2n (0 ... n-1)) (fmap i2n (2 ... n+1)) :: Data Double)
test_scProd2 = do
n <- fget stdin
v1 <- manifestFresh $ fmap i2n (0 ... n-1)
v2 <- manifestFresh $ fmap i2n (2 ... n+1)
printf "result: %.3f\n" (scProd v1 v2 :: Data Double)
map_inplace :: Run ()
map_inplace = do
n <- fget stdin
loc <- newArr n
vec <- manifest loc (0 ... n-1)
manifestStore loc $ map (+1) vec
vec <- unsafeFreezeArr loc
printf "result: %d\n" $ sum vec
map2_inplace :: Run ()
map2_inplace = do
n <- fget stdin
assert (n < maxBound) "oops"
loc :: Arr (Data Word32) <- newArr (n+1)
vec <- unsafeFreezeArr loc
for (0, 1, Excl (n :: Data Word32)) $ \i -> do
setArr loc i (arrIx vec i+1)
tail_inplace :: Run ()
tail_inplace = do
n <- fget stdin
loc :: Arr (Data Word32) <- newArr n
vec <- unsafeFreezeArr loc
let when cond x = iff cond x (return ())
when (n > 0) $
for (0, 1, Excl (n-1)) $ \i -> do
setArr loc i (arrIx vec (i+1)+1)
filter_inplace :: Run ()
filter_inplace = do
n <- fget stdin
loc :: Arr (Data Word32) <- newArr n
vec <- unsafeFreezeArr loc
ref <- initRef 0
let when cond x = iff cond x (return ())
for (0, 1, Excl n) $ \i -> do
let x = arrIx vec i
when (x > 5) $ do
j <- unsafeFreezeRef ref
hint (j <= i)
setArr loc j x
setRef ref (j+1)
rev_inplace :: Software ()
rev_inplace = do
n <- fget stdin
loc :: Arr (Data Word32) <- newArr n
vec <- unsafeFreezeArr loc >>= unsafeThawArr
for (0, 1, Excl (n `div` 2 :: Data Word32)) $ \i -> do
x <- getArr vec i
y <- getArr vec (n-i-1)
setArr loc i y
setArr loc (n-i-1) x
-}
--------------------------------------------------------------------------------
| {
"alphanum_fraction": 0.5392438647,
"author": null,
"avg_line_length": 27.9197530864,
"converted": null,
"ext": "hs",
"file": null,
"hexsha": "15b3482c7c4460d72bffdc525a7af49c63e34f17",
"include": null,
"lang": "Haskell",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-06-30T14:16:26.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-09-12T13:36:02.000Z",
"max_forks_repo_head_hexsha": "580c693f0c80505ad879e4363c715464c5e04aab",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "markus-git/co-feldspar",
"max_forks_repo_path": "examples/Verify.hs",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "580c693f0c80505ad879e4363c715464c5e04aab",
"max_issues_repo_issues_event_max_datetime": "2021-07-12T17:10:33.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-05T23:49:58.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "markus-git/co-feldspar",
"max_issues_repo_path": "examples/Verify.hs",
"max_line_length": 84,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "580c693f0c80505ad879e4363c715464c5e04aab",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "markus-git/co-feldspar",
"max_stars_repo_path": "examples/Verify.hs",
"max_stars_repo_stars_event_max_datetime": "2021-06-30T14:16:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-08-17T13:31:32.000Z",
"num_tokens": 1397,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4523
} |
[STATEMENT]
lemma "e1.hf_valid ainfo uinfo hfBl (Some hfAl)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e1.hf_valid (Num 0) 1 hfBl (Some hfAl)
[PROOF STEP]
apply (auto simp add: e1.hf_valid_invert hfAl_def hfBl_def)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. Ex (d0.ASIF (Some nB))
2. Ex (d0.ASIF None)
[PROOF STEP]
using d0.ASIF.simps
[PROOF STATE]
proof (prove)
using this:
d0.ASIF (Some ?a) (AS ?a') = (?a = ?a')
d0.ASIF None \<epsilon> = True
d0.ASIF None (AS ?v) = False
d0.ASIF None (Num ?v) = False
d0.ASIF None (Key ?va) = False
d0.ASIF None (Nonce ?vd) = False
d0.ASIF None (L ?vc) = False
d0.ASIF None (FS ?vb) = False
d0.ASIF None \<langle>?ve, ?vaa\<rangle> = False
d0.ASIF None (Hash ?ve) = False
d0.ASIF None (Crypt ?va ?vaa) = False
d0.ASIF (Some ?v) \<epsilon> = False
d0.ASIF ?uu (Num ?v) = False
d0.ASIF ?uu (Key ?va) = False
d0.ASIF ?uu (Nonce ?vd) = False
d0.ASIF ?uu (L ?vc) = False
d0.ASIF ?uu (FS ?vb) = False
d0.ASIF ?uu \<langle>?ve, ?vaa\<rangle> = False
d0.ASIF ?uu (Hash ?ve) = False
d0.ASIF ?uu (Crypt ?va ?vaa) = False
goal (2 subgoals):
1. Ex (d0.ASIF (Some nB))
2. Ex (d0.ASIF None)
[PROOF STEP]
by blast+ | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "IsaNet_instances_EPIC_L1_SA_Example",
"hexsha": null,
"include": null,
"lang": null,
"length": 3,
"llama_tokens": 575,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
\documentclass[12pt]{amsart}
\usepackage{geometry} % see geometry.pdf on how to lay out the page. There's lots.
\geometry{a4paper} % or letter or a5paper or ... etc
% \geometry{landscape} % rotated page geometry
\usepackage{listings}
\usepackage{hyperref}
\lstnewenvironment{code}{\lstset{language=Haskell,basicstyle=\small}}{}
% See the ``Article customise'' template for some common customisations
\title{Design of the Literate Unit-B toolset}
\author{Simon Hudon}
\date{} % delete this line to display the current date
%%% BEGIN DOCUMENT
\begin{document}
\maketitle
\tableofcontents
\section{Requirements}
\subsection{Secrets}
\section{Design Principles}
\section{Front End}
\input{Document/visitor.lhs}
\subsection{Parser}
\section{Proof Obligations}
\input{UnitB/feasibility.lhs}
\end{document} | {
"alphanum_fraction": 0.770755886,
"author": null,
"avg_line_length": 25.21875,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "e7763cbf9fa475bfcac587c43e0cce332aaed40c",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0d843456dc103bb09babc5b12855435d2e10f534",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "literate-unitb/literate-unitb",
"max_forks_repo_path": "design.tex",
"max_issues_count": 32,
"max_issues_repo_head_hexsha": "0d843456dc103bb09babc5b12855435d2e10f534",
"max_issues_repo_issues_event_max_datetime": "2017-06-25T04:28:38.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-06-25T03:53:02.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "unitb/literate-unitb",
"max_issues_repo_path": "design.tex",
"max_line_length": 82,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "0d843456dc103bb09babc5b12855435d2e10f534",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "literate-unitb/literate-unitb",
"max_stars_repo_path": "design.tex",
"max_stars_repo_stars_event_max_datetime": "2018-01-20T14:53:33.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-07-27T11:05:56.000Z",
"num_tokens": 225,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 807
} |
! Principal complex square root, computed in polar form:
!   csqrt(z1) = sqrt(|z1|) * (cos(t/2) + i*sin(t/2)),  t = atan2(Im z1, Re z1)
! atan2 returns t in (-pi, pi], so t/2 lies in (-pi/2, pi/2] and the
! result is on the principal branch (non-negative real part).
! The original relied on an EQUIVALENCE storage-association trick to
! view the complex temporary as two reals; that aliasing is non-portable.
! This version uses the standard real/aimag/cmplx intrinsics with
! identical results.
function csqrt(z1)
complex csqrt, z1
real a, b, c
c = sqrt(cabs(z1))
b = atan2(aimag(z1), real(z1))/2.0
a = c*cos(b)
b = c*sin(b)
csqrt = cmplx(a, b)
return
end
| {
"alphanum_fraction": 0.5898617512,
"author": null,
"avg_line_length": 14.4666666667,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "bdafe2833ab5913771e736c9ed42578f993d66b0",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7eabcf07e61df13e868d785087ab24353110f626",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "alexchenfeng/UNIXV6",
"max_forks_repo_path": "fort/rt1/csqrt.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7eabcf07e61df13e868d785087ab24353110f626",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "alexchenfeng/UNIXV6",
"max_issues_repo_path": "fort/rt1/csqrt.f",
"max_line_length": 42,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "7eabcf07e61df13e868d785087ab24353110f626",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "alexchenfeng/UNIXV6",
"max_stars_repo_path": "fort/rt1/csqrt.f",
"max_stars_repo_stars_event_max_datetime": "2021-04-13T04:29:34.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-13T04:29:34.000Z",
"num_tokens": 94,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 217
} |
(*************************************************************************
* Copyright (C)
* 2019-2022 The University of Exeter
* 2018-2022 The University of Paris-Saclay
* 2018 The University of Sheffield
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
(*<*)
theory "M_00_Frontmatter"
imports
"Isabelle_DOF.technical_report"
begin
use_template "scrreprt-modern"
use_ontology "technical_report"
section\<open>Local Document Setup.\<close>
text\<open>Introducing document specific abbreviations and macros:\<close>
define_shortcut* dof \<rightleftharpoons> \<open>\dof\<close>
isadof \<rightleftharpoons> \<open>\isadof{}\<close>
define_shortcut* TeXLive \<rightleftharpoons> \<open>\TeXLive\<close>
BibTeX \<rightleftharpoons> \<open>\BibTeX{}\<close>
LaTeX \<rightleftharpoons> \<open>\LaTeX{}\<close>
TeX \<rightleftharpoons> \<open>\TeX{}\<close>
pdf \<rightleftharpoons> \<open>PDF\<close>
text\<open>Note that these setups assume that the associated \<^LaTeX> macros
are defined, \<^eg>, in the document prelude. \<close>
define_macro* index \<rightleftharpoons> \<open>\index{\<close> _ \<open>}\<close>
define_macro* bindex \<rightleftharpoons> \<open>\bindex{\<close> _ \<open>}\<close>
ML\<open>
fun boxed_text_antiquotation name (* redefined in these more abstract terms *) =
DOF_lib.gen_text_antiquotation name DOF_lib.report_text
(fn ctxt => DOF_lib.string_2_text_antiquotation ctxt
#> DOF_lib.enclose_env false ctxt "isarbox")
val neant = K(Latex.text("",\<^here>))
fun boxed_theory_text_antiquotation name (* redefined in these more abstract terms *) =
DOF_lib.gen_text_antiquotation name DOF_lib.report_theory_text
(fn ctxt => DOF_lib.string_2_theory_text_antiquotation ctxt
#> DOF_lib.enclose_env false ctxt "isarbox"
(* #> neant *)) (*debugging *)
fun boxed_sml_text_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "sml")
(* the simplest conversion possible *)
fun boxed_pdf_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "out")
(* the simplest conversion possible *)
fun boxed_latex_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "ltx")
(* the simplest conversion possible *)
fun boxed_bash_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "bash")
(* the simplest conversion possible *)
\<close>
setup\<open>(* std_text_antiquotation \<^binding>\<open>my_text\<close> #> *)
boxed_text_antiquotation \<^binding>\<open>boxed_text\<close> #>
(* std_text_antiquotation \<^binding>\<open>my_cartouche\<close> #> *)
boxed_text_antiquotation \<^binding>\<open>boxed_cartouche\<close> #>
(* std_theory_text_antiquotation \<^binding>\<open>my_theory_text\<close>#> *)
boxed_theory_text_antiquotation \<^binding>\<open>boxed_theory_text\<close> #>
boxed_sml_text_antiquotation \<^binding>\<open>boxed_sml\<close> #>
boxed_pdf_antiquotation \<^binding>\<open>boxed_pdf\<close> #>
boxed_latex_antiquotation \<^binding>\<open>boxed_latex\<close>#>
boxed_bash_antiquotation \<^binding>\<open>boxed_bash\<close>
\<close>
open_monitor*[this::report]
(*>*)
title*[title::title] \<open>Isabelle/DOF\<close>
subtitle*[subtitle::subtitle]\<open>User and Implementation Manual\<close>
author*[ adb,
email ="\<open>a.brucker@exeter.ac.uk\<close>",
orcid ="\<open>0000-0002-6355-1200\<close>",
http_site ="\<open>https://www.brucker.ch/\<close>",
affiliation ="\<open>University of Exeter, Exeter, UK\<close>"]\<open>Achim D. Brucker\<close>
author*[ bu,
email = "\<open>wolff@lri.fr\<close>",
affiliation = "\<open>Université Paris-Saclay, LRI, Paris, France\<close>"]\<open>Burkhart Wolff\<close>
abstract*[abs, keywordlist="[\<open>Ontology\<close>, \<open>Ontological Modeling\<close>, \<open>Document Management\<close>,
\<open>Formal Document Development\<close>,\<open>Document Authoring\<close>,\<open>Isabelle/DOF\<close>]"]
\<open> \<^isadof> provides an implementation of \<^dof> on top of Isabelle/HOL.
\<^dof> itself is a novel framework for \<^emph>\<open>defining\<close> ontologies
and \<^emph>\<open>enforcing\<close> them during document development and document
evolution. \<^isadof> targets use-cases such as mathematical texts referring
to a theory development or technical reports requiring a particular structure.
A major application of \<^dof> is the integrated development of
formal certification documents (\<^eg>, for Common Criteria or CENELEC
50128) that require consistency across both formal and informal
arguments.
\<^isadof> is integrated into Isabelle's IDE, which
allows for smooth ontology development as well as immediate
ontological feedback during the editing of a document.
Its checking facilities leverage the collaborative
development of documents required to be consistent with an
underlying ontological structure.
In this user-manual, we give an in-depth presentation of the design
concepts of \<^dof>'s Ontology Definition Language (ODL) and describe
comprehensively its major commands. Many examples show typical best-practice
applications of the system.
It is a unique feature of \<^isadof> that ontologies may be used to control
the link between formal and informal content in documents in a machine
checked way. These links can connect both text elements and formal
modeling elements such as terms, definitions, code and logical formulas,
altogether \<^emph>\<open>integrated\<close> in a state-of-the-art interactive theorem prover.
\<close>
(*<*)
end
(*>*)
| {
"alphanum_fraction": null,
"author": "logicalhacking",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/isabelle/logicalhacking-Isabelle_DOF/Isabelle_DOF-07444efd2168aac992471259f3cd958bde777ea6/Isabelle_DOF/thys/manual/M_00_Frontmatter.thy",
"reason": null,
"repo": "Isabelle_DOF",
"save_path": "github-repos/isabelle/logicalhacking-Isabelle_DOF",
"sha": "07444efd2168aac992471259f3cd958bde777ea6",
"size": null
} |
import sys
import unittest
import numpy
import ideep4py
from ideep4py import intVector, mdarrayVector, concat
try:
import testing
except Exception as ex:
print('*** testing directory is missing: %s' % ex)
sys.exit(-1)
@testing.parameterize(*testing.product_dict(
    [
        {'shape': (7, 2, 3, 5), 'axis': 0, 'section': [2, 5],
         'slices': [[slice(None, 2)], [slice(2, 5)],
                    [slice(5, None)]]},
        {'shape': (2, 7, 3, 5), 'axis': 1, 'section': [2, 5],
         'slices': [[slice(None), slice(None, 2)], [slice(None), slice(2, 5)],
                    [slice(None), slice(5, None)]]},
        {'shape': (2, 3, 7, 5), 'axis': 2, 'section': [2, 5],
         'slices': [[slice(None), slice(None), slice(None, 2)],
                    [slice(None), slice(None), slice(2, 5)],
                    [slice(None), slice(None), slice(5, None)]]},
        {'shape': (2, 3, 5, 7), 'axis': 3, 'section': [2, 5],
         'slices': [[slice(None), slice(None), slice(None), slice(None, 2)],
                    [slice(None), slice(None), slice(None), slice(2, 5)],
                    [slice(None), slice(None), slice(None), slice(5, None)]]},
        {'shape': (60, 33, 3, 3), 'axis': 0, 'section': [12, 48],
         'slices': [[slice(None, 12)],
                    [slice(12, 48)],
                    [slice(48, None)]]},
        {'shape': (33, 60, 3, 3), 'axis': 1, 'section': [12, 48],
         'slices': [[slice(None), slice(None, 12)],
                    [slice(None), slice(12, 48)],
                    [slice(None), slice(48, None)]]},
        {'shape': (33, 3, 60, 3), 'axis': 2, 'section': [12, 48],
         'slices': [[slice(None), slice(None), slice(None, 12)],
                    [slice(None), slice(None), slice(12, 48)],
                    [slice(None), slice(None), slice(48, None)]]},
        {'shape': (33, 3, 3, 60), 'axis': 3, 'section': [12, 48],
         'slices': [[slice(None), slice(None), slice(None), slice(None, 12)],
                    [slice(None), slice(None), slice(None), slice(12, 48)],
                    [slice(None), slice(None), slice(None), slice(48, None)]]},
    ],
    [
        {'dtype': numpy.float32},
    ],
))
class TestConcatPyF32(unittest.TestCase):
    """Forward/backward tests for ideep4py ``concat`` over every axis.

    Each parameterization builds a dense reference array of ``shape``,
    splits it along ``axis`` at the offsets in ``section``, and checks
    that concat.Forward reassembles it (and concat.Backward re-splits it)
    exactly.
    """

    def setUp(self):
        # Dense reference array with distinct values, then the expected
        # sections of it along self.axis.
        self.y = numpy.arange(
            numpy.prod(self.shape), dtype=self.dtype).reshape(self.shape)
        self.xs = [self.y[s] for s in self.slices]

    @staticmethod
    def _as_mdarray_vector(xs_data):
        """Convert a sequence of arrays into an ideep4py mdarrayVector.

        numpy inputs are made contiguous and wrapped as mdarray;
        anything else is pushed through unchanged.
        """
        vec = mdarrayVector()
        for x in xs_data:
            if isinstance(x, numpy.ndarray):
                # ascontiguousarray is a no-op for already-contiguous input,
                # so no separate flags check is needed.
                x = ideep4py.mdarray(numpy.ascontiguousarray(x))
            vec.push_back(x)
        return vec

    def check_forward(self, xs_data, y_data, axis):
        """Concat the sections and compare against the reference array."""
        xs_mdarray = self._as_mdarray_vector(xs_data)
        y_act = concat.Forward(xs_mdarray, self.axis)
        y_act = numpy.array(y_act, dtype=self.dtype)
        numpy.testing.assert_allclose(y_data, y_act, atol=0, rtol=0)

    def test_forward_cpu(self):
        self.check_forward(self.xs, self.y, axis=self.axis)

    def check_backward(self, xs_data, y_data, axis):
        """Split the reference array at self.section and compare each part."""
        xs_mdarray = self._as_mdarray_vector(xs_data)
        y_mdarray = ideep4py.mdarray(y_data)
        offsets = intVector()
        # FIXME: offsets are the split points along self.axis.
        for off in self.section:
            offsets.push_back(off)
        x_act_mdarray = concat.Backward(y_mdarray, offsets, self.axis)
        for i, expected in enumerate(xs_data):
            x_act = numpy.array(x_act_mdarray[i], dtype=self.dtype)
            numpy.testing.assert_allclose(expected, x_act, atol=0, rtol=0)

    def test_backward_cpu(self):
        self.check_backward(self.xs, self.y, axis=self.axis)


testing.run_module(__name__, __file__)
| {
"alphanum_fraction": 0.5293107677,
"author": null,
"avg_line_length": 39.5825242718,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5eeeaf1039fd4866a1ae0d6ff80c70cf266eecb0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 78,
"max_forks_repo_forks_event_max_datetime": "2022-03-28T08:14:52.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-11-05T11:46:31.000Z",
"max_forks_repo_head_hexsha": "b57539e4608e75f80dbc5c2784643d5f2f242003",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cis-ncbj/ideep",
"max_forks_repo_path": "python/ideep4py/tests/ideep4py_tests/test_concat_py.py",
"max_issues_count": 46,
"max_issues_repo_head_hexsha": "b57539e4608e75f80dbc5c2784643d5f2f242003",
"max_issues_repo_issues_event_max_datetime": "2021-12-01T03:50:19.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-03-05T06:44:16.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cis-ncbj/ideep",
"max_issues_repo_path": "python/ideep4py/tests/ideep4py_tests/test_concat_py.py",
"max_line_length": 79,
"max_stars_count": 133,
"max_stars_repo_head_hexsha": "b57539e4608e75f80dbc5c2784643d5f2f242003",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cis-ncbj/ideep",
"max_stars_repo_path": "python/ideep4py/tests/ideep4py_tests/test_concat_py.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-22T08:27:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-10-20T06:45:25.000Z",
"num_tokens": 1148,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4077
} |
Require Import
Fiat.BinEncoders.Env.Common.Notations
Fiat.BinEncoders.Env.Common.Specs
Fiat.BinEncoders.Env.Common.ComposeOpt.
Require Export
Coq.Lists.List.
Notation "| ls |" := (Datatypes.length ls) : binencoders_scope.
Section FixList.
Context {A : Type}.
Context {B : Type}.
Context {cache : Cache}.
Context {transformer : Transformer B}.
Variable A_predicate : A -> Prop.
Variable A_predicate_rest : A -> B -> Prop.
Variable A_encode_Spec : A -> CacheEncode -> Comp (B * CacheEncode).
Variable A_decode : B -> CacheDecode -> option (A * B * CacheDecode).
Variable A_cache_inv : CacheDecode -> Prop.
Variable A_decode_pf : encode_decode_correct_f cache transformer A_predicate A_predicate_rest A_encode_Spec A_decode A_cache_inv.
(* Ben: Should we do this with a FixComp instead? *)
(* Nondeterministic list encoder: encode each element in order with
   [A_encode_Spec], threading the cache, and append the element
   encodings with [transform].  The empty list encodes to
   [transform_id]. *)
Fixpoint encode_list_Spec (xs : list A) (ce : CacheEncode)
: Comp (B * CacheEncode) :=
match xs with
| nil => ret (transform_id, ce)
| x :: xs' => `(b1, env1) <- A_encode_Spec x ce;
`(b2, env2) <- encode_list_Spec xs' env1;
ret (transform b1 b2, env2)
end%comp.
(* Deterministic (functional) counterpart of [encode_list_Spec],
   parameterized by a concrete per-element encoder [A_encode_Impl]. *)
Fixpoint encode_list_Impl
(A_encode_Impl : A -> CacheEncode -> B * CacheEncode)
(xs : list A) (ce : CacheEncode)
: B * CacheEncode :=
match xs with
| nil => (transform_id, ce)
| x :: xs' => let (b1, env1) := A_encode_Impl x ce in
let (b2, env2) := encode_list_Impl A_encode_Impl xs' env1 in
(transform b1 b2, env2)
end%comp.
(* Decode exactly [s] elements from [b], threading the remaining bits
   and the decode cache; returns [None] as soon as [A_decode] fails. *)
Fixpoint decode_list (s : nat) (b : B) (cd : CacheDecode) : option (list A * B * CacheDecode) :=
match s with
| O => Some (nil, b, cd)
| S s' => `(x, b1, e1) <- A_decode b cd;
`(xs, b2, e2) <- decode_list s' b1 e1;
Some (x :: xs, b2, e2)
end.
(* Lifts [A_predicate_rest] to lists: every element's "rest" predicate
   must hold for the encoding of the remaining suffix followed by the
   trailing bits [b]. *)
Fixpoint FixList_predicate_rest
(As : list A)
(b : B)
: Prop :=
match As with
| nil => True
| cons a As' =>
(forall b' ce ce',
computes_to (encode_list_Spec As' ce) (b', ce')
-> A_predicate_rest a (transform b' b))
/\ FixList_predicate_rest As' b
end.
(* Correctness of the fixed-length list format: if the element format is
   correct ([A_decode_pf]), then [decode_list sz] is a correct inverse of
   [encode_list_Spec] for lists of length [sz] whose elements all satisfy
   [A_predicate].  Proved by the standard two directions of
   [encode_decode_correct_f]: encode-then-decode (induction on the list)
   and decode-then-encode (induction on [sz]). *)
Theorem FixList_decode_correct
:
forall sz ,
encode_decode_correct_f
cache transformer
(fun ls => |ls| = sz /\ forall x, In x ls -> A_predicate x)
FixList_predicate_rest
encode_list_Spec (decode_list sz) A_cache_inv.
Proof.
split.
{
intros env env' xenv l l' ext ? Eeq Ppred Ppred_rest Penc.
intuition; subst.
revert H0.
generalize dependent env. generalize dependent env'.
generalize dependent xenv.
generalize dependent l'. induction l.
{ intros.
simpl in *; intuition; computes_to_inv;
injections; simpl.
rewrite transform_id_left; eexists; eauto. }
{ intros; simpl in *.
assert (A_predicate a) by eauto.
unfold Bind2 in Penc; computes_to_inv; subst.
destruct v; destruct v0; simpl in *.
injections.
destruct (fun H' => proj1 A_decode_pf _ _ _ _ _ (transform b0 ext) env_OK Eeq H H' Penc) as [ ? [? [? xenv_OK] ] ].
intuition; destruct_ex.
eapply H1; eauto.
setoid_rewrite <- transform_assoc; setoid_rewrite H1;
simpl.
destruct (IHl (proj2 Ppred_rest) b0 xenv x xenv_OK c); intuition eauto.
setoid_rewrite H6; simpl.
eexists; intuition.
}
}
{ induction sz; simpl; intros.
- injections; simpl; repeat eexists; intuition eauto.
symmetry; apply transform_id_left.
- destruct (A_decode bin env') as [ [ [? ?] ?] | ] eqn: ? ;
simpl in *; try discriminate.
destruct (decode_list sz b c) as [ [ [? ?] ?] | ] eqn: ? ;
simpl in *; try discriminate; injections.
eapply (proj2 A_decode_pf) in Heqo; eauto;
destruct Heqo; destruct_ex; intuition; subst;
eapply IHsz in Heqo0; eauto; destruct Heqo0;
destruct_ex; intuition; subst.
simpl.
eexists; eexists; intuition eauto.
computes_to_econstructor; eauto.
computes_to_econstructor; eauto.
rewrite transform_assoc; reflexivity.
subst; eauto.
}
Qed.
(* Single step of encoding as a fold body: append the encoding of one
   element onto the accumulated bits, threading the cache. *)
Definition encode_list_body
(A_encode_Impl : A -> CacheEncode -> B * CacheEncode)
:= (fun (acc: B * CacheEncode) x =>
let (bacc, env) := acc in
let (b1, env1) := A_encode_Impl x env in
(transform bacc b1, env1)).
(* The fold's accumulator factors out: folding from an arbitrary [base]
   equals folding from [transform_id] and prepending [base]. *)
Lemma encode_list_body_characterization A_encode_Impl :
forall xs base env,
fold_left (encode_list_body A_encode_Impl) xs (base, env) =
(let (b2, env2) := fold_left (encode_list_body A_encode_Impl) xs (transform_id, env) in
(transform base b2, env2)).
Proof.
induction xs; simpl.
+ intros; rewrite transform_id_right; reflexivity.
+ intros; destruct (A_encode_Impl _ _).
rewrite IHxs, transform_id_left, (IHxs b).
destruct (fold_left _ _ _).
rewrite transform_assoc; reflexivity.
Qed.
(* The recursive encoder is equivalent to a left fold with
   [encode_list_body], enabling fold-based reasoning and extraction. *)
Lemma encode_list_as_foldl A_encode_Impl :
forall xs env,
encode_list_Impl A_encode_Impl xs env =
fold_left (encode_list_body A_encode_Impl) xs (transform_id, env).
Proof.
induction xs; simpl.
+ reflexivity.
+ intros; destruct (A_encode_Impl _ _).
rewrite IHxs, transform_id_left, (encode_list_body_characterization A_encode_Impl xs b c).
destruct (fold_left _ _ _); reflexivity.
Qed.
(* If every element encodes to exactly [n] units of [bin_measure], then
   a list of length [k] encodes to exactly [n * k] units. *)
Lemma measure_encode_length_Spec n :
(forall (a : A) b ctx ctx',
computes_to (A_encode_Spec a ctx) (b, ctx')
-> bin_measure b = n)
-> forall l b ctx ctx',
computes_to (encode_list_Spec l ctx) (b, ctx') ->
bin_measure b = n * (length l).
Proof.
induction l; simpl; intros.
- computes_to_inv; injections.
pose proof (transform_measure transform_id transform_id) as H';
rewrite transform_id_left in H'.
simpl bin_measure in H'; simpl transform_id in H'; omega.
- unfold Bind2 in *; computes_to_inv; injections.
destruct v; destruct v0; simpl in *.
rewrite transform_measure.
apply H in H0; rewrite H0.
apply IHl in H0'; rewrite H0'.
rewrite Mult.mult_succ_r.
auto with arith.
Qed.
End FixList.
(* The trivial rest-predicate [fun a b => True] lifts to every list,
   giving a convenient discharge lemma for formats with no constraint
   on trailing bits. *)
Lemma FixedList_predicate_rest_True {A B}
{cache : Cache}
{transformer : Transformer B}
(A_encode_Spec : A -> CacheEncode -> Comp (B * CacheEncode))
: forall (l : list A) (b : B),
FixList_predicate_rest (fun a b => True) A_encode_Spec l b.
Proof.
induction l; simpl; eauto.
Qed.
| {
"alphanum_fraction": null,
"author": "proofskiddie",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/proofskiddie-CoqStuff/CoqStuff-fc8ecdf8045bc835bb10b2e4791f041d82451b5d/idontevnkno/src/BinEncoders/Env/Lib2/FixListOpt.v",
"reason": null,
"repo": "CoqStuff",
"save_path": "github-repos/coq/proofskiddie-CoqStuff",
"sha": "fc8ecdf8045bc835bb10b2e4791f041d82451b5d",
"size": null
} |
from grpc.beta import implementations
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
hostport = 'localhost:9000'


def do_prediction(hostport):
    """Query a TensorFlow Serving model with evenly spaced test points.

    Sends the points -1..5 (as a float32 column vector) to the model
    named 'nn' via its 'prediction' signature over an insecure gRPC
    channel, and returns the server's response together with the
    noise-free ground-truth values y = 0.5 * x + 2.5.

    :param hostport: ``'host:port'`` address of the serving endpoint.
    :return: tuple ``(result, actual)`` — the PredictResponse and a
        numpy array of noise-free target values.
    """
    # Create connection
    host, port = hostport.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Initialize a request against the exported 'prediction' signature
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'nn'
    request.model_spec.signature_name = 'prediction'

    # Evenly spaced test points as a (7, 1) float32 column vector.
    # (The original aliased this as both `tests` and `temp_data`; one
    # name suffices.)
    tests = np.array([range(-1, 6, 1)]).transpose().astype(np.float32)

    # Set the tests as the input for prediction
    request.inputs['input'].CopyFrom(
        tf.contrib.util.make_tensor_proto(tests, shape=tests.shape))

    # Get prediction from server
    result = stub.Predict(request, 5.0)  # 5 second timeout

    # Compare to noise-free actual values: y = 0.5 * x + 2.5
    actual = np.sum(0.5 * tests + 2.5, 1)
    return result, actual


prediction, actual = do_prediction(hostport)
print('Prediction is: ', prediction)
print('Noise-free value is: ', actual)
| {
"alphanum_fraction": 0.7523012552,
"author": null,
"avg_line_length": 28.4523809524,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2b50f215c061a86dd2b4d380bdb87c2165c6c01d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "927ef4a5de0e78e9c15e5121e8f769088e31206c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lrakai/aws-dlami-tensorflow",
"max_forks_repo_path": "src/nn_client.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "927ef4a5de0e78e9c15e5121e8f769088e31206c",
"max_issues_repo_issues_event_max_datetime": "2018-02-12T22:52:34.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-02-12T22:52:25.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lrakai/aws-dlami-tensorflow",
"max_issues_repo_path": "src/nn_client.py",
"max_line_length": 75,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "927ef4a5de0e78e9c15e5121e8f769088e31206c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lrakai/aws-dlami-tensorflow",
"max_stars_repo_path": "src/nn_client.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 296,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1195
} |
#!/usr/bin/python
'''
Learning Machines
Taught by Patrick Hebron at NYU ITP
MNIST dataset helpers.
'''
import os
import gzip
import pickle
import numpy as np
def mnist_encode_one_hot_label(idx):
    '''Return the one-hot encoding of digit ``idx`` as a length-10 vector.'''
    one_hot = np.zeros(10)
    one_hot[idx] = 1.0
    return one_hot
def mnist_decode_one_hot_label(enc):
    '''Inverse of ``mnist_encode_one_hot_label``: recover the digit index
    as the position of the largest entry.'''
    return np.argmax(enc)
def mnist_get_accuracy(labels, guesses):
    '''Fraction of rows where the argmax of ``guesses`` matches the
    argmax of ``labels`` (i.e. classification accuracy).'''
    label_digits = np.argmax(labels, axis=1)
    guess_digits = np.argmax(guesses, axis=1)
    return np.mean(np.equal(label_digits, guess_digits).astype(np.float64))
class Mnist:
    """Loader for the MNIST dataset.

    Downloads (if necessary) and unpickles the classic mnist.pkl.gz,
    then exposes training / validation / testing splits as flattened
    784-vectors with one-hot labels.  When ``threshold`` is True, pixel
    values are binarized to 0.0/1.0.
    """

    def __init__(self, threshold = True):
        # Set pickle path:
        mnist_pickle_path = 'mnist.pkl.gz'
        # Download pickle, if necessary:
        if not os.path.exists( mnist_pickle_path ):
            # NOTE(review): deeplearning.net hosting may no longer be
            # available — confirm the URL before relying on the download.
            import urllib.request
            # urlretrieve replaces the deprecated URLopener API.
            urllib.request.urlretrieve(
                'http://deeplearning.net/data/mnist/mnist.pkl.gz',
                mnist_pickle_path )
        # Load pickle.  encoding='latin1' is required to read this
        # Python-2-era pickle under Python 3 (replaces the private
        # pickle._Unpickler workaround).
        with gzip.open(mnist_pickle_path, 'rb') as fh:
            training_data, validation_data, testing_data = pickle.load(
                fh, encoding='latin1' )
        # Format dataset:
        self.training_digits,   self.training_labels   = self.format_dataset( training_data,   threshold )
        self.validation_digits, self.validation_labels = self.format_dataset( validation_data, threshold )
        self.testing_digits,    self.testing_labels    = self.format_dataset( testing_data,    threshold )

    def getTrainingData(self, count = 0):
        """Return the training split, or a random batch of ``count`` items."""
        if count == 0:
            return ( self.training_digits, self.training_labels )
        else:
            return self.get_batch( count, self.training_digits, self.training_labels )

    def getValidationData(self, count = 0):
        """Return the validation split, or a random batch of ``count`` items."""
        if count == 0:
            return ( self.validation_digits, self.validation_labels )
        else:
            return self.get_batch( count, self.validation_digits, self.validation_labels )

    def getTestingData(self, count = 0):
        """Return the testing split, or a random batch of ``count`` items."""
        if count == 0:
            return ( self.testing_digits, self.testing_labels )
        else:
            return self.get_batch( count, self.testing_digits, self.testing_labels )

    @staticmethod
    def get_batch(count, digits, labels):
        """Return ``count`` (digit, label) pairs sampled without replacement.

        ``count`` is clamped to the number of available items.
        """
        total = len( digits )
        count = min( count, total )
        idxs = np.random.choice( np.arange( total ), count, replace=False )
        return ( digits[ idxs ], labels[ idxs ] )

    @staticmethod
    def format_dataset(dataset, threshold):
        """Flatten digits to 784-vectors and one-hot encode labels.

        :param dataset: pair (raw digit images, integer labels).
        :param threshold: binarize pixels to 0.0/1.0 when True.
        """
        digits = np.array( [ np.reshape( x, 784 ) for x in dataset[ 0 ] ] )
        labels = np.array( [ mnist_encode_one_hot_label( y ) for y in dataset[ 1 ] ] )
        # np.float was removed in NumPy >= 1.24; the builtin float is the
        # same float64 alias it used to resolve to.
        return ( ( digits > 0 ).astype( float ) if threshold else digits, labels )
| {
"alphanum_fraction": 0.7254261879,
"author": null,
"avg_line_length": 33.6219512195,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8133065176d9db57ef092d3746709a8e45af348a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-09-29T12:47:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-09-29T12:47:32.000Z",
"max_forks_repo_head_hexsha": "d261a3647f678784bd15641e39fbd03de59dc144",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dodiku/learning_machines_class",
"max_forks_repo_path": "restricted_boltzmann_machine/Mnist.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d261a3647f678784bd15641e39fbd03de59dc144",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dodiku/learning_machines_class",
"max_issues_repo_path": "restricted_boltzmann_machine/Mnist.py",
"max_line_length": 113,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d261a3647f678784bd15641e39fbd03de59dc144",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dodiku/learning_machines_class",
"max_stars_repo_path": "restricted_boltzmann_machine/Mnist.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 711,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2757
} |
[STATEMENT]
lemma mcont2mcont_tset [THEN lfp.mcont2mcont, cont_intro]:
shows mcont_tset: "mcont tSup tllist_ord Union (\<subseteq>) tset"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mcont local.tSup local.tllist_ord \<Union> (\<subseteq>) tset
[PROOF STEP]
including tllist.lifting
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mcont local.tSup local.tllist_ord \<Union> (\<subseteq>) tset
[PROOF STEP]
apply transfer
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mcont (\<lambda>A. (lSup (fst ` A), flat_lub b (snd ` (A \<inter> {(xs, uu_). lfinite xs})))) (\<lambda>(xs1, b1) (xs2, b2). if lfinite xs1 then b1 = b \<and> lprefix xs1 xs2 \<or> xs1 = xs2 \<and> flat_ord b b1 b2 else xs1 = xs2) \<Union> (\<subseteq>) (lset \<circ> fst)
[PROOF STEP]
apply(rule mcont_comp[OF _ mcont_lset])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mcont (\<lambda>A. (lSup (fst ` A), flat_lub b (snd ` (A \<inter> {(xs, uu_). lfinite xs})))) (\<lambda>(xs1, b1) (xs2, b2). if lfinite xs1 then b1 = b \<and> lprefix xs1 xs2 \<or> xs1 = xs2 \<and> flat_ord b b1 b2 else xs1 = xs2) lSup lprefix fst
[PROOF STEP]
unfolding mcont_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monotone (\<lambda>(xs1, b1) (xs2, b2). if lfinite xs1 then b1 = b \<and> lprefix xs1 xs2 \<or> xs1 = xs2 \<and> flat_ord b b1 b2 else xs1 = xs2) lprefix fst \<and> cont (\<lambda>A. (lSup (fst ` A), flat_lub b (snd ` (A \<inter> {(xs, uu_). lfinite xs})))) (\<lambda>(xs1, b1) (xs2, b2). if lfinite xs1 then b1 = b \<and> lprefix xs1 xs2 \<or> xs1 = xs2 \<and> flat_ord b b1 b2 else xs1 = xs2) lSup lprefix fst
[PROOF STEP]
by(auto intro: monotoneI contI ) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Coinductive_TLList_CCPO",
"hexsha": null,
"include": null,
"lang": null,
"length": 5,
"llama_tokens": 719,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
function exportDVHData(command)
%GUI to export a DVH data file starting with a DVH plot of one or more VOI's.
%VOI's must be renamed to agree with a standard list stored in optS.defaultVOINames.
%DVH data may then be exported to a delimited text file.
%
% Usage:
%   exportDVHData            - initialize the export GUI ('init')
%   exportDVHData(command)   - dispatch a GUI callback; command is one of:
%                              'init', 'addDVH', 'removeDVH', 'checknames',
%                              'SelectDVH', 'SelectPopupDVH', 'rename',
%                              'export', 'cancel' (case-insensitive)
%
%KU 04/14/2006
%
% Copyright 2010, Joseph O. Deasy, on behalf of the CERR development team.
%
% This file is part of The Computational Environment for Radiotherapy Research (CERR).
%
% CERR development has been led by: Aditya Apte, Divya Khullar, James Alaly, and Joseph O. Deasy.
%
% CERR has been financially supported by the US National Institutes of Health under multiple grants.
%
% CERR is distributed under the terms of the Lesser GNU Public License.
%
% This version of CERR is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% CERR is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
% without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
% See the GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with CERR. If not, see <http://www.gnu.org/licenses/>.

global planC;
global stateS;
persistent exportPlots;     % DVH plots queued for export; persists across callbacks
                            % NOTE(review): may hold stale entries if the export
                            % figure is closed and reopened — confirm intended.

indexS = planC{end};
uicolor = [.9 .9 .9];
units = 'normalized';

%Get DVH data from the existing DVH plot figure (tag 'DVHPlot').
h = findobj('tag', 'DVHPlot');
ud = get(h, 'userdata');
plots = ud.plots;
%Preallocate so strNames is defined even when plots is empty (the original
%loop left strNames undefined in that case, erroring on the line below).
strNames = cell(1, length(plots));
for i=1:length(plots)
    strNames{i} = plots(i).struct;
end
strNames = {strNames{:}};

%Get list of standard VOI names.
okList = stateS.optS.defaultVOINames;

if nargin == 0
    command = 'init';
end

switch lower(command)
    case 'init'
        %Build (or rebuild) the export figure and all of its controls.
        hFig = findobj('tag', 'DVHExportFigure');
        if ~isempty(hFig)
            delete(hFig);
        end
        screenSize = get(0,'ScreenSize');
        hFig = figure('Name', 'DVH Data Export', 'position',[screenSize(3)/10 screenSize(4)/8 screenSize(3)/10*8 screenSize(4)/10*5], 'MenuBar', 'none', 'NumberTitle', 'off', 'resize', 'off', 'Tag', 'DVHExportFigure');
        stateS.handles.figure = hFig;

        %Init List Boxes
        ud.handles.DVHList = uicontrol('units',units,'BackgroundColor',uicolor, 'Position',[.05 .10 .25 .85],'String', strNames, 'Style','listbox','Tag','DVHList');
        ud.handles.myDVHList = uicontrol('callback', 'exportDVHData(''SelectDVH'');','units',units,'BackgroundColor',uicolor, 'Position',[.35 .10 .25 .85], 'String', [], 'Style','listbox', 'Enable', 'on', 'Tag','myDVHList');
        %Make labels for listboxes
        uicontrol('units',units,'BackgroundColor',uicolor, 'Position',[.05 .95 .15 .04],'String', 'Calculated DVHs: ', 'Style','text', 'horizontalalignment', 'left');
        uicontrol('units',units,'BackgroundColor',uicolor, 'Position',[.35 .95 .15 .04],'String', 'Selected For Export: ', 'Style','text', 'horizontalalignment', 'left');
        %make buttons for adding and removing DVH's from list boxes.
        uicontrol('callback', 'exportDVHData(''addDVH'');', 'units',units,'BackgroundColor',uicolor, 'Position',[.3 .65 .05 .05],'String','-->', 'Style','pushbutton','Tag','DVHAdd');
        uicontrol('callback', 'exportDVHData(''removeDVH'');', 'units',units,'BackgroundColor',uicolor, 'Position',[.3 .55 .05 .05],'String','<--', 'Style','pushbutton','Tag','DVHRemove');
        %Make frame for checking and renaming VOI's.
        uicontrol('units',units,'BackgroundColor',uicolor, 'Position',[.65 .45 .3 .5],'String','frame', 'Style','frame','Tag','renameFrame');
        %Add pop-up menus for renaming VOI's.
        ud.handles.myDVHpopup = uicontrol('callback', 'exportDVHData(''SelectPopupDVH'');','units',units,'BackgroundColor',uicolor, 'Position',[.67 .67 .12 .05],'String', {''}, 'Style','popupmenu','Tag','myDVHpopup');
        ud.handles.okDVHList = uicontrol('units',units,'BackgroundColor',uicolor, 'Position',[.81 .67 .12 .05], 'String', okList, 'Style','popupmenu','Tag','okDVHList');
        %Make labels for pop-up menus.
        uicontrol('units',units,'BackgroundColor',uicolor, 'Position',[.66 .72 .13 .05],'String', 'Select VOI to rename', 'Style','text', 'horizontalalignment', 'left');
        uicontrol('units',units,'BackgroundColor',uicolor, 'Position',[.81 .72 .13 .05],'String', 'Select name from list', 'Style','text', 'horizontalalignment', 'left');
        %Make buttons for checking VOI names and renaming.
        uicontrol('callback', 'exportDVHData(''checknames'');', 'units',units,'BackgroundColor',uicolor, 'Position',[.70 .87 .2 .05],'String','Check VOI Names', 'Style','pushbutton','Tag','namecheck');
        uicontrol('callback', 'exportDVHData(''rename'');', 'units',units,'BackgroundColor',uicolor, 'Position',[.75 .50 .1 .05],'String','Rename', 'Style','pushbutton','Tag','rename');
        %Make buttons for export and cancel.
        uicontrol('callback', 'exportDVHData(''export'');', 'units',units,'BackgroundColor',uicolor, 'Position',[.65 .35 .1 .05],'String','Export', 'Style','pushbutton','Tag','export');
        uicontrol('callback', 'exportDVHData(''cancel'');', 'units',units,'BackgroundColor',uicolor, 'Position',[.85 .35 .1 .05],'String','Cancel', 'Style','pushbutton','Tag','cancel');
        set(hFig, 'userdata', ud);
        return;

    case 'adddvh' %add current DVH to the "selected for export" list.
        hFig = findobj('tag', 'DVHExportFigure');
        ud = get(hFig, 'userdata');
        index_selected = get(ud.handles.DVHList,'Value');
        exportList = get(findobj('tag', 'myDVHList'), 'String');
        if isempty(exportList)
            exportPlots = plots(index_selected);
        else
            %Reject duplicates by comparing the volume vectors.
            for i = 1:length(exportPlots)
                if isequal(plots(index_selected).volsV, exportPlots(i).volsV)
                    warndlg('That DVH has already been selected for export!', 'Warning', 'modal');
                    break
                elseif i == length(exportPlots)
                    exportPlots = [exportPlots, plots(index_selected)];
                end
            end
        end
        %Refresh both the export listbox and the rename popup.
        for i=1:length(exportPlots)
            exportNames{i} = exportPlots(i).struct;
        end
        exportNames = {exportNames{:}};
        set(findobj('tag', 'myDVHList'), 'String', exportNames);
        set(findobj('tag', 'myDVHpopup'), 'String', exportNames);

    case 'removedvh' %Remove DVH from the "selected for export" list.
        hFig = findobj('tag', 'DVHExportFigure');
        exportList = get(findobj('tag', 'myDVHList'), 'String');
        if isempty(exportList)
            return
        end
        ud = get(hFig, 'userdata');
        index_selected = get(ud.handles.myDVHList,'Value');
        exportPlots(index_selected) = [];
        if length(exportPlots) >= 1
            for i=1:length(exportPlots)
                exportNames{i} = exportPlots(i).struct;
            end
            exportNames = {exportNames{:}};
            set(findobj('tag', 'myDVHList'), 'String', exportNames);
            set(findobj('tag', 'myDVHpopup'), 'String', exportNames);
        else
            set(findobj('tag', 'myDVHList'), 'String', []);
            set(findobj('tag', 'myDVHpopup'), 'String', {''});
        end
        %Fix index if now out of range.
        if index_selected > length(exportPlots) && length(exportPlots) >= 1
            set(findobj('tag', 'myDVHList'), 'Value', index_selected-1);
            set(findobj('tag', 'myDVHpopup'), 'Value', index_selected-1);
        end

    case 'checknames' %Check VOI names against standard list in optS.defaultVOINames.
        exportList = get(findobj('tag', 'myDVHList'), 'String');
        if isempty(exportList)
            warndlg('No DVHs have been selected for export!', 'Warning', 'modal');
            return
        end
        %Collect names that do not match the standard list (case-insensitive).
        k = 0;
        for i = 1:length(exportPlots)
            for j = 1:length(okList)
                if strcmpi(exportPlots(i).struct, okList{j})
                    break
                elseif j == length(okList)
                    k = k+1;
                    badNames{k} = exportPlots(i).struct;
                end
            end
        end
        if k == 0
            sentence1 = ('Ready for export!');
            %Bug fix: msgbox(msg,'modal') treats 'modal' as the dialog TITLE,
            %not the CreateMode. Supply a title and 'modal' explicitly, matching
            %the 3-argument warndlg calls used elsewhere in this function.
            Zmsgbox=msgbox(sentence1, 'DVH Export', 'modal');
            waitfor(Zmsgbox);
        else
            sentence1 = {'The following VOIs must be renamed before the DVHs can be exported:',...
                '',...
                badNames{:}};
            Zmsgbox=msgbox(sentence1);
            waitfor(Zmsgbox);
        end

    case 'selectdvh' %Synchronize selected DVH in export list and pop-up menu.
        hFig = findobj('tag', 'DVHExportFigure');
        ud = get(hFig, 'userdata');
        index_selected = get(ud.handles.myDVHList,'Value');
        if index_selected >=1
            set(findobj('tag', 'myDVHpopup'), 'Value', index_selected);
        else
            set(findobj('tag', 'myDVHList'), 'Value', 1);
        end

    case 'selectpopupdvh' %Synchronize selected DVH in export list and pop-up menu.
        hFig = findobj('tag', 'DVHExportFigure');
        ud = get(hFig, 'userdata');
        index_selected = get(ud.handles.myDVHpopup,'Value');
        set(findobj('tag', 'myDVHList'), 'Value', index_selected);

    case 'rename' %Rename VOI's
        hFig = findobj('tag', 'DVHExportFigure');
        exportList = get(findobj('tag', 'myDVHList'), 'String');
        if isempty(exportList)
            warndlg('Nothing to rename!', 'Warning', 'modal');
            return
        end
        ud = get(hFig, 'userdata');
        export_index = get(ud.handles.myDVHpopup,'Value');
        OK_index = get(ud.handles.okDVHList,'Value');
        Zquestion=questdlg({'Please confirm:',...
            '',...
            ['The exported DVH for "' exportPlots(export_index).struct '" will be renamed "' okList{OK_index} '."']},...
            'Confirm name change', 'Rename', 'Cancel', 'Cancel');
        if strcmpi(Zquestion, 'rename')
            exportPlots(export_index).struct = okList{OK_index};
            for i=1:length(exportPlots)
                exportNames{i} = exportPlots(i).struct;
            end
            exportNames = {exportNames{:}};
            set(findobj('tag', 'myDVHList'), 'String', exportNames);
            set(findobj('tag', 'myDVHpopup'), 'String', exportNames);
        end

    case 'export' %Export to file.
        exportList = get(findobj('tag', 'myDVHList'), 'String');
        if isempty(exportList)
            warndlg('No DVHs have been selected for export!', 'Warning', 'modal');
            return
        end
        k = 0;
        for i = 1:length(exportPlots) %Check that all the names are OK.
            for j = 1:length(okList)
                if strcmpi(exportPlots(i).struct, okList{j})
                    break
                elseif j == length(okList)
                    k = k+1;
                    badNames{k} = exportPlots(i).struct;
                end
            end
        end
        if k == 0 %%%% All the names are OK. Do the export!!!!
            exportData = [];
            %First record in the temp file is the number of DVHs.
            dlmwrite('tempfile', length(exportPlots));
            for i = 1:length(exportPlots)
                exportData(i).struct = exportPlots(i).struct;
                exportData(i).numDoseVals = length(exportPlots(i).xVals);
                exportData(i).array(:,1) = exportPlots(i).xVals';
                %Convert dose units from Gy to cGy.
                exportData(i).array(:,1) = 100*(exportData(i).array(:,1));
                exportData(i).array(:,2) = exportPlots(i).yVals';
                %NOTE(review): volsV is padded with a leading 0 — presumably
                %because it has one fewer element than xVals; confirm.
                exportData(i).array(:,3) = [0;exportPlots(i).volsV'];
                %Write to temporary file
                fid = fopen('tempfile', 'a');
                fprintf(fid, '\n%s\n', exportData(i).struct);
                fclose(fid);
                dlmwrite('tempfile', exportData(i).numDoseVals, '-append');
                dlmwrite('tempfile', exportData(i).array, '-append', 'delimiter', '\t', 'precision', 6);
            end
            %Save file: suggest a name derived from the current CERR plan file.
            [pathstr, name, ext] = fileparts(stateS.CERRFile);
            if strcmpi(ext, '.bz2')
                %Strip the inner extension of a .mat.bz2-style compressed name.
                [pathstr, name, ext] = fileparts(fullfile(pathstr,name));
            end
            [fname, pname] = uiputfile('*.txt', 'Save as', fullfile(pathstr,name));
            if isequal(fname,0) || isequal(pname,0)
                CERRStatusString('Save cancelled. Ready.');
                delete tempfile;
                return;
            end
            saveFile = fullfile(pname, fname);
            copyfile('tempfile', saveFile);
            delete tempfile;
            hFig = findobj('tag', 'DVHExportFigure');
            delete(hFig);
        else
            sentence1 = {'The following VOIs must be renamed before the DVHs can be exported:',...
                '',...
                badNames{:}};
            Zmsgbox=msgbox(sentence1);
            waitfor(Zmsgbox);
        end

    case 'cancel'
        %Close the export figure without exporting.
        hFig = findobj('tag', 'DVHExportFigure');
        delete(hFig);
end
"alphanum_fraction": null,
"author": "cerr",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/cerr-CERR/CERR-d320754abad9dcb78508ab69f33ae9f644202114/CERR_core/PlanAnalysis/DoseVolumeHistograms/exportDVHData.m",
"reason": null,
"repo": "CERR",
"save_path": "github-repos/MATLAB/cerr-CERR",
"sha": "d320754abad9dcb78508ab69f33ae9f644202114",
"size": null
} |
[STATEMENT]
lemma distinguished_bounded_support:
assumes "x distinguishes P from Q"
obtains y where "supp y \<subseteq> supp P" and "y distinguishes P from Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
let ?B = "{p \<bullet> x|p. supp P \<sharp>* p}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have "supp P supports ?B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. supp P supports {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
unfolding supports_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>a b. a \<notin> supp P \<and> b \<notin> supp P \<longrightarrow> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
proof (clarify)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a b. \<lbrakk>a \<notin> supp P; b \<notin> supp P\<rbrakk> \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
fix a b
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a b. \<lbrakk>a \<notin> supp P; b \<notin> supp P\<rbrakk> \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
assume a: "a \<notin> supp P" and b: "b \<notin> supp P"
[PROOF STATE]
proof (state)
this:
a \<notin> supp P
b \<notin> supp P
goal (1 subgoal):
1. \<And>a b. \<lbrakk>a \<notin> supp P; b \<notin> supp P\<rbrakk> \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
have "(a \<rightleftharpoons> b) \<bullet> ?B \<subseteq> ?B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>xa. xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
fix x'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>xa. xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
assume "x' \<in> (a \<rightleftharpoons> b) \<bullet> ?B"
[PROOF STATE]
proof (state)
this:
x' \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. \<And>xa. xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x' \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
obtain p where 1: "x' = (a \<rightleftharpoons> b) \<bullet> p \<bullet> x" and 2: "supp P \<sharp>* p"
[PROOF STATE]
proof (prove)
using this:
x' \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. (\<And>p. \<lbrakk>x' = (a \<rightleftharpoons> b) \<bullet> p \<bullet> x; supp P \<sharp>* p\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp add: permute_set_def)
[PROOF STATE]
proof (state)
this:
x' = (a \<rightleftharpoons> b) \<bullet> p \<bullet> x
supp P \<sharp>* p
goal (1 subgoal):
1. \<And>xa. xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
let ?q = "(a \<rightleftharpoons> b) + p"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>xa. xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
from 1
[PROOF STATE]
proof (chain)
picking this:
x' = (a \<rightleftharpoons> b) \<bullet> p \<bullet> x
[PROOF STEP]
have "x' = ?q \<bullet> x"
[PROOF STATE]
proof (prove)
using this:
x' = (a \<rightleftharpoons> b) \<bullet> p \<bullet> x
goal (1 subgoal):
1. x' = ((a \<rightleftharpoons> b) + p) \<bullet> x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x' = ((a \<rightleftharpoons> b) + p) \<bullet> x
goal (1 subgoal):
1. \<And>xa. xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
x' = ((a \<rightleftharpoons> b) + p) \<bullet> x
goal (1 subgoal):
1. \<And>xa. xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
from a and b and 2
[PROOF STATE]
proof (chain)
picking this:
a \<notin> supp P
b \<notin> supp P
supp P \<sharp>* p
[PROOF STEP]
have "supp P \<sharp>* ?q"
[PROOF STATE]
proof (prove)
using this:
a \<notin> supp P
b \<notin> supp P
supp P \<sharp>* p
goal (1 subgoal):
1. supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
[PROOF STEP]
by (metis fresh_perm fresh_star_def fresh_star_plus swap_atom_simps(3))
[PROOF STATE]
proof (state)
this:
supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
goal (1 subgoal):
1. \<And>xa. xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
x' = ((a \<rightleftharpoons> b) + p) \<bullet> x
supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
[PROOF STEP]
show "x' \<in> ?B"
[PROOF STATE]
proof (prove)
using this:
x' = ((a \<rightleftharpoons> b) + p) \<bullet> x
supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
goal (1 subgoal):
1. x' \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x' \<in> {p \<bullet> x |p. supp P \<sharp>* p}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. \<And>a b. \<lbrakk>a \<notin> supp P; b \<notin> supp P\<rbrakk> \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. \<And>a b. \<lbrakk>a \<notin> supp P; b \<notin> supp P\<rbrakk> \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
have "?B \<subseteq> (a \<rightleftharpoons> b) \<bullet> ?B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>xa. xa \<in> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
fix x'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>xa. xa \<in> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
assume "x' \<in> ?B"
[PROOF STATE]
proof (state)
this:
x' \<in> {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. \<And>xa. xa \<in> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x' \<in> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
obtain p where 1: "x' = p \<bullet> x" and 2: "supp P \<sharp>* p"
[PROOF STATE]
proof (prove)
using this:
x' \<in> {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. (\<And>p. \<lbrakk>x' = p \<bullet> x; supp P \<sharp>* p\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x' = p \<bullet> x
supp P \<sharp>* p
goal (1 subgoal):
1. \<And>xa. xa \<in> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
let ?q = "(a \<rightleftharpoons> b) + p"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>xa. xa \<in> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
from 1
[PROOF STATE]
proof (chain)
picking this:
x' = p \<bullet> x
[PROOF STEP]
have "x' = (a \<rightleftharpoons> b) \<bullet> ?q \<bullet> x"
[PROOF STATE]
proof (prove)
using this:
x' = p \<bullet> x
goal (1 subgoal):
1. x' = (a \<rightleftharpoons> b) \<bullet> ((a \<rightleftharpoons> b) + p) \<bullet> x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x' = (a \<rightleftharpoons> b) \<bullet> ((a \<rightleftharpoons> b) + p) \<bullet> x
goal (1 subgoal):
1. \<And>xa. xa \<in> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
x' = (a \<rightleftharpoons> b) \<bullet> ((a \<rightleftharpoons> b) + p) \<bullet> x
goal (1 subgoal):
1. \<And>xa. xa \<in> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
from a and b and 2
[PROOF STATE]
proof (chain)
picking this:
a \<notin> supp P
b \<notin> supp P
supp P \<sharp>* p
[PROOF STEP]
have "supp P \<sharp>* ?q"
[PROOF STATE]
proof (prove)
using this:
a \<notin> supp P
b \<notin> supp P
supp P \<sharp>* p
goal (1 subgoal):
1. supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
[PROOF STEP]
by (metis fresh_perm fresh_star_def fresh_star_plus swap_atom_simps(3))
[PROOF STATE]
proof (state)
this:
supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
goal (1 subgoal):
1. \<And>xa. xa \<in> {p \<bullet> x |p. supp P \<sharp>* p} \<Longrightarrow> xa \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
x' = (a \<rightleftharpoons> b) \<bullet> ((a \<rightleftharpoons> b) + p) \<bullet> x
supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
[PROOF STEP]
show "x' \<in> (a \<rightleftharpoons> b) \<bullet> ?B"
[PROOF STATE]
proof (prove)
using this:
x' = (a \<rightleftharpoons> b) \<bullet> ((a \<rightleftharpoons> b) + p) \<bullet> x
supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
goal (1 subgoal):
1. x' \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
using mem_permute_iff
[PROOF STATE]
proof (prove)
using this:
x' = (a \<rightleftharpoons> b) \<bullet> ((a \<rightleftharpoons> b) + p) \<bullet> x
supp P \<sharp>* ((a \<rightleftharpoons> b) + p)
(?p \<bullet> ?x \<in> ?p \<bullet> ?X) = (?x \<in> ?X)
goal (1 subgoal):
1. x' \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x' \<in> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
{p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. \<And>a b. \<lbrakk>a \<notin> supp P; b \<notin> supp P\<rbrakk> \<Longrightarrow> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> {p \<bullet> x |p. supp P \<sharp>* p}
{p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
show "(a \<rightleftharpoons> b) \<bullet> ?B = ?B"
[PROOF STATE]
proof (prove)
using this:
(a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> {p \<bullet> x |p. supp P \<sharp>* p}
{p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. (a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
(a \<rightleftharpoons> b) \<bullet> {p \<bullet> x |p. supp P \<sharp>* p} = {p \<bullet> x |p. supp P \<sharp>* p}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
supp P supports {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
supp P supports {p \<bullet> x |p. supp P \<sharp>* p}
[PROOF STEP]
have supp_B_subset_supp_P: "supp ?B \<subseteq> supp P"
[PROOF STATE]
proof (prove)
using this:
supp P supports {p \<bullet> x |p. supp P \<sharp>* p}
goal (1 subgoal):
1. supp {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> supp P
[PROOF STEP]
by (metis (erased, lifting) finite_supp supp_is_subset)
[PROOF STATE]
proof (state)
this:
supp {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> supp P
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
supp {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> supp P
[PROOF STEP]
have finite_supp_B: "finite (supp ?B)"
[PROOF STATE]
proof (prove)
using this:
supp {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> supp P
goal (1 subgoal):
1. finite (supp {p \<bullet> x |p. supp P \<sharp>* p})
[PROOF STEP]
using finite_supp rev_finite_subset
[PROOF STATE]
proof (prove)
using this:
supp {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> supp P
finite (supp ?x)
\<lbrakk>finite ?B; ?A \<subseteq> ?B\<rbrakk> \<Longrightarrow> finite ?A
goal (1 subgoal):
1. finite (supp {p \<bullet> x |p. supp P \<sharp>* p})
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
finite (supp {p \<bullet> x |p. supp P \<sharp>* p})
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have "?B \<subseteq> (\<lambda>p. p \<bullet> x) ` UNIV"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> range (\<lambda>p. p \<bullet> x)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
{p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> range (\<lambda>p. p \<bullet> x)
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
{p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> range (\<lambda>p. p \<bullet> x)
[PROOF STEP]
have "|?B| \<le>o |UNIV :: perm set|"
[PROOF STATE]
proof (prove)
using this:
{p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> range (\<lambda>p. p \<bullet> x)
goal (1 subgoal):
1. |{p \<bullet> x |p. supp P \<sharp>* p}| \<le>o |UNIV|
[PROOF STEP]
by (rule surj_imp_ordLeq)
[PROOF STATE]
proof (state)
this:
|{p \<bullet> x |p. supp P \<sharp>* p}| \<le>o |UNIV|
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
|{p \<bullet> x |p. supp P \<sharp>* p}| \<le>o |UNIV|
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have "|UNIV :: perm set| <o |UNIV :: 'idx set|"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. |UNIV| <o |UNIV|
[PROOF STEP]
by (metis card_idx_perm)
[PROOF STATE]
proof (state)
this:
|UNIV| <o |UNIV|
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
|UNIV| <o |UNIV|
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have "|UNIV :: 'idx set| \<le>o natLeq +c |UNIV :: 'idx set|"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. |UNIV| \<le>o natLeq +c |UNIV|
[PROOF STEP]
by (metis Cnotzero_UNIV ordLeq_csum2)
[PROOF STATE]
proof (state)
this:
|UNIV| \<le>o natLeq +c |UNIV|
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
|{p \<bullet> x |p. supp P \<sharp>* p}| <o natLeq +c |UNIV|
[PROOF STEP]
have card_B: "|?B| <o natLeq +c |UNIV :: 'idx set|"
[PROOF STATE]
proof (prove)
using this:
|{p \<bullet> x |p. supp P \<sharp>* p}| <o natLeq +c |UNIV|
goal (1 subgoal):
1. |{p \<bullet> x |p. supp P \<sharp>* p}| <o natLeq +c |UNIV|
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
|{p \<bullet> x |p. supp P \<sharp>* p}| <o natLeq +c |UNIV|
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
let ?y = "Conj (Abs_bset ?B) :: ('idx, 'pred, 'act) formula"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
from finite_supp_B and card_B and supp_B_subset_supp_P
[PROOF STATE]
proof (chain)
picking this:
finite (supp {p \<bullet> x |p. supp P \<sharp>* p})
|{p \<bullet> x |p. supp P \<sharp>* p}| <o natLeq +c |UNIV|
supp {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> supp P
[PROOF STEP]
have "supp ?y \<subseteq> supp P"
[PROOF STATE]
proof (prove)
using this:
finite (supp {p \<bullet> x |p. supp P \<sharp>* p})
|{p \<bullet> x |p. supp P \<sharp>* p}| <o natLeq +c |UNIV|
supp {p \<bullet> x |p. supp P \<sharp>* p} \<subseteq> supp P
goal (1 subgoal):
1. supp (Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})) \<subseteq> supp P
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
supp (Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})) \<subseteq> supp P
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
supp (Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})) \<subseteq> supp P
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have "?y distinguishes P from Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p}) distinguishes P from Q
[PROOF STEP]
unfolding is_distinguishing_formula_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p}) \<and> \<not> Q \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. P \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
2. \<not> Q \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
x distinguishes P from Q
[PROOF STEP]
show "P \<Turnstile> ?y"
[PROOF STATE]
proof (prove)
using this:
x distinguishes P from Q
goal (1 subgoal):
1. P \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
[PROOF STEP]
by (auto simp add: card_B finite_supp_B) (metis is_distinguishing_formula_def supp_perm_eq valid_eqvt)
[PROOF STATE]
proof (state)
this:
P \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
goal (1 subgoal):
1. \<not> Q \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> Q \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
x distinguishes P from Q
[PROOF STEP]
show "\<not> Q \<Turnstile> ?y"
[PROOF STATE]
proof (prove)
using this:
x distinguishes P from Q
goal (1 subgoal):
1. \<not> Q \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
[PROOF STEP]
by (auto simp add: card_B finite_supp_B) (metis is_distinguishing_formula_def permute_zero fresh_star_zero)
[PROOF STATE]
proof (state)
this:
\<not> Q \<Turnstile> Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p}) distinguishes P from Q
goal (1 subgoal):
1. (\<And>y. \<lbrakk>supp y \<subseteq> supp P; y distinguishes P from Q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
supp (Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})) \<subseteq> supp P
Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p}) distinguishes P from Q
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
supp (Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p})) \<subseteq> supp P
Conj (Abs_bset {p \<bullet> x |p. supp P \<sharp>* p}) distinguishes P from Q
goal (1 subgoal):
1. thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Modal_Logics_for_NTS_Equivalence_Implies_Bisimilarity",
"hexsha": null,
"include": null,
"lang": null,
"length": 92,
"llama_tokens": 9705,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
[STATEMENT]
(* No fromArr image is an identity. Grounds (visible in the chained facts
   below): fromArr x is either MkArr False True [x] (when x \<in> Arr) or null
   (fromArr_def); by ide_char the only identities are Zero = MkIde False and
   One = MkIde True, and the metis step separates these from MkArr values via
   Dom.simps(1)/Cod.simps(1). The null case is excluded through ide_def,
   since ide a requires a \<cdot> a \<noteq> null. *)
lemma not_ide_fromArr:
shows "\<not> ide (fromArr x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> ide (fromArr x)
[PROOF STEP]
using fromArr_def ide_char ide_def Zero_def One_def
[PROOF STATE]
proof (prove)
using this:
fromArr ?x \<equiv> if ?x \<in> Arr then MkArr False True [?x] else null
ide ?f = (?f = Zero \<or> ?f = One)
ide ?a \<equiv> ?a \<cdot> ?a \<noteq> null \<and> (\<forall>f. (f \<cdot> ?a \<noteq> null \<longrightarrow> f \<cdot> ?a = f) \<and> (?a \<cdot> f \<noteq> null \<longrightarrow> ?a \<cdot> f = f))
Zero \<equiv> MkIde False
One \<equiv> MkIde True
goal (1 subgoal):
1. \<not> ide (fromArr x)
[PROOF STEP]
by (metis Cod.simps(1) Dom.simps(1))
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Category3_FreeCategory",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 291,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
(* Theory Ids: concrete identifier facts for differential dynamic logic.
   The former parametric locale "ids" (left commented out below) fixed
   distinct variable/function/predicate identifiers; it has since been
   instantiated with the fixed names Ix, Iy, Iz, Iw from Syntax, and the
   lemmas below re-prove the old locale assumptions for those names. *)
theory "Ids"
imports Complex_Main
"HOL-Library.Code_Target_Int"
"Syntax"
begin
(*Differential dynamic logic can be defined for any finite types, given a
few elements of those types (so that we can generate axioms). *)
(*locale ids =
fixes vid1 :: ident
fixes vid2 :: ident
fixes vid3 :: ident
fixes is_vid1 :: "ident \<Rightarrow> bool"
fixes fid1 :: ident
fixes fid2 :: ident
fixes fid3 :: ident
fixes pid1 :: ident
fixes pid2 :: ident
fixes pid3 :: ident
fixes pid4 :: ident
assumes vne12:"vid1 \<noteq> vid2"
assumes vne23:"vid2 \<noteq> vid3"
assumes vne13:"vid1 \<noteq> vid3"
assumes fne12:"fid1 \<noteq> fid2"
assumes fne23:"fid2 \<noteq> fid3"
assumes fne13:"fid1 \<noteq> fid3"
assumes pne12:"pid1 \<noteq> pid2"
assumes pne23:"pid2 \<noteq> pid3"
assumes pne13:"pid1 \<noteq> pid3"
assumes pne14:"pid1 \<noteq> pid4"
assumes pne24:"pid2 \<noteq> pid4"
assumes pne34:"pid3 \<noteq> pid4"
context ids begin*)
(* Pairwise distinctness of the canonical variable identifiers Ix/Iy/Iz. *)
lemma vne12:"Ix \<noteq> Iy" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma vne23:"Iy \<noteq> Iz" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma vne13:"Ix \<noteq> Iz" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
(* NOTE(review): fne12/fne23/fne13 and pne12/pne23/pne13 restate vne12/vne23/vne13
   verbatim — after instantiating the locale, all identifier classes share
   Ix/Iy/Iz. Presumably kept so proofs referring to the old locale assumption
   names (fne*, pne*) still resolve; confirm before deduplicating. *)
lemma fne12:"Ix \<noteq> Iy" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma fne23:"Iy \<noteq> Iz" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma fne13:"Ix \<noteq> Iz" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma pne12:"Ix \<noteq> Iy" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma pne23:"Iy \<noteq> Iz" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma pne13:"Ix \<noteq> Iz" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
(* pne14/pne24/pne34 additionally separate Iw from the other three. *)
lemma pne14:"Ix \<noteq> Iw" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma pne24:"Iy \<noteq> Iw" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
lemma pne34:"Iz \<noteq> Iw" apply (auto simp add: Ix_def Iy_def Iz_def Iw_def)using Ix.abs_eq Ix.rep_eq Iy.abs_eq Iy.rep_eq Iz.abs_eq Iz.rep_eq Iw.abs_eq Iw.rep_eq by auto
(* Simp-friendly rewrite set: orients every identifier (in)equality to False,
   in both argument orders, so simp can discharge identifier case splits. *)
lemma id_simps:
"(Ix = Iy) = False" "(Iy = Iz) = False" "(Ix = Iz) = False"
"(Ix = Iy) = False" "(Iy = Iz) = False" "(Ix = Iz) = False"
"(Ix = Iy) = False" "(Iy = Iz) = False" "(Ix = Iz) = False"
"(Ix = Iw) = False" "(Iy = Iw) = False" "(Iz = Iw) = False"
"(Iy = Ix) = False" "(Iz = Iy) = False" "(Iz = Ix) = False"
"(Iy = Ix) = False" "(Iz = Iy) = False" "(Iz = Ix) = False"
"(Iy = Ix) = False" "(Iz = Iy) = False" "(Iz = Ix) = False"
"(Iw = Ix) = False" "(Iw = Iy) = False" "(Iw = Iz) = False"
using vne12 vne23 vne13 fne12 fne23 fne13 pne12 pne23 pne13 pne14 pne24 pne34 by auto
(* Function applied to one argument *)
definition f1::"ident \<Rightarrow> ident \<Rightarrow> trm"
where "f1 f x = Function f (singleton (Var x))"
(* Function applied to zero arguments (simulates a constant symbol given meaning by the interpretation) *)
definition f0::"ident \<Rightarrow> trm"
where "f0 f = Function f empty"
(* Predicate applied to one argument *)
definition p1::"ident \<Rightarrow> ident \<Rightarrow> formula"
where "p1 p x = Prop p (singleton (Var x))"
(* Predicational *)
definition P::"ident \<Rightarrow> formula"
where "P p = Predicational p"
end
| {
"alphanum_fraction": null,
"author": "LS-Lab",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/isabelle/LS-Lab-Isabelle-dL/Isabelle-dL-97770ed9ca8d6a633c59d11d799247f44cc62dc2/Ids.thy",
"reason": null,
"repo": "Isabelle-dL",
"save_path": "github-repos/isabelle/LS-Lab-Isabelle-dL",
"sha": "97770ed9ca8d6a633c59d11d799247f44cc62dc2",
"size": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.