MODULE module_data_soa_vbs
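! Data module for the SOA/VBS (volatility basis set) secondary organic
! aerosol scheme: solver settings, species and pointer indices, physical
! constants, and modal aerosol parameters.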
IMPLICIT NONE
INTEGER NP
PARAMETER (NP = 8)
INTEGER MAXITS
PARAMETER (MAXITS = 100)
REAL TOLF
PARAMETER (TOLF = 1.E-09)
REAL TOLMIN
PARAMETER (TOLMIN = 1.E-12)
REAL TOLX
PARAMETER (TOLX = 1.E-10)
REAL STPMX
PARAMETER (STPMX = 100.)
REAL c303, c302
PARAMETER (c303=19.83, c302=5417.4)
INTEGER lcva, lcvb, lspcv, ldesn
PARAMETER (lcva=4,lcvb=4, lspcv=lcva+lcvb)
PARAMETER (ldesn=13)
INTEGER laerdvc, lnonaerdvc, l1ae, laero, imodes, aspec
PARAMETER (laerdvc=39,lnonaerdvc=8+lspcv)
PARAMETER (l1ae=laerdvc+lnonaerdvc)
PARAMETER (laero=4,imodes=4,aspec=1)
INTEGER aemiss
PARAMETER (aemiss=4)
INTEGER, PARAMETER :: ldroga=6
INTEGER, PARAMETER :: ldrogb=3
INTEGER, PARAMETER :: ldrogr=1
INTEGER, PARAMETER :: ldrog_vbs=ldroga+ldrogb+ldrogr
INTEGER orgaer
PARAMETER (orgaer=2)
INTEGER n_ae_vis_spc
PARAMETER (n_ae_vis_spc=2)
INTEGER idcvw
PARAMETER (idcvw=1)
INTEGER ibext
PARAMETER (ibext=2)
INTEGER vso4aj
PARAMETER (vso4aj=1)
INTEGER vso4ai
PARAMETER (vso4ai=2)
INTEGER vnh4aj
PARAMETER (vnh4aj=3)
INTEGER vnh4ai
PARAMETER (vnh4ai=4)
INTEGER vno3aj
PARAMETER (vno3aj=5)
INTEGER vno3ai
PARAMETER (vno3ai=6)
INTEGER vnaaj
PARAMETER (vnaaj=7)
INTEGER vnaai
PARAMETER (vnaai=8)
INTEGER vclaj
PARAMETER (vclaj=9)
INTEGER vclai
PARAMETER (vclai=10)
INTEGER, PARAMETER :: vasoa1j=11
INTEGER, PARAMETER :: vasoa1i=12
INTEGER, PARAMETER :: vasoa2j=13
INTEGER, PARAMETER :: vasoa2i=14
INTEGER, PARAMETER :: vasoa3j=15
INTEGER, PARAMETER :: vasoa3i=16
INTEGER, PARAMETER :: vasoa4j=17
INTEGER, PARAMETER :: vasoa4i=18
INTEGER, PARAMETER :: vbsoa1j=19
INTEGER, PARAMETER :: vbsoa1i=20
INTEGER, PARAMETER :: vbsoa2j=21
INTEGER, PARAMETER :: vbsoa2i=22
INTEGER, PARAMETER :: vbsoa3j=23
INTEGER, PARAMETER :: vbsoa3i=24
INTEGER, PARAMETER :: vbsoa4j=25
INTEGER, PARAMETER :: vbsoa4i=26
INTEGER vorgpaj
PARAMETER (vorgpaj=27)
INTEGER vorgpai
PARAMETER (vorgpai=28)
INTEGER vecj
PARAMETER (vecj=29)
INTEGER veci
PARAMETER (veci=30)
INTEGER vp25aj
PARAMETER (vp25aj=31)
INTEGER vp25ai
PARAMETER (vp25ai=32)
INTEGER vantha
PARAMETER (vantha=33)
INTEGER vseas
PARAMETER (vseas=34)
INTEGER vsoila
PARAMETER (vsoila=35)
INTEGER vnu0
PARAMETER (vnu0=36)
INTEGER vac0
PARAMETER (vac0=37)
INTEGER vcorn
PARAMETER (vcorn=38)
INTEGER vh2oaj
PARAMETER (vh2oaj=39)
INTEGER vh2oai
PARAMETER (vh2oai=40)
INTEGER vnu3
PARAMETER (vnu3=41)
INTEGER vac3
PARAMETER (vac3=42)
INTEGER vcor3
PARAMETER (vcor3=43)
INTEGER vsulf
PARAMETER (vsulf=44)
INTEGER vhno3
PARAMETER (vhno3=45)
INTEGER vnh3
PARAMETER (vnh3=46)
INTEGER vhcl
PARAMETER (vhcl=47)
INTEGER, PARAMETER :: vcvasoa1=48
INTEGER, PARAMETER :: vcvasoa2=49
INTEGER, PARAMETER :: vcvasoa3=50
INTEGER, PARAMETER :: vcvasoa4=51
INTEGER, PARAMETER :: vcvbsoa1=52
INTEGER, PARAMETER :: vcvbsoa2=53
INTEGER, PARAMETER :: vcvbsoa3=54
INTEGER, PARAMETER :: vcvbsoa4=55
INTEGER naspcssed
PARAMETER (naspcssed=6)
INTEGER vsnnuc
PARAMETER (vsnnuc=1)
INTEGER vsnacc
PARAMETER (vsnacc=2)
INTEGER vsncor
PARAMETER (vsncor=3)
INTEGER vsmnuc
PARAMETER (vsmnuc=4)
INTEGER vsmacc
PARAMETER (vsmacc=5)
INTEGER vsmcor
PARAMETER (vsmcor=6)
INTEGER naspcsdep
PARAMETER (naspcsdep=7)
INTEGER vdnnuc
PARAMETER (vdnnuc=1)
INTEGER vdnacc
PARAMETER (vdnacc=2)
INTEGER vdncor
PARAMETER (vdncor=3)
INTEGER vdmnuc
PARAMETER (vdmnuc=4)
INTEGER vdmacc
PARAMETER (vdmacc=5)
INTEGER vdmfine
PARAMETER (vdmfine=6)
INTEGER vdmcor
PARAMETER (vdmcor=7)
INTEGER, PARAMETER :: palk4=1
INTEGER, PARAMETER :: palk5=2
INTEGER, PARAMETER :: pole1=3
INTEGER, PARAMETER :: pole2=4
INTEGER, PARAMETER :: paro1=5
INTEGER, PARAMETER :: paro2=6
INTEGER, PARAMETER :: pisop=7
INTEGER, PARAMETER :: pterp=8
INTEGER, PARAMETER :: psesq=9
INTEGER, PARAMETER :: pbrch=10
INTEGER, PARAMETER :: pasoa1=1
INTEGER, PARAMETER :: pasoa2=2
INTEGER, PARAMETER :: pasoa3=3
INTEGER, PARAMETER :: pasoa4=4
INTEGER, PARAMETER :: pbsoa1=5
INTEGER, PARAMETER :: pbsoa2=6
INTEGER, PARAMETER :: pbsoa3=7
INTEGER, PARAMETER :: pbsoa4=8
REAL*8 pirs
PARAMETER (pirs=3.14159265358979324)
REAL avo
PARAMETER (avo=6.0221367E23)
REAL rgasuniv
PARAMETER (rgasuniv=8.314510)
REAL stdatmpa
PARAMETER (stdatmpa=101325.0)
REAL stdtemp
PARAMETER (stdtemp=273.15)
REAL stfblz
PARAMETER (stfblz=5.67051E-8)
REAL grav
PARAMETER (grav=9.80622)
REAL molvol
PARAMETER (molvol=22.41410)
REAL mwair
PARAMETER (mwair=28.9628)
REAL rdgas
PARAMETER (rdgas=1.0E3*rgasuniv/mwair)
REAL threepi
PARAMETER (threepi=3.0*pirs)
REAL f6dpi
PARAMETER (f6dpi=6.0/pirs)
REAL f6dpi9
PARAMETER (f6dpi9=1.0E9*f6dpi)
REAL f6dpim9
PARAMETER (f6dpim9=1.0E-9*f6dpi)
REAL sqrtpi
PARAMETER (sqrtpi=1.7724539)
REAL sqrt2
PARAMETER (sqrt2=1.4142135623731)
REAL lgsqt2
PARAMETER (lgsqt2=0.34657359027997)
REAL dlgsqt2
PARAMETER (dlgsqt2=1.0/lgsqt2)
REAL one3
PARAMETER (one3=1.0/3.0)
REAL two3
PARAMETER (two3=2.0/3.0)
REAL boltz
PARAMETER (boltz=rgasuniv/avo)
REAL rhoso4
PARAMETER (rhoso4=1.8E3)
REAL rhonh4
PARAMETER (rhonh4=1.8E3)
REAL rhono3
PARAMETER (rhono3=1.8E3)
REAL rhoh2o
PARAMETER (rhoh2o=1.0E3)
REAL rhoorg
PARAMETER (rhoorg=1.0E3)
REAL rhosoil
PARAMETER (rhosoil=2.6E3)
REAL rhoseas
PARAMETER (rhoseas=2.2E3)
REAL rhoanth
PARAMETER (rhoanth=2.2E3)
REAL rhona
PARAMETER (rhona=2.2E3)
REAL rhocl
PARAMETER (rhocl=2.2E3)
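! Factors converting species mass concentration to particle third moment:
! fac = (6/pi)*1.0E-9/rho, using the component densities defined above.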
REAL so4fac
PARAMETER (so4fac=f6dpim9/rhoso4)
REAL nh4fac
PARAMETER (nh4fac=f6dpim9/rhonh4)
REAL h2ofac
PARAMETER (h2ofac=f6dpim9/rhoh2o)
REAL no3fac
PARAMETER (no3fac=f6dpim9/rhono3)
REAL orgfac
PARAMETER (orgfac=f6dpim9/rhoorg)
REAL soilfac
PARAMETER (soilfac=f6dpim9/rhosoil)
REAL seasfac
PARAMETER (seasfac=f6dpim9/rhoseas)
REAL anthfac
PARAMETER (anthfac=f6dpim9/rhoanth)
REAL nafac
PARAMETER (nafac=f6dpim9/rhona)
REAL clfac
PARAMETER (clfac=f6dpim9/rhocl)
REAL pss0
PARAMETER (pss0=101325.0)
REAL tss0
PARAMETER (tss0=288.15)
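! Initial geometric standard deviations (sgin..) and geometric mean
! diameters in m (dgin..) for the Aitken (..in), accumulation (..ia), and
! coarse (..ic) modes.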
REAL sginin
PARAMETER (sginin=1.70)
REAL sginia
PARAMETER (sginia=2.00)
REAL sginic
PARAMETER (sginic=2.5)
REAL dginin
PARAMETER (dginin=0.01E-6)
REAL dginia
PARAMETER (dginia=0.07E-6)
REAL dginic
PARAMETER (dginic=1.0E-6)
REAL en1
REAL ea1
REAL ec1
REAL esn04
REAL esa04
REAL esc04
REAL esn05
REAL esa05
REAL esn08
REAL esa08
REAL esc08
REAL esn09
REAL esa09
REAL esn12
REAL esa12
REAL esc12
REAL esn16
REAL esa16
REAL esc16
REAL esn20
REAL esa20
REAL esc20
REAL esn25
REAL esa25
REAL esn24
REAL esa24
REAL esc24
REAL esn28
REAL esa28
REAL esc28
REAL esn32
REAL esa32
REAL esc32
REAL esn36
REAL esa36
REAL esc36
REAL esn49
REAL esa49
REAL esn52
REAL esa52
REAL esn64
REAL esa64
REAL esc64
REAL esn100
REAL esnm20
REAL esam20
REAL escm20
REAL esnm32
REAL esam32
REAL escm32
REAL xxlsgn
REAL xxlsga
REAL xxlsgc
REAL l2sginin
REAL l2sginia
REAL l2sginic
INTEGER inucl
PARAMETER (inucl=2)
LOGICAL icoarse
PARAMETER (icoarse=.FALSE.)
REAL dgvem_i
PARAMETER (dgvem_i=0.03E-6)
REAL sgem_i
PARAMETER (sgem_i=1.7)
REAL dgvem_j
PARAMETER (dgvem_j=0.3E-6)
REAL sgem_j
PARAMETER (sgem_j=2.0)
REAL dgvem_c
PARAMETER (dgvem_c=6.0E-6)
REAL sgem_c
PARAMETER (sgem_c=2.2)
REAL factnumn
REAL factnuma
REAL factnumc
REAL facatkn_min, facacc_min
PARAMETER (facatkn_min=0.04,facacc_min=1.0-facatkn_min)
REAL xxm3
REAL, PARAMETER :: conmin = 1.E-16
REAL, PARAMETER :: epsilc = 1.E-16
REAL*8 nummin_i
REAL*8 nummin_j
REAL*8 nummin_c
REAL alphsulf
PARAMETER (alphsulf=1.0)
REAL mwh2so4
PARAMETER (mwh2so4=98.07354E-3)
REAL diffsulf
PARAMETER (diffsulf=9.362223E-06)
REAL alphaorg
PARAMETER (alphaorg=1.0)
REAL mworg
PARAMETER (mworg=175.0E-03)
REAL difforg
PARAMETER (difforg=5.151174E-06)
REAL cconc
PARAMETER (cconc=2.0*pirs*diffsulf)
REAL cconc_org
PARAMETER (cconc_org=2.0*pirs*difforg)
REAL ccofm_org
REAL ccofm
REAL aeroconcmin
PARAMETER (aeroconcmin=0.0001)
integer, parameter :: maxd_atype = 2
integer, parameter :: maxd_asize = 2
integer, parameter :: maxd_acomp = 19
integer, parameter :: maxd_aphase = 2
integer, save :: ai_phase
integer, save :: cw_phase
integer, save :: ci_phase
integer, save :: cr_phase
integer, save :: cs_phase
integer, save :: cg_phase
integer, save :: ntype_aer = 0
integer, save :: ntot_mastercomp_aer = 0
integer, save :: nphase_aer = 0
integer, save :: &
msectional, maerosolincw, &
nsize_aer( maxd_atype ), &
ncomp_aer( maxd_atype ), &
ncomp_aer_nontracer( maxd_atype ), &
mastercompptr_aer(maxd_acomp, maxd_atype), &
massptr_aer( maxd_acomp, maxd_asize, maxd_atype, maxd_aphase ), &
waterptr_aer( maxd_asize, maxd_atype ), &
hygroptr_aer( maxd_asize, maxd_atype ), &
numptr_aer( maxd_asize, maxd_atype, maxd_aphase ), &
mprognum_aer(maxd_asize,maxd_atype,maxd_aphase)
real, save :: &
dens_aer( maxd_acomp, maxd_atype ), &
dens_mastercomp_aer( maxd_acomp ), &
mw_mastercomp_aer( maxd_acomp ), &
mw_aer( maxd_acomp, maxd_atype ), &
hygro_mastercomp_aer( maxd_acomp ), &
hygro_aer( maxd_acomp, maxd_atype )
character*10, save :: &
name_mastercomp_aer( maxd_acomp ), &
name_aer( maxd_acomp, maxd_atype )
real, save :: &
volumcen_sect( maxd_asize, maxd_atype ), &
volumlo_sect( maxd_asize, maxd_atype ), &
volumhi_sect( maxd_asize, maxd_atype ), &
dcen_sect( maxd_asize, maxd_atype ), &
dlo_sect( maxd_asize, maxd_atype ), &
dhi_sect( maxd_asize, maxd_atype ), &
sigmag_aer(maxd_asize, maxd_atype)
integer, save :: &
lptr_so4_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_nh4_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_no3_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_asoa1_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_asoa2_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_asoa3_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_asoa4_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_bsoa1_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_bsoa2_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_bsoa3_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_bsoa4_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_orgpa_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_ec_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_p25_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_anth_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_cl_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_na_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_seas_aer(maxd_asize,maxd_atype,maxd_aphase), &
lptr_soil_aer(maxd_asize,maxd_atype,maxd_aphase)
logical, save :: &
do_cloudchem_aer(maxd_asize,maxd_atype)
real, parameter :: mw_so4_aer = 96.066
real, parameter :: mw_no3_aer = 62.007
real, parameter :: mw_nh4_aer = 18.042
real, parameter :: mw_oc_aer = 250.0
real, parameter :: mw_ec_aer = 1.0
real, parameter :: mw_oin_aer = 1.0
real, parameter :: mw_dust_aer = 100.087
real, parameter :: mw_seas_aer = 58.440
real, parameter :: mw_cl_aer = 35.450
real, parameter :: mw_na_aer = 22.990
real, parameter :: mw_water_aer = 18.016
real, parameter :: dens_so4_aer = 1.80
real, parameter :: dens_no3_aer = 1.80
real, parameter :: dens_nh4_aer = 1.80
real, parameter :: dens_oc_aer = 1.5
real, parameter :: dens_ec_aer = 1.70
real, parameter :: dens_dust_aer = 2.60
real, parameter :: dens_oin_aer = 2.20
real, parameter :: dens_seas_aer = 2.20
real, parameter :: dens_cl_aer = 2.20
real, parameter :: dens_na_aer = 2.20
real, parameter :: dens_water_aer = 1.0
real, parameter :: hygro_so4_aer = 0.5
real, parameter :: hygro_no3_aer = 0.5
real, parameter :: hygro_nh4_aer = 0.5
real, parameter :: hygro_oc_aer = 0.14
real, parameter :: hygro_ec_aer = 1.e-6
real, parameter :: hygro_oin_aer = 0.14
real, parameter :: hygro_dust_aer = 0.1
real, parameter :: hygro_seas_aer = 1.16
real, parameter :: hygro_cl_aer = 1.16
real, parameter :: hygro_na_aer = 1.16
real dlndg_nimptblgrow
integer nimptblgrow_mind, nimptblgrow_maxd
parameter (nimptblgrow_mind=-14, nimptblgrow_maxd=24)
real scavimptblnum(4, nimptblgrow_mind:nimptblgrow_maxd, maxd_asize, maxd_atype), &
scavimptblvol(4, nimptblgrow_mind:nimptblgrow_maxd, maxd_asize, maxd_atype)
INTEGER NGAUSdv
PARAMETER( NGAUSdv = 7 )
REAL Y_GQ(NGAUSdv), WGAUS(NGAUSdv)
END MODULE module_data_soa_vbs
|
{"hexsha": "a38b905e21b0f65b61debaf132ec9688715a8107", "size": 14325, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "WRF-CHEM/chem/module_data_soa_vbs.f90", "max_stars_repo_name": "ksetigui/paper_gmd-2020-50", "max_stars_repo_head_hexsha": "1c4bf2b0946bc31cfb443686c8aa1e33755d5fd2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "WRF-CHEM/chem/module_data_soa_vbs.f90", "max_issues_repo_name": "ksetigui/paper_gmd-2020-50", "max_issues_repo_head_hexsha": "1c4bf2b0946bc31cfb443686c8aa1e33755d5fd2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "WRF-CHEM/chem/module_data_soa_vbs.f90", "max_forks_repo_name": "ksetigui/paper_gmd-2020-50", "max_forks_repo_head_hexsha": "1c4bf2b0946bc31cfb443686c8aa1e33755d5fd2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5928143713, "max_line_length": 89, "alphanum_fraction": 0.6392321117, "num_tokens": 5131}
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Lindley Graham
"""
This module contains a set of methods and a class for interacting with NCSU
Subdomain Modeling Python code and associated files. The focus of this module
is the :class:`fulldomain`.
"""
import subprocess, glob, sys, os
import numpy as np
import scipy.io as sio
import polyadcirc.run_framework.domain as dom
import polyadcirc.pyADCIRC.post_management as post
import polyadcirc.run_framework.random_manningsn as rmn
import polyadcirc.pyADCIRC.output as output
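# Illustrative usage sketch (the path and the ``sub`` object are hypothetical):
#     full = fulldomain('/path/to/fulldomain')
#     full.add_subdomain(sub)
#     full.genfull(noutgs=1, nspoolgs=1)
#     full.genbcss()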
class fulldomain(dom.domain):
"""
Objects of this class contain all the data needed by :mod:`py.genbcs`,
:mod:`py.genfull`, and :mod:`py.gensub` for a particular full domain
mesh/grid. References to :class:`polyadcirc.run_framework.subdomain` objects
are also contained in an instantiation of this class.
"""
def __init__(self, path, subdomains=None, node_num=0, element_num=0,
node=None, element=None):
"""
Initialization
"""
super(fulldomain, self).__init__(path, node_num, element_num, node,
element)
# figure out where the script dir for the ncsu subdomain code is
for sys_path in sys.path:
potential_file_list = glob.glob(os.path.join(sys_path, 'py'))
if potential_file_list:
self.script_dir = potential_file_list[0]
break
#: list of :class:`~polyadcirc.run_framework.subdomain`
if subdomains is None:
self.subdomains = list()
else:
self.subdomains = subdomains
def add_subdomain(self, subdomain):
"""
Adds subdomain to self.subdomains.
:type subdomain: :class:`~polyadcirc.run_framework.subdomain`
:param subdomain: subdomain within this domain
"""
self.subdomains.append(subdomain)
subdomain.fulldomain = self
def add_subdomains(self, subdomains):
"""
Adds subdomain to self.subdomains.
:type subdomains: list of :class:`~polyadcirc.run_framework.subdomain`
:param subdomains: subdomains within this domain
"""
self.subdomains.extend(subdomains)
for s in subdomains:
s.fulldomain = self
def update_subdomains(self):
"""
        Update relational references between the fulldomain and its subdomains
        by setting subdomain.fulldomain = self
"""
for subdomain in self.subdomains:
subdomain.fulldomain = self
def genfull(self, noutgs=1, nspoolgs=1, subdomains=None):
"""
Generate the full domain control file, ``fort.015``, and save it to
``self.path``.
:param int noutgs: flag controlling whether or not ``fort.06*`` will be
written out
:param int nspoolgs: the number of timesteps at which information is
written to the new output files ``fort.06*``
:rtype: string
:returns: command line for invoking genfull.py
"""
if subdomains is None:
subdomains = self.subdomains
if len(subdomains) == 0:
with open(os.path.join(self.path, 'genfull.in'), 'w') as fid:
fid.write(str(noutgs)+'\n')
fid.write(str(nspoolgs)+'\n')
command = "python "+self.script_dir+" -a "+self.path+'/ '
command += " < genfull.in"
subprocess.call(command, shell=True, cwd=self.path)
else:
with open(os.path.join(self.path, 'genfull.in'), 'w') as fid:
fid.write(str(noutgs)+'\n')
fid.write(str(nspoolgs)+'\n')
for subdomain in subdomains:
subdomain.nspoolgs = nspoolgs
fid.write(subdomain.path+'/\n')
command = "python "+self.script_dir+"/genfull.py -s "+self.path+'/ '
command += str(len(subdomains)) + " < genfull.in"
subprocess.call(command, shell=True, cwd=self.path)
return command
def genbcss(self, forcing_freq=None, dt=None, nspoolgs=None, h0=None,
L=False):
"""
Generate the ``fort.019`` files for the subdomains. This requires the
presence of the output files from a fulldomain run, ``fort.06*``.
        :param list forcing_freq: number of timesteps at which information
            is written to a boundary conditions file (``fort.019``)
:param list dt: One timestep in seconds
:param list nspoolgs: the number of timesteps at which information is
written to the new output files ``fort.06*``
:param list h0: minimum water depth for a node to be wet
:param bool L: flag whether or not :program:`PADCIRC` was run with
``-L`` flag and if local files need to be post-processed into
global files
:rtype: list
:return: command lines for invoking genbcs.py
"""
commands = []
if L:
# create post-processing input file
post.write_sub(self.path)
# run ADCPOST
subprocess.call('./adcpost < in.postsub > post_o.txt', shell=True,
cwd=self.path)
if self.check_fulldomain():
if forcing_freq is None:
forcing_freq = [1 for i in self.subdomains]
if dt is None:
dt = [self.time.dt for i in self.subdomains]
if nspoolgs is None:
nspoolgs = [1 for i in self.subdomains]
if h0 is None:
h0 = [None for s in self.subdomains]
for f, d, ns, h, subdomain in zip(forcing_freq, dt, nspoolgs, h0,
self.subdomains):
commands.append(subdomain.genbcs(f, d, ns, h))
else:
print "Output files from the fulldomain run do not exist"
return commands
def check_fulldomain(self):
"""
Check to see if the ``fort.06*`` and ``PE*/fort.065`` files exist
:rtype: bool
:returns: False if the ``fort.06*`` files don't exist
"""
fort06 = glob.glob(os.path.join(self.path, 'fort.06*'))
fort065 = glob.glob(os.path.join(self.path, 'PE*', 'fort.065'))
return (len(fort06) > 0 and len(fort065) > 0)
def check_subdomains(self):
"""
Check all the subdomains to make sure the ``fort.019`` file exists
:rtype: bool
:returns: False if ``fort.019`` is missing from at least one of the
subdomains
"""
for sub in self.subdomains:
if not sub.check():
return False
else:
return True
def setup_all(self):
"""
Set up all of the subdomains
"""
for sub in self.subdomains:
sub.setup()
def read_and_save_output(self, ts_names, nts_names, save_file=None,
timesteps=None, save=False):
"""
Reads in output files from this fulldomain and saves to a file.
NOTE THIS DOES NOT CURRENTLY WORK FOR STATION DATA! ONLY USE FOR GLOBAL
DATA i.e files that are fort.*3 or fort.*4
NOTE THIS DOES NOT CURRENTLY WORK FOR ANY NTS DATA EXCEPT FOR MAXELE
:param list ts_names: names of ADCIRC timeseries
output files to be recorded from each run
:param list nts_names: names of ADCIRC non timeseries
output files to be recorded from each run
        :param string save_file: name of file to save comparison matrices to
:param int timesteps: number of timesteps to read from file
:rtype: dict
:returns: full_dict
"""
if save_file is None:
save_file = os.path.join(self.path, 'full.mat')
fulldict = dict()
# Get nts_error
for fid in nts_names:
key = fid.replace('.', '')
fulldict[key] = output.get_nts_sr(self.path, self, fid)
# Get ts_data
for fid in ts_names:
key = fid.replace('.', '')
fulldict[key] = output.get_ts_sr(self.path,
fid, timesteps=timesteps,
ihot=self.ihot)[0]
# fix dry nodes
        if 'fort63' in fulldict:
fulldict['fort63'] = np.expand_dims(fulldict['fort63'], axis=2)
fulldict = rmn.fix_dry_nodes(fulldict, self)
fulldict['fort63'] = np.squeeze(fulldict['fort63'])
# fix dry data
        if 'fort61' in fulldict:
fulldict['fort61'] = np.expand_dims(fulldict['fort61'], axis=1)
fulldict = rmn.fix_dry_data(fulldict, self)
fulldict['fort61'] = np.squeeze(fulldict['fort61'])
# fix dry nodes nts
        if 'maxele63' in fulldict:
fulldict['maxele63'] = np.expand_dims(fulldict['maxele63'], axis=1)
fulldict = rmn.fix_dry_nodes_nts(fulldict, self)
fulldict['maxele63'] = np.squeeze(fulldict['maxele63'])
if save:
sio.savemat(save_file, fulldict, do_compression=True)
return fulldict
|
{"hexsha": "0e9b61fc00a4a433f84ebcb5860d5e4530983ea1", "size": 9243, "ext": "py", "lang": "Python", "max_stars_repo_path": "polyadcirc/run_framework/fulldomain.py", "max_stars_repo_name": "tmiesse/PolyADCIRC", "max_stars_repo_head_hexsha": "a4a31dda2c2dac4cd696c0f3827dbbcea7feab33", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2016-03-04T19:42:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T15:39:25.000Z", "max_issues_repo_path": "polyadcirc/run_framework/fulldomain.py", "max_issues_repo_name": "tmiesse/PolyADCIRC", "max_issues_repo_head_hexsha": "a4a31dda2c2dac4cd696c0f3827dbbcea7feab33", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2015-04-28T05:14:28.000Z", "max_issues_repo_issues_event_max_datetime": "2017-01-19T12:54:59.000Z", "max_forks_repo_path": "polyadcirc/run_framework/fulldomain.py", "max_forks_repo_name": "UT-CHG/PolyADCIRC", "max_forks_repo_head_hexsha": "a4a31dda2c2dac4cd696c0f3827dbbcea7feab33", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-01-20T00:34:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-02T11:00:56.000Z", "avg_line_length": 37.7265306122, "max_line_length": 80, "alphanum_fraction": 0.5829276209, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2212}
|
using Gadfly
using ProgressMeter
using LinearAlgebra: diagm  # needed for diagm on Julia >= 1.0
# Creates an array of length N where each entry is an independent realization
# of a complex Gaussian with Ez = Ezz = 0 and Ezz^* = 1.
function zGaussian(N=1)
    x, y = rand(N), rand(N)
    sqrt.(-log.(x)) .* exp.(2im * pi * y)
end
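# Sanity check (illustrative): for large N the sample moments of
# z = zGaussian(N) should satisfy mean(z) ≈ 0, mean(z.^2) ≈ 0, and
# mean(abs2.(z)) ≈ 1, matching Ez = Ezz = 0 and Ezz^* = 1 above.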
function getRHS(nwells, T, KdJ)
    # Diagonal entries (0 for the action)
    center = (nwells + 1) / 2
    diags = push!(0.5 * KdJ * (collect(1:nwells) .- center).^2, 0.0)
    prop = -1im * diagm(0 => diags)
    # Coupling between the wells
    coupls = push!(ones(nwells - 1) * T, 0.0)
    prop += 1im * (diagm(1 => coupls) + diagm(-1 => coupls))
    (t, y) -> prop * y
end
function integrate_rk4(rhs, times, y0)
    y = zeros(eltype(y0), length(y0), length(times))
    y[:, 1] = y0
    yrk = zeros(eltype(y0), length(y0), 4)
    for (i, t) in enumerate(times[2:end])
        # Stage times are taken from the left endpoint times[i] of each step.
        ti = times[i]
        dt = t - ti
        yrk[:, 1] = dt * rhs(ti, y[:, i])
        yrk[:, 2] = dt * rhs(ti + dt/2, y[:, i] + 0.5 * yrk[:, 1])
        yrk[:, 3] = dt * rhs(ti + dt/2, y[:, i] + 0.5 * yrk[:, 2])
        yrk[:, 4] = dt * rhs(ti + dt, y[:, i] + yrk[:, 3])
        y[:, i+1] = y[:, i] + 1/6 * (yrk[:, 1] + 2*yrk[:, 2] + 2*yrk[:, 3] + yrk[:, 4])
    end
    y
end
function main(timesteps, dt, nwells, realizations; T=2.0, KdJ=2.0)
    println("GO!")
    shift = div(nwells - 1, 2) % 2
    even_w = filter(n -> isodd(n - shift), 1:nwells)
    odd_w = filter(n -> iseven(n - shift), 1:nwells)
    t = collect(1:timesteps) * dt
    rhs = getRHS(nwells, T, KdJ)
    norm = zeros(Complex{Float64}, timesteps)
    prog = Progress(realizations, 1)
    for n in 1:realizations
        z1 = integrate_rk4(rhs, t, push!(zGaussian(nwells), 0))
        z2 = integrate_rk4(rhs, t, push!(zGaussian(nwells), 0))
        prefactor = prod(conj(z1[even_w, 1]) .* z2[even_w, 1])
        # Refactor as dot product
        sum_even = vec(sum(z1[even_w, :] .* conj(z2[even_w, :]), dims=1))
        sum_odd = vec(sum(z1[odd_w, :] .* conj(z2[odd_w, :]), dims=1))
        norm += prefactor * exp.(sum_even + sum_odd) / realizations
        next!(prog)
    end
    println("Done")
    t, norm
end
t, norm = main(100, 0.01, 5, 80000)
plot(x=t, y=real(norm), Geom.line)
|
{"hexsha": "58f6832d4d4bdf9a642b9fc463aabcf6b472a5f4", "size": 2159, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/semiclassics.jl", "max_stars_repo_name": "dseuss/stuff", "max_stars_repo_head_hexsha": "2d6d54863ad01f3b465b4464a6c1023b2f8f4f5f", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia/semiclassics.jl", "max_issues_repo_name": "dseuss/stuff", "max_issues_repo_head_hexsha": "2d6d54863ad01f3b465b4464a6c1023b2f8f4f5f", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia/semiclassics.jl", "max_forks_repo_name": "dseuss/stuff", "max_forks_repo_head_hexsha": "2d6d54863ad01f3b465b4464a6c1023b2f8f4f5f", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1756756757, "max_line_length": 85, "alphanum_fraction": 0.5590551181, "num_tokens": 836}
|
// Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NETKET_GRAPH_OPERATOR_HPP
#define NETKET_GRAPH_OPERATOR_HPP
#include <Eigen/Dense>
#include <array>
#include <unordered_map>
#include <vector>
#include "Graph/abstract_graph.hpp"
#include "Hilbert/abstract_hilbert.hpp"
#include "abstract_operator.hpp"
#include "local_operator.hpp"
namespace netket {
// Graph Hamiltonian on an arbitrary graph
class GraphOperator : public AbstractOperator {
// Arbitrary graph
const AbstractGraph &graph_;
LocalOperator operator_;
const int nvertices_;
public:
using SiteType = std::vector<int>;
using OMatType = LocalOperator::MatType;
using OVecType = std::vector<OMatType>;
using VectorType = AbstractOperator::VectorType;
using VectorRefType = AbstractOperator::VectorRefType;
using VectorConstRefType = AbstractOperator::VectorConstRefType;
GraphOperator(std::shared_ptr<const AbstractHilbert> hilbert,
OVecType siteops, OVecType bondops,
std::vector<int> bondops_colors);
// Constructor to be used when overloading operators
GraphOperator(std::shared_ptr<const AbstractHilbert> hilbert,
const LocalOperator &lop);
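  // Finds the configurations v' connected to v by this operator: mel receives
  // the matrix elements, connectors the indices of the sites that change, and
  // newconfs the corresponding new site values.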
void FindConn(VectorConstRefType v, std::vector<Complex> &mel,
std::vector<std::vector<int>> &connectors,
std::vector<std::vector<double>> &newconfs) const override;
void ForEachConn(VectorConstRefType v, ConnCallback callback) const override;
friend GraphOperator operator+(const GraphOperator &lhs,
const GraphOperator &rhs);
};  // class GraphOperator
} // namespace netket
#endif
|
{"hexsha": "0b890a8e805afe140a9e2ff342ed2cd7c8e63c1b", "size": 2228, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Sources/Operator/graph_operator.hpp", "max_stars_repo_name": "vigsterkr/netket", "max_stars_repo_head_hexsha": "1e187ae2b9d2aa3f2e53b09fe743e50763d04c9a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Sources/Operator/graph_operator.hpp", "max_issues_repo_name": "vigsterkr/netket", "max_issues_repo_head_hexsha": "1e187ae2b9d2aa3f2e53b09fe743e50763d04c9a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/Operator/graph_operator.hpp", "max_forks_repo_name": "vigsterkr/netket", "max_forks_repo_head_hexsha": "1e187ae2b9d2aa3f2e53b09fe743e50763d04c9a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2769230769, "max_line_length": 79, "alphanum_fraction": 0.7333931777, "num_tokens": 500}
|
import numpy
import pylab
import math
from orbit import *
# circular orbit
o = Orbit(1.0, 0.0) # eccentricity = 0
# orbital period
P = o.kepler_period()
tstep = []
err_Euler = []
err_EC = []
err_RK2 = []
err_RK4 = []
dt = 0.05
for i in range(5):
hist_Euler = o.int_Euler(dt, P)
hist_Euler_Cromer = o.int_Euler_Cromer(dt, P)
hist_RK2 = o.int_RK2(dt, P)
hist_RK4 = o.int_RK4(dt, P)
# error is final radius - initial radius. Since we are circular, the
# initial radius is o.a, the semimajor axis
    print(dt,
          abs(hist_Euler.final_R()-o.a),
          abs(hist_Euler_Cromer.final_R()-o.a),
          abs(hist_RK2.final_R()-o.a),
          abs(hist_RK4.final_R()-o.a))
tstep.append(dt)
err_Euler.append(abs(hist_Euler.final_R()-o.a))
err_EC.append(abs(hist_Euler_Cromer.final_R()-o.a))
err_RK2.append(abs(hist_RK2.final_R()-o.a))
err_RK4.append(abs(hist_RK4.final_R()-o.a))
dt /= 2
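# The reference lines overplotted below show the expected global convergence
# orders: first order for Euler and Euler-Cromer, second order for RK2, and
# fourth order for RK4.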
pylab.scatter(numpy.array(tstep), numpy.array(err_Euler), label="Euler", color="k")
pylab.plot(numpy.array(tstep), err_Euler[0]*(tstep[0]/numpy.array(tstep))**-1, color="k")
pylab.scatter(numpy.array(tstep), numpy.array(err_EC), label="Euler-Cromer", color="r")
pylab.plot(numpy.array(tstep), err_EC[0]*(tstep[0]/numpy.array(tstep))**-1, color="r")
pylab.scatter(numpy.array(tstep), numpy.array(err_RK2), label="R-K 2", color="b")
pylab.plot(numpy.array(tstep), err_RK2[0]*(tstep[0]/numpy.array(tstep))**-2, color="b")
pylab.scatter(numpy.array(tstep), numpy.array(err_RK4), label="R-K 4", color="g")
pylab.plot(numpy.array(tstep), err_RK4[0]*(tstep[0]/numpy.array(tstep))**-4, color="g")
leg = pylab.legend(loc=2)
ltext = leg.get_texts()
pylab.setp(ltext, fontsize='small')
leg.draw_frame(0)
ax = pylab.gca()
ax.set_xscale('log')
ax.set_yscale('log')
pylab.xlabel(r"$\tau$")
pylab.ylabel("absolute error in radius after one period")
pylab.ylim(1.e-10, 10)
pylab.savefig("orbit-converge.png")
|
{"hexsha": "f6db5f6bf1e21c7cb1025a8ffeac1a7d9a17a1be", "size": 1948, "ext": "py", "lang": "Python", "max_stars_repo_path": "basic_numerics/ODEs/orbit-converge.py", "max_stars_repo_name": "python-hydro/hydro_examples", "max_stars_repo_head_hexsha": "55b7750a7644f3e2187f7fe338b6bc1d6fb9c139", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2018-09-01T10:44:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T23:50:57.000Z", "max_issues_repo_path": "basic_numerics/orbit-converge.py", "max_issues_repo_name": "srinivasvl81/hydro_examples", "max_issues_repo_head_hexsha": "d1b8a5c98ce28ed4f8bac4d2a20d91a27355a21a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basic_numerics/orbit-converge.py", "max_forks_repo_name": "srinivasvl81/hydro_examples", "max_forks_repo_head_hexsha": "d1b8a5c98ce28ed4f8bac4d2a20d91a27355a21a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2018-09-06T20:02:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T17:05:24.000Z", "avg_line_length": 27.8285714286, "max_line_length": 89, "alphanum_fraction": 0.6709445585, "include": true, "reason": "import numpy", "num_tokens": 639}
|
'''Partial Regression plot and residual plots to find misspecification
Author: Josef Perktold
License: BSD-3
Created: 2011-01-23
update
2011-06-05 : start to convert example to usable functions
2011-10-27 : docstrings
'''
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.graphics import utils
__all__ = ['plot_fit', 'plot_regress_exog', 'plot_partregress', 'plot_ccpr']
def plot_fit(res, exog_idx, exog_name='', y_true=None, ax=None, fontsize='small'):
"""Plot fit against one regressor.
This creates one graph with the scatterplot of observed values compared to
fitted values.
Parameters
----------
res : result instance
result instance with resid, model.endog and model.exog as attributes
exog_idx : int
index of regressor in exog matrix
y_true : array_like
(optional) If this is not None, then the array is added to the plot
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Notes
-----
This is currently very simple, no options or varnames yet.
"""
fig, ax = utils.create_mpl_ax(ax)
if exog_name == '':
exog_name = 'variable %d' % exog_idx
#maybe add option for wendog, wexog
y = res.model.endog
x1 = res.model.exog[:, exog_idx]
x1_argsort = np.argsort(x1)
y = y[x1_argsort]
x1 = x1[x1_argsort]
ax.plot(x1, y, 'bo', label='observed')
    if y_true is not None:
        ax.plot(x1, y_true[x1_argsort], 'b-', label='true')
    title = 'fitted versus regressor %s' % exog_name
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues[x1_argsort], 'k-', label='fitted') #'k-o')
#ax.plot(x1, iv_u, 'r--')
#ax.plot(x1, iv_l, 'r--')
ax.fill_between(x1, iv_l[x1_argsort], iv_u[x1_argsort], alpha=0.1, color='k')
ax.set_title(title, fontsize=fontsize)
return fig
def plot_regress_exog(res, exog_idx, exog_name='', fig=None):
"""Plot regression results against one regressor.
This plots four graphs in a 2 by 2 figure: 'endog versus exog',
'residuals versus exog', 'fitted versus exog' and
'fitted plus residual versus exog'
Parameters
----------
res : result instance
result instance with resid, model.endog and model.exog as attributes
exog_idx : int
index of regressor in exog matrix
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : matplotlib figure instance
Notes
-----
This is currently very simple, no options or varnames yet.
"""
fig = utils.create_mpl_fig(fig)
if exog_name == '':
exog_name = 'variable %d' % exog_idx
#maybe add option for wendog, wexog
#y = res.endog
x1 = res.model.exog[:,exog_idx]
ax = fig.add_subplot(2,2,1)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.model.endog, 'o')
ax.set_title('endog versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,2)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.resid, 'o')
ax.axhline(y=0)
ax.set_title('residuals versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,3)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.fittedvalues, 'o')
ax.set_title('Fitted versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,4)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.fittedvalues + res.resid, 'o')
ax.set_title('Fitted plus residuals versus exog', fontsize='small')# + namestr)
fig.suptitle('Regression Plots for %s' % exog_name)
return fig
def _partial_regression(endog, exog_i, exog_others):
"""Partial regression.
regress endog on exog_i conditional on exog_others
uses OLS
Parameters
----------
endog : array_like
exog : array_like
exog_others : array_like
Returns
-------
res1c : OLS results instance
(res1a, res1b) : tuple of OLS results instances
results from regression of endog on exog_others and of exog_i on
exog_others
"""
#FIXME: This function doesn't appear to be used.
res1a = OLS(endog, exog_others).fit()
res1b = OLS(exog_i, exog_others).fit()
res1c = OLS(res1a.resid, res1b.resid).fit()
return res1c, (res1a, res1b)
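# Note: by the Frisch-Waugh-Lovell theorem, the slope estimated in the final
# regression above equals the coefficient on exog_i in the full regression of
# endog on (exog_i, exog_others).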
def plot_partregress_ax(endog, exog_i, exog_others, varname='',
title_fontsize=None, ax=None):
"""Plot partial regression for a single regressor.
Parameters
----------
endog : ndarray
endogenous or response variable
exog_i : ndarray
exogenous, explanatory variable
exog_others : ndarray
other exogenous, explanatory variables, the effect of these variables
will be removed by OLS regression
varname : str
name of the variable used in the title
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
plot_partregress : Plot partial regression for a set of regressors.
"""
fig, ax = utils.create_mpl_ax(ax)
res1a = OLS(endog, exog_others).fit()
res1b = OLS(exog_i, exog_others).fit()
ax.plot(res1b.resid, res1a.resid, 'o')
res1c = OLS(res1a.resid, res1b.resid).fit()
ax.plot(res1b.resid, res1c.fittedvalues, '-', color='k')
ax.set_title('Partial Regression plot %s' % varname,
fontsize=title_fontsize)# + namestr)
return fig
def plot_partregress(results, exog_idx=None, xnames=None, grid=None, fig=None):
"""Plot partial regression for a set of regressors.
Parameters
----------
results : results instance
A regression model results instance
exog_idx : None or list of int
(column) indices of the exog used in the plot, default is all.
xnames : None or list of strings
Names for the numbers given in exog_idx. Default is
results.model.exog_names.
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
If grid is None, then ncol is one, if there are only 2 subplots, and
the number of columns is two otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
Notes
-----
A subplot is created for each explanatory variable given by exog_idx.
The partial regression plot shows the relationship between the response
and the given explanatory variable after removing the effect of all other
explanatory variables in exog.
See Also
--------
plot_partregress_ax : Plot partial regression for a single regressor.
plot_ccpr
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
"""
fig = utils.create_mpl_fig(fig)
#maybe add option for using wendog, wexog instead
y = results.model.endog
exog = results.model.exog
k_vars = exog.shape[1]
#this function doesn't make sense if k_vars=1
if xnames is None:
exog_idx = range(k_vars)
xnames = results.model.exog_names
else:
exog_idx = []
for name in xnames:
exog_idx.append(results.model.exog_names.index(name))
    if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
title_fontsize = 'small'
else:
nrows = len(exog_idx)
ncols = 1
title_fontsize = None
    for i, idx in enumerate(exog_idx):
        others = list(range(k_vars))
        others.pop(idx)
exog_others = exog[:, others]
ax = fig.add_subplot(nrows, ncols, i+1)
plot_partregress_ax(y, exog[:, idx], exog_others, ax=ax,
varname=xnames[i])
return fig
def plot_ccpr_ax(res, exog_idx=None, ax=None):
"""Plot CCPR against one regressor.
Generates a CCPR (component and component-plus-residual) plot.
Parameters
----------
res : result instance
uses exog and params of the result instance
exog_idx : int
(column) index of the exog used in the plot
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
plot_ccpr : Creates CCPR plot for multiple regressors in a plot grid.
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
"""
fig, ax = utils.create_mpl_ax(ax)
x1 = res.model.exog[:,exog_idx]
#namestr = ' for %s' % self.name if self.name else ''
    x1beta = x1*res.params[exog_idx]
ax.plot(x1, x1beta + res.resid, 'o')
ax.plot(x1, x1beta, '-')
ax.set_title('X_%d beta_%d plus residuals versus exog (CCPR)' % \
(exog_idx, exog_idx))
return fig
def plot_ccpr(res, exog_idx=None, grid=None, fig=None):
"""Generate CCPR plots against a set of regressors, plot in a grid.
Generates a grid of CCPR (component and component-plus-residual) plots.
Parameters
----------
res : result instance
uses exog and params of the result instance
exog_idx : None or list of int
(column) indices of the exog used in the plot
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
If grid is None, then ncol is one, if there are only 2 subplots, and
the number of columns is two otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Notes
-----
Partial residual plots are formed as::
Res + Betahat(i)*Xi versus Xi
and CCPR adds::
Betahat(i)*Xi versus Xi
See Also
--------
plot_ccpr_ax : Creates CCPR plot for a single regressor.
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
"""
fig = utils.create_mpl_fig(fig)
if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
else:
nrows = len(exog_idx)
ncols = 1
for i, idx in enumerate(exog_idx):
ax = fig.add_subplot(nrows, ncols, i+1)
plot_ccpr_ax(res, exog_idx=idx, ax=ax)
return fig
def abline_plot(intercept=None, slope=None, horiz=None, vert=None,
model_results=None, ax=None, **kwargs):
"""
    Plots a line given an intercept and slope.

    Parameters
    ----------
    intercept : float
The intercept of the line
slope : float
The slope of the line
horiz : float or array-like
Data for horizontal lines on the y-axis
vert : array-like
        Data for vertical lines on the x-axis
model_results : statsmodels results instance
Any object that has a two-value `params` attribute. Assumed that it
is (intercept, slope)
ax : axes, optional
Matplotlib axes instance
    kwargs
        Options passed to the underlying matplotlib Line2D
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> np.random.seed(12345)
>>> X = sm.add_constant(np.random.normal(0, 20, size=30), prepend=True)
>>> y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
>>> mod = sm.OLS(y,X).fit()
>>> fig = abline_plot(model_results=mod)
    >>> ax = fig.axes[0]
>>> ax.scatter(X[:,1], y)
>>> ax.margins(.1)
>>> import matplotlib.pyplot as plt
>>> plt.show()
"""
if ax is not None: # get axis limits first thing, don't change these
x = ax.get_xlim()
y = ax.get_ylim()
else:
x = None
fig,ax = utils.create_mpl_ax(ax)
if model_results:
intercept, slope = model_results.params
if x is None:
x = [model_results.model.exog[:,1].min(),
model_results.model.exog[:,1].max()]
else:
        if intercept is None or slope is None:
            raise ValueError("specify slope and intercept or model_results")
if x is None:
x = ax.get_xlim()
data_y = [x[0]*slope+intercept, x[1]*slope+intercept]
ax.set_xlim(x)
#ax.set_ylim(y)
from matplotlib.lines import Line2D
class ABLine2D(Line2D):
def update_datalim(self, ax):
ax.set_autoscale_on(False)
children = ax.get_children()
abline = [children[i] for i in range(len(children))
if isinstance(children[i], ABLine2D)][0]
x = ax.get_xlim()
y = [x[0]*slope+intercept, x[1]*slope+intercept]
abline.set_data(x,y)
ax.figure.canvas.draw()
line = ABLine2D(x, data_y, **kwargs)
ax.add_line(line)
ax.callbacks.connect('xlim_changed', line.update_datalim)
ax.callbacks.connect('ylim_changed', line.update_datalim)
    if horiz is not None:
        for h in np.atleast_1d(horiz):
            ax.axhline(h)
    if vert is not None:
        for v in np.atleast_1d(vert):
            ax.axvline(v)
return fig
|
{"hexsha": "36c7e924304c7be16b1c9165e515580b674153ae", "size": 14614, "ext": "py", "lang": "Python", "max_stars_repo_path": "statsmodels/graphics/regressionplots.py", "max_stars_repo_name": "escheffel/statsmodels", "max_stars_repo_head_hexsha": "bc70147c4c7ea00b6ac7256bbaf107902983c189", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-01-05T22:44:37.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-26T08:34:00.000Z", "max_issues_repo_path": "statsmodels/graphics/regressionplots.py", "max_issues_repo_name": "escheffel/statsmodels", "max_issues_repo_head_hexsha": "bc70147c4c7ea00b6ac7256bbaf107902983c189", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "statsmodels/graphics/regressionplots.py", "max_forks_repo_name": "escheffel/statsmodels", "max_forks_repo_head_hexsha": "bc70147c4c7ea00b6ac7256bbaf107902983c189", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1696606786, "max_line_length": 86, "alphanum_fraction": 0.6241959765, "include": true, "reason": "import numpy,import statsmodels,from statsmodels", "num_tokens": 3886}
|
\chapter{Conclusion}
\label{ch:conclusion}
%
%Using power colours, black hole and neutron star \acp{LMXB} have been shown to follow a similar power spectral evolution, providing fresh evidence for a common variability origin from an accretion disk. While significant variability in power spectra can occur over short time scales, power colour tracks show the broad band evolution is relatable and model-independent. Diverging power colour paths additionally allow for the rapid classification of neutron star and black hole \acp{LMXB} in the hard states. Further evidence for common behaviour can be found in spectral states, with a link being found between neutron star power colours and the various canonical atoll states, similar to the analogous behaviour discovered for black hole spectral states \citep{heil2015power}. This allows for a comparison between systems with a similar geometry. \\
%
%The effect of a number of other \ac{LMXB} properties on timing behaviour was also tested. Speculative signs of an inclination dependence of neutron star hardness were found; however, they run counter to expectations and require more research. A clear division could be made in the \ac{PCC}~diagram between the populations of atoll and Z sources, with the latter remaining firmly in the soft states. Cyg- and Sco-like Z sources showed a tentative difference in power colour spread; however, this could be due to observational biases. The effects of pulsations on power colours were also investigated. While bursters showed no particular effects on power colours, a tentative relation between the spin frequency of pulsars and the associated power colour tracks was found. This suggests that strong magnetic fields could affect the timing properties of neutron star \acp{LMXB}. \\
%
%The research conducted in this project could benefit from further research into the effect of broader power colour frequency bands, and into the necessity of various extraction settings, to ensure optimal use of data. While beyond the scope of this project, research into the relationship between power colours and \QPOs would be fascinating, potentially allowing the evolution of \QPOs to be linked to the broader spectral evolution. A wide range of other parameters, from mass to the presence of bursts, could also provide new insights into the similarities between black hole and neutron star \acp{LMXB} and effects due to the difference in compact object.
|
{"hexsha": "934e9cae19a75c931fda68f9596202836bac57f3", "size": 2441, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/conclusion.tex", "max_stars_repo_name": "FloorBroekgaarden/MasterThesis", "max_stars_repo_head_hexsha": "c533e3c6671bf703609fb071653e77d847cfbae0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/conclusion.tex", "max_issues_repo_name": "FloorBroekgaarden/MasterThesis", "max_issues_repo_head_hexsha": "c533e3c6671bf703609fb071653e77d847cfbae0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/conclusion.tex", "max_forks_repo_name": "FloorBroekgaarden/MasterThesis", "max_forks_repo_head_hexsha": "c533e3c6671bf703609fb071653e77d847cfbae0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 305.125, "max_line_length": 877, "alphanum_fraction": 0.8181073331, "num_tokens": 480}
|
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch import optim
import time
import albumentations as A
from albumentations.pytorch import ToTensor
from torch.utils.data import random_split
from torch.optim import lr_scheduler
import seaborn as sns
import pandas as pd
import argparse
import os
import copy
from data_loading import multi_classes,binary_class
from sklearn.model_selection import GroupKFold
from pytorch_dcsaunet import DCSAU_Net
from loss import *
from self_attention_cv import transunet
def get_train_transform():
return A.Compose(
[
A.Resize(256, 256),
A.HorizontalFlip(p=0.25),
A.ShiftScaleRotate(shift_limit=0,p=0.25),
A.CoarseDropout(),
A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
ToTensor()
])
def get_valid_transform():
return A.Compose(
[
A.Resize(256, 256),
A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
ToTensor()
])
def train_model(model, criterion, optimizer, scheduler, num_epochs=5):
since = time.time()
Loss_list = {'train': [], 'valid': []}
Accuracy_list = {'train': [], 'valid': []}
    # Deep-copy so later training steps do not overwrite the saved weights.
    best_model_wts = copy.deepcopy(model.state_dict())
best_loss = float('inf')
counter = 0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'valid']:
if phase == 'train':
model.train(True)
else:
model.train(False)
running_loss = []
running_corrects = []
# Iterate over data
#for inputs,labels,label_for_ce,image_id in dataloaders[phase]:
for inputs,labels,image_id in dataloaders[phase]:
# wrap them in Variable
if torch.cuda.is_available():
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
#label_for_ce = Variable(label_for_ce.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
#label_for_ce = label_for_ce.long()
# forward
outputs = model(inputs)
loss = criterion(outputs, labels)
score = accuracy_metric(outputs,labels)
if phase == 'train':
loss.backward()
optimizer.step()
# calculate loss and IoU
running_loss.append(loss.item())
running_corrects.append(score.item())
epoch_loss = np.mean(running_loss)
epoch_acc = np.mean(running_corrects)
print('{} Loss: {:.4f} IoU: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
Loss_list[phase].append(epoch_loss)
Accuracy_list[phase].append(epoch_acc)
# save parameters
            if phase == 'valid' and epoch_loss <= best_loss:
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(model.state_dict())
                counter = 0
if epoch > 50:
torch.save(model, f'save_models/epoch_{epoch}_{epoch_acc}.pth')
elif phase == 'valid' and epoch_loss > best_loss:
counter += 1
if phase == 'train':
scheduler.step()
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val loss: {:4f}'.format(best_loss))
model.load_state_dict(best_model_wts)
return model, Loss_list, Accuracy_list
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str,default='data/', help='the path of images')
parser.add_argument('--csvfile', type=str,default='src/train_data.csv', help='two columns [image_id,category(train/test)]')
parser.add_argument('--loss', default='dice', help='loss type')
parser.add_argument('--batch', type=int, default=16, help='batch size')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    parser.add_argument('--epoch', type=int, default=150, help='epochs')
args = parser.parse_args()
os.makedirs(f'save_models/',exist_ok=True)
fold = 0
df = pd.read_csv(args.csvfile)
gkf = GroupKFold(n_splits = 5)
df['fold'] = -1
    for f, (train_idx, val_idx) in enumerate(gkf.split(df, groups=df.image_id.tolist())):
        df.loc[val_idx, 'fold'] = f
val_files = list(df[df.fold==fold].image_id)
train_files = list(df[df.fold!=fold].image_id)
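    # GroupKFold groups rows by image_id, so no image appears in both the
    # training and validation splits.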
train_dataset = binary_class(args.dataset,train_files, get_train_transform())
val_dataset = binary_class(args.dataset,val_files, get_valid_transform())
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=args.batch, shuffle=True,drop_last=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=args.batch//2,drop_last=True)
    dataset_sizes = {'train': len(train_loader.dataset), 'valid': len(val_loader.dataset)}
dataloaders = {'train':train_loader,'valid':val_loader}
model_ft = DCSAU_Net.Model(img_channels = 3, n_classes = 1)
if torch.cuda.is_available():
model_ft = model_ft.cuda()
# Loss, IoU and Optimizer
if args.loss == 'ce':
#criterion = nn.CrossEntropyLoss()
criterion = nn.BCELoss()
if args.loss == 'dice':
criterion = DiceLoss_binary()
#criterion = DiceLoss_multiple()
accuracy_metric = IoU_binary()
#accuracy_metric = IoU_multiple()
optimizer_ft = optim.Adam(model_ft.parameters(),lr = args.lr)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=100, gamma=0.5)
#exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer_ft, patience=5, factor=0.1,min_lr=1e-6)
model_ft, Loss_list, Accuracy_list = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=args.epoch)
torch.save(model_ft, f'save_models/epoch_last.pth')
plt.title('Validation loss and IoU',)
valid_data = pd.DataFrame({'Loss':Loss_list["valid"], 'IoU':Accuracy_list["valid"]})
valid_data.to_csv(f'valid_data.csv')
sns.lineplot(data=valid_data,dashes=False)
plt.ylabel('Value')
plt.xlabel('Epochs')
plt.savefig('valid.png')
plt.figure()
plt.title('Training loss and IoU',)
valid_data = pd.DataFrame({'Loss':Loss_list["train"],'IoU':Accuracy_list["train"]})
valid_data.to_csv(f'train_data.csv')
sns.lineplot(data=valid_data,dashes=False)
plt.ylabel('Value')
plt.xlabel('Epochs')
plt.savefig('train.png')
|
{"hexsha": "579993bec2861c5f6dc159e81906935bc4aa448e", "size": 7370, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "xq141839/DCSAU-Net", "max_stars_repo_head_hexsha": "dd2fa996a325cbebff6f05dd34965483a568d999", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2022-02-03T07:06:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T01:24:43.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "xq141839/DCSAU-Net", "max_issues_repo_head_hexsha": "dd2fa996a325cbebff6f05dd34965483a568d999", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-03-01T07:30:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T02:36:15.000Z", "max_forks_repo_path": "train.py", "max_forks_repo_name": "xq141839/DCSAU-Net", "max_forks_repo_head_hexsha": "dd2fa996a325cbebff6f05dd34965483a568d999", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-26T12:03:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T13:56:45.000Z", "avg_line_length": 37.2222222222, "max_line_length": 128, "alphanum_fraction": 0.5940298507, "include": true, "reason": "import numpy", "num_tokens": 1665}
|
(* ------------------------------------------------------- *)
(** #<hr> <center> <h1>#
The double time redundancy (DTR) transformation
#</h1>#
- Exporting the main lemmas of the control block
Dmitry Burlyaev - Pascal Fradet - 2015
#</center> <hr># *)
(* ------------------------------------------------------- *)
Add LoadPath "..\..\Common\".
Add LoadPath "..\..\TMRProof\".
Add LoadPath "..\Transf\".
Require Import dtrTransform.
(*Control Block properties during odd clock cycles w/o errors*)
Require Export controlStep0.
(*
step0_tcbv_C
step0_tcbv
*)
(*Control Block properties during even clock cycles w/o errors*)
Require Export controlStep1.
(*
step1_tcbv_C
step1_tcbv
stepr1_tcbv
*)
(*Control Block properties during odd clock cycles with errors*)
Require Export controlStepg0.
(*
stepg0_tcbv
step0_tcbv_i
*)
(*Control Block properties during even clock cycles with errors*)
Require Export controlStepg1.
(*
stepg1_tcbv
step1_tcbv_i
stepr1_tcbv_i
*)
(*Control Block properties during the DTR recovery process*)
Require Export controlRec.
(*
stepr2_tcbv_C
stepr2_tcbv
stepr3_tcbv
stepr4_tcbv
stepr5_tcbv
*)
|
{"author": "dburl", "repo": "Coq_LDDL", "sha": "691023b88314c1ad531a1177954a1c6596fe4483", "save_path": "github-repos/coq/dburl-Coq_LDDL", "path": "github-repos/coq/dburl-Coq_LDDL/Coq_LDDL-691023b88314c1ad531a1177954a1c6596fe4483/DTRProof/controlBlock/controlLib.v"}
|
export GenericRef,
EmptyRef,
UniversalRef,
NativeRef,
UnionRef,
SubtractionRef,
DynRef,
DynRefs,
void_ref,
ensure_ref,
map_ref,
collect_ref,
unite_ref,
intersect_ref,
subtract_ref
export Shape,
Shapes,
Backend,
LazyBackend,
Path,
backend,
new_backend,
backend_name,
current_backend,
has_current_backend,
switch_to_backend,
delete_shape, delete_shapes,
delete_all_shapes, mark_deleted,
force_realize,
set_length_unit,
is_collecting_shapes,
collecting_shapes,
collected_shapes,
with_transaction,
surface_boundary,
curve_domain,
surface_domain,
get_layer,
create_layer,
get_or_create_layer,
current_layer,
delete_all_shapes_in_layer,
get_material,
create_material,
get_or_create_material,
create_block,
instantiate_block,
reset_backend,
connection,
@deffamily,
@defproxy,
force_creation,
subpath,
subpath_starting_at,
subpath_ending_at,
bounding_box,
capture_shape, capture_shapes,
captured_shape, captured_shapes,
revolve
#References can be (single or multiple) native references
abstract type GenericRef{K,T} end
struct EmptyRef{K,T} <: GenericRef{K,T} end
struct UniversalRef{K,T} <: GenericRef{K,T} end
struct NativeRef{K,T} <: GenericRef{K,T}
value::T
end
struct NativeRefs{K,T} <: GenericRef{K,T}
values::Vector{T}
end
ensure_ref(b::Backend{K,T}, r::T) where {K,T} = NativeRef{K,T}(r)
ensure_ref(b::Backend{K,T}, rs::Vector{T}) where {K,T} =
length(rs) == 1 ?
NativeRef{K,T}(rs[1]) :
NativeRefs{K,T}(rs)
#Unions and subtractions are needed because actual backends frequently fail those operations
struct UnionRef{K,T} <: GenericRef{K,T}
values::Tuple{Vararg{GenericRef{K,T}}}
end
struct SubtractionRef{K,T} <: GenericRef{K,T}
value::GenericRef{K,T}
values::Tuple{Vararg{GenericRef{K,T}}}
end
ensure_ref(b::Backend{K,T}, v::GenericRef{K,T}) where {K,T} = v
ensure_ref(b::Backend{K,T}, v::Vector{<:S}) where {K,T,S} =
length(v) == 1 ?
ensure_ref(b, v[1]) :
UnionRef{K,T}(Tuple((ensure_ref(b, vi) for vi in v)))
# currying
map_ref(b::Backend{K,T}, f::Function) where {K,T} = r -> map_ref(b, f, r)
map_ref(b::Backend{K,T}, f::Function, r::NativeRef{K,T}) where {K,T} = ensure_ref(b, f(r.value))
map_ref(b::Backend{K,T}, f::Function, r::UnionRef{K,T}) where {K,T} = UnionRef{K,T}(map(map_ref(b, f), r.values))
map_ref(b::Backend{K,T}, f::Function, r::SubtractionRef{K,T}) where {K,T} = SubtractionRef{K,T}(map_ref(b, f, r.value), map(map_ref(b, f), r.values))
map_ref(b::Backend{K,T}, f::Function, r::NativeRefs{K,T}) where {K,T} = ensure_ref(b, map(r -> map_ref(b, f, NativeRef{K,T}(r)), r.values))
# currying
collect_ref(b::Backend{K,T}) where {K,T} = r -> collect_ref(b, r)
collect_ref(b::Backend{K,T}, r::EmptyRef{K,T}) where {K,T} = T[]
collect_ref(b::Backend{K,T}, r::NativeRef{K,T}) where {K,T} = T[r.value]
collect_ref(b::Backend{K,T}, r::UnionRef{K,T}) where {K,T} =
mapreduce(collect_ref(b), vcat, r.values, init=T[])
collect_ref(b::Backend{K,T}, r::SubtractionRef{K,T}) where {K,T} =
vcat(collect_ref(b, r.value), mapreduce(collect_ref(b), vcat, r.values, init=T[]))
# Boolean algebra laws
# currying
unite_ref(b::Backend{K,T}) where {K,T} = (r0::GenericRef{K,T}, r1::GenericRef{K,T}) -> unite_ref(b, r0, r1)
unite_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::UniversalRef{K,T}) where {K,T} = r1
unite_ref(b::Backend{K,T}, r0::UniversalRef{K,T}, r1::GenericRef{K,T}) where {K,T} = r0
#To avoid ambiguity
unite_ref(b::Backend{K,T}, r0::UnionRef{K,T}, r1::UnionRef{K,T}) where {K,T} =
unite_ref(b, unite_refs(b, r0), unite_refs(b, r1))
unite_ref(b::Backend{K,T}, r0::EmptyRef{K,T}, r1::EmptyRef{K,T}) where {K,T} = r0
unite_ref(b::Backend{K,T}, r0::UnionRef{K,T}, r1::EmptyRef{K,T}) where {K,T} = r0
unite_ref(b::Backend{K,T}, r0::EmptyRef{K,T}, r1::UnionRef{K,T}) where {K,T} = r1
unite_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::EmptyRef{K,T}) where {K,T} = r0
unite_ref(b::Backend{K,T}, r0::EmptyRef{K,T}, r1::GenericRef{K,T}) where {K,T} = r1
unite_refs(b::Backend{K,T}, r::UnionRef{K,T}) where {K,T} =
foldr((r0,r1)->unite_ref(b,r0,r1), r.values, init=EmptyRef{K,T}())
unite_ref(b::Backend{K,T}, r0::UnionRef{K,T}, r1::GenericRef{K,T}) where {K,T} =
unite_ref(b, unite_refs(b, r0), r1)
unite_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::UnionRef{K,T}) where {K,T} =
unite_ref(b, r0, unite_refs(b, r1))
# currying
intersect_ref(b::Backend{K,T}) where {K,T} = (r0::GenericRef{K,T}, r1::GenericRef{K,T}) -> intersect_ref(b, r0, r1)
intersect_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::UniversalRef{K,T}) where {K,T} = r0
intersect_ref(b::Backend{K,T}, r0::UniversalRef{K,T}, r1::GenericRef{K,T}) where {K,T} = r1
intersect_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::EmptyRef{K,T}) where {K,T} = r1
intersect_ref(b::Backend{K,T}, r0::EmptyRef{K,T}, r1::GenericRef{K,T}) where {K,T} = r0
intersect_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::UnionRef{K,T}) where {K,T} =
intersect_ref(b, r0, unite_refs(b, r1))
intersect_ref(b::Backend{K,T}, r0::UnionRef{K,T}, r1::GenericRef{K,T}) where {K,T} =
intersect_ref(b, unite_refs(b, r0), r1)
#To avoid ambiguity
# currying
subtract_ref(b::Backend{K,T}) where {K,T} = (r0::GenericRef{K,T}, r1::GenericRef{K,T}) -> subtract_ref(b, r0, r1)
subtract_ref(b::Backend{K,T}, r0::UnionRef{K,T}, r1::UnionRef{K,T}) where {K,T} =
subtract_ref(b, unite_refs(b, r0), unite_refs(b, r1))
subtract_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::UniversalRef{K,T}) where {K,T} = EmptyRef{K,T}()
subtract_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::EmptyRef{K,T}) where {K,T} = r0
subtract_ref(b::Backend{K,T}, r0::EmptyRef{K,T}, r1::GenericRef{K,T}) where {K,T} = r0
subtract_ref(b::Backend{K,T}, r0::GenericRef{K,T}, r1::UnionRef{K,T}) where {K,T} =
subtract_ref(b, r0, unite_refs(b, r1))
subtract_ref(b::Backend{K,T}, r0::UnionRef{K,T}, r1::GenericRef{K,T}) where {K,T} =
subtract_ref(b, unite_refs(b, r0), r1)
# References need to be created, deleted, and recreated, depending on the way the backend works
# For example, each time a shape is consumed, it becomes deleted and might need to be recreated
mutable struct DynRef{K,R}
backend::Backend{K,R}
value::GenericRef{K,R}
created::Int
deleted::Int
end
#DynRef{K,R}(backend::Backend{K,T}) where {K,R} = DynRef{K,R}(backend, void_ref(backend), 0, 0)
DynRef(b::Backend{K,T}, v) where {K,T} = DynRef{K,T}(b, ensure_ref(b, v), 1, 0)
const DynRefs = IdDict{Backend, Any}
const dyn_refs = DynRefs
abstract type Proxy end
backend(s::Proxy) = first(first(s.ref))
realized_in(s::Proxy, b::Backend) = s.ref[b].created == s.ref[b].deleted + 1
# This is so stupid. We need call-next-method.
really_mark_deleted(b::Backend, s::Proxy) = really_mark_deleted(b, s.ref)
really_mark_deleted(b::Backend, ref::DynRefs) = delete!(ref, b)
really_mark_deleted(b::Backend, s::Any) = nothing
mark_deleted(b::Backend, s::Proxy) = really_mark_deleted(b, s)
# We also need to propagate this to all dependencies
mark_deleted(b::Backend, ss::Array{<:Proxy}) = foreach(s->mark_deleted(b, s), ss)
mark_deleted(b::Backend, s::Any) = nothing
marked_deleted(b::Backend, s::Proxy) = !haskey(s.ref, b)
#=
The protocol is this:
ref(b, s) calls
force_realize(b, s)
=#
ref(b::Backend, s::Proxy) =
force_realize(b, s)
reset_ref(b::Backend, s::Proxy) =
delete!(s.ref, b)
force_realize(b::Backend, s::Proxy) =
haskey(s.ref, b) ?
s.ref[b] : #error("Shape was already realized in $(b)") :
s.ref[b] = ensure_ref(b, realize(b, s))
realized(b::Backend, s::Proxy) =
haskey(s.ref, b)
force_realize(s::Proxy) =
for b in current_backends()
force_realize(b, s)
end
# We can also use a shape as a surrogate for another shape
ensure_ref(b::Backend{K,T}, v::Proxy) where {K,T} =
ref(b, v)
abstract type Shape <: Proxy end
show(io::IO, s::Shape) =
print(io, "$(typeof(s))(...)")
Shapes = Vector{<:Shape}
map_ref(f::Function, b::Backend, s::Shape) = map_ref(b, f, ref(b, s))
collect_ref(s::Shape) = error("collect_ref(s.ref.backend, ref(s))")
collect_ref(ss::Shapes) = error("mapreduce(collect_ref, vcat, ss, init=[])")
#=
Whenever a shape is created, it might be eagerly realized in its backend,
depending on the kind of shape and kind of backend (and/or its current state).
Another possibility is for the shape to be saved in some container.
It might also be necessary to record the control flow that caused the shape to be created.
This means that we need to control what happens immediately after a shape is initialized.
The protocol after_init takes care of that.
=#
after_init(a::Any) = a
after_init(s::Shape) =
begin
maybe_realize(s)
maybe_collect(s)
maybe_trace(s)
s
end
#=
Backends might need to immediately realize a shape while still supporting
further modifications, e.g., via boolean operations. Others, however, cannot
do that and can only realize shapes on request, presumably when they have
complete information about them.
A middle ground might be a backend that supports both modes.
=#
delay_realize(b::Backend, s::Shape) =
nothing
delaying_realize = Parameter(false)
with_transaction(fn) =
maybe_realize(with(fn, delaying_realize, true))
maybe_realize(s::Shape) =
delaying_realize() ?
for b in current_backends()
delay_realize(b, s)
end :
for b in current_backends()
maybe_realize(b, s)
end
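#=
A minimal usage sketch (with `subtraction`, `box` and `sphere` as defined later
in this file): the block runs with delaying_realize() set to true, so shapes
created inside it are not eagerly realized; the value returned by the block is
then passed to maybe_realize on exit.
with_transaction() do
  subtraction(box(u0(), 2, 2, 2), sphere(u0(), 1))
end
=#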
#=
Even if a backend is eager, it might be necessary to temporarily delay the
realization of shapes, particularly, when the construction is incremental.
=#
maybe_realize(b::Backend, s::Shape) =
if ! realized(b, s)
force_realize(b, s)
end
abstract type LazyBackend{K,T} <: Backend{K,T} end
maybe_realize(b::LazyBackend, s::Shape) = delay_realize(b, s)
delay_realize(b::LazyBackend, s::Shape) = save_shape!(b, s)
# By default, save_shape! assumes there is a field in the backend to store shapes
export save_shape!
save_shape!(b::Backend, s::Shape) = (push!(b.shapes, s); s)
#=
Frequently, we need to collect all shapes that are created:
=#
# HACK: Replace in_shape_collection with is_collecting_shapes
in_shape_collection = Parameter(false)
is_collecting_shapes = in_shape_collection
collected_shapes = Parameter(Shape[])
collect_shape!(s::Shape) = (push!(collected_shapes(), s); s)
collecting_shapes(fn) =
with(collected_shapes, Shape[]) do
with(in_shape_collection, true) do
fn()
end
collected_shapes()
end
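#=
A usage sketch (with `circle` and `line` as defined later in this file):
shapes = collecting_shapes() do
  circle(u0(), 1)
  line(u0(), x(2))
end
# `shapes` now holds both the circle and the line, in creation order.
=#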
maybe_collect(s::Shape) = (in_shape_collection() && collect_shape!(s); s)
######################################################
#Traceability
traceability = Parameter(false)
trace_depth = Parameter(1000)
excluded_modules = Parameter([Base, Base.CoreLogging, KhepriBase])
# We keep a dict from shapes to file locations
# and a dict from file locations to shapes
shape_to_file_locations = IdDict()
file_location_to_shapes = Dict()
export traceability, trace_depth, excluded_modules, clear_trace!, shape_source, source_shapes
shape_source(s) = get(shape_to_file_locations, s, [])
source_shapes(file, line) = get(file_location_to_shapes, (file, line), [])
clear_trace!() =
begin
empty!(shape_to_file_locations)
empty!(file_location_to_shapes)
end
#=
We do not care about frames that are unrelated to the application.
=#
interesting_locations(frames) =
let locations = [],
max_depth = min(trace_depth(), length(frames)-0)#14)
for i in 2:max_depth
let frame = frames[i],
linfo = frame.linfo
if linfo isa Core.CodeInfo ||
(linfo isa Core.MethodInstance &&
! (linfo.def.module in excluded_modules()))
push!(locations, (frame.file, frame.line))
end
end
end
locations
end
trace!(s) =
let frames = stacktrace(),
locations = interesting_locations(frames)
shape_to_file_locations[s] = locations
for location in locations
file_location_to_shapes[location] = Shape[get(file_location_to_shapes, location, [])..., s]
end
s
end
maybe_trace(s) = traceability() && trace!(s)
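#=
A traceability sketch: turning the parameter on makes every subsequently
created shape record the source locations that created it (with `sphere` as
defined later in this file):
traceability(true)
s = sphere()
shape_source(s)            # => file/line pairs that led to `s`
source_shapes(file, line)  # => shapes created at a given file/line location
=#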
######################################################
macro defshapeop(name_params)
name, params = name_params.args[1], name_params.args[2:end]
quote
export $(esc(name))
$(esc(name))(s::Shape, $(map(esc,params)...), b::Backend=backend(s)) =
throw(UndefinedBackendException())
end
end
export all_shapes, delete_all_shapes
const delete_all_shapes = delete_all_refs
all_shapes(b::Backend=top_backend()) =
Shape[b_shape_from_ref(b, r) for r in b_all_refs(b)]
@bdef(b_shape_from_ref(r))
@defcbs set_length_unit(unit::String="")
@defcb reset_backend()
@defcb save_as(pathname::String, format::String)
new_backend(b::Backend = top_backend()) = backend(b)
struct WrongTypeForParam <: Exception
param::Symbol
value::Any
expected_type::Type
end
Base.showerror(io::IO, e::WrongTypeForParam) =
print(io, "$(e.param) expected a $(e.expected_type) but got $(e.value) of type $(typeof(e.value))")
macro defproxy(name_typename, parent, fields...)
(name, typename) = name_typename isa Symbol ?
(name_typename, Symbol(string(map(uppercasefirst,split(string(name_typename),'_'))...))) :
name_typename.args
name_str = string(name)
struct_name = esc(typename)
field_names = map(field -> field.args[1].args[1], fields)
field_types = map(field -> esc(field.args[1].args[2]), fields)
field_inits = map(field -> field.args[2], fields)
# field_renames = map(esc ∘ Symbol ∘ uppercasefirst ∘ string, field_names)
field_renames = map(Symbol ∘ string, field_names)
field_replacements = Dict(zip(field_names, field_renames))
struct_fields = map((name,typ) -> :($(name) :: $(typ)), field_names, field_types)
# opt_params = map((name,typ,init) -> :($(name) :: $(typ) = $(init)), field_renames, field_types, field_inits)
# key_params = map((name,typ,rename) -> :($(name) :: $(typ) = $(rename)), field_names, field_types, field_renames)
# mk_param(name,typ) = Expr(:kw, Expr(:(::), name, typ))
mk_param(name,typ,init) = Expr(:kw, name, init) #Expr(:kw, Expr(:(::), name, typ), init)
opt_params = map(mk_param, field_renames, field_types, map(init -> replace_in(init, field_replacements), field_inits))
key_params = map(mk_param, field_names, field_types, field_renames)
constructor_name = esc(name)
predicate_name = esc(Symbol("is_", name_str))
#mk_convert(name,typ) = :(isa($(esc(name)), $(typ)) ? $(esc(name)) : throw(WrongTypeForParam($(QuoteNode(name)), $(esc(name)), $(typ))))
mk_convert(name,typ) = :($(esc(name)))
field_converts = map(mk_convert, field_names, field_types)
selector_names = map(field_name -> esc(Symbol(name_str, "_", string(field_name))), field_names)
quote
export $(constructor_name), $(struct_name), $(predicate_name) #, $(selector_names...)
struct $struct_name <: $parent
ref::DynRefs
$(struct_fields...)
end
# we don't need to convert anything because Julia already does that with the default constructor
# and, by the same idea, we don't need to define parameter types.
@noinline $(constructor_name)($(opt_params...); $(key_params...), ref::DynRefs=dyn_refs()) =
after_init($(struct_name)(ref, $(field_converts...)))
$(predicate_name)(v::$(struct_name)) = true
$(predicate_name)(v::Any) = false
$(map((selector_name, field_name) -> :($(selector_name)(v::$(struct_name)) = v.$(field_name)),
selector_names, field_names)...)
KhepriBase.mark_deleted(b::Backend, v::$(struct_name)) =
if ! marked_deleted(b, v)
really_mark_deleted(b, v)
$(map(field_name -> :(mark_deleted(b, v.$(field_name))), field_names)...)
end
KhepriBase.meta_program(v::$(struct_name)) =
Expr(:call, $(Expr(:quote, name)), $(map(field_name -> :(meta_program(v.$(field_name))), field_names)...))
end
end
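#=
As an illustration of what the macro generates, a hypothetical declaration
@defproxy(widget, Proxy, size::Real=1)
expands into a `Widget <: Proxy` struct holding a `ref::DynRefs` field plus the
declared fields, a `widget` constructor (accepting `size` positionally or as a
keyword) that funnels every instance through `after_init`, an `is_widget`
predicate, a `widget_size` selector, and `mark_deleted`/`meta_program` methods.
`widget` is a made-up name used only for this sketch.
=#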
#=
There are entities that have parameters which depend on the backend.
We will assume that these entities have one field, called data, which
should be a BackendParameter.
To assign such a parameter, we use the set_on! function.
=#
export set_on!
set_on!(b::Backend, proxy, ref) =
begin
proxy.data(b, ref)
reset_ref(b, proxy)
proxy
end
set_on!(proxy, ref) =
begin
proxy.data(ref)
proxy
end
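#=
A usage sketch, assuming a backend `b` and some backend-specific material id
(`native_material_id` is a placeholder name):
m = material("Steel")
set_on!(b, m, native_material_id)  # stores the id for `b` and resets m's ref
=#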
#=
Layers are just a classification mechanism.
Some backends, however, can colorize the shapes that have that layer, or can make
those shapes appear and disappear by activating or deactivating the layer.
=#
@defproxy(layer, Proxy, name::String="Layer", active::Bool=true, color::RGB=rgb(1,1,1))
create_layer(args...) =
let s = layer(args...)
force_realize(s)
s
end
realize(b::Backend, l::Layer) =
b_layer(b, l.name, l.active, l.color)
current_layer(backends::Backends=current_backends()) =
layer(ref=DynRefs(b=>ensure_ref(b, b_current_layer(b)) for b in backends))
current_layer(layer, backends::Backends=current_backends()) =
for b in backends
b_current_layer(b, ref(b, layer).value)
end
delete_all_shapes_in_layer(layer, backends::Backends=current_backends()) =
for b in backends
b_delete_all_shapes_in_layer(b, ref(b, layer).value)
end
#=
Materials
A shape can be directly associated with a material, or the shape can be
associated with a layer and the layer is then associated with the material.
=#
@defproxy(material, Proxy, layer::Layer=current_layer(), data::BackendParameter=BackendParameter())
material(name::String, color::RGB=rgb(1,1,1), bvs...) = material(layer(name, true, color), BackendParameter(bvs...))
# Some backends prefer to use layers instead of materials
export material_as_layer, with_material_as_layer
const material_as_layer = Parameter(false)
use_material_as_layer(b::Backend) = material_as_layer()
with_material_as_layer(f::Function, b::Backend, m::Material) =
use_material_as_layer(b) ?
let cur_layer = b_current_layer(b),
new_layer = ref(b, m.layer).value
cur_layer == new_layer ?
f() :
begin
b_current_layer(b, new_layer)
let res = f()
b_current_layer(b, cur_layer)
res
end
end
end :
f()
realize(b::Backend, m::Material) =
b_get_material(b, m.data(b))
# For compatibility
export set_material
const set_material = set_on!
# To facilitate accessing the material reference that is provided to the backends:
material_ref(b::Backend, m::Material) = ref(b, m).value
material_ref(b::Backend, s::Shape) = material_ref(b, s.material)
# These are pre-defined materials that need to be specified by each backend.
export material_point, material_curve, material_surface,
material_basic, material_glass,
material_metal, material_wood,
material_concrete, material_plaster,
material_grass
const material_point = material("Points")
const material_curve = material("Curves")
const material_surface = material("Surfaces")
const material_basic = material("Basic")
const material_glass = material("Glass")
const material_metal = material("Metal")
const material_wood = material("Wood")
const material_concrete = material("Concrete")
const material_plaster = material("Plaster")
const material_grass = material("Grass")
export default_point_material, default_curve_material, default_surface_material, default_material
const default_point_material = Parameter{Material}(material_point)
const default_curve_material = Parameter{Material}(material_curve)
const default_surface_material = Parameter{Material}(material_surface)
const default_material = Parameter{Material}(material_basic)
abstract type Shape0D <: Shape end
abstract type Shape1D <: Shape end
abstract type Shape2D <: Shape end
abstract type Shape3D <: Shape end
is_curve(s::Shape) = false
is_surface(s::Shape) = false
is_solid(s::Shape) = false
is_curve(s::Shape1D) = true
is_surface(s::Shape2D) = true
is_solid(s::Shape3D) = true
# HACK: Fix element type
Shapes0D = Vector{<:Any}
Shapes1D = Vector{<:Any}
Shapes2D = Vector{<:Any}
# This might be usable, so
export @defproxy, realize, Shape0D, Shape1D, Shape2D, Shape3D, void_ref
@defproxy(empty_shape, Shape0D)
@defproxy(universal_shape, Shape3D)
realize(b::Backend, s::EmptyShape) = void_ref(b)
realize(b::Backend, s::UniversalShape) = void_ref(b)
macro defshape(supertype, name_typename, fields...)
# Merge this with defproxy
(name, typename) = name_typename isa Symbol ?
(name_typename, Symbol(string(map(uppercasefirst,split(string(name_typename),'_'))...))) :
name_typename.args
field_names = map(field -> field.args[1].args[1], fields)
default_material =
supertype == :Shape0D ? :default_point_material :
supertype == :Shape1D ? :default_curve_material :
supertype == :Shape2D ? :default_surface_material :
supertype == :Shape3D ? :default_material :
error("Unknown supertype:", supertype)
esc(quote
@defproxy($(name_typename), $(supertype), $(fields...), material::Material=$(default_material)())
realize(b::Backend, s::$(typename)) =
$(Symbol(:b_, name))(b, $(map(f->:(getproperty(s, $(QuoteNode(f)))), field_names)...), material_ref(b, s))
end)
end
@defshape(Shape0D, point, position::Loc=u0())
@defshape(Shape1D, line, vertices::Locs=[u0(), ux()])
line(v0::Loc, v1::Loc, vs...) = line([v0, v1, vs...])
@defshape(Shape1D, closed_line, vertices::Locs=[u0(), ux(), uy()])
closed_line(v0::Loc, v1::Loc, vs...) = closed_line([v0, v1, vs...])
@defshape(Shape1D, spline, points::Locs=[u0(), ux(), uy()], v0::Union{Bool,Vec}=false, v1::Union{Bool,Vec}=false,
interpolator::Parameter{Any}=Parameter{Any}(missing))
spline(v0::Loc, v1::Loc, vs...) = spline([v0, v1, vs...])
#=
evaluate(s::Spline, t::Real) =
let interpolator = s.interpolator
if ismissing(interpolator())
interpolator(curve_interpolator(s.points))
end
let p = interpolator()(t),
vt = Interpolations.gradient(interpolator(), t)[1],
vn = Interpolations.hessian(interpolator(), t)[1]
loc_from_o_vx_vy(
xyz(p[1], p[2], p[3], world_cs),
vxyz(vt[1], vt[2], vt[3], world_cs),
vxyz(vn[1], vn[2], vn[3], world_cs))
end
end
evaluate(s::Spline, t::Real) =
let interpolator = s.interpolator
if ismissing(interpolator())
interpolator(curve_interpolator(s.points))
end
let p = interpolator()(t),
vt = Interpolations.gradient(interpolator(), t)[1],
vn = Interpolations.hessian(interpolator(), t)[1],
vy = cross(vt, vn)
loc_from_o_vx_vy(
xyz(p[1], p[2], p[3], world_cs),
vxyz(vn[1], vn[2], vn[3], world_cs),
vxyz(vy[1], vy[2], vy[3], world_cs))
end
end
=#
map_division(f::Function, s::Spline, n::Int, backend::Backend=backend(s)) =
backend_map_division(backend, f, s, n)
#=HACK, THIS IS NOT READY, YET. COMPARE WITH THE BACKEND VERSION!!!!!!
let (t1, t2) = curve_domain(s)
map_division(t1, t2, n) do t
f(frame_at(s, t))
end
end
=#
#(def-base-shape 1D-shape (spline* [pts : (Listof Loc) (list (u0) (ux) (uy))] [v0 : (U Boolean Vec) #f] [v1 : (U Boolean Vec) #f]))
@defshape(Shape1D, closed_spline, points::Locs=[u0(), ux(), uy()])
closed_spline(v0, v1, vs...) = closed_spline([v0, v1, vs...])
@defshape(Shape1D, circle, center::Loc=u0(), radius::Real=1)
@defshape(Shape1D, arc, center::Loc=u0(), radius::Real=1, start_angle::Real=0, amplitude::Real=pi)
@defshape(Shape1D, elliptic_arc, center::Loc=u0(), radius_x::Real=1, radius_y::Real=1, start_angle::Real=0, amplitude::Real=pi)
@defshape(Shape1D, ellipse, center::Loc=u0(), radius_x::Real=1, radius_y::Real=1)
@defshape(Shape1D, polygon, vertices::Locs=[u0(), ux(), uy()])
polygon(v0, v1, vs...) = polygon([v0, v1, vs...])
@defshape(Shape1D, regular_polygon, edges::Integer=3, center::Loc=u0(), radius::Real=1, angle::Real=0, inscribed::Bool=true)
@defshape(Shape1D, rectangle, corner::Loc=u0(), dx::Real=1, dy::Real=1)
rectangle(p::Loc, q::Loc) =
let v = in_cs(q - p, p.cs)
rectangle(p, v.x, v.y)
end
#
#@defshape dimension(p0::Loc, p1::Loc, p::Loc, scale::Real, style::Symbol)
#@defshape dimension(p0::Loc, p1::Loc, sep::Real, scale::Real, style::Symbol)
@defshape(Shape1D, dimension, from::Loc=u0(), to::Loc=ux(), text::AbstractString=string(distance(from, to)), size::Real=1)
# Surfaces
@defshape(Shape2D, surface_circle, center::Loc=u0(), radius::Real=1)
@defshape(Shape2D, surface_arc, center::Loc=u0(), radius::Real=1, start_angle::Real=0, amplitude::Real=pi)
@defshape(Shape2D, surface_elliptic_arc, center::Loc=u0(), radius_x::Real=1, radius_y::Real=1, start_angle::Real=0, amplitude::Real=pi)
@defshape(Shape2D, surface_ellipse, center::Loc=u0(), radius_x::Real=1, radius_y::Real=1)
@defshape(Shape2D, surface_polygon, vertices::Locs=[u0(), ux(), uy()])
surface_polygon(v0, v1, vs...) = surface_polygon([v0, v1, vs...])
@defshape(Shape2D, surface_regular_polygon, edges::Integer=3, center::Loc=u0(), radius::Real=1, angle::Real=0, inscribed::Bool=true)
@defshape(Shape2D, surface_rectangle, corner::Loc=u0(), dx::Real=1, dy::Real=1)
surface_rectangle(p::Loc, q::Loc) =
let v = in_cs(q - p, p.cs)
surface_rectangle(p, v.x, v.y)
end
@defproxy(surface, Shape2D, frontier::Shapes1D=[circle()])
surface(c0::Shape, cs...) = surface([c0, cs...])
#To be removed
surface_from = surface
@defproxy(surface_path, Shape2D, path::ClosedPath=[circular_path()])
realize(b::Backend, s::SurfacePath) =
backend_fill(b, s.path)
surface_boundary(s::Shape2D, backend::Backend=top_backend()) =
backend_surface_boundary(backend, s)
curve_domain(s::Shape1D, backend::Backend=top_backend()) =
backend_curve_domain(backend, s)
map_division(f::Function, s::Shape1D, n::Int, backend::Backend=top_backend()) =
backend_map_division(backend, f, s, n)
surface_domain(s::Shape2D, backend::Backend=top_backend()) =
backend_surface_domain(backend, s)
map_division(f::Function, s::Shape2D, nu::Int, nv::Int, backend::Backend=top_backend()) =
backend_map_division(backend, f, s, nu, nv)
path_vertices(s::Shape1D) = path_vertices(shape_path(s))
shape_path(s::Circle) = circular_path(s.center, s.radius)
shape_path(s::Spline) = open_spline_path(s.points, s.v0, s.v1)
shape_path(s::ClosedSpline) = closed_spline_path(s.points)
@defshape(Shape0D, text, str::String="", corner::Loc=u0(), height::Real=1)
export text_centered
text_centered(str::String="", center::Loc=u0(), height::Real=1) =
text(str, add_xy(center, -length(str)*height*0.85/2, -height/2), height)
# This is for unknown shapes (they are opaque; the only thing you can do with
# them might be just to delete them)
@defproxy(unknown, Shape3D, baseref::Any=required())
@defshape(Shape3D, sphere, center::Loc=u0(), radius::Real=1)
@defshape(Shape3D, torus, center::Loc=u0(), re::Real=1, ri::Real=1/2)
@defshape(Shape3D, cuboid,
b0::Loc=u0(), b1::Loc=add_x(b0,1), b2::Loc=add_y(b1,1), b3::Loc=add_x(b2,-1),
t0::Loc=add_z(b0,1), t1::Loc=add_x(t0,1), t2::Loc=add_y(t1,1), t3::Loc=add_x(t2,-1))
@defshape(Shape3D, regular_pyramid_frustum, edges::Integer=4, cb::Loc=u0(), rb::Real=1, angle::Real=0, h::Real=1, rt::Real=1, inscribed::Bool=true)
regular_pyramid_frustum(edges::Integer, cb::Loc, rb::Real, angle::Real, ct::Loc, rt::Real=1, inscribed::Bool=true) =
let (c, h) = position_and_height(cb, ct)
regular_pyramid_frustum(edges, c, rb, angle, h, rt, inscribed)
end
@defshape(Shape3D, regular_pyramid, edges::Integer=3, cb::Loc=u0(), rb::Real=1, angle::Real=0, h::Real=1, inscribed::Bool=true)
regular_pyramid(edges::Integer, cb::Loc, rb::Real, angle::Real, ct::Loc, inscribed::Bool=true) =
let (c, h) = position_and_height(cb, ct)
regular_pyramid(edges, c, rb, angle, h, inscribed)
end
@defshape(Shape3D, pyramid_frustum, bs::Locs=[ux(), uy(), uxy()], ts::Locs=[uxz(), uyz(), uxyz()])
@defshape(Shape3D, pyramid, bs::Locs=[ux(), uy(), uxy()], t::Loc=uz())
@defshape(Shape3D, regular_prism, edges::Integer=3, cb::Loc=u0(), r::Real=1, angle::Real=0, h::Real=1, inscribed::Bool=true)
regular_prism(edges::Integer, cb::Loc, r::Real, angle::Real, ct::Loc, inscribed::Bool=true) =
let (c, h) = position_and_height(cb, ct)
regular_prism(edges, c, r, angle, h, inscribed)
end
@defshape(Shape3D, prism, bs::Locs=[ux(), uy(), uxy()], v::Vec=vz(1))
prism(bs::Locs, h::Real) =
prism(bs, vz(h))
@defshape(Shape3D, right_cuboid, cb::Loc=u0(), width::Real=1, height::Real=1, h::Real=1)
right_cuboid(cb::Loc, width::Real, height::Real, ct::Loc, angle::Real=0) =
let (c, h) = position_and_height(cb, ct),
o = angle == 0 ? c : loc_from_o_phi(c, angle)
right_cuboid(o, width, height, h)
end
@defshape(Shape3D, box, c::Loc=u0(), dx::Real=1, dy::Real=dx, dz::Real=dy)
box(c0::Loc, c1::Loc) =
let v = in_cs(c1, c0)-c0
if v.x < 0
c0 = add_x(c0, v.x)
end
if v.y < 0
c0 = add_y(c0, v.y)
end
if v.z < 0
c0 = add_z(c0, v.z)
end
box(c0, abs(v.x), abs(v.y), abs(v.z))
end
@defshape(Shape3D, cone, cb::Loc=u0(), r::Real=1, h::Real=1)
cone(cb::Loc, r::Real, ct::Loc) =
let (c, h) = position_and_height(cb, ct)
cone(c, r, h)
end
@defshape(Shape3D, cone_frustum, cb::Loc=u0(), rb::Real=1, h::Real=1, rt::Real=1)
cone_frustum(cb::Loc, rb::Real, ct::Loc, rt::Real; material=default_material()) =
let (c, h) = position_and_height(cb, ct)
cone_frustum(c, rb, h, rt, material)
end
@defshape(Shape3D, cylinder, cb::Loc=u0(), r::Real=1, h::Real=1)
cylinder(cb::Loc, r::Real, ct::Loc; material=default_material()) =
let (c, h) = position_and_height(cb, ct)
cylinder(c, r, h, material)
end
#=
An isosurface is a surface described by the implicit equation
F(x,y,z) = k
It is frequent to use the simpler form
G(x,y,z) = 0,
by defining G(x,y,z) = F(x,y,z) - k
The name 'iso' means 'same value', which comes from the fact that F(x,y,z)
always has the same value on the surface. The idea is that we sample all
points in space, applying F to each one, and those where F returns k (or G
returns zero) belong to the isosurface. There are several algorithms that
speed up this sampling process, marching cubes being the most popular one.
=#
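#=
For example, the unit sphere centered at the origin is the isosurface
G(x,y,z) = x^2 + y^2 + z^2 - 1 = 0 which, using the spherical coordinate
accessor `sph_rho`, can be written as (a sketch):
isosurface(loc -> sph_rho(loc) - 1, [xyz(-1,-1,-1), xyz(1,1,1)])
=#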
@defshape(Shape3D, isosurface, frep::Function=loc->sph_rho(loc), bounding_box::Locs=[xyz(-1,-1,-1), xyz(+1,+1,+1)])
@defshape(Shape3D, extrusion, profile::Path=circular_path(), v::Vec=vz(1), cb::Loc=u0())
extrusion(profile, h::Real) =
extrusion(profile, vz(h))
@defshape(Shape3D, sweep, path::Union{Shape1D, Path}=circle(), profile::Union{Shape,Path}=point(), rotation::Real=0, scale::Real=1)
@defproxy(revolve_point, Shape1D, profile::Shape0D=point(), p::Loc=u0(), n::Vec=vz(1,p.cs), start_angle::Real=0, amplitude::Real=2*pi)
@defproxy(revolve_curve, Shape2D, profile::Shape1D=line(), p::Loc=u0(), n::Vec=vz(1,p.cs), start_angle::Real=0, amplitude::Real=2*pi)
@defproxy(revolve_surface, Shape3D, profile::Shape2D=circle(), p::Loc=u0(), n::Vec=vz(1,p.cs), start_angle::Real=0, amplitude::Real=2*pi)
revolve(profile::Shape=point(x(1)), p::Loc=u0(), n::Vec=vz(1,p.cs), start_angle::Real=0, amplitude::Real=2*pi) =
if is_point(profile)
revolve_point(profile, p, n, start_angle, amplitude)
elseif is_curve(profile)
revolve_curve(profile, p, n, start_angle, amplitude)
elseif is_surface(profile)
revolve_surface(profile, p, n, start_angle, amplitude)
elseif is_union_shape(profile)
union(map(s->revolve(s, p, n, start_angle, amplitude), profile.shapes))
elseif is_empty_shape(profile)
profile
else
error("Profile is neither a point nor a curve nor a surface")
end
backend_revolve_point(b::Backend, profile::Shape, p::Loc, n::Vec, start_angle::Real, amplitude::Real) = error("Finish this")
backend_revolve_curve(b::Backend, profile::Shape, p::Loc, n::Vec, start_angle::Real, amplitude::Real) = error("Finish this")
backend_revolve_surface(b::Backend, profile::Shape, p::Loc, n::Vec, start_angle::Real, amplitude::Real) = error("Finish this")
realize(b::Backend, s::RevolvePoint) =
backend_revolve_point(b, s.profile, s.p, s.n, s.start_angle, s.amplitude)
realize(b::Backend, s::RevolveCurve) =
backend_revolve_curve(b, s.profile, s.p, s.n, s.start_angle, s.amplitude)
realize(b::Backend, s::RevolveSurface) =
backend_revolve_surface(b, s.profile, s.p, s.n, s.start_angle, s.amplitude)
@defproxy(loft_points, Shape1D, profiles::Shapes0D=Shape[], rails::Shapes=Shape[], ruled::Bool=false, closed::Bool=false)
@defproxy(loft_curves, Shape2D, profiles::Shapes1D=Shape[], rails::Shapes=Shape[], ruled::Bool=false, closed::Bool=false)
@defproxy(loft_surfaces, Shape3D, profiles::Shapes2D=Shape[], rails::Shapes=Shape[], ruled::Bool=false, closed::Bool=false)
@defproxy(loft_curve_point, Shape2D, profile::Shape1D=circle(), point::Shape0D=point(z(1)))
@defproxy(loft_surface_point, Shape3D, profile::Shape2D=surface_circle(), point::Shapes=point(z(1)))
loft(profiles::Shapes=Shape[], rails::Shapes=Shape[], ruled::Bool=false, closed::Bool=false) =
if all(is_point, profiles)
loft_points(profiles, rails, ruled, closed)
elseif all(is_curve, profiles)
loft_curves(profiles, rails, ruled, closed)
elseif all(is_surface, profiles)
loft_surfaces(profiles, rails, ruled, closed)
elseif length(profiles) == 2
let (p, sh) = if is_point(profiles[1])
(profiles[1], profiles[2])
elseif is_point(profiles[2])
(profiles[2], profiles[1])
else
error("Cross sections are neither points nor curves nor surfaces")
end
if is_curve(sh)
loft_curve_point(sh, p)
elseif is_surface(sh)
loft_surface_point(sh, p)
else
error("Can't loft the shapes")
end
end
else
error("Cross sections are neither points nor curves nor surfaces")
end
loft_ruled(profiles::Shapes=Shape[]) = loft(profiles, Shape[], true, false)
export loft, loft_ruled
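#=
A usage sketch: a ruled loft between two circles at different heights.
loft_ruled([circle(u0(), 1), circle(z(2), 0.5)])
=#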
realize(b::Backend, s::LoftPoints) = backend_loft_points(backend(s), s.profiles, s.rails, s.ruled, s.closed)
realize(b::Backend, s::LoftCurves) = backend_loft_curves(backend(s), s.profiles, s.rails, s.ruled, s.closed)
realize(b::Backend, s::LoftSurfaces) = backend_loft_surfaces(backend(s), s.profiles, s.rails, s.ruled, s.closed)
realize(b::Backend, s::LoftCurvePoint) = backend_loft_curve_point(backend(s), s.profile, s.point)
realize(b::Backend, s::LoftSurfacePoint) = backend_loft_surface_point(backend(s), s.profile, s.point)
backend_loft_points(b::Backend, profiles::Shapes, rails::Shapes, ruled::Bool, closed::Bool) =
let f = (ruled ? (closed ? polygon : line) : (closed ? closed_spline : spline))
and_delete_shapes(ref(b, f(map(point_position, profiles), backend=b)),
vcat(profiles, rails))
end
@defproxy(move, Shape3D, shape::Shape=point(), v::Vec=vx())
@defproxy(scale, Shape3D, shape::Shape=point(), s::Real=1, p::Loc=u0())
@defproxy(rotate, Shape3D, shape::Shape=point(), angle::Real=0, p::Loc=u0(), v::Vec=vz(1,p.cs))
@defproxy(transform, Shape3D, shape::Shape=point(), xform::Loc=u0())
#####################################################################
# We can also translate some shapes
translate(s::Line, v::Vec) = line(map(p -> p+v, s.vertices))
translate(s::Polygon, v::Vec) = polygon(map(p -> p+v, s.vertices))
translate(s::Circle, v::Vec) = circle(s.center+v, s.radius)
translate(s::Text, v::Vec) = text(s.str, s.c+v, s.h)
# We can translate arrays of Shapes
translate(ss::Shapes, v::Vec) = translate.(ss, v)
# We can compute the length of shapes as long as we can convert them
curve_length(s::Shape) = curve_length(convert(Path, s))
# We will also need to compute a bounding rectangle
bounding_rectangle(s::Union{Line, Polygon}) =
bounding_rectangle(s.vertices)
bounding_rectangle(pts::Locs) =
let min_p = pts[1]
max_p = min_p
for i in 2:length(pts)
min_p = min_loc(min_p, pts[i])
max_p = max_loc(max_p, pts[i])
end
[min_p, max_p]
end
bounding_rectangle(ss::Shapes) =
bounding_rectangle(mapreduce(bounding_rectangle, vcat, ss))
#####################################################################
#####################################################################
## Conversions
convert(::Type{Path}, s::Rectangle) =
and_delete_shape(rectangular_path(s.corner, s.dx, s.dy), s)
convert(::Type{Path}, s::Line) =
and_delete_shape(convert(OpenPath, s.vertices), s)
convert(::Type{Path}, s::Circle) =
and_delete_shape(circular_path(s.center, s.radius), s)
convert(::Type{Path}, s::Polygon) =
and_delete_shape(polygonal_path(s.vertices), s)
convert(::Type{Region}, s::SurfaceCircle) =
and_delete_shape(region(circular_path(s.center, s.radius)), s)
convert(::Type{Region}, s::SurfacePolygon) =
  and_delete_shape(region(polygonal_path(s.vertices)), s)
#####################################################################
## Paths can be used to generate surfaces and solids
@defproxy(sweep_path, Shape3D, path::Path=polygonal_path(), profile::Path=circular_path(), rotation::Real=0, scale::Real=1)
#####################################################################
export curve_domain, surface_domain, frame_at
surface_domain(s::SurfaceRectangle) = (0, s.dx, 0, s.dy)
surface_domain(s::SurfaceCircle) = (0, s.radius, 0, 2pi)
surface_domain(s::SurfaceArc) = (0, s.radius, s.start_angle, s.amplitude)
export backend_frame_at
backend_frame_at(b::Backend, s::Shape2D, u::Real, v::Real) = error("BUM")
frame_at(c::Shape1D, t::Real) = backend_frame_at(backend(c), c, t)
frame_at(s::Shape2D, u::Real, v::Real) = backend_frame_at(backend(s), s, u, v)
#Some specific cases can be handled in a uniform way without the backend
frame_at(s::SurfaceRectangle, u::Real, v::Real) = add_xy(s.corner, u, v)
frame_at(s::SurfaceCircle, u::Real, v::Real) = add_pol(s.center, u, v)
export union, intersection, subtraction
#=
We do some pre-filtering to deal with the presence of empty shapes or to simplify one-arg cases.
=#
@defproxy(union_shape, Shape3D, shapes::Shapes=Shape[])
union(shapes::Shapes) =
let non_empty_shapes = filter(s -> !is_empty_shape(s), shapes),
count_non_empty_shapes = length(non_empty_shapes)
count_non_empty_shapes == 0 ? empty_shape() :
count_non_empty_shapes == 1 ? non_empty_shapes[1] :
union_shape(non_empty_shapes)
end
union(shape::Shape, shapes...) = union([shape, shapes...])
@defproxy(intersection_shape, Shape3D, shapes::Shapes=Shape[])
intersection(shapes::Shapes) = intersection_shape(shapes)
intersection(shape::Shape, shapes...) =
is_empty_shape(shape) || any(is_empty_shape, shapes) ? empty_shape() :
shapes == [] ? shape : intersection_shape([shape, shapes...])
@defproxy(subtraction_shape2D, Shape2D, shape::Shape=surface_circle(), shapes::Shapes=Shape[])
@defproxy(subtraction_shape3D, Shape3D, shape::Shape=surface_sphere(), shapes::Shapes=Shape[])
subtraction(shape::Shape2D, shapes...) =
is_empty_shape(shape) ? empty_shape() :
let non_empty_shapes = filter(s -> !is_empty_shape(s), shapes),
count_non_empty_shapes = length(non_empty_shapes)
count_non_empty_shapes == 0 ? shape : subtraction_shape2D(shape, [non_empty_shapes...])
end
subtraction(shape::Shape3D, shapes...) =
is_empty_shape(shape) ? empty_shape() :
let non_empty_shapes = filter(s -> !is_empty_shape(s), shapes),
count_non_empty_shapes = length(non_empty_shapes)
count_non_empty_shapes == 0 ? shape : subtraction_shape3D(shape, [non_empty_shapes...])
end
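#=
A usage sketch: punching a cylindrical hole through a box. Empty shapes are
filtered out beforehand, so subtracting an empty_shape() is a no-op.
subtraction(box(u0(), 2, 2, 2), cylinder(xyz(1, 1, -1), 0.5, 4))
=#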
@defproxy(slice, Shape3D, shape::Shape=sphere(), p::Loc=u0(), n::Vec=vz(1))
@defproxy(mirror, Shape3D, shape::Shape=sphere(), p::Loc=u0(), n::Vec=vz(1))
@defproxy(union_mirror, Shape3D, shape::Shape=sphere(), p::Loc=u0(), n::Vec=vz(1))
@defshape(Shape2D, surface_grid, points::Matrix{<:Loc}=zeros(Loc,(2,2)),
closed_u::Bool=false, closed_v::Bool=false,
smooth_u::Bool=true, smooth_v::Bool=true)
surface_grid(_points::Vector{<:Vector{<:Loc}},
_closed_u=false, _closed_v=false, _smooth_u=true, _smooth_v=true, _material=default_material();
points=_points, closed_u=_closed_u, closed_v=_closed_v, smooth_u=_smooth_u, smooth_v=_smooth_v, material=_material) =
surface_grid(permutedims(hcat(points...)), closed_u, closed_v, smooth_u, smooth_v, material=material)
# For interpolator to work, we need this:
convert(::Type{Matrix{XYZ}}, ptss::Vector{Vector{<:Loc}}) =
permutedims(hcat(ptss...))
surface_domain(s::SurfaceGrid) = (0.0, 1.0, 0.0, 1.0)
frame_at(s::SurfaceGrid, u::Real, v::Real) = evaluate(s, u, v)
map_division(f::Function, s::SurfaceGrid, nu::Int, nv::Int, backend::Backend=top_backend()) =
let (u1, u2, v1, v2) = surface_domain(s)
map_division(u1, u2, nu) do u
map_division(v1, v2, nv) do v
f(frame_at(s, u, v))
end
end
end
@defshape(Shape2D, surface_mesh, vertices::Locs=[u0(), ux(), uy()], faces::Vector{Vector{Int}}=[[0,1,2]])
@defproxy(parametric_surface, Shape2D, definition::Function=(u,v)->xyz(u,v,0),
domain_u::Tuple{Real,Real}=(0,1), domain_v::Tuple{Real,Real}=(0,1))
@defproxy(thicken, Shape3D, shape::Shape=surface_circle(), thickness::Real=1)
# Blocks
@defproxy(block, Shape, name::String="Block", shapes::Shapes = Shape[])
@defproxy(block_instance, Shape, block::Block=required(), loc::Loc=u0(), scale::Real=1.0)
################################################################################
################################################################################
bounding_box(shape::Shape) =
bounding_box([shape])
bounding_box(shapes::Shapes=Shape[]) =
if isempty(shapes)
[u0(), u0()]
else
backend_bounding_box(backend(shapes[1]), shapes)
end
backend_bounding_box(backend::Backend, shape::Shape) =
throw(UndefinedBackendException())
@defcbs delete_shape(s::Shape)
b_delete_shape(b::Backend, s::Shape) =
if realized(b, s)
b_delete_refs(b, collect_ref(b, ref(b, s)))
reset_ref(b, s)
end
delete_shapes(ss::Shapes=Shape[], bs=current_backends()) =
for s in ss
delete_shape(s, bs)
end
export and_delete_shape, and_delete_shapes, and_mark_deleted
and_delete_shape(r::Any, shape::Shape) =
begin
delete_shape(shape)
r
end
and_delete_shapes(r::Any, shapes::Shapes) =
begin
delete_shapes(shapes)
r
end
and_mark_deleted(b::Backend, r::Any, shape) =
begin
mark_deleted(b, shape)
r
end
# Common implementations for realize function
realize(b::Backend, s::UnionShape) =
unite_refs(b, UnionRef(tuple(map(s->ref(b, s), s.shapes)...)))
realize(b::Backend, s::Union{SubtractionShape2D,SubtractionShape3D}) =
subtract_ref(b, ref(b, s.shape), unite_refs(b, UnionRef(tuple(map(s->ref(b, s), s.shapes)...))))
function startSketchup(port)
ENV["ROSETTAPORT"] = port
args = "C:\\Users\\aml\\Dropbox\\AML\\Projects\\rosetta\\sketchup\\rosetta.rb"
println(args)
run(`cmd /C Sketchup -RubyStartup $args`)
#Start listening for Sketchup
listener = listen(port)
connection = listener.accept()
readline(connection) == "connected" ? connection : error("Could not connect!")
end
# CAD
@defcb select_position(prompt::String="Select a position")
@defcb select_positions(prompt::String="Select positions")
@defcb select_point(prompt::String="Select a point")
@defcb select_points(prompt::String="Select points")
@defcb select_curve(prompt::String="Select a curve")
@defcb select_curves(prompt::String="Select curves")
@defcb select_surface(prompt::String="Select a surface")
@defcb select_surfaces(prompt::String="Select surfaces")
@defcb select_solid(prompt::String="Select a solid")
@defcb select_solids(prompt::String="Select solids")
@defcb select_shape(prompt::String="Select a shape")
@defcb select_shapes(prompt::String="Select shapes")
@defshapeop register_shape_for_changes(s::Shape)
@defshapeop unregister_shape_for_changes(s::Shape)
@defshapeop waiting_for_changes()
@defcb changed_shape(shapes::Shapes)
@defcbs highlight_shape(s::Shape)
b_highlight_shape(b::Backend, s::Shape) =
if realized(b, s)
b_highlight_refs(b, collect_ref(b, ref(b, s)))
end
export highlight_shapes
highlight_shapes(ss::Shapes=Shape[], bs=current_backends()) =
for s in ss
highlight_shape(s, bs)
end
#
@defcbs unhighlight_shape(s::Shape)
b_unhighlight_shape(b::Backend, s::Shape) =
if realized(b, s)
b_unhighlight_refs(b, collect_ref(b, ref(b, s)))
end
export unhighlight_shapes
unhighlight_shapes(ss::Shapes=Shape[], bs=current_backends()) =
for s in ss
unhighlight_shape(s, bs)
end
export unhighlight_all_shapes
const unhighlight_all_shapes = unhighlight_all_refs
capture_shape(s=select_shape("Select shape to be captured")) =
if ! isnothing(s)
generate_captured_shape(s, backend(s))
end
capture_shapes(ss=select_shapes("Select shapes to be captured")) =
generate_captured_shapes(ss, backend(ss[1]))
export register_for_changes
register_for_changes(shapes::Shapes) =
map(shapes) do shape
register_shape_for_changes(shape, backend(shape))
end
export unregister_for_changes
unregister_for_changes(shapes::Shapes) =
map(shapes) do shape
unregister_shape_for_changes(shape, backend(shape))
end
waiting_for_changes(shapes::Shapes) =
waiting_for_changes(shapes[1], backend(shapes[1]))
export on_change
on_change(f, shape::Shape) = on_change(f, [shape])
on_change(f, shapes) =
let registered = register_for_changes(shapes)
try
while waiting_for_changes(shapes)
let changed = changed_shape(shapes)
f()
end
end
finally
unregister_for_changes(registered)
end
end
#
export with_shape_dependency
with_shape_dependency(f, ss) =
let shapes = collecting_shapes() do
f()
end
on_change(ss) do
try
delete_shapes(shapes)
catch e
end
shapes = collecting_shapes() do
f()
end
end
end
#
export internalize_shape, internalize_shapes
internalize_shape(s=select_shape("Select shape to be internalized")) =
if ! isnothing(s)
println(meta_program(s))
end
internalize_shapes(ss=select_shapes("Select shapes to be internalized")) =
println(meta_program(ss))
# Selection
select_one_with_prompt(prompt::String, b::Backend, f::Function) =
let ans = select_many_with_prompt(prompt, b, f)
length(ans) > 0 ? ans[1] : nothing
end
select_many_with_prompt(prompt::String, b::Backend, f::Function) =
begin
@info "$(prompt) on the $(b) backend."
map(id -> shape_from_ref(id, b), f(connection(b), prompt))
end
export save_view
save_view(name::String="View") =
let path = prepare_for_saving_file(render_pathname(name))
save_view(path, top_backend())
path
end
export realistic_sky
realistic_sky(;
date::DateTime=DateTime(2020, 9, 21, 10, 0, 0),
latitude::Real=39,
longitude::Real=9,
elevation::Real=0,
meridian::Real=0,
altitude::Union{Missing,Real}=missing,
azimuth::Union{Missing,Real}=missing,
turbidity::Real=5,
withsun::Bool=true,
backend::Backend=top_backend()) =
ismissing(altitude) ?
b_realistic_sky(
backend,
date, latitude, longitude, elevation, meridian, turbidity, withsun) :
b_realistic_sky(
backend,
altitude, azimuth, turbidity, withsun)
export ground
ground(level::Real=0, material::Material=material_basic, backend::Backend=top_backend()) =
b_set_ground(backend, level, material_ref(backend, material))
############################################################
# Analysis
abstract type Analysis end
abstract type StructuralAnalysis <: Analysis end
abstract type LightingAnalysis <: Analysis end
###########################################################
# Geometric properties
# Axis-aligned Bounding Box
# Centroid
export centroid
centroid(s::Sphere) = s.center
centroid(s::Cylinder) = add_z(s.cb, s.h/2)
###########################################################
export show_cs
show_cs(p, scale=1) =
let rcyl = scale/10,
rcon = scale/5,
lcyl = scale,
lcon = scale/5,
px = add_x(p, 3*lcyl),
py = add_y(p, 2*lcyl),
pz = add_z(p, 1*lcyl)
union(args...) = args[end] # Unity is having problems with unions
union(cylinder(p, rcyl, px),
cone(px, rcon, add_x(px, lcon)),
cylinder(p, rcyl, py),
cone(py, rcon, add_y(py, lcon)),
cylinder(p, rcyl, pz),
cone(pz, rcon, add_z(pz, lcon)))
end
#
nonzero_offset(l::Line, d::Real) =
line(offset(l.vertices, d, false))
#
export stroke, b_stroke
stroke(path;
material::Material=default_curve_material(),
backend::Backend=top_backend(),
backends::Backends=(backend,)) =
for backend in backends
let mat = material_ref(backend, material)
b_stroke(backend, path, mat)
end
end
export fill, b_fill
fill(path;
material::Material=default_surface_material(),
backend::Backend=top_backend(),
backends::Backends=(backend,)) =
for backend in backends
let mat = material_ref(backend, material)
b_fill(backend, path, mat)
end
end
|
{"hexsha": "60d42d329fcf0fb8a5e7e021c7fdffe232795d78", "size": 49198, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Shapes.jl", "max_stars_repo_name": "aptmcl/KhepriBase.jl", "max_stars_repo_head_hexsha": "3e914cd53999919ac05288f7bd8a7333a3a8253a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Shapes.jl", "max_issues_repo_name": "aptmcl/KhepriBase.jl", "max_issues_repo_head_hexsha": "3e914cd53999919ac05288f7bd8a7333a3a8253a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-02T01:02:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T18:22:09.000Z", "max_forks_repo_path": "src/Shapes.jl", "max_forks_repo_name": "aptmcl/KhepriBase.jl", "max_forks_repo_head_hexsha": "3e914cd53999919ac05288f7bd8a7333a3a8253a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.687546607, "max_line_length": 149, "alphanum_fraction": 0.6761047197, "num_tokens": 14348}
|
# Most of the code is from https://github.com/priya-dwivedi/Deep-Learning
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import pandas as pd
import numpy as np
import nltk
class LDAtext:
def __init__(self):
self.stemmer = SnowballStemmer("english")
def text_preprocess_(self, documents):
'''
documents: list of strings
'''
processed_docs = []
for doc in documents:
processed_docs.append(self.preprocess(doc))
return processed_docs
def lemmatize_stemming(self, text):
return self.stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
    # Tokenize, drop stopwords and short tokens, then lemmatize and stem
    def preprocess(self, text):
        result = []
        for token in gensim.utils.simple_preprocess(text):
            if token not in gensim.parsing.preprocessing.STOPWORDS and len(
                    token) > 3:
                result.append(self.lemmatize_stemming(token))
        return result
def train(self,
documents,
number_of_topics=8,
number_of_passes=10,
number_of_workers=2):
processed_docs = self.text_preprocess_(documents)
self.dictionary = gensim.corpora.Dictionary(processed_docs)
bow_corpus = [self.dictionary.doc2bow(doc) for doc in processed_docs]
self.lda_model = gensim.models.LdaMulticore(
bow_corpus,
num_topics=number_of_topics,
id2word=self.dictionary,
passes=number_of_passes,
workers=number_of_workers)
def predict(self, unseen_doc):
# Data preprocessing step for the unseen document
bow_vector = self.dictionary.doc2bow(self.preprocess(unseen_doc))
return self.lda_model[bow_vector]
def topics(self):
        '''
        For each topic, explore the words occurring in that topic and their relative weight.
        '''
for idx, topic in self.lda_model.print_topics(-1):
print("Topic: {} \nWords: {}".format(idx, topic))
print("\n")
|
{"hexsha": "6584386f0741808f906393d7920de9fb93b2390a", "size": 2172, "ext": "py", "lang": "Python", "max_stars_repo_path": "LDA_text_model.py", "max_stars_repo_name": "riakotti/LDA-Modelling-Example", "max_stars_repo_head_hexsha": "2fda3d2cf751edc18bcb913c401c78cf7e1078e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-08-02T10:35:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T07:26:29.000Z", "max_issues_repo_path": "LDA_text_model.py", "max_issues_repo_name": "riakotti/LDA-Modelling-Example", "max_issues_repo_head_hexsha": "2fda3d2cf751edc18bcb913c401c78cf7e1078e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LDA_text_model.py", "max_forks_repo_name": "riakotti/LDA-Modelling-Example", "max_forks_repo_head_hexsha": "2fda3d2cf751edc18bcb913c401c78cf7e1078e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5915492958, "max_line_length": 96, "alphanum_fraction": 0.6436464088, "include": true, "reason": "import numpy", "num_tokens": 483}
|
from tkinter import Tk
from typing import List, Optional, Union
import numpy as np
from IPython import display
from PIL import ImageGrab
from puzzle.heuristics import analyze
from puzzle.puzzlepedia import prod_config, puzzle, puzzle_widget
_INITIALIZED = False
def parse(
source: Optional[puzzle.PuzzleSources] = None,
hint: analyze.Hint = None,
threshold: float = None):
_init()
if source is None:
source = _get_clipboard()
if threshold is None:
result = puzzle.Puzzle('first stage', source, hint=hint)
else:
result = puzzle.Puzzle(
'first stage', source, hint=hint, threshold=threshold)
interact_with(result)
return result
def interact_with(puzzle: puzzle.Puzzle) -> None:
_init()
display.display(puzzle_widget.PuzzleWidget(puzzle))
def initialized() -> bool:
return _INITIALIZED
def _init() -> None:
global _INITIALIZED
if not _INITIALIZED:
_INITIALIZED = True
prod_config.reset()
prod_config.init()
def reset() -> None:
global _INITIALIZED
_INITIALIZED = False
prod_config.reset()
def _get_clipboard() -> Union[np.ndarray, List[str]]:
image_grab = ImageGrab.grabclipboard()
if image_grab:
return np.array(image_grab)
return [Tk().clipboard_get()]
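# A minimal interactive sketch (assuming a Jupyter session with an image or
# text already on the clipboard; the module path follows this repository's
# layout):
#
#   from puzzle.puzzlepedia import puzzlepedia
#   p = puzzlepedia.parse()  # grabs the clipboard and displays a PuzzleWidget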
|
{"hexsha": "509cb3a14d60b8715f87b95b67d295ed7a5d3137", "size": 1249, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/puzzle/puzzlepedia/puzzlepedia.py", "max_stars_repo_name": "PhilHarnish/forge", "max_stars_repo_head_hexsha": "663f19d759b94d84935c14915922070635a4af65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-18T18:43:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-18T20:05:59.000Z", "max_issues_repo_path": "src/puzzle/puzzlepedia/puzzlepedia.py", "max_issues_repo_name": "PhilHarnish/forge", "max_issues_repo_head_hexsha": "663f19d759b94d84935c14915922070635a4af65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/puzzle/puzzlepedia/puzzlepedia.py", "max_forks_repo_name": "PhilHarnish/forge", "max_forks_repo_head_hexsha": "663f19d759b94d84935c14915922070635a4af65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5344827586, "max_line_length": 65, "alphanum_fraction": 0.7221777422, "include": true, "reason": "import numpy", "num_tokens": 313}
|
/-
Copyright (c) 2021 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
! This file was ported from Lean 3 source module order.succ_pred.basic
! leanprover-community/mathlib commit 0111834459f5d7400215223ea95ae38a1265a907
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Order.CompleteLattice
import Mathbin.Order.Cover
import Mathbin.Order.Iterate
/-!
# Successor and predecessor
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines successor and predecessor orders. `succ a`, the successor of an element `a : α` is
the least element greater than `a`. `pred a` is the greatest element less than `a`. Typical examples
include `ℕ`, `ℤ`, `ℕ+`, `fin n`, but also `enat`, the lexicographic order of a successor/predecessor
order...
## Typeclasses
* `succ_order`: Order equipped with a sensible successor function.
* `pred_order`: Order equipped with a sensible predecessor function.
* `is_succ_archimedean`: `succ_order` where `succ` iterated to an element gives all the greater
ones.
* `is_pred_archimedean`: `pred_order` where `pred` iterated to an element gives all the smaller
ones.
## Implementation notes
Maximal elements don't have a sensible successor. Thus the naïve typeclass
```lean
class naive_succ_order (α : Type*) [preorder α] :=
(succ : α → α)
(succ_le_iff : ∀ {a b}, succ a ≤ b ↔ a < b)
(lt_succ_iff : ∀ {a b}, a < succ b ↔ a ≤ b)
```
can't apply to an `order_top` because plugging in `a = b = ⊤` into either of `succ_le_iff` and
`lt_succ_iff` yields `⊤ < ⊤` (or more generally `m < m` for a maximal element `m`).
The solution taken here is to remove the implications `≤ → <` and instead require that `a < succ a`
for all non maximal elements (enforced by the combination of `le_succ` and the contrapositive of
`max_of_succ_le`).
The stricter condition of every element having a sensible successor can be obtained through the
combination of `succ_order α` and `no_max_order α`.
## TODO
Is `galois_connection pred succ` always true? If not, we should introduce
```lean
class succ_pred_order (α : Type*) [preorder α] extends succ_order α, pred_order α :=
(pred_succ_gc : galois_connection (pred : α → α) succ)
```
`covby` should help here.
-/
open Function OrderDual Set
variable {α : Type _}
#print SuccOrder /-
/-- Order equipped with a sensible successor function. -/
@[ext]
class SuccOrder (α : Type _) [Preorder α] where
succ : α → α
le_succ : ∀ a, a ≤ succ a
max_of_succ_le {a} : succ a ≤ a → IsMax a
succ_le_of_lt {a b} : a < b → succ a ≤ b
le_of_lt_succ {a b} : a < succ b → a ≤ b
#align succ_order SuccOrder
-/
#print PredOrder /-
/-- Order equipped with a sensible predecessor function. -/
@[ext]
class PredOrder (α : Type _) [Preorder α] where
pred : α → α
pred_le : ∀ a, pred a ≤ a
min_of_le_pred {a} : a ≤ pred a → IsMin a
le_pred_of_lt {a b} : a < b → a ≤ pred b
le_of_pred_lt {a b} : pred a < b → a ≤ b
#align pred_order PredOrder
-/
instance [Preorder α] [SuccOrder α] : PredOrder αᵒᵈ
where
pred := toDual ∘ SuccOrder.succ ∘ ofDual
pred_le := SuccOrder.le_succ
min_of_le_pred _ := SuccOrder.max_of_succ_le
le_pred_of_lt a b h := SuccOrder.succ_le_of_lt h
le_of_pred_lt a b := SuccOrder.le_of_lt_succ
instance [Preorder α] [PredOrder α] : SuccOrder αᵒᵈ
where
succ := toDual ∘ PredOrder.pred ∘ ofDual
le_succ := PredOrder.pred_le
max_of_succ_le _ := PredOrder.min_of_le_pred
succ_le_of_lt a b h := PredOrder.le_pred_of_lt h
le_of_lt_succ a b := PredOrder.le_of_pred_lt
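/- By definition of the instances above, `pred` on `αᵒᵈ` is literally `succ` on `α`. A quick
sanity check (a sketch, not part of the ported source):
```lean
example [Preorder α] [SuccOrder α] (a : α) :
    PredOrder.pred (toDual a) = toDual (SuccOrder.succ a) :=
  rfl
```
-/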
section Preorder
variable [Preorder α]
#print SuccOrder.ofSuccLeIffOfLeLtSucc /-
/-- A constructor for `succ_order α` usable when `α` has no maximal element. -/
def SuccOrder.ofSuccLeIffOfLeLtSucc (succ : α → α) (hsucc_le_iff : ∀ {a b}, succ a ≤ b ↔ a < b)
(hle_of_lt_succ : ∀ {a b}, a < succ b → a ≤ b) : SuccOrder α :=
{ succ
le_succ := fun a => (hsucc_le_iff.1 le_rfl).le
max_of_succ_le := fun a ha => (lt_irrefl a <| hsucc_le_iff.1 ha).elim
succ_le_of_lt := fun a b => hsucc_le_iff.2
le_of_lt_succ := fun a b => hle_of_lt_succ }
#align succ_order.of_succ_le_iff_of_le_lt_succ SuccOrder.ofSuccLeIffOfLeLtSucc
-/
#print PredOrder.ofLePredIffOfPredLePred /-
/-- A constructor for `pred_order α` usable when `α` has no minimal element. -/
def PredOrder.ofLePredIffOfPredLePred (pred : α → α) (hle_pred_iff : ∀ {a b}, a ≤ pred b ↔ a < b)
(hle_of_pred_lt : ∀ {a b}, pred a < b → a ≤ b) : PredOrder α :=
{ pred
pred_le := fun a => (hle_pred_iff.1 le_rfl).le
min_of_le_pred := fun a ha => (lt_irrefl a <| hle_pred_iff.1 ha).elim
le_pred_of_lt := fun a b => hle_pred_iff.2
le_of_pred_lt := fun a b => hle_of_pred_lt }
#align pred_order.of_le_pred_iff_of_pred_le_pred PredOrder.ofLePredIffOfPredLePred
-/
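/- A usage sketch for the constructor above, on `ℕ` (assuming the standard lemmas
`Nat.succ_le_iff : succ m ≤ n ↔ m < n` and `Nat.lt_succ_iff : m < succ n ↔ m ≤ n`):
```lean
example : SuccOrder ℕ :=
  SuccOrder.ofSuccLeIffOfLeLtSucc Nat.succ Nat.succ_le_iff Nat.lt_succ_iff.1
```
-/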
end Preorder
section LinearOrder
variable [LinearOrder α]
#print SuccOrder.ofCore /-
/-- A constructor for `succ_order α` for `α` a linear order. -/
@[simps]
def SuccOrder.ofCore (succ : α → α) (hn : ∀ {a}, ¬IsMax a → ∀ b, a < b ↔ succ a ≤ b)
(hm : ∀ a, IsMax a → succ a = a) : SuccOrder α :=
{ succ
succ_le_of_lt := fun a b => by_cases (fun h hab => (hm a h).symm ▸ hab.le) fun h => (hn h b).mp
le_succ := fun a =>
by_cases (fun h => (hm a h).symm.le) fun h => le_of_lt <| by simpa using (hn h a).Not
le_of_lt_succ := fun a b hab =>
by_cases (fun h => hm b h ▸ hab.le) fun h => by simpa [hab] using (hn h a).Not
max_of_succ_le := fun a => not_imp_not.mp fun h => by simpa using (hn h a).Not }
#align succ_order.of_core SuccOrder.ofCore
-/
#print PredOrder.ofCore /-
/-- A constructor for `pred_order α` for `α` a linear order. -/
@[simps]
def PredOrder.ofCore {α} [LinearOrder α] (pred : α → α)
(hn : ∀ {a}, ¬IsMin a → ∀ b, b ≤ pred a ↔ b < a) (hm : ∀ a, IsMin a → pred a = a) :
PredOrder α :=
{ pred
le_pred_of_lt := fun a b => by_cases (fun h hab => (hm b h).symm ▸ hab.le) fun h => (hn h a).mpr
pred_le := fun a =>
by_cases (fun h => (hm a h).le) fun h => le_of_lt <| by simpa using (hn h a).Not
le_of_pred_lt := fun a b hab =>
by_cases (fun h => hm a h ▸ hab.le) fun h => by simpa [hab] using (hn h b).Not
min_of_le_pred := fun a => not_imp_not.mp fun h => by simpa using (hn h a).Not }
#align pred_order.of_core PredOrder.ofCore
-/
#print SuccOrder.ofSuccLeIff /-
/-- A constructor for `succ_order α` usable when `α` is a linear order with no maximal element. -/
def SuccOrder.ofSuccLeIff (succ : α → α) (hsucc_le_iff : ∀ {a b}, succ a ≤ b ↔ a < b) :
SuccOrder α :=
{ succ
le_succ := fun a => (hsucc_le_iff.1 le_rfl).le
max_of_succ_le := fun a ha => (lt_irrefl a <| hsucc_le_iff.1 ha).elim
succ_le_of_lt := fun a b => hsucc_le_iff.2
le_of_lt_succ := fun a b h => le_of_not_lt ((not_congr hsucc_le_iff).1 h.not_le) }
#align succ_order.of_succ_le_iff SuccOrder.ofSuccLeIff
-/
#print PredOrder.ofLePredIff /-
/-- A constructor for `pred_order α` usable when `α` is a linear order with no minimal element. -/
def PredOrder.ofLePredIff (pred : α → α) (hle_pred_iff : ∀ {a b}, a ≤ pred b ↔ a < b) :
PredOrder α :=
{ pred
pred_le := fun a => (hle_pred_iff.1 le_rfl).le
min_of_le_pred := fun a ha => (lt_irrefl a <| hle_pred_iff.1 ha).elim
le_pred_of_lt := fun a b => hle_pred_iff.2
le_of_pred_lt := fun a b h => le_of_not_lt ((not_congr hle_pred_iff).1 h.not_le) }
#align pred_order.of_le_pred_iff PredOrder.ofLePredIff
-/
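/- Dually, on `ℤ` (a linear order with no minimal element) the predecessor is `(· - 1)`; a sketch
assuming the standard lemma `Int.le_sub_one_iff : a ≤ b - 1 ↔ a < b`:
```lean
example : PredOrder ℤ :=
  PredOrder.ofLePredIff (· - 1) Int.le_sub_one_iff
```
-/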
end LinearOrder
/-! ### Successor order -/
namespace Order
section Preorder
variable [Preorder α] [SuccOrder α] {a b : α}
#print Order.succ /-
/-- The successor of an element. If `a` is not maximal, then `succ a` is the least element greater
than `a`. If `a` is maximal, then `succ a = a`. -/
def succ : α → α :=
SuccOrder.succ
#align order.succ Order.succ
-/
#print Order.le_succ /-
theorem le_succ : ∀ a : α, a ≤ succ a :=
SuccOrder.le_succ
#align order.le_succ Order.le_succ
-/
#print Order.max_of_succ_le /-
theorem max_of_succ_le {a : α} : succ a ≤ a → IsMax a :=
SuccOrder.max_of_succ_le
#align order.max_of_succ_le Order.max_of_succ_le
-/
#print Order.succ_le_of_lt /-
theorem succ_le_of_lt {a b : α} : a < b → succ a ≤ b :=
SuccOrder.succ_le_of_lt
#align order.succ_le_of_lt Order.succ_le_of_lt
-/
#print Order.le_of_lt_succ /-
theorem le_of_lt_succ {a b : α} : a < succ b → a ≤ b :=
SuccOrder.le_of_lt_succ
#align order.le_of_lt_succ Order.le_of_lt_succ
-/
#print Order.succ_le_iff_isMax /-
@[simp]
theorem succ_le_iff_isMax : succ a ≤ a ↔ IsMax a :=
⟨max_of_succ_le, fun h => h <| le_succ _⟩
#align order.succ_le_iff_is_max Order.succ_le_iff_isMax
-/
#print Order.lt_succ_iff_not_isMax /-
@[simp]
theorem lt_succ_iff_not_isMax : a < succ a ↔ ¬IsMax a :=
⟨not_isMax_of_lt, fun ha => (le_succ a).lt_of_not_le fun h => ha <| max_of_succ_le h⟩
#align order.lt_succ_iff_not_is_max Order.lt_succ_iff_not_isMax
-/
alias lt_succ_iff_not_is_max ↔ _ lt_succ_of_not_is_max
#align order.lt_succ_of_not_is_max Order.lt_succ_of_not_isMax
#print Order.wcovby_succ /-
theorem wcovby_succ (a : α) : a ⩿ succ a :=
⟨le_succ a, fun b hb => (succ_le_of_lt hb).not_lt⟩
#align order.wcovby_succ Order.wcovby_succ
-/
#print Order.covby_succ_of_not_isMax /-
theorem covby_succ_of_not_isMax (h : ¬IsMax a) : a ⋖ succ a :=
(wcovby_succ a).covby_of_lt <| lt_succ_of_not_isMax h
#align order.covby_succ_of_not_is_max Order.covby_succ_of_not_isMax
-/
#print Order.lt_succ_iff_of_not_isMax /-
theorem lt_succ_iff_of_not_isMax (ha : ¬IsMax a) : b < succ a ↔ b ≤ a :=
⟨le_of_lt_succ, fun h => h.trans_lt <| lt_succ_of_not_isMax ha⟩
#align order.lt_succ_iff_of_not_is_max Order.lt_succ_iff_of_not_isMax
-/
#print Order.succ_le_iff_of_not_isMax /-
theorem succ_le_iff_of_not_isMax (ha : ¬IsMax a) : succ a ≤ b ↔ a < b :=
⟨(lt_succ_of_not_isMax ha).trans_le, succ_le_of_lt⟩
#align order.succ_le_iff_of_not_is_max Order.succ_le_iff_of_not_isMax
-/
#print Order.succ_lt_succ_iff_of_not_isMax /-
theorem succ_lt_succ_iff_of_not_isMax (ha : ¬IsMax a) (hb : ¬IsMax b) : succ a < succ b ↔ a < b :=
by rw [lt_succ_iff_of_not_is_max hb, succ_le_iff_of_not_is_max ha]
#align order.succ_lt_succ_iff_of_not_is_max Order.succ_lt_succ_iff_of_not_isMax
-/
#print Order.succ_le_succ_iff_of_not_isMax /-
theorem succ_le_succ_iff_of_not_isMax (ha : ¬IsMax a) (hb : ¬IsMax b) : succ a ≤ succ b ↔ a ≤ b :=
by rw [succ_le_iff_of_not_is_max ha, lt_succ_iff_of_not_is_max hb]
#align order.succ_le_succ_iff_of_not_is_max Order.succ_le_succ_iff_of_not_isMax
-/
#print Order.succ_le_succ /-
@[simp, mono]
theorem succ_le_succ (h : a ≤ b) : succ a ≤ succ b :=
by
by_cases hb : IsMax b
· by_cases hba : b ≤ a
· exact (hb <| hba.trans <| le_succ _).trans (le_succ _)
· exact succ_le_of_lt ((h.lt_of_not_le hba).trans_le <| le_succ b)
· rwa [succ_le_iff_of_not_is_max fun ha => hb <| ha.mono h, lt_succ_iff_of_not_is_max hb]
#align order.succ_le_succ Order.succ_le_succ
-/
#print Order.succ_mono /-
theorem succ_mono : Monotone (succ : α → α) := fun a b => succ_le_succ
#align order.succ_mono Order.succ_mono
-/
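/- `succ_le_succ` and `succ_mono` compose in the expected way; a small sketch:
```lean
example (h : a ≤ b) : succ (succ a) ≤ succ (succ b) :=
  succ_le_succ (succ_le_succ h)
example : Monotone (succ ∘ succ : α → α) :=
  succ_mono.comp succ_mono
```
-/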
#print Order.le_succ_iterate /-
theorem le_succ_iterate (k : ℕ) (x : α) : x ≤ (succ^[k]) x :=
by
conv_lhs => rw [(by simp only [Function.iterate_id, id.def] : x = (id^[k]) x)]
exact Monotone.le_iterate_of_le succ_mono le_succ k x
#align order.le_succ_iterate Order.le_succ_iterate
-/
#print Order.isMax_iterate_succ_of_eq_of_lt /-
theorem isMax_iterate_succ_of_eq_of_lt {n m : ℕ} (h_eq : (succ^[n]) a = (succ^[m]) a)
(h_lt : n < m) : IsMax ((succ^[n]) a) :=
by
refine' max_of_succ_le (le_trans _ h_eq.symm.le)
have : succ ((succ^[n]) a) = (succ^[n + 1]) a := by rw [Function.iterate_succ']
rw [this]
have h_le : n + 1 ≤ m := Nat.succ_le_of_lt h_lt
exact Monotone.monotone_iterate_of_le_map succ_mono (le_succ a) h_le
#align order.is_max_iterate_succ_of_eq_of_lt Order.isMax_iterate_succ_of_eq_of_lt
-/
#print Order.isMax_iterate_succ_of_eq_of_ne /-
theorem isMax_iterate_succ_of_eq_of_ne {n m : ℕ} (h_eq : (succ^[n]) a = (succ^[m]) a)
(h_ne : n ≠ m) : IsMax ((succ^[n]) a) :=
by
cases le_total n m
· exact is_max_iterate_succ_of_eq_of_lt h_eq (lt_of_le_of_ne h h_ne)
· rw [h_eq]
exact is_max_iterate_succ_of_eq_of_lt h_eq.symm (lt_of_le_of_ne h h_ne.symm)
#align order.is_max_iterate_succ_of_eq_of_ne Order.isMax_iterate_succ_of_eq_of_ne
-/
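/- A usage sketch for the two lemmas above: if iterating `succ` ever repeats, the orbit has hit a
maximal element (`by decide` discharges the numeral inequality `2 < 5`):
```lean
example (h : (succ^[2]) a = (succ^[5]) a) : IsMax ((succ^[2]) a) :=
  isMax_iterate_succ_of_eq_of_lt h (by decide)
```
-/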
#print Order.Iio_succ_of_not_isMax /-
theorem Iio_succ_of_not_isMax (ha : ¬IsMax a) : Iio (succ a) = Iic a :=
Set.ext fun x => lt_succ_iff_of_not_isMax ha
#align order.Iio_succ_of_not_is_max Order.Iio_succ_of_not_isMax
-/
#print Order.Ici_succ_of_not_isMax /-
theorem Ici_succ_of_not_isMax (ha : ¬IsMax a) : Ici (succ a) = Ioi a :=
Set.ext fun x => succ_le_iff_of_not_isMax ha
#align order.Ici_succ_of_not_is_max Order.Ici_succ_of_not_isMax
-/
#print Order.Ico_succ_right_of_not_isMax /-
theorem Ico_succ_right_of_not_isMax (hb : ¬IsMax b) : Ico a (succ b) = Icc a b := by
rw [← Ici_inter_Iio, Iio_succ_of_not_is_max hb, Ici_inter_Iic]
#align order.Ico_succ_right_of_not_is_max Order.Ico_succ_right_of_not_isMax
-/
#print Order.Ioo_succ_right_of_not_isMax /-
theorem Ioo_succ_right_of_not_isMax (hb : ¬IsMax b) : Ioo a (succ b) = Ioc a b := by
rw [← Ioi_inter_Iio, Iio_succ_of_not_is_max hb, Ioi_inter_Iic]
#align order.Ioo_succ_right_of_not_is_max Order.Ioo_succ_right_of_not_isMax
-/
#print Order.Icc_succ_left_of_not_isMax /-
theorem Icc_succ_left_of_not_isMax (ha : ¬IsMax a) : Icc (succ a) b = Ioc a b := by
rw [← Ici_inter_Iic, Ici_succ_of_not_is_max ha, Ioi_inter_Iic]
#align order.Icc_succ_left_of_not_is_max Order.Icc_succ_left_of_not_isMax
-/
#print Order.Ico_succ_left_of_not_isMax /-
theorem Ico_succ_left_of_not_isMax (ha : ¬IsMax a) : Ico (succ a) b = Ioo a b := by
rw [← Ici_inter_Iio, Ici_succ_of_not_is_max ha, Ioi_inter_Iio]
#align order.Ico_succ_left_of_not_is_max Order.Ico_succ_left_of_not_isMax
-/
section NoMaxOrder
variable [NoMaxOrder α]
#print Order.lt_succ /-
theorem lt_succ (a : α) : a < succ a :=
lt_succ_of_not_isMax <| not_isMax a
#align order.lt_succ Order.lt_succ
-/
#print Order.lt_succ_iff /-
@[simp]
theorem lt_succ_iff : a < succ b ↔ a ≤ b :=
lt_succ_iff_of_not_isMax <| not_isMax b
#align order.lt_succ_iff Order.lt_succ_iff
-/
#print Order.succ_le_iff /-
@[simp]
theorem succ_le_iff : succ a ≤ b ↔ a < b :=
succ_le_iff_of_not_isMax <| not_isMax a
#align order.succ_le_iff Order.succ_le_iff
-/
#print Order.succ_le_succ_iff /-
theorem succ_le_succ_iff : succ a ≤ succ b ↔ a ≤ b := by simp
#align order.succ_le_succ_iff Order.succ_le_succ_iff
-/
#print Order.succ_lt_succ_iff /-
theorem succ_lt_succ_iff : succ a < succ b ↔ a < b := by simp
#align order.succ_lt_succ_iff Order.succ_lt_succ_iff
-/
alias succ_le_succ_iff ↔ le_of_succ_le_succ _
#align order.le_of_succ_le_succ Order.le_of_succ_le_succ
alias succ_lt_succ_iff ↔ lt_of_succ_lt_succ succ_lt_succ
#align order.lt_of_succ_lt_succ Order.lt_of_succ_lt_succ
#align order.succ_lt_succ Order.succ_lt_succ
#print Order.succ_strictMono /-
theorem succ_strictMono : StrictMono (succ : α → α) := fun a b => succ_lt_succ
#align order.succ_strict_mono Order.succ_strictMono
-/
#print Order.covby_succ /-
theorem covby_succ (a : α) : a ⋖ succ a :=
covby_succ_of_not_isMax <| not_isMax a
#align order.covby_succ Order.covby_succ
-/
#print Order.Iio_succ /-
@[simp]
theorem Iio_succ (a : α) : Iio (succ a) = Iic a :=
Iio_succ_of_not_isMax <| not_isMax _
#align order.Iio_succ Order.Iio_succ
-/
#print Order.Ici_succ /-
@[simp]
theorem Ici_succ (a : α) : Ici (succ a) = Ioi a :=
Ici_succ_of_not_isMax <| not_isMax _
#align order.Ici_succ Order.Ici_succ
-/
#print Order.Ico_succ_right /-
@[simp]
theorem Ico_succ_right (a b : α) : Ico a (succ b) = Icc a b :=
Ico_succ_right_of_not_isMax <| not_isMax _
#align order.Ico_succ_right Order.Ico_succ_right
-/
#print Order.Ioo_succ_right /-
@[simp]
theorem Ioo_succ_right (a b : α) : Ioo a (succ b) = Ioc a b :=
Ioo_succ_right_of_not_isMax <| not_isMax _
#align order.Ioo_succ_right Order.Ioo_succ_right
-/
#print Order.Icc_succ_left /-
@[simp]
theorem Icc_succ_left (a b : α) : Icc (succ a) b = Ioc a b :=
Icc_succ_left_of_not_isMax <| not_isMax _
#align order.Icc_succ_left Order.Icc_succ_left
-/
#print Order.Ico_succ_left /-
@[simp]
theorem Ico_succ_left (a b : α) : Ico (succ a) b = Ioo a b :=
Ico_succ_left_of_not_isMax <| not_isMax _
#align order.Ico_succ_left Order.Ico_succ_left
-/
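/- The simp lemmas above encode the usual "fencepost" idiom: a closed interval can be rewritten as
a half-open one. A small sketch:
```lean
example (a b : α) : Icc a b = Ico a (succ b) :=
  (Ico_succ_right a b).symm
```
-/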
end NoMaxOrder
end Preorder
section PartialOrder
variable [PartialOrder α] [SuccOrder α] {a b : α}
#print Order.succ_eq_iff_isMax /-
@[simp]
theorem succ_eq_iff_isMax : succ a = a ↔ IsMax a :=
⟨fun h => max_of_succ_le h.le, fun h => h.eq_of_ge <| le_succ _⟩
#align order.succ_eq_iff_is_max Order.succ_eq_iff_isMax
-/
alias succ_eq_iff_is_max ↔ _ _root_.is_max.succ_eq
#align is_max.succ_eq IsMax.succ_eq
#print Order.succ_eq_succ_iff_of_not_isMax /-
theorem succ_eq_succ_iff_of_not_isMax (ha : ¬IsMax a) (hb : ¬IsMax b) : succ a = succ b ↔ a = b :=
by
rw [eq_iff_le_not_lt, eq_iff_le_not_lt, succ_le_succ_iff_of_not_is_max ha hb,
succ_lt_succ_iff_of_not_is_max ha hb]
#align order.succ_eq_succ_iff_of_not_is_max Order.succ_eq_succ_iff_of_not_isMax
-/
#print Order.le_le_succ_iff /-
theorem le_le_succ_iff : a ≤ b ∧ b ≤ succ a ↔ b = a ∨ b = succ a :=
by
refine'
⟨fun h =>
or_iff_not_imp_left.2 fun hba : b ≠ a =>
h.2.antisymm (succ_le_of_lt <| h.1.lt_of_ne <| hba.symm),
_⟩
rintro (rfl | rfl)
· exact ⟨le_rfl, le_succ b⟩
· exact ⟨le_succ a, le_rfl⟩
#align order.le_le_succ_iff Order.le_le_succ_iff
-/
#print Order.Covby.succ_eq /-
theorem Order.Covby.succ_eq (h : a ⋖ b) : succ a = b :=
(succ_le_of_lt h.lt).eq_of_not_lt fun h' => h.2 (lt_succ_of_not_isMax h.lt.not_isMax) h'
#align covby.succ_eq Order.Covby.succ_eq
-/
#print Order.Wcovby.le_succ /-
theorem Order.Wcovby.le_succ (h : a ⩿ b) : b ≤ succ a :=
by
obtain h | rfl := h.covby_or_eq
· exact h.succ_eq.ge
· exact le_succ _
#align wcovby.le_succ Order.Wcovby.le_succ
-/
#print Order.le_succ_iff_eq_or_le /-
theorem le_succ_iff_eq_or_le : a ≤ succ b ↔ a = succ b ∨ a ≤ b :=
by
by_cases hb : IsMax b
· rw [hb.succ_eq, or_iff_right_of_imp le_of_eq]
· rw [← lt_succ_iff_of_not_is_max hb, le_iff_eq_or_lt]
#align order.le_succ_iff_eq_or_le Order.le_succ_iff_eq_or_le
-/
#print Order.lt_succ_iff_eq_or_lt_of_not_isMax /-
theorem lt_succ_iff_eq_or_lt_of_not_isMax (hb : ¬IsMax b) : a < succ b ↔ a = b ∨ a < b :=
(lt_succ_iff_of_not_isMax hb).trans le_iff_eq_or_lt
#align order.lt_succ_iff_eq_or_lt_of_not_is_max Order.lt_succ_iff_eq_or_lt_of_not_isMax
-/
#print Order.Iic_succ /-
theorem Iic_succ (a : α) : Iic (succ a) = insert (succ a) (Iic a) :=
ext fun _ => le_succ_iff_eq_or_le
#align order.Iic_succ Order.Iic_succ
-/
#print Order.Icc_succ_right /-
theorem Icc_succ_right (h : a ≤ succ b) : Icc a (succ b) = insert (succ b) (Icc a b) := by
simp_rw [← Ici_inter_Iic, Iic_succ, inter_insert_of_mem (mem_Ici.2 h)]
#align order.Icc_succ_right Order.Icc_succ_right
-/
#print Order.Ioc_succ_right /-
theorem Ioc_succ_right (h : a < succ b) : Ioc a (succ b) = insert (succ b) (Ioc a b) := by
simp_rw [← Ioi_inter_Iic, Iic_succ, inter_insert_of_mem (mem_Ioi.2 h)]
#align order.Ioc_succ_right Order.Ioc_succ_right
-/
#print Order.Iio_succ_eq_insert_of_not_isMax /-
theorem Iio_succ_eq_insert_of_not_isMax (h : ¬IsMax a) : Iio (succ a) = insert a (Iio a) :=
ext fun _ => lt_succ_iff_eq_or_lt_of_not_isMax h
#align order.Iio_succ_eq_insert_of_not_is_max Order.Iio_succ_eq_insert_of_not_isMax
-/
#print Order.Ico_succ_right_eq_insert_of_not_isMax /-
theorem Ico_succ_right_eq_insert_of_not_isMax (h₁ : a ≤ b) (h₂ : ¬IsMax b) :
Ico a (succ b) = insert b (Ico a b) := by
simp_rw [← Iio_inter_Ici, Iio_succ_eq_insert_of_not_is_max h₂, insert_inter_of_mem (mem_Ici.2 h₁)]
#align order.Ico_succ_right_eq_insert_of_not_is_max Order.Ico_succ_right_eq_insert_of_not_isMax
-/
#print Order.Ioo_succ_right_eq_insert_of_not_isMax /-
theorem Ioo_succ_right_eq_insert_of_not_isMax (h₁ : a < b) (h₂ : ¬IsMax b) :
Ioo a (succ b) = insert b (Ioo a b) := by
simp_rw [← Iio_inter_Ioi, Iio_succ_eq_insert_of_not_is_max h₂, insert_inter_of_mem (mem_Ioi.2 h₁)]
#align order.Ioo_succ_right_eq_insert_of_not_is_max Order.Ioo_succ_right_eq_insert_of_not_isMax
-/
section NoMaxOrder
variable [NoMaxOrder α]
#print Order.succ_eq_succ_iff /-
@[simp]
theorem succ_eq_succ_iff : succ a = succ b ↔ a = b :=
succ_eq_succ_iff_of_not_isMax (not_isMax a) (not_isMax b)
#align order.succ_eq_succ_iff Order.succ_eq_succ_iff
-/
#print Order.succ_injective /-
theorem succ_injective : Injective (succ : α → α) := fun a b => succ_eq_succ_iff.1
#align order.succ_injective Order.succ_injective
-/
#print Order.succ_ne_succ_iff /-
theorem succ_ne_succ_iff : succ a ≠ succ b ↔ a ≠ b :=
succ_injective.ne_iff
#align order.succ_ne_succ_iff Order.succ_ne_succ_iff
-/
alias succ_ne_succ_iff ↔ _ succ_ne_succ
#align order.succ_ne_succ Order.succ_ne_succ
#print Order.lt_succ_iff_eq_or_lt /-
theorem lt_succ_iff_eq_or_lt : a < succ b ↔ a = b ∨ a < b :=
lt_succ_iff.trans le_iff_eq_or_lt
#align order.lt_succ_iff_eq_or_lt Order.lt_succ_iff_eq_or_lt
-/
#print Order.succ_eq_iff_covby /-
theorem succ_eq_iff_covby : succ a = b ↔ a ⋖ b :=
⟨by
rintro rfl
exact covby_succ _, Order.Covby.succ_eq⟩
#align order.succ_eq_iff_covby Order.succ_eq_iff_covby
-/
#print Order.Iio_succ_eq_insert /-
theorem Iio_succ_eq_insert (a : α) : Iio (succ a) = insert a (Iio a) :=
Iio_succ_eq_insert_of_not_isMax <| not_isMax a
#align order.Iio_succ_eq_insert Order.Iio_succ_eq_insert
-/
#print Order.Ico_succ_right_eq_insert /-
theorem Ico_succ_right_eq_insert (h : a ≤ b) : Ico a (succ b) = insert b (Ico a b) :=
Ico_succ_right_eq_insert_of_not_isMax h <| not_isMax b
#align order.Ico_succ_right_eq_insert Order.Ico_succ_right_eq_insert
-/
#print Order.Ioo_succ_right_eq_insert /-
theorem Ioo_succ_right_eq_insert (h : a < b) : Ioo a (succ b) = insert b (Ioo a b) :=
Ioo_succ_right_eq_insert_of_not_isMax h <| not_isMax b
#align order.Ioo_succ_right_eq_insert Order.Ioo_succ_right_eq_insert
-/
end NoMaxOrder
section OrderTop
variable [OrderTop α]
/- warning: order.succ_top -> Order.succ_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Eq.{succ u1} α (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Eq.{succ u1} α (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
Case conversion may be inaccurate. Consider using '#align order.succ_top Order.succ_topₓ'. -/
@[simp]
theorem succ_top : succ (⊤ : α) = ⊤ :=
isMax_top.succ_eq
#align order.succ_top Order.succ_top
/- warning: order.succ_le_iff_eq_top -> Order.succ_le_iff_eq_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) a) (Eq.{succ u1} α a (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) a) (Eq.{succ u1} α a (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
Case conversion may be inaccurate. Consider using '#align order.succ_le_iff_eq_top Order.succ_le_iff_eq_topₓ'. -/
@[simp]
theorem succ_le_iff_eq_top : succ a ≤ a ↔ a = ⊤ :=
succ_le_iff_isMax.trans isMax_iff_eq_top
#align order.succ_le_iff_eq_top Order.succ_le_iff_eq_top
/- warning: order.lt_succ_iff_ne_top -> Order.lt_succ_iff_ne_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) a (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a)) (Ne.{succ u1} α a (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) a (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a)) (Ne.{succ u1} α a (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
Case conversion may be inaccurate. Consider using '#align order.lt_succ_iff_ne_top Order.lt_succ_iff_ne_topₓ'. -/
@[simp]
theorem lt_succ_iff_ne_top : a < succ a ↔ a ≠ ⊤ :=
lt_succ_iff_not_isMax.trans not_isMax_iff_ne_top
#align order.lt_succ_iff_ne_top Order.lt_succ_iff_ne_top
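/- A usage sketch: away from `⊤`, `succ` is strictly inflationary.
```lean
example (h : a ≠ ⊤) : a < succ a :=
  lt_succ_iff_ne_top.2 h
```
-/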
end OrderTop
section OrderBot
variable [OrderBot α]
/- warning: order.lt_succ_bot_iff -> Order.lt_succ_bot_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : NoMaxOrder.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) a (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))) (Eq.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : NoMaxOrder.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) a (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))) (Eq.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
Case conversion may be inaccurate. Consider using '#align order.lt_succ_bot_iff Order.lt_succ_bot_iffₓ'. -/
@[simp]
theorem lt_succ_bot_iff [NoMaxOrder α] : a < succ ⊥ ↔ a = ⊥ := by rw [lt_succ_iff, le_bot_iff]
#align order.lt_succ_bot_iff Order.lt_succ_bot_iff
/- warning: order.le_succ_bot_iff -> Order.le_succ_bot_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) a (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))) (Or (Eq.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) (Eq.{succ u1} α a (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) a (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))) (Or (Eq.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) (Eq.{succ u1} α a (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))))
Case conversion may be inaccurate. Consider using '#align order.le_succ_bot_iff Order.le_succ_bot_iffₓ'. -/
theorem le_succ_bot_iff : a ≤ succ ⊥ ↔ a = ⊥ ∨ a = succ ⊥ := by
rw [le_succ_iff_eq_or_le, le_bot_iff, or_comm']
#align order.le_succ_bot_iff Order.le_succ_bot_iff
variable [Nontrivial α]
/- warning: order.bot_lt_succ -> Order.bot_lt_succ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : Nontrivial.{u1} α] (a : α), LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)) (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : Nontrivial.{u1} α] (a : α), LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)) (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a)
Case conversion may be inaccurate. Consider using '#align order.bot_lt_succ Order.bot_lt_succₓ'. -/
theorem bot_lt_succ (a : α) : ⊥ < succ a :=
(lt_succ_of_not_isMax not_isMax_bot).trans_le <| succ_mono bot_le
#align order.bot_lt_succ Order.bot_lt_succ
/- warning: order.succ_ne_bot -> Order.succ_ne_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : Nontrivial.{u1} α] (a : α), Ne.{succ u1} α (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : Nontrivial.{u1} α] (a : α), Ne.{succ u1} α (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
Case conversion may be inaccurate. Consider using '#align order.succ_ne_bot Order.succ_ne_botₓ'. -/
theorem succ_ne_bot (a : α) : succ a ≠ ⊥ :=
(bot_lt_succ a).ne'
#align order.succ_ne_bot Order.succ_ne_bot
end OrderBot
end PartialOrder
/-- There is at most one way to define the successor function in a `partial_order`. -/
instance [PartialOrder α] : Subsingleton (SuccOrder α) :=
⟨by
intro h₀ h₁
ext a
by_cases ha : IsMax a
· exact (@IsMax.succ_eq _ _ h₀ _ ha).trans ha.succ_eq.symm
· exact @Order.Covby.succ_eq _ _ h₀ _ _ (covby_succ_of_not_is_max ha)⟩
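/- Consequently, any two `SuccOrder` structures on a partial order are equal; a sketch:
```lean
example [PartialOrder α] (h₀ h₁ : SuccOrder α) : h₀ = h₁ :=
  Subsingleton.elim h₀ h₁
```
-/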
section CompleteLattice
variable [CompleteLattice α] [SuccOrder α]
/- warning: order.succ_eq_infi -> Order.succ_eq_infᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CompleteLattice.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))] (a : α), Eq.{succ u1} α (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1))) _inst_2 a) (infᵢ.{u1, succ u1} α (CompleteSemilatticeInf.toHasInf.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)) α (fun (b : α) => infᵢ.{u1, 0} α (CompleteSemilatticeInf.toHasInf.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)) (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))) a b) (fun (h : LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))) a b) => b)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CompleteLattice.{u1} α] [_inst_2 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))] (a : α), Eq.{succ u1} α (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1))) _inst_2 a) (infᵢ.{u1, succ u1} α (CompleteLattice.toInfSet.{u1} α _inst_1) α (fun (b : α) => infᵢ.{u1, 0} α (CompleteLattice.toInfSet.{u1} α _inst_1) (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))) a b) (fun (h : LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))) a b) => b)))
Case conversion may be inaccurate. Consider using '#align order.succ_eq_infi Order.succ_eq_infᵢₓ'. -/
theorem succ_eq_infᵢ (a : α) : succ a = ⨅ (b) (h : a < b), b :=
by
refine' le_antisymm (le_infᵢ fun b => le_infᵢ succ_le_of_lt) _
obtain rfl | ha := eq_or_ne a ⊤
· rw [succ_top]
exact le_top
exact infᵢ₂_le _ (lt_succ_iff_ne_top.2 ha)
#align order.succ_eq_infi Order.succ_eq_infᵢ
end CompleteLattice
/-! ### Predecessor order -/
section Preorder
variable [Preorder α] [PredOrder α] {a b : α}
#print Order.pred /-
/-- The predecessor of an element. If `a` is not minimal, then `pred a` is the greatest element less
than `a`. If `a` is minimal, then `pred a = a`. -/
def pred : α → α :=
PredOrder.pred
#align order.pred Order.pred
-/
#print Order.pred_le /-
theorem pred_le : ∀ a : α, pred a ≤ a :=
PredOrder.pred_le
#align order.pred_le Order.pred_le
-/
#print Order.min_of_le_pred /-
theorem min_of_le_pred {a : α} : a ≤ pred a → IsMin a :=
PredOrder.min_of_le_pred
#align order.min_of_le_pred Order.min_of_le_pred
-/
#print Order.le_pred_of_lt /-
theorem le_pred_of_lt {a b : α} : a < b → a ≤ pred b :=
PredOrder.le_pred_of_lt
#align order.le_pred_of_lt Order.le_pred_of_lt
-/
#print Order.le_of_pred_lt /-
theorem le_of_pred_lt {a b : α} : pred a < b → a ≤ b :=
PredOrder.le_of_pred_lt
#align order.le_of_pred_lt Order.le_of_pred_lt
-/
#print Order.le_pred_iff_isMin /-
@[simp]
theorem le_pred_iff_isMin : a ≤ pred a ↔ IsMin a :=
⟨min_of_le_pred, fun h => h <| pred_le _⟩
#align order.le_pred_iff_is_min Order.le_pred_iff_isMin
-/
#print Order.pred_lt_iff_not_isMin /-
@[simp]
theorem pred_lt_iff_not_isMin : pred a < a ↔ ¬IsMin a :=
⟨not_isMin_of_lt, fun ha => (pred_le a).lt_of_not_le fun h => ha <| min_of_le_pred h⟩
#align order.pred_lt_iff_not_is_min Order.pred_lt_iff_not_isMin
-/
alias pred_lt_iff_not_is_min ↔ _ pred_lt_of_not_is_min
#align order.pred_lt_of_not_is_min Order.pred_lt_of_not_isMin
#print Order.pred_wcovby /-
theorem pred_wcovby (a : α) : pred a ⩿ a :=
⟨pred_le a, fun b hb => (le_of_pred_lt hb).not_lt⟩
#align order.pred_wcovby Order.pred_wcovby
-/
#print Order.pred_covby_of_not_isMin /-
theorem pred_covby_of_not_isMin (h : ¬IsMin a) : pred a ⋖ a :=
(pred_wcovby a).covby_of_lt <| pred_lt_of_not_isMin h
#align order.pred_covby_of_not_is_min Order.pred_covby_of_not_isMin
-/
#print Order.pred_lt_iff_of_not_isMin /-
theorem pred_lt_iff_of_not_isMin (ha : ¬IsMin a) : pred a < b ↔ a ≤ b :=
⟨le_of_pred_lt, (pred_lt_of_not_isMin ha).trans_le⟩
#align order.pred_lt_iff_of_not_is_min Order.pred_lt_iff_of_not_isMin
-/
#print Order.le_pred_iff_of_not_isMin /-
theorem le_pred_iff_of_not_isMin (ha : ¬IsMin a) : b ≤ pred a ↔ b < a :=
⟨fun h => h.trans_lt <| pred_lt_of_not_isMin ha, le_pred_of_lt⟩
#align order.le_pred_iff_of_not_is_min Order.le_pred_iff_of_not_isMin
-/
#print Order.pred_le_pred /-
@[simp, mono]
theorem pred_le_pred {a b : α} (h : a ≤ b) : pred a ≤ pred b :=
succ_le_succ h.dual
#align order.pred_le_pred Order.pred_le_pred
-/
#print Order.pred_mono /-
theorem pred_mono : Monotone (pred : α → α) := fun a b => pred_le_pred
#align order.pred_mono Order.pred_mono
-/
#print Order.pred_iterate_le /-
theorem pred_iterate_le (k : ℕ) (x : α) : (pred^[k]) x ≤ x :=
by
conv_rhs => rw [(by simp only [Function.iterate_id, id.def] : x = (id^[k]) x)]
exact Monotone.iterate_le_of_le pred_mono pred_le k x
#align order.pred_iterate_le Order.pred_iterate_le
-/
#print Order.isMin_iterate_pred_of_eq_of_lt /-
theorem isMin_iterate_pred_of_eq_of_lt {n m : ℕ} (h_eq : (pred^[n]) a = (pred^[m]) a)
(h_lt : n < m) : IsMin ((pred^[n]) a) :=
@isMax_iterate_succ_of_eq_of_lt αᵒᵈ _ _ _ _ _ h_eq h_lt
#align order.is_min_iterate_pred_of_eq_of_lt Order.isMin_iterate_pred_of_eq_of_lt
-/
#print Order.isMin_iterate_pred_of_eq_of_ne /-
theorem isMin_iterate_pred_of_eq_of_ne {n m : ℕ} (h_eq : (pred^[n]) a = (pred^[m]) a)
(h_ne : n ≠ m) : IsMin ((pred^[n]) a) :=
@isMax_iterate_succ_of_eq_of_ne αᵒᵈ _ _ _ _ _ h_eq h_ne
#align order.is_min_iterate_pred_of_eq_of_ne Order.isMin_iterate_pred_of_eq_of_ne
-/
#print Order.Ioi_pred_of_not_isMin /-
theorem Ioi_pred_of_not_isMin (ha : ¬IsMin a) : Ioi (pred a) = Ici a :=
Set.ext fun x => pred_lt_iff_of_not_isMin ha
#align order.Ioi_pred_of_not_is_min Order.Ioi_pred_of_not_isMin
-/
#print Order.Iic_pred_of_not_isMin /-
theorem Iic_pred_of_not_isMin (ha : ¬IsMin a) : Iic (pred a) = Iio a :=
Set.ext fun x => le_pred_iff_of_not_isMin ha
#align order.Iic_pred_of_not_is_min Order.Iic_pred_of_not_isMin
-/
#print Order.Ioc_pred_left_of_not_isMin /-
theorem Ioc_pred_left_of_not_isMin (ha : ¬IsMin a) : Ioc (pred a) b = Icc a b := by
rw [← Ioi_inter_Iic, Ioi_pred_of_not_is_min ha, Ici_inter_Iic]
#align order.Ioc_pred_left_of_not_is_min Order.Ioc_pred_left_of_not_isMin
-/
#print Order.Ioo_pred_left_of_not_isMin /-
theorem Ioo_pred_left_of_not_isMin (ha : ¬IsMin a) : Ioo (pred a) b = Ico a b := by
rw [← Ioi_inter_Iio, Ioi_pred_of_not_is_min ha, Ici_inter_Iio]
#align order.Ioo_pred_left_of_not_is_min Order.Ioo_pred_left_of_not_isMin
-/
#print Order.Icc_pred_right_of_not_isMin /-
theorem Icc_pred_right_of_not_isMin (ha : ¬IsMin b) : Icc a (pred b) = Ico a b := by
rw [← Ici_inter_Iic, Iic_pred_of_not_is_min ha, Ici_inter_Iio]
#align order.Icc_pred_right_of_not_is_min Order.Icc_pred_right_of_not_isMin
-/
#print Order.Ioc_pred_right_of_not_isMin /-
theorem Ioc_pred_right_of_not_isMin (ha : ¬IsMin b) : Ioc a (pred b) = Ioo a b := by
rw [← Ioi_inter_Iic, Iic_pred_of_not_is_min ha, Ioi_inter_Iio]
#align order.Ioc_pred_right_of_not_is_min Order.Ioc_pred_right_of_not_isMin
-/
section NoMinOrder
variable [NoMinOrder α]
#print Order.pred_lt /-
theorem pred_lt (a : α) : pred a < a :=
pred_lt_of_not_isMin <| not_isMin a
#align order.pred_lt Order.pred_lt
-/
#print Order.pred_lt_iff /-
@[simp]
theorem pred_lt_iff : pred a < b ↔ a ≤ b :=
pred_lt_iff_of_not_isMin <| not_isMin a
#align order.pred_lt_iff Order.pred_lt_iff
-/
#print Order.le_pred_iff /-
@[simp]
theorem le_pred_iff : a ≤ pred b ↔ a < b :=
le_pred_iff_of_not_isMin <| not_isMin b
#align order.le_pred_iff Order.le_pred_iff
-/
#print Order.pred_le_pred_iff /-
theorem pred_le_pred_iff : pred a ≤ pred b ↔ a ≤ b := by simp
#align order.pred_le_pred_iff Order.pred_le_pred_iff
-/
#print Order.pred_lt_pred_iff /-
theorem pred_lt_pred_iff : pred a < pred b ↔ a < b := by simp
#align order.pred_lt_pred_iff Order.pred_lt_pred_iff
-/
alias pred_le_pred_iff ↔ le_of_pred_le_pred _
#align order.le_of_pred_le_pred Order.le_of_pred_le_pred
alias pred_lt_pred_iff ↔ lt_of_pred_lt_pred pred_lt_pred
#align order.lt_of_pred_lt_pred Order.lt_of_pred_lt_pred
#align order.pred_lt_pred Order.pred_lt_pred
#print Order.pred_strictMono /-
theorem pred_strictMono : StrictMono (pred : α → α) := fun a b => pred_lt_pred
#align order.pred_strict_mono Order.pred_strictMono
-/
#print Order.pred_covby /-
theorem pred_covby (a : α) : pred a ⋖ a :=
pred_covby_of_not_isMin <| not_isMin a
#align order.pred_covby Order.pred_covby
-/
#print Order.Ioi_pred /-
@[simp]
theorem Ioi_pred (a : α) : Ioi (pred a) = Ici a :=
Ioi_pred_of_not_isMin <| not_isMin a
#align order.Ioi_pred Order.Ioi_pred
-/
#print Order.Iic_pred /-
@[simp]
theorem Iic_pred (a : α) : Iic (pred a) = Iio a :=
Iic_pred_of_not_isMin <| not_isMin a
#align order.Iic_pred Order.Iic_pred
-/
#print Order.Ioc_pred_left /-
@[simp]
theorem Ioc_pred_left (a b : α) : Ioc (pred a) b = Icc a b :=
Ioc_pred_left_of_not_isMin <| not_isMin _
#align order.Ioc_pred_left Order.Ioc_pred_left
-/
#print Order.Ioo_pred_left /-
@[simp]
theorem Ioo_pred_left (a b : α) : Ioo (pred a) b = Ico a b :=
Ioo_pred_left_of_not_isMin <| not_isMin _
#align order.Ioo_pred_left Order.Ioo_pred_left
-/
#print Order.Icc_pred_right /-
@[simp]
theorem Icc_pred_right (a b : α) : Icc a (pred b) = Ico a b :=
Icc_pred_right_of_not_isMin <| not_isMin _
#align order.Icc_pred_right Order.Icc_pred_right
-/
#print Order.Ioc_pred_right /-
@[simp]
theorem Ioc_pred_right (a b : α) : Ioc a (pred b) = Ioo a b :=
Ioc_pred_right_of_not_isMin <| not_isMin _
#align order.Ioc_pred_right Order.Ioc_pred_right
-/
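/- Dually to the `succ` case, a closed interval can be rewritten into half-open form with `pred`;
a small sketch:
```lean
example (a b : α) : Ico a b = Icc a (pred b) :=
  (Icc_pred_right a b).symm
```
-/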
end NoMinOrder
end Preorder
section PartialOrder
variable [PartialOrder α] [PredOrder α] {a b : α}
#print Order.pred_eq_iff_isMin /-
@[simp]
theorem pred_eq_iff_isMin : pred a = a ↔ IsMin a :=
⟨fun h => min_of_le_pred h.ge, fun h => h.eq_of_le <| pred_le _⟩
#align order.pred_eq_iff_is_min Order.pred_eq_iff_isMin
-/
alias pred_eq_iff_is_min ↔ _ _root_.is_min.pred_eq
#align is_min.pred_eq IsMin.pred_eq
#print Order.pred_le_le_iff /-
theorem pred_le_le_iff {a b : α} : pred a ≤ b ∧ b ≤ a ↔ b = a ∨ b = pred a :=
by
refine'
⟨fun h =>
or_iff_not_imp_left.2 fun hba : b ≠ a => (le_pred_of_lt <| h.2.lt_of_ne hba).antisymm h.1, _⟩
rintro (rfl | rfl)
· exact ⟨pred_le b, le_rfl⟩
· exact ⟨le_rfl, pred_le a⟩
#align order.pred_le_le_iff Order.pred_le_le_iff
-/
#print Order.Covby.pred_eq /-
theorem Order.Covby.pred_eq {a b : α} (h : a ⋖ b) : pred b = a :=
(le_pred_of_lt h.lt).eq_of_not_gt fun h' => h.2 h' <| pred_lt_of_not_isMin h.lt.not_isMin
#align covby.pred_eq Order.Covby.pred_eq
-/
#print Order.Wcovby.pred_le /-
theorem Order.Wcovby.pred_le (h : a ⩿ b) : pred b ≤ a :=
by
obtain h | rfl := h.covby_or_eq
· exact h.pred_eq.le
· exact pred_le _
#align wcovby.pred_le Order.Wcovby.pred_le
-/
#print Order.pred_le_iff_eq_or_le /-
theorem pred_le_iff_eq_or_le : pred a ≤ b ↔ b = pred a ∨ a ≤ b :=
by
by_cases ha : IsMin a
· rw [ha.pred_eq, or_iff_right_of_imp ge_of_eq]
· rw [← pred_lt_iff_of_not_is_min ha, le_iff_eq_or_lt, eq_comm]
#align order.pred_le_iff_eq_or_le Order.pred_le_iff_eq_or_le
-/
#print Order.pred_lt_iff_eq_or_lt_of_not_isMin /-
theorem pred_lt_iff_eq_or_lt_of_not_isMin (ha : ¬IsMin a) : pred a < b ↔ a = b ∨ a < b :=
(pred_lt_iff_of_not_isMin ha).trans le_iff_eq_or_lt
#align order.pred_lt_iff_eq_or_lt_of_not_is_min Order.pred_lt_iff_eq_or_lt_of_not_isMin
-/
#print Order.Ici_pred /-
theorem Ici_pred (a : α) : Ici (pred a) = insert (pred a) (Ici a) :=
ext fun _ => pred_le_iff_eq_or_le
#align order.Ici_pred Order.Ici_pred
-/
#print Order.Ioi_pred_eq_insert_of_not_isMin /-
theorem Ioi_pred_eq_insert_of_not_isMin (ha : ¬IsMin a) : Ioi (pred a) = insert a (Ioi a) :=
by
ext x; simp only [insert, mem_set_of, @eq_comm _ x a]
exact pred_lt_iff_eq_or_lt_of_not_is_min ha
#align order.Ioi_pred_eq_insert_of_not_is_min Order.Ioi_pred_eq_insert_of_not_isMin
-/
#print Order.Icc_pred_left /-
theorem Icc_pred_left (h : pred a ≤ b) : Icc (pred a) b = insert (pred a) (Icc a b) := by
simp_rw [← Ici_inter_Iic, Ici_pred, insert_inter_of_mem (mem_Iic.2 h)]
#align order.Icc_pred_left Order.Icc_pred_left
-/
#print Order.Ico_pred_left /-
theorem Ico_pred_left (h : pred a < b) : Ico (pred a) b = insert (pred a) (Ico a b) := by
simp_rw [← Ici_inter_Iio, Ici_pred, insert_inter_of_mem (mem_Iio.2 h)]
#align order.Ico_pred_left Order.Ico_pred_left
-/
section NoMinOrder
variable [NoMinOrder α]
#print Order.pred_eq_pred_iff /-
@[simp]
theorem pred_eq_pred_iff : pred a = pred b ↔ a = b := by
simp_rw [eq_iff_le_not_lt, pred_le_pred_iff, pred_lt_pred_iff]
#align order.pred_eq_pred_iff Order.pred_eq_pred_iff
-/
#print Order.pred_injective /-
theorem pred_injective : Injective (pred : α → α) := fun a b => pred_eq_pred_iff.1
#align order.pred_injective Order.pred_injective
-/
#print Order.pred_ne_pred_iff /-
theorem pred_ne_pred_iff : pred a ≠ pred b ↔ a ≠ b :=
pred_injective.ne_iff
#align order.pred_ne_pred_iff Order.pred_ne_pred_iff
-/
alias pred_ne_pred_iff ↔ _ pred_ne_pred
#align order.pred_ne_pred Order.pred_ne_pred
#print Order.pred_lt_iff_eq_or_lt /-
theorem pred_lt_iff_eq_or_lt : pred a < b ↔ a = b ∨ a < b :=
pred_lt_iff.trans le_iff_eq_or_lt
#align order.pred_lt_iff_eq_or_lt Order.pred_lt_iff_eq_or_lt
-/
#print Order.pred_eq_iff_covby /-
theorem pred_eq_iff_covby : pred b = a ↔ a ⋖ b :=
⟨by
rintro rfl
exact pred_covby _, Order.Covby.pred_eq⟩
#align order.pred_eq_iff_covby Order.pred_eq_iff_covby
-/
#print Order.Ioi_pred_eq_insert /-
theorem Ioi_pred_eq_insert (a : α) : Ioi (pred a) = insert a (Ioi a) :=
ext fun _ => pred_lt_iff_eq_or_lt.trans <| or_congr_left eq_comm
#align order.Ioi_pred_eq_insert Order.Ioi_pred_eq_insert
-/
#print Order.Ico_pred_right_eq_insert /-
theorem Ico_pred_right_eq_insert (h : a ≤ b) : Ioc (pred a) b = insert a (Ioc a b) := by
simp_rw [← Ioi_inter_Iic, Ioi_pred_eq_insert, insert_inter_of_mem (mem_Iic.2 h)]
#align order.Ico_pred_right_eq_insert Order.Ico_pred_right_eq_insert
-/
#print Order.Ioo_pred_right_eq_insert /-
theorem Ioo_pred_right_eq_insert (h : a < b) : Ioo (pred a) b = insert a (Ioo a b) := by
simp_rw [← Ioi_inter_Iio, Ioi_pred_eq_insert, insert_inter_of_mem (mem_Iio.2 h)]
#align order.Ioo_pred_right_eq_insert Order.Ioo_pred_right_eq_insert
-/
end NoMinOrder
section OrderBot
variable [OrderBot α]
/- warning: order.pred_bot -> Order.pred_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Eq.{succ u1} α (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Eq.{succ u1} α (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
Case conversion may be inaccurate. Consider using '#align order.pred_bot Order.pred_botₓ'. -/
@[simp]
theorem pred_bot : pred (⊥ : α) = ⊥ :=
isMin_bot.pred_eq
#align order.pred_bot Order.pred_bot
/- warning: order.le_pred_iff_eq_bot -> Order.le_pred_iff_eq_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) a (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a)) (Eq.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) a (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a)) (Eq.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
Case conversion may be inaccurate. Consider using '#align order.le_pred_iff_eq_bot Order.le_pred_iff_eq_botₓ'. -/
@[simp]
theorem le_pred_iff_eq_bot : a ≤ pred a ↔ a = ⊥ :=
@succ_le_iff_eq_top αᵒᵈ _ _ _ _
#align order.le_pred_iff_eq_bot Order.le_pred_iff_eq_bot
/- warning: order.pred_lt_iff_ne_bot -> Order.pred_lt_iff_ne_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) a) (Ne.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) a) (Ne.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
Case conversion may be inaccurate. Consider using '#align order.pred_lt_iff_ne_bot Order.pred_lt_iff_ne_botₓ'. -/
@[simp]
theorem pred_lt_iff_ne_bot : pred a < a ↔ a ≠ ⊥ :=
@lt_succ_iff_ne_top αᵒᵈ _ _ _ _
#align order.pred_lt_iff_ne_bot Order.pred_lt_iff_ne_bot
end OrderBot
section OrderTop
variable [OrderTop α]
/- warning: order.pred_top_lt_iff -> Order.pred_top_lt_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : NoMinOrder.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) a) (Eq.{succ u1} α a (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : NoMinOrder.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) a) (Eq.{succ u1} α a (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))
Case conversion may be inaccurate. Consider using '#align order.pred_top_lt_iff Order.pred_top_lt_iffₓ'. -/
@[simp]
theorem pred_top_lt_iff [NoMinOrder α] : pred ⊤ < a ↔ a = ⊤ :=
@lt_succ_bot_iff αᵒᵈ _ _ _ _ _
#align order.pred_top_lt_iff Order.pred_top_lt_iff
/- warning: order.pred_top_le_iff -> Order.pred_top_le_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) a) (Or (Eq.{succ u1} α a (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) (Eq.{succ u1} α a (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] {a : α} [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))], Iff (LE.le.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) a) (Or (Eq.{succ u1} α a (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))) (Eq.{succ u1} α a (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3)))))
Case conversion may be inaccurate. Consider using '#align order.pred_top_le_iff Order.pred_top_le_iffₓ'. -/
theorem pred_top_le_iff : pred ⊤ ≤ a ↔ a = ⊤ ∨ a = pred ⊤ :=
@le_succ_bot_iff αᵒᵈ _ _ _ _
#align order.pred_top_le_iff Order.pred_top_le_iff
variable [Nontrivial α]
/- warning: order.pred_lt_top -> Order.pred_lt_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : Nontrivial.{u1} α] (a : α), LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : Nontrivial.{u1} α] (a : α), LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
Case conversion may be inaccurate. Consider using '#align order.pred_lt_top Order.pred_lt_topₓ'. -/
theorem pred_lt_top (a : α) : pred a < ⊤ :=
(pred_mono le_top).trans_lt <| pred_lt_of_not_isMin not_isMin_top
#align order.pred_lt_top Order.pred_lt_top
/- warning: order.pred_ne_top -> Order.pred_ne_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : Nontrivial.{u1} α] (a : α), Ne.{succ u1} α (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : PartialOrder.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1))] [_inst_4 : Nontrivial.{u1} α] (a : α), Ne.{succ u1} α (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1) _inst_2 a) (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_1)) _inst_3))
Case conversion may be inaccurate. Consider using '#align order.pred_ne_top Order.pred_ne_topₓ'. -/
theorem pred_ne_top (a : α) : pred a ≠ ⊤ :=
(pred_lt_top a).Ne
#align order.pred_ne_top Order.pred_ne_top
end OrderTop
end PartialOrder
/-- There is at most one way to define the predecessor function in a `partial_order`. -/
instance [PartialOrder α] : Subsingleton (PredOrder α) :=
⟨by
intro h₀ h₁
ext a
by_cases ha : IsMin a
· exact (@IsMin.pred_eq _ _ h₀ _ ha).trans ha.pred_eq.symm
· exact @Order.Covby.pred_eq _ _ h₀ _ _ (pred_covby_of_not_is_min ha)⟩
section CompleteLattice
variable [CompleteLattice α] [PredOrder α]
/- warning: order.pred_eq_supr -> Order.pred_eq_supᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : CompleteLattice.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))] (a : α), Eq.{succ u1} α (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1))) _inst_2 a) (supᵢ.{u1, succ u1} α (CompleteSemilatticeSup.toHasSup.{u1} α (CompleteLattice.toCompleteSemilatticeSup.{u1} α _inst_1)) α (fun (b : α) => supᵢ.{u1, 0} α (CompleteSemilatticeSup.toHasSup.{u1} α (CompleteLattice.toCompleteSemilatticeSup.{u1} α _inst_1)) (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))) b a) (fun (h : LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))) b a) => b)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : CompleteLattice.{u1} α] [_inst_2 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))] (a : α), Eq.{succ u1} α (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1))) _inst_2 a) (supᵢ.{u1, succ u1} α (CompleteLattice.toSupSet.{u1} α _inst_1) α (fun (b : α) => supᵢ.{u1, 0} α (CompleteLattice.toSupSet.{u1} α _inst_1) (LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))) b a) (fun (h : LT.lt.{u1} α (Preorder.toLT.{u1} α (PartialOrder.toPreorder.{u1} α (CompleteSemilatticeInf.toPartialOrder.{u1} α (CompleteLattice.toCompleteSemilatticeInf.{u1} α _inst_1)))) b a) => b)))
Case conversion may be inaccurate. Consider using '#align order.pred_eq_supr Order.pred_eq_supᵢₓ'. -/
theorem pred_eq_supᵢ (a : α) : pred a = ⨆ (b) (h : b < a), b :=
by
refine' le_antisymm _ (supᵢ_le fun b => supᵢ_le le_pred_of_lt)
obtain rfl | ha := eq_or_ne a ⊥
· rw [pred_bot]
exact bot_le
· exact @le_supᵢ₂ _ _ (fun b => b < a) _ (fun a _ => a) (pred a) (pred_lt_iff_ne_bot.2 ha)
#align order.pred_eq_supr Order.pred_eq_supᵢ
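-- Editorial sketch: specializing `pred_eq_supᵢ` at `⊥`, the indexed supremum ranges over the
-- empty set of elements below `⊥`, consistently with the `pred_bot` step in the proof above.
example : pred (⊥ : α) = ⨆ (b) (h : b < (⊥ : α)), b :=
  pred_eq_supᵢ ⊥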
end CompleteLattice
/-! ### Successor-predecessor orders -/
section SuccPredOrder
variable [PartialOrder α] [SuccOrder α] [PredOrder α] {a b : α}
#print Order.succ_pred_of_not_isMin /-
@[simp]
theorem succ_pred_of_not_isMin (h : ¬IsMin a) : succ (pred a) = a :=
(pred_covby_of_not_isMin h).succ_eq
#align order.succ_pred_of_not_is_min Order.succ_pred_of_not_isMin
-/
#print Order.pred_succ_of_not_isMax /-
@[simp]
theorem pred_succ_of_not_isMax (h : ¬IsMax a) : pred (succ a) = a :=
(covby_succ_of_not_isMax h).pred_eq
#align order.pred_succ_of_not_is_max Order.pred_succ_of_not_isMax
-/
#print Order.succ_pred /-
@[simp]
theorem succ_pred [NoMinOrder α] (a : α) : succ (pred a) = a :=
(pred_covby _).succ_eq
#align order.succ_pred Order.succ_pred
-/
#print Order.pred_succ /-
@[simp]
theorem pred_succ [NoMaxOrder α] (a : α) : pred (succ a) = a :=
(covby_succ _).pred_eq
#align order.pred_succ Order.pred_succ
-/
#print Order.pred_succ_iterate_of_not_isMax /-
theorem pred_succ_iterate_of_not_isMax (i : α) (n : ℕ) (hin : ¬IsMax ((succ^[n - 1]) i)) :
(pred^[n]) ((succ^[n]) i) = i := by
induction' n with n hn
· simp only [Function.iterate_zero, id.def]
rw [Nat.succ_sub_succ_eq_sub, Nat.sub_zero] at hin
have h_not_max : ¬IsMax ((succ^[n - 1]) i) :=
by
cases n
· simpa using hin
rw [Nat.succ_sub_succ_eq_sub, Nat.sub_zero] at hn ⊢
have h_sub_le : (succ^[n]) i ≤ (succ^[n.succ]) i :=
by
rw [Function.iterate_succ']
exact le_succ _
refine' fun h_max => hin fun j hj => _
have hj_le : j ≤ (succ^[n]) i := h_max (h_sub_le.trans hj)
exact hj_le.trans h_sub_le
rw [Function.iterate_succ, Function.iterate_succ']
simp only [Function.comp_apply]
rw [pred_succ_of_not_isMax hin]
exact hn h_not_max
#align order.pred_succ_iterate_of_not_is_max Order.pred_succ_iterate_of_not_isMax
-/
#print Order.succ_pred_iterate_of_not_isMin /-
theorem succ_pred_iterate_of_not_isMin (i : α) (n : ℕ) (hin : ¬IsMin ((pred^[n - 1]) i)) :
(succ^[n]) ((pred^[n]) i) = i :=
@pred_succ_iterate_of_not_isMax αᵒᵈ _ _ _ i n hin
#align order.succ_pred_iterate_of_not_is_min Order.succ_pred_iterate_of_not_isMin
-/
end SuccPredOrder
end Order
open Order
/-! ### `with_bot`, `with_top`
Adding a greatest/least element to a `succ_order` or to a `pred_order`.
As far as successors and predecessors are concerned, there are four ways to add a bottom or top
element to an order:
* Adding a `⊤` to an `order_top`: Preserves `succ` and `pred`.
* Adding a `⊤` to a `no_max_order`: Preserves `succ`. Never preserves `pred`.
* Adding a `⊥` to an `order_bot`: Preserves `succ` and `pred`.
* Adding a `⊥` to a `no_min_order`: Preserves `pred`. Never preserves `succ`.
where "preserves `(succ/pred)`" means
`(succ/pred)_order α → (succ/pred)_order ((with_top/with_bot) α)`.
-/
namespace WithTop
/-! #### Adding a `⊤` to an `order_top` -/
section Succ
variable [DecidableEq α] [PartialOrder α] [OrderTop α] [SuccOrder α]
instance : SuccOrder (WithTop α)
where
succ a :=
match a with
| ⊤ => ⊤
| some a => ite (a = ⊤) ⊤ (some (succ a))
le_succ a := by
cases a
· exact le_top
change _ ≤ ite _ _ _
split_ifs
· exact le_top
· exact some_le_some.2 (le_succ a)
max_of_succ_le a ha := by
cases a
· exact isMax_top
change ite _ _ _ ≤ _ at ha
split_ifs at ha with ha'
· exact (not_top_le_coe _ ha).elim
· rw [some_le_some, succ_le_iff_eq_top] at ha
exact (ha' ha).elim
succ_le_of_lt a b h := by
cases b
· exact le_top
cases a
· exact (not_top_lt h).elim
rw [some_lt_some] at h
change ite _ _ _ ≤ _
split_ifs with ha
· rw [ha] at h
exact (not_top_lt h).elim
· exact some_le_some.2 (succ_le_of_lt h)
le_of_lt_succ a b h := by
cases a
· exact (not_top_lt h).elim
cases b
· exact le_top
change _ < ite _ _ _ at h
rw [some_le_some]
split_ifs at h with hb
· rw [hb]
exact le_top
· exact le_of_lt_succ (some_lt_some.1 h)
/- warning: with_top.succ_coe_top -> WithTop.succ_coe_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : PartialOrder.{u1} α] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2))] [_inst_4 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)], Eq.{succ u1} (WithTop.{u1} α) (Order.succ.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) (WithTop.succOrder.{u1} α (fun (a : α) (b : α) => _inst_1 a b) _inst_2 _inst_3 _inst_4) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) _inst_3)))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : PartialOrder.{u1} α] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2))] [_inst_4 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)], Eq.{succ u1} (WithTop.{u1} α) (Order.succ.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) (WithTop.instSuccOrderWithTopPreorderToPreorder.{u1} α (fun (a : α) (b : α) => _inst_1 a b) _inst_2 _inst_3 _inst_4) (WithTop.some.{u1} α (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) _inst_3)))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α))
Case conversion may be inaccurate. Consider using '#align with_top.succ_coe_top WithTop.succ_coe_topₓ'. -/
@[simp]
theorem succ_coe_top : succ ↑(⊤ : α) = (⊤ : WithTop α) :=
dif_pos rfl
#align with_top.succ_coe_top WithTop.succ_coe_top
/- warning: with_top.succ_coe_of_ne_top -> WithTop.succ_coe_of_ne_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : PartialOrder.{u1} α] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2))] [_inst_4 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)] {a : α}, (Ne.{succ u1} α a (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) _inst_3))) -> (Eq.{succ u1} (WithTop.{u1} α) (Order.succ.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) (WithTop.succOrder.{u1} α (fun (a : α) (b : α) => _inst_1 a b) _inst_2 _inst_3 _inst_4) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2) _inst_4 a)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : PartialOrder.{u1} α] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2))] [_inst_4 : SuccOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)] {a : α}, (Ne.{succ u1} α a (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) _inst_3))) -> (Eq.{succ u1} (WithTop.{u1} α) (Order.succ.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) (WithTop.instSuccOrderWithTopPreorderToPreorder.{u1} α (fun (a : α) (b : α) => _inst_1 a b) _inst_2 _inst_3 _inst_4) (WithTop.some.{u1} α a)) (WithTop.some.{u1} α (Order.succ.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2) _inst_4 a)))
Case conversion may be inaccurate. Consider using '#align with_top.succ_coe_of_ne_top WithTop.succ_coe_of_ne_topₓ'. -/
theorem succ_coe_of_ne_top {a : α} (h : a ≠ ⊤) : succ (↑a : WithTop α) = ↑(succ a) :=
dif_neg h
#align with_top.succ_coe_of_ne_top WithTop.succ_coe_of_ne_top
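-- Editorial sketch (assuming mathlib's `with_top.coe_ne_top`): for `a ≠ ⊤`, taking `succ` of a
-- coercion stays below the adjoined `⊤`, since it agrees with the base order's `succ`.
example {a : α} (h : a ≠ ⊤) : succ (↑a : WithTop α) ≠ ⊤ := by
  rw [succ_coe_of_ne_top h]
  exact coe_ne_top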
end Succ
section Pred
variable [Preorder α] [OrderTop α] [PredOrder α]
instance : PredOrder (WithTop α)
where
pred a :=
match a with
| ⊤ => some ⊤
| some a => some (pred a)
pred_le a :=
match a with
| ⊤ => le_top
| some a => some_le_some.2 (pred_le a)
min_of_le_pred a ha := by
cases a
· exact ((coe_lt_top (⊤ : α)).not_le ha).elim
· exact (min_of_le_pred <| some_le_some.1 ha).withTop
le_pred_of_lt a b h := by
cases a
· exact (le_top.not_lt h).elim
cases b
· exact some_le_some.2 le_top
exact some_le_some.2 (le_pred_of_lt <| some_lt_some.1 h)
le_of_pred_lt a b h := by
cases b
· exact le_top
cases a
· exact (not_top_lt <| some_lt_some.1 h).elim
· exact some_le_some.2 (le_of_pred_lt <| some_lt_some.1 h)
/- warning: with_top.pred_top -> WithTop.pred_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : PredOrder.{u1} α _inst_1], Eq.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : PredOrder.{u1} α _inst_1], Eq.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.instPredOrderWithTopPreorder.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α))) (WithTop.some.{u1} α (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2)))
Case conversion may be inaccurate. Consider using '#align with_top.pred_top WithTop.pred_topₓ'. -/
@[simp]
theorem pred_top : pred (⊤ : WithTop α) = ↑(⊤ : α) :=
rfl
#align with_top.pred_top WithTop.pred_top
#print WithTop.pred_coe /-
@[simp]
theorem pred_coe (a : α) : pred (↑a : WithTop α) = ↑(pred a) :=
rfl
#align with_top.pred_coe WithTop.pred_coe
-/
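-- Editorial sketch: chaining `pred_top` with `pred_coe`, two predecessor steps from the adjoined
-- `⊤` land on the coercion of `pred (⊤ : α)`.
example : pred (pred (⊤ : WithTop α)) = ↑(pred (⊤ : α)) := by
  rw [pred_top, pred_coe]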
/- warning: with_top.pred_untop -> WithTop.pred_untop is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : PredOrder.{u1} α _inst_1] (a : WithTop.{u1} α) (ha : Ne.{succ u1} (WithTop.{u1} α) a (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))), Eq.{succ u1} α (Order.pred.{u1} α _inst_1 _inst_3 (WithTop.untop.{u1} α a ha)) (WithTop.untop.{u1} α (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) a) (WithTop.recTopCoe.{u1, 0} α (fun (a : WithTop.{u1} α) => (Ne.{succ u1} (WithTop.{u1} α) a (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) -> (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) a) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) (fun (ha : Ne.{succ u1} (WithTop.{u1} α) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) => Eq.mpr.{0} (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) True (id_tag Tactic.IdTag.simp (Eq.{1} Prop (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) True) (Eq.trans.{1} Prop (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Not False) True (Eq.trans.{1} Prop (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Not (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) (Not False) (Eq.trans.{1} Prop (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Ne.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Not (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) 
(WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) ((fun (a : WithTop.{u1} α) (a_1 : WithTop.{u1} α) (e_1 : Eq.{succ u1} (WithTop.{u1} α) a a_1) (b : WithTop.{u1} α) (b_1 : WithTop.{u1} α) (e_2 : Eq.{succ u1} (WithTop.{u1} α) b b_1) => congr.{succ u1, 1} (WithTop.{u1} α) Prop (Ne.{succ u1} (WithTop.{u1} α) a) (Ne.{succ u1} (WithTop.{u1} α) a_1) b b_1 (congr_arg.{succ u1, succ u1} (WithTop.{u1} α) ((WithTop.{u1} α) -> Prop) a a_1 (Ne.{succ u1} (WithTop.{u1} α)) e_1) e_2) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (WithTop.pred_top.{u1} α _inst_1 _inst_2 _inst_3) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)) (rfl.{succ u1} (WithTop.{u1} α) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) (Ne.def.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) ((fun (a : Prop) (a_1 : Prop) (e_1 : Eq.{1} Prop a a_1) => congr_arg.{1, 1} Prop Prop a a_1 Not e_1) (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) False (propext (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) False ((fun {α : Type.{u1}} {a : α} => iff_false_intro (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (WithTop.coe_ne_top.{u1} α a)) α (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2)))))) (propext (Not False) True not_false_iff))) trivial) (fun (a : α) (ha : Ne.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a) (Top.top.{u1} 
(WithTop.{u1} α) (WithTop.hasTop.{u1} α))) => Eq.mpr.{0} (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) True (id_tag Tactic.IdTag.simp (Eq.{1} Prop (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) True) (Eq.trans.{1} Prop (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Not False) True (Eq.trans.{1} Prop (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Not (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) (Not False) (Eq.trans.{1} Prop (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Ne.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (Not (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) ((fun (a : 
WithTop.{u1} α) (a_1 : WithTop.{u1} α) (e_1 : Eq.{succ u1} (WithTop.{u1} α) a a_1) (b : WithTop.{u1} α) (b_1 : WithTop.{u1} α) (e_2 : Eq.{succ u1} (WithTop.{u1} α) b b_1) => congr.{succ u1, 1} (WithTop.{u1} α) Prop (Ne.{succ u1} (WithTop.{u1} α) a) (Ne.{succ u1} (WithTop.{u1} α) a_1) b b_1 (congr_arg.{succ u1, succ u1} (WithTop.{u1} α) ((WithTop.{u1} α) -> Prop) a a_1 (Ne.{succ u1} (WithTop.{u1} α)) e_1) e_2) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.predOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Order.pred.{u1} α _inst_1 _inst_3 a)) (WithTop.pred_coe.{u1} α _inst_1 _inst_2 _inst_3 a) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)) (rfl.{succ u1} (WithTop.{u1} α) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) (Ne.def.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α)))) ((fun (a : Prop) (a_1 : Prop) (e_1 : Eq.{1} Prop a a_1) => congr_arg.{1, 1} Prop Prop a a_1 Not e_1) (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) False (propext (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) False ((fun {α : Type.{u1}} {a : α} => iff_false_intro (Eq.{succ u1} (WithTop.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithTop.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithTop.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithTop.{u1} α) (WithTop.hasCoeT.{u1} α))) a) (Top.top.{u1} (WithTop.{u1} α) (WithTop.hasTop.{u1} α))) (WithTop.coe_ne_top.{u1} α a)) α (Order.pred.{u1} α _inst_1 _inst_3 a))))) (propext (Not False) True not_false_iff))) trivial) a ha))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : PredOrder.{u1} α _inst_1] (a : WithTop.{u1} α) (ha : Ne.{succ u1} (WithTop.{u1} α) a (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α))), Eq.{succ u1} α (Order.pred.{u1} α _inst_1 _inst_3 (WithTop.untop.{u1} α a ha)) (WithTop.untop.{u1} α (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.instPredOrderWithTopPreorder.{u1} α _inst_1 _inst_2 _inst_3) a) (WithTop.recTopCoe.{u1, 0} α (fun (a : WithTop.{u1} α) => (Ne.{succ u1} (WithTop.{u1} α) a (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α))) -> (Ne.{succ u1} (WithTop.{u1} α) (Order.pred.{u1} (WithTop.{u1} α) (WithTop.preorder.{u1} α _inst_1) (WithTop.instPredOrderWithTopPreorder.{u1} α _inst_1 _inst_2 _inst_3) a) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α)))) (fun (ha : Ne.{succ u1} (WithTop.{u1} α) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α))) => of_eq_true (Not (Eq.{succ u1} (WithTop.{u1} α) (WithTop.some.{u1} α (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α)))) (Eq.trans.{1} Prop (Not (Eq.{succ u1} (WithTop.{u1} α) (WithTop.some.{u1} α (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α)))) (Not False) True (congrArg.{1, 1} Prop Prop (Eq.{succ u1} (WithTop.{u1} α) (WithTop.some.{u1} α (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α))) False Not (Mathlib.Order.WithBot._auxLemma.22.{u1} α (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2)))) Std.Logic._auxLemma.4)) (fun (a : α) (ha : Ne.{succ u1} (WithTop.{u1} α) (WithTop.some.{u1} α a) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α))) => of_eq_true (Not (Eq.{succ u1} (WithTop.{u1} α) (WithTop.some.{u1} α (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α)))) (Eq.trans.{1} Prop (Not (Eq.{succ u1} (WithTop.{u1} α) (WithTop.some.{u1} α (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α)))) (Not False) True (congrArg.{1, 1} Prop Prop (Eq.{succ u1} (WithTop.{u1} α) (WithTop.some.{u1} α (Order.pred.{u1} α _inst_1 _inst_3 a)) (Top.top.{u1} (WithTop.{u1} α) (WithTop.top.{u1} α))) False Not (Mathlib.Order.WithBot._auxLemma.22.{u1} α (Order.pred.{u1} α _inst_1 _inst_3 a))) Std.Logic._auxLemma.4)) a ha))
Case conversion may be inaccurate. Consider using '#align with_top.pred_untop WithTop.pred_untopₓ'. -/
@[simp]
theorem pred_untop :
∀ (a : WithTop α) (ha : a ≠ ⊤),
pred (a.untop ha) = (pred a).untop (by induction a using WithTop.recTopCoe <;> simp)
| ⊤, ha => (ha rfl).elim
| (a : α), ha => rfl
#align with_top.pred_untop WithTop.pred_untop
end Pred
/-! #### Adding a `⊤` to a `no_max_order` -/
section Succ
variable [Preorder α] [NoMaxOrder α] [SuccOrder α]
#print WithTop.succOrderOfNoMaxOrder /-
instance succOrderOfNoMaxOrder : SuccOrder (WithTop α)
where
succ a :=
match a with
| ⊤ => ⊤
| some a => some (succ a)
le_succ a := by
cases a
· exact le_top
· exact some_le_some.2 (le_succ a)
max_of_succ_le a ha := by
cases a
· exact isMax_top
· exact (not_isMax _ <| max_of_succ_le <| some_le_some.1 ha).elim
succ_le_of_lt a b h := by
cases a
· exact (not_top_lt h).elim
cases b
· exact le_top
· exact some_le_some.2 (succ_le_of_lt <| some_lt_some.1 h)
le_of_lt_succ a b h := by
cases a
· exact (not_top_lt h).elim
cases b
· exact le_top
· exact some_le_some.2 (le_of_lt_succ <| some_lt_some.1 h)
#align with_top.succ_order_of_no_max_order WithTop.succOrderOfNoMaxOrder
-/
#print WithTop.succ_coe /-
@[simp]
theorem succ_coe (a : α) : succ (↑a : WithTop α) = ↑(succ a) :=
rfl
#align with_top.succ_coe WithTop.succ_coe
-/
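-- Editorial sketch (assuming mathlib's `coe_lt_coe` and `lt_succ`): with no maximal element in
-- `α`, strictness of `succ` lifts through the coercion into `with_top α`.
example (a : α) : (↑a : WithTop α) < succ (↑a : WithTop α) := by
  rw [succ_coe]
  exact coe_lt_coe.2 (lt_succ a)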
end Succ
section Pred
variable [Preorder α] [NoMaxOrder α]
instance [hα : Nonempty α] : IsEmpty (PredOrder (WithTop α)) :=
⟨by
intro
cases' h : pred (⊤ : WithTop α) with a
· exact hα.elim fun a => (min_of_le_pred h.ge).not_lt <| coe_lt_top a
· obtain ⟨c, hc⟩ := exists_gt a
rw [← some_lt_some, ← h] at hc
exact (le_of_pred_lt hc).not_lt (some_lt_none _)⟩
end Pred
end WithTop
namespace WithBot
/-! #### Adding a `⊥` to an `order_bot` -/
section Succ
variable [Preorder α] [OrderBot α] [SuccOrder α]
instance : SuccOrder (WithBot α)
where
succ a :=
match a with
| ⊥ => some ⊥
| some a => some (succ a)
le_succ a :=
match a with
| ⊥ => bot_le
| some a => some_le_some.2 (le_succ a)
max_of_succ_le a ha := by
cases a
· exact ((none_lt_some (⊥ : α)).not_le ha).elim
· exact (max_of_succ_le <| some_le_some.1 ha).withBot
succ_le_of_lt a b h := by
cases b
· exact (not_lt_bot h).elim
cases a
· exact some_le_some.2 bot_le
· exact some_le_some.2 (succ_le_of_lt <| some_lt_some.1 h)
le_of_lt_succ a b h := by
cases a
· exact bot_le
cases b
· exact (not_lt_bot <| some_lt_some.1 h).elim
· exact some_le_some.2 (le_of_lt_succ <| some_lt_some.1 h)
/- warning: with_bot.succ_bot -> WithBot.succ_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : SuccOrder.{u1} α _inst_1], Eq.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : SuccOrder.{u1} α _inst_1], Eq.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.instSuccOrderWithBotPreorder.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α))) (WithBot.some.{u1} α (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2)))
Case conversion may be inaccurate. Consider using '#align with_bot.succ_bot WithBot.succ_botₓ'. -/
@[simp]
theorem succ_bot : succ (⊥ : WithBot α) = ↑(⊥ : α) :=
rfl
#align with_bot.succ_bot WithBot.succ_bot
#print WithBot.succ_coe /-
@[simp]
theorem succ_coe (a : α) : succ (↑a : WithBot α) = ↑(succ a) :=
rfl
#align with_bot.succ_coe WithBot.succ_coe
-/
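-- Editorial sketch (assuming mathlib's `coe_ne_bot`): `succ ⊥` is the coercion of `⊥ : α`, so it
-- is never the adjoined `⊥` itself.
example : succ (⊥ : WithBot α) ≠ ⊥ := by
  rw [succ_bot]
  exact coe_ne_bot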
/- warning: with_bot.succ_unbot -> WithBot.succ_unbot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : SuccOrder.{u1} α _inst_1] (a : WithBot.{u1} α) (ha : Ne.{succ u1} (WithBot.{u1} α) a (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))), Eq.{succ u1} α (Order.succ.{u1} α _inst_1 _inst_3 (WithBot.unbot.{u1} α a ha)) (WithBot.unbot.{u1} α (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) a) (WithBot.recBotCoe.{u1, 0} α (fun (a : WithBot.{u1} α) => (Ne.{succ u1} (WithBot.{u1} α) a (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) -> (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) a) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) (fun (ha : Ne.{succ u1} (WithBot.{u1} α) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) => Eq.mpr.{0} (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) True (id_tag Tactic.IdTag.simp (Eq.{1} Prop (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) True) (Eq.trans.{1} Prop (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Not False) True (Eq.trans.{1} Prop (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Not (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) (Not False) (Eq.trans.{1} Prop (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Ne.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Not (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) 
(WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) ((fun (a : WithBot.{u1} α) (a_1 : WithBot.{u1} α) (e_1 : Eq.{succ u1} (WithBot.{u1} α) a a_1) (b : WithBot.{u1} α) (b_1 : WithBot.{u1} α) (e_2 : Eq.{succ u1} (WithBot.{u1} α) b b_1) => congr.{succ u1, 1} (WithBot.{u1} α) Prop (Ne.{succ u1} (WithBot.{u1} α) a) (Ne.{succ u1} (WithBot.{u1} α) a_1) b b_1 (congr_arg.{succ u1, succ u1} (WithBot.{u1} α) ((WithBot.{u1} α) -> Prop) a a_1 (Ne.{succ u1} (WithBot.{u1} α)) e_1) e_2) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (WithBot.succ_bot.{u1} α _inst_1 _inst_2 _inst_3) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)) (rfl.{succ u1} (WithBot.{u1} α) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) (Ne.def.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) ((fun (a : Prop) (a_1 : Prop) (e_1 : Eq.{1} Prop a a_1) => congr_arg.{1, 1} Prop Prop a a_1 Not e_1) (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) False (propext (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) False ((fun {α : Type.{u1}} {a : α} => iff_false_intro (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (WithBot.coe_ne_bot.{u1} α a)) α (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2)))))) (propext (Not False) True not_false_iff))) trivial) (fun (a : α) (ha : Ne.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a) (Bot.bot.{u1} 
(WithBot.{u1} α) (WithBot.hasBot.{u1} α))) => Eq.mpr.{0} (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) True (id_tag Tactic.IdTag.simp (Eq.{1} Prop (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) True) (Eq.trans.{1} Prop (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Not False) True (Eq.trans.{1} Prop (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Not (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) (Not False) (Eq.trans.{1} Prop (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Ne.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (Not (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) ((fun (a : 
WithBot.{u1} α) (a_1 : WithBot.{u1} α) (e_1 : Eq.{succ u1} (WithBot.{u1} α) a a_1) (b : WithBot.{u1} α) (b_1 : WithBot.{u1} α) (e_2 : Eq.{succ u1} (WithBot.{u1} α) b b_1) => congr.{succ u1, 1} (WithBot.{u1} α) Prop (Ne.{succ u1} (WithBot.{u1} α) a) (Ne.{succ u1} (WithBot.{u1} α) a_1) b b_1 (congr_arg.{succ u1, succ u1} (WithBot.{u1} α) ((WithBot.{u1} α) -> Prop) a a_1 (Ne.{succ u1} (WithBot.{u1} α)) e_1) e_2) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.succOrder.{u1} α _inst_1 _inst_2 _inst_3) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Order.succ.{u1} α _inst_1 _inst_3 a)) (WithBot.succ_coe.{u1} α _inst_1 _inst_2 _inst_3 a) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)) (rfl.{succ u1} (WithBot.{u1} α) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) (Ne.def.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α)))) ((fun (a : Prop) (a_1 : Prop) (e_1 : Eq.{1} Prop a a_1) => congr_arg.{1, 1} Prop Prop a a_1 Not e_1) (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) False (propext (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) False ((fun {α : Type.{u1}} {a : α} => iff_false_intro (Eq.{succ u1} (WithBot.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))) (WithBot.coe_ne_bot.{u1} α a)) α (Order.succ.{u1} α _inst_1 _inst_3 a))))) (propext (Not False) True not_false_iff))) trivial) a ha))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : SuccOrder.{u1} α _inst_1] (a : WithBot.{u1} α) (ha : Ne.{succ u1} (WithBot.{u1} α) a (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α))), Eq.{succ u1} α (Order.succ.{u1} α _inst_1 _inst_3 (WithBot.unbot.{u1} α a ha)) (WithBot.unbot.{u1} α (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.instSuccOrderWithBotPreorder.{u1} α _inst_1 _inst_2 _inst_3) a) (WithBot.recBotCoe.{u1, 0} α (fun (a : WithBot.{u1} α) => (Ne.{succ u1} (WithBot.{u1} α) a (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α))) -> (Ne.{succ u1} (WithBot.{u1} α) (Order.succ.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α _inst_1) (WithBot.instSuccOrderWithBotPreorder.{u1} α _inst_1 _inst_2 _inst_3) a) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α)))) (fun (ha : Ne.{succ u1} (WithBot.{u1} α) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α))) => of_eq_true (Not (Eq.{succ u1} (WithBot.{u1} α) (WithBot.some.{u1} α (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α)))) (Eq.trans.{1} Prop (Not (Eq.{succ u1} (WithBot.{u1} α) (WithBot.some.{u1} α (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α)))) (Not False) True (congrArg.{1, 1} Prop Prop (Eq.{succ u1} (WithBot.{u1} α) (WithBot.some.{u1} α (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α))) False Not (Mathlib.Order.WithBot._auxLemma.3.{u1} α (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2)))) Std.Logic._auxLemma.4)) (fun (a : α) (ha : Ne.{succ u1} (WithBot.{u1} α) (WithBot.some.{u1} α a) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α))) => of_eq_true (Not (Eq.{succ u1} (WithBot.{u1} α) (WithBot.some.{u1} α (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α)))) (Eq.trans.{1} Prop (Not (Eq.{succ u1} (WithBot.{u1} α) (WithBot.some.{u1} α (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α)))) (Not False) True (congrArg.{1, 1} Prop Prop (Eq.{succ u1} (WithBot.{u1} α) (WithBot.some.{u1} α (Order.succ.{u1} α _inst_1 _inst_3 a)) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α))) False Not (Mathlib.Order.WithBot._auxLemma.3.{u1} α (Order.succ.{u1} α _inst_1 _inst_3 a))) Std.Logic._auxLemma.4)) a ha))
Case conversion may be inaccurate. Consider using '#align with_bot.succ_unbot WithBot.succ_unbotₓ'. -/
@[simp]
theorem succ_unbot :
∀ (a : WithBot α) (ha : a ≠ ⊥),
succ (a.unbot ha) = (succ a).unbot (by induction a using WithBot.recBotCoe <;> simp)
| ⊥, ha => (ha rfl).elim
| (a : α), ha => rfl
#align with_bot.succ_unbot WithBot.succ_unbot
end Succ
section Pred
variable [DecidableEq α] [PartialOrder α] [OrderBot α] [PredOrder α]
instance : PredOrder (WithBot α)
where
pred a :=
match a with
| ⊥ => ⊥
| some a => ite (a = ⊥) ⊥ (some (pred a))
pred_le a := by
cases a
· exact bot_le
change ite _ _ _ ≤ _
split_ifs
· exact bot_le
· exact some_le_some.2 (pred_le a)
min_of_le_pred a ha := by
cases a
· exact isMin_bot
change _ ≤ ite _ _ _ at ha
split_ifs at ha with ha'
· exact (not_coe_le_bot _ ha).elim
· rw [some_le_some, le_pred_iff_eq_bot] at ha
exact (ha' ha).elim
le_pred_of_lt a b h := by
cases a
· exact bot_le
cases b
· exact (not_lt_bot h).elim
rw [some_lt_some] at h
change _ ≤ ite _ _ _
split_ifs with hb
· rw [hb] at h
exact (not_lt_bot h).elim
· exact some_le_some.2 (le_pred_of_lt h)
le_of_pred_lt a b h := by
cases b
· exact (not_lt_bot h).elim
cases a
· exact bot_le
change ite _ _ _ < _ at h
rw [some_le_some]
split_ifs at h with ha
· rw [ha]
exact bot_le
· exact le_of_pred_lt (some_lt_some.1 h)
/- warning: with_bot.pred_coe_bot -> WithBot.pred_coe_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : PartialOrder.{u1} α] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2))] [_inst_4 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)], Eq.{succ u1} (WithBot.{u1} α) (Order.pred.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) (WithBot.predOrder.{u1} α (fun (a : α) (b : α) => _inst_1 a b) _inst_2 _inst_3 _inst_4) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) _inst_3)))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.hasBot.{u1} α))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : PartialOrder.{u1} α] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2))] [_inst_4 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)], Eq.{succ u1} (WithBot.{u1} α) (Order.pred.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) (WithBot.instPredOrderWithBotPreorderToPreorder.{u1} α (fun (a : α) (b : α) => _inst_1 a b) _inst_2 _inst_3 _inst_4) (WithBot.some.{u1} α (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) _inst_3)))) (Bot.bot.{u1} (WithBot.{u1} α) (WithBot.bot.{u1} α))
Case conversion may be inaccurate. Consider using '#align with_bot.pred_coe_bot WithBot.pred_coe_botₓ'. -/
@[simp]
theorem pred_coe_bot : pred ↑(⊥ : α) = (⊥ : WithBot α) :=
dif_pos rfl
#align with_bot.pred_coe_bot WithBot.pred_coe_bot
/- warning: with_bot.pred_coe_of_ne_bot -> WithBot.pred_coe_of_ne_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : PartialOrder.{u1} α] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2))] [_inst_4 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)] {a : α}, (Ne.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) _inst_3))) -> (Eq.{succ u1} (WithBot.{u1} α) (Order.pred.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) (WithBot.predOrder.{u1} α (fun (a : α) (b : α) => _inst_1 a b) _inst_2 _inst_3 _inst_4) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) a)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) α (WithBot.{u1} α) (HasLiftT.mk.{succ u1, succ u1} α (WithBot.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} α (WithBot.{u1} α) (WithBot.hasCoeT.{u1} α))) (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2) _inst_4 a)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : PartialOrder.{u1} α] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2))] [_inst_4 : PredOrder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)] {a : α}, (Ne.{succ u1} α a (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) _inst_3))) -> (Eq.{succ u1} (WithBot.{u1} α) (Order.pred.{u1} (WithBot.{u1} α) (WithBot.preorder.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2)) (WithBot.instPredOrderWithBotPreorderToPreorder.{u1} α (fun (a : α) (b : α) => _inst_1 a b) _inst_2 _inst_3 _inst_4) (WithBot.some.{u1} α a)) (WithBot.some.{u1} α (Order.pred.{u1} α (PartialOrder.toPreorder.{u1} α _inst_2) _inst_4 a)))
Case conversion may be inaccurate. Consider using '#align with_bot.pred_coe_of_ne_bot WithBot.pred_coe_of_ne_botₓ'. -/
theorem pred_coe_of_ne_bot {a : α} (h : a ≠ ⊥) : pred (↑a : WithBot α) = ↑(pred a) :=
dif_neg h
#align with_bot.pred_coe_of_ne_bot WithBot.pred_coe_of_ne_bot
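-- Editorial sketch (assuming mathlib's `coe_ne_bot`), dual to the `with_top` case above: for
-- `a ≠ ⊥`, taking `pred` of a coercion stays above the adjoined `⊥`.
example {a : α} (h : a ≠ ⊥) : pred (↑a : WithBot α) ≠ ⊥ := by
  rw [pred_coe_of_ne_bot h]
  exact coe_ne_bot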
end Pred
/-! #### Adding a `⊥` to a `no_min_order` -/
section Succ
variable [Preorder α] [NoMinOrder α]
instance [hα : Nonempty α] : IsEmpty (SuccOrder (WithBot α)) :=
⟨by
intro
cases' h : succ (⊥ : WithBot α) with a
· exact hα.elim fun a => (max_of_succ_le h.le).not_lt <| bot_lt_coe a
· obtain ⟨c, hc⟩ := exists_lt a
rw [← some_lt_some, ← h] at hc
exact (le_of_lt_succ hc).not_lt (none_lt_some _)⟩
end Succ
section Pred
variable [Preorder α] [NoMinOrder α] [PredOrder α]
#print WithBot.predOrderOfNoMinOrder /-
instance predOrderOfNoMinOrder : PredOrder (WithBot α)
where
pred a :=
match a with
| ⊥ => ⊥
| some a => some (pred a)
pred_le a := by
cases a
· exact bot_le
· exact some_le_some.2 (pred_le a)
min_of_le_pred a ha := by
cases a
· exact isMin_bot
· exact (not_isMin _ <| min_of_le_pred <| some_le_some.1 ha).elim
le_pred_of_lt a b h := by
cases b
· exact (not_lt_bot h).elim
cases a
· exact bot_le
· exact some_le_some.2 (le_pred_of_lt <| some_lt_some.1 h)
le_of_pred_lt a b h := by
cases b
· exact (not_lt_bot h).elim
cases a
· exact bot_le
· exact some_le_some.2 (le_of_pred_lt <| some_lt_some.1 h)
#align with_bot.pred_order_of_no_min_order WithBot.predOrderOfNoMinOrder
-/
#print WithBot.pred_coe /-
@[simp]
theorem pred_coe (a : α) : pred (↑a : WithBot α) = ↑(pred a) :=
rfl
#align with_bot.pred_coe WithBot.pred_coe
-/
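-- Editorial sketch (assuming mathlib's `coe_lt_coe` and `pred_lt`): with no minimal element in
-- `α`, strictness of `pred` lifts through the coercion into `with_bot α`.
example (a : α) : pred (↑a : WithBot α) < ↑a := by
  rw [pred_coe]
  exact coe_lt_coe.2 (pred_lt a)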
end Pred
end WithBot
/-! ### Archimedeanness -/
#print IsSuccArchimedean /-
/-- A `succ_order` is succ-archimedean if one can go from any element to any greater element by
iterating `succ` finitely many times. -/
class IsSuccArchimedean (α : Type _) [Preorder α] [SuccOrder α] : Prop where
exists_succ_iterate_of_le {a b : α} (h : a ≤ b) : ∃ n, (succ^[n]) a = b
#align is_succ_archimedean IsSuccArchimedean
-/
#print IsPredArchimedean /-
/-- A `pred_order` is pred-archimedean if one can go from any element to any smaller element by
iterating `pred` finitely many times. -/
class IsPredArchimedean (α : Type _) [Preorder α] [PredOrder α] : Prop where
exists_pred_iterate_of_le {a b : α} (h : a ≤ b) : ∃ n, (pred^[n]) b = a
#align is_pred_archimedean IsPredArchimedean
-/
export IsSuccArchimedean (exists_succ_iterate_of_le)
export IsPredArchimedean (exists_pred_iterate_of_le)
section Preorder
variable [Preorder α]
section SuccOrder
variable [SuccOrder α] [IsSuccArchimedean α] {a b : α}
instance : IsPredArchimedean αᵒᵈ :=
⟨fun a b h => by convert exists_succ_iterate_of_le h.of_dual⟩
#print LE.le.exists_succ_iterate /-
theorem LE.le.exists_succ_iterate (h : a ≤ b) : ∃ n, (succ^[n]) a = b :=
exists_succ_iterate_of_le h
#align has_le.le.exists_succ_iterate LE.le.exists_succ_iterate
-/
#print exists_succ_iterate_iff_le /-
theorem exists_succ_iterate_iff_le : (∃ n, (succ^[n]) a = b) ↔ a ≤ b :=
by
refine' ⟨_, exists_succ_iterate_of_le⟩
rintro ⟨n, rfl⟩
exact id_le_iterate_of_id_le le_succ n a
#align exists_succ_iterate_iff_le exists_succ_iterate_iff_le
-/
#print Succ.rec /-
/-- Induction principle on a type with a `succ_order` for all elements above a given element `m`. -/
@[elab_as_elim]
theorem Succ.rec {P : α → Prop} {m : α} (h0 : P m) (h1 : ∀ n, m ≤ n → P n → P (succ n)) ⦃n : α⦄
(hmn : m ≤ n) : P n := by
obtain ⟨n, rfl⟩ := hmn.exists_succ_iterate; clear hmn
induction' n with n ih
· exact h0
· rw [Function.iterate_succ_apply']
exact h1 _ (id_le_iterate_of_id_le le_succ n m) ih
#align succ.rec Succ.rec
-/
#print Succ.rec_iff /-
theorem Succ.rec_iff {p : α → Prop} (hsucc : ∀ a, p a ↔ p (succ a)) {a b : α} (h : a ≤ b) :
p a ↔ p b := by
obtain ⟨n, rfl⟩ := h.exists_succ_iterate
exact iterate.rec (fun b => p a ↔ p b) (fun c hc => hc.trans (hsucc _)) Iff.rfl n
#align succ.rec_iff Succ.rec_iff
-/
end SuccOrder
section PredOrder
variable [PredOrder α] [IsPredArchimedean α] {a b : α}
instance : IsSuccArchimedean αᵒᵈ :=
⟨fun a b h => by convert exists_pred_iterate_of_le h.of_dual⟩
#print LE.le.exists_pred_iterate /-
theorem LE.le.exists_pred_iterate (h : a ≤ b) : ∃ n, (pred^[n]) b = a :=
exists_pred_iterate_of_le h
#align has_le.le.exists_pred_iterate LE.le.exists_pred_iterate
-/
#print exists_pred_iterate_iff_le /-
theorem exists_pred_iterate_iff_le : (∃ n, (pred^[n]) b = a) ↔ a ≤ b :=
@exists_succ_iterate_iff_le αᵒᵈ _ _ _ _ _
#align exists_pred_iterate_iff_le exists_pred_iterate_iff_le
-/
#print Pred.rec /-
/-- Induction principle on a type with a `pred_order` for all elements below a given element `m`. -/
@[elab_as_elim]
theorem Pred.rec {P : α → Prop} {m : α} (h0 : P m) (h1 : ∀ n, n ≤ m → P n → P (pred n)) ⦃n : α⦄
(hmn : n ≤ m) : P n :=
@Succ.rec αᵒᵈ _ _ _ _ _ h0 h1 _ hmn
#align pred.rec Pred.rec
-/
#print Pred.rec_iff /-
theorem Pred.rec_iff {p : α → Prop} (hsucc : ∀ a, p a ↔ p (pred a)) {a b : α} (h : a ≤ b) :
p a ↔ p b :=
(@Succ.rec_iff αᵒᵈ _ _ _ _ hsucc _ _ h).symm
#align pred.rec_iff Pred.rec_iff
-/
end PredOrder
end Preorder
section LinearOrder
variable [LinearOrder α]
section SuccOrder
variable [SuccOrder α] [IsSuccArchimedean α] {a b : α}
#print exists_succ_iterate_or /-
theorem exists_succ_iterate_or : (∃ n, (succ^[n]) a = b) ∨ ∃ n, (succ^[n]) b = a :=
(le_total a b).imp exists_succ_iterate_of_le exists_succ_iterate_of_le
#align exists_succ_iterate_or exists_succ_iterate_or
-/
#print Succ.rec_linear /-
theorem Succ.rec_linear {p : α → Prop} (hsucc : ∀ a, p a ↔ p (succ a)) (a b : α) : p a ↔ p b :=
(le_total a b).elim (Succ.rec_iff hsucc) fun h => (Succ.rec_iff hsucc h).symm
#align succ.rec_linear Succ.rec_linear
-/
end SuccOrder
section PredOrder
variable [PredOrder α] [IsPredArchimedean α] {a b : α}
#print exists_pred_iterate_or /-
theorem exists_pred_iterate_or : (∃ n, (pred^[n]) b = a) ∨ ∃ n, (pred^[n]) a = b :=
(le_total a b).imp exists_pred_iterate_of_le exists_pred_iterate_of_le
#align exists_pred_iterate_or exists_pred_iterate_or
-/
#print Pred.rec_linear /-
theorem Pred.rec_linear {p : α → Prop} (hsucc : ∀ a, p a ↔ p (pred a)) (a b : α) : p a ↔ p b :=
(le_total a b).elim (Pred.rec_iff hsucc) fun h => (Pred.rec_iff hsucc h).symm
#align pred.rec_linear Pred.rec_linear
-/
end PredOrder
end LinearOrder
section IsWellOrder
variable [LinearOrder α]
#print IsWellOrder.toIsPredArchimedean /-
instance (priority := 100) IsWellOrder.toIsPredArchimedean [h : IsWellOrder α (· < ·)]
[PredOrder α] : IsPredArchimedean α :=
⟨fun a => by
refine' WellFounded.fix h.wf fun b ih hab => _
replace hab := hab.eq_or_lt
rcases hab with (rfl | hab)
· exact ⟨0, rfl⟩
cases' le_or_lt b (pred b) with hb hb
· cases (min_of_le_pred hb).not_lt hab
obtain ⟨k, hk⟩ := ih (pred b) hb (le_pred_of_lt hab)
refine' ⟨k + 1, _⟩
rw [iterate_add_apply, iterate_one, hk]⟩
#align is_well_order.to_is_pred_archimedean IsWellOrder.toIsPredArchimedean
-/
#print IsWellOrder.toIsSuccArchimedean /-
instance (priority := 100) IsWellOrder.toIsSuccArchimedean [h : IsWellOrder α (· > ·)]
[SuccOrder α] : IsSuccArchimedean α := by convert @OrderDual.isSuccArchimedean αᵒᵈ _ _ _
#align is_well_order.to_is_succ_archimedean IsWellOrder.toIsSuccArchimedean
-/
end IsWellOrder
section OrderBot
variable [Preorder α] [OrderBot α] [SuccOrder α] [IsSuccArchimedean α]
/- warning: succ.rec_bot -> Succ.rec_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : SuccOrder.{u1} α _inst_1] [_inst_4 : IsSuccArchimedean.{u1} α _inst_1 _inst_3] (p : α -> Prop), (p (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) -> (forall (a : α), (p a) -> (p (Order.succ.{u1} α _inst_1 _inst_3 a))) -> (forall (a : α), p a)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : SuccOrder.{u1} α _inst_1] [_inst_4 : IsSuccArchimedean.{u1} α _inst_1 _inst_3] (p : α -> Prop), (p (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) -> (forall (a : α), (p a) -> (p (Order.succ.{u1} α _inst_1 _inst_3 a))) -> (forall (a : α), p a)
Case conversion may be inaccurate. Consider using '#align succ.rec_bot Succ.rec_botₓ'. -/
theorem Succ.rec_bot (p : α → Prop) (hbot : p ⊥) (hsucc : ∀ a, p a → p (succ a)) (a : α) : p a :=
Succ.rec hbot (fun x _ h => hsucc x h) (bot_le : ⊥ ≤ a)
#align succ.rec_bot Succ.rec_bot
end OrderBot
section OrderTop
variable [Preorder α] [OrderTop α] [PredOrder α] [IsPredArchimedean α]
/- warning: pred.rec_top -> Pred.rec_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : PredOrder.{u1} α _inst_1] [_inst_4 : IsPredArchimedean.{u1} α _inst_1 _inst_3] (p : α -> Prop), (p (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) -> (forall (a : α), (p a) -> (p (Order.pred.{u1} α _inst_1 _inst_3 a))) -> (forall (a : α), p a)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_1)] [_inst_3 : PredOrder.{u1} α _inst_1] [_inst_4 : IsPredArchimedean.{u1} α _inst_1 _inst_3] (p : α -> Prop), (p (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))) -> (forall (a : α), (p a) -> (p (Order.pred.{u1} α _inst_1 _inst_3 a))) -> (forall (a : α), p a)
Case conversion may be inaccurate. Consider using '#align pred.rec_top Pred.rec_topₓ'. -/
theorem Pred.rec_top (p : α → Prop) (htop : p ⊤) (hpred : ∀ a, p a → p (pred a)) (a : α) : p a :=
Pred.rec htop (fun x _ h => hpred x h) (le_top : a ≤ ⊤)
#align pred.rec_top Pred.rec_top
end OrderTop
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/Order/SuccPred/Basic.lean"}
|
"""
SCRIPT TO WRITE TINKER KEY (PRM) FILES
FROM BOSS ZMATRIX
Created on Mon Feb 15 15:40:05 2016
@author: Matthew Robinson matthew.robinson@yale.edu
@author: William L. Jorgensen Lab
Usage: python OPM_Routines.py -z phenol.z -r PHN
REQUIREMENTS:
BOSS (need to set BOSSdir in bashrc and cshrc)
Preferably Anaconda Python with the following modules:
pandas
argparse
numpy
"""
from LigParGen.BOSSReader import bossPdbAtom2Element,bossElement2Mass,ucomb,tor_cent
import pickle
import os
import pandas as pd
import numpy as np
ATOM_NUMBER_DICT = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4,
'B': 5, 'C': 6, 'N': 7, 'O': 8,
'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12,
'Al': 13, 'Si': 14, 'P': 15, 'S': 16,
'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20,
'Sc': 21, 'Ti': 22, 'V': 23, 'Cr': 24,
'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28,
'Cu': 29, 'Zn': 30, 'Ga': 31, 'Ge': 32,
'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36,
'Rb': 37, 'Sr': 38, 'Y': 39, 'Zr': 40,
'Nb': 41, 'Mo': 42, 'Tc': 43, 'Ru': 44, 'Rh': 45,
'Pd': 46, 'Ag': 47, 'Cd': 48, 'In': 49,
'Sn': 50, 'Sb': 51, 'Te': 52, 'I': 53,
'Xe': 54, 'Cs': 55, 'Ba': 56, 'La': 57,
'Hf': 72, 'Ta': 73, 'W': 74, 'Re': 75,
'Os': 76, 'Ir': 77, 'Pt': 78, 'Au': 79,
'Hg': 80, 'Tl': 81, 'Pb': 82, 'Bi': 83,
'Po': 84, 'At': 85, 'Rn': 86, 'Fr': 87,
'Ra': 88, 'Ac': 89}
def bossData(molecule_data):
ats_file = molecule_data.MolData['ATOMS']
types = []
for line in ats_file:
types.append([line.split()[1], 'opls_' + line.split()[2]])
st_no = 3
Qs = molecule_data.MolData['Q_LJ']
assert len(Qs) == len(types), 'Please check the at_info and Q_LJ_dat files'
num2opls = {}
for i in range(0, len(types)):
num2opls[i] = Qs[i][0]
num2typ2symb = {i: types[i] for i in range(len(Qs))}
for i in range(len(Qs)):
num2typ2symb[i].append(bossPdbAtom2Element(
num2typ2symb[i][0]) + num2typ2symb[i][1][-3:])
num2typ2symb[i].append(bossPdbAtom2Element(num2typ2symb[i][0]))
num2typ2symb[i].append(bossElement2Mass(num2typ2symb[i][3]))
num2typ2symb[i].append(Qs[i][0])
return (types, Qs, num2opls, st_no, num2typ2symb)
def Boss2Tinker(resid, molecule_data, xyz_dict):
types, Qs, num2opls, st_no, num2typ2symb = bossData(molecule_data)
bnd_df = boss2CharmmBond(molecule_data, st_no)
bndlist = list(bnd_df.UR) + list(bnd_df.UR)
ang_df = boss2CharmmAngle(molecule_data.MolData['ANGLES'], num2opls, st_no,num2typ2symb)
tor_df = Boss2CharmmTorsion(bnd_df, num2opls, st_no,
molecule_data, num2typ2symb)
prm = open('/tmp/'+ resid + '.key', 'w+')
prm.write(
'''
##############################
## ##
## Force Field Definition ##
## ##
##############################
forcefield OPLS-AA
vdwindex TYPE
vdwtype LENNARD-JONES
radiusrule GEOMETRIC
radiustype SIGMA
radiussize DIAMETER
epsilonrule GEOMETRIC
torsionunit 1.0
imptorunit 1.0
vdw-14-scale 2.0
chg-14-scale 2.0
electric 332.06
dielectric 1.0
#############################
## ##
## Atom Type Definitions ##
## ##
#############################
''')
dict_counter = 1
for type_list in types:
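# note: str.strip('_opls') removes any of the characters o, p, l, s, _ from
# both ends rather than the literal 'opls_' prefix; it works here only
# because the remaining type id is purely numeric.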
type_num = int(type_list[1].strip('_opls'))
prm.write('atom %10d %4d %5s %8s %10d %10.3f %5d \n' %
(type_num, type_num, type_list[-1], '"' + type_list[0] + '"',
ATOM_NUMBER_DICT[type_list[-3]], type_list[-2], xyz_dict[dict_counter][2]))
dict_counter += 1
prm.write(
'''
################################
## ##
## Van der Waals Parameters ##
## ##
################################
''')
types_idx = 0
for vdw_list in Qs:
type_num = int(types[types_idx][1].strip('_opls'))
sigma = float(vdw_list[2])
e_min = float(vdw_list[3])
prm.write('vdw %11d %16.4f %8.4f \n' %
(type_num, sigma, e_min))
types_idx += 1
prm.write(
'''
##################################
## ##
## Bond Stretching Parameters ##
## ##
##################################
'''
)
# ask about this one
for index, row in bnd_df.iterrows():
atom1_type = int(types[int(row['cl1'])][1].strip('_opls'))
atom2_type = int(types[int(row['cl2'])][1].strip('_opls'))
R = float(row['RIJ'])
K = float(row['KIJ'])
prm.write('bond %10d %4d %16.2f %8.4f \n' %
(atom1_type, atom2_type, K, R))
prm.write(
'''
################################
## ##
## Angle Bending Parameters ##
## ##
################################
''')
for index, row in ang_df.iterrows():
atom1_type = int(types[int(row['cl1'])][1].strip('_opls'))
atom2_type = int(types[int(row['cl2'])][1].strip('_opls'))
atom3_type = int(types[int(row['cl3'])][1].strip('_opls'))
R = float(row['R'])
K = float(row['K'])
prm.write('angle %9d %4d %4d %8.2f %8.2f \n' %
(atom1_type, atom2_type, atom3_type, K, R))
prm.write(
'''
################################
## ##
## Urey-Bradley Parameters ##
## ##
################################
ureybrad 35 34 35 38.25 1.5537
#####################################
## ##
## Improper Torsional Parameters ##
## ##
#####################################
''')
### Impropers ###
for index, row in tor_df.iterrows():
if row['TY'] == 'Improper':
cen_nums = tor_cent([row.I,row.J,row.K,row.L],bndlist)
atom1_type = int(num2typ2symb[cen_nums[1]][1][5:])#int[int(row['I'])][1].strip('_opls'))
atom2_type = int(num2typ2symb[cen_nums[2]][1][5:])#int[int(row['I'])][1].strip('_opls'))
atom3_central_type = int(num2typ2symb[cen_nums[0]][1][5:]) #int(types[int(row['J'])][1].strip('_opls'))
atom4_type = int(num2typ2symb[cen_nums[3]][1][5:])
V2 = float(row['V2'])
gamma = 180.0
n = 2
# ordering for this is weird
# see https://ryanmrichard.github.io/ForceManII/tinkerformat.html
prm.write('imptors %7d %4d %4d %4d %12.3f %4.1f %2d \n' %
(atom1_type, atom2_type, atom3_central_type, atom4_type, V2, gamma, n))
prm.write(
'''
############################
## ##
## Torsional Parameters ##
## ##
############################
''')
for index, row in tor_df.iterrows():
if row['TY'] == 'Proper':
atom1_type = int(types[int(row['I'])][1].strip('_opls'))
atom2_type = int(types[int(row['J'])][1].strip('_opls'))
atom3_type = int(types[int(row['K'])][1].strip('_opls'))
atom4_type = int(types[int(row['L'])][1].strip('_opls'))
V1 = float(row['V1'])
gamma1 = 0.0
n1 = 1
V2 = float(row['V2'])
gamma2 = 180.0
n2 = 2
V3 = float(row['V3'])
gamma3 = 0.0
n3 = 3
prm.write('torsion %7d %4d %4d %4d %12.3f %4.1f %2d %6.3f %4.1f %2d %6.3f %4.1f %2d \n' %
(atom1_type, atom2_type, atom3_type, atom4_type, V1, gamma1, n1, V2, gamma2, n2, V3, gamma3, n3))
prm.write(
'''
torsion 0 0 0 0 0.000 0.0 1 0.000 180.0 2 0.000 0.0 3
########################################
## ##
## Atomic Partial Charge Parameters ##
## ##
########################################
''')
types_idx = 0
for vdw_list in Qs:
type_num = int(types[types_idx][1].strip('_opls'))
charge = float(vdw_list[1])
prm.write('charge %11d %16.4f \n' %
(type_num, charge))
types_idx += 1
prm.close()
def boss2CharmmBond(molecule_data, st_no):
bdat = molecule_data.MolData['BONDS']
bdat['cl1'] = [x - st_no if not x - st_no < 0 else 0 for x in bdat['cl1']]
bdat['cl2'] = [x - st_no if not x - st_no < 0 else 0 for x in bdat['cl2']]
bnd_df = pd.DataFrame(bdat)
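# UF/UR encode each bond (cl1, cl2) with the Cantor pairing function
# k = (i + j) * (i + j + 1) / 2 + j (forward) or + i (reverse), giving a
# unique scalar id per ordered pair for the connectivity checks in ucomb.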
bnd_df['UF'] = ((bnd_df.cl1 + bnd_df.cl2) *
(bnd_df.cl1 + bnd_df.cl2 + 1) * 0.5) + bnd_df.cl2
bnd_df['UR'] = ((bnd_df.cl1 + bnd_df.cl2) *
(bnd_df.cl1 + bnd_df.cl2 + 1) * 0.5) + bnd_df.cl1
# bnd_df.to_csv('bos_bonds.csv', index=False)
hb_df = bnd_df.drop(columns=['cl1', 'cl2', 'UF', 'UR'])
hb_df = hb_df.drop_duplicates()
return bnd_df
def boss2CharmmAngle(anglefile, num2opls, st_no,num2typ2symb):
adat = anglefile
adat['cl1'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl1']]
adat['cl2'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl2']]
adat['cl3'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl3']]
ang_df = pd.DataFrame(adat)
ang_df = ang_df[ang_df.K > 0]
# ang_df.to_csv('bos_angles.csv', index=False)
ang_df['TY'] = np.array([num2opls[i] + '-' + num2opls[j] + '-' + num2opls[k]
for i, j, k in zip(ang_df.cl1, ang_df.cl2, ang_df.cl3)])
ang_df['TI']=[num2typ2symb[ang_df.cl1[i]][2] for i in ang_df.index]
ang_df['TJ']=[num2typ2symb[ang_df.cl2[i]][2] for i in ang_df.index]
ang_df['TK']=[num2typ2symb[ang_df.cl3[i]][2] for i in ang_df.index]
ang_df['TY'] = np.array([i + ' ' + j + ' ' + k
for i, j, k in zip(ang_df.TI, ang_df.TJ, ang_df.TK)])
return ang_df
def Boss2CharmmTorsion(bnd_df, num2opls, st_no, molecule_data, num2typ2symb):
# print num2opls
dhd = []
for line in molecule_data.MolData['TORSIONS']:
dt = [float(l) for l in line]
dhd.append(dt)
dhd = np.array(dhd)
# no kcal-to-kJ conversion here: Tinker parameters stay in kcal/mol
dhd = dhd / 2.0 # Komm = Vopls/2
dhd_df = pd.DataFrame(dhd, columns=['V1', 'V2', 'V3', 'V4'])
ats = []
for line in molecule_data.MolData['ATOMS'][3:]:
dt = [line.split()[0], line.split()[4],
line.split()[6], line.split()[8]]
dt = [int(d) for d in dt]
ats.append(dt)
for line in molecule_data.MolData['ADD_DIHED']:
dt = [int(l) for l in line]
ats.append(dt)
assert len(ats) == len(
dhd), "Number of dihedral angles in the Z-matrix and Out file don't match"
ats = np.array(ats) - st_no
for i in range(len(ats)):
for j in range(len(ats[0])):
if ats[i][j] < 0:
ats[i][j] = 0
at_df = pd.DataFrame(ats, columns=['I', 'J', 'K', 'L'])
final_df = pd.concat([dhd_df, at_df], axis=1).reindex(at_df.index)  # join_axes was removed in pandas 1.0
bndlist = list(bnd_df.UR) + list(bnd_df.UR)
final_df['TY'] = ['Proper' if ucomb(list([final_df.I[n], final_df.J[n], final_df.K[
n], final_df.L[n]]), bndlist) == 3 else 'Improper' for n in range(len(final_df.I))]
# final_df['SumV'] = np.abs(
# final_df.V1) + np.abs(final_df.V2) + np.abs(final_df.V3) + np.abs(final_df.V4)
# final_df = final_df[final_df['SumV'] != 0.00]
final_df['TI'] = [num2typ2symb[j][2] for j in final_df.I]
final_df['TJ'] = [num2typ2symb[j][2] for j in final_df.J]
final_df['TK'] = [num2typ2symb[j][2] for j in final_df.K]
final_df['TL'] = [num2typ2symb[j][2] for j in final_df.L]
final_df['SYMB'] = [' '.join([num2typ2symb[final_df.I[i]][0], num2typ2symb[final_df.J[i]][
0], num2typ2symb[final_df.K[i]][0], num2typ2symb[final_df.L[i]][0]]) for i in final_df.index]
if len(final_df.index) > 0:
final_df['NAME'] = final_df.TI + '-' + final_df.TJ + \
'-' + final_df.TK + '-' + final_df.TL
return final_df
def create_xyz_file(residue_name,mol):
boss_xyz = mol.MolData['XYZ']
# convert .pdb to Tinker style .xyz file
os.system('babel -ipdb %s.pdb -otxyz %s.xyz > LLN 2>&1' % (residue_name,residue_name))
# Read in the file
with open('/tmp/%s.xyz' % residue_name, 'r') as xyz_file:
xyz_data = xyz_file.readlines()
num_atoms = int(xyz_data[0][0:6])
xyz_dict = {}
line_counter = 1
for line in xyz_data[1:]:
row=(boss_xyz.iloc[line_counter-1])
atom_number = int(line[0:6])
element = str(line[7:9].strip())
atom_type = int(line[49:53])
num_bonds = len(line[55:].split())
xyz_dict[atom_number] = [element, atom_type, num_bonds]
# change atom type
new_atom_type_str = (' ' + str(799+atom_number))[-4:]
# xyz_data[line_counter] = line[:49] + new_atom_type_str + line[53:]
xyz_data[line_counter] = line[:12] + '%11.6f %11.6f %11.6f '%(row.X,row.Y,row.Z) + new_atom_type_str + line[53:]
line_counter += 1
xyz_data[0] = '%6d %s LigParGen generated OPLS-AA/CM1A Parameters\n'%(num_atoms,residue_name)
with open('/tmp/%s.new.xyz' % residue_name, 'w') as new_xyz_file:
new_xyz_file.writelines(xyz_data)
# print(xyz_dict)
return xyz_dict
def mainBOSS2TINKER(resid, clu=False):
mol = pickle.load(open(resid + ".p", "rb"))
# if clu:
# pdb_file = '/tmp/clu.pdb'
# else:
# pdb_file = '/tmp/plt.pdb'
xyz_dict = create_xyz_file(resid,mol)
Boss2Tinker(resid, mol, xyz_dict)
return None
|
{"hexsha": "e627fffc805b86f5455ff0afc9e20d8ccf98fc5b", "size": 14411, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/LigParGen/BOSS2TINKER.py", "max_stars_repo_name": "mikemhenry/LigParGen_2.3", "max_stars_repo_head_hexsha": "538985aba95e8a49aebe0f118e3dd919de95d736", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "build/lib/LigParGen/BOSS2TINKER.py", "max_issues_repo_name": "mikemhenry/LigParGen_2.3", "max_issues_repo_head_hexsha": "538985aba95e8a49aebe0f118e3dd919de95d736", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/lib/LigParGen/BOSS2TINKER.py", "max_forks_repo_name": "mikemhenry/LigParGen_2.3", "max_forks_repo_head_hexsha": "538985aba95e8a49aebe0f118e3dd919de95d736", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1177944862, "max_line_length": 121, "alphanum_fraction": 0.4840746652, "include": true, "reason": "import numpy", "num_tokens": 4570}
|
from typing import Dict
import json
import logging
from allennlp.data import Token
from overrides import overrides
from pytorch_pretrained_bert import BertTokenizer
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from ctxteval.dataset_readers.numeric_field import NumericField
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
from numpy import isnan
SEP_TOKEN: str = "[SEP]"
START_TOKEN: str = "[CLS]"
@DatasetReader.register("mteval")
class MTReader(DatasetReader):
"""
Reads a file from the Stanford Natural Language Inference (SNLI) dataset. This data is
formatted as jsonl, one json-formatted instance per line. The keys in the data are
"gold_label", "sentence1", and "sentence2". We convert these keys into fields named "label",
"premise" and "hypothesis", along with a metadata field containing the tokenized strings of the
premise and hypothesis.
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)
We use this ``Tokenizer`` for both the premise and the hypothesis. See :class:`Tokenizer`.
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We similarly use this for both the premise and the hypothesis. See :class:`TokenIndexer`.
"""
def __init__(self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
maxlen: int = 100,
bert_name: str = None,
bert_do_lowercase: bool = None,
qesetting: bool = False,
inp_type: str = "metric",
lazy: bool = False) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or WordTokenizer()
self.bert_name = bert_name
if bert_name and bert_do_lowercase is None:
if 'uncased' in bert_name:
bert_do_lowercase = True
else:
bert_do_lowercase = False
if bert_name:
self._tokenizer = BertTokenizer.from_pretrained(bert_name, do_lower_case = bert_do_lowercase)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self._maxlen = maxlen
self._qesetting = qesetting
valid_inp_types = ["metric", "qe" ]
assert inp_type in valid_inp_types
self._inp_type = inp_type
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, 'r') as mt_file:
logger.info("Reading MT instances from jsonl dataset at: %s", file_path)
for line in mt_file:
example = json.loads(line)
score = example.get( "score" , 0)
if isnan(score):
continue
src = example.get('srcsent', "")
ref = example["ref"]
mt = example["sys"]
yield self.text_to_instance(ref, mt, src= src,
score= score)
@overrides
def text_to_instance(self, # type: ignore
ref: str,
mt: str,
src: str = "",
score: float = None) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
ref_tokens = self._tokenizer.tokenize(ref)[:self._maxlen]
mt_tokens = self._tokenizer.tokenize(mt)[:self._maxlen]
src_tokens = self._tokenizer.tokenize(src)[:self._maxlen]
if self.bert_name:
ref_tokens = [Token(t) for t in ref_tokens]
mt_tokens = [Token(t) for t in mt_tokens]
src_tokens = [Token(t) for t in src_tokens]
if self._inp_type == 'metric':
fields['mt'] = TextField(mt_tokens, self._token_indexers)
if self._qesetting: #this is just for backwards compatibility
fields['ref'] = TextField(src_tokens, self._token_indexers)
else:
fields['ref'] = TextField(ref_tokens, self._token_indexers)
elif self._inp_type == 'qe':
fields['ref'] = TextField(src_tokens, self._token_indexers)
fields['mt'] = TextField(mt_tokens, self._token_indexers)
if score is not None:
fields['score'] = NumericField(score)
# metadata = {"ref_tokens": [x.text for x in ref_tokens],
# "mt_tokens": [x.text for x in mt_tokens]}
# fields["metadata"] = MetadataField(metadata)
return Instance(fields)
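# A minimal usage sketch (not part of the original reader); the sentences and
# score below are made-up illustrative values.
if __name__ == "__main__":
    reader = MTReader()
    instance = reader.text_to_instance(ref="the cat sat", mt="a cat sat",
                                       src="le chat est assis", score=0.8)
    print(list(instance.fields.keys()))  # ['mt', 'ref', 'score']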
|
{"hexsha": "4bec12361e7a464a91637bee15acddcae0d5abdf", "size": 5016, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctxteval/dataset_readers/mteval.py", "max_stars_repo_name": "nitikam/mteval-in-context", "max_stars_repo_head_hexsha": "518fd156503e0dc4ef98b2f46311e10a342e04d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-12-10T04:19:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-25T09:26:43.000Z", "max_issues_repo_path": "ctxteval/dataset_readers/mteval.py", "max_issues_repo_name": "nitikam/mteval-in-context", "max_issues_repo_head_hexsha": "518fd156503e0dc4ef98b2f46311e10a342e04d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-23T10:20:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-23T10:20:43.000Z", "max_forks_repo_path": "ctxteval/dataset_readers/mteval.py", "max_forks_repo_name": "nitikam/mteval-in-context", "max_forks_repo_head_hexsha": "518fd156503e0dc4ef98b2f46311e10a342e04d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4516129032, "max_line_length": 107, "alphanum_fraction": 0.6172248804, "include": true, "reason": "from numpy", "num_tokens": 1125}
|
[STATEMENT]
lemma dim_vec_last[simp]: "dim_vec (vec_last v n) = n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dim_vec (vec_last v n) = n
[PROOF STEP]
unfolding vec_last_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dim_vec (vec n (\<lambda>i. v $ (dim_vec v - n + i))) = n
[PROOF STEP]
by auto
|
{"llama_tokens": 141, "file": "Jordan_Normal_Form_Matrix", "length": 2}
|
module ParticleCoordinateModule
use ParticleLocationModule,only : ParticleLocationType
implicit none
! Set default access status to private
private
!--------------------------------------
! type:
!--------------------------------------
type,public,extends(ParticleLocationType) :: ParticleCoordinateType
doubleprecision :: GlobalX
doubleprecision :: GlobalY
doubleprecision :: GlobalZ
contains
procedure,private :: ParticleCoordinateType_SetData1
procedure,private :: ParticleCoordinateType_SetData2
procedure :: Reset=>pr_Reset
generic :: SetData=>ParticleCoordinateType_SetData1, ParticleCoordinateType_SetData2
end type
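! The generic SetData binding dispatches on arguments: the eight-argument
! form fills every field directly, while the ParticleLocationType form
! copies the local data and then adds the global coordinates.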
contains
subroutine pr_Reset(this)
!***************************************************************************************************************
! Reset all cell, local/global coordinate, and tracking-time data to defaults.
!***************************************************************************************************************
!
! Specifications
!---------------------------------------------------------------------------------------------------------------
implicit none
class(ParticleCoordinateType) :: this
!---------------------------------------------------------------------------------------------------------------
this%CellNumber = 0
this%LocalX = 0.0d0
this%LocalY = 0.0d0
this%LocalZ = 0.0d0
this%GlobalX = 0.0d0
this%GlobalY = 0.0d0
this%GlobalZ = 0.0d0
this%TrackingTime = 0.0d0
end subroutine pr_Reset
!------------------------------------------
! Method:
!------------------------------------------
subroutine ParticleCoordinateType_SetData1(this,cellNumber,localX,localY,localZ,globalX,globalY,globalZ,trackingTime)
implicit none
class(ParticleCoordinateType) :: this
integer,intent(in) :: cellNumber
doubleprecision,intent(in) :: localX,localY,localZ,globalX,globalY,globalZ,trackingTime
this%CellNumber = cellNumber
this%LocalX = localX
this%LocalY = localY
this%LocalZ = localZ
this%GlobalX = globalX
this%GlobalY = globalY
this%GlobalZ = globalZ
this%TrackingTime = trackingTime
end subroutine ParticleCoordinateType_SetData1
!------------------------------------------
! Method:
!------------------------------------------
subroutine ParticleCoordinateType_SetData2(this,particleLocation,globalX,globalY,globalZ)
implicit none
class(ParticleCoordinateType) :: this
doubleprecision,intent(in) :: globalX,globalY,globalZ
type(ParticleLocationType),intent(in) :: particleLocation
this%CellNumber = particleLocation%CellNumber
this%LocalX = particleLocation%LocalX
this%LocalY = particleLocation%LocalY
this%LocalZ = particleLocation%LocalZ
this%TrackingTime = particleLocation%TrackingTime
this%GlobalX = globalX
this%GlobalY = globalY
this%GlobalZ = globalZ
end subroutine ParticleCoordinateType_SetData2
end module ParticleCoordinateModule
|
{"hexsha": "b1edc36a321e0636f72918f88fff240816affd1e", "size": 2887, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "source/MODPATH7/source/ParticleCoordinate.f90", "max_stars_repo_name": "usgs/neversink_workflow", "max_stars_repo_head_hexsha": "acd61435b8553e38d4a903c8cd7a3afc612446f9", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/MODPATH7/source/ParticleCoordinate.f90", "max_issues_repo_name": "usgs/neversink_workflow", "max_issues_repo_head_hexsha": "acd61435b8553e38d4a903c8cd7a3afc612446f9", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/MODPATH7/source/ParticleCoordinate.f90", "max_forks_repo_name": "usgs/neversink_workflow", "max_forks_repo_head_hexsha": "acd61435b8553e38d4a903c8cd7a3afc612446f9", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5697674419, "max_line_length": 119, "alphanum_fraction": 0.5888465535, "num_tokens": 645}
|
import numpy as np
from .io_gdal import GeoDataset
def open(input_data):
ds = GeoDataset(input_data)
return ds
def getspectra(x, y, ds):
nbands = ds.RasterCount
reflectance = np.empty(nbands)
for b in range(1, nbands + 1):
reflectance[b - 1] = ds.GetRasterBand(b).ReadAsArray(y, x, 1, 1)
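# Collapse bands 5 and 6 (0-indexed 4 and 5) into one averaged band;
# all other bands are copied through unchanged, hence nbands - 1 outputs.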
mergedref = np.empty(nbands - 1)
mergedref[:4] = reflectance[:4]
mergedref[4] = (reflectance[4] + reflectance[5]) / 2
mergedref[5:] = reflectance[6:]
return mergedref
|
{"hexsha": "4401adbb237f464a2d0be2a70de77646810e3f4c", "size": 506, "ext": "py", "lang": "Python", "max_stars_repo_path": "plio/io/io_multibandimager.py", "max_stars_repo_name": "kaitlyndlee/plio", "max_stars_repo_head_hexsha": "99f0852d8eb92efeba72f366077bd023a7da7cdd", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-02-01T02:56:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T12:08:12.000Z", "max_issues_repo_path": "plio/io/io_multibandimager.py", "max_issues_repo_name": "kaitlyndlee/plio", "max_issues_repo_head_hexsha": "99f0852d8eb92efeba72f366077bd023a7da7cdd", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 151, "max_issues_repo_issues_event_min_datetime": "2016-06-15T21:31:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-15T16:55:53.000Z", "max_forks_repo_path": "plio/io/io_multibandimager.py", "max_forks_repo_name": "kaitlyndlee/plio", "max_forks_repo_head_hexsha": "99f0852d8eb92efeba72f366077bd023a7da7cdd", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2016-06-17T17:02:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-08T20:47:50.000Z", "avg_line_length": 26.6315789474, "max_line_length": 72, "alphanum_fraction": 0.6501976285, "include": true, "reason": "import numpy", "num_tokens": 165}
|
from abc import ABC, abstractmethod
import numpy as np
class FeatureTransformerBase(ABC):
@abstractmethod
def get_transformed_features(self, x_array_2d: np.ndarray) -> np.ndarray:
"""
Transforms the input features and returns the transformed array.
Parameters
----------
x_array_2d:
N-by-n array for X.
Returns
-------
transformed_feature:
Transformed features
"""
|
{"hexsha": "a34287b8ddd480f5ef49f99f37848e29ee594218", "size": 457, "ext": "py", "lang": "Python", "max_stars_repo_path": "optmlstat/ml/features/feature_transformer_base.py", "max_stars_repo_name": "sungheeyun/optmlstat", "max_stars_repo_head_hexsha": "11d529c915bf27976da9157471a6dbf7df34d205", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-04-08T03:48:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-30T01:39:52.000Z", "max_issues_repo_path": "optmlstat/ml/features/feature_transformer_base.py", "max_issues_repo_name": "sungheeyun/optmlstat", "max_issues_repo_head_hexsha": "11d529c915bf27976da9157471a6dbf7df34d205", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "optmlstat/ml/features/feature_transformer_base.py", "max_forks_repo_name": "sungheeyun/optmlstat", "max_forks_repo_head_hexsha": "11d529c915bf27976da9157471a6dbf7df34d205", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-08T04:04:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-08T04:04:00.000Z", "avg_line_length": 21.7619047619, "max_line_length": 77, "alphanum_fraction": 0.5864332604, "include": true, "reason": "import numpy", "num_tokens": 96}
|
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.linalg import trace
#batch_size=100
#epochs=1000
#verbose=2
#lr=0.001
class gl(Layer):
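# advrelu clamps x to the interval [q[0], q[1]] via ReLU identities:
# min(max(x, a), b) = relu(x - a) - relu(x - b) + a for finite a < b;
# a string in place of a bound marks that side as unbounded.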
def advrelu(self,x,q):
i1=(type(q[0])==type(""))
i2=(type(q[1])==type(""))
if i1:
if i2:
return x
else:
return q[1]-K.relu(q[1]-x)
else:
if i2:
return K.relu(x-q[0])+q[0]
else:
return K.relu(x-q[0])-K.relu(x-q[1])+q[0]
#high number of iterations fail..why?
def __init__(self,graphmax=20,graphvar=40,keepconst=10,iterations=1,alinearity=[-1.0,1.0],kernel_initializer='glorot_uniform',**kwargs):
self.kernel_initializer=kernel_initializer
self.graphmax=graphmax
self.graphvar=graphvar
self.keepconst=keepconst
self.makezerolmat=K.constant(self.genmakezerolmat(graphmax))
self.iterations=iterations
self.activate=False
if len(alinearity)==2:
self.activate=True
self.activation = alinearity  # most general form of continuous activation: const, x, const
else:
self.activation=[]
super(gl,self).__init__(input_shape=(graphmax,graphmax+graphvar))
def get_config(self):
mi={"graphmax":self.graphmax,"graphvar":self.graphvar,"keepconst":self.keepconst,"iterations":self.iterations,"alinearity":self.activation,"kernel_initializer":self.kernel_initializer}
th=super(gl,self).get_config()
th.update(mi)
return th
@staticmethod
def from_config(config):
return gl(**config)
def build(self, input_shape):
self.neigintact=self.add_weight(name='neigthbourinteraction',
shape=(self.graphvar,self.graphvar-self.keepconst,),
initializer=self.kernel_initializer,
trainable=True)
self.selfintact=self.add_weight(name='selfinteraction',
shape=(self.graphvar,self.graphvar-self.keepconst,),
initializer=self.kernel_initializer,
trainable=True)
self.built=True
def genmakezerolmat(self,n):
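# Build an n-by-n matrix of ones with a zero diagonal; multiplying the
# adjacency block by it (in call) removes self-loops before aggregation.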
ret=np.zeros((n,n))+1.0
for i in range(n):
ret[i,i]=0.0
return ret
def call(self,x):
# print("!",x.shape)
mat=x[:,:,:self.graphmax]
val=x[:,:,self.graphmax:]
con=val[:,:,:self.keepconst]
var=val[:,:,self.keepconst:]
mat0=mat*self.makezerolmat
tra=trace(mat)
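# trace of the (batched) adjacency block, used below to normalize the
# aggregated features (var /= tra)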
# print(mat0.shape,val.shape)
# val=K.permute_dimensions(val,(1,0,2))
# print(mat0.shape,val.shape)
# mat0=K.permute_dimensions(mat0,(1,0,2))
# val =K.permute_dimensions(val ,(1,2,0))
# print(mat0.shape,val.shape)
# weignei=K.dot(mat0,val)
# weignei=K.dot(val,mat0)
for i in range(self.iterations):
weignei=K.batch_dot(mat0,val)
# print("---",weignei.shape)
# return K.sum(K.sum(weignei,axis=-1),axis=-1)
# exit()
# print(weignei.shape)
parta=K.dot(weignei,self.neigintact)
partb=K.dot(val,self.selfintact)
# print(parta.shape,tra.shape)
# print(parta.shape)
# exit()
var=parta+partb
var=K.permute_dimensions(var,(1,2,0))
# return tra
var/=tra
var=K.permute_dimensions(var,(2,0,1))
if self.activate:
val=self.advrelu(val,self.activation)
val=K.concatenate((con,var),axis=-1)
# return K.sum(K.sum(val,axis=-1),axis=-1)
# print("###",K.eval(val))
# print(parta.shape,partb.shape,var.shape,val.shape)
# exit()
return K.concatenate((mat,val),axis=-1)
# print("call")
# # print(x.shape)
# m=((K.dot(K.pow(x,2),self.kernel)))
# # print("!",m.shape)
# E=((K.dot(x,K.constant(np.array([[1.0],[0.0],[0.0],[0.0]])))))
# px=((K.dot(x,K.constant(np.array([[0.0],[1.0],[0.0],[0.0]])))))
# py=((K.dot(x,K.constant(np.array([[0.0],[0.0],[1.0],[0.0]])))))
# pt=K.sqrt(K.square(px)+K.square(py))
return x#K.constant([1,1])#K.concatenate((m,pt),axis=-1)#still missing: matrix*E+ matrix*abstand(vektor, ich)
def compute_output_shape(self,input_shape):
# return tuple([input_shape[0],40,60])
# return tuple([input_shape[0]])
# return tuple(input_shape)
shape=list(input_shape)
assert len(shape)==3
assert shape[-1]==self.graphmax+self.graphvar
assert shape[-2]==self.graphmax
#shape[-1]=self.graphmax+self.graphvar#this layer should not change the size of the network, so this line becomes kinda useless
# shape[-2]=self.graphmax
return tuple(shape)
# return tuple([15,2])
# return tuple(K.constant([1,1]).shape)
# assert len(shape)==2
# assert shape[-1]==4
# shape[-1]=2
# return tuple(shape)
|
{"hexsha": "dc147dab8367f9d96e943a69a91e03195120e11f", "size": 5000, "ext": "py", "lang": "Python", "max_stars_repo_path": "grapaold/layerfiles/gl.py", "max_stars_repo_name": "psorus/grapa", "max_stars_repo_head_hexsha": "6af343bb35c466c971ded1876e7a9d00e77cef00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grapaold/layerfiles/gl.py", "max_issues_repo_name": "psorus/grapa", "max_issues_repo_head_hexsha": "6af343bb35c466c971ded1876e7a9d00e77cef00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grapaold/layerfiles/gl.py", "max_forks_repo_name": "psorus/grapa", "max_forks_repo_head_hexsha": "6af343bb35c466c971ded1876e7a9d00e77cef00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.027027027, "max_line_length": 188, "alphanum_fraction": 0.6122, "include": true, "reason": "import numpy", "num_tokens": 1372}
|
import numpy as np
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import util
IS_TRAINING = tf.placeholder(tf.bool, name="is_training")
FLIP_HORIZONTALLY = tf.placeholder(tf.bool, name="flip_horizontally")
class Network(object):
"""Common class for handling ops for making / updating target networks."""
def __init__(self, namespace):
self.namespace = namespace
self.target_update_op = None
def _create_variables_copy_op(self, source_namespace, affine_combo_coeff):
"""create an op that does updates all vars in source_namespace to target_namespace"""
assert affine_combo_coeff >= 0.0 and affine_combo_coeff <= 1.0
assign_ops = []
with tf.variable_scope(self.namespace, reuse=True):
for src_var in tf.all_variables():
if not src_var.name.startswith(source_namespace):
continue
target_var_name = src_var.name.replace(source_namespace+"/", "").replace(":0", "")
target_var = tf.get_variable(target_var_name)
assert src_var.get_shape() == target_var.get_shape()
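# Soft ("Polyak") update: target <- target - coeff * (target - source),
# i.e. (1 - coeff) * target + coeff * source; coeff = 1.0 copies source fully.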
assign_ops.append(target_var.assign_sub(affine_combo_coeff * (target_var - src_var)))
single_assign_op = tf.group(*assign_ops)
return single_assign_op
def set_as_target_network_for(self, source_network, target_update_rate):
"""Create an op that will update this networks weights based on a source_network"""
# first, as a one off, copy _all_ variables across.
# i.e. initial target network will be a copy of source network.
op = self._create_variables_copy_op(source_network.namespace, affine_combo_coeff=1.0)
tf.get_default_session().run(op)
# next build target update op for running later during training
self.update_weights_op = self._create_variables_copy_op(source_network.namespace,
target_update_rate)
def update_target_weights(self):
"""called during training to update target network."""
return tf.get_default_session().run(self.update_weights_op)
def trainable_model_vars(self):
v = []
for var in tf.all_variables():
if var.name.startswith(self.namespace):
v.append(var)
return v
def hidden_layers_on(self, layer, layer_sizes):
if not isinstance(layer_sizes, list):
layer_sizes = map(int, layer_sizes.split(","))
assert len(layer_sizes) > 0
for i, size in enumerate(layer_sizes):
layer = slim.fully_connected(scope="h%d" % i,
inputs=layer,
num_outputs=size,
weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
activation_fn=tf.nn.relu)
# if opts.use_dropout:
# layer = slim.dropout(layer, is_training=IS_TRAINING, scope="do%d" % i)
return layer
def conv_net_on(self, input_layer, opts):
# TODO: reinclude batch_norm config, hasn't been helping at all...
# convert input_layer from uint8 (0, 255) to float32 (0.0, 1.0)
input_layer = tf.to_float(input_layer) / 255
# whiten image, per channel, using batch_normalisation layer with
# params calculated directly from batch.
axis = list(range(input_layer.get_shape().ndims - 1))
batch_mean, batch_var = tf.nn.moments(input_layer, axis) # calcs moments per channel
whitened_input_layer = tf.nn.batch_normalization(input_layer, batch_mean, batch_var,
scale=None, offset=None,
variance_epsilon=1e-6)
model = slim.conv2d(whitened_input_layer, num_outputs=8, kernel_size=[5, 5], scope='conv1a')
# model = slim.conv2d(whitened_input_layer, num_outputs=8, kernel_size=[5, 5], scope='conv1b')
model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool1')
self.pool1 = model
print >>sys.stderr, "pool1", util.shape_and_product_of(model)
model = slim.conv2d(model, num_outputs=16, kernel_size=[5, 5], scope='conv2a')
# model = slim.conv2d(model, num_outputs=16, kernel_size=[5, 5], scope='conv2b')
model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool2')
self.pool2 = model
print >>sys.stderr, "pool2", util.shape_and_product_of(model)
model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv3a')
# model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv3b')
model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool3')
self.pool3 = model
print >>sys.stderr, "pool3", util.shape_and_product_of(model)
# a final unpooled conv net just to drop params down. maybe pool here too actually?
# model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv4a')
# model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv3b')
# model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool4')
# self.pool3 = model
# print >>sys.stderr, "pool4", util.shape_and_product_of(model)
# do simple maxout on output to reduce dimensionality down for the upcoming
# fully connected layers. see https://arxiv.org/abs/1302.4389
# model = tf.reshape(model, (-1, 15, 20, 8, 4)) # (?, 15, 20, 32) -> (?, 15, 20, 8, 4)
# model = tf.reduce_max(model, reduction_indices=4) # (?, 15, 20, 8)
# print >>sys.stderr, "maxout", util.shape_and_product_of(model)
model = slim.flatten(model, scope='flat')
if opts.use_dropout:
model = slim.dropout(model, is_training=IS_TRAINING, scope="drop" % i)
return model
def render_convnet_activations(self, activations, filename_base):
_batch, height, width, num_filters = activations.shape
for f_idx in range(num_filters):
single_channel = activations[0,:,:,f_idx]
single_channel /= np.max(single_channel)
img = np.empty((height, width, 3))
img[:,:,0] = single_channel
img[:,:,1] = single_channel
img[:,:,2] = single_channel
util.write_img_to_png_file(img, "%s_f%02d.png" % (filename_base, f_idx))
def render_all_convnet_activations(self, step, input_state_placeholder, state):
activations = tf.get_default_session().run([self.pool1, self.pool2, self.pool3],
feed_dict={input_state_placeholder: [state],
IS_TRAINING: False,
FLIP_HORIZONTALLY: False})
filename_base = "/tmp/activation_s%03d" % step
self.render_convnet_activations(activations[0], filename_base + "_p0")
self.render_convnet_activations(activations[1], filename_base + "_p1")
self.render_convnet_activations(activations[2], filename_base + "_p2")
|
{"hexsha": "da17c891da0d5520f0ca3018ce36b164af95b1ed", "size": 6773, "ext": "py", "lang": "Python", "max_stars_repo_path": "base_network.py", "max_stars_repo_name": "matpalm/malmomo", "max_stars_repo_head_hexsha": "35861223dc65b7f10c780ec7cd7cdcaf50a46ead", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2016-11-10T18:31:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-14T11:57:42.000Z", "max_issues_repo_path": "base_network.py", "max_issues_repo_name": "matpalm/malmomo", "max_issues_repo_head_hexsha": "35861223dc65b7f10c780ec7cd7cdcaf50a46ead", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "base_network.py", "max_forks_repo_name": "matpalm/malmomo", "max_forks_repo_head_hexsha": "35861223dc65b7f10c780ec7cd7cdcaf50a46ead", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2016-11-11T07:11:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-10T17:52:01.000Z", "avg_line_length": 48.726618705, "max_line_length": 97, "alphanum_fraction": 0.6648457109, "include": true, "reason": "import numpy", "num_tokens": 1661}
|
"""
*Horizontal Split*
"""
from dataclasses import dataclass
import jax.numpy as jnp
from ._operator import SplitOperator
__all__ = ["HorizontalSplit"]
@dataclass
class HorizontalSplit(
SplitOperator,
):
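# jnp.hsplit mirrors numpy.hsplit: it splits along the second axis for
# arrays of two or more dimensions (the first axis for 1-D inputs).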
operator = jnp.hsplit
|
{"hexsha": "2cefd9587ffd22ebec3b9792153d66ed1d302e3c", "size": 246, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tensor/op/geometric/split/horizontal.py", "max_stars_repo_name": "jedhsu/tensor", "max_stars_repo_head_hexsha": "3b2fe21029fa7c50b034190e77d79d1a94ea5e8f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tensor/op/geometric/split/horizontal.py", "max_issues_repo_name": "jedhsu/tensor", "max_issues_repo_head_hexsha": "3b2fe21029fa7c50b034190e77d79d1a94ea5e8f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tensor/op/geometric/split/horizontal.py", "max_forks_repo_name": "jedhsu/tensor", "max_forks_repo_head_hexsha": "3b2fe21029fa7c50b034190e77d79d1a94ea5e8f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 11.7142857143, "max_line_length": 36, "alphanum_fraction": 0.7195121951, "include": true, "reason": "import jax", "num_tokens": 59}
|
import bayesnewton
import numpy as np
from bayesnewton.utils import solve
from jax.config import config
config.update("jax_enable_x64", True)
import pytest
def wiggly_time_series(x_):
noise_var = 0.15 # true observation noise
return (np.cos(0.04*x_+0.33*np.pi) * np.sin(0.2*x_) +
np.sqrt(noise_var) * np.random.normal(0, 1, x_.shape))
def build_data(N):
# np.random.seed(12345)
x = np.random.permutation(np.linspace(-25.0, 150.0, num=N) + 0.5*np.random.randn(N)) # unevenly spaced
x = np.sort(x) # since MarkovGP sorts the inputs, they must also be sorted for GP
y = wiggly_time_series(x)
# x_test = np.linspace(np.min(x)-15.0, np.max(x)+15.0, num=500)
# y_test = wiggly_time_series(x_test)
# x_plot = np.linspace(np.min(x)-20.0, np.max(x)+20.0, 200)
x = x[:, None]
# y = y[:, None]
# x_plot = x_plot[:, None]
return x, y
def initialise_gp_model(var_f, len_f, var_y, x, y):
kernel = bayesnewton.kernels.Matern52(variance=var_f, lengthscale=len_f)
likelihood = bayesnewton.likelihoods.Gaussian(variance=var_y)
model = bayesnewton.models.VariationalGP(kernel=kernel, likelihood=likelihood, X=x, Y=y)
return model
@pytest.mark.parametrize('var_f', [0.5, 1.5])
@pytest.mark.parametrize('len_f', [0.75, 2.5])
@pytest.mark.parametrize('var_y', [0.1, 0.5])
@pytest.mark.parametrize('N', [30, 60])
def test_marg_lik(var_f, len_f, var_y, N):
"""
test whether VI with newt's GP and Gaussian likelihood gives the exact marginal likelihood
"""
x, y = build_data(N)
gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)
gp_model.inference(lr=1.) # update variational params
loss_gp = gp_model.energy()
print(loss_gp)
K_X = gp_model.kernel(x, x)
K_Y = K_X + var_y * np.eye(K_X.shape[0])
L_Y = np.linalg.cholesky(K_Y)
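# Exact GP log marginal likelihood via the Cholesky factor L_Y:
#   log p(y) = -1/2 * y^T K_Y^{-1} y - 1/2 * log|K_Y| - N/2 * log(2*pi),
# where log|K_Y| = 2 * sum(log(diag(L_Y))).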
exact_marg_lik = (
-0.5 * y.T @ solve(K_Y, y)
- np.sum(np.log(np.diag(L_Y)))
- 0.5 * y.shape[0] * np.log(2 * np.pi)
)
print(exact_marg_lik)
np.testing.assert_almost_equal(loss_gp, -exact_marg_lik, decimal=4)
|
{"hexsha": "10b1e5eb5cb2b4069b8c85f1af4b9bfd859f852a", "size": 2115, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_vs_exact_marg_lik.py", "max_stars_repo_name": "AaltoML/Newt-test", "max_stars_repo_head_hexsha": "e3a725124eb63e9994653ed756be7ae8632f52b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2021-11-03T06:40:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T17:11:05.000Z", "max_issues_repo_path": "tests/test_vs_exact_marg_lik.py", "max_issues_repo_name": "AaltoML/Newt-test", "max_issues_repo_head_hexsha": "e3a725124eb63e9994653ed756be7ae8632f52b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-11-19T05:51:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T09:35:22.000Z", "max_forks_repo_path": "tests/test_vs_exact_marg_lik.py", "max_forks_repo_name": "AaltoML/Newt-test", "max_forks_repo_head_hexsha": "e3a725124eb63e9994653ed756be7ae8632f52b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-11-08T01:30:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T17:39:25.000Z", "avg_line_length": 32.0454545455, "max_line_length": 107, "alphanum_fraction": 0.6543735225, "include": true, "reason": "import numpy,from jax", "num_tokens": 673}
|
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from EmoPy.src.dataset import Dataset
class _DataLoader(object):
"""
Abstract class to load image and label data from a directory or csv file.
Methods load_data and _validate_arguments must be implemented by subclasses.
"""
def __init__(self, validation_split, time_delay=None):
self.validation_split = validation_split
self.time_delay = time_delay
self._validate_arguments()
def load_data(self):
"""
Loads image and label data from path specified in subclass initialization.
:return: Dataset object containing image and label data.
"""
raise NotImplementedError("Class %s doesn't implement load_data()" % self.__class__.__name__)
def _load_dataset(self, images, labels, emotion_index_map):
"""
Loads Dataset object with images, labels, and other data.
:param images: numpy array of image data
:param labels: numpy array of one-hot vector labels
:param emotion_index_map: map linking string/integer emotion class to integer index used in labels vectors
:return: Dataset object containing image and label data.
"""
train_images, test_images, train_labels, test_labels = train_test_split(images, labels, test_size=self.validation_split, random_state=42, stratify=labels)
dataset = Dataset(train_images, test_images, train_labels, test_labels, emotion_index_map, self.time_delay)
return dataset
def _validate_arguments(self):
if self.out_channels not in (1, 3):
raise ValueError("Out put channel should be either 3(RGB) or 1(Grey) but got {channels}".format(channels=self.out_channels))
if self.validation_split < 0 or self.validation_split > 1:
raise ValueError("validation_split must be a float between 0 and 1")
raise NotImplementedError("Class %s doesn't implement _validate_arguments()" % self.__class__.__name__)
def _reshape(self, image):
if image.ndim == 2:
image = np.expand_dims(image, axis=2)
channels = image.shape[-1]
if channels == 3 and self.out_channels == 1:
gray = cv2.cvtColor(image, code=cv2.COLOR_BGR2GRAY)
return np.expand_dims(gray, axis=2)
if channels == 1 and self.out_channels == 3:
return np.repeat(image, repeats=3, axis=2)
return image
def _check_data_not_empty(self, images):
if len(images) == 0:
raise AssertionError('There are no images in the data set.')
def _vectorize_labels(self, label_index_map, labels):
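# One-hot encode each label: e.g. with label_index_map = {'anger': 0, 'joy': 1},
# the label 'joy' becomes [0, 1.0].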
label_values = list()
label_count = len(label_index_map.keys())
for label in labels:
label_value = [0] * label_count
label_value[label_index_map[label]] = 1.0
label_values.append(label_value)
return label_values
|
{"hexsha": "b7046a48c4ce5b832ce411ff616f3091405fae6f", "size": 2965, "ext": "py", "lang": "Python", "max_stars_repo_path": "EmoPy/EmoPy/src/data_loader.py", "max_stars_repo_name": "Rahmatullina/FinalYearProject", "max_stars_repo_head_hexsha": "326f521b9f600dbbc7ace2223bd5aafc79b2267c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EmoPy/EmoPy/src/data_loader.py", "max_issues_repo_name": "Rahmatullina/FinalYearProject", "max_issues_repo_head_hexsha": "326f521b9f600dbbc7ace2223bd5aafc79b2267c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-09-26T01:09:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:32:30.000Z", "max_forks_repo_path": "EmoPy/EmoPy/src/data_loader.py", "max_forks_repo_name": "Rahmatullina/FinalYearProject", "max_forks_repo_head_hexsha": "326f521b9f600dbbc7ace2223bd5aafc79b2267c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1805555556, "max_line_length": 162, "alphanum_fraction": 0.6795952782, "include": true, "reason": "import numpy", "num_tokens": 636}
|
# -*- coding: utf-8 -*-
# ===============LICENSE_START=======================================================
# Acumos Apache-2.0
# ===================================================================================
# Copyright (C) 2017-2018 AT&T Intellectual Property & Tech Mahindra. All rights reserved.
# ===================================================================================
# This Acumos software file is distributed by AT&T and Tech Mahindra
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END=========================================================
"""
Provides wrapped model tests
"""
import io
import sys
import json
import logging
from os.path import join as path_join
from collections import Counter
from operator import eq
from tempfile import TemporaryDirectory
import pytest
import PIL
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from google.protobuf.json_format import MessageToJson, MessageToDict
from acumos.wrapped import _unpack_pb_msg, load_model, _pack_pb_msg
from acumos.modeling import Model, create_dataframe, List, Dict, create_namedtuple, new_type
from acumos.session import _dump_model, _copy_dir, Requirements
from test_pickler import _build_tf_model
from utils import TEST_DIR
_IMG_PATH = path_join(TEST_DIR, 'att.png')
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
@pytest.mark.skipif(sys.version_info < (3, 6), reason='Requires python3.6')
def test_py36_namedtuple():
'''Tests to make sure that new syntax for NamedTuple works with wrapping'''
from py36_namedtuple import Input, Output
def adder(data: Input) -> Output:
return Output(data.x + data.y)
_generic_test(adder, Input(1, 2), Output(3))
@pytest.mark.flaky(reruns=5)
def test_wrapped_prim_type():
'''Tests model wrap and load functionality'''
def f1(x: int, y: int) -> int:
return x + y
def f2(x: int, y: int) -> None:
pass
def f3() -> None:
pass
def f4() -> int:
return 3330
def f5(data: bytes) -> str:
'''Something more complex'''
buffer = io.BytesIO(data)
img = PIL.Image.open(buffer)
return img.format
def f6(x: List[int]) -> int:
return sum(x)
def f7(x: List[str]) -> Dict[str, int]:
return Counter(x)
def f8(x: List[np.int32]) -> np.int32:
return np.sum(x)
# input / output "answers"
f1_in = (1, 2)
f1_out = (3, )
f2_in = (1, 2)
f2_out = ()
f3_in = ()
f3_out = ()
f4_in = (0, )
f4_out = (3330, )
with open(_IMG_PATH, 'rb') as f:
f5_in = (f.read(), )
f5_out = ('PNG', )
f6_in = ([1, 2, 3], )
f6_out = (6, )
f7_in = (['a', 'a', 'b'], )
f7_out = ({'a': 2, 'b': 1}, )
f8_in = ([1, 2, 3], )
f8_out = (6, )
for func, in_, out in ((f1, f1_in, f1_out), (f2, f2_in, f2_out), (f3, f3_in, f3_out),
(f4, f4_in, f4_out), (f6, f6_in, f6_out), (f8, f8_in, f8_out)):
_generic_test(func, in_, out)
_generic_test(f5, f5_in, f5_out, reqs=Requirements(req_map={'PIL': 'pillow'}))
_generic_test(f7, f7_in, f7_out, skip=_dict_skips)
@pytest.mark.flaky(reruns=5)
def test_wrapped_nested_type():
'''Tests to make sure that nested NamedTuple messages are unpacked correctly'''
Inner = create_namedtuple('Inner', [('x', int), ('y', int), ('z', int)])
N1 = create_namedtuple('N1', [('dict_data', Dict[str, int])])
N2 = create_namedtuple('N2', [('n1s', List[N1])])
def f1(x: List[Inner]) -> Inner:
'''Returns the component-wise sum of a sequence of Inner'''
sums = np.vstack(x).sum(axis=0)
return Inner(*sums)
def f2(n2_in: N2) -> N2:
'''Returns another N2 type using data from the input N2 type'''
n1_in = n2_in.n1s[0]
dict_data = dict(**n1_in.dict_data) # shallow copy
dict_data['b'] = 2
n1_out = N1(dict_data=dict_data)
n2_out = N2(n1s=[n1_out, n1_out])
return n2_out
f1_in = ([Inner(1, 2, 3), ] * 5, )
f1_out = (5, 10, 15)
n1 = N1(dict_data={'a': 1})
n1_out = N1(dict_data={'a': 1, 'b': 2})
f2_in = N2(n1s=[n1])
f2_out = N2(n1s=[n1_out, n1_out])
_generic_test(f1, f1_in, f1_out)
_generic_test(f2, f2_in, f2_out, skip=_dict_skips)
def _dict_skips(as_, from_):
'''Skips byte and json str output comparison due to odd failures, perhaps related to dict ordering'''
return as_ in {'as_pb_bytes', 'as_json'}
Text = new_type(str, 'Text', {'dcae_input_name': 'a', 'dcae_output_name': 'a'}, 'example description')
Image = new_type(bytes, 'Image', {'dcae_input_name': 'a', 'dcae_output_name': 'a'}, 'example description')
Dictionary = new_type(dict, 'Dictionary', {'dcae_input_name': 'a', 'dcae_output_name': 'a'}, 'example description')
def f1(text: Text) -> Text:
'''Return a raw text'''
return Text(text)
def f2(image: Image) -> Image:
'''Return an image'''
return Image(image)
def f3(dictionary: Dictionary) -> Dictionary:
'''Return a raw dictionary'''
return Dictionary(dictionary)
def f4(image: Image) -> int:
'''Return the size in bytes of the image'''
return len(image)
def f5(x: int, y: int) -> Image:
'''Return an empty image'''
return Image(b"\00" * x * y)
@pytest.mark.parametrize(
["func", "f_in", "f_out", "in_media_type", "out_media_type", "in_is_raw", "out_is_raw"], (
pytest.param(f1, "test string", "test string", ["text/plain"], ["text/plain"], True, True, id="string"),
pytest.param(f2, b'test bytes', b'test bytes', ["application/octet-stream"], ["application/octet-stream"], True, True, id="bytes"),
pytest.param(f3, {'a': 1, 'b': 2}, {'a': 1, 'b': 2}, ["application/json"], ["application/json"], True, True, id="dict"),
pytest.param(f4, b'test bytes', 10, ["application/octet-stream"], ["application/vnd.google.protobuf"], True, False, id="bytes->int"),
pytest.param(f5, (2, 2), b"\00\00\00\00", ["application/vnd.google.protobuf"], ["application/octet-stream"], False, True, id="int->bytes"),
))
def test_raw_type(func, f_in, f_out, in_media_type, out_media_type, in_is_raw, out_is_raw):
'''Tests to make sure that supported raw data type models are working correctly'''
model = Model(transform=func)
model_name = 'my-model'
with TemporaryDirectory() as tdir:
with _dump_model(model, model_name) as dump_dir:
_copy_dir(dump_dir, tdir, model_name)
copied_dump_dir = path_join(tdir, model_name)
metadata_file_path = path_join(copied_dump_dir, 'metadata.json')
with open(metadata_file_path) as metadata_file:
metadata_json = json.load(metadata_file)
assert metadata_json['methods']['transform']['input']['media_type'] == in_media_type
assert metadata_json['methods']['transform']['output']['media_type'] == out_media_type
wrapped_model = load_model(copied_dump_dir)
if in_is_raw:
wrapped_return = wrapped_model.transform.from_raw(f_in)
else:
arguments = model.transform.input_type(*f_in)
arguments_pb_msg = _pack_pb_msg(arguments, wrapped_model.transform._module)
wrapped_return = wrapped_model.transform.from_pb_msg(arguments_pb_msg)
if out_is_raw:
ret = wrapped_return.as_raw()
else:
ret_pb_msg = wrapped_return.as_pb_msg()
ret = _unpack_pb_msg(model.transform.output_type, ret_pb_msg).value
assert ret == f_out
@pytest.mark.flaky(reruns=5)
def test_wrapped_sklearn():
'''Tests model wrap and load functionality'''
iris = load_iris()
X = iris.data
y = iris.target
clf = RandomForestClassifier(random_state=0)
clf.fit(X, y)
yhat = clf.predict(X)
columns = ['sepallength', 'sepalwidth', 'petallength', 'petalwidth']
X_df = pd.DataFrame(X, columns=columns)
IrisDataFrame = create_dataframe('IrisDataFrame', X_df)
def f1(data: IrisDataFrame) -> List[int]:
'''Creates a numpy ndarray and predicts'''
X = np.column_stack(data)
return clf.predict(X)
def f2(data: IrisDataFrame) -> List[int]:
'''Creates a pandas DataFrame and predicts'''
X = np.column_stack(data)
df = pd.DataFrame(X, columns=columns)
return clf.predict(df.values)
in_ = tuple(col for col in X.T)
out = (yhat, )
for func in (f1, f2):
_generic_test(func, in_, out, wrapped_eq=lambda a, b: (a[0] == b[0]).all())
@pytest.mark.flaky(reruns=5)
def test_wrapped_tensorflow():
'''Tests model wrap and load functionality'''
tf.set_random_seed(0)
iris = load_iris()
data = iris.data
target = iris.target
target_onehot = pd.get_dummies(target).values.astype(float)
# =============================================================================
# test with explicit session
# =============================================================================
tf.reset_default_graph()
session = tf.Session()
x, y, prediction = _build_tf_model(session, data, target_onehot)
yhat = session.run([prediction], {x: data})[0]
X_df = pd.DataFrame(data, columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'])
IrisDataFrame = create_dataframe('IrisDataFrame', X_df)
def f1(df: IrisDataFrame) -> List[int]:
'''Tests with explicit session provided'''
X = np.column_stack(df)
return prediction.eval({x: X}, session)
in_ = tuple(col for col in data.T)
out = (yhat, )
_generic_test(f1, in_, out, wrapped_eq=lambda a, b: (a[0] == b[0]).all(), preload=tf.reset_default_graph)
# =============================================================================
# test with implicit default session
# =============================================================================
tf.reset_default_graph()
session = tf.InteractiveSession()
x, y, prediction = _build_tf_model(session, data, target_onehot)
yhat = session.run([prediction], {x: data})[0]
def f2(df: IrisDataFrame) -> List[int]:
'''Tests with implicit default session'''
X = np.column_stack(df)
return prediction.eval({x: X})
in_ = tuple(col for col in data.T)
out = (yhat, )
_generic_test(f2, in_, out, wrapped_eq=lambda a, b: (a[0] == b[0]).all(), preload=tf.reset_default_graph)
def _generic_test(func, in_, out, wrapped_eq=eq, pb_mg_eq=eq, pb_bytes_eq=eq, dict_eq=eq, json_eq=eq, preload=None, reqs=None, skip=None):
'''Reusable wrap test routine with swappable equality functions'''
model = Model(transform=func)
model_name = 'my-model'
with TemporaryDirectory() as tdir:
with _dump_model(model, model_name, reqs) as dump_dir:
_copy_dir(dump_dir, tdir, model_name)
if preload is not None:
preload()
copied_dump_dir = path_join(tdir, model_name)
wrapped_model = load_model(copied_dump_dir)
TransIn = model.transform.input_type
TransOut = model.transform.output_type
trans_in = TransIn(*in_)
trans_out = TransOut(*out)
trans_in_pb = _pack_pb_msg(trans_in, wrapped_model.transform._module)
trans_out_pb = _pack_pb_msg(trans_out, wrapped_model.transform._module)
trans_in_pb_bytes = trans_in_pb.SerializeToString()
trans_out_pb_bytes = trans_out_pb.SerializeToString()
trans_in_dict = MessageToDict(trans_in_pb)
trans_out_dict = MessageToDict(trans_out_pb)
trans_in_json = MessageToJson(trans_in_pb, indent=0)
trans_out_json = MessageToJson(trans_out_pb, indent=0)
# test all from / as combinations
for as_method_name, as_data_expected, eq_func in (('as_wrapped', trans_out, wrapped_eq),
('as_pb_msg', trans_out_pb, pb_mg_eq),
('as_pb_bytes', trans_out_pb_bytes, pb_bytes_eq),
('as_dict', trans_out_dict, dict_eq),
('as_json', trans_out_json, json_eq)):
for from_method_name, from_data in (('from_wrapped', trans_in),
('from_pb_msg', trans_in_pb),
('from_pb_bytes', trans_in_pb_bytes),
('from_dict', trans_in_dict),
('from_json', trans_in_json)):
if skip is not None and skip(as_method_name, from_method_name):
logger.info("Skipping {} -> {}".format(from_method_name, as_method_name))
continue
from_method = getattr(wrapped_model.transform, from_method_name)
resp = from_method(from_data)
as_data_method = getattr(resp, as_method_name)
as_data = as_data_method()
assert eq_func(as_data, as_data_expected)
if __name__ == '__main__':
'''Test area'''
pytest.main([__file__, ])
|
{"hexsha": "5d4db6a51813eec84094b5893f116fb4990559d4", "size": 13795, "ext": "py", "lang": "Python", "max_stars_repo_path": "acumos-package/acumos/tests/test_wrapped.py", "max_stars_repo_name": "acumos/acumos-python-client", "max_stars_repo_head_hexsha": "d4faad0ed3fe6da0c8b0bfb23b548fa9ace546e5", "max_stars_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2018-07-28T00:01:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T19:30:52.000Z", "max_issues_repo_path": "acumos-package/acumos/tests/test_wrapped.py", "max_issues_repo_name": "acumos/acumos-python-client", "max_issues_repo_head_hexsha": "d4faad0ed3fe6da0c8b0bfb23b548fa9ace546e5", "max_issues_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "acumos-package/acumos/tests/test_wrapped.py", "max_forks_repo_name": "acumos/acumos-python-client", "max_forks_repo_head_hexsha": "d4faad0ed3fe6da0c8b0bfb23b548fa9ace546e5", "max_forks_repo_licenses": ["Apache-2.0", "CC-BY-4.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-07-30T21:48:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T11:01:17.000Z", "avg_line_length": 35.2813299233, "max_line_length": 147, "alphanum_fraction": 0.6006524103, "include": true, "reason": "import numpy", "num_tokens": 3449}
|
module dummy
character(len=65) :: message = "This is a random message"
end module dummy
program test
use dummy
print *, "message ", message
end program test
|
{"hexsha": "53f512c7c094c631091f894920c3762b31495e7f", "size": 165, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/output_tests/global_string.f90", "max_stars_repo_name": "clementval/fc", "max_stars_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/output_tests/global_string.f90", "max_issues_repo_name": "clementval/fc", "max_issues_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/output_tests/global_string.f90", "max_forks_repo_name": "clementval/fc", "max_forks_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.3333333333, "max_line_length": 59, "alphanum_fraction": 0.7151515152, "num_tokens": 42}
|
import random
import numpy as np
from tresearcher.tabular.glob import *
class KfoldIndexer():
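    '''Splits the row indices of a dataframe into k contiguous folds.'''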
def __init__(self, folds, base_df):
self.folds = folds
self.base_df = base_df
self.splits = np.array_split(range(len(base_df)), folds)
def get_indices(self, fold):
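        '''Returns (train_indices, validation_indices) for the given fold.'''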
return [idx for ary in self.splits[:fold] + self.splits[fold+1:] for idx in ary], self.splits[fold]
def all_indices(self):
return [idx for ary in self.splits[:] for idx in ary]
class SplitIterator():
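    '''Yields k train/validation splits, re-applying the preprocessing pipeline for each training fold.'''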
def __init__(self, base_df, folds, pipeline, x_cols, y_cols):
self.index = 0
self.base_df = base_df
self.folds = folds
self.pipeline = pipeline
self.x_cols = x_cols
self.y_cols = y_cols
self.indexer = KfoldIndexer(folds, base_df)
def __next__(self):
if self.index >= self.folds:
self.index = 0
raise StopIteration
trn_idx, val_idx = self.indexer.get_indices(self.index)
self.index += 1
modified_df, y_scale = self.pipeline.apply(self.base_df, trn_idx)
val = modified_df.iloc[val_idx]
trn = modified_df.iloc[trn_idx]
return trn[self.x_cols], trn[self.y_cols], val[self.x_cols], val[self.y_cols], y_scale
def __iter__(self):
return self
|
{"hexsha": "0357789fd4b68eb1221574d96eb325cba89ac675", "size": 1322, "ext": "py", "lang": "Python", "max_stars_repo_path": "tresearcher/tabular/split.py", "max_stars_repo_name": "Lewington-pitsos/tabularresearcher", "max_stars_repo_head_hexsha": "bb69d15e24a1734e6c7293e00867a25fd5fb99a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tresearcher/tabular/split.py", "max_issues_repo_name": "Lewington-pitsos/tabularresearcher", "max_issues_repo_head_hexsha": "bb69d15e24a1734e6c7293e00867a25fd5fb99a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tresearcher/tabular/split.py", "max_forks_repo_name": "Lewington-pitsos/tabularresearcher", "max_forks_repo_head_hexsha": "bb69d15e24a1734e6c7293e00867a25fd5fb99a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7441860465, "max_line_length": 107, "alphanum_fraction": 0.6293494705, "include": true, "reason": "import numpy", "num_tokens": 341}
|
(** * Validity of Annotations
In this file, we prove all the tools we need to show that a well-typed judgment
of PTS can be annotated into a valid judgment of PTS_{atr}. With this, we
establish the validity of the annotation system we chose.*)
Require Import Peano_dec.
Require Import Compare_dec.
Require Import Lt Le Gt Plus Minus.
Require Import base.
Require Import term.
Require Import env red.
Require Import typ_annot.
Require Import List.
Require Import ut_term ut_env ut_red.
Require Import strip.
Module Type glue_mod (X:term_sig) (Y:pts_sig X) (TM:term_mod X) (EM: env_mod X TM) (RM: red_mod X TM)
(UTM:ut_term_mod X) (UEM: ut_env_mod X UTM) (URM: ut_red_mod X UTM).
Import X Y UTM UEM URM TM EM RM.
Include (strip_mod X UTM TM UEM EM).
Include (PTS_ATR_mod X Y TM EM RM).
Open Scope Typ_scope.
(** This lemma gives some partial information about a term when its types are
not convertible; it will be useful to prove that annotations in applications
are safe.*)
Lemma weak_type_shape : forall Γ M N A, Γ ⊢ M ▹ N : A -> forall P B, Γ ⊢ M ▹ P : B ->
Γ ⊢ A ≡' B \/ (exists U,exists V, Γ ⊢ M ▹ λ[U],V : A /\ Γ ⊢ M ▹ λ[U],V : B) \/
(exists s, Γ ⊢ M ▹ !s : A /\ Γ ⊢ M ▹ !s : B) \/
(exists U, exists V, Γ ⊢ M ▹ Π(U),V : A /\ Γ ⊢ M ▹ Π(U),V : B).
induction 1; intros.
apply pgen_var in H1. destruct H1 as ( _ & A' & ? & ? ).
left. replace A with A'. intuition. eapply fun_item_lift. apply H1. trivial.
(**)
apply pgen_sort in H1. destruct H1 as ( _ & t & ? & ? ).
right; right; left. exists s1; split.
eauto. destruct H2. subst; eauto. eapply typ_pcompat; eauto.
(**)
clear IHtyp1 IHtyp2. apply pgen_pi in H2 as (A'' & B'' & s & t & u & h); decompose [and] h; clear h.
right; right; right. exists A'; exists B'; split. eapply typ_pi; eauto.
destruct H7. subst. apply typ_pi with s t; trivial.
eapply relocate. apply H4. apply H0. eapply relocate. apply H3. apply H1. apply typ_pcompat with !u; intuition.
apply typ_pi with s t; trivial. eapply relocate. apply H4. apply H0. eapply relocate. apply H3. apply H1.
(**)
clear IHtyp1 IHtyp2 IHtyp3.
right; left. apply pgen_la in H3 as (A'' & M'' & D & s & t &u & h); decompose [and] h; clear h.
exists A'; exists M'; split. eapply typ_la. apply H. trivial. apply H1. trivial.
apply typ_pcompat with (Π(A),D). eapply typ_la. apply H3. eapply relocate. apply H5. apply H0.
apply H6. eapply relocate. apply H4. apply H2. trivial.
(**)
apply pgen_app in H4 as (C & C' & D' & N'' & s & t & u & h). decompose [and] h; clear h.
left; trivial.
(**)
apply pgen_app in H7. destruct H7 as (C & C' & D' & N'' & s & t & u & h). decompose [and] h; clear h.
left; trivial.
(**)
destruct (IHtyp1 P B0 H1) as [ | [ | [ | ] ] ]. left; eauto.
destruct H2 as (U & V & ? & ?). right; left. exists U; exists V; split. eapply typ_pcompat with A; eauto. trivial.
destruct H2 as ( t & ? & ?). right; right; left. exists t; split. eapply typ_pcompat with A; eauto. trivial.
destruct H2 as (U & V & ? & ?). right; right; right. exists U; exists V; split. eapply typ_pcompat with A; eauto. trivial.
(**)
destruct (IHtyp1 P B0 H1) as [ | [ | [ | ] ] ]. left; eauto.
destruct H2 as (U & V & ? & ?). right; left. exists U; exists V; split. eapply typ_pcompat with B; eauto. trivial.
destruct H2 as ( t & ? & ?). right; right; left. exists t; split. eapply typ_pcompat with B; eauto. trivial.
destruct H2 as (U & V & ? & ?). right; right; right. exists U; exists V; split. eapply typ_pcompat with B; eauto. trivial.
Qed.
Lemma peq_not_Pi_sort : forall Γ A B S, ~(Γ ⊢ Π(A),B ≡' !S).
intros; intro. apply Confluence in H as (Z & ? & ? &? & ?).
apply Sort_Reds in H0 as ( -> & ?& ?).
apply Pi_Reds in H as ( A' & B' & ? & ? & ? & ? & ? & _ ). discriminate.
Qed.
(** First step to prove the annotation valid: we need to find a path between two
different annotated versions of a same "stripped" term. We don't care that [A] is not
equal to [B] here, since this result will mainly be used for types, so both
[A] and [B] will be sorts, which is enough to build an equality.*)
(** This is the main point of proving annotations valid, and we need the full
power of confluence and type exchange to be able to prove it.*)
Lemma ErasedTermConfluence : forall M N Γ A B, strip M = strip N ->
Γ ⊢ M ▹ M : A -> Γ ⊢ N ▹ N : B ->
exists P, Γ ⊢ M ▹▹ P : A /\ Γ ⊢ N ▹▹ P : B.
induction M; intros.
(**)
destruct N; simpl in H; try discriminate. injection H; intros; subst; clear H.
apply red_refl_lt in H1. apply red_refl_lt in H0. exists #v0; intuition.
(**)
destruct N; simpl in H; try discriminate. injection H; intros; subst; clear H.
apply red_refl_lt in H1. apply red_refl_lt in H0. exists !s0; intuition.
(**)
destruct N; simpl in H; try discriminate. injection H; intros; subst; clear H.
rename M1 into P. rename M4 into Q. rename M2 into An. rename M3 into D.
rename N1 into P'. rename N4 into Q'. rename N2 into An'. rename N3 into E.
apply pgen_app in H0 as (C & C1 & D' & Q1 & s1 & t1 & u1 & h). decompose [and] h ; clear h.
apply pgen_app in H1 as (C' & C'1 & E' & Q'1 & s2 & t2 & u2 & h). decompose [and] h ; clear h.
assert (exists PP, Γ ⊢ P ▹ PP : Π(C),D). destruct H8. destruct H8 as (P'' & ? & ? & ?). subst. exists P''; trivial.
destruct H8 as (U0 & K & K' & T & T' & ? & -> & ? & _). exists (La C1 T'). econstructor. subst. apply H. trivial. apply red_refl_lt in H0; apply H0.
trivial.
assert (exists PP', Γ ⊢ P' ▹ PP' : Π(C'),E). destruct H13. destruct H13 as (P'' & ? & ? & ?). subst. exists P''; trivial.
destruct H13 as (U0 & K & K' & T & T' & ? & -> & ? & _ ). exists (La C'1 T'). subst. econstructor. apply H1. trivial.
apply red_refl_lt in H7; apply H7. trivial.
destruct H12 as (PP & ?). destruct H14 as (PP' & ?).
apply red_refl_lt in H5. apply red_refl_lt in H10.
destruct (IHM4 Q' Γ C C' H2 H5 H10) as (RQ & ? & ? ). clear IHM4.
apply red_refl_lt in H12. apply red_refl_lt in H14.
destruct (IHM1 P' Γ (Pi C D) (Pi C' E) H3 H12 H14) as (RP & ? & ? ). clear IHM1. clear IHM2 IHM3.
destruct (weak_type_shape Γ RP RP (Π(C),D)) with (P := RP) (B := Π(C'),E) as [ | [ | [ ] ] ].
apply reds_refl_rt in H17; trivial. apply reds_refl_rt in H18; trivial.
(** Phase 1 : Pi C D == Pi C' E **)
destruct (PiInj Γ C D C' E H19) as (? & ?).
(** 1 / 4 **)
destruct H8. destruct H8 as (? & _ & ? & _ ). subst. clear x. destruct H13. destruct H8 as (? & _ & ? & _ ). clear x. subst.
apply Confluence in H21 as (Z & d1 & d2 & ? & ?). destruct (Confluence Γ C C' H20) as (ZA & d3 & d4 & ? & ?).
rename H8 into HH8. assert (H8 : C::Γ ⊢ D ▹▹ Z : !t1). eapply typ_reds_relocate. apply HH8. apply red_refl_lt in H0; apply H0. clear HH8.
rename H13 into HH13. assert (H13 : C'::Γ ⊢ E ▹▹ Z : !t2). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto.
apply red_refl_lt in H7; apply H7. clear HH13.
rename H21 into HH21. assert (H21 : Γ ⊢ C ▹▹ ZA : !s1). eapply typ_reds_relocate. apply HH21. apply red_refl_lt in H4; apply H4. clear HH21.
rename H22 into HH22. assert (H22 : Γ ⊢ C' ▹▹ ZA : !s2). eapply typ_reds_relocate. apply HH22. apply red_refl_lt in H9; apply H9. clear HH22.
clear d1 d2 d3 d4.
exists (App RP ZA Z RQ); split.
apply reds_typ_pcompat with (D [ ← Q]); trivial. eapply reds_App. trivial. trivial. apply H21. apply H8.
apply reds_typ_pcompat with (E [ ← Q']); trivial. eapply reds_App. trivial. trivial. apply H22. apply H13.
(** 2 / 4 **)
destruct H8 as (G0 & G & G' & ? & ? & ? & _ & _ & _ & HH2 & HH1 & HH0). clear x x0. subst.
apply Confluence in H21 as (Z & d1 & d2 & ? & ?). destruct (Confluence Γ C G) as (ZA & d3 & d4 & ? & ?). apply reds_to_conv in HH1. apply reds_to_conv in HH2.
apply typ_peq_trans with C'; intuition. apply typ_peq_trans with G0; intuition.
rename H8 into HH8. assert (H8 : C::Γ ⊢ D ▹▹ Z : !t1). eapply typ_reds_relocate. apply HH8. apply red_refl_lt in H0; apply H0. clear HH8.
rename H13 into HH13. assert (H13 : C'::Γ ⊢ E ▹▹ Z : !t2). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto.
apply red_refl_lt in H7; apply H7. clear HH13.
rename H21 into HH21. assert (H21 : Γ ⊢ C ▹▹ ZA : !s1). eapply typ_reds_relocate. apply HH21. apply red_refl_lt in H4; apply H4. clear HH21.
rename H22 into HH22. assert (H22 : Γ ⊢ G ▹▹ ZA : !s2). eapply typ_reds_relocate. apply HH22. apply red_refl_lt in HH0; apply HH0. clear HH22.
clear d1 d2 d3 d4.
assert(HEQ1: Γ ⊢ C' ≡' G). apply reds_to_conv in HH1. apply reds_to_conv in HH2. eauto.
assert (HEQ2: Γ ⊢ Π(C'),E ≡' Π(G),E). apply typ_peq_trans with (Π(G0),E). apply typ_peq_sym.
apply reds_to_conv with u2. eapply reds_Pi. apply HH1. constructor. eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto. trivial.
apply reds_to_conv with u2. eapply reds_Pi. apply HH2. constructor. eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto. trivial.
exists (App RP ZA Z RQ); split.
apply reds_typ_pcompat with (D [ ← Q]); trivial. eapply reds_App. trivial. trivial. apply H21. apply H8.
apply reds_typ_pcompat with (E [ ← Q']); trivial. eapply reds_App. apply reds_typ_pcompat with (Π(C'),E); trivial. apply reds_typ_pcompat with C'; trivial.
apply H22. eapply conv_in_env_reds. apply H13. eauto.
(** 3 / 4 **)
destruct H8 as (G0 & G & G' & ? & ? & ? & _ & _ & _ & HH2 & HH1 & HH0). subst. clear x x0. destruct H13.
destruct H8 as ( ? & _ & ? & _ ). subst. clear x.
assert(HEQ1: Γ ⊢ C' ≡' G). apply reds_to_conv in HH1. apply reds_to_conv in HH2. eauto.
assert (HEQ3: Γ ⊢ C ≡' G). eauto.
assert (HEQ2: Γ ⊢ Π(C),D ≡' Π(G),D). apply typ_peq_trans with (Π(G0),D). apply typ_peq_sym.
apply reds_to_conv with u1. eapply reds_Pi. apply HH1. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. trivial.
apply reds_to_conv with u1. eapply reds_Pi. apply HH2. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. trivial.
apply Confluence in H21 as (Z & d1 & d2 & ? & ?). destruct (Confluence Γ G C') as (ZA & d3 & d4 & ? & ?). intuition.
rename H8 into HH8. assert (H8 : G::Γ ⊢ D ▹▹ Z : !t1). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH8. eauto. eapply conv_in_env.
apply red_refl_lt in H0; apply H0. eauto. clear HH8.
rename H13 into HH13. assert (H13 : C'::Γ ⊢ E ▹▹ Z : !t2). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto.
apply red_refl_lt in H7; apply H7. clear HH13.
rename H21 into HH21. assert (H21 : Γ ⊢ G ▹▹ ZA : !s1). eapply typ_reds_relocate. apply HH21. apply reds_refl_rt in HH2; apply HH2. clear HH21.
rename H22 into HH22. assert (H22 : Γ ⊢ C' ▹▹ ZA : !s2). eapply typ_reds_relocate. apply HH22. apply red_refl_lt in H9; apply H9. clear HH22.
clear d1 d2 d3 d4.
exists (App RP ZA Z RQ); split.
apply reds_typ_pcompat with (D [ ← Q]); trivial. eapply reds_App. apply reds_typ_pcompat with (Π(C),D); trivial. apply reds_typ_pcompat with C; trivial. apply H21. apply H8.
apply reds_typ_pcompat with (E [ ← Q']); trivial. eapply reds_App. trivial. trivial. apply H22. apply H13.
(** 4 / 4 **)
destruct H8 as (F0 & F & F' & ? & ? & ? & _ & _ & _ & HH5 & HH4 & HH3). subst. clear x x0.
assert (HEQ1 : Γ ⊢ G ≡' C). apply reds_to_conv in HH1. apply reds_to_conv in HH2. eauto.
assert (HEQ2 : Γ ⊢ F ≡' C'). apply reds_to_conv in HH4. apply reds_to_conv in HH5. eauto.
assert (HEQ3 : Γ ⊢ G ≡' F). eauto.
assert (HEQ4: Γ ⊢ Π(C),D ≡' Π(G),D). apply typ_peq_trans with (Π(G0),D). apply typ_peq_sym.
apply reds_to_conv with u1. eapply reds_Pi. apply HH1. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. trivial.
apply reds_to_conv with u1. eapply reds_Pi. apply HH2. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. trivial.
assert (HEQ5: Γ ⊢ Π(C'),E ≡' Π(F),E). apply typ_peq_trans with (Π(F0),E). apply typ_peq_sym.
apply reds_to_conv with u2. eapply reds_Pi. apply HH4. constructor. eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto. trivial.
apply reds_to_conv with u2. eapply reds_Pi. apply HH5. constructor. eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto. trivial.
apply Confluence in H21 as (Z & d1 & d2 & ? & ?). destruct (Confluence Γ G F HEQ3) as (ZA & d3 & d4 & ? & ?).
rename H8 into HH8. assert (H8 : G::Γ ⊢ D ▹▹ Z : !t1). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH8. eauto. eapply conv_in_env.
apply red_refl_lt in H0; apply H0. eauto. clear HH8.
rename H13 into HH13. assert (H13 : F::Γ ⊢ E ▹▹ Z : !t2). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto. eapply conv_in_env.
apply red_refl_lt in H7; apply H7. eauto. clear HH13.
rename H21 into HH21. assert (H21 : Γ ⊢ G ▹▹ ZA : !s1). eapply typ_reds_relocate. apply HH21. apply reds_refl_rt in HH2; apply HH2. clear HH21.
rename H22 into HH22. assert (H22 : Γ ⊢ F ▹▹ ZA : !s2). eapply typ_reds_relocate. apply HH22. apply reds_refl_rt in HH5; apply HH5. clear HH22.
clear d1 d2 d3 d4.
exists (App RP ZA Z RQ); split.
apply reds_typ_pcompat with (D [ ← Q]); trivial. eapply reds_App. apply reds_typ_pcompat with (Π(C),D); trivial. apply reds_typ_pcompat with C; intuition. apply H21. apply H8.
apply reds_typ_pcompat with (E [ ← Q']); trivial. eapply reds_App. apply reds_typ_pcompat with (Π(C'),E); trivial. apply reds_typ_pcompat with C'; intuition. apply H22. apply H13.
(** Phase 2 : RP -> λ **)
destruct H19 as (U & V & ? & ?).
destruct (pgen_la Γ U V (λ[U],V) (Π(C),D)) as (U' & V' & K & s & t & u & h).
apply red_refl_rt in H19; trivial. decompose [and] h; clear h. injection H25; intros; subst; clear H25.
destruct (pgen_la Γ U' V' (λ[U'],V') (Π(C'),E)) as (U'' & V'' & L & s' & t' & u' & h).
apply red_refl_rt in H20; trivial. decompose [and] h; clear h. injection H30; intros; subst; clear H30.
destruct (PiInj Γ U'' K C D H27) as ( ? & ?). destruct (PiInj Γ U'' L C' E H32) as ( ? & ?). exists (V''[← RQ]).
(** 1 / 4 **)
destruct H8. destruct H8 as (? & _ & ? & _ ). subst. clear x. destruct H13. destruct H8 as (? & _ & ? & _ ). clear x. subst.
split. apply reds_typ_pcompat with (D [ ← Q]); trivial.
destruct (Confluence (U''::Γ) K D H31) as (ZB & a & b & ? & ?). destruct (Confluence Γ U'' C H30) as (ZA & c & d & ? & ?).
rename H35 into HH35. assert (H35: Γ ⊢ U'' ▹▹ ZA : !s). eapply typ_reds_relocate. apply HH35. apply H23. clear HH35.
rename H36 into HH36. assert (H36: Γ ⊢ C ▹▹ ZA : !s1). eapply typ_reds_relocate. apply HH36. apply red_refl_lt in H4; apply H4. clear HH36.
rename H8 into HH8. assert (H8 : U''::Γ ⊢ K ▹▹ ZB : !t). eapply typ_reds_relocate. apply HH8. apply H24. clear HH8.
rename H13 into HH13. assert (H13 : C::Γ ⊢ D ▹▹ ZB : !t1). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto.
apply red_refl_lt in H0; apply H0. clear HH13.
clear a b c d.
eapply typ_reds_trans. eapply reds_App. eapply typ_reds_trans. apply H17. constructor. apply H19. constructor; apply red_refl_lt in H5; apply H5. constructor; apply red_refl_lt in H4; apply H4.
constructor. apply red_refl_lt in H0; apply H0. eapply typ_reds_trans. eapply reds_App. constructor; apply red_refl_rt in H19; apply H19. apply H15. apply H36. apply H13. constructor.
apply typ_pcompat with (ZB [← RQ]). eapply typ_beta. apply H21. apply H23. eapply reds_refl_rt. apply H35. constructor. apply H23.
trivial. apply reds_refl_rt in H8; apply H8. apply typ_pcompat with K. trivial. eapply reds_to_conv; apply H8. apply typ_pcompat with C. apply reds_refl_rt in H15; trivial. eauto.
apply typ_peq_sym. apply reds_to_conv with t1. change !t1 with (!t1[← Q]). eapply reds_subst_gen. apply H13.
apply reds_typ_pcompat with C; eauto. apply reds_typ_pcompat with (E [ ← Q']); trivial.
destruct (Confluence (U''::Γ) L E H34) as (ZB & a & b & ? & ?). destruct (Confluence Γ U'' C' H33) as (ZA & c & d & ? & ?).
rename H35 into HH35. assert (H35: Γ ⊢ U'' ▹▹ ZA : !s'). eapply typ_reds_relocate. apply HH35. apply H28. clear HH35.
rename H36 into HH36. assert (H36: Γ ⊢ C' ▹▹ ZA : !s2). eapply typ_reds_relocate. apply HH36. apply red_refl_lt in H9; apply H9. clear HH36.
rename H8 into HH8. assert (H8 : U''::Γ ⊢ L ▹▹ ZB : !t'). eapply typ_reds_relocate. apply HH8. apply H29. clear HH8.
rename H13 into HH13. assert (H13 : C'::Γ ⊢ E ▹▹ ZB : !t2). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto.
apply red_refl_lt in H7; apply H7. clear HH13.
clear a b c d.
eapply typ_reds_trans. eapply reds_App. eapply typ_reds_trans. apply H18. constructor. apply H20. constructor; apply red_refl_lt in H10; apply H10.
constructor; apply red_refl_lt in H9; apply H9. constructor; apply red_refl_lt in H7; apply H7.
eapply typ_reds_trans. eapply reds_App. constructor; apply red_refl_rt in H20; apply H20. apply H16. apply H36. apply H13. constructor.
apply typ_pcompat with (ZB [← RQ]). eapply typ_beta. apply H25. apply H28. eapply reds_refl_rt. apply H35. constructor; apply H28.
trivial. eapply reds_refl_rt. apply H8. apply typ_pcompat with L. trivial. eapply reds_to_conv; apply H8. apply typ_pcompat with C. apply reds_refl_rt in H15; apply H15. eauto.
apply typ_peq_sym. apply reds_to_conv with t2. change !t2 with (!t2[← Q']). eapply reds_subst_gen. apply H13. apply reds_typ_pcompat with C'; eauto.
(** 2 / 4 **)
destruct H8 as (G0 & G & G' & ? & ? & ? & _ & _ & _ & HH2 & HH1 & HH0). clear x x0. subst. split.
apply reds_typ_pcompat with (D [ ← Q]); trivial.
destruct (Confluence (U''::Γ) K D H31) as (ZB & a & b & ? & ?). destruct (Confluence Γ U'' C H30) as (ZA & c & d & ? & ?).
rename H35 into HH35. assert (H35: Γ ⊢ U'' ▹▹ ZA : !s). eapply typ_reds_relocate. apply HH35. apply H23. clear HH35.
rename H36 into HH36. assert (H36: Γ ⊢ C ▹▹ ZA : !s1). eapply typ_reds_relocate. apply HH36. apply red_refl_lt in H4; apply H4. clear HH36.
rename H8 into HH8. assert (H8 : U''::Γ ⊢ K ▹▹ ZB : !t). eapply typ_reds_relocate. apply HH8. apply H24. clear HH8.
rename H13 into HH13. assert (H13 : C::Γ ⊢ D ▹▹ ZB : !t1). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto.
apply red_refl_lt in H0; apply H0. clear HH13.
clear a b c d.
eapply typ_reds_trans. eapply reds_App. eapply typ_reds_trans. apply H17. constructor. apply H19. constructor; apply red_refl_lt in H5; apply H5. constructor; apply red_refl_lt in H4; apply H4.
constructor. apply red_refl_lt in H0; apply H0. eapply typ_reds_trans. eapply reds_App. constructor; apply red_refl_rt in H19; apply H19. apply H15. apply H36. apply H13. constructor.
apply typ_pcompat with (ZB [← RQ]). eapply typ_beta. apply H21. apply H23. eapply reds_refl_rt. apply H35. constructor. apply H23.
trivial. apply reds_refl_rt in H8; apply H8. apply typ_pcompat with K. trivial. eapply reds_to_conv; apply H8. apply typ_pcompat with C. apply reds_refl_rt in H15; trivial. eauto.
apply typ_peq_sym. apply reds_to_conv with t1. change !t1 with (!t1[← Q]). eapply reds_subst_gen. apply H13.
apply reds_typ_pcompat with C; eauto. apply reds_typ_pcompat with (E [ ← Q']); trivial.
destruct (Confluence (U''::Γ) L E H34) as (ZB & a & b & ? & ?). destruct (Confluence Γ U'' G) as (ZA & c & d & ? & ?).
apply typ_peq_trans with C'; trivial. apply typ_peq_trans with G0. apply reds_to_conv in HH1; eauto. apply reds_to_conv in HH2; eauto.
assert(HEQ1: Γ ⊢ C' ≡' G). apply reds_to_conv in HH1. apply reds_to_conv in HH2. eauto.
rename H35 into HH35. assert (H35: Γ ⊢ U'' ▹▹ ZA : !s'). eapply typ_reds_relocate. apply HH35. apply H28. clear HH35.
rename H36 into HH36. assert (H36: Γ ⊢ G ▹▹ ZA : !s2). eapply typ_reds_relocate. apply HH36. apply reds_refl_rt in HH2; apply HH2. clear HH36.
rename H8 into HH8. assert (H8 : U''::Γ ⊢ L ▹▹ ZB : !t'). eapply typ_reds_relocate. apply HH8. apply H29. clear HH8.
rename H13 into HH13. assert (H13 : G::Γ ⊢ E ▹▹ ZB : !t2). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto. eapply conv_in_env.
apply red_refl_lt in H7; apply H7. eauto. clear HH13.
clear a b c d.
assert (HEQ2: Γ ⊢ Π(C'),E ≡' Π(G),E). apply typ_peq_trans with (Π(G0),E). apply typ_peq_sym.
apply reds_to_conv with u2. eapply reds_Pi. apply HH1. constructor. eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto. trivial.
apply reds_to_conv with u2. eapply reds_Pi. apply HH2. constructor. eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto. trivial.
eapply typ_reds_trans. eapply reds_App. eapply typ_reds_trans. eapply reds_typ_pcompat with (Π(C'),E). apply H18. trivial. constructor. apply typ_pcompat with (Π(C'),E). apply H20. trivial.
apply reds_typ_pcompat with C'; trivial. constructor; apply red_refl_lt in H10; apply H10.
constructor; apply reds_refl_rt in HH2; apply HH2. eapply conv_in_env_reds. constructor; apply red_refl_lt in H7; apply H7. eauto.
eapply typ_reds_trans. eapply reds_App. apply reds_typ_pcompat with (Π(C'),E); trivial. constructor; apply red_refl_rt in H20; apply H20. apply reds_typ_pcompat with C'; trivial. apply H16. apply H36. apply H13. constructor.
apply typ_pcompat with (ZB [← RQ]). eapply typ_beta. apply H25. apply H28. eapply reds_refl_rt. apply H35. constructor; apply H28.
trivial. eapply reds_refl_rt. apply H8. apply typ_pcompat with L. trivial. eapply reds_to_conv; apply H8. apply typ_pcompat with C. apply reds_refl_rt in H15; apply H15. eauto.
apply typ_peq_sym. apply reds_to_conv with t2. change !t2 with (!t2[← Q']). eapply reds_subst_gen. apply H13. apply reds_typ_pcompat with C'; eauto.
(** 3 / 4 **)
destruct H8 as (G0 & G & G' & ? & ? & ? & _ & _ & _ & HH2 & HH1 & HH0). subst. clear x x0. destruct H13.
destruct H8 as ( ? & _ & ? & _ ). subst. clear x. split.
apply reds_typ_pcompat with (D [ ← Q]); trivial.
assert(HEQ1: Γ ⊢ C ≡' G). apply reds_to_conv in HH1. apply reds_to_conv in HH2. eauto.
assert (HEQ2: Γ ⊢ Π(C),D ≡' Π(G),D). apply typ_peq_trans with (Π(G0),D). apply typ_peq_sym.
apply reds_to_conv with u1. eapply reds_Pi. apply HH1. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. trivial.
apply reds_to_conv with u1. eapply reds_Pi. apply HH2. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. trivial.
assert (HEQ3: Γ ⊢ C' ≡' G). eauto.
destruct (Confluence (U''::Γ) K D H31) as (ZB & a & b & ? & ?). destruct (Confluence Γ U'' G ) as (ZA & c & d & ? & ?). eauto.
rename H35 into HH35. assert (H35: Γ ⊢ U'' ▹▹ ZA : !s). eapply typ_reds_relocate. apply HH35. apply H23. clear HH35.
rename H36 into HH36. assert (H36: Γ ⊢ G ▹▹ ZA : !s1). eapply typ_reds_relocate. apply HH36. apply reds_refl_rt in HH2; apply HH2. clear HH36.
rename H8 into HH8. assert (H8 : U''::Γ ⊢ K ▹▹ ZB : !t). eapply typ_reds_relocate. apply HH8. apply H24. clear HH8.
rename H13 into HH13. assert (H13 : G::Γ ⊢ D ▹▹ ZB : !t1). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto. eapply conv_in_env.
apply red_refl_lt in H0; apply H0. eauto. clear HH13.
clear a b c d.
eapply typ_reds_trans. eapply reds_App. eapply typ_reds_trans. apply reds_typ_pcompat with (Π(C),D); trivial. apply H17. apply reds_typ_pcompat with (Π(C),D); trivial. constructor. apply H19. apply reds_typ_pcompat with C; trivial.
constructor; apply red_refl_lt in H5; apply H5. constructor; apply reds_refl_rt in HH2; apply HH2.
constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. eapply typ_reds_trans. eapply reds_App. apply reds_typ_pcompat with (Π(C),D);trivial.
constructor; apply red_refl_rt in H19; apply H19. apply reds_typ_pcompat with C; trivial. apply H15. apply H36. apply H13. constructor.
apply typ_pcompat with (ZB [← RQ]). eapply typ_beta. apply H21. apply H23. eapply reds_refl_rt. apply H35. constructor. apply H23.
trivial. apply reds_refl_rt in H8; apply H8. apply typ_pcompat with K. trivial. eapply reds_to_conv; apply H8. apply typ_pcompat with C. apply reds_refl_rt in H15; trivial. eauto.
apply typ_peq_sym. apply reds_to_conv with t1. change !t1 with (!t1[← Q]). eapply reds_subst_gen. apply H13.
apply reds_typ_pcompat with C; eauto. apply reds_typ_pcompat with (E [ ← Q']); trivial.
destruct (Confluence (U''::Γ) L E H34) as (ZB & a & b & ? & ?). destruct (Confluence Γ U'' C' H33) as (ZA & c & d & ? & ?).
rename H35 into HH35. assert (H35: Γ ⊢ U'' ▹▹ ZA : !s'). eapply typ_reds_relocate. apply HH35. apply H28. clear HH35.
rename H36 into HH36. assert (H36: Γ ⊢ C' ▹▹ ZA : !s2). eapply typ_reds_relocate. apply HH36. apply red_refl_lt in H9; apply H9. clear HH36.
rename H8 into HH8. assert (H8 : U''::Γ ⊢ L ▹▹ ZB : !t'). eapply typ_reds_relocate. apply HH8. apply H29. clear HH8.
rename H13 into HH13. assert (H13 : C'::Γ ⊢ E ▹▹ ZB : !t2). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto.
apply red_refl_lt in H7; apply H7. clear HH13.
clear a b c d.
eapply typ_reds_trans. eapply reds_App. eapply typ_reds_trans. apply H18. constructor. apply H20. constructor; apply red_refl_lt in H10; apply H10.
constructor; apply red_refl_lt in H9; apply H9. constructor; apply red_refl_lt in H7; apply H7.
eapply typ_reds_trans. eapply reds_App. constructor; apply red_refl_rt in H20; apply H20. apply H16. apply H36. apply H13. constructor.
apply typ_pcompat with (ZB [← RQ]). eapply typ_beta. apply H25. apply H28. eapply reds_refl_rt. apply H35. constructor; apply H28.
trivial. eapply reds_refl_rt. apply H8. apply typ_pcompat with L. trivial. eapply reds_to_conv; apply H8. apply typ_pcompat with C. apply reds_refl_rt in H15; apply H15. eauto.
apply typ_peq_sym. apply reds_to_conv with t2. change !t2 with (!t2[← Q']). eapply reds_subst_gen. apply H13. apply reds_typ_pcompat with C'; eauto.
(** 4 / 4 **)
destruct H8 as (F0 & F & F' & ? & ? & ? & _ & _ & _ & HH5 & HH4 & HH3). subst. clear x x0.
assert (HEQ1 : Γ ⊢ G ≡' C). apply reds_to_conv in HH1. apply reds_to_conv in HH2. eauto.
assert (HEQ2 : Γ ⊢ F ≡' C'). apply reds_to_conv in HH4. apply reds_to_conv in HH5. eauto.
assert (HEQ3 : Γ ⊢ G ≡' F). apply typ_peq_trans with U''. eauto. eauto.
assert (HEQ4: Γ ⊢ Π(C),D ≡' Π(G),D). apply typ_peq_trans with (Π(G0),D). apply typ_peq_sym.
apply reds_to_conv with u1. eapply reds_Pi. apply HH1. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. trivial.
apply reds_to_conv with u1. eapply reds_Pi. apply HH2. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. trivial.
assert (HEQ5: Γ ⊢ Π(C'),E ≡' Π(F),E). apply typ_peq_trans with (Π(F0),E). apply typ_peq_sym.
apply reds_to_conv with u2. eapply reds_Pi. apply HH4. constructor. eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto. trivial.
apply reds_to_conv with u2. eapply reds_Pi. apply HH5. constructor. eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto. trivial.
split. apply reds_typ_pcompat with (D [ ← Q]); trivial.
destruct (Confluence (U''::Γ) K D H31) as (ZB & a & b & ? & ?). destruct (Confluence Γ U'' G ) as (ZA & c & d & ? & ?). eauto.
rename H35 into HH35. assert (H35: Γ ⊢ U'' ▹▹ ZA : !s). eapply typ_reds_relocate. apply HH35. apply H23. clear HH35.
rename H36 into HH36. assert (H36: Γ ⊢ G ▹▹ ZA : !s1). eapply typ_reds_relocate. apply HH36. apply reds_refl_rt in HH2; apply HH2. clear HH36.
rename H8 into HH8. assert (H8 : U''::Γ ⊢ K ▹▹ ZB : !t). eapply typ_reds_relocate. apply HH8. apply H24. clear HH8.
rename H13 into HH13. assert (H13 : G::Γ ⊢ D ▹▹ ZB : !t1). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto. eapply conv_in_env.
apply red_refl_lt in H0; apply H0. eauto. clear HH13.
clear a b c d.
eapply typ_reds_trans. eapply reds_App. apply reds_typ_pcompat with (Π(C),D); intuition. eapply typ_reds_trans. apply H17. constructor. apply H19. apply reds_typ_pcompat with C; intuition. constructor; apply red_refl_lt in H5; apply H5.
constructor; apply reds_refl_rt in HH2; apply HH2. constructor. eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto. eapply typ_reds_trans. eapply reds_App.
apply reds_typ_pcompat with (Π(C),D). constructor; apply red_refl_rt in H19; apply H19. intuition. apply reds_typ_pcompat with C; intuition. apply H15. apply H36. apply H13. constructor.
apply typ_pcompat with (ZB [← RQ]). eapply typ_beta. apply H21. apply H23. eapply reds_refl_rt. apply H35. constructor. apply H23.
trivial. apply reds_refl_rt in H8; apply H8. apply typ_pcompat with K. trivial. eapply reds_to_conv; apply H8. apply typ_pcompat with C. apply reds_refl_rt in H15; trivial. eauto.
apply typ_peq_sym. apply reds_to_conv with t1. change !t1 with (!t1[← Q]). eapply reds_subst_gen. apply H13.
apply reds_typ_pcompat with C; eauto. apply reds_typ_pcompat with (E [ ← Q']); trivial.
destruct (Confluence (U''::Γ) L E H34) as (ZB & a & b & ? & ?). destruct (Confluence Γ U'' F ) as (ZA & c & d & ? & ?). eauto.
rename H35 into HH35. assert (H35: Γ ⊢ U'' ▹▹ ZA : !s'). eapply typ_reds_relocate. apply HH35. apply H28. clear HH35.
rename H36 into HH36. assert (H36: Γ ⊢ F ▹▹ ZA : !s2). eapply typ_reds_relocate. apply HH36. apply reds_refl_rt in HH5; apply HH5. clear HH36.
rename H8 into HH8. assert (H8 : U''::Γ ⊢ L ▹▹ ZB : !t'). eapply typ_reds_relocate. apply HH8. apply H29. clear HH8.
rename H13 into HH13. assert (H13 : F::Γ ⊢ E ▹▹ ZB : !t2). eapply typ_reds_relocate. eapply conv_in_env_reds. apply HH13. eauto. eapply conv_in_env.
apply red_refl_lt in H7; apply H7. eauto. clear HH13.
clear a b c d.
eapply typ_reds_trans. eapply reds_App. apply reds_typ_pcompat with (Π(C'),E). eapply typ_reds_trans. apply H18. constructor. apply H20. intuition. apply reds_typ_pcompat with C'; intuition. constructor; apply red_refl_lt in H10; apply H10.
constructor; apply reds_refl_rt in HH5; apply HH5. eapply conv_in_env_reds. constructor; apply red_refl_lt in H7; apply H7. eauto.
eapply typ_reds_trans. eapply reds_App. apply reds_typ_pcompat with (Π(C'),E). constructor; apply red_refl_rt in H20; apply H20. intuition. apply reds_typ_pcompat with C'; intuition. apply H16. apply H36. apply H13. constructor.
apply typ_pcompat with (ZB [← RQ]). eapply typ_beta. apply H25. apply H28. eapply reds_refl_rt. apply H35. constructor; apply H28.
trivial. eapply reds_refl_rt. apply H8. apply typ_pcompat with L. trivial. eapply reds_to_conv; apply H8. apply typ_pcompat with C. apply reds_refl_rt in H15; apply H15. eauto.
apply typ_peq_sym. apply reds_to_conv with t2. change !t2 with (!t2[← Q']). eapply reds_subst_gen. apply H13. apply reds_typ_pcompat with C'; eauto.
(** impossible cases **)
destruct H19 as (z & ? & ?). apply red_refl_rt in H19. apply pgen_sort in H19 as ( ? & ? & ? & ?).
destruct H22. discriminate. apply peq_not_Pi_sort in H22; elim H22.
destruct H19 as (U & V & ? & ?). apply red_refl_rt in H19. apply pgen_pi in H19 as ( U' & V' & s & t & u & h).
decompose [and] h; clear h. destruct H25. discriminate. apply peq_not_Pi_sort in H24; elim H24.
(**)
destruct N; simpl in H; try discriminate. injection H; intros; subst; clear H.
apply pgen_pi in H0. destruct H0 as (M1' & M2' & s1 & t1 & u1 & h). decompose [and] h; clear h.
apply pgen_pi in H1. destruct H1 as (N1' & N2' & s2 & t2 & u2 & h). decompose [and] h; clear h.
apply red_refl_lt in H4. apply red_refl_lt in H8.
destruct (IHM1 N1 Γ !s1 !s2 H3 H4 H8) as (R1 & ? & ?).
assert (Γ ⊢ M1 ≡' N1). apply typ_peq_trans with R1. apply reds_to_conv in H10; trivial. apply reds_to_conv in H12; intuition.
apply red_refl_lt in H0.
destruct (IHM2 N2 (M1::Γ) !t1 !t2 H2 H0 ) as (R2 & ? & ?).
eapply conv_in_env. apply red_refl_lt in H6; apply H6.
eauto. exists (Pi R1 R2); split.
destruct H7. subst. eapply reds_Pi. apply H10. apply H14. trivial.
apply reds_typ_pcompat with !u1. eapply reds_Pi. apply H10. apply H14. trivial. intuition.
destruct H11. subst. eapply reds_Pi. apply H12. eapply conv_in_env_reds. apply H15. eauto. trivial.
apply reds_typ_pcompat with !u2. eapply reds_Pi. apply H12. eapply conv_in_env_reds. apply H15. eauto. trivial. intuition.
(**)
destruct N; simpl in H; try discriminate. injection H; intros; subst; clear H.
apply pgen_la in H0. destruct H0 as (M1' & M2' & D & s1 & t1 & u1 & h). decompose [and] h; clear h.
apply pgen_la in H1. destruct H1 as (N1' & N2' & D2 & s2 & t2 & u2 & h). decompose [and] h; clear h.
apply red_refl_lt in H4. apply red_refl_lt in H9.
destruct (IHM1 N1 Γ !s1 !s2 H3 H4 H9) as (R1 & ? & ? ).
assert (Γ ⊢ M1 ≡' N1). apply typ_peq_trans with R1. apply reds_to_conv in H12; trivial. apply reds_to_conv in H14; intuition.
destruct (IHM2 N2 (M1::Γ) D D2 H2) as (R2 & ? & ? ); trivial.
eapply conv_in_env. apply red_refl_lt in H0; apply H0. eauto.
eapply conv_in_env. apply red_refl_lt in H7; apply H7. eauto.
exists (La R1 R2); split.
apply reds_typ_pcompat with (Π (M1), D); trivial. eapply reds_La. apply H12. trivial. apply H5. apply H.
apply reds_typ_pcompat with (Π (N1), D2); trivial. eapply reds_La. apply H14. eapply conv_in_env_reds. apply H17. eauto.
apply H10. apply H1.
Qed.
(** Next step: if there is a path for any term, there is a path for types, so two
versions of a same "stripped" type are equivalent in PTS_{atr}.*)
Lemma ErasedTypeConversion : forall A B Γ s t, strip A = strip B -> Γ ⊢ A ▹ A : !s -> Γ ⊢ B ▹ B : !t ->
Γ ⊢ A ≡' B.
intros.
destruct (ErasedTermConfluence A B Γ !s !t H H0 H1) as ( P & ?& ?).
apply typ_peq_trans with P. apply reds_to_conv in H2; trivial. apply reds_to_conv in H3; intuition.
Qed.
(** And if it's true for types, it's true for lists of types, and hence for contexts.*)
Lemma L43_ : forall Γ Γ', strip_env Γ = strip_env Γ' -> wf Γ -> wf Γ' -> env_conv Γ Γ'.
induction Γ; intros. destruct Γ'; simpl in H.
intuition. discriminate. destruct Γ'; simpl in *. discriminate.
injection H; intros; subst; clear H. eapply c_trans with (a::Γ').
apply env_conv_cons. inversion H0; subst; clear H0. econstructor. apply red_refl_lt in H4; apply H4.
inversion H1; inversion H0; subst; clear H0 H1. apply wf_from_typ in H4. apply wf_from_typ in H7. intuition.
apply env_conv_cons. inversion H1; inversion H0; subst; clear H0 H1.
eapply ErasedTypeConversion. trivial. eapply conv_in_env.
apply red_refl_lt in H7; apply H7.
apply IHΓ. trivial. apply wf_from_typ in H7; trivial.
apply wf_from_typ in H4; trivial. apply red_refl_lt in H4; apply H4.
inversion H1; inversion H0; subst; clear H0 H1.
apply wf_from_typ in H4. apply wf_from_typ in H7. intuition.
Qed.
(** Now that we know that two versions of a "stripped" context are equivalent,
we can choose to use either version to build our judgment. This will be useful to
glue together the intermediate steps of the annotation process.*)
Lemma ErasedContextSwitch : (forall Γ M N T, Γ ⊢ M ▹ N : T -> forall Γ', wf Γ' -> strip_env Γ = strip_env Γ' -> Γ' ⊢ M ▹ N : T) /\
(forall Γ M N T, Γ ⊢ M ▹▹ N : T -> forall Γ', wf Γ' -> strip_env Γ = strip_env Γ' -> Γ' ⊢ M ▹▹ N : T) /\
(forall Γ, Γ ⊣ -> True).
apply typ_induc; simpl; intros; trivial.
(**)
apply conv_in_env with Γ. intuition. apply L43_; trivial.
(**)
intuition.
(**)
apply typ_pi with s1 s2; intuition. eapply H0. econstructor. eapply H.
trivial. trivial. simpl; rewrite H2; trivial.
(**)
apply typ_la with s1 s2 s3; intuition. eapply H0. econstructor. eapply H.
trivial. trivial. simpl; rewrite H3; trivial. eapply H1. econstructor. eapply H.
trivial. trivial. simpl; rewrite H3; trivial.
(**)
eapply typ_app. apply r. eapply H; eauto. eapply H0; eauto. simpl. rewrite H4; intuition.
eapply H1; eauto. eapply H2; eauto.
(**)
eapply typ_beta. apply r. eapply H; eauto. eapply H0; eauto. eapply H1; eauto.
eapply H2; eauto. eapply H3; eauto. simpl; rewrite H7; trivial. eapply H4; eauto.
simpl; rewrite H7; trivial. eapply H5; eauto.
(**)
eauto.
(**)
eauto.
(* reds *)
eauto. eauto.
Qed.
(** Some properties of the stripping process and reduction.*)
Lemma L33 : forall M N', URM.Beta (strip M) N' -> exists N, strip N = N' /\ Beta M N.
induction M; intros; simpl in *.
inversion H. inversion H.
inversion H; subst; clear H. destruct M1; simpl in *; try discriminate.
injection H1; intros; subst; clear H1.
exists ( M1_2 [ ← M4]); split. rewrite strip_subst. trivial. eauto.
destruct (IHM1 M' H3) as (n & ? & ?). exists (App n M2 M3 M4); split. simpl. subst. trivial.
intuition. destruct (IHM4 N'0 H3) as (n & ? & ?). exists (App M1 M2 M3 n); split. simpl. subst. trivial.
intuition. inversion H; subst; clear H.
destruct (IHM2 B' H3) as (n & ? & ?). exists (Pi M1 n); split. simpl. subst. trivial.
intuition. destruct (IHM1 A' H3) as (n & ? & ?). exists (Pi n M2); split. simpl. subst. trivial.
intuition. inversion H; subst; clear H.
destruct (IHM2 M' H3) as (n & ? & ?). exists (La M1 n); split. simpl. subst. trivial.
intuition. destruct (IHM1 A' H3) as (n & ? & ?). exists (La n M2); split. simpl. subst. trivial.
intuition.
Qed.
Lemma L33' : forall M N', URM.Betas (strip M) N' -> exists N, strip N = N' /\ Betas M N.
intros. remember (strip M) as MM. revert M HeqMM. induction H; intros; subst.
exists M0; intuition. apply L33 in H as (N0 & ? & ?).
exists N0; intuition. destruct (IHBetas1 M0 ) as (N0 & ? & ?); trivial.
destruct (IHBetas2 N0) as (NN & ?& ?). intuition.
exists NN; intuition. eauto.
Qed.
End glue_mod.
|
{"author": "coq-contribs", "repo": "ptsatr", "sha": "e57ad4552055340ea97bc6a2c61b837c56c11a7d", "save_path": "github-repos/coq/coq-contribs-ptsatr", "path": "github-repos/coq/coq-contribs-ptsatr/ptsatr-e57ad4552055340ea97bc6a2c61b837c56c11a7d/glue.v"}
|
Require Import Classical.
Theorem ex1: forall p q r : Prop,
  (p -> q) -> (((p -> r) -> q) -> q).
Proof.
  intros. apply NNPP. intro.
apply H1. apply H0. intro.
apply NNPP. intro. apply H1.
apply (H H2).
Qed.
Theorem ex2: forall p q r s : Prop,
((p -> q) -> r) -> ((r -> p) -> (s -> p)).
Proof.
intros. apply NNPP. intro. apply H2.
apply H0. apply H. intro. contradiction.
Qed.
Theorem ex3: forall p q r s : Prop,
((p -> r) -> (s -> p)) ->
(((r -> q) -> p) -> (s -> p)).
Proof.
intros. apply NNPP. intro. apply H2.
apply H0. intro. apply NNPP. intro. apply H2.
apply H. intro. assumption. assumption.
Qed.
Theorem ex4: forall p q r s : Prop,
((r -> q) -> (s -> p)) ->
((r -> p) -> (s -> p)).
Proof.
intros. apply NNPP. intro. apply H2.
apply H. intro. apply NNPP. intro.
apply H2. apply (H0 H3).
assumption.
Qed.
Theorem ex5: forall p q r s : Prop,
(((r -> p) -> p) -> (s -> p))
-> (((p -> q) -> r) -> (s -> p)).
Proof.
intros. apply NNPP. intro. apply H2.
apply H. intro. apply H3. apply H0.
intro. contradiction. assumption.
Qed.
Theorem ex6: forall p q r : Prop,
(((p -> r) -> q) -> q) ->
((q -> r) -> (p -> r)).
Proof.
intros. apply NNPP. intro. apply H2.
apply H0. apply H. intro.
pose proof (H3 H1) as H4. contradiction.
Qed.
Theorem ex7: forall p s : Prop,
((p -> s) -> p) -> ((s -> p) -> p) -> p.
Proof.
intros. apply NNPP. intro. apply H1. apply H0.
intro. apply H. intro. (*contradiction.*)
assumption.
Qed.
Theorem ex8: forall a b c : Prop,
((a -> b) -> c) -> ((a -> c) -> c).
Proof.
intros. apply NNPP. intro. apply H1.
apply H. intro. apply NNPP. intro. apply H1.
apply (H0 H2).
Qed.
|
{"author": "limitedeternity", "repo": "PrPr-Labs", "sha": "0c83eb2dbf0c8b15e558ed7586d5e39e18a51c62", "save_path": "github-repos/coq/limitedeternity-PrPr-Labs", "path": "github-repos/coq/limitedeternity-PrPr-Labs/PrPr-Labs-0c83eb2dbf0c8b15e558ed7586d5e39e18a51c62/PrPr-01/Ex2_Church.v"}
|
\chapter{Platform-Level Interrupt Controller (PLIC)}
\label{plic}
This chapter describes the general architecture for the RISC-V
platform-level interrupt controller (PLIC), which prioritizes and
distributes global interrupts in a RISC-V system.
\section{PLIC Overview}
Figure~\ref{fig:plic} provides a quick overview of PLIC operation.
The PLIC connects global {\em interrupt sources}, which are usually
I/O devices, to {\em interrupt targets}, which are usually {\em hart
contexts}. The PLIC contains multiple {\em interrupt gateways}, one
per interrupt source, together with a {\em PLIC core} that performs
interrupt prioritization and routing. Global interrupts are sent from
their source to an {\em interrupt gateway} that processes the
interrupt signal from each source and sends a single {\em interrupt
request} to the PLIC core, which latches these in the core interrupt
pending bits (IP). Each interrupt source is assigned a separate
priority. The PLIC core contains a matrix of interrupt enable (IE)
bits to select the interrupts that are enabled for each target. The
PLIC core forwards an {\em interrupt notification} to one or more
targets if the targets have any pending interrupts enabled, and the
priority of the pending interrupts exceeds a per-target threshold.
When the target takes the external interrupt, it sends an {\em
interrupt claim} request to retrieve the identifier of the
highest-priority global interrupt source pending for that target from
the PLIC core, which then clears the corresponding interrupt source
pending bit. After the target has serviced the interrupt, it sends
the associated interrupt gateway an {\em interrupt completion} message
and the interrupt gateway can now forward another interrupt request
for the same source to the PLIC. The rest of this chapter describes
each of these components in detail, though many details are
necessarily platform specific.
\begin{figure}[tb]
\centering
\includegraphics[width=\textwidth]{figs/PLIC-block-diagram.pdf}
\caption{Platform-Level Interrupt Controller (PLIC) conceptual block
diagram. The figure shows the first two of potentially many
interrupt sources, and the first two of potentially many interrupt
targets. The figure is just intended to show the logic of the
PLIC's operation, not to represent a realistic implementation
strategy.}
\label{fig:plic}
\end{figure}
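The claim/completion handshake described above can be sketched in C for a
single target context. This is a minimal illustration only: the register
offsets below are assumptions modeled on common memory-mapped
implementations, since this chapter deliberately leaves the PLIC memory
map platform-specific.
\begin{verbatim}
#include <stdint.h>
/* Assumed layout: one combined claim/complete register per target
 * context.  Reading it claims the highest-priority pending interrupt
 * (clearing its IP bit in the PLIC core); writing the source ID back
 * signals completion to that source's gateway. */
#define PLIC_BASE        0x0c000000UL               /* assumed address */
#define PLIC_CLAIM(ctx)  (PLIC_BASE + 0x200004UL + (ctx) * 0x1000UL)
static inline uint32_t plic_claim(unsigned ctx)
{
    return *(volatile uint32_t *)PLIC_CLAIM(ctx);
}
static inline void plic_complete(unsigned ctx, uint32_t source)
{
    *(volatile uint32_t *)PLIC_CLAIM(ctx) = source;
}
/* Typical external-interrupt handler flow for one target context. */
void handle_external_interrupt(unsigned ctx)
{
    uint32_t source;
    while ((source = plic_claim(ctx)) != 0) {   /* 0: nothing pending */
        /* service_device(source); -- hypothetical per-device handler */
        plic_complete(ctx, source);
    }
}
\end{verbatim}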
\section{Interrupt Sources}
RISC-V harts can have both local and global interrupt sources. Only
global interrupt sources are handled by the PLIC.
\subsection{Local Interrupt Sources}
Each hart has a number of {\em local interrupt sources} that do not
pass through the PLIC, including the standard software interrupts and
timer interrupts for each privilege level. Local interrupts can be
serviced quickly since there will be minimal latency between the
source and the servicing hart, no arbitration is required to determine
which hart will service the request, and the servicing hart can
quickly determine the interrupt source using the {\tt mcause}
register.
All local interrupts follow a level-based model, where an interrupt is
pending if the corresponding bit in {\tt mip} is set. The interrupt
handler must clear the hardware condition that is causing the {\tt
mip} bit to be set to avoid retaking the interrupt after re-enabling
interrupts on exit from the interrupt handler.
Additional non-standard local interrupt sources can be made visible to
machine-mode by adding them to the high bits of the {\tt mip}/{\tt
mie} registers, with corresponding additional cause values returned
in the {\tt mcause} register. These additional non-standard local
interrupts may also be made visible to lower privilege levels, using
the corresponding bits in the {\tt mideleg} register. The priority of
non-standard local interrupt sources relative to external, timer, and
software interrupts is platform-specific.
\subsection{Global Interrupt Sources}
{\em Global interrupt sources} are those that are prioritized and
distributed by the PLIC. Depending on the platform-specific PLIC
implementation, any global interrupt source could be routed to any
hart context.
Global interrupt sources can take many forms, including
level-triggered, edge-triggered, and message-signalled. Some sources
might queue up a number of interrupt requests. All global interrupt
sources are converted to a common interrupt request format for the
PLIC.
\section{Interrupt Targets and Hart Contexts}
Interrupt targets are usually hart contexts, where a hart context is a
given privilege mode on a given hart (though there are other possible
interrupt targets, such as DMA engines). Not all hart contexts need
be interrupt targets; in particular, if a processor core does not
support delegating external interrupts to lower-privilege modes, then
the lower-privilege hart contexts will not be interrupt targets.
Interrupt notifications generated by the PLIC appear in the {\tt
meip}/{\tt seip}/{\tt ueip} bits of the {\tt mip}/{\tt sip}/{\tt
uip} registers for M/S/U modes, respectively. For the notifications
to appear in lower-privilege {\em x}{\tt ip} registers, the corresponding
external interrupts must have been delegated in the higher-privilege
{\em y}{\tt ideleg} registers.
Each processor core must define a policy on how simultaneous active
interrupts are taken by multiple hart contexts on the core. For the
simple case of a single stack of hart contexts, one for each supported
privileged mode, interrupts for higher-privilege contexts can preempt
execution of interrupt handlers for lower-privilege contexts. A
multithreaded processor core could run multiple independent interrupt
handlers on different hart contexts at the same time. A processor
core could also provide hart contexts that are only used for interrupt
handling to reduce interrupt service latency, and these might preempt
interrupt handlers for other harts on the same core.
The PLIC treats each interrupt target independently and does not take
into account any interrupt prioritization scheme used by a component
that contains multiple interrupt targets. As a result, the PLIC
provides no concept of interrupt preemption or nesting, so this must be
handled by the cores hosting multiple interrupt target contexts.
\section{Interrupt Gateways}
The interrupt gateways are responsible for converting global interrupt
signals into a common interrupt request format, and for controlling
the flow of interrupt requests to the PLIC core. At most one
interrupt request per interrupt source can be pending in the PLIC core
at any time, indicated by setting the source's IP bit. The gateway
only forwards a new interrupt request to the PLIC core after receiving
notification that the interrupt handler servicing the previous
interrupt request from the same source has completed.
If the global interrupt source uses level-sensitive interrupts, the
gateway will convert the first assertion of the interrupt level into
an interrupt request, but thereafter the gateway will not forward an
additional interrupt request until it receives an interrupt completion
message. On receiving an interrupt completion message, if the
interrupt is level-triggered and the interrupt is still asserted, a
new interrupt request will be forwarded to the PLIC core. The gateway
does not have the facility to retract an interrupt request once
forwarded to the PLIC core. If a level-sensitive interrupt source
deasserts the interrupt after the PLIC core accepts the request and
before the interrupt is serviced, the interrupt request remains
present in the IP bit of the PLIC core and will be serviced by a
handler, which will then have to determine that the interrupt device
no longer requires service.
If the global interrupt source was edge-triggered, the gateway will
convert the first matching signal edge into an interrupt request.
Depending on the design of the device and the interrupt handler,
between sending an interrupt request and receiving notice of its
handler's completion, the gateway might either ignore additional
matching edges or increment a counter of pending interrupts. In
either case, the next interrupt request will not be forwarded to the
PLIC core until the previous completion message has been received. If
the gateway has a pending interrupt counter, the counter will be
decremented when the interrupt request is accepted by the PLIC core.
Unlike dedicated-wire interrupt signals, message-signalled interrupts
(MSIs) are sent over the system interconnect via a message packet that
describes which interrupt is being asserted. The message is decoded
to select an interrupt gateway, and the relevant gateway then handles
the MSI similarly to an edge-triggered interrupt.
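For illustration only, the gateway behavior described above can be
sketched as the following C model. The structure fields, the
\texttt{pending\_count} counter, and the \texttt{plic\_set\_ip} hook are
hypothetical names introduced for this sketch and are not part of the
specification.
\begin{verbatim}
/* Hypothetical hook: raise this source's IP bit in the PLIC core. */
struct gateway;
void plic_set_ip(struct gateway *g);

/* Behavioral model of one interrupt gateway (non-normative). */
struct gateway {
    int in_flight;      /* request forwarded, completion not yet seen */
    int pending_count;  /* optional counter for edge-triggered sources */
    int level;          /* current level of a level-sensitive source */
};

/* Called on a matching signal edge or a decoded MSI. */
void gateway_edge(struct gateway *g)
{
    if (!g->in_flight) {
        g->in_flight = 1;
        plic_set_ip(g);
    } else {
        g->pending_count++;   /* or simply ignore the extra edge */
    }
}

/* Called when the handler sends the interrupt completion message. */
void gateway_complete(struct gateway *g)
{
    g->in_flight = 0;
    /* Level-sensitive: re-request while the level is still asserted.
       Edge-triggered: re-request if further edges were counted. */
    if (g->level || g->pending_count > 0) {
        if (g->pending_count > 0)
            g->pending_count--;
        g->in_flight = 1;
        plic_set_ip(g);
    }
}
\end{verbatim}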
\section{Interrupt Identifiers (IDs)}
Global interrupt sources are assigned small unsigned integer
identifiers, beginning at the value 1. An interrupt ID of 0 is
reserved to mean ``no interrupt''.
Interrupt identifiers are also used to break ties when two or more
interrupt sources have the same assigned priority. Smaller values of
interrupt ID take precedence over larger values of interrupt ID.
\section{Interrupt Priorities}
Interrupt priorities are small unsigned integers, with a
platform-specific maximum number of supported levels. The priority
value 0 is reserved to mean ``never interrupt'', and interrupt
priority increases with increasing integer values.
Each global interrupt source has an associated interrupt priority held
in a platform-specific memory-mapped register. Different interrupt
sources need not support the same set of priority values. A valid
implementation can hardwire all input priority levels. Interrupt
source priority registers should be \warl\ fields to allow software to
determine the number and position of read-write bits in each priority
specification, if any. To simplify discovery of supported priority
values, each priority register must support any combination of values
in the bits that are variable within the register, i.e., if there are
two variable bits in the register, all four combinations of values in
those bits must operate as valid priority levels.
\begin{commentary}
In the degenerate case, all priorities can be hardwired to the value
1, in which case input priorities are effectively determined by
interrupt ID.
The supported priority values can be determined as follows: 1) write
all zeros to the priority register then 2) read back the value. Any
set bits are hardwired to 1. Next, 3) write all ones to the
register, and 4) read back the value. Any clear bits are hardwired
to 0. Any set bits that were not found to be hardwired in step 2 are
variable. The supported priority levels are the set of values
obtained by substituting all combinations of ones and zeros in the
variable bits within the priority field.
\end{commentary}
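For illustration, the probing procedure above can be written down
directly. The sketch below assumes a 32-bit memory-mapped priority
register for source \texttt{id} at \texttt{PLIC\_BASE + 4*id}; both the
base address and the offset follow a layout used by several existing
implementations and are platform-specific assumptions, not part of this
specification.
\begin{verbatim}
#include <stdint.h>

#define PLIC_BASE 0x0c000000UL            /* platform-specific assumption */
#define PLIC_PRIORITY(id) \
    (*(volatile uint32_t *)(PLIC_BASE + 4 * (id)))

/* Report which priority bits are hardwired to 1 and which are
 * variable; bits in neither mask are hardwired to 0. */
void probe_priority(int id, uint32_t *wired1, uint32_t *variable)
{
    PLIC_PRIORITY(id) = 0;                /* 1) write all zeros */
    *wired1 = PLIC_PRIORITY(id);          /* 2) set bits are wired to 1 */
    PLIC_PRIORITY(id) = 0xffffffffu;      /* 3) write all ones */
    uint32_t ones = PLIC_PRIORITY(id);    /* 4) clear bits are wired to 0 */
    *variable = ones & ~*wired1;          /* remaining set bits vary */
}
\end{verbatim}
The supported priority levels are then all values formed by
substituting ones and zeros into the \texttt{variable} bits.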
\section{Interrupt Enables}
Each target has a vector of interrupt enable (IE) bits, one per
interrupt source. The target will not receive interrupts from sources
that are disabled. The IE bits for a single target should be packed
together as a bit vector in platform-specific memory-mapped control
registers to support rapid context switching of the IE bits for a
target. IE bits are \warl\ fields that can be hardwired to either 0
or 1.
\begin{commentary}
A large number of potential IE bits might be hardwired to zero in
cases where some interrupt sources can only be routed to
a subset of targets.
A larger number of bits might be wired to 1 for an embedded device
with fixed interrupt routing. Interrupt priorities, thresholds, and
hart-internal interrupt masking provide considerable flexibility in
ignoring external interrupts even if a global interrupt source is
always enabled.
\end{commentary}
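As an illustrative sketch, assuming the IE bits for a target are packed
as an array of 32-bit words (the actual width and address of this
vector are platform-specific), enabling or disabling one source reduces
to ordinary bit manipulation:
\begin{verbatim}
#include <stdint.h>

/* Enable or disable interrupt source `id` for one target, given a
 * pointer to that target's packed IE bit vector. */
static inline void plic_set_enable(volatile uint32_t *enables,
                                   int id, int on)
{
    uint32_t mask = 1u << (id % 32);
    if (on)
        enables[id / 32] |= mask;
    else
        enables[id / 32] &= ~mask;
}
\end{verbatim}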
\section{Interrupt Priority Thresholds}
Each interrupt target has an associated priority threshold, held in a
platform-specific memory-mapped register. Only active interrupts that
have a priority strictly greater than the threshold will cause an
interrupt notification to be sent to the target. Different interrupt
targets need not support the same set of priority threshold values.
Interrupt target threshold registers should be \warl\ fields to allow
software to determine the supported thresholds. A threshold register
should always be able to hold the value zero, in which case, no
interrupts are masked. If implemented, the threshold register will
usually also be able to hold the maximum priority level, in which case
all interrupts are masked.
\begin{commentary}
A simple valid implementation is to hardwire the threshold to zero, in
which case it has no effect, and the individual enable bits must be
saved and restored to achieve the same masking. While the
function of the threshold can be achieved by changing the
interrupt-enable bits, manipulating a single threshold value avoids
the target having to consider the individual priority levels of each
interrupt source, and saving and restoring all the interrupt enables.
Changing the threshold quickly might be especially important for
systems that move frequently between power states.
\end{commentary}
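For illustration, assuming the target's threshold is a single
memory-mapped word at a platform-specific address, masking and
unmasking via the threshold is a short save/restore rather than a walk
over all IE bits; \texttt{PLIC\_MAX\_PRIO} below stands for the
(platform-specific) maximum priority value.
\begin{verbatim}
#include <stdint.h>

/* Raise this target's threshold to `prio`, masking all interrupts of
 * priority <= prio, and return the previous value for later restore. */
static inline uint32_t plic_raise_threshold(volatile uint32_t *threshold,
                                            uint32_t prio)
{
    uint32_t old = *threshold;
    *threshold = prio;
    return old;
}

/* Usage:
 *   old = plic_raise_threshold(thr, PLIC_MAX_PRIO);
 *   ... section with all PLIC interrupts masked ...
 *   *thr = old;
 */
\end{verbatim}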
\section{Interrupt Notifications}
Each interrupt target has an {\em external interrupt pending} (EIP)
bit in the PLIC core that indicates that the corresponding target has
a pending interrupt waiting for service. The value in EIP can change
as a result of changes to state in the PLIC core, brought on by
interrupt sources, interrupt targets, or other agents manipulating
register values in the PLIC. The value in EIP is communicated to the
destination target as an interrupt notification. If the target is a
RISC-V hart context, the interrupt notifications arrive on the {\tt
meip}/{\tt seip}/{\tt ueip} bits depending on the
privilege level of the hart context.
\begin{commentary}
In simple systems, the interrupt notifications will be simple wires
connected to the processor implementing a hart. In more complex
platforms, the notifications might be routed as messages across a
system interconnect.
\end{commentary}
The PLIC hardware only supports multicasting of interrupts, such that
all enabled targets will receive interrupt notifications for a given
active interrupt.
\begin{commentary}
Multicasting provides rapid response since the fastest responder
claims the interrupt, but can be wasteful in high-interrupt-rate
scenarios if multiple harts take a trap for an interrupt that only one
can successfully claim. Software can modulate the PLIC IE bits as
part of each interrupt handler to provide alternate policies, such as
interrupt affinity or round-robin unicasting.
\end{commentary}
Depending on the platform architecture and the method used to
transport interrupt notifications, these might take some time to be
received at the targets. The PLIC is guaranteed to eventually deliver
all state changes in EIP to all targets, provided there is no
intervening activity in the PLIC core.
\begin{commentary}
The value in an interrupt notification is only guaranteed to hold an
EIP value that was valid at some point in the past. In particular, a
second target can respond and claim an interrupt while a notification
to the first target is still in flight, such that when the first
target tries to claim the interrupt it finds it has no active
interrupts in the PLIC core.
\end{commentary}
\section{Interrupt Claims}
Sometime after a target receives an interrupt notification, it might
decide to service the interrupt. The target sends an {\em interrupt
claim} message to the PLIC core, which will usually be implemented
as a non-idempotent memory-mapped I/O control register read. On
receiving a claim message, the PLIC core will atomically determine the
ID of the highest-priority pending interrupt for the target and then
clear down the corresponding source's IP bit. The PLIC core will then
return the ID to the target. The PLIC core will return an ID of zero
if there were no pending interrupts for the target when the claim was
serviced.
After the highest-priority pending interrupt is claimed by a target
and the corresponding IP bit is cleared, other lower-priority pending
interrupts might then become visible to the target, and so the PLIC
EIP bit might not be cleared after a claim. The interrupt handler
can check the local {\tt meip}/{\tt seip}/{\tt ueip} bits
before exiting the handler, to allow more efficient service of other
interrupts without first restoring the interrupted context and taking
another interrupt trap.
It is always legal for a hart to perform a claim even if the EIP is
not set. In particular, a hart could set the threshold value to maximum
to disable interrupt notifications and instead poll for active
interrupts using periodic claim requests, though a simpler approach to
implement polling would be to clear the external interrupt enable in
the corresponding {\em x}{\tt ie} register for privilege mode {\em x}.
\section{Interrupt Completion}
After a handler has completed service of an interrupt, the associated
gateway must be sent an interrupt completion message, usually as a
write to a non-idempotent memory-mapped I/O control register. The
gateway will only forward additional interrupts to the PLIC core after
receiving the completion message.
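Putting claims and completions together, a minimal external-interrupt
handler can be sketched as below. The loop services newly visible
lower-priority interrupts before returning, as suggested above. The
single claim/completion register and the \texttt{dispatch} helper are
assumptions of this sketch: many implementations expose the claim read
and the completion write at the same platform-specific address, but
this specification does not require that arrangement.
\begin{verbatim}
#include <stdint.h>

void dispatch(uint32_t id);   /* hypothetical per-device service routine */

/* Minimal external-interrupt handler for one target, given the
 * target's claim/completion register. */
void plic_handle_external(volatile uint32_t *claim_complete)
{
    uint32_t id;
    /* A claim returning 0 means no interrupt is currently pending for
     * this target (e.g., another target already claimed it). */
    while ((id = *claim_complete) != 0) {
        dispatch(id);             /* service the device */
        *claim_complete = id;     /* completion message to the gateway */
    }
}
\end{verbatim}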
\section{Interrupt Flow}
Figure~\ref{fig:intflow} shows the messages flowing between agents
when handling interrupts via the PLIC.
\begin{figure}[hb!]
\centering
\includegraphics[width=4.0in]{figs/PLIC-interrupt-flow.pdf}
\caption{ Flow of interrupt processing via the PLIC.}
\label{fig:intflow}
\end{figure}
The gateway will only forward a single interrupt request at a time to
the PLIC, and will not forward subsequent interrupt requests until an
interrupt completion is received. The PLIC will set the IP bit once
it accepts an interrupt request from the gateway, and sometime later
forward an interrupt notification to the target. The target might
take a while to respond to a new interrupt arriving, but will then
send an interrupt claim request to the PLIC core to obtain the
interrupt ID. The PLIC core will atomically return the ID and clear
the corresponding IP bit, after which no other target can claim the
same interrupt request. Once the handler has processed the interrupt,
it sends an interrupt completion message to the gateway to allow a new
interrupt request.
\section{PLIC Core Specification}
The operation of the PLIC core can be specified as a non-deterministic
finite-state machine with input and output message queues, with the
following atomic actions:
\begin{itemize}
\item {\bf Write Register: } A message containing a register write
request is dequeued. One of the internal registers is written,
where an internal register can be a priority, an interrupt-enable
(IE), or a threshold.
\item {\bf Accept Request: } If the IP bit corresponding to the
interrupt source is clear, a message containing an interrupt request
from a gateway is dequeued and the IP bit is set.
\item {\bf Process Claim: } An interrupt claim message is dequeued. A
claim-response message is enqueued to the requester with the ID of
the highest-priority active interrupt for that target, and the IP
bit corresponding to this interrupt source is cleared.
\end{itemize}
The value in the EIP bit is determined as a combinational function of
the PLIC Core state. Interrupt notifications are sent via an
autonomous process that ensures the EIP value is eventually reflected
at the target.
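As a non-normative model, the claim selection rule and the EIP function
for one target can be written out as plain loops over mirrored
architectural state; the array names and the source count \texttt{NSRC}
are illustrative only.
\begin{verbatim}
#include <stdint.h>

#define NSRC 1024   /* illustrative number of interrupt sources */

/* Process Claim: highest-priority pending-and-enabled source. The
 * strict comparison keeps the smallest ID on priority ties, and
 * priority-0 ("never interrupt") sources are never selected. */
uint32_t select_claim(const int ip[], const int ie[],
                      const uint32_t prio[])
{
    uint32_t best = 0, best_prio = 0;
    for (uint32_t id = 1; id < NSRC; id++)
        if (ip[id] && ie[id] && prio[id] > best_prio) {
            best = id;
            best_prio = prio[id];
        }
    return best;    /* 0 means no active interrupt */
}

/* EIP as a combinational function of the core state: any pending,
 * enabled interrupt whose priority exceeds the target's threshold. */
int eip(const int ip[], const int ie[],
        const uint32_t prio[], uint32_t threshold)
{
    for (uint32_t id = 1; id < NSRC; id++)
        if (ip[id] && ie[id] && prio[id] > threshold)
            return 1;
    return 0;
}
\end{verbatim}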
Note that the operation of the interrupt gateways is decoupled from
the PLIC core. A gateway can handle parsing of interrupt signals and
processing interrupt completion messages concurrently with other
operations in the PLIC core.
\begin{commentary}
Figure~\ref{fig:plic} is a high-level conceptual view of the PLIC
design. The PLIC core can be implemented in many ways provided its
behavior can always be understood as following from some sequential
ordering of these atomic actions. In particular, the PLIC might
process multiple actions in a single clock cycle, or might process
each action over many clock cycles.
\end{commentary}
\section{Controlling Access to the PLIC}
In the expected use case, only machine mode accesses the source
priority, source pending, and target interrupt enables to configure
the interrupt subsystem. Lower-privilege modes access these features
via ABI or SBI calls. The interrupt enables act as a protection
mechanism where a target can only signal completion to an interrupt
gateway that is currently enabled for that target.
Interrupt handlers that run with lower than machine-mode privilege
need only be able to perform a claim read and a completion write, and
to set their target threshold value. The memory map for these
registers should allow machine mode to protect different targets from
each other's accesses, using either physical memory protection or
virtual memory page protections.
|
{"hexsha": "f976e496d952c16fefc6da3ee3eb25fd958fcaa2", "size": 20933, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/plic.tex", "max_stars_repo_name": "kdockser/riscv-isa-manual", "max_stars_repo_head_hexsha": "015b13e021d3a0604435a0e81ef98ac2e0e4fb21", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-13T21:49:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T11:23:49.000Z", "max_issues_repo_path": "src/plic.tex", "max_issues_repo_name": "kdockser/riscv-isa-manual", "max_issues_repo_head_hexsha": "015b13e021d3a0604435a0e81ef98ac2e0e4fb21", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plic.tex", "max_forks_repo_name": "kdockser/riscv-isa-manual", "max_forks_repo_head_hexsha": "015b13e021d3a0604435a0e81ef98ac2e0e4fb21", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-30T14:52:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T10:28:57.000Z", "avg_line_length": 48.9088785047, "max_line_length": 73, "alphanum_fraction": 0.8079587255, "num_tokens": 4436}
|
The sophomore walk is a nickname for Scripps Drive and the Anderson plaza. The term stems from the fact that Davis High School sophomores lack a driver's license and have to walk or bike somewhere to obtain grub if they don't want to have the on-site food. DHS is an open campus at lunch time, although there is talk of bringing fast food vendors to the MPR area (the Multi-Purpose Room with triangle windows visible from Oak Avenue) after it is renovated and making the campus closed.
Residents of Scripps have had problems with littering and fights, so there are motorcycle cops and, more commonly, the yard narcs (noon duty supervisors and campus safety officers) in the DHS golf cart to make sure the students don't act a fool on Scripps or in the Save Mart complex. Recently, however, there has been no supervision.
Don't try to visit the Save Mart complex during the lunch hour for DHS (11:30 to 1ish) unless you like lines out the door and a grip of unruly high school students.
DaVinci High School students also have their own variation of the sophomore walk involving the Davis Manor Center adjacent to the school, often causing similar problems, albeit to a lesser extent, along the Dollar Tree and Red Orchid.
20070412 19:47:09 hahahahaha... a grip... didn't think they used that one up here Users/TylerFelix
20070910 11:34:07 As a high school student, I found the cops to be extremely excessive and obnoxious, zooming up to people on motorbikes at high speeds and threatening to ticket and even arrest them for walking in the gutter. I feel they caused more problems than they solved. Users/TobinJones
20080505 19:49:28 I'll miss living on the sophomore walk. I actually liked scaring kids with my car as I drove to and from home. But more so, I liked swerving around them like they are an obstacle course. One time, some group of kids started dancing to the music I was playing when I had my windows down... Users/SunjeetBaadkar
20100413 01:34:20 I used to work for the Baskin-Robbins that's in Anderson Plaza, and believe me, we used to dread lunchtime. Now, I know not all students are bad, having been one myself, but the ones who are spoil it for everyone else. We would have trouble with vandalism, people stealing ice cream out of the freezer, and the fact that everyone wanted the time-consuming blended cappuccino blasts or smoothies, and started getting unruly if there was more than a thirty-second wait. That sort of thing is why there is an excessive police and administrative presence at lunchtime. The final straw was when they actually SET FIRE to one of our plastic tables outside. Yeah, just.. no.
Personally? I always brown-bagged it when I was a student. That's what lockers are for, plus it's easier to eat healthy. Users/Flynn
|
{"hexsha": "2a9321563585d918c741c8828cfe75b7d13def93", "size": 2774, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Sophomore_Walk.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Sophomore_Walk.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Sophomore_Walk.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 146.0, "max_line_length": 687, "alphanum_fraction": 0.7970439798, "num_tokens": 636}
|
import argparse
from multiprocessing import Pool, cpu_count, Manager
import os
from collections import defaultdict
import numpy as np
import bpy
from utils.io import write_serialized, read_serialized
from utils.misc import get_host_id
from utils.shape_net import SIM_SHAPE_NET_FOLDER, RENDER_SHAPE_NET_FOLDER, SHAPE_NET_CATEGORY, SHAPE_NET_NUMS, mkdir
def parse_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--start_index', help='image index to start', type=int)
parser.add_argument("--stride", help="image index stride", type=int, default=1)
parser.add_argument("--reduce", help="reduce all results", type=int, default=0)
return parser.parse_args()
def obj_to_blend(cat_name, shape_name, all_dimensions):
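    """Convert one ShapeNet .obj into a normalized, material-free .blend
    file and record its half-dimensions in `all_dimensions`."""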
name = cat_name + shape_name
file_path = os.path.join(SIM_SHAPE_NET_FOLDER, cat_name, "{}.obj".format(shape_name))
mkdir(os.path.join(RENDER_SHAPE_NET_FOLDER, cat_name))
out_path = os.path.join(RENDER_SHAPE_NET_FOLDER, cat_name, "{}.blend".format(shape_name))
bpy.ops.wm.read_homefile()
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete()
bpy.ops.import_scene.obj(filepath=file_path, split_mode="OFF")
bpy.context.scene.objects.active = bpy.context.scene.objects[0]
object = bpy.context.selected_objects[0]
object.name = name
# setting the centre to the center of bounding box
max_dimension = max(object.dimensions)
scaling = 2. / max_dimension
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
object.data.show_double_sided = True
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.transform.translate(value=[0, 0, 0])
bpy.ops.transform.rotate(value=np.pi / 2, axis=(1, 0, 0))
bpy.ops.transform.resize(value=[scaling, scaling, scaling])
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.mode_set(mode="OBJECT")
all_dimensions[name] = list(x / 2 for x in object.dimensions)
# remove all materials
for material in bpy.data.materials:
material.user_clear()
bpy.data.materials.remove(material)
for ob in bpy.context.selected_editable_objects:
ob.active_material_index = 0
for i in range(len(ob.material_slots)):
bpy.ops.object.material_slot_remove({'object': ob})
bpy.ops.wm.save_as_mainfile(filepath=out_path)
print("{} generated".format(name))
if __name__ == '__main__':
args = parse_args()
if args.start_index is None:
args.start_index = get_host_id() % args.stride
if args.reduce:
all_dimensions = dict()
for i in range(args.stride):
all_dimensions.update(
read_serialized(os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions_{:02d}.json".format(i))))
write_serialized(dict(all_dimensions),
os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions.json"))
to_rotate_index = defaultdict(int)
for name, dimension in all_dimensions.items():
# x > y bad
if dimension[0] > dimension[1]:
to_rotate_index[name[:4]] += 1
else:
to_rotate_index[name[:4]] -= 1
write_serialized(dict(to_rotate_index),
os.path.join(SIM_SHAPE_NET_FOLDER, "categories_to_rotate.json"))
else:
manager = Manager()
all_dimensions = manager.dict()
worker_args = []
for cat_id in SHAPE_NET_CATEGORY.keys():
for shape_id in range(SHAPE_NET_NUMS[cat_id]):
shape_id = "{:06d}".format(shape_id)
worker_args.append((cat_id, shape_id, all_dimensions))
worker_args = worker_args[args.start_index::args.stride]
with Pool(cpu_count()) as p:
p.starmap(obj_to_blend, worker_args)
write_serialized(dict(all_dimensions),
os.path.join(SIM_SHAPE_NET_FOLDER, "all_dimensions_{:02d}.json".format(args.start_index)))
|
{"hexsha": "0e42cc4947460bf48d46f78d7c11ba8a88adc915", "size": 3993, "ext": "py", "lang": "Python", "max_stars_repo_path": "render/data/builder/collect_blend.py", "max_stars_repo_name": "JerryLingjieMei/ADEPT-Dataset-Release", "max_stars_repo_head_hexsha": "900d9d5c7e780a5daa06f0484d91539a4ca92ff8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-11-06T23:13:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-04T17:28:31.000Z", "max_issues_repo_path": "render/data/builder/collect_blend.py", "max_issues_repo_name": "JerryLingjieMei/ADEPT-Dataset-Release", "max_issues_repo_head_hexsha": "900d9d5c7e780a5daa06f0484d91539a4ca92ff8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "render/data/builder/collect_blend.py", "max_forks_repo_name": "JerryLingjieMei/ADEPT-Dataset-Release", "max_forks_repo_head_hexsha": "900d9d5c7e780a5daa06f0484d91539a4ca92ff8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-12-05T02:34:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-14T02:33:32.000Z", "avg_line_length": 36.6330275229, "max_line_length": 116, "alphanum_fraction": 0.6786877035, "include": true, "reason": "import numpy", "num_tokens": 893}
|
import random
import torch
import numpy as np
from scipy.linalg import sqrtm
import sklearn.datasets
from sklearn.mixture import GaussianMixture
class Sampler:
def __init__(
self, device='cuda',
dtype=torch.float,
requires_grad=False
):
self.device = device
self.dtype = dtype
self.requires_grad = requires_grad
def sample(self, batch_size=5):
pass
class SwissRollSampler(Sampler):
def __init__(
self, dim=2, device='cuda',
dtype=torch.float, requires_grad=False
):
super(SwissRollSampler, self).__init__(
device=device, dtype=dtype, requires_grad=requires_grad
)
assert dim == 2
self.dim = 2
def sample(self, batch_size=10):
batch = sklearn.datasets.make_swiss_roll(
n_samples=batch_size,
noise=0.8
)[0].astype('float32')[:, [0, 2]] / 7.5
return torch.tensor(
batch, dtype=self.dtype,
device=self.device, requires_grad=self.requires_grad
)
class StandartNormalSampler(Sampler):
def __init__(
self, dim=1, device='cuda',
dtype=torch.float, requires_grad=False
):
super(StandartNormalSampler, self).__init__(
device=device, dtype=dtype, requires_grad=requires_grad
)
self.dim = dim
def sample(self, batch_size=10):
return torch.randn(
batch_size, self.dim, dtype=self.dtype,
device=self.device, requires_grad=self.requires_grad
)
class StandartUniformSampler(Sampler):
def __init__(
self, dim=1, device='cuda',
dtype=torch.float, requires_grad=False
):
super(StandartUniformSampler, self).__init__(
device=device, dtype=dtype, requires_grad=requires_grad
)
self.dim = dim
def sample(self, batch_size=10):
return torch.rand(
batch_size, self.dim, dtype=self.dtype,
device=self.device, requires_grad=self.requires_grad
)
class BallUniformSampler(Sampler):
def __init__(
self, dim=1, device='cuda',
dtype=torch.float, requires_grad=False
):
super(BallUniformSampler, self).__init__(
device=device, dtype=dtype, requires_grad=requires_grad
)
self.dim = dim
def sample(self, batch_size=10):
batch = torch.randn(
batch_size, self.dim,
device=self.device, dtype=self.dtype
)
batch /= torch.norm(batch, dim=1)[:, None]
r = torch.rand(
batch_size, device=self.device, dtype=self.dtype
) ** (1. / self.dim)
        # clone/detach instead of torch.tensor(tensor) to avoid the
        # copy-construct warning; the result is identical
        batch = (batch.transpose(0, 1) * r).transpose(0, 1)
        return batch.clone().detach().requires_grad_(self.requires_grad)
class Mix8GaussiansSampler(Sampler):
def __init__(
self, with_central=False, std=1, r=12, dim=2, device='cuda',
dtype=torch.float, requires_grad=False
):
super(Mix8GaussiansSampler, self).__init__(
device=device, dtype=dtype, requires_grad=requires_grad
)
assert dim == 2
self.dim = 2
self.std, self.r = std, r
self.with_central = with_central
centers = [
(1, 0), (-1, 0), (0, 1), (0, -1),
(1. / np.sqrt(2), 1. / np.sqrt(2)),
(1. / np.sqrt(2), -1. / np.sqrt(2)),
(-1. / np.sqrt(2), 1. / np.sqrt(2)),
(-1. / np.sqrt(2), -1. / np.sqrt(2))
]
if self.with_central:
centers.append((0, 0))
self.centers = torch.tensor(
centers, device=self.device, dtype=self.dtype
)
def sample(self, batch_size=10):
batch = torch.randn(
batch_size, self.dim,
device=self.device, dtype=self.dtype
)
indices = random.choices(range(len(self.centers)), k=batch_size)
batch *= self.std
batch += self.r * self.centers[indices, :]
        # clone/detach instead of torch.tensor(tensor); same semantics
        return batch.clone().detach().requires_grad_(self.requires_grad)
class MixN2GaussiansSampler(Sampler):
def __init__(self, n=5, dim=2, std=1, step=9, device='cuda',
dtype=torch.float, requires_grad=False
):
super(MixN2GaussiansSampler, self).__init__(
device=device, dtype=dtype, requires_grad=requires_grad
)
assert dim == 2
self.dim = 2
self.std, self.step = std, step
self.n = n
grid_1d = np.linspace(-(n-1) / 2., (n-1) / 2., n)
xx, yy = np.meshgrid(grid_1d, grid_1d)
centers = np.stack([xx, yy]).reshape(2, -1).T
self.centers = torch.tensor(
centers,
device=self.device,
dtype=self.dtype
)
def sample(self, batch_size=10):
batch = torch.randn(
batch_size, self.dim,
device=self.device, dtype=self.dtype
)
indices = random.choices(range(len(self.centers)), k=batch_size)
batch *= self.std
batch += self.step * self.centers[indices, :]
        # clone/detach instead of torch.tensor(tensor); same semantics
        return batch.clone().detach().requires_grad_(self.requires_grad)
class TensorDatasetSampler(Sampler):
def __init__(
self, dataset, transform=None, storage='cpu', storage_dtype=torch.float, device='cuda',
dtype=torch.float, requires_grad=False
):
super(TensorDatasetSampler, self).__init__(
device=device, dtype=dtype, requires_grad=requires_grad
)
self.storage = storage
if transform is not None:
self.transform = transform
else:
self.transform = lambda t: t
self.storage_dtype = storage_dtype
self.dataset = torch.tensor(
dataset, device=storage, dtype=storage_dtype, requires_grad=False
)
def sample(self, batch_size=10):
if batch_size:
ind = random.choices(range(len(self.dataset)), k=batch_size)
else:
ind = range(len(self.dataset))
        # index the stored tensor and move/cast it without the
        # torch.tensor(tensor) copy-construct warning
        batch = self.transform(
            self.dataset[ind].to(device=self.device, dtype=self.dtype)
        ).detach()
if self.requires_grad:
batch.requires_grad_(True)
return batch
class NormalTensorDatasetSampler(Sampler):
def __init__(
self, dataset, device='cuda',
dtype=torch.float, requires_grad=False,
n_components=1
):
super(NormalTensorDatasetSampler, self).__init__(
device=device, dtype=dtype, requires_grad=requires_grad
)
self.sampler = GaussianMixture(n_components)
self.sampler.fit(dataset)
def sample(self, batch_size=10):
if not batch_size:
batch_size = 10
batch = torch.tensor(
self.sampler.sample(batch_size)[0], device=self.device,
dtype=self.dtype, requires_grad=self.requires_grad
)
return batch
class Transformer(object):
def __init__(
self, device='cuda', dtype=torch.float,
requires_grad=False
):
self.device = device
self.dtype = dtype
self.requires_grad = requires_grad
class NormalNoiseTransformer(Transformer):
def __init__(
self, std=0.01,
device='cuda', dtype=torch.float,
requires_grad=False
):
super(NormalNoiseTransformer, self).__init__(
device=device, dtype=dtype,
requires_grad=requires_grad
)
self.std = std
def fit(self, base_sampler):
self.base_sampler = base_sampler
def sample(self, batch_size=4):
        # detach/clone instead of torch.tensor(tensor); same semantics
        batch = self.base_sampler.sample(batch_size).detach().clone().to(
            device=self.device, dtype=self.dtype
        )
batch = batch + self.std * torch.randn_like(batch)
batch = batch.detach()
batch.requires_grad_(self.requires_grad)
return batch
class StandardNormalScaler(Transformer):
def __init__(
self, device='cuda',
dtype=torch.float,
requires_grad=False
):
super(StandardNormalScaler, self).__init__(
device=device, dtype=dtype,
requires_grad=requires_grad
)
def fit(self, base_sampler, batch_size=1000):
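        # estimate mean and covariance from one large batch, then build
        # the whitening transform (inverse matrix square root of cov)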
self.base_sampler = base_sampler
batch = self.base_sampler.sample(batch_size).cpu().detach().numpy()
mean, cov = np.mean(batch, axis=0), np.cov(batch.T)
self.mean = torch.tensor(
mean, device=self.device, dtype=self.dtype
)
multiplier = sqrtm(cov)
self.multiplier = torch.tensor(
multiplier, device=self.device, dtype=self.dtype
)
self.inv_multiplier = torch.tensor(
np.linalg.inv(multiplier),
device=self.device, dtype=self.dtype
)
torch.cuda.empty_cache()
def sample(self, batch_size=10):
        # detach/clone instead of torch.tensor(tensor); the copy keeps
        # the in-place -= and @= below from touching the sampler output
        batch = self.base_sampler.sample(batch_size).detach().clone().to(
            device=self.device, dtype=self.dtype
        )
batch -= self.mean
batch @= self.inv_multiplier
if self.requires_grad:
batch.requires_grad_(True)
return batch
def inverse_transform(self, batch):
batch @= self.multiplier
batch += self.mean
return batch
class LinearTransformer(Transformer):
def __init__(
self, weight, bias=None,
device='cuda', dtype=torch.float,
requires_grad=False
):
super(LinearTransformer, self).__init__(
device=device, dtype=dtype,
requires_grad=requires_grad
)
self.weight = torch.tensor(weight, device=device, dtype=dtype, requires_grad=False)
if bias is not None:
self.bias = torch.tensor(bias, device=device, dtype=dtype, requires_grad=False)
else:
self.bias = None
def fit(self, base_sampler):
self.base_sampler = base_sampler
def sample(self, batch_size=4):
        # detach/clone instead of torch.tensor(tensor); same semantics
        batch = self.base_sampler.sample(batch_size).detach().clone().to(
            device=self.device, dtype=self.dtype
        )
with torch.no_grad():
batch = batch @ self.weight.T
if self.bias is not None:
batch += self.bias
batch = batch.detach()
batch.requires_grad_(self.requires_grad)
return batch
|
{"hexsha": "403c78d5befc0b1414fb652c97f8173b45f639c6", "size": 10876, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/distributions.py", "max_stars_repo_name": "iamalexkorotin/Wasserstein2GenerativeNetworks", "max_stars_repo_head_hexsha": "8b0b10a90456893da07805a5d8e910b275223658", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2020-08-01T11:43:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T03:57:36.000Z", "max_issues_repo_path": "src/distributions.py", "max_issues_repo_name": "iamalexkorotin/Wasserstein2GenerativeNetworks", "max_issues_repo_head_hexsha": "8b0b10a90456893da07805a5d8e910b275223658", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-07T12:35:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T11:56:14.000Z", "max_forks_repo_path": "src/distributions.py", "max_forks_repo_name": "iamalexkorotin/Wasserstein2GenerativeNetworks", "max_forks_repo_head_hexsha": "8b0b10a90456893da07805a5d8e910b275223658", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-08-21T02:42:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T20:36:13.000Z", "avg_line_length": 31.5246376812, "max_line_length": 95, "alphanum_fraction": 0.5766826039, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2394}
|
import numpy as np
import matplotlib.pyplot as plt
def energy(string_name):
    """
    Generate the plots of energy as a function of time.
    """
data = np.loadtxt(string_name, dtype=float)
time = data[:,0]
energy_K = 0.01*data[:,1]
energy_U = 0.01*data[:,2]
total_energy = 0.01*data[:,3]
plt.plot(time, energy_K, label="kinetic energy")
plt.plot(time, energy_U, label="potential energy")
plt.plot(time, total_energy, label="total energy")
plt.legend(loc='upper right', bbox_to_anchor=(1.0, 0.9))
plt.grid(True, linestyle="--", color="0.5")
plt.xlim(xmin=time[0], xmax=time[-1])
#plt.title("Energía del Sistema", fontsize=20)
plt.ylabel("$E$[$x10^2$]", fontsize=20)
plt.xlabel("$t$", fontsize=20)
plt.show()
def histogram(string_name):
    """
    Generate the velocity histogram.
    """
archive = open(string_name, "r")
data = np.array([float(value.replace("\n", "")) for value in archive.readlines()])
media = data.mean()
sigma = data.std()
    num_bins = 19
    # density=True replaces the `normed` argument removed from matplotlib
    n, bins, patches = plt.hist(data, num_bins, density=True, facecolor="blue", alpha=1.0, edgecolor="black",
                                label=" $\sigma$ = {0:.3f}".format(sigma))
    # Gaussian overlay computed directly (mlab.normpdf was removed from matplotlib)
    y = np.exp(-0.5 * ((bins - media) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))
    plt.plot(bins, y, "k--")
plt.xlabel('$|v|$', fontsize=20)
plt.ylabel('$P(|v|)/N$', fontsize=20)
#plt.title("Histograma", fontsize=20)
plt.grid(True, linestyle="--", color="0.5")
plt.legend()
plt.subplots_adjust(left=0.15)
plt.show()
def main():
energy("Energy")
histogram("histogr_1")
if __name__ == '__main__':
main()
|
{"hexsha": "3526c58b8e294d28cf08f67fda708a9701cbe338", "size": 1667, "ext": "py", "lang": "Python", "max_stars_repo_path": "exercise_1/graphics.py", "max_stars_repo_name": "williamGOC/Molecular-Dynamic", "max_stars_repo_head_hexsha": "9f06b0466ddba03c76b6507e1616b08e1ff20de3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-30T10:51:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-30T10:52:13.000Z", "max_issues_repo_path": "exercise_1/graphics.py", "max_issues_repo_name": "williamGOC/Molecular-Dynamic", "max_issues_repo_head_hexsha": "9f06b0466ddba03c76b6507e1616b08e1ff20de3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exercise_1/graphics.py", "max_forks_repo_name": "williamGOC/Molecular-Dynamic", "max_forks_repo_head_hexsha": "9f06b0466ddba03c76b6507e1616b08e1ff20de3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-21T16:53:47.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-21T16:53:47.000Z", "avg_line_length": 28.7413793103, "max_line_length": 106, "alphanum_fraction": 0.6178764247, "include": true, "reason": "import numpy", "num_tokens": 485}
|
\documentclass[11pt]{beamer}
\usepackage{url}
\usepackage[utf8]{inputenc}
\parindent0pt
\parskip3pt
\newcommand{\Rahmen}[2]{
\setlength{\fboxsep}{12pt}\begin{center}
\shadowbox{\parbox{#1\textwidth}{\em #2}}\end{center}}
% Designelemente
\usetheme{Hannover}
\beamertemplatenavigationsymbolsempty
\title[The Inventor School Movement in the GDR]{The Heritage of the Inventor
School Movement in the GDR}
\subtitle{Presentation at the TRIZ Summit 2019 in Minsk}
\author[Hans-Gert Gr\"abe]{Prof. Hans-Gert Gräbe}
\institute{Institut f\"ur Informatik, Universit\"at Leipzig,\\
\url{http://bis.informatik.uni-leipzig.de/HansGertGraebe/}}
\date{June 14, 2019}
\begin{document}
\begin{frame}
\maketitle
\end{frame}
\section{Inventor School Movement in the GDR -- The Facts}
\begin{frame}{Inventor School Movement in the GDR -- The Facts}
\small
\textbf{Size:} Between 1981 and 1990 in the GDR there were about 300
inventor schools with about 7\,000 participants. These schools followed a
standard methodology based on TRIZ ideas that evolved continuously from
1982 onward.
\textbf{Outcome:} There are no precise statistics, but it can be estimated
that some 600 patent applications and 1\,000 practical problem
solutions were achieved.
\textbf{Materials:} From 1982, the participants were provided with
specially developed methodical handout material -- a small book. Authors:
Michael Herrlich and others. In 1988/89, the material was significantly
enhanced and was now available in two small books, both very demanding.
Authors: Hans-Jochen Rindfleisch and Rainer Thiel.
\end{frame}
\begin{frame}{Inventor School Movement in the GDR -- Their Methodology}
\small
One to two dozen engineers from an industrial plant gather for two courses
in a rural place, each lasting a week, to learn inventive problem-solving
methods and to solve one to three business problems in one to three
working groups.
In the first week about 12 hours lectures are offered. In about 40 hours of
teamwork, a problem is exposed and a solution is created. The moderation of
each group -- ideally 7 participants -- is done by an experienced inventor,
who acts as a methodologist and trainer.
In the following weeks, the patent study is deepened in the company, and
calculations, hand tests, and laboratory tests are carried out.
Finally, a second week follows in the rural place to complete patent
applications and initiate the start of the pilot series.
\end{frame}
\section{ARIZ as Method in the Analysis of Social Processes}
\begin{frame}{ARIZ as Method in the Analysis of Social Processes}
How can one methodologically analyze this development, driven as it was by
contradictions on several levels? \vfill
Method = the consciousness of the form of the inner self-movement of the
content. (Hegel) \vfill
From an \textbf{ARIZ-like system approach} we get, as a first approximation:
\begin{itemize}
\item The socio-political system (SPS) as \emph{supersystem}
\item The inventive system (IS)
\item The particular inventor schools as \emph{subsystems}.
\end{itemize}
\end{frame}
\begin{frame}{Inventor School Movement in the GDR -- Periodization}
The Inventor School Movement grew up within the highly contradictory,
evolving socio-political conditions of the SPS of ``real
socialism''. \vfill
\small
\textbf{Phase A: 1962 -- 1970}
IS: Incubation phase of the ideas that later led to inventor schools. Leeway
for protagonists to propagate these ideas.
SPS: Encouraging socio-political conditions on the background of ongoing
ideological narrowness: half-hearted political experiments on economic
mechanisms using modern scientific ways of thinking (cybernetics,
prognostics, operations research, mathematical modeling, computer use)
within the ``New Economic System of Planning and Management'' (NÖSPL).
\end{frame}
\begin{frame}{Inventor School Movement in the GDR -- Periodization}
\small
\textbf{Phase B: 1971 -- 1978}
IS: Formation of the inventor school concept and of a
network of enthusiasts.
SPS: Restoration of a rigid centralism under Honecker. Degradation of the
timid turn to modern ways of thinking. The concept of the ``Unity of
Economic and Social Policy'' replaces NÖSPL.
\textbf{Phase C: 1979 -- 1982}
IS: Creation of organizational structures for inventor schools within the
Engineering Association (KDT). First practical tests and business contacts
via trusts (``Kombinate''). First teaching material, mainly due to
Michael Herrlich as author.
SPS: Growing unease in parts of the political establishment, caused by the
low growth rates of the economy. Among the intelligentsia grows the feeling
that profound reforms are needed, but this is dismissed by the
establishment.
\end{frame}
\begin{frame}{Inventor School Movement in the GDR -- Periodization}
\small
\textbf{Phase D: 1983 -- 1989}
IS: The inventor school movement enters industry. The number of trainers and
participants grows; effort, breadth, and depth of impact increase rapidly.
The second generation of teaching material and also coaching material is
written.
SPS: Increasing self-deception and political fraud by the political
leadership. Hectic attempts to achieve breakthroughs to world-class level
by concentrating all reserves in the high-tech sector, despite the
tightening of the trade embargo (COCOM lists) and ongoing currency
shortages.
\end{frame}
\begin{frame}{ARIZ as Method in the Analysis of Social Processes}
During an \textbf{ARIZ-like component analysis} of the inventive system we
identify
\begin{itemize}
\item the trainers, many of them from the group of \emph{Honored Inventors},
\item inventive practices in the industry,
\item dialectical traditions of thought in contradictions coming from
cybernetics,
\item structural relicts of the ``Systematic Heuristics'', an innovation
  theory developed by Johannes Müller and strongly promoted by the
  establishment until the early 1970s.
\end{itemize}
\end{frame}
\begin{frame}{The System of Honored Inventors}
\emph{Honored inventor} was a state honorary title of the GDR, which
was awarded from 1950 in conjunction with a badge of honor and a monetary
bonus.
There existed a system of strong social ties between them that worked
independently of all political changes, mainly inspired by Michael Herrlich.
\end{frame}
\begin{frame}{Social Contradictions of the First Kind}
\textbf{Observations}\small
\begin{itemize}
\item Small, often evolutionary changes in the SPS often lead to disruptive
changes in the IS.
\item Cybernetics and MLO (ML Organizational Science) were still strongly
  driven by directives from the SPS (``planned development'').
\item There is a main contradiction between real socialist development
  concepts (SPS) and the practical dynamics of the economy, which limits
  the possibilities of the SPS.
\item In the 1980s, the SPS lost its ruling capabilities, the practical
management of the processes passed over to economic forces, thus to the
  Socio-Economic System (SES). Hence \textbf{we put the SES into the role
  of the \emph{subsystem}}.
\end{itemize}
All this is highly relevant for the history of ideas in TRIZ, but can be
worked up in this complexity only in a larger historical project.
\end{frame}
\begin{frame}{Social Contradictions of the Second Kind}
\textbf{Yet another observation:} All components of the IS as parts of the SPS
were subject to strongly changing restrictions -- only the System of Honored
Inventors as part of the SES remained constantly visible over the entire 30
years.
\textbf{Change the approach:} Consider \emph{TRIZ Theory} and \emph{TRIZ
Practices} as two poles between which the IS mediates.
Consider IS as mediating structure (field) between these two poles
(substances). The poles themselves are embedded in the contradictory
structures of the SPS (TRIZ Theory) and the practices of the SES (TRIZ
Practices).
\end{frame}
\begin{frame}{Social Contradictions of the Second Kind}
We applied a \textbf{Substance Field Swap}, a common method of nounification
of verbs in philosophy that is not contained in the 76 TRIZ Standard Solutions.
A \textbf{contradiction of the 2nd kind} is a contradiction between the
contradictions of the SPS and those of the SES.
Only from such an approach can the contradictions of the GDR Inventor School
Movement readily be explained.
\end{frame}
\section{Theoretical Contributions of the Inventor School Movement}
\begin{frame}{Theoretical Contributions of the Inventor School Movement}
\textbf{Three Theoretical Frameworks}
\begin{itemize}
\item WOIS -- Contradiction Oriented Innovation Strategies (Linde, TU Dresden
1988)
\item PROHEAL -- Program for the Development of Inventive Approaches and
Solutions (Rindfleisch, Thiel 1988)
\item Michael Herrlich: Inventing as a process of information processing and
  generation, illustrated by his own inventive work and by the approach
  used in the KDT inventor schools. (TU Ilmenau 1988)
\end{itemize}
\end{frame}
\begin{frame}{Theoretical Contributions of the Inventor School Movement}
\textbf{Main Contributions}
\begin{itemize}
\item The PROHEAL Path Model
\item Closer analysis of administrative contradictions on a technical-economic
level (TÖW), thus already close to today's challenges of the inventor's
everyday life in business.
\item Differentiation of contradictions on the three levels TÖW, TTW, TNW
\item The ABER\footnote{\textbf{A}nforderungen, \textbf{B}edingungen,
\textbf{E}rwartungen, \textbf{R}estriktionen. $=$ Requirements, Conditions,
Expectations, Restrictions.} matrix in three versions as a
unified analysis tool on these three levels.
\end{itemize}
\end{frame}
\end{document}
|
{"hexsha": "45fffb4ad1a12bffac90e2927ff9bc3e4a0807c0", "size": 12091, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Sources/Graebe_HG/Minsk-2019-Slides.tex", "max_stars_repo_name": "wumm-project/OpenDiscovery", "max_stars_repo_head_hexsha": "445b25b8a6f5d03e41a98c28a60c38003e9b84a4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-21T08:48:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-21T08:48:43.000Z", "max_issues_repo_path": "Sources/Graebe_HG/Minsk-2019-Slides.tex", "max_issues_repo_name": "wumm-project/OpenDiscovery", "max_issues_repo_head_hexsha": "445b25b8a6f5d03e41a98c28a60c38003e9b84a4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/Graebe_HG/Minsk-2019-Slides.tex", "max_forks_repo_name": "wumm-project/OpenDiscovery", "max_forks_repo_head_hexsha": "445b25b8a6f5d03e41a98c28a60c38003e9b84a4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6666666667, "max_line_length": 78, "alphanum_fraction": 0.7859564966, "num_tokens": 3155}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
from scipy.io import wavfile
from vocoder.hifigan_generator import Generator
import hparams as hp
import os
import text
import json
from pydub import AudioSegment
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
import matplotlib.font_manager as fm
# !! Load the speakers
def get_speakers(synthesize=False):
path = 'preprocessed/' + hp.dataset + '/alignment'
file_list = os.listdir(path)
file_list.sort()
n_speakers = len(file_list)
speaker_table = {}
if synthesize:
with open('speaker_info.json', 'r') as f:
pre_speakers = json.load(f)
n_speakers = pre_speakers['n_speakers']
speaker_table = pre_speakers['speaker_table']
    # For multi-speaker training, save the table contents
elif n_speakers > 1:
speakers = {}
speakers['n_speakers'] = n_speakers
cnt = 0
for file in file_list:
speaker_table[file] = cnt
cnt+=1
pre_speakers = {}
pre_speakers['n_speakers'] = n_speakers
pre_speakers['speaker_table'] = speaker_table
with open('speaker_info.json', 'w') as f:
json.dump(pre_speakers, f)
    # Single-speaker case, i.e. fine-tuning:
    # if a reference table exists, load it; otherwise use a single-entry table
else:
if os.path.exists('speaker_info.json'):
with open('speaker_info.json', 'r') as f:
pre_speakers = json.load(f)
            # only the number of speakers is read from the saved info
n_speakers = pre_speakers['n_speakers']
speaker_table[file_list[0]] = n_speakers
        else:  # single-speaker training from scratch
speaker_table = {}
speaker_table[file_list[0]] = 0
return n_speakers, speaker_table
# !! Add an Embedding layer
def Embedding(num_embeddings, embedding_dim, padding_idx, std=0.01):
    # !!!! Up to 54 speakers are allotted, but the raw speaker name
    # (e.g. '100064') seems to be passed in as the index, which causes problems
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    m.weight.data.normal_(0, std)  # initialize the embedding weights from N(0, std)
return m
# !! Module that adds the speaker embedding onto the hidden states
class SpeakerIntegrator(nn.Module):
def __init__(self):
super(SpeakerIntegrator, self).__init__()
def forward(self, x, spembs):
"""
x shape : (batch, 39, 256)
spembs shape : (batch, 256)
"""
spembs = spembs.unsqueeze(1)
spembs = spembs.repeat(1, x.shape[1], 1)
x = x + spembs
return x
def get_alignment(tier):
sil_phones = ['sil', 'sp', 'spn']
phones = []
durations = []
start_time = 0
end_time = 0
end_idx = 0
for t in tier._objects:
s, e, p = t.start_time, t.end_time, t.text
# Trimming leading silences
if phones == []:
if p in sil_phones:
continue
else:
start_time = s
if p not in sil_phones:
phones.append(p)
end_time = e
end_idx = len(phones)
else:
phones.append(p)
durations.append(int(e*hp.sampling_rate/hp.hop_length)-int(s*hp.sampling_rate/hp.hop_length))
# Trimming tailing silences
phones = phones[:end_idx]
durations = durations[:end_idx]
return phones, np.array(durations), start_time, end_time
def process_meta(meta_path):
with open(meta_path, "r", encoding="utf-8") as f:
text = []
name = []
for line in f.readlines():
n, t = line.strip('\n').split('|')
name.append(n)
text.append(t)
return name, text
def get_param_num(model):
num_param = sum(param.numel() for param in model.parameters())
return num_param
def plot_data(data, sentence_list, titles=None, filename=None):
fonts = 'data/NanumGothic.ttf'
fontprop = fm.FontProperties(fname=fonts)
# total_mel_postnet_torch[0].detach().cpu().numpy()
fig, axes = plt.subplots(1, len(data[0][0]), squeeze=False)
if titles is None:
titles = [None for i in range(len(data))]
def add_axis(fig, old_ax, offset=0):
ax = fig.add_axes(old_ax.get_position(), anchor='W')
ax.set_facecolor("None")
return ax
plt.rcParams["figure.figsize"] = (10,4)
for i in range(len(data)):
spectrograms, pitchs, energies = data[i]
for j in range(len(spectrograms)):
            spectrogram = spectrograms[j][0].detach().cpu().numpy() # the spectrogram arrives as a batch; take element 0 and convert to numpy
axes[0][j].imshow(spectrogram, origin='lower')
axes[0][j].set_aspect(2.5, adjustable='box')
axes[0][j].set_ylim(0, hp.n_mel_channels)
#axes[0][j].set_title(titles[0]+'_'+str(j), fontsize='medium')
axes[0][j].set_title(sentence_list[j], fontsize='medium', fontproperties=fontprop)
axes[0][j].tick_params(labelsize='x-small', left=False, labelleft=False)
axes[0][j].set_anchor('W')
ax1 = add_axis(fig, axes[0][j])
ax1.plot(pitchs[j], color='tomato')
ax1.set_xlim(0, spectrogram.shape[1])
ax1.set_ylim(0, hp.f0_max)
ax1.set_ylabel('F0', color='tomato')
ax1.tick_params(labelsize='x-small', colors='tomato', bottom=False, labelbottom=False)
ax2 = add_axis(fig, axes[0][j], 1.2)
ax2.plot(energies[j], color='darkviolet')
ax2.set_xlim(0, spectrogram.shape[1])
ax2.set_ylim(hp.energy_min, hp.energy_max)
ax2.set_ylabel('Energy', color='darkviolet')
ax2.yaxis.set_label_position('right')
ax2.tick_params(labelsize='x-small', colors='darkviolet', bottom=False, labelbottom=False, left=False, labelleft=False, right=True, labelright=True)
#curFilename = filename[:-4] + '_' + str(i) + filename[-4:]
plt.savefig(filename, dpi=200)
plt.close()
def get_mask_from_lengths(lengths, max_len=None):
batch_size = lengths.shape[0]
if max_len is None:
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len).unsqueeze(0).expand(batch_size, -1).to(device)
mask = (ids >= lengths.unsqueeze(1).expand(-1, max_len))
return mask
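# Illustrative example (assumed values): lengths = torch.tensor([2, 3]) with
# max_len = 3 yields mask [[False, False, True], [False, False, False]],
# i.e. True marks the padded positions.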
def get_vocgan(ckpt_path, n_mel_channels=hp.n_mel_channels, generator_ratio = [4, 4, 2, 2, 2, 2], n_residual_layers=4, mult=256, out_channels=1):
checkpoint = torch.load(ckpt_path, map_location=device)
model = Generator(n_mel_channels, n_residual_layers,
ratios=generator_ratio, mult=mult,
out_band=out_channels)
model.load_state_dict(checkpoint['model_g'])
model.to(device).eval()
return model
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_hifigan(ckpt_path):
state_dict_g = load_checkpoint(ckpt_path, device)
model = Generator().to(device)
model.load_state_dict(state_dict_g['generator'], strict=False)
return model
def combine_wav(path, cnt):
for i in range(cnt):
curPath = path[:-4] + '_' + str(i+1) + path[-4:]
if i == 0:
combined_sounds = AudioSegment.from_wav(curPath)
else:
combined_sounds += AudioSegment.from_wav(curPath)
os.remove(curPath)
combined_sounds.export(path, format="wav")
print(path, 'done')
def hifigan_infer(mel_list, path, synthesize=False):
if torch.cuda.is_available():
torch.cuda.manual_seed(1234)
device = torch.device('cuda')
else:
device = torch.device('cpu')
generator = Generator().to(device)
state_dict_g = load_checkpoint(hp.vocoder_pretrained_model_path, device)
generator.load_state_dict(state_dict_g['generator'], strict=False)
generator.eval()
generator.remove_weight_norm()
cnt = 0
for mel in mel_list:
cnt += 1
with torch.no_grad():
if not synthesize:
mel = torch.unsqueeze(mel, 0)
x = mel
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * 32768.0 # MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
curPath = path[:-4] + '_' + str(cnt) + path[-4:]
wavfile.write(curPath, hp.sampling_rate, audio)
print(curPath, 'done')
combine_wav(path, cnt)
def pad_1D(inputs, PAD=0):
def pad_data(x, length, PAD):
x_padded = np.pad(x, (0, length - x.shape[0]),
mode='constant',
constant_values=PAD)
return x_padded
max_len = max((len(x) for x in inputs))
padded = np.stack([pad_data(x, max_len, PAD) for x in inputs])
return padded
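# Example (assumed inputs): pad_1D([np.array([1, 2]), np.array([3])]) returns
# array([[1, 2], [3, 0]]) -- each sequence is right-padded to the longest length.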
def pad_2D(inputs, maxlen=None):
def pad(x, max_len):
PAD = 0
if np.shape(x)[0] > max_len:
            raise ValueError("input is longer than max_len")
s = np.shape(x)[1]
x_padded = np.pad(x, (0, max_len - np.shape(x)[0]),
mode='constant',
constant_values=PAD)
return x_padded[:, :s]
if maxlen:
output = np.stack([pad(x, maxlen) for x in inputs])
else:
max_len = max(np.shape(x)[0] for x in inputs)
output = np.stack([pad(x, max_len) for x in inputs])
return output
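# Note: np.pad with a single (0, n) pair pads every axis of a 2-D array, so the
# trailing slice [:, :s] restores the original feature width and only the time
# axis ends up padded. Example (assumed input):
# pad_2D([np.zeros((2, 3)), np.zeros((4, 3))]) has shape (2, 4, 3).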
def pad(input_ele, mel_max_length=None):
if mel_max_length:
max_len = mel_max_length
else:
        max_len = max([input_ele[i].size(0) for i in range(len(input_ele))])
out_list = list()
for i, batch in enumerate(input_ele):
if len(batch.shape) == 1:
one_batch_padded = F.pad(
batch, (0, max_len-batch.size(0)), "constant", 0.0)
elif len(batch.shape) == 2:
one_batch_padded = F.pad(
batch, (0, 0, 0, max_len-batch.size(0)), "constant", 0.0)
out_list.append(one_batch_padded)
out_padded = torch.stack(out_list)
return out_padded
# from dathudeptrai's FastSpeech2 implementation
def standard_norm(x, mean, std, is_mel=False):
if not is_mel:
x = remove_outlier(x)
zero_idxs = np.where(x == 0.0)[0]
x = (x - mean) / std
x[zero_idxs] = 0.0
return x
return (x - mean) / std
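# For non-mel features (f0/energy) the zero entries mark unvoiced or padded
# frames, so they are pinned back to exactly 0 after normalization instead of
# drifting to -mean/std.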
def de_norm(x, mean, std):
zero_idxs = torch.where(x == 0.0)[0]
x = mean + std * x
x[zero_idxs] = 0.0
return x
def _is_outlier(x, p25, p75):
"""Check if value is an outlier."""
lower = p25 - 1.5 * (p75 - p25)
upper = p75 + 1.5 * (p75 - p25)
return np.logical_or(x <= lower, x >= upper)
# For the old_man data this ends up flagging all f0 values as outliers
def remove_outlier(x):
"""Remove outlier from x."""
p25 = np.percentile(x, 25)
p75 = np.percentile(x, 75)
indices_of_outliers = []
for ind, value in enumerate(x):
if _is_outlier(value, p25, p75):
indices_of_outliers.append(ind)
    x[indices_of_outliers] = 0.0   # zero outliers first so they don't skew the max
    # then replace them with the maximum of the remaining (non-outlier) values
    x[indices_of_outliers] = np.max(x)
return x
def average_by_duration(x, durs):
mel_len = durs.sum()
durs_cum = np.cumsum(np.pad(durs, (1, 0)))
    # calculate per-character f0/energy
x_char = np.zeros((durs.shape[0],), dtype=np.float32)
for idx, start, end in zip(range(mel_len), durs_cum[:-1], durs_cum[1:]):
values = x[start:end][np.where(x[start:end] != 0.0)[0]]
x_char[idx] = np.mean(values) if len(values) > 0 else 0.0 # np.mean([]) = nan.
return x_char.astype(np.float32)
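# Example (assumed values): x = np.array([1., 2., 0., 4.]) with
# durs = np.array([2, 2]) gives x_char = [1.5, 4.0]; zero frames are excluded
# from each per-character mean.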
## HiFi-GAN utility functions
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
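# e.g. get_padding(3) == 1 and get_padding(5, dilation=2) == 4 -- the "same"
# padding for odd kernel sizes.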
|
{"hexsha": "1099757529074c27617a95b2f88093c1359e695b", "size": 12372, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "hwRG/FastSpeech2-Pytorch-old-man_city", "max_stars_repo_head_hexsha": "c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "hwRG/FastSpeech2-Pytorch-old-man_city", "max_issues_repo_head_hexsha": "c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "hwRG/FastSpeech2-Pytorch-old-man_city", "max_forks_repo_head_hexsha": "c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9689922481, "max_line_length": 161, "alphanum_fraction": 0.5845457485, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3365}
|
#ifndef LEAF_87F274C4D4BA11E89928D55AC82C3C47
#define LEAF_87F274C4D4BA11E89928D55AC82C3C47
// Copyright (c) 2018-2019 Emil Dotchevski and Reverge Studios, Inc.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <boost/leaf/config.hpp>
#ifdef LEAF_NO_EXCEPTIONS
# error This header requires exception handling
#endif
#include <boost/leaf/capture.hpp>
#include <boost/leaf/handle_error.hpp>
#include <boost/leaf/detail/demangle.hpp>
namespace boost { namespace leaf {
namespace leaf_detail
{
template <class Ex>
LEAF_CONSTEXPR inline bool check_exception_pack( std::exception const * ex, Ex const * ) noexcept
{
return dynamic_cast<Ex const *>(ex)!=0;
}
template <class Ex, class... ExRest>
LEAF_CONSTEXPR inline bool check_exception_pack( std::exception const * ex, Ex const *, ExRest const * ... ex_rest ) noexcept
{
return dynamic_cast<Ex const *>(ex)!=0 || check_exception_pack(ex, ex_rest...);
}
LEAF_CONSTEXPR inline bool check_exception_pack( std::exception const * )
{
return true;
}
}
template <class... Ex>
class catch_
{
std::exception const * const value_;
public:
LEAF_CONSTEXPR explicit catch_( std::exception const * value ) noexcept:
value_(value)
{
}
LEAF_CONSTEXPR bool operator()() const noexcept
{
return value_ && leaf_detail::check_exception_pack(value_,static_cast<Ex const *>(0)...);
}
LEAF_CONSTEXPR std::exception const & value() const noexcept
{
assert(value_!=0);
return *value_;
}
};
template <class Ex>
class catch_<Ex>
{
Ex const * const value_;
public:
LEAF_CONSTEXPR explicit catch_( std::exception const * value ) noexcept:
value_(dynamic_cast<Ex const *>(value))
{
}
LEAF_CONSTEXPR bool operator()() const noexcept
{
return this->value_!=0;
}
LEAF_CONSTEXPR Ex const & value() const noexcept
{
assert(this->value_!=0);
return *this->value_;
}
};
namespace leaf_detail
{
template <class... Exceptions> struct translate_type_impl<catch_<Exceptions...>> { using type = void; };
template <class... Exceptions> struct translate_type_impl<catch_<Exceptions...> const>;
template <class... Exceptions> struct translate_type_impl<catch_<Exceptions...> const *> { static_assert(sizeof(catch_<Exceptions...>)==0, "Handlers should take catch_<> by value, not as catch_<> const *"); };
template <class... Exceptions> struct translate_type_impl<catch_<Exceptions...> const &> { static_assert(sizeof(catch_<Exceptions...>)==0, "Handlers should take catch_<> by value, not as catch_<> const &"); };
template <class SlotsTuple, class... Ex>
struct check_one_argument<SlotsTuple,catch_<Ex...>>
{
LEAF_CONSTEXPR static bool check( SlotsTuple const &, error_info const & ei ) noexcept
{
if( ei.exception_caught() )
return catch_<Ex...>(ei.exception())();
else
return false;
}
};
template <class... Ex>
struct get_one_argument<catch_<Ex...>>
{
template <class SlotsTuple>
LEAF_CONSTEXPR static catch_<Ex...> get( SlotsTuple const &, error_info const & ei ) noexcept
{
std::exception const * ex = ei.exception();
assert(ex!=0);
return catch_<Ex...>(ex);
}
};
}
////////////////////////////////////////
template <class TryBlock, class... H>
LEAF_CONSTEXPR inline decltype(std::declval<TryBlock>()()) try_catch( TryBlock && try_block, H && ... h )
{
using namespace leaf_detail;
context_type_from_handlers<H...> ctx;
auto active_context = activate_context(ctx, on_deactivation::propagate_if_uncaught_exception);
return ctx.try_catch_(
[&]
{
return std::forward<TryBlock>(try_block)();
},
std::forward<H>(h)...);
}
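    // Minimal usage sketch (illustrative only, not part of this header):
    //
    //   namespace leaf = boost::leaf;
    //   int r = leaf::try_catch(
    //       []() -> int { throw std::runtime_error("boom"); },
    //       []( leaf::catch_<std::runtime_error> e )
    //       {
    //           // e.value() is the caught std::runtime_error
    //           return 1;
    //       } );
    //
    // Handlers take catch_<> by value, as the static_asserts above enforce.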
template <class TryBlock, class RemoteH>
LEAF_CONSTEXPR inline decltype(std::declval<TryBlock>()()) remote_try_catch( TryBlock && try_block, RemoteH && h )
{
using namespace leaf_detail;
context_type_from_remote_handler<RemoteH> ctx;
auto active_context = activate_context(ctx, on_deactivation::propagate_if_uncaught_exception);
return ctx.remote_try_catch_(
[&]
{
return std::forward<TryBlock>(try_block)();
},
std::forward<RemoteH>(h));
}
namespace leaf_detail
{
template <class... E>
template <class R, class... H>
LEAF_CONSTEXPR inline R context_base<E...>::handle_current_exception( H && ... h ) const
{
return this->try_catch_(
[]{ throw; },
std::forward<H>(h)...);
}
template <class... E>
template <class R, class RemoteH>
LEAF_CONSTEXPR inline R context_base<E...>::remote_handle_current_exception( RemoteH && h ) const
{
return this->remote_try_catch_(
[]() -> R { throw; },
std::forward<RemoteH>(h));
}
template <class... E>
template <class R, class... H>
LEAF_CONSTEXPR inline R context_base<E...>::handle_exception( std::exception_ptr const & ep, H && ... h ) const
{
return this->try_catch_(
[&]{ std::rethrow_exception(ep); },
std::forward<H>(h)...);
}
template <class... E>
template <class R, class RemoteH>
LEAF_CONSTEXPR inline R context_base<E...>::remote_handle_exception( std::exception_ptr const & ep, RemoteH && h ) const
{
return this->remote_try_catch_(
[&]() -> R { std::rethrow_exception(ep); },
std::forward<RemoteH>(h));
}
////////////////////////////////////////
template <class... E>
template <class TryBlock, class... H>
LEAF_CONSTEXPR inline typename std::decay<decltype(std::declval<TryBlock>()().value())>::type catch_context<E...>::try_handle_all( TryBlock && try_block, H && ... h )
{
using namespace leaf_detail;
static_assert(is_result_type<decltype(std::declval<TryBlock>()())>::value, "The return type of the try_block passed to a try_handle_all function must be registered with leaf::is_result_type");
auto active_context = activate_context(*this, on_deactivation::propagate_if_uncaught_exception);
if( auto r = this->try_catch_(
[&]
{
return std::forward<TryBlock>(try_block)();
},
std::forward<H>(h)...) )
return r.value();
else
return this->handle_all(r, std::forward<H>(h)...);
}
template <class... E>
template <class TryBlock, class RemoteH>
LEAF_CONSTEXPR inline typename std::decay<decltype(std::declval<TryBlock>()().value())>::type catch_context<E...>::remote_try_handle_all( TryBlock && try_block, RemoteH && h )
{
using namespace leaf_detail;
static_assert(is_result_type<decltype(std::declval<TryBlock>()())>::value, "The return type of the try_block passed to a try_handle_all function must be registered with leaf::is_result_type");
auto active_context = activate_context(*this, on_deactivation::propagate_if_uncaught_exception);
if( auto r = this->remote_try_catch_(
[&]
{
return std::forward<TryBlock>(try_block)();
},
std::forward<RemoteH>(h)) )
return r.value();
else
return this->remote_handle_all(r, std::forward<RemoteH>(h));
}
template <class... E>
template <class TryBlock, class... H>
LEAF_CONSTEXPR inline typename std::decay<decltype(std::declval<TryBlock>()())>::type catch_context<E...>::try_handle_some( TryBlock && try_block, H && ... h )
{
using namespace leaf_detail;
static_assert(is_result_type<decltype(std::declval<TryBlock>()())>::value, "The return type of the try_block passed to a try_handle_some function must be registered with leaf::is_result_type");
auto active_context = activate_context(*this, on_deactivation::propagate_if_uncaught_exception);
if( auto r = this->try_catch_(
[&]
{
return std::forward<TryBlock>(try_block)();
},
std::forward<H>(h)...) )
return r;
else
{
auto rr = this->handle_some(std::move(r), std::forward<H>(h)...);
if( !rr )
active_context.set_on_deactivate(on_deactivation::propagate);
return rr;
}
}
template <class... E>
template <class TryBlock, class RemoteH>
LEAF_CONSTEXPR inline typename std::decay<decltype(std::declval<TryBlock>()())>::type catch_context<E...>::remote_try_handle_some( TryBlock && try_block, RemoteH && h )
{
auto active_context = activate_context(*this, on_deactivation::propagate_if_uncaught_exception);
if( auto r = this->remote_try_catch_(
[&]
{
return std::forward<TryBlock>(try_block)();
},
std::forward<RemoteH>(h)) )
return r;
else
{
auto rr = this->remote_handle_some(std::move(r), std::forward<RemoteH>(h));
if( !rr )
active_context.set_on_deactivate(on_deactivation::propagate);
return rr;
}
}
////////////////////////////////////////
inline void exception_info_::print( std::ostream & os ) const
{
if( ex_ )
{
os <<
"\nException dynamic type: " << leaf_detail::demangle(typeid(*ex_).name()) <<
"\nstd::exception::what(): " << ex_->what();
}
else
os << "\nUnknown exception type (not a std::exception)";
}
LEAF_CONSTEXPR inline exception_info_::exception_info_( std::exception const * ex ) noexcept:
exception_info_base(ex)
{
}
template <class... E>
template <class TryBlock, class... H>
inline decltype(std::declval<TryBlock>()()) context_base<E...>::try_catch_( TryBlock && try_block, H && ... h ) const
{
using namespace leaf_detail;
using R = decltype(std::declval<TryBlock>()());
try
{
return std::forward<TryBlock>(try_block)();
}
catch( capturing_exception const & cap )
{
try
{
cap.unload_and_rethrow_original_exception();
}
catch( std::exception const & ex )
{
return leaf_detail::handle_error_<R>(this->tup(), error_info(exception_info_(&ex)), std::forward<H>(h)...,
[]() -> R { throw; } );
}
catch(...)
{
return leaf_detail::handle_error_<R>(this->tup(), error_info(exception_info_(0)), std::forward<H>(h)...,
[]() -> R { throw; } );
}
}
catch( std::exception const & ex )
{
return leaf_detail::handle_error_<R>(this->tup(), error_info(exception_info_(&ex)), std::forward<H>(h)...,
[]() -> R { throw; } );
}
catch(...)
{
return leaf_detail::handle_error_<R>(this->tup(), error_info(exception_info_(0)), std::forward<H>(h)...,
[]() -> R { throw; } );
}
}
template <class... E>
template <class TryBlock, class RemoteH>
inline decltype(std::declval<TryBlock>()()) context_base<E...>::remote_try_catch_( TryBlock && try_block, RemoteH && h ) const
{
using namespace leaf_detail;
try
{
return std::forward<TryBlock>(try_block)();
}
catch( capturing_exception const & cap )
{
try
{
cap.unload_and_rethrow_original_exception();
}
catch( std::exception const & ex )
{
return std::forward<RemoteH>(h)(error_info(exception_info_(&ex), this)).get();
}
catch(...)
{
return std::forward<RemoteH>(h)(error_info(exception_info_(0), this)).get();
}
}
catch( std::exception const & ex )
{
return std::forward<RemoteH>(h)(error_info(exception_info_(&ex), this)).get();
}
catch(...)
{
return std::forward<RemoteH>(h)(error_info(exception_info_(0), this)).get();
}
}
}
////////////////////////////////////////
namespace leaf_detail
{
template <class R, class... H>
struct remote_handle_exception_dispatch_impl
{
using result_type = handler_result<H...>;
LEAF_CONSTEXPR static result_type handle( error_info const & err, H && ... h )
{
using Ctx = context_type_from_handlers<H...>;
return { leaf_detail::handle_error_<R>(static_cast<Ctx const *>(err.remote_handling_ctx_)->tup(), err, std::forward<H>(h)...,
[]() -> R { throw; } ) };
}
};
template <class... H>
struct remote_handle_exception_dispatch_impl<void, H...>
{
using result_type = handler_result_void<H...>;
LEAF_CONSTEXPR static result_type handle( error_info const & err, H && ... h )
{
using Ctx = context_type_from_handlers<H...>;
leaf_detail::handle_error_<void>(static_cast<Ctx const *>(err.remote_handling_ctx_)->tup(), err, std::forward<H>(h)...,
[]{ throw; } );
return { };
}
};
template <class... H>
using remote_handle_exception_dispatch = remote_handle_exception_dispatch_impl<handler_pack_return<H...>, H...>;
}
template <class... H>
LEAF_CONSTEXPR inline typename leaf_detail::remote_handle_exception_dispatch<H...>::result_type remote_handle_exception( error_info const & err, H && ... h )
{
using namespace leaf_detail;
return remote_handle_exception_dispatch<H...>::handle(err, std::forward<H>(h)...);
}
////////////////////////////////////////
namespace leaf_detail
{
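        // unpack_error_id tries, in order: a thrown std::system_error's code, a
        // thrown std::error_code, a thrown error_id; otherwise it allocates a
        // fresh error id via next_error().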
inline error_id unpack_error_id( std::exception const * ex )
{
if( std::system_error const * se = dynamic_cast<std::system_error const *>(ex) )
return error_id(se->code());
else if( std::error_code const * ec = dynamic_cast<std::error_code const *>(ex) )
return error_id(*ec);
else if( error_id const * err_id = dynamic_cast<error_id const *>(ex) )
return *err_id;
else
return next_error();
}
LEAF_CONSTEXPR inline exception_info_base::exception_info_base( std::exception const * ex ) noexcept:
ex_(ex)
{
assert(!dynamic_cast<capturing_exception const *>(ex_));
}
inline exception_info_base::~exception_info_base() noexcept
{
}
}
inline error_info::error_info( leaf_detail::exception_info_ const & xi, void const * remote_handling_ctx ) noexcept:
remote_handling_ctx_(remote_handling_ctx),
xi_(&xi),
err_id_(leaf_detail::unpack_error_id(xi_->ex_))
{
}
} }
#endif
|
{"hexsha": "cd08b63e5fd17d67e50555d082ed7d58e698d753", "size": 13580, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/leaf/handle_exception.hpp", "max_stars_repo_name": "vector-of-bool/leaf", "max_stars_repo_head_hexsha": "329d1c27a53b8b4e22e18ed6cf164bfc4cb2d8c2", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/leaf/handle_exception.hpp", "max_issues_repo_name": "vector-of-bool/leaf", "max_issues_repo_head_hexsha": "329d1c27a53b8b4e22e18ed6cf164bfc4cb2d8c2", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/leaf/handle_exception.hpp", "max_forks_repo_name": "vector-of-bool/leaf", "max_forks_repo_head_hexsha": "329d1c27a53b8b4e22e18ed6cf164bfc4cb2d8c2", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7239819005, "max_line_length": 211, "alphanum_fraction": 0.6588365243, "num_tokens": 3727}
|
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .layer_harness import create_layer, delete_layer, layer_path
from ingestclient.core.config import Configuration
import json
import numpy as np
import os
from pkg_resources import resource_filename
import unittest
class TestCloudVolume(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.vol_name = 'test_vol'
create_layer((1036, 1026, 78), (0, 0, 0), layer_name=cls.vol_name, dtype=np.uint8)
cls.config_file = os.path.join(
resource_filename("ingestclient", "test/data"), "boss-v0.2-cloudvolume.json")
with open(cls.config_file, 'rt') as example_file:
cls.example_config_data = json.load(example_file)
cls.config = Configuration(cls.example_config_data)
cls.config.load_plugins()
# Point config at generated CloudVolume.
cls.config.config_data["client"]["chunk_processor"]["params"]["cloudpath"] = (
'file://{}{}'.format(layer_path, cls.vol_name))
        cls.chunk_processor = cls.config.chunk_processor_class
        cls.chunk_processor.setup(cls.config.get_chunk_processor_params())
cls.chunk_size = (
cls.config.config_data["ingest_job"]["chunk_size"]["x"],
cls.config.config_data["ingest_job"]["chunk_size"]["y"],
cls.config.config_data["ingest_job"]["chunk_size"]["z"],
1) # Time dimension.
@classmethod
    def tearDownClass(cls):
# Remove test CloudVolume.
delete_layer()
def test_process(self):
foo = None
        chunk, order = self.chunk_processor.process(foo, 0, 0, 0)
self.assertEqual(self.chunk_size, chunk.shape)
    def test_process_chunk_trimmed(self):
        """
        Ensure that a smaller chunk is returned when the CloudVolume's extents
        are exceeded.
        """
        foo = None
        chunk, order = self.chunk_processor.process(foo, 1, 1, 1)
expected = (12, 2, 14, 1)
self.assertEqual(expected, chunk.shape)
|
{"hexsha": "1b9b94e77e9c238638997c1ce23e02941203e31b", "size": 2638, "ext": "py", "lang": "Python", "max_stars_repo_path": "ingestclient/test/test_plugin_cloudvolume.py", "max_stars_repo_name": "jhuapl-boss/ingest-client", "max_stars_repo_head_hexsha": "62d7f39a8d8ab1f0b12226bbeab7f2ab429da08a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2016-11-22T21:45:20.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-04T21:57:12.000Z", "max_issues_repo_path": "ingestclient/test/test_plugin_cloudvolume.py", "max_issues_repo_name": "jhuapl-boss/ingest-client", "max_issues_repo_head_hexsha": "62d7f39a8d8ab1f0b12226bbeab7f2ab429da08a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2017-01-05T22:25:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-07T20:56:52.000Z", "max_forks_repo_path": "ingestclient/test/test_plugin_cloudvolume.py", "max_forks_repo_name": "jhuapl-boss/ingest-client", "max_forks_repo_head_hexsha": "62d7f39a8d8ab1f0b12226bbeab7f2ab429da08a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2016-09-29T20:52:51.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-26T14:45:42.000Z", "avg_line_length": 36.6388888889, "max_line_length": 90, "alphanum_fraction": 0.6849886277, "include": true, "reason": "import numpy", "num_tokens": 592}
|
#!/usr/bin/env python3
import sys
sys.path.append("../")
import io
import numpy as np
from struct import *
from edfreader import EDFreader
from edfreader import EDFexception
from edfwriter import EDFwriter
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
print("Must be using Python version >= 3.5.0")
sys.exit()
if tuple(int(p) for p in np.__version__.split(".")[:2]) < (1, 17):  # numeric compare; a plain string compare misorders versions like "1.100.0"
print("Must be using NumPy version >= 1.17.0")
sys.exit()
def dblcmp(val1, val2):
diff = val1 - val2
if diff > 1e-13:
return 1
else:
if -diff > 1e-13:
return -1
else:
return 0
def dblcmp_lim(val1, val2, lim):
diff = val1 - val2
if diff > lim:
return 1
else:
if -diff > lim:
return -1
else:
return 0
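# dblcmp/dblcmp_lim are three-way float comparisons: they return 1 if
# val1 > val2 + lim, -1 if val1 < val2 - lim, and 0 when the values agree
# within the tolerance (1e-13 for dblcmp).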
def modify_and_try(path, offset, b):
    """Overwrite bytes at the given header offset and try to open the file.

    Returns 3 if EDFreader rejects the modified file, 6 if closing fails,
    and 7 if the file is accepted and closed cleanly.
    """
    fp = open(path, "rb+")
    fp.seek(offset, io.SEEK_SET)
    fp.write(b)
    fp.close()
    try:
        hdl = EDFreader(path)
    except EDFexception:
        return 3
    if hdl.close() != 0:
        return 6
    return 7
################################### EDF writing ###############################
dbuf = np.zeros(10240, dtype = np.float64)
sbuf = np.zeros(300, dtype = np.int16)
ibuf = np.zeros(300, dtype = np.int32)
hdl_out = EDFwriter("test.edf", EDFwriter.EDFLIB_FILETYPE_EDFPLUS, 512)
assert(hdl_out.version() == 100)
for i in range(0, 512):
assert(hdl_out.setSampleFrequency(i, 10239) == 0)
assert(hdl_out.setPhysicalMaximum(i, -10000) == 0)
assert(hdl_out.setPhysicalMinimum(i, -30000) == 0)
assert(hdl_out.setDigitalMaximum(i, 10000) == 0)
assert(hdl_out.setDigitalMinimum(i, -10000) == 0)
assert(hdl_out.writeSamples(dbuf) == 0)
assert(hdl_out.close() == 0)
hdl_out = EDFwriter("test.edf", EDFwriter.EDFLIB_FILETYPE_EDFPLUS, 512)
for i in range(0, 512):
assert(hdl_out.setSampleFrequency(i, 10240) == 0)
assert(hdl_out.setPhysicalMaximum(i, -10000) == 0)
assert(hdl_out.setPhysicalMinimum(i, -30000) == 0)
assert(hdl_out.setDigitalMaximum(i, 10000) == 0)
assert(hdl_out.setDigitalMinimum(i, -10000) == 0)
assert(hdl_out.writeSamples(dbuf) == hdl_out.EDFLIB_DATARECORD_SIZE_TOO_BIG)
assert(hdl_out.close() == 0)
chns = 2
hdl_out = EDFwriter("test.edf", EDFwriter.EDFLIB_FILETYPE_EDFPLUS, chns)
assert(hdl_out.setSampleFrequency(0, 20) == 0)
assert(hdl_out.setSampleFrequency(1, 23) == 0)
assert(hdl_out.setPhysicalMaximum(0, 10000) == 0)
assert(hdl_out.setPhysicalMinimum(0, -5000) == 0)
assert(hdl_out.setPhysicalMaximum(1, -10000) == 0)
assert(hdl_out.setPhysicalMinimum(1, -30000) == 0)
assert(hdl_out.setDigitalMaximum(0, 10000) == 0)
assert(hdl_out.setDigitalMinimum(0, -10000) == 0)
assert(hdl_out.setDigitalMaximum(1, 30000) == 0)
assert(hdl_out.setDigitalMinimum(1, 10000) == 0)
assert(hdl_out.setSignalLabel(0, "trace1") == 0)
assert(hdl_out.setSignalLabel(1, "trace2") == 0)
assert(hdl_out.setPreFilter(0, "qwerty") == 0)
assert(hdl_out.setPreFilter(1, "zxcvbn") == 0)
assert(hdl_out.setTransducer(0, "asdfgh") == 0)
assert(hdl_out.setTransducer(1, "poklhyg") == 0)
assert(hdl_out.setPhysicalDimension(0, "\xb5Vxxxxxxxxxxxxxxxxxxxx") == 0)
assert(hdl_out.setPhysicalDimension(1, "\xb0\xf8xxxxxxxxxxxxxxxxxxxx") == 0)
assert(hdl_out.setStartDateTime(2017, 12, 5, 12, 23, 8, 0) == 0)
assert(hdl_out.setPatientName("John Doe") == 0)
assert(hdl_out.setPatientCode("01234") == 0)
assert(hdl_out.setPatientGender(1) == 0)
assert(hdl_out.setPatientBirthDate(2010, 7, 4) == 0)
assert(hdl_out.setAdditionalPatientInfo("nop") == 0)
assert(hdl_out.setAdministrationCode("789") == 0)
assert(hdl_out.setTechnician("Richard Roe") == 0)
assert(hdl_out.setEquipment("device") == 0)
assert(hdl_out.setNumberOfAnnotationSignals(3) == 0)
assert(hdl_out.setDataRecordDuration(130000) == 0)
assert(hdl_out.writeAnnotation(0, -1, "Recording starts") == 0)
assert(hdl_out.writeAnnotation(9000, 1000, "Test 1") == 0)
assert(hdl_out.writeAnnotation(13000, -1, "Recording ends") == 0)
for i in range(0, 20):
dbuf[i] = -5100 + (i * 800)
assert(hdl_out.writeSamples(dbuf) == 0)
for i in range(0, 23):
dbuf[i] = -30100 + (i * 909)
assert(hdl_out.writeSamples(dbuf) == 0)
for i in range(0, 20):
dbuf[i] = -5100 + (i * 800)
assert(hdl_out.writeSamples(dbuf) == 0)
for i in range(0, 23):
dbuf[i] = -30100 + (i * 909)
assert(hdl_out.writeSamples(dbuf) == 0)
for i in range(0, 20):
sbuf[i] = -10100 + (i * 1053)
assert(hdl_out.writeSamples(sbuf) == 0)
for i in range(0, 23):
sbuf[i] = 9900 + (i * 1053)
assert(hdl_out.writeSamples(sbuf) == 0)
for i in range(0, 20):
sbuf[i] = -10100 + (i * 1053)
assert(hdl_out.writeSamples(sbuf) == 0)
for i in range(0, 23):
sbuf[i] = 9900 + (i * 1053)
assert(hdl_out.writeSamples(sbuf) == 0)
for i in range(0, 20):
ibuf[i] = -10100 + (i * 1053)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 23):
ibuf[i] = 9900 + (i * 1053)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 20):
ibuf[i] = -10100 + (i * 1053)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 23):
ibuf[i] = 9900 + (i * 1053)
assert(hdl_out.writeSamples(ibuf) == 0)
ival1 = -10100
ival2 = 9900
for j in range(0, 4):
for i in range(0, 20):
ibuf[i] = ival1
ival1 += 253
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 23):
ibuf[i] = ival2
ival2 += 253
assert(hdl_out.writeSamples(ibuf) == 0)
assert(hdl_out.close() == 0)
################################### BDF writing ###############################
hdl_out = EDFwriter("test.bdf", EDFwriter.EDFLIB_FILETYPE_BDFPLUS, 512)
for i in range(0, 512):
assert(hdl_out.setSampleFrequency(i, 10239) == 0)
assert(hdl_out.setPhysicalMaximum(i, -10000) == 0)
assert(hdl_out.setPhysicalMinimum(i, -30000) == 0)
assert(hdl_out.setDigitalMaximum(i, 10000) == 0)
assert(hdl_out.setDigitalMinimum(i, -10000) == 0)
assert(hdl_out.writeSamples(dbuf) == 0)
assert(hdl_out.close() == 0)
hdl_out = EDFwriter("test.bdf", EDFwriter.EDFLIB_FILETYPE_BDFPLUS, 512)
for i in range(0, 512):
assert(hdl_out.setSampleFrequency(i, 10240) == 0)
assert(hdl_out.setPhysicalMaximum(i, -10000) == 0)
assert(hdl_out.setPhysicalMinimum(i, -30000) == 0)
assert(hdl_out.setDigitalMaximum(i, 10000) == 0)
assert(hdl_out.setDigitalMinimum(i, -10000) == 0)
assert(hdl_out.writeSamples(dbuf) == hdl_out.EDFLIB_DATARECORD_SIZE_TOO_BIG)
assert(hdl_out.close() == 0)
hdl_out = EDFwriter("test.bdf", EDFwriter.EDFLIB_FILETYPE_BDFPLUS, chns)
assert(hdl_out.setSampleFrequency(0, 20) == 0)
assert(hdl_out.setSampleFrequency(1, 23) == 0)
assert(hdl_out.setPhysicalMaximum(0, 10000) == 0)
assert(hdl_out.setPhysicalMinimum(0, -5000) == 0)
assert(hdl_out.setPhysicalMaximum(1, -10000) == 0)
assert(hdl_out.setPhysicalMinimum(1, -30000) == 0)
assert(hdl_out.setDigitalMaximum(0, 1000000) == 0)
assert(hdl_out.setDigitalMinimum(0, -1000000) == 0)
assert(hdl_out.setDigitalMaximum(1, 3000000) == 0)
assert(hdl_out.setDigitalMinimum(1, 1000000) == 0)
assert(hdl_out.setSignalLabel(0, "trace1") == 0)
assert(hdl_out.setSignalLabel(1, "trace2") == 0)
assert(hdl_out.setPreFilter(0, "qwerty") == 0)
assert(hdl_out.setPreFilter(1, "zxcvbn") == 0)
assert(hdl_out.setTransducer(0, "asdfgh") == 0)
assert(hdl_out.setTransducer(1, "poklhyg") == 0)
assert(hdl_out.setPhysicalDimension(0, "\xb5Vxxxxxxxxxxxxxxxxxxxx") == 0)
assert(hdl_out.setPhysicalDimension(1, "\xb0\xf8xxxxxxxxxxxxxxxxxxxx") == 0)
assert(hdl_out.setStartDateTime(2017, 12, 5, 12, 23, 8, 0) == 0)
assert(hdl_out.setPatientName("John Doe") == 0)
assert(hdl_out.setPatientCode("01234") == 0)
assert(hdl_out.setPatientGender(1) == 0)
assert(hdl_out.setPatientBirthDate(2010, 7, 4) == 0)
assert(hdl_out.setAdditionalPatientInfo("nop") == 0)
assert(hdl_out.setAdministrationCode("789") == 0)
assert(hdl_out.setTechnician("Richard Roe") == 0)
assert(hdl_out.setEquipment("device") == 0)
assert(hdl_out.setNumberOfAnnotationSignals(3) == 0)
assert(hdl_out.setDataRecordDuration(117000) == 0)
assert(hdl_out.writeAnnotation(0, -1, "Recording starts") == 0)
assert(hdl_out.writeAnnotation(6000, 2000, "Test 2") == 0)
assert(hdl_out.writeAnnotation(11700, -1, "Recording ends") == 0)
for i in range(0, 20):
dbuf[i] = -5100 + (i * 800)
assert(hdl_out.writeSamples(dbuf) == 0)
for i in range(0, 23):
dbuf[i] = -30100 + (i * 909)
assert(hdl_out.writeSamples(dbuf) == 0)
for i in range(0, 20):
dbuf[i] = -5100 + (i * 800)
assert(hdl_out.writeSamples(dbuf) == 0)
for i in range(0, 23):
dbuf[i] = -30100 + (i * 909)
assert(hdl_out.writeSamples(dbuf) == 0)
for i in range(0, 20):
ibuf[i] = -1010000 + (i * 105300)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 23):
ibuf[i] = 990000 + (i * 105300)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 20):
ibuf[i] = -1010000 + (i * 105300)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 23):
ibuf[i] = 990000 + (i * 105300)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 20):
ibuf[i] = -1010000 + (i * 105300)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 23):
ibuf[i] = 990000 + (i * 105300)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 20):
ibuf[i] = -1010000 + (i * 105300)
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 23):
ibuf[i] = 990000 + (i * 105300)
assert(hdl_out.writeSamples(ibuf) == 0)
ival1 = -1010000
ival2 = 990000
for j in range(0, 4):
for i in range(0, 20):
ibuf[i] = ival1
ival1 += 25300
assert(hdl_out.writeSamples(ibuf) == 0)
for i in range(0, 23):
ibuf[i] = ival2
ival2 += 25300
assert(hdl_out.writeSamples(ibuf) == 0)
assert(hdl_out.close() == 0)
################################### EDF reading ###############################
hdl_in = EDFreader("test.edf")
assert(hdl_in.getFileType() == hdl_in.EDFLIB_FILETYPE_EDFPLUS)
assert(hdl_in.getNumSignals() == 2)
assert(dblcmp_lim(hdl_in.getSampleFrequency(0), 153.8461538, 1e-6) == 0)
assert(dblcmp_lim(hdl_in.getSampleFrequency(1), 176.9230769, 1e-6) == 0)
assert(hdl_in.getTotalSamples(0) == 200)
assert(hdl_in.getTotalSamples(1) == 230)
assert((hdl_in.getNumDataRecords() * hdl_in.getLongDataRecordDuration()) == 13000000)
assert(hdl_in.getStartDateDay() == 5)
assert(hdl_in.getStartDateMonth() == 12)
assert(hdl_in.getStartDateYear() == 2017)
assert(hdl_in.getStartTimeSecond() == 8)
assert(hdl_in.getStartTimeMinute() == 23)
assert(hdl_in.getStartTimeHour() == 12)
assert(hdl_in.getStartTimeSubSecond() == 0)
assert(hdl_in.getPatientName() == "John Doe")
assert(hdl_in.getPatientCode() == "01234")
assert(hdl_in.getPatientGender() == "Male")
assert(hdl_in.getPatientBirthDate() == "04 jul 2010")
assert(hdl_in.getPatientAdditional()[0 : 3] == "nop")
assert(hdl_in.getAdministrationCode() == "789")
assert(hdl_in.getTechnician() == "Richard Roe")
assert(hdl_in.getEquipment() == "device")
assert(hdl_in.getLongDataRecordDuration() == 1300000)
assert(hdl_in.getNumDataRecords() == 10)
assert(len(hdl_in.annotationslist) == 3)
assert(hdl_in.getSignalLabel(0) == "trace1 ")
assert(hdl_in.getSignalLabel(1) == "trace2 ")
assert(hdl_in.getPhysicalMaximum(0) == 10000)
assert(hdl_in.getPhysicalMaximum(1) == -10000)
assert(hdl_in.getPhysicalMinimum(0) == -5000)
assert(hdl_in.getPhysicalMinimum(1) == -30000)
assert(hdl_in.getDigitalMaximum(0) == 10000)
assert(hdl_in.getDigitalMaximum(1) == 30000)
assert(hdl_in.getDigitalMinimum(0) == -10000)
assert(hdl_in.getDigitalMinimum(1) == 10000)
assert(hdl_in.getSampelsPerDataRecord(0) == 20)
assert(hdl_in.getSampelsPerDataRecord(1) == 23)
assert(hdl_in.getPhysicalDimension(0) == "uVxxxxxx")
assert(hdl_in.getPhysicalDimension(1) == " 0xxxxxx")
assert(hdl_in.getPreFilter(0)[0 : 9] == "qwerty ")
assert(hdl_in.getPreFilter(1)[0 : 9] == "zxcvbn ")
assert(hdl_in.getTransducer(0)[0 : 9] == "asdfgh ")
assert(hdl_in.getTransducer(1)[0 : 9] == "poklhyg ")
assert(len(hdl_in.annotationslist) == 3)
assert(hdl_in.annotationslist[0].onset == 0)
assert(hdl_in.annotationslist[0].duration == -1)
assert(hdl_in.annotationslist[0].description == "Recording starts")
assert(hdl_in.annotationslist[1].onset == 9000000)
assert(hdl_in.annotationslist[1].duration == 1000000)
assert(hdl_in.annotationslist[1].description == "Test 1")
assert(hdl_in.annotationslist[2].onset == 13000000)
assert(hdl_in.annotationslist[2].duration == -1)
assert(hdl_in.annotationslist[2].description == "Recording ends")
assert(hdl_in.fseek(1, 400, hdl_in.EDFSEEK_SET) != 400)
assert(hdl_in.fseek(0, 412, hdl_in.EDFSEEK_SET) != 412)
assert(hdl_in.fseek(0, 20, hdl_in.EDFSEEK_SET) == 20)
assert(hdl_in.readSamples(0, dbuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(dblcmp(dbuf[i], -5000) == 0)
continue
if i == 19:
assert(dblcmp(dbuf[i], 10000) == 0)
continue
assert(dblcmp_lim(dbuf[i], -5100 + (i * 800), 0.75) == 0)
assert(hdl_in.fseek(1, 23, hdl_in.EDFSEEK_SET) == 23)
assert(hdl_in.readSamples(1, dbuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(dblcmp(dbuf[i], -30000) == 0)
continue
assert(dblcmp(dbuf[i], -30100 + (i * 909)) == 0)
hdl_in.rewind(0)
assert(hdl_in.readSamples(0, dbuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(dblcmp(dbuf[i], -5000) == 0)
continue
if i == 19:
assert(dblcmp(dbuf[i], 10000) == 0)
continue
assert(dblcmp_lim(dbuf[i], -5100 + (i * 800), 0.75) == 0)
hdl_in.rewind(1)
assert(hdl_in.readSamples(1, dbuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(dblcmp(dbuf[i], -30000) == 0)
continue
assert(dblcmp(dbuf[i], -30100 + (i * 909)) == 0)
assert(hdl_in.fseek(0, 40, hdl_in.EDFSEEK_SET) == 40)
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -10000)
continue
assert(ibuf[i] == -10100 + (i * 1053))
assert(hdl_in.fseek(1, 46, hdl_in.EDFSEEK_SET) == 46)
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 10000)
continue
if (i == 20) or (i == 21):
assert(ibuf[i] == 30000)
continue
if i == 22:
assert(ibuf[i] == 10000)
continue
assert(ibuf[i] == 9900 + (i * 1053))
assert(hdl_in.fseek(0, 80, hdl_in.EDFSEEK_SET) == 80)
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -10000)
continue
assert(ibuf[i] == -10100 + (i * 1053))
assert(hdl_in.fseek(1, 92, hdl_in.EDFSEEK_SET) == 92)
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 10000)
continue
if i >= 20:
assert(ibuf[i] == 30000)
continue
assert(ibuf[i] == 9900 + (i * 1053))
assert(hdl_in.fseek(0, 60, hdl_in.EDFSEEK_SET) == 60)
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -10000)
continue
assert(ibuf[i] == -10100 + (i * 1053))
assert(hdl_in.fseek(1, 69, hdl_in.EDFSEEK_SET) == 69)
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 10000)
continue
if (i == 20) or (i == 21):
assert(ibuf[i] == 30000)
continue
if i == 22:
assert(ibuf[i] == 10000)
continue
assert(ibuf[i] == 9900 + (i * 1053))
assert(hdl_in.fseek(0, 100, hdl_in.EDFSEEK_SET) == 100)
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -10000)
continue
assert(ibuf[i] == -10100 + (i * 1053))
assert(hdl_in.fseek(1, 115, hdl_in.EDFSEEK_SET) == 115)
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 10000)
continue
if i >= 20:
assert(ibuf[i] == 30000)
continue
assert(ibuf[i] == 9900 + (i * 1053))
assert(hdl_in.readSamples(0, ibuf, 80) == 80)
for i in range(0, 80):
if i == 0:
assert(ibuf[i] == -10000)
continue
assert(ibuf[i] == -10100 + (i * 253))
assert(hdl_in.readSamples(1, ibuf, 92) == 92)
for i in range(0, 92):
if i == 0:
assert(ibuf[i] == 10000)
continue
if i >= 80:
assert(ibuf[i] == 30000)
continue
assert(ibuf[i] == 9900 + (i * 253))
assert(hdl_in.fseek(0, 185, hdl_in.EDFSEEK_SET) == 185)
assert(hdl_in.readSamples(0, ibuf, 15) == 15)
for i in range(0, 15):
assert(ibuf[i] == -10100 + ((i + 65) * 253))
assert(hdl_in.close() == 0)
#############################################
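# Each pair below flips bytes of the EDF header: first to an invalid value that
# EDFreader must reject (modify_and_try returns 3), then to a valid value that
# it must accept (returns 7). The offsets walk through the fixed header fields
# (version string, start date/time, physical/digital extrema, and so on) and
# the per-signal header records.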
assert(modify_and_try("test.edf", 1, bytes("1", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 1, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 16, bytes(" ", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 16, bytes("0", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0xaa, bytes(":", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0xaa, bytes(".", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0xab, bytes("9", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0xab, bytes("1", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0xac, bytes("q", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0xac, bytes("2", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0xc4, bytes("D", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0xc4, bytes("C", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x12e, bytes(" ", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x12e, bytes("s", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x1ac, bytes(chr(181), encoding="latin_1")) == 3)
###################################
assert(modify_and_try("test.edf", 0x1ac, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x308, bytes(" ", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x308, bytes("-", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x30d, bytes(",", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x30d, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x3a5, bytes(".", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x3a5, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x3bc, bytes(chr(207), encoding="latin_1")) == 3)
###################################
assert(modify_and_try("test.edf", 0x3bc, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x40b, bytes(chr(247), encoding="latin_1")) == 3)
###################################
assert(modify_and_try("test.edf", 0x40b, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x560, bytes(chr(127), encoding="latin_1")) == 3)
###################################
assert(modify_and_try("test.edf", 0x560, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x5ff, bytes(chr(13), encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x5ff, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x54a, bytes(".", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x54a, bytes(" ", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0xad, bytes("-", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0xad, bytes(".", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x803, bytes("0.12", encoding="ascii")) == 3)
assert(modify_and_try("test.edf", 0x803, bytes("0.131", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x803, bytes("0.130", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x802, bytes("-", encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x802, bytes("+", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x750, bytes(chr(0), encoding="ascii")) == 3)
###################################
assert(modify_and_try("test.edf", 0x750, bytes(chr(0x14), encoding="latin_1")) == 7)
assert(modify_and_try("test.edf", 0x751, bytes(chr(0), encoding="ascii")) == 7)
###################################
assert(modify_and_try("test.edf", 0x358, bytes("-32769", encoding="ascii")) == 3)
assert(modify_and_try("test.edf", 0x358, bytes("-10000", encoding="ascii")) == 7)
assert(modify_and_try("test.edf", 0x380, bytes("32768", encoding="ascii")) == 3)
assert(modify_and_try("test.edf", 0x380, bytes("10000", encoding="ascii")) == 7)
###################################
fp = open("test.edf", "rb")
fp.seek(0x600, io.SEEK_SET)
rbuf = fp.read(40)
fp.close()
for i in range(0, 20):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == -10000)
continue
if i >= 19:
assert(unpack_from("<h", rbuf, i * 2)[0] == 10000)
continue
assert(dblcmp_lim(unpack_from("<h", rbuf, i * 2)[0], ((-5100 + (i * 800)) / 0.75) - 3333.333333, 1.0001) == 0)
###################################
fp = open("test.edf", "rb")
fp.seek(0x628, io.SEEK_SET)
rbuf = fp.read(46)
fp.close()
for i in range(0, 23):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == 10000)
continue
if i >= 19:
assert(unpack_from("<h", rbuf, i * 2)[0] == (-30100 + (i * 909)) + 40000)
continue
###################################
fp = open("test.edf", "rb")
fp.seek(0x7ac, io.SEEK_SET)
rbuf = fp.read(40)
fp.close()
for i in range(0, 20):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == -10000)
continue
if i >= 19:
assert(unpack_from("<h", rbuf, i * 2)[0] == 10000)
continue
assert(dblcmp_lim(unpack_from("<h", rbuf, i * 2)[0], ((-5100 + (i * 800)) / 0.75) - 3333.333333, 1.0001) == 0)
###################################
fp = open("test.edf", "rb")
fp.seek(0x7d4, io.SEEK_SET)
rbuf = fp.read(46)
fp.close()
for i in range(0, 23):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == 10000)
continue
if i >= 19:
assert(unpack_from("<h", rbuf, i * 2)[0] == (-30100 + (i * 909)) + 40000)
continue
###################################
fp = open("test.edf", "rb")
fp.seek(0x958, io.SEEK_SET)
rbuf = fp.read(40)
fp.close()
for i in range(0, 20):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == -10000)
continue
assert(unpack_from("<h", rbuf, i * 2)[0] == -10100 + (i * 1053))
###################################
fp = open("test.edf", "rb")
fp.seek(0x980, io.SEEK_SET)
rbuf = fp.read(46)
fp.close()
for i in range(0, 23):
if (i == 0) or (i == 22):
assert(unpack_from("<h", rbuf, i * 2)[0] == 10000)
continue
if (i == 20) or (i == 21):
assert(unpack_from("<h", rbuf, i * 2)[0] == 30000)
continue
assert(unpack_from("<h", rbuf, i * 2)[0] == 9900 + (i * 1053))
###################################
fp = open("test.edf", "rb")
fp.seek(0xb04, io.SEEK_SET)
rbuf = fp.read(40)
fp.close()
for i in range(0, 20):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == -10000)
continue
assert(unpack_from("<h", rbuf, i * 2)[0] == -10100 + (i * 1053))
###################################
fp = open("test.edf", "rb")
fp.seek(0xb2c, io.SEEK_SET)
rbuf = fp.read(46)
fp.close()
for i in range(0, 23):
if (i == 0) or (i == 22):
assert(unpack_from("<h", rbuf, i * 2)[0] == 10000)
continue
if (i == 20) or (i == 21):
assert(unpack_from("<h", rbuf, i * 2)[0] == 30000)
continue
assert(unpack_from("<h", rbuf, i * 2)[0] == 9900 + (i * 1053))
###################################
fp = open("test.edf", "rb")
fp.seek(0xcb0, io.SEEK_SET)
rbuf = fp.read(40)
fp.close()
for i in range(0, 20):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == -10000)
continue
assert(unpack_from("<h", rbuf, i * 2)[0] == -10100 + (i * 1053))
###################################
fp = open("test.edf", "rb")
fp.seek(0xcd8, io.SEEK_SET)
rbuf = fp.read(46)
fp.close()
for i in range(0, 23):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == 10000)
continue
if i >= 20:
assert(unpack_from("<h", rbuf, i * 2)[0] == 30000)
continue
assert(unpack_from("<h", rbuf, i * 2)[0] == 9900 + (i * 1053))
###################################
fp = open("test.edf", "rb")
fp.seek(0xe5c, io.SEEK_SET)
rbuf = fp.read(40)
fp.close()
for i in range(0, 20):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == -10000)
continue
assert(unpack_from("<h", rbuf, i * 2)[0] == -10100 + (i * 1053))
###################################
fp = open("test.edf", "rb")
fp.seek(0xe84, io.SEEK_SET)
rbuf = fp.read(46)
fp.close()
for i in range(0, 23):
if i == 0:
assert(unpack_from("<h", rbuf, i * 2)[0] == 10000)
continue
if i >= 20:
assert(unpack_from("<h", rbuf, i * 2)[0] == 30000)
continue
assert(unpack_from("<h", rbuf, i * 2)[0] == 9900 + (i * 1053))
################################### BDF reading ###############################
hdl_in = EDFreader("test.bdf")
assert(hdl_in.getFileType() == hdl_in.EDFLIB_FILETYPE_BDFPLUS)
assert(hdl_in.getNumSignals() == 2)
assert(dblcmp_lim(hdl_in.getSampleFrequency(0), 170.9401709, 1e-5) == 0)
assert(dblcmp_lim(hdl_in.getSampleFrequency(1), 196.5811996, 1e-5) == 0)
assert(hdl_in.getTotalSamples(0) == 200)
assert(hdl_in.getTotalSamples(1) == 230)
assert((hdl_in.getNumDataRecords() * hdl_in.getLongDataRecordDuration()) == 11700000)
assert(hdl_in.getStartDateDay() == 5)
assert(hdl_in.getStartDateMonth() == 12)
assert(hdl_in.getStartDateYear() == 2017)
assert(hdl_in.getStartTimeSecond() == 8)
assert(hdl_in.getStartTimeMinute() == 23)
assert(hdl_in.getStartTimeHour() == 12)
assert(hdl_in.getStartTimeSubSecond() == 0)
assert(hdl_in.getPatientName() == "John Doe")
assert(hdl_in.getPatientCode() == "01234")
assert(hdl_in.getPatientGender() == "Male")
assert(hdl_in.getPatientBirthDate() == "04 jul 2010")
assert(hdl_in.getPatientAdditional()[0 : 3] == "nop")
assert(hdl_in.getAdministrationCode() == "789")
assert(hdl_in.getTechnician() == "Richard Roe")
assert(hdl_in.getEquipment() == "device")
assert(hdl_in.getLongDataRecordDuration() == 1170000)
assert(hdl_in.getNumDataRecords() == 10)
assert(len(hdl_in.annotationslist) == 3)
assert(hdl_in.getSignalLabel(0) == "trace1 ")
assert(hdl_in.getSignalLabel(1) == "trace2 ")
assert(hdl_in.getPhysicalMaximum(0) == 10000)
assert(hdl_in.getPhysicalMaximum(1) == -10000)
assert(hdl_in.getPhysicalMinimum(0) == -5000)
assert(hdl_in.getPhysicalMinimum(1) == -30000)
assert(hdl_in.getDigitalMaximum(0) == 1000000)
assert(hdl_in.getDigitalMaximum(1) == 3000000)
assert(hdl_in.getDigitalMinimum(0) == -1000000)
assert(hdl_in.getDigitalMinimum(1) == 1000000)
assert(hdl_in.getSampelsPerDataRecord(0) == 20)
assert(hdl_in.getSampelsPerDataRecord(1) == 23)
assert(hdl_in.getPhysicalDimension(0) == "uVxxxxxx")
assert(hdl_in.getPhysicalDimension(1) == " 0xxxxxx")
assert(hdl_in.getPreFilter(0)[0 : 9] == "qwerty ")
assert(hdl_in.getPreFilter(1)[0 : 9] == "zxcvbn ")
assert(hdl_in.getTransducer(0)[0 : 9] == "asdfgh ")
assert(hdl_in.getTransducer(1)[0 : 9] == "poklhyg ")
assert(len(hdl_in.annotationslist) == 3)
assert(hdl_in.annotationslist[0].onset == 0)
assert(hdl_in.annotationslist[0].duration == -1)
assert(hdl_in.annotationslist[0].description == "Recording starts")
assert(hdl_in.annotationslist[1].onset == 6000000)
assert(hdl_in.annotationslist[1].duration == 2000000)
assert(hdl_in.annotationslist[1].description == "Test 2")
assert(hdl_in.annotationslist[2].onset == 11700000)
assert(hdl_in.annotationslist[2].duration == -1)
assert(hdl_in.annotationslist[2].description == "Recording ends")
assert(hdl_in.fseek(1, 400, hdl_in.EDFSEEK_SET) != 400)
assert(hdl_in.fseek(0, 412, hdl_in.EDFSEEK_SET) != 412)
assert(hdl_in.fseek(0, 20, hdl_in.EDFSEEK_SET) == 20)
assert(hdl_in.readSamples(0, dbuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(dblcmp_lim(dbuf[i], -5000, 0.00001) == 0)
continue
if i == 19:
assert(dblcmp_lim(dbuf[i], 10000, 0.00001) == 0)
continue
assert(dblcmp_lim(dbuf[i], -5100 + (i * 800), 0.75) == 0)
assert(hdl_in.fseek(1, 23, hdl_in.EDFSEEK_SET) == 23)
assert(hdl_in.readSamples(1, dbuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(dblcmp_lim(dbuf[i], -30000, 0.00001) == 0)
continue
assert(dblcmp_lim(dbuf[i], -30100 + (i * 909), 0.00001) == 0)
hdl_in.rewind(0)
assert(hdl_in.readSamples(0, dbuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(dblcmp_lim(dbuf[i], -5000, 0.00001) == 0)
continue
if i == 19:
assert(dblcmp_lim(dbuf[i], 10000, 0.00001) == 0)
continue
assert(dblcmp_lim(dbuf[i], -5100 + (i * 800), 0.75) == 0)
hdl_in.rewind(1)
assert(hdl_in.readSamples(1, dbuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(dblcmp_lim(dbuf[i], -30000, 0.00001) == 0)
continue
assert(dblcmp_lim(dbuf[i], -30100 + (i * 909), 0.00001) == 0)
assert(hdl_in.fseek(0, 40, hdl_in.EDFSEEK_SET) == 40)
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -1000000)
continue
assert(ibuf[i] == -1010000 + (i * 105300))
assert(hdl_in.fseek(1, 46, hdl_in.EDFSEEK_SET) == 46)
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 1000000)
continue
if i >= 20:
assert(ibuf[i] == 3000000)
continue
assert(ibuf[i] == 990000 + (i * 105300))
assert(hdl_in.fseek(0, 80, hdl_in.EDFSEEK_SET) == 80)
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -1000000)
continue
assert(ibuf[i] == -1010000 + (i * 105300))
assert(hdl_in.fseek(1, 92, hdl_in.EDFSEEK_SET) == 92)
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 1000000)
continue
if i >= 20:
assert(ibuf[i] == 3000000)
continue
assert(ibuf[i] == 990000 + (i * 105300))
assert(hdl_in.fseek(0, 60, hdl_in.EDFSEEK_SET) == 60)
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -1000000)
continue
assert(ibuf[i] == -1010000 + (i * 105300))
assert(hdl_in.fseek(1, 69, hdl_in.EDFSEEK_SET) == 69)
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 1000000)
continue
if i >= 20:
assert(ibuf[i] == 3000000)
continue
assert(ibuf[i] == 990000 + (i * 105300))
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -1000000)
continue
assert(ibuf[i] == -1010000 + (i * 105300))
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 1000000)
continue
if i >= 20:
assert(ibuf[i] == 3000000)
continue
assert(ibuf[i] == 990000 + (i * 105300))
assert(hdl_in.readSamples(0, ibuf, 20) == 20)
for i in range(0, 20):
if i == 0:
assert(ibuf[i] == -1000000)
continue
assert(ibuf[i] == -1010000 + (i * 105300))
assert(hdl_in.readSamples(1, ibuf, 23) == 23)
for i in range(0, 23):
if i == 0:
assert(ibuf[i] == 1000000)
continue
if i >= 20:
assert(ibuf[i] == 3000000)
continue
assert(ibuf[i] == 990000 + (i * 105300))
assert(hdl_in.readSamples(0, ibuf, 80) == 80)
for i in range(0, 80):
if i == 0:
assert(ibuf[i] == -1000000)
continue
assert(ibuf[i] == -1010000 + (i * 25300))
assert(hdl_in.readSamples(1, ibuf, 92) == 92)
for i in range(0, 92):
if i == 0:
assert(ibuf[i] == 1000000)
continue
if i >= 80:
assert(ibuf[i] == 3000000)
continue
assert(ibuf[i] == 990000 + (i * 25300))
assert(hdl_in.fseek(0, 185, hdl_in.EDFSEEK_SET) == 185)
assert(hdl_in.readSamples(0, ibuf, 15) == 15)
for i in range(0, 15):
assert(ibuf[i] == -1010000 + ((i + 65) * 25300))
assert(hdl_in.close() == 0)
#############################################
assert(modify_and_try("test.bdf", 1, bytes("1", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 1, bytes("B", encoding="ascii")) == 7)
###################################
assert(modify_and_try("test.bdf", 0, bytes("0", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0, bytes("\xff", encoding="latin_1")) == 7)
###################################
fp = open("test.bdf", "rb")
fp.seek(0x600, io.SEEK_SET)
rbuf = fp.read(60)
fp.close()
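# BDF stores each sample as a 24-bit little-endian signed integer; the low byte
# is read unsigned and the upper two bytes signed, then the two parts are
# combined with a shift to reconstruct the value.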
for i in range(0, 20):
if i == 0:
itmp = int.from_bytes(rbuf[(i * 3) + 0 : (i * 3) + 1], byteorder="little", signed=False)
itmp |= (int.from_bytes(rbuf[(i * 3) + 1 : (i * 3) + 3], byteorder="little", signed=True) << 8)
assert(itmp == -1000000)
continue
if i >= 19:
itmp = int.from_bytes(rbuf[(i * 3) + 0 : (i * 3) + 1], byteorder="little", signed=False)
itmp |= (int.from_bytes(rbuf[(i * 3) + 1 : (i * 3) + 3], byteorder="little", signed=True) << 8)
assert(itmp == 1000000)
continue
itmp = int.from_bytes(rbuf[(i * 3) + 0 : (i * 3) + 1], byteorder="little", signed=False)
itmp |= (int.from_bytes(rbuf[(i * 3) + 1 : (i * 3) + 3], byteorder="little", signed=True) << 8)
assert(dblcmp_lim(itmp, ((-5100 + (i * 800)) / 0.0075) - 333333.333333, 1.0001) == 0)
###################################
fp = open("test.bdf", "rb")
fp.seek(0x63c, io.SEEK_SET)
rbuf = fp.read(69)
fp.close()
for i in range(0, 23):
if i == 0:
itmp = int.from_bytes(rbuf[(i * 3) + 0 : (i * 3) + 1], byteorder="little", signed=False)
itmp |= (int.from_bytes(rbuf[(i * 3) + 1 : (i * 3) + 3], byteorder="little", signed=True) << 8)
assert(itmp == 1000000)
continue
itmp = int.from_bytes(rbuf[(i * 3) + 0 : (i * 3) + 1], byteorder="little", signed=False)
itmp |= (int.from_bytes(rbuf[(i * 3) + 1 : (i * 3) + 3], byteorder="little", signed=True) << 8)
assert(dblcmp_lim(itmp, ((-30100 + (i * 909)) / 0.01) + 4000000.0, 1.0001) == 0)
###################################
assert(modify_and_try("test.bdf", 0x37f, bytes("7", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0x37f, bytes("8", encoding="ascii")) == 7)
###################################
assert(modify_and_try("test.bdf", 0x39e, bytes("6", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0x39e, bytes("7", encoding="ascii")) == 7)
###################################
assert(modify_and_try("test.bdf", 0x318, bytes("1 ", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0x318, bytes("-1", encoding="ascii")) == 7)
###################################
assert(modify_and_try("test.bdf", 0x358, bytes("2000000 ", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0x358, bytes("1000000 ", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0x358, bytes("-1000000", encoding="ascii")) == 7)
###################################
assert(modify_and_try("test.bdf", 0xec, bytes("+10", encoding="ascii")) == 7)
assert(modify_and_try("test.bdf", 0xec, bytes("-10", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0xec, bytes("-1 ", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0xec, bytes("0 ", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0xec, bytes(" 10", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0xec, bytes("10 ", encoding="ascii")) == 7)
###################################
assert(modify_and_try("test.bdf", 0x358, bytes("-8388609", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0x358, bytes("-1000000", encoding="ascii")) == 7)
###################################
assert(modify_and_try("test.bdf", 0x380, bytes("8388608 ", encoding="ascii")) == 3)
assert(modify_and_try("test.bdf", 0x380, bytes("1000000 ", encoding="ascii")) == 7)
################################### EDF writing ###############################
hdl_out = EDFwriter("test2.edf", EDFwriter.EDFLIB_FILETYPE_EDFPLUS, 1)
assert(hdl_out.setSampleFrequency(0, 100) == 0)
assert(hdl_out.setPhysicalMaximum(0, 10000) == 0)
assert(hdl_out.setPhysicalMinimum(0, -1000) == 0)
assert(hdl_out.setDigitalMaximum(0, 32767) == 0)
assert(hdl_out.setDigitalMinimum(0, -32768) == 0)
assert(hdl_out.setAdditionalPatientInfo("Test") == 0)
assert(hdl_out.setAdditionalRecordingInfo("tEST") == 0)
for i in range(0, 100):
dbuf[i] = 0
assert(hdl_out.writeSamples(dbuf) == 0)
assert(hdl_out.close() == 0)
################################### EDF reading ###############################
hdl_in = EDFreader("test2.edf")
assert(hdl_in.getFileType() == hdl_in.EDFLIB_FILETYPE_EDFPLUS)
assert(hdl_in.getNumSignals() == 1)
assert(hdl_in.getPatientAdditional()[0 : 4] == "Test")
assert(hdl_in.getRecordingAdditional()[0 : 4] == "tEST")
assert(hdl_in.close() == 0)
################################### EDF writing ###############################
hdl_out = EDFwriter("test3.edf", EDFwriter.EDFLIB_FILETYPE_EDFPLUS, 1)
assert(hdl_out.setDataRecordDuration(777770) == 0)
assert(hdl_out.setStartDateTime(2008, 12, 31, 23, 59, 58, 1234) == 0)
assert(hdl_out.setNumberOfAnnotationSignals(3) == 0)
for i in range(0, 60):
l_tmp = 10000 * (i + 1)
assert(hdl_out.writeAnnotation(l_tmp, -1, str("test %d sec" %(l_tmp / 10000))) == 0)
l_tmp += 3333
assert(hdl_out.writeAnnotation(l_tmp, -1, str("test %d.%04d sec" %(l_tmp / 10000, l_tmp % 10000))) == 0)
assert(hdl_out.setSampleFrequency(0, 100) == 0)
assert(hdl_out.setPhysicalMaximum(0, 10000) == 0)
assert(hdl_out.setPhysicalMinimum(0, -1000) == 0)
assert(hdl_out.setDigitalMaximum(0, 32767) == 0)
assert(hdl_out.setDigitalMinimum(0, -32768) == 0)
assert(hdl_out.setPatientName("\xc3lpha") == 0)
assert(hdl_out.setPatientCode("Br\xe0v\xf3") == 0)
assert(hdl_out.setPatientGender(1) == 0)
assert(hdl_out.setPatientBirthDate(2005, 7, 4) == 0)
assert(hdl_out.setAdditionalPatientInfo("Charlie") == 0)
assert(hdl_out.setAdministrationCode("D\xeblta") == 0)
assert(hdl_out.setTechnician("\xcbcho") == 0)
assert(hdl_out.setEquipment("Foxtr\xf6t") == 0)
assert(hdl_out.setAdditionalRecordingInfo("Golf") == 0)
for i in range(0, 100):
dbuf[i] = 0
for i in range(0, 40):
assert(hdl_out.writeSamples(dbuf) == 0)
assert(hdl_out.close() == 0)
################################### EDF reading ###############################
hdl_in = EDFreader("test3.edf")
assert(hdl_in.getFileType() == hdl_in.EDFLIB_FILETYPE_EDFPLUS)
assert(hdl_in.getNumSignals() == 1)
assert(hdl_in.getStartTimeSubSecond() == 1234000)
assert(len(hdl_in.annotationslist) == 120)
for i in range(0, 60):
assert(hdl_in.annotationslist[i * 2].onset == (10000000 * (i + 1)))
assert(hdl_in.annotationslist[i * 2].duration == -1)
assert(hdl_in.annotationslist[i * 2 + 1].onset == ((10000000 * (i + 1)) + 3333000))
assert(hdl_in.annotationslist[i * 2 + 1].duration == -1)
assert(hdl_in.getPatientName() == "Alpha")
assert(hdl_in.getPatientCode() == "Bravo")
assert(hdl_in.getPatientGender() == "Male")
assert(hdl_in.getPatientBirthDate() == "04 jul 2005")
assert(hdl_in.getPatientAdditional()[0 : 7] == "Charlie")
assert(hdl_in.getAdministrationCode() == "Delta")
assert(hdl_in.getTechnician() == "Echo")
assert(hdl_in.getEquipment() == "Foxtrot"), ("->%s<-" %(hdl_in.getEquipment()))
assert(hdl_in.getRecordingAdditional()[0 : 4] == "Golf")
assert(hdl_in.close() == 0)
fp = open("test3.edf", "rb")
rbuf = fp.read(256)
fp.close()
assert(rbuf[8 : 88].decode("ascii") == "Bravo M 04-JUL-2005 Alpha Charlie ")
assert(rbuf[88 : 168].decode("ascii") == "Startdate 31-DEC-2008 Delta Echo Foxtrot Golf ")
################################### BDF writing ###############################
hdl_out = EDFwriter("test3.bdf", EDFwriter.EDFLIB_FILETYPE_EDFPLUS, 1)
assert(hdl_out.setDataRecordDuration(110000) == 0)
assert(hdl_out.setStartDateTime(2008, 12, 31, 23, 59, 58, 1234) == 0)
assert(hdl_out.setNumberOfAnnotationSignals(3) == 0)
assert(hdl_out.writeAnnotation(10000, -1, b"\xeb\x8c\x80\xed\x95\x9c\xeb\xaf\xbc\xea\xb5\xad".decode("utf-8")) == 0)
assert(hdl_out.writeAnnotation(20000, -1, b"\xeb\x8c\x80\xed\x95\x9c\xeb\xaf\xbc\xea\xb5\xad\x00".decode("utf-8")) == 0)
assert(hdl_out.writeAnnotation(30000, -1, b"\xeb\x8c\x80\xed\x95\x9c\xeb\xaf\xbc\xea\xb5\xad\x00\x00\x00\x00\x00\x00\x00".decode("utf-8")) == 0)
for i in range(0, 50):
l_tmp = 10000 * (i + 1)
assert(hdl_out.writeAnnotation(l_tmp, -1, str("test %d sec" %(l_tmp / 10000))) == 0)
l_tmp += 3333
assert(hdl_out.writeAnnotation(l_tmp, -1, str("test %d.%04d sec" %(l_tmp / 10000, l_tmp % 10000))) == 0)
assert(hdl_out.setSampleFrequency(0, 100) == 0)
assert(hdl_out.setPhysicalMaximum(0, 10000) == 0)
assert(hdl_out.setPhysicalMinimum(0, -1000) == 0)
assert(hdl_out.setDigitalMaximum(0, 32767) == 0)
assert(hdl_out.setDigitalMinimum(0, -32768) == 0)
for i in range(0, 100):
dbuf[i] = 0
for i in range(0, 10):
assert(hdl_out.writeSamples(dbuf) == 0)
assert(hdl_out.close() == 0)
################################### BDF reading ###############################
hdl_in = EDFreader("test3.bdf")
assert(hdl_in.getFileType() == hdl_in.EDFLIB_FILETYPE_EDFPLUS)
assert(hdl_in.getNumSignals() == 1)
assert(hdl_in.getStartTimeSubSecond() == 1234000)
assert(len(hdl_in.annotationslist) == 30)
assert(hdl_in.close() == 0)
|
{"hexsha": "87e887a02ce03e118739ab6ef6d0728f2d095cc3", "size": 41252, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ext_libs/edflibpy/test/edf_unit_test.py", "max_stars_repo_name": "greydongilmore/merPrep", "max_stars_repo_head_hexsha": "d84fd5617667180ae88805a7b73d5865b79026bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ext_libs/edflibpy/test/edf_unit_test.py", "max_issues_repo_name": "greydongilmore/merPrep", "max_issues_repo_head_hexsha": "d84fd5617667180ae88805a7b73d5865b79026bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ext_libs/edflibpy/test/edf_unit_test.py", "max_forks_repo_name": "greydongilmore/merPrep", "max_forks_repo_head_hexsha": "d84fd5617667180ae88805a7b73d5865b79026bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0244565217, "max_line_length": 144, "alphanum_fraction": 0.6170367497, "include": true, "reason": "import numpy", "num_tokens": 14585}
|
"""
Created on Fri 15 May 2020
@author: Yoann Pradat
Functions for simulation different matrices for the factorization V = WH + E
"""
using Combinatorics
using LinearAlgebra
using Random
using Distributions
function scale_col(X; p=1)
norms_X = mapslices(x -> norm(x, p), X, dims=1)
X .* repeat(norms_X .^ -1, size(X, 1), 1)
end
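# Draw `n` positive integers summing to `total`: the first n-1 entries are
# resampled uniformly from 1:2μ (μ = total ÷ n) until the remainder left for
# the last entry is positive.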
function random_fixed_total(n::Integer, total::Integer, rng::AbstractRNG)
μ = total÷n
T = zeros(Integer, n)
T[1] = total
while sum(T[1:n-1]) >= total
T[1:n-1] .= rand(rng, 1:2μ, n-1)
end
T[n] = total - sum(T[1:n-1])
T
end
"""
simulate_X_clustered_a(K, N, n_clusters, dist_X, rng)
Exactly one cluster of rows is active in each cluster of columns. The number of clusters of rows is equal to the
number of clusters of columns.
"""
function simulate_X_clustered_a(K::Integer, N::Integer, n_clusters::Integer, dist_X::Distribution, rng::AbstractRNG)
X = zeros(Float64, K,N)
lims_K = cumsum(random_fixed_total(n_clusters, K, rng))
lims_N = cumsum(random_fixed_total(n_clusters, N, rng))
pushfirst!(lims_K, 0)
pushfirst!(lims_N, 0)
for i = 1:n_clusters
range_K = (lims_K[i]+1):lims_K[i+1]
range_N = (lims_N[i]+1):lims_N[i+1]
X[range_K, range_N] .= rand(rng, dist_X, length(range_K), length(range_N))
end
X, lims_K, lims_N, Matrix{Int}(I,n_clusters, n_clusters)
end
# """
# simulate_X_clustered_b(K, N, n_clusters, dist_X, rng)
#
# One or more clusters of rows are active in one cluster of columns. The number of clusters of rows is equal to the number
# of clusters of columns.
# """
# function simulate_X_clustered_b(K::Integer, N::Integer, n_clusters::Integer, dist_X::Distribution, rng::AbstractRNG)
# X = zeros(Float64, K,N)
#
# lims_K = cumsum(random_fixed_total(n_clusters, K, rng))
# lims_N = cumsum(random_fixed_total(n_clusters, N, rng))
# pushfirst!(lims_K, 0)
# pushfirst!(lims_N, 0)
#
# active_clusters = zeros(Int, n_clusters, n_clusters)
# combinations_K = collect(combinations(1:n_clusters))
# randperm_comb_K = randperm(rng, 2^n_clusters-1)
# for j = 1:n_clusters
# active_clusters[combinations_K[randperm_comb_K[j]], j] .= 1
# end
#
# for j = 1:n_clusters
# range_N = (lims_N[j]+1):lims_N[j+1]
# for i in 1:n_clusters
# range_K = (lims_K[i]+1):lims_K[i+1]
# if active_clusters[i,j] == 1
# X[range_K, range_N] .= rand(rng, dist_X, length(range_K), length(range_N))
# end
# end
# end
#
# X, lims_K, lims_N, active_clusters
# end
"""
simulate_X_clustered_b(K, N, n_clusters_K, n_clusters_N, dist_X, rng)
One or more clusters of rows are active in each cluster of columns. The number of clusters of rows must be less
than or equal to the number of clusters of columns.
"""
function simulate_X_clustered_b(K::Integer, N::Integer, n_clusters_K::Integer, n_clusters_N::Integer, dist_X::Distribution, rng::AbstractRNG)
X = zeros(Float64, K, N)
n_clusters_K <= n_clusters_N || throw(ArgumentError("Please choose n_clusters_K <= n_clusters_N"))
lims_K = cumsum(random_fixed_total(n_clusters_K, K, rng))
lims_N = cumsum(random_fixed_total(n_clusters_N, N, rng))
pushfirst!(lims_K, 0)
pushfirst!(lims_N, 0)
active_clusters = zeros(Int, n_clusters_K, n_clusters_N)
combinations_K = collect(combinations(1:n_clusters_K))
randperm_comb_K = randperm(rng, 2^n_clusters_K-1)
for j = 1:n_clusters_N
active_clusters[combinations_K[randperm_comb_K[j]], j] .= 1
end
for j = 1:n_clusters_N
range_N = (lims_N[j]+1):lims_N[j+1]
for i in 1:n_clusters_K
range_K = (lims_K[i]+1):lims_K[i+1]
if active_clusters[i,j] == 1
X[range_K, range_N] .= rand(rng, dist_X, length(range_K), length(range_N))
end
end
end
X, lims_K, lims_N, active_clusters
end
"""
simulate_WH_clustered(; F, N, K, n_clusters_K, n_clusters_N, dist_W, dist_H, H_cluster_mode, rng)
Simulate W and H with clustering on H.
"""
function simulate_WH_clustered(;F::Integer, N::Integer, K::Integer, n_clusters_K::Integer=0, n_clusters_N::Integer, dist_W::Distribution, dist_H::Distribution, H_cluster_mode::Symbol, rng::AbstractRNG)
W = rand(rng, dist_W, F, K)
W = scale_col(W, p=1)
if H_cluster_mode == :a
H, lims_K, lims_N, active_clusters = simulate_X_clustered_a(K, N, n_clusters_N, dist_H, rng)
elseif H_cluster_mode == :b
        H, lims_K, lims_N, active_clusters = simulate_X_clustered_b(K, N, n_clusters_K, n_clusters_N, dist_H, rng)
end
W, H, lims_K, lims_N, active_clusters
end
"""
    simulate_WH(; F, N, n_factors, dist_W, dist_H, rng)
Simulate W and H.
"""
function simulate_WH(;F::Integer, N::Integer, n_factors::Integer, dist_W::Distribution, dist_H::Distribution, rng::AbstractRNG)
K = n_factors
W = rand(rng, dist_W, F, K)
W = scale_col(W, p=1)
H = rand(rng, dist_H, K, N)
W, H
end
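# A minimal usage sketch (hypothetical values; assumes the imports above):
#
#   rng = MersenneTwister(42)
#   W, H = simulate_WH(F=96, N=200, n_factors=8,
#                      dist_W=Exponential(1.0), dist_H=Exponential(1.0), rng=rng)
#   V = W * H   # noiseless factorization V = WH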
|
{"hexsha": "b32eff05bcbeeb3740332cec668893aff50fe873", "size": 5020, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "demo/src/simulation/simulate.jl", "max_stars_repo_name": "Durzot/MT_NMF", "max_stars_repo_head_hexsha": "a3e3c2fb4a23cc09e78e1ad1e324787c6017a4fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demo/src/simulation/simulate.jl", "max_issues_repo_name": "Durzot/MT_NMF", "max_issues_repo_head_hexsha": "a3e3c2fb4a23cc09e78e1ad1e324787c6017a4fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/src/simulation/simulate.jl", "max_forks_repo_name": "Durzot/MT_NMF", "max_forks_repo_head_hexsha": "a3e3c2fb4a23cc09e78e1ad1e324787c6017a4fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.375, "max_line_length": 201, "alphanum_fraction": 0.6613545817, "num_tokens": 1546}
|
(* Title: Kleene Algebra
Author: Alasdair Armstrong, Georg Struth, Tjark Weber
Maintainer: Georg Struth <g.struth at sheffield.ac.uk>
Tjark Weber <tjark.weber at it.uu.se>
*)
header {* Dioids *}
theory Dioid
imports Signatures
begin
subsection {* Join Semilattices *}
text {* Join semilattices can be axiomatised order-theoretically or
algebraically. A join semilattice (or upper semilattice) is either a
poset in which every pair of elements has a join (or least upper
bound), or a set endowed with an associative, commutative, idempotent
binary operation. It is well known that the order-theoretic definition
induces the algebraic one and vice versa. We start from the algebraic
axiomatisation because it is easily expandable to dioids, using
Isabelle's type class mechanism.
In Isabelle/HOL, a type class @{class semilattice_sup} is available.
Alas, we cannot use this type class because we need the symbol~@{text
"+"} for the join operation in the dioid expansion and subclass
proofs in Isabelle/HOL require the two type classes involved to have
the same fixed signature.
Using {\em add\_assoc} as a name for the first assumption in class
{\em join\_semilattice} would lead to name clashes: we will later
define classes that inherit from @{class semigroup_add}, which
provides its own assumption {\em add\_assoc}, and prove that these are
subclasses of {\em join\_semilattice}. Hence the primed name.
*}
class join_semilattice = plus_ord +
assumes add_assoc' [simp]: "(x + y) + z = x + (y + z)"
and add_comm [simp]: "x + y = y + x"
and add_idem [simp]: "x + x = x"
begin
lemma add_left_comm [simp]:
"b + (a + c) = a + (b + c)"
unfolding add_assoc' [symmetric] by simp
lemma add_left_idem [simp]:
"a + (a + b) = a + b"
unfolding add_assoc' [symmetric] by simp
text {* The definition @{term "x \<le> y \<longleftrightarrow> x + y = y"} of the order is
hidden in class @{class plus_ord}.
We show some simple order-based properties of semilattices. The
first one states that every semilattice is a partial order. *}
subclass order
proof
fix x y z :: 'a
show "x < y \<longleftrightarrow> x \<le> y \<and> \<not> y \<le> x"
by (metis add_comm less_def less_eq_def)
show "x \<le> x"
by (metis add_idem less_eq_def)
show "x \<le> y \<Longrightarrow> y \<le> z \<Longrightarrow> x \<le> z"
by (metis add_assoc' less_eq_def)
show "x \<le> y \<Longrightarrow> y \<le> x \<Longrightarrow> x = y"
by (metis add_comm less_eq_def)
qed
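text {* A standard instance: the subsets of any set form a join
semilattice under union, and the order just derived is set inclusion,
since $A \cup B = B$ iff $A \subseteq B$. *}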
text {* Next we show that joins are least upper bounds. *}
lemma add_ub1 [simp]: "x \<le> x + y"
by (metis add_assoc' add_idem less_eq_def)
lemma add_ub2 [simp]: "y \<le> x + y"
by (metis add_assoc' add_comm add_idem less_eq_def)
lemma add_lub_var: "x \<le> z \<longrightarrow> y \<le> z \<longrightarrow> x + y \<le> z"
by (metis add_assoc' less_eq_def)
lemma add_lub: "x + y \<le> z \<longleftrightarrow> x \<le> z \<and> y \<le> z"
by (metis add_lub_var add_ub1 add_ub2 order_trans)
text {* Next we prove that joins are isotone (order preserving). *}
lemma add_iso: "x \<le> y \<longrightarrow> x + z \<le> y + z"
by (metis add_lub add_ub2 less_eq_def)
lemma add_iso_var: "x \<le> y \<longrightarrow> u \<le> v \<longrightarrow> x + u \<le> y + v"
by (metis add_comm add_iso add_lub)
text {* The next lemma links the definition of order as @{term "x \<le> y
\<longleftrightarrow> x + y = y"}
with a perhaps more conventional one known, e.g., from
arithmetics. *}
lemma order_prop: "x \<le> y \<longleftrightarrow> (\<exists>z. x + z = y)"
proof
assume "x \<le> y"
hence "x + y = y"
by (metis less_eq_def)
thus "\<exists>z. x + z = y"
by auto
next
assume "\<exists>z. x + z = y"
then obtain c where "x + c = y"
by auto
also have "x + c \<le> y"
by (metis calculation eq_refl)
thus "x \<le> y"
by (metis add_ub1 calculation)
qed
end (* join_semilattice *)
subsection {* Join Semilattices with an Additive Unit *}
text {* We now expand join semilattices by an additive unit~$0$. It is
the least element with respect to the order, and is therefore often
denoted by~@{text \<bottom>}. Semilattices with a least element are often
called \emph{bounded}. *}
class join_semilattice_zero = join_semilattice + zero +
assumes add_zero_l [simp]: "0 + x = x"
begin
subclass comm_monoid_add
apply unfold_locales
apply auto
apply (metis add_comm add_zero_l)
done
lemma zero_least [simp]: "0 \<le> x"
by (metis add_zero_l less_eq_def)
lemma add_zero_r [simp]: "x + 0 = x"
by (metis add_comm add_zero_l)
lemma zero_unique [simp]: "x \<le> 0 \<longleftrightarrow> x = 0"
by (metis zero_least eq_iff)
lemma no_trivial_inverse: "x \<noteq> 0 \<longrightarrow> \<not>(\<exists>y. x + y = 0)"
by (metis zero_unique order_prop)
end (* join_semilattice_zero *)
subsection {* Near Semirings *}
text {* \emph{Near semirings} (also called seminearrings) are
generalisations of near rings to the semiring case. They have been
studied, for instance, in G.~Pilz's book~\cite{pilz83nearrings} on
near rings. According to his definition, a near semiring consists of
an additive and a multiplicative semigroup that interact via a single
distributivity law (left or right). The additive semigroup is not
required to be commutative. The definition is influenced by partial
transformation semigroups.
We only consider near semirings in which addition is commutative, and
in which the right distributivity law holds. We call such near
semirings \emph{abelian}. *}
class ab_near_semiring = ab_semigroup_add + semigroup_mult +
assumes distrib_right': "(x + y) \<cdot> z = x \<cdot> z + y \<cdot> z"
subclass (in semiring) ab_near_semiring
by (unfold_locales, metis distrib_right)
subsection {* Variants of Dioids *}
text {* A \emph{near dioid} is an abelian near semiring in which
addition is idempotent. This generalises the notion of (additively)
idempotent semirings by dropping one distributivity law. Near dioids
are a starting point for process algebras.
By modelling variants of dioids as variants of semirings in which
addition is idempotent we follow the tradition of
Birkhoff~\cite{birkhoff67lattices}, but deviate from the definitions
in Gondran and Minoux's book~\cite{gondran10graphs}. *}
class near_dioid = ab_near_semiring + plus_ord +
assumes add_idem' [simp]: "x + x = x"
begin
text {* Since addition is idempotent, the additive (commutative)
semigroup reduct of a near dioid is a semilattice. Near dioids are
therefore ordered by the semilattice order. *}
subclass join_semilattice
by unfold_locales (auto simp add: add.commute add.left_commute)
text {* It follows that multiplication is right-isotone (but not
necessarily left-isotone). *}
lemma mult_isor: "x \<le> y \<longrightarrow> x \<cdot> z \<le> y \<cdot> z"
proof
assume "x \<le> y"
hence "x + y = y"
by (metis less_eq_def)
also have "x \<cdot> z + y \<cdot> z = (x + y) \<cdot> z"
by (metis distrib_right')
moreover have "... = y \<cdot> z"
by (metis calculation)
thus "x \<cdot> z \<le> y \<cdot> z"
by (metis calculation less_eq_def)
qed
lemma "x \<le> y \<longrightarrow> z \<cdot> x \<le> z \<cdot> y"
nitpick [expect=genuine] -- "3-element counterexample"
oops
text {* The next lemma states that, in every near dioid, left
isotonicity and left subdistributivity are equivalent. *}
lemma mult_isol_equiv_subdistl:
"(\<forall>x y z. x \<le> y \<longrightarrow> z \<cdot> x \<le> z \<cdot> y) \<longleftrightarrow> (\<forall>x y z. z \<cdot> x \<le> z \<cdot> (x + y))"
by (metis add_ub1 less_eq_def)
end (* near_dioid *)
text {* We now make multiplication in near dioids left isotone, which
is equivalent to left subdistributivity, as we have seen. The
corresponding structures form the basis of probabilistic Kleene
algebras~\cite{mciverweber05pka} and game
algebras~\cite{venema03gamealgebra}. We are not aware that these
structures have a special name, so we baptise them \emph{pre-dioids}.
We do not explicitly define pre-semirings since we have no application
for them. *}
class pre_dioid = near_dioid +
assumes subdistl: "z \<cdot> x \<le> z \<cdot> (x + y)"
begin
text {* Now, obviously, left isotonicity follows from left
subdistributivity. *}
lemma subdistl_var: "z \<cdot> x + z \<cdot> y \<le> z \<cdot> (x + y)"
by (metis add.commute add_lub subdistl)
lemma mult_isol: "x \<le> y \<longrightarrow> z \<cdot> x \<le> z \<cdot> y"
proof
assume "x \<le> y"
hence "x + y = y"
by (metis less_eq_def)
also have "z \<cdot> x + z \<cdot> y \<le> z \<cdot> (x + y)"
by (metis subdistl_var)
moreover have "... = z \<cdot> y"
by (metis calculation)
thus "z \<cdot> x \<le> z \<cdot> y"
by (metis add_ub1 calculation order_trans)
qed
lemma mult_isol_var: "u \<le> x \<and> v \<le> y \<longrightarrow> u \<cdot> v \<le> x \<cdot> y"
by (metis mult_isol mult_isor order_trans)
lemma mult_double_iso: "x \<le> y \<longrightarrow> w \<cdot> x \<cdot> z \<le> w \<cdot> y \<cdot> z"
by (metis mult_isol mult_isor)
end (* pre_dioid *)
text {* By adding a full left distributivity law we obtain semirings
(which are already available in Isabelle/HOL as @{class semiring})
from near semirings, and dioids from near dioids. Dioids are therefore
idempotent semirings. *}
class dioid = near_dioid + semiring
subclass (in dioid) pre_dioid
by (unfold_locales, metis order_prop distrib_left)
subsection {* Families of Nearsemirings with a Multiplicative Unit *}
text {* Multiplicative units are important, for instance, for defining
an operation of finite iteration or Kleene star on dioids. We do not
introduce left and right units separately since we have no application
for this. *}
class ab_near_semiring_one = ab_near_semiring + one +
assumes mult_onel [simp]: "1 \<cdot> x = x"
and mult_oner [simp]: "x \<cdot> 1 = x"
begin
subclass monoid_mult
by (unfold_locales, simp_all)
end (* ab_near_semiring_one *)
class near_dioid_one = near_dioid + ab_near_semiring_one
text {* For near dioids with one, it would be sufficient to require
$1+1=1$, since this implies @{term "x+x=x"} for arbitrary~@{term x}. We
nevertheless keep the stronger axiom, as the weaker one would lead to
annoying redundant proof obligations in mutual subclasses of @{class
near_dioid_one} and @{class near_dioid} later.
*}
class pre_dioid_one = pre_dioid + near_dioid_one
class dioid_one = dioid + near_dioid_one
subclass (in dioid_one) pre_dioid_one ..
subsection {* Families of Nearsemirings with Additive Units *}
text {*
We now axiomatise an additive unit~$0$ for nearsemirings. The zero is
usually required to satisfy annihilation properties with respect to
multiplication. Due to applications we distinguish a zero which is
only a left annihilator from one that is also a right annihilator.
More briefly, we call zero either a left unit or a unit.
Semirings and dioids with a right zero only can be obtained from those
with a left unit by duality.
*}
class ab_near_semiring_one_zerol = ab_near_semiring_one + zero +
assumes add_zerol [simp]: "0 + x = x"
and annil [simp]: "0 \<cdot> x = 0"
begin (* ab_near_semiring_one_zerol *)
text {* Note that we do not require~$0 \neq 1$. *}
lemma add_zeror [simp]: "x + 0 = x"
by (metis add.commute add_zerol)
end (* ab_near_semiring_one_zerol *)
class near_dioid_one_zerol = near_dioid_one + ab_near_semiring_one_zerol
subclass (in near_dioid_one_zerol) join_semilattice_zero
by (unfold_locales, metis add_zerol)
class pre_dioid_one_zerol = pre_dioid_one + ab_near_semiring_one_zerol
subclass (in pre_dioid_one_zerol) near_dioid_one_zerol ..
class semiring_one_zerol = semiring + ab_near_semiring_one_zerol
class dioid_one_zerol = dioid_one + ab_near_semiring_one_zerol
subclass (in dioid_one_zerol) pre_dioid_one_zerol ..
text {* We now make zero also a right annihilator. *}
class ab_near_semiring_one_zero = ab_near_semiring_one_zerol +
assumes annir [simp]: "x \<cdot> 0 = 0"
class semiring_one_zero = semiring + ab_near_semiring_one_zero
class near_dioid_one_zero = near_dioid_one_zerol + ab_near_semiring_one_zero
class pre_dioid_one_zero = pre_dioid_one_zerol + ab_near_semiring_one_zero
subclass (in pre_dioid_one_zero) near_dioid_one_zero ..
class dioid_one_zero = dioid_one_zerol + ab_near_semiring_one_zero
subclass (in dioid_one_zero) pre_dioid_one_zero ..
subclass (in dioid_one_zero) semiring_one_zero ..
subsection {* Duality by Opposition *}
text {*
Swapping the order of multiplication in a semiring (or dioid) gives
another semiring (or dioid), called its \emph{dual} or
\emph{opposite}.
*}
definition (in times) opp_mult (infixl "\<odot>" 70)
where "x \<odot> y \<equiv> y \<cdot> x"
lemma (in semiring_1) dual_semiring_1:
"class.semiring_1 1 (op \<odot>) (op +) 0"
by unfold_locales (auto simp add: opp_mult_def mult.assoc distrib_right distrib_left)
lemma (in dioid_one_zero) dual_dioid_one_zero:
"class.dioid_one_zero (op +) (op \<odot>) 1 0 (op \<le>) (op <)"
by unfold_locales (auto simp add: opp_mult_def mult.assoc distrib_right distrib_left)
subsection {* Selective Near Semirings *}
text {* In this section we briefly sketch a generalisation of the
notion of \emph{dioid} in which addition is \emph{selective}: the sum
of two elements is always one of them. Some important models, e.g. the
max-plus and min-plus semirings, have that property. *}
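text {* For instance, in the max-plus semiring over $\mathbb{R} \cup
\{-\infty\}$ addition is the maximum, so $x + y = \max(x,y)$ is always
one of $x$ and $y$; the min-plus (tropical) semiring is dual. *}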
class selective_near_semiring = ab_near_semiring + plus_ord +
assumes select: "x + y = x \<or> x + y = y"
begin
lemma select_alt: "x + y \<in> {x,y}"
by (metis insert_iff select)
text {* It follows immediately that every selective near semiring is a
near dioid. *}
subclass near_dioid
by (unfold_locales, metis select)
text {* Moreover, the order in a selective near semiring is obviously
linear. *}
subclass linorder
by (unfold_locales, metis add.commute add_ub1 select)
end (*selective_near_semiring*)
class selective_semiring = selective_near_semiring + semiring_one_zero
begin
subclass dioid_one_zero ..
end (* selective_semiring *)
end
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/Kleene_Algebra/Dioid.thy"}
|
function x = RofCurve(X,Y)
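% RofCurve fits the circle through the three points (X(i), Y(i)) and
% returns x = [xc yc R]: the circle centre and the (absolute) radius of
% curvature.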
% X=[10 0 -8];
% Y=[0 10 0];
% Defining the three nonlinear equations to be solved (equation of circle
% passing through the 3 points)
defn1=['[ ( x(1)-' num2str(X(1)) ' )^2+( x(2)-' num2str(Y(1)) ')^2-x(3)^2 ;' ];
defn2=[ '( x(1)-' num2str(X(2)) ' )^2+( x(2)-' num2str(Y(2)) ')^2-x(3)^2 ;' ];
defn3=[ '( x(1)-' num2str(X(3)) ' )^2+( x(2)-' num2str(Y(3)) ')^2-x(3)^2 ]' ];
inlinedef=[defn1 defn2 defn3];
% Define the inline function returning the three residuals, which fsolve
% drives to zero
myfun=inline(inlinedef,'x');
options = optimset('Display','off','TolX',1e-10);
x0=[0;0;0];
[x,fval] = fsolve(myfun,x0,options);
x=[x(1) x(2) abs(x(3))];
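% Note: inline() is deprecated in favour of anonymous functions; an
% equivalent sketch of the same root-finding setup (same fsolve options):
%
%   circfun = @(x) [(x(1)-X(1))^2 + (x(2)-Y(1))^2 - x(3)^2;
%                   (x(1)-X(2))^2 + (x(2)-Y(2))^2 - x(3)^2;
%                   (x(1)-X(3))^2 + (x(2)-Y(3))^2 - x(3)^2];
%   [x, fval] = fsolve(circfun, x0, options);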
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/12889-airfoil-analyzer/Airfoil_Analyzer/FitCircle.m"}
|
program async_test
use :: mpi_f08
implicit none
integer :: my_rank
call MPI_Init()
call MPI_Comm_rank(MPI_COMM_WORLD, my_rank)
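    ! MPI_ASYNC_PROTECTS_NONBLOCKING is an MPI-3 compile-time constant from
    ! mpi_f08; it is .true. when declaring buffers ASYNCHRONOUS is enough to
    ! protect them during nonblocking communication.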
if (MPI_ASYNC_PROTECTS_NONBLOCKING) then
print '("rank ", I0, " protects async")', my_rank
else
print '("rank ", I0, " does not protect async")', my_rank
end if
call MPI_Finalize()
end program async_test
|
{"hexsha": "24b52913aa21b790c78fe960b8b6de1802ec2082", "size": 384, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Debugging/Mpi/PointToPointFortran/async_test.f90", "max_stars_repo_name": "Gjacquenot/training-material", "max_stars_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 115, "max_stars_repo_stars_event_min_datetime": "2015-03-23T13:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T00:27:21.000Z", "max_issues_repo_path": "Debugging/Mpi/PointToPointFortran/async_test.f90", "max_issues_repo_name": "Gjacquenot/training-material", "max_issues_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 56, "max_issues_repo_issues_event_min_datetime": "2015-02-25T15:04:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T07:42:48.000Z", "max_forks_repo_path": "Debugging/Mpi/PointToPointFortran/async_test.f90", "max_forks_repo_name": "Gjacquenot/training-material", "max_forks_repo_head_hexsha": "16b29962bf5683f97a1072d961dd9f31e7468b8d", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 59, "max_forks_repo_forks_event_min_datetime": "2015-11-26T11:44:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T00:27:22.000Z", "avg_line_length": 25.6, "max_line_length": 65, "alphanum_fraction": 0.65625, "num_tokens": 106}
|
__precompile__()
module SimpleWebsockets
using HTTP, Base64, Sockets, MbedTLS
import Sockets: listen
export WebsocketServer,
WebsocketClient,
WebsocketConnection,
RequestDetails,
WebsocketError,
ConnectError,
CallbackError,
FrameError
export serve, send, ping, listen, emit
include("opt/errors.jl")
include("opt/vars.jl")
include("opt/utils.jl")
include("lib/WebsocketConnection.jl")
include("lib/WebsocketClient.jl")
include("lib/WebsocketServer.jl")
end
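# A minimal usage sketch based on the exported names above (hypothetical --
# the exact signatures live in lib/WebsocketServer.jl):
#
#   server = WebsocketServer()
#   listen(server, :client) do client
#       listen(client, :message) do message
#           send(client, "echo: $message")
#       end
#   end
#   serve(server, 8080)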
|
{"hexsha": "2c437e6e7a0dd34ec6ca8a7f0a9bfceb8c876611", "size": 525, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SimpleWebsockets.jl", "max_stars_repo_name": "citkane/SimpleWebsockets", "max_stars_repo_head_hexsha": "2ba66e4716de7ba4589b26a41338c84cdbcaf044", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-10-14T11:12:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T10:16:03.000Z", "max_issues_repo_path": "src/SimpleWebsockets.jl", "max_issues_repo_name": "citkane/SimpleWebsockets", "max_issues_repo_head_hexsha": "2ba66e4716de7ba4589b26a41338c84cdbcaf044", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-06T19:01:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-26T18:00:03.000Z", "max_forks_repo_path": "src/SimpleWebsockets.jl", "max_forks_repo_name": "citkane/SimpleWebsockets", "max_forks_repo_head_hexsha": "2ba66e4716de7ba4589b26a41338c84cdbcaf044", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-01T14:30:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-01T14:30:53.000Z", "avg_line_length": 21.0, "max_line_length": 38, "alphanum_fraction": 0.7047619048, "num_tokens": 127}
|
[STATEMENT]
lemma [code]:
\<open>Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis
[foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0] + s\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0] + s
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0] + s
[PROOF STEP]
have \<open>foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0 = of_char (Char b0 b1 b2 b3 b4 b5 b6 False)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. foldr (\<lambda>b k. of_bool b + k * (2::'a)) [b0, b1, b2, b3, b4, b5, b6] (0::'a) = of_char (Char b0 b1 b2 b3 b4 b5 b6 False)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
foldr (\<lambda>b k. of_bool b + k * (2::?'a1)) [b0, b1, b2, b3, b4, b5, b6] (0::?'a1) = of_char (Char b0 b1 b2 b3 b4 b5 b6 False)
goal (1 subgoal):
1. Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0] + s
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
foldr (\<lambda>b k. of_bool b + k * (2::?'a1)) [b0, b1, b2, b3, b4, b5, b6] (0::?'a1) = of_char (Char b0 b1 b2 b3 b4 b5 b6 False)
goal (1 subgoal):
1. Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0] + s
[PROOF STEP]
have \<open>Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis
[of_char (Char b0 b1 b2 b3 b4 b5 b6 False)] + s\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [of_char (Char b0 b1 b2 b3 b4 b5 b6 False)] + s
[PROOF STEP]
by (unfold Literal'_def) (transfer, simp only: list.simps comp_apply char_of_char, simp)
[PROOF STATE]
proof (state)
this:
Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [of_char (Char b0 b1 b2 b3 b4 b5 b6 False)] + s
goal (1 subgoal):
1. Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0] + s
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
foldr (\<lambda>b k. of_bool b + k * (2::?'a1)) [b0, b1, b2, b3, b4, b5, b6] (0::?'a1) = of_char (Char b0 b1 b2 b3 b4 b5 b6 False)
Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [of_char (Char b0 b1 b2 b3 b4 b5 b6 False)] + s
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
foldr (\<lambda>b k. of_bool b + k * (2::?'a1)) [b0, b1, b2, b3, b4, b5, b6] (0::?'a1) = of_char (Char b0 b1 b2 b3 b4 b5 b6 False)
Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [of_char (Char b0 b1 b2 b3 b4 b5 b6 False)] + s
goal (1 subgoal):
1. Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0] + s
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Literal' b0 b1 b2 b3 b4 b5 b6 s = String.literal_of_asciis [foldr (\<lambda>b k. of_bool b + k * 2) [b0, b1, b2, b3, b4, b5, b6] 0] + s
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1718, "file": null, "length": 10}
|
import copy
import typing
import math
import numpy as np
import revlib
import torch
import torch.utils.data
from deepspeed.runtime import lr_schedules
from torch.nn import functional as F
from src.dataclass import Context
from src.optimizers.build import build_optimizer
QUAD_TENSOR = typing.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
def orthonormal(inp: typing.Union[torch.Tensor, torch.nn.Parameter, typing.List[int]], gain: float):
original_input = inp
if isinstance(inp, list):
inp = torch.zeros(inp)
if isinstance(inp, torch.nn.Parameter):
inp = inp.data
flat_shape = (inp.shape[0], np.prod(inp.shape[1:]))
a = torch.rand(flat_shape)
u, _, v = torch.linalg.svd(a, full_matrices=False)
inp.copy_((u if u.shape == flat_shape else v).reshape(inp.shape).mul(gain).to(device=inp.device, dtype=inp.dtype))
if isinstance(original_input, list):
return torch.nn.Parameter(inp)
return original_input
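# orthonormal() above is an SVD-based (semi-)orthogonal initialiser: it fills
# the given Tensor/Parameter with gain-scaled orthonormal rows or columns, or,
# when handed a shape list, allocates and returns a new Parameter.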
def init_(t, dim = None):
dim = dim if dim is not None else t.shape[-1]
std = 1. / math.sqrt(dim)
return torch.nn.init.normal_(t, mean=0, std=std)
class TripleNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, scale0: torch.Tensor, scale1: torch.Tensor, shift: torch.Tensor, norm_power: int):
# linear_attention chunk names:
# scale0 = depth, scale1 = scale, shift = shift
scale0_relu = scale0.relu()
inp = scale0_relu.pow(3) * scale1 + shift
inp = inp - inp.mean(1, True)
rstd = inp.size(1) ** (1 / norm_power) / inp.norm(norm_power, 1, True)
inp *= rstd
if scale1.requires_grad:
ctx.save_for_backward(scale0_relu, scale1, inp, rstd)
return inp
@staticmethod
def backward(ctx, dout: torch.Tensor):
if not ctx.saved_tensors:
return None, None, None, None
scale0_relu, scale1, out, rstd = ctx.saved_tensors
dout = dout * rstd
dout -= (dout * out).mean(1, True) * out
dout -= dout.mean(1, True)
d_scale = dout * scale0_relu.square()
return d_scale * scale1 * 3, d_scale * scale0_relu, dout, None
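# A quick numerical sanity check for the hand-written backward above (a
# sketch, not part of the training path; gradcheck may flag the ReLU kink
# at exactly zero):
#
#   s0 = torch.randn(2, 6, 3, dtype=torch.float64, requires_grad=True)
#   s1 = torch.randn(2, 6, 3, dtype=torch.float64, requires_grad=True)
#   sh = torch.randn(2, 6, 3, dtype=torch.float64, requires_grad=True)
#   torch.autograd.gradcheck(lambda a, b, c: TripleNorm.apply(a, b, c, 2), (s0, s1, sh))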
def conv(inp: torch.Tensor, weight: torch.Tensor, groups: int, use_pad: bool) -> torch.Tensor:
if use_pad and weight.size()[-1] - 1 > 0:
inp = F.pad(inp, (weight.size()[-1] - 1, 0))
return F.conv1d(inp, weight, groups=groups)
def expert_matmul(inp: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
return torch.einsum("bgf,gfo->bgo", inp, weight)
class AuxLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, inp: torch.Tensor):
ctx.save_for_backward(inp)
return inp
@staticmethod
def backward(ctx, grad_outputs: torch.Tensor):
inp, = ctx.saved_tensors
inp.mean().backward()
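# rnoise() below implements the "Riemann noise" regularizer configured via
# get_riemann_noise_params(): a multiplicative scale sigma is built from the
# learned parameters (A, b, alpha, r) and per-channel statistics of x, and the
# result is r * sigma * (x + gaussian noise).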
def rnoise(params, zero, x):
N, c, d = x.shape
A, b, alpha, r = params
mu = x.sum(1, keepdim=True)
mu_mean = mu.sum(dim=(1),keepdim=True)*(1/c)
s = mu - mu_mean
s = s / torch.abs(s).max()
sd = A * s + b
s = alpha*sd + (1 - alpha) + 1
sigma = s / torch.linalg.vector_norm(s)
out = r * sigma * x + r * sigma * zero.repeat(x.shape).normal_()
return out
def moe(inp: torch.Tensor, expert_weights: torch.nn.ParameterList, r: typing.Optional[torch.nn.ParameterList], zero: typing.Optional[torch.Tensor], training: bool,
jitter_epsilon: float, feature_shuffle: torch.Tensor, groups: int, experts: int, model_noise: bool) -> torch.Tensor:
*expert_weights, gate = expert_weights
batch, features, sequence = inp.size()
tokens = batch * sequence
capacity = tokens // experts
# get gates
if gate.dtype != torch.float32:
gate = gate.float()
input_fp32 = inp.float()
if training and model_noise:
input_fp32 = rnoise(r, zero, input_fp32)
elif training:
input_fp32 = input_fp32 * (torch.rand_like(input_fp32) * jitter_epsilon + 1)
inp = input_fp32.transpose(1, 2).reshape(tokens, features)
#matrix multiplication to find tokens' most similar expert
logits = inp.mm(gate)
gates = F.softmax(logits, dim=1)
# calculate permutation/ assign experts
with torch.no_grad():
mask = torch.ones_like(gates[:, 0])
out = []
for g in gates.unbind(1):
_, idx = torch.topk(g * mask, capacity, 0)
out.append(idx)
mask[idx] = 0
expert_permutation = torch.stack(out, 1)
expert_permutation = expert_permutation.view(-1, 1).long()
permutation_inverse = torch.argsort(expert_permutation, 0).view(-1, 1)
expert_index = permutation_inverse // capacity
# apply loss
    AuxLoss.apply(gates.sum() / tokens)  # .apply (not instantiation) runs the custom Function
inp = inp * gates.gather(1, expert_index)
# permute
inp = inp.gather(0, expert_permutation.expand_as(inp))
if feature_shuffle is not None:
inp = inp.gather(1, feature_shuffle.view(1, -1).expand_as(inp))
inp = inp.view(tokens // experts, experts * groups, features // groups)
if len(expert_weights) == 1:
inp = expert_matmul(inp, expert_weights[0])
else:
inp = torch.cat([expert_matmul(c, w) for c, w in zip(inp.chunk(len(expert_weights), 1), expert_weights)], -1)
inp = inp.reshape(tokens, -1)
inp = inp.gather(0, permutation_inverse.view(-1, 1).expand_as(inp))
inp = inp.view(batch, sequence, -1).transpose(1, 2)
return inp
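# Routing recap for moe() above: tokens are flattened to (batch * sequence,
# features); each expert in turn greedily claims its top `capacity` tokens by
# gate score while already-claimed tokens are masked out, so every expert
# processes exactly tokens // experts tokens (this assumes tokens divides
# evenly by experts; there is no overflow or padding path).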
def moe_check(inp: torch.Tensor, w: torch.nn.ParameterList, r: typing.Optional[torch.nn.ParameterList], zero: typing.Optional[torch.Tensor], training: bool,
jitter_epsilon: float, feature_shuffle: torch.Tensor, groups: int, experts: int, model_noise: bool) -> torch.Tensor:
if experts > 0:
return moe(inp, w, r, zero, training, jitter_epsilon, feature_shuffle, groups, experts, model_noise)
return conv(inp, w[0], groups, False)
def linear_attention(inp: torch.Tensor, divisor: torch.Tensor,
w0: typing.Union[torch.nn.ParameterList, torch.nn.Parameter], r0: typing.Optional[torch.nn.ParameterList],
feature_shuffle0: typing.Optional[torch.Tensor], groups0: int, experts0: int,
w1: torch.Tensor, r1: typing.Optional[torch.nn.ParameterList], w2: torch.nn.ParameterList, zero: typing.Optional[torch.Tensor],
feature_shuffle2: typing.Optional[torch.Tensor], groups2: int, experts2: int,
input_cache: torch.Tensor, cumsum_cache: torch.Tensor, bottleneck_group: int, training: bool,
caching: bool, idx: int, norm_power: int, jitter_epsilon: float,
pkm_layer: bool, pkm_keys: torch.nn.Parameter, pkm_values: typing.Optional[torch.nn.EmbeddingBag], input_dropout: typing.Optional[torch.nn.Dropout],
query_dropout: typing.Optional[torch.nn.Dropout], value_dropout: typing.Optional[torch.nn.Dropout],
pkm_topk: int, num_keys: int, pkm_heads: int, norm: typing.Optional[torch.nn.BatchNorm1d], model_noise: bool
) -> typing.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# TODO: Fix kernel_size back to being dynamic
kernel_size = 7
pad = True
if not training and caching:
if idx - 1 > kernel_size and inp.size(2) == 1:
pad = False
inp = torch.cat([input_cache, inp], -1)
input_cache = inp[:, :, -kernel_size + 1:].detach()
    # w0 and w2 = moe params
    # input projection: features -> intermediate * 3
    # input dims = (batch, features, sequence)
    # after projection: inp.shape = (batch, intermediate * 3, sequence)
inp = moe_check(inp, w0, r0, zero, training, jitter_epsilon, feature_shuffle0, groups0, experts0, model_noise)
    # split projected tensor into three chunks, each of intermediate size
depth, scale, shift = inp.chunk(3, 1)
cum = depth.cumsum(-1)
if not training and caching:
cum = cum + cumsum_cache
scale = scale[:, :, -1:]
shift = shift[:, :, -1:]
cum = cum[:, :, -1:]
if idx - 1 > kernel_size:
cumsum_cache = cum.detach()
# intermediate * 3 -> intermediate
inp = TripleNorm.apply(cum / divisor, scale, shift, norm_power)
if pkm_layer:
inp = conv(inp, w1, groups2, True)
inp = inp.transpose(2,1)
inp = pkm(inp, w2, pkm_keys, pkm_values, input_dropout, query_dropout, value_dropout, pkm_topk, num_keys, pkm_heads, norm)
inp = inp.transpose(2,1)
else:
# intermediate -> intermediate * 3
inp = conv(inp, w1, bottleneck_group, pad)
# intermediate * 3 -> intermediate
inp = TripleNorm.apply(*inp.chunk(3, 1), norm_power)
# intermediate -> features
inp = moe_check(inp, w2, r1, zero, training, jitter_epsilon, feature_shuffle2, groups2, experts2, False)
return input_cache, cumsum_cache, inp
def pkm(inp: torch.Tensor, to_queries: torch.nn.Parameter, pkm_keys: torch.nn.Parameter,
pkm_values: torch.nn.EmbeddingBag, input_dropout: torch.nn.Dropout,
query_dropout:torch.nn.Dropout, value_dropout: torch.nn.Dropout, pkm_topk: int,
num_keys: int, heads: int, norm: torch.nn.BatchNorm1d):
b, t, e, h = *inp.shape, heads
inp = input_dropout(inp)
queries = F.linear(inp,to_queries)
queries = norm(queries)
queries = query_dropout(queries)
queries = queries.chunk(2, dim=-1)
queries = torch.stack(queries).reshape(2, b, t, h, -1)
dots = torch.einsum('pbthd,hnpd->bthpn', queries, pkm_keys)
scores, indices = dots.topk(k=pkm_topk, dim=-1)
scores, indices = map(lambda x: x.chunk(2, dim=3), (scores, indices))
all_topk = pkm_topk ** 2
shape = (b, t, h, all_topk)
all_scores = (
scores[0][..., :, None] +
scores[1][..., None, :]
).reshape(*shape)
all_indices = (
indices[0][..., :, None] * num_keys +
indices[1][..., None, :]
).reshape(*shape)
final_topk, final_indices = all_scores.topk(pkm_topk, dim=-1)
value_indices = all_indices.gather(-1, final_indices)
attn = final_topk.softmax(dim=-1)
value_indices, attn = map(lambda x: x.reshape(-1, pkm_topk * h), (value_indices, attn))
out = pkm_values(value_indices, per_sample_weights=attn)
out = value_dropout(out)
return out.reshape(b, t, e)
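# pkm() above implements product-key memories in the style of Lample et al.
# (2019): the query is split into two halves, each half is matched against its
# own sub-key set, and the Cartesian product of the two per-half top-k
# candidate lists (indexing num_keys ** 2 virtual slots) is re-ranked to fetch
# the final top-k values from the EmbeddingBag.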
# w1 inputs:
# conv_weight(intermediate, intermediate * 3, ctx.model.conv_kernel_size, ctx.model.bottleneck_group,
# ctx.model.activation_std)
def conv_weight(in_features: int, out_features: int, kernel_size: int, groups: int, std: float):
return orthonormal(torch.nn.Conv1d(in_features, out_features, (kernel_size,), groups=groups).weight, 1 / std)
class Trainer(torch.nn.Module):
def __init__(self, ctx: Context, model: torch.nn.Module, data: typing.Optional[torch.Tensor]):
super(Trainer, self).__init__()
self.ctx = ctx
self.model = torch.jit.trace(model, data) if data else model
self.optimizer = build_optimizer(ctx, self.model.parameters())
self.scheduler = lr_schedules.OneCycle(self.optimizer,
ctx.optimizer.one_cycle.cycle_min_lr,
ctx.optimizer.one_cycle.cycle_max_lr,
ctx.optimizer.one_cycle.decay_lr_rate,
ctx.optimizer.one_cycle.cycle_first_step_size,
ctx.optimizer.one_cycle.cycle_second_step_size,
ctx.optimizer.one_cycle.cycle_first_stair_count,
ctx.optimizer.one_cycle.cycle_second_stair_count,
ctx.optimizer.one_cycle.decay_step_size,
ctx.optimizer.one_cycle.cycle_momentum,
ctx.optimizer.one_cycle.cycle_min_mom,
ctx.optimizer.one_cycle.cycle_max_mom,
ctx.optimizer.one_cycle.decay_mom_rate,
ctx.optimizer.one_cycle.last_batch_iteration)
@torch.no_grad()
def _to_device_detach(self, inp: torch.Tensor) -> torch.Tensor:
return inp.to(device=self.ctx.model.device, non_blocking=True).detach()
def _forward_backward(self, src: torch.Tensor, tgt: torch.Tensor) -> torch.Tensor:
loss = F.cross_entropy(self.model(self._to_device_detach(src)), self._to_device_detach(tgt))
loss.backward()
return loss.detach()
@torch.no_grad()
def _clip_gradient(self):
for p in self.gradients():
g_norm = p.grad.norm(2, 0, True).clamp(min=self.ctx.optimizer.agc.zero_division_eps)
p_norm = p.norm(2, 0, True).clamp(min=self.ctx.optimizer.agc.eps)
grad_scale = (p_norm / g_norm * self.ctx.optimizer.agc.gradient_clipping).clamp(max=1)
p.grad.data.copy_(p.grad * grad_scale)
def accumulated_step(self, data: torch.Tensor) -> torch.Tensor:
loss = sum(self._forward_backward(s, t) for s, t in zip(*data))
self._clip_gradient()
return loss
@torch.no_grad()
def zero_grad(self):
for p in self.model.parameters():
p.grad = None
@torch.no_grad()
def gradients(self) -> torch.nn.Parameter:
for p in self.model.parameters():
if p.grad is None:
continue
yield p
def save(self):
torch.save(self.state_dict(), self.ctx.model.checkpoint_path)
def load(self):
wrong_keys = self.load_state_dict(torch.load(self.ctx.model.checkpoint_path), strict=False)
for key in wrong_keys.missing_keys + wrong_keys.unexpected_keys:
if not any(k.startswith('_') for k in key.split('.')):
if key in wrong_keys.missing_keys:
raise ValueError(f"{key} is missing in checkpoint but exists in model")
if key in wrong_keys.unexpected_keys:
raise ValueError(f"{key} is missing in model but exists in checkpoint")
class MomentumNetSide(torch.nn.Module):
def __init__(self, beta: float):
super(MomentumNetSide, self).__init__()
self.beta = beta
def forward(self, inp: torch.Tensor):
return inp * self.beta
class LinearAttention(torch.nn.Module):
def __init__(self, ctx: Context):
super(LinearAttention, self).__init__()
self.embedding = torch.nn.Embedding(ctx.dataset.classes, ctx.model.features * 2).to(ctx.model.device)
orthonormal(self.embedding.weight, ctx.model.input_embedding_std * 2 ** -0.5)
pos_embd = torch.arange(0, ctx.model.sequence_length).unsqueeze(0) + 1
self.register_buffer("divisor", pos_embd.unsqueeze(0).to(torch.float).to(ctx.model.device))
cell = LinearAttentionCell(self, ctx, 1)
self.stem = revlib.ReversibleSequential(*[c
for i in range(1, 1 + ctx.model.depth)
for c in [cell.momentum((1 - ctx.model.momentumnet_beta) /
ctx.model.momentumnet_beta ** i, not ctx.model.weight_sharing, i),
MomentumNetSide(ctx.model.momentumnet_beta ** i)]],
target_device=ctx.model.device)
self.output = torch.nn.Conv1d(ctx.model.features * 2, ctx.dataset.classes, (1,)).to(ctx.model.device)
torch.nn.init.zeros_(self.output.weight.data)
def forward(self, inp: torch.Tensor):
return self.output(self.stem(self.embedding(inp).transpose(1, 2)))
def reset_cache(self):
for mod in self.stem.modules():
if isinstance(mod, LinearAttentionCell):
mod.reset_cache()
class MaskedBatchNorm1D(torch.nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, mask = None):
b, t, d = x.shape
has_mask = mask is not None
if has_mask:
initial_x = x
mask = mask.unsqueeze(-1)
x = x.masked_select(mask)
shape = x.shape
x = x.reshape(-1, d)
x = self.fn(x)
x = x.reshape(*shape)
if has_mask:
x = initial_x.masked_scatter(mask, x)
return x
class ParameterStore(torch.nn.Module):
"""
    Something (likely DeepSpeed) collapses every parameter in a ParameterList to [1], even though standalone
    parameters work. That is why a torch.nn.ModuleList of ParameterStores is used instead.
"""
def __init__(self, param: torch.Tensor):
super(ParameterStore, self).__init__()
self.param = torch.nn.Parameter(param)
def __repr__(self):
return (f'{self.__class__.__name__}(shape={str(list(self.param.size()))}, device={self.param.device}, '
f'dtype={self.param.dtype})')
def get_riemann_noise_params(size):
params = []
params.append(torch.nn.Parameter(torch.rand(1, size)))
params.append(torch.nn.Parameter(torch.rand(1, )))
params.append(torch.nn.Parameter(torch.rand(1, )))
params.append(torch.nn.Parameter(torch.rand(1, )))
return torch.nn.ParameterList(params)
def get_moe_param(in_features: int, out_features: int, groups: int, experts: int, expert_chunks: int, std: float
) -> typing.List[torch.nn.Parameter]:
if experts:
experts = groups if experts < 0 else experts
out = orthonormal([in_features // groups, out_features // groups], std).view(1, in_features // groups, -1)
out = out.repeat(experts // expert_chunks * groups, 1, 1).detach()
gate = [orthonormal([in_features, experts], 1)]
return [torch.nn.Parameter(copy.deepcopy(out)) for _ in range(expert_chunks)] + gate
return [torch.nn.Parameter(conv_weight(in_features, out_features, 1, groups, std))]
class LinearAttentionCell(torch.nn.Module):
def __init__(self, base: LinearAttention, ctx: Context, init_scale: float):
super(LinearAttentionCell, self).__init__()
self.divisor = lambda: base.divisor
self.init_scale = init_scale
self.caching = ctx.eval.cache
self.kernel_size = ctx.model.conv_kernel_size
self.bottleneck_group = ctx.model.bottleneck_group
self.norm_power = ctx.model.norm_power
self.groups0 = ctx.model.input_groups
self.groups2 = ctx.model.output_groups
self.experts0 = ctx.model.experts_in_input
self.experts2 = ctx.model.experts_in_output
self.jitter_epsilon = ctx.model.moe_jitter_epsilon
self.activation_std = ctx.model.activation_std
self.num_features = ctx.model.features
self.expert_chunks = ctx.model.expert_chunks
self.pkm = ctx.model.pkm.use_pkm
self.pkm_layers = ctx.model.pkm.pkm_layer_depths
self.ff_factor = ctx.model.feed_forward_intermediate_factor
self.input_dropout = ctx.model.pkm.input_dropout
self.query_dropout = ctx.model.pkm.query_dropout
self.value_dropout = ctx.model.pkm.value_dropout
self.pkm_topk = ctx.model.pkm.topk
self.pkm_num_keys = ctx.model.pkm.num_keys
self.pkm_layer = False
self.pkm_heads = ctx.model.pkm.heads
self.pkm_dim_head = ctx.model.pkm.dim_head
self.pkm_keys = None
self.pkm_values = None # Will be initialized upon cell copy if layer_num in pkm_layers
self.norm = None
self.model_noise = ctx.model.use_riemann_noise
intermediate = int(ctx.model.features * ctx.model.feed_forward_intermediate_factor)
# conv_weight params:
# in_features: int, out_features: int, kernel_size: int, groups: int, std: float
self.w0 = torch.nn.ParameterList(get_moe_param(ctx.model.features, intermediate * 3, self.groups0,
self.experts0, self.expert_chunks, ctx.model.activation_std))
self.w1 = conv_weight(intermediate, intermediate * 3, ctx.model.conv_kernel_size, ctx.model.bottleneck_group,
ctx.model.activation_std)
if ctx.model.use_riemann_noise:
self.r0 = get_riemann_noise_params(ctx.model.features)
self.r1 = get_riemann_noise_params(ctx.model.features)
self.zero_holder = torch.Tensor([0]).to(torch.device('cuda'))
else:
self.r0 = None
self.r1 = None
self.zero_holder = None
self.w2 = torch.nn.ParameterList(get_moe_param(intermediate, ctx.model.features, self.groups2,
self.experts2, self.expert_chunks, 1))
self.idx: int = 0
self._input_cache = torch.zeros([])
self._cumsum_cache = torch.zeros([])
if ctx.model.feature_shuffle:
self.register_buffer("feature_shuffle0", torch.argsort(torch.randn(ctx.model.features)).view(1, -1, 1))
self.register_buffer("feature_shuffle2", torch.argsort(torch.randn(intermediate)).view(1, -1, 1))
else:
self.feature_shuffle0 = None
self.feature_shuffle2 = None
def layer_check(self, layer_num: int):
# Method to modify variables according to depth
self.layer_num = layer_num
if self.pkm:
if layer_num in self.pkm_layers:
self.pkm_layer = True
self.experts2 = 0
dim_query = self.pkm_dim_head * self.pkm_heads
intermediate = int(self.num_features * self.ff_factor)
if dim_query % 2 != 0:
raise ValueError("Invalid PKM dim query. \"model.pkm.dim_head\" * \
\"model.pkm_heads\" must equal a number divisible by two.")
self.w1 = conv_weight(intermediate, self.num_features, self.kernel_size,
self.groups2, self.activation_std)
self.w2 = torch.nn.Parameter(torch.normal(torch.zeros(dim_query, self.num_features),
torch.ones(dim_query, self.num_features)))
# w2 == "keys"
self.pkm_keys = torch.nn.Parameter(torch.zeros(self.pkm_heads,
self.pkm_num_keys, 2, self.pkm_dim_head // 2))
self.pkm_values = torch.nn.EmbeddingBag(self.pkm_num_keys ** 2, self.num_features, mode='sum', sparse=True)
# Use MaskedBatchNorm1D if using mask objective
self.norm = torch.nn.BatchNorm1d(self.num_features)
init_(self.pkm_keys)
init_(self.pkm_values.weight)
self.input_dropout = torch.nn.Dropout(self.input_dropout)
self.query_dropout = torch.nn.Dropout(self.query_dropout)
self.value_dropout = torch.nn.Dropout(self.value_dropout)
def reset_cache(self):
self._cumsum_cache = torch.zeros([])
self._input_cache = torch.zeros([])
self.idx = 0
def forward(self, inp: torch.Tensor) -> torch.Tensor:
if self.training:
div = self.divisor()
elif self.caching:
self.idx += inp.size(2)
div = torch.LongTensor([self.idx]).to(inp.device)
else:
self.idx = inp.size(2)
div = torch.arange(self.idx, device=inp.device).view(1, 1, -1) + 1
self._input_cache, self._cumsum_cache, out = linear_attention(inp, div,
self.w0, self.r0, self.feature_shuffle0, self.groups0,
self.experts0,
self.w1, self.r1,
self.w2, self.zero_holder, self.feature_shuffle2, self.groups2,
self.experts2, self._input_cache,
self._cumsum_cache, self.bottleneck_group,
self.training, self.caching, self.idx,
self.norm_power, self.jitter_epsilon,
self.pkm_layer, self.pkm_keys, self.pkm_values,
self.input_dropout, self.query_dropout,
self.value_dropout, self.pkm_topk, self.pkm_num_keys, self.pkm_heads,
self.norm, self.model_noise
)
out = out * self.init_scale
return out
def momentum(self, init_scale: float, deep: bool, layer_num: int):
out = copy.deepcopy(self) if deep else copy.copy(self)
out.init_scale = init_scale
out.layer_check(layer_num)
return out
|
{"hexsha": "b4335d40809a8abad2d221ae481ce1b3dec59539", "size": 25647, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model.py", "max_stars_repo_name": "ClashLuke/HomebrewNLP", "max_stars_repo_head_hexsha": "18d9a9a32af4e5e5672a9261ef6ac613dc9194c0", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/model.py", "max_issues_repo_name": "ClashLuke/HomebrewNLP", "max_issues_repo_head_hexsha": "18d9a9a32af4e5e5672a9261ef6ac613dc9194c0", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model.py", "max_forks_repo_name": "ClashLuke/HomebrewNLP", "max_forks_repo_head_hexsha": "18d9a9a32af4e5e5672a9261ef6ac613dc9194c0", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.7158469945, "max_line_length": 169, "alphanum_fraction": 0.6025266113, "include": true, "reason": "import numpy", "num_tokens": 5854}
|
macro checktoplevel(ctx, expr)
quote
if !istoplevel($(esc(ctx)).current)
msg($(esc(ctx)), :E100, "$($(esc(expr))) expression must be at top level")
return
end
end
end
macro checkisa(ctx, var, typ)
quote
if !isa($(esc(var)), $(esc(typ)))
msg($(esc(ctx)), :E101, $(esc(var)), "this expression must be a $($(esc(typ)))")
return
end
end
end
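# Example usage (hypothetical call sites): a lint handler can guard itself with
#     @checktoplevel(ctx, "module")       # emits :E100 and returns unless at top level
#     @checkisa(ctx, ex.args[1], Symbol)  # emits :E101 unless the value is a Symbol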
function lintexpr(ex::Symbol, ctx::LintContext)
registersymboluse(ex, ctx)
end
function lintexpr(ex::QuoteNode, ctx::LintContext)
if typeof(ex.value) == Expr
ctx.quoteLvl += 1
lintexpr(ex.value, ctx)
ctx.quoteLvl -= 1
end
end
function lintexpr(ex::Expr, ctx::LintContext)
# TODO: reenable linthelpers
if ex.head == :line
        # ignore line number nodes
return
elseif ex.head == :block
lintblock(ex, ctx)
elseif ex.head == :quote
ctx.quoteLvl += 1
lintexpr(ex.args[1], ctx)
ctx.quoteLvl -= 1
elseif ex.head == :if
lintifexpr(ex, ctx)
elseif ex.head == :(=) && typeof(ex.args[1])==Expr && ex.args[1].head == :call
lintfunction(ex, ctx)
elseif expand_assignment(ex) !== nothing
ea = expand_assignment(ex)
lintassignment(Expr(:(=), ea[1], ea[2]), ctx)
elseif ex.head == :local
lintlocal(ex, ctx)
elseif ex.head == :global
lintglobal(ex, ctx)
elseif ex.head == :const
if typeof(ex.args[1]) == Expr && ex.args[1].head == :(=)
lintassignment(ex.args[1], ctx; isConst = true)
end
elseif ex.head == :module
lintmodule(ex, ctx)
elseif ex.head == :export
lintexport(ex, ctx)
elseif isexpr(ex, [:import, :using, :importall])
lintimport(ex, ctx)
elseif ex.head == :comparison # only the odd indices
for i in 1:2:length(ex.args)
# comparison like match != 0:-1 is allowed, and shouldn't trigger lint warnings
if Meta.isexpr(ex.args[i], :(:)) && length(ex.args[i].args) == 2 &&
typeof(ex.args[i].args[1]) <: Real &&
typeof(ex.args[i].args[2]) <: Real
continue
else
lintexpr(ex.args[i], ctx)
end
end
lintcomparison(ex, ctx)
elseif ex.head == :type
linttype(ex, ctx)
elseif ex.head == :typealias
# TODO: deal with X{T} = Y assignments, also const X = Y
linttypealias(ex, ctx)
elseif ex.head == :abstract
lintabstract(ex, ctx)
elseif ex.head == :bitstype
lintbitstype(ex, ctx)
elseif ex.head == :(->)
lintlambda(ex, ctx)
elseif ex.head == :($) && ctx.quoteLvl > 0 # an unquoted node inside a quote node
ctx.quoteLvl -= 1
lintexpr(ex.args[1], ctx)
ctx.quoteLvl += 1
elseif ex.head == :function
lintfunction(ex, ctx)
elseif ex.head == :stagedfunction
lintfunction(ex, ctx, isstaged=true)
elseif ex.head == :macrocall && ex.args[1] == Symbol("@generated")
lintfunction(ex.args[2], ctx, isstaged=true)
elseif ex.head == :macro
lintmacro(ex, ctx)
elseif ex.head == :macrocall
lintmacrocall(ex, ctx)
elseif ex.head == :call
lintfunctioncall(ex, ctx)
elseif ex.head == :(:) # TODO(felipe) check for `Colon()`
lintrange(ex, ctx)
elseif ex.head == :(::) # type assert/convert
lintexpr(ex.args[1], ctx)
elseif ex.head == :(.) # a.b
lintexpr(ex.args[1], ctx)
elseif ex.head == :ref # it could be a ref a[b], or an array Int[1,2]
lintref(ex, ctx)
    elseif ex.head == :typed_vcat # a typed concatenation such as Int[1; 2]
linttyped_vcat(ex, ctx)
elseif ex.head == :vcat
lintvcat(ex, ctx)
elseif ex.head == :vect # 0.4
lintvect(ex, ctx)
elseif ex.head == :hcat
linthcat(ex, ctx)
elseif ex.head == :typed_hcat
linttyped_hcat(ex, ctx)
elseif ex.head == :cell1d
lintcell1d(ex, ctx)
elseif ex.head == :while
lintwhile(ex, ctx)
elseif ex.head == :for
lintfor(ex, ctx)
elseif ex.head == :let
lintlet(ex, ctx)
elseif ex.head in (:comprehension, :dict_comprehension, :generator)
lintgenerator(ex, ctx; typed = false)
elseif ex.head in (:typed_comprehension, :typed_dict_comprehension)
lintgenerator(ex, ctx; typed = true)
elseif ex.head == :try
linttry(ex, ctx)
elseif ex.head == :curly # e.g. Ptr{T}
lintcurly(ex, ctx)
elseif ex.head in [:(&&), :(||)]
lintboolean(ex.args[1], ctx)
lintexpr(ex.args[2], ctx) # do not enforce boolean. e.g. b==1 || error("b must be 1!")
elseif ex.head == :incomplete
msg(ctx, :E112, ex.args[1])
else
for sube in ex.args
lintexpr(sube, ctx)
end
end
end
# no-op fallback for other kinds of expressions (e.g. LineNumberNode) that we
# don’t care to handle
lintexpr(::Any, ::LintContext) = return
|
{"hexsha": "c1564b16a72209664cd6a9ab59f02eb5fa72db71", "size": 5035, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ast.jl", "max_stars_repo_name": "FelipeLema/Lint.jl", "max_stars_repo_head_hexsha": "37153b427c6666a2abbfc4bfcd538fb458ad2d31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 159, "max_stars_repo_stars_event_min_datetime": "2015-01-02T08:17:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T14:42:02.000Z", "max_issues_repo_path": "src/ast.jl", "max_issues_repo_name": "FelipeLema/Lint.jl", "max_issues_repo_head_hexsha": "37153b427c6666a2abbfc4bfcd538fb458ad2d31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 192, "max_issues_repo_issues_event_min_datetime": "2015-01-01T04:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-05T16:31:49.000Z", "max_forks_repo_path": "src/ast.jl", "max_forks_repo_name": "FelipeLema/Lint.jl", "max_forks_repo_head_hexsha": "37153b427c6666a2abbfc4bfcd538fb458ad2d31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 57, "max_forks_repo_forks_event_min_datetime": "2015-02-08T15:31:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T17:02:36.000Z", "avg_line_length": 33.3443708609, "max_line_length": 94, "alphanum_fraction": 0.566633565, "num_tokens": 1406}
|
from fabric import Connection
import pandas as pd
import numpy as np
import time
import os
import subprocess
import signal
import server_config
asynch_rl_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
remote_relaunch = True
net_version = str(1002)
difficulty = str(2)
print('preparing to connect')
host = server_config.username + "@" + server_config.ip_address
password = server_config.password
c = Connection(host=host, connect_kwargs={"password": password})
print('connection completed')
result = c.run("cat /home/" + server_config.username + "/GitHub_repos/asynch-rl/Data/RobotEnv/ConvModel" + net_version + "/train_log.txt")
print("data extracted")
df = pd.DataFrame.from_records([ i.split(';') for i in result.stdout.split('\n')][:-1], columns = ['iteration', 'date'])
df.loc[-1] = df.loc[0] # adding a row
df.index = df.index + 1 # shifting index
df.sort_index(inplace=True)
df['date'] = pd.to_datetime(df['date'])
df['duration'] = 0.0
df.loc[df.index[1:], 'duration'] = (df['date'][1:].values - df['date'][:-1].values) / np.timedelta64(1, 's')
last_iteration = df.iloc[-1]['iteration']
df = df[2:]
print(f'{df[-20:]}')
print(f'last iteration: {last_iteration}')
video_condition = False and not int(last_iteration) % 20  # hard-disabled; drop the leading False to re-enable on every 20th iteration
makedir_string = "mkdir "+asynch_rl_path+"/asynch-rl/Data/RobotEnv/ConvModel"+net_version+"/"
copy_string = "sshpass -p "+server_config.password+" scp "+server_config.username+"@"+server_config.ip_address+":GitHub_repos/asynch-rl/Data/RobotEnv/ConvModel"\
+net_version+"/\{'TrainingLog.pkl','train_params.txt','*'$iteration'*','val_history.npy','PG_training.npy'\} "+asynch_rl_path+"/asynch-rl/Data/RobotEnv/ConvModel"+net_version+"/"
os.system(makedir_string)
os.system(copy_string)
os.system("bash "+ asynch_rl_path +"/asynch-rl/launch_test.sh VERS='"+ net_version +"' ITER='"+last_iteration+"' DIFF='"+str(difficulty) +"' SIM='"+ str(video_condition) +"' SAVE='True' RL='AC'")
time.sleep(5)
os.system("cp "+ asynch_rl_path +"/asynch-rl/Data/RobotEnv/ConvModel"+net_version+"/video/*.png ~/Dropbox/CrowdNavigationTraining")
"""
if video_condition:
time.sleep(200)
os.system("cp "+ asynch_rl_path +"/asynch-rl/Data/RobotEnv/ConvModel"+net_version+"/video/*"+str(last_iteration)+"* ~/Dropbox/CrowdNavigationTraining")
"""
current_duration = round(time.time() - df.iloc[-1]['date'].timestamp())
print(f'current duration: {current_duration}s')
if current_duration > 300: #3*df[df['duration']<300]['duration'].mean():
os.system("rm ~/Dropbox/CrowdNavigationTraining/SIMULATION_STALLED_*")
os.system("touch ~/Dropbox/CrowdNavigationTraining/SIMULATION_STALLED_"+ last_iteration)
if remote_relaunch:
try:
c.run("kill -9 -1 -u "+server_config.username)
except Exception:
print('pseudo error after kill')
relaunch_string = "nohup sshpass -p '"+password+"' ssh "+host+" \"nohup bash -sl < perform_training.sh DIFF='"+difficulty+"' VERS='"+net_version+"' ITER='"+last_iteration+"'\" > "+asynch_rl_path+\
"/asynch-rl/Data/RobotEnv/ConvModel" +net_version+"/nohup.log 2>&1 "
os.system(relaunch_string)
#relaunch_command = "nohup bash "+ asynch_rl_path +"/asynch-rl/launch_training.sh 'ITER'=" + last_iteration + " 'VERS'=" + net_version + " &"
#os.system(relaunch_command)
time.sleep(250)
print('########### relaunch completed ###########')
else:
print('########### iteration went well. simulation advancing...###########')
os.system("rm ~/Dropbox/CrowdNavigationTraining/last_successful*")
os.system("touch ~/Dropbox/CrowdNavigationTraining/last_successful_"+ last_iteration +" &")
|
{"hexsha": "7d1dca639ad7a2dac7bc5eacb0f71289d28c30b4", "size": 3707, "ext": "py", "lang": "Python", "max_stars_repo_path": "autoload_CrowdNav.py", "max_stars_repo_name": "EnricoReg/asynch-rl", "max_stars_repo_head_hexsha": "acd01a49a7a4b8ff4ff0694d1e24274ba87691ee", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "autoload_CrowdNav.py", "max_issues_repo_name": "EnricoReg/asynch-rl", "max_issues_repo_head_hexsha": "acd01a49a7a4b8ff4ff0694d1e24274ba87691ee", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autoload_CrowdNav.py", "max_forks_repo_name": "EnricoReg/asynch-rl", "max_forks_repo_head_hexsha": "acd01a49a7a4b8ff4ff0694d1e24274ba87691ee", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9902912621, "max_line_length": 205, "alphanum_fraction": 0.6868087402, "include": true, "reason": "import numpy", "num_tokens": 972}
|
"""
PatientTrajectory.py
Examples
--------
# Patient that goes through all stages but eventually recovers
>>> K = 5
>>> p = PatientTrajectory()
>>> p.state_ids = [0, 1, 2, 3, 4]
>>> p.durations = [5, 4, 3, 2, 2]
>>> p.health_state_ids = [0, 0, 0, 0, 1]
>>> p.is_terminal_0 = False
>>> T = np.sum(p.durations) + 1
>>> empty_count_TK = np.zeros((T, K), dtype=np.int32)
>>> t_st = 0 # start time,
## Check terminal counts is accurate
>>> term_T1 = p.update_terminal_count_matrix(empty_count_TK.copy()[:,0], t_st)
>>> np.sum(term_T1)
0
## Check occupancy counts are accurate
>>> occ_TK = p.update_count_matrix(empty_count_TK.copy(), 0)
>>> np.allclose(np.sum(occ_TK[:-1,:], axis=1), 1.0)
True
>>> occ_TK
array([[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]], dtype=int32)
>>> trans_time_ids = np.hstack([[0], np.cumsum(p.durations)])
## Check admit counts has only one entry
>>> admit_TK = p.update_admit_count_matrix(empty_count_TK.copy(), 0)
>>> np.sum(admit_TK)
1
>>> admit_TK[trans_time_ids]
array([[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]], dtype=int32)
## Check discharge counts has only one entry
>>> discharge_TK = p.update_discharge_count_matrix(empty_count_TK.copy(), 0)
>>> np.sum(discharge_TK)
1
>>> discharge_TK[trans_time_ids, :]
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1]], dtype=int32)
>>> summary_dict = p.get_length_of_stay_summary_dict()
>>> summary_dict['duration_State00']
5
>>> summary_dict['duration_State00+Recovering']
0
>>> summary_dict['duration_State00+Declining']
5
>>> summary_dict['duration_State01']
4
>>> summary_dict['duration_State02']
3
>>> summary_dict['duration_State03']
2
>>> summary_dict['duration_State04']
2
"""
import numpy as np
import pandas as pd
from collections import defaultdict
HEALTH_STATE_ID_TO_NAME = {0: 'Declining', 1: 'Recovering'}
class PatientTrajectory(object):
    ''' Represents a simulated patient trajectory using a semi-Markov model
    Attributes
    ----------
    durations : list_of_int
        List of integers giving the duration of each (ordinal state, health state) pair
    state_ids : list_of_int
        List of integer indicators for the current ordinal state
    health_state_ids : list_of_int
        List of binary health states (0: declining, 1: recovering)
    is_terminal_0 : bool
        Indicates whether the final state is terminal and declining
    '''
def __init__(self, start_state=None, config_dict=None, prng=None, next_state_map=None, state_name_to_id=None, t=None):
''' Construct a PatientTrajectory from provided input
Args
----
start_state : str
Name of state the patient starts in
config_dict : dict
Dict containing parameters of semi-markov process (read from JSON file)
prng : numpy RandomState
Returns
-------
Newly constructed PatientTrajectory instance
'''
self.durations = list()
self.state_ids = list()
self.health_state_ids = list()
self.state_name_to_id = state_name_to_id
        self.is_terminal_0 = False  # set properly by simulate_trajectory when a start state is given
if start_state is None:
# Shortcut for testing. Avoid need to mockup config dict, etc.
pass
else:
self.simulate_trajectory(start_state, config_dict, prng, next_state_map, state_name_to_id, t)
def simulate_trajectory(self, start_state, config_dict, prng, next_state_map, state_name_to_id, t):
## Simulate trajectory
state = start_state
health_state_id = 0
while (state != 'TERMINAL' and state != 'RELEASE'):
if health_state_id < 1:
health_state_id = prng.rand() < config_dict['proba_Recovering_given_%s' % state]
choices_and_probas_dict = config_dict['pmf_duration_%s_%s' % (HEALTH_STATE_ID_TO_NAME[health_state_id], state)]
choices = np.fromiter(choices_and_probas_dict.keys(), dtype=np.int32)
probas = np.fromiter(choices_and_probas_dict.values(), dtype=np.float64)
try:
assert(np.allclose(1.0, np.sum(probas)))
except AssertionError as e:
L = len(probas)
diagnostic_df = pd.DataFrame(
np.hstack([probas, np.cumsum(probas)]).reshape((2,L)).T,
columns=['probas', 'cumsum'])
raise ValueError("Probabilities do not sum to one for state %s,%s\n%s" % (
state,
HEALTH_STATE_ID_TO_NAME[health_state_id],
str(diagnostic_df)))
duration = prng.choice(choices, p=probas)
if len(self.state_ids) == 0 and t <= 0:
try:
choices_and_probas_dict = config_dict['pmf_initial_duration_spent_%s' % (state)]
choices = np.fromiter(choices_and_probas_dict.keys(), dtype=np.int32)
probas = np.fromiter(choices_and_probas_dict.values(), dtype=np.float64)
assert np.allclose(1.0, np.sum(probas))
duration_spent = prng.choice(choices, p=probas)
duration = np.maximum(duration - duration_spent, 1)
except KeyError:
pass
self.state_ids.append(state_name_to_id[state])
self.health_state_ids.append(health_state_id)
self.durations.append(duration)
# Progress to the next state
next_state = next_state_map[state+HEALTH_STATE_ID_TO_NAME[health_state_id]]
# Allow option for premature terminal state
if health_state_id < 1:
try:
if prng.rand() < config_dict['proba_Die_after_Declining_%s' % (state)]:
next_state = 'TERMINAL'
except KeyError: # proba_Die not specified, so premature death from this STATE does not exist
pass
# Advance to next state
state = next_state
# End while loop block. Continue if not terminal or recovered.
self.is_terminal_0 = (state == 'TERMINAL' and health_state_id < 1)
def update_count_matrix(self, count_TK, t_start):
''' Update count matrix tracking population of each state at each time
Returns
-------
count_TK : 2D array with shape (T, K)
One row for each timestep
One column for each state
'''
t = t_start
for ii in range(len(self.state_ids)):
count_TK[t:t+self.durations[ii], self.state_ids[ii]] += 1
t += self.durations[ii]
return count_TK
def update_terminal_count_matrix(self, count_T1, t_start):
        ''' Update count vector tracking terminal (death) events at each time
Returns
-------
count_T1 : 2D array with shape (T, 1)
One row for each timestep
One column only, for terminal state
'''
if self.is_terminal_0:
t_terminal = t_start + np.sum(self.durations)
count_T1[t_terminal, 0] += 1
return count_T1
def update_admit_count_matrix(self, count_TK, t_start):
''' Update count matrix tracking "newly admitted" at each state, time
Returns
-------
count_TK : 2D array with shape (T, K)
One row for each timestep
One column for each state
Exactly one 1 entry for each patient in simulation.
'''
count_TK[t_start, self.state_ids[0]] += 1
return count_TK
def update_discharge_count_matrix(self, count_TK, t_start):
''' Update count matrix tracking "recovery" from each state at each time
Returns
-------
count_TK : 2D array with shape (T, K)
One row for each timestep
One column for each state
        At most one 1 entry for each patient in simulation.
'''
if not self.is_terminal_0:
count_TK[t_start + np.sum(self.durations), self.state_ids[-1]] += 1
return count_TK
def get_length_of_stay_summary_dict(self):
''' Compute summary statistics about this patient's entire stay
Returns
-------
summary_dict : dict
Dictionary with string keys and count values
'''
if self.state_name_to_id is None:
state_id_to_name = dict([
(a, 'State%02d' % a) for a in range(1+np.max(self.state_ids))])
else:
state_id_to_name = dict(
zip(self.state_name_to_id.values(),
self.state_name_to_id.keys()))
L = len(self.durations)
summary_dict = defaultdict(int)
summary_dict['is_Terminal'] = int(self.is_terminal_0)
summary_dict['is_InICU'] = 0
summary_dict['is_OnVent'] = 0
summary_dict['duration_All'] = np.sum(self.durations)
for ll in range(L):
health_state = HEALTH_STATE_ID_TO_NAME[self.health_state_ids[ll]]
state_name = state_id_to_name[self.state_ids[ll]]
duration = self.durations[ll]
summary_dict['duration_' + state_name] += duration
summary_dict['duration_' + state_name + "+" + health_state] += duration
if state_name.count("ICU"):
summary_dict['is_InICU'] = 1
if state_name.count("OnVent"):
summary_dict['is_OnVent'] = 1
summary_dict['duration_All'] = np.sum(self.durations)
return summary_dict
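# A minimal sketch (hypothetical values) tying the pieces above together: build
# a two-stage trajectory by hand, as in the doctests, then read back occupancy
# counts and the length-of-stay summary.
def _demo_patient_trajectory():
    p = PatientTrajectory()
    p.state_ids = [0, 1]
    p.durations = [3, 2]
    p.health_state_ids = [0, 1]
    p.is_terminal_0 = False
    T = int(np.sum(p.durations)) + 1
    occ_TK = p.update_count_matrix(np.zeros((T, 2), dtype=np.int32), 0)
    return occ_TK, p.get_length_of_stay_summary_dict()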
|
{"hexsha": "ea2f0161a0fa13d267e6f7d303de3c370eec1911", "size": 10077, "ext": "py", "lang": "Python", "max_stars_repo_path": "semimarkov_forecaster/PatientTrajectory.py", "max_stars_repo_name": "tufts-ml/covid19-forecasting", "max_stars_repo_head_hexsha": "b0e3eed6cc03a981598d8f0b7c6fe882310c710d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-02T23:38:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-08T18:57:16.000Z", "max_issues_repo_path": "semimarkov_forecaster/PatientTrajectory.py", "max_issues_repo_name": "tufts-ml/covid19-forecasting", "max_issues_repo_head_hexsha": "b0e3eed6cc03a981598d8f0b7c6fe882310c710d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-04-03T13:58:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-27T02:12:07.000Z", "max_forks_repo_path": "semimarkov_forecaster/PatientTrajectory.py", "max_forks_repo_name": "tufts-ml/covid19-forecasting", "max_forks_repo_head_hexsha": "b0e3eed6cc03a981598d8f0b7c6fe882310c710d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7482758621, "max_line_length": 123, "alphanum_fraction": 0.5840031755, "include": true, "reason": "import numpy", "num_tokens": 2661}
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Description
# ==============================================================================
#
# Two Body orbit propagator.
#
# This algorithm considers a perfect Keplerian orbit. In other words, no
# perturbation is considered during the propagation and the Earth is modeled
# as a perfect sphere.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# References
# ==============================================================================
#
# [1] Vallado, D. A (2013). Fundamentals of Astrodynamics and Applications.
# Microcosm Press, Hawthorn, CA, USA.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
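#
# The propagation below reduces to two standard two-body relations
# (a sketch of the math, in the notation used by this file):
#
#   n₀ = √(μ / a₀³)        (mean motion, computed in twobody_init)
#   M(Δt) = M₀ + n₀ Δt     (mean anomaly advanced linearly in time)
#
# with the true anomaly recovered by inverting Kepler's equation
# M = E - e sin(E) via `M_to_f`.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #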
export TwoBody_Structure
export twobody_init, twobody!
################################################################################
# Functions
################################################################################
"""
twobody_init(epoch::Tepoch, a_0::Number, e_0::Number, i_0::Number, Ω_0::Number, ω_0::Number, f_0::Number; μ::T = m0) where {Tepoch, T}
Initialize the data structure of two body orbit propagator algorithm.
!!! note
The type used in the propagation will be the same as used to define the
gravitational constant `μ`.
# Args
- `epoch::Number`: Epoch of the initial mean orbital elements [Julian Day].
- `a_0::Number`: Initial mean semi-major axis [m].
- `e_0::Number`: Initial mean eccentricity.
- `i_0::Number`: Initial mean inclination [rad].
- `Ω_0::Number`: Initial mean right ascension of the ascending node [rad].
- `ω_0::Number`: Initial mean argument of perigee [rad].
- `f_0::Number`: Initial mean true anomaly [rad].
# Keywords
- `μ::T`: Standard gravitational parameter of the central body [m^3/s^2].
(**Default** = `m0`)
# Returns
The structure [`TwoBody_Structure`](@ref) with the initialized parameters.
"""
function twobody_init(
epoch::Tepoch,
a_0::Number,
e_0::Number,
i_0::Number,
Ω_0::Number,
ω_0::Number,
f_0::Number;
μ::T = m0
) where {Tepoch, T}
# The propagator is only defined for 0 <= e < 1.
if !(0 <= e_0 < 1)
throw(ArgumentError("The two body propagator only supports eccentricities in the interval [0,1)"))
end
# Compute the mean motion using the semi-major axis.
n_0 = sqrt(μ / T(a_0)^3)
# Compute the initial mean anomaly.
M_0 = f_to_M(T(e_0), T(f_0))
# Create and return the Two Body orbit propagator structure.
TwoBody_Structure{Tepoch, T}(
epoch = epoch,
a_0 = a_0,
n_0 = n_0,
e_0 = e_0,
i_0 = i_0,
Ω_0 = Ω_0,
ω_0 = ω_0,
M_0 = M_0,
f_0 = f_0,
μ = μ,
Δt = 0,
f_k = f_0,
M_k = M_0
)
end
"""
twobody!(tbd::TwoBody_Structure{Tepoch, T}, t::Number) where {Tepoch, T}
Propagate the orbit defined in `tbd` (see [`TwoBody_Structure`](@ref)) until the
time `t` [s].
!!! note
The internal values in `tbd` will be modified.
# Returns
- The position vector represented in the inertial frame at time `t` [m].
- The velocity vector represented in the inertial frame at time `t` [m/s].
# Remarks
The inertial frame in which the output is represented depends on the frame in
which the orbit parameters were generated.
"""
function twobody!(tbd::TwoBody_Structure{Tepoch, T}, t::Number) where {Tepoch, T}
# Time elapsed since epoch.
Δt = T(t)
tbd.Δt = Δt
# Update the mean anomaly.
tbd.M_k = tbd.M_0 + tbd.n_0 * Δt
# Convert the mean anomaly to true anomaly.
tbd.f_k = M_to_f(tbd.e_0, tbd.M_k)
# Compute the position and velocity vectors given the orbital elements.
r_i_k, v_i_k = kepler_to_rv(tbd.a_0, tbd.e_0, tbd.i_0, tbd.Ω_0, tbd.ω_0, tbd.f_k)
# Return the position and velocity vector represented in the inertial
# reference frame.
return r_i_k, v_i_k
end
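#
# Example usage (a sketch; the numbers are hypothetical):
#
#   tbd = twobody_init(2451545.0, 7000e3, 0.01, deg2rad(98), 0.0, 0.0, 0.0)
#   r, v = twobody!(tbd, 600)   # inertial state 10 minutes after epoch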
|
{"hexsha": "c1a8d9f3ae31005fbd44e3e7c1a64ccfec1a429b", "size": 4027, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/orbit/propagators/twobody.jl", "max_stars_repo_name": "yashi/SatelliteToolbox.jl", "max_stars_repo_head_hexsha": "3a2bea103083f7fcf5729bcd1446cb23c7f23cd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 157, "max_stars_repo_stars_event_min_datetime": "2018-06-19T21:11:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T19:24:41.000Z", "max_issues_repo_path": "src/orbit/propagators/twobody.jl", "max_issues_repo_name": "yashi/SatelliteToolbox.jl", "max_issues_repo_head_hexsha": "3a2bea103083f7fcf5729bcd1446cb23c7f23cd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 71, "max_issues_repo_issues_event_min_datetime": "2018-06-18T20:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T21:33:20.000Z", "max_forks_repo_path": "src/orbit/propagators/twobody.jl", "max_forks_repo_name": "SatelliteToolbox/SatelliteToolbox.jl", "max_forks_repo_head_hexsha": "a1ad4b4331fda1ddfb78993e05fe947514cf483a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2018-10-02T02:42:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T20:36:51.000Z", "avg_line_length": 30.2781954887, "max_line_length": 138, "alphanum_fraction": 0.5550037249, "num_tokens": 1197}
|
!***********************************************************************
SUBROUTINE setisompi (isofile)
! An MPI container for setiso which opens, checks, and loads isofile
! to get isotope data. Data loaded are:
! EMN,Z,/NPAR/,/NSMDAT/
! where /.../ means whole common block.
!
! Xinghong He 98-08-06
!
!***********************************************************************
!...Translated by Pacific-Sierra Research 77to90 4.3E 14:04:58 1/ 3/07
!...Modified by Charlotte Froese Fischer
! Gediminas Gaigalas 10/05/17
!-----------------------------------------------
! M o d u l e s
!-----------------------------------------------
USE MPI_C
USE DEF_C
USE NPAR_C
USE NSMDAT_C, ONLY: SQN, DMOMNM, QMOMB
!-----------------------------------------------
! I n t e r f a c e B l o c k s
!-----------------------------------------------
USE setiso_I
USE orthsc_I
IMPLICIT NONE
!-----------------------------------------------
! D u m m y A r g u m e n t s
!-----------------------------------------------
CHARACTER (LEN = *) :: isofile
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER :: J, I, K, NWIN, IOS, NPY, NAKY, MY
! integer :: ierr
REAL(DOUBLE) :: CON, FKK, EY, PZY, DNORM
real(double), dimension(:), pointer :: PA, QA, RA
!-----------------------------------------------
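! Only the master task reads the isotope file; the scalars and common-block
! data it loads are then broadcast to every rank from root 0 below.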
IF (myid .EQ. 0) CALL SETISO (isofile)
CALL MPI_Bcast (Z,1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
CALL MPI_Bcast (EMN,1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
CALL MPI_Bcast (PARM,2,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
CALL MPI_Bcast (NPARM,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
CALL MPI_Bcast (SQN,1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
CALL MPI_Bcast (DMOMNM,1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD, &
ierr)
CALL MPI_Bcast (QMOMB,1,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD, &
ierr)
RETURN
END
|
{"hexsha": "db25b7d71cf9ad774c44769a4f2745e6a07d6cb2", "size": 2076, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/mpi90/setisompi.f90", "max_stars_repo_name": "sylas/grasp-continuum", "max_stars_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2019-03-10T04:00:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T22:01:15.000Z", "max_issues_repo_path": "src/lib/mpi90/setisompi.f90", "max_issues_repo_name": "sylas/grasp-continuum", "max_issues_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2019-03-07T17:56:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T16:45:24.000Z", "max_forks_repo_path": "src/lib/mpi90/setisompi.f90", "max_forks_repo_name": "sylas/grasp-continuum", "max_forks_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-03-10T04:00:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T02:06:40.000Z", "avg_line_length": 38.4444444444, "max_line_length": 74, "alphanum_fraction": 0.4566473988, "num_tokens": 561}
|
abstract type Constraint end
# expected fields (mutable)
# - score
# reduce Base.show() to Base.string()
Base.show(io::IO, cnstr::Constraint) = print(io, string(cnstr))
Base.show(io::IO, ::MIME"application/prs.juno.inline", cnstr::Constraint) = print(io, string(cnstr))
Base.string(cnstr::Constraint) = @sprintf "ToDo: implement Base.string for %s" typeof(cnstr)
integrand_x1(cnstr::Constraint, args...; kwargs...) = integrand_x1(cnstr.score, args...; kwargs...)
update!(cnstr::Constraint, x::TI, n::TI) where {TI<:Integer} = update!(cnstr.score, x, n)
function update(cnstr::Constraint, prior::Prior)
cnstr = deepcopy(cnstr)
cnstr.score = update(cnstr.score, prior)
return cnstr
end
function conditional(cnstr::Constraint, bounds::Tuple{Real,Real})
cnstr = deepcopy(cnstr)
cnstr.conditional = bounds
return cnstr
end
mutable struct PowerConstraint <: Constraint
score::Power
β::Real
conditional::Tuple{Real,Real}
end
>=(power::Power, threshold::Real) = PowerConstraint(power, 1 - threshold, (.5, .99))
function Base.string(cnstr::PowerConstraint)
return @sprintf "%s >= %5.1f%% (given x1: %5.1f%% - %5.1f%%)" string(cnstr.score) 100*(1 - cnstr.β) 100*cnstr.conditional[1] 100*cnstr.conditional[2]
end
mutable struct TypeOneErrorRateConstraint <: Constraint
score::Power
α::Real
conditional::Tuple{Real,Real}
end
<=(power::Power, threshold::Real) = TypeOneErrorRateConstraint(power, threshold, (.001, .99))
function Base.string(cnstr::TypeOneErrorRateConstraint)
return @sprintf "%s <= %5.1f%% (given x1: %5.1f%% - %5.1f%%)" string(cnstr.score) 100*cnstr.α 100*cnstr.conditional[1] 100*cnstr.conditional[2]
end
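# Example usage (a sketch; a `Power` score comes from the surrounding package):
#   cnstr = power >= 0.8     # PowerConstraint with β = 0.2
#   toer = power <= 0.05     # TypeOneErrorRateConstraint with α = 0.05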
|
{"hexsha": "0c128987844541520ebab61f9d8d9210fdf9a652", "size": 1703, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/constraints.jl", "max_stars_repo_name": "JuliaTagBot/bad.jl", "max_stars_repo_head_hexsha": "7cccc038b65e4d6e923221064c20b361466e21cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/constraints.jl", "max_issues_repo_name": "JuliaTagBot/bad.jl", "max_issues_repo_head_hexsha": "7cccc038b65e4d6e923221064c20b361466e21cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-12-10T17:25:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-06T10:33:25.000Z", "max_forks_repo_path": "src/constraints.jl", "max_forks_repo_name": "JuliaTagBot/bad.jl", "max_forks_repo_head_hexsha": "7cccc038b65e4d6e923221064c20b361466e21cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T10:43:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T10:43:02.000Z", "avg_line_length": 29.3620689655, "max_line_length": 153, "alphanum_fraction": 0.6975924839, "num_tokens": 507}
|
from __future__ import print_function
import os
import flask
import json
import time
import mxnet as mx
import cv2
import base64
from face import Face
import numpy as np
import insightface
from insightface.utils import face_align
model_root_dir = '/opt/ml/model'
# 'retinaface_mnet025_v2+LResNet100E-IR'
# 'retinaface_mnet025_v2+LResNet50E-IR'
# 'retinaface_mnet025_v2+LResNet34E-IR'
# 'retinaface_mnet025_v2+MobileFaceNet'
# 'retinaface_r50_v1+LResNet100E-IR'
# 'retinaface_r50_v1+LResNet50E-IR'
# 'retinaface_r50_v1+LResNet34E-IR'
# 'retinaface_r50_v1+MobileFaceNet'
face_detection_and_comparison_model_name = os.environ.get('FACE_DETECTION_AND_COMPARISON_MODEL_NAME', 'retinaface_mnet025_v2+MobileFaceNet')
face_detection_model_name = face_detection_and_comparison_model_name.split('+')[0]
face_representation_model_name = face_detection_and_comparison_model_name.split('+')[1]
# A singleton for holding the model. This simply loads the model and holds it.
# It has a predict function that does a prediction based on the model and the input data.
class FaceRecognizerService(object):
# class attributes
face_detector = None
face_embedding_model = None
ctx = mx.cpu() if mx.context.num_gpus() == 0 else mx.gpu()
# face representation configuration
face_size = (112, 112)
if face_representation_model_name == 'LResNet100E-IR':
face_representation_model_prefix = os.path.join(model_root_dir, 'model-r100-ii/model')
elif face_representation_model_name == 'LResNet50E-IR':
face_representation_model_prefix = os.path.join(model_root_dir, 'model-r50-am-lfw/model')
elif face_representation_model_name == 'LResNet34E-IR':
face_representation_model_prefix = os.path.join(model_root_dir, 'model-r34-amf/model')
elif face_representation_model_name == 'MobileFaceNet':
face_representation_model_prefix = os.path.join(model_root_dir, 'model-y1-test2/model')
else:
face_representation_model_prefix = 'None'
@classmethod
def get_model(cls):
"""
        Get the face detector and face representation model for this instance, loading them if they are not already loaded.
:return:
"""
# face detector model
if cls.face_detector is None:
cls.face_detector = insightface.model_zoo.get_model(
name=face_detection_model_name,
root=model_root_dir
)
ctx_id = -1 if mx.context.num_gpus() == 0 else 0
cls.face_detector.prepare(ctx_id=ctx_id)
# face representation (embedding vector representation) model
if cls.face_embedding_model is None:
sym, arg_params, aux_params = mx.model.load_checkpoint(
prefix=cls.face_representation_model_prefix, epoch=0)
all_layers = sym.get_internals()
sym = all_layers['fc1_output']
cls.face_embedding_model = mx.mod.Module(symbol=sym, context=cls.ctx, label_names=None)
cls.face_embedding_model.bind(data_shapes=[('data', (1, 3, cls.face_size[0], cls.face_size[1]))])
cls.face_embedding_model.set_params(arg_params, aux_params)
return cls.face_detector, cls.face_embedding_model
@classmethod
def get_largest_face(cls, bbox_list):
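        # bbox rows are [x_min, y_min, x_max, y_max, score]; (x_min - x_max)
        # and (y_min - y_max) are both negative, so their product is the
        # (positive) box area.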
largest_area_index = 0
largest_area = -1.0
for i in range(bbox_list.shape[0]):
bbox = bbox_list[i]
area = (bbox[0] - bbox[2]) * (bbox[1] - bbox[3])
if largest_area < area:
largest_area = area
largest_area_index = i
return largest_area_index
@classmethod
def detect_and_align(cls, raw_input_image, is_source_image=False, threshold=0.70):
face_detector, _ = cls.get_model()
height, width, _ = raw_input_image.shape
short_size = height if height < width else width
scale = 1.0 if short_size < 480.0 else 480.0 / short_size
bbox_list, pts5_list = face_detector.detect(raw_input_image, threshold=threshold, scale=scale)
if bbox_list.shape[0] == 0:
return None
if is_source_image:
max_face_index = cls.get_largest_face(bbox_list)
bbox = bbox_list[max_face_index, :]
pts5 = pts5_list[max_face_index, :]
aligned_source_face = face_align.norm_crop(raw_input_image, pts5)
face = Face(
bbox=[float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3])],
aligned_face_img=aligned_source_face,
confidence=float(bbox[-1]),
key_points={
'eyeLeft': [float(pts5[0][0]), float(pts5[0][1])],
'eyeRight': [float(pts5[1][0]), float(pts5[1][1])],
'nose': [float(pts5[2][0]), float(pts5[2][1])],
'mouthLeft': [float(pts5[3][0]), float(pts5[3][1])],
'mouthRight': [float(pts5[4][0]), float(pts5[4][1])],
}
)
return face
else:
face_list = list()
for index in range(len(bbox_list)):
bbox = bbox_list[index]
pts5 = pts5_list[index]
aligned_target_face = face_align.norm_crop(raw_input_image, pts5)
face = Face(
bbox=[float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3])],
aligned_face_img=aligned_target_face,
confidence=float(bbox[-1]),
key_points={
'eyeLeft': [float(pts5[0][0]), float(pts5[0][1])],
'eyeRight': [float(pts5[1][0]), float(pts5[1][1])],
'nose': [float(pts5[2][0]), float(pts5[2][1])],
'mouthLeft': [float(pts5[3][0]), float(pts5[3][1])],
'mouthRight': [float(pts5[4][0]), float(pts5[4][1])],
}
)
face_list.append(face)
return face_list
@classmethod
def get_feature(cls, aligned):
_, face_embedding_model = cls.get_model()
a = cv2.cvtColor(aligned, cv2.COLOR_BGR2RGB)
a = np.transpose(a, (2, 0, 1))
input_blob = np.expand_dims(a, axis=0)
data = mx.nd.array(input_blob)
db = mx.io.DataBatch(data=(data, ))
face_embedding_model.forward(db, is_train=False)
emb = face_embedding_model.get_outputs()[0].asnumpy()[0]
norm = np.sqrt(np.sum(emb * emb) + 0.00001)
emb /= norm
return emb
@classmethod
def predict(cls, source_image_base64, target_image_base64, min_confidence_thresh=0.40):
source_image = cv2.imdecode(np.frombuffer(base64.b64decode(source_image_base64), np.uint8), cv2.IMREAD_COLOR)
target_image = cv2.imdecode(np.frombuffer(base64.b64decode(target_image_base64), np.uint8), cv2.IMREAD_COLOR)
t1 = time.time()
source_detected_face = cls.detect_and_align(source_image, is_source_image=True, threshold=min_confidence_thresh)
target_detected_faces = cls.detect_and_align(target_image, is_source_image=False, threshold=min_confidence_thresh)
t2 = time.time()
print('Time Cost of Face Detecting & Aligning for 2 Images = {} seconds'.format(t2 - t1))
response = {
'SourceImageFace': None,
'FaceMatches': []
}
if source_detected_face is not None:
[x_min, y_min, x_max, y_max] = source_detected_face.bbox
response['SourceImageFace'] = {
'BoundingBox': [x_min, y_min, x_max, y_max],
'Confidence': source_detected_face.confidence,
'KeyPoints': source_detected_face.key_points
}
else:
return response
for target_comp_face in target_detected_faces:
base_feat_representation = cls.get_feature(source_detected_face.aligned_face_img)
target_feat_representation = cls.get_feature(target_comp_face.aligned_face_img)
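            # both embeddings were L2-normalized in get_feature, so this dot
            # product equals their cosine similarity in [-1, 1]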
similarity_score = np.dot(base_feat_representation, target_feat_representation)
# add comparison to response body
[x_min, y_min, x_max, y_max] = target_comp_face.bbox
response['FaceMatches'].append({
'Similarity': float(similarity_score),
'Face': {
'BoundingBox': [x_min, y_min, x_max, y_max],
'Confidence': target_comp_face.confidence,
'KeyPoints': target_comp_face.key_points
}
})
return response
# The flask app for serving predictions
app = flask.Flask(__name__)
@app.route('/ping', methods=['GET'])
def ping():
"""
Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully.
:return:
"""
    detector_model, embedding_model = FaceRecognizerService.get_model()
    health = (detector_model is not None) and (embedding_model is not None)
status = 200 if health else 404
return flask.Response(response='\n', status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
def transformation():
"""
    Do an inference on a single batch of data. In this sample server, we take image data as base64-encoded
    strings, decode them for internal use, and then convert the predictions to json format
:return:
"""
t_start = time.time()
if flask.request.content_type == 'application/json':
request_body = flask.request.data.decode('utf-8')
request_body = json.loads(request_body)
source_image_base64 = request_body['source_image_bytes']
target_image_base64 = request_body['target_image_bytes']
else:
return flask.Response(
response='Face comparison only supports application/json data',
status=415,
mimetype='text/plain')
# inference
body = FaceRecognizerService.predict(
source_image_base64,
target_image_base64,
min_confidence_thresh=0.65
)
t_end = time.time()
print('Time consumption = {} second'.format(t_end - t_start))
print('Response = {}'.format(body))
return flask.Response(response=json.dumps(body), status=200, mimetype='application/json')
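# --- Hypothetical client-side sketch (not part of the original service) ---
# Shows the JSON body the /invocations route above expects: two base64-encoded
# images under 'source_image_bytes' and 'target_image_bytes'. The file names
# and endpoint URL are assumptions for illustration only.
if __name__ == '__main__':
    import requests  # assumed available in the client environment
    with open('source.jpg', 'rb') as f:  # hypothetical input image
        source_b64 = base64.b64encode(f.read()).decode('utf-8')
    with open('target.jpg', 'rb') as f:  # hypothetical input image
        target_b64 = base64.b64encode(f.read()).decode('utf-8')
    payload = json.dumps({'source_image_bytes': source_b64,
                          'target_image_bytes': target_b64})
    resp = requests.post('http://localhost:8080/invocations', data=payload,
                         headers={'Content-Type': 'application/json'})
    print(resp.json())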
|
{"hexsha": "baf6eb95e1959e947212bd9c55e11d4f5b967794", "size": 10357, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/containers/face-comparison/recognizer/predictor.py", "max_stars_repo_name": "aws-samples/amazon-ipc-ai-saas", "max_stars_repo_head_hexsha": "f412634ab407e54990ec8913146cff53f417453f", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-05-07T07:03:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T02:04:48.000Z", "max_issues_repo_path": "source/containers/face-comparison/recognizer/predictor.py", "max_issues_repo_name": "aws-samples/amazon-ipc-ai-saas", "max_issues_repo_head_hexsha": "f412634ab407e54990ec8913146cff53f417453f", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/containers/face-comparison/recognizer/predictor.py", "max_forks_repo_name": "aws-samples/amazon-ipc-ai-saas", "max_forks_repo_head_hexsha": "f412634ab407e54990ec8913146cff53f417453f", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-05-10T09:57:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T05:12:12.000Z", "avg_line_length": 39.5305343511, "max_line_length": 140, "alphanum_fraction": 0.6360915323, "include": true, "reason": "import numpy", "num_tokens": 2411}
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
from collections import OrderedDict
import multiprocessing
import os
import sys
import time
import h5py
import numpy as np
import pandas as pd
import pyBigWig
import pysam
from basenji import dna_io
from basenji import gff
from basenji import gene
"""basenji_hdf5_genes.py
Tile a set of genes and save the result in HDF5 for Basenji processing.
Notes:
-At the moment, I'm excluding target measurements, but that could be included
if I want to measure accuracy on specific genes.
"""
################################################################################
# main
################################################################################
def main():
usage = "usage: %prog [options] <fasta_file> <gtf_file> <hdf5_file>"
parser = OptionParser(usage)
parser.add_option(
"-g",
dest="genome_file",
default=None,
help="Chromosome lengths file [Default: %default]",
)
parser.add_option(
"-l",
dest="seq_length",
default=1024,
type="int",
help="Sequence length [Default: %default]",
)
parser.add_option(
"-c",
dest="center_t",
default=0.333,
type="float",
help="Center proportion in which TSSs are required to be [Default: %default]",
)
parser.add_option(
"-p",
dest="processes",
default=1,
type="int",
help="Number parallel processes to load data [Default: %default]",
)
parser.add_option(
"-t",
dest="target_wigs_file",
default=None,
help="Store target values, extracted from this list of WIG files",
)
parser.add_option(
"-w",
dest="pool_width",
type="int",
default=1,
help="Average pooling width [Default: %default]",
)
parser.add_option(
"--w5",
dest="w5",
default=False,
action="store_true",
help="Coverage files are w5 rather than BigWig [Default: %default]",
)
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error("Must provide genes as GTF, genome FASTA, and output HDF5")
else:
fasta_file = args[0]
gtf_file = args[1]
hdf5_file = args[2]
if options.target_wigs_file is not None:
check_wigs(options.target_wigs_file)
################################################################
# organize TSS's by chromosome
# read transcripts
transcripts = gff.read_genes(gtf_file, key_id="transcript_id")
# read transcript --> gene mapping
transcript_genes = gff.t2g(gtf_file, feature="exon")
# make gene --> strand mapping
gene_strand = {}
for tx_id in transcripts:
gene_strand[transcript_genes[tx_id]] = transcripts[tx_id].strand
# cluster TSSs by gene
gene_tss = cluster_tss(transcript_genes, transcripts, options.pool_width / 2)
# hash TSS's by chromosome
gene_chrom = {}
for tx_id in transcripts:
gene_id = transcript_genes[tx_id]
gene_chrom[gene_id] = transcripts[tx_id].chrom
chrom_tss = {}
for gene_id in gene_tss:
for tss_pos in gene_tss[gene_id]:
chrom_tss.setdefault(gene_chrom[gene_id], []).append((tss_pos, gene_id))
# sort TSS's by chromosome
for chrom in chrom_tss:
chrom_tss[chrom].sort()
################################################################
# determine segments / map transcripts
# open fasta (to verify chromosome presence)
fasta = pysam.Fastafile(fasta_file)
chrom_sizes = OrderedDict()
for line in open(options.genome_file):
a = line.split()
if a[0] in fasta.references:
chrom_sizes[a[0]] = int(a[1])
elif a[0] in chrom_tss:
print("FASTA missing chromosome - %s" % a[0], file=sys.stderr)
del chrom_tss[a[0]]
merge_distance = options.center_t * options.seq_length
seq_coords = []
tss_list = []
# ordering by options.genome_file allows for easier
# bigwig output in downstream scripts.
for chrom in chrom_sizes:
ctss = chrom_tss.get(chrom, [])
left_i = 0
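        # greedily group TSSs whose pairwise span fits within the sequence
        # center region, then place one sequence window around each group's
        # midpoint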
while left_i < len(ctss):
# left TSS
left_tss = ctss[left_i][0]
# right TSS
right_i = left_i
while (
right_i + 1 < len(ctss)
and ctss[right_i + 1][0] - left_tss < merge_distance
):
right_i += 1
right_tss = ctss[right_i][0]
# determine segment midpoint
seg_mid = (left_tss + right_tss) // 2
# extend
seg_start = seg_mid - options.seq_length // 2
seg_end = seg_start + options.seq_length
# rescue
if seg_start < 0 or seg_end >= chrom_sizes[chrom]:
if chrom_sizes[chrom] == options.seq_length:
seg_start = 0
seg_end = options.seq_length
elif chrom_sizes[chrom] > options.seq_length:
# also rescuable but not important right now
pass
# save segment
if seg_start >= 0 and seg_end <= chrom_sizes[chrom]:
seq_coords.append((chrom, seg_start, seg_end))
# annotate TSS to indexes
seq_index = len(seq_coords) - 1
for i in range(left_i, right_i + 1):
tss_pos, gene_id = ctss[i]
tss = gene.TSS(
"TSS%d" % len(tss_list),
gene_id,
chrom,
tss_pos,
seq_index,
True,
gene_strand[gene_id],
)
tss_list.append(tss)
# update
left_i = right_i + 1
################################################################
# extract target values
if options.target_wigs_file:
t0 = time.time()
# get wig files and labels
target_wigs_df = pd.read_table(options.target_wigs_file, index_col=0)
target_wigs = OrderedDict()
target_labels = []
for i in range(target_wigs_df.shape[0]):
target_wig_series = target_wigs_df.iloc[i]
target_wigs[target_wig_series.identifier] = target_wig_series.file
target_labels.append(target_wig_series.description)
# initialize multiprocessing pool
pool = multiprocessing.Pool(options.processes)
# bigwig_read parameters
bwt_params = [
(wig_file, tss_list, seq_coords, options.pool_width)
for wig_file in target_wigs.values()
]
# pull the target values in parallel
if options.w5:
tss_targets = pool.starmap(wig5_tss_targets, bwt_params)
else:
tss_targets = pool.starmap(bigwig_tss_targets, bwt_params)
# convert to array
tss_targets = np.transpose(np.array(tss_targets))
################################################################
# extract sequences
seqs_1hot = []
for chrom, start, end in seq_coords:
seq = fasta.fetch(chrom, start, end)
seqs_1hot.append(dna_io.dna_1hot(seq))
seqs_1hot = np.array(seqs_1hot)
fasta.close()
################################################################
# save to HDF5
# write to HDF5
hdf5_out = h5py.File(hdf5_file, "w")
# store pooling
hdf5_out.create_dataset("pool_width", data=options.pool_width, dtype="int")
# store gene sequences
hdf5_out.create_dataset("seqs_1hot", data=seqs_1hot, dtype="bool")
    # store gene sequence coordinates
seq_chrom = np.array([sc[0] for sc in seq_coords], dtype="S")
seq_start = np.array([sc[1] for sc in seq_coords])
seq_end = np.array([sc[2] for sc in seq_coords])
hdf5_out.create_dataset("seq_chrom", data=seq_chrom)
hdf5_out.create_dataset("seq_start", data=seq_start)
hdf5_out.create_dataset("seq_end", data=seq_end)
# store TSSs
tss_id = np.array([tss.identifier for tss in tss_list], dtype="S")
tss_gene = np.array([tss.gene_id for tss in tss_list], dtype="S")
tss_chrom = np.array([tss.chrom for tss in tss_list], dtype="S")
tss_pos = np.array([tss.pos for tss in tss_list])
tss_seq = np.array([tss.gene_seq for tss in tss_list])
tss_strand = np.array([tss.strand for tss in tss_list], dtype="S")
hdf5_out.create_dataset("tss_id", data=tss_id)
hdf5_out.create_dataset("tss_gene", data=tss_gene)
hdf5_out.create_dataset("tss_chrom", data=tss_chrom)
hdf5_out.create_dataset("tss_pos", data=tss_pos)
hdf5_out.create_dataset("tss_seq", data=tss_seq)
hdf5_out.create_dataset("tss_strand", data=tss_strand)
# store targets
if options.target_wigs_file:
# ids
target_ids = np.array([tl for tl in target_wigs.keys()], dtype="S")
hdf5_out.create_dataset("target_ids", data=target_ids)
# labels
target_labels = np.array(target_labels, dtype="S")
hdf5_out.create_dataset("target_labels", data=target_labels)
# values
hdf5_out.create_dataset("tss_targets", data=tss_targets, dtype="float16")
hdf5_out.close()
################################################################################
def bigwig_tss_targets(wig_file, tss_list, seq_coords, pool_width=1):
""" Read gene target values from a bigwig
Args:
wig_file: Bigwig filename
tss_list: list of TSS instances
seq_coords: list of (chrom,start,end) sequence coordinates
pool_width: average pool adjacent nucleotides of this width
Returns:
tss_targets:
"""
# initialize target values
tss_targets = np.zeros(len(tss_list), dtype="float16")
# open wig
wig_in = pyBigWig.open(wig_file)
# warn about missing chromosomes just once
warned_chroms = set()
# for each TSS
for tss_i in range(len(tss_list)):
tss = tss_list[tss_i]
# extract sequence coordinates
seq_chrom, seq_start, seq_end = seq_coords[tss.gene_seq]
# determine bin coordinates
tss_bin = (tss.pos - seq_start) // pool_width
bin_start = seq_start + tss_bin * pool_width
bin_end = bin_start + pool_width
# pull values
try:
tss_targets[tss_i] = np.array(
wig_in.values(seq_chrom, bin_start, bin_end), dtype="float32"
).sum()
except RuntimeError:
if seq_chrom not in warned_chroms:
print(
"WARNING: %s doesn't see %s (%s:%d-%d). Setting to all zeros. No additional warnings will be offered for %s"
% (
wig_file,
tss.identifier,
seq_chrom,
seq_start,
seq_end,
seq_chrom,
),
file=sys.stderr,
)
warned_chroms.add(seq_chrom)
# check NaN
if np.isnan(tss_targets[tss_i]):
print(
"WARNING: %s (%s:%d-%d) pulled NaN from %s. Setting to zero."
% (tss.identifier, seq_chrom, seq_start, seq_end, wig_file),
file=sys.stderr,
)
tss_targets[tss_i] = 0
# close wig file
wig_in.close()
return tss_targets
################################################################################
def check_wigs(target_wigs_file):
target_wigs_df = pd.read_table(target_wigs_file, index_col=0)
for wig_file in target_wigs_df.file:
if not os.path.isfile(wig_file):
print("Cannot find %s" % wig_file, file=sys.stderr)
exit(1)
################################################################################
def cluster_tss(transcript_genes, transcripts, merge_distance):
""" Cluster transcript TSSs and return a dict mapping gene_id
to a TSS list. """
# hash gene_id to all TSSs
gene_tss_all = {}
for tx_id in transcript_genes:
gene_id = transcript_genes[tx_id]
gene_tss_all.setdefault(gene_id, []).append(transcripts[tx_id].tss())
# initialize gene TSS dict
gene_tss = {}
# for each gene
for gene_id in gene_tss_all:
# initialize TSS cluster summary stats
cluster_mean = []
cluster_n = []
# for each sorted TSS
for tss_pos in sorted(gene_tss_all[gene_id]):
# if it's first, add it
if len(cluster_mean) == 0:
cluster_mean.append(tss_pos)
cluster_n.append(1)
else:
# if it's close to the previous
if tss_pos - cluster_mean[-1] < merge_distance:
# merge
cluster_mean[-1] = (cluster_mean[-1] * cluster_n[-1] + tss_pos) / (
cluster_n[-1] + 1
)
cluster_n[-1] += 1
else:
# create a new cluster
cluster_mean.append(tss_pos)
cluster_n.append(1)
# map gene_id to TSS cluster means (and correct for GFF to BED index)
gene_tss[gene_id] = [int(cm) for cm in cluster_mean]
return gene_tss
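# A minimal sketch (hypothetical positions) of the clustering above: with
# merge_distance=10, TSSs at 100 and 105 merge into one cluster with mean 102,
# while 130 starts a new cluster.
def _demo_cluster_tss():
    class _Tx(object):  # hypothetical stand-in for a transcript object
        def __init__(self, pos):
            self.pos = pos
        def tss(self):
            return self.pos
    transcripts = {"t1": _Tx(100), "t2": _Tx(105), "t3": _Tx(130)}
    transcript_genes = {"t1": "gX", "t2": "gX", "t3": "gX"}
    return cluster_tss(transcript_genes, transcripts, 10)  # {'gX': [102, 130]}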
################################################################################
def wig5_tss_targets(w5_file, tss_list, seq_coords, pool_width=1):
""" Read gene target values from a bigwig
Args:
w5_file: wiggle HDF5 filename
tss_list: list of TSS instances
seq_coords: list of (chrom,start,end) sequence coordinates
pool_width: average pool adjacent nucleotides of this width
Returns:
tss_targets:
"""
# initialize target values
tss_targets = np.zeros(len(tss_list), dtype="float16")
# open wig h5
    w5_in = h5py.File(w5_file, "r")
# warn about missing chromosomes just once
warned_chroms = set()
# for each TSS
for tss_i in range(len(tss_list)):
tss = tss_list[tss_i]
# extract sequence coordinates
seq_chrom, seq_start, seq_end = seq_coords[tss.gene_seq]
# determine bin coordinates
tss_bin = (tss.pos - seq_start) // pool_width
bin_start = seq_start + tss_bin * pool_width
bin_end = bin_start + pool_width
# pull values
try:
tss_targets[tss_i] = w5_in[seq_chrom][bin_start:bin_end].sum(
dtype="float32"
)
except RuntimeError:
if seq_chrom not in warned_chroms:
print(
"WARNING: %s doesn't see %s (%s:%d-%d). Setting to all zeros. No additional warnings will be offered for %s"
% (
w5_file,
tss.identifier,
seq_chrom,
seq_start,
seq_end,
seq_chrom,
),
file=sys.stderr,
)
warned_chroms.add(seq_chrom)
# check NaN
if np.isnan(tss_targets[tss_i]):
print(
"WARNING: %s (%s:%d-%d) pulled NaN from %s. Setting to zero."
% (tss.identifier, seq_chrom, seq_start, seq_end, w5_file),
file=sys.stderr,
)
tss_targets[tss_i] = 0
# close w5 file
w5_in.close()
return tss_targets
################################################################################
# __main__
################################################################################
if __name__ == "__main__":
main()
|
{"hexsha": "6a72143453b4a4f05af36e102753733feb9bb9ee", "size": 16510, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/basenji_hdf5_genes.py", "max_stars_repo_name": "lisabang/basenji", "max_stars_repo_head_hexsha": "f91bb195b4062c55e487a4091e13a0e813ef07d6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/basenji_hdf5_genes.py", "max_issues_repo_name": "lisabang/basenji", "max_issues_repo_head_hexsha": "f91bb195b4062c55e487a4091e13a0e813ef07d6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/basenji_hdf5_genes.py", "max_forks_repo_name": "lisabang/basenji", "max_forks_repo_head_hexsha": "f91bb195b4062c55e487a4091e13a0e813ef07d6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8111753372, "max_line_length": 128, "alphanum_fraction": 0.5520290733, "include": true, "reason": "import numpy", "num_tokens": 3836}
|
"""
3D Neuroimaging cartesian reconstruction
========================================
Author: LElgueddari
In this tutorial we will reconstruct an MRI image from sparse 3D k-space
measurements.
Import neuroimaging data
------------------------
We use the toy datasets available in pysap: a 3D parallel-MRI brain volume
(reduced to a single channel) and a cartesian acquisition scheme built from a
2D poisson-disk mask replicated along the slice axis.
# Package import
from modopt.math.metrics import ssim
from mri.operators import FFT, WaveletN
from mri.operators.utils import convert_mask_to_locations
from mri.reconstructors import SingleChannelReconstructor
from pysap.data import get_sample_data
import pysap
# Third party import
from modopt.opt.linear import Identity
from modopt.opt.proximity import SparseThreshold
import numpy as np
# Loading input data and converting it into a single channel using Sum-Of-Squares
image = get_sample_data('3d-pmri')
image.data = np.sqrt(np.sum(np.abs(image.data)**2, axis=0))
# Obtain K-Space Cartesian Mask
mask = get_sample_data("2d-poisson-disk-mask")
mask.data = np.repeat(np.expand_dims(mask.data, axis=-1), image.shape[-1],
axis=-1)
# View Input
# image.show()
# mask.show()
#############################################################################
# Generate the kspace
# -------------------
#
# From the 3D brain volume and the acquisition mask, we retrospectively
# undersample the k-space using a cartesian acquisition mask
# We then reconstruct the zero order solution as a baseline
# Get the locations of the kspace samples
kspace_loc = convert_mask_to_locations(mask.data)
# Generate the subsampled kspace
fourier_op = FFT(samples=kspace_loc, shape=image.shape)
kspace_data = fourier_op.op(image)
# Zero order solution
image_rec0 = pysap.Image(data=fourier_op.adj_op(kspace_data),
metadata=image.metadata)
# image_rec0.show()
# Calculate SSIM
base_ssim = ssim(image_rec0, image)
print(base_ssim)
#############################################################################
# FISTA optimization
# ------------------
#
# We now want to refine the zero-order solution using a FISTA optimization.
# The cost function is the proximity (sparsity) cost plus the data-fidelity gradient cost.
# Setup the operators
linear_op = WaveletN(
wavelet_name="sym8",
nb_scales=4,
dim=3,
padding_mode="periodization",
)
regularizer_op = SparseThreshold(Identity(), 2 * 1e-11, thresh_type="soft")
# Setup Reconstructor
reconstructor = SingleChannelReconstructor(
fourier_op=fourier_op,
linear_op=linear_op,
regularizer_op=regularizer_op,
gradient_formulation='synthesis',
verbose=1,
)
# Start Reconstruction
x_final, costs, metrics = reconstructor.reconstruct(
kspace_data=kspace_data,
optimization_alg='fista',
num_iterations=200,
)
image_rec = pysap.Image(data=np.abs(x_final))
# image_rec.show()
# Calculate SSIM
recon_ssim = ssim(image_rec, image)
print('The reconstruction SSIM is: ' + str(recon_ssim))
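# If the optimization converged, recon_ssim should exceed the zero-order base_ssim above.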
[file: examples/cartesian_reconstruction_3d.py | repo: Zaineb18/pysap-mri | license: CECILL-B]
program p
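! minimal program declaring a derived type with no components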
type t
end type t
type(t) :: tvar
end program p
[file: test/type-no-components.f90 | repo: kurtsansom/ftnchek | license: MIT]
import keras
import cv2
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
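# driving_log.csv lists per-frame camera image paths (center/left/right) plus the recorded steering angle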
df = pd.read_csv('data/driving_log.csv')
data = []
imageData = []
# camera columns with their steering corrections:
# center 0.0, right -0.2 (steer back left), left +0.2 (steer back right)
cameras = [('center', 0.0), ('right', -0.2), ('left', 0.2)]
for i in range(len(df['center'].values)):
    for camera, correction in cameras:
        imageFile = df[camera][i].split('/')[-1]
        image = cv2.imread('data/IMG/' + imageFile)
        imageData.append(image)
        steering = float(df['steering'][i]) + correction
        data.append(steering)
        # augment with a horizontally flipped copy and the negated steering angle
        imageData.append(cv2.flip(image, 1))
        data.append(-steering)
# train_test_split shuffles the data by default
X_train, X_test, y_train, y_test = train_test_split(np.array(imageData), np.array(data), test_size=0.2)
print(X_train[0].shape)
from keras.layers import Dense, Conv2D, Cropping2D
model = keras.models.Sequential()
# normalize pixel values to [-0.5, 0.5]
model.add(keras.layers.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
# crop away the sky (top 70 rows) and the car hood (bottom 25 rows)
model.add(Cropping2D(cropping=((70, 25), (0, 0))))
# Keras 2 expects the kernel size as a tuple; Conv2D(24, 5, 5) would be parsed as strides=5
model.add(Conv2D(24, (5, 5), activation='relu'))
model.add(Conv2D(36, (5, 5), activation='relu'))
model.add(Conv2D(48, (5, 5), activation='relu'))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(keras.layers.Flatten())
model.add(Dense(128, activation='relu'))
# linear output: steering is a signed regression target, so no relu on the last layer
model.add(Dense(1))
# accuracy is meaningless for regression, so track only the mse loss
model.compile(loss='mse', optimizer='adam')
result = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, shuffle=True)
model.save('model.h5')
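# A possible inference sketch (hypothetical names; drive-time code normally lives
# in a separate script): load the saved network and predict a steering angle for
# a single frame `frame` of shape (160, 320, 3).
# from keras.models import load_model
# net = load_model('model.h5')
# steering_angle = float(net.predict(frame[None, ...], batch_size=1))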
[file: model.py | repo: CAdarsh/behavioural_cloning | license: MIT]
# coding=utf-8
# Copyright Studio Ousia and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for LUKE."""
import itertools
import json
import os
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ... import RobertaTokenizer
from ...file_utils import add_end_docstrings, is_tf_available, is_torch_available
from ...tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
AddedToken,
BatchEncoding,
EncodedInput,
PaddingStrategy,
TensorType,
TextInput,
TextInputPair,
TruncationStrategy,
_is_tensorflow,
_is_torch,
to_py_obj,
)
from ...utils import logging
logger = logging.get_logger(__name__)
EntitySpan = Tuple[int, int]
EntitySpanInput = List[EntitySpan]
Entity = str
EntityInput = List[Entity]
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"entity_vocab_file": "entity_vocab.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/vocab.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/vocab.json",
},
"merges_file": {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/merges.txt",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/merges.txt",
},
"entity_vocab_file": {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/entity_vocab.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/entity_vocab.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"studio-ousia/luke-base": 512,
"studio-ousia/luke-large": 512,
}
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
return_token_type_ids (:obj:`bool`, `optional`):
Whether to return token type IDs. If left to the default, will return the token type IDs according to
the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are token type IDs? <../glossary.html#token-type-ids>`__
return_attention_mask (:obj:`bool`, `optional`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
`What are attention masks? <../glossary.html#attention-mask>`__
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
of pairs) is provided with :obj:`truncation_strategy = longest_first` or :obj:`True`, an error is
raised instead of returning overflowing tokens.
return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return special tokens mask information.
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return :obj:`(char_start, char_end)` for each token.
            This is only available on fast tokenizers inheriting from
            :class:`~transformers.PreTrainedTokenizerFast`; if using Python's tokenizer, this method will raise
            :obj:`NotImplementedError`.
return_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return the lengths of the encoded inputs.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to print more information and warnings.
**kwargs: passed to the :obj:`self.tokenize()` method
Return:
:class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
`What are input IDs? <../glossary.html#input-ids>`__
- **token_type_ids** -- List of token type ids to be fed to a model (when :obj:`return_token_type_ids=True`
or if `"token_type_ids"` is in :obj:`self.model_input_names`).
`What are token type IDs? <../glossary.html#token-type-ids>`__
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
:obj:`return_attention_mask=True` or if `"attention_mask"` is in :obj:`self.model_input_names`).
`What are attention masks? <../glossary.html#attention-mask>`__
- **entity_ids** -- List of entity ids to be fed to a model.
`What are input IDs? <../glossary.html#input-ids>`__
- **entity_position_ids** -- List of entity positions in the input sequence to be fed to a model.
- **entity_token_type_ids** -- List of entity token type ids to be fed to a model (when
:obj:`return_token_type_ids=True` or if `"entity_token_type_ids"` is in :obj:`self.model_input_names`).
`What are token type IDs? <../glossary.html#token-type-ids>`__
- **entity_attention_mask** -- List of indices specifying which entities should be attended to by the model
(when :obj:`return_attention_mask=True` or if `"entity_attention_mask"` is in
:obj:`self.model_input_names`).
`What are attention masks? <../glossary.html#attention-mask>`__
- **entity_start_positions** -- List of the start positions of entities in the word token sequence (when
:obj:`task="entity_span_classification"`).
- **entity_end_positions** -- List of the end positions of entities in the word token sequence (when
:obj:`task="entity_span_classification"`).
- **overflowing_tokens** -- List of overflowing tokens sequences (when a :obj:`max_length` is specified and
:obj:`return_overflowing_tokens=True`).
- **num_truncated_tokens** -- Number of tokens truncated (when a :obj:`max_length` is specified and
:obj:`return_overflowing_tokens=True`).
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
regular sequence tokens (when :obj:`add_special_tokens=True` and :obj:`return_special_tokens_mask=True`).
- **length** -- The length of the inputs (when :obj:`return_length=True`)
"""
class LukeTokenizer(RobertaTokenizer):
r"""
Construct a LUKE tokenizer.
This tokenizer inherits from :class:`~transformers.RobertaTokenizer` which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods. Compared to
:class:`~transformers.RobertaTokenizer`, :class:`~transformers.LukeTokenizer` also creates entity sequences, namely
:obj:`entity_ids`, :obj:`entity_attention_mask`, :obj:`entity_token_type_ids`, and :obj:`entity_position_ids` to be
used by the LUKE model.
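    Example (a usage sketch; the example sentence and spans are illustrative)::

        >>> from transformers import LukeTokenizer
        >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
        >>> text = "Beyoncé lives in Los Angeles."
        >>> entity_spans = [(0, 7), (17, 28)]  # character spans of "Beyoncé" and "Los Angeles"
        >>> encoding = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")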
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
entity_vocab_file (:obj:`str`):
Path to the entity vocabulary file.
task (:obj:`str`, `optional`):
Task for which you want to prepare sequences. One of :obj:`"entity_classification"`,
:obj:`"entity_pair_classification"`, or :obj:`"entity_span_classification"`. If you specify this argument,
the entity sequence is automatically created based on the given entity span(s).
max_entity_length (:obj:`int`, `optional`, defaults to 32):
The maximum length of :obj:`entity_ids`.
max_mention_length (:obj:`int`, `optional`, defaults to 30):
The maximum number of tokens inside an entity span.
entity_token_1 (:obj:`str`, `optional`, defaults to :obj:`<ent>`):
The special token used to represent an entity span in a word token sequence. This token is only used when
``task`` is set to :obj:`"entity_classification"` or :obj:`"entity_pair_classification"`.
entity_token_2 (:obj:`str`, `optional`, defaults to :obj:`<ent2>`):
The special token used to represent an entity span in a word token sequence. This token is only used when
``task`` is set to :obj:`"entity_pair_classification"`.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
merges_file,
entity_vocab_file,
task=None,
max_entity_length=32,
max_mention_length=30,
entity_token_1="<ent>",
entity_token_2="<ent2>",
**kwargs
):
# we add 2 special tokens for downstream tasks
# for more information about lstrip and rstrip, see https://github.com/huggingface/transformers/pull/2778
entity_token_1 = (
AddedToken(entity_token_1, lstrip=False, rstrip=False)
if isinstance(entity_token_1, str)
else entity_token_1
)
entity_token_2 = (
AddedToken(entity_token_2, lstrip=False, rstrip=False)
if isinstance(entity_token_2, str)
else entity_token_2
)
kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
kwargs["additional_special_tokens"] += [entity_token_1, entity_token_2]
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            task=task,
            max_entity_length=max_entity_length,
            max_mention_length=max_mention_length,
            entity_token_1=entity_token_1,
            entity_token_2=entity_token_2,
            **kwargs,
        )
with open(entity_vocab_file, encoding="utf-8") as entity_vocab_handle:
self.entity_vocab = json.load(entity_vocab_handle)
self.task = task
if task is None or task == "entity_span_classification":
self.max_entity_length = max_entity_length
elif task == "entity_classification":
self.max_entity_length = 1
elif task == "entity_pair_classification":
self.max_entity_length = 2
else:
raise ValueError(
f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification', 'entity_span_classification'] only."
)
self.max_mention_length = max_mention_length
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, List[TextInput]],
text_pair: Optional[Union[TextInput, List[TextInput]]] = None,
entity_spans: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None,
entity_spans_pair: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None,
entities: Optional[Union[EntityInput, List[EntityInput]]] = None,
entities_pair: Optional[Union[EntityInput, List[EntityInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: Optional[bool] = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences, depending on the task you want to prepare them for.
Args:
text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
tokenizer does not support tokenization based on pretokenized strings.
text_pair (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
tokenizer does not support tokenization based on pretokenized strings.
entity_spans (:obj:`List[Tuple[int, int]]`, :obj:`List[List[Tuple[int, int]]]`, `optional`):
The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
with two integers denoting character-based start and end positions of entities. If you specify
:obj:`"entity_classification"` or :obj:`"entity_pair_classification"` as the ``task`` argument in the
constructor, the length of each sequence must be 1 or 2, respectively. If you specify ``entities``, the
length of each sequence must be equal to the length of each sequence of ``entities``.
entity_spans_pair (:obj:`List[Tuple[int, int]]`, :obj:`List[List[Tuple[int, int]]]`, `optional`):
The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
with two integers denoting character-based start and end positions of entities. If you specify the
``task`` argument in the constructor, this argument is ignored. If you specify ``entities_pair``, the
length of each sequence must be equal to the length of each sequence of ``entities_pair``.
entities (:obj:`List[str]`, :obj:`List[List[str]]`, `optional`):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the ``task`` argument in the constructor. The length
of each sequence must be equal to the length of each sequence of ``entity_spans``. If you specify
``entity_spans`` without specifying this argument, the entity sequence or the batch of entity sequences
is automatically constructed by filling it with the [MASK] entity.
entities_pair (:obj:`List[str]`, :obj:`List[List[str]]`, `optional`):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the ``task`` argument in the constructor. The length
of each sequence must be equal to the length of each sequence of ``entity_spans_pair``. If you specify
``entity_spans_pair`` without specifying this argument, the entity sequence or the batch of entity
sequences is automatically constructed by filling it with the [MASK] entity.
max_entity_length (:obj:`int`, `optional`):
The maximum length of :obj:`entity_ids`.
"""
# Input type checking for clearer error
is_valid_single_text = isinstance(text, str)
is_valid_batch_text = isinstance(text, (list, tuple)) and (len(text) == 0 or (isinstance(text[0], str)))
assert (
is_valid_single_text or is_valid_batch_text
), "text input must be of type `str` (single example) or `List[str]` (batch)."
is_valid_single_text_pair = isinstance(text_pair, str)
is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and (
len(text_pair) == 0 or isinstance(text_pair[0], str)
)
assert (
text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair
), "text_pair input must be of type `str` (single example) or `List[str]` (batch)."
is_batched = bool(isinstance(text, (list, tuple)))
if is_batched:
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
if entities is None:
batch_entities_or_entities_pairs = None
else:
batch_entities_or_entities_pairs = (
list(zip(entities, entities_pair)) if entities_pair is not None else entities
)
if entity_spans is None:
batch_entity_spans_or_entity_spans_pairs = None
else:
batch_entity_spans_or_entity_spans_pairs = (
list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans
)
return self.batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs,
batch_entities_or_entities_pairs=batch_entities_or_entities_pairs,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
text=text,
text_pair=text_pair,
entity_spans=entity_spans,
entity_spans_pair=entity_spans_pair,
entities=entities,
entities_pair=entities_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput],
text_pair: Optional[Union[TextInput]] = None,
entity_spans: Optional[EntitySpanInput] = None,
entity_spans_pair: Optional[EntitySpanInput] = None,
entities: Optional[EntityInput] = None,
entities_pair: Optional[EntityInput] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: Optional[bool] = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences.
.. warning:: This method is deprecated, ``__call__`` should be used instead.
Args:
text (:obj:`str`):
The first sequence to be encoded. Each sequence must be a string.
text_pair (:obj:`str`):
The second sequence to be encoded. Each sequence must be a string.
            entity_spans (:obj:`List[Tuple[int, int]]`, `optional`):
The first sequence of entity spans to be encoded. The sequence consists of tuples each with two
integers denoting character-based start and end positions of entities. If you specify
:obj:`"entity_classification"` or :obj:`"entity_pair_classification"` as the ``task`` argument in the
constructor, the length of each sequence must be 1 or 2, respectively. If you specify ``entities``, the
length of the sequence must be equal to the length of ``entities``.
            entity_spans_pair (:obj:`List[Tuple[int, int]]`, `optional`):
The second sequence of entity spans to be encoded. The sequence consists of tuples each with two
integers denoting character-based start and end positions of entities. If you specify the ``task``
argument in the constructor, this argument is ignored. If you specify ``entities_pair``, the length of
the sequence must be equal to the length of ``entities_pair``.
            entities (:obj:`List[str]`, `optional`):
The first sequence of entities to be encoded. The sequence consists of strings representing entities,
i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument
is ignored if you specify the ``task`` argument in the constructor. The length of the sequence must be
equal to the length of ``entity_spans``. If you specify ``entity_spans`` without specifying this
argument, the entity sequence is automatically constructed by filling it with the [MASK] entity.
            entities_pair (:obj:`List[str]`, `optional`):
The second sequence of entities to be encoded. The sequence consists of strings representing entities,
i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument
is ignored if you specify the ``task`` argument in the constructor. The length of the sequence must be
equal to the length of ``entity_spans_pair``. If you specify ``entity_spans_pair`` without specifying
this argument, the entity sequence is automatically constructed by filling it with the [MASK] entity.
max_entity_length (:obj:`int`, `optional`):
The maximum length of the entity sequence.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
text_pair=text_pair,
entity_spans=entity_spans,
entity_spans_pair=entity_spans_pair,
entities=entities,
entities_pair=entities_pair,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _encode_plus(
self,
text: Union[TextInput],
text_pair: Optional[Union[TextInput]] = None,
entity_spans: Optional[EntitySpanInput] = None,
entity_spans_pair: Optional[EntitySpanInput] = None,
entities: Optional[EntityInput] = None,
entities_pair: Optional[EntityInput] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: Optional[bool] = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast. "
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
if is_split_into_words:
raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")
(
first_ids,
second_ids,
first_entity_ids,
second_entity_ids,
first_entity_token_spans,
second_entity_token_spans,
) = self._create_input_sequence(
text=text,
text_pair=text_pair,
entities=entities,
entities_pair=entities_pair,
entity_spans=entity_spans,
entity_spans_pair=entity_spans_pair,
**kwargs,
)
# prepare_for_model will create the attention_mask and token_type_ids
return self.prepare_for_model(
first_ids,
pair_ids=second_ids,
entity_ids=first_entity_ids,
pair_entity_ids=second_entity_ids,
entity_token_spans=first_entity_token_spans,
pair_entity_token_spans=second_entity_token_spans,
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair]],
batch_entity_spans_or_entity_spans_pairs: Optional[
Union[List[EntitySpanInput], List[Tuple[EntitySpanInput, EntitySpanInput]]]
] = None,
batch_entities_or_entities_pairs: Optional[
Union[List[EntityInput], List[Tuple[EntityInput, EntityInput]]]
] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: Optional[bool] = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
.. warning::
This method is deprecated, ``__call__`` should be used instead.
Args:
batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`):
                Batch of sequences or pairs of sequences to be encoded. This can be a list of strings or a list of
                pairs of strings (see details in ``encode_plus``).
            batch_entity_spans_or_entity_spans_pairs (:obj:`List[List[Tuple[int, int]]]`, :obj:`List[Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]]`, `optional`):
Batch of entity span sequences or pairs of entity span sequences to be encoded (see details in
``encode_plus``).
batch_entities_or_entities_pairs (:obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`,
`optional`):
Batch of entity sequences or pairs of entity sequences to be encoded (see details in ``encode_plus``).
max_entity_length (:obj:`int`, `optional`):
The maximum length of the entity sequence.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs,
batch_entities_or_entities_pairs=batch_entities_or_entities_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair]],
batch_entity_spans_or_entity_spans_pairs: Optional[
Union[List[EntitySpanInput], List[Tuple[EntitySpanInput, EntitySpanInput]]]
] = None,
batch_entities_or_entities_pairs: Optional[
Union[List[EntityInput], List[Tuple[EntityInput, EntityInput]]]
] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: Optional[bool] = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
)
if is_split_into_words:
raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")
# input_ids is a list of tuples (one for each example in the batch)
input_ids = []
entity_ids = []
entity_token_spans = []
for index, text_or_text_pair in enumerate(batch_text_or_text_pairs):
if not isinstance(text_or_text_pair, (list, tuple)):
text, text_pair = text_or_text_pair, None
else:
text, text_pair = text_or_text_pair
entities, entities_pair = None, None
if batch_entities_or_entities_pairs is not None:
entities_or_entities_pairs = batch_entities_or_entities_pairs[index]
if entities_or_entities_pairs:
if isinstance(entities_or_entities_pairs[0], str):
entities, entities_pair = entities_or_entities_pairs, None
else:
entities, entities_pair = entities_or_entities_pairs
entity_spans, entity_spans_pair = None, None
if batch_entity_spans_or_entity_spans_pairs is not None:
entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index]
if entity_spans_or_entity_spans_pairs:
if isinstance(entity_spans_or_entity_spans_pairs[0][0], int):
entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs, None
else:
entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs
(
first_ids,
second_ids,
first_entity_ids,
second_entity_ids,
first_entity_token_spans,
second_entity_token_spans,
) = self._create_input_sequence(
text=text,
text_pair=text_pair,
entities=entities,
entities_pair=entities_pair,
entity_spans=entity_spans,
entity_spans_pair=entity_spans_pair,
**kwargs,
)
input_ids.append((first_ids, second_ids))
entity_ids.append((first_entity_ids, second_entity_ids))
entity_token_spans.append((first_entity_token_spans, second_entity_token_spans))
batch_outputs = self._batch_prepare_for_model(
input_ids,
batch_entity_ids_pairs=entity_ids,
batch_entity_token_spans_pairs=entity_token_spans,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
def _create_input_sequence(
self,
text: Union[TextInput],
text_pair: Optional[Union[TextInput]] = None,
entities: Optional[EntityInput] = None,
entities_pair: Optional[EntityInput] = None,
entity_spans: Optional[EntitySpanInput] = None,
entity_spans_pair: Optional[EntitySpanInput] = None,
**kwargs
) -> Tuple[list, list, list, list, list, list]:
def get_input_ids(text):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
def get_input_ids_and_entity_token_spans(text, entity_spans):
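            # Tokenize the text piece by piece, cutting at every entity boundary so that
            # span boundaries always fall on token boundaries, and record the mapping
            # from character position to token position along the way.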
if entity_spans is None:
return get_input_ids(text), None
cur = 0
input_ids = []
entity_token_spans = [None] * len(entity_spans)
split_char_positions = sorted(frozenset(itertools.chain(*entity_spans)))
char_pos2token_pos = {}
for split_char_position in split_char_positions:
orig_split_char_position = split_char_position
if (
split_char_position > 0 and text[split_char_position - 1] == " "
): # whitespace should be prepended to the following token
split_char_position -= 1
if cur != split_char_position:
input_ids += get_input_ids(text[cur:split_char_position])
cur = split_char_position
char_pos2token_pos[orig_split_char_position] = len(input_ids)
input_ids += get_input_ids(text[cur:])
entity_token_spans = [
(char_pos2token_pos[char_start], char_pos2token_pos[char_end]) for char_start, char_end in entity_spans
]
return input_ids, entity_token_spans
first_ids, second_ids = None, None
first_entity_ids, second_entity_ids = None, None
first_entity_token_spans, second_entity_token_spans = None, None
if self.task is None:
unk_entity_id = self.entity_vocab["[UNK]"]
mask_entity_id = self.entity_vocab["[MASK]"]
if entity_spans is None:
first_ids = get_input_ids(text)
else:
assert isinstance(entity_spans, list) and (
len(entity_spans) == 0 or isinstance(entity_spans[0], tuple)
), "entity_spans should be given as a list of tuples containing the start and end character indices"
assert entities is None or (
isinstance(entities, list) and (len(entities) == 0 or isinstance(entities[0], str))
), "If you specify entities, they should be given as a list of entity names"
assert entities is None or len(entities) == len(
entity_spans
), "If you specify entities, entities and entity_spans must be the same length"
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
if entities is None:
first_entity_ids = [mask_entity_id] * len(entity_spans)
else:
first_entity_ids = [self.entity_vocab.get(entity, unk_entity_id) for entity in entities]
if text_pair is not None:
if entity_spans_pair is None:
second_ids = get_input_ids(text_pair)
else:
assert isinstance(entity_spans_pair, list) and (
len(entity_spans_pair) == 0 or isinstance(entity_spans_pair[0], tuple)
), "entity_spans_pair should be given as a list of tuples containing the start and end character indices"
assert entities_pair is None or (
isinstance(entities_pair, list)
and (len(entities_pair) == 0 or isinstance(entities_pair[0], str))
), "If you specify entities_pair, they should be given as a list of entity names"
assert entities_pair is None or len(entities_pair) == len(
entity_spans_pair
), "If you specify entities_pair, entities_pair and entity_spans_pair must be the same length"
second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans(
text_pair, entity_spans_pair
)
if entities_pair is None:
second_entity_ids = [mask_entity_id] * len(entity_spans_pair)
else:
second_entity_ids = [self.entity_vocab.get(entity, unk_entity_id) for entity in entities_pair]
elif self.task == "entity_classification":
assert (
isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)
), "Entity spans should be a list containing a single tuple containing the start and end character indices of an entity"
first_entity_ids = [self.entity_vocab["[MASK]"]]
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
# add special tokens to input ids
entity_token_start, entity_token_end = first_entity_token_spans[0]
first_ids = (
first_ids[:entity_token_end] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_end:]
)
first_ids = (
first_ids[:entity_token_start]
+ [self.additional_special_tokens_ids[0]]
+ first_ids[entity_token_start:]
)
first_entity_token_spans = [(entity_token_start, entity_token_end + 2)]
elif self.task == "entity_pair_classification":
assert (
isinstance(entity_spans, list)
and len(entity_spans) == 2
and isinstance(entity_spans[0], tuple)
and isinstance(entity_spans[1], tuple)
), "Entity spans should be provided as a list of tuples, each tuple containing the start and end character indices of an entity"
head_span, tail_span = entity_spans
first_entity_ids = [self.entity_vocab["[MASK]"], self.entity_vocab["[MASK2]"]]
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
head_token_span, tail_token_span = first_entity_token_spans
token_span_with_special_token_ids = [
(head_token_span, self.additional_special_tokens_ids[0]),
(tail_token_span, self.additional_special_tokens_ids[1]),
]
if head_token_span[0] < tail_token_span[0]:
first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
token_span_with_special_token_ids = reversed(token_span_with_special_token_ids)
else:
first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)
for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids:
first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:]
elif self.task == "entity_span_classification":
mask_entity_id = self.entity_vocab["[MASK]"]
assert isinstance(entity_spans, list) and isinstance(
entity_spans[0], tuple
), "Entity spans should be provided as a list of tuples, each tuple containing the start and end character indices of an entity"
first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
first_entity_ids = [mask_entity_id] * len(entity_spans)
else:
raise ValueError(f"Task {self.task} not supported")
return (
first_ids,
second_ids,
first_entity_ids,
second_entity_ids,
first_entity_token_spans,
second_entity_token_spans,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(
self,
batch_ids_pairs: List[Tuple[List[int], None]],
batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]],
batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
"""
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model.
        It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user-defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
batch_entity_ids_pairs: list of entity ids or entity ids pairs
batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
max_entity_length: The maximum length of the entity sequence.
"""
batch_outputs = {}
for input_ids, entity_ids, entity_token_span_pairs in zip(
batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs
):
first_ids, second_ids = input_ids
first_entity_ids, second_entity_ids = entity_ids
first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
outputs = self.prepare_for_model(
first_ids,
second_ids,
entity_ids=first_entity_ids,
pair_entity_ids=second_entity_ids,
entity_token_spans=first_entity_token_spans,
pair_entity_token_spans=second_entity_token_spans,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
entity_ids: Optional[List[int]] = None,
pair_entity_ids: Optional[List[int]] = None,
entity_token_spans: Optional[List[Tuple[int, int]]] = None,
pair_entity_token_spans: Optional[List[Tuple[int, int]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
prepend_batch_axis: bool = False,
**kwargs
) -> BatchEncoding:
"""
        Prepares a sequence of input ids, entity ids and entity spans, or a pair of such sequences, so that it can
        be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account
        the special tokens and manages a moving window (with user-defined stride) for overflowing tokens. Note that
        when ``pair_ids`` is not ``None`` and ``truncation_strategy = longest_first`` or ``True``, it is not
        possible to return overflowing tokens; such a combination of arguments will raise an error.
Args:
ids (:obj:`List[int]`):
Tokenized input ids of the first sequence.
pair_ids (:obj:`List[int]`, `optional`):
Tokenized input ids of the second sequence.
entity_ids (:obj:`List[int]`, `optional`):
Entity ids of the first sequence.
pair_entity_ids (:obj:`List[int]`, `optional`):
Entity ids of the second sequence.
entity_token_spans (:obj:`List[Tuple[int, int]]`, `optional`):
Entity spans of the first sequence.
pair_entity_token_spans (:obj:`List[Tuple[int, int]]`, `optional`):
Entity spans of the second sequence.
max_entity_length (:obj:`int`, `optional`):
The maximum length of the entity sequence.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
# Compute lengths
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
if return_token_type_ids and not add_special_tokens:
raise ValueError(
"Asking to return token_type_ids while setting add_special_tokens to False "
"results in an undefined behavior. Please set add_special_tokens to True or "
"set return_token_type_ids to None."
)
if (
return_overflowing_tokens
and truncation_strategy == TruncationStrategy.LONGEST_FIRST
and pair_ids is not None
):
raise ValueError(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Compute the total size of the returned word encodings
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
# Truncation: Handle max sequence length and max_entity_length
overflowing_tokens = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
# truncate words up to max_length
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
entity_token_offset = 1 # 1 * <s> token
            pair_entity_token_offset = len(ids) + 3  # 1 * <s> token & 2 * </s> tokens
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
entity_token_offset = 0
pair_entity_token_offset = len(ids)
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Set max entity length
if not max_entity_length:
max_entity_length = self.max_entity_length
if entity_ids is not None:
total_entity_len = 0
num_invalid_entities = 0
valid_entity_ids = [ent_id for ent_id, span in zip(entity_ids, entity_token_spans) if span[1] <= len(ids)]
valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)]
total_entity_len += len(valid_entity_ids)
num_invalid_entities += len(entity_ids) - len(valid_entity_ids)
valid_pair_entity_ids, valid_pair_entity_token_spans = None, None
if pair_entity_ids is not None:
valid_pair_entity_ids = [
ent_id
for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans)
if span[1] <= len(pair_ids)
]
valid_pair_entity_token_spans = [span for span in pair_entity_token_spans if span[1] <= len(pair_ids)]
total_entity_len += len(valid_pair_entity_ids)
num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids)
if num_invalid_entities != 0:
logger.warning(
f"{num_invalid_entities} entities are ignored because their entity spans are invalid due to the truncation of input tokens"
)
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and total_entity_len > max_entity_length:
# truncate entities up to max_entity_length
valid_entity_ids, valid_pair_entity_ids, overflowing_entities = self.truncate_sequences(
valid_entity_ids,
pair_ids=valid_pair_entity_ids,
num_tokens_to_remove=total_entity_len - max_entity_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)]
if valid_pair_entity_token_spans is not None:
valid_pair_entity_token_spans = valid_pair_entity_token_spans[: len(valid_pair_entity_ids)]
if return_overflowing_tokens:
encoded_inputs["overflowing_entities"] = overflowing_entities
encoded_inputs["num_truncated_entities"] = total_entity_len - max_entity_length
final_entity_ids = valid_entity_ids + valid_pair_entity_ids if valid_pair_entity_ids else valid_entity_ids
encoded_inputs["entity_ids"] = list(final_entity_ids)
entity_position_ids = []
entity_start_positions = []
entity_end_positions = []
for (token_spans, offset) in (
(valid_entity_token_spans, entity_token_offset),
(valid_pair_entity_token_spans, pair_entity_token_offset),
):
if token_spans is not None:
for start, end in token_spans:
start += offset
end += offset
position_ids = list(range(start, end))[: self.max_mention_length]
position_ids += [-1] * (self.max_mention_length - end + start)
entity_position_ids.append(position_ids)
entity_start_positions.append(start)
entity_end_positions.append(end - 1)
encoded_inputs["entity_position_ids"] = entity_position_ids
if self.task == "entity_span_classification":
encoded_inputs["entity_start_positions"] = entity_start_positions
encoded_inputs["entity_end_positions"] = entity_end_positions
if return_token_type_ids:
encoded_inputs["entity_token_type_ids"] = [0] * len(encoded_inputs["entity_ids"])
# Check lengths
self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
# Padding
# To do: add padding of entities
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
batch_outputs = BatchEncoding(
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
)
return batch_outputs
def pad(
self,
encoded_inputs: Union[
BatchEncoding,
List[BatchEncoding],
Dict[str, EncodedInput],
Dict[str, List[EncodedInput]],
List[Dict[str, EncodedInput]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
) -> BatchEncoding:
"""
        Pad a single encoded input or a batch of encoded inputs up to a predefined length or to the max sequence
        length in the batch. Padding side (left/right) and padding token ids are defined at the tokenizer level
        (with ``self.padding_side``, ``self.pad_token_id`` and ``self.pad_token_type_id``).
        .. note::
            If the ``encoded_inputs`` passed are dictionaries of numpy arrays, PyTorch tensors or TensorFlow
            tensors, the result will use the same type unless you provide a different tensor type with
            ``return_tensors``. In the case of PyTorch tensors, you will however lose the specific device of your
            tensors.
Args:
encoded_inputs (:class:`~transformers.BatchEncoding`, list of :class:`~transformers.BatchEncoding`, :obj:`Dict[str, List[int]]`, :obj:`Dict[str, List[List[int]]` or :obj:`List[Dict[str, List[int]]]`):
Tokenized inputs. Can represent one input (:class:`~transformers.BatchEncoding` or :obj:`Dict[str,
List[int]]`) or a batch of tokenized inputs (list of :class:`~transformers.BatchEncoding`, `Dict[str,
List[List[int]]]` or `List[Dict[str, List[int]]]`) so you can use this method during preprocessing as
well as in a PyTorch Dataloader collate function. Instead of :obj:`List[int]` you can have tensors
(numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
single sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
max_entity_length (:obj:`int`, `optional`):
The maximum length of the entity sequence.
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
return_attention_mask (:obj:`bool`, `optional`):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are
attention masks? <../glossary.html#attention-mask>`__
return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
If set, will return tensors instead of list of python integers. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to print more information and warnings.
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)):
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
# The model's main input name, usually `input_ids`, has to be passed for padding
if self.model_input_names[0] not in encoded_inputs:
raise ValueError(
"You should supply an encoding or a list of encodings to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
)
required_input = encoded_inputs[self.model_input_names[0]]
if not required_input:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
# If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
index = 0
while index < len(required_input) and len(required_input[index]) == 0:
index += 1
if index < len(required_input):
first_element = required_input[index][0]
# At this point, if `first_element` is still a list/tuple, every element is empty so there is nothing to do.
if not isinstance(first_element, (int, list, tuple)):
if is_tf_available() and _is_tensorflow(first_element):
return_tensors = "tf" if return_tensors is None else return_tensors
elif is_torch_available() and _is_torch(first_element):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
# Convert padding_strategy in PaddingStrategy
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
if max_entity_length is None:
max_entity_length = self.max_entity_length
required_input = encoded_inputs[self.model_input_names[0]]
if required_input and not isinstance(required_input[0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(required_input)
assert all(
len(v) == batch_size for v in encoded_inputs.values()
), "Some items in the output dictionary have a different batch size than others."
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in required_input)
max_entity_length = (
max(len(inputs) for inputs in encoded_inputs["entity_ids"]) if "entity_ids" in encoded_inputs else 0
)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = {k: v[i] for k, v in encoded_inputs.items()}
outputs = self._pad(
inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
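# Usage sketch (hypothetical names, not part of this file): since `pad` accepts a list
# of encodings, it can serve directly as a DataLoader collate function, e.g.
#   loader = DataLoader(dataset, batch_size=8,
#                       collate_fn=lambda batch: tokenizer.pad(batch, padding="longest", return_tensors="pt"))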
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
max_entity_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
max_entity_length: The maximum length of the entity sequence.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
entities_provided = "entity_ids" in encoded_inputs
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(encoded_inputs["input_ids"])
if entities_provided:
max_entity_length = len(encoded_inputs["entity_ids"])
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
if (
entities_provided
and max_entity_length is not None
and pad_to_multiple_of is not None
and (max_entity_length % pad_to_multiple_of != 0)
):
max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (
len(encoded_inputs["input_ids"]) != max_length
or (entities_provided and len(encoded_inputs["entity_ids"]) != max_entity_length)
)
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
if entities_provided and return_attention_mask and "entity_attention_mask" not in encoded_inputs:
encoded_inputs["entity_attention_mask"] = [1] * len(encoded_inputs["entity_ids"])
if needs_to_be_padded:
difference = max_length - len(encoded_inputs["input_ids"])
if entities_provided:
entity_difference = max_entity_length - len(encoded_inputs["entity_ids"])
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if entities_provided:
encoded_inputs["entity_attention_mask"] = (
encoded_inputs["entity_attention_mask"] + [0] * entity_difference
)
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"] + [0] * difference
if entities_provided:
encoded_inputs["entity_token_type_ids"] = (
encoded_inputs["entity_token_type_ids"] + [0] * entity_difference
)
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
if entities_provided:
encoded_inputs["entity_ids"] = encoded_inputs["entity_ids"] + [0] * entity_difference
encoded_inputs["entity_position_ids"] = (
encoded_inputs["entity_position_ids"] + [[-1] * self.max_mention_length] * entity_difference
)
if self.task == "entity_span_classification":
encoded_inputs["entity_start_positions"] = (
encoded_inputs["entity_start_positions"] + [0] * entity_difference
)
encoded_inputs["entity_end_positions"] = (
encoded_inputs["entity_end_positions"] + [0] * entity_difference
)
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if entities_provided:
encoded_inputs["entity_attention_mask"] = [0] * entity_difference + encoded_inputs[
"entity_attention_mask"
]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [0] * difference + encoded_inputs["token_type_ids"]
if entities_provided:
encoded_inputs["entity_token_type_ids"] = [0] * entity_difference + encoded_inputs[
"entity_token_type_ids"
]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
if entities_provided:
encoded_inputs["entity_ids"] = [0] * entity_difference + encoded_inputs["entity_ids"]
encoded_inputs["entity_position_ids"] = [
[-1] * self.max_mention_length
] * entity_difference + encoded_inputs["entity_position_ids"]
if self.task == "entity_span_classification":
encoded_inputs["entity_start_positions"] = [0] * entity_difference + encoded_inputs[
"entity_start_positions"
]
encoded_inputs["entity_end_positions"] = [0] * entity_difference + encoded_inputs[
"entity_end_positions"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return encoded_inputs
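# Illustration of the arithmetic above (assumed values): with max_length=8 and
# input_ids of length 5, difference = 3, so right-padding appends
# [self.pad_token_id] * 3 to input_ids and [0, 0, 0] to the attention mask;
# entity sequences are extended the same way using entity_difference.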
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
vocab_file, merge_file = super().save_vocabulary(save_directory, filename_prefix)
entity_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["entity_vocab_file"]
)
with open(entity_vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.entity_vocab, ensure_ascii=False))
return vocab_file, merge_file, entity_vocab_file
|
{"hexsha": "13b65365741bcbcbef0d465affcfa4ecbfd86408", "size": 79104, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/transformers/models/luke/tokenization_luke.py", "max_stars_repo_name": "HimashiRathnayake/adapter-transformers", "max_stars_repo_head_hexsha": "d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 50404, "max_stars_repo_stars_event_min_datetime": "2019-09-26T09:55:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:07:49.000Z", "max_issues_repo_path": "src/transformers/models/luke/tokenization_luke.py", "max_issues_repo_name": "HimashiRathnayake/adapter-transformers", "max_issues_repo_head_hexsha": "d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13179, "max_issues_repo_issues_event_min_datetime": "2019-09-26T10:10:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:17:08.000Z", "max_forks_repo_path": "src/transformers/models/luke/tokenization_luke.py", "max_forks_repo_name": "HimashiRathnayake/adapter-transformers", "max_forks_repo_head_hexsha": "d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13337, "max_forks_repo_forks_event_min_datetime": "2019-09-26T10:49:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T23:06:17.000Z", "avg_line_length": 51.1668822768, "max_line_length": 212, "alphanum_fraction": 0.6353787419, "include": true, "reason": "import numpy", "num_tokens": 16261}
|
using DelayDiffEq, DiffEqProblemLibrary.DDEProblemLibrary
using Test
DDEProblemLibrary.importddeproblems()
@testset "init" begin
prob = DDEProblemLibrary.prob_dde_constant_1delay_ip
prob_scalar = DDEProblemLibrary.prob_dde_constant_1delay_scalar
inferred = [BS3(), Tsit5(), RK4(), Vern6()]
for alg in inferred
ddealg = MethodOfSteps(alg)
@inferred init(prob, ddealg)
@inferred init(prob_scalar, ddealg)
end
notinferred = [SDIRK2(), TRBDF2(), KenCarp4(), Rosenbrock23(), Rodas4()]
for alg in notinferred
ddealg = MethodOfSteps(alg)
@test_broken @inferred init(prob, ddealg)
@test_broken @inferred init(prob_scalar, ddealg)
end
end
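# Note (explanatory, not from the original tests): MethodOfSteps wraps an ODE
# integrator for delay equations; @inferred asserts that the return type of init
# is statically inferable, which holds for the explicit algorithms above but is
# currently broken for the implicit ones.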
|
{"hexsha": "7c2505452c61e9784b196c9216833ee622bfd64b", "size": 715, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/regression/inference.jl", "max_stars_repo_name": "UnofficialJuliaMirror/DelayDiffEq.jl-bcd4f6db-9728-5f36-b5f7-82caef46ccdb", "max_stars_repo_head_hexsha": "041844864da58e78882ce212ebda52e66b028147", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/regression/inference.jl", "max_issues_repo_name": "UnofficialJuliaMirror/DelayDiffEq.jl-bcd4f6db-9728-5f36-b5f7-82caef46ccdb", "max_issues_repo_head_hexsha": "041844864da58e78882ce212ebda52e66b028147", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/regression/inference.jl", "max_forks_repo_name": "UnofficialJuliaMirror/DelayDiffEq.jl-bcd4f6db-9728-5f36-b5f7-82caef46ccdb", "max_forks_repo_head_hexsha": "041844864da58e78882ce212ebda52e66b028147", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6, "max_line_length": 76, "alphanum_fraction": 0.7048951049, "num_tokens": 209}
|
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import csv
import json
from collections import Counter
from matplotlib.patches import Rectangle
import math
from skimage.measure import block_reduce
# import tensorflow as tf
# keras = tf.keras
# DOCUMENTATION
############ CLOUD USAGE (or local usage, if you only want to load a fraction of the dataset; see the f param below)
# LOAD DATASET
# !! TLDR; RUN THIS ! ---> x_train,y_train,x_val,y_val,x_test,y_test = data_load(f=1)
# The f param below is the fraction of the dataset you want to load.
# For example, pass in f=0.1 to load only 10% of the train, val, and test sets.
# This is useful if you want to test locally and load only a subset of the data so it fits in RAM.
# Also note that the png files are COMPRESSED, so the in-RAM footprint of the loaded data
# is approximately 2 to 7 times larger than the files on disk (uncompressed).
# See the data_load() implementation directly below for more information.
# Note: the output will say "Removed x lesions.." because the neighboring slice was missing for those slices.
def data_load(dl_info, dl_info_vector, json_labels, organ_id, f=1) :
organ_data = load_all_data_for_term(dl_info, dl_info_vector, json_labels, organ_id, f) # organ_id is the term ID (e.g. 8 corresponds to liver lesion)
# the above is defined below and returns a dict with keys train, val, test , each of which has value that is a tuple of [ X,Y ]
# so we can destructure this like so:
from operator import itemgetter
[x_train,y_train] , [x_val, y_val] , [x_test,y_test] = itemgetter("train","val","test")(organ_data)
# and then we can return all these elements in one big tuple
print("Train Size: {}\nVal Size: {}\nTest Size: {}".format(str(len(x_train)),str(len(x_val)),str(len(x_test))))
return (x_train,y_train,x_val,y_val,x_test,y_test)
# END LOAD DATASET
############ END CLOUD USAGE
# END DOCUMENTATION
# manage windows/linux stuff
import platform
_os = platform.system()
if _os == "Linux" :
fdelim = "/"
elif _os == "Darwin" :
fdelim = "/"
elif _os == "Windows" :
fdelim = "\\"
else :
print("unrecognized os!")
def set_image_dir(d) :
global image_dir
global dl_info_vector
global dl_info
image_dir = d
dl_info_vector = read_dl_info_vector()
dl_info = read_dl_info()
def get_files() :
# prepare the image directories
image_dir = "images" + fdelim + "Images_png" + fdelim
sub_dirs = os.listdir(image_dir)
sub_dirs.sort()
# replace each sub_dir with [ sub_dir [file_list] ]
files = []
for d in sub_dirs :
sub_files = os.listdir(os.path.join(image_dir,d))
sub_files.sort()
sub_files_fp = [ os.path.join(image_dir,d,x) for x in sub_files ]
files.append( [ d , sub_files_fp ] )
return files
# helper functions
def check_for_file(fname) :
import os.path
return os.path.isfile(fname)
def append_file(fname, strang) :
if not check_for_file(fname) :
mode = 'w'
else :
mode = 'a+'
with open(fname, mode) as outfile :
outfile.write(strang)
# given a file, produce a numpy array (plus window and, optionally, bounding box)
def read_image(dl_info, fn,with_win=False,bb=True,verbose=True) :
im = cv2.imread(fn,-1)
im = (im.astype(np.int32)-32768).astype(np.int16)
# only look up the window if with_win is False
win = with_win or [float(x) for x in dl_info[fn]['DICOM_windows'].split(",")]
if verbose :
print("For fn: {}, using window: [{},{}]".format(fn,win[0],win[1]))
im = windowing(im,win)
# now get the bounding box as well
if bb :
_bb = [round(float(x)) for x in dl_info[fn]['Bounding_boxes'].split(',') ]
return (im , _bb, win )
else :
return im
def gen_neighbor_names(fn) :
tok = fn.split(fdelim) # ['images', 'Images_png', '000001_01_01', '103.png']
slice_tok = tok[-1].split(".")
left_num = "{:03d}".format(int(slice_tok[0]) - 1)
right_num = "{:03d}".format(int(slice_tok[0]) + 1)
left_fn = fdelim.join(tok[0:-1]) + fdelim + left_num + ".png"
right_fn = fdelim.join(tok[0:-1]) + fdelim + right_num + ".png"
return (left_fn, right_fn)
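# Worked example (path is the one from the comment above, assuming fdelim == "/"):
#   gen_neighbor_names("images/Images_png/000001_01_01/103.png")
#   -> ("images/Images_png/000001_01_01/102.png", "images/Images_png/000001_01_01/104.png")
# i.e. the slice number is decremented/incremented and zero-padded to three digits.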
def read_image_and_neighbors(dl_info, fn,verbose=True) :
# should be able to assume that the slices are available on either side
lfn, rfn = gen_neighbor_names(fn)
# first we read the main image and get the window and bounding box
mim, bb, win = read_image(dl_info, fn,verbose=verbose)
# now we will read the left and right images using the same window and w/o bb
lim = read_image(dl_info, lfn,with_win=win,bb=False,verbose=verbose)
rim = read_image(dl_info, rfn,with_win=win,bb=False,verbose=verbose)
# we are going to produce a matrix of shape (512, 512, 3)
slices = np.zeros( (512,512,3 ) )
try:
slices[:,:,0] = lim
except:
downsample_factor = len(lim) / 512
if downsample_factor % 1 == 0:
downsample_factor = int(downsample_factor)
lim = block_reduce(lim, block_size=(downsample_factor, downsample_factor), func=np.mean)
slices[:,:,0] = lim
else:
pass
try:
slices[:,:,1] = mim
except:
downsample_factor = len(mim) / 512
if downsample_factor % 1 == 0:
downsample_factor = int(downsample_factor)
mim = block_reduce(mim, block_size=(downsample_factor, downsample_factor), func=np.mean)
slices[:,:,1] = mim
try:
slices[:,:,2] = rim
except:
downsample_factor = len(rim) / 512
if downsample_factor % 1 == 0:
downsample_factor = int(downsample_factor)
rim = block_reduce(rim, block_size=(downsample_factor, downsample_factor), func=np.mean)
slices[:,:,2] = rim
return (slices, np.array(bb))
def nb_imshow(im,bb=False) :
plt.imshow(im[:,:,1],cmap='gray')
# if a bounding box was passed, also draw it
if isinstance(bb, np.ndarray) and bb.any() :
# unnormalize the bounding box
bb = 512*bb
# need to convert to appropriate shapes
pt = (bb[0], bb[1])
w = bb[2] - bb[0]
h = bb[3] - bb[1]
print("Using bb coords: ({},{}),{},{}".format(pt[0],pt[1],w,h))
plt.gca().add_patch(Rectangle(pt,w,h,linewidth=1,edgecolor='lime',facecolor='none'))
def show_image(im,bb=False) :
plt.gca().cla()
plt.imshow(im,cmap='gray')
plt.ion()
plt.show()
# if a bounding box was passed, also draw it
if isinstance(bb, np.ndarray) and bb.any() :
# need to convert to appropriate shapes
pt = (bb[0], bb[1])
w = bb[2] - bb[0]
h = bb[3] - bb[1]
print("Using bb coords: ({},{}),{},{}".format(pt[0],pt[1],w,h))
plt.gca().add_patch(Rectangle(pt,w,h,linewidth=1,edgecolor='lime',facecolor='none'))
plt.draw()
plt.pause(0.001) # non blocking
def disp(fn,bb=False) :
im, _bb, win = read_image(dl_info, fn) # read the image and the bounding box (avoid clobbering the bb flag)
if bb :
show_image(im,bb=np.array(_bb))
else :
show_image(im)
def disp_loop() :
plt.figure()
for folder in get_files() :
for f in folder[1] :
im = read_image(dl_info, f, bb=False)
plt.imshow(im,cmap='gray')
plt.pause(0.1)
plt.draw()
def test_show() :
disp("images/Images_png/000001_03_01/088.png", bb=True)
def read_json_labels(path_to_json_labels = 'text_mined_labels_171_and_split.json') :
with open(path_to_json_labels) as json_file:
data = json.load(json_file)
return data
def get_index_of_term(t) :
return json_labels['term_list'].index(t)
def search_for_term(term, to_search) : # term is actually an index here
matches = []
for i,val in enumerate(to_search) :
# each val here is a list [x, x2, x3.. ]
# if 'term' is in this list then we add it to matches
if term in val :
matches.append([i,val])
return matches
def read_dl_info_vector(image_dir = "images" + fdelim + "Images_png" + fdelim , DL_INFO_PATH = './') :
#function for modifying map object after generated
def transform_map(m) :
#fixes fname
tok = m['File_name'].split("_")
m['File_name'] = image_dir + "_".join(tok[0:3]) + fdelim + tok[-1]
return m
with open(DL_INFO_PATH + 'DL_info.csv') as f:
a = [{k: v for k, v in row.items()}
for row in csv.DictReader(f, skipinitialspace=True)]
return [transform_map(x) for x in a]
def read_dl_info() :
info = {}
a = read_dl_info_vector(
image_dir = '../images/Images_png/',
DL_INFO_PATH = '../cs230/')
for d in a :
info[d['File_name']] = d
return info
def select_lesion_idxs(dl_info_vector, s) :
return [ dl_info_vector[x] for x in s ]
def get_folders_for_lesions_set(ls) :
return [ "/".join(x['File_name'].split("/")[0:3]) for x in ls ]
def fname_with_neighbors(fname) :
(ln, rn) = gen_neighbor_names(fname)
return [ln, fname, rn ]
def get_fnames_and_neighbors_for_lesions_set(ls) :
res = [ fname_with_neighbors(x['File_name']) for x in ls ]
return [item for sublist in res for item in sublist if check_for_file(item) ]
def write_list_to_file(fname,l) :
for i in l :
append_file(fname,i + "\n")
def generate_term_specific_set(dl_info_vector, json_labels, train_val_test, term,v=True) :
labs = search_for_term(term, json_labels['{}_relevant_labels'.format(train_val_test)])
labs_idx = [ x[0] for x in labs ]
lesion_idx = [ json_labels['{}_lesion_idxs'.format(train_val_test)][i] for i in labs_idx ]
lesions = select_lesion_idxs(dl_info_vector, lesion_idx)
coarse_types = Counter([x['Coarse_lesion_type'] for x in lesions])
def filt(l) :
ln,rn = gen_neighbor_names(l['File_name'])
#print(ln)
return (check_for_file(ln) and check_for_file(rn) )
final_lesions = list(filter( filt , lesions))
if v :
print("Removed {} lesion(s) of {}".format(len(lesions) - len(final_lesions) , len(lesions)))
return { "lesions" : final_lesions ,
"coarse_types" : coarse_types ,
"lesion_idx" : lesion_idx ,
"labs_idx" : labs_idx ,
"labs" : labs }
def load_data_to_memory(dl_info, lesions,msg=None) :
num_lesions = len(lesions)
xs = np.zeros( (num_lesions, 512,512,3 ) )
ys = np.zeros( (num_lesions, 4 ) )
if msg :
print(msg)
for i,v in enumerate(lesions) :
if (i % 100 == 0 and i != 0 ) :
print("On index: " + str(i))
# get the filename of the lesion
fn = lesions[i]['File_name']
# get the data
slices,bounding_box = read_image_and_neighbors(dl_info, fn,verbose=False)
# append the data
xs[i,:,:,:] = slices
ys[i,:] = bounding_box/512
# now we return the xs and ys
return (xs,ys)
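# Shape note: for N lesions, xs has shape (N, 512, 512, 3) with the (left, middle,
# right) slices stacked along the channel axis, and ys holds bounding boxes
# normalized to [0, 1] by dividing the pixel coordinates by 512.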
def load_all_data_for_term(dl_info, dl_info_vector, json_labels, t, f=1) :
sets = ["train" , "val" , "test" ]
print("\nLoading data for term index: " + str(t) )
print("Fraction of data that will be loaded={}\n".format(f))
data = {}
for s in sets :
print("Loading {} set".format(s))
term_dataset_with_metadata = generate_term_specific_set(dl_info_vector, json_labels, s,t)
max_index = int(len(term_dataset_with_metadata["lesions"])*f)
data[s] = load_data_to_memory(dl_info, term_dataset_with_metadata["lesions"][0:max_index])
print("Done\n")
return data
def build_partitioned_dataset(lesions,name,num_parts) :
num_per_part = math.floor(len(lesions)/num_parts)
print("{} per part".format(num_per_part))
for k in range(0,num_parts) :
part = lesions[num_per_part*k:num_per_part*(k+1)]
part_number = str(k+ 1)
print("Building part #{} with {} lesions".format(part_number,len(part)))
build_dataset(part,name+"_part_" + part_number)
if num_per_part * num_parts < len(lesions) :
part = lesions[num_per_part*num_parts:len(lesions)]
part_number = str(num_parts + 1)
print("Building part #{} with {} lesions".format(part_number,len(part)))
build_dataset(part,name+"_part_" + part_number)
#done
def build_dataset(lesions,name) :
num_lesions = len(lesions)
xs = np.zeros( (num_lesions, 512,512,3 ) )
ys = np.zeros( (num_lesions, 1, 4 ) )
print("Generating data set...")
for i,v in enumerate(lesions) :
#if ( i % 200 == 0 ) :
# print("On index: " + str(i))
# get the filename of the lesion
fn = lesions[i]['File_name']
# TODO: wrap this in try/except and print the FILENAME on error so failures are easy to locate
slices,bounding_box = read_image_and_neighbors(dl_info, fn,verbose=False)
# append the data
xs[i,:,:,:] = slices
ys[i,:,:] = bounding_box
# at this point data set should be built
# will write the data to numpy binary file
#print("Saving xs...")
np.save(name + '_xs',xs)
#print("Saving ys...")
np.save(name + '_ys',ys)
print("Done!")
def get_dataset() :
# FOR LOADING XS and YS
fbase = "datasets" + fdelim + "liver_train_part_1_"
xs_fn = fbase + "xs.npy"
ys_fn = fbase + "ys.npy"
xs = np.load(xs_fn)
ys = np.load(ys_fn)
return (xs, ys )
def plot_data(xs,ys,num) :
show_image(xs[num,:,:,1] , bb=ys[num,:,:].flatten() )
def windowing2(im,win):
im = im.astype(float)
return np.minimum(255, np.maximum(0, 255*(im-win[0])/(win[1]-win[0])))
def windowing(im, win):
# (https://github.com/rsummers11/CADLab/blob/master/LesaNet/load_ct_img.py)
# scale intensity from win[0]~win[1] to float numbers in 0~255
im1 = im.astype(float)
im1 -= win[0]
im1 /= (win[1] - win[0])
im1[im1 > 1] = 1
im1[im1 < 0] = 0
im1 *= 255
return im1
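# Example (window values are illustrative, not taken from DL_info): windowing(im, [-175, 275])
# maps intensity -175 to 0 and 275 to 255, scaling linearly in between and clipping
# everything outside the window.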
def reload() :
import importlib
import sys
importlib.reload(sys.modules['util'])
def convert_to_iou_format(y) :
"""
Will convert from [x_min, y_min, x_max, y_max] to [x, y, width, height]
"""
return np.array([ y[0] , y[1] , y[2]-y[0] , y[3]-y[1] ] )
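# Example: convert_to_iou_format(np.array([2, 3, 7, 9])) -> array([2, 3, 5, 6]),
# i.e. width = 7 - 2 = 5 and height = 9 - 3 = 6.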
def calculate_iou(y_true, y_pred):
"""
Input:
Keras provides the input as numpy arrays with shape (batch_size, num_columns).
Arguments:
y_true -- ground-truth box, numpy array with format [x, y, width, height]
y_pred -- predicted box, numpy array with format [x, y, width, height]
x and y are the coordinates of the top left corner of each box.
Output: IoU of type float32. (This is a ratio. Max is 1. Min is 0.)
"""
# set the types so we are sure what type we are using
#y_true = convert_to_iou_format(y_true.astype(np.float32))
#y_pred = convert_to_iou_format(y_pred.astype(np.float32))
# boxTrue
x_boxTrue_tleft = y_true[0] # numpy index selection
y_boxTrue_tleft = y_true[1]
boxTrue_width = y_true[2]
boxTrue_height = y_true[3]
area_boxTrue = (boxTrue_width * boxTrue_height)
# boxPred
x_boxPred_tleft = y_pred[0]
y_boxPred_tleft = y_pred[1]
boxPred_width = y_pred[2]
boxPred_height = y_pred[3]
area_boxPred = (boxPred_width * boxPred_height)
x_boxTrue_br = x_boxTrue_tleft + boxTrue_width
y_boxTrue_br = y_boxTrue_tleft + boxTrue_height # Version 2 revision
# calculate the top left and bottom right coordinates for the intersection box, boxInt
x_boxPred_br = x_boxPred_tleft + boxPred_width
y_boxPred_br = y_boxPred_tleft + boxPred_height
# boxInt - top left coords
x_boxInt_tleft = np.max([x_boxTrue_tleft,x_boxPred_tleft])
y_boxInt_tleft = np.max([y_boxTrue_tleft,y_boxPred_tleft]) # Version 2 revision
# boxInt - bottom right coords
x_boxInt_br = np.min([x_boxTrue_br,x_boxPred_br])
y_boxInt_br = np.min([y_boxTrue_br,y_boxPred_br])
# Calculate the area of boxInt, i.e. the area of the intersection
# between boxTrue and boxPred.
# The np.max() function forces the intersection area to 0 if the boxes don't overlap.
# Version 2 revision
area_of_intersection = \
np.max([0,(x_boxInt_br - x_boxInt_tleft)]) * np.max([0,(y_boxInt_br - y_boxInt_tleft)])
iou = area_of_intersection / ((area_boxTrue + area_boxPred) - area_of_intersection)
# This must match the type used in py_func
iou = iou.astype(np.float32)
# return the IoU score for this pair of boxes
return iou
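# Worked example (made-up boxes): y_true = [0, 0, 10, 10], y_pred = [5, 5, 10, 10].
# The intersection spans (5, 5) to (10, 10), so area_of_intersection = 25; the union
# is 100 + 100 - 25 = 175, giving an IoU of 25 / 175 ~= 0.143.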
def IoU(y_true, y_pred):
# Note: the type float32 is very important. It must be the same type as the output from
# the python function above or you too may spend many late night hours
# trying to debug and almost give up.
# Requires TensorFlow 1.x; the module-level import is commented out above, so
# import it lazily here to keep the rest of this module tf-free.
import tensorflow as tf
iou = tf.py_func(calculate_iou, [y_true, y_pred], tf.float32)
return iou
if __name__ == '__main__' :
pass
|
{"hexsha": "3e5f086a183ddf457f259f65ddd01fd4fd9028eb", "size": 17391, "ext": "py", "lang": "Python", "max_stars_repo_path": "util.py", "max_stars_repo_name": "surya-narayanan/Meta-Learning-Deep-Leision", "max_stars_repo_head_hexsha": "088d279ac72529a70bca27f399d2dd8f64e3f3bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-31T15:29:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-31T15:29:57.000Z", "max_issues_repo_path": "util.py", "max_issues_repo_name": "surya-narayanan/Meta-Learning-Deep-Leision", "max_issues_repo_head_hexsha": "088d279ac72529a70bca27f399d2dd8f64e3f3bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util.py", "max_forks_repo_name": "surya-narayanan/Meta-Learning-Deep-Leision", "max_forks_repo_head_hexsha": "088d279ac72529a70bca27f399d2dd8f64e3f3bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1109123435, "max_line_length": 142, "alphanum_fraction": 0.6156632741, "include": true, "reason": "import numpy", "num_tokens": 4883}
|
#include <boost/test/unit_test.hpp>
#include "expression/Parser.hpp"
#include "expression/Ast.hpp"
#include "expression/AstOp.hpp"
#include "expression/Lexer.hpp"
#include "Function.hpp"
#include "Error.hpp"
using namespace slim;
using namespace slim::expr;
BOOST_AUTO_TEST_SUITE(TestExprParser)
ExpressionNodePtr parse(const std::string &str)
{
Lexer lexer(str);
LocalVarNames vars;
vars.add("myvar");
Parser parser(vars, lexer);
return parser.full_expression();
}
std::string parse_part(const std::string &str)
{
Lexer lexer(str);
LocalVarNames vars;
Parser parser(vars, lexer);
return parser.expression()->to_string();
}
std::string parse_stmt(const std::string &str)
{
Lexer lexer(str);
LocalVarNames vars;
Parser parser(vars, lexer);
return parser.statement()->to_string();
}
template<class T> bool is_node_type(ExpressionNodePtr ptr)
{
return dynamic_cast<T*>(ptr.get()) != nullptr;
}
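// Usage sketch: is_node_type<Literal>(parse("5")) is true when the parsed
// expression's root AST node is a Literal; the test cases below combine this
// with to_string() checks on the rendered AST.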
BOOST_AUTO_TEST_CASE(single_values)
{
BOOST_CHECK(is_node_type<Literal>(parse("true")));
BOOST_CHECK_EQUAL("true", parse("true")->to_string());
BOOST_CHECK(is_node_type<Literal>(parse("false")));
BOOST_CHECK_EQUAL("false", parse("false")->to_string());
BOOST_CHECK(is_node_type<Literal>(parse("nil")));
BOOST_CHECK_EQUAL("nil", parse("nil")->to_string());
BOOST_CHECK(is_node_type<Literal>(parse("55")));
BOOST_CHECK_EQUAL("55", parse("55")->to_string());
BOOST_CHECK(is_node_type<Literal>(parse("55.5")));
BOOST_CHECK_EQUAL("55.5", parse("55.5")->to_string());
BOOST_CHECK(is_node_type<Literal>(parse("'true'")));
BOOST_CHECK_EQUAL("\"true\"", parse("'true'")->to_string());
BOOST_CHECK(is_node_type<Literal>(parse("/true/")));
BOOST_CHECK_EQUAL("/true/", parse("/true/")->to_string());
BOOST_CHECK_THROW(parse("55.5.5"), SyntaxError);
BOOST_CHECK(is_node_type<ArrayLiteral>(parse("[5]")));
BOOST_CHECK_EQUAL("[]", parse("[]")->to_string());
BOOST_CHECK_EQUAL("[5]", parse("[5]")->to_string());
BOOST_CHECK_EQUAL("[2, true]", parse("[2,true]")->to_string());
BOOST_CHECK(is_node_type<HashLiteral>(parse("{a: 5}")));
BOOST_CHECK_EQUAL("{}", parse("{}")->to_string());
BOOST_CHECK_EQUAL("{:a => 5}", parse("{a: 5}")->to_string());
BOOST_CHECK_EQUAL("{1 => 2, 5 => true}", parse("{1 => 2, 5 => true}")->to_string());
BOOST_CHECK(is_node_type<GlobalConstant>(parse("Const")));
BOOST_CHECK_EQUAL("Const", parse("Const")->to_string());
//Note that myvar was added to the variable scope for the parser
BOOST_CHECK(is_node_type<Variable>(parse("myvar")));
BOOST_CHECK_EQUAL("myvar", parse("myvar")->to_string());
BOOST_CHECK(is_node_type<Attribute>(parse("@myvar")));
BOOST_CHECK_EQUAL("@myvar", parse("@myvar")->to_string());
}
BOOST_AUTO_TEST_CASE(const_nav)
{
BOOST_CHECK(is_node_type<ConstantNav>(parse("X::PI")));
BOOST_CHECK_EQUAL("X::PI", parse("X::PI")->to_string());
BOOST_CHECK_EQUAL("x()::PI", parse("x::PI")->to_string());
BOOST_CHECK_EQUAL("myvar::PI", parse("myvar::PI")->to_string());
BOOST_CHECK_EQUAL("myvar::Math::PI", parse("myvar::Math::PI")->to_string());
BOOST_CHECK_EQUAL("myvar.class()::Math::PI", parse("myvar.class::Math::PI")->to_string());
BOOST_CHECK_THROW(parse("X::not_a_const"), SyntaxError);
BOOST_CHECK_THROW(parse("X::"), SyntaxError);
//NOTE: This syntax may be available in the future to refer to "main" if it ever exists
BOOST_CHECK_THROW(parse("::X"), SyntaxError);
BOOST_CHECK_THROW(parse("::"), SyntaxError);
}
BOOST_AUTO_TEST_CASE(single_ops)
{
BOOST_CHECK_EQUAL("(5 .. 10)", parse("5 .. 10")->to_string());
BOOST_CHECK_EQUAL("(5 ... 10)", parse("5 ... 10")->to_string());
BOOST_CHECK_EQUAL("(5 && 10)", parse("5 && 10")->to_string());
BOOST_CHECK_EQUAL("(5 || 10)", parse("5 || 10")->to_string());
BOOST_CHECK_EQUAL("(5 == 10)", parse("5 == 10")->to_string());
BOOST_CHECK_EQUAL("(5 != 10)", parse("5 != 10")->to_string());
BOOST_CHECK_EQUAL("(5 <=> 10)", parse("5 <=> 10")->to_string());
BOOST_CHECK_EQUAL("(5 < 10)", parse("5 < 10")->to_string());
BOOST_CHECK_EQUAL("(5 <= 10)", parse("5 <= 10")->to_string());
BOOST_CHECK_EQUAL("(5 > 10)", parse("5 > 10")->to_string());
BOOST_CHECK_EQUAL("(5 >= 10)", parse("5 >= 10")->to_string());
BOOST_CHECK_EQUAL("(5 << 10)", parse("5 << 10")->to_string());
BOOST_CHECK_EQUAL("(5 >> 10)", parse("5 >> 10")->to_string());
BOOST_CHECK_EQUAL("(5 & 10)", parse("5 & 10")->to_string());
BOOST_CHECK_EQUAL("(5 | 10)", parse("5 | 10")->to_string());
BOOST_CHECK_EQUAL("(5 ^ 10)", parse("5 ^ 10")->to_string());
BOOST_CHECK_EQUAL("(~10)", parse("~10")->to_string());
BOOST_CHECK_EQUAL("(5 + 10)", parse("5 + 10")->to_string());
BOOST_CHECK_EQUAL("(5 - 10)", parse("5 - 10")->to_string());
BOOST_CHECK_EQUAL("(5 * 10)", parse("5 * 10")->to_string());
BOOST_CHECK_EQUAL("(5 ** 2)", parse("5 ** 2")->to_string());
BOOST_CHECK_EQUAL("(5 / 10)", parse("5 / 10")->to_string());
BOOST_CHECK_EQUAL("(5 % 10)", parse("5 % 10")->to_string());
BOOST_CHECK_EQUAL("(!10)", parse("! 10")->to_string());
BOOST_CHECK_EQUAL("-10", parse("-10")->to_string());
BOOST_CHECK_EQUAL("10", parse("+10")->to_string());
BOOST_CHECK_EQUAL(":name.to_proc()", parse("&:name")->to_string());
BOOST_CHECK_EQUAL("(5 ? @a : @b)", parse("5 ? @a : @b")->to_string());
BOOST_CHECK_EQUAL("(5 ? :a : :b)", parse("5 ? :a : :b")->to_string());
BOOST_CHECK_EQUAL("(5 ? @a.func() : @b.func())", parse("5 ? @a.func : @b.func")->to_string());
BOOST_CHECK_THROW(parse("true ? @a.f :x :y"), SyntaxError);
BOOST_CHECK_THROW(parse("true ? @a.f f 5 : y"), SyntaxError);
BOOST_CHECK_THROW(parse("true ? @a.f :x : f 5"), SyntaxError);
}
BOOST_AUTO_TEST_CASE(associativity_single)
{
//conditional right to left
BOOST_CHECK_EQUAL("(5 ? @a : (@b ? @d : @e))", parse("5 ? @a : @b ? @d : @e")->to_string());
// binary, left to right
BOOST_CHECK_EQUAL("(((5 && 10) && 5) && true)", parse("5 && 10 && 5 && true")->to_string());
// unary, right to left
BOOST_CHECK_EQUAL("(-(!(-5)))", parse("-!-+5")->to_string());
}
BOOST_AUTO_TEST_CASE(grouping)
{
BOOST_CHECK_EQUAL("(5 - 5)", parse("(5 - 5)")->to_string());
BOOST_CHECK_EQUAL("(5 - 5)", parse("(((5) - 5))")->to_string()); //because groups dont exist in the AST
BOOST_CHECK_EQUAL("((5 - 5) * 6)", parse("(5 - 5) * 6")->to_string());
BOOST_CHECK_EQUAL("(-((5 - 5) * 6))", parse("-((5 - 5) * 6)")->to_string());
BOOST_CHECK_EQUAL("[2, (5 + 5), @a.f()]", parse("[2,5+5,@a.f]")->to_string());
}
BOOST_AUTO_TEST_CASE(string_interp)
{
BOOST_CHECK_EQUAL("\"hello world\"", parse("'hello world'")->to_string());
BOOST_CHECK_EQUAL("\"hello #{@x}\"", parse("'hello #{@x}'")->to_string());
BOOST_CHECK_EQUAL("\"hello #{(@x + @y)}\"", parse("'hello #{@x + @y}'")->to_string());
BOOST_CHECK_EQUAL("\"hello #{\"nested\"}\"", parse("'hello #{'nested'}'")->to_string());
BOOST_CHECK_EQUAL(
"\"hello #{\"nested #{@x} interp\"}\"",
parse("'hello #{'nested #{@x} interp'}'")->to_string());
}
BOOST_AUTO_TEST_CASE(method_call)
{
BOOST_CHECK_EQUAL("f()", parse("f")->to_string());
BOOST_CHECK_EQUAL("f()", parse("f()")->to_string());
BOOST_CHECK_EQUAL("f(5)", parse("f(5)")->to_string());
BOOST_CHECK_EQUAL("f(5, true)", parse("f(5, true)")->to_string());
BOOST_CHECK_EQUAL("f(5, true)", parse("f 5, true")->to_string());
BOOST_CHECK_EQUAL("f(@attr)", parse("f @attr")->to_string());
BOOST_CHECK_EQUAL("f({:a => 5, :b => 6})", parse("f({a: 5, b: 6})")->to_string());
BOOST_CHECK_EQUAL("f({:a => 5, :b => 6})", parse("f a: 5, b: 6")->to_string());
BOOST_CHECK_EQUAL("f({:a => 5, :b => 6})", parse("f :a => 5, :b => 6")->to_string());
BOOST_CHECK_EQUAL("@a.f()", parse("@a.f")->to_string());
BOOST_CHECK_EQUAL("@a.f()", parse("@a.f()")->to_string());
BOOST_CHECK_EQUAL("@a.f(5)", parse("@a.f(5)")->to_string()); //because groups dont exist in the AST
BOOST_CHECK_EQUAL("@a.f(5, true)", parse("@a.f(5, true)")->to_string());
BOOST_CHECK_EQUAL("@a.f().g()", parse("@a.f.g")->to_string());
BOOST_CHECK_EQUAL("@a.f(5).g()", parse("@a.f(5).g")->to_string());
BOOST_CHECK_EQUAL("@a.f(2, 3, 4)", parse("@a.f 2, 3, 4")->to_string());
BOOST_CHECK_EQUAL("@a.f(2, 3, (4 + 4))", parse("@a.f 2, 3, 4 + 4")->to_string());
BOOST_CHECK_EQUAL("(@a.f(2, 3, 4) + 4)", parse("(@a.f 2, 3, 4) + 4")->to_string());
BOOST_CHECK_EQUAL("@a&.f(2, 3, 4)", parse("@a&.f 2, 3, 4")->to_string());
BOOST_CHECK_EQUAL("@a[5]", parse("@a[5]")->to_string());
BOOST_CHECK_EQUAL("@a[5][5, 10]", parse("@a[5][5, 10]")->to_string());
BOOST_CHECK_EQUAL("@a[5][5, 10].to_i()", parse("@a[5][5, 10].to_i")->to_string());
BOOST_CHECK_EQUAL("@a.f()[5]", parse("@a.f[5]")->to_string());
BOOST_CHECK_EQUAL("@a.f()[5, 7]", parse("@a.f[5, 7]")->to_string());
BOOST_CHECK_EQUAL("@a.f()[5, 7].g()", parse("@a.f[5, 7].g")->to_string());
BOOST_CHECK_EQUAL("@a.f()[5, (7 + 8)].g()", parse("@a.f[5, 7 + 8].g")->to_string());
BOOST_CHECK_EQUAL("@a.f({|| 5})", parse("@a.f{|| 5}")->to_string());
BOOST_CHECK_EQUAL("@a.f(4, {|x| x})", parse("@a.f(4){|x| x}")->to_string());
BOOST_CHECK_EQUAL("@a.f({|x, y| (x * y)})", parse("@a.f{|x, y| x * y}")->to_string());
//TODO: Space before '[' is argument, without is [] operator
//BOOST_CHECK_EQUAL("a.contains?([1, 2, 3])", parse("a.contains? [1,2,3]")->to_string());
BOOST_CHECK_EQUAL("@a.to_a()[0]", parse("@a.to_a[0]")->to_string());
//'{' as first argument is always a block
BOOST_CHECK_EQUAL("@a.contains?({:a => 5})", parse("@a.contains?({a: 5})")->to_string());
BOOST_CHECK_EQUAL("@a.contains?(1, {:a => 5})", parse("@a.contains? 1, {a: 5}")->to_string());
BOOST_CHECK_THROW(parse("@a.contains? {:a => 5}"), SyntaxError);
//Regex and division ambiguity
BOOST_CHECK_EQUAL("(@a.match() / 5)", parse("@a.match / 5")->to_string());
BOOST_CHECK_EQUAL("@a.match(/5/)", parse("@a.match(/5/)")->to_string());
BOOST_CHECK_THROW(parse("@a.match /5/"), SyntaxError);
}
BOOST_AUTO_TEST_CASE(precedence)
{
BOOST_CHECK_EQUAL("((5 - 5) && 10)", parse("5 - 5 && 10")->to_string());
BOOST_CHECK_EQUAL("(((5 - 5) == 0) && (10 != nil))", parse("5 - 5 == 0 && 10 != nil")->to_string());
BOOST_CHECK_EQUAL("((5 < 10) == (15 >= 10))", parse("5 < 10 == 15 >= 10")->to_string());
BOOST_CHECK_EQUAL("((5 - (-5)) == 0)", parse("5 - - 5 == 0")->to_string());
BOOST_CHECK_EQUAL("(@c + (@m * @x))", parse("@c + @m * @x")->to_string());
BOOST_CHECK_EQUAL("((@m * @x) + @c)", parse("@m * @x + @c")->to_string());
BOOST_CHECK_EQUAL("((-(5 - 5)) * 6)", parse("-(5 - 5) * 6")->to_string());
BOOST_CHECK_EQUAL("((5 && 1) || (0 && 7))", parse("5 && 1 || 0 && 7")->to_string());
}
BOOST_AUTO_TEST_CASE(basic_syntax_errors)
{
//empty expr
BOOST_CHECK_THROW(parse(""), SyntaxError);
BOOST_CHECK_THROW(parse("5 + ()"), SyntaxError);
//adjacent values
BOOST_CHECK_THROW(parse("5 true"), SyntaxError);
BOOST_CHECK_THROW(parse("(10 + 4) x"), SyntaxError);
BOOST_CHECK_THROW(parse("(10 + 4) (54 / 5)"), SyntaxError);
//unary operators, missing value
BOOST_CHECK_THROW(parse("-"), SyntaxError);
BOOST_CHECK_THROW(parse("- == 5"), SyntaxError);
BOOST_CHECK_THROW(parse("5 == -"), SyntaxError);
//binary operators, missing value
BOOST_CHECK_THROW(parse("=="), SyntaxError);
BOOST_CHECK_THROW(parse("5 =="), SyntaxError);
BOOST_CHECK_THROW(parse("== 5"), SyntaxError);
BOOST_CHECK_THROW(parse("5 == < 5"), SyntaxError);
//unmatched group parenthesis
BOOST_CHECK_THROW(parse("(5 + 5"), SyntaxError);
BOOST_CHECK_THROW(parse("5 + 5)"), SyntaxError);
BOOST_CHECK_THROW(parse("((5 + 5) * 8"), SyntaxError);
//method call
BOOST_CHECK_THROW(parse("."), SyntaxError);
BOOST_CHECK_THROW(parse("@a."), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f("), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f(@b"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f(@b,"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f(@b,)"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f(@b,,@c)"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f(@b)."), SyntaxError);
BOOST_CHECK_THROW(parse("@a[]"), SyntaxError);
BOOST_CHECK_THROW(parse("@a[@a,]"), SyntaxError);
BOOST_CHECK_THROW(parse("@a[,@a]"), SyntaxError);
BOOST_CHECK_THROW(parse("@a["), SyntaxError);
BOOST_CHECK_THROW(parse("@a[@a"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f{"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f{|"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f{|x"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f{||"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f{||}"), SyntaxError);
BOOST_CHECK_THROW(parse("@a.f{|x|x"), SyntaxError);
//array
BOOST_CHECK_THROW(parse("["), SyntaxError);
BOOST_CHECK_THROW(parse("]"), SyntaxError);
BOOST_CHECK_THROW(parse("[,]"), SyntaxError);
BOOST_CHECK_THROW(parse("[5"), SyntaxError);
BOOST_CHECK_THROW(parse("[5,"), SyntaxError);
BOOST_CHECK_THROW(parse("[5,]"), SyntaxError);
//hash
BOOST_CHECK_THROW(parse("{"), SyntaxError);
BOOST_CHECK_THROW(parse("}"), SyntaxError);
BOOST_CHECK_THROW(parse("{,}"), SyntaxError);
BOOST_CHECK_THROW(parse("{5"), SyntaxError);
BOOST_CHECK_THROW(parse("{5 =>"), SyntaxError);
BOOST_CHECK_THROW(parse("{5 => 6"), SyntaxError);
BOOST_CHECK_THROW(parse("{5 => 6,"), SyntaxError);
BOOST_CHECK_THROW(parse("{5 => 6,}"), SyntaxError);
}
BOOST_AUTO_TEST_CASE(syntax_error_info)
{
//start
try
{
parse("");
BOOST_FAIL("Expected SyntaxError");
}
catch (const SyntaxError &e)
{
BOOST_CHECK_EQUAL(1, e.line());
BOOST_CHECK_EQUAL(1, e.offset());
}
//offset, syntax
try
{
auto str = parse("5 + 6 + #");
BOOST_FAIL("Expected SyntaxError");
}
catch (const SyntaxError &e)
{
BOOST_CHECK_EQUAL(1, e.line());
BOOST_CHECK_EQUAL(9, e.offset());
}
//lines, syntax
try
{
parse("5 + 6\n + #");
BOOST_FAIL("Expected SyntaxError");
}
catch (const SyntaxError &e)
{
BOOST_CHECK_EQUAL(2, e.line());
BOOST_CHECK_EQUAL(4, e.offset());
}
//parser
try
{
parse("5 + \n (1 + 2) (4)");
BOOST_FAIL("Expected SyntaxError");
}
catch (const SyntaxError &e)
{
BOOST_CHECK_EQUAL(2, e.line());
BOOST_CHECK_EQUAL(10, e.offset());
}
}
BOOST_AUTO_TEST_CASE(partial_expression)
{
BOOST_CHECK_EQUAL("(5 - 5)", parse_part("5 - 5"));
BOOST_CHECK_EQUAL("(5 - 5)", parse_part("5 - 5 unexpected"));
BOOST_CHECK_EQUAL("(5 - 5)", parse_part("5 - 5 =unexpected"));
BOOST_CHECK_EQUAL("\"str\"", parse_part("\"str\" unexpected"));
}
BOOST_AUTO_TEST_CASE(statement)
{
BOOST_CHECK_EQUAL("a = 5", parse_stmt("a = 5"));
BOOST_CHECK_EQUAL("a()", parse_stmt("a"));
BOOST_CHECK_EQUAL("a = @x.foo(6)", parse_stmt("a = @x.foo 6"));
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "9136ccb76ffaa591c9ab665f713dacbdb84af7b2", "size": 15236, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/expression/Parser.cpp", "max_stars_repo_name": "wnewbery/cpp-slim", "max_stars_repo_head_hexsha": "c7087294b55db5d7ca846438ebddfaec395d1a12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2017-12-24T23:35:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-16T09:35:46.000Z", "max_issues_repo_path": "tests/expression/Parser.cpp", "max_issues_repo_name": "wnewbery/cpp-slim", "max_issues_repo_head_hexsha": "c7087294b55db5d7ca846438ebddfaec395d1a12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 98.0, "max_issues_repo_issues_event_min_datetime": "2016-07-01T14:55:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-13T14:12:49.000Z", "max_forks_repo_path": "tests/expression/Parser.cpp", "max_forks_repo_name": "wnewbery/cpp-slim", "max_forks_repo_head_hexsha": "c7087294b55db5d7ca846438ebddfaec395d1a12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-04-03T12:16:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-13T14:13:25.000Z", "avg_line_length": 40.6293333333, "max_line_length": 107, "alphanum_fraction": 0.6016014702, "num_tokens": 4390}
|
################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import six
import numpy
import pandas
import json
from datetime import date, datetime
from functools import partial
from ipywidgets import Widget
from traitlets import observe, Unicode
from ..core.data import deconstruct_pandas
from ..core.exception import PerspectiveError
from ..libpsp import is_libpsp
from ..viewer import PerspectiveViewer
from ..core._version import major_minor_version
def _type_to_string(t):
'''Convert a type object to a string representing a Perspective-supported
type. Redefine here as we can't have any dependencies on libbinding in
client mode.
'''
if t in six.integer_types:
return "integer"
elif t is float:
return "float"
elif t is bool:
return "boolean"
elif t is date:
return "date"
elif t is datetime:
return "datetime"
elif t is six.binary_type or t is six.text_type:
return "string"
else:
raise PerspectiveError(
"Unsupported type `{0}` in schema - Perspective supports `int`, `float`, `bool`, `date`, `datetime`, and `str` (or `unicode`).".format(str(t)))
def _serialize(data):
# Attempt to serialize data and pass it to the front-end as JSON
if isinstance(data, list):
return data
elif isinstance(data, dict):
# inspect the first value to decide how to serialize the whole dict
for v in data.values():
# serialize schema values to string
if isinstance(v, type):
return {k: _type_to_string(data[k]) for k in data}
elif isinstance(v, numpy.ndarray):
return {k: data[k].tolist() for k in data}
else:
return data
elif isinstance(data, numpy.ndarray):
# structured or record array
if not isinstance(data.dtype.names, tuple):
raise NotImplementedError("Data should be dict of numpy.ndarray or a structured array.")
columns = [data[col].tolist() for col in data.dtype.names]
return dict(zip(data.dtype.names, columns))
elif isinstance(data, pandas.DataFrame) or isinstance(data, pandas.Series):
# Take flattened dataframe and make it serializable
d = {}
for name in data.columns:
column = data[name]
values = column.values
# Timezone-aware datetime64 dtypes throw an exception when using
# `numpy.issubdtype` - match strings here instead.
str_dtype = str(column.dtype)
if "datetime64" in str_dtype:
# Convert all datetimes to string for serializing
values = numpy.datetime_as_string(column.values, unit="ms")
d[name] = values.tolist()
return d
else:
raise NotImplementedError("Cannot serialize a dataset of `{0}`.".format(str(type(data))))
class _PerspectiveWidgetMessage(object):
'''A custom message that will be passed from the Python widget to the
front-end.
When creating new messages, use this class as it defines a concrete schema
for the message and prevents loosely creating `dict` objects everywhere.
Use `to_dict()` to obtain the message in a form that can be sent through
IPyWidgets.
'''
def __init__(self, msg_id, msg_type, msg_data):
'''Create a new PerspectiveWidgetMessage.'''
self.id = msg_id
self.type = msg_type
self.data = msg_data
def to_dict(self):
'''Returns a dictionary representation of the message.'''
return {
"id": self.id,
"type": self.type,
"data": self.data
}
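# Example: _PerspectiveWidgetMessage(1, "cmd", {"cmd": "init"}).to_dict()
# -> {"id": 1, "type": "cmd", "data": {"cmd": "init"}}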
class PerspectiveWidget(Widget, PerspectiveViewer):
''':class:`~perspective.PerspectiveWidget` allows for Perspective to be used
in the form of a JupyterLab IPython widget.
Using `perspective.Table`, you can create a widget that extends the full
functionality of `perspective-viewer`. Changes on the viewer can be
programmatically set on the :class:`~perspective.PerspectiveWidget` instance,
and state is maintained across page refreshes.
Examples:
>>> from perspective import Table, PerspectiveWidget
>>> data = {
... "a": [1, 2, 3],
... "b": [
... "2019/07/11 7:30PM",
... "2019/07/11 8:30PM",
... "2019/07/11 9:30PM"
... ]
... }
>>> tbl = Table(data, index="a")
>>> widget = PerspectiveWidget(
... tbl,
... row_pivots=["a"],
... sort=[["b", "desc"]],
... filter=[["a", ">", 1]]
... )
>>> widget.sort
[["b", "desc"]]
>>> widget.sort.append(["a", "asc"])
>>> widget.sort
[["b", "desc"], ["a", "asc"]]
>>> widget.update({"a": [4, 5]}) # Browser UI updates
'''
# Required by ipywidgets for proper registration of the backend
_model_name = Unicode('PerspectiveModel').tag(sync=True)
_model_module = Unicode('@finos/perspective-jupyterlab').tag(sync=True)
_model_module_version = Unicode("~{}".format(major_minor_version)).tag(sync=True)
_view_name = Unicode('PerspectiveView').tag(sync=True)
_view_module = Unicode('@finos/perspective-jupyterlab').tag(sync=True)
_view_module_version = Unicode("~{}".format(major_minor_version)).tag(sync=True)
def __init__(self,
table_or_data,
index=None,
limit=None,
client=not is_libpsp(),
**kwargs):
'''Initialize an instance of :class:`~perspective.PerspectiveWidget`
with the given table/data and viewer configuration.
If a pivoted DataFrame or MultiIndex table is passed in, the widget
preserves pivots and applies them. See `PerspectiveViewer.__init__` for
arguments that transform the view shown in the widget.
Args:
table_or_data (perspective.Table|dict|list|pandas.DataFrame): The
`Table` or data that will be viewed in the widget.
Keyword Arguments:
index (`str`): A column name to be used as the primary key.
Ignored if a `Table` is supplied.
limit (`int`): An upper limit on the number of rows in the Table.
Cannot be set at the same time as `index`, ignored if a `Table`
is passed in.
client (`bool`): If True, convert the dataset into an Apache Arrow
binary and create the Table in Javascript using a copy of the
data. Defaults to `not is_libpsp()`, i.e. True only when the binding library is unavailable.
kwargs (`dict`): configuration options for the `PerspectiveViewer`,
and `Table` constructor if `table_or_data` is a dataset.
Examples:
>>> widget = PerspectiveWidget(
... {"a": [1, 2, 3]},
... aggregates={"a": "avg"},
... row_pivots=["a"],
... sort=[["b", "desc"]],
... filter=[["a", ">", 1]],
... computed_columns=[{
... "column": "sqrt(a)",
... "computed_function_name": "sqrt",
... "inputs": ["a"]
... }])
'''
self._displayed = False
self.on_displayed(self._on_display)
# If `self.client` is True, the front-end `perspective-viewer` is given
# a copy of the data serialized to Arrow, and changes made in Python
# are not reflected in the front-end.
self.client = client
if self.client:
# Pass table load options to the front-end in client mode
self._client_options = {}
# Cache calls to `update()` before the widget has been displayed.
self._predisplay_update_cache = []
if index is not None and limit is not None:
raise PerspectiveError("Index and Limit cannot be set at the same time!")
# Parse the dataset we pass in - if it's Pandas, preserve pivots
if isinstance(table_or_data, pandas.DataFrame) or isinstance(table_or_data, pandas.Series):
data, config = deconstruct_pandas(table_or_data)
table_or_data = data
if config.get("row_pivots", None) and "row_pivots" not in kwargs:
kwargs.update({"row_pivots": config["row_pivots"]})
if config.get("column_pivots", None) and "column_pivots" not in kwargs:
kwargs.update({"column_pivots": config["column_pivots"]})
if config.get("columns", None) and "columns" not in kwargs:
kwargs.update({"columns": config["columns"]})
# Initialize the viewer
super(PerspectiveWidget, self).__init__(**kwargs)
# Handle messages from the front end
# `PerspectiveJupyterClient.send()`:
# - The "data" value of the message should be a JSON-serialized string.
# - Both `on_msg` and `@observe("value")` must be specified on the
# handler for custom messages to be parsed by the Python widget.
self.on_msg(self.handle_message)
if self.client:
if is_libpsp():
from ..libpsp import Table
if isinstance(table_or_data, Table):
raise PerspectiveError("Client mode PerspectiveWidget expects data or schema, not a `perspective.Table`!")
if index is not None:
self._client_options["index"] = index
if limit is not None:
self._client_options["limit"] = limit
# cache self._data so that creating multiple views doesn't reserialize
# the same data
if not hasattr(self, "_data") or self._data is None:
self._data = _serialize(table_or_data)
else:
# If an empty dataset is provided, don't call `load()`
load_kwargs = {}
if table_or_data is None:
if index is not None or limit is not None:
raise PerspectiveError("Cannot initialize PerspectiveWidget `index` or `limit` without a Table, data, or schema!")
else:
if index is not None:
load_kwargs.update({"index": index})
if limit is not None:
load_kwargs.update({"limit": limit})
self.load(table_or_data, **load_kwargs)
def load(self, data, **options):
'''Load the widget with data. If running in client mode, this method
serializes the data and calls the browser viewer's load method.
Otherwise, it calls `Viewer.load()` using `super()`.
'''
if self.client is True:
# serialize the data and send a custom message to the browser
if isinstance(data, pandas.DataFrame) or isinstance(data, pandas.Series):
data, _ = deconstruct_pandas(data)
d = _serialize(data)
self._data = d
else:
super(PerspectiveWidget, self).load(data, **options)
# proactively notify front-end of new data
message = self._make_load_message()
self.send(message.to_dict())
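    # A minimal usage sketch of `load()` (hypothetical data; assumes a Jupyter
    # session where the widget is displayed):
    #
    #   widget = PerspectiveWidget({"a": [1, 2, 3]})
    #   widget.load({"a": [4, 5, 6]})  # swap in a new dataset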
def update(self, data):
'''Update the widget with new data. If running in client mode, this
method serializes the data and calls the browser viewer's update
method. Otherwise, it calls `Viewer.update()` using `super()`.
'''
if self.client is True:
if self._displayed is False:
self._predisplay_update_cache.append(data)
return
# serialize the data and send a custom message to the browser
if isinstance(data, pandas.DataFrame) or isinstance(data, pandas.Series):
data, _ = deconstruct_pandas(data)
d = _serialize(data)
self.post({
"cmd": "update",
"data": d
})
else:
super(PerspectiveWidget, self).update(data)
def clear(self):
'''Clears the widget's underlying `Table`.
In client mode, clears the `_data` attribute of the widget.
'''
if self.client is True:
self.post({
"cmd": "clear"
})
self._data = None
else:
super(PerspectiveWidget, self).clear()
def replace(self, data):
'''Replaces the widget's `Table` with new data conforming to the same
schema. Does not clear user-set state. If in client mode, serializes
the data and sends it to the browser.
'''
if self.client is True:
if isinstance(data, pandas.DataFrame) or isinstance(data, pandas.Series):
data, _ = deconstruct_pandas(data)
d = _serialize(data)
self.post({
"cmd": "replace",
"data": d
})
self._data = d
else:
super(PerspectiveWidget, self).replace(data)
def delete(self, delete_table=True):
        '''Delete the widget's data and clear its internal state. If running in
client mode, sends the `delete()` command to the browser. Otherwise
calls `delete` on the underlying viewer.
Args:
delete_table (`bool`): whether the underlying `Table` will be
deleted. Defaults to True.
'''
if self.client is False:
super(PerspectiveWidget, self).delete(delete_table)
self.post({
"cmd": "delete"
})
# Close the underlying comm and remove widget from the front-end
self.close()
def post(self, msg, msg_id=None):
'''Post a serialized message to the `PerspectiveJupyterClient`
in the front end.
The posted message should conform to the `PerspectiveJupyterMessage`
interface as defined in `@finos/perspective-jupyterlab`.
Args:
msg (dict): a message from `PerspectiveManager` for the front-end
viewer to process.
msg_id (int): an integer id that allows the client to process
the message.
'''
message = _PerspectiveWidgetMessage(msg_id, "cmd", msg)
self.send(message.to_dict())
@observe("value")
def handle_message(self, widget, content, buffers):
'''Given a message from `PerspectiveJupyterClient.send()`, process the
message and return the result to `self.post`.
Args:
widget: a reference to the `Widget` instance that received the
message.
content (dict): the message from the front-end. Automatically
de-serialized by ipywidgets.
buffers : optional arraybuffers from the front-end, if any.
'''
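        # A sketch of the message shape handled below (hypothetical payload,
        # matching the `type`/`data` fields this handler expects):
        #   content = {"type": "cmd", "data": '{"id": 1, "cmd": "table"}'}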
if content["type"] == "cmd":
parsed = json.loads(content["data"])
if parsed["cmd"] == "init":
self.post({'id': -1, 'data': None})
elif parsed["cmd"] == "table":
# return the dataset or table name to the front-end
msg = self._make_load_message()
self.send(msg.to_dict())
# In client mode, users can call `update()` before the widget
# is visible. This applies the updates after the viewer has
# loaded the initial dataset.
if self.client is True and len(self._predisplay_update_cache) > 0:
for data in self._predisplay_update_cache:
self.update(data)
else:
# For all calls to Perspective, process it in the manager.
post_callback = partial(self.post, msg_id=parsed["id"])
self.manager._process(parsed, post_callback)
def _make_load_message(self):
        '''Build a message for the front-end containing either the name of a
        `Table` in Python, or, in client mode, the serialized dataset with its
        options.
'''
msg_data = None
if self.client and self._data is not None:
# Send data to the client, transferring ownership to the browser
msg_data = {
"data": self._data
}
if len(self._client_options.keys()) > 0:
msg_data["options"] = self._client_options
elif self.table_name is not None:
# Only pass back the table if it's been loaded. If the table isn't
# loaded, the `load()` method will handle synchronizing the
# front-end.
msg_data = {
"table_name": self.table_name
}
if msg_data is not None:
return _PerspectiveWidgetMessage(-2, "table", msg_data)
else:
raise PerspectiveError("Widget could not find a dataset or a `Table` to load.")
def _on_display(self, widget, **kwargs):
'''When the widget has been displayed, make sure `displayed` is set to
True so updates stop being cached.
'''
self._displayed = True
|
{"hexsha": "4a4b1ef1cea97f60e6ea8f5056495eb9684f98d3", "size": 17266, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/perspective/perspective/widget/widget.py", "max_stars_repo_name": "mehtabhavin10/perspective", "max_stars_repo_head_hexsha": "faa9f721d0518a620508a78298a96cb005c07275", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-12T10:41:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-12T10:41:12.000Z", "max_issues_repo_path": "python/perspective/perspective/widget/widget.py", "max_issues_repo_name": "mehtabhavin10/perspective", "max_issues_repo_head_hexsha": "faa9f721d0518a620508a78298a96cb005c07275", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/perspective/perspective/widget/widget.py", "max_forks_repo_name": "mehtabhavin10/perspective", "max_forks_repo_head_hexsha": "faa9f721d0518a620508a78298a96cb005c07275", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.691954023, "max_line_length": 155, "alphanum_fraction": 0.5835167381, "include": true, "reason": "import numpy", "num_tokens": 3703}
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.patches import FancyArrowPatch
from PIL import Image
import networkx as nx
#pos = {0:[10,10], 1:[300,300], 2:[800,800]}
#coor = {0:'(585,230)'}
#def pfinder(start,end):
'''start == start room, e.g. 100 or 'enter'; end == end room, e.g. 110 or 'stairs' '''
''' Lists of rooms and their coordinates '''
rooms = ['enter','p1',100,102,113,114,'restroom1',103,104,105,115,116,106,107,'stairs',
108,117,'restroom2','1132',109,110,111,112,'elevator', 100,'p2','enter']#'p3','exit 2']
location =[(100,590),(190,590),(190,470),(270,470),(270,470),(400,470),(400,470),(473,470),(585,470),
(585,470),(585,350),(585,300),(585,235),(585,180),(540,180),(498,180),(400,180),(400,180),
(340,180),(250,180),(190,180),(190,253),(190,310),(190,410),(190,470),(190,590),(100,590)]
#nodes = dict(zip(rooms,location))
''' Create Graph'''
G = nx.DiGraph()
''' Create Dictionary of rooms and coordinates'''
# Integer node ids 0..len(rooms)-1; zip() below truncates to the shorter list
coords = list(range(len(rooms)))
nodes = dict(zip(coords,location))
r_nodes = dict(zip(coords,rooms))
G.add_nodes_from(nodes.keys())
for n,p in nodes.items():
    G.nodes[n]['pos'] = p  # G.node was removed in networkx 2.4; use G.nodes
'''Input start room and end room (inputs must be in the rooms list)'''
n = 5  # sentinel integer used in the type checks below
start_room = input("Start room:")
if type(start_room) != type(n):
print("Are you inputing text? yes or no?")
Yes_no = str(input(" Yes or no?:"))
if Yes_no == 'yes':
start_room = str(start_room)
print("string")
elif Yes_no == 'no':
start_room =int(start_room)
print("integer")
end_room = input("End room:")
if type(end_room) != type(n):
print("Are you inputing text? yes or no?")
Yes_no = str(input(" Yes or no?:"))
if Yes_no == 'yes':
end_room = str(end_room)
print("string")
elif Yes_no == 'no':
end_room=int(end_room)
print("integer")
for key,val in r_nodes.items():
if val == start_room:
start = int(key)
print(start)
if val == end_room:
end = int(key)
print(end)
'''Create nodes and paths in graph'''
pos = nx.get_node_attributes(G,'pos')
nx.add_path(G, nodes.keys())  # Graph.add_path was removed in networkx 2.x
''' Compute shortest path from start to end'''
path = nx.shortest_path(G,source = start, target = end)
#path = nx.bidirectional_dijkstra(G,start,end)
#paths = nx.shortest_simple_paths(G,start,end)
#end_edge= nx.shortest_path(G,source = int(start-1), target = int(end-1))
print(path)
el_one= list(path)
#print(el_one)
el = list(path)
del el[0]
el_two = el
#print(el)
#print(el_two)
path_edges = list(zip(el_one,el_two))
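# e.g. a path [3, 4, 5] yields edges [(3, 4), (4, 5)]: zipping the path with
# itself shifted by one produces the consecutive node pairs to highlight.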
#print(end_edge[start:])
#path_edges=G.edges(end_edge[1:])
#G.add_edges_from(end_edge)
#path_edges=filter(node_edges,lambda x: x[0]< end)
#print(path_edges)
#G.remove_edge(end,int(end+1))
#path_edges = zip(path,path[0:])
#degree = nx.degree(G)
#print (degree)
#nx.draw_network_nodes(G,pos)
''' Plot figure and draw graph'''
plt.figure(dpi = 200)
img = mpimg.imread('WH-31-1.jpg')
#img = Image.open('C:/Users/mighe/Documents/Python_Scripts_37/office-floor-plans_1.jpg')
plt.imshow(img)
nx.draw(G,pos, node_color='w',edge_color='w',node_size = 0.01, width = 0.05)
nx.draw_networkx(G,pos,nodelist=path,node_color='b',node_size=10,edgelist=path_edges,width=2,
edge_color='r',arrows=True,arrowstyle='-|>',arrowsize=10,with_labels=False)
plt.axis('off')
plt.savefig('WH-31-1TEST.jpg', dpi= 1200)
#img.save('C:/Users/mighe/Documents/Python_Scripts_37/test3.jpg')
plt.show()
|
{"hexsha": "ae37250662f466fb15fac0fb858a5d125108ef19", "size": 3671, "ext": "py", "lang": "Python", "max_stars_repo_path": "PRESENTATIONDAY/path_finder_prototype.py", "max_stars_repo_name": "rlin2k1/ANGBot", "max_stars_repo_head_hexsha": "f78216be8d918b1e5596f6d36faf57ad8c5c9a17", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PRESENTATIONDAY/path_finder_prototype.py", "max_issues_repo_name": "rlin2k1/ANGBot", "max_issues_repo_head_hexsha": "f78216be8d918b1e5596f6d36faf57ad8c5c9a17", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PRESENTATIONDAY/path_finder_prototype.py", "max_forks_repo_name": "rlin2k1/ANGBot", "max_forks_repo_head_hexsha": "f78216be8d918b1e5596f6d36faf57ad8c5c9a17", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3760683761, "max_line_length": 102, "alphanum_fraction": 0.6317079815, "include": true, "reason": "import networkx", "num_tokens": 1067}
|
myTestRule {
# Input parameters are:
# File descriptor
# Optional length to read
# Output Parameter is:
# Buffer holding the data read
# Output from running the example is the contents of *R_BUF, written to stdout as a string
for (*I = 0 ; *I < 4 ; *I = *I + 1) {
acRunWorkFlow("/raja8/home/rods/msso/mssop1/mssop1.run",*R_BUF);
msiBytesBufToStr(*R_BUF, *Str);
writeLine("stdout", *Str);
}
}
INPUT *Obj="/raja8/home/rods/msso/mssop1/mssop1.run", *Flag="O_RDONLY", *OFlags="objPath=*Obj++++openFlags=*Flag", *Len=100
OUTPUT ruleExecOut, *Str
|
{"hexsha": "1f70f3a3eb295abea9803741390dd0ff4caeb000", "size": 520, "ext": "r", "lang": "R", "max_stars_repo_path": "clients/icommands/test/rules3.0/ruleTestWSO3.r", "max_stars_repo_name": "DICE-UNC/iRODS-FUSE-Mod", "max_stars_repo_head_hexsha": "8f8e965493a03bcb085df0b6467e7dfcce308d0f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "clients/icommands/test/rules3.0/ruleTestWSO3.r", "max_issues_repo_name": "DICE-UNC/iRODS-FUSE-Mod", "max_issues_repo_head_hexsha": "8f8e965493a03bcb085df0b6467e7dfcce308d0f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-09-24T04:20:30.000Z", "max_issues_repo_issues_event_max_datetime": "2015-09-24T04:20:30.000Z", "max_forks_repo_path": "clients/icommands/test/rules3.0/ruleTestWSO3.r", "max_forks_repo_name": "DICE-UNC/iRODS-FUSE-Mod", "max_forks_repo_head_hexsha": "8f8e965493a03bcb085df0b6467e7dfcce308d0f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5, "max_line_length": 123, "alphanum_fraction": 0.6557692308, "num_tokens": 181}
|
'''
This includes my test runs to understand the data structure when attempting the problem.
Feel free to go through and try it yourself.
'''
import gym
import numpy as np
from lake_envs import *
# uncomment to check out stochastic/deterministic environments
#env = gym.make("Deterministic-4x4-FrozenLake-v0")
env = gym.make("Stochastic-4x4-FrozenLake-v0")
'''
P: nested dictionary
From gym.core.Environment
For each state in [0, nS - 1] and action in [0, nA - 1], P[state][action] is a
tuple of the form (probability, nextstate, reward, terminal) where
- probability: float
the probability of transitioning from "state" to "nextstate" with "action"
- nextstate: int
denotes the state we transition to (in range [0, nS - 1])
- reward: int
either 0 or 1, the reward for transitioning from "state" to
"nextstate" with "action"
- terminal: bool
True when "nextstate" is a terminal state (hole or goal), False otherwise
'''
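# A sketch of what one entry may look like on the stochastic 4x4 map
# (hypothetical probabilities; the real values come from the environment):
#   P[0][2] == [(0.333, 0, 0.0, False), (0.333, 4, 0.0, False), (0.333, 1, 0.0, False)]
# i.e. action 2 in state 0 can land in one of several next states, with the
# per-outcome probabilities summing to 1.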
P = env.P
nA = 4
nS = 16
gamma = 0.9
policy = 2 * np.ones(nS, dtype='int')
for state in P:
A = P[state]
for action in A:
for prob, next_state, reward, terminal in A[action]:
print('p(s_{}|s_{},a_{})={}, with reward {}'.format(next_state,state,action,prob,reward))
env.close()
|
{"hexsha": "4a1624a5b9d2bd1c474a184efa97d17f9ac50df8", "size": 1284, "ext": "py", "lang": "Python", "max_stars_repo_path": "Reinforcement-Learning/assignment1/draft.py", "max_stars_repo_name": "gajeraj/MLSA-workshops-2020-student", "max_stars_repo_head_hexsha": "cafbf5ac8750dd2b962174ad71dabf35ac90e2f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-02-27T07:04:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-03T17:20:55.000Z", "max_issues_repo_path": "Reinforcement-Learning/assignment1/draft.py", "max_issues_repo_name": "Phoebe0222/MLSA-workshops-2019-student", "max_issues_repo_head_hexsha": "cafbf5ac8750dd2b962174ad71dabf35ac90e2f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Reinforcement-Learning/assignment1/draft.py", "max_forks_repo_name": "Phoebe0222/MLSA-workshops-2019-student", "max_forks_repo_head_hexsha": "cafbf5ac8750dd2b962174ad71dabf35ac90e2f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-08-09T12:08:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-16T06:35:22.000Z", "avg_line_length": 27.3191489362, "max_line_length": 101, "alphanum_fraction": 0.6806853583, "include": true, "reason": "import numpy", "num_tokens": 369}
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import _pickle as cPickle
import argparse
import datetime
import hashlib
import math
import numpy as np
import pandas as pd
import pyspark.sql.functions as F
import time
from pyspark.context import SparkContext, SparkConf
from pyspark.ml.linalg import SparseVector, VectorUDT
from pyspark.sql.session import SparkSession
from pyspark.sql.types import IntegerType, StringType, StructType, StructField, TimestampType, FloatType, ArrayType
evaluation_verbose = False
OUTPUT_BUCKET_FOLDER = "/outbrain/preprocessed/"
DATA_BUCKET_FOLDER = "/outbrain/orig/"
SPARK_TEMP_FOLDER = "/outbrain/spark-temp/"
conf = SparkConf().setMaster('local[*]').set('spark.executor.memory', '40g').set('spark.driver.memory', '200g').set(
"spark.local.dir", SPARK_TEMP_FOLDER)
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
start_time = time.time()
def hashstr(s, nr_bins):
return int(hashlib.md5(s.encode('utf8')).hexdigest(), 16) % (nr_bins - 1) + 1
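# A quick sketch of the hashing trick above: hashstr('US>CA', 100) maps the
# string deterministically into [1, 99]. MD5 is used instead of Python's
# built-in hash() so the bucket assignment is stable across processes and
# runs (hash() is randomized by PYTHONHASHSEED).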
parser = argparse.ArgumentParser()
parser.add_argument(
'--submission',
action='store_true',
default=False
)
args = parser.parse_args()
evaluation = not args.submission
# ## UDFs
def date_time_to_unix_epoch(date_time):
return int(time.mktime(date_time.timetuple()))
def date_time_to_unix_epoch_treated(dt):
if dt is not None:
try:
epoch = date_time_to_unix_epoch(dt)
return epoch
except Exception as e:
print("Error processing dt={}".format(dt), e)
return 0
else:
return 0
timestamp_null_to_zero_int_udf = F.udf(lambda x: date_time_to_unix_epoch_treated(x), IntegerType())
INT_DEFAULT_NULL_VALUE = -1
int_null_to_minus_one_udf = F.udf(lambda x: x if x is not None else INT_DEFAULT_NULL_VALUE, IntegerType())
int_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(IntegerType()))
float_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(FloatType()))
str_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(StringType()))
def truncate_day_from_timestamp(ts):
return int(ts / 1000 / 60 / 60 / 24)
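# e.g. truncate_day_from_timestamp(86400000) == 1: timestamps are in
# milliseconds, so dividing by 1000*60*60*24 yields whole days.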
truncate_day_from_timestamp_udf = F.udf(lambda ts: truncate_day_from_timestamp(ts), IntegerType())
extract_country_udf = F.udf(lambda geo: geo.strip()[:2] if geo is not None else '', StringType())
extract_country_state_udf = F.udf(lambda geo: geo.strip()[:5] if geo is not None else '', StringType())
list_len_udf = F.udf(lambda x: len(x) if x is not None else 0, IntegerType())
def convert_odd_timestamp(timestamp_ms_relative):
TIMESTAMP_DELTA = 1465876799998
return datetime.datetime.fromtimestamp((int(timestamp_ms_relative) + TIMESTAMP_DELTA) // 1000)
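# e.g. convert_odd_timestamp(0) falls in mid-June 2016: the raw timestamps are
# millisecond offsets, and adding TIMESTAMP_DELTA appears to recover an
# absolute epoch (an inference from the constant above, not documented here).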
# # Loading Files
# ## Loading UTC/BST for each country and US / CA states (local time)
country_utc_dst_df = pd.read_csv('preproc/data/country_codes_utc_dst_tz_delta.csv', keep_default_na=False)
countries_utc_dst_dict = dict(
zip(country_utc_dst_df['country_code'].tolist(), country_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
countries_utc_dst_broad = sc.broadcast(countries_utc_dst_dict)
us_states_utc_dst_df = pd.read_csv('preproc/data/us_states_abbrev_bst.csv', keep_default_na=False)
us_states_utc_dst_dict = dict(
zip(us_states_utc_dst_df['state_abb'].tolist(), us_states_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
us_states_utc_dst_broad = sc.broadcast(us_states_utc_dst_dict)
ca_states_utc_dst_df = pd.read_csv('preproc/data/ca_states_abbrev_bst.csv', keep_default_na=False)
ca_countries_utc_dst_dict = dict(
zip(ca_states_utc_dst_df['state_abb'].tolist(), ca_states_utc_dst_df['utc_dst_time_offset_cleaned'].tolist()))
ca_countries_utc_dst_broad = sc.broadcast(ca_countries_utc_dst_dict)
# ## Loading competition csvs
events_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("uuid_event", StringType(), True),
StructField("document_id_event", IntegerType(), True),
StructField("timestamp_event", IntegerType(), True),
StructField("platform_event", IntegerType(), True),
StructField("geo_location_event", StringType(), True)]
)
events_df = spark.read.schema(events_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "events.csv") \
.withColumn('dummyEvents', F.lit(1)) \
.withColumn('day_event', truncate_day_from_timestamp_udf('timestamp_event')) \
.withColumn('event_country', extract_country_udf('geo_location_event')) \
.withColumn('event_country_state', extract_country_state_udf('geo_location_event')) \
.alias('events')
events_df.count()
# Drop rows with empty "geo_location"
events_df = events_df.dropna(subset="geo_location_event")
events_df.count()
# Drop rows with empty "platform"
events_df = events_df.dropna(subset="platform_event")
events_df.count()
page_views_schema = StructType(
[StructField("uuid_pv", StringType(), True),
StructField("document_id_pv", IntegerType(), True),
StructField("timestamp_pv", IntegerType(), True),
StructField("platform_pv", IntegerType(), True),
StructField("geo_location_pv", StringType(), True),
StructField("traffic_source_pv", IntegerType(), True)]
)
page_views_df = spark.read.schema(page_views_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "page_views.csv") \
.withColumn('day_pv', truncate_day_from_timestamp_udf('timestamp_pv')) \
.alias('page_views')
page_views_df.createOrReplaceTempView('page_views')
page_views_users_df = spark.sql('''
SELECT uuid_pv, document_id_pv, max(timestamp_pv) as max_timestamp_pv, 1 as dummyPageView
FROM page_views p
GROUP BY uuid_pv, document_id_pv
''').alias('page_views_users')
promoted_content_schema = StructType(
[StructField("ad_id", IntegerType(), True),
StructField("document_id_promo", IntegerType(), True),
StructField("campaign_id", IntegerType(), True),
StructField("advertiser_id", IntegerType(), True)]
)
promoted_content_df = spark.read.schema(promoted_content_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "promoted_content.csv") \
.withColumn('dummyPromotedContent', F.lit(1)).alias('promoted_content').cache()
documents_meta_schema = StructType(
[StructField("document_id_doc", IntegerType(), True),
StructField("source_id", IntegerType(), True),
StructField("publisher_id", IntegerType(), True),
StructField("publish_time", TimestampType(), True)]
)
documents_meta_df = spark.read.schema(documents_meta_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_meta.csv") \
.withColumn('dummyDocumentsMeta', F.lit(1)).alias('documents_meta').cache()
documents_meta_df.count()
# Drop rows with empty "source_id"
documents_meta_df = documents_meta_df.dropna(subset="source_id")
documents_meta_df.count()
source_publishers_df = documents_meta_df.select(["source_id", "publisher_id"]).dropDuplicates()
source_publishers_df.count()
# get list of source_ids without publisher_id
rows_no_pub = source_publishers_df.filter("publisher_id is NULL")
source_ids_without_publisher = [row['source_id'] for row in rows_no_pub.collect()]
len(source_ids_without_publisher)
# maximum value of publisher_id used so far
max_pub = max(source_publishers_df.select(["publisher_id"]).dropna().collect())['publisher_id']
max_pub
# rows filled with new publisher_ids
new_publishers = [(source, max_pub + 1 + nr) for nr, source in enumerate(source_ids_without_publisher)]
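# e.g. with max_pub = 1000 and orphan sources [7, 9], this yields
# [(7, 1001), (9, 1002)]: each source missing a publisher gets a fresh id.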
new_publishers_df = spark.createDataFrame(new_publishers, ("source_id", "publisher_id"))
new_publishers_df.take(10)
# old and new publishers merged
fixed_source_publishers_df = source_publishers_df.dropna().union(new_publishers_df)
fixed_source_publishers_df.collect()[-30:]
# update documents_meta with new publishers
documents_meta_df = documents_meta_df.drop('publisher_id').join(fixed_source_publishers_df, on='source_id')
documents_meta_df.count()
# Joining with Page Views to get traffic_source_pv
events_joined_df = events_df.join(documents_meta_df
.withColumnRenamed('source_id', 'source_id_doc_event')
.withColumnRenamed('publisher_id', 'publisher_doc_event')
.withColumnRenamed('publish_time', 'publish_time_doc_event'),
on=F.col("document_id_event") == F.col("document_id_doc"), how='left') \
.join(page_views_df,
on=[F.col('uuid_event') == F.col('uuid_pv'),
F.col('document_id_event') == F.col('document_id_pv'),
F.col('platform_event') == F.col('platform_pv'),
F.col('geo_location_event') == F.col('geo_location_pv'),
F.col('day_event') == F.col('day_pv')],
how='left') \
.alias('events').cache()
documents_categories_schema = StructType(
[StructField("document_id_cat", IntegerType(), True),
StructField("category_id", IntegerType(), True),
StructField("confidence_level_cat", FloatType(), True)]
)
documents_categories_df = spark.read.schema(documents_categories_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_categories.csv") \
.alias('documents_categories').cache()
documents_categories_grouped_df = documents_categories_df.groupBy('document_id_cat') \
.agg(F.collect_list('category_id').alias('category_id_list'),
F.collect_list('confidence_level_cat').alias('confidence_level_cat_list')) \
.withColumn('dummyDocumentsCategory', F.lit(1)) \
.alias('documents_categories_grouped')
documents_topics_schema = StructType(
[StructField("document_id_top", IntegerType(), True),
StructField("topic_id", IntegerType(), True),
StructField("confidence_level_top", FloatType(), True)]
)
documents_topics_df = spark.read.schema(documents_topics_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_topics.csv") \
.alias('documents_topics').cache()
documents_topics_grouped_df = documents_topics_df.groupBy('document_id_top') \
.agg(F.collect_list('topic_id').alias('topic_id_list'),
F.collect_list('confidence_level_top').alias('confidence_level_top_list')) \
.withColumn('dummyDocumentsTopics', F.lit(1)) \
.alias('documents_topics_grouped')
documents_entities_schema = StructType(
[StructField("document_id_ent", IntegerType(), True),
StructField("entity_id", StringType(), True),
StructField("confidence_level_ent", FloatType(), True)]
)
documents_entities_df = spark.read.schema(documents_entities_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "documents_entities.csv") \
.alias('documents_entities').cache()
documents_entities_grouped_df = documents_entities_df.groupBy('document_id_ent') \
.agg(F.collect_list('entity_id').alias('entity_id_list'),
F.collect_list('confidence_level_ent').alias('confidence_level_ent_list')) \
.withColumn('dummyDocumentsEntities', F.lit(1)) \
.alias('documents_entities_grouped')
clicks_train_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True),
StructField("clicked", IntegerType(), True)]
)
clicks_train_df = spark.read.schema(clicks_train_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_train.csv") \
.withColumn('dummyClicksTrain', F.lit(1)).alias('clicks_train')
clicks_train_joined_df = clicks_train_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(documents_meta_df,
on=F.col("promoted_content.document_id_promo") == F.col("documents_meta.document_id_doc"),
how='left') \
.join(events_joined_df, on='display_id', how='left')
clicks_train_joined_df.createOrReplaceTempView('clicks_train_joined')
if evaluation:
table_name = 'user_profiles_eval'
else:
table_name = 'user_profiles'
user_profiles_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER + table_name) \
.withColumn('dummyUserProfiles', F.lit(1)).alias('user_profiles')
# # Splitting Train/validation set | Test set
if evaluation:
validation_set_exported_df = spark.read.parquet(
OUTPUT_BUCKET_FOLDER + "validation_set.parquet") \
.alias('validation_set')
validation_set_exported_df.select('display_id').distinct() \
.createOrReplaceTempView("validation_display_ids")
validation_set_df = spark.sql('''
SELECT * FROM clicks_train_joined t
WHERE EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''').alias('clicks') \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.join(page_views_users_df,
on=[F.col("clicks.uuid_event") == F.col("page_views_users.uuid_pv"),
F.col("clicks.document_id_promo") == F.col("page_views_users.document_id_pv")],
how='left')
# print("validation_set_df.count() =", validation_set_df.count())
    # Add event and user information to the validation set, for error statistics (avg CTR)
validation_set_ground_truth_df = validation_set_df.filter('clicked = 1') \
.join(user_profiles_df,
on=[F.col("user_profiles.uuid") == F.col("uuid_event")],
how='left') \
.withColumn('user_categories_count', list_len_udf('category_id_list')) \
.withColumn('user_topics_count', list_len_udf('topic_id_list')) \
.withColumn('user_entities_count', list_len_udf('entity_id_list')) \
.select('display_id', 'ad_id', 'platform_event', 'day_event', 'timestamp_event',
'geo_location_event', 'event_country', 'event_country_state', 'views',
'user_categories_count', 'user_topics_count', 'user_entities_count') \
.withColumnRenamed('ad_id', 'ad_id_gt') \
.withColumnRenamed('views', 'user_views_count') \
.cache()
# print("validation_set_ground_truth_df.count() =", validation_set_ground_truth_df.count())
train_set_df = spark.sql('''
SELECT * FROM clicks_train_joined t
WHERE NOT EXISTS (SELECT display_id FROM validation_display_ids
WHERE display_id = t.display_id)''').cache()
print("train_set_df.count() =", train_set_df.count())
# validation_display_ids_df.groupBy("day_event").count().show()
else:
clicks_test_schema = StructType(
[StructField("display_id", IntegerType(), True),
StructField("ad_id", IntegerType(), True)]
)
clicks_test_df = spark.read.schema(clicks_test_schema) \
.options(header='true', inferschema='false', nullValue='\\N') \
.csv(DATA_BUCKET_FOLDER + "clicks_test.csv") \
.withColumn('dummyClicksTest', F.lit(1)) \
.withColumn('clicked', F.lit(-999)) \
.alias('clicks_test')
test_set_df = clicks_test_df \
.join(promoted_content_df, on='ad_id', how='left') \
.join(documents_meta_df,
on=F.col("promoted_content.document_id_promo") == F.col("documents_meta.document_id_doc"),
how='left') \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(events_joined_df, on='display_id', how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.join(page_views_users_df,
on=[F.col("events.uuid_event") == F.col("page_views_users.uuid_pv"),
F.col("promoted_content.document_id_promo") == F.col("page_views_users.document_id_pv")],
how='left')
train_set_df = clicks_train_joined_df.cache()
print("train_set_df.count() =", train_set_df.count())
# # Training models
def is_null(value):
return value is None or len(str(value).strip()) == 0
LESS_SPECIAL_CAT_VALUE = 'less'
def get_category_field_values_counts(field, df, min_threshold=10):
category_counts = dict(list(filter(lambda x: not is_null(x[0]) and x[1] >= min_threshold,
df.select(field).groupBy(field).count().rdd.map(
lambda x: (x[0], x[1])).collect())))
    # Add a special value to bucket field values whose counts fall below min_threshold
category_counts[LESS_SPECIAL_CAT_VALUE] = -1
return category_counts
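# e.g. for field='event_country' the result is a dict like
# {'US': 123456, 'GB': 23456, ..., 'less': -1} (hypothetical counts); the
# 'less' sentinel reserves a bucket for values rarer than min_threshold.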
# ## Building category values counters and indexers
event_country_values_counts = get_category_field_values_counts('event_country', events_df, min_threshold=10)
len(event_country_values_counts)
# All non-null categories: 230
event_country_state_values_counts = get_category_field_values_counts('event_country_state', events_df, min_threshold=10)
len(event_country_state_values_counts)
event_geo_location_values_counts = get_category_field_values_counts('geo_location_event', events_df, min_threshold=10)
len(event_geo_location_values_counts)
# All non-null categories: 2988
doc_entity_id_values_counts = get_category_field_values_counts('entity_id', documents_entities_df, min_threshold=10)
len(doc_entity_id_values_counts)
# All non-null categories: 1326009
# ## Processing average CTR by categories
def get_percentiles(df, field, quantiles_levels=None, max_error_rate=0.0):
if quantiles_levels is None:
quantiles_levels = np.arange(0.0, 1.1, 0.1).tolist()
quantiles = df.approxQuantile(field, quantiles_levels, max_error_rate)
return dict(zip(quantiles_levels, quantiles))
# REG = 10
REG = 0
ctr_udf = F.udf(lambda clicks, views: clicks / float(views + REG), FloatType())
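# With REG = 0 this is the plain empirical CTR, e.g. 3 clicks over 100 views
# -> 0.03; a positive REG would shrink the estimate for low-view items
# (additive smoothing on the denominator).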
# ### Average CTR by ad_id
ad_id_popularity_df = train_set_df.groupby('ad_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
# ad_id_popularity_df.count()
# get_percentiles(ad_id_popularity_df, 'clicks')
# get_percentiles(ad_id_popularity_df, 'views')
ad_id_popularity = ad_id_popularity_df.filter('views > 5').select('ad_id', 'ctr', 'views') \
.rdd.map(lambda x: (x['ad_id'], (x['ctr'], x['views'], 1, 1))).collectAsMap()
ad_id_popularity_broad = sc.broadcast(ad_id_popularity)
list(ad_id_popularity.values())[:3]
len(ad_id_popularity)
# get_ad_id_ctr_udf = F.udf(lambda ad_id: ad_id_popularity[ad_id] if ad_id in ad_id_popularity else -1, FloatType())
ad_id_avg_ctr = sum(map(lambda x: x[0], ad_id_popularity.values())) / float(len(ad_id_popularity))
ad_id_avg_ctr
ad_id_weighted_avg_ctr = sum(map(lambda x: x[0] * x[1], ad_id_popularity.values())) / float(
sum(map(lambda x: x[1], ad_id_popularity.values())))
ad_id_weighted_avg_ctr
ad_id_views_median = np.median(np.array(list(map(lambda x: x[1], ad_id_popularity.values()))))
ad_id_views_median
ad_id_views_mean = sum(map(lambda x: x[1], ad_id_popularity.values())) / float(len(ad_id_popularity))
ad_id_views_mean
# ### Average CTR by document_id (promoted_content)
document_id_popularity_df = train_set_df \
.groupby('document_id_promo') \
.agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
document_id_popularity = document_id_popularity_df.filter('views > 5') \
.select('document_id_promo', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['document_id_promo'],
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(document_id_popularity)
document_id_popularity_broad = sc.broadcast(document_id_popularity)
# document_id_popularity_df.count()
# get_percentiles(document_id_popularity_df, 'clicks')
# get_percentiles(document_id_popularity_df, 'views')
document_id_avg_ctr = sum(map(lambda x: x[0], document_id_popularity.values())) / float(len(document_id_popularity))
document_id_avg_ctr
document_id_weighted_avg_ctr = sum(list(map(lambda x: x[0] * x[1], document_id_popularity.values()))) / float(
sum(list(map(lambda x: x[1], document_id_popularity.values()))))
document_id_weighted_avg_ctr
document_id_views_median = np.median(np.array(list(map(lambda x: x[1], document_id_popularity.values()))))
document_id_views_median
document_id_views_mean = sum(map(lambda x: x[1], document_id_popularity.values())) / float(len(document_id_popularity))
document_id_views_mean
# ### Average CTR by (doc_event, doc_ad)
doc_event_doc_ad_avg_ctr_df = train_set_df.groupBy('document_id_event', 'document_id_promo') \
.agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'), F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
doc_event_doc_ad_avg_ctr = doc_event_doc_ad_avg_ctr_df.filter('views > 5') \
.select('document_id_event', 'document_id_promo', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: ((x['document_id_event'], x['document_id_promo']),
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(doc_event_doc_ad_avg_ctr)
doc_event_doc_ad_avg_ctr_broad = sc.broadcast(doc_event_doc_ad_avg_ctr)
# ### Average CTR by country, source_id
source_id_by_country_popularity_df = train_set_df \
.select('clicked', 'source_id', 'event_country', 'ad_id') \
.groupby('event_country', 'source_id') \
.agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
# source_id_popularity = source_id_popularity_df
# .filter('views > 100 and source_id is not null')
# .select('source_id', 'ctr')
# .rdd.collectAsMap()
source_id_by_country_popularity = source_id_by_country_popularity_df \
    .filter('views > 5 and source_id is not null and event_country <> ""') \
    .select('event_country', 'source_id', 'ctr', 'views', 'distinct_ad_ids') \
    .rdd.map(lambda x: ((x['event_country'], x['source_id']),
                        (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
    .collectAsMap()
len(source_id_by_country_popularity)
source_id_by_country_popularity_broad = sc.broadcast(source_id_by_country_popularity)
source_id_by_country_avg_ctr = sum(map(lambda x: x[0], source_id_by_country_popularity.values())) / float(
len(source_id_by_country_popularity))
source_id_by_country_avg_ctr
source_id_by_country_weighted_avg_ctr = sum(
map(lambda x: x[0] * x[1], source_id_by_country_popularity.values())) / float(
sum(map(lambda x: x[1], source_id_by_country_popularity.values())))
source_id_by_country_weighted_avg_ctr
source_id_by_country_views_median = np.median(
np.array(list(map(lambda x: x[1], source_id_by_country_popularity.values()))))
source_id_by_country_views_median
source_id_by_country_views_mean = sum(map(lambda x: x[1], source_id_by_country_popularity.values())) / float(
len(source_id_by_country_popularity))
source_id_by_country_views_mean
# ### Average CTR by source_id
source_id_popularity_df = train_set_df.select('clicked', 'source_id', 'ad_id') \
.groupby('source_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
source_id_popularity = source_id_popularity_df \
.filter('views > 10 and source_id is not null') \
.select('source_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['source_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(source_id_popularity)
source_id_popularity_broad = sc.broadcast(source_id_popularity)
# source_id_popularity_df.count()
# get_percentiles(source_id_popularity_df, 'clicks')
# get_percentiles(source_id_popularity_df, 'views')
# source_id_popularity = source_id_popularity_df
# .filter('views > 100 and source_id is not null')
# .select('source_id', 'ctr')
# .rdd.collectAsMap()
# ### Average CTR by publisher_id
publisher_popularity_df = train_set_df.select('clicked', 'publisher_id', 'ad_id') \
.groupby('publisher_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
publisher_popularity = publisher_popularity_df \
.filter('views > 10 and publisher_id is not null') \
.select('publisher_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['publisher_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(publisher_popularity)
publisher_popularity_broad = sc.broadcast(publisher_popularity)
# publisher_popularity_df.count()
# ##863
# get_percentiles(publisher_popularity_df, 'clicks')
# get_percentiles(publisher_popularity_df, 'views')
# publisher_id_popularity = publisher_popularity_df
# .filter('views > 100 and publisher_id is not null')
# .select('publisher_id', 'ctr')
# .rdd.collectAsMap()
# len(publisher_id_popularity)
# ##639
# ### Average CTR by advertiser_id
advertiser_id_popularity_df = train_set_df.select('clicked', 'advertiser_id', 'ad_id') \
.groupby('advertiser_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
advertiser_id_popularity = advertiser_id_popularity_df \
.filter('views > 10 and advertiser_id is not null') \
.select('advertiser_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['advertiser_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], 1))).collectAsMap()
len(advertiser_id_popularity)
advertiser_id_popularity_broad = sc.broadcast(advertiser_id_popularity)
# advertiser_id_popularity_df.count()
# ##4063
# get_percentiles(advertiser_id_popularity_df, 'clicks')
# get_percentiles(advertiser_id_popularity_df, 'views')
# advertiser_id_popularity = advertiser_id_popularity_df
# .filter('views > 100 and advertiser_id is not null')
# .select('advertiser_id', 'ctr')
# .rdd.collectAsMap()
# len(advertiser_id_popularity)
# ##3129
# ### Average CTR by campaign_id
campaign_id_popularity_df = train_set_df.select('clicked', 'campaign_id', 'ad_id') \
.groupby('campaign_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
campaign_id_popularity = campaign_id_popularity_df \
.filter('views > 10 and campaign_id is not null') \
.select('campaign_id', 'ctr', 'views', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['campaign_id'], (x['ctr'], x['views'], x['distinct_ad_ids'], 1))) \
.collectAsMap()
len(campaign_id_popularity)
campaign_id_popularity_broad = sc.broadcast(campaign_id_popularity)
# campaign_id_popularity_df.count()
# ##31390
# get_percentiles(campaign_id_popularity_df, 'clicks')
# get_percentiles(campaign_id_popularity_df, 'views')
# campaign_id_popularity = campaign_id_popularity_df
# .filter('views > 100 and campaign_id is not null')
# .select('campaign_id', 'ctr')
# .rdd.collectAsMap()
# len(campaign_id_popularity)
# ##16097
# ### Average CTR by category
category_id_popularity_df = train_set_df.join(
documents_categories_df.alias('cat_local'),
on=F.col("document_id_promo") == F.col("cat_local.document_id_cat"), how='inner') \
.select('clicked', 'category_id', 'confidence_level_cat', 'ad_id') \
.groupby('category_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_cat').alias('avg_confidence_level_cat'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
category_id_popularity = category_id_popularity_df.filter('views > 10') \
.select('category_id', 'ctr', 'views', 'avg_confidence_level_cat', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['category_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_cat']))).collectAsMap()
len(category_id_popularity)
category_id_popularity_broad = sc.broadcast(category_id_popularity)
list(category_id_popularity.values())[:10]
np.median(np.array(list(map(lambda x: x[1], category_id_popularity.values()))))
sum(map(lambda x: x[1], category_id_popularity.values())) / float(len(category_id_popularity))
# There seems to be a hierarchy in the categories, judging by the pattern of the codes...
# category_id_popularity
# ### Average CTR by (country, category)
category_id_by_country_popularity_df = train_set_df \
.join(documents_categories_df.alias('cat_local'),
on=F.col("document_id_promo") == F.col("cat_local.document_id_cat"), how='inner') \
.select('clicked', 'category_id', 'confidence_level_cat', 'event_country', 'ad_id') \
.groupby('event_country', 'category_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_cat').alias('avg_confidence_level_cat'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
category_id_by_country_popularity = category_id_by_country_popularity_df \
.filter('views > 10 and event_country <> ""') \
.select('event_country', 'category_id', 'ctr', 'views', 'avg_confidence_level_cat',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['category_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_cat']))).collectAsMap()
len(category_id_by_country_popularity)
category_id_by_country_popularity_broad = sc.broadcast(category_id_by_country_popularity)
# ### Average CTR by Topic
topic_id_popularity_df = train_set_df.join(
documents_topics_df.alias('top_local'),
on=F.col("document_id_promo") == F.col("top_local.document_id_top"), how='inner') \
.select('clicked', 'topic_id', 'confidence_level_top', 'ad_id') \
.groupby('topic_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.mean('confidence_level_top').alias('avg_confidence_level_top'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
topic_id_popularity = topic_id_popularity_df.filter('views > 10') \
.select('topic_id', 'ctr', 'views', 'avg_confidence_level_top', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['topic_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_top']))).collectAsMap()
len(topic_id_popularity)
topic_id_popularity_broad = sc.broadcast(topic_id_popularity)
sum(map(lambda x: x[1], topic_id_popularity.values())) / float(len(topic_id_popularity))
sum(map(lambda x: x[2] * x[1], topic_id_popularity.values())) / float(len(topic_id_popularity))
# ### Average CTR by (country, topic)
topic_id_by_country_popularity_df = train_set_df.join(
documents_topics_df.alias('top_local'),
on=F.col("document_id_promo") == F.col("top_local.document_id_top"), how='inner') \
.select('clicked', 'topic_id', 'confidence_level_top', 'event_country', 'ad_id') \
.groupby('event_country', 'topic_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_top').alias('avg_confidence_level_top'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
topic_id_id_by_country_popularity = topic_id_by_country_popularity_df \
.filter('views > 10 and event_country <> ""') \
.select('event_country', 'topic_id', 'ctr', 'views', 'avg_confidence_level_top',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['topic_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_top']))).collectAsMap()
len(topic_id_id_by_country_popularity)
topic_id_id_by_country_popularity_broad = sc.broadcast(topic_id_id_by_country_popularity)
# ### Average CTR by Entity
entity_id_popularity_df = train_set_df.join(
documents_entities_df.alias('ent_local'),
on=F.col("document_id_promo") == F.col("ent_local.document_id_ent"), how='inner') \
.select('clicked', 'entity_id', 'confidence_level_ent', 'ad_id') \
.groupby('entity_id').agg(F.sum('clicked').alias('clicks'), F.count('*').alias('views'),
F.mean('confidence_level_ent').alias('avg_confidence_level_ent'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
entity_id_popularity = entity_id_popularity_df.filter('views > 5') \
.select('entity_id', 'ctr', 'views', 'avg_confidence_level_ent', 'distinct_ad_ids') \
.rdd.map(lambda x: (x['entity_id'],
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_ent']))).collectAsMap()
len(entity_id_popularity)
entity_id_popularity_broad = sc.broadcast(entity_id_popularity)
np.median(np.array(list(map(lambda x: x[1], entity_id_popularity.values()))))
sum(map(lambda x: x[1], entity_id_popularity.values())) / float(len(entity_id_popularity))
# ### Average CTR by (country, entity)
entity_id_by_country_popularity_df = train_set_df.join(
documents_entities_df.alias('ent_local'),
on=F.col("document_id_promo") == F.col("ent_local.document_id_ent"), how='inner') \
.select('clicked', 'entity_id', 'event_country', 'confidence_level_ent', 'ad_id') \
.groupby('event_country', 'entity_id').agg(F.sum('clicked').alias('clicks'),
F.count('*').alias('views'),
F.mean('confidence_level_ent').alias('avg_confidence_level_ent'),
F.countDistinct('ad_id').alias('distinct_ad_ids')) \
.withColumn('ctr', ctr_udf('clicks', 'views'))
entity_id_by_country_popularity = entity_id_by_country_popularity_df \
.filter('views > 5 and event_country <> ""') \
.select('event_country', 'entity_id', 'ctr', 'views', 'avg_confidence_level_ent',
'distinct_ad_ids') \
.rdd.map(lambda x: ((x['event_country'], x['entity_id']),
(x['ctr'], x['views'], x['distinct_ad_ids'], x['avg_confidence_level_ent']))).collectAsMap()
len(entity_id_by_country_popularity)
entity_id_by_country_popularity_broad = sc.broadcast(entity_id_by_country_popularity)
# ### Loading # docs by categories, topics, entities
df_filenames_suffix = ''
if evaluation:
df_filenames_suffix = '_eval'
with open(OUTPUT_BUCKET_FOLDER + 'categories_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
categories_docs_counts = cPickle.load(input_file)
len(categories_docs_counts)
with open(OUTPUT_BUCKET_FOLDER + 'topics_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
topics_docs_counts = cPickle.load(input_file)
len(topics_docs_counts)
with open(OUTPUT_BUCKET_FOLDER + 'entities_docs_counts' + df_filenames_suffix + '.pickle', 'rb') as input_file:
entities_docs_counts = cPickle.load(input_file)
len(entities_docs_counts)
documents_total = documents_meta_df.count()
documents_total
# ## Exploring Publish Time
publish_times_df = train_set_df.filter('publish_time is not null').select('document_id_promo',
'publish_time').distinct().select(
F.col('publish_time').cast(IntegerType()))
publish_time_percentiles = get_percentiles(publish_times_df, 'publish_time', quantiles_levels=[0.5],
max_error_rate=0.001)
publish_time_percentiles
publish_time_median = int(publish_time_percentiles[0.5])
datetime.datetime.utcfromtimestamp(publish_time_median)
def get_days_diff(newer_timestamp, older_timestamp):
sec_diff = newer_timestamp - older_timestamp
days_diff = sec_diff / 60 / 60 / 24
return days_diff
def get_time_decay_factor(timestamp, timestamp_ref=None, alpha=0.001):
if timestamp_ref is None:
timestamp_ref = time.time()
days_diff = get_days_diff(timestamp_ref, timestamp)
denominator = math.pow(1 + alpha, days_diff)
if denominator != 0:
return 1.0 / denominator
else:
return 0.0
TIME_DECAY_ALPHA = 0.0005
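# A worked example of the decay above: with alpha = 0.0005, content published
# 365 days before the reference gets a factor of 1 / 1.0005**365 ~= 0.83,
# so older documents are down-weighted smoothly rather than cut off.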
ref_dates = [
1476714880, # 7 days
1474727680, # 30 days
1469370880, # 90 days
1461508480, # 180 days
1445697280, # 1 year
1414161280 # 2 years
]
for d in ref_dates:
print(datetime.datetime.utcfromtimestamp(d), get_time_decay_factor(d, alpha=TIME_DECAY_ALPHA))
# ### Get local time
DEFAULT_TZ_EST = -4.0
def get_local_utc_bst_tz(event_country, event_country_state):
local_tz = DEFAULT_TZ_EST
if len(event_country) > 0:
if event_country in countries_utc_dst_broad.value:
local_tz = countries_utc_dst_broad.value[event_country]
if len(event_country_state) > 2:
state = event_country_state[3:5]
if event_country == 'US':
if state in us_states_utc_dst_broad.value:
local_tz = us_states_utc_dst_broad.value[state]
elif event_country == 'CA':
if state in ca_countries_utc_dst_broad.value:
local_tz = ca_countries_utc_dst_broad.value[state]
return float(local_tz)
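# e.g. get_local_utc_bst_tz('US', 'US>CA') looks up the two-letter state code
# after 'US>' in the US state table; a country or state missing from the
# broadcast dicts falls back to DEFAULT_TZ_EST (-4.0).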
hour_bins_dict = {'EARLY_MORNING': 0,
'MORNING': 1,
'MIDDAY': 2,
'AFTERNOON': 3,
'EVENING': 4,
'NIGHT': 5}
hour_bins_values = sorted(hour_bins_dict.values())
def get_hour_bin(hour):
if hour >= 5 and hour < 8:
hour_bin = hour_bins_dict['EARLY_MORNING']
elif hour >= 8 and hour < 11:
hour_bin = hour_bins_dict['MORNING']
elif hour >= 11 and hour < 14:
hour_bin = hour_bins_dict['MIDDAY']
elif hour >= 14 and hour < 19:
hour_bin = hour_bins_dict['AFTERNOON']
elif hour >= 19 and hour < 22:
hour_bin = hour_bins_dict['EVENING']
else:
hour_bin = hour_bins_dict['NIGHT']
return hour_bin
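# e.g. get_hour_bin(9) == 1 (MORNING) and get_hour_bin(23) == 5 (NIGHT);
# hours outside the named ranges, including the early hours before 5, all
# fall into the NIGHT bin.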
def get_local_datetime(dt, event_country, event_country_state):
local_tz = get_local_utc_bst_tz(event_country, event_country_state)
tz_delta = local_tz - DEFAULT_TZ_EST
local_time = dt + datetime.timedelta(hours=tz_delta)
return local_time
get_local_datetime(datetime.datetime.now(), 'US', 'US>CA')
def is_weekend(dt):
return dt.weekday() >= 5
is_weekend(datetime.datetime(2016, 6, 14))
# ## Average CTR functions
timestamp_ref = date_time_to_unix_epoch(datetime.datetime(2016, 6, 29, 3, 59, 59))
decay_factor_default = get_time_decay_factor(publish_time_median, timestamp_ref, alpha=TIME_DECAY_ALPHA)
print("decay_factor_default", decay_factor_default)
def get_confidence_sample_size(sample, max_for_reference=100000):
    # Cap the confidence at 1.0 for large sample sizes
if sample >= max_for_reference:
return 1.0
    ref_log = math.log(1 + max_for_reference,
                       2)  # Curiously, a base-2 log for the reference gives a slightly higher score, so it is kept
return math.log(1 + sample) / float(ref_log)
for i in [0, 0.5, 1, 2, 3, 4, 5, 10, 20, 30, 100, 200, 300, 1000, 2000, 3000, 10000, 20000, 30000, 50000, 90000, 100000,
500000, 900000, 1000000, 2171607]:
print(i, get_confidence_sample_size(i))
def get_popularity(an_id, a_dict):
return (a_dict[an_id][0], get_confidence_sample_size(a_dict[an_id][1] / float(a_dict[an_id][2])) * a_dict[an_id][
3]) if an_id in a_dict else (None, None)
def get_weighted_avg_popularity_from_list(ids_list, confidence_ids_list, pop_dict):
pops = list(filter(lambda x: x[0][0] is not None,
[(get_popularity(an_id, pop_dict), confidence) for an_id, confidence in
zip(ids_list, confidence_ids_list)]))
# print("pops",pops)
if len(pops) > 0:
weighted_avg = sum(map(lambda x: x[0][0] * x[0][1] * x[1], pops)) / float(
sum(map(lambda x: x[0][1] * x[1], pops)))
confidence = max(map(lambda x: x[0][1] * x[1], pops))
return weighted_avg, confidence
else:
return None, None
def get_weighted_avg_country_popularity_from_list(event_country, ids_list, confidence_ids_list, pop_dict):
pops = list(filter(lambda x: x[0][0] is not None,
[(get_popularity((event_country, an_id), pop_dict), confidence) for an_id, confidence in
zip(ids_list, confidence_ids_list)]))
if len(pops) > 0:
weighted_avg = sum(map(lambda x: x[0][0] * x[0][1] * x[1], pops)) / float(
sum(map(lambda x: x[0][1] * x[1], pops)))
confidence = max(map(lambda x: x[0][1] * x[1], pops))
return weighted_avg, confidence
else:
return None, None
def get_popularity_score(event_country, ad_id, document_id, source_id,
publisher_id, advertiser_id, campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
output_detailed_list=False):
probs = []
avg_ctr, confidence = get_popularity(ad_id, ad_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_ad_id', avg_ctr, confidence))
avg_ctr, confidence = get_popularity(document_id, document_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_document_id', avg_ctr, confidence))
avg_ctr, confidence = get_popularity((document_id_event, document_id), doc_event_doc_ad_avg_ctr_broad.value)
if avg_ctr is not None:
probs.append(('pop_doc_event_doc_ad', avg_ctr, confidence))
if source_id != -1:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_popularity((event_country, source_id),
source_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_source_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_popularity(source_id, source_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_source_id', avg_ctr, confidence))
if publisher_id is not None:
avg_ctr, confidence = get_popularity(publisher_id, publisher_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_publisher_id', avg_ctr, confidence))
if advertiser_id is not None:
avg_ctr, confidence = get_popularity(advertiser_id, advertiser_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_advertiser_id', avg_ctr, confidence))
if campaign_id is not None:
avg_ctr, confidence = get_popularity(campaign_id, campaign_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_campain_id', avg_ctr, confidence))
if len(entity_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, entity_ids_by_doc, ent_confidence_level_by_doc,
entity_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_entity_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
entity_ids_by_doc, ent_confidence_level_by_doc,
entity_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_entity_id', avg_ctr, confidence))
if len(topic_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, topic_ids_by_doc, top_confidence_level_by_doc,
topic_id_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_topic_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
topic_ids_by_doc, top_confidence_level_by_doc,
topic_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_topic_id', avg_ctr, confidence))
if len(category_ids_by_doc) > 0:
avg_ctr = None
if event_country != '':
avg_ctr, confidence = get_weighted_avg_country_popularity_from_list(
event_country, category_ids_by_doc, cat_confidence_level_by_doc,
category_id_by_country_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_category_id_country', avg_ctr, confidence))
avg_ctr, confidence = get_weighted_avg_popularity_from_list(
category_ids_by_doc, cat_confidence_level_by_doc,
category_id_popularity_broad.value)
if avg_ctr is not None:
probs.append(('pop_category_id', avg_ctr, confidence))
# print("[get_popularity_score] probs", probs)
if output_detailed_list:
return probs
else:
if len(probs) > 0:
# weighted_avg_probs_by_confidence = sum(map(lambda x: x[1] * math.log(1+x[2],2), probs)) \
# / float(sum(map(lambda x: math.log(1+x[2],2), probs)))
weighted_avg_probs_by_confidence = sum(map(lambda x: x[1] * x[2], probs)) / float(
sum(map(lambda x: x[2], probs)))
confidence = max(map(lambda x: x[2], probs))
return weighted_avg_probs_by_confidence, confidence
else:
return None, None
# ## Content-Based similarity functions
def cosine_similarity_dicts(dict1, dict2):
dict1_norm = math.sqrt(sum([v ** 2 for v in dict1.values()]))
dict2_norm = math.sqrt(sum([v ** 2 for v in dict2.values()]))
sum_common_aspects = 0.0
intersections = 0
for key in dict1:
if key in dict2:
sum_common_aspects += dict1[key] * dict2[key]
intersections += 1
return sum_common_aspects / (dict1_norm * dict2_norm), intersections
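# Minimal sketch on toy sparse vectors (illustrative values only):
# one shared key ('y'), expected similarity ~0.496 with 1 intersection
_d1 = {'x': 1.0, 'y': 2.0}
_d2 = {'y': 2.0, 'z': 3.0}
print("toy cosine similarity:", cosine_similarity_dicts(_d1, _d2))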
def cosine_similarity_user_docs_aspects(user_aspect_profile, doc_aspect_ids, doc_aspects_confidence,
aspect_docs_counts):
if user_aspect_profile is None or len(user_aspect_profile) == 0 or doc_aspect_ids is None or len(
doc_aspect_ids) == 0:
return None, None
doc_aspects = dict(zip(doc_aspect_ids, doc_aspects_confidence))
doc_aspects_tfidf_confid = {}
for key in doc_aspects:
tf = 1.0
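        # Note: the double log strongly dampens the IDF weight of common aspects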
idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_aspects[key]
doc_aspects_tfidf_confid[key] = tf * idf * confidence
user_aspects_tfidf_confid = {}
for key in user_aspect_profile:
tfidf = user_aspect_profile[key][0]
confidence = user_aspect_profile[key][1]
user_aspects_tfidf_confid[key] = tfidf * confidence
similarity, intersections = cosine_similarity_dicts(doc_aspects_tfidf_confid, user_aspects_tfidf_confid)
if intersections > 0:
        # Chance that these intersections occur at random: P(A)^intersections * P(B)^intersections
random_error = math.pow(len(doc_aspects) / float(len(aspect_docs_counts)),
intersections) * math.pow(len(user_aspect_profile) / float(len(aspect_docs_counts)),
intersections)
else:
# P(A not intersect B) = 1 - P(A intersect B)
random_error = 1 - ((len(doc_aspects) / float(len(aspect_docs_counts))) *
(len(user_aspect_profile) / float(len(aspect_docs_counts))))
confidence = 1.0 - random_error
return similarity, confidence
def cosine_similarity_doc_event_doc_ad_aspects(doc_event_aspect_ids, doc_event_aspects_confidence,
doc_ad_aspect_ids, doc_ad_aspects_confidence,
aspect_docs_counts):
if doc_event_aspect_ids is None or len(doc_event_aspect_ids) == 0 \
or doc_ad_aspect_ids is None or len(doc_ad_aspect_ids) == 0:
return None, None
doc_event_aspects = dict(zip(doc_event_aspect_ids, doc_event_aspects_confidence))
doc_event_aspects_tfidf_confid = {}
for key in doc_event_aspect_ids:
tf = 1.0
idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_event_aspects[key]
doc_event_aspects_tfidf_confid[key] = tf * idf * confidence
doc_ad_aspects = dict(zip(doc_ad_aspect_ids, doc_ad_aspects_confidence))
doc_ad_aspects_tfidf_confid = {}
for key in doc_ad_aspect_ids:
tf = 1.0
idf = math.log(math.log(documents_total / float(aspect_docs_counts[key])))
confidence = doc_ad_aspects[key]
doc_ad_aspects_tfidf_confid[key] = tf * idf * confidence
similarity, intersections = cosine_similarity_dicts(doc_event_aspects_tfidf_confid, doc_ad_aspects_tfidf_confid)
if intersections > 0:
        # Chance that these intersections occur at random: P(A)^intersections * P(B)^intersections
random_error = math.pow(len(doc_event_aspect_ids) / float(len(aspect_docs_counts)),
intersections) * math.pow(len(doc_ad_aspect_ids) / float(len(aspect_docs_counts)),
intersections)
else:
# P(A not intersect B) = 1 - P(A intersect B)
random_error = 1 - ((len(doc_event_aspect_ids) / float(len(aspect_docs_counts))) *
(len(doc_ad_aspect_ids) / float(len(aspect_docs_counts))))
confidence = 1.0 - random_error
return similarity, confidence
def get_user_cb_interest_score(user_views_count, user_categories, user_topics, user_entities,
timestamp_event, category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
output_detailed_list=False):
# Content-Based
sims = []
categories_similarity, cat_sim_confidence = cosine_similarity_user_docs_aspects(user_categories,
category_ids_by_doc,
cat_confidence_level_by_doc,
categories_docs_counts)
if categories_similarity is not None:
sims.append(('user_doc_ad_sim_categories', categories_similarity, cat_sim_confidence))
topics_similarity, top_sim_confidence = cosine_similarity_user_docs_aspects(user_topics, topic_ids_by_doc,
top_confidence_level_by_doc,
topics_docs_counts)
if topics_similarity is not None:
sims.append(('user_doc_ad_sim_topics', topics_similarity, top_sim_confidence))
entities_similarity, entity_sim_confid = cosine_similarity_user_docs_aspects(user_entities, entity_ids_by_doc,
ent_confidence_level_by_doc,
entities_docs_counts)
if entities_similarity is not None:
sims.append(('user_doc_ad_sim_entities', entities_similarity, entity_sim_confid))
if output_detailed_list:
return sims
else:
if len(sims) > 0:
weighted_avg_sim_by_confidence = sum(map(lambda x: x[1] * x[2], sims)) / float(
sum(map(lambda x: x[2], sims)))
confidence = sum(map(lambda x: x[2], sims)) / float(len(sims))
# print("[get_user_cb_interest_score] sims: {} | \
# Avg: {} - Confid: {}".format(sims, weighted_avg_sim_by_confidence, confidence))
return weighted_avg_sim_by_confidence, confidence
else:
return None, None
def get_doc_event_doc_ad_cb_similarity_score(doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=False):
# Content-Based
sims = []
categories_similarity, cat_sim_confidence = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
categories_docs_counts)
if categories_similarity is not None:
sims.append(('doc_event_doc_ad_sim_categories', categories_similarity, cat_sim_confidence))
topics_similarity, top_sim_confidence = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
topics_docs_counts)
if topics_similarity is not None:
sims.append(('doc_event_doc_ad_sim_topics', topics_similarity, top_sim_confidence))
entities_similarity, entity_sim_confid = cosine_similarity_doc_event_doc_ad_aspects(
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
entities_docs_counts)
if entities_similarity is not None:
sims.append(('doc_event_doc_ad_sim_entities', entities_similarity, entity_sim_confid))
if output_detailed_list:
return sims
else:
if len(sims) > 0:
weighted_avg_sim_by_confidence = sum(map(lambda x: x[1] * x[2], sims)) / float(
sum(map(lambda x: x[2], sims)))
confidence = sum(map(lambda x: x[2], sims)) / float(len(sims))
# print("[get_user_cb_interest_score] sims: {} | \
# Avg: {} - Confid: {}".format(sims, weighted_avg_sim_by_confidence, confidence))
return weighted_avg_sim_by_confidence, confidence
else:
return None, None
# # Feature Vector export
bool_feature_names = ['event_weekend',
'user_has_already_viewed_doc']
int_feature_names = ['user_views',
'ad_views',
'doc_views',
'doc_event_days_since_published',
'doc_event_hour',
'doc_ad_days_since_published',
]
float_feature_names = [
'pop_ad_id',
'pop_ad_id_conf',
'pop_ad_id_conf_multipl',
'pop_document_id',
'pop_document_id_conf',
'pop_document_id_conf_multipl',
'pop_publisher_id',
'pop_publisher_id_conf',
'pop_publisher_id_conf_multipl',
'pop_advertiser_id',
'pop_advertiser_id_conf',
'pop_advertiser_id_conf_multipl',
'pop_campain_id',
'pop_campain_id_conf',
'pop_campain_id_conf_multipl',
'pop_doc_event_doc_ad',
'pop_doc_event_doc_ad_conf',
'pop_doc_event_doc_ad_conf_multipl',
'pop_source_id',
'pop_source_id_conf',
'pop_source_id_conf_multipl',
'pop_source_id_country',
'pop_source_id_country_conf',
'pop_source_id_country_conf_multipl',
'pop_entity_id',
'pop_entity_id_conf',
'pop_entity_id_conf_multipl',
'pop_entity_id_country',
'pop_entity_id_country_conf',
'pop_entity_id_country_conf_multipl',
'pop_topic_id',
'pop_topic_id_conf',
'pop_topic_id_conf_multipl',
'pop_topic_id_country',
'pop_topic_id_country_conf',
'pop_topic_id_country_conf_multipl',
'pop_category_id',
'pop_category_id_conf',
'pop_category_id_conf_multipl',
'pop_category_id_country',
'pop_category_id_country_conf',
'pop_category_id_country_conf_multipl',
'user_doc_ad_sim_categories',
'user_doc_ad_sim_categories_conf',
'user_doc_ad_sim_categories_conf_multipl',
'user_doc_ad_sim_topics',
'user_doc_ad_sim_topics_conf',
'user_doc_ad_sim_topics_conf_multipl',
'user_doc_ad_sim_entities',
'user_doc_ad_sim_entities_conf',
'user_doc_ad_sim_entities_conf_multipl',
'doc_event_doc_ad_sim_categories',
'doc_event_doc_ad_sim_categories_conf',
'doc_event_doc_ad_sim_categories_conf_multipl',
'doc_event_doc_ad_sim_topics',
'doc_event_doc_ad_sim_topics_conf',
'doc_event_doc_ad_sim_topics_conf_multipl',
'doc_event_doc_ad_sim_entities',
'doc_event_doc_ad_sim_entities_conf',
'doc_event_doc_ad_sim_entities_conf_multipl'
]
TRAFFIC_SOURCE_FV = 'traffic_source'
EVENT_HOUR_FV = 'event_hour'
EVENT_COUNTRY_FV = 'event_country'
EVENT_COUNTRY_STATE_FV = 'event_country_state'
EVENT_GEO_LOCATION_FV = 'event_geo_location'
EVENT_PLATFORM_FV = 'event_platform'
AD_ADVERTISER_FV = 'ad_advertiser'
DOC_AD_SOURCE_ID_FV = 'doc_ad_source_id'
DOC_AD_PUBLISHER_ID_FV = 'doc_ad_publisher_id'
DOC_EVENT_SOURCE_ID_FV = 'doc_event_source_id'
DOC_EVENT_PUBLISHER_ID_FV = 'doc_event_publisher_id'
DOC_AD_CATEGORY_ID_FV = 'doc_ad_category_id'
DOC_AD_TOPIC_ID_FV = 'doc_ad_topic_id'
DOC_AD_ENTITY_ID_FV = 'doc_ad_entity_id'
DOC_EVENT_CATEGORY_ID_FV = 'doc_event_category_id'
DOC_EVENT_TOPIC_ID_FV = 'doc_event_topic_id'
DOC_EVENT_ENTITY_ID_FV = 'doc_event_entity_id'
# ### Configuring feature vector
category_feature_names_integral = ['ad_advertiser',
'doc_ad_category_id_1',
'doc_ad_category_id_2',
'doc_ad_category_id_3',
'doc_ad_topic_id_1',
'doc_ad_topic_id_2',
'doc_ad_topic_id_3',
'doc_ad_entity_id_1',
'doc_ad_entity_id_2',
'doc_ad_entity_id_3',
'doc_ad_entity_id_4',
'doc_ad_entity_id_5',
'doc_ad_entity_id_6',
'doc_ad_publisher_id',
'doc_ad_source_id',
'doc_event_category_id_1',
'doc_event_category_id_2',
'doc_event_category_id_3',
'doc_event_topic_id_1',
'doc_event_topic_id_2',
'doc_event_topic_id_3',
'doc_event_entity_id_1',
'doc_event_entity_id_2',
'doc_event_entity_id_3',
'doc_event_entity_id_4',
'doc_event_entity_id_5',
'doc_event_entity_id_6',
'doc_event_publisher_id',
'doc_event_source_id',
'event_country',
'event_country_state',
'event_geo_location',
'event_hour',
'event_platform',
'traffic_source']
feature_vector_labels_integral = bool_feature_names \
+ int_feature_names \
+ float_feature_names \
+ category_feature_names_integral
feature_vector_labels_integral_dict = dict([(key, idx) for idx, key in enumerate(feature_vector_labels_integral)])
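# Sanity check: total dimensionality of the exported feature vector
print("feature vector size:", len(feature_vector_labels_integral))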
with open(OUTPUT_BUCKET_FOLDER + 'feature_vector_labels_integral.txt', 'w') as output:
output.writelines('\n'.join(feature_vector_labels_integral))
# ### Building feature vectors
def set_feature_vector_cat_value_integral(field_name, field_value, feature_vector):
if not is_null(field_value): # and str(field_value) != '-1':
feature_vector[feature_vector_labels_integral_dict[field_name]] = float(field_value)
def set_feature_vector_cat_top_multi_values_integral(
field_name, values, confidences, feature_vector, top=5):
top_values = list(filter(lambda z: z != -1,
map(lambda y: y[0], sorted(zip(values, confidences), key=lambda x: -x[1]))))[:top]
for idx, field_value in list(enumerate(top_values)):
set_feature_vector_cat_value_integral(
'{}_{}'.format(field_name, idx + 1), field_value, feature_vector)
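# Illustrative example with hypothetical ids/confidences: keeps the top-2 non-placeholder (-1)
# values by confidence and writes them into the '<field>_1' and '<field>_2' slots
_demo_vector = {}
set_feature_vector_cat_top_multi_values_integral(
    'doc_ad_topic_id', [10, -1, 30], [0.2, 0.9, 0.5], _demo_vector, top=2)
print("demo feature vector entries:", _demo_vector)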
def get_ad_feature_vector_integral(
user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels):
try:
feature_vector = {}
if user_views_count is not None:
feature_vector[feature_vector_labels_integral_dict['user_views']] = float(user_views_count)
if user_doc_ids_viewed is not None:
feature_vector[feature_vector_labels_integral_dict['user_has_already_viewed_doc']] = float(
document_id in user_doc_ids_viewed)
if ad_id in ad_id_popularity_broad.value:
feature_vector[feature_vector_labels_integral_dict['ad_views']] = float(
ad_id_popularity_broad.value[ad_id][1])
if document_id in document_id_popularity_broad.value:
feature_vector[feature_vector_labels_integral_dict['doc_views']] = float(
document_id_popularity_broad.value[document_id][1])
if timestamp_event > -1:
dt_timestamp_event = convert_odd_timestamp(timestamp_event)
if doc_ad_publish_time is not None:
delta_days = (dt_timestamp_event - doc_ad_publish_time).days
if 0 <= delta_days <= 365 * 10: # 10 years
feature_vector[feature_vector_labels_integral_dict['doc_ad_days_since_published']] = float(
delta_days)
if doc_event_publish_time is not None:
delta_days = (dt_timestamp_event - doc_event_publish_time).days
if 0 <= delta_days <= 365 * 10: # 10 years
feature_vector[feature_vector_labels_integral_dict['doc_event_days_since_published']] = float(
delta_days)
# Local period of the day (hours)
dt_local_timestamp_event = get_local_datetime(dt_timestamp_event, event_country, event_country_state)
local_hour_bin = get_hour_bin(dt_local_timestamp_event.hour)
feature_vector[feature_vector_labels_integral_dict['doc_event_hour']] = float(
local_hour_bin) # Hour for Decision Trees
set_feature_vector_cat_value_integral(EVENT_HOUR_FV, local_hour_bin,
feature_vector) # Period of day for FFM
# Weekend
weekend = int(is_weekend(dt_local_timestamp_event))
feature_vector[feature_vector_labels_integral_dict['event_weekend']] = float(weekend)
conf_field_suffix = '_conf'
conf_multiplied_field_suffix = '_conf_multipl'
# Setting Popularity fields
pop_scores = get_popularity_score(event_country, ad_id, document_id, source_id,
publisher_id, advertiser_id, campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in pop_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Setting User-Doc_ad CB Similarity fields
user_doc_ad_cb_sim_scores = get_user_cb_interest_score(
user_views_count, user_categories, user_topics, user_entities,
timestamp_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in user_doc_ad_cb_sim_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Setting Doc_event-doc_ad CB Similarity fields
doc_event_doc_ad_cb_sim_scores = get_doc_event_doc_ad_cb_similarity_score(
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
output_detailed_list=True)
for score in doc_event_doc_ad_cb_sim_scores:
feature_vector[feature_vector_labels_integral_dict[score[0]]] = score[1]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_field_suffix]] = score[2]
feature_vector[feature_vector_labels_integral_dict[score[0] + conf_multiplied_field_suffix]] = \
score[1] * score[2]
# Process code for event_country
if event_country in event_country_values_counts:
event_country_code = event_country_values_counts[event_country]
else:
event_country_code = event_country_values_counts[LESS_SPECIAL_CAT_VALUE]
set_feature_vector_cat_value_integral(EVENT_COUNTRY_FV, event_country_code, feature_vector)
# Process code for event_country_state
if event_country_state in event_country_state_values_counts:
event_country_state_code = event_country_state_values_counts[event_country_state]
else:
event_country_state_code = event_country_state_values_counts[LESS_SPECIAL_CAT_VALUE]
set_feature_vector_cat_value_integral(EVENT_COUNTRY_STATE_FV, event_country_state_code, feature_vector)
# Process code for geo_location_event
if geo_location_event in event_geo_location_values_counts:
geo_location_event_code = event_geo_location_values_counts[geo_location_event]
else:
geo_location_event_code = event_geo_location_values_counts[LESS_SPECIAL_CAT_VALUE]
        # Shift traffic_source and platform_event from 1-based to 0-based codes
if traffic_source_pv is not None:
feature_vector[feature_vector_labels_integral_dict[TRAFFIC_SOURCE_FV]] = int(traffic_source_pv - 1)
if platform_event is not None:
feature_vector[feature_vector_labels_integral_dict[EVENT_PLATFORM_FV]] = int(platform_event - 1)
set_feature_vector_cat_value_integral(EVENT_GEO_LOCATION_FV, geo_location_event_code, feature_vector)
# set_feature_vector_cat_value_integral(TRAFFIC_SOURCE_FV, traffic_source_pv - 1, feature_vector)
# set_feature_vector_cat_value_integral(EVENT_PLATFORM_FV, platform_event - 1, feature_vector)
set_feature_vector_cat_value_integral(AD_ADVERTISER_FV, advertiser_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_AD_SOURCE_ID_FV, source_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_AD_PUBLISHER_ID_FV, publisher_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_EVENT_SOURCE_ID_FV, doc_event_source_id, feature_vector)
set_feature_vector_cat_value_integral(DOC_EVENT_PUBLISHER_ID_FV, doc_event_publisher_id, feature_vector)
set_feature_vector_cat_top_multi_values_integral(DOC_AD_CATEGORY_ID_FV, doc_ad_category_ids,
doc_ad_cat_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_AD_TOPIC_ID_FV, doc_ad_topic_ids,
doc_ad_top_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_CATEGORY_ID_FV, doc_event_category_ids,
doc_event_cat_confidence_levels, feature_vector, top=3)
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_TOPIC_ID_FV, doc_event_topic_ids,
doc_event_top_confidence_levels, feature_vector, top=3)
# Process codes for doc_ad_entity_ids
doc_ad_entity_ids_codes = [doc_entity_id_values_counts[x]
if x in doc_entity_id_values_counts
else doc_entity_id_values_counts[LESS_SPECIAL_CAT_VALUE]
for x in doc_ad_entity_ids]
set_feature_vector_cat_top_multi_values_integral(DOC_AD_ENTITY_ID_FV, doc_ad_entity_ids_codes,
doc_ad_ent_confidence_levels, feature_vector, top=6)
# Process codes for doc_event_entity_ids
doc_event_entity_ids_codes = [doc_entity_id_values_counts[x]
if x in doc_entity_id_values_counts
else doc_entity_id_values_counts[LESS_SPECIAL_CAT_VALUE]
for x in doc_event_entity_ids]
set_feature_vector_cat_top_multi_values_integral(DOC_EVENT_ENTITY_ID_FV, doc_event_entity_ids_codes,
doc_event_ent_confidence_levels, feature_vector, top=6)
        # Creating a dummy column as the last column,
        # because xgboost has a problem if the last column is undefined for all rows,
        # saying that dimensions of data and feature_names do not match
        # feature_vector[feature_vector_labels_dict[DUMMY_FEATURE_COLUMN]] = float(0)
# Ensuring that all elements are floats for compatibility with UDF output (ArrayType(FloatType()))
# feature_vector = list([float(x) for x in feature_vector])
except Exception as e:
raise Exception("[get_ad_feature_vector_integral] ERROR PROCESSING FEATURE VECTOR! Params: {}"
.format([user_doc_ids_viewed, user_views_count, user_categories, user_topics, user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
doc_ad_category_ids, doc_ad_cat_confidence_levels,
doc_ad_topic_ids, doc_ad_top_confidence_levels,
doc_ad_entity_ids, doc_ad_ent_confidence_levels,
doc_event_category_ids, doc_event_cat_confidence_levels,
doc_event_topic_ids, doc_event_top_confidence_levels,
doc_event_entity_ids, doc_event_ent_confidence_levels]),
e)
return SparseVector(len(feature_vector_labels_integral_dict), feature_vector)
get_ad_feature_vector_integral_udf = F.udf(
lambda user_doc_ids_viewed, user_views_count, user_categories, user_topics,
user_entities, event_country, event_country_state, ad_id, document_id, source_id,
doc_ad_publish_time, timestamp_event, platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
doc_event_category_id_list, doc_event_confidence_level_cat_list,
doc_event_topic_id_list, doc_event_confidence_level_top,
doc_event_entity_id_list, doc_event_confidence_level_ent:
get_ad_feature_vector_integral(user_doc_ids_viewed, user_views_count, user_categories, user_topics,
user_entities,
event_country, event_country_state,
ad_id, document_id, source_id, doc_ad_publish_time, timestamp_event,
platform_event,
geo_location_event,
doc_event_source_id, doc_event_publisher_id, doc_event_publish_time,
traffic_source_pv, advertiser_id, publisher_id,
campaign_id, document_id_event,
category_ids_by_doc, cat_confidence_level_by_doc,
topic_ids_by_doc, top_confidence_level_by_doc,
entity_ids_by_doc, ent_confidence_level_by_doc,
doc_event_category_id_list, doc_event_confidence_level_cat_list,
doc_event_topic_id_list, doc_event_confidence_level_top,
doc_event_entity_id_list, doc_event_confidence_level_ent),
VectorUDT())
# ## Export Train set feature vectors
train_set_enriched_df = train_set_df \
.join(documents_categories_grouped_df,
on=F.col("document_id_promo") == F.col("documents_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df,
on=F.col("document_id_promo") == F.col("documents_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df,
on=F.col("document_id_promo") == F.col("documents_entities_grouped.document_id_ent"),
how='left') \
.join(documents_categories_grouped_df
.withColumnRenamed('category_id_list', 'doc_event_category_id_list')
.withColumnRenamed('confidence_level_cat_list', 'doc_event_confidence_level_cat_list')
.alias('documents_event_categories_grouped'),
on=F.col("document_id_event") == F.col("documents_event_categories_grouped.document_id_cat"),
how='left') \
.join(documents_topics_grouped_df
.withColumnRenamed('topic_id_list', 'doc_event_topic_id_list')
.withColumnRenamed('confidence_level_top_list', 'doc_event_confidence_level_top_list')
.alias('documents_event_topics_grouped'),
on=F.col("document_id_event") == F.col("documents_event_topics_grouped.document_id_top"),
how='left') \
.join(documents_entities_grouped_df
.withColumnRenamed('entity_id_list', 'doc_event_entity_id_list')
.withColumnRenamed('confidence_level_ent_list', 'doc_event_confidence_level_ent_list')
.alias('documents_event_entities_grouped'),
on=F.col("document_id_event") == F.col("documents_event_entities_grouped.document_id_ent"),
how='left') \
.select('display_id', 'uuid_event', 'event_country', 'event_country_state', 'platform_event',
'source_id_doc_event', 'publisher_doc_event', 'publish_time_doc_event',
'publish_time', 'ad_id', 'document_id_promo', 'clicked',
'geo_location_event', 'advertiser_id', 'publisher_id',
'campaign_id', 'document_id_event',
'traffic_source_pv',
int_list_null_to_empty_list_udf('doc_event_category_id_list')
.alias('doc_event_category_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_cat_list')
.alias('doc_event_confidence_level_cat_list'),
int_list_null_to_empty_list_udf('doc_event_topic_id_list')
.alias('doc_event_topic_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_top_list')
.alias('doc_event_confidence_level_top_list'),
str_list_null_to_empty_list_udf('doc_event_entity_id_list')
.alias('doc_event_entity_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_ent_list')
.alias('doc_event_confidence_level_ent_list'),
int_null_to_minus_one_udf('source_id').alias('source_id'),
int_null_to_minus_one_udf('timestamp_event').alias('timestamp_event'),
int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
float_list_null_to_empty_list_udf('confidence_level_cat_list')
.alias('confidence_level_cat_list'),
int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
float_list_null_to_empty_list_udf('confidence_level_top_list')
.alias('confidence_level_top_list'),
str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
float_list_null_to_empty_list_udf('confidence_level_ent_list')
.alias('confidence_level_ent_list')) \
.join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
.withColumnRenamed('categories', 'user_categories') \
.withColumnRenamed('topics', 'user_topics') \
.withColumnRenamed('entities', 'user_entities') \
.withColumnRenamed('doc_ids', 'user_doc_ids_viewed') \
.withColumnRenamed('views', 'user_views_count')
train_set_feature_vectors_df = train_set_enriched_df \
.withColumn('feature_vector',
get_ad_feature_vector_integral_udf(
'user_doc_ids_viewed',
'user_views_count',
'user_categories',
'user_topics',
'user_entities',
'event_country',
'event_country_state',
'ad_id',
'document_id_promo',
'source_id',
'publish_time',
'timestamp_event',
'platform_event',
'geo_location_event',
'source_id_doc_event',
'publisher_doc_event',
'publish_time_doc_event',
'traffic_source_pv',
'advertiser_id',
'publisher_id',
'campaign_id',
'document_id_event',
'category_id_list',
'confidence_level_cat_list',
'topic_id_list',
'confidence_level_top_list',
'entity_id_list',
'confidence_level_ent_list',
'doc_event_category_id_list',
'doc_event_confidence_level_cat_list',
'doc_event_topic_id_list',
'doc_event_confidence_level_top_list',
'doc_event_entity_id_list',
'doc_event_confidence_level_ent_list')) \
.select(F.col('uuid_event').alias('uuid'), 'display_id', 'ad_id', 'document_id_event',
F.col('document_id_promo').alias('document_id'), F.col('clicked').alias('label'),
'feature_vector')
if evaluation:
train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral_eval'
else:
train_feature_vector_gcs_folder_name = 'train_feature_vectors_integral'
train_set_feature_vectors_df.write.parquet(OUTPUT_BUCKET_FOLDER + train_feature_vector_gcs_folder_name,
mode='overwrite')
# # Export Validation/Test set feature vectors
def is_leak(max_timestamp_pv_leak, timestamp_event):
return max_timestamp_pv_leak >= 0 and max_timestamp_pv_leak >= timestamp_event
is_leak_udf = F.udf(lambda max_timestamp_pv_leak, timestamp_event: int(is_leak(max_timestamp_pv_leak, timestamp_event)),
IntegerType())
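# Illustrative check: a page view at or after the event timestamp counts as a leak
print(is_leak(100, 50), is_leak(-1, 50))  # expected: True False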
if evaluation:
data_df = validation_set_df
else:
data_df = test_set_df
test_validation_set_enriched_df = data_df.select(
'display_id', 'uuid_event', 'event_country', 'event_country_state', 'platform_event',
'source_id_doc_event', 'publisher_doc_event', 'publish_time_doc_event',
'publish_time',
'ad_id', 'document_id_promo', 'clicked',
'geo_location_event', 'advertiser_id', 'publisher_id',
'campaign_id', 'document_id_event',
'traffic_source_pv',
int_list_null_to_empty_list_udf('doc_event_category_id_list')
.alias('doc_event_category_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_cat_list')
.alias('doc_event_confidence_level_cat_list'),
int_list_null_to_empty_list_udf('doc_event_topic_id_list')
.alias('doc_event_topic_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_top_list')
.alias('doc_event_confidence_level_top_list'),
str_list_null_to_empty_list_udf('doc_event_entity_id_list')
.alias('doc_event_entity_id_list'),
float_list_null_to_empty_list_udf('doc_event_confidence_level_ent_list')
.alias('doc_event_confidence_level_ent_list'),
int_null_to_minus_one_udf('source_id')
.alias('source_id'),
int_null_to_minus_one_udf('timestamp_event').alias('timestamp_event'),
int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'),
float_list_null_to_empty_list_udf('confidence_level_cat_list')
.alias('confidence_level_cat_list'),
int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'),
float_list_null_to_empty_list_udf('confidence_level_top_list')
.alias('confidence_level_top_list'),
str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'),
float_list_null_to_empty_list_udf('confidence_level_ent_list')
.alias('confidence_level_ent_list'),
int_null_to_minus_one_udf('max_timestamp_pv').alias('max_timestamp_pv_leak')) \
.join(user_profiles_df, on=[F.col("user_profiles.uuid") == F.col("uuid_event")], how='left') \
.withColumnRenamed('categories', 'user_categories') \
.withColumnRenamed('topics', 'user_topics') \
.withColumnRenamed('entities', 'user_entities') \
.withColumnRenamed('doc_ids', 'user_doc_ids_viewed') \
.withColumnRenamed('views', 'user_views_count')
test_validation_set_feature_vectors_df = test_validation_set_enriched_df \
.withColumn('feature_vector',
get_ad_feature_vector_integral_udf(
'user_doc_ids_viewed',
'user_views_count',
'user_categories',
'user_topics',
'user_entities',
'event_country',
'event_country_state',
'ad_id',
'document_id_promo',
'source_id',
'publish_time',
'timestamp_event',
'platform_event',
'geo_location_event',
'source_id_doc_event',
'publisher_doc_event',
'publish_time_doc_event',
'traffic_source_pv',
'advertiser_id',
'publisher_id',
'campaign_id',
'document_id_event',
'category_id_list',
'confidence_level_cat_list',
'topic_id_list',
'confidence_level_top_list',
'entity_id_list',
'confidence_level_ent_list',
'doc_event_category_id_list',
'doc_event_confidence_level_cat_list',
'doc_event_topic_id_list',
'doc_event_confidence_level_top_list',
'doc_event_entity_id_list',
'doc_event_confidence_level_ent_list')) \
.select(F.col('uuid').alias('uuid'), 'display_id', 'ad_id', 'document_id_event',
F.col('document_id_promo').alias('document_id'), F.col('clicked').alias('label'),
is_leak_udf('max_timestamp_pv_leak', 'timestamp_event').alias('is_leak'),
'feature_vector')
if evaluation:
test_validation_feature_vector_gcs_folder_name = 'validation_feature_vectors_integral'
else:
test_validation_feature_vector_gcs_folder_name = 'test_feature_vectors_integral'
test_validation_set_feature_vectors_df.write.parquet(
OUTPUT_BUCKET_FOLDER + test_validation_feature_vector_gcs_folder_name, mode='overwrite')
spark.stop()
|
{"hexsha": "95e40fb879d2625ccef645ebb6a55bf6c1a59307", "size": 91445, "ext": "py", "lang": "Python", "max_stars_repo_path": "DeepLearningExamples/TensorFlow/Recommendation/WideAndDeep/preproc/preproc3.py", "max_stars_repo_name": "puririshi98/benchmark", "max_stars_repo_head_hexsha": "79f554f1e1cf36f62994c78e0e6e5b360f554022", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DeepLearningExamples/TensorFlow/Recommendation/WideAndDeep/preproc/preproc3.py", "max_issues_repo_name": "puririshi98/benchmark", "max_issues_repo_head_hexsha": "79f554f1e1cf36f62994c78e0e6e5b360f554022", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DeepLearningExamples/TensorFlow/Recommendation/WideAndDeep/preproc/preproc3.py", "max_forks_repo_name": "puririshi98/benchmark", "max_forks_repo_head_hexsha": "79f554f1e1cf36f62994c78e0e6e5b360f554022", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.7271333674, "max_line_length": 120, "alphanum_fraction": 0.6712668817, "include": true, "reason": "import numpy", "num_tokens": 20505}
|
# -*- coding: utf-8 -*-
# @Author: Marte
# @Date: 2017-12-18 10:08:04
# @Last Modified by: Marte
# @Last Modified time: 2017-12-18 10:08:12
'''
Created on 21 nov. 2011
@author: Antoine Vacavant, ISIT lab, antoine.vacavant@iut.u-clermont1.fr, http://isit.u-clermont1.fr/~anvacava
Modified by Christopher Godfrey, on 17 July 2012 (lines 32-34)
'''
import numpy
import scipy.ndimage
from numpy import exp
from scipy.constants import pi
'''
The function to compute SSIM
@param img_mat_1: 1st 2D matrix
@param img_mat_2: 2nd 2D matrix
'''
def compute_ssim(img_mat_1, img_mat_2):
#Variables for Gaussian kernel definition
gaussian_kernel_sigma=1.5
gaussian_kernel_width=11
gaussian_kernel=numpy.zeros((gaussian_kernel_width,gaussian_kernel_width))
#Fill Gaussian kernel
for i in range(gaussian_kernel_width):
for j in range(gaussian_kernel_width):
gaussian_kernel[i,j]=\
(1/(2*pi*(gaussian_kernel_sigma**2)))*\
exp(-(((i-5)**2)+((j-5)**2))/(2*(gaussian_kernel_sigma**2)))
#Convert image matrices to double precision (like in the Matlab version)
    img_mat_1=img_mat_1.astype(numpy.float64)
    img_mat_2=img_mat_2.astype(numpy.float64)
#Squares of input matrices
img_mat_1_sq=img_mat_1**2
img_mat_2_sq=img_mat_2**2
img_mat_12=img_mat_1*img_mat_2
#Means obtained by Gaussian filtering of inputs
    img_mat_mu_1=scipy.ndimage.convolve(img_mat_1,gaussian_kernel)
    img_mat_mu_2=scipy.ndimage.convolve(img_mat_2,gaussian_kernel)
#Squares of means
img_mat_mu_1_sq=img_mat_mu_1**2
img_mat_mu_2_sq=img_mat_mu_2**2
img_mat_mu_12=img_mat_mu_1*img_mat_mu_2
#Variances obtained by Gaussian filtering of inputs' squares
    img_mat_sigma_1_sq=scipy.ndimage.convolve(img_mat_1_sq,gaussian_kernel)
    img_mat_sigma_2_sq=scipy.ndimage.convolve(img_mat_2_sq,gaussian_kernel)
    #Covariance
    img_mat_sigma_12=scipy.ndimage.convolve(img_mat_12,gaussian_kernel)
#Centered squares of variances
img_mat_sigma_1_sq=img_mat_sigma_1_sq-img_mat_mu_1_sq
img_mat_sigma_2_sq=img_mat_sigma_2_sq-img_mat_mu_2_sq
    img_mat_sigma_12=img_mat_sigma_12-img_mat_mu_12
    #c1/c2 stabilization constants
    #First option: manually fitted values (identical to the formulas below for L=255)
    c_1=6.5025
    c_2=58.5225
    #Second option (overrides the above): derive c1,c2 from k1,k2 and L (dynamic range of the color map)
l=255
k_1=0.01
c_1=(k_1*l)**2
k_2=0.03
c_2=(k_2*l)**2
#Numerator of SSIM
num_ssim=(2*img_mat_mu_12+c_1)*(2*img_mat_sigma_12+c_2)
#Denominator of SSIM
den_ssim=(img_mat_mu_1_sq+img_mat_mu_2_sq+c_1)*\
(img_mat_sigma_1_sq+img_mat_sigma_2_sq+c_2)
#SSIM
ssim_map=num_ssim/den_ssim
index=numpy.average(ssim_map)
return index
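if __name__ == "__main__":
    # Minimal self-test (illustrative): SSIM of an image with itself should be ~1.0
    # and should drop once noise is added
    rng = numpy.random.RandomState(0)
    img = rng.rand(64, 64) * 255
    print("SSIM(img, img) =", compute_ssim(img, img))
    print("SSIM(img, img + noise) =", compute_ssim(img, img + rng.randn(64, 64) * 25))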
|
{"hexsha": "c441ca43eddfe0d0abca5615e413d356b7e4210e", "size": 2775, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyssim.py", "max_stars_repo_name": "lby314xx/MLP-coursework", "max_stars_repo_head_hexsha": "1e3daa9ebe40b247e051fc03a6624c5bac081607", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyssim.py", "max_issues_repo_name": "lby314xx/MLP-coursework", "max_issues_repo_head_hexsha": "1e3daa9ebe40b247e051fc03a6624c5bac081607", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyssim.py", "max_forks_repo_name": "lby314xx/MLP-coursework", "max_forks_repo_head_hexsha": "1e3daa9ebe40b247e051fc03a6624c5bac081607", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8965517241, "max_line_length": 110, "alphanum_fraction": 0.7362162162, "include": true, "reason": "import numpy,from numpy,import scipy,from scipy", "num_tokens": 904}
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
(* prefix:
gga_x_ft97_params *params;
assert(p->params != NULL);
params = (gga_x_ft97_params * )(p->params);
*)
ft97_beta := (rs, z, xs) -> params_a_beta0
+ params_a_beta1*sigma_spin(rs, z, xs)/(params_a_beta2 + sigma_spin(rs, z, xs)):
ft97_fx := (rs, z, xs) -> 1 + ft97_beta(rs, z, xs)*xs^2 /
(X_FACTOR_C*sqrt(1 + 9*xs^2*ft97_beta(rs, z, xs)^2*arcsinh(xs^2)^2)):
f := (rs, zeta, xt, xs0, xs1) -> gga_exchange_nsp(ft97_fx, rs, zeta, xs0, xs1):
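(* For reference, restating the definitions above: the FT97 enhancement factor is
   F(x) = 1 + beta(x)*x^2 / (X_FACTOR_C*sqrt(1 + 9*beta(x)^2*x^2*arcsinh(x^2)^2)),
   where beta interpolates from beta0 towards beta0 + beta1 as sigma_spin grows. *)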
|
{"hexsha": "88362e1bdc41d92b80992b956c1d70c2e11bfaad", "size": 717, "ext": "mpl", "lang": "Maple", "max_stars_repo_path": "libxc-5.1.6/maple/gga_exc/gga_x_ft97.mpl", "max_stars_repo_name": "pwang234/lsms", "max_stars_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2018-04-03T15:35:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T03:19:23.000Z", "max_issues_repo_path": "libxc-5.1.6/maple/gga_exc/gga_x_ft97.mpl", "max_issues_repo_name": "pwang234/lsms", "max_issues_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-07-30T13:59:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:43:35.000Z", "max_forks_repo_path": "libxc-5.1.6/maple/gga_exc/gga_x_ft97.mpl", "max_forks_repo_name": "pwang234/lsms", "max_forks_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-06-30T00:30:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T09:14:29.000Z", "avg_line_length": 29.875, "max_line_length": 82, "alphanum_fraction": 0.6513249651, "num_tokens": 262}
|
[STATEMENT]
lemma product_run_embed_limit_finiteness:
fixes \<iota>\<^sub>m \<delta>\<^sub>m w q\<^sub>0 k
defines "\<rho> \<equiv> run\<^sub>t (\<Delta>\<^sub>\<times> \<delta>\<^sub>m) \<iota>\<^sub>m w"
defines "\<rho>' \<equiv> run\<^sub>t (\<delta>\<^sub>m k) q\<^sub>0 w"
assumes "\<iota>\<^sub>m k = Some q\<^sub>0"
assumes "finite (range \<rho>)"
shows "limit \<rho> \<inter> (\<Union> (\<upharpoonleft>\<^sub>k ` S)) = {} \<longleftrightarrow> limit \<rho>' \<inter> S = {}"
(is "?lhs \<longleftrightarrow> ?rhs")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
have "\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<longrightarrow> limit \<rho> \<inter> (\<Union> (\<upharpoonleft>\<^sub>k ` S)) \<noteq> {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
assume "\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {}"
[PROOF STATE]
proof (state)
this:
\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {}
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {}
[PROOF STEP]
obtain q \<nu> q' where "(q, \<nu>, q') \<in> \<downharpoonleft>\<^sub>k ` limit \<rho>" and "(q, \<nu>, q') \<in> S"
[PROOF STATE]
proof (prove)
using this:
\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {}
goal (1 subgoal):
1. (\<And>q \<nu> q'. \<lbrakk>(q, \<nu>, q') \<in> \<downharpoonleft>\<^sub>k ` limit \<rho>; (q, \<nu>, q') \<in> S\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(q, \<nu>, q') \<in> \<downharpoonleft>\<^sub>k ` limit \<rho>
(q, \<nu>, q') \<in> S
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(q, \<nu>, q') \<in> \<downharpoonleft>\<^sub>k ` limit \<rho>
(q, \<nu>, q') \<in> S
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
have "\<And>m \<nu> m' i. (m, \<nu>, m') = \<rho> i \<Longrightarrow> \<exists>p p'. m k = Some p \<and> m' k = Some p'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m \<nu> m' i. (m, \<nu>, m') = \<rho> i \<Longrightarrow> \<exists>p p'. m k = Some p \<and> m' k = Some p'
[PROOF STEP]
using assms product_run_Some[of \<iota>\<^sub>m , OF assms(3)]
[PROOF STATE]
proof (prove)
using this:
\<rho> \<equiv> run\<^sub>t (\<Delta>\<^sub>\<times> \<delta>\<^sub>m) \<iota>\<^sub>m w
\<rho>' \<equiv> run\<^sub>t (\<delta>\<^sub>m k) q\<^sub>0 w
\<iota>\<^sub>m k = Some q\<^sub>0
finite (range \<rho>)
run (\<Delta>\<^sub>\<times> ?\<delta>\<^sub>m) \<iota>\<^sub>m ?w ?i k = Some (run (?\<delta>\<^sub>m k) q\<^sub>0 ?w ?i)
goal (1 subgoal):
1. \<And>m \<nu> m' i. (m, \<nu>, m') = \<rho> i \<Longrightarrow> \<exists>p p'. m k = Some p \<and> m' k = Some p'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(?m, ?\<nu>, ?m') = \<rho> ?i \<Longrightarrow> \<exists>p p'. ?m k = Some p \<and> ?m' k = Some p'
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
hence "\<And>m \<nu> m'. (m, \<nu>, m') \<in> limit \<rho> \<Longrightarrow> \<exists>p p'. m k = Some p \<and> m' k = Some p'"
[PROOF STATE]
proof (prove)
using this:
(?m, ?\<nu>, ?m') = \<rho> ?i \<Longrightarrow> \<exists>p p'. ?m k = Some p \<and> ?m' k = Some p'
goal (1 subgoal):
1. \<And>m \<nu> m'. (m, \<nu>, m') \<in> limit \<rho> \<Longrightarrow> \<exists>p p'. m k = Some p \<and> m' k = Some p'
[PROOF STEP]
using limit_in_range
[PROOF STATE]
proof (prove)
using this:
(?m, ?\<nu>, ?m') = \<rho> ?i \<Longrightarrow> \<exists>p p'. ?m k = Some p \<and> ?m' k = Some p'
limit ?r \<subseteq> range ?r
goal (1 subgoal):
1. \<And>m \<nu> m'. (m, \<nu>, m') \<in> limit \<rho> \<Longrightarrow> \<exists>p p'. m k = Some p \<and> m' k = Some p'
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
(?m, ?\<nu>, ?m') \<in> limit \<rho> \<Longrightarrow> \<exists>p p'. ?m k = Some p \<and> ?m' k = Some p'
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(q, \<nu>, q') \<in> \<downharpoonleft>\<^sub>k ` limit \<rho>
(q, \<nu>, q') \<in> S
(?m, ?\<nu>, ?m') \<in> limit \<rho> \<Longrightarrow> \<exists>p p'. ?m k = Some p \<and> ?m' k = Some p'
[PROOF STEP]
obtain m m' where "m k = Some q" and "m' k = Some q'" and "(m, \<nu>, m') \<in> limit \<rho>"
[PROOF STATE]
proof (prove)
using this:
(q, \<nu>, q') \<in> \<downharpoonleft>\<^sub>k ` limit \<rho>
(q, \<nu>, q') \<in> S
(?m, ?\<nu>, ?m') \<in> limit \<rho> \<Longrightarrow> \<exists>p p'. ?m k = Some p \<and> ?m' k = Some p'
goal (1 subgoal):
1. (\<And>m m'. \<lbrakk>m k = Some q; m' k = Some q'; (m, \<nu>, m') \<in> limit \<rho>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
m k = Some q
m' k = Some q'
(m, \<nu>, m') \<in> limit \<rho>
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
m k = Some q
m' k = Some q'
(m, \<nu>, m') \<in> limit \<rho>
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
hence "(m, \<nu>, m') \<in> \<Union> (\<upharpoonleft>\<^sub>k ` S)"
[PROOF STATE]
proof (prove)
using this:
m k = Some q
m' k = Some q'
(m, \<nu>, m') \<in> limit \<rho>
goal (1 subgoal):
1. (m, \<nu>, m') \<in> \<Union> (\<upharpoonleft>\<^sub>k ` S)
[PROOF STEP]
using \<open>(q, \<nu>, q') \<in> S\<close>
[PROOF STATE]
proof (prove)
using this:
m k = Some q
m' k = Some q'
(m, \<nu>, m') \<in> limit \<rho>
(q, \<nu>, q') \<in> S
goal (1 subgoal):
1. (m, \<nu>, m') \<in> \<Union> (\<upharpoonleft>\<^sub>k ` S)
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
(m, \<nu>, m') \<in> \<Union> (\<upharpoonleft>\<^sub>k ` S)
goal (1 subgoal):
1. \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<Longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
m k = Some q
m' k = Some q'
(m, \<nu>, m') \<in> limit \<rho>
(m, \<nu>, m') \<in> \<Union> (\<upharpoonleft>\<^sub>k ` S)
[PROOF STEP]
show "limit \<rho> \<inter> (\<Union> (\<upharpoonleft>\<^sub>k ` S)) \<noteq> {}"
[PROOF STATE]
proof (prove)
using this:
m k = Some q
m' k = Some q'
(m, \<nu>, m') \<in> limit \<rho>
(m, \<nu>, m') \<in> \<Union> (\<upharpoonleft>\<^sub>k ` S)
goal (1 subgoal):
1. limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
goal (1 subgoal):
1. (limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
hence "?lhs \<longleftrightarrow> \<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S = {}"
[PROOF STATE]
proof (prove)
using this:
\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S \<noteq> {} \<longrightarrow> limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) \<noteq> {}
goal (1 subgoal):
1. (limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S = {})
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S = {})
goal (1 subgoal):
1. (limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S = {})
goal (1 subgoal):
1. (limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
have "\<dots> \<longleftrightarrow> ?rhs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
using assms product_run_project_limit[of _ _ _ \<delta>\<^sub>m]
[PROOF STATE]
proof (prove)
using this:
\<rho> \<equiv> run\<^sub>t (\<Delta>\<^sub>\<times> \<delta>\<^sub>m) \<iota>\<^sub>m w
\<rho>' \<equiv> run\<^sub>t (\<delta>\<^sub>m k) q\<^sub>0 w
\<iota>\<^sub>m k = Some q\<^sub>0
finite (range \<rho>)
\<lbrakk>?\<iota>\<^sub>m ?x = Some ?q\<^sub>0; finite (range (run\<^sub>t (\<Delta>\<^sub>\<times> \<delta>\<^sub>m) ?\<iota>\<^sub>m ?w))\<rbrakk> \<Longrightarrow> \<downharpoonleft>\<^sub>?x ` limit (run\<^sub>t (\<Delta>\<^sub>\<times> \<delta>\<^sub>m) ?\<iota>\<^sub>m ?w) = limit (run\<^sub>t (\<delta>\<^sub>m ?x) ?q\<^sub>0 ?w)
goal (1 subgoal):
1. (\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(\<downharpoonleft>\<^sub>k ` limit \<rho> \<inter> S = {}) = (limit \<rho>' \<inter> S = {})
goal (1 subgoal):
1. (limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
goal (1 subgoal):
1. (limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(limit \<rho> \<inter> \<Union> (\<upharpoonleft>\<^sub>k ` S) = {}) = (limit \<rho>' \<inter> S = {})
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4826, "file": "LTL_to_DRA_DTS", "length": 35}
|
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
from sklearn.calibration import calibration_curve
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from io import BytesIO
from sklearn.metrics import confusion_matrix
import seaborn as sns
import numpy as np
import pandas as pd
from collections import Counter
def plot_calibration_curve(y, y_proba):
fig, ax = plt.subplots()
prob_true_1, prob_pred_1 = calibration_curve(y, y_proba, n_bins=12)
ax.plot([0, 1], [0, 1], linestyle='--', label='Perfect calibration')
    # calibration_curve returns (prob_true, prob_pred); plot predicted on x, observed on y
    ax.plot(prob_pred_1, prob_true_1, marker='.')
ax.set(xlabel='Average predicted probability in each bin', ylabel='Ratio of positives')
memfile = BytesIO()
plt.savefig(memfile)
return memfile
def plot_precision_recall_curve(y, y_proba):
fig, ax = plt.subplots()
precision, recall, _ = precision_recall_curve(y, y_proba)
average_precision = average_precision_score(y, y_proba)
ax.plot(precision, recall, label=f'AP = {average_precision:0.2f}')
ax.set(xlabel='Precision', ylabel='Recall')
ax.legend(loc="lower left")
memfile = BytesIO()
plt.savefig(memfile)
return memfile
def plot_roc_curve(y, y_proba):
fig, ax = plt.subplots()
fpr, tpr, _ = roc_curve(y, y_proba)
roc_auc = roc_auc_score(y, y_proba)
ax.plot(fpr, tpr, label=f'ROC = {roc_auc:0.2f}')
ax.set(xlabel='False Positive Rate', ylabel='True Positive Rate')
ax.legend(loc="lower right")
memfile = BytesIO()
plt.savefig(memfile)
return memfile
def plot_confusion_matrix(y, y_proba):
y_pred = y_proba > 0.5
fig, ax = plt.subplots()
confm = confusion_matrix(y, y_pred)
    # Normalize so each true-label row sums to 1
confm = confm.astype('float') / confm.sum(axis=1)[:, np.newaxis]
ax = sns.heatmap(confm, cmap='Oranges', annot=True)
ax.set(xlabel='Predicted label', ylabel='True label')
memfile = BytesIO()
plt.savefig(memfile)
return memfile
def plot_histogram(y, y_proba):
fig, ax = plt.subplots()
predictions = {
'predicted label': y_proba,
'True label': y
}
df = pd.DataFrame.from_dict(predictions)
sns.kdeplot(data=df, x='predicted label', hue='True label', hue_order=sorted(list(Counter(y).keys())))
#colors = ['cornflowerblue', 'gold']
# for i, label in enumerate(sorted(list(Counter(y).keys()))):
# # Subset
# subset = df[df['true label'] == label]
#
# # Draw the density plot
# sns.histplot(data=subset['predicted label'], kde=True, label=label, color=colors[i])
# sns.distplot(subset['predicted label'], hist=False, kde=True,
# kde_kws={'linewidth': 2},
# label=label)
    ax.set_yticks([])
ax.set_xlim(0, 1)
#plt.legend(prop={'size': 5}, title='Predicted label')
plt.title('Density of predicted labels')
plt.xlabel('Probability')
plt.ylabel('Density')
memfile = BytesIO()
plt.savefig(memfile)
return memfile
def plot_param_metric_relation(data, x, y, height):
fig, ax = plt.subplots()
if len(Counter(data[x])) > 8 and pd.api.types.is_numeric_dtype(data[x]):
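        # Bin numeric params with many distinct values into 8 intervals; the trailing-space
        # column name avoids clobbering the original column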
data[x + ' '] = pd.cut(data[x], bins=8)
data[x + ' '] = data[x + ' '].apply(lambda w: np.round(w.mid, 3))
sns.violinplot(data=data[[x + ' ', y]], x=x + ' ', y=y, height=height, ax=ax)
data.drop(columns=x + ' ', inplace=True)
else:
ax = sns.violinplot(data=data, x=x, y=y, height=height)
memfile = BytesIO()
plt.savefig(memfile)
return memfile
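if __name__ == "__main__":
    # Minimal smoke test (illustrative): plot an ROC curve for noisy synthetic scores
    # and dump the PNG bytes to a hypothetical local file
    rng = np.random.RandomState(0)
    y = rng.randint(0, 2, size=500)
    y_proba = np.clip(y * 0.6 + rng.rand(500) * 0.5, 0, 1)
    memfile = plot_roc_curve(y, y_proba)
    with open('roc_curve_demo.png', 'wb') as f:
        f.write(memfile.getvalue())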
|
{"hexsha": "5b4ed1f3900cbe7f9902cb2a36371bf7e37bba5f", "size": 3648, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/nestedcvtraining/utils/plotting.py", "max_stars_repo_name": "JaimeArboleda/nestedcvtraining", "max_stars_repo_head_hexsha": "6bcf2fc2fdcb655c36f08d9634df2449f1151907", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-03-29T09:45:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T20:56:40.000Z", "max_issues_repo_path": "nestedcvtraining/utils/plotting.py", "max_issues_repo_name": "Sandy4321/nestedcvtraining", "max_issues_repo_head_hexsha": "6bcf2fc2fdcb655c36f08d9634df2449f1151907", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nestedcvtraining/utils/plotting.py", "max_forks_repo_name": "Sandy4321/nestedcvtraining", "max_forks_repo_head_hexsha": "6bcf2fc2fdcb655c36f08d9634df2449f1151907", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-06-25T00:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T17:51:04.000Z", "avg_line_length": 35.0769230769, "max_line_length": 106, "alphanum_fraction": 0.6573464912, "include": true, "reason": "import numpy", "num_tokens": 969}
|
#!/usr/bin/env python
r"""
Visualize UMAP embeddings
"""
import argparse
import pathlib
import anndata
import numpy as np
import pandas as pd
import scanpy as sc
from matplotlib import rcParams
import scglue
scglue.plot.set_publication_params()
def parse_args() -> argparse.Namespace:
r"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(
description="Visualize UMAP embeddings"
)
parser.add_argument(
"-d", "--datesets", dest="datasets", type=pathlib.Path, required=True,
nargs="+", help="Path to datasets (.h5ad)"
)
parser.add_argument(
"-u", "--umaps", dest="umaps", type=pathlib.Path, required=True,
nargs="+", help="Path to umap embeddings (.csv)"
)
parser.add_argument(
"-l", "--label", dest="label", type=str, required=True,
help="Cell label (column name in `obs`) used for coloring"
)
parser.add_argument(
"-t", "--title", dest="title", type=str, default=None,
help="Plot title (by default same as `--label`)"
)
parser.add_argument(
"--figsize", dest="figsize", type=float, default=5.0,
help="Figure size"
)
parser.add_argument(
"-o", "--output", dest="output", type=pathlib.Path, required=True,
help="Path to output plot file"
)
return parser.parse_args()
def main(args: argparse.Namespace) -> None:
r"""
Main function
"""
if len(args.datasets) != len(args.umaps):
raise RuntimeError("Datasets and umaps should have the same number of entries!")
print("[1/2] Reading data...")
label = np.concatenate([
anndata.read_h5ad(dataset, backed="r").obs[args.label]
for dataset in args.datasets
])
umap = np.concatenate([
pd.read_csv(item, header=None, index_col=0)
for item in args.umaps
])
if label.shape[0] != umap.shape[0]:
raise RuntimeError("Label and UMAP should have the same number of cells!")
shuffle = np.random.RandomState(0).permutation(label.shape[0])
adata = anndata.AnnData(
X=np.empty((label.shape[0], 0)),
obs=pd.DataFrame({
args.label: pd.Categorical(label[shuffle], categories=np.unique(label))
}, index=pd.RangeIndex(label.shape[0]).astype(str)),
obsm={"X_umap": umap[shuffle]}
)
print("[2/2] Plotting...")
args.output.parent.mkdir(parents=True, exist_ok=True)
rcParams["figure.figsize"] = (args.figsize, args.figsize)
fig = sc.pl.umap(adata, color=args.label, title=args.title, return_fig=True)
fig.savefig(args.output)
if __name__ == "__main__":
main(parse_args())
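# Example invocation (hypothetical file names; flags as defined in parse_args above):
#   python visualize_umap.py \
#       -d rna.h5ad atac.h5ad -u rna_umap.csv atac_umap.csv \
#       -l cell_type -t "Cell type" -o combined_umap.pdf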
|
{"hexsha": "5b1639cf5edf3ce7defa6de383cddf1cba1a7adc", "size": 2663, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation/workflow/scripts/visualize_umap.py", "max_stars_repo_name": "gao-lab/GLUE", "max_stars_repo_head_hexsha": "e84cb6483971dcb1e2485080f812899baaf31b5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2021-08-23T07:29:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T00:29:52.000Z", "max_issues_repo_path": "evaluation/workflow/scripts/visualize_umap.py", "max_issues_repo_name": "gao-lab/GLUE", "max_issues_repo_head_hexsha": "e84cb6483971dcb1e2485080f812899baaf31b5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-11-25T21:25:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T02:22:57.000Z", "max_forks_repo_path": "evaluation/workflow/scripts/visualize_umap.py", "max_forks_repo_name": "gao-lab/GLUE", "max_forks_repo_head_hexsha": "e84cb6483971dcb1e2485080f812899baaf31b5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-10-05T07:24:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T22:46:16.000Z", "avg_line_length": 29.2637362637, "max_line_length": 88, "alphanum_fraction": 0.6297408937, "include": true, "reason": "import numpy", "num_tokens": 655}
|
/-
Copyright (c) 2014 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Jeremy Avigad, Andrew Zipperer
Using classical logic, defines an inverse function.
-/
import .function .map
open eq.ops classical
namespace set
variables {X Y : Type}
noncomputable definition inv_fun (f : X → Y) (a : set X) (dflt : X) (y : Y) : X :=
if H : ∃₀ x ∈ a, f x = y then some H else dflt
theorem inv_fun_pos {f : X → Y} {a : set X} {dflt : X} {y : Y}
(H : ∃₀ x ∈ a, f x = y) : (inv_fun f a dflt y ∈ a) ∧ (f (inv_fun f a dflt y) = y) :=
have H1 : inv_fun f a dflt y = some H, from dif_pos H,
H1⁻¹ ▸ some_spec H
theorem inv_fun_neg {f : X → Y} {a : set X} {dflt : X} {y : Y}
(H : ¬ ∃₀ x ∈ a, f x = y) : inv_fun f a dflt y = dflt :=
dif_neg H
variables {f : X → Y} {a : set X} {b : set Y}
theorem maps_to_inv_fun {dflt : X} (dflta : dflt ∈ a) :
maps_to (inv_fun f a dflt) b a :=
let f' := inv_fun f a dflt in
take y,
assume yb : y ∈ b,
show f' y ∈ a, from
by_cases
(assume H : ∃₀ x ∈ a, f x = y,
and.left (inv_fun_pos H))
(assume H : ¬ ∃₀ x ∈ a, f x = y,
(inv_fun_neg H)⁻¹ ▸ dflta)
theorem left_inv_on_inv_fun_of_inj_on (dflt : X) (H : inj_on f a) :
left_inv_on (inv_fun f a dflt) f a :=
let f' := inv_fun f a dflt in
take x,
assume xa : x ∈ a,
have H1 : ∃₀ x' ∈ a, f x' = f x, from exists.intro x (and.intro xa rfl),
have H2 : f' (f x) ∈ a ∧ f (f' (f x)) = f x, from inv_fun_pos H1,
show f' (f x) = x, from H (and.left H2) xa (and.right H2)
theorem surj_on_inv_fun_of_inj_on (dflt : X) (mapsto : maps_to f a b) (H : inj_on f a) :
surj_on (inv_fun f a dflt) b a :=
surj_on_of_right_inv_on mapsto (left_inv_on_inv_fun_of_inj_on dflt H)
theorem right_inv_on_inv_fun_of_surj_on (dflt : X) (H : surj_on f a b) :
right_inv_on (inv_fun f a dflt) f b :=
let f' := inv_fun f a dflt in
take y,
assume yb: y ∈ b,
obtain x (Hx : x ∈ a ∧ f x = y), from H yb,
have Hy : f' y ∈ a ∧ f (f' y) = y, from inv_fun_pos (exists.intro x Hx),
and.right Hy
theorem inj_on_inv_fun (dflt : X) (H : surj_on f a b) :
inj_on (inv_fun f a dflt) b :=
inj_on_of_left_inv_on (right_inv_on_inv_fun_of_surj_on dflt H)
end set
open set
namespace map
variables {X Y : Type} {a : set X} {b : set Y}
protected noncomputable definition inverse (f : map a b) {dflt : X} (dflta : dflt ∈ a) :=
map.mk (inv_fun f a dflt) (@maps_to_inv_fun _ _ _ _ b _ dflta)
theorem left_inverse_inverse {f : map a b} {dflt : X} (dflta : dflt ∈ a) (H : map.injective f) :
map.left_inverse (map.inverse f dflta) f :=
left_inv_on_inv_fun_of_inj_on dflt H
theorem right_inverse_inverse {f : map a b} {dflt : X} (dflta : dflt ∈ a) (H : map.surjective f) :
map.right_inverse (map.inverse f dflta) f :=
right_inv_on_inv_fun_of_surj_on dflt H
theorem is_inverse_inverse {f : map a b} {dflt : X} (dflta : dflt ∈ a) (H : map.bijective f) :
map.is_inverse (map.inverse f dflta) f :=
and.intro
(left_inverse_inverse dflta (and.left H))
(right_inverse_inverse dflta (and.right H))
end map
|
{"author": "Bolt64", "repo": "lean2-aur", "sha": "1d7148e58a17b2d326b032ed1ebf8c5217320242", "save_path": "github-repos/lean/Bolt64-lean2-aur", "path": "github-repos/lean/Bolt64-lean2-aur/lean2-aur-1d7148e58a17b2d326b032ed1ebf8c5217320242/library/data/set/classical_inverse.lean"}
|
# -*- coding: utf-8 -*-
##
# \file init_fdtd_ground.py
# \title Definition of the numerical parameters for the FDTD method.
# The updated scheme that gives the pressure
# at each time iteration is defined in the upd_fdtd.py files.
# It is applied for the grid convergence studies.
# \author Pierre Chobeau
# \version 0.1
# \license BSD 3-Clause License
# \inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)
# \date 2017, 09 Aug.
##
import numpy as np
import os
import site
base_path = reduce(lambda l, r: l + os.path.sep + r,
os.path.dirname(os.path.realpath(__file__)).split(os.path.sep))
fdtd_core_path = os.path.join(base_path, 'fdtd_core')
site.addsitedir(fdtd_core_path)
from upd_fdtd import upd_p_fdtd_srl, upd_vel_pbc_fdtd
tools_path = os.path.join(base_path.rsplit(os.sep, 2)[0], 'tools')
site.addsitedir(tools_path)
from get_imped_coefts import get_coefts_Miki
import source_signals as src
data_plotting_path = os.path.join(base_path.rsplit(os.sep, 2)[0], 'data_plotting')
site.addsitedir(data_plotting_path)
from display_wavefronts import instatenous_pressure
def fdtd_srl_init_impgr(dt, dl, h_num, h_set, d_sr, h_s, h_r, T, f_max, rho, sigma, case, free_field, disp_inst_p):
"""
Setting the 2D geometries and running the FDTD update for case 3: ground reflection.
Main script that contains all the parameters to run the FDTD update in 2D.
:param dt: time step (s).
:type dt: float
:param dl: spatial step (m).
:type dl: float
:param h_num: spatial step index.
:type h_num: int
:param h_set: spatial step sequence (m).
:type h_set: list of floats
:param d_sr: horizontal distances between the source and the receivers (m).
:type d_sr: list of floats
:param h_s: height of the source (m).
:type h_s: float
:param h_r: height of the receiver (m).
:type h_r: float
:param T: simulation duration (s).
:type T: float
    :param f_max: approximate maximum frequency of the source signal (Hz).
:type f_max: float
:param rho: air density (kg.m-3).
:type rho: float
    :param sigma: specific airflow resistivity (kNm-4s==CGS).
:type sigma: float
    :param case: integer used to sort the saved folders in the results directory.
:type case: int
    :param free_field: if True, the domain is enlarged to emulate free-field conditions (no ground).
:type free_field: bool
:param disp_inst_p: display the instantaneous pressure.
:type disp_inst_p: bool
    :param src_typ: source type, string "gauss_1", "sine", "ricker", "dirac"...
    :param src_frq: source frequency, scalar (Hz).
:param dl: spatial step, scalar (m).
:param dt: time step, scalar (s).
:param Lx: continuous length of the domain (in meter) following the x-direction.
:param Ly: continuous length of the domain (in meter) following the y-direction.
:param Nx: discrete length of the domain (number of node) following the x-direction.
:param Ny: discrete length of the domain (number of node) following the y-direction.
:param x: discrete length sequence of a domain side, scalar (m).
:param dx: spatial step after discretization, scalar (m).
:param Nt: number of iteration time, scalar.
:param t: discretized time sequence, 1d array (s).
:param It: discretized time sequence, 1d array.
    :param Ts: time step after discretization, scalar (s).
:param Cn: Courant number, scalar.
:param p: updated pressure (n+1), numpy array (dimension of the scene).
:param p1: current pressure (n), numpy array (dimension of the scene).
:param p2: past pressure (n-1), numpy array (dimension of the scene).
:param fsrc: soft source (n+1), numpy array (dimension of the scene).
:param fsrc1: soft source (n), numpy array (dimension of the scene).
:param p_saved: pressure saved at the receiver location, 1d array (length of the time sequence).
:param A: inertance of the boundary, scalar.
:param B: stiffness of the boundary, scalar.
:param C: resistivity of the boundary, scalar.
:param Nb: boundary of the domain (1 if BC, 0 else) for the compact
pressure update, numpy array (dimension of the scene).
:param K: order of the recursive convolution method for the ground impedance, scalar.
:param a_k: residuals of the partial fraction expansion, list of K size.
:param gamma_k: poles of the partial fraction expansion, list of K size.
:param x_src: discrete x coordinate of the source, scalar (number of node).
:param y_src: discrete y coordinate of the source, scalar (number of node).
:param x_rcv: discrete x coordinate of the receiver, scalar (number of node).
:param y_rcv: discrete y coordinate of the receiver, scalar (number of node).
:param n: discrete iteration inside the for loop, scalar.
:return: the acoustic pressure at the pre-defined receivers' locations as a function of time.
:rtype: (2+1)D array of floats
"""
# ==============================================================================
# Source
# ==============================================================================
src_typ = "gauss_1"
src_frq = f_max
# ==============================================================================
# Parameters
# ==============================================================================
# dl_max = 0.075 * c * np.sqrt(2) / src_frq # 2% accuracy kowalczyk_ieee2011 ~ lambda/9.43
c = 340.00
dt = dl / (np.sqrt(2.) * c)
Lx = 3. * d_sr[-1]
if free_field:
Ly = 5. * max(h_s, h_r[-1])
else:
Ly = 2.2 * max(h_s, h_r[-1])
    Nx = np.int(np.round(Lx / dl))
    Ny = np.int(np.round(Ly / dl))
x = np.linspace(0, Lx, Nx + 1)
    dx = np.float64(x[1] - x[0])
    dx = round(dx, 5)
Nt = int(round(T / float(dt)))
t = np.linspace(0, Nt * dt, Nt + 1)
It = range(0, t.shape[0])
Ts = np.float64(t[1] - t[0])
Cn_lim = np.sqrt(2)**-1
c = np.float64(Cn_lim * dx / Ts)
# dt_coarse = 2 * 10 ** -4
# dl_coarse = h_set[-1]
# c = np.float64(Cn_lim * dl_coarse / (dt_coarse))
Cn = np.float64(c * Ts / dx)
# nNt = 1
# print '--------------------- Courant Number --------------------------'
# while Cn >= (1 / np.sqrt(2)):
# print 'COURANT NUMBER CORRECTION!!!'
# nNt = 1 + nNt
# t = np.linspace(0, Nt * dt, Nt + (nNt)) # time discretization
# It = range(0, t.shape[0] - 1) # time iterations range
# Ts = np.float64(t[1] - t[0]) # sampling period for staggered grid
# Cn = np.float64(c * Ts / dx)
# print 'Additional iterations for stab: %i' % (nNt)
# Nt = Nt + nNt # add the additional time steps to the main time sequence
# print 'Ratio Cn/Cn_th=%g < 1.0' % (Cn * np.sqrt(2))
src_dly = int(T / 2. / Ts)
if free_field:
print ' FDTD in free field '
else:
print ' FDTD above a ground '
print '-------------------------- Time -------------------------------'
print 'TIME-STEP: Ts=%0.2e s' % (Ts)
print 'NUMBER OF It: Nt=%i' % (Nt)
print 'DURATION: T=%.3e s,' % (T)
print 'SAMP. FREQ.: Fs=%.3f Hz,' % (1 / Ts)
print 'Sound speed: c =%.2f m.s-1' %c
# print 'BANDWIDTH: FMAX=%.3f Hz,' % (0.196 / Ts)
# print '2PERCENT ACCURACY: %.3f Hz,' % (0.075 / Ts)
# print 'COURANT NUMBER: Cn = %.3f' % (Cn)
# print 'Nodes/lambda = %.3f' % (c / src_frq / dx)
print '-------------------------- Space ------------------------------'
# print 'SPATIAL-STEP: dx=%g m, dl_max=%g m.' % (dx, dl_max)
print 'DIMENSIONS: Nx=%i cells; Ny=%i cells; Lx=%g m; Ly=%g m.' % (Nx, Ny, Lx, Ly)
# print 'IMPEDANCE: Yb=%f .' %(Yb)
print '---------------------- Source Signal --------------------------'
print 'SOURCE TYPE: %s,' % (src_typ)
print 'SOURCE FREQ: f=%g Hz.' % (src_frq)
print 'SOURCE DELAY: %0.2e s, %i n' %(src_dly*Ts,src_dly)
print '---------------------------------------------------------------'
# ==============================================================================
# Variables
# ==============================================================================
p = np.zeros((Nx + 1, Ny + 1), dtype=np.complex128)
p1 = np.zeros((Nx + 1, Ny + 1), dtype=np.complex128)
p2 = np.zeros((Nx + 1, Ny + 1), dtype=np.complex128)
fsrc = np.zeros((Nx + 1, Ny + 1), dtype=np.complex128)
fsrc1 = np.zeros((Nx + 1, Ny + 1), dtype=np.complex128)
fsrc2 = np.zeros((Nx + 1, Ny + 1), dtype=np.complex128)
p_saved = np.zeros((len(d_sr), len(h_r), Nt), dtype=np.complex128)
p_bc = np.zeros((Nx + 1, Ny + 1))
p1_bc = np.zeros((Nx + 1, Ny + 1))
v_x = np.zeros((Nx + 1, Ny + 1))
v1_x = np.zeros((Nx + 1, Ny + 1))
v_y = np.zeros((Nx + 1, Ny + 1))
v1_y = np.zeros((Nx + 1, Ny + 1))
psi_k = np.zeros((Nx + 1, Ny + 1, 6), dtype=np.complex128)
psi1_k = np.zeros((Nx + 1, Ny + 1, 6), dtype=np.complex128)
# ==============================================================================
# Boundaries of the domain, where the BC is calculated
# ==============================================================================
    A = 0.
    B = 1.
    C = 0.
Nb = np.zeros((Nx + 1, Ny + 1))
    i = 1
    Nb[i, 1:-1] = 1.
    i = p.shape[0] - 2
    Nb[i, 1:-1] = 1.
    j = Ny - 1
    Nb[1:-1, j] = 1.
    j = p.shape[1] - 2
    Nb[1:-1, j] = 1.
# ==============================================================================
# Set the ground parameters for the BC calculation (in upd_fdtd)
# ==============================================================================
K = 6
a_k, gamma_k, a_k_ncor = get_coefts_Miki(K, sigma)
# ==============================================================================
# Location of the source and receiver(s)
# ==============================================================================
x_src = int(round(Lx / 3 / dx))
x_rcv = [int(round(ii / dx) + x_src) for ii in d_sr]
if free_field:
y_src = int(round(h_s / dx) + round(Ny / 2.)) + 2 # BC length correction: 2*dx above, BC at index 1
y_rcv = [int(round(ii / dx) + round(Ny / 2.)) + 2 for ii in h_r]
else:
y_src = int(round(h_s / dx)) + 2
y_rcv = [int(round(ii / dx)) + 2 for ii in h_r]
# print y_src
# ==============================================================================
# Calculation of the pressure
# ==============================================================================
depth = 1
for n in It[:-1]:
# p1[x_src, y_src] = 1. * src.src_select(src_typ, t, n, src_frq, src_dly) # hard source impl.
        fsrc[x_src, y_src] = 1. * src.src_select(src_typ, t, n, src_frq, src_dly)  # soft source impl.
p = upd_p_fdtd_srl(p, p1, p2, fsrc, fsrc1, fsrc2,
Nb, c, rho, Ts, dx, Cn, A, B, C, depth)
# p = upd_vel_pbc_fdtd(p,c,rho,Ts,dx,
# v_x,v_y,v1_x,v1_y,
# p_bc,p1_bc,
# K,a_k,gamma_k,psi_k,psi1_k,depth)
if disp_inst_p:
instatenous_pressure(n, Nt, p, dx, Ts, Lx, Ly, case, False)
for d in range(len(d_sr)):
for h in range(len(h_r)):
p_saved[d, h, n] = p[x_rcv[d], y_rcv[h]]
fsrc1[:, :], fsrc2[:, :] = fsrc.copy(), fsrc1.copy()
p1[:, :], p2[:, :] = p.copy(), p1.copy()
p1_bc[:, :], v1_x[:, :], v1_y[:, :] = p_bc.copy(), v_x.copy(), v_y.copy()
res_path = os.path.join(base_path.rsplit(os.sep, 2)[0], 'results',
'case%i' % case, 'fdtd')
if not os.path.exists(res_path):
os.makedirs(res_path)
if free_field:
np.save(os.path.join(res_path, 't_%i.npy' % h_num), t)
np.save(os.path.join(res_path, 'Ts_%i.npy' % h_num), Ts)
np.save(os.path.join(res_path, 'p_%s_%i.npy' % ('f', h_num)), p_saved)
else:
np.save(os.path.join(res_path, 'p_%s_%i.npy' % ('t', h_num)), p_saved)
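# Hypothetical call, for illustration only (argument values are not from the
# original test cases):
#   fdtd_srl_init_impgr(dt=2e-4, dl=0.1, h_num=0, h_set=[0.1], d_sr=[10., 20.],
#                       h_s=2., h_r=[2.], T=0.1, f_max=500., rho=1.2, sigma=200.,
#                       case=3, free_field=False, disp_inst_p=False)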
|
{"hexsha": "01294ae84f518311090155506add511ab810bf2b", "size": 12566, "ext": "py", "lang": "Python", "max_stars_repo_path": "num_methods/fdtd/init_fdtd_ground.py", "max_stars_repo_name": "qgoestch/sinecity_testcases", "max_stars_repo_head_hexsha": "ec04ba707ff69b5c1b4b42e56e522855a2f34a65", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "num_methods/fdtd/init_fdtd_ground.py", "max_issues_repo_name": "qgoestch/sinecity_testcases", "max_issues_repo_head_hexsha": "ec04ba707ff69b5c1b4b42e56e522855a2f34a65", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "num_methods/fdtd/init_fdtd_ground.py", "max_forks_repo_name": "qgoestch/sinecity_testcases", "max_forks_repo_head_hexsha": "ec04ba707ff69b5c1b4b42e56e522855a2f34a65", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-18T13:07:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-18T13:07:10.000Z", "avg_line_length": 44.71886121, "max_line_length": 115, "alphanum_fraction": 0.5103453764, "include": true, "reason": "import numpy", "num_tokens": 3634}
|
module GRIN.Name
import Control.Monad.State
import Data.SortedMap
||| A name and resolved index.
public export
record Resolved a where
constructor MkResolved
res : Int
orig : a
export
Eq (Resolved a) where
(==) = (==) `on` res
export
Ord (Resolved a) where
compare = compare `on` res
export
Show a => Show (Resolved a) where
show = show . orig
||| State for resolving names.
record ResolveState name where
constructor MkResState
nextId : Int
resolved : SortedMap name Int
||| Monad for resolving names.
export
record ResolveM name a where
constructor MkResolveM
unResolveM : State (ResolveState name) a
export
runResolveM : Ord name => ResolveM name a -> a
runResolveM = evalState (MkResState 0 empty) . unResolveM
export
Functor (ResolveM name) where
map f = MkResolveM . map f . unResolveM
export
Applicative (ResolveM name) where
pure = MkResolveM . pure
f <*> x = MkResolveM $ unResolveM f <*> unResolveM x
export
Monad (ResolveM name) where
x >>= f = MkResolveM $ unResolveM x >>= (unResolveM . f)
MonadState (ResolveState name) (ResolveM name) where
get = MkResolveM get
put = MkResolveM . put
state = MkResolveM . state
export
resolve : name -> ResolveM name (Resolved name)
resolve orig = do
st <- get
case lookup orig st.resolved of
Just res => pure $ MkResolved res orig
Nothing => do
let res = st.nextId
put (record { nextId $= (+ 1), resolved $= insert orig res } st)
pure $ MkResolved res orig
|
{"hexsha": "332e1daad7b8ae8a171450cd7b0f2aa8d5a6791d", "size": 1552, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "grin/src/GRIN/Name.idr", "max_stars_repo_name": "danielkroeni/Idris2-Grin", "max_stars_repo_head_hexsha": "8fc28628300e5c9a97d83c62999290d809b50838", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2021-02-26T13:11:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T16:04:05.000Z", "max_issues_repo_path": "grin/src/GRIN/Name.idr", "max_issues_repo_name": "danielkroeni/Idris2-Grin", "max_issues_repo_head_hexsha": "8fc28628300e5c9a97d83c62999290d809b50838", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-18T08:34:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-10T07:39:42.000Z", "max_forks_repo_path": "grin/src/GRIN/Name.idr", "max_forks_repo_name": "danielkroeni/Idris2-Grin", "max_forks_repo_head_hexsha": "8fc28628300e5c9a97d83c62999290d809b50838", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-16T12:20:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T23:36:48.000Z", "avg_line_length": 22.4927536232, "max_line_length": 76, "alphanum_fraction": 0.6701030928, "num_tokens": 433}
|
### SUMMIX Script
### Main feature: SUMMIX, an efficient estimation of ancestry proportions from allele frequency data
### Authors: Jordan R. Hall, Kaichao Chang; Supervised by Dr. Audrey Hendricks
### Mixtures Research Group, Dr. Audrey E. Hendricks, Univ. of Colorado Denver, 2020.
### Import needed packages
import numpy as np
import scipy as scipy
from scipy.optimize import minimize
import timeit
import pandas as pd
### data_processor: (file_name, file_format, k, ref, obs) -> (A, taf)
### A data-processing function that takes 5 inputs:
## 1. file_name: A user-input genetic data file -- will be processed via pandas below!!!
## Must be a .txt or a .csv file (with the .txt or .csv as the last four characters of the actual file name). See data formatting standards for more info about required rows/columns.
## 2. file_format: The "file_format" of file, as a string. Default is 'tab' which is short for tab-delimited text files. Can also choose 'csv' for CSV files.
## 3. k: The number of reference ancestries, k=2,3,4,...
## 4. ref: A list of strings for which columns to use for reference allele freq's. Pass the name of each column
## as a string. So for example, if the desired reference ancestries are called "ref_eur_1000G" and "ref_afr_1000G", then use
## ref=['ref_eur_1000G','ref_afr_1000G'].
## 5. obs: Which column to use for observed allele freq's. Pass the name of this column, as a string.
## So for example, if the desired observed ancestry is stored in a column called "gnomAD_afr", then obs='gnomAD_afr'.
## There is no default choice; an error message is shown if obs is not provided.
### and returns 2 outputs:
## 1. A: Genetic data in an input array "A" size Nxk containing N SNPs (these are the rows), and k reference ancestries (these are the columns);
## 2. taf: The observed or "total allele frequency" called "taf" in this code, which should be an Nx1 vector
def data_processor(file_name, file_format, k, ref, obs):
# Reads data file in using pandas
    if file_format == 'csv':
D = pd.read_csv(file_name)
else:
D = pd.read_csv(file_name, sep='\t')
# Extract key variables
N = np.shape(D)[0] # N=number of SNPs!
A = np.zeros((N,k)) # initiate empty matrix for reference AFs
taf = np.zeros((N,1)) # initial empty vector for observed/TAFs
# Then we can grab out the reference ancestries
for i in range(0,k):
A[:,i] = D[ref[i]]
taf[:,0] = D[obs] # grabs observed data
return A, taf
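# Minimal usage sketch (hypothetical file name; reference/observed column names
# reuse the examples from the comments above):
#   A, taf = data_processor('chr22_afs.txt', 'tab', 2,
#                           ['ref_eur_1000G', 'ref_afr_1000G'], 'gnomAD_afr')
# A is then N x 2 (reference AFs) and taf is N x 1 (observed AFs).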
### SUMMIX : (ref, k, guess, obs, file_name, file format) -> (x_answer, n_iterations, time)
### A generalized function that takes 6 inputs:
## 1. ref: A list of strings for which columns to use for reference allele freq's. Pass the name of each column
## as a string. So for example, if the desired reference ancestries are called "ref_eur_1000G" and "ref_afr_1000G", then use
## ref=['ref_eur_1000G','ref_afr_1000G'].
## 2. k: The number of reference ancestries in the input data
## 3. guess: A starting guess, which should be a kx1 vector. Default is 1/k*(1,1,...,1).
## 4. obs: is which column to use for observed allele freq's. Pass the name of this column, as a string.
## So for example, if the observed is stored in a column called "gnomAD_afr", then obs='gnomAD_afr'.
## There is no default choice; an error message is shown if obs is not provided.
## 5. file_name: The genetic data frame "file" (usually a tab-delimited text file which we read in through pandas using data processor above)
## 6. The "file_format" of file. Should be .csv or .txt as described above. Default assumption is .txt, tab delimited text.
### and returns 3 outputs:
## 1. x_answer: The hidden proportions of every reference ancestry in the data as a kx1 vector
## 2. n_iterations: The number of iterations that SLSQP performed, as a scalar value
## 3. time: The run time of the algorithm as a scalar value, measured in seconds
### SUMMIX will also return a print statement to help the user interpret their results.
def SUMMIX(ref, obs, file_format='tab', k=None, guess=None, file_name=None):
# Start the clock!
start = timeit.default_timer()
# Check if a file is provided
if file_name is None:
print('Please specify a genetic data frame.')
return
# Use the data_processor to take the info we need out of the data frame D
data_array = data_processor(file_name, file_format, k, ref, obs)
A = data_array[0]
taf = data_array[1]
# Now, we perform several sanity checks, and check user inputs!
if abs(np.shape(np.shape(A))[0]-2)>0:
print('Please ensure that data matrix D is size Nxk.')
return
if k is None:
print('Please specify k, the number of reference ancestries.')
return
if isinstance(k,int)==False:
print('Please ensure that k is an integer.')
return
elif k <=0:
print('Please ensure that k is a positive integer.')
return
if guess is None:
guess=np.transpose(1/k*np.ones((k,1))) # If user doesn't give guess, we provide one.
if abs(np.shape(guess)[0]-1)>0 and abs(np.shape(guess)[1]-1)>0:
print('Please ensure that initial iterate guess is a vector, size kx1 or 1xk.')
return
if abs(np.shape(guess)[1]-k)>0:
guess=np.transpose(np.copy(guess))
    if abs(np.shape(guess)[1]-k)>0:
        print('Please ensure that initial iterate guess is a vector, size kx1 or 1xk.')
        return
if isinstance(obs,str)==False:
print('Please ensure that obs is a string, corresponding to the exact column name of the observed ancestry you wish to model.')
return
# This is the objective function!
def obj_fun(x):
# Start the value of the objective function at 0
b=0
# This adds up each k column of A scaled by the k-th ancestry
for i in range(0,k):
b=b + x[i]*A[:,i:(i+1)]
# After the for loop, b is an Nx1 vector which contains the value of the mixture model for all N SNP's
# Now we subtract off the total allele frequency at each SNP
b=b-taf
# Finally we square every entry of the Nx1 vector b, and add them all up.
# This is the value of the objective function, which we now return
return np.sum(b**2, axis=0)[0]
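    # In closed form, obj_fun evaluates the least-squares objective
    #   f(x) = sum_{n=1..N} ( sum_{i=1..k} x_i * A[n,i] - taf[n] )^2,
    # i.e. the squared residual of the k-ancestry mixture model at every SNP.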
# This is the gradient of the objective function!
def grad_obj_fun(x):
# Initiate empty kx1 vector
gradvec = np.zeros((k,1))
# Start the value of the gradient entries with 0
d = 0
# We still need the value of the "inside" of the objective function, so we repeat part of what we did above:
for i in range(0,k):
d = d + x[i]*A[:,i:(i+1)]
d = d - taf
# Now d is Nx1 and contains the value of the mixture model minus the total allele frequencies at each SNP
# Now we form the k entries of the gradient and return that vector
for i in range(0,k):
gradvec[i,:] = np.sum(2*A[:,i:(i+1)]*d, axis=0)
return gradvec
# These are wrappers that make our constraints (all proportions must add to 1) and our bounds (all proportions are 0 or greater)
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x,axis=0) -1},)
bnds = ((0, None),)
for i in range(0,k-1):
bnds = bnds + ((0, None),)
# We now form an answer object which will store all of the outputs to running SLSQP given our inputs above
ans_obj = scipy.optimize.minimize(obj_fun, guess, method='SLSQP', jac=grad_obj_fun, bounds=bnds, constraints=cons, tol=1e-5)
# Stop the clock!
stop = timeit.default_timer()
# Difference stop-start tells us run time
time= stop-start
# Print results for the user!
print('Numerical solution via SLSQP, pi_final = ',ans_obj.x, '\n \n using observed population:', obs, '\n \n Number of SLSQP iterations:',ans_obj.nit, '\n \n Runtime:',time, 'seconds','\n \n \n Detailed results:')
for i in range(0,k):
print(ans_obj.x[i], 'is the estimated proportion of',ref[i] ,'\n' )
# Return the 3 outputs we wanted, namely: the solution vector, number of iterations, and run time
return ans_obj.x, ans_obj.nit, time
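# Minimal usage sketch (hypothetical file name; reference/observed column names
# reuse the examples from the comments above):
#   pi_final, n_iter, runtime = SUMMIX(ref=['ref_eur_1000G', 'ref_afr_1000G'],
#                                      obs='gnomAD_afr', k=2,
#                                      file_name='chr22_afs.txt')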
|
{"hexsha": "804811eabbb711d9c00e1e245c1b8bf9f23f4315", "size": 8236, "ext": "py", "lang": "Python", "max_stars_repo_path": "summix/summix.py", "max_stars_repo_name": "jordanrhall/summix_py", "max_stars_repo_head_hexsha": "244d621838def52c88b7334c0d876fcd4fee1a8f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-15T21:12:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-15T21:12:56.000Z", "max_issues_repo_path": "summix/summix.py", "max_issues_repo_name": "jordanrhall/summix_py", "max_issues_repo_head_hexsha": "244d621838def52c88b7334c0d876fcd4fee1a8f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "summix/summix.py", "max_forks_repo_name": "jordanrhall/summix_py", "max_forks_repo_head_hexsha": "244d621838def52c88b7334c0d876fcd4fee1a8f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-08T23:48:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-08T23:48:17.000Z", "avg_line_length": 40.1756097561, "max_line_length": 217, "alphanum_fraction": 0.6730208839, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 2224}
|
/**
* Facebook Internet Explorer Toolbar Software License
* Copyright (c) 2009 Facebook, Inc.
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (which, together with any graphical images included with such
* software, are collectively referred to below as the "Software") to (a) use,
* reproduce, display, distribute, execute, and transmit the Software, (b)
* prepare derivative works of the Software (excluding any graphical images
* included with the Software, which may not be modified or altered), and (c)
* permit third-parties to whom the Software is furnished to do so, all
* subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* Facebook, Inc. retains ownership of the Software and all associated
* intellectual property rights. All rights not expressly granted in this
* license are reserved by Facebook, Inc.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "StdAfx.h"
#include "ClientServiceConnection.h"
#include <afxmt.h>
#include <boost/bind.hpp>
#include "../../FBClientService/FBClientService_i.c"
namespace facebook{
// ---------------------------------------------------------------------
// class ClientServiceConnection
// ---------------------------------------------------------------------
ClientServiceConnection::ClientServiceConnection():
connectionItems_(),
criticalSection_() {
}
ClientServiceConnection::~ClientServiceConnection() {
std::for_each(connectionItems_.begin(), connectionItems_.end(),
boost::bind(&ClientServiceConnection::releaseSink, this, _1));
}
ClientServicePtr ClientServiceConnection::getClientService(
const DWORD threadId) {
return getService(obtainConnection(threadId));
}
void ClientServiceConnection::subscribeServiceObserver(
ClientServiceObserver* observer) {
const ThreadId threadId = ::GetCurrentThreadId();
ConnectionItem connection = obtainConnection(threadId);
ClientServiceEventsSinkPtr eventsSink = getEventsSink(connection);
if (!hasObservers(eventsSink)) {
advise(getService(connection), eventsSink);
}
eventsSink->subscribe(observer);
}
void ClientServiceConnection::unsubscribeServiceObserver(
ClientServiceObserver* observer) {
const ThreadId threadId = ::GetCurrentThreadId();
ConnectionItem connection = obtainConnection(threadId);
ClientServiceEventsSinkPtr eventsSink = getEventsSink(connection);
if (!hasObservers(eventsSink)) {
return;
}
eventsSink->unsubscribe(observer);
if (!hasObservers(eventsSink)) {
unadvise(getService(connection), eventsSink);
}
}
ClientServiceEventsSinkPtr ClientServiceConnection::getEventsSink() {
const ThreadId threadId = ::GetCurrentThreadId();
return getEventsSink(obtainConnection(threadId));
}
ClientServicePtr ClientServiceConnection::createClientService() {
ClientServicePtr service;
const HRESULT createResult = service.CoCreateInstance(CLSID_FBClientService);
UNREFERENCED_PARAMETER(createResult);
return service;
}
ClientServiceEventsSinkPtr ClientServiceConnection::createEventsSink() {
ClientServiceEventsSinkPtr sink;
const HRESULT createResult =
ClientServiceEventsSinkObject::CreateInstance(&sink);
if (FAILED(createResult)) {
_com_raise_error(createResult);
}
sink->AddRef();
return sink;
}
ClientServiceConnection::ConnectionItem ClientServiceConnection::
createConnection() {
ClientServicePtr service = createClientService();
ClientServiceEventsSinkPtr eventsSink = createEventsSink();
return std::make_pair(service, eventsSink);
}
ClientServicePtr& ClientServiceConnection::getService(ConnectionItem& item) {
return item.first;
}
const ClientServicePtr& ClientServiceConnection::getService(
const ConnectionItem& item) {
return item.first;
}
ClientServiceEventsSinkPtr& ClientServiceConnection::getEventsSink(
ConnectionItem& item) {
return item.second;
}
const ClientServiceEventsSinkPtr& ClientServiceConnection::getEventsSink(
const ConnectionItem& item) {
return item.second;
}
bool ClientServiceConnection::hasObservers(
const ClientServiceEventsSinkPtr& sink) const {
return sink->getObserversCount() > 0;
}
bool ClientServiceConnection::hasConnection(const DWORD connectionId) const {
return connectionItems_.find(connectionId) != connectionItems_.end();
}
ClientServiceConnection::ConnectionItem ClientServiceConnection::
obtainConnection(const DWORD connectionId) {
using namespace boost;
ScopeGuard critSectGuard(bind(&CCriticalSection::Lock, ref(criticalSection_)),
bind(&CCriticalSection::Unlock, ref(criticalSection_)));
const ConnectionItemsMap::iterator itemsIter =
connectionItems_.find(connectionId);
if (itemsIter != connectionItems_.end()) {
return itemsIter->second;
}
ConnectionItem newConnection = createConnection();
connectionItems_.insert(std::make_pair(connectionId, newConnection));
return newConnection;
}
void ClientServiceConnection::removeClientService(const DWORD threadId) {
const ConnectionItemsMap::iterator itemsIter =
connectionItems_.find(threadId);
if (itemsIter != connectionItems_.end()) {
releaseSink(*itemsIter);
connectionItems_.erase(itemsIter);
}
}
void ClientServiceConnection::advise(ClientServicePtr& service,
ClientServiceEventsSinkPtr& eventsSink) {
if (!service || !eventsSink) {
return;
}
const HRESULT adviseResult = eventsSink->DispEventAdvise(service);
if (FAILED(adviseResult)) {
_com_raise_error(adviseResult);
}
}
void ClientServiceConnection::unadvise(ClientServicePtr& service,
ClientServiceEventsSinkPtr& eventsSink) {
if (!service || !eventsSink) {
return;
}
const HRESULT unadviseResult = eventsSink->DispEventUnadvise(service);
if (FAILED(unadviseResult)) {
_com_raise_error(unadviseResult);
}
}
void ClientServiceConnection::releaseSink(ConnectionEntry& connectionEntry) {
ConnectionItem& connection = connectionEntry.second;
getEventsSink(connection)->Release();
}
} //!namespace facebook
|
{"hexsha": "60be474d14a708c0a3dd982ccd3319452e91e7ee", "size": 6953, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Sources/FBIEToolbar/system/ClientServiceConnection.cpp", "max_stars_repo_name": "facebookarchive/ie-toolbar", "max_stars_repo_head_hexsha": "cfcc1a8ffd6d6c7d8b1e12c8317ff728d2173cac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2016-05-12T23:53:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-31T15:18:19.000Z", "max_issues_repo_path": "Sources/FBIEToolbar/system/ClientServiceConnection.cpp", "max_issues_repo_name": "facebookarchive/ie-toolbar", "max_issues_repo_head_hexsha": "cfcc1a8ffd6d6c7d8b1e12c8317ff728d2173cac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/FBIEToolbar/system/ClientServiceConnection.cpp", "max_forks_repo_name": "facebookarchive/ie-toolbar", "max_forks_repo_head_hexsha": "cfcc1a8ffd6d6c7d8b1e12c8317ff728d2173cac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2015-01-10T18:23:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-31T15:18:10.000Z", "avg_line_length": 30.2304347826, "max_line_length": 80, "alphanum_fraction": 0.757370919, "num_tokens": 1460}
|
import collections
import json
import random
import tensorflow as tf
import numpy as np
import energypy as ep
class BaseEnv(object):
""" time series environment """
def __init__(
self,
seed=None,
dataset='example',
):
if seed:
self.seed(seed)
def seed(self, seed):
""" sets random seeds """
seed = int(seed)
random.seed(seed)
tf.set_random_seed(seed)
np.random.seed(seed)
def reset(self):
""" resets the environment, returns an initial observation """
self.steps = 0
self.done = False
self.info = collections.defaultdict(list)
self.outputs = collections.defaultdict(list)
if not hasattr(self, 'episode_logger'):
self.episode_logger = ep.common.make_new_logger('episode')
return self._reset()
def step(self, action, log=True):
""" run one timestep of the environment's dynamics """
if not hasattr(self, 'state'):
raise ValueError(
'You need to reset the environment before calling step()')
action = np.array(action).reshape(1, *self.action_space.shape)
assert self.action_space.contains(action)
# child class specific transition dynamics
transition = self._step(action)
self.steps += 1
self.state = transition['next_state']
self.observation = transition['next_observation']
# saving to info dict
for k, v in transition.items():
transition[k] = np.array(v).tolist()
self.info[k].append(v)
t = transition
# TODO
if log:
# episode logger is set during experiment
self.episode_logger.debug(json.dumps(transition))
return self.observation, t['reward'], t['done'], self.info
def get_state_variable(self, variable_name):
""" get single element of the current state """
idx = list(self.state_space.keys()).index(variable_name)
return self.state[0][idx]
def update_info(self, **kwargs):
for name, data in kwargs.items():
self.info[name].append(data)
return self.info
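# Minimal subclass sketch (hypothetical; BaseEnv expects children to provide
# _reset(), _step(action), state_space and action_space):
#
# class ConstantEnv(BaseEnv):
#     def _reset(self):
#         self.state = self.observation = np.zeros((1, 1))
#         return self.observation
#     def _step(self, action):
#         return {'next_state': self.state, 'next_observation': self.observation,
#                 'reward': 0.0, 'done': False}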
|
{"hexsha": "138163d75b8661efe6c0431938e3e486eb506172", "size": 2224, "ext": "py", "lang": "Python", "max_stars_repo_path": "energypy/envs/env.py", "max_stars_repo_name": "winonecheng/energy-py", "max_stars_repo_head_hexsha": "1ac00446cfd6baf335f3772c679488758f9bfded", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-02T15:47:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-02T15:47:40.000Z", "max_issues_repo_path": "energypy/envs/env.py", "max_issues_repo_name": "winonecheng/energy-py", "max_issues_repo_head_hexsha": "1ac00446cfd6baf335f3772c679488758f9bfded", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "energypy/envs/env.py", "max_forks_repo_name": "winonecheng/energy-py", "max_forks_repo_head_hexsha": "1ac00446cfd6baf335f3772c679488758f9bfded", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8, "max_line_length": 74, "alphanum_fraction": 0.5971223022, "include": true, "reason": "import numpy", "num_tokens": 455}
|
# The IPython Notebook
* Facilitates interactive computing using a web-browser
* Developing,
* Documenting,
* Executing code and
* Communicating the results
## Starting the Notebook Server
**`ipython notebook`**
A new notebook document is created with following features
* Notebook name,
* Menu bar,
* Toolbar and
* Empty code cell
Structure of a notebook document
------------------------------------
### There are four types of cells:
* Code cells,
* Markdown cells,
* Raw cells and
* Heading cells
### Code cells
* A code cell allows you to edit and write new code, with full syntax highlighting and tab completion
```python
from __future__ import division, print_function
import numpy as np
a = np.random.rand(10)
print(type(a))
print (a)
```
<type 'numpy.ndarray'>
[ 0.82183566 0.4249464 0.66023236 0.24285299 0.11646147 0.26724622
0.43001898 0.08840917 0.83570893 0.12698752]
```python
np.sin(a)
```
array([ 0.19420961, 0.80987413, 0.37662322, 0.61959548, 0.4818346 ,
0.52143703, 0.76786701, 0.43159441, 0.29240385, 0.71729244])
* It's possible to use `np.sin(_)`;
* `_` represents the last output,
* `__` represents the second-to-last output,
* `_N` represents `Out [N]`
```python
%matplotlib inline
import matplotlib.pyplot as plt
#import numpy as np
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y)
```
### Markdown Cells
1. It is possible to document the computational process in a literate way, alternating descriptive text with code
2. Within Markdown cells, you can also include mathematics in a straightforward way, using standard LaTeX notation
3. Standard mathematics environments defined by LaTeX and AMS-LaTeX (the amsmath package) also work
$\sin^2\theta+\cos^2\theta= 1 $
$$\sin^2\theta+\cos^2{\theta} = 1 $$
\begin{equation}
y = 3x^3+2x^2+x+10
\end{equation}
\begin{align}
y_1 & = x_1^3+x_2^2+x+5 \\
y_2 & = x_1^4+x_2^3+x+10
\end{align}
$$P_{r-j} = \begin{cases}
0 & \text{if $r-j$ is odd},\\
r!\,(-1)^{(r-j)/2}& \text{if $r-j$ is even}.
\end{cases}$$
## Inserting Images
### Raw Cells
* Raw cells provide a place in which you can write output directly.
* Raw cells are not evaluated by the notebook.
* When passed through _nbconvert_, raw cells arrive in the destination format unmodified.
* For example, this allows you to type full LaTeX into a raw cell, which will only be rendered by LaTeX after conversion by nbconvert.
### Heading Cells
* You can provide a conceptual structure for your computational document as a whole using different levels of headings
* There are 6 levels available, from level 1 (top level) down to level 6 (paragraph)
### Keyboard Shortcuts
1. **`Shift-Enter`: Run cell**
2. **`Ctrl-Enter`: Run cell in-place**
3. **`Alt-Enter`: Run cell, insert below**
4. **`Esc`** and **`Enter`**: **Command mode** and **edit mode**
## Converting notebooks to other formats
**`$ ipython nbconvert --to FORMAT notebook.ipynb`**
* The default output format is html
* For example, for this notebook I can give the following command to export it to HTML format
**`$ ipython nbconvert --to html session1.ipynb`**
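* Other output formats work the same way; for instance, a Markdown version can be produced with
**`$ ipython nbconvert --to markdown session1.ipynb`**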
|
{"hexsha": "b715b5b1d894e2aec6d3bcec3acdcb85d794b01a", "size": 19083, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "bvbcet/Working_with_IPython_Notebooks.ipynb", "max_stars_repo_name": "satish-annigeri/Notebooks", "max_stars_repo_head_hexsha": "92a7dc1d4cf4aebf73bba159d735a2e912fc88bb", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bvbcet/Working_with_IPython_Notebooks.ipynb", "max_issues_repo_name": "satish-annigeri/Notebooks", "max_issues_repo_head_hexsha": "92a7dc1d4cf4aebf73bba159d735a2e912fc88bb", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bvbcet/Working_with_IPython_Notebooks.ipynb", "max_forks_repo_name": "satish-annigeri/Notebooks", "max_forks_repo_head_hexsha": "92a7dc1d4cf4aebf73bba159d735a2e912fc88bb", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 62.1596091205, "max_line_length": 12052, "alphanum_fraction": 0.7910181837, "converted": true, "num_tokens": 948}
|
from backend.gcloud_app.style_transfer.style_transfer_interface import StyleTransferInterface
import backend.gcloud_app.image_transformation as img_transform
import numpy as np
class MonetStyleTransfer(StyleTransferInterface):
neural_network = {}
size = 256
def apply(self, image):
shape, image_segments = img_transform.segment_image(image, self.size)
generated_image_segments = []
for segment in image_segments:
# TO-DO: replace mock call with network forward pass
generated_image = self.mock_apply_style(segment)
generated_image_segments.append(generated_image)
result = img_transform.reconstruct_image(generated_image_segments, shape)
return result
def load_model(self, path):
pass
def mock_apply_style(self, image):
image = np.copy(image)
print(image.shape)
lin, col, _ = image.shape
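        # Placeholder effect: copy the third channel into all three channels,
        # producing a single-channel (grayscale-like) image. The variable name
        # r suggests BGR channel order, so index 2 is assumed to be red.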
for i in range(lin):
for j in range(col):
r = image[i][j][2]
image[i][j] = [r, r, r]
return image
|
{"hexsha": "a84b7104ca988d6a55cc5c66f1e6c2dcd21c7d59", "size": 1086, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/backend/gcloud_app/style_transfer/monet_style_transfer.py", "max_stars_repo_name": "sergiuiacob1/monet-photos", "max_stars_repo_head_hexsha": "4c11048cfba2f3a9f1df370b075b2abb8de0ab75", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/backend/gcloud_app/style_transfer/monet_style_transfer.py", "max_issues_repo_name": "sergiuiacob1/monet-photos", "max_issues_repo_head_hexsha": "4c11048cfba2f3a9f1df370b075b2abb8de0ab75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/backend/gcloud_app/style_transfer/monet_style_transfer.py", "max_forks_repo_name": "sergiuiacob1/monet-photos", "max_forks_repo_head_hexsha": "4c11048cfba2f3a9f1df370b075b2abb8de0ab75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-27T07:18:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-27T07:18:26.000Z", "avg_line_length": 27.8461538462, "max_line_length": 93, "alphanum_fraction": 0.6583793738, "include": true, "reason": "import numpy", "num_tokens": 225}
|
myTestRule {
#Workflow operator to iterate through variable
#Input parameters are:
# Loop initiation
# Loop termination
# Loop increment
# Workflow in brackets
#Output from running the example is:
# abcd
*A = list("a","b","c","d");
*B = "";
for(*I=0;*I<4;*I=*I+1) {
*B = *B ++ elem(*A, *I);
}
writeLine("stdout", *B);
}
INPUT null
OUTPUT ruleExecOut
|
{"hexsha": "e6df334a9f1310545c2f3532b001258365679e97", "size": 384, "ext": "r", "lang": "R", "max_stars_repo_path": "iRODS/clients/icommands/test/rules3.0/ruleworkflowfor.r", "max_stars_repo_name": "PlantandFoodResearch/irods", "max_stars_repo_head_hexsha": "9dfe7ffe5aa0760b7493bd9392ea1270df9335d4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-03-10T13:13:55.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-16T11:02:51.000Z", "max_issues_repo_path": "iRODS/clients/icommands/test/rules3.0/ruleworkflowfor.r", "max_issues_repo_name": "PlantandFoodResearch/irods", "max_issues_repo_head_hexsha": "9dfe7ffe5aa0760b7493bd9392ea1270df9335d4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-09-24T04:20:30.000Z", "max_issues_repo_issues_event_max_datetime": "2015-09-24T04:20:30.000Z", "max_forks_repo_path": "iRODS/clients/icommands/test/rules3.0/ruleworkflowfor.r", "max_forks_repo_name": "PlantandFoodResearch/irods", "max_forks_repo_head_hexsha": "9dfe7ffe5aa0760b7493bd9392ea1270df9335d4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-16T11:03:31.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-16T11:03:31.000Z", "avg_line_length": 19.2, "max_line_length": 46, "alphanum_fraction": 0.6015625, "num_tokens": 124}
|
import numpy as np
from sklearn.datasets import make_classification
import mlsurvey as mls
from .dataset import DataSet
from .dataset_factory import DataSetFactory
class NClassRandomClassificationWithNoise(DataSet):
def generate(self):
"""
Generate data of make_classification from parameters
:return: (x, y) : data and label
"""
n_samples = self.params.get('n_samples', 100)
shuffle = self.params.get('shuffle', True)
noise = self.params.get('noise', 0)
random_state = self.params.get('random_state', None)
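        # The noise parameter acts twice below: flip_y=noise/10 mislabels a
        # fraction of samples, and class_sep=2-2*noise shrinks class separation.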
x, y = make_classification(n_features=2,
n_redundant=0,
n_informative=2,
n_clusters_per_class=2,
n_samples=n_samples,
shuffle=shuffle,
random_state=random_state,
flip_y=noise / 10,
class_sep=2 - 2 * noise
)
rng = np.random.RandomState(random_state)
x += noise * 2 * rng.uniform(size=x.shape)
data_array = np.concatenate((x, np.array([y]).T), axis=1)
func_create_df = mls.Utils.func_create_dataframe(self.storage)
result = func_create_df(data_array)
return result
class Factory:
@staticmethod
def create(t, storage): return NClassRandomClassificationWithNoise(t, storage)
DataSetFactory.add_factory('NClassRandomClassificationWithNoise', NClassRandomClassificationWithNoise.Factory)
|
{"hexsha": "c82d2206faa158bca388936719a8850c9e357f90", "size": 1656, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlsurvey/sl/datasets/nclassrandomclassificationwithnoise.py", "max_stars_repo_name": "jlaumonier/mlsurvey", "max_stars_repo_head_hexsha": "373598d067c7f0930ba13fe8da9756ce26eecbaf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlsurvey/sl/datasets/nclassrandomclassificationwithnoise.py", "max_issues_repo_name": "jlaumonier/mlsurvey", "max_issues_repo_head_hexsha": "373598d067c7f0930ba13fe8da9756ce26eecbaf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlsurvey/sl/datasets/nclassrandomclassificationwithnoise.py", "max_forks_repo_name": "jlaumonier/mlsurvey", "max_forks_repo_head_hexsha": "373598d067c7f0930ba13fe8da9756ce26eecbaf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.511627907, "max_line_length": 110, "alphanum_fraction": 0.5742753623, "include": true, "reason": "import numpy", "num_tokens": 319}
|
\documentclass[../main]{subfiles}
\begin{document}
\thispagestyle{fancy}
\setlength{\parindent}{18pt}
\begin{onehalfspacing}
\vspace*{75pt}
\section*{\huge Abstract}
The first part of this thesis is an analysis of the \textit{virtual large cardinals}, being critical points of set-sized generic elementary embeddings where the target model is a subset of the ground model. We show that virtually measurables are equiconsistent with virtually strongs, and that virtually Woodins are virtually Vop\v enka. We separate most of these large cardinals, but show that such separations do not hold within core models. We define \textit{prestrong cardinals}, being an equivalent characterisation of strongs, but which in a virtual setting are strictly weaker than virtually strongs. We show that the existence of this separation is equivalent to the existence of virtually rank-into-rank cardinals in the universe, and that virtually Berkeley cardinals can be characterised in the same fashion with $\on$ being virtually \textit{pre-Woodin} but not virtually Woodin, answering a question by Gitman and Hamkins. Building on the work of Wilson, we show that the virtual version of the \textit{Weak Vop\v enka Principle} is equivalent to a weakening of virtually pre-Woodins. We end the first part with several indestructibility results, including that a slight strengthening of the virtually supercompacts is always indestructible by ${<}\kappa$-directed closed forcings.
The second part is concerned with connections between the virtual large cardinals and other set-theoretic objects. We analyse cardinals arising from a certain \textit{filter game}, for various lengths of the game. When the games are finite we show that this results in a characterisation of the completely ineffable cardinals, and at length $\omega$ we arrive at another characterisation of the virtually measurable cardinals. At length $\omega+1$ the cardinals become equiconsistent with a measurable cardinal, and at uncountable cofinalities the cardinals are downward absolute to $K$ below $0^\pistol$. The results in this section answer most of the open questions raised in \cite{HolySchlicht}. We also introduce the notion of \textit{ideal-absolute} properties of forcings, being properties such that generic elementary embeddings can be characterised by ideals in the ground model. We show that several properties are ideal-absolute, which includes an improvement of an unpublished theorem of Foreman. This also results in another characterisation of completely ineffables.
\end{onehalfspacing}
\end{document}
|
{"hexsha": "4c999c43d6ffa575b708c4b06f16483f6a5bb13a", "size": 2602, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "frontbackmatter/abstract.tex", "max_stars_repo_name": "saattrupdan/phd", "max_stars_repo_head_hexsha": "21481596be517c874e311797f5a70829e0cba7d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "frontbackmatter/abstract.tex", "max_issues_repo_name": "saattrupdan/phd", "max_issues_repo_head_hexsha": "21481596be517c874e311797f5a70829e0cba7d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "frontbackmatter/abstract.tex", "max_forks_repo_name": "saattrupdan/phd", "max_forks_repo_head_hexsha": "21481596be517c874e311797f5a70829e0cba7d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 123.9047619048, "max_line_length": 1298, "alphanum_fraction": 0.8082244427, "num_tokens": 568}
|
import NumericalIntegration
"""
trapz(x, y)
Compute the area under the curve of 2-dimensional points (x, y) using
the trapezoidal method.
"""
function trapz end
# Promote mixed element types so that the typed method below applies.
trapz(x, y) = trapz(promote(x, y)...)
function trapz(
x::AbstractVector{T},
y::AbstractVector{T},
)::T where T
if length(x) != length(y)
error("length(x) != length(y)")
end
if length(x) == 0
error("length(x) == 0")
end
    if !issorted(x)
        error("x needs to be sorted in ascending order")
    end
    # Accumulate twice the trapezoidal sum: (y[k] + y[k-1]) * (x[k] - x[k-1]).
    twoI::T = zero(T)
    for k = 2:length(x)
        twoI += ( y[k] + y[k-1] ) * ( x[k] - x[k-1] )
    end
    # Cross-check the hand-rolled sum against NumericalIntegration.jl and
    # return the library's value.
    I_verify::T = NumericalIntegration.integrate(
        x,
        y,
        NumericalIntegration.Trapezoidal(),
    )
    @assert isapprox(twoI/2, I_verify; atol=1e-8)
    return I_verify
end
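# Usage sketch (illustrative only; requires NumericalIntegration.jl):
# integrating y = x^2 over [0, 1] should give roughly 1/3.
#
#     xs = collect(range(0.0, 1.0; length = 101))
#     ys = xs .^ 2
#     trapz(xs, ys)   # ≈ 0.33335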
|
{"hexsha": "5e9ee4f5decde3b3429cec6f1a4b92151e32b1ef", "size": 863, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/toplevel/always-loaded/utils/trapz.jl", "max_stars_repo_name": "UnofficialJuliaMirror/PredictMD.jl-3e7d7328-36f8-4388-bd01-4613c92c7370", "max_stars_repo_head_hexsha": "7987993b5900e658c3aa9c568a9ed7fe38e82f11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-05-24T14:59:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T08:17:39.000Z", "max_issues_repo_path": "src/toplevel/always-loaded/utils/trapz.jl", "max_issues_repo_name": "UnofficialJuliaMirror/PredictMD.jl-3e7d7328-36f8-4388-bd01-4613c92c7370", "max_issues_repo_head_hexsha": "7987993b5900e658c3aa9c568a9ed7fe38e82f11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 383, "max_issues_repo_issues_event_min_datetime": "2018-04-12T21:53:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-24T15:53:37.000Z", "max_forks_repo_path": "src/toplevel/always-loaded/utils/trapz.jl", "max_forks_repo_name": "UnofficialJuliaMirror/PredictMD.jl-3e7d7328-36f8-4388-bd01-4613c92c7370", "max_forks_repo_head_hexsha": "7987993b5900e658c3aa9c568a9ed7fe38e82f11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-05-06T23:16:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:49:49.000Z", "avg_line_length": 22.7105263158, "max_line_length": 69, "alphanum_fraction": 0.5631517961, "num_tokens": 264}
|
from enum import Enum
import numpy as np
class InvalidMapError(Exception):
"""
Custom Exception for when an invalid map was created
"""
pass
class Directions(Enum):
EAST = 0
E = 0
NORTH = 1
N = 1
WEST = 2
W = 2
SOUTH = 3
S = 3
class MapTiles(Enum):
U = -1
UNKNOWN = -1
P = 0
PATH = 0
S = 1
SAND = 1
M = 2
MOUNTAIN = 2
W = 3
WALL = 3
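# Note: the single-letter enum names above share values with the long names,
# so Python's Enum treats them as aliases; iterating a class (for example
# list(Directions)) yields only the four canonical members.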
# Movement cost per traversable tile type; WALL has no entry here,
# presumably because walls are impassable.
tile_cost = {
    MapTiles.PATH: 1,
    MapTiles.SAND: 3,
    MapTiles.MOUNTAIN: 10}
class MapObject(object):
def __init__(self):
self.strength = 0
self.label = 'mapobject'
self.delta = 0
def move(self):
"""
Returns
-------
direction: Directions
Which direction to move
"""
pass
class AgentPlaceholder(MapObject):
"""
A placeholder for an agent for when an agent appears in another agent's
visible part of the map
"""
def __init__(self, strength):
super().__init__()
self.strength = strength
self.label = 'agent'
self.delta = -strength
class StaticMonster(MapObject):
def __init__(self):
super().__init__()
self.strength = 10
self.label = 'skeleton'
self.delta = -10
class DynamicMonster(MapObject):
def __init__(self, initial_i, initial_j):
super().__init__()
self.initial_i = initial_i
self.initial_j = initial_j
self.strength = 10
self.label = 'skeleton'
self.delta = -10
def move(self):
"""
Returns
-------
direction: Directions
Which direction to move
"""
return np.random.choice(list(Directions))
class PowerUp(MapObject):
def __init__(self):
super().__init__()
self.label = 'medkit'
self.delta = 10
class Boss(StaticMonster):
def __init__(self):
super().__init__()
self.strength = 100
self.label = 'boss'
self.delta = -100
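# Usage sketch (illustrative only): a DynamicMonster wanders one random step.
#
#     monster = DynamicMonster(0, 0)
#     step = monster.move()   # one of Directions.EAST/NORTH/WEST/SOUTH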
|
{"hexsha": "6f180bc3ce6d03de1b475fa309c9bd800317ff21", "size": 2044, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "erfannoury/cmsc671-fall2018-project", "max_stars_repo_head_hexsha": "7ba3867f0b512785fc1686c3b79e3c9c8c802deb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-01T19:50:27.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-01T19:50:27.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "erfannoury/cmsc671-fall2018-project", "max_issues_repo_head_hexsha": "7ba3867f0b512785fc1686c3b79e3c9c8c802deb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-12-06T18:35:08.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-16T05:15:42.000Z", "max_forks_repo_path": "utils.py", "max_forks_repo_name": "erfannoury/cmsc671-fall2018-project", "max_forks_repo_head_hexsha": "7ba3867f0b512785fc1686c3b79e3c9c8c802deb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-24T00:44:51.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-03T17:10:28.000Z", "avg_line_length": 16.8925619835, "max_line_length": 75, "alphanum_fraction": 0.5454990215, "include": true, "reason": "import numpy", "num_tokens": 527}
|
from pathlib import Path
from PIL import Image
import numpy as np
import xml.etree.ElementTree as ET
import random
import pickle
import torch
import sys
from utils.config import cfg
anno_path = cfg.VOC2011.KPT_ANNO_DIR
img_path = cfg.VOC2011.ROOT_DIR + 'JPEGImages'
ori_anno_path = cfg.VOC2011.ROOT_DIR + 'Annotations_original'
set_path = cfg.VOC2011.SET_SPLIT
cache_path = cfg.CACHE_PATH
KPT_NAMES = {
'cat': ['L_B_Elbow', 'L_B_Paw', 'L_EarBase', 'L_Eye', 'L_F_Elbow',
'L_F_Paw', 'Nose', 'R_B_Elbow', 'R_B_Paw', 'R_EarBase', 'R_Eye',
'R_F_Elbow', 'R_F_Paw', 'TailBase', 'Throat', 'Withers'],
'bottle': ['L_Base', 'L_Neck', 'L_Shoulder', 'L_Top', 'R_Base', 'R_Neck',
'R_Shoulder', 'R_Top'],
'horse': ['L_B_Elbow', 'L_B_Paw', 'L_EarBase', 'L_Eye', 'L_F_Elbow',
'L_F_Paw', 'Nose', 'R_B_Elbow', 'R_B_Paw', 'R_EarBase', 'R_Eye',
'R_F_Elbow', 'R_F_Paw', 'TailBase', 'Throat', 'Withers'],
'motorbike': ['B_WheelCenter', 'B_WheelEnd', 'ExhaustPipeEnd',
'F_WheelCenter', 'F_WheelEnd', 'HandleCenter', 'L_HandleTip',
'R_HandleTip', 'SeatBase', 'TailLight'],
'boat': ['Hull_Back_Bot', 'Hull_Back_Top', 'Hull_Front_Bot',
'Hull_Front_Top', 'Hull_Mid_Left_Bot', 'Hull_Mid_Left_Top',
'Hull_Mid_Right_Bot', 'Hull_Mid_Right_Top', 'Mast_Top', 'Sail_Left',
'Sail_Right'],
'tvmonitor': ['B_Bottom_Left', 'B_Bottom_Right', 'B_Top_Left',
'B_Top_Right', 'F_Bottom_Left', 'F_Bottom_Right', 'F_Top_Left',
'F_Top_Right'],
'cow': ['L_B_Elbow', 'L_B_Paw', 'L_EarBase', 'L_Eye', 'L_F_Elbow',
'L_F_Paw', 'Nose', 'R_B_Elbow', 'R_B_Paw', 'R_EarBase', 'R_Eye',
'R_F_Elbow', 'R_F_Paw', 'TailBase', 'Throat', 'Withers'],
'chair': ['BackRest_Top_Left', 'BackRest_Top_Right', 'Leg_Left_Back',
'Leg_Left_Front', 'Leg_Right_Back', 'Leg_Right_Front',
'Seat_Left_Back', 'Seat_Left_Front', 'Seat_Right_Back',
'Seat_Right_Front'],
'car': ['L_B_RoofTop', 'L_B_WheelCenter', 'L_F_RoofTop', 'L_F_WheelCenter',
'L_HeadLight', 'L_SideviewMirror', 'L_TailLight', 'R_B_RoofTop',
'R_B_WheelCenter', 'R_F_RoofTop', 'R_F_WheelCenter', 'R_HeadLight',
'R_SideviewMirror', 'R_TailLight'],
'person': ['B_Head', 'HeadBack', 'L_Ankle', 'L_Ear', 'L_Elbow', 'L_Eye',
'L_Foot', 'L_Hip', 'L_Knee', 'L_Shoulder', 'L_Toes', 'L_Wrist', 'Nose',
'R_Ankle', 'R_Ear', 'R_Elbow', 'R_Eye', 'R_Foot', 'R_Hip', 'R_Knee',
'R_Shoulder', 'R_Toes', 'R_Wrist'],
'diningtable': ['Bot_Left_Back', 'Bot_Left_Front', 'Bot_Right_Back',
'Bot_Right_Front', 'Top_Left_Back', 'Top_Left_Front', 'Top_Right_Back',
'Top_Right_Front'],
'dog': ['L_B_Elbow', 'L_B_Paw', 'L_EarBase', 'L_Eye', 'L_F_Elbow',
'L_F_Paw', 'Nose', 'R_B_Elbow', 'R_B_Paw', 'R_EarBase', 'R_Eye',
'R_F_Elbow', 'R_F_Paw', 'TailBase', 'Throat', 'Withers'],
'bird': ['Beak_Base', 'Beak_Tip', 'Left_Eye', 'Left_Wing_Base',
'Left_Wing_Tip', 'Leg_Center', 'Lower_Neck_Base', 'Right_Eye',
'Right_Wing_Base', 'Right_Wing_Tip', 'Tail_Tip', 'Upper_Neck_Base'],
'bicycle': ['B_WheelCenter', 'B_WheelEnd', 'B_WheelIntersection',
'CranksetCenter', 'F_WheelCenter', 'F_WheelEnd', 'F_WheelIntersection',
'HandleCenter', 'L_HandleTip', 'R_HandleTip', 'SeatBase'],
'train': ['Base_Back_Left', 'Base_Back_Right', 'Base_Front_Left',
'Base_Front_Right', 'Roof_Back_Left', 'Roof_Back_Right',
'Roof_Front_Middle'],
'sheep': ['L_B_Elbow', 'L_B_Paw', 'L_EarBase', 'L_Eye', 'L_F_Elbow',
'L_F_Paw', 'Nose', 'R_B_Elbow', 'R_B_Paw', 'R_EarBase', 'R_Eye',
'R_F_Elbow', 'R_F_Paw', 'TailBase', 'Throat', 'Withers'],
'aeroplane': ['Bot_Rudder', 'Bot_Rudder_Front', 'L_Stabilizer',
'L_WingTip', 'Left_Engine_Back', 'Left_Engine_Front',
'Left_Wing_Base', 'NoseTip', 'Nose_Bottom', 'Nose_Top',
'R_Stabilizer', 'R_WingTip', 'Right_Engine_Back',
'Right_Engine_Front', 'Right_Wing_Base', 'Top_Rudder'],
'sofa': ['Back_Base_Left', 'Back_Base_Right', 'Back_Top_Left',
'Back_Top_Right', 'Front_Base_Left', 'Front_Base_Right',
'Handle_Front_Left', 'Handle_Front_Right', 'Handle_Left_Junction',
'Handle_Right_Junction', 'Left_Junction', 'Right_Junction'],
'pottedplant': ['Bottom_Left', 'Bottom_Right', 'Top_Back_Middle',
'Top_Front_Middle', 'Top_Left', 'Top_Right'],
'bus': ['L_B_Base', 'L_B_RoofTop', 'L_F_Base', 'L_F_RoofTop', 'R_B_Base',
'R_B_RoofTop', 'R_F_Base', 'R_F_RoofTop']
}
class PascalVOC:
def __init__(self, sets, obj_resize):
"""
:param sets: 'train' or 'test'
:param obj_resize: resized object size
"""
self.classes = cfg.VOC2011.CLASSES
self.kpt_len = [len(KPT_NAMES[_]) for _ in cfg.VOC2011.CLASSES]
self.classes_kpts = {cls: len(KPT_NAMES[cls]) for cls in self.classes}
self.anno_path = Path(anno_path)
self.img_path = Path(img_path)
self.ori_anno_path = Path(ori_anno_path)
self.obj_resize = obj_resize
self.sets = sets
        assert sets in ('train', 'test'), 'No match found for dataset {}'.format(sets)
cache_name = 'voc_db_' + sets + '.pkl'
self.cache_path = Path(cache_path)
self.cache_file = self.cache_path / cache_name
if self.cache_file.exists():
with self.cache_file.open(mode='rb') as f:
self.xml_list = pickle.load(f)
print('xml list loaded from {}'.format(self.cache_file))
else:
print('Caching xml list to {}...'.format(self.cache_file))
self.cache_path.mkdir(exist_ok=True, parents=True)
with np.load(set_path, allow_pickle=True) as f:
self.xml_list = f[sets]
before_filter = sum([len(k) for k in self.xml_list])
self.filter_list()
after_filter = sum([len(k) for k in self.xml_list])
with self.cache_file.open(mode='wb') as f:
pickle.dump(self.xml_list, f)
print('Filtered {} images to {}. Annotation saved.'.format(before_filter, after_filter))
def filter_list(self):
"""
Filter out 'truncated', 'occluded' and 'difficult' images following the practice of previous works.
In addition, this dataset has uncleaned label (in person category). They are omitted as suggested by README.
"""
for cls_id in range(len(self.classes)):
to_del = []
for xml_name in self.xml_list[cls_id]:
xml_comps = xml_name.split('/')[-1].strip('.xml').split('_')
ori_xml_name = '_'.join(xml_comps[:-1]) + '.xml'
voc_idx = int(xml_comps[-1])
xml_file = self.ori_anno_path / ori_xml_name
assert xml_file.exists(), '{} does not exist.'.format(xml_file)
tree = ET.parse(xml_file.open())
root = tree.getroot()
obj = root.findall('object')[voc_idx - 1]
difficult = obj.find('difficult')
if difficult is not None: difficult = int(difficult.text)
occluded = obj.find('occluded')
if occluded is not None: occluded = int(occluded.text)
truncated = obj.find('truncated')
if truncated is not None: truncated = int(truncated.text)
if difficult or occluded or truncated:
to_del.append(xml_name)
continue
# Exclude uncleaned images
if self.classes[cls_id] == 'person' and int(xml_comps[0]) > 2008:
to_del.append(xml_name)
continue
# Exclude overlapping images in Willow
#if self.sets == 'train' and (self.classes[cls_id] == 'motorbike' or self.classes[cls_id] == 'car') \
# and int(xml_comps[0]) == 2007:
# to_del.append(xml_name)
# continue
for x in to_del:
self.xml_list[cls_id].remove(x)
def get_pair(self, cls=None, shuffle=True):
"""
Randomly get a pair of objects from VOC-Berkeley keypoints dataset
:param cls: None for random class, or specify for a certain set
:param shuffle: random shuffle the keypoints
:return: (pair of data, groundtruth permutation matrix)
"""
if cls is None:
cls = random.randrange(0, len(self.classes))
elif type(cls) == str:
cls = self.classes.index(cls)
assert type(cls) == int and 0 <= cls < len(self.classes)
anno_pair = []
for xml_name in random.sample(self.xml_list[cls], 2):
anno_dict = self.__get_anno_dict(xml_name, cls)
if shuffle:
random.shuffle(anno_dict['keypoints'])
anno_pair.append(anno_dict)
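        # Build the groundtruth permutation matrix: entry (i, j) is 1 iff the
        # i-th keypoint of the first image and the j-th keypoint of the second
        # share a name; unmatched rows and columns are pruned below.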
perm_mat = np.zeros([len(_['keypoints']) for _ in anno_pair], dtype=np.float32)
row_list = []
col_list = []
for i, keypoint in enumerate(anno_pair[0]['keypoints']):
for j, _keypoint in enumerate(anno_pair[1]['keypoints']):
if keypoint['name'] == _keypoint['name']:
perm_mat[i, j] = 1
row_list.append(i)
col_list.append(j)
break
row_list.sort()
col_list.sort()
perm_mat = perm_mat[row_list, :]
perm_mat = perm_mat[:, col_list]
anno_pair[0]['keypoints'] = [anno_pair[0]['keypoints'][i] for i in row_list]
anno_pair[1]['keypoints'] = [anno_pair[1]['keypoints'][j] for j in col_list]
return anno_pair, perm_mat
def __get_anno_dict(self, xml_name, cls):
"""
Get an annotation dict from xml file
"""
xml_file = self.anno_path / xml_name
assert xml_file.exists(), '{} does not exist.'.format(xml_file)
tree = ET.parse(xml_file.open())
root = tree.getroot()
img_name = root.find('./image').text + '.jpg'
img_file = self.img_path / img_name
bounds = root.find('./visible_bounds').attrib
h = float(bounds['height'])
w = float(bounds['width'])
xmin = float(bounds['xmin'])
ymin = float(bounds['ymin'])
with Image.open(str(img_file)) as img:
ori_sizes = img.size
obj = img.resize(self.obj_resize, resample=Image.BICUBIC, box=(xmin, ymin, xmin + w, ymin + h))
keypoint_list = []
for keypoint in root.findall('./keypoints/keypoint'):
attr = keypoint.attrib
attr['x'] = (float(attr['x']) - xmin) * self.obj_resize[0] / w
attr['y'] = (float(attr['y']) - ymin) * self.obj_resize[1] / h
keypoint_list.append(attr)
anno_dict = dict()
anno_dict['image'] = obj
anno_dict['keypoints'] = keypoint_list
anno_dict['bounds'] = xmin, ymin, w, h
anno_dict['ori_sizes'] = ori_sizes
anno_dict['cls'] = self.classes[cls]
return anno_dict
if __name__ == '__main__':
dataset = PascalVOC('train', (256, 256))
a = dataset.get_pair()
pass
|
{"hexsha": "700b7344accf6e6906e67a605da5c83f9ab37a4c", "size": 11521, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/pascal_voc.py", "max_stars_repo_name": "daniil-777/graph-matching", "max_stars_repo_head_hexsha": "3ae2522d42ea287edc2d5458bccb6b86ed05664e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-30T11:33:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-30T11:33:59.000Z", "max_issues_repo_path": "data/pascal_voc.py", "max_issues_repo_name": "daniil-777/Graph_Matching", "max_issues_repo_head_hexsha": "3ae2522d42ea287edc2d5458bccb6b86ed05664e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-15T18:34:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-15T18:34:28.000Z", "max_forks_repo_path": "data/pascal_voc.py", "max_forks_repo_name": "daniil-777/Graph_Matching", "max_forks_repo_head_hexsha": "3ae2522d42ea287edc2d5458bccb6b86ed05664e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.2172131148, "max_line_length": 117, "alphanum_fraction": 0.5809391546, "include": true, "reason": "import numpy", "num_tokens": 3200}
|
[STATEMENT]
lemma transfer_imp [transfer_intro]:
"p \<equiv> eventually P \<U> \<Longrightarrow> q \<equiv> eventually Q \<U> \<Longrightarrow> p \<longrightarrow> q \<equiv> eventually (\<lambda>n. P n \<longrightarrow> Q n) \<U>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>p \<equiv> eventually P \<U>; q \<equiv> eventually Q \<U>\<rbrakk> \<Longrightarrow> p \<longrightarrow> q \<equiv> \<forall>\<^sub>F n in \<U>. P n \<longrightarrow> Q n
[PROOF STEP]
by (simp only: FreeUltrafilterNat.eventually_imp_iff)
|
{"llama_tokens": 176, "file": null, "length": 1}
|
function [p,q] = detectAndMatchFeatures(image1,image1ROI,image2,image2ROI,featureOpts)
%detectAndMatchFeatures: A wrapper for detecting and matching features
%between two images using various feature detection methods.
%Inputs
%
%image1: The first image (2D unit8, grayscale or rgb).
%
%image1ROI: A region-of-interest specifying which region(s) in image1 we
%should detect features in. If image1ROI=[] then the whole image is used.
%
%image2: The second image (2D unit8, grayscale or rgb).
%
%image2ROI: A region-of-interest specifying which region(s) in image2 we
%should detect features in. If image2ROI=[] then the whole image is used.
%
%featureOpts: Options field specifying the feature detection method.
%Currently only SURF (matlab's built-in) and ASIFT
%(http://www.ipol.im/pub/art/2011/my-asift/) are supported, but it is very
%easy to introduce others.
%for ASIFT, featureOpts should have the structures:
%
% featureOpts.featureMethod = 'ASIFT';
% featureOpts.asiftPath (the path to the asift code & compiled executable).
%
%for SURF, featureOpts should have the structures:
%
% featureOpts.featureMethod = 'SURF'; %careful, unlike ASIFT this only works well
%when the plane's viewpoint is not too tilted!!
%
% featureOpts.loweRatioThreshold = 1.2 (default) You need to
%set a confidence ratio (see Lowe's SIFT paper for the explanation of
%this). Basically, a high value means only using feature matches that are
%likely to be correct (but at the cost of fewer feature matches). A default
%of 1.2 is usually fine.
%
% outputs:
%p : 2XN matrix holding the matched points in image1
%q : 2XN matrix holding the matched points in image2
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This file is part of the IPPE package for fast plane-based pose
% estimation from the paper "Infinitesimal Plane-based Pose Estimation" by Toby Collins and Adrien Bartoli,
% published in the International Journal of Computer Vision, September
% 2014. A copy of the author's pre-print version can be found here:
%
% http://isit.u-clermont1.fr/~ab/Publications/Collins_Bartoli_IJCV14.pdf
%
% This package is free and covered by the BSD licence without any warranty. We hope you find this code useful and please cite our paper in your work:
% (c) Toby Collins 2015
%
%
%@article{
%year={2014},
%issn={0920-5691},
%journal={International Journal of Computer Vision},
%volume={109},
%number={3},
%doi={10.1007/s11263-014-0725-5},
%title={Infinitesimal Plane-Based Pose Estimation},
%url={http://dx.doi.org/10.1007/s11263-014-0725-5},
%publisher={Springer US},
%keywords={Plane; Pose; SfM; PnP; Homography},
%author={Collins, Toby and Bartoli, Adrien},
%pages={252-286},
%language={English}
%}
%
%
% This is free software covered by the FreeBSD License (see IPPE_license.txt) with Copyright (c) 2015 Toby Collins
%basic argument checking
if nargin~=5
    error('detectAndMatchFeatures requires exactly 5 input arguments.');
end
if nargout>2
    error('detectAndMatchFeatures returns at most 2 output arguments.');
end
assert(nargin ==5);
assert(size(image1,3)==1|size(image1,3)==3);
assert(size(image2,3)==1|size(image2,3)==3);
assert(isa(image1,'uint8'));
assert(isa(image2,'uint8'));
%check whether regions-of-interest are used:
if isempty(image1ROI)
image1ROI = true(size(image1,1),size(image1,2));
end
if isempty(image2ROI)
image2ROI = true(size(image2,1),size(image2,2));
end
assert(isa(image1ROI,'logical'));
assert(isa(image2ROI,'logical'));
%convert image to grayscale:
if size(image1,3)==3
image1 = rgb2gray(image1);
end
if size(image2,3)==3
image2 = rgb2gray(image2);
end
%perform detection and matching:
switch featureOpts.featureMethod
case 'SURF'
%detection:
pointsTemplate = detectSURFFeatures(image1,'MetricThreshold',200);
featuresTemplate = extractFeatures(image1,pointsTemplate);
p = pointsTemplate.Location';
pointsInput = detectSURFFeatures(image2,'MetricThreshold',200);
featuresInput = extractFeatures(image2,pointsInput);
%matching using Lowe's ratio test for rejecting bad matches (see his SIFT paper for details)
[IDX, D]= knnsearch(featuresInput,featuresTemplate,'K',2);
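        %keep a match only if the second-nearest descriptor is sufficiently
        %further away than the nearest one, i.e. the match is distinctive: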
vlds = D(:,2)./D(:,1)> featureOpts.loweRatioThreshold;
q = pointsInput.Location(IDX(:,1),:)';
q = q(:,vlds);
p = p(:,vlds);
case 'ASIFT'
[p,q] = asiftWrapper(featureOpts.asiftPath,image1,image2);
otherwise
error('unknown feature detection method is specified');
end
%keep only features located in the rois:
vlds = interp2(double(image1ROI),p(1,:),p(2,:))==1;
vlds = vlds & interp2(double(image2ROI),q(1,:),q(2,:))==1;
p = p(:,vlds);
q = q(:,vlds);
|
{"author": "tobycollins", "repo": "IPPE", "sha": "3304dfa40c7cbd046ba0d540b8b1143283c83f4e", "save_path": "github-repos/MATLAB/tobycollins-IPPE", "path": "github-repos/MATLAB/tobycollins-IPPE/IPPE-3304dfa40c7cbd046ba0d540b8b1143283c83f4e/matlab/IPPE_utils/detectAndMatchFeatures.m"}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 1 18:17:51 2021
@author: maple
"""
import numpy as np
import scipy.linalg
import h5py
from numpy.polynomial import Polynomial
import scipy.optimize
import sys, os
sys.path.append(os.path.abspath('qg_dns/analysis/eigenvectors'))
from chm_utils import EigenvalueSolverFD
# %% Load data
if len(sys.argv) > 1:
case = int(sys.argv[1])
else:
case = 1
ampfile = np.load('dns_input/case{}/eigencomps_fd_qbar.npz'.format(case))
eigamps = ampfile['amps']
qbar = ampfile['qbar']
suffix = '_uphavg'
usemin = False
# %% Get eigenfunctions
nky = 8
eigsolver = EigenvalueSolverFD(qbar)
eigs = [None]*nky
for ky in range(1,nky+1):
print(ky)
try:
eigs[ky-1] = np.load('scratch/case{}_eigsolver_ky{}.npz'.format(case, ky))
print("Loaded")
except:
print("Solving")
eigs[ky-1] = eigsolver.solveEigenfunctions(ky=ky, norm='action')
np.savez('scratch/case{}_eigsolver_ky{}.npz'.format(case, ky), **eigs[ky-1])
# %% Compute the coherence of the eigenfunctions
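# For each eigenmode and each lag (fitofs), fit the best one-step linear
# propagator a = <y conj(x)> / <|x|^2> and record the R^2 of that fit; a mode
# counts as coherent only if its worst lag still has high R^2 (min over lags).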
numofs = 64
rsqt = np.zeros((eigamps.shape[0], numofs, eigamps.shape[2]))
for i in range(numofs):
fitofs = i+1
x = eigamps[:,:-fitofs,:]
y = eigamps[:,fitofs:,:]
amat = np.sum(y * np.conj(x), axis=1) / np.sum(np.abs(x)**2, axis=1)
residuals = y - (x * amat[:,np.newaxis,:])
vartot = np.average(np.abs(y)**2, axis=1)
varresid = np.average(np.abs(residuals)**2, axis=1)
rsqt[:,i,:] = 1 - (varresid/vartot)
rsquaredall = np.min(rsqt, axis=1)
rsqinds = np.argsort(-rsquaredall, axis=None)
eigenergies = np.zeros((nky, 2048))
for ky in range(1,len(eigs)+1):
eigenergies[ky-1,:] = -np.sum(eigs[ky-1]['vpsi']*eigs[ky-1]['vr'], axis=0)
energies = np.average(np.abs(eigamps[:,:,:])**2, axis=1) * eigenergies
minenergies = np.min(np.abs(eigamps[:,:,:])**2, axis=1) * eigenergies
energyinds = np.argsort(-energies, axis=None)
minenergyinds = np.argsort(-minenergies, axis=None)
kyeig = np.zeros((eigamps.shape[0], eigamps.shape[2]), dtype=int)
kyeig[:] = np.arange(1,nky+1, dtype=int)[:,np.newaxis]
# This is a hacky way to estimate kx
kxeig = np.zeros((eigamps.shape[0], eigamps.shape[2]), dtype=int)
kxeig[:] = np.arange(0,eigamps.shape[2], dtype=int)[np.newaxis,:]
kxeig = (kxeig + 1) // 2
k2eig = kyeig*kyeig + kxeig*kxeig
scaleind = np.argsort(k2eig, axis=None, kind='stable')
uphs = np.array([eigs[ky-1]['w'] for ky in range(1,len(eigs)+1)])
uphinds = np.argsort(uphs, axis=None)
inds = uphinds
# %% Pick out the eigenfunctions we're going to use
if case == 1:
#numeigs = 8
eigstoadd = np.arange(5, dtype=int)
numeigs = len(eigstoadd)
else:
#numeigs = 36
#numeigs = 9
numeigs = 22
eigstoadd = np.arange(22, dtype=int)
rsquared_abovethreshold = np.ravel(rsquaredall)[inds[eigstoadd]] > 0.4
eigstoadd = eigstoadd[rsquared_abovethreshold]
numeigs = len(eigstoadd)
# This is the info that we need filled out
print("Number of eigenfunctions: ", numeigs)
print(eigstoadd)
psiv = np.zeros((numeigs, 2048))
amps = np.zeros(numeigs)
#fits = [None]*numeigs
expfreqs = np.zeros(numeigs)
expphases = np.zeros(numeigs)
rsquared = np.zeros(numeigs)
freqmult = np.zeros(numeigs, dtype=int)
eignums = np.zeros(numeigs, dtype=int)
kys = np.zeros(numeigs, dtype=int)
# This data is for plotting later
numsnaps = eigamps.shape[1]
mode0_phasedeviation = np.zeros(numsnaps)
eigenergies = np.zeros((numeigs, numsnaps))
# This is data for time-dependent deviations
phasedevs = np.zeros((numeigs, numsnaps))
ampdevs = np.ones((numeigs, numsnaps))
# Data for testing
freqmulttest = np.zeros(numeigs, dtype=int)
eigfreqs = np.zeros(numeigs)
dt = 0.25
t = np.linspace(0, 64, num=numsnaps, endpoint=True)
for i in range(numeigs):
eig = inds[eigstoadd[i]] % 2048
ky = inds[eigstoadd[i]] // 2048 + 1
kys[i] = ky
eignums[i] = eig
rsquared[i] = rsquaredall[ky-1, eig]
# Need to shift the phase of the amplitudes, since the FFT is in the domain [0,2pi]
# while the real space coordinates are in the domain [-pi,pi]
amp = eigamps[ky-1,:,eig] * (-1)**ky
# Normalization factor for irfft
if usemin:
amps[i] = np.sqrt(np.min(np.abs(amp)**2)) / 1024
else:
amps[i] = np.sqrt(np.average(np.abs(amp)**2)) / 1024
psiv[i,:] = np.real(eigs[ky-1]['vpsi'][:,eig])
fit = Polynomial.fit(t,np.unwrap(np.angle(amp)), deg=1).convert()
expfreqs[i] = fit.coef[1]
expphases[i] = fit.coef[0]
ampdevs[i,:] = (np.abs(amp)/1024) / amps[i]
phasedevs[i,:] = np.unwrap(np.angle(amp)) - expphases[i]
eigenergies[i,:] = np.abs(amp)**2 * -np.sum(eigs[ky-1]['vpsi'][:,eig]*eigs[ky-1]['vr'][:,eig])
if i == 0:
mode0_phasedeviation = np.unwrap(np.angle(amp)) - expfreqs[0]*t - expphases[0]
freqmulttest[i] = int(np.round(ky / k2eig[ky-1,eig] * 20))
eigfreqs[i] = eigs[ky-1]['w'][eig]*ky
# Set doppler shift
ind_dop = np.argmax(np.average(eigenergies, axis=1))
ky_dop = kys[ind_dop]
print("doppler mode ky={} eignum={}".format(ky_dop, eignums[ind_dop]))
dopplerc = expfreqs[ind_dop] / ky_dop
#dopplerc = 0
expfreqs = expfreqs - kys*dopplerc
def l1_dev(basefreq):
    # Energy-weighted deviation of the measured frequencies from the nearest
    # integer multiple of basefreq; each mode is weighted by (amp / ky)^2.
    totaldev = 0.0
    for i in range(numeigs):
        expfreq = expfreqs[i]
        amp = amps[i]
        ky = kys[i]
        fracparta = (expfreq / basefreq - np.round(expfreq / basefreq)) * basefreq
        totaldev = totaldev + np.abs(fracparta) * (amp / ky)**2
    return totaldev
freqsearchfunc = lambda x: l1_dev(-np.exp(x))
freqRes = scipy.optimize.minimize_scalar(l1_dev, bounds=(-0.90/2.0, -0.78/2.0), method='bounded')
basefreq = freqRes.x
#freqRes = scipy.optimize.minimize_scalar(freqsearchfunc, bounds=(np.log(0.06), np.log(0.08)), method='bounded')
#basefreq = -np.exp(freqRes.x)
#dopplerc = 2*basefreq
#expfreqs = expfreqs - kys*dopplerc
freqmult = np.array(list(map(int,np.round(expfreqs / basefreq))), dtype=int)
#freqmult = freqmulttest
phases = expphases
uph_exp = (expfreqs + kys*dopplerc) / kys
uph_fit = (freqmult*basefreq + kys*dopplerc) / kys
#print('residuals:', uph_fit-uph_exp)
# %% Output the input to the poincare section
print('saving: ' + 'poincare_input/case{}_poincare_config_fd_smooth{}.npz'.format(case, suffix))
savedata = { 'psiv': psiv, 'kys': kys, 'freqmult': freqmult, 'phases': phases, 'amps': amps, 'uy': eigsolver.uy, 'freq': freqRes.x, 'qbar': qbar, 'rsquared': rsquared, 'eignums': eignums, 'dopplerc': dopplerc }
np.savez('poincare_input/case{}_poincare_config_fd_smooth{}.npz'.format(case, suffix), **savedata)
np.savez('poincare_input/case{}_eigencomponent_timedata{}.npz'.format(case, suffix), ampdevs=ampdevs, phasedevs=phasedevs)
avgenergy = np.sum(np.average(eigenergies, axis=1))
timeenergies = np.sum(eigenergies, axis=0)
np.savez('plot_scripts/case{}_eigencomponent_extradata{}.npz'.format(case, suffix), mode0_phasedeviation=mode0_phasedeviation, energydeviation=timeenergies/avgenergy)
# %% Save data for the validation test
"""
x = np.linspace(-np.pi, np.pi, num=2048)
qbar = np.cos(5*x)
eigsolver = EigenvalueSolverFD(qbar)
kys = np.array([3, 4], dtype=np.int32)
freqmult = np.array([3, 4], dtype=np.int32)
# This is the info that we need filled out
numeigs = len(kys)
psiv = np.zeros((numeigs, 2048))
amps = np.zeros(numeigs)
#fits = [None]*numeigs
expphases = np.zeros(numeigs)
t = np.linspace(0, 64, num=257, endpoint=True)
for i in range(numeigs):
ky = kys[i]
# Normalization factor for irfft
amps[i] = 2 * np.pi
psiv[i,:] = -np.cos(np.round(np.sqrt(25-ky**2))*x) / 2 / np.pi / 25
expphases[i] = 0.0
phases = expphases
# %%
savedata = { 'psiv': psiv, 'kys': kys, 'freqmult': freqmult, 'phases': phases, 'amps': amps, 'uy': eigsolver.uy, 'freq': -8.0/25.0, 'qbar': qbar }
np.savez('poincare_input/poincare_config_validation.npz', **savedata)
"""
|
{"hexsha": "b8078f0f52c84b40c321e0d79bac2dfb70fa6573", "size": 7814, "ext": "py", "lang": "Python", "max_stars_repo_path": "config_generator_multisort.py", "max_stars_repo_name": "Maplenormandy/qg-edgeofchaos", "max_stars_repo_head_hexsha": "a01bba48ef765c2ca3df6dc3d4fbcb66a51dcd1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-11T23:35:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-11T23:35:02.000Z", "max_issues_repo_path": "config_generator_multisort.py", "max_issues_repo_name": "Maplenormandy/qg-edgeofchaos", "max_issues_repo_head_hexsha": "a01bba48ef765c2ca3df6dc3d4fbcb66a51dcd1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "config_generator_multisort.py", "max_forks_repo_name": "Maplenormandy/qg-edgeofchaos", "max_forks_repo_head_hexsha": "a01bba48ef765c2ca3df6dc3d4fbcb66a51dcd1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2093862816, "max_line_length": 210, "alphanum_fraction": 0.6670079345, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 2732}
|
"""
Random dummy dataset specification.
"""
# System
import math
from functools import partial
# Externals
import numpy as np
import tensorflow.compat.v1 as tf
from .cosmo import _augment_data
def _parse_data(x, y, shape, apply_log=False, seed=None, do_augmentation=False, dist=None):
if do_augmentation:
_augment_data(x, seed)
# get model parallel slice
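    # Each model-parallel rank keeps only its (depth, row) sub-volume of the
    # sample; the remaining spatial and channel axes are left intact.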
if dist:
dep_size = shape[0] // dist.model_parallel_size[0]
row_size = shape[1] // dist.model_parallel_size[1]
x = x[dep_size*(dist.model_parallel_rank[0]):dep_size*(dist.model_parallel_rank[0]+1),
row_size*(dist.model_parallel_rank[1]):row_size*(dist.model_parallel_rank[1]+1),
:,:]
shape = [dep_size, row_size] + shape[2:]
# convert to float
x = tf.cast(x, tf.float32)
# Data normalization/scaling
if apply_log:
# Take logarithm of the data spectrum
x = tf.math.log(x + tf.constant(1.))
else:
# Traditional mean normalization
x /= (tf.reduce_sum(x) / np.prod(shape))
return x, y
def construct_dataset(sample_shape, target_shape,
batch_size=1, n_samples=32,
prefetch=4, apply_log=False, seed=None, do_augmentation=False,
dist=None):
def data_fn():
x = tf.random.uniform([n_samples]+sample_shape, maxval=255, dtype=tf.int32) # int16 is not supported
y = tf.random.uniform([n_samples]+target_shape, minval=-1, maxval=1, dtype=tf.float32)
data = tf.data.Dataset.from_tensor_slices((x, y))
parse_data = partial(_parse_data, shape=sample_shape,
apply_log=apply_log, seed=seed, do_augmentation=do_augmentation, dist=dist)
data = data.map(parse_data, num_parallel_calls=4)
data = data.repeat().batch(batch_size)
data = data.prefetch(prefetch)
return data
return data_fn
def get_datasets(sample_shape, target_shape, batch_size,
n_train, n_valid, dist, n_epochs=None, shard=False, seed=-1,
prefetch=4, apply_log=False, do_augmentation=False):
train_dataset = construct_dataset(sample_shape, target_shape, batch_size=batch_size,
prefetch=prefetch, apply_log=apply_log,
do_augmentation=do_augmentation, dist=dist)
valid_dataset = None
if n_valid > 0:
valid_dataset = construct_dataset(sample_shape, target_shape, batch_size=batch_size,
prefetch=prefetch, apply_log=apply_log, dist=dist)
n_train_steps = n_train // batch_size
n_valid_steps = n_valid // batch_size
if shard:
n_train_steps = n_train_steps // dist.data_parallel_size
n_valid_steps = n_valid_steps // dist.data_parallel_size
return dict(train_dataset=train_dataset, valid_dataset=valid_dataset,
n_train=n_train, n_valid=n_valid, n_train_steps=n_train_steps,
n_valid_steps=n_valid_steps)
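# Usage sketch (illustrative; all shapes and counts below are placeholders,
# and dist=None with shard=False skips the distributed code paths):
#
#     data = get_datasets(sample_shape=[128, 128, 128, 4], target_shape=[4],
#                         batch_size=1, n_train=32, n_valid=8, dist=None,
#                         shard=False)
#     train_fn = data['train_dataset']  # call it to build the tf.data pipeline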
|
{"hexsha": "05d0bd75897a1f2d895cf366568468ab12743336", "size": 3050, "ext": "py", "lang": "Python", "max_stars_repo_path": "RIKEN/benchmarks/cosmoflow/implementations/implementation_fugaku_closed/cosmoflow-benchmark/data/dummy.py", "max_stars_repo_name": "bgerofi/hpc_results_v0.7", "max_stars_repo_head_hexsha": "9cd9fa80ebc57db8438b1ac8dbd2d49232da6c2e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-18T20:01:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T17:47:23.000Z", "max_issues_repo_path": "RIKEN/benchmarks/cosmoflow/implementations/implementation_fugaku_closed/cosmoflow-benchmark/data/dummy.py", "max_issues_repo_name": "bgerofi/hpc_results_v0.7", "max_issues_repo_head_hexsha": "9cd9fa80ebc57db8438b1ac8dbd2d49232da6c2e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-16T07:29:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T10:19:07.000Z", "max_forks_repo_path": "RIKEN/benchmarks/cosmoflow/implementations/implementation_fugaku_closed/cosmoflow-benchmark/data/dummy.py", "max_forks_repo_name": "bgerofi/hpc_results_v0.7", "max_forks_repo_head_hexsha": "9cd9fa80ebc57db8438b1ac8dbd2d49232da6c2e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-01-20T13:57:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-05T06:48:58.000Z", "avg_line_length": 38.125, "max_line_length": 108, "alphanum_fraction": 0.6472131148, "include": true, "reason": "import numpy", "num_tokens": 708}
|
[STATEMENT]
lemma parts_init_2:
"used s\<^sub>0 \<union> range (Hash \<circ> Agent) \<union> range (Hash \<circ> Auth_PubKey) \<union>
range (\<lambda>n. \<lbrace>Hash (Agent n), Hash (Auth_PubKey n)\<rbrace>) \<subseteq> parts (used s\<^sub>0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. used s\<^sub>0 \<union> range (Hash \<circ> Agent) \<union> range (Hash \<circ> Auth_PubKey) \<union> range (\<lambda>n. \<lbrace>Hash (Agent n), Hash (Auth_PubKey n)\<rbrace>) \<subseteq> parts (used s\<^sub>0)
[PROOF STEP]
by (rule subsetI, auto simp add: parts_insert)
|
{"llama_tokens": 209, "file": "Relational_Method_Authentication", "length": 1}
|
import numpy as np
from scipy.sparse import coo_matrix
import pickle
if __name__ == '__main__':
ids = np.array([
3052, 460,
3060, 3012,
5459, 5537,
5386, 5440,
5218, 5126,
5304, 6406,
4084, 6429,
595 , 2966,
605 , 2999,
1749, 1659,
1926, 1938,
2000, 2050,
3076, 3014,
1329, 3173,
3500, 3022,
1806, 3484,
4952, 4325,
4533, 4535,
6734, 6730,
6765, 6839,
1479, 837,
1047, 1050,
3334, 3331,
3365, 3439
    ], dtype=int).reshape(24,2)
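    # Each of the 24 joints is regressed as the midpoint of two mesh vertices:
    # row i of `ids` holds the two vertex indices that receive weight 0.5 each;
    # SMPL_format_converter below presumably reorders rows into SMPL joint order.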
dense = np.zeros((24, 6890), dtype=np.float64)
SMPL_format_converter = [15, 20, 16, 14, 21, 17, 13, 22, 18, 12, 23,
19, 1, 7, 6, 0, 8, 5, 9, 4, 10, 3, 11, 2]
ids = ids[SMPL_format_converter]
for i in range(24):
dense[i, ids[i][0]] = 0.5
dense[i, ids[i][1]] = 0.5
_24_joint_regressor = coo_matrix(dense)
with open('./model.pkl', 'rb') as rf:
params = pickle.load(rf)
params['joint_regressor'] = _24_joint_regressor
with open('./model_24_joints.pkl', 'wb') as wf:
pickle.dump(params, wf)
|
{"hexsha": "1ffdf606378a58d0485e36bbbae4d34a704ff03e", "size": 1154, "ext": "py", "lang": "Python", "max_stars_repo_path": "create_joint_regressor.py", "max_stars_repo_name": "Lotayou/SMPL", "max_stars_repo_head_hexsha": "6c00487b846ddf635bc782df19461c42bc1150e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2019-02-25T06:26:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T03:50:24.000Z", "max_issues_repo_path": "create_joint_regressor.py", "max_issues_repo_name": "Lotayou/SMPL", "max_issues_repo_head_hexsha": "6c00487b846ddf635bc782df19461c42bc1150e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-09T09:24:04.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-09T09:24:04.000Z", "max_forks_repo_path": "create_joint_regressor.py", "max_forks_repo_name": "Lotayou/SMPL", "max_forks_repo_head_hexsha": "6c00487b846ddf635bc782df19461c42bc1150e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-05-15T08:34:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T02:37:22.000Z", "avg_line_length": 22.1923076923, "max_line_length": 73, "alphanum_fraction": 0.5329289428, "include": true, "reason": "import numpy,from scipy", "num_tokens": 480}
|
module BitBlit2D
include("geometry/Geometry.jl")
include("graphics/Graphics.jl")
end
|
{"hexsha": "125484067e80dde6b9807a57801a953d11d2b3d6", "size": 87, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/BitBlit2D.jl", "max_stars_repo_name": "ordovician/BitBlit2D.jl", "max_stars_repo_head_hexsha": "cb2402a5a7fd9110331507cbf72855c9a7cf6a4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-22T19:50:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-22T19:50:14.000Z", "max_issues_repo_path": "src/BitBlit2D.jl", "max_issues_repo_name": "ordovician/BitBlit2D.jl", "max_issues_repo_head_hexsha": "cb2402a5a7fd9110331507cbf72855c9a7cf6a4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/BitBlit2D.jl", "max_forks_repo_name": "ordovician/BitBlit2D.jl", "max_forks_repo_head_hexsha": "cb2402a5a7fd9110331507cbf72855c9a7cf6a4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.4285714286, "max_line_length": 31, "alphanum_fraction": 0.7816091954, "num_tokens": 24}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Created: 07-2020 - Carmelo Mordini <carmelo> <carmelo.mordini@unitn.it>
"""Module docstring
"""
import numpy as np
from scipy.ndimage.interpolation import shift
from skimage.transform import rotate
def lsq_match(a, b, mask):
"""
beta is best lstsq coefficient for
a - beta * b
so that b matches a when multiplied by beta
"""
mask = np.where(mask)
a = a.copy()[mask]
b = b.copy()[mask]
beta = (a @ b) / (b @ b)
return beta
def probe_correction_beta(atoms, probe, mask, verbose=True):
mask = mask.astype(bool)
beta = lsq_match(atoms, probe, mask)
if verbose:
print(f"probe matched to atoms with beta = {beta:.6f}")
probe_correct = probe * beta
return probe_correct
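# Usage sketch (illustrative; the image and mask names are placeholders):
# scale a probe frame so it matches the atoms frame on an atom-free region.
#
#     probe_matched = probe_correction_beta(atoms_img, probe_img, bg_mask)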
def center_and_trim(od, roi, center, angle=None, verbose=True):
"""
center and rotate an image wrt to a given roi, then crop
Params:
-------
od: the image
roi: tuple of slices or np.index_exp. If None, the image will not be cropped
center: (mx, my) wrt the non-cropped image
angle: clock(?)wise angle. Rotation is skipped by default (angle = None)
Returns:
--------
od1: the image after shifting, rotation and crop
"""
if roi is None:
h, w = od.shape
roi = (slice(0, h), slice(0, w))
# mx, my = center
# C0 = mx - roi[1].start, my - roi[0].start
C0 = center
C1 = 0.5 * (roi[1].start + roi[1].stop), 0.5 * (roi[0].start + roi[0].stop)
center = C0
# shift in y, x
dxy = C1[1] - C0[1], C1[0] - C0[0]
if verbose:
print(f"Shift by {dxy}")
od = shift(od, dxy, mode='wrap')
center = C1
if angle:
if verbose:
print(f"Rotate image by an angle {angle}")
od = rotate(od, -angle, resize=False, center=center, mode='wrap',
order=3, preserve_range=True,)
od = od[roi]
# shape = od.shape
# assert shape[0] % 2 == 1
# assert shape[1] % 2 == 1
return od
|
{"hexsha": "00dbeca78ae1390bfbd9ad8990b29211c282603a", "size": 2048, "ext": "py", "lang": "Python", "max_stars_repo_path": "becpy/imaging/correction.py", "max_stars_repo_name": "BEC-Trento/becpy", "max_stars_repo_head_hexsha": "9094052cc856fe1b533d933a42a86398fc77591c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "becpy/imaging/correction.py", "max_issues_repo_name": "BEC-Trento/becpy", "max_issues_repo_head_hexsha": "9094052cc856fe1b533d933a42a86398fc77591c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "becpy/imaging/correction.py", "max_forks_repo_name": "BEC-Trento/becpy", "max_forks_repo_head_hexsha": "9094052cc856fe1b533d933a42a86398fc77591c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-23T22:20:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T22:20:56.000Z", "avg_line_length": 24.9756097561, "max_line_length": 84, "alphanum_fraction": 0.5766601562, "include": true, "reason": "import numpy,from scipy", "num_tokens": 616}
|
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
import xmlrpclib
import socket
import fcntl
import struct
import multiprocessing
import numpy as np
import sys
import json
import time
#import mkl
import os
#mkl.set_num_threads(1)
configs = json.load(open('/home/zhifeng/apps/static/config/config.json'))
CHUNKS = configs['chunks']
replicasTracking = [100]
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
# Register an instance; all the methods of the instance are
# published as XML-RPC methods (in this case, just 'div').
class MyFuncs:
def __init__(self, token, mvToken):
self.token = token
self.mvToken = mvToken
self.rows = None
self.lengths = None
self.replicas = None
self.isReleased = False
def accept_matrix(self, rows, lengths, replicas=1):
self.rows = rows
self.lengths = lengths
self.replicas = replicas
self.start()
return
def start(self):
self.token.clear()
self.token.set()
return
def retrieve_matrix(self):
return self.rows, self.lengths, self.replicas
def release(self):
self.isReleased = True
self.mvToken.clear()
self.mvToken.set()
return
def is_released(self):
ret = self.isReleased
self.isReleased = False
return ret
class SlaveServerProcess(multiprocessing.Process):
def __init__(self, myIP, myPortNum, token, mvToken):
multiprocessing.Process.__init__(self)
#self.setDaemon(True)
self.daemon = True
self.server = SimpleXMLRPCServer((myIP, int(myPortNum)),
requestHandler=RequestHandler, allow_none=True)
self.server.register_introspection_functions()
myFuncs = MyFuncs(token, mvToken)
self.funcs = myFuncs
self.server.register_instance(myFuncs)
def run(self):
self.server.serve_forever()
def main():
if len(sys.argv) < 3:
print 'incorrect number of arguments'
print 'please provide id of the slave server, N/T to indicate transpose or not'
sys.exit(-1)
ID = sys.argv[1]
TP = sys.argv[2]
matrixMulKernelSlave(ID, TP)
def matrixMultiply(matrix, vector, rows, lengths, replicas, mvToken, timeTotal):
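    # Performs the extra (replica) passes of the multiplication for this
    # slave's chunks (most results are discarded), updates the
    # replicasTracking bookkeeping, and signals completion via mvToken.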
chunks = CHUNKS
product = None
print 'I am doing extra work'
print 'one replica takes %s seconds' % str(timeTotal)
for chunk in range(chunks):
row = rows[chunk]
length = lengths[chunk]
replicasTracking[0] = replicas[chunk]
for i in range(1, replicas[chunk]):
# time.sleep(timeTotal[chunk])
replicasTracking[0] -= 1
wasteFile = open('wasteSlave.out', 'a')
wasteFile.write(str(replicasTracking[0]) + '\n')
wasteFile.close()
if not chunk:
product = np.zeros((length, 1))
for rIdx in range(length):
try:
product[rIdx] = matrix[(row+rIdx), :].dot(vector)
except:
print 'race condition!!!'
break
#product = matrix[row : (row+length), :].dot(vector)
elif not i:
product = np.vstack((product, matrix[row : (row+length), :].dot(vector)))
else:
matrix[row : (row+length), :].dot(vector)
print replicas[chunk]
mvToken.clear()
mvToken.set()
def matrixMulKernelSlave(ID, TP, vector=None, matrix=None):
configs = json.load(open('/home/zhifeng/apps/static/config/config.json'))
master = configs['masterConfigs']['IP'] + ':' + configs['masterConfigs']['PortNum']
#myIP = configs['slaveConfigs']['slave' + ID]['IP']
myIP = get_ip_address('eth0')
myPortNum = configs['slaveConfigs']['slave' + ID]['PortNum']
execTimes = configs['execTimes']
# Create server
token = multiprocessing.Event()
mvToken = multiprocessing.Event()
if matrix is None:
start_time = time.time()
matrix = np.loadtxt('/home/zhifeng/apps/static/data/partition%s.mat' % ID, dtype=int)
matrixTP = np.loadtxt('/home/zhifeng/apps/static/data/partition%s_tp.mat' % ID, dtype=int)
load_time = time.time() - start_time
f = open('/home/zhifeng/apps/static/data/slaveLoadTime%s.out' % ID, 'w')
f.write(str(load_time)+'\n')
f.close()
matrices = (matrix, matrixTP)
server_process = SlaveServerProcess(myIP, myPortNum, token, mvToken)
server_process.start()
print 'starting slave server process %d...' % server_process.pid
localProxy = xmlrpclib.ServerProxy('http://' + myIP + ':' + myPortNum, allow_none=True)
masterProxy = xmlrpclib.ServerProxy('http://' + master, allow_none=True)
chunks = CHUNKS
idx = 0
ownReplicas = np.array([100]*1000)
print ownReplicas.min(), ownReplicas.max()
np.savetxt('/home/zhifeng/apps/static/data/replicas%s.out' % ID, ownReplicas.reshape(-1,1), fmt='%f')
rIndex = 0
while True:
try:
masterProxy.slave_ready(ID)
break
except:
print("master did not start/accept ACK.")
time.sleep(1)
pass
index = 0
while True:
index += 1
matrix = matrices[idx % 2]
idx += 1
product = None
timeTotal = []
for chunk in range(chunks):
if not chunk:
token.wait()
token.clear()
mvToken.clear()
localProxy.is_released()
vector = None
while vector is None or vector.shape[0] != matrix.shape[1]:
try:
vector = np.random.rand(matrix.shape[1], 1)
except:
print 'race condition due to NFS'
rows, lengths, replicas = localProxy.retrieve_matrix()
for j in range(len(replicas)):
replicas[j] = int(ownReplicas[rIndex])
rIndex += 1
if rIndex >= ownReplicas.shape[0]:
rIndex = 0
#print 'slave' + ID +': get my share of data and start to compute'
start_time = time.time()
row = rows[chunk]
length = lengths[chunk]
timeB = time.time()
if not chunk:
product = np.zeros((length, 1))
for rIdx in range(length):
try:
product[rIdx] = matrix[(row+rIdx), :].dot(vector)
except:
print 'super slow causing race condition!!'
localProxy.release()
break
#product = matrix[row : (row+length), :].dot(vector)
else:
product = np.vstack((product, matrix[row : (row+length), :].dot(vector)))
timeE = time.time()
timeTotal.append(timeE - timeB)
if not localProxy.is_released():
mv = multiprocessing.Process(target=matrixMultiply, args=(matrix, vector, rows, lengths, replicas, mvToken, timeTotal))
mv.start()
mvToken.wait()
mvToken.clear()
mv.terminate()
end_time = time.time()
print 'slave' + ID + ': time to compute: %f' % (end_time - start_time)
compTime = end_time - start_time
start_time = end_time
#if not masterProxy.checkDone():
if not localProxy.is_released():
resultFile = open('resultSlave%s.out' % ID, 'a')
resultFile.write(str(index)+', ')
resultFile.write(str(compTime)+'\n')
resultFile.close()
productFile = ''
if TP == 'N':
productFile = '/home/zhifeng/apps/static/data/product%s.mat' % ID
else:
productFile = '/home/zhifeng/apps/static/data/product%s_tp.mat' % ID
np.savetxt(productFile, product)
#cmd = 'scp %s master:%s' % (productFile, productFile)
masterIP = configs['masterConfigs']['IP']
cmd = "scp -P 5000 %s %s:%s" % (productFile, masterIP, productFile)
os.system(cmd)
end_time = time.time()
#print 'slave' + ID + ': time to "send" result: %f' % (end_time - start_time)
masterProxy.accept_product(productFile, 'slave' + ID)
else:
print 'slave'+ ID + ': I am too slow and the master has what it needs'
server_process.terminate()
if __name__ == '__main__':
main()
|
{"hexsha": "63482a999bb988096ed7ffd84a2bbda8ade4ec24", "size": 9029, "ext": "py", "lang": "Python", "max_stars_repo_path": "coded_computation_docker/apps/static/SlaveServer.py", "max_stars_repo_name": "krishnagirinarra/S2C2", "max_stars_repo_head_hexsha": "e844e01afff423643830e992dbc8578434c299e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "coded_computation_docker/apps/static/SlaveServer.py", "max_issues_repo_name": "krishnagirinarra/S2C2", "max_issues_repo_head_hexsha": "e844e01afff423643830e992dbc8578434c299e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "coded_computation_docker/apps/static/SlaveServer.py", "max_forks_repo_name": "krishnagirinarra/S2C2", "max_forks_repo_head_hexsha": "e844e01afff423643830e992dbc8578434c299e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4078431373, "max_line_length": 131, "alphanum_fraction": 0.5667294274, "include": true, "reason": "import numpy", "num_tokens": 2115}
|
%RT2TR Convert rotation and translation to homogeneous transform
%
% TR = RT2TR(R, t) is a homogeneous transformation matrix (MxM) formed from an
% orthonormal rotation matrix R (NxN) and a translation vector t (Nx1) where
% M=N+1.
%
% For a sequence R (NxNxK) and t (NxK) results in a transform sequence (MxMxK).
%
% Notes::
% - Works for R in SO(2) or SO(3)
% - If R is 2x2 and t is 2x1, then TR is 3x3
% - If R is 3x3 and t is 3x1, then TR is 4x4
% - The validity of R is not checked
%
% See also T2R, R2T, TR2RT.
% Copyright (C) 1993-2015, by Peter I. Corke
%
% This file is part of The Robotics Toolbox for MATLAB (RTB).
%
% RTB is free software: you can redistribute it and/or modify
% it under the terms of the GNU Lesser General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% RTB is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU Lesser General Public License for more details.
%
% You should have received a copy of the GNU Leser General Public License
% along with RTB. If not, see <http://www.gnu.org/licenses/>.
%
% http://www.petercorke.com
function T = rt2tr(R, t)
if numcols(R) ~= numrows(R)
error('R must be square');
end
if numrows(R) ~= numrows(t)
error('R and t must have the same number of rows');
end
if size(R,3) ~= numcols(t)
error('For sequence size(R,3) must equal size(t,2)');
end
if size(R,3) > 1
Z = zeros(numcols(R),1);
B = [Z' 1];
T = zeros(4,4,size(R,3));
for i=1:size(R,3)
T(:,:,i) = [R(:,:,i) t(:,i); B];
end
else
T = [R t; zeros(1,numcols(R)) 1];
end
|
{"author": "Allopart", "repo": "rbpf-gmapping", "sha": "affe0adc25fa446fc7af4902d699d92864bdba1b", "save_path": "github-repos/MATLAB/Allopart-rbpf-gmapping", "path": "github-repos/MATLAB/Allopart-rbpf-gmapping/rbpf-gmapping-affe0adc25fa446fc7af4902d699d92864bdba1b/rvctools/robot/rt2tr.m"}
|
#!/usr/bin/env python
"""
uncertainty_sampler.py
"""
import sys
import h5py
from datetime import datetime
import numpy as np
from libact.base.dataset import Dataset
from libact.models import LinearSVC
from libact.query_strategies import UncertaintySampling
class UncertaintySampler(object):
def __init__(self, X, y, labs, n=2):
y = [yy if yy >= 0 else None for yy in y]
self.dataset = Dataset(X, y)
self.labs = labs
self.uc = UncertaintySampling(self.dataset, method='lc', model=LinearSVC())
self.n = n
def get_next(self):
print >> sys.stderr, 'get_next: start'
out = self.uc.make_query(n=self.n)
print >> sys.stderr, 'get_next: done'
return out
def set_label(self, idx, label):
print >> sys.stderr, 'set_label: start'
out = self.dataset.update(idx, label)
print >> sys.stderr, 'set_label: done'
return out
def get_data(self):
X, y = zip(*self.dataset.get_entries())
X, y = np.vstack(X), np.array([yy if yy is not None else -1 for yy in y])
return X, y
def n_hits(self):
labels = np.array(zip(*self.dataset.get_entries())[1])
return (labels == 1).sum()
def n_labeled(self):
return self.dataset.len_labeled()
    def is_labeled(self, idx):
        labels = zip(*self.dataset.get_entries())[1]
        return labels[idx] is not None
def save(self, outpath):
""" !! This should be updated to save in same format as simple_las """
X, y = self.get_data()
        f = h5py.File('%s-%s-%s.h5' % (outpath, 'uncertainty', datetime.now().strftime('%Y%m%d_%H%M%S')), 'w')
f['X'] = X
f['y'] = y
f['labs'] = self.labs
f.close()
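# Usage sketch (illustrative; X, y, labs are placeholders and the return
# shape of get_next follows this wrapper's make_query(n=...) call):
#
#     sampler = UncertaintySampler(X, y, labs, n=2)
#     queried = sampler.get_next()        # indices the model is least sure of
#     sampler.set_label(queried[0], 1)    # feed a human label back in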
|
{"hexsha": "fe0e7b1bc9455c38d72696abed3d34c4e074348b", "size": 1798, "ext": "py", "lang": "Python", "max_stars_repo_path": "tagless/uncertainty_sampler.py", "max_stars_repo_name": "jgawrilo/tagless", "max_stars_repo_head_hexsha": "0efacb10ad51eb0c54876cd60405aeee27ad98e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-17T06:01:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-20T02:47:58.000Z", "max_issues_repo_path": "tagless/uncertainty_sampler.py", "max_issues_repo_name": "jgawrilo/tagless", "max_issues_repo_head_hexsha": "0efacb10ad51eb0c54876cd60405aeee27ad98e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-08-28T15:16:50.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-05T12:43:41.000Z", "max_forks_repo_path": "tagless/uncertainty_sampler.py", "max_forks_repo_name": "jgawrilo/tagless", "max_forks_repo_head_hexsha": "0efacb10ad51eb0c54876cd60405aeee27ad98e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-07-05T23:57:27.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-05T23:57:27.000Z", "avg_line_length": 27.6615384615, "max_line_length": 105, "alphanum_fraction": 0.5778642937, "include": true, "reason": "import numpy", "num_tokens": 464}
|
using Test, Random, LinearAlgebra, SparseArrays
using Yao
using QuAlgorithmZoo
@testset "RotBasis" begin
rt = RotBasis(0.5, 0.4)
crt = chain(rt)
dispatch!(crt, [2., 3.])
@test nparameters(crt) == 2
for (t1, t2, t3) in zip(parameters(rt), (2, 3), parameters(crt))
@test t1 == t2 == t3
end
# check consistency
    rb = roll(1, RotBasis(0.1, 0.3))  # rot_basis(1)
    angles = randpolar(1)
    # prepare a state in the angles direction.
psi = angles |> polar2u |> register
# rotate to the same direction for measurements.
dispatch!(rb, vec(angles))
@test state(apply!(psi, rb)) ≈ [1, 0]
@test nparameters(rot_basis(3)) == 6
dispatch!(rb, :zero)
@test parameters(rb)[1] == 0
dispatch!(rb, :random)
@test parameters(rb)[1] != 0
end
@testset "polar and u" begin
polar = randpolar(10)
@test size(polar) == (2, 10)
@test polar |> polar2u |> u2polar ≈ polar
end
|
{"hexsha": "6ad3ed6ea847864dd9c7ed8cbc5a65bc80916dd3", "size": 942, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/RotBasis.jl", "max_stars_repo_name": "dgan181/QuAlgorithmZoo.jl", "max_stars_repo_head_hexsha": "071b02aaee0e1a035e9166a048a0273338788477", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-31T21:14:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T11:01:13.000Z", "max_issues_repo_path": "test/RotBasis.jl", "max_issues_repo_name": "dgan181/QuAlgorithmZoo.jl", "max_issues_repo_head_hexsha": "071b02aaee0e1a035e9166a048a0273338788477", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/RotBasis.jl", "max_forks_repo_name": "dgan181/QuAlgorithmZoo.jl", "max_forks_repo_head_hexsha": "071b02aaee0e1a035e9166a048a0273338788477", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1538461538, "max_line_length": 68, "alphanum_fraction": 0.6082802548, "num_tokens": 327}
|
/********************************************************************
*
* Software License Agreement (BSD License)
*
* Copyright (c) 2012, Max-Planck-Gesellschaft
* Copyright (c) 2012-2015, Inria
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
********************************************************************/
#ifndef GENERICSUBSCRIBER_HPP_
#define GENERICSUBSCRIBER_HPP_
#include <boost/thread/mutex.hpp>
#include <semaphore.h>
//#include <sys/sem.h>
//#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <algorithm>  // std::replace (used by generateSemaphoreName)
#include <string>
#include <ros/node_handle.h>
#ifndef GENERATESEMAPHORENAME
#define GENERATESEMAPHORENAME
#endif
class GenericSubscriber_base {
public:
  // Drain the semaphore's value back down to zero without blocking.
  static
  void resetSemaphore(sem_t* sem){
int valp;
sem_getvalue(sem, &valp);
while ( valp > 0){
sem_wait(sem);
sem_getvalue(sem, &valp);
}
}
static
std::string generateSemaphoreName(std::string const & topicName) {
std::string semName(topicName);
if (topicName[0] == '/'){
semName.erase(0,1);
}
std::replace(semName.begin(), semName.end(), '/', '_');
semName = "slk." + semName;
return semName;
}
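  // Example (illustrative topic name): "/robot/pose" becomes the semaphore
  // name "slk.robot_pose", which appears on Linux as /dev/shm/sem.slk.robot_pose.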
};
template <class T_>
class GenericSubscriber : public GenericSubscriber_base {
protected:
boost::shared_ptr<T_ const> lastMsg;
mutable boost::mutex lastMsgLock;
const std::string semName;
sem_t * newMsg;
// Subscriber
ros::Subscriber sub;
void msgCallback(boost::shared_ptr<T_ const> msg) {
boost::mutex::scoped_lock lock(lastMsgLock);
lastMsg = msg;
// sem_post(newMsg);
}
public:
GenericSubscriber(ros::NodeHandle handle, const std::string& topicName, int queue_size) :
semName(generateSemaphoreName(topicName)),
newMsg(sem_open(semName.c_str(), O_CREAT, 0644, 0)),
sub(handle.subscribe(topicName, queue_size, &GenericSubscriber<T_>::msgCallback, this)){
// std::cout << "Opening semaphore in " << __func__ << " in file /dev/shm/sem." << semName << std::endl;
if (newMsg == SEM_FAILED){
//TODO: ssSetErrorStatus(S, strerror(errno));
// std::cout << " returned SEM_FAILED with error: " << strerror(errno);
}
}
virtual ~GenericSubscriber() {
boost::mutex::scoped_lock lock(lastMsgLock);
// lastMsg = boost::shared_ptr<T_ const> (new T_);
sem_close(newMsg);
sem_unlink(semName.c_str());
}
boost::shared_ptr<T_ const> getLastMsg() const {
boost::mutex::scoped_lock lock(lastMsgLock);
return lastMsg;
}
};
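// Usage sketch (the topic name and message type are illustrative, not
// mandated by this header):
//
//   ros::NodeHandle nh;
//   GenericSubscriber<std_msgs::String> sub(nh, "/chatter", 1);
//   ros::spinOnce();  // give the callback a chance to store a message
//   boost::shared_ptr<std_msgs::String const> msg = sub.getLastMsg();
//   if (msg) { /* safe: getLastMsg() copies the pointer under the mutex */ }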
#endif /* GENERICSUBSCRIBER_HPP_ */
|
{"hexsha": "4ac5a3a53b07470d3e12810a24f46b09442d9cc3", "size": 4061, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "matlab_ros_bridge/include/matlab_ros_bridge/GenericSubscriber.hpp", "max_stars_repo_name": "fabrizioschiano/matlab_ros_bridge", "max_stars_repo_head_hexsha": "e1def15741ba23942aaa8f7ad953baaa204b226a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "matlab_ros_bridge/include/matlab_ros_bridge/GenericSubscriber.hpp", "max_issues_repo_name": "fabrizioschiano/matlab_ros_bridge", "max_issues_repo_head_hexsha": "e1def15741ba23942aaa8f7ad953baaa204b226a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matlab_ros_bridge/include/matlab_ros_bridge/GenericSubscriber.hpp", "max_forks_repo_name": "fabrizioschiano/matlab_ros_bridge", "max_forks_repo_head_hexsha": "e1def15741ba23942aaa8f7ad953baaa204b226a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0, "max_line_length": 108, "alphanum_fraction": 0.6727407043, "num_tokens": 946}
|
import json
import os
from collections import defaultdict
import networkx as nx
import pandas as pd
import stanza
from networkx.algorithms.isomorphism import DiGraphMatcher
from sklearn.metrics import precision_recall_fscore_support
from tqdm import tqdm
from tuw_nlp.grammar.text_to_4lang import TextTo4lang
from tuw_nlp.graph.utils import (
GraphFormulaMatcher,
)
from tuw_nlp.text.pipeline import CachedStanzaPipeline
from xpotato.dataset.utils import default_pn_to_graph, ud_to_graph, amr_pn_to_graph
class GraphExtractor:
def __init__(self, cache_dir=None, cache_fn=None, lang=None):
self.cache_dir = cache_dir
self.cache_fn = cache_fn
self.lang = lang
self.nlp = None
self.matcher = None
self.amr_stog = None
def init_amr(self):
        if self.amr_stog is None:
import amrlib
self.amr_stog = amrlib.load_stog_model()
def init_nlp(self):
        if self.nlp is None:
if self.lang == "en_bio":
nlp = stanza.Pipeline("en", package="craft")
else:
nlp = stanza.Pipeline(self.lang)
self.nlp = CachedStanzaPipeline(nlp, self.cache_fn)
def parse_iterable(self, iterable, graph_type="fourlang"):
if graph_type == "fourlang":
with TextTo4lang(
lang=self.lang, nlp_cache=self.cache_fn, cache_dir=self.cache_dir
) as tfl:
for sen in tqdm(iterable):
fl_graphs = list(tfl(sen))
g = fl_graphs[0]
for n in fl_graphs[1:]:
g = nx.compose(g, n)
yield g
elif graph_type == "ud":
self.init_nlp()
for sen in tqdm(iterable):
doc = self.nlp(sen)
g, _ = ud_to_graph(doc.sentences[0])
for doc_sen in doc.sentences[1:]:
n, _ = ud_to_graph(doc_sen)
g = nx.compose(g, n)
yield g
elif graph_type == "amr":
self.init_amr()
for sen in tqdm(iterable):
                graphs = self.amr_stog.parse_sents([sen])
g, _ = amr_pn_to_graph(graphs[0])
yield g
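# Usage sketch (the sentence, language and cache filename are illustrative):
#
#   extractor = GraphExtractor(lang="en", cache_fn="stanza_cache")
#   for g in extractor.parse_iterable(["The dog barked."], graph_type="ud"):
#       print(g.nodes(data=True))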
class FeatureEvaluator:
def __init__(self, graph_format="ud"):
self.graph_format = graph_format
def match_features(self, dataset, features):
graphs = dataset.graph.tolist()
matches = []
predicted = []
matcher = GraphFormulaMatcher(features, converter=default_pn_to_graph)
for i, g in tqdm(enumerate(graphs)):
feats = matcher.match(g)
for key, feature in feats:
matches.append(features[feature])
predicted.append(key)
break
            else:
                # for/else: executed when no feature matched this graph
                matches.append("")
                predicted.append("")
d = {
"Sentence": dataset.text.tolist(),
"Predicted label": predicted,
"Matched rule": matches,
}
df = pd.DataFrame(d)
return df
def one_versus_rest(self, df, entity):
mapper = {entity: 1}
one_versus_rest_df = df.copy()
one_versus_rest_df["one_versus_rest"] = [
mapper[item] if item in mapper else 0 for item in df.label
]
return one_versus_rest_df
def rank_features(self, cl, features, orig_data, false_negatives):
if false_negatives:
subset_data = orig_data.iloc[false_negatives]
else:
subset_data = orig_data
df, accuracy = self.evaluate_feature(cl, features, subset_data)
features_stat = []
for i, feature in enumerate(features):
features_stat.append(
(
feature,
df.iloc[i].Precision,
df.iloc[i].Recall,
df.iloc[i].Fscore,
df.iloc[i].Support,
len(df.iloc[i].True_positive_sens),
len(df.iloc[i].False_positive_sens),
)
)
def rank(feature):
return len(df.iloc[features.index(feature[0])].True_positive_graphs)
return sorted(features_stat, key=rank, reverse=True)
def train_feature(self, cl, feature, data, graph_format="ud"):
feature_graph = default_pn_to_graph(feature)[0]
graphs = data.graph.tolist()
labels = self.one_versus_rest(data, cl).one_versus_rest.tolist()
path = "trained_features.tsv"
trained_features = []
with open(path, "w+") as f:
for i, g in enumerate(graphs):
matcher = DiGraphMatcher(
g,
feature_graph,
node_match=GraphFormulaMatcher.node_matcher,
edge_match=GraphFormulaMatcher.edge_matcher,
)
if matcher.subgraph_is_isomorphic():
for iso_pairs in matcher.subgraph_isomorphisms_iter():
nodes = []
for k in iso_pairs:
if feature_graph.nodes[iso_pairs[k]]["name"] == ".*":
nodes.append(g.nodes[k]["name"])
if not nodes:
g2_to_g1 = {v: u for (u, v) in iso_pairs.items()}
for u, v, attrs in feature_graph.edges(data=True):
if attrs["color"] == ".*":
edge = g.get_edge_data(g2_to_g1[u], g2_to_g1[v])[
"color"
]
nodes.append(edge)
nodes_str = ",".join(nodes)
label = labels[i]
sentence = data.iloc[i].text
f.write(f"{feature}\t{nodes_str}\t{sentence}\t{label}\n")
trained_features.append(
(feature, nodes_str, sentence, str(label))
)
return self.cluster_feature(trained_features)
def cluster_feature(self, trained_features):
graphs = {}
        if os.path.isfile("longman_zero_paths_one_exp.json"):
            with open("longman_zero_paths_one_exp.json") as f:
                graphs = json.load(f)
words = {}
for fields in trained_features:
words[fields[1] + "_" + fields[3]] = int(fields[3])
feature = fields[0]
graph = nx.MultiDiGraph()
for word in words:
if words[word] == 1:
color = "green"
else:
color = "red"
graph.add_node(word, color=color)
word_clean = word.split("_")[0]
if word_clean in graphs:
hypernyms = graphs[word_clean]
for hypernym in hypernyms:
hypernym_words = hypernyms[hypernym]
for w in hypernym_words:
if hypernym == "1":
graph.add_edge(word, w, color=hypernym)
selected_words = self.select_words(trained_features)
word_features = []
word_features.append(feature.replace(".*", "|".join(selected_words)))
return word_features
def select_words(self, trained_features):
features = []
labels = []
for fields in trained_features:
features.append(fields[1])
labels.append(int(fields[3]))
words_to_measures = {
word: {"TP": 0, "FP": 0, "TN": 0, "FN": 0} for word in set(features)
}
for word in words_to_measures:
for i, label in enumerate(labels):
if label and features[i] == word:
words_to_measures[word]["TP"] += 1
if label and features[i] != word:
words_to_measures[word]["FN"] += 1
if not label and features[i] == word:
words_to_measures[word]["FP"] += 1
if not label and features[i] != word:
words_to_measures[word]["TN"] += 1
for word in words_to_measures:
TP = words_to_measures[word]["TP"]
FP = words_to_measures[word]["FP"]
TN = words_to_measures[word]["TN"]
FN = words_to_measures[word]["FN"]
            # Guard against zero denominators (e.g. no positive labels at all).
            precision = TP / (TP + FP) if TP + FP else 0.0
            recall = TP / (TP + FN) if TP + FN else 0.0
words_to_measures[word]["precision"] = precision
words_to_measures[word]["recall"] = recall
selected_words = set()
for word in words_to_measures:
if words_to_measures[word]["precision"] > 0.9 and (
words_to_measures[word]["TP"] > 1
or words_to_measures[word]["recall"] > 0.01
):
selected_words.add(word)
return selected_words
def evaluate_feature(self, cl, features, data, graph_format="ud"):
measure_features = []
graphs = data.graph.tolist()
labels = self.one_versus_rest(data, cl).one_versus_rest.tolist()
whole_predicted = []
matched = defaultdict(list)
        # We want to view false negative examples for all rules combined, not per rule
false_neg_g = []
false_neg_s = []
false_neg_indices = []
matcher = GraphFormulaMatcher(features, converter=default_pn_to_graph)
for i, g in enumerate(graphs):
feats = matcher.match(g)
label = 0
for key, feature in feats:
matched[i].append(features[feature][0])
label = 1
whole_predicted.append(label)
if label == 0 and labels[i] == 1:
false_neg_g.append(g)
sen = data.iloc[i].text
lab = data.iloc[i].label
false_neg_s.append((sen, lab))
false_neg_indices.append(i)
accuracy = []
for pcf in precision_recall_fscore_support(
labels, whole_predicted, average=None
):
if len(pcf) > 1:
accuracy.append(pcf[1])
else:
accuracy.append(0)
for feat in features:
measure = [feat[0]]
false_pos_g = []
false_pos_s = []
false_pos_indices = []
true_pos_g = []
true_pos_s = []
true_pos_indices = []
predicted = []
for i, g in enumerate(graphs):
feats = matched[i]
label = 1 if feat[0] in feats else 0
if label == 1 and labels[i] == 0:
false_pos_g.append(g)
sen = data.iloc[i].text
lab = data.iloc[i].label
false_pos_s.append((sen, lab))
false_pos_indices.append(i)
if label == 1 and labels[i] == 1:
true_pos_g.append(g)
sen = data.iloc[i].text
lab = data.iloc[i].label
true_pos_s.append((sen, lab))
true_pos_indices.append(i)
predicted.append(label)
for pcf in precision_recall_fscore_support(labels, predicted, average=None):
if len(pcf) > 1:
measure.append(pcf[1])
else:
measure.append(0)
measure.append(false_pos_g)
measure.append(false_pos_s)
measure.append(false_pos_indices)
measure.append(true_pos_g)
measure.append(true_pos_s)
measure.append(true_pos_indices)
measure.append(false_neg_g)
measure.append(false_neg_s)
measure.append(false_neg_indices)
measure.append(predicted)
measure_features.append(measure)
df = pd.DataFrame(
measure_features,
columns=[
"Feature",
"Precision",
"Recall",
"Fscore",
"Support",
"False_positive_graphs",
"False_positive_sens",
"False_positive_indices",
"True_positive_graphs",
"True_positive_sens",
"True_positive_indices",
"False_negative_graphs",
"False_negative_sens",
"False_negative_indices",
"Predicted",
],
)
return df, accuracy
|
{"hexsha": "79de1435eace6729b4a564df96ae3210d23f0387", "size": 12624, "ext": "py", "lang": "Python", "max_stars_repo_path": "xpotato/graph_extractor/extract.py", "max_stars_repo_name": "adaamko/exp-relation-extraction", "max_stars_repo_head_hexsha": "0af5d95260809d3d130367f856e65e2e53e53c01", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-30T09:59:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-15T00:38:54.000Z", "max_issues_repo_path": "xpotato/graph_extractor/extract.py", "max_issues_repo_name": "adaamko/exp-relation-extraction", "max_issues_repo_head_hexsha": "0af5d95260809d3d130367f856e65e2e53e53c01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-09-07T14:18:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-20T11:15:02.000Z", "max_forks_repo_path": "xpotato/graph_extractor/extract.py", "max_forks_repo_name": "adaamko/exp-relation-extraction", "max_forks_repo_head_hexsha": "0af5d95260809d3d130367f856e65e2e53e53c01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1643454039, "max_line_length": 88, "alphanum_fraction": 0.5142585551, "include": true, "reason": "import networkx,from networkx", "num_tokens": 2611}
|
# Developed since: Feb 2010
import numpy
import upy.core
__all__ = ['add', 'subtract', 'multiply', 'divide', 'power', \
'less', 'less_equal', 'greater', 'greater_equal', 'equal', \
'not_equal']
"""Overloads the numpy operators in such a way, that in expressions
undarrays have the highest precedence."""
#
# Explicit operators ...
#
# Consider the expression:
#
# numpyarray * upyarray
#
# When executing this, NUMPYARRAY.__mul__() is called, or, equivalently,
# numpy.multiply(). This function checks whether the other operand is a
# numpy.ndarray, and if not, it treats it as a scalar and applies the
# operation to all elements of the numpy.ndarray NUMPYARRAY. This is not
# what was expected. The call executes properly if upyarray.__rmul__() is
# called instead, which is what the wrapper functions below arrange. The
# wrapper functions only handle this special case; all other cases are
# handed over to the original numpy functions. The wrappers are registered
# in numpy via numpy.set_numeric_ops().
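#
# A minimal illustration (names are hypothetical; `ua` denotes an
# upy.core.undarray, `na` a plain numpy.ndarray):
#
#   na = numpy.asarray([1.0, 2.0])
#   na * ua   # numpy calls multiply(na, ua); the wrapper detects that the
#             # right-hand operand is an undarray and dispatches to
#             # ua.__rmul__(na), so the result keeps its uncertainty.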
# Arithmetic operators ...
# We store the original numpy operator settings, then create the callable
# objects, which take their .ufunc attribute from this dictionary.
original_numpy_ops = numpy.set_numeric_ops()
class ufuncWrap:
"""Wraps numpy ufuncs. Behaves like the original, with the exception
that __call__() will be overloaded."""
def __init__(self, ufunc_name, overload):
"""UFUNC is the ufunc to be wrapped. OVERLOAD is the name (string)
of the undarray method to be used in overloading __call__()."""
self.ufunc_name = ufunc_name
self.ufunc = original_numpy_ops[ufunc_name]
self.overload = overload
def __call__(self, a, b, *args, **kwargs):
"""When B is an undarray, call B.overload(a), else .ufunc(a, b)."""
if isinstance(b, upy.core.undarray):
return getattr(b, self.overload)(a)
else:
return self.ufunc(a, b, *args, **kwargs)
def __getattr__(self, attr):
"""Return getattr(.ufunc, ATTR)."""
return getattr(self.ufunc, attr)
def __str__(self):
return "(ufunc wrapper for %s)" % self.ufunc
def __repr__(self):
return "ufuncWrap(ufunc_name = %r, overload = %r)" % \
(self.ufunc_name, self.overload)
class Add(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'add', '__radd__')
add = Add()
class Subtract(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'subtract', '__rsub__')
subtract = Subtract()
class Multiply(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'multiply', '__rmul__')
multiply = Multiply()
class Divide(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'divide', '__rdiv__')
divide = Divide()
class Power(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'power', '__rpow__')
power = Power()
# Comparison operators ...
#
# Note that for the antisymmetric operators the methods called are the
# inverses of the originals, because the operands swap position.
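# For example, less(a, b) with b an undarray dispatches to b.__gt__(a),
# since a < b is equivalent to b > a once the operands are swapped.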
class Less(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'less', '__gt__')
less = Less()
class LessEqual(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'less_equal', '__ge__')
less_equal = LessEqual()
class Greater(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'greater', '__lt__')
greater = Greater()
class GreaterEqual(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'greater_equal', '__le__')
greater_equal = GreaterEqual()
class Equal(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'equal', '__eq__')
def __call__(self, a, b, *args, **kwargs):
# numpy's calling mechanism of equal() seems to have a bug,
# such that b is always a numpy.ndarray. When b should be an undarray,
# it is a numpy.ndarray(dtype = numpy.object, shape = ()) ...
# Make the call also compatible with future, bug-fixed versions.
if isinstance(b, numpy.ndarray):
if b.ndim == 0:
                # A 0-d object array's .sum() returns the stored object itself.
b = b.sum()
return ufuncWrap.__call__(self, a, b, *args, **kwargs)
equal = Equal()
class NotEqual(ufuncWrap):
def __init__(self):
ufuncWrap.__init__(self, 'not_equal', '__ne__')
def __call__(self, a, b, *args, **kwargs):
# numpy's calling mechanism of not_equal() seems to have a bug,
# such that b is always a numpy.ndarray. When b should be an undarray,
# it is a numpy.ndarray(dtype = numpy.object, shape = ()) ...
# Make the call also compatible with future, bug-fixed versions.
if isinstance(b, numpy.ndarray):
if b.ndim == 0:
                # A 0-d object array's .sum() returns the stored object itself.
b = b.sum()
return ufuncWrap.__call__(self, a, b, *args, **kwargs)
not_equal = NotEqual()
# Register the operators in numpy ...
numpy.set_numeric_ops(
add = add,
subtract = subtract,
multiply = multiply,
divide = divide,
power = power,
less = less,
less_equal = less_equal,
greater = greater,
greater_equal = greater_equal,
equal = equal,
not_equal = not_equal)
|
{"hexsha": "94b462dc69cd0d3806c874ac8edec1ad4fb01c47", "size": 5425, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/upy2/pending/operators.py", "max_stars_repo_name": "friedrichromstedt/upy", "max_stars_repo_head_hexsha": "4b6b890259fb34bc69265fc400881587157b03a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-06-01T23:09:38.000Z", "max_stars_repo_stars_event_max_datetime": "2015-10-06T13:14:23.000Z", "max_issues_repo_path": "lib/upy2/pending/operators.py", "max_issues_repo_name": "friedrichromstedt/upy", "max_issues_repo_head_hexsha": "4b6b890259fb34bc69265fc400881587157b03a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/upy2/pending/operators.py", "max_forks_repo_name": "friedrichromstedt/upy", "max_forks_repo_head_hexsha": "4b6b890259fb34bc69265fc400881587157b03a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9723756906, "max_line_length": 79, "alphanum_fraction": 0.6458986175, "include": true, "reason": "import numpy", "num_tokens": 1348}
|