import numpy as np


def levenshtein_distance(source, target, sub_cost=1, ins_cost=1, del_cost=1):
    target = np.array(target)
    dist = np.arange(len(target) + 1) * ins_cost
    for s in source:
        dist[1:] = np.minimum(dist[1:] + del_cost, dist[:-1] + (target != s) * sub_cost)
        dist[0] += del_cost
        for ii in range(len(dist) - 1):
            if dist[ii + 1] > dist[ii] + ins_cost:
                dist[ii + 1] = dist[ii] + ins_cost
    return dist[-1]


def levenshtein_alignment(source, target, sub_cost=1, ins_cost=1, del_cost=1, empty_symbol=None):
    target = np.array(target)
    backtrack = np.ones((len(source) + 1, len(target) + 1))
    backtrack[0] = -1
    dist = np.arange(len(target) + 1) * ins_cost
    for ii, s in enumerate(source):
        cost4sub = dist[:-1] + (target != s) * sub_cost
        dist += del_cost
        where_sub = cost4sub < dist[1:]
        dist[1:][where_sub] = cost4sub[where_sub]
        backtrack[ii + 1, 1:][where_sub] = 0
        for jj in range(len(dist) - 1):
            if dist[jj + 1] > dist[jj] + ins_cost:
                dist[jj + 1] = dist[jj] + ins_cost
                backtrack[ii + 1, jj + 1] = -1
    src_pos = len(source)
    tar_pos = len(target)
    alig = []
    while tar_pos > 0 or src_pos > 0:
        where = backtrack[src_pos, tar_pos]
        if where >= 0: src_pos -= 1
        if where <= 0: tar_pos -= 1
        alig.insert(0, (empty_symbol if where < 0 else source[src_pos],
                        empty_symbol if where > 0 else target[tar_pos]))
    return alig


def levenshtein_alignment_path(source, target, sub_cost=1, ins_cost=1, del_cost=1, empty_symbol=None):
    target = np.array(target)
    backtrack = np.ones((len(source) + 1, len(target) + 1))
    backtrack[0] = -1
    dist = np.arange(len(target) + 1) * ins_cost
    for ii, s in enumerate(source):
        cost4sub = dist[:-1] + (target != s) * sub_cost
        dist += del_cost
        where_sub = cost4sub < dist[1:]
        dist[1:][where_sub] = cost4sub[where_sub]
        backtrack[ii + 1, 1:][where_sub] = 0
        for jj in range(len(dist) - 1):
            if dist[jj + 1] > dist[jj] + ins_cost:
                dist[jj + 1] = dist[jj] + ins_cost
                backtrack[ii + 1, jj + 1] = -1
    src_pos = len(source)
    tar_pos = len(target)
    align = []
    while tar_pos > 0 or src_pos > 0:
        where = backtrack[src_pos, tar_pos]
        if where >= 0: src_pos -= 1
        if where <= 0: tar_pos -= 1
        align.append(where)
    return list(reversed(align))


def edit_stats_for_alignment(alig, empty_symbol=None):
    if len(alig) == 0:
        return 0, 0, 0, 0, 0
    alig = np.array(alig)
    ncor = np.sum(alig[:, 0] == alig[:, 1])
    ndel = np.sum(alig[:, 0] == np.array(empty_symbol))
    nphn = np.sum(alig[:, 1] != np.array(empty_symbol))
    nins = len(alig) - nphn
    nsub = nphn - ncor - ndel
    return nphn, ncor, nins, ndel, nsub
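

# A minimal usage sketch (added for illustration; the inputs below are made up
# and not part of the original module): compute an edit distance, align two
# character sequences, and derive edit statistics from the alignment.
if __name__ == '__main__':
    src = list("kitten")
    tgt = list("sitting")
    print(levenshtein_distance(src, tgt))  # 3: two substitutions, one insertion
    alig = levenshtein_alignment(src, tgt, empty_symbol='*')
    print(alig)  # list of (source_symbol, target_symbol) pairs; '*' marks gaps
    print(edit_stats_for_alignment(alig, empty_symbol='*'))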
{"hexsha": "b6718365165733efafa1f882f95dbb01d5ba5e6a", "size": 2941, "ext": "py", "lang": "Python", "max_stars_repo_path": "pero_ocr/sequence_alignment.py", "max_stars_repo_name": "DavidHribek/pero-ocr", "max_stars_repo_head_hexsha": "8d274282813878b3e31dd560563a36b3f02e5c33", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2020-03-20T08:25:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T11:30:50.000Z", "max_issues_repo_path": "pero_ocr/sequence_alignment.py", "max_issues_repo_name": "DavidHribek/pero-ocr", "max_issues_repo_head_hexsha": "8d274282813878b3e31dd560563a36b3f02e5c33", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-02-11T17:27:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:36:24.000Z", "max_forks_repo_path": "pero_ocr/sequence_alignment.py", "max_forks_repo_name": "DavidHribek/pero-ocr", "max_forks_repo_head_hexsha": "8d274282813878b3e31dd560563a36b3f02e5c33", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-03-16T12:22:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T12:49:06.000Z", "avg_line_length": 36.3086419753, "max_line_length": 102, "alphanum_fraction": 0.5651139068, "include": true, "reason": "import numpy", "num_tokens": 928}
C*************************************************************
C PLEASE KEEP THIS NOTE OF MODEL-DEVELOPMENT HISTORY
C Matrix solve uses Thomas algorithm, 10/1991, Jinlun Zhang
C Spherical coordinate system, 10/27/93, Jinlun Zhang
C Latest finite differencing scheme for treatment of NP,
C 9/9/1996,Jinlun Zhang
C Alternating direction implicit (ADI) method is used, 10/1998,
C Jinlun Zhang
C For details about the ADI dynamics model, see Zhang and Rothrock,
C "Modeling Arctic sea ice with an efficient plastic solution",
C submitted to JGR, 1999
C Adapted for GISS coupled model Jiping Liu/Gavin Schmidt 2000
C - Further modularised May 2001
C*************************************************************
MODULE ICEDYN
!@sum ICEDYN holds local variables for dynamic sea ice
!@auth Gavin Schmidt (based on code from Jinlun Zhang)
USE CONSTANT, only : radian,radius
USE MODEL_COM, only : im,jm
USE SEAICE, only : osurf_tilt
IMPLICIT NONE
SAVE
C**** Definition for ice advection grid (EDIT FOR ADVSI GRID CHANGE)
INTEGER, PARAMETER :: IMIC=IM, JMIC=JM
C**** local grid variables for ice rheology scheme
C**** Edit the definition of nx1,ny1 to change the grid for the
C**** rheology calculations without changing ADVSI grid.
!@var nx1 number of grid points in the longitudinal direction
!@+ (calculated points from 2 through nx1-1. End points are boundaries)
!@var ny1 number of grid points in the latitudinal direction
!@+ (calculated points from 2 through ny1-1. End points are boundaries)
integer, parameter :: nx1=imic+2, ny1=jmic
INTEGER, parameter :: NYPOLE=NY1-1,NXLCYC=NX1-1
integer :: NPOL=1,LCYC=1
!@var FOCEAN land/ocean mask on ice dynamic grid
REAL*8, DIMENSION(NX1-2,NY1) :: FOCEAN
C**** input
!@var HEFFM ice mass mask (1/0)
!@var UVM ice velocity mask (1/0)
!@var COR coriolis term for ice dynamic equation
!@var GAIRX,GAIRY atmosphere-ice stress (B grid)
!@var GWATX,GWATY ocean velocities (B grid) (m/s)
!@var PGFUB,PGFVB pressure acceleration force
!@var AMASS ice mass (kg/m^2)
!@var UICEC,VICEC velocity arrays (m/s)
!@var UICE,VICE velocity arrays (m/s)
!@var HEFF ice thickness (mean over box) (m)
!@var AREA ice area (frac)
!@var UIB,VIB velocity arrays (m/s) (????)
C**** internal variables
!@var PRESS ice internal pressure (Pa)
!@var FORCEX,FORCEY external force
!@var DRAGS,DRAGA symmetric/anti-symmetric drag terms
!@var ZMAX,ZMIN max,min values of ZETA
!@var ETA,ZETA viscosities
C**** output
!@var DWATN non-linear water drag term
!@var DMU,DMV ice-ocean stress
REAL*8, DIMENSION(NX1,NY1) :: PRESS,HEFFM,UVM,DWATN,COR
* ,ZMAX,ZMIN,ETA,ZETA,DRAGS,DRAGA,GAIRX,GAIRY
* ,GWATX,GWATY,PGFUB,PGFVB,FORCEX,FORCEY,AMASS,UICEC,VICEC,UIB
* ,VIB,DMU,DMV,HEFF,AREA
REAL*8, DIMENSION(NX1,NY1,3) :: UICE,VICE
C**** Geometry
!@var SINEN sin(phi)
!@var BYDXDY reciprocal grid-box area term, 0.5/(DXU(I)*DYU(J))
!@var DXT,DXU x-direction distances on tracer and velocity grid
!@var DYT,DYU y-direction distances on tracer and velocity grid
REAL*8, DIMENSION(NX1,NY1) :: SINEN,BYDXDY
REAL*8, DIMENSION(NX1) :: DXT,DXU,BYDX2,BYDXR
REAL*8, DIMENSION(NY1) :: DYT,DYU,BYDY2,BYDYR,CST,CSU,TNGT,TNG
* ,BYCSU
!@var OIPHI ice-ocean turning angle (25 degrees)
!@var ECCEN value of eccentricity for yield curve ellipse
REAL*8, PARAMETER :: ECCEN=2.0, OIPHI=25d0*radian
!@var SINWAT,COSWAT sin and cos of ice-ocean turning angle
REAL*8 SINWAT,COSWAT
!@var PSTAR maximum sea ice pressure (Pa)
REAL*8, PARAMETER :: PSTAR=2.75d4
!@var BYDTS reciprocal of timestep in ice dynamics code
REAL*8 :: BYDTS
CONTAINS
SUBROUTINE FORM
!@sum FORM calculates ice dynamics input parameters for relaxation
!@auth Jiping Liu/Gavin Schmidt (based on code from J. Zhang)
!@ver 1.0
IMPLICIT NONE
INTEGER I,J
REAL*8 AAA
C****
C**** Set up non linear water drag
C****
DO J=1,NY1-1
DO I=1,NX1-1
DWATN(I,J)=5.5*SQRT((UICE(I,J,1)-GWATX(I,J))**2
1 +(VICE(I,J,1)-GWATY(I,J))**2)
END DO
END DO
C NOW SET UP SYMMETRIC DRAG
DO J=1,NY1-1
DO I=1,NX1-1
DRAGS(I,J)=DWATN(I,J)*COSWAT
END DO
END DO
C NOW SET UP ANTI SYMMETRIC DRAG PLUS CORIOLIS
DO J=1,NY1
DO I=1,NX1
IF(J.GT.NY1/2) THEN
DRAGA(I,J)=DWATN(I,J)*SINWAT+COR(I,J)
ELSE
DRAGA(I,J)=DWATN(I,J)*(-SINWAT)+COR(I,J)
END IF
END DO
END DO
C NOW SET UP FORCING FIELD
DO J=1,NY1
DO I=1,NX1
C FIRST DO WIND
FORCEX(I,J)=GAIRX(i,j)
FORCEY(I,J)=GAIRY(i,j)
C NOW ADD IN CURRENT FORCE
IF(J.GT.NY1/2) THEN
FORCEX(I,J)=FORCEX(I,J)+DWATN(I,J)*(COSWAT*GWATX(I,J)
1 -SINWAT*GWATY(I,J))
FORCEY(I,J)=FORCEY(I,J)+DWATN(I,J)*(SINWAT*GWATX(I,J)
1 +COSWAT*GWATY(I,J))
ELSE
FORCEX(I,J)=FORCEX(I,J)+DWATN(I,J)*(COSWAT*GWATX(I,J)
1 +SINWAT*GWATY(I,J))
FORCEY(I,J)=FORCEY(I,J)+DWATN(I,J)*(-SINWAT*GWATX(I,J)
1 +COSWAT*GWATY(I,J))
END IF
C NOW ADD IN TILT
if (osurf_tilt.eq.1) then
C**** This assumes explicit knowledge of sea surface tilt
FORCEX(I,J)=FORCEX(I,J)+AMASS(I,J)*PGFUB(I,J)
FORCEY(I,J)=FORCEY(I,J)+AMASS(I,J)*PGFVB(I,J)
else
C**** Otherwise estimate tilt using geostrophy
FORCEX(I,J)=FORCEX(I,J)-COR(I,J)*GWATY(I,J)
FORCEY(I,J)=FORCEY(I,J)+COR(I,J)*GWATX(I,J)
end if
END DO
END DO
C NOW SET UP ICE PRESSURE AND VISCOSITIES
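C**** Note (added): the strength formula below is the standard
C**** Hibler (1979) form, P = PSTAR*HEFF*exp(-20*(1-AREA))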
DO J=1,NY1
DO I=1,NX1
PRESS(I,J)=PSTAR*HEFF(I,J)*EXP(-20.0*(1.0-AREA(I,J)))
ZMAX(I,J)=(5d12/2d4)*PRESS(I,J)
c ZMIN(I,J)=0.0D+00
ZMIN(I,J)=4d8
END DO
END DO
CALL PLAST
AAA=0.0
DO I=2,NX1-1
AAA=AAA+PRESS(I,NY1-1)
END DO
AAA=AAA/FLOAT(NX1-2)
DO I=1,NX1
PRESS(I,NY1)=AAA
END DO
8481 CONTINUE
DO J=1,NY1
PRESS(1,J)=PRESS(NX1-1,J)
PRESS(NX1,J)=PRESS(2,J)
END DO
C NOW SET VISCOSITIES AND PRESSURE EQUAL TO ZERO AT OUTFLOW PTS
DO J=1,NY1
DO I=1,NX1
PRESS(I,J)=PRESS(I,J)*HEFFM(I,J)
ETA(I,J)=ETA(I,J)*HEFFM(I,J)
ZETA(I,J)=ZETA(I,J)*HEFFM(I,J)
END DO
END DO
C NOW CALCULATE PRESSURE FORCE AND ADD TO EXTERNAL FORCE
DO J=1,NY1-1
DO I=1,NX1-1
FORCEX(I,J)=FORCEX(I,J)-(0.25/(DXU(I)*CSU(J)))
1 *(PRESS(I+1,J)+PRESS(I+1,J+1)-PRESS(I,J)-PRESS(I,J+1))
FORCEY(I,J)=FORCEY(I,J)-0.25/DYU(J)
1 *(PRESS(I,J+1)+PRESS(I+1,J+1)-PRESS(I,J)-PRESS(I+1,J))
C NOW PUT IN MINIMAL MASS FOR TIME STEPPING CALCULATIONS
END DO
END DO
DO J=1,NY1
FORCEX(1,J)=FORCEX(NX1-1,J)
FORCEY(1,J)=FORCEY(NX1-1,J)
FORCEX(NX1,J)=FORCEX(2,J)
FORCEY(NX1,J)=FORCEY(2,J)
END DO
RETURN
END SUBROUTINE FORM
SUBROUTINE PLAST
!@sum PLAST Calculates strain rates and viscosity for dynamic ice
!@auth Jiping Liu/Gavin Schmidt (based on code from J. Zhang)
!@ver 1.0
IMPLICIT NONE
REAL*8, DIMENSION(NX1,NY1) :: E11,E22,E12
c REAL*8 :: SS11
REAL*8, PARAMETER :: ECM2 = 1.0/(ECCEN**2),GMIN=1d-20
REAL*8 DELT,DELT1,AAA
INTEGER I,J
C EVALUATE STRAIN RATES
DO J=2,NY1-1
DO I=2,NX1-1
E11(I,J)=0.5/(DXT(I)*CST(J))*(UICE(I,J,1)+UICE(I,J-1,1)
* -UICE(I-1,J,1)-UICE(I-1,J-1,1))-0.25*(VICE(I,J,1)+
* VICE(I-1,J,1)+VICE(I-1,J-1,1)+VICE(I,J-1,1))*TNGT(J)
* /RADIUS
E22(I,J)=0.5/DYT(J)*(VICE(I,J,1)+VICE(I-1,J,1)
* -VICE(I,J-1,1)-VICE(I-1,J-1,1))
E12(I,J)=0.5*(0.5/DYT(J)*(UICE(I,J,1)+UICE(I-1,J,1)-
* UICE(I,J-1,1)-UICE(I-1,J-1,1))+0.5/(DXT(I)*CST(J))*
* (VICE(I,J,1)+VICE(I,J-1,1)-VICE(I-1,J,1)-VICE(I-1,J-1,1))
* +0.25*(UICE(I,J,1)+UICE(I-1,J,1)+UICE(I-1,J-1,1)+UICE(I,J
* -1,1))*TNGT(J)/RADIUS)
C NOW EVALUATE VISCOSITIES
DELT=(E11(I,J)**2+E22(I,J)**2)*(1.0+ECM2)+4.0*ECM2*E12(I,J)**2
* +2.0*E11(I,J)*E22(I,J)*(1.0-ECM2)
DELT1=SQRT(DELT)
DELT1=MAX(GMIN,DELT1)
ZETA(I,J)=0.5*PRESS(I,J)/DELT1
END DO
END DO
C NOW PUT MIN AND MAX VISCOSITIES IN
DO J=1,NY1
DO I=1,NX1
ZETA(I,J)=MIN(ZMAX(I,J),ZETA(I,J))
ZETA(I,J)=MAX(ZMIN(I,J),ZETA(I,J))
END DO
END DO
AAA=0.0
DO I=2,NX1-1
AAA=AAA+ZETA(I,NY1-1)
END DO
AAA=AAA/FLOAT(NX1-2)
DO I=1,NX1
ZETA(I,NY1)=AAA
END DO
DO J=1,NY1
ZETA(1,J)=ZETA(NX1-1,J)
ZETA(NX1,J)=ZETA(2,J)
END DO
DO J=1,NY1
DO I=1,NX1
ETA(I,J)=ECM2*ZETA(I,J)
c E11(I,J)=E11(I,J)*HEFFM(I,J)
c E22(I,J)=E22(I,J)*HEFFM(I,J)
c E12(I,J)=E12(I,J)*HEFFM(I,J)
c SS11=(ZETA(I,J)-ETA(I,J))*(E11(I,J)+E22(I,J))-PRESS(I,J)*0.5
END DO
END DO
RETURN
END SUBROUTINE PLAST
SUBROUTINE RELAX
!@sum RELAX calculates ice dynamics relaxation method
!@auth Jiping Liu/Gavin Schmidt (based on code from J. Zhang)
!@ver 1.0
IMPLICIT NONE
REAL*8, DIMENSION(NX1,NY1) :: AU,BU,CU,FXY,FXY1
REAL*8, DIMENSION(NY1,NX1) :: AV,BV,CV,FXYa,FXY1a
REAL*8, DIMENSION(NX1) :: CUU,URT !CUU,
REAL*8, DIMENSION(NY1) :: CVV,VRT,U_tmp !CVV,
REAL*8, PARAMETER :: BYRAD2 = 1./(RADIUS*RADIUS)
INTEGER I,J,J1,J2,IMD,JMD
REAL*8 DELXY,DELXR,DELX2,DELY2,DELYR,ETAMEAN,ZETAMEAN,AA1,AA2
* ,AA3,AA4,AA5,AA6,AA9
DO J=1,NY1
DO I=1,NX1
FORCEX(I,J)=FORCEX(I,J)*UVM(I,J)
FORCEY(I,J)=FORCEY(I,J)*UVM(I,J)
END DO
END DO
C MUST UPDATE HEFF BEFORE CALLING RELAX
C FIRST SET U(2)=U(1)
DO J=1,NY1
DO I=1,NX1
C NOW MAKE SURE BDRY PTS ARE EQUAL TO ZERO
UICE(I,J,2)=UICE(I,J,1)
VICE(I,J,2)=VICE(I,J,1)
UICE(I,J,1)=UICE(I,J,3)*UVM(I,J)
VICE(I,J,1)=VICE(I,J,3)*UVM(I,J)
END DO
END DO
DO I=1,NX1/2
UICE(I,NY1,1)=-UICEC(I+(NX1-2)/2,NY1-1)
VICE(I,NY1,1)=-VICEC(I+(NX1-2)/2,NY1-1)
UICE(I,NY1,3)=-UICEC(I+(NX1-2)/2,NY1-1)
VICE(I,NY1,3)=-VICEC(I+(NX1-2)/2,NY1-1)
UICEC(I,NY1)=-UICEC(I+(NX1-2)/2,NY1-1)
VICEC(I,NY1)=-VICEC(I+(NX1-2)/2,NY1-1)
END DO
DO I=NX1/2+1,NX1-1
UICE(I,NY1,1)=-UICEC(I-(NX1-2)/2,NY1-1)
VICE(I,NY1,1)=-VICEC(I-(NX1-2)/2,NY1-1)
UICE(I,NY1,3)=-UICEC(I-(NX1-2)/2,NY1-1)
VICE(I,NY1,3)=-VICEC(I-(NX1-2)/2,NY1-1)
UICEC(I,NY1)=-UICEC(I-(NX1-2)/2,NY1-1)
VICEC(I,NY1)=-VICEC(I-(NX1-2)/2,NY1-1)
END DO
DO J=1,NY1
UICE(1,J,1)=UICEC(NX1-1,J)
VICE(1,J,1)=VICEC(NX1-1,J)
UICE(NX1,J,1)=UICEC(2,J)
VICE(NX1,J,1)=VICEC(2,J)
UICE(1,J,3)=UICEC(NX1-1,J)
VICE(1,J,3)=VICEC(NX1-1,J)
UICE(NX1,J,3)=UICEC(2,J)
VICE(NX1,J,3)=VICEC(2,J)
UICEC(1,J)=UICEC(NX1-1,J)
VICEC(1,J)=VICEC(NX1-1,J)
UICEC(NX1,J)=UICEC(2,J)
VICEC(NX1,J)=VICEC(2,J)
END DO
C FIRST DO UICE
C THE FIRST HALF
DO J=2,NYPOLE
DO I=2,NXLCYC
DELXY=BYDXDY(I,J) ! 0.5/(DXU(I)*DYU(J))
DELXR=BYDXR(I) ! 0.5/(DXU(I)*RADIUS)
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
ZETAMEAN=0.25*(ZETA(I,J+1)+ZETA(I+1,J+1)+ZETA(I,J)+ZETA(I+1,J))
FXY(I,J)=DRAGA(I,J)*VICEC(I,J)+FORCEX(I,J)
3+0.5*(ZETA(I+1,J+1)*(VICEC(I+1,J+1)+VICEC(I,J+1)
3-VICEC(I+1,J)-VICEC(I,J))+ZETA(I+1,J)*(VICEC(I+1,J)
3+VICEC(I,J)-VICEC(I+1,J-1)-VICEC(I,J-1))+ZETA(I,J+1)
3*(VICEC(I,J)+VICEC(I-1,J)-VICEC(I,J+1)-VICEC(I-1,J+1))
3+ZETA(I,J)*(VICEC(I,J-1)+VICEC(I-1,J-1)-VICEC(I,J)
3-VICEC(I-1,J)))*DELXY*BYCSU(J)
3
4-0.5*(ETA(I+1,J+1)*(VICEC(I+1,J+1)+VICEC(I,J+1)
4-VICEC(I+1,J)-VICEC(I,J))+ETA(I+1,J)*(VICEC(I+1,J)
4+VICEC(I,J)-VICEC(I+1,J-1)-VICEC(I,J-1))+ETA(I,J+1)
4*(VICEC(I,J)+VICEC(I-1,J)-VICEC(I,J+1)-VICEC(I-1,J+1))
4+ETA(I,J)*(VICEC(I,J-1)+VICEC(I-1,J-1)-VICEC(I,J)
4-VICEC(I-1,J)))*DELXY*BYCSU(J)
4
5+0.5*(VICEC(I+1,J)-VICEC(I-1,J))*(ETA(I,J+1)+ETA(I+1,J+1)
5-ETA(I,J)-ETA(I+1,J))*DELXY*BYCSU(J)+0.5*ETAMEAN*((VICEC(I+1,J+1)
5-VICEC(I-1,J+1))*BYCSU(J+1)-(VICEC(I+1,J-1)-VICEC(I-1,J-1))
5*BYCSU(J-1))*DELXY
5
6-((ZETA(I+1,J+1)+ZETA(I+1,J)-ZETA(I,J)-ZETA(I,J+1))
6+(ETA(I+1,J+1)+ETA(I+1,J)-ETA(I,J)-ETA(I,J+1)))
6*TNG(J)*VICEC(I,J)*DELXR*BYCSU(J)
6-(ETAMEAN+ZETAMEAN)*TNG(J)*(VICEC(I+1,J)-VICEC(I-1,J))
6*DELXR*BYCSU(J)
6
7-ETAMEAN*2.0*TNG(J)*(VICEC(I+1,J)-VICEC(I-1,J))*DELXR*BYCSU(J)
END DO
END DO
DO J=2,NYPOLE
DO I=2,NXLCYC
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
AA1=((ETA(I+1,J) +ZETA(I+1,J) )*BYCSU(J)+
* (ETA(I+1,J+1)+ZETA(I+1,J+1))*BYCSU(J))*BYCSU(J)
AA2=((ETA(I,J)+ZETA(I,J))*BYCSU(J)+(ETA(I,J+1)+ZETA(I,J+1))
& *BYCSU(J))*BYCSU(J)
AA3=ETA(I,J+1)+ETA(I+1,J+1)
AA4=ETA(I,J)+ETA(I+1,J)
AA5=-(ETA(I,J+1)+ETA(I+1,J+1)-ETA(I,J)-ETA(I+1,J))*TNG(J)
AA6=2.0*ETAMEAN*TNG(J)*TNG(J)
AU(I,J)=-AA2*DELX2*UVM(I,J)
BU(I,J)=((AA1+AA2)*DELX2+AA6*BYRAD2
&+AMASS(I,J)*BYDTS*2.0+DRAGS(I,J))*UVM(I,J)+(1.0-UVM(I,J))
CU(I,J)=-AA1*DELX2*UVM(I,J)
END DO
END DO
DO J=2,NYPOLE
AU(2,J)=0.0
CU(NXLCYC,J)=0.0
c CU(2,J)=CU(2,J)/BU(2,J) ! absorbed into TRIDIAG
END DO
DO 1200 J=2,NYPOLE
DO I=2,NXLCYC
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
AA1=((ETA(I+1,J) +ZETA(I+1,J) )*BYCSU(J)+
* (ETA(I+1,J+1)+ZETA(I+1,J+1))*BYCSU(J))*BYCSU(J)
AA2=((ETA(I,J)+ZETA(I,J))*BYCSU(J)+(ETA(I,J+1)+ZETA(I,J+1))
& *BYCSU(J))*BYCSU(J)
AA3=ETA(I,J+1)+ETA(I+1,J+1)
AA4=ETA(I,J)+ETA(I+1,J)
AA5=-(ETA(I,J+1)+ETA(I+1,J+1)-ETA(I,J)-ETA(I+1,J))*TNG(J)
AA6=2.0*ETAMEAN*TNG(J)*TNG(J)
IF(I.EQ.2) THEN
AA9=AA2*DELX2*UICEC(I-1,J)*UVM(I,J)*FLOAT(LCYC-0)
ELSE IF(I.EQ.NXLCYC) THEN
AA9=AA1*DELX2*UICEC(I+1,J)*UVM(I,J)*FLOAT(LCYC-0)
ELSE
AA9=0.0
END IF
URT(I)=AA9+FXY(I,J)-AA5*DELYR*UICE(I,J,2)
1-(AA3+AA4)*DELY2*UICE(I,J,2)
1+(ETA(I,J+1)+ETA(I+1,J+1))*UICE(I,J+1,2)*DELY2
2+(ETA(I,J)+ETA(I+1,J))*UICE(I,J-1,2)*DELY2
3+ETAMEAN*DELYR*(UICE(I,J+1,2)*TNG(J+1)-UICE(I,J-1,2)*TNG(J-1))
4-ETAMEAN*DELYR*2.0*TNG(J)*(UICE(I,J+1,2)-UICE(I,J-1,2))
URT(I)=(URT(I)+AMASS(I,J)*BYDTS*UICE(I,J,2)*2.0)*UVM(I,J)
END DO
CALL TRIDIAG(AU(2,J),BU(2,J),CU(2,J),URT(2),UICE(2,J,1),NXLCYC-1)
c DO I=2,NXLCYC
c CUU(I)=CU(I,J)
c END DO
c URT(2)=URT(2)/BU(2,J)
c DO I=3,NXLCYC
c IMD=I-1
c CUU(I)=CUU(I)/(BU(I,J)-AU(I,J)*CUU(IMD))
c URT(I)=(URT(I)-AU(I,J)*URT(IMD))/(BU(I,J)-AU(I,J)*CUU(IMD))
c END DO
c DO I=1,NXLCYC-2
c J1=NXLCYC-I
c J2=J1+1
c URT(J1)=URT(J1)-CUU(J1)*URT(J2)
c END DO
c DO I=2,NXLCYC
c UICE(I,J,1)=URT(I)
c END DO
1200 CONTINUE
DO I=2,NXLCYC
DO J=2,NYPOLE
UICE(I,J,3)=UICE(I,J,1)
END DO
END DO
C NOW THE SECOND HALF
DO I=2,NXLCYC
DO J=2,NYPOLE
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
ZETAMEAN=0.25*(ZETA(I,J+1)+ZETA(I+1,J+1)+ZETA(I,J)+ZETA(I+1,J))
AA1=ETA(I,J+1)+ETA(I+1,J+1)
AA2=ETA(I,J)+ETA(I+1,J)
AA5=-(ETA(I,J+1)+ETA(I+1,J+1)-ETA(I,J)-ETA(I+1,J))*TNG(J)
AA6=2.0*ETAMEAN*TNG(J)*TNG(J)
AV(J,I)=(-AA2*DELY2+ETAMEAN*DELYR*(TNG(J-1)-2.0*TNG(J)))*UVM(I,J)
BV(J,I)=((AA1+AA2)*DELY2+AA5*DELYR+AA6*BYRAD2
&+AMASS(I,J)*BYDTS*2.0+DRAGS(I,J))*UVM(I,J)+(1.0-UVM(I,J))
CV(J,I)=(-AA1*DELY2-ETAMEAN*DELYR*(TNG(J+1)-2.0*TNG(J)))*UVM(I,J)
END DO
END DO
DO I=2,NXLCYC
AV(2,I)=0.0
CV(NYPOLE,I)=0.0
c CV(2,I)=CV(2,I)/BV(2,I) ! absorbed into TRIDIAG
END DO
DO I=2,NXLCYC
DO J=2,NYPOLE
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
AA1=((ETA(I+1,J) +ZETA(I+1,J) )*BYCSU(J)+
* (ETA(I+1,J+1)+ZETA(I+1,J+1))*BYCSU(J))*BYCSU(J)
AA2=((ETA(I,J)+ZETA(I,J))*BYCSU(J)+(ETA(I,J+1)+ZETA(I,J+1))
& *BYCSU(J))*BYCSU(J)
IF(J.EQ.NYPOLE) THEN
AA9=( (ETA(I,J+1)+ETA(I+1,J+1))*DELY2*UICEC(I,J+1)
& +ETAMEAN*DELYR*(TNG(J+1)-2.0*TNG(J))*UICEC(I,J+1) )*UVM(I,J)
&*FLOAT(NPOL-0)
ELSE
AA9=0.0
END IF
FXY1a(J,I)=AA9+AMASS(I,J)*BYDTS*UICE(I,J,1)*2.0
5-(AA1+AA2)*DELX2*UICE(I,J,1)
6+((ETA(I+1,J)+ZETA(I+1,J)+ETA(I+1,J+1)+ZETA(I+1,J+1))
6*UICE(I+1,J,1)
6+(ETA(I,J)+ZETA(I,J)+ETA(I,J+1)+ZETA(I,J+1))*UICE(I-1,J,1))
6*DELX2*BYCSU(J)*BYCSU(J)
END DO
END DO
DO 1300 I=2,NXLCYC
DO J=2,NYPOLE
VRT(J)=FXY(I,J)+FXY1a(J,I)
VRT(J)=VRT(J)*UVM(I,J)
END DO
CALL TRIDIAG(AV(2,I),BV(2,I),CV(2,I),VRT(2),U_tmp(2),NYPOLE-1)
c DO J=2,NYPOLE
c CVV(J)=CV(J,I)
c END DO
c VRT(2)=VRT(2)/BV(2,I)
c DO J=3,NYPOLE
c JMD=J-1
c CVV(J)=CVV(J)/(BV(J,I)-AV(J,I)*CVV(JMD))
c VRT(J)=(VRT(J)-AV(J,I)*VRT(JMD))/(BV(J,I)-AV(J,I)*CVV(JMD))
c END DO
c DO J=1,NYPOLE-2
c J1=NYPOLE-J
c J2=J1+1
c VRT(J1)=VRT(J1)-CVV(J1)*VRT(J2)
c END DO
DO J=2,NYPOLE
UICE(I,J,1)=U_tmp(J) ! VRT(J) !
END DO
1300 CONTINUE
C NOW DO VICE
C THE FIRST HALF
DO I=2,NXLCYC
DO J=2,NYPOLE
DELXY=BYDXDY(I,J) ! 0.5/(DXU(I)*DYU(J))
DELXR=BYDXR(I) ! 0.5/(DXU(I)*RADIUS)
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
ZETAMEAN=0.25*(ZETA(I,J+1)+ZETA(I+1,J+1)+ZETA(I,J)+ZETA(I+1,J))
FXYa(J,I)=-DRAGA(I,J)*UICEC(I,J)+FORCEY(I,J)
3+(0.5*(UICEC(I+1,J)-UICEC(I-1,J))*(ZETA(I,J+1)+ZETA(I+1,J+1)
3-ZETA(I,J)-ZETA(I+1,J))*DELXY*BYCSU(J)+0.5*ZETAMEAN*
3((UICEC(I+1,J+1)
3-UICEC(I-1,J+1))*BYCSU(J+1)-(UICEC(I+1,J-1)-UICEC(I-1,J-1))
3*BYCSU(J-1))*DELXY)
3
4-(0.5*(UICEC(I+1,J)-UICEC(I-1,J))*(ETA(I,J+1)+ETA(I+1,J+1)
4-ETA(I,J)-ETA(I+1,J))*DELXY*BYCSU(J)+0.5*ETAMEAN*((UICEC(I+1,J+1)
4-UICEC(I-1,J+1))*BYCSU(J+1)-(UICEC(I+1,J-1)-UICEC(I-1,J-1))
4*BYCSU(J-1))*DELXY)
4
5+0.5*(ETA(I+1,J+1)*(UICEC(I+1,J+1)+UICEC(I,J+1)
5-UICEC(I+1,J)-UICEC(I,J))+ETA(I+1,J)*(UICEC(I+1,J)
5+UICEC(I,J)-UICEC(I+1,J-1)-UICEC(I,J-1))+ETA(I,J+1)
5*(UICEC(I,J)+UICEC(I-1,J)-UICEC(I,J+1)-UICEC(I-1,J+1))
5+ETA(I,J)*(UICEC(I,J-1)+UICEC(I-1,J-1)-UICEC(I,J)
5-UICEC(I-1,J)))*DELXY*BYCSU(J)
5
6+(ETA(I+1,J+1)+ETA(I+1,J)-ETA(I,J)-ETA(I,J+1))
6*TNG(J)*UICEC(I,J)*DELXR*BYCSU(J)
6+ETAMEAN*TNG(J)*(UICEC(I+1,J)-UICEC(I-1,J))*DELXR*BYCSU(J)
6
7+ETAMEAN*2.0*TNG(J)*(UICEC(I+1,J)-UICEC(I-1,J))*DELXR*BYCSU(J)
END DO
END DO
DO I=2,NXLCYC
DO J=2,NYPOLE
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
ZETAMEAN=0.25*(ZETA(I,J+1)+ZETA(I+1,J+1)+ZETA(I,J)+ZETA(I+1,J))
AA1=ETA(I,J+1)+ZETA(I,J+1)+ETA(I+1,J+1)+ZETA(I+1,J+1)
AA2=ETA(I,J)+ZETA(I,J)+ETA(I+1,J)+ZETA(I+1,J)
AA3=(ETA(I+1,J)*BYCSU(J)+ETA(I+1,J+1)*BYCSU(J))*BYCSU(J)
AA4=(ETA(I,J)*BYCSU(J)+ETA(I,J+1)*BYCSU(J))*BYCSU(J)
AA5=((ZETA(I,J+1)-ETA(I,J+1))+(ZETA(I+1,J+1)-ETA(I+1,J+1))
&-(ZETA(I,J)-ETA(I,J))-(ZETA(I+1,J)-ETA(I+1,J)))*TNG(J)
AA6=2.0*ETAMEAN*TNG(J)*TNG(J)
AV(J,I)=(-AA2*DELY2-(ZETAMEAN-ETAMEAN)*TNG(J-1)*DELYR
&-ETAMEAN*2.0*TNG(J)*DELYR)*UVM(I,J)
BV(J,I)=((AA1+AA2)*DELY2+AA5*DELYR+AA6*BYRAD2
&+AMASS(I,J)*BYDTS*2.0+DRAGS(I,J))*UVM(I,J)+(1.0-UVM(I,J))
CV(J,I)=(-AA1*DELY2+(ZETAMEAN-ETAMEAN)*TNG(J+1)*DELYR
&+ETAMEAN*2.0*TNG(J)*DELYR)*UVM(I,J)
END DO
END DO
DO I=2,NXLCYC
AV(2,I)=0.0
CV(NYPOLE,I)=0.0
c CV(2,I)=CV(2,I)/BV(2,I) ! absorbed into TRIDIAG
END DO
DO 1301 I=2,NXLCYC
DO J=2,NYPOLE
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
ZETAMEAN=0.25*(ZETA(I,J+1)+ZETA(I+1,J+1)+ZETA(I,J)+ZETA(I+1,J))
AA1=ETA(I,J+1)+ZETA(I,J+1)+ETA(I+1,J+1)+ZETA(I+1,J+1)
AA2=ETA(I,J)+ZETA(I,J)+ETA(I+1,J)+ZETA(I+1,J)
AA3=(ETA(I+1,J)*BYCSU(J)+ETA(I+1,J+1)*BYCSU(J))*BYCSU(J)
AA4=(ETA(I,J)*BYCSU(J)+ETA(I,J+1)*BYCSU(J))*BYCSU(J)
AA5=((ZETA(I,J+1)-ETA(I,J+1))+(ZETA(I+1,J+1)-ETA(I+1,J+1))
&-(ZETA(I,J)-ETA(I,J))-(ZETA(I+1,J)-ETA(I+1,J)))*TNG(J)
AA6=2.0*ETAMEAN*TNG(J)*TNG(J)
IF(J.EQ.NYPOLE) THEN
AA9=(AA1*DELY2-(ZETAMEAN-ETAMEAN)*TNG(J+1)*DELYR
&-ETAMEAN*2.0*TNG(J)*DELYR)*VICEC(I,J+1)*UVM(I,J)*FLOAT(NPOL-0)
ELSE
AA9=0.0
END IF
VRT(J)=AA9+FXYa(J,I)-(AA3+AA4)*DELX2*VICE(I,J,2)
6+((ETA(I+1,J)*BYCSU(J)+ETA(I+1,J+1)*BYCSU(J))*VICE(I+1,J,2)*DELX2
7 +(ETA(I,J)*BYCSU(J)+ETA(I,J+1)*BYCSU(J))*VICE(I-1,J,2)*DELX2)
* *BYCSU(J)
VRT(J)=(VRT(J)+AMASS(I,J)*BYDTS*VICE(I,J,2)*2.0)*UVM(I,J)
END DO
CALL TRIDIAG(AV(2,I),BV(2,I),CV(2,I),VRT(2),U_tmp(2),NYPOLE-1)
c DO J=2,NYPOLE
c CVV(J)=CV(J,I)
c END DO
c VRT(2)=VRT(2)/BV(2,I)
c DO J=3,NYPOLE
c JMD=J-1
c CVV(J)=CVV(J)/(BV(J,I)-AV(J,I)*CVV(JMD))
c VRT(J)=(VRT(J)-AV(J,I)*VRT(JMD))/(BV(J,I)-AV(J,I)*CVV(JMD))
c END DO
c DO J=1,NYPOLE-2
c J1=NYPOLE-J
c J2=J1+1
c VRT(J1)=VRT(J1)-CVV(J1)*VRT(J2)
c END DO
DO J=2,NYPOLE
VICE(I,J,1)=U_tmp(J) ! VRT(J) !
END DO
1301 CONTINUE
DO I=2,NXLCYC
DO J=2,NYPOLE
VICE(I,J,3)=VICE(I,J,1)
END DO
END DO
C NOW THE SECOND HALF
DO J=2,NYPOLE
DO I=2,NXLCYC
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
ZETAMEAN=0.25*(ZETA(I,J+1)+ZETA(I+1,J+1)+ZETA(I,J)+ZETA(I+1,J))
AA1=ETA(I,J+1)+ZETA(I,J+1)+ETA(I+1,J+1)+ZETA(I+1,J+1)
AA2=ETA(I,J)+ZETA(I,J)+ETA(I+1,J)+ZETA(I+1,J)
AA3=(ETA(I+1,J)*BYCSU(J)+ETA(I+1,J+1)*BYCSU(J))*BYCSU(J)
AA4=(ETA(I,J)*BYCSU(J)+ETA(I,J+1)*BYCSU(J))*BYCSU(J)
AA6=2.0*ETAMEAN*TNG(J)*TNG(J)
AU(I,J)=-AA4*DELX2*UVM(I,J)
BU(I,J)=((AA3+AA4)*DELX2+AA6*BYRAD2
&+AMASS(I,J)*BYDTS*2.0+DRAGS(I,J))*UVM(I,J)+(1.0-UVM(I,J))
CU(I,J)=-AA3*DELX2*UVM(I,J)
END DO
END DO
DO J=2,NYPOLE
AU(2,J)=0.0
CU(NXLCYC,J)=0.0
c CU(2,J)=CU(2,J)/BU(2,J) ! absorbed into TRIDIAG
END DO
DO J=2,NYPOLE
DO I=2,NXLCYC
DELX2=BYDX2(I) ! 0.5/(DXU(I)*DXU(I))
DELY2=BYDY2(J) ! 0.5/(DYU(J)*DYU(J))
DELYR=BYDYR(J) ! 0.5/(DYU(J)*RADIUS)
ETAMEAN=0.25*(ETA(I,J+1)+ETA(I+1,J+1)+ETA(I,J)+ETA(I+1,J))
ZETAMEAN=0.25*(ZETA(I,J+1)+ZETA(I+1,J+1)+ZETA(I,J)+ZETA(I+1,J))
AA1=ETA(I,J+1)+ZETA(I,J+1)+ETA(I+1,J+1)+ZETA(I+1,J+1)
AA2=ETA(I,J)+ZETA(I,J)+ETA(I+1,J)+ZETA(I+1,J)
AA3=(ETA(I+1,J)*BYCSU(J)+ETA(I+1,J+1)*BYCSU(J))*BYCSU(J)
AA4=(ETA(I,J)*BYCSU(J)+ETA(I,J+1)*BYCSU(J))*BYCSU(J)
AA5=((ZETA(I,J+1)-ETA(I,J+1))+(ZETA(I+1,J+1)-ETA(I+1,J+1))
&-(ZETA(I,J)-ETA(I,J))-(ZETA(I+1,J)-ETA(I+1,J)))*TNG(J)
AA6=2.0*ETAMEAN*TNG(J)*TNG(J)
IF(I.EQ.2) THEN
AA9=AA4*DELX2*VICEC(I-1,J)*UVM(I,J)*FLOAT(LCYC-0)
ELSE IF(I.EQ.NXLCYC) THEN
AA9=AA3*DELX2*VICEC(I+1,J)*UVM(I,J)*FLOAT(LCYC-0)
ELSE
AA9=0.0
END IF
FXY1(I,J)=AA9+AMASS(I,J)*BYDTS*VICE(I,J,1)*2.0
1-AA5*DELYR*VICE(I,J,1)
1-(AA1+AA2)*DELY2*VICE(I,J,1)
1+AA1*DELY2*VICE(I,J+1,1)-((ZETAMEAN-ETAMEAN)*TNG(J+1)*DELYR
1+ETAMEAN*2.0*TNG(J)*DELYR)*VICE(I,J+1,1)
2+AA2*DELY2*VICE(I,J-1,1)+((ZETAMEAN-ETAMEAN)*TNG(J-1)*DELYR
2+ETAMEAN*2.0*TNG(J)*DELYR)*VICE(I,J-1,1)
END DO
END DO
DO 1201 J=2,NYPOLE
DO I=2,NXLCYC
URT(I)=FXYa(J,I)+FXY1(I,J)
URT(I)=URT(I)*UVM(I,J)
END DO
CALL TRIDIAG(AU(2,J),BU(2,J),CU(2,J),URT(2),VICE(2,J,1),NXLCYC-1)
c DO I=2,NXLCYC
c CUU(I)=CU(I,J)
c END DO
c URT(2)=URT(2)/BU(2,J)
c DO I=3,NXLCYC
c IMD=I-1
c CUU(I)=CUU(I)/(BU(I,J)-AU(I,J)*CUU(IMD))
c URT(I)=(URT(I)-AU(I,J)*URT(IMD))/(BU(I,J)-AU(I,J)*CUU(IMD))
c END DO
c DO I=1,NXLCYC-2
c J1=NXLCYC-I
c J2=J1+1
c URT(J1)=URT(J1)-CUU(J1)*URT(J2)
c END DO
c DO I=2,NXLCYC
c VICE(I,J,1)=URT(I)
c END DO
1201 CONTINUE
DO J=2,NYPOLE
DO I=2,NXLCYC
UICE(I,J,1)=UICE(I,J,1)*UVM(I,J)
VICE(I,J,1)=VICE(I,J,1)*UVM(I,J)
END DO
END DO
RETURN
END SUBROUTINE RELAX
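C**** Added note: RELAX performs one ADI iteration. For each velocity
C**** component it alternates an implicit x-direction sweep with an
C**** implicit y-direction sweep, each reduced to a tridiagonal system
C**** solved by TRIDIAG (the Thomas algorithm cited in the header).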
SUBROUTINE setup_icedyn_grid
USE MODEL_COM, only : dts=>dtsrc
IMPLICIT NONE
REAL*8 :: dlat,dlon,phit,phiu,hemi,rms
INTEGER I,J,n,k,kki,sumk,l
C****
C**** calculate grid and initialise arrays
c****
dlat=nint(180./(ny1-1))*radian
dlon=nint(360./(nx1-2))*radian
bydts = 1./dts
c****
do j = 1,ny1
dyt(j) = dlat*radius
dyu(j) = dlat*radius
enddo
do i=1,nx1
dxt(i) = dlon*radius
dxu(i) = dlon*radius
enddo
dxt(1) = dxt(nx1-1)
dxt(nx1) = dxt(2)
dxu(1) = dxu(nx1-1)
dxu(nx1) = dxu(2)
do i=1,nx1
bydx2(i)=0.5/(dxu(i)*dxu(i))
bydxr(i)=0.5/(dxu(i)*radius)
end do
do j=1,ny1
bydy2(j)=0.5/(dyu(j)*dyu(j))
bydyr(j)=0.5/(dyu(j)*radius)
do i=1,nx1
bydxdy(i,j) = 0.5/(dxu(i)*dyu(j))
end do
end do
do j = 1,ny1
phit = (-90.+(j-1)*4.)*radian
phiu = (-88.+(j-1)*4.)*radian
cst(j) = cos(phit)
csu(j) = cos(phiu)
bycsu(j) = 1./csu(j)
tng(j) = sin(phiu)/csu(j)
TNGT(J)=SIN(PHIT)/CST(J)
DO I=1,NX1
SINEN(I,J)=SIN(PHIU)
enddo
enddo
TNGT(NY1)=TNGT(NY1-1)
TNG(NY1)=TNG(NY1-1)
CSU(NY1)=CSU(NY1-1)
bycsu(NY1) = 1./csu(NY1)
C**** sin/cos ice-ocean turning angle
SINWAT=SIN(OIPHI)
COSWAT=COS(OIPHI)
C**** Set land masks for tracer and velocity points
do j=1,ny1
do i=2,nx1-1
heffm(i,j)=nint(focean(i-1,j))
enddo
heffm(1,j)=heffm(nx1-1,j)
heffm(nx1,j)=heffm(2,j)
enddo
C**** define velocity points (including exterior corners)
do j=1,ny1-1
do i=1,nx1-1
c sumk=heffm(i,j)+heffm(i+1,j)+heffm(i,j+1)+heffm(i+1,j+1)
c if (sumk.ge.3) uvm(i,j)=1 ! includes exterior corners
uvm(i,j) = nint(min(heffm(i,j), heffm(i+1,j), heffm(i,j+1),
* heffm(i+1,j+1)))
end do
end do
C**** reset tracer points to surround velocity points (except for
C**** single-box islands, cf. the commented-out sweep below)
do j=2,ny1-1
do i=2,nx1-1
k = nint(max (uvm(i,j), uvm(i-1,j), uvm(i,j-1), uvm(i-1,j-1)))
c sumk = nint(uvm(i,j)+uvm(i+1,j)+uvm(i,j+1)+uvm(i+1,j+1))
c set to k except if an island
c if (.not. (sumk.eq.4.and.focean(i-1,j).eq.0) ) then
heffm(i,j) = k
c end if
enddo
enddo
C**** final sweep to reinstate islands
c do j=2,ny1-1
c do i=2,nx1-1
c sumk = nint(uvm(i,j)+uvm(i+1,j)+uvm(i,j+1)+uvm(i+1,j+1))
c if (sumk.eq.4.and.heffm(i,j).eq.0.) then
c uvm(i,j)=0 ; uvm(i+1,j)=0 ; uvm(i,j+1)=0 ; uvm(i+1,j+1)=0
c end if
c enddo
c enddo
c set lateral boundary conditions
do j=1,ny1
heffm(1,j) = heffm(nx1-1,j)
heffm(nx1,j) = heffm(2,j)
enddo
do j=1,ny1-1
do i=1,nx1-1
uvm(i,j) = nint(min(heffm(i,j), heffm(i+1,j), heffm(i,j+1),
* heffm(i+1,j+1)))
end do
end do
c set cyclic conditions on eastern and western boundary
do j=1,ny1
uvm(1,j) = uvm(nx1-1,j)
uvm(nx1,j) = uvm(2,j)
enddo
RETURN
END SUBROUTINE setup_icedyn_grid
END MODULE ICEDYN
SUBROUTINE VPICEDYN
!@sum vpicedyn is the entry point into the viscous-plastic ice
!@+ dynamics code
!@auth Gavin Schmidt (based on code from J. Zhang)
USE ICEDYN, only : nx1,ny1,form,relax,uice,vice,uicec,vicec
IMPLICIT NONE
REAL*8, DIMENSION(NX1,NY1) :: USAVE,VSAVE
REAL*8 rms
INTEGER kki,i,j
rms=0.
C KKI LOOP IS FOR PSEUDO-TIMESTEPPING
KKI=0
10 KKI=KKI+1
C FIRST DO PREDICTOR
DO J=1,NY1
DO I=1,NX1
UICE(I,J,3)=UICE(I,J,1)
VICE(I,J,3)=VICE(I,J,1)
UICEC(I,J)=UICE(I,J,1)
VICEC(I,J)=VICE(I,J,1)
END DO
END DO
CALL FORM
CALL RELAX
DO J=1,NY1
UICE(1,J,1)=UICE(NX1-1,J,1)
VICE(1,J,1)=VICE(NX1-1,J,1)
UICE(NX1,J,1)=UICE(2,J,1)
VICE(NX1,J,1)=VICE(2,J,1)
END DO
C NOW DO REGULAR TIME STEP
C NOW DO MODIFIED EULER STEP
c
DO J=1,NY1
DO I=1,NX1
UICE(I,J,1)=0.5*(UICE(I,J,1)+UICE(I,J,2))
VICE(I,J,1)=0.5*(VICE(I,J,1)+VICE(I,J,2))
END DO
END DO
CALL FORM
C NOW SET U(1)=U(2) AND SAME FOR V
DO J=1,NY1
DO I=1,NX1
UICE(I,J,3)=UICE(I,J,1)
VICE(I,J,3)=VICE(I,J,1)
UICEC(I,J)=UICE(I,J,1)
VICEC(I,J)=VICE(I,J,1)
UICE(I,J,1)=UICE(I,J,2)
VICE(I,J,1)=VICE(I,J,2)
END DO
END DO
CALL RELAX
DO J=1,NY1
UICE(1,J,1)=UICE(NX1-1,J,1)
VICE(1,J,1)=VICE(NX1-1,J,1)
UICE(NX1,J,1)=UICE(2,J,1)
VICE(NX1,J,1)=VICE(2,J,1)
END DO
if (kki.gt.1) then ! test convergence
rms=0.
do i=1,nx1
do j=1,ny1
rms=rms+(USAVE(i,j)-UICE(i,j,1))**2+(VSAVE(i,j)-VICE(i,j,1))
* **2
end do
end do
end if
if (kki.eq.20) then
write(6,*) "Too many iterations in VPICEDYN. kki:",kki,rms
elseif (kki.eq.1 .or. rms.gt.0.01d0) then
USAVE=UICE(:,:,1)
VSAVE=VICE(:,:,1)
goto 10
end if
RETURN
END SUBROUTINE VPICEDYN
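C**** Added note: VPICEDYN pseudo-timesteps the coupled FORM/RELAX
C**** solve, taking a predictor step followed by a modified Euler
C**** corrector each iteration, and stops once the rms velocity change
C**** falls below 0.01 or after 20 iterations.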
{"hexsha": "599b66f6b61dc18e3d615c481ae6bec3a77ec52b", "size": 31011, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/model/ICEDYN.f", "max_stars_repo_name": "Climostatistics/giss_model_e", "max_stars_repo_head_hexsha": "2ec1e4fd6ef009fe894a98955ef6e1586753ff79", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/model/ICEDYN.f", "max_issues_repo_name": "Climostatistics/giss_model_e", "max_issues_repo_head_hexsha": "2ec1e4fd6ef009fe894a98955ef6e1586753ff79", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/model/ICEDYN.f", "max_forks_repo_name": "Climostatistics/giss_model_e", "max_forks_repo_head_hexsha": "2ec1e4fd6ef009fe894a98955ef6e1586753ff79", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.011, "max_line_length": 72, "alphanum_fraction": 0.5329721712, "num_tokens": 14048}
[STATEMENT]
lemma cltn2_compose_right_abs:
assumes "invertible M"
shows "cltn2_compose A (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cltn2_compose A (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cltn2_compose A (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)
[PROOF STEP]
from \<open>invertible M\<close> and cltn2_rep_invertible and cltn2_compose_abs
[PROOF STATE]
proof (chain)
picking this:
invertible M
invertible (cltn2_rep ?A)
\<lbrakk>invertible ?M; invertible ?N\<rbrakk> \<Longrightarrow> cltn2_compose (cltn2_abs ?M) (cltn2_abs ?N) = cltn2_abs (?M ** ?N)
[PROOF STEP]
have "cltn2_compose (cltn2_abs (cltn2_rep A)) (cltn2_abs M)
= cltn2_abs (cltn2_rep A ** M)"
[PROOF STATE]
proof (prove)
using this:
invertible M
invertible (cltn2_rep ?A)
\<lbrakk>invertible ?M; invertible ?N\<rbrakk> \<Longrightarrow> cltn2_compose (cltn2_abs ?M) (cltn2_abs ?N) = cltn2_abs (?M ** ?N)
goal (1 subgoal):
1. cltn2_compose (cltn2_abs (cltn2_rep A)) (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
cltn2_compose (cltn2_abs (cltn2_rep A)) (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)
goal (1 subgoal):
1. cltn2_compose A (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)
[PROOF STEP]
thus "cltn2_compose A (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)"
[PROOF STATE]
proof (prove)
using this:
cltn2_compose (cltn2_abs (cltn2_rep A)) (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)
goal (1 subgoal):
1. cltn2_compose A (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)
[PROOF STEP]
by (simp add: cltn2_abs_rep)
[PROOF STATE]
proof (state)
this:
cltn2_compose A (cltn2_abs M) = cltn2_abs (cltn2_rep A ** M)
goal:
No subgoals!
[PROOF STEP]
qed
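(* Added summary: since M is invertible and cltn2_rep A is always invertible,
   cltn2_compose_abs rewrites the composition of cltn2_abs (cltn2_rep A) with
   cltn2_abs M into cltn2_abs (cltn2_rep A ** M); cltn2_abs_rep then collapses
   cltn2_abs (cltn2_rep A) back to A, giving the stated equation. *)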
{"llama_tokens": 860, "file": "Tarskis_Geometry_Projective", "length": 7}
include("../common/ortho_gram_schmidt.jl")
include("../common/wrappers_fft.jl")
include("../common/gen_lattice_pwscf.jl")
include("gen_dr.jl")
include("init_pot_harm_3d.jl")
include("op_K.jl")
include("op_Vpot.jl")
include("op_H.jl")
include("calc_rho.jl")
include("calc_grad.jl")
include("calc_Etot.jl")
include("schsolve_Emin_sd.jl")
include("schsolve_Emin_cg.jl")
include("Kprec.jl")
include("diag_lobpcg.jl")
include("diag_davidson.jl")
function test_main( ; Ns_in=nothing, ecutwfc_Ry=nothing,
                      solution_method="diag_lobpcg" )

    LatVecs = gen_lattice_sc(6.0)

    if PWGRID_VERSION == 2
        if Ns_in != nothing
            pw = PWGrid( Ns_in, LatVecs )
        else
            @printf("PWGRID_VERSION 2 needs Ns_in\n")
            exit()
        end
    elseif PWGRID_VERSION == 3
        if ecutwfc_Ry != nothing
            pw = PWGrid( 0.5*ecutwfc_Ry, LatVecs )
        else
            @printf("PWGRID_VERSION 3 needs ecutwfc_Ry\n")
            exit()
        end
    else
        @printf("ERROR: Must specify Ns or ecutwfc_Ry\n")
        exit()
    end

    Ω = pw.Ω
    r = pw.r
    G = pw.gvec.G
    G2 = pw.gvec.G2
    Ns = pw.Ns
    Npoints = prod(Ns)
    Ngwx = pw.gvecw.Ngwx

    @printf("Ns = (%d,%d,%d)\n", Ns[1], Ns[2], Ns[3])
    @printf("Ngwx = %d\n", Ngwx)
    @printf("G2mx = %f\n", maximum(pw.gvec.G2[pw.gvecw.idx_gw2r]))
    if PWGRID_VERSION == 3
        @printf("ecutwfc_Ry = %f\n", ecutwfc_Ry)
    end

    actual = Npoints/Ngwx
    theor = 1/(4*pi*0.25^3/3)
    @printf("Compression: actual, theor: %f , %f\n", actual, theor)

    # Generate array of distances
    center = 6.0*ones(3)/2
    dr = gen_dr( r, center )

    # Setup potential
    Vpot = init_pot_harm_3d( pw, dr )
    println("sum(Vpot)*Ω/Npoints = ", sum(Vpot)*Ω/Npoints)

    Nstates = 4
    Random.seed!(2222)   # was srand(2222) in pre-1.0 Julia
    psi = rand(ComplexF64,Ngwx,Nstates)
    psi = ortho_gram_schmidt(psi)

    if solution_method == "Emin"
        psi, Etot = schsolve_Emin_sd( pw, Vpot, psi, NiterMax=10 )
        psi, Etot = schsolve_Emin_cg( pw, Vpot, psi, NiterMax=1000 )
        Y = ortho_gram_schmidt(psi)
        mu = Y' * op_H( pw, Vpot, Y )
        evals, evecs = eigen(mu)
        Psi = Y*evecs
    else
        evals, psi = diag_lobpcg( pw, Vpot, psi, verbose=true, tol_avg=1e-10 )
    end

    # Davidson diagonalization is not working yet
    #evals, psi = diag_davidson( pw, Vpot, psi, verbose=true, tol_avg=1e-10 )

    for ist = 1:Nstates
        @printf("State # %d, Energy = %18.10f\n", ist, real(evals[ist]))
    end
end
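
# Example driver call (added sketch, not in the original file). It assumes the
# included files define PWGRID_VERSION == 3, PWGrid, and gen_lattice_sc as used
# above; the 30 Ry cutoff is an illustrative value.
test_main( ecutwfc_Ry=30.0 )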
{"hexsha": "2999700d07c6aa2ffbd7e3d46ff253f5efba0f26", "size": 2541, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "PW/sch_02/driver_harm.jl", "max_stars_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_stars_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-01-03T02:19:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-29T13:30:20.000Z", "max_issues_repo_path": "PW/sch_02/driver_harm.jl", "max_issues_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_issues_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PW/sch_02/driver_harm.jl", "max_forks_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_forks_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-03-23T06:58:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-03T00:54:28.000Z", "avg_line_length": 25.41, "max_line_length": 78, "alphanum_fraction": 0.5966155057, "num_tokens": 897}
[STATEMENT]
lemma [simp]:
"P \<turnstile> C sees M,b: Ts\<rightarrow>T = m in D \<Longrightarrow>
method (compP f P) C M = (D,b,Ts,T,f b m)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
let ?P = "\<lambda>(D, b, Ts, T, m). compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
let ?a = "(D, b, Ts, T, f b m)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
assume cM: "P \<turnstile> C sees M,b: Ts\<rightarrow>T = m in D"
[PROOF STATE]
proof (state)
this:
P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
have compP_cM: "?P ?a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. case (D, b, Ts, T, f b m) of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
[PROOF STEP]
using sees_method_compP[OF cM]
[PROOF STATE]
proof (prove)
using this:
compP ?f P \<turnstile> C sees M, b : Ts\<rightarrow>T = ?f b m in D
goal (1 subgoal):
1. case (D, b, Ts, T, f b m) of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
case (D, b, Ts, T, f b m) of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
case (D, b, Ts, T, f b m) of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
case (D, b, Ts, T, f b m) of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
assume "?P x"
[PROOF STATE]
proof (state)
this:
case x of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
case x of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
[PROOF STEP]
have "x = ?a"
[PROOF STATE]
proof (prove)
using this:
case x of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
goal (1 subgoal):
1. x = (D, b, Ts, T, f b m)
[PROOF STEP]
using compP_cM
[PROOF STATE]
proof (prove)
using this:
case x of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
case (D, b, Ts, T, f b m) of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
goal (1 subgoal):
1. x = (D, b, Ts, T, f b m)
[PROOF STEP]
by(fastforce dest:sees_method_fun)
[PROOF STATE]
proof (state)
this:
x = (D, b, Ts, T, f b m)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
case ?x2 of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> ?x2 = (D, b, Ts, T, f b m)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
case (D, b, Ts, T, f b m) of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
case ?x2 of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> ?x2 = (D, b, Ts, T, f b m)
[PROOF STEP]
have "(THE x. ?P x) = ?a"
[PROOF STATE]
proof (prove)
using this:
case (D, b, Ts, T, f b m) of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D
case ?x2 of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> ?x2 = (D, b, Ts, T, f b m)
goal (1 subgoal):
1. (THE x. case x of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D) = (D, b, Ts, T, f b m)
[PROOF STEP]
by(rule the_equality)
[PROOF STATE]
proof (state)
this:
(THE x. case x of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D) = (D, b, Ts, T, f b m)
goal (1 subgoal):
1. P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D \<Longrightarrow> method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(THE x. case x of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D) = (D, b, Ts, T, f b m)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(THE x. case x of (D, b, Ts, T, m) \<Rightarrow> compP f P \<turnstile> C sees M, b : Ts\<rightarrow>T = m in D) = (D, b, Ts, T, f b m)
goal (1 subgoal):
1. method (compP f P) C M = (D, b, Ts, T, f b m)
[PROOF STEP]
by(simp add:method_def)
[PROOF STATE]
proof (state)
this:
method (compP f P) C M = (D, b, Ts, T, f b m)
goal:
No subgoals!
[PROOF STEP]
qed
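(* Added summary: method lookup commutes with program compilation: if P sees
   method M of class C with body m in D, then looking M up in compP f P yields
   the same signature with body f b m. Uniqueness of the looked-up tuple
   follows from sees_method_fun, so THE-elimination (the_equality) applies. *)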
{"llama_tokens": 2946, "file": "JinjaDCI_Compiler_PCompiler", "length": 24}
import numpy as np
import SimpleITK as sitk


def load_itk(filename):
    # Read the .mhd volume; transpose so the array is indexed (x, y, z)
    itkimage = sitk.ReadImage(filename)
    image = np.transpose(sitk.GetArrayFromImage(itkimage))
    origin = np.array(itkimage.GetOrigin())
    spacing = np.array(itkimage.GetSpacing())
    return image, origin, spacing


def world_2_voxel(world_coord, origin, spacing):
    stretched_voxel_coord = np.absolute(world_coord - origin)
    voxel_coord = stretched_voxel_coord / spacing
    return voxel_coord


def voxel_2_world(voxel_coord, origin, spacing):
    stretched_voxel_coord = voxel_coord * spacing
    world_coord = stretched_voxel_coord + origin
    return world_coord


if __name__ == "__main__":
    image, origin, spacing = load_itk('data/original_lungs/subset0/1.3.6.1.4.1.14519.5.2.1.6279.6001.105756658031515062000744821260.mhd')
    print('Shape:', image.shape)
    print('Origin:', origin)
    print('Spacing:', spacing)
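    # Illustrative round trip (added sketch; the voxel offset is a made-up
    # value, not from the dataset). world_2_voxel takes an absolute value, so
    # the inversion below holds for coordinates on the positive side of origin.
    world = origin + spacing * np.array([10.0, 20.0, 30.0])
    voxel = world_2_voxel(world, origin, spacing)
    print('Voxel:', voxel, '-> world again:', voxel_2_world(voxel, origin, spacing))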
{"hexsha": "6e20bc7a3c2697a8f0aee589cec805e2694bbe33", "size": 910, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data_processing/xyz_utils.py", "max_stars_repo_name": "DataForces/CV_LUNA", "max_stars_repo_head_hexsha": "adc76fdc580807742fee4c6453c728a2d6d76ed3", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 207, "max_stars_repo_stars_event_min_datetime": "2016-07-25T07:27:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T06:20:56.000Z", "max_issues_repo_path": "src/data_processing/xyz_utils.py", "max_issues_repo_name": "DataForces/CV_LUNA", "max_issues_repo_head_hexsha": "adc76fdc580807742fee4c6453c728a2d6d76ed3", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2016-12-07T02:36:14.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-05T03:05:14.000Z", "max_forks_repo_path": "src/data_processing/xyz_utils.py", "max_forks_repo_name": "DataForces/CV_LUNA", "max_forks_repo_head_hexsha": "adc76fdc580807742fee4c6453c728a2d6d76ed3", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 83, "max_forks_repo_forks_event_min_datetime": "2016-08-27T01:35:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-21T15:12:06.000Z", "avg_line_length": 33.7037037037, "max_line_length": 137, "alphanum_fraction": 0.7395604396, "include": true, "reason": "import numpy", "num_tokens": 248}
From Equations Require Import Equations.
Require Import Psatz.
Require Import Coq.Strings.String.
Require Import Coq.Lists.List.
Require Export SystemFR.OpenTOpen.
Require Export SystemFR.EqualWithRelation.
Require Export SystemFR.ReducibilityCandidate.
Require Export SystemFR.ReducibilityDefinition.
Require Export SystemFR.SubstitutionLemmas.
Require Export SystemFR.TOpenTClose.
Require Export SystemFR.NoTypeFVar.
Require Export SystemFR.PolarityLemmas.
Require Export SystemFR.FVLemmasLists.
Require Export SystemFR.NoTypeFVarLemmas.
Require Export SystemFR.TypeErasureLemmas.
Require Export SystemFR.AnnotatedTermLemmas.
Opaque makeFresh.
Opaque PeanoNat.Nat.eq_dec.
Lemma erase_type_topen2:
forall T1 T2 k,
is_annotated_type T1 ->
erase_type T2 = T2 ->
erase_type (topen k T1 T2) = topen k (erase_type T1) T2.
Proof.
induction T1;
repeat step || rewrite erase_term_topen in * || t_equality || rewrite topen_erase_term in *.
Qed.
Lemma has_polarities_erase_aux:
forall n T pols,
type_nodes T < n ->
is_annotated_type T ->
has_polarities T pols ->
has_polarities (erase_type T) pols.
Proof.
induction n; destruct T; steps; try lia;
repeat
step || step_inversion has_polarities || constructor || exists X || t_fv_erase ||
rewrite <- erase_type_topen2 || apply_any || autorewrite with bsize in *;
eauto with lia;
eauto 2 with annot step_tactic.
Qed.
Lemma has_polarities_erase:
forall T pols,
is_annotated_type T ->
has_polarities T pols ->
has_polarities (erase_type T) pols.
Proof.
eauto using has_polarities_erase_aux.
Qed.
Lemma has_polarities_subst_aux:
forall n T pols l,
type_nodes T < n ->
has_polarities T pols ->
pclosed_mapping l type_var ->
twfs l 0 ->
has_polarities (psubstitute T l term_var) pols.
Proof.
induction n; destruct T;
repeat step || constructor || step_inversion has_polarities || exists X || t_pfv_in_subst || eapply_any ||
autorewrite with bsize in * ||
(rewrite substitute_topen2 by steps);
eauto with lia.
Qed.
Lemma has_polarities_subst:
forall T pols l,
has_polarities T pols ->
pclosed_mapping l type_var ->
twfs l 0 ->
has_polarities (psubstitute T l term_var) pols.
Proof.
eauto using has_polarities_subst_aux.
Qed.
Lemma has_polarities_subst_erase:
forall (X : nat) (Γ : map nat tree) (Ts : tree) (ρ : interpretation) l pols,
is_annotated_type Ts ->
has_polarities (topen 0 Ts (fvar X type_var)) pols ->
satisfies (reducible_values ρ) (erase_context Γ) l ->
has_polarities (topen 0 (psubstitute (erase_type Ts) l term_var) (fvar X type_var)) pols.
Proof.
steps.
apply has_polarities_erase in H0;
repeat step || rewrite erase_type_topen in * by steps; eauto 2 with annot step_tactic.
rewrite substitute_topen2; steps; eauto with twf.
apply has_polarities_subst; steps; eauto with fv twf.
Qed.
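
(* Added note: the lemmas above transport polarity judgements across erasure
   (has_polarities_erase) and substitution (has_polarities_subst);
   has_polarities_subst_erase chains them for the topen'd type that appears
   in the reducibility development. *)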
{"author": "epfl-lara", "repo": "SystemFR", "sha": "a68d12d6360f395958506deea66112c46be492a0", "save_path": "github-repos/coq/epfl-lara-SystemFR", "path": "github-repos/coq/epfl-lara-SystemFR/SystemFR-a68d12d6360f395958506deea66112c46be492a0/PolarityErase.v"}
#
# Copyright (c) 2019-2020 StephLin.
#
# This file is part of wipctv
# (see https://gitea.mcl.math.ncu.edu.tw/StephLin/wipctv).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Reassignment of Short-Time Fourier Transform"""
from typing import Union
import numpy as np
import librosa
def stft(x: np.ndarray,
         n_fft: int,
         hop_length: int,
         window: Union[str, np.ndarray] = 'hann',
         center: bool = False) -> np.ndarray:
    """Short-Time Fourier Transform with prior arguments.

    Args:
        x: 1-D signal.
        n_fft: Number of FFT computation.
        hop_length: Number of audio samples between adjacent STFT columns.
        window: Window function for STFT. Default to `'hann'`.
        center: Argument of `librosa.core.stft`. Default to `False`.

    Returns:
        Complex-valued 2-D spectrum.
    """
    return librosa.core.stft(x,
                             n_fft=n_fft,
                             hop_length=hop_length,
                             window=window,
                             center=center)


def istft(spectrum: np.ndarray,
          hop_length: int,
          window: Union[str, np.ndarray] = 'hann',
          center: bool = False,
          length: int = None) -> np.ndarray:
    """Inverse Short-Time Fourier Transform with prior arguments.

    Args:
        spectrum: Complex-valued 2-D spectrum.
        hop_length: Number of audio samples between adjacent STFT columns.
        window: Window function for STFT. Default to `'hann'`.
        center: Argument of `librosa.core.stft`. Default to `False`.
        length: Length of 1-D wave.

    Returns:
        Real-valued 1-D wave.
    """
    return librosa.core.istft(spectrum,
                              hop_length=hop_length,
                              window=window,
                              center=center,
                              length=length)


def phase_corrected_operator(spectrum_shape: np.ndarray, sr: int,
                             hop_length: int,
                             frequency_shift: float) -> np.ndarray:
    """Phase corrected operator $E_{PC}$ for iPCTV.

    Args:
        spectrum_shape: Shape of the target spectrum.
        sr: Sampling rate of the target signal.
        hop_length: Number of audio samples between adjacent STFT columns.
        frequency_shift: Unit of frequency shift, which is based on FFT.

    Returns:
        Complex-valued 2-D phase corrected operator $E_{PC}$.
    """
    freq_idx, time_idx = [np.arange(n_idx) for n_idx in spectrum_shape]

    exp_term = np.kron(freq_idx, time_idx) * hop_length * frequency_shift / sr
    exp_term = -2j * np.pi * exp_term

    return np.exp(exp_term).reshape(spectrum_shape)


def instantaneous_phase_operator(x: np.ndarray, spectrum: np.ndarray, sr: int,
                                 n_fft, hop_length: int,
                                 window: Union[str, np.ndarray],
                                 frequency_shift: float):
    """Instantaneous phase corrected operator $E_{iPC}$ for Hann-based iPCTV.

    Args:
        x: 1-D signal.
        spectrum: STFT-based spectrum of 1-D signal `x`.
        sr: Sampling rate of the target signal.
        n_fft: Number of FFT computation.
        hop_length: Number of audio samples between adjacent STFT columns.
        window: Window function for STFT. Default to `'hann'`.
        frequency_shift: Unit of frequency shift, which is based on FFT.

    Returns:
        Complex-valued 2-D instantaneous phase corrected operator $E_{iPC}$.

    Raises:
        ValueError: If shape of `spectrum` does not fit the shape under
            arguments received.
        NotImplementedError: If window function is not supported.
    """
    if window != 'hann':
        # TODO: General approach for various window function
        raise NotImplementedError("Unsupported window function %r" % window)

    # Time derivative of the Hann window, used as analysis window for the
    # phase-derivative spectrum
    dt_window_x = np.pi * np.arange(n_fft) / (n_fft - 1)
    dt_window = np.sin(2 * dt_window_x) / (2 * n_fft)

    delta_complex = stft(x,
                         n_fft=n_fft,
                         hop_length=hop_length,
                         window=dt_window)

    if delta_complex.shape != spectrum.shape:
        raise ValueError("Mismatch of spectrum shapes %r and %r" %
                         (spectrum.shape, delta_complex.shape))

    delta_complex = np.where(
        np.abs(spectrum) > 1e-10, delta_complex / spectrum, 0)
    delta = -np.imag(delta_complex)

    # Cumulative trapezoidal integration of the instantaneous frequency
    # deviation along the time axis
    time_idx = np.arange(delta.shape[1])
    delta_tilde = np.zeros(delta.shape)
    for idx in time_idx[1:]:
        s = delta_tilde[:, idx - 1] + (delta[:, idx] + delta[:, idx - 1]) / 2
        delta_tilde[:, idx] = s

    exp_term = -2j * np.pi * hop_length * delta_tilde

    return np.exp(exp_term).reshape(spectrum.shape)
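

# A minimal round-trip sketch (added for illustration, not part of the original
# module). The sampling rate, FFT size, hop, and synthetic tone are assumptions.
if __name__ == '__main__':
    sr = 22050
    x = np.sin(2 * np.pi * 440.0 * np.arange(sr) / sr)  # 1 s test tone at 440 Hz
    S = stft(x, n_fft=1024, hop_length=256)
    y = istft(S, hop_length=256, length=len(x))
    # With center=False the first/last frames are not fully overlap-added,
    # so the reconstruction error concentrates at the signal edges.
    print('max |x - y|:', np.max(np.abs(x - y)))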
{"hexsha": "3581ea44d33febc6a8616a80da2656b7440433ee", "size": 5559, "ext": "py", "lang": "Python", "max_stars_repo_path": "wipctv/restft.py", "max_stars_repo_name": "StephLin/wipctv", "max_stars_repo_head_hexsha": "f289e306a9ea51c17853c58fd7e0af84dd8764cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wipctv/restft.py", "max_issues_repo_name": "StephLin/wipctv", "max_issues_repo_head_hexsha": "f289e306a9ea51c17853c58fd7e0af84dd8764cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wipctv/restft.py", "max_forks_repo_name": "StephLin/wipctv", "max_forks_repo_head_hexsha": "f289e306a9ea51c17853c58fd7e0af84dd8764cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-06T19:26:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T19:26:49.000Z", "avg_line_length": 37.06, "max_line_length": 78, "alphanum_fraction": 0.6189962223, "include": true, "reason": "import numpy", "num_tokens": 1263}
[STATEMENT]
theorem completeTA_is_ta: "complete_tree_automaton completeTA A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. complete_tree_automaton completeTA A
[PROOF STEP]
proof (standard, goal_cases)
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. finite (ta_rules completeTA)
2. finite (ta_initial completeTA)
3. \<And>q f qs. q \<rightarrow> f qs \<in> ta_rules completeTA \<Longrightarrow> A f = Some (length qs)
4. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
5. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
goal (5 subgoals):
1. finite (ta_rules completeTA)
2. finite (ta_initial completeTA)
3. \<And>q f qs. q \<rightarrow> f qs \<in> ta_rules completeTA \<Longrightarrow> A f = Some (length qs)
4. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
5. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (ta_rules completeTA)
[PROOF STEP]
by (simp add: completeTA_def)
[PROOF STATE]
proof (state)
this:
finite (ta_rules completeTA)
goal (4 subgoals):
1. finite (ta_initial completeTA)
2. \<And>q f qs. q \<rightarrow> f qs \<in> ta_rules completeTA \<Longrightarrow> A f = Some (length qs)
3. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
4. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. finite (ta_initial completeTA)
2. \<And>q f qs. q \<rightarrow> f qs \<in> ta_rules completeTA \<Longrightarrow> A f = Some (length qs)
3. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
4. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
case 2
[PROOF STATE]
proof (state)
this:
goal (4 subgoals):
1. finite (ta_initial completeTA)
2. \<And>q f qs. q \<rightarrow> f qs \<in> ta_rules completeTA \<Longrightarrow> A f = Some (length qs)
3. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
4. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (ta_initial completeTA)
[PROOF STEP]
by (simp add: completeTA_def)
[PROOF STATE]
proof (state)
this:
finite (ta_initial completeTA)
goal (3 subgoals):
1. \<And>q f qs. q \<rightarrow> f qs \<in> ta_rules completeTA \<Longrightarrow> A f = Some (length qs)
2. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
3. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>q f qs. q \<rightarrow> f qs \<in> ta_rules completeTA \<Longrightarrow> A f = Some (length qs)
2. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
3. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
case 3
[PROOF STATE]
proof (state)
this:
q_ \<rightarrow> f_ qs_ \<in> ta_rules completeTA
goal (3 subgoals):
1. \<And>q f qs. q \<rightarrow> f qs \<in> ta_rules completeTA \<Longrightarrow> A f = Some (length qs)
2. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
3. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
q_ \<rightarrow> f_ qs_ \<in> ta_rules completeTA
goal (1 subgoal):
1. A f_ = Some (length qs_)
[PROOF STEP]
apply (auto simp add: completeTA_def \<delta>complete_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>q_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>\<rbrakk> \<Longrightarrow> A f_ = Some (length qs_)
[PROOF STEP]
apply (case_tac x)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x x1 x2 x3. \<lbrakk>q_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>; x = x1 \<rightarrow> x2 x3\<rbrakk> \<Longrightarrow> A f_ = Some (length qs_)
[PROOF STEP]
apply (auto intro: ranked)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
A f_ = Some (length qs_)
goal (2 subgoals):
1. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
2. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
2. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
case 4
[PROOF STATE]
proof (state)
this:
q_ \<rightarrow> f_ qs_ \<in> ta_rules completeTA
q'_ \<rightarrow> f_ qs_ \<in> ta_rules completeTA
goal (2 subgoals):
1. \<And>q f qs q'. \<lbrakk>q \<rightarrow> f qs \<in> ta_rules completeTA; q' \<rightarrow> f qs \<in> ta_rules completeTA\<rbrakk> \<Longrightarrow> q = q'
2. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
q_ \<rightarrow> f_ qs_ \<in> ta_rules completeTA
q'_ \<rightarrow> f_ qs_ \<in> ta_rules completeTA
goal (1 subgoal):
1. q_ = q'_
[PROOF STEP]
apply (auto simp add: completeTA_def \<delta>complete_def)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x xa. \<lbrakk>q_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>; q'_ \<rightarrow> f_ qs_ = remap_rule Some xa; xa \<in> \<delta>\<rbrakk> \<Longrightarrow> q_ = q'_
2. \<And>x. \<lbrakk>q_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>; q'_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete\<rbrakk> \<Longrightarrow> q_ = None
3. \<And>x. \<lbrakk>q_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete; q'_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>\<rbrakk> \<Longrightarrow> None = q'_
[PROOF STEP]
apply (case_tac x, case_tac xa)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x xa x1 x2 x3 x1a x2a x3a. \<lbrakk>q_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>; q'_ \<rightarrow> f_ qs_ = remap_rule Some xa; xa \<in> \<delta>; x = x1 \<rightarrow> x2 x3; xa = x1a \<rightarrow> x2a x3a\<rbrakk> \<Longrightarrow> q_ = q'_
2. \<And>x. \<lbrakk>q_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>; q'_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete\<rbrakk> \<Longrightarrow> q_ = None
3. \<And>x. \<lbrakk>q_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete; q'_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>\<rbrakk> \<Longrightarrow> None = q'_
[PROOF STEP]
apply (auto intro: deterministic) [1]
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. \<lbrakk>q_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>; q'_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete\<rbrakk> \<Longrightarrow> q_ = None
2. \<And>x. \<lbrakk>q_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete; q'_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>\<rbrakk> \<Longrightarrow> None = q'_
[PROOF STEP]
apply (case_tac x)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x x1 x2 x3. \<lbrakk>q_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>; q'_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete; x = x1 \<rightarrow> x2 x3\<rbrakk> \<Longrightarrow> q_ = None
2. \<And>x. \<lbrakk>q_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete; q'_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>\<rbrakk> \<Longrightarrow> None = q'_
[PROOF STEP]
apply auto [1]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. \<lbrakk>q_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete; q'_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>\<rbrakk> \<Longrightarrow> None = q'_
[PROOF STEP]
apply (case_tac x)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x x1 x2 x3. \<lbrakk>q_ = None; A f_ = Some (length qs_); \<forall>qo qso. qo \<rightarrow> f_ qso \<in> \<delta> \<longrightarrow> qs_ \<noteq> map Some qso; \<forall>x\<in>set qs_. x \<in> Qcomplete; q'_ \<rightarrow> f_ qs_ = remap_rule Some x; x \<in> \<delta>; x = x1 \<rightarrow> x2 x3\<rbrakk> \<Longrightarrow> None = q'_
[PROOF STEP]
apply auto [1]
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
q_ = q'_
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
case prems: (5 qs f)
[PROOF STATE]
proof (state)
this:
qs \<in> lists (ta_rstates completeTA)
A f = Some (length qs)
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
qs \<in> lists (ta_rstates completeTA)
A f = Some (length qs)
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
fix qo qso
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
assume R: "(qo \<rightarrow> f qso)\<in>\<delta>" and [simp]: "qs=map Some qso"
[PROOF STATE]
proof (state)
this:
qo \<rightarrow> f qso \<in> \<delta>
qs = map Some qso
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
hence "((Some qo) \<rightarrow> f qs) \<in> remap_rule Some ` \<delta>"
[PROOF STATE]
proof (prove)
using this:
qo \<rightarrow> f qso \<in> \<delta>
qs = map Some qso
goal (1 subgoal):
1. Some qo \<rightarrow> f qs \<in> remap_rule Some ` \<delta>
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
Some qo \<rightarrow> f qs \<in> remap_rule Some ` \<delta>
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
hence ?case
[PROOF STATE]
proof (prove)
using this:
Some qo \<rightarrow> f qs \<in> remap_rule Some ` \<delta>
goal (1 subgoal):
1. \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
by (simp add: completeTA_def \<delta>complete_def) blast
[PROOF STATE]
proof (state)
this:
\<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>?qo3 \<rightarrow> f ?qso3 \<in> \<delta>; qs = map Some ?qso3\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<lbrakk>?qo3 \<rightarrow> f ?qso3 \<in> \<delta>; qs = map Some ?qso3\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
\<lbrakk>?qo3 \<rightarrow> f ?qso3 \<in> \<delta>; qs = map Some ?qso3\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
assume A: "\<not>(\<exists>qo qso. (qo \<rightarrow> f qso)\<in>\<delta> \<and> qs=map Some qso)"
[PROOF STATE]
proof (state)
this:
\<nexists>qo qso. qo \<rightarrow> f qso \<in> \<delta> \<and> qs = map Some qso
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
have "(Some ` Qi \<union> \<delta>_states \<delta>complete) \<subseteq> Qcomplete"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Some ` Qi \<union> \<delta>_states \<delta>complete \<subseteq> Qcomplete
[PROOF STEP]
apply (auto intro: \<delta>_states_complete)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>xa. xa \<in> Qi \<Longrightarrow> Some xa \<in> Qcomplete
[PROOF STEP]
apply (simp add: Qcomplete_def ta_rstates_def)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
Some ` Qi \<union> \<delta>_states \<delta>complete \<subseteq> Qcomplete
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
with prems
[PROOF STATE]
proof (chain)
picking this:
qs \<in> lists (ta_rstates completeTA)
A f = Some (length qs)
Some ` Qi \<union> \<delta>_states \<delta>complete \<subseteq> Qcomplete
[PROOF STEP]
have B: "qs\<in>lists Qcomplete"
[PROOF STATE]
proof (prove)
using this:
qs \<in> lists (ta_rstates completeTA)
A f = Some (length qs)
Some ` Qi \<union> \<delta>_states \<delta>complete \<subseteq> Qcomplete
goal (1 subgoal):
1. qs \<in> lists Qcomplete
[PROOF STEP]
by (auto simp add: completeTA_def ta_rstates_def)
[PROOF STATE]
proof (state)
this:
qs \<in> lists Qcomplete
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
from A B prems(2)
[PROOF STATE]
proof (chain)
picking this:
\<nexists>qo qso. qo \<rightarrow> f qso \<in> \<delta> \<and> qs = map Some qso
qs \<in> lists Qcomplete
A f = Some (length qs)
[PROOF STEP]
have ?case
[PROOF STATE]
proof (prove)
using this:
\<nexists>qo qso. qo \<rightarrow> f qso \<in> \<delta> \<and> qs = map Some qso
qs \<in> lists Qcomplete
A f = Some (length qs)
goal (1 subgoal):
1. \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
apply (rule_tac x=None in exI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<nexists>qo qso. qo \<rightarrow> f qso \<in> \<delta> \<and> qs = map Some qso; qs \<in> lists Qcomplete; A f = Some (length qs)\<rbrakk> \<Longrightarrow> None \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
apply (simp add: completeTA_def \<delta>complete_def)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
\<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<nexists>qo qso. qo \<rightarrow> f qso \<in> \<delta> \<and> qs = map Some qso \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
goal (1 subgoal):
1. \<And>qs f. \<lbrakk>qs \<in> lists (ta_rstates completeTA); A f = Some (length qs)\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?qo3 \<rightarrow> f ?qso3 \<in> \<delta>; qs = map Some ?qso3\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
\<nexists>qo qso. qo \<rightarrow> f qso \<in> \<delta> \<and> qs = map Some qso \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?qo3 \<rightarrow> f ?qso3 \<in> \<delta>; qs = map Some ?qso3\<rbrakk> \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
\<nexists>qo qso. qo \<rightarrow> f qso \<in> \<delta> \<and> qs = map Some qso \<Longrightarrow> \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
goal (1 subgoal):
1. \<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>q. q \<rightarrow> f qs \<in> ta_rules completeTA
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 8076, "file": "Tree-Automata_Ta", "length": 56}
|
# -*- coding: utf-8 -*-
"""
Honest election simulations comparing IRV, score, plurality, and
Smith/minimax voting in a symmetric three-candidate center-squeeze setup.
"""
import votesim
from votesim.models import spatial
from votesim.utilities.write import StringTable
import matplotlib.pyplot as plt
#import seaborn as sns
import numpy as np
import pandas as pd
#votesim.logconfig.setInfo()
#votesim.logconfig.setDebug()
votesim.logconfig.setWarning()
types = ['irv', 'score', 'plurality', 'smith_minimax']
v = spatial.SimpleVoters(0)
v.add_random(500, ndim=1)
c = spatial.Candidates(v, 0)
e = spatial.Election(v, None, seed=0)
distances = np.linspace(0, 2, 50)
trials = np.arange(50)
for dist in distances:
c.reset()
    # Symmetric candidates at +/- dist around the voter centroid; the small
    # 0.05 offset on the right candidate presumably avoids exact ties
    # between the two edge candidates.
    a = [-dist, 0, dist+.05]
a = np.atleast_2d(a).T
c.add(a)
e.set_data(v, c)
for t in types:
e.run(etype=t)
df = e.dataframe()
vse = df['stats.regret.efficiency_voter']
etype = df['args.election.1.run.etype']
vse1 = vse[etype == 'irv']
vse2 = vse[etype == 'score']
vse3 = vse[etype == 'plurality']
vse4 = vse[etype == 'smith_minimax']
candidates1 = df['args.candidate.0.add.candidates']
candidates1 = candidates1[etype == 'irv']
candidates1 = np.column_stack(candidates1)
plt.figure()
plt.suptitle("Center Sqeeze Suceptibility, Symmetric 3 Candidate Race")
plt.subplot(2,2,1)
plt.hist(v.voters, bins=20)
plt.xlabel('Voter Preference Location (std deviations)')
plt.ylabel('Voter Distribution')
plt.grid()
plt.subplot(2,2,2)
plt.plot(distances, vse1, '.-', label='irv')
plt.plot(distances, vse2, '.-', label='score')
plt.plot(distances, vse3, '--', label='plurality')
plt.plot(distances, vse4, '--', label='smith_minimax')
plt.legend()
plt.xlabel('Edge candidate distance from voter centroid (std deviations)')
plt.ylabel('voter satisfaction efficiency')
plt.grid()
plt.subplot(2,2,3)
plt.plot(trials, candidates1[0],'.', label='left candidate')
plt.plot(trials, candidates1[1],'.', label='center candidate')
plt.plot(trials, candidates1[2],'.', label='right candidate')
plt.xlabel("Simulation Trial Number")
plt.ylabel("Candidate preference location (std deviations)")
plt.legend()
plt.grid()
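# Note: when this script is run non-interactively, an explicit plt.show()
# (or plt.savefig(...)) call is needed at this point to actually display
# or persist the figure.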
|
{"hexsha": "507b4ce57a984c71d693b16d38c53de444c4c6ee", "size": 2201, "ext": "py", "lang": "Python", "max_stars_repo_path": "archive/examples/center_squeeze_3way/center_squeeze.py", "max_stars_repo_name": "johnh865/election_sim", "max_stars_repo_head_hexsha": "b73b7e65f1bb22abb82cbe8442fcf02b0c20894e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-10-21T23:24:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T03:04:59.000Z", "max_issues_repo_path": "archive/examples/center_squeeze_3way/center_squeeze.py", "max_issues_repo_name": "johnh865/election_sim", "max_issues_repo_head_hexsha": "b73b7e65f1bb22abb82cbe8442fcf02b0c20894e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-02-09T23:52:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-10T04:08:35.000Z", "max_forks_repo_path": "archive/examples/center_squeeze_3way/center_squeeze.py", "max_forks_repo_name": "johnh865/election_sim", "max_forks_repo_head_hexsha": "b73b7e65f1bb22abb82cbe8442fcf02b0c20894e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-21T23:32:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-21T23:32:18.000Z", "avg_line_length": 26.2023809524, "max_line_length": 74, "alphanum_fraction": 0.7014993185, "include": true, "reason": "import numpy", "num_tokens": 640}
|
#include <boost/pool/object_pool.hpp>
int main()
{
    boost::object_pool<int> pool;
    // malloc() hands out raw, uninitialized memory from the pool;
    // it pairs with free(), which releases without running a destructor.
    int *i = pool.malloc();
    *i = 1;
    // construct() allocates and initializes in one step; it pairs with
    // destroy(), which runs the destructor before releasing the memory.
    int *j = pool.construct(2);
    pool.free(i);
    pool.destroy(j);
}
|
{"hexsha": "a5b78307c0b198160a865dc560ae2eb9d5e81122", "size": 192, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Example/pool_02/main.cpp", "max_stars_repo_name": "KwangjoJeong/Boost", "max_stars_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Example/pool_02/main.cpp", "max_issues_repo_name": "KwangjoJeong/Boost", "max_issues_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Example/pool_02/main.cpp", "max_forks_repo_name": "KwangjoJeong/Boost", "max_forks_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.7142857143, "max_line_length": 37, "alphanum_fraction": 0.6145833333, "num_tokens": 59}
|
/**
* @file visitor.hpp
* @author Leonardo Arcari (leonardo1.arcari@gmail.com)
* @version 1.0.0
* @date 2018-10-28
*
* @copyright Copyright (c) 2018 Leonardo Arcari
*
* MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef ARLIB_VISITOR_HPP
#define ARLIB_VISITOR_HPP
#include <boost/graph/graph_concepts.hpp>
#include <limits>
#include <unordered_set>
/**
* An Alternative-Routing library for Boost.Graph
*/
namespace arlib {
//===----------------------------------------------------------------------===//
// Bidirectional Dijkstra visitor
//===----------------------------------------------------------------------===//
/**
* CRTP interface class for bidirectional_dijkstra() visitor.
*
* A BiDijkstraVisitor is passed to bidirectional_dijkstra() in order to
* customize the algorithm behavior with respect to
* - terminating_condition(): when a vertex is found both from source and
* target searches, bidirectional_dijkstra() asks the visitor whether to end the
* search or keep going.
 *  - expand_vertex(): given a vertex v, its distance from one root, the
 * distance-lower-bound from the other root, and the current minimum
 * st-distance, the visitor should return whether to expand the search in
 * the direction of v or not.
*
* @tparam Derived The derived class implementing BiDijkstraVisitor interface
*/
template <typename Derived> class BiDijkstraVisitor {
public:
/**
* Whether or not bidirectional_dijkstra() should stop. It is invoked when a
* vertex v is found both from source search and target search.
*
* @tparam Length The weight value type.
* @param min_distance The minimum distance to any node in current direction
* fringe.
* @param other_min_distance The minimum distance to any node in the other
* direction fringe.
* @param st_distance Current minimum source-target distance.
* @return true if bidirectional_dijkstra() should stop.
* @return false otherwise.
*/
template <typename Length>
bool terminating_condition(Length min_distance, Length other_min_distance,
Length st_distance) {
return static_cast<Derived *>(this)->terminating_condition(
min_distance, other_min_distance, st_distance);
}
/**
   * Whether or not bidirectional_dijkstra() should expand the search through
* the input Vertex v.
*
* @tparam Vertex The vertex_descriptor.
* @tparam Length The weight value type.
* @param v The candidate Vertex to expand.
* @param v_distance The Vertex distance according to current-direction
* search.
* @param lower_bound_v The Vertex distance-heuristic according to
* opposite-direction search.
* @param st_distance Current minimum source-target distance.
* @return true if bidirectional_dijkstra() should expand vertex @p v
* @return false otherwise.
*/
template <typename Vertex, typename Length>
bool expand_vertex(Vertex v, Length v_distance, Length lower_bound_v,
Length st_distance) {
return static_cast<Derived *>(this)->expand_vertex(
v, v_distance, lower_bound_v, st_distance);
}
};
/**
* BiDijkstraVisitor implementing vanilla Bidirectional Dijkstra behavior as
* described in Nicholson's paper (1966):
* https://academic.oup.com/comjnl/article/9/3/275/406281
*/
class IdentityBiDijkstraVisitor
: public BiDijkstraVisitor<IdentityBiDijkstraVisitor> {
public:
/**
* Vanilla Bidirectional Dijkstra stopping condition.
*
* @tparam Length The weight value type.
* @param min_distance The minimum distance to any node in current direction
* fringe.
* @param other_min_distance The minimum distance to any node in the other
* direction fringe.
* @param st_distance Current minimum source-target distance.
* @return true if @p min_distance + @p other_min_distance > @p st_distance.
* @return false otherwise.
*/
template <typename Length>
bool terminating_condition(Length min_distance, Length other_min_distance,
Length st_distance) {
return (min_distance + other_min_distance) > st_distance;
}
/**
* Always-expand policy.
*
* @tparam Vertex The vertex_descriptor
* @tparam Length The weight value type.
* @return true always.
*/
template <typename Vertex, typename Length>
bool expand_vertex(Vertex, Length, Length, Length) {
return true;
}
};
/**
* BiDijkstraVisitor implementing Uninformed Bidirectional Pruner as described
* in
*
 * A. Paraskevopoulos, C. Zaroliagis, Improved alternative route
 * planning, in: OASIcs-OpenAccess Series in Informatics, Vol. 33, Schloss
 * Dagstuhl-Leibniz-Zentrum fuer Informatik, 2013
* http://drops.dagstuhl.de/opus/volltexte/2013/4248/
*
 * The purpose of this visitor is to explore the whole graph and keep track of
 * all those vertices that should be pruned.
 * Let v be a Vertex, let tau be the pruning factor, v_distance be the distance
 * of v from the root of the current search, lower_bound_v be the
 * distance-heuristic of v from the opposite search-root and st_distance be the
 * current minimum distance between source and target nodes. Then v should be
 * pruned if:
 * v_distance + lower_bound_v > tau * st_distance
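 *
 * As a worked numerical example (illustrative values only): with tau = 1.2
 * and st_distance = 10, a vertex with v_distance = 7 and lower_bound_v = 6
 * gives 7 + 6 = 13 > 1.2 * 10 = 12, so it is pruned, whereas
 * lower_bound_v = 4 gives 11 <= 12 and the search expands through it.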
*
* @see uninformed_bidirectional_pruner()
*
* @tparam Vertex The vertex_descriptor
*/
template <typename Vertex>
class UninformedBiPrunerVisitor
: public BiDijkstraVisitor<UninformedBiPrunerVisitor<Vertex>> {
public:
/**
   * A set of Vertex objects of the graph
*/
using VertexSet = std::unordered_set<Vertex, boost::hash<Vertex>>;
/**
* The VertexSet const-iterator type
*/
using const_iterator = typename VertexSet::const_iterator;
/**
* Construct a new UninformedBiPrunerVisitor object with a pruning factor of
* @p tau.
*
* @param tau The pruning factor.
*/
explicit UninformedBiPrunerVisitor(double tau)
: tau{tau}, is_pruning_phase{false} {}
/**
   * Never terminate the search. As a side effect, once the two fringes meet
   * (min_distance + other_min_distance > st_distance), the visitor switches
   * into its pruning phase, which changes the behavior of expand_vertex().
   *
* @tparam Length The weight value type.
* @param min_distance The minimum distance to any node in current direction
* fringe.
* @param other_min_distance The minimum distance to any node in the other
* direction fringe.
* @param st_distance Current minimum source-target distance.
* @return false always.
*/
template <typename Length>
bool terminating_condition(Length min_distance, Length other_min_distance,
Length st_distance) {
if (min_distance + other_min_distance > st_distance) {
is_pruning_phase = true;
}
return false;
}
/**
* If the shortest-path from source to target hasn't been found yet, then
* always expand. Otherwise, expand only if
* @p v_distance + @p lower_bound_v <= tau * @p st_distance
*
* @tparam Length The weight value type.
* @param v The candidate Vertex to expand.
* @param v_distance The Vertex distance according to current-direction
* search.
* @param lower_bound_v The Vertex distance-heuristic according to
* opposite-direction search.
* @param st_distance Current minimum source-target distance.
* @return true If the shortest-path from source to target hasn't been found
* yet or if @p v_distance + @p lower_bound_v <= tau * @p st_distance
* @return false otherwise.
*/
template <typename Length>
bool expand_vertex(Vertex v, Length v_distance, Length lower_bound_v,
Length st_distance) {
if (is_pruning_phase) {
if (v_distance + lower_bound_v <= tau * st_distance) {
return true;
} else {
pruned_vertices.insert(v);
return false;
}
} else
return true;
}
/**
* @return a pair of const-iterators to begin and end of the pruned VertexSet.
*/
std::pair<const_iterator, const_iterator> get_pruned_vertices() const {
return {pruned_vertices.cbegin(), pruned_vertices.cend()};
}
/**
* @param v The vertex to test
* @return true if @p v should be pruned.
* @return false otherwise.
*/
bool is_pruned(const Vertex &v) const {
return (pruned_vertices.find(v) != std::end(pruned_vertices));
}
private:
double tau;
bool is_pruning_phase;
std::unordered_set<Vertex, boost::hash<Vertex>> pruned_vertices;
};
} // namespace arlib
#endif
|
{"hexsha": "323f8ec119e08745b68cc01bd96197ceaf1462bc", "size": 9411, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/arlib/routing_kernels/visitor.hpp", "max_stars_repo_name": "ashishkashinath/arlib", "max_stars_repo_head_hexsha": "891aa8603a6e07a16aec5700e7129a0d14a40b84", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2018-10-29T17:17:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T02:09:37.000Z", "max_issues_repo_path": "include/arlib/routing_kernels/visitor.hpp", "max_issues_repo_name": "ashishkashinath/arlib", "max_issues_repo_head_hexsha": "891aa8603a6e07a16aec5700e7129a0d14a40b84", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-04-05T07:27:35.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-05T07:27:35.000Z", "max_forks_repo_path": "include/arlib/routing_kernels/visitor.hpp", "max_forks_repo_name": "ashishkashinath/arlib", "max_forks_repo_head_hexsha": "891aa8603a6e07a16aec5700e7129a0d14a40b84", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-07-20T09:31:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T12:06:49.000Z", "avg_line_length": 37.0511811024, "max_line_length": 80, "alphanum_fraction": 0.6983317395, "num_tokens": 2149}
|
#!/usr/bin/python -Wall
# ================================================================
# Tom Loredo
# loredo at spacenet dot tn dot cornell dot edu
# ================================================================
# Howdy -
#
# Below are some pure Python special functions, some adapted from Numerical
# Recipes, some from formulas in standard references. They are mostly related
# to the Gamma function. This is old stuff, practically my first Python, so
# don't expect much! 8-)
#
# I didn't know about cephes at the time, and mostly use cephes now. But these
# have the virtue of being pure python, so they may be helpful to you if you
# can't compile cephes on your platform.
#
# I've done some other Python translations of NR routines, soon to appear on a
# web page. Others are already on the web:
#
# http://www.python.org/topics/scicomp/recipes_in_python.html
#
# Peace,
# Tom Loredo
#
# PS: One of the Numerical Recipes coauthors is in the office next door, and
# knows I've been up to this stuff. When I first told him I'd been translating
# some NR routines into Python, his response was, "Why?!" 8-)
# ================================================================
# Note JRK 2009-02-23: See the scipy.special package. (import scipy.special).
from __future__ import division # 1/2 = 0.5, not 0.
from math import *
# ================================================================
# Globals
max_iters = 'Too many iterations: '
rt2 = sqrt(2.0)
gammln_cof = [76.18009173, -86.50532033, 24.01409822, -1.231739516e0, 0.120858003e-2, -0.536382e-5]
gammln_stp = 2.50662827465
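# The list above holds the six Lanczos-series coefficients used by the
# Numerical Recipes gammln routine; gammln_stp is (approximately)
# sqrt(2*pi) = 2.5066282746...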
# ================================================================
# Gamma & incomplete Gamma
def gammln(xx):
"""Logarithm of the gamma function."""
global gammln_cof, gammln_stp
x = xx - 1.0
tmp = x + 5.5
tmp = (x + 0.5)*log(tmp) - tmp
ser = 1.0
for j in range(6):
x = x + 1.0
ser = ser + gammln_cof[j]/x
return tmp + log(gammln_stp*ser)
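# Quick sanity check against an exact value: gamma(6) = 5! = 120, so
# gammln(6.0) should come out close to log(120) ~= 4.7875.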
# ----------------------------------------------------------------
# John Kerl 2008-02-04
def gamma(x):
"""Gamma function."""
return exp(gammln(x))
# ----------------------------------------------------------------
# John Kerl 2008-02-04.
def beta(a, b):
"""Beta function."""
return exp(gammln(a)+gammln(b)-gammln(a+b))
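# Example with an exactly known value: beta(2, 3) = 1!*2!/4! = 1/12 ~= 0.0833.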
# ----------------------------------------------------------------
def gser(a, x, itmax=700, eps=3.e-7):
"""Series approx'n to the incomplete gamma function."""
gln = gammln(a)
    if (x < 0.0):
        raise ValueError(repr((a, x)))
    if (x == 0.0):
        return (0.0, gln)
ap = a
sum = 1.0 / a
delta = sum
n = 1
while n <= itmax:
ap = ap + 1.0
delta = delta * x / ap
sum = sum + delta
if (abs(delta) < abs(sum)*eps):
return (sum * exp(-x + a*log(x) - gln), gln)
n = n + 1
    raise RuntimeError(max_iters + repr((abs(delta), abs(sum)*eps)))
# ----------------------------------------------------------------
def gcf(a, x, itmax=200, eps=3.e-7):
"""Continued fraction approx'n of the incomplete gamma function."""
gln = gammln(a)
gold = 0.0
a0 = 1.0
a1 = x
b0 = 0.0
b1 = 1.0
fac = 1.0
n = 1
while n <= itmax:
an = n
ana = an - a
a0 = (a1 + a0*ana)*fac
b0 = (b1 + b0*ana)*fac
anf = an*fac
a1 = x*a0 + anf*a1
b1 = x*b0 + anf*b1
if (a1 != 0.0):
fac = 1.0 / a1
g = b1*fac
if (abs((g-gold)/g) < eps):
return (g*exp(-x+a*log(x)-gln), gln)
gold = g
n = n + 1
    raise RuntimeError(max_iters + str(abs((g-gold)/g)))
# ----------------------------------------------------------------
def gammp(a, x):
"""Incomplete gamma function."""
if (x < 0.0 or a <= 0.0):
raise ValueError, (a, x)
if (x < a+1.0):
return gser(a,x)[0]
else:
return 1.-gcf(a,x)[0]
# ----------------------------------------------------------------
def gammq(a, x):
"""Incomplete gamma function."""
if (x < 0.0 or a <= 0.0):
raise ValueError, repr((a, x))
if (x < a+1.0):
return 1.-gser(a,x)[0]
else:
return gcf(a,x)[0]
# ================================================================
# Error function, normal CDF and inverse
# ----------------------------------------------------------------
def ncdf_inv(p):
"""Inverse of the normal CDF."""
c0 = 2.515517
c1 = 0.802853
c2 = 0.010328
d1 = 1.432788
d2 = 0.189269
d3 = 0.001308
sign = -1.0
if (p > 0.5):
sign = 1.0
p = 1.0 - p
arg = -2.*log(p)
t = sqrt(arg)
g = t - (c0 + t*(c1 + t*c2)) / (1.0 + t*(d1 + t*(d2 + t*d3)))
return sign*g
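# Example: ncdf_inv(0.975) should be close to the familiar normal quantile
# 1.96, to within the ~4.5e-4 error of the approximation above.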
# ----------------------------------------------------------------
def erfcc(x):
"""Complementary error function."""
z = abs(x)
t = 1.0 / (1.0 + 0.5*z)
r = t * exp(-z*z-1.26551223+t*(1.00002368+t*(.37409196+
t*(.09678418+t*(-.18628806+t*(.27886807+
t*(-1.13520398+t*(1.48851587+t*(-.82215223+
t*.17087277)))))))))
if (x >= 0.0):
return r
else:
return 2.0 - r
# ----------------------------------------------------------------
def ncdf(x):
"""Cumulative normal dist'n."""
global rt2
return 1.0 - 0.5*erfcc(x/rt2)
# ----------------------------------------------------------------
def ncdf_sig(nsig):
    """Cumulative normal dist'n inside nsig sigmas.
    ncdf_sig = 1 - 2 * (upper tail) = 1 - erfc(nsig/rt(2))"""
global rt2
return 1.0 - erfcc(nsig/rt2)
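# Example coverage values for a standard normal: ncdf_sig(1.0) ~= 0.6827
# and ncdf_sig(2.0) ~= 0.9545.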
# ================================================================
# Chi squared distribution
# ----------------------------------------------------------------
def pchisq(chisq, nu):
"""Lower tail area of the chi**2 dist'n with nu dof.
    Note that chisq is *not* the reduced chi**2!"""
hnu = 0.5 * nu
hchi = 0.5 * chisq
return gammp(hnu, hchi)
# ----------------------------------------------------------------
def qchisq(chisq, nu):
"""Upper tail area of the chi**2 dist'n with nu dof.
    Note that chisq is *not* the reduced chi**2!"""
hnu = 0.5 * nu
hchi = 0.5 * chisq
return gammq(hnu, hchi)
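# Example with a well-known critical value: for nu = 1 dof,
# pchisq(3.841, 1) ~= 0.95 and qchisq(3.841, 1) ~= 0.05.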
# ----------------------------------------------------------------
def chisq_crit(nu, p, tol=1.e-5):
"""Critical chi**2 with lower tail area of p for nu dof."""
    # For the first guess, use the asymptotic normal limit of the
    # chi**2 distribution: chi**2 ~ N(nu, sqrt(2*nu)).
chi = nu + ncdf_inv(p)*sqrt(2.*nu)
pcur = pchisq(chi,nu)
# Now do a Newton-Raphson loop...
while 1:
dfdc = (pchisq(1.001*chi,nu) - pcur) / (0.001*chi)
chi = chi - (pcur - p)/dfdc
pcur = pchisq(chi,nu)
if (abs(pcur-p) <= tol):
return chi
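# Example: chisq_crit(1., 0.95) should return roughly 3.84, inverting the
# pchisq example above.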
# ================================================================
# Allow it to run as a script
if __name__ == "__main__":
print 'gser: ', gser(5., 5.0)
print 'gcf: ', gcf(5., 7.0)
print 'gammp, gammq: ', gammp(5.,5.0), gammq(5.,5.0)
print 'erfcc: ', erfcc(.5)
print 'ncdf_inv: ', ncdf_inv(0.977), ncdf_inv(0.5)
print 'ncdf: ', ncdf(1.0)
print 'ncdf_sig: ', ncdf_sig(2.0)
print 'q & p chisq: ', qchisq(4.,1), pchisq(4.,1)
print 'chisq_crit: ', chisq_crit(1.,0.954)
|
{"hexsha": "e6f16c066af12cab30acbafa10488e845bccc1bf", "size": 6756, "ext": "py", "lang": "Python", "max_stars_repo_path": "pythonlib/sp_funcs_m.py", "max_stars_repo_name": "johnkerl/scripts-math", "max_stars_repo_head_hexsha": "71b6c6617728290ce9643c80a75a402707ad1134", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-01-11T20:56:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-22T10:32:59.000Z", "max_issues_repo_path": "pythonlib/sp_funcs_m.py", "max_issues_repo_name": "johnkerl/scripts-math", "max_issues_repo_head_hexsha": "71b6c6617728290ce9643c80a75a402707ad1134", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pythonlib/sp_funcs_m.py", "max_forks_repo_name": "johnkerl/scripts-math", "max_forks_repo_head_hexsha": "71b6c6617728290ce9643c80a75a402707ad1134", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-01-12T00:55:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-22T10:33:03.000Z", "avg_line_length": 28.5063291139, "max_line_length": 99, "alphanum_fraction": 0.4906749556, "include": true, "reason": "import scipy", "num_tokens": 2185}
|
[STATEMENT]
lemma loan_zero [simp]: "\<delta> n 0 = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<delta> n 0 = 0
[PROOF STEP]
unfolding loan_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<iota> (\<lambda>m. if n = m then 0 else 0) = 0
[PROOF STEP]
using zero_account_def
[PROOF STATE]
proof (prove)
using this:
0 = \<iota> (\<lambda>_. 0)
goal (1 subgoal):
1. \<iota> (\<lambda>m. if n = m then 0 else 0) = 0
[PROOF STEP]
by fastforce
|
{"llama_tokens": 210, "file": "Risk_Free_Lending_Risk_Free_Lending", "length": 3}
|
default_datastore() = joinpath("data", "$VERSION")
default_benchmarkstore() = joinpath(default_datastore(), "benchmark")
|
{"hexsha": "148ddd83fa8442a6d6666eeb1bea176e43927708", "size": 121, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/core.jl", "max_stars_repo_name": "tkf/ThreadsAPIBenchmarks.jl", "max_stars_repo_head_hexsha": "4e6b8795334a45fd3ffa687a70bb68efec0c4839", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-26T06:14:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-26T06:33:08.000Z", "max_issues_repo_path": "src/core.jl", "max_issues_repo_name": "tkf/ThreadsAPIBenchmarks.jl", "max_issues_repo_head_hexsha": "4e6b8795334a45fd3ffa687a70bb68efec0c4839", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-28T22:04:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-28T22:04:39.000Z", "max_forks_repo_path": "src/core.jl", "max_forks_repo_name": "tkf/ThreadsAPIBenchmarks.jl", "max_forks_repo_head_hexsha": "4e6b8795334a45fd3ffa687a70bb68efec0c4839", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3333333333, "max_line_length": 69, "alphanum_fraction": 0.7603305785, "num_tokens": 28}
|
"""
pyrad.prod.process_product
==========================
Functions for obtaining Pyrad products from the datasets
.. autosummary::
:toctree: generated/
generate_occurrence_products
generate_cosmo_coord_products
generate_cosmo_to_radar_products
generate_sun_hits_products
generate_qvp_products
generate_ml_products
"""
from copy import deepcopy
from warnings import warn
import os
import numpy as np
import pyart
from .process_vol_products import generate_vol_products
from ..io.io_aux import get_fieldname_pyart
from ..io.io_aux import get_save_dir, make_filename
from ..io.read_data_sun import read_sun_retrieval
from ..io.read_data_other import read_ml_ts
from ..io.write_data import write_sun_hits, write_sun_retrieval, write_timeseries_point
from ..io.write_data import write_excess_gates, write_ts_ml
from ..graph.plots import plot_sun_hits
from ..graph.plots_timeseries import plot_sun_retrieval_ts, plot_ml_ts
from ..graph.plots_vol import plot_fixed_rng, plot_fixed_rng_sun
from ..util.radar_utils import create_sun_hits_field
from ..util.radar_utils import create_sun_retrieval_field
def generate_occurrence_products(dataset, prdcfg):
"""
generates occurrence products. Accepted product types:
'WRITE_EXCESS_GATES': Write the data that identifies radar gates
with clutter that has a frequency of occurrence above a certain
threshold.
User defined parameters:
quant_min: float
Minimum frequency of occurrence in percentage to keep the
gate as valid. Default 95.
All the products of the 'VOL' dataset group
Parameters
----------
dataset : tuple
radar object and metadata dictionary
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
instant = False
if 'instant' in prdcfg:
instant = prdcfg['instant']
if not instant and not dataset['occu_final']:
return None
if prdcfg['type'] == 'WRITE_EXCESS_GATES':
if not dataset['occu_final']:
return None
radar = dataset['radar_out']
if (('frequency_of_occurrence' not in radar.fields) or
('occurrence' not in radar.fields) or
('number_of_samples' not in radar.fields)):
warn('Unable to create quantile excess gates file. '
'Missing data')
return None
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
quant_min = 95.
if 'quant_min' in prdcfg:
quant_min = prdcfg['quant_min']
# get index of gates exceeding quantile
freq_occu = radar.fields['frequency_of_occurrence'][
'data']
ind_ray, ind_rng = np.where(freq_occu > quant_min)
if ind_ray.size == 0:
warn('No data exceeds the frequency of occurrence ' +
str(quant_min)+' %')
return None
excess_dict = {
'starttime': dataset['starttime'],
'endtime': dataset['endtime'],
'quant_min': quant_min,
'ray_ind': ind_ray,
'rng_ind': ind_rng,
'ele': radar.elevation['data'][ind_ray],
'azi': radar.azimuth['data'][ind_ray],
'rng': radar.range['data'][ind_rng],
'nsamples': (
radar.fields['number_of_samples']['data'][ind_ray, ind_rng]),
'occurrence': (
radar.fields['occurrence']['data'][ind_ray, ind_rng]),
'freq_occu': freq_occu[ind_ray, ind_rng]
}
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=dataset['endtime'])
fname = make_filename(
'excess_gates', prdcfg['dstype'], prdcfg['prdname'], ['csv'],
prdcfginfo='quant'+'{:.1f}'.format(quant_min),
timeinfo=dataset['endtime'])
fname = savedir+fname[0]
fname = write_excess_gates(excess_dict, fname)
if fname is not None:
print('saved excess gates file: '+fname)
return fname
field_name = get_fieldname_pyart(prdcfg['voltype'])
if ((field_name == 'frequency_of_occurrence') and
(not dataset['occu_final'])):
return None
if dataset['occu_final']:
prdcfg['timeinfo'] = dataset['endtime']
return generate_vol_products(dataset, prdcfg)
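# A minimal 'WRITE_EXCESS_GATES' product configuration might look like the
# sketch below. The values are illustrative (not from any real setup); the
# keys are exactly the ones read in generate_occurrence_products above.
#
#     prdcfg = {
#         'type': 'WRITE_EXCESS_GATES',
#         'quant_min': 98.,                # keep gates above 98 % occurrence
#         'dsname': 'occurrence_clt',
#         'dstype': 'OCCURRENCE',
#         'basepath': '/data/pyrad_products/',
#         'procname': 'my_processing',
#         'prdname': 'excess_gates',
#     }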
def generate_cosmo_coord_products(dataset, prdcfg):
"""
generates COSMO coordinates products. Accepted product types:
'SAVEVOL': Save an object containing the index of the COSMO model grid
that corresponds to each radar gate in a C/F radial file.
User defined parameters:
file_type: str
The type of file used to save the data. Can be 'nc' or
'h5'. Default 'nc'
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
compression: str
For ODIM file formats, the type of compression. Can be any
of the allowed compression types for hdf5 files. Default
gzip
compression_opts: any
The compression options allowed by the hdf5. Depends on
the type of compression. Default 6 (The gzip compression
level).
Parameters
----------
dataset : tuple
radar object containing the COSMO coordinates
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
if prdcfg['type'] == 'SAVEVOL':
radar_obj = dataset['radar_out']
ind_rad = dataset['ind_rad']
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in radar_obj.fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
file_type = prdcfg.get('file_type', 'nc')
physical = prdcfg.get('physical', True)
compression = prdcfg.get('compression', 'gzip')
compression_opts = prdcfg.get('compression_opts', 6)
new_dataset = deepcopy(radar_obj)
new_dataset.fields = dict()
new_dataset.add_field(field_name, radar_obj.fields[field_name])
savedir = prdcfg['cosmopath'][ind_rad]+'rad2cosmo/'
fname = 'rad2cosmo_'+prdcfg['voltype']+'_'+prdcfg['procname']+'.nc'
if file_type == 'nc':
pyart.io.cfradial.write_cfradial(
savedir+fname, new_dataset, physical=physical)
elif file_type == 'h5':
pyart.aux_io.write_odim_h5(
savedir+fname, new_dataset, physical=physical,
compression=compression, compression_opts=compression_opts)
else:
warn('Data could not be saved. ' +
'Unknown saving file type '+file_type)
return None
print('saved file: '+savedir+fname)
return fname
warn(' Unsupported product type: ' + prdcfg['type'])
return None
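# Illustrative 'SAVEVOL' configuration for the function above (hypothetical
# values). 'file_type', 'physical', 'compression' and 'compression_opts' are
# the optional keys read above, shown here with their defaults:
#
#     prdcfg = {
#         'type': 'SAVEVOL',
#         'voltype': '<datatype>',         # a Pyrad data type name
#         'file_type': 'nc',
#         'physical': True,
#     }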
def generate_cosmo_to_radar_products(dataset, prdcfg):
"""
generates COSMO data in radar coordinates products. Accepted product
types:
'SAVEVOL': Save an object containing the COSMO data in radar
        coordinates in a C/F radial or ODIM file.
User defined parameters:
file_type: str
The type of file used to save the data. Can be 'nc' or
'h5'. Default 'nc'
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
compression: str
For ODIM file formats, the type of compression. Can be any
of the allowed compression types for hdf5 files. Default
gzip
compression_opts: any
The compression options allowed by the hdf5. Depends on
the type of compression. Default 6 (The gzip compression
level).
All the products of the 'VOL' dataset group
Parameters
----------
dataset : tuple
radar object containing the COSMO coordinates
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
time_index = prdcfg.get('cosmo_time_index', 0)
if time_index > len(dataset)-1:
warn(
'COSMO time index larger than available. Skipping product ' +
prdcfg['type'])
return None
radar_dataset = dataset[time_index]
if prdcfg['type'] == 'SAVEVOL':
radar_obj = radar_dataset['radar_out']
ind_rad = radar_dataset['ind_rad']
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in radar_obj.fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
file_type = prdcfg.get('file_type', 'nc')
physical = prdcfg.get('physical', True)
compression = prdcfg.get('compression', 'gzip')
compression_opts = prdcfg.get('compression_opts', 6)
new_dataset = deepcopy(radar_obj)
new_dataset.fields = dict()
new_dataset.add_field(field_name, radar_obj.fields[field_name])
savedir = (
prdcfg['cosmopath'][ind_rad]+prdcfg['voltype']+'/radar/' +
prdcfg['timeinfo'].strftime('%Y-%m-%d')+'/'+prdcfg['procname']+'/')
fname = (
prdcfg['voltype']+'_RUN' +
prdcfg['timeinfo'].strftime('%Y%m%d%H%M%S')+'_' +
radar_dataset['dtcosmo'].strftime('%Y%m%d%H%M%S')+'.nc')
if not os.path.isdir(savedir):
os.makedirs(savedir)
if file_type == 'nc':
pyart.io.cfradial.write_cfradial(
savedir+fname, new_dataset, physical=physical)
elif file_type == 'h5':
pyart.aux_io.write_odim_h5(
savedir+fname, new_dataset, physical=physical,
compression=compression, compression_opts=compression_opts)
else:
warn('Data could not be saved. ' +
'Unknown saving file type '+file_type)
return None
print('saved file: '+savedir+fname)
return fname
return generate_vol_products(radar_dataset, prdcfg)
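# Sketch of how 'cosmo_time_index' selects the forecast time in the function
# above: the incoming dataset is a list of per-time dictionaries, so a
# (hypothetical) configuration such as
#
#     prdcfg = {'type': 'SAVEVOL', 'voltype': '<datatype>',
#               'cosmo_time_index': 1}
#
# would save the second available COSMO time step.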
def generate_sun_hits_products(dataset, prdcfg):
"""
generates sun hits products. Accepted product types:
'PLOT_SUN_HITS': Plots in a sun-radar azimuth difference-sun-radar
elevation difference grid the values of all sun hits obtained
during the processing period
'PLOT_SUN_RETRIEVAL': Plots in a sun-radar azimuth difference-sun-
radar elevation difference grid the retrieved sun pattern
'PLOT_SUN_RETRIEVAL_TS': Plots time series of the retrieved sun
pattern parameters
User defined parameters:
dpi: int
The pixel density of the plot. Default 72
add_date_in_fname: Bool
If true the year is added in the plot file name
        'PLOT_SUNSCAN': Plots a constant-range radar azimuth-elevation view
            of the sunscan field data
'WRITE_SUN_HITS': Writes the information concerning possible sun hits
in a csv file
'WRITE_SUN_RETRIEVAL': Writes the retrieved sun pattern parameters in
a csv file.
User defined parameters:
add_date_in_fname: Bool
If true the year is added in the csv file name
'WRITE_SUNSCAN': Writes the sunscan parameters in a csv file
All the products of the 'VOL' dataset group
Parameters
----------
dataset : tuple
radar object and sun hits dictionary
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
prdcfg['timeinfo'] = dataset['timeinfo']
if prdcfg['type'] == 'WRITE_SUN_HITS':
if 'sun_hits' not in dataset:
return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=dataset['timeinfo'])
fname = make_filename(
'info', prdcfg['dstype'], 'detected', ['csv'],
timeinfo=dataset['timeinfo'], timeformat='%Y%m%d')[0]
fname = savedir+fname
write_sun_hits(dataset['sun_hits'], fname)
print('saved sun hits file: '+fname)
        return fname
if prdcfg['type'] == 'PLOT_SUN_HITS':
if 'sun_hits_final' not in dataset:
return None
field_name = get_fieldname_pyart(prdcfg['voltype'])
if prdcfg['voltype'] not in dataset['sun_hits_final']:
warn(
' Field type ' + prdcfg['voltype'] +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=dataset['timeinfo'])
fname_list = make_filename(
'detected', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], timeinfo=dataset['timeinfo'],
timeformat='%Y%m%d')
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
field = create_sun_hits_field(
dataset['sun_hits_final']['rad_el'],
dataset['sun_hits_final']['rad_az'],
dataset['sun_hits_final']['sun_el'],
dataset['sun_hits_final']['sun_az'],
dataset['sun_hits_final'][prdcfg['voltype']],
prdcfg['sunhitsImageConfig'])
if field is None:
warn(
'Unable to create field '+prdcfg['voltype'] +
' Skipping product ' + prdcfg['type'])
return None
plot_sun_hits(field, field_name, fname_list, prdcfg)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'WRITE_SUN_RETRIEVAL':
if 'sun_retrieval' not in dataset:
return None
timeinfo = None
timeformat = None
if prdcfg.get('add_date_in_fname', False):
timeinfo = dataset['timeinfo']
timeformat = '%Y'
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=None)
fname = make_filename(
'info', prdcfg['dstype'], 'retrieval', ['csv'], timeinfo=timeinfo,
timeformat=timeformat, runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
write_sun_retrieval(dataset['sun_retrieval'], fname)
print('saved sun retrieval file: '+fname)
return fname
if prdcfg['type'] == 'PLOT_SUN_RETRIEVAL':
if 'sun_retrieval' not in dataset:
return None
field_name = get_fieldname_pyart(prdcfg['voltype'])
par = None
if field_name == 'sun_est_power_h':
par = 'par_h'
elif field_name == 'sun_est_power_v':
par = 'par_v'
elif field_name == 'sun_est_differential_reflectivity':
par = 'par_zdr'
        if par not in dataset['sun_retrieval']:
            warn(
                ' Field type ' + prdcfg['voltype'] +
                ' not available in data set. Skipping product ' +
                prdcfg['type'])
            return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=dataset['timeinfo'])
fname_list = make_filename(
'retrieval', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], timeinfo=dataset['timeinfo'],
timeformat='%Y%m%d')
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['sun_retrieval'][par] is None:
warn(
' Invalid retrieval parameters. Skipping product ' +
prdcfg['type'])
return None
field = create_sun_retrieval_field(
dataset['sun_retrieval'][par], field_name,
prdcfg['sunhitsImageConfig'],
lant=dataset['sun_retrieval']['lant'])
if field is not None:
plot_sun_hits(field, field_name, fname_list, prdcfg)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'PLOT_SUN_RETRIEVAL_TS':
if 'sun_retrieval' not in dataset:
return None
dpi = prdcfg.get('dpi', 72)
timeinfo = None
timeformat = None
if prdcfg.get('add_date_in_fname', False):
timeinfo = dataset['timeinfo']
timeformat = '%Y'
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdid'], timeinfo=None)
fname = make_filename(
'info', prdcfg['dstype'], 'retrieval', ['csv'], timeinfo=timeinfo,
timeformat=timeformat, runinfo=prdcfg['runinfo'])
fname = savedir + fname[0]
sun_retrieval = read_sun_retrieval(fname)
if sun_retrieval[0] is None:
warn(
'Unable to read sun retrieval file '+fname)
return None
if len(sun_retrieval[0]) < 2:
warn(
'Unable to plot sun retrieval time series. ' +
'Not enough data points.')
return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=None)
fname_list = make_filename(
'retrieval_ts', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], timeinfo=timeinfo,
timeformat=timeformat, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
titl = (prdcfg['runinfo']+' Sun Retrieval ' +
sun_retrieval[1][0].strftime('%Y%m%d')+'-' +
sun_retrieval[1][-1].strftime('%Y%m%d'))
figfname = plot_sun_retrieval_ts(
sun_retrieval, prdcfg['voltype'], fname_list, titl=titl, dpi=dpi)
if figfname is None:
return None
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'WRITE_SUNSCAN':
if 'sun_retrieval' not in dataset:
return None
text = ["SunScan info",
"sun_az: [deg] Azimuth sun position ",
"sun_el: [deg] Elevation sun position",
"noise_pwr: [dBm] Noise power",
"sun_maxpwr_noise: [dBm] sun maximal power sample (including noise)",
"sun_maxpwr_nonoise: [dBm] sun maximal power sample without noise",
"sun_maxpwr_fit: [dBm] sun maximal fitted power (without noise)",
"sun_maxpwr_toa: [dBm] sun maximal power at top of atmosphere",
"az_offset: [deg] Azimuth shift of fitted maxima to sun azimuth",
"el_offset: [deg] Elevation shift of fitted maxima to sun elevation",
"az_phi3db: [deg] Half-power beam width in azimuth",
"el_phi3db: [deg] Half-power beam width in elevation",
"fit_stddev: [dBm] Standard deviation (fit to samples)",
"num_samples: [#] Number of samples used for the sun power fitting"
]
sunRdata = dataset['sun_retrieval']
if dataset['field_name'] == 'noisedBm_hh':
data = {'dstype': prdcfg['dstype'],
'unit': 'dBm',
'time': sunRdata['sunscan_time'],
'label': ["sun_az", "sun_el", "noise_pwr",
"sun_maxpwr_noise", "sun_maxpwr_nonoise", "sun_maxpwr_fit",
"sun_maxpwr_toa", "az_offset", "el_offset",
"az_phi3db", "el_phi3db", "fit_stddev",
"num_samples"],
'value': [sunRdata['sunpos_az'], sunRdata['sunpos_el'],
sunRdata['noise_pwr'], sunRdata['sun_maxpwr_noise'],
sunRdata['sun_maxpwr_nonoise'], sunRdata['dBm_sun_est'],
sunRdata['dBm_sun_est_toa'], sunRdata['az_bias_h'],
sunRdata['el_bias_h'], sunRdata['az_width_h'],
sunRdata['el_width_h'], sunRdata['std(dBm_sun_est)'],
sunRdata['nhits_h']]
}
elif dataset['field_name'] == 'noisedBm_vv':
data = {'dstype': prdcfg['dstype'],
'unit': 'dBm',
'time': sunRdata['sunscan_time'],
'label': ["sun_az", "sun_el", "noise_pwr",
"sun_maxpwr_noise", "sun_maxpwr_nonoise", "sun_maxpwr_fit",
"sun_maxpwr_toa", "az_offset", "el_offset",
"az_phi3db", "el_phi3db", "fit_stddev",
"num_samples"],
'value': [sunRdata['sunpos_az'], sunRdata['sunpos_el'],
sunRdata['noise_pwr'], sunRdata['sun_maxpwr_noise'],
sunRdata['sun_maxpwr_nonoise'], sunRdata['dBmv_sun_est'],
sunRdata['dBmv_sun_est_toa'], sunRdata['az_bias_v'],
sunRdata['el_bias_v'], sunRdata['az_width_v'],
sunRdata['el_width_v'], sunRdata['std(dBmv_sun_est)'],
sunRdata['nhits_v']]
}
        else:
            warn('ERROR: No valid datatype for WRITE_SUNSCAN product.')
            return None
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], prdcfg['timeinfo'])
fname1 = make_filename(
'ts', prdcfg['dstype'], dataset['field_name'], ['csv'], timeinfo=prdcfg['timeinfo'],
timeformat='%Y%m%d', runinfo=prdcfg['runinfo'])[0]
fname1 = savedir+fname1
write_timeseries_point(fname1, data, prdcfg['dstype'], text)
        print('saved sunscan file: '+fname1)
return fname1
if prdcfg['type'] == 'PLOT_SUNSCAN':
radar = dataset['radar_out']
sun_hits = dataset['sun_hits']
field_name = dataset['field_name']
if field_name not in radar.fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined parameters
azi_res = prdcfg.get('azi_res', None)
ele_res = prdcfg.get('ele_res', None)
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
angtol = prdcfg.get('ang_tol', 0.5)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], prdcfg['timeinfo'])
fname_list = make_filename(
'constr', prdcfg['dstype'], prdcfg['dsname'],
prdcfg['imgformat'],
prdcfginfo='rng'+'{:.1f}'.format(
dataset['radar_out'].range['data'][0]),
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
        plot_fixed_rng_sun(
            radar, field_name, sun_hits, prdcfg, fname_list, azi_res=azi_res,
            ele_res=ele_res, ang_tol=angtol, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if 'radar_out' in dataset:
return generate_vol_products(dataset, prdcfg)
return None
def generate_qvp_products(dataset, prdcfg):
"""
Generates quasi vertical profile-like products. Quasi vertical profiles
come from azimuthal averaging of polarimetric radar data. With the
variable 'qvp_type' the user decides if the product has to be generated
at the end of the processing period ('final') or instantaneously
    ('instant').
Accepted product types:
All the products of the 'VOL' dataset group
Parameters
----------
dataset : dict
dictionary containing the radar object and a keyword stating the
status of the processing
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
qvp_type = 'final'
if 'qvp_type' in prdcfg:
qvp_type = prdcfg['qvp_type']
if qvp_type == 'final' and dataset['radar_type'] != 'final':
return None
prdcfg['timeinfo'] = dataset['start_time']
return generate_vol_products(dataset, prdcfg)
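# A minimal sketch (an addition, not part of the original module; the dict
# keys are assumptions taken from the docstring above): with qvp_type
# 'final', nothing is generated until the dataset reports a 'final' radar.
def _demo_qvp_gate():
    dataset = {'radar_type': 'intermediate', 'start_time': None}
    prdcfg = {'qvp_type': 'final'}
    assert generate_qvp_products(dataset, prdcfg) is None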
def generate_ml_products(dataset, prdcfg):
"""
Generates melting layer products. Accepted product types:
'ML_TS': Plots and writes a time series of the melting layer, i.e.
the evolution of the average and standard deviation of the melting
        layer top and thickness and the number of rays used in the
retrieval.
User defined parameters:
dpi: int
The pixel density of the plot. Default 72
'SAVE_ML': Saves an object containing the melting layer retrieval
information in a C/F radial file
All the products of the 'VOL' dataset group
Parameters
----------
dataset : dict
dictionary containing the radar object and a keyword stating the
status of the processing
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
filename : str
the name of the file created. None otherwise
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
if prdcfg['type'] == 'ML_TS':
dpi = prdcfg.get('dpi', 72)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
csvfname = make_filename(
'ts', prdcfg['dstype'], 'ml', ['csv'],
timeinfo=prdcfg['timeinfo'], timeformat='%Y%m%d')[0]
csvfname = savedir+csvfname
ml_bottom = dataset['ml_obj'].fields['melting_layer_height']['data'][:, 0]
ml_top = dataset['ml_obj'].fields['melting_layer_height']['data'][:, 1]
ml_top_avg = np.ma.asarray(np.ma.mean(ml_top))
ml_top_std = np.ma.asarray(np.ma.std(ml_top))
thick = ml_top-ml_bottom
thick_avg = np.ma.asarray(np.ma.mean(thick))
thick_std = np.ma.asarray(np.ma.std(thick))
nrays_valid = thick.compressed().size
nrays_total = thick.size
write_ts_ml(
prdcfg['timeinfo'], ml_top_avg, ml_top_std, thick_avg, thick_std,
nrays_valid, nrays_total, csvfname)
print('saved CSV file: '+csvfname)
(dt_ml_arr, ml_top_avg_arr, ml_top_std_arr, thick_avg_arr,
thick_std_arr, nrays_valid_arr, nrays_total_arr) = (
read_ml_ts(csvfname))
if dt_ml_arr is None:
warn(
'Unable to plot time series. No valid data')
return None
figfname_list = make_filename(
'ts', prdcfg['dstype'], 'ml', prdcfg['imgformat'],
timeinfo=dt_ml_arr[0], timeformat='%Y%m%d')
for i, figfname in enumerate(figfname_list):
figfname_list[i] = savedir+figfname
titl = dt_ml_arr[0].strftime('%Y-%m-%d')+' melting layer time series'
plot_ml_ts(
dt_ml_arr, ml_top_avg_arr, ml_top_std_arr, thick_avg_arr,
thick_std_arr, nrays_valid_arr, nrays_total_arr, figfname_list,
labelx='Time UTC', titl=titl, dpi=dpi)
print('----- save to '+' '.join(figfname_list))
return figfname_list
if prdcfg['type'] == 'SAVE_ML':
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname = make_filename(
'saveml', prdcfg['dstype'], 'ml_h', ['nc'],
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
pyart.io.cfradial.write_cfradial(fname, dataset['ml_obj'])
print('saved file: '+fname)
return fname
return generate_vol_products(dataset, prdcfg)
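# Hypothetical configuration sketch (an addition, not from the original
# module): the minimal prdcfg keys the 'ML_TS' branch above reads. Paths
# and names are placeholders, not real pyrad defaults.
_EXAMPLE_ML_TS_PRDCFG = {
    'type': 'ML_TS',
    'dsname': 'ml_detection',
    'dstype': 'ML_DETECTION',
    'prdname': 'ML_TS',
    'basepath': '/tmp/pyrad_out/',
    'procname': 'example_proc',
    'imgformat': ['png'],
    'timeinfo': None,  # a datetime.datetime in a real run
    'dpi': 72,
}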
|
{"hexsha": "e40d2ebe00295e45e96df9afd2dc2bcc075371b3", "size": 29763, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyrad_proc/pyrad/prod/process_product.py", "max_stars_repo_name": "jfigui/pyrad", "max_stars_repo_head_hexsha": "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2016-12-01T08:46:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-24T21:14:33.000Z", "max_issues_repo_path": "src/pyrad_proc/pyrad/prod/process_product.py", "max_issues_repo_name": "jfigui/pyrad", "max_issues_repo_head_hexsha": "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2017-02-23T14:52:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-01T10:43:52.000Z", "max_forks_repo_path": "src/pyrad_proc/pyrad/prod/process_product.py", "max_forks_repo_name": "jfigui/pyrad", "max_forks_repo_head_hexsha": "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2016-08-25T15:02:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-27T04:09:40.000Z", "avg_line_length": 35.6443113772, "max_line_length": 96, "alphanum_fraction": 0.5789738938, "include": true, "reason": "import numpy", "num_tokens": 7084}
|
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``dpctl_iface`` module implements Numba's interface to the dpctl library
that provides Python and C bindings to DPC++'s SYCL runtime API. The module
includes:
- LLVM IR builders for dpctl C API functions to be called directly from a Numba
generated LLVM module.
- Functions to launch kernels on the dpctl "current queue".
"""
import numba_dppy.dpctl_iface.dpctl_function_types as dpctl_fn_ty
from numba_dppy.dpctl_iface.dpctl_capi_fn_builder import DpctlCAPIFnBuilder
from numba_dppy.dpctl_iface.kernel_launch_ops import KernelLaunchOps
from numba_dppy.dpctl_iface.usm_ndarray_type import USMNdArrayType
__all__ = [
    "DpctlCAPIFnBuilder",
    "KernelLaunchOps",
    "USMNdArrayType",
]
get_current_queue = dpctl_fn_ty.dpctl_get_current_queue()
malloc_shared = dpctl_fn_ty.dpctl_malloc_shared()
queue_memcpy = dpctl_fn_ty.dpctl_queue_memcpy()
free_with_queue = dpctl_fn_ty.dpctl_free_with_queue()
event_wait = dpctl_fn_ty.dpctl_event_wait()
event_delete = dpctl_fn_ty.dpctl_event_delete()
queue_wait = dpctl_fn_ty.dpctl_queue_wait()
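# A trivial inspection helper (an addition, not part of numba-dppy): the
# objects above are Numba function-type descriptors for dpctl C API calls,
# so they can be collected and printed like any other Numba signature.
def _list_dpctl_fn_types():
    return {
        "get_current_queue": get_current_queue,
        "malloc_shared": malloc_shared,
        "queue_memcpy": queue_memcpy,
        "free_with_queue": free_with_queue,
        "event_wait": event_wait,
        "event_delete": event_delete,
        "queue_wait": queue_wait,
    }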
|
{"hexsha": "b5ae4137986fe36881df6af12a5f95c09cea27dd", "size": 1625, "ext": "py", "lang": "Python", "max_stars_repo_path": "numba_dppy/dpctl_iface/__init__.py", "max_stars_repo_name": "akharche/numba-dppy", "max_stars_repo_head_hexsha": "f12dac64b149bd72f305f341ff64b796bbb648c1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2020-11-25T12:13:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T14:26:14.000Z", "max_issues_repo_path": "numba_dppy/dpctl_iface/__init__.py", "max_issues_repo_name": "akharche/numba-dppy", "max_issues_repo_head_hexsha": "f12dac64b149bd72f305f341ff64b796bbb648c1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 439, "max_issues_repo_issues_event_min_datetime": "2020-11-17T14:48:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T10:09:47.000Z", "max_forks_repo_path": "numba_dppy/dpctl_iface/__init__.py", "max_forks_repo_name": "1e-to/numba-dppy", "max_forks_repo_head_hexsha": "70bd9d6621b1e93819ee3d9c107a60314043bf6f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-11-24T14:29:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T05:50:27.000Z", "avg_line_length": 37.7906976744, "max_line_length": 79, "alphanum_fraction": 0.8036923077, "include": true, "reason": "import numba,from numba", "num_tokens": 398}
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stochastic Polyak solver."""
import dataclasses
import functools
from typing import Any
from typing import Callable
from typing import NamedTuple
from typing import Optional
import jax
import jax.numpy as jnp
from jaxopt import base
from jaxopt.tree_util import tree_add
from jaxopt.tree_util import tree_add_scalar_mul
from jaxopt.tree_util import tree_scalar_mul
from jaxopt.tree_util import tree_sub
from jaxopt.tree_util import tree_vdot
from jaxopt.tree_util import tree_zeros_like
class StochasticPolyakState(NamedTuple):
"""Named tuple containing state information."""
iter_num: int
value: float
velocity: Optional[Any]
aux: Any
@dataclasses.dataclass
class DiagonalStochasticPolyak:
"""Stochastic Polyak solver.
Attributes:
fun: a function of the form ``fun(params, *args, **kwargs)``, where
``params`` are parameters of the model,
``*args`` and ``**kwargs`` are additional arguments.
    learning_rate: a step size scaling the Polyak update
    delta: a damping term added to the denominator of the Polyak step
momentum: momentum parameter, 0 corresponding to no momentum.
has_aux: whether ``fun`` outputs one (False) or more values (True).
When True it will be assumed by default that ``fun(...)[0]``
is the objective value. The auxiliary outputs are stored in
``state.aux``.
"""
fun: Callable
learning_rate: float
delta: float
momentum: float
has_aux: bool = False
def init(self,
init_params):
"""Initialize the ``(params, state)`` pair.
Args:
init_params: pytree containing the initial parameters.
Return type:
base.OptStep
Returns:
(params, state)
"""
if self.momentum == 0:
velocity = None
else:
velocity = tree_zeros_like(init_params)
state = StochasticPolyakState(
iter_num=0, value=jnp.inf, velocity=velocity, aux=None)
return base.OptStep(params=init_params, state=state)
def update(self,
params,
state,
data,
*args,
**kwargs):
"""Performs one iteration of the optax solver.
Args:
params: pytree containing the parameters.
state: named tuple containing the solver state.
data: dict.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Return type:
base.OptStep
Returns:
(params, state)
"""
del args, kwargs # unused
(value, aux), update = self._spsdiag_update(params, data)
if self.momentum == 0:
new_params = tree_add_scalar_mul(params, self.learning_rate, update)
new_velocity = None
else:
new_velocity = tree_sub(
tree_scalar_mul(self.momentum, state.velocity),
tree_scalar_mul(self.learning_rate, update))
new_params = tree_add(params, new_velocity)
aux['loss'] = jnp.mean(aux['loss'])
aux['accuracy'] = jnp.mean(aux['accuracy'])
if state.iter_num % 10 == 0:
print('Number of iterations', state.iter_num,
'. Objective function value: ', value)
new_state = StochasticPolyakState(
iter_num=state.iter_num+1, value=value, velocity=new_velocity, aux=aux)
return base.OptStep(params=new_params, state=new_state)
def __post_init__(self):
# Pre-compile useful functions.
def fun(params, args):
losses, aux = self.fun(params, args) # assumes has_aux = True
return jnp.mean(losses), aux
def least_square_regularizor_1d(a, b, delta):
      # Computes the minimizer of (a^T x + b)^2 + delta ||x||^2,
      # which is x* = -b * a / (a^T a + delta).
scale = -b/(tree_vdot(a, a) + delta)
return tree_scalar_mul(scale, a)
def single_update(params, data):
data_expanded = {
'image': jnp.expand_dims(data['image'], axis=0),
'label': jnp.expand_dims(data['label'], axis=0)
}
(value, aux), grad = jax.value_and_grad(
fun, has_aux=self.has_aux)(params, data_expanded)
new_update = least_square_regularizor_1d(grad, value, self.delta)
return (value, aux), new_update
all_updates = jax.vmap(
single_update, in_axes=(None, {'image': 0, 'label': 0}))
agg_exdim = functools.partial(jnp.sum, axis=0)
self._spsdiag_update = jax.jit(
lambda params, data: jax.tree_map(agg_exdim, all_updates(params, data)))
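# A quick numerical check (an addition, not from the original solver) of the
# closed form used in least_square_regularizor_1d above: the minimizer of
# (a^T x + b)^2 + delta ||x||^2 is x* = -b * a / (a^T a + delta), so the
# gradient of the objective should vanish at x*.
def _check_least_square_regularizor_1d():
  a = jnp.array([1.0, -2.0, 0.5])
  b, delta = 3.0, 0.1
  x_star = -b / (jnp.vdot(a, a) + delta) * a
  objective = lambda x: (jnp.vdot(a, x) + b) ** 2 + delta * jnp.vdot(x, x)
  assert jnp.allclose(jax.grad(objective)(x_star), 0.0, atol=1e-5)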
|
{"hexsha": "07866c3e096662e08459e47001ec045fe371f7a2", "size": 5019, "ext": "py", "lang": "Python", "max_stars_repo_path": "stochastic_polyak/spsdiag_solver.py", "max_stars_repo_name": "xxdreck/google-research", "max_stars_repo_head_hexsha": "dac724bc2b9362d65c26747a8754504fe4c615f8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 23901, "max_stars_repo_stars_event_min_datetime": "2018-10-04T19:48:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:27:42.000Z", "max_issues_repo_path": "stochastic_polyak/spsdiag_solver.py", "max_issues_repo_name": "xxdreck/google-research", "max_issues_repo_head_hexsha": "dac724bc2b9362d65c26747a8754504fe4c615f8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 891, "max_issues_repo_issues_event_min_datetime": "2018-11-10T06:16:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T10:42:34.000Z", "max_forks_repo_path": "stochastic_polyak/spsdiag_solver.py", "max_forks_repo_name": "admariner/google-research", "max_forks_repo_head_hexsha": "7cee4b22b925581d912e8d993625c180da2a5a4f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6047, "max_forks_repo_forks_event_min_datetime": "2018-10-12T06:31:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:59:28.000Z", "avg_line_length": 31.36875, "max_line_length": 80, "alphanum_fraction": 0.679019725, "include": true, "reason": "import jax,from jax", "num_tokens": 1242}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import glob
from models.densenet import densenet121, densenet161
from data_util import *
import models_old.tf_to_pytorch_inception_v1 as inception
import models_old.tf_to_pytorch_resnet_v1_50 as resnet
import models_old.tf_to_pytorch_vgg16 as vgg
from models.vgg import vgg16_bn
from models.inception import inception_v3
from models.resnet import resnet50, resnet152
from models.googleNet import googlenet
from models.incept_resnet_v2 import InceptionResNetV2
from models.inception_v4 import InceptionV4
from PIL import Image
from scipy.misc import imread
from scipy.misc import imresize
import argparse
import imp
import random
import time
import json
import io
from PIL import Image
from torchvision import transforms
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=4,
help='batch size, e.g. 16, 32, 64...', type=int)
parser.add_argument('--gpu_id', default=0,
help='gpu_id eg: 0,1,2 ..', type=int)
parser.add_argument('--momentum', default = 0.75,
help="momentum", type=float)
parser.add_argument('--input_dir', default="/home/shh/Passport/jyz/data/IJCAI/dev_data",
help="data input dir", type=str)
parser.add_argument('--output_dir',default='./out2',type=str)
parser.add_argument('--ablation', default='',
help="one model, excluded", type=str)
parser.add_argument('--kernel_size', default=11, type=int)
parser.add_argument('--max_epsilon', default=14, type=int)
parser.add_argument('--num_iter', default=14, type=int)
parser.add_argument('--mode',default="nontarget",type=str)
    parser.add_argument('--prob', default=0.1, help="probability of skipping input diversity", type=float)
parser.add_argument('--random_eps', default=0.0,
help='before gradient descent', type=float)
    parser.add_argument('--use_cam', default=1, help="use class activation map as mask", type=int)
parser.add_argument('--quantize', default=0, type=int)
parser.add_argument('--decay_index', default=0.5, help="decay the weights of successfully attacked model", type=float)
parser.add_argument('--mask_size',default=20, type=int)
parser.add_argument('--jpeg_quality', default=70, help="control the jpeg quality of attack image ", type=int)
parser.add_argument('--models_weight',default='',type=str)
return parser.parse_args()
def load_model(model,pth_file,device):
model = model.to(device)
#model = torch.nn.DataParallel(model)
#print('loading weights from : ', pth_file)
model.load_state_dict(torch.load(pth_file))
return model
def get_model_dics(device, model_list= None):
if model_list is None:
model_list = ['densenet121', 'densenet161', 'resnet50', 'resnet152',
'incept_v1', 'incept_v3', 'incept_v4_adv', 'incept_resnet_v2_adv',
'black_densenet161', 'black_resnet50', 'black_incept_v3',
'old_vgg','old_res','old_incept']
models = {}
for model in model_list:
if model=='densenet121':
models['densenet121'] = densenet121(num_classes=110)
load_model(models['densenet121'],"./pre_weights/ep_38_densenet121_val_acc_0.6527.pth",device)
if model=='densenet161':
models['densenet161'] = densenet161(num_classes=110)
load_model(models['densenet161'],"./pre_weights/ep_30_densenet161_val_acc_0.6990.pth",device)
if model=='resnet50':
models['resnet50'] = resnet50(num_classes=110)
load_model(models['resnet50'],"./pre_weights/ep_41_resnet50_val_acc_0.6900.pth",device)
if model=='incept_v3':
models['incept_v3'] = inception_v3(num_classes=110)
load_model(models['incept_v3'],"./pre_weights/ep_36_inception_v3_val_acc_0.6668.pth",device)
if model=='incept_v1':
models['incept_v1'] = googlenet(num_classes=110)
load_model(models['incept_v1'],"./pre_weights/ep_33_googlenet_val_acc_0.7091.pth",device)
#vgg16 = vgg16_bn(num_classes=110)
#load_model(vgg16, "./pre_weights/ep_30_vgg16_bn_val_acc_0.7282.pth",device)
if model=='incept_resnet_v2':
models['incept_resnet_v2'] = InceptionResNetV2(num_classes=110)
load_model(models['incept_resnet_v2'], "./pre_weights/ep_17_InceptionResNetV2_ori_0.8320.pth",device)
if model=='incept_v4':
models['incept_v4'] = InceptionV4(num_classes=110)
load_model(models['incept_v4'],"./pre_weights/ep_17_InceptionV4_ori_0.8171.pth",device)
if model=='incept_resnet_v2_adv':
models['incept_resnet_v2_adv'] = InceptionResNetV2(num_classes=110)
load_model(models['incept_resnet_v2_adv'], "./pre_weights/ep_22_InceptionResNetV2_val_acc_0.8214.pth",device)
if model=='incept_v4_adv':
models['incept_v4_adv'] = InceptionV4(num_classes=110)
load_model(models['incept_v4_adv'],"./pre_weights/ep_24_InceptionV4_val_acc_0.6765.pth",device)
if model=='incept_resnet_v2_adv2':
models['incept_resnet_v2_adv2'] = InceptionResNetV2(num_classes=110)
load_model(models['incept_resnet_v2_adv2'],"./pre_weights/ep_29_InceptionResNetV2_adv2_0.8115.pth",device)
#load_model(models['incept_resnet_v2_adv2'],"../pre_weights/ep_13_InceptionResNetV2_val_acc_0.8889.pth",device)
if model=='incept_v4_adv2':
models['incept_v4_adv2'] = InceptionV4(num_classes=110)
load_model(models['incept_v4_adv2'],"./pre_weights/ep_32_InceptionV4_adv2_0.7579.pth",device)
if model=='resnet152':
models['resnet152'] = resnet152(num_classes=110)
load_model(models['resnet152'],"./pre_weights/ep_14_resnet152_ori_0.6956.pth",device)
if model=='resnet152_adv':
models['resnet152_adv'] = resnet152(num_classes=110)
load_model(models['resnet152_adv'],"./pre_weights/ep_29_resnet152_adv_0.6939.pth",device)
if model=='black_resnet50':
models['black_resnet50'] = resnet50(num_classes=110)
load_model(models['black_resnet50'],"./test_weights/ep_0_resnet50_val_acc_0.7063.pth",device)
if model=='black_densenet161':
models['black_densenet161'] = densenet161(num_classes=110)
load_model(models['black_densenet161'],"./test_weights/ep_4_densenet161_val_acc_0.6892.pth",device)
if model=='black_incept_v3':
models['black_incept_v3']=inception_v3(num_classes=110)
load_model(models['black_incept_v3'],"./test_weights/ep_28_inception_v3_val_acc_0.6680.pth",device)
if model=='old_res':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_resnet_v1_50.py")
models['old_res'] = torch.load('./models_old/tf_to_pytorch_resnet_v1_50.pth').to(device)
if model=='old_vgg':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_vgg16.py")
models[model] = torch.load('./models_old/tf_to_pytorch_vgg16.pth').to(device)
if model=='old_incept':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_inception_v1.py")
models[model] = torch.load('./models_old/tf_to_pytorch_inception_v1.pth').to(device)
return models
class EnsembleNet(nn.Module):
def __init__(self, models, models_weight, device, ablation=""):
super(EnsembleNet, self).__init__()
#self.models = get_model_dic(device)
self.models = models
self.models_weight = models_weight
self.preprocess = preprocess
self.ablation = ablation
        self.img_size = [1, 3, 299, 299]
self.preds = {}
self.logits= {}
self.use_model ={k:True for k in self.models.keys()}
self.model_index = {k:1.0 for k in self.models.keys()}
self.model_bindex = {}
def forward(self,x, get_feature=False, model_bindex=None):
if model_bindex is None or model_bindex=={}:
model_bindex = {k:torch.ones([x.shape[0],1]).to(device)for k in self.models.keys()}
self.img_size = x.shape
i=0
weights=0
models_weight = self.models_weight
for model in self.models.keys():
if self.use_model[model]==False:
continue
pre_x = self.preprocess(x,model)
out = self.models[model](pre_x,get_feature=get_feature)
if 'incept_v3' in model:
logits = 0.5*out[0]+0.5*out[1]
elif model=='incept_v1':
logits = 0.4*out[0]+0.3*out[1] + 0.3*out[2]
elif mode=='target' and model=='old_vgg':
logits = 1.0*out
else:
logits = out
self.preds[model]=logits.max(1)[1]
self.logits[model]=logits
if i==0:
out_sum = logits*model_bindex[model]*models_weight[model]
else:
out_sum = out_sum + logits*model_bindex[model]*models_weight[model]
i=i+1
weights+=models_weight[model]
out_sum = out_sum/weights
return out_sum
def get_cam(self,label):
i=0
for model in self.models.keys():
if model==self.ablation:
continue
if model == "vgg16" or model == "old_vgg":
continue
params = list(self.models[model].parameters())
features = self.models[model].feature
weights = params[-2].data #shape: [110,channel]
cam = torch.zeros(features.shape[0],1,features.shape[2],features.shape[3])
for b in range(label.shape[0]):
weight = weights[label[b]].reshape(1,-1,1,1)
cam[b] = F.conv2d(features[b].unsqueeze(0),weight,padding=0)
cam = F.upsample(cam,size=self.img_size[-2:],mode='bilinear')
#print(model, cam.mean().item(),cam.min().item(),cam.max().item())
ens_cam = cam if i==0 else ens_cam+cam
i+=1
ens_cam = ens_cam/i
        ens_cam = (ens_cam-ens_cam.min())/(ens_cam.max()-ens_cam.min())
#print(ens_cam.shape)
return ens_cam
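# A self-contained CAM sketch (an addition, not part of the original
# script): for one class, the activation map is the classifier weight
# vector applied as a 1x1 convolution over the feature maps, then
# upsampled to the input size. Shapes here are hypothetical.
def _demo_cam():
    feats = torch.randn(1, 8, 7, 7)                # fake feature maps
    class_w = torch.randn(8).reshape(1, -1, 1, 1)  # weights of one class
    cam = F.conv2d(feats, class_w, padding=0)      # [1, 1, 7, 7]
    return F.interpolate(cam, size=(224, 224), mode='bilinear', align_corners=False)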
class RCE(nn.Module):
"""
reverse cross entropy
"""
def forward(self, x, target):
batch_size = x.size(0)
num_class = x.size(1)
mask = x.new_ones(x.size()) / (num_class-1)
mask.scatter_(1, target[:, None], 0)
x = F.softmax(x, dim=1)
x = -1*torch.log(x)
loss = torch.sum(x*mask) / batch_size
return loss
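# A small sanity check (an addition): for a single 3-class sample, RCE is
# the mean of -log softmax over the two non-target classes.
def _check_rce():
    logits = torch.tensor([[2.0, 1.0, 0.0]])
    target = torch.tensor([0])
    log_probs = torch.log_softmax(logits, dim=1)
    expected = -(log_probs[0, 1] + log_probs[0, 2]) / 2
    assert torch.isclose(RCE()(logits, target), expected)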
def load_data_for_defense(input_dir,batch_size=16): #Only forward
all_img_paths = glob.glob(os.path.join(input_dir, '*.png'))
all_labels = [-1 for i in range(len(all_img_paths))]
dev_data = pd.DataFrame({'image_path':all_img_paths, 'label_idx':all_labels})
transformer = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
])
datasets = {
'dev_data': ImageSet(dev_data, transformer)
}
dataloaders = {
ds: DataLoader(datasets[ds],
batch_size=batch_size,
num_workers=0,
shuffle=True) for ds in datasets.keys()
}
return dataloaders
def input_diversity(image, prob, low, high):
if random.random()<prob:
return image
rnd = random.randint(low, high)
rescaled = F.upsample(image, size=[rnd, rnd], mode='bilinear')
h_rem = high - rnd
w_rem = high - rnd
pad_top = random.randint( 0, h_rem)
pad_bottom = h_rem - pad_top
pad_left = random.randint(0, w_rem)
pad_right = w_rem - pad_left
    # F.pad takes padding for the last dimension first: (left, right, top, bottom)
    padded = F.pad(rescaled, [pad_left, pad_right, pad_top, pad_bottom], 'constant', 0)
return padded
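# Shape sanity check (an addition): with prob=0.0 the early return is never
# taken, and the resize-plus-pad output is always high x high.
def _demo_input_diversity():
    x = torch.zeros(1, 3, 299, 299)
    out = input_diversity(x, prob=0.0, low=270, high=299)
    assert out.shape == (1, 3, 299, 299)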
def preprocess(image,model_name):
if "incept_v3" in model_name or model_name[:16]=="incept_resnet_v2" \
or 'incept_v4' in model_name or 'resnet152' in model_name:
return input_diversity(image,prob,270,299)
else:
image = F.upsample(image, size=(224, 224), mode='bilinear')
if model_name=="old_res" or model_name=="old_vgg":
image = ((image/2.0)+0.5)*255.0
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
image[:, 0,:, :] = image[:, 0,:, :] - _R_MEAN
image[:, 1,:, :] = image[:, 1,:, :] - _G_MEAN
image[:, 2,:, :] = image[:, 2,:, :] - _B_MEAN
return input_diversity(image,prob,200,224)
else:
return input_diversity(image,prob,200,224)
def eval_img(image, models, models_name, old_img, init_y):
correct = 0
ans = 0
j=0
for k in models:
if k not in models_name:
continue
j+=1
model = models[k]
img = preprocess(image,k)
out = model(img)
score = F.softmax(out)
score_sum=score if j==1 else score_sum+score
pre = score.max(1)
pred_y =pre[1]
c=(pred_y==init_y).sum()
correct+=c
rate = float(c)/(img.shape[0])
print(k,"correct rate is %.4f"%rate)
#print("eval:",model_name[i], pre_y)
for i in range(image.shape[0]):
if pred_y[i] != init_y[i]:
mse =torch.sqrt(torch.mean(((image[i]-old_img[i])*128)**2))
else:
mse = 128
ans+=mse
total_rate = float(correct)/(image.shape[0]*j)
print("correct rate is %.4f"%total_rate)
print("MSE of img is", ans/(image.shape[0]*j))
res = ans/float(j)
    return total_rate, res
'''def get_init_y(image,models, models_name):
i=0
for k in models.keys():
if k not in models_name:
continue
i+=1
model = models[k]
img = preprocess(image,k)
out = model(img)
try:
score = F.softmax(out) if i==1 else score+F.softmax(out)
except:
print(k)
return score.max(1)[1]'''
def get_labels(input_dir):
table = pd.read_csv(input_dir+'/dev.csv')
labels = table['trueLabel'].values
filenames = table['filename'].values
labels_dic = {}
for i in range(labels.shape[0]):
labels_dic[filenames[i]] = labels[i]
return labels_dic
def get_targets(input_dir):
table = pd.read_csv(input_dir+'/dev.csv')
labels = table['targetedLabel'].values
filenames = table['filename'].values
labels_dic = {}
for i in range(labels.shape[0]):
labels_dic[filenames[i]] = labels[i]
return labels_dic
def save_images(images, filenames, output_dir):
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
img = (((images.cpu().detach().numpy()[i, :, :, :] + 1.0) * 0.5) * 255.0)
#img = img.clip(0,255).astype(np.uint8)
# resize back to [299, 299]
r_img = imresize(img, [299, 299])
r_img = np.around(r_img).clip(0,255).astype(np.uint8)
png = Image.fromarray(r_img)
png.save(os.path.join(output_dir, filename), format='PNG')
def gkern(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
import scipy.stats as st
x = np.linspace(-nsig, nsig, kernlen)
kern1d = st.norm.pdf(x)
kernel_raw = np.outer(kern1d, kern1d)
kernel = kernel_raw / kernel_raw.sum()
return kernel
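# Sanity check (an addition): the smoothing kernel is normalized and
# symmetric, so convolving gradients with it preserves their overall scale.
def _check_gkern():
    k = gkern(11, 3)
    assert np.isclose(k.sum(), 1.0)
    assert np.allclose(k, k.T)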
def images_tojpeg(images, images_new):
    # JPEG-compress each image through an in-memory buffer; a fresh buffer
    # (rewound before reading) is needed per image, otherwise PIL tries to
    # read from the end of the stream.
    for i in range(images.shape[0]):
        buffer = io.BytesIO()
        pil_img = transforms.ToPILImage()(images[i].detach().cpu())
        pil_img.save(buffer, format='jpeg', quality=args.jpeg_quality)
        buffer.seek(0)
        images_new.data[i] = transforms.ToTensor()(Image.open(buffer).convert('RGB'))
if __name__=='__main__':
args = parse_args()
print(args)
time1 = time.time()
batch_size = args.batch_size
input_dir = args.input_dir
out_dir = args.output_dir
momentum = args.momentum
kernel_size = args.kernel_size
mode=args.mode
random_eps=args.random_eps
test_loader = load_data_for_defense(input_dir, batch_size)
device= torch.device('cuda:%d'%args.gpu_id)
max_epsilon =args.max_epsilon
num_iter = args.num_iter
prob = args.prob
eps = 2.0 * max_epsilon / 255.0
alpha = eps / num_iter
labels_dic = get_labels(input_dir)
targets_dic = get_targets(input_dir)
    if args.models_weight!='':
        with open(args.models_weight) as f:
            models_weight = json.load(f)
else:
if mode=='nontarget':
models_weight = {
#'incept_v4':1.0,
#'incept_resnet_v2':1.0,
'resnet152':1.0,
#'resnet152_adv':1.0,
#'incept_v4_adv':1.0,
#'incept_resnet_v2_adv':1.0,
'incept_v4_adv2':1.0,
'incept_resnet_v2_adv2':1.0,
#'densenet161':1.0,
'black_densenet161':1.0,
'black_resnet50':1.0,
'black_incept_v3':1.0,
'old_vgg':1.0,
'old_incept':1.0,
'old_res':1.0
}
else:
models_weight = {
'black_densenet161':0.5,
'black_resnet50':0.5,
'black_incept_v3':0.5,
'old_vgg':1.0,
'old_incept':0.5,
'old_res':1.0
}
if args.ablation!='':
models_weight[args.ablation]=0.0
model_list = [m for m in models_weight.keys()]
models = get_model_dics(device, model_list= model_list)
ens_model = EnsembleNet( models, models_weight, device, args.ablation)
kernel = gkern(kernel_size, 3).astype(np.float32)
stack_kernel = np.stack([kernel, kernel, kernel])
#stack_kernel = np.load('kernels/ijcai/kernels_70.npy')
stack_kernel = np.expand_dims(stack_kernel, 1)
print(stack_kernel.shape)
stack_kernel = torch.Tensor(stack_kernel).to(device)
mask = torch.zeros([1, 3, 299, 299])
mask_size = args.mask_size
mask[:,:,mask_size:299-mask_size,mask_size:299-mask_size]=1
mask=mask.to(device)
it= 0
use_models = [m for m in ens_model.models]
print("use_models:",use_models)
for data in test_loader['dev_data']:
image = data["image"]
sh=image.shape
it+=image.shape[0]
filenames = data["filename"]
image = image.to(device)
if mode=='target':
label=torch.Tensor([targets_dic[f] for f in filenames]).long().to(device)
else:
label = torch.Tensor([labels_dic[f] for f in filenames]).long().to(device)
# old_img = image.clone()
image.requires_grad = True
image_new = data["image"].to(device)
image_new.requires_grad = True
#print(image.mean())
adv_ok = torch.zeros(image.shape[0])
model_bindex={}
momentum_sum=0
grad_decay_index=1.0
mu_index=torch.ones([image.shape[0],1,1,1]).to(device)
index=1.0
for iter in range(num_iter):
ens_model.zero_grad()
            tur = torch.Tensor(np.abs(np.random.normal(0, random_eps, sh))).type_as(image).to(device)  # random perturbation; seemingly has little effect
images_tojpeg(image, image_new)
            image_new.data = image.data + tur.data  # note: overwrites the JPEG-compressed values from images_tojpeg
out = ens_model(image_new,True, model_bindex)
if iter==0 and args.use_cam==1:
with torch.no_grad():
cam = ens_model.get_cam(label).to(device)
loss = nn.CrossEntropyLoss()(out, label)
#logit1 = F.softmax(out)
#loss = nn.NLLLoss()(1-logit1,label)
if mode=='nontarget':
loss -= RCE()(out, label)
#loss += nn.CrossEntropyLoss()(out, label)
loss.backward()
#data_grad = image.grad.data
data_grad = image_new.grad.data
data_grad = F.conv2d(data_grad, stack_kernel, padding=(kernel_size-1)//2, groups=3)
for i in range(data_grad.shape[0]):
data_grad[i] = data_grad[i]/torch.mean(data_grad[i].norm(2,0)/1.713)
if iter==0:
noise = data_grad
else:
noise = noise*momentum + data_grad
norm = noise.norm(dim=1).unsqueeze(1)
index = norm.mean()
momentum_sum = momentum_sum*momentum + 1.0
d_img = noise*norm*alpha/((momentum_sum)*index)
#d_img[d_img<1/(255.0*num_iter*2.0)]=0
#print(d_img.mean().item())
d_img= d_img*mask#*grad_decay_index
if args.use_cam:
d_img = d_img * cam
d_img = d_img/d_img.norm(dim=1).mean()*alpha
if mode=='target':
image_adv = image.data - d_img*mu_index
else:
image_adv = image.data + d_img*mu_index
image_adv = torch.clamp(image_adv,-1.0,1.0)
if args.quantize:
                image_adv.mul_(255).round_().div_(255)  # quantization seemingly makes no difference, or even hurts
image.data = image_adv
image_new.grad.zero_()
ens_model.zero_grad()
#image.grad.zero_()
logits_label=torch.zeros([len(use_models),image.shape[0]])
i=0
with torch.no_grad():
for model in use_models:
adv_ok = (ens_model.preds[model]!=label).unsqueeze(1).float().to(device) if mode=='nontarget' else (ens_model.preds[model]==label).unsqueeze(1).float().to(device)
logit_label = torch.gather(F.softmax(ens_model.logits[model]),1,label.view([image.shape[0],1]))
# print(logit_label.shape,label.shape)
logits_label[i]=logit_label.view(-1)
i+=1
base=1.0#+float(np.clip((iter-5)*0.1,0,10)*0.1)
model_bindex[model] = base - args.decay_index*adv_ok
################################################################################
#I am not sure if grad_decay_index works
#grad_decay_index = logits_label.max(dim=0)[0]
#print(grad_decay_index, logits_label.max(dim=0)[1])
#grad_decay_index = grad_decay_index.clamp(0.2,1.0).view(-1,1,1,1).to(device)
#print(grad_decay_index.shape)
#if iter>10:
# mu_index[grad_decay_index>0.3] = mu_index[grad_decay_index>0.3]*1.1
#elif iter>10:
# mu_index[grad_decay_index<0.07] = mu_index[grad_decay_index<0.07]*0.9
#res = eval_img(image.to(device_val), models, val_models_name, old_img, init_y)
#ans+=res[1]
save_images(image, data["filename"], args.output_dir)
#print(image.mean())
print("Processing [%d/%d].."%(it,len(labels_dic)))
time2 = time.time()
print("Finish! Total time is %.2f s."%(time2-time1))
#print("final result:", float(ans)/num_img)
|
{"hexsha": "657017c177f727c48d7420ce4f5623f41e456aa2", "size": 24083, "ext": "py", "lang": "Python", "max_stars_repo_path": "attack_tijiao2.py", "max_stars_repo_name": "jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver", "max_stars_repo_head_hexsha": "f9f26b4e00241c7831a2e46a0a2c965457fe99e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-06-19T15:16:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-19T01:33:10.000Z", "max_issues_repo_path": "attack_tijiao2.py", "max_issues_repo_name": "jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver", "max_issues_repo_head_hexsha": "f9f26b4e00241c7831a2e46a0a2c965457fe99e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "attack_tijiao2.py", "max_forks_repo_name": "jiangyangzhou/Non-targeted-Attack-IJCAI2019-ColdRiver", "max_forks_repo_head_hexsha": "f9f26b4e00241c7831a2e46a0a2c965457fe99e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-06-04T09:27:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-12T06:13:16.000Z", "avg_line_length": 43.236983842, "max_line_length": 183, "alphanum_fraction": 0.5852676162, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 6208}
|
"""
Module providing handling of the LAT effective area.
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/irfs/effective_area.py,v 1.1 2016/06/22 17:02:51 wallacee Exp $
Author: Eric Wallace
"""
__version__ = "$Revision: 1.1 $"
import os
import numpy as np
from astropy.io import fits
from scipy import interpolate
import skymaps
#To get something working for now, just use C++ implementation
class EffectiveArea(skymaps.EffectiveArea):
"""Placeholder interface to skymaps::EffectiveArea"""
def __init__(self,filename,
aeff_extension="EFFECTIVE_AREA",
eff_params_extension="EFFICIENCY_PARAMETERS"):
self.eff_params_extension = eff_params_extension
skymaps.EffectiveArea.__init__(self,'',str(filename),str(aeff_extension))
"""
#TODO:
# Implement interpolation
# Implement livetime correction
class EffectiveArea(object):
def __init__(self,filename,
aeff_extension="EFFECTIVE_AREA",
                 aeff_params_extension = "EFFICIENCY_PARAMETERS",
thetacut = 66.4):
hdus = fits.open(filename)
aeff = hdus[aeff_extension]
self.aeff = aeff.data['EFFAREA'][0]
        self.efficiency_pars = hdus[aeff_params_extension].data[0]
self.event_type = aeff.header['DETNAM'].lower()
self.ebins = np.vstack([aeff.data['ENERG_LO'][0],aeff.data['ENERG_HI'][0]]).T
self.cthetabins = np.vstack([aeff.data['CTHETA_LO'][0],aeff.data['CTHETA_HI'][0]]).T
#mask = self.cthetabins[:,0]>=cthetamin
#self.cthetabins = self.cthetabins[mask]
#self.aeff = aeff['EFFAREA'][mask]
#NB: Slightly different interpolation than used in skymaps.EffectiveArea
self._interp = interpolate.RectBivariateSpline(self.cthetabins.mean(axis=1),
np.log10(self.ebins).mean(axis=1),
self.aeff)
def value(self,energy=1000.,ctheta=1.0,interpolate=True):
if interpolate:
return self.interpolate(energy,ctheta)*1e4
else:
mask = (np.fmin(np.searchsorted(self.cthetabins[:,1],ctheta),self.cthetabins.shape[0]-1),
np.fmin(np.searchsorted(self.ebins[:,1],energy),self.ebins.shape[0]-1))
return self.aeff[mask]*1e4
def interpolate(self,energy,ctheta):
return self._interp(ctheta,np.log10(energy),grid=False)
def __call__(self,energy=1000.,ctheta=1.0):
return self.value(energy,ctheta)
def get_livetime_factors(self,energy):
pass
"""
|
{"hexsha": "c529b22ac3333db7730974c9c26b6c1cea77d905", "size": 2637, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/uw/irfs/effective_area.py", "max_stars_repo_name": "coclar/pointlike", "max_stars_repo_head_hexsha": "7088724b5a40cf787371aff69e64c9bec701f578", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-19T14:45:28.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-19T14:45:28.000Z", "max_issues_repo_path": "python/uw/irfs/effective_area.py", "max_issues_repo_name": "coclar/pointlike", "max_issues_repo_head_hexsha": "7088724b5a40cf787371aff69e64c9bec701f578", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-05T17:30:52.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-05T18:12:15.000Z", "max_forks_repo_path": "python/uw/irfs/effective_area.py", "max_forks_repo_name": "coclar/pointlike", "max_forks_repo_head_hexsha": "7088724b5a40cf787371aff69e64c9bec701f578", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-03-14T15:34:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-05T15:29:32.000Z", "avg_line_length": 32.5555555556, "max_line_length": 121, "alphanum_fraction": 0.6355707243, "include": true, "reason": "import numpy,from scipy,from astropy", "num_tokens": 681}
|
include("StructEquality.jl")
include("ImportACounter.jl")
b_counter = CounterX(1)
println(a_counter.id)
println(b_counter.id)
println(a_counter == b_counter) # False
|
{"hexsha": "d471ae3f698bdf7edfcbb917a2296d8e221479d4", "size": 176, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Scripts/ImportStructEquality.jl", "max_stars_repo_name": "Sohva/oolong", "max_stars_repo_head_hexsha": "68b45cfa0b20b6d511db79f4aeacff358ed9b897", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Scripts/ImportStructEquality.jl", "max_issues_repo_name": "Sohva/oolong", "max_issues_repo_head_hexsha": "68b45cfa0b20b6d511db79f4aeacff358ed9b897", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2020-07-10T15:50:14.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-08T16:46:26.000Z", "max_forks_repo_path": "src/Scripts/ImportStructEquality.jl", "max_forks_repo_name": "oolongtensor/oolong", "max_forks_repo_head_hexsha": "68b45cfa0b20b6d511db79f4aeacff358ed9b897", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-28T13:33:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-28T13:33:19.000Z", "avg_line_length": 19.5555555556, "max_line_length": 40, "alphanum_fraction": 0.7386363636, "num_tokens": 43}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras DNN model using DistributionStrategy."""
import tensorflow.compat.v2 as tf
import numpy as np
import keras
from keras import backend
from keras.testing_infra import test_utils
from keras.distribute import keras_correctness_test_base
from keras.distribute import strategy_combinations
from keras.optimizers.optimizer_v2 import gradient_descent as gradient_descent_keras
def all_strategy_combinations_with_eager_and_graph_modes():
return (tf.__internal__.test.combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=['graph', 'eager']) + tf.__internal__.test.combinations.combine(
distribution=strategy_combinations.multi_worker_mirrored_strategies,
mode='eager'))
def all_strategy_combinations_with_graph_mode():
return (tf.__internal__.test.combinations.combine(
distribution=keras_correctness_test_base.all_strategies,
mode=['graph']))
def is_default_strategy(strategy):
with strategy.scope():
return not tf.distribute.has_strategy()
@test_utils.run_all_without_tensor_float_32(
'Uses Dense layers, which call matmul')
class TestDistributionStrategyDnnCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self,
initial_weights=None,
distribution=None,
input_shapes=None):
with keras_correctness_test_base.MaybeDistributionScope(distribution):
      # We add a few non-linear layers to make the model non-trivial.
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(
keras.layers.Dense(
10,
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-4)))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
if initial_weights:
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=['mse'])
return model
def get_data(self):
x_train = np.random.rand(9984, 1).astype('float32')
y_train = 3 * x_train
x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32)
return x_train, y_train, x_predict
def get_data_with_partial_last_batch(self):
x_train = np.random.rand(10000, 1).astype('float32')
y_train = 3 * x_train
x_eval = np.random.rand(10000, 1).astype('float32')
y_eval = 3 * x_eval
x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32)
return x_train, y_train, x_eval, y_eval, x_predict
def get_data_with_partial_last_batch_eval(self):
x_train = np.random.rand(9984, 1).astype('float32')
y_train = 3 * x_train
x_eval = np.random.rand(10000, 1).astype('float32')
y_eval = 3 * x_eval
x_predict = np.array([[1.], [2.], [3.], [4.]], dtype=np.float32)
return x_train, y_train, x_eval, y_eval, x_predict
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_dnn_correctness(self, distribution, use_numpy, use_validation_data):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base
.test_combinations_with_tpu_strategies_graph() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_dnn_correctness_with_partial_last_batch_eval(self, distribution,
use_numpy,
use_validation_data):
self.run_correctness_test(
distribution, use_numpy, use_validation_data, partial_last_batch='eval')
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base
.strategy_minus_tpu_and_input_config_combinations_eager() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_dnn_correctness_with_partial_last_batch(self, distribution,
use_numpy,
use_validation_data):
distribution.extended.experimental_enable_get_next_as_optional = True
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch='train_and_eval',
training_epochs=1)
@tf.__internal__.distribute.combinations.generate(all_strategy_combinations_with_graph_mode())
def test_dnn_with_dynamic_learning_rate(self, distribution):
self.run_dynamic_lr_test(distribution)
class TestDistributionStrategyDnnMetricCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self,
distribution=None,
input_shapes=None):
with distribution.scope():
model = keras.Sequential()
model.add(
keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=[keras.metrics.BinaryAccuracy()])
return model
def run_metric_correctness_test(self, distribution):
with self.cached_session():
self.set_up_test_config()
x_train, y_train, _ = self.get_data()
model = self.get_model(
distribution=distribution)
batch_size = 64
batch_size = (
keras_correctness_test_base.get_batch_size(batch_size, distribution))
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = (
keras_correctness_test_base.batch_wrapper(train_dataset, batch_size))
history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10)
self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0])
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations_with_eager_and_graph_modes())
def test_simple_dnn_metric_correctness(self, distribution):
self.run_metric_correctness_test(distribution)
class TestDistributionStrategyDnnMetricEvalCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self,
distribution=None,
input_shapes=None):
with distribution.scope():
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='mae',
metrics=['accuracy', keras.metrics.BinaryAccuracy()],
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.001))
return model
def run_eval_metrics_correctness_test(self, distribution):
with self.cached_session():
self.set_up_test_config()
model = self.get_model(
distribution=distribution)
# verify correctness of stateful and stateless metrics.
x = np.ones((100, 4)).astype('float32')
y = np.ones((100, 1)).astype('float32')
dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat()
dataset = keras_correctness_test_base.batch_wrapper(dataset, 4)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 1.)
self.assertEqual(outs[2], 1.)
y = np.zeros((100, 1)).astype('float32')
dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat()
dataset = keras_correctness_test_base.batch_wrapper(dataset, 4)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
@tf.__internal__.distribute.combinations.generate(
all_strategy_combinations_with_eager_and_graph_modes())
def test_identity_model_metric_eval_correctness(self, distribution):
self.run_eval_metrics_correctness_test(distribution)
class SubclassedModel(keras.Model):
def __init__(self, initial_weights, input_shapes):
super().__init__()
self.dense1 = keras.layers.Dense(10, activation='relu', input_shape=(1,))
self.dense2 = keras.layers.Dense(
10, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-4))
self.dense3 = keras.layers.Dense(10, activation='relu')
self.dense4 = keras.layers.Dense(1)
if input_shapes:
self.build(input_shapes)
else:
# This covers cases when the input is DatasetV1Adapter.
self.build((None, 1))
if initial_weights:
self.set_weights(initial_weights)
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
x = self.dense3(x)
return self.dense4(x)
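# A standalone sketch (an addition, not part of the test suite) of the
# correctness idea these tests exercise: fit the y = 3x data from
# get_data() without any distribution strategy and check that training
# actually reduces the loss.
def _sketch_fit_without_strategy():
  x = np.random.rand(256, 1).astype('float32')
  y = 3 * x
  model = keras.Sequential([keras.layers.Dense(1, input_shape=(1,))])
  model.compile(loss='mse', optimizer=gradient_descent_keras.SGD(0.05))
  history = model.fit(x, y, epochs=5, verbose=0)
  assert history.history['loss'][-1] < history.history['loss'][0]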
@test_utils.run_all_without_tensor_float_32(
'Uses Dense layers, which call matmul')
class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
TestDistributionStrategyDnnCorrectness):
def get_model(self,
initial_weights=None,
distribution=None,
input_shapes=None):
with keras_correctness_test_base.MaybeDistributionScope(distribution):
model = SubclassedModel(initial_weights, input_shapes)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=['mse'])
return model
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_dnn_correctness(self, distribution, use_numpy, use_validation_data):
if (tf.executing_eagerly()) or is_default_strategy(distribution):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
elif (backend.is_tpu_strategy(distribution)
and not tf.executing_eagerly()):
with self.assertRaisesRegex(
ValueError,
'Expected `model` argument to be a functional `Model` instance, '
'but got a subclassed model instead.'):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
else:
with self.assertRaisesRegex(
ValueError,
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.'):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@tf.__internal__.distribute.combinations.generate(all_strategy_combinations_with_graph_mode())
def test_dnn_with_dynamic_learning_rate(self, distribution):
if ((tf.executing_eagerly()
and not backend.is_tpu_strategy(distribution))
or is_default_strategy(distribution)):
self.run_dynamic_lr_test(distribution)
elif backend.is_tpu_strategy(distribution):
with self.assertRaisesRegex(
ValueError,
'Expected `model` argument to be a functional `Model` instance, '
'but got a subclassed model instead.'):
self.run_dynamic_lr_test(distribution)
else:
with self.assertRaisesRegex(
ValueError,
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.'):
self.run_dynamic_lr_test(distribution)
@tf.__internal__.distribute.combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies_graph())
def test_dnn_correctness_with_partial_last_batch_eval(self, distribution,
use_numpy,
use_validation_data):
with self.assertRaisesRegex(
ValueError,
'Expected `model` argument to be a functional `Model` instance, '
'but got a subclassed model instead.'):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch='eval')
if __name__ == '__main__':
tf.__internal__.distribute.multi_process_runner.test_main()
|
{"hexsha": "d4d1602cfc5622ca2515e8ae25f12e648a95335f", "size": 13098, "ext": "py", "lang": "Python", "max_stars_repo_path": "keras/distribute/keras_dnn_correctness_test.py", "max_stars_repo_name": "RakeshJarupula/keras", "max_stars_repo_head_hexsha": "2ac6638e91d5aff77c22b45e9c8c84fb05a9e477", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "keras/distribute/keras_dnn_correctness_test.py", "max_issues_repo_name": "RakeshJarupula/keras", "max_issues_repo_head_hexsha": "2ac6638e91d5aff77c22b45e9c8c84fb05a9e477", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "keras/distribute/keras_dnn_correctness_test.py", "max_forks_repo_name": "RakeshJarupula/keras", "max_forks_repo_head_hexsha": "2ac6638e91d5aff77c22b45e9c8c84fb05a9e477", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1779141104, "max_line_length": 96, "alphanum_fraction": 0.6987326309, "include": true, "reason": "import numpy", "num_tokens": 2811}
|
import hashlib
import hmac
import base64
import requests
import urllib.parse as urlparse
import time
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import json
# Load Maps Platform API and signing secret from JSON file
with open("api_key.json", "r") as f:
credentials = json.load(f)
api_key = credentials['api_key']
secret = credentials['secret']
# lat, lng = 51.507574, -0.127835
# lat, lng = 51.5252601, 0.1276249 # first const site
# lat, lng = 51.5222601, 0.1196249 # second const site
lat, lng = 51.6587601, -0.0203751 # third const site
dt_lat, dt_lng = 0.0015, 0.002
def sign_url(input_url=None, secret=None):
""" Sign a request URL with a URL signing secret.
Usage:
from urlsigner import sign_url
signed_url = sign_url(input_url=my_url, secret=SECRET)
Args:
input_url - The URL to sign
secret - Your URL signing secret
Returns:
The signed request URL
"""
if not input_url or not secret:
raise Exception("Both input_url and secret are required")
url = urlparse.urlparse(input_url)
# We only need to sign the path+query part of the string
url_to_sign = url.path + "?" + url.query
# Decode the private key into its binary format
# We need to decode the URL-encoded private key
decoded_key = base64.urlsafe_b64decode(secret)
# Create a signature using the private key and the URL-encoded
# string using HMAC SHA1. This signature will be binary.
signature = hmac.new(decoded_key, str.encode(url_to_sign), hashlib.sha1)
# Encode the binary signature into base64 for use within a URL
encoded_signature = base64.urlsafe_b64encode(signature.digest())
original_url = url.scheme + "://" + url.netloc + url.path + "?" + url.query
# Return signed URL
return original_url + "&signature=" + encoded_signature.decode()
def convert_params(parameter_dict):
return_str= ''
for key, value in parameter_dict.items():
return_str += f'{key}={value}&'
return return_str[:-1]
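# A quick illustration of convert_params (made-up values):
# convert_params({'size': '320x320', 'zoom': '21'}) -> 'size=320x320&zoom=21'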
def develop_url(lat, lng):
url = 'https://maps.googleapis.com/maps/api/staticmap?'
# DeepSolar paper: 320x320 images at zoom level 21
params = {
'center': f'{lat},{lng}',
'size': '320x320',
'maptype': 'satellite',
'zoom':'21',
'key': api_key
}
url = f'{url}{convert_params(params)}'
return sign_url(url, secret)
def save_img(lat, lng, fn):
url = develop_url(lat, lng)
r = requests.get(url)
if r.ok:
with open(fn, 'wb') as f:
f.write(r.content)
else:
raise Exception(f"Request returned with status {r.status_code}. Error message: {r.content}")
def start_scraping():
img_names = set([tuple(x.replace(".png", "").split("_")) for x in os.listdir('outputs/images/')])
# lat_lon = np.load('outputs/lat_lon_query.npy')
query = pd.read_csv('outputs/final_query.csv')
lat_lon = query[['latitude', 'longitude']].values
rng = np.random.default_rng()
start_time = time.time()
# Keep track of the number of requests
num_requests = 0
tqdm_obj = tqdm(range(lat_lon.shape[0]), desc = 'qps: 0')
for idx in tqdm_obj:
# Randomly sample from a 0.001 degree square neighborhood around the base point
base_lat, base_lon = lat_lon[idx]
max_dist = 0.001
n_samples = 5
# Sample from a Gaussian distribution bounded by [center - max_dist, center + max_dist]
def random_coords(center):
samples = rng.normal(center, max_dist / 2, n_samples)
return np.clip(samples, center - max_dist, center + max_dist, out=samples)
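# e.g. with base_lat = 51.50 and max_dist = 0.001 every sampled latitude lies
# in [51.499, 51.501]: out-of-bound normal draws are clipped, not redrawn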
lats = random_coords(base_lat)
lons = random_coords(base_lon)
for i in range(n_samples):
lat, lon = lats[i], lons[i]
key = (f"{lat:.6f}", f"{lon:.6f}")
if key in img_names:
continue
save_img(lat, lon, f'outputs/images/{lat:.6f}_{lon:.6f}.png')
num_requests += 1
# ensure that the request rate stays below 500 requests per second
if num_requests % 500 == 0:
time_passed = time.time() - start_time
if time_passed < (num_requests // 500):
time.sleep((num_requests // 500) - time_passed)
tqdm_obj.set_description(f'qps: {num_requests/time_passed:.4f}')
# query = pd.read_csv('outputs/final_query.csv', index_col = 0)
# embed()
# 1/0
# save_img(lat, lng, 'outputs/const3.png')
start_scraping()
|
{"hexsha": "7f64ad8e40d2db3ac35590ab653c2b9172c5ac63", "size": 4546, "ext": "py", "lang": "Python", "max_stars_repo_path": "download_dataset_copy.py", "max_stars_repo_name": "aidan-fitz/SolarTracer", "max_stars_repo_head_hexsha": "31cc77ca974640be277d00c6ca23d82292f178c1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-08T14:37:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T14:37:10.000Z", "max_issues_repo_path": "download_dataset_copy.py", "max_issues_repo_name": "aidan-fitz/SolarTracer", "max_issues_repo_head_hexsha": "31cc77ca974640be277d00c6ca23d82292f178c1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "download_dataset_copy.py", "max_forks_repo_name": "aidan-fitz/SolarTracer", "max_forks_repo_head_hexsha": "31cc77ca974640be277d00c6ca23d82292f178c1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1804511278, "max_line_length": 101, "alphanum_fraction": 0.6379234492, "include": true, "reason": "import numpy", "num_tokens": 1169}
|
[STATEMENT]
lemma (in Corps) t_gen_vp:"\<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow>
(Vr K v) \<diamondsuit>\<^sub>p t = vp K v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> Vr K v \<diamondsuit>\<^sub>p t = vp K v
[PROOF STEP]
(*
apply (frule val_surj_n_val[of v], blast)
apply (frule ideal_apow_n_val[of v t])
apply (cut_tac a0_less_1)
apply (rule val_poss_mem_Vr[of v t], assumption+, simp)
apply (simp add:r_apow_def)
apply (simp only:ant_1[THEN sym], simp only:ant_0[THEN sym])
apply (simp only:aeq_zeq, simp)
apply (cut_tac z_neq_inf[THEN not_sym, of "1"], simp)
apply (simp only:an_1[THEN sym]) apply (simp add:na_an)
apply (rule Ring.idealprod_whole_r[of "Vr K v" "vp K v"])
apply (simp add:Vr_ring)
apply (simp add:vp_ideal)
done *)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> Vr K v \<diamondsuit>\<^sub>p t = vp K v
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> Vr K v \<diamondsuit>\<^sub>p t = vp K v
[PROOF STEP]
assume a1:"valuation K v" and
a2:"t \<in> carrier K" and
a3:"v t = 1"
[PROOF STATE]
proof (state)
this:
valuation K v
t \<in> carrier K
v t = 1
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> Vr K v \<diamondsuit>\<^sub>p t = vp K v
[PROOF STEP]
from a1 and a2 and a3
[PROOF STATE]
proof (chain)
picking this:
valuation K v
t \<in> carrier K
v t = 1
[PROOF STEP]
have h1:"t \<in> carrier (Vr K v)"
[PROOF STATE]
proof (prove)
using this:
valuation K v
t \<in> carrier K
v t = 1
goal (1 subgoal):
1. t \<in> carrier (Vr K v)
[PROOF STEP]
apply (cut_tac a0_less_1)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1; 0 < 1\<rbrakk> \<Longrightarrow> t \<in> carrier (Vr K v)
[PROOF STEP]
apply (rule val_poss_mem_Vr[of v t], assumption+, simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
t \<in> carrier (Vr K v)
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> Vr K v \<diamondsuit>\<^sub>p t = vp K v
[PROOF STEP]
from a1 and a2 and a3
[PROOF STATE]
proof (chain)
picking this:
valuation K v
t \<in> carrier K
v t = 1
[PROOF STEP]
have h2:"n_val K v = v"
[PROOF STATE]
proof (prove)
using this:
valuation K v
t \<in> carrier K
v t = 1
goal (1 subgoal):
1. n_val K v = v
[PROOF STEP]
apply (subst val_surj_n_val[of v])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> valuation K v
2. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> \<exists>x\<in>carrier K. v x = 1
3. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> v = v
[PROOF STEP]
apply assumption
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> \<exists>x\<in>carrier K. v x = 1
2. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> v = v
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> v = v
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
n_val K v = v
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> Vr K v \<diamondsuit>\<^sub>p t = vp K v
[PROOF STEP]
from a1 and h1
[PROOF STATE]
proof (chain)
picking this:
valuation K v
t \<in> carrier (Vr K v)
[PROOF STEP]
have h3:"Vr K v \<diamondsuit>\<^sub>p t = vp K v\<^bsup> (Vr K v) (n_val K v t)\<^esup>"
[PROOF STATE]
proof (prove)
using this:
valuation K v
t \<in> carrier (Vr K v)
goal (1 subgoal):
1. Vr K v \<diamondsuit>\<^sub>p t = vp K v\<^bsup> Vr K v n_val K v t\<^esup>
[PROOF STEP]
apply (simp add:ideal_apow_n_val[of v t])
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
Vr K v \<diamondsuit>\<^sub>p t = vp K v\<^bsup> Vr K v n_val K v t\<^esup>
goal (1 subgoal):
1. \<lbrakk>valuation K v; t \<in> carrier K; v t = 1\<rbrakk> \<Longrightarrow> Vr K v \<diamondsuit>\<^sub>p t = vp K v
[PROOF STEP]
from a1 and a3 and h2 and h3
[PROOF STATE]
proof (chain)
picking this:
valuation K v
v t = 1
n_val K v = v
Vr K v \<diamondsuit>\<^sub>p t = vp K v\<^bsup> Vr K v n_val K v t\<^esup>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
valuation K v
v t = 1
n_val K v = v
Vr K v \<diamondsuit>\<^sub>p t = vp K v\<^bsup> Vr K v n_val K v t\<^esup>
goal (1 subgoal):
1. Vr K v \<diamondsuit>\<^sub>p t = vp K v
[PROOF STEP]
apply (simp add:r_apow_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; v t = 1; n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = (if \<infinity> = 1 then {\<zero>\<^bsub>Vr K v\<^esub>} else if 1 = 0 then carrier (Vr K v) else vp K v \<^bsup>\<diamondsuit>Vr K v na 1\<^esup>)\<rbrakk> \<Longrightarrow> (1 = 0 \<longrightarrow> carrier (Vr K v) = vp K v) \<and> (1 \<noteq> 0 \<longrightarrow> (\<infinity> = 1 \<longrightarrow> {\<zero>\<^bsub>Vr K v\<^esub>} = vp K v) \<and> (\<infinity> \<noteq> 1 \<longrightarrow> vp K v \<^bsup>\<diamondsuit>Vr K v na 1\<^esup> = vp K v))
[PROOF STEP]
apply (simp only:ant_1[THEN sym], simp only:ant_0[THEN sym])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; v t = ant 1; n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = (if \<infinity> = ant 1 then {\<zero>\<^bsub>Vr K v\<^esub>} else if ant 1 = ant 0 then carrier (Vr K v) else vp K v \<^bsup>\<diamondsuit>Vr K v na (ant 1)\<^esup>)\<rbrakk> \<Longrightarrow> (ant 1 = ant 0 \<longrightarrow> carrier (Vr K v) = vp K v) \<and> (ant 1 \<noteq> ant 0 \<longrightarrow> (\<infinity> = ant 1 \<longrightarrow> {\<zero>\<^bsub>Vr K v\<^esub>} = vp K v) \<and> (\<infinity> \<noteq> ant 1 \<longrightarrow> vp K v \<^bsup>\<diamondsuit>Vr K v na (ant 1)\<^esup> = vp K v))
[PROOF STEP]
apply (simp only:aeq_zeq, simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; v t = 1; n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = (if \<infinity> = 1 then {\<zero>\<^bsub>Vr K v\<^esub>} else if 1 = 0 then carrier (Vr K v) else vp K v \<^bsup>\<diamondsuit>Vr K v na (ant 1)\<^esup>)\<rbrakk> \<Longrightarrow> (\<infinity> = 1 \<longrightarrow> {\<zero>\<^bsub>Vr K v\<^esub>} = vp K v) \<and> (\<infinity> \<noteq> 1 \<longrightarrow> vp K v \<^bsup>\<diamondsuit>Vr K v na 1\<^esup> = vp K v)
[PROOF STEP]
apply (cut_tac z_neq_inf[THEN not_sym, of "1"], simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; v t = 1; n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = vp K v \<^bsup>\<diamondsuit>Vr K v na 1\<^esup>; \<infinity> \<noteq> 1\<rbrakk> \<Longrightarrow> vp K v \<^bsup>\<diamondsuit>Vr K v na 1\<^esup> = vp K v
[PROOF STEP]
apply (simp only:an_1[THEN sym])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; v t = an 1; n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = vp K v \<^bsup>\<diamondsuit>Vr K v na (an 1)\<^esup>; \<infinity> \<noteq> an 1\<rbrakk> \<Longrightarrow> vp K v \<^bsup>\<diamondsuit>Vr K v na (an 1)\<^esup> = vp K v
[PROOF STEP]
apply (simp add:na_an)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; v t = an (Suc 0); n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = vp K v \<diamondsuit>\<^sub>r\<^bsub>Vr K v\<^esub> carrier (Vr K v); \<infinity> \<noteq> an (Suc 0)\<rbrakk> \<Longrightarrow> vp K v \<diamondsuit>\<^sub>r\<^bsub>Vr K v\<^esub> carrier (Vr K v) = vp K v
[PROOF STEP]
apply (rule Ring.idealprod_whole_r[of "Vr K v" "vp K v"])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>valuation K v; v t = an (Suc 0); n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = vp K v \<diamondsuit>\<^sub>r\<^bsub>Vr K v\<^esub> carrier (Vr K v); \<infinity> \<noteq> an (Suc 0)\<rbrakk> \<Longrightarrow> Ring (Vr K v)
2. \<lbrakk>valuation K v; v t = an (Suc 0); n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = vp K v \<diamondsuit>\<^sub>r\<^bsub>Vr K v\<^esub> carrier (Vr K v); \<infinity> \<noteq> an (Suc 0)\<rbrakk> \<Longrightarrow> ideal (Vr K v) (vp K v)
[PROOF STEP]
apply (simp add:Vr_ring)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>valuation K v; v t = an (Suc 0); n_val K v = v; Vr K v \<diamondsuit>\<^sub>p t = vp K v \<diamondsuit>\<^sub>r\<^bsub>Vr K v\<^esub> carrier (Vr K v); \<infinity> \<noteq> an (Suc 0)\<rbrakk> \<Longrightarrow> ideal (Vr K v) (vp K v)
[PROOF STEP]
apply (simp add:vp_ideal)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
Vr K v \<diamondsuit>\<^sub>p t = vp K v
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4171, "file": "Valuation_Valuation1", "length": 32}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Régis Gbenou
@email: regis.gbenou@outlook.fr
"""
###############################################################################
# PACKAGES
###############################################################################
import itertools # Library providing iterative functions.
import matplotlib.pyplot as plt # Library to plot graphics.
import numpy as np # Library for matrix computation.
import pandas as pd # Library for data manipulation.
import seaborn as sns # Library to display graphics and make some basic statistics.
sns.set(style="ticks", color_codes=True)
import statsmodels.api as sm # Library for statistical models.
import statsmodels.stats.api as sms # Library for statistical models.
from statsmodels.stats.outliers_influence import(
variance_inflation_factor) # Function to evaluate multicolinearity of columns in a matrix.
from statsmodels.stats.stattools import(
medcouple,
durbin_watson) # Functions that respectively evaluate the skewness (asymmetry)
# of a distribution and the first-order autocorrelation (AR(1)) in a time series.
from statsmodels.graphics.tsaplots import plot_acf # To plot AutoCorrelation Function (ACF)
import scipy.stats as sci # Library of computation interest.
import sklearn # Library of machine Learning Model
from sklearn.base import clone # Function to copy an estimator without its fitted state.
from sklearn.linear_model import LinearRegression # Module providing LinearRegression and other (possibly regularized) linear models.
from sklearn.model_selection import(
KFold, cross_validate, GridSearchCV) # Functions helping in the selection of features or hyperparameters
from sklearn.pipeline import Pipeline # Module to produce pipeline estimators.
from sklearn.preprocessing import PowerTransformer # Module to make Power Transformation.
import time # Library to access to the computer clock.
###############################################################################
# FUNCTIONS
###############################################################################
# DESCRIPTIVE PART
def plot_fct(df, cols_to_plot=[], ncols=2, size=(7, 3)):
'''
<plot_fct> plots, in ascending order, the values of each column whose header is in <cols_to_plot>;
if <cols_to_plot> is left empty this function will take into account by default all the
numerical columns of <df>.
Parameters
----------
df : DataFrame
Any data frame containing at least 3 numerical columns.
cols_to_plot : list, optional
List of a subset of <df> columns. The default is [].
ncols : int, optional
Number of subplot by row. The default is 2.
size : tuple, optional
(width, height) of the whole figure. The default is (7, 3).
Returns
-------
None.
'''
# Displaying all numerical column if cols_to_plot is empty.
if not cols_to_plot:
cols_to_plot = list(df.select_dtypes('number'))
nrows = int(np.ceil(len(cols_to_plot)/ncols)) # ceil so every column gets a subplot
fig, axs = plt.subplots(nrows, ncols, figsize=size)
iterative_var = 0
# i,j belongs to the cartesian product: [1,nrows]X[1,ncols]
for i,j in itertools.product(np.arange(nrows),np.arange(ncols)):
try:
# x is a vector going from 0 to the row number of df.
x = np.arange(df.shape[0])
# y is an array containing the sorted values of the columns cols_to_plot[iterative_var].
y = df[cols_to_plot[iterative_var]].sort_values().values
except IndexError:
# Displaying blank plot.
pass
else:
# Displaying y(x).
axs[i,j].scatter(x, y)
axs[i,j].set_ylabel(cols_to_plot[iterative_var])
finally:
# Always incrementing iterative_var.
iterative_var += 1
plt.tight_layout()
plt.show()
return None
def boxPlot_fct(df, L=[], ncols=2, size=(7, 3)):
'''
<boxPlot_fct> plots the box plots of the columns whose headers are in <L>;
if <L> is left empty this function will take into account by default all the
numerical columns of <df>.
The box plots are modified using the thumb rule relying on medcouple:
https://en.wikipedia.org/wiki/Box_plot#Variations
The outliers according to the previous thumb rule will be colored in red.
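With mc = medcouple(x) and IQR = Q3 - Q1, the adjusted whiskers used here are
roughly:
mc > 0: [Q1 - 1.5*exp(-4*mc)*IQR, Q3 + 1.5*exp(3*mc)*IQR]
mc <= 0: [Q1 - 1.5*exp(-3*mc)*IQR, Q3 + 1.5*exp(4*mc)*IQR]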
Parameters
----------
df : DataFrame
Any data frame containing at least 3 numerical columns.
L : list, optional
Names of the numerical features present in <df> for which we want a box plot.
The default is [].
ncols : int, optional
Number of columns. The default is 2.
size : tuple, optional
(width, height) of the whole figure. The default is (7, 3).
Returns
-------
None.
'''
if L:
pass
else:
L = list(df.select_dtypes('number'))
nrows = int(np.ceil(len(L)/ncols)) # ceil so every selected column gets a subplot
red_squares = dict(markerfacecolor='r', marker='s')
fig, ax = plt.subplots(nrows, ncols, figsize=size)
for i,k in zip(itertools.product(np.arange(nrows), np.arange(ncols)), L):
mc = medcouple(df[k])
if mc > 0:
iq_adj = [1.5*np.exp(-4*mc), 1.5*np.exp(3*mc)] # computation of the adjusted interquartile for a medcouple value greater than 0.
else:
iq_adj = [1.5*np.exp(-3*mc), 1.5*np.exp(4*mc)]
low = np.percentile(df[k], 25) - iq_adj[0]*(np.percentile(df[k], 75) - np.percentile(df[k], 25))
up = np.percentile(df[k], 75) + iq_adj[1]*(np.percentile(df[k], 75) - np.percentile(df[k], 25))
a, b = round(sci.percentileofscore(df[k], low), 2), round(sci.percentileofscore(df[k], up), 2)
ax[i[0], i[1]].boxplot(df[k], flierprops=red_squares, whis=(a, b), vert=False)
ax[i[0], i[1]].set_title(k)
plt.tight_layout()
plt.show()
return None
def ols_summary_fct(dfo, name_y, intercept=True, cov_fit='', summary=True, graph=True, vif=True, size=(10, 7), method='box-cox',cols_boxCox=[]):
'''
<ols_summary_fct> checks the following assumptions:
- Homoscedasticity (equal variance across the observations),
- Linear relationship,
- Independence (checked through first-order autocorrelation, a weaker but nearly equivalent criterion),
- Non-collinearity,
- Normality
Parameters
----------
df : DataFrame
Data frame gathering the response and feature observations.
name_y : str
Name of the response in <df>.
Returns
-------
None.
'''
# a. OLS Regression summary -----------------------------------------------
df = dfo.copy()
df = df.reset_index(drop=True)
col_to_transf = list(df.reindex(columns=cols_boxCox).dropna(axis=1))
if col_to_transf:
power_t = PowerTransformer(method=method, standardize=False)
power_t.fit(df.loc[:, col_to_transf])
df.loc[:, col_to_transf] = power_t.transform(df.loc[:, col_to_transf])
# df_lambdas = pd.DataFrame({'lambda':power_t.lambdas_},
# index=col_to_transf)
if col_to_transf:
print(f'Applying {method} transformation on the following columns: {col_to_transf}')
predictor_names = list(df)
predictor_names.remove(name_y)
if intercept:
X = sm.add_constant(df.loc[:, predictor_names])
else:
X = df.loc[:, predictor_names]
y = df.loc[:, name_y]
model = sm.OLS(y, X)
if len(cov_fit):
results = model.fit(cov_type=cov_fit)
else:
results = model.fit()
if summary:
print(results.summary())
else:
print(f"\nR2_adj: {round(100*results.rsquared_adj, 1)}%")
print("\nLOG-LIKELIHOOD: ", "{:.2e}".format(round(results.llf, 1)))
print("\nFscore: ", round(results.fvalue, 1))
print("\nFpvalue: ", round(results.f_pvalue, 1))
print("\nAIC: ", "{:.2e}".format(round(results.aic, 1)))
print("\nBIC: ", "{:.2e}".format(round(results.bic, 1)))
print("\nCONDITION NUMBER: ", "{:.2e}".format(round(results.condition_number, 1)))
print("\nRESIDUAL MSE: ", round(results.mse_resid, 1))
# b. Homoscedasticity: detection of heteroscedasticity --------------------
print('\nHOMOSCEDASTICITY ASSUMPTION')
test = sms.het_breuschpagan(results.resid, results.model.exog) # tests heteroscedasticity with
fscore, fpvalue = round(test[2], 1), round(test[3], 2) # homoscedasticity as null hypothesis.
x_het = results.predict()
y_het = results.resid
poly_het = np.poly1d( # transforms the array of polynomial coefficients
np.polyfit(x_het, y_het, deg=3)) # into a function that can take a vector as argument.
xp_het = np.linspace(x_het.min(), x_het.max(), int(y_het.shape[0]/4))
print(f"Breush-Paga test:\tFscore:{round(fscore,1)}, Pval={round(fpvalue,1)}")
# c. Independency: detection of autocorrelation in residuals --------------
print('\nINDEPENDENCY ASSUMPTION')
print('Autocorrelation of the first order:')
y = results.resid
test_d = round(durbin_watson(y), 1) # tests first-order autocorrelation; a rule of
print(f'The Durbin-Watson test result: {test_d}') # thumb is that values between 1.5 and 2.5 are considered unproblematic.
# d. Linearity: detection of non-linearity in residuals -------------------
x_lin = df.index.values
np.random.shuffle(x_lin)
y_lin = results.resid
poly_lin = np.poly1d(
np.polyfit(x_lin, y_lin, deg=3))
xp_lin = np.linspace(x_lin.min(), x_lin.max(), int(y_lin.shape[0]/4))
# e. Multi plots ---------------------------------------------------
if graph:
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=size)
dct_arg = {'x':[x_het, x_lin], 'y':[y_het, y_lin], 'xp':[xp_het, xp_lin],
'yp':[poly_het(xp_het), poly_lin(xp_lin)],
'xlabel':[r'Fitted values $\hat{y_{i}}$', 'observation number'],
'title':[r"$residuals(\hat{y})$", "Residuals vs observation number"]}
for i in range(2):
axs[i, 0].scatter(x=dct_arg['x'][i], y=dct_arg['y'][i], label="residuals")
axs[i, 0].set_xlabel(dct_arg['xlabel'][i])
axs[i, 0].set_ylabel('residuals')
axs[i, 0].plot(dct_arg['xp'][i],dct_arg['yp'][i], color="DarkRed", label="Polynomial estimation")
axs[i, 0].set_title(dct_arg['title'][i])
axs[i, 0].legend()
plot_acf(results.resid, adjusted=True, zero=False,
title="Residuals autocorrelation", ax=axs[0,1])
sci.probplot(results.predict(), dist="norm", plot=axs[1,1])
axs[1, 1].set_title('Q-Q plot')
plt.tight_layout()
plt.show()
# f. Multicolinearity: use of VIF criterion -------------------------------
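# For each feature j, VIF_j = 1 / (1 - R_j^2), where R_j^2 comes from
# regressing feature j on the remaining features; values above ~10 are
# commonly taken to flag multicolinearity.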
if vif:
print('\nNON-COLINEARITY ASSUMPTION')
X_multi = X.copy()
if intercept:
X_multi.drop(columns='const', inplace=True)
print(pd.DataFrame({
"VIF": [variance_inflation_factor(X_multi.values, i)
for i in range(X_multi.shape[1])]},
index = list(X_multi)).sort_values(by='VIF', ascending=False)
)
return None
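# A minimal usage sketch of ols_summary_fct (frame and column names are
# illustrative, not part of the project data):
# demo = pd.DataFrame({'y': np.random.randn(50), 'x1': np.random.randn(50),
# 'x2': np.random.randn(50)})
# ols_summary_fct(demo, name_y='y', cov_fit='HC3', summary=False, graph=False)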
# INFERENCE AND PREDICTIVE PART
def find_bestHyperparameters(df, name_y, estimator, parameters=dict(), k_cv=5, rs=0, n_jobs=1, graph=True,
size=(7, 3)):
'''
Return best hyperparameter estimator.
Parameters
----------
estimator : sklearn estimator
Scikit-Learn method.
df : Data Frame
Frame containing the response and the predictor columns.
name_y : str
Header of the response columns.
parameters : dict
Dictionary of parameters.
The default is dict().
k_cv : int, optional
Cross validation value. The default is 5.
rs: int, optional
Random state.
The default is 0.
n_jobs : int
The number of workers used to run the model.
The default is 1.
Returns
-------
Dictionary composed of the most suitable hyperparameters for the method,
and the grid search CV results.
'''
duration = np.array(2 * [-time.time()])
columns_list = list(df)
columns_list.remove(name_y)
X = df.loc[:, columns_list]
y = df.loc[:, name_y]
# use GridSearchCV to find the hyperparameters that minimize the MSE.
grid = GridSearchCV(
estimator=estimator,
param_grid=parameters,
n_jobs=n_jobs,
scoring='neg_mean_squared_error')
grid.fit(X, y)
index_bestRanked_param = np.argwhere(grid.cv_results_['rank_test_score']==1)[0][0]
# handle pipe estimator.
if type(estimator) == sklearn.pipeline.Pipeline:
initial_param_dict = estimator.named_steps.copy()
final_param_dict = initial_param_dict.copy()
for k in parameters:
print(k.split('__')[0])
final_param_dict[k.split('__')[0]] =\
initial_param_dict[k.split('__')[0]].set_params(
**{k.split('__')[1] : grid.cv_results_['params'][index_bestRanked_param][k]})
# build new estimator handling parameters given in argument.
estimator_optimized = Pipeline(list(final_param_dict.items()))
else:
estimator_optimized = estimator.set_params(**grid.cv_results_['params'][index_bestRanked_param])
# evaluate the model with the found hyperparameters.
scores = cross_validate(
estimator= estimator_optimized,
X=X, y=y, n_jobs=n_jobs,
scoring=('r2', 'neg_mean_squared_error'),
cv=KFold(n_splits=k_cv, shuffle=True, random_state=rs))
# information to display.
text = f'For {estimator_optimized} we have MSE:'+\
f"{int(scores['test_neg_mean_squared_error'].mean())} +-"+\
f"{int(scores['test_neg_mean_squared_error'].std())}"+\
f"\nFor {estimator_optimized} we have R^2:"+\
f"{round(100*scores['test_r2'].mean(), 1)} +-"+\
f"{round(100*scores['test_r2'].std(), 1)}"+\
"\nThe above results are estimated by cross_validate() and may slightly differ from those estimated by GridSearchCV()"
print(text)
# graphic to identify the improvement of the found hyperparameters in comparison with other ones.
if graph:
fig, ax = plt.subplots(figsize=size)
y = np.abs(grid.cv_results_['mean_test_score'])
y_min = np.nanmin(y)
x = np.arange(len(y))
ax.scatter(x, y)
ax.plot(x, np.repeat(y_min, repeats=len(x)), linestyle='--', color='r', label=f'min_mse={"{:.2e}".format(round(y_min,1))} (estimated by GridSearchCV())')
ax.axvline(x=index_bestRanked_param, linestyle='-.', color='r',
label=f"{grid.cv_results_['params'][index_bestRanked_param]}")
ax.set_xticks(x)
ax.set_xticklabels(grid.cv_results_['params'], rotation=90)
ax.legend(loc='best')
ax.set_title('Mean Squared Error vs Hyperparameters')
plt.show()
duration[1] = time.time()
quotient_hour, remainder_hour = duration.sum()// 3600, duration.sum() % 3600
quotient_minute, remainder_minute= remainder_hour // 60, remainder_hour % 60
print(f"Elapsed time: {int(quotient_hour)}h {int(quotient_minute)}min {int(remainder_minute)}s")
return {'bestHyperParams':grid.cv_results_['params'][index_bestRanked_param],
'gridSearch': grid.cv_results_, 'recap':text}
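# Hypothetical call with a piped estimator (step and parameter names follow
# sklearn's '<step>__<param>' convention and are not taken from the project):
# pipe = Pipeline([('scale', PowerTransformer()), ('reg', LinearRegression())])
# out = find_bestHyperparameters(df, 'y', pipe,
# parameters={'reg__fit_intercept': [True, False]}, k_cv=5)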
def mse_r2_score_fct(df, name_y, id_columns, estimator, L, cols_categ=[], method='box-cox',
pt_standardize=True, cols_boxCox=[], drp_first_level=True, parameters={}, fct=lambda x: x, cv=5, n_jobs=1):
'''
Estimate by cross-validation the MSE and R^2 of an estimator (possibly a
Pipeline) fitted on the subset of columns indexed by <id_columns>.
Parameters
----------
df : DataFrame
Data frame containing the response and the features.
estimator : sklearn
Scikit-Learn method
parameters : dict
Dictionary of parameters.
name_y : str
The response column name.
L : list
Names of all possible columns of df. Note that L can change depending on the value of cols_categ.
cols_categ : list
List of the original categorical columns whose levels have to be converted into dummy variables.
The default is [].
id_columns : list/array
Column indexes to select in X.
parameters : set, optional
Dictionary mapping estimator argument names to their values. The default is {}.
fct : function
Any function to apply to the response to compute a transformed rmse after the fitting.
The default is lambda x: x (the identity function).
cv : int, optional
Cross validation value. The default is 5.
n_jobs : int, optional
Core number to use. The default is 1.
Returns
-------
1D array
[mse_mean, mse_std, r2_mean, r2_std]: respectively the mean and the standard deviation of both the mean squared error and the explained variance rate.
'''
# levels by category.
lvls_by_categ = {k:[] for k in cols_categ}
col_name = [L[i].split('_') for i in id_columns]
for k in col_name:
if k[0] in lvls_by_categ:
lvls_by_categ[k[0]].append('_'.join(k[1:]))
# gather column name that are not levels of categorical columns.
col_name_2 = ['_'.join(k) for k in col_name if not k[0] in lvls_by_categ]
col_tot = col_name_2 + list(lvls_by_categ)
X = df.loc[:, col_tot]
# names of the categorical columns.
keys = list(lvls_by_categ)
for key in lvls_by_categ:
if len(lvls_by_categ[key]):
X = X[X[key].isin(lvls_by_categ[key])].copy()
else:
keys.remove(key)
# boolean allows dropping the first level of each categorical variable when possible.
if lvls_by_categ:
boolean = min([len(lvls_by_categ[k])!=1 for k in lvls_by_categ.keys()])
else:
boolean=False
if not drp_first_level:
boolean = False
X = pd.get_dummies(X, columns=keys, drop_first=boolean).select_dtypes('number')
y = df.loc[X.index, name_y]
# handle pipe estimator.
if type(estimator) == sklearn.pipeline.Pipeline:
steps_pipe = estimator.named_steps.copy()
steps_with_params = steps_pipe.copy()
for k in parameters:
name_estimator = k.split('__')[0]
name_parameter = k.split('__')[1]
print(name_estimator)
steps_with_params[name_estimator] =\
steps_pipe[name_estimator].set_params(
**{name_parameter: parameters[k]})
# build new estimator handling parameters given in argument.
estimator_2 = Pipeline(list(steps_with_params.items()))
else:
estimator_2 = estimator.set_params(**parameters)
frame = y.to_frame(name=y.name).join(X)
score=('neg_means_squared_error', 'r2')
res = {k:[0, 0] for k in ['test_'+k for k in score]}
mse_list, r2_list = [], []
# apply one of the two power transformations provided by Scikit-Learn.
frame_transf = frame.copy()
col_to_transf = list(frame.reindex(columns=cols_boxCox).dropna(axis=1))
if col_to_transf:
power_t = PowerTransformer(method=method, standardize=pt_standardize)
power_t.fit(frame_transf.loc[:, col_to_transf])
frame_transf.loc[:, col_to_transf] = power_t.transform(frame_transf.loc[:, col_to_transf])
# cross-validation process: shuffle the rows and make a K-fold split.
kf = KFold(n_splits=cv, shuffle=True, random_state=0)
for train, test in kf.split(frame_transf.iloc[:, 1:]):
X_train, X_test = frame_transf.iloc[train, 1:], frame_transf.iloc[test, 1:]
# y_test is left unchanged, then it is extracted from df not df_transf.
y_train, y_test = frame_transf.iloc[train, 0], frame.iloc[test, 0]
# to "reinitialize" estimator at each loop to avoid that the estimator be influenced by the
# previous fit.
estimator_to_use = estimator_2
estimator_to_use.fit(X_train, y_train)
y_pred = estimator_to_use.predict(X_test)
if name_y in col_to_transf:
# power_t was fitted on all of col_to_transf at once, so y_pred is tiled
# across those columns and the column matching name_y is kept after the
# inverse transformation ([:, 0] would only be right if name_y came first).
y_pred_transf = fct(power_t.inverse_transform(
np.concatenate(len(col_to_transf)*[y_pred.reshape(-1, 1)], axis=1)))[:, col_to_transf.index(name_y)]
else:
y_pred_transf = fct(y_pred)
y_test_transf = fct(y_test)
mse = ((y_test_transf - y_pred_transf)**2).mean()
mse_list.append(mse)
denominator = ((y_test_transf - y_test_transf.mean())**2).sum()
# avoid case where y_test_transf contains only one value.
if denominator:
r2 = 100*(1 - ((y_test_transf - y_pred_transf)**2).sum()/denominator)
else:
r2 = np.nan
r2_list.append(r2)
mse_mean, mse_std = round(np.array(mse_list).mean(), 1), round(np.array(mse_list).std(), 1)
r2_mean, r2_std = round(np.array(r2_list).mean(), 1), round(np.array(r2_list).std(), 1)
res['test_neg_means_squared_error'][0], res['test_neg_means_squared_error'][1] = mse_mean, mse_std
res['test_r2'][0], res['test_r2'][1] = r2_mean, r2_std
return np.array([res[k] for k in res]).reshape(1,-1)[0]
def find_bestModel_fwd_bwd(df, name_y, estimator=LinearRegression(), mode='forward',
pt_standardize=True, cols_categ=[], nrows_thresh=None, drp_first=True, fct=lambda x:x, method='box-cox',
cols_boxCox=[], parameters=dict(), cv=5, n_jobs=1, min_score = True, graph=True, size=(7, 3),
thresh=10):
'''
<find_bestModel_fwd_bwd> performs forward or backward stepwise selection with
the given estimator, relying on the MSE estimated by the k-fold
cross-validation method.
Parameters
----------
df : DataFrame
Data frame gathering the response and feature observations, the response must be the
first column.
estimator: str, optional
The scikit-learn estimator in string. The default is 'linear_model.LinearRegression'
cols_categ: list, optional
The names of the columns to convert into dummy variables. The default is [].
score: tuple, optional
Tuple containing the names of the metrics we want for the estimator.
The default value is ('neg_mean_squared_error', 'r2').
parameters: dict, optional
The set of hyperparameters that we will use in the estimator. The default is dict().
cv : int, optional
The number of splits in a KFold() cross validation function. The default is 5.
n_jobs : int, optional
The number of workers to use.
Returns
-------
set
Assuming the data frame contains p features, the first item
will be a data frame containing p + (p-1) + (p-2) + ... + 1 = p(p+1)/2
rows. The first p rows correspond to the models containing only one
feature; among these first p models we keep the one with the lowest
estimated MSE. Then we add to the kept model another feature
among the remaining p-1 features, again keep the model with the lowest
estimated MSE, and so on.
The second item is a data frame summarizing the best models of the
first item. It contains p rows, each corresponding to the best model
(lowest estimated MSE) for a fixed number of features.
For the 2 items the data are organized in 3 columns like below:
+--------------------+---------------------+------------------------+
| number of features | indexes of features | estimated mse (k-fold) |
+--------------------+---------------------+------------------------+
| 3 | (x8, x3, x6) | 1587 |
+--------------------+---------------------+------------------------+
'''
if mode not in {'forward', 'backward'}:
print("mode must be either 'forward' or 'backward'.")
return {}
duration = np.array(2 * [-time.time()])
# a. Initialization
score=('neg_means_squared_error', 'r2')
score_params = np.array([[k+'_mean', k+'_std'] for k in score]).reshape(1,-1)[0]
df_tot = pd.get_dummies(df.iloc[:, 1:], drop_first=False, columns=cols_categ)
column_list = list(df_tot)
column_df_list = list(df)
column_df_list.insert(0, column_df_list.pop(column_df_list.index(name_y)))
df = df.loc[:, column_df_list].copy()
y = df.iloc[:, 0]
p = len(column_list)
list_df_fwd = []
list_df_fwd.append(pd.DataFrame({"predictorNumb":[0 if mode=='forward' else p][0],
"combinations":[tuple([] if mode=='forward' else tuple(range(p)))],
"model": estimator}))
def featureless(mode, step='beginning'):
if mode == 'forward':
id_columns = tuple(range(1) if step=='beginning' else range(p))
elif mode == 'backward':
id_columns = tuple(range(p) if step=='beginning' else range(1))
if id_columns == (0,):
y_m = y.mean()
y_mean = pd.DataFrame({'mean': [y_m for k in y]}, index=y.index)
yy = y.to_frame(name=y.name).join(y_mean)
col = list(yy.iloc[:, 1:])
cols_c = []
else:
yy = df.copy()
col = column_list
cols_c = cols_categ
s = list_df_fwd[-1].loc[:, ['combinations', 'model']].apply(
lambda x: mse_r2_score_fct(df=yy, name_y=name_y, id_columns=list(id_columns), method=method,
pt_standardize=pt_standardize, cols_boxCox=cols_boxCox,estimator=x[1], L=col,
cols_categ=cols_c, fct=fct,parameters=parameters, cv=cv, n_jobs=n_jobs), axis=1)
columns_to_select = [k.split('_')[-1] for k in col if k.split('_')[0]=='genre']
if columns_to_select:
nrows = pd.get_dummies(df[df.genre.isin(columns_to_select)]).shape[0]
else:
nrows = 0
return {'score_serie':s, 'nrows':nrows}
s = featureless(mode=mode)['score_serie']
for k in score_params:
list_df_fwd[-1].loc[:, k] = 0
list_df_fwd[-1].loc[:, score_params] = pd.DataFrame(s.tolist(), columns=score_params, index=s.index)
score_up = score[0]+'_up'
list_df_fwd[-1].loc[:, score_up] = list_df_fwd[-1].loc[:, score_params[0]] +\
list_df_fwd[-1].loc[:, score_params[1]]
list_model_fwd = list_df_fwd.copy()
list_model_fwd[-1].loc[:, 'nrows'] = featureless(mode=mode)['nrows']
print('The model without any predictor (forward) or the one with all predictors (backward) has been treated.')
id_considered = tuple([] if mode=='forward' else range(p)) # (0, 1, 2, ..., p-1)
id_not_considered = tuple(k for k in range(p) if not k in id_considered)
def rem(x, a):
l = x.copy()
l.remove(a)
return l
# b. Looping
list_loop = list(range(p) if mode=='forward' else range(p-1, 0, -1))
for j in list_loop:
duration_i = np.array(2 * [-time.time()]) # array to compute the elapsed time of this iteration.
list_id_considered = list(id_considered)
if mode=='forward':
id_to_consider = np.array([list_id_considered+[k] for k in id_not_considered])
else:
id_to_consider = np.array([rem(list_id_considered, k) for k in id_considered])
df_fwd_j = pd.DataFrame({
"predictorNumb": np.array([j+1 if mode=='forward' else j for iter_var in range(len(id_to_consider))]),
"combinations": [tuple(x) for x in id_to_consider],
"model": [estimator for k in id_to_consider]
})
s = df_fwd_j.loc[:, ['combinations', 'model']].apply(
lambda x: mse_r2_score_fct(df=df, name_y=name_y, id_columns=list(x[0]), estimator=x[1], fct=fct,
L=column_list, cols_categ=cols_categ, parameters=parameters, cv=cv, method=method,
pt_standardize=pt_standardize,cols_boxCox=cols_boxCox, n_jobs=n_jobs), axis=1)
if not nrows_thresh:
nrows_thresh = df.shape[0]+1 if mode == 'forward' else -1
fct_nrows = (lambda x : 0 if x>=nrows_thresh else 1) if mode=='forward'\
else (lambda x : 0 if x<=nrows_thresh else 1)
for k in score_params:
df_fwd_j.loc[:, k] = 0
df_fwd_j.loc[:, score_params] = pd.DataFrame(s.tolist(), columns=score_params, index=s.index)
score_up = score[0]+'_up'
df_fwd_j[score_up] = df_fwd_j[score_params[0]] + df_fwd_j[score_params[1]]
print(f"Iteration number: {j}.")
list_df_fwd.append(df_fwd_j)
list_model_fwd.append(
df_fwd_j.sort_values(by=score[0]+'_mean', ascending=min_score).iloc[[0], :])
tuple_from_best = list_model_fwd[-1].combinations.values[0]
columns_selected = [column_list[i].split('_')[-1] for i in tuple_from_best if column_list[i].split('_')[0]=='genre']
list_model_fwd[-1].loc[:, 'nrows'] = pd.get_dummies(df[df.genre.isin(columns_selected)]).shape[0]
if mode == 'forward':
set_diff = set(tuple_from_best) - set(id_considered)
assert len(set_diff) == 1
id_considered += tuple(set_diff)
else:
set_diff = set(id_considered) - set(tuple_from_best)
assert len(set_diff) == 1
value_to_remove = list(set_diff)[0]
id_considered = tuple(rem(list_id_considered, value_to_remove))
id_not_considered = tuple([k for k in np.arange(p) if not k in id_considered])
duration_i[1] = time.time()
quotient_hour, remainder_hour = duration_i.sum()// 3600, duration_i.sum() % 3600
quotient_minute, remainder_minute= remainder_hour // 60, remainder_hour % 60
print(f"Model with :{j}/{p-1} predictors.",\
f"\nElapsed time: {int(quotient_hour)}h {int(quotient_minute)}min {int(remainder_minute)}s.")
nrows = list_model_fwd[-1].loc[:, 'nrows'].values[0]
if fct_nrows(nrows):
continue
else:
break
list_df_fwd.append(pd.DataFrame({"predictorNumb":[p if mode=='forward' else 0][0],
"combinations":[tuple(range(p) if mode=='forward' else [])],
"model": estimator}))
s = featureless(mode=mode, step='end')['score_serie']
for k in score_params:
list_df_fwd[-1].loc[:, k] = 0
list_df_fwd[-1].loc[:, score_params] = pd.DataFrame(s.tolist(), columns=score_params, index=s.index)
score_up = score[0]+'_up'
list_df_fwd[-1].loc[:, score_up] = list_df_fwd[-1].loc[:, score_params[0]] +\
list_df_fwd[-1].loc[:, score_params[1]]
list_model_fwd.append(
list_df_fwd[-1].sort_values(by=score[0]+'_mean', ascending=min_score).iloc[[0], :])
list_model_fwd[-1].loc[:, 'nrows'] = featureless(mode=mode)['nrows']
# c. Adding
df_fwd_total = pd.concat(list_df_fwd).reset_index(drop=True)
model_fwd_total = pd.concat(list_model_fwd).reset_index(drop=True)
duration[1] = time.time()
quotient_hour, remainder_hour = duration.sum()// 3600, duration.sum() % 3600
quotient_minute, remainder_minute= remainder_hour // 60, remainder_hour % 60
print(f"Elapsed time: {int(quotient_hour)}h {int(quotient_minute)}min {int(remainder_minute)}s")
if graph:
graph_dict = {model_fwd_total.predictorNumb.values[i]:
model_fwd_total.neg_means_squared_error_mean.values[i] for i in range(model_fwd_total.shape[0])}
graph_dict = dict(sorted(graph_dict.items(), key=lambda x: x[0])) if mode=='forward'\
else dict(sorted(graph_dict.items(), key=lambda x: x[0], reverse=True))
df_graph = pd.DataFrame({
'pred_numb': list(graph_dict.keys()),
'mse': list(graph_dict.values())
})
df_graph['mse_shift'] = df_graph.mse.shift(periods=-1)
df_graph['proportion'] = round(100*(df_graph['mse']-df_graph['mse_shift'])/df_graph['mse'])
print(df_graph)
mse_min = df_graph[df_graph['proportion']>=thresh].iloc[-1, 1]
pred_min = df_graph[df_graph['proportion']>=thresh].iloc[-1, 0]
print(f'mse_min:{mse_min}, pred_min:{pred_min}')
fig, ax = plt.subplots(figsize=size)
ax.scatter(x=list(graph_dict.keys()), y=list(graph_dict.values()))
ax.plot(list(graph_dict.keys()), np.repeat(mse_min, repeats=len(graph_dict)), color='red', linestyle='-.', alpha=.5,label=f'mse:{mse_min}')
ax.axvline(x=pred_min, color='red', linestyle='--', label=f'predictor_numb:{pred_min}')
ax.legend(loc='best')
ax.set_xlabel('Predictor number')
ax.set_ylabel('MSE')
ax.set_xlim(xmin=.5, xmax=max(graph_dict.keys())+1)
ax.set_title('MSE(predictors)')
# sort by predictor number, then select the last model size for which the MSE still drops by at least <thresh> percent
plt.show()
return {'best_models': model_fwd_total, 'all_models':df_fwd_total}
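# Hypothetical call (the response name 'y' is illustrative; 'genre' matches the
# categorical column this function already assumes in <df>):
# out = find_bestModel_fwd_bwd(df, 'y', mode='forward', cols_categ=['genre'], cv=5)
# out['best_models'] # one row per model size, with its estimated MSE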
def compute_rmse_r2(dfo, name_y, estimator=LinearRegression(),
powerTransf = False, cols_boxCox=[], method='box-cox', pt_standardize=True,
fct=lambda x: x, parameters=dict(), cv=5, rd=0, ncols=2, size=(10, 7)):
'''
<compute_rmse_r2> computes the rmse (root mean squared error) and $R^{2}$ (the explained variance rate)
for a regression estimator, which may have been piped through sklearn's Pipeline() function.
Parameters
----------
df : DataFrame
Data frame gathering the response and feature observations.
name_y : str
Name of the response in <df>.
estimator: sklearn estimator
Either regression estimator. The default is LinearRegression().
fct : function
Any function to apply to the response to compute a transformed rmse after the fitting.
The default is lambda x: x (the identity function).
parameters : dictionary
Dictionary that takes parameters of the estimator as keys and their value as values. Note that we have to respect the syntax of piped estimators.
The default is dict().
cv : int
Split number for the estimation of rmse and r2 with cross-validation procedure.
The default is 5.
rd: int
Random state. The default is 0.
ncols: int
Number of columns to display subplots.
size: (int, int)
Tuple respectively representing width and height.
The default is (10, 7).
Returns
-------
Dictionary.
'''
# array of two time values to evaluate the time taken to run the script.
duration = np.array(2*[-time.time()])
df = dfo.copy()
# array of 4 values which will be the mean and the standard deviation of both rmse and r2.
res = {k:{'mean':np.nan, 'std':np.nan} for k in ['rmse', 'r2']}
if not name_y in list(df):
print(f'{name_y} is not in the column list')
else:
col_list = list(df)
# inserts the response column to the very first place.
col_list.insert(0, col_list.pop(col_list.index(name_y)))
df = df.loc[:, col_list] # actually apply the reordering, so the response is column 0.
# handle pipe estimator.
if type(estimator) == sklearn.pipeline.Pipeline:
steps_pipe = estimator.named_steps.copy()
steps_with_params = steps_pipe.copy()
for k in parameters:
name_estimator = k.split('__')[0]
name_parameter = k.split('__')[1]
print(name_estimator)
steps_with_params[name_estimator] =\
steps_pipe[name_estimator].set_params(
**{name_parameter: parameters[k]})
# build new estimator handling parameters given in argument.
estimator_2 = Pipeline(list(steps_with_params.items()))
else:
estimator_2 = estimator.set_params(**parameters)
# apply one of the two power transformations provided by Scikit-Learn.
col_to_transf = list(df.reindex(columns=cols_boxCox).dropna(axis=1))
df_transf = df.copy()
if col_to_transf:
power_t = PowerTransformer(method=method, standardize=pt_standardize)
power_t.fit(df.loc[:, col_to_transf])
df_transf.loc[:, col_to_transf] = power_t.transform(df_transf.loc[:, col_to_transf])
print(f'Applying {method} transformation on the following columns: {col_to_transf}')
rmse_list, r2_list = [], []
# build plot frame.
f = lambda x: int(x)+1 if (x - int(x))>=.5 else int(x) # an alternative to the round function that always rounds .5 up
nrows = f(cv/ncols)
ind = [[i, j] for i,j in itertools.product(np.arange(nrows),np.arange(ncols))]
fig, axs = plt.subplots(nrows, ncols, figsize=size)
iter_var = 0
# cross-validation process: shuffle the rows and make a K-fold split.
kf = KFold(n_splits=cv, shuffle=True, random_state=rd)
for train, test in kf.split(df.iloc[:, 1:]):
X_train, X_test = df_transf.iloc[train, 1:], df_transf.iloc[test, 1:]
# y_test is left unchanged, then it is extracted from df not df_transf.
y_train, y_test = df_transf.iloc[train, 0], df.iloc[test, 0]
# to "reinitialize" estimator at each loop to avoid that the estimator be influenced by the
# previous fit.
estimator_to_use = estimator_2
estimator_to_use.fit(X_train, y_train)
y_pred = estimator_to_use.predict(X_test)
# remove transformation of the response to compute the mse.
if name_y in col_to_transf:
# power_t was fitted on all of col_to_transf at once, so y_pred is tiled
# across those columns and the column matching name_y is kept after the
# inverse transformation ([:, 0] would only be right if name_y came first).
y_pred_transf = fct(power_t.inverse_transform(
np.concatenate(len(col_to_transf)*[y_pred.reshape(-1, 1)], axis=1)))[:, col_to_transf.index(name_y)]
else:
y_pred_transf = fct(y_pred)
# recall that fct is a function given in argument, by default is the identity.
y_test_transf = fct(y_test)
# compute rmse and r2.
rmse = np.sqrt(((y_test_transf - y_pred_transf)**2).mean())
rmse_list.append(rmse)
r2 = 100*(1 - ((y_test_transf - y_pred_transf)**2).sum()/\
((y_test_transf - y_test_transf.mean())**2).sum())
r2_list.append(r2)
# plot graphics to visualize the prediction power of the model.
x = np.arange(len(test))
axs[ind[iter_var][0], ind[iter_var][1]].scatter(x, y_test_transf, label='Test')
axs[ind[iter_var][0], ind[iter_var][1]].scatter(x, y_pred_transf, label='Prediction')
axs[ind[iter_var][0], ind[iter_var][1]].set_title(
fr'{cv}-fold number {iter_var+1}, $rmse$ = {round(rmse, 1)}, $R^{2}={round(r2, 1)}\%$')
axs[ind[iter_var][0], ind[iter_var][1]].legend(loc='best')
iter_var += 1
# information about the rmse and the r2, computed here so they can be displayed above the graphic.
rmse_mean, rmse_std = round(np.array(rmse_list).mean(), 1), round(np.array(rmse_list).std(), 1)
r2_mean, r2_std = round(np.array(r2_list).mean(), 1), round(np.array(r2_list).std(), 1)
res['rmse']['mean'], res['rmse']['std'] = rmse_mean, rmse_std
res['r2']['mean'], res['r2']['std'] = r2_mean, r2_std
print(res)
# plot the graph.
plt.suptitle(f'Estimator: {estimator_2}.')
plt.tight_layout()
plt.show()
duration[1] = time.time()
quotient_hour, remainder_hour = duration.sum()// 3600, duration.sum() % 3600
quotient_minute, remainder_minute= remainder_hour // 60, remainder_hour % 60
print(f"Elapsed time: {int(quotient_hour)}h {int(quotient_minute)}min {int(remainder_minute)}s")
return res
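if __name__ == "__main__":
    # Tiny smoke test on synthetic data (purely illustrative, not project data);
    # it assumes matplotlib can open a figure on this machine.
    rng = np.random.default_rng(0)
    x1, x2 = rng.normal(size=80), rng.normal(size=80)
    demo = pd.DataFrame({'y': 2*x1 - x2 + rng.normal(scale=.1, size=80),
                         'x1': x1, 'x2': x2})
    compute_rmse_r2(demo, name_y='y', cv=4)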
|
{"hexsha": "402d83fcea44631810d08867483cdb82daef6118", "size": 40585, "ext": "py", "lang": "Python", "max_stars_repo_path": "1_scripts/analyticalFunctions.py", "max_stars_repo_name": "rgb250/MostValuableFilmGenre", "max_stars_repo_head_hexsha": "cafd0a0db0ec592b29479d36831097b6dc1e5fda", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-11T12:02:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T12:02:56.000Z", "max_issues_repo_path": "1_scripts/analyticalFunctions.py", "max_issues_repo_name": "rgb250/MostValuableFilmGenre", "max_issues_repo_head_hexsha": "cafd0a0db0ec592b29479d36831097b6dc1e5fda", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "1_scripts/analyticalFunctions.py", "max_forks_repo_name": "rgb250/MostValuableFilmGenre", "max_forks_repo_head_hexsha": "cafd0a0db0ec592b29479d36831097b6dc1e5fda", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5424311927, "max_line_length": 162, "alphanum_fraction": 0.6059135148, "include": true, "reason": "import numpy,import scipy,import statsmodels,from statsmodels", "num_tokens": 10132}
|
[STATEMENT]
lemma the_riesz_rep[simp]: \<open>the_riesz_rep f \<bullet>\<^sub>C x = f *\<^sub>V x\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. the_riesz_rep f \<bullet>\<^sub>C x = f *\<^sub>V x
[PROOF STEP]
unfolding the_riesz_rep_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (SOME t. \<forall>x. f *\<^sub>V x = t \<bullet>\<^sub>C x) \<bullet>\<^sub>C x = f *\<^sub>V x
[PROOF STEP]
apply (rule someI2_ex)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<exists>a. \<forall>x. f *\<^sub>V x = a \<bullet>\<^sub>C x
2. \<And>xa. \<forall>x. f *\<^sub>V x = xa \<bullet>\<^sub>C x \<Longrightarrow> xa \<bullet>\<^sub>C x = f *\<^sub>V x
[PROOF STEP]
by (simp_all add: riesz_frechet_representation_cblinfun_existence)
|
{"llama_tokens": 328, "file": "Complex_Bounded_Operators_Complex_Bounded_Linear_Function", "length": 3}
|
import numpy as np
"""
Basic Neural Network example with 1 hidden layer.
Input layer: 3 neurons
Hidden layer: 4 neurons
Output layer: 1 neuron
"""
# nonlinear function which is on the end of the Neurons
def nonlin(x,deriv=False):
if(deriv==True):
return x*(1-x)
return 1/(1+np.exp(-x))
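# Note: when deriv=True the argument is assumed to already be a sigmoid output
# s = nonlin(x), so x*(1-x) computes s*(1-s), the sigmoid derivative at that point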
# input - every line represents a training data set, every training data set has 3 inputs
X = np.array([[0,0,1],
[0,1,1],
[1,0,1],
[1,1,1]])
# output - every line represents an expecting output for a traning data set
y = np.array([[0],
[1],
[1],
[0]])
# random seed for reproducibility: it causes the same random number sequence on every run
np.random.seed(1)
# before the first cycle the weights need to be initialized with random values (mean 0):
# a uniform distribution over the range [-1, 1)
w01 = 2*np.random.random((3,4)) - 1
w12 = 2*np.random.random((4,1)) - 1
# we use a fixed number of iterations to approach the expected values instead of stopping on a mean-error criterion
for j in range(60000):
# -----------------
#
# Feed forward
#
# -----------------
# meaning: we send the input through the Neural Network
# -----------------------------------------------------
#
# Matrix rows correspond to the training examples, columns to the nodes of the layer
#
#0. layer (input - 4x3) is always the input
l0 = X
#1. layer (hidden-4x4) => nonlin(Σw.σ)
l1 = nonlin(np.dot(l0,w01))
#2. layer (output-4x1) => nonlin(Σw.σ)
l2 = nonlin(np.dot(l1,w12))
# -----------------
#
# Back Propagation
#
# -----------------
# meaning: we propagate back the error from the output till the input
# -------------------------------------------------------------------
#
# the error on the output layer value compared to the expected value
l2_error = y - l2
# delta on the output layer - calculated differently than on the other layers
# this is the value by which we should modify the w12 weights
l2_delta = l2_error*nonlin(l2,deriv=True) # [4x1]
# we print out the mean error after every 10000th cycle
if (j % 10000) == 0:
print ("Error:{0}".format( str(np.mean(np.abs(l2_error)))))
# how much did each l1 value contribute to the l2 error (according to the weights)?
l1_error = l2_delta.dot(w12.T)
# delta on the hidden layer
# this is the value we should modify the w01 weight
l1_delta = l1_error * nonlin(l1,deriv=True)
# weight modification
w12 += l1.T.dot(l2_delta)
w01 += l0.T.dot(l1_delta)
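# note the plus signs: since the error is defined as (y - l2), adding these
# products descends the squared-error gradient (i.e. w -= grad with learning rate 1)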
# after the last cycle prints out the output
print(l2)
|
{"hexsha": "311971bae1ee31a8d73bf7f1d496e929cc988902", "size": 2667, "ext": "py", "lang": "Python", "max_stars_repo_path": "neuron_3.py", "max_stars_repo_name": "dallaszkorben/python-neuralnetwork-demo", "max_stars_repo_head_hexsha": "b5aea2bdc25163a88d17e5a5640cd54f65bc16ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neuron_3.py", "max_issues_repo_name": "dallaszkorben/python-neuralnetwork-demo", "max_issues_repo_head_hexsha": "b5aea2bdc25163a88d17e5a5640cd54f65bc16ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neuron_3.py", "max_forks_repo_name": "dallaszkorben/python-neuralnetwork-demo", "max_forks_repo_head_hexsha": "b5aea2bdc25163a88d17e5a5640cd54f65bc16ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6333333333, "max_line_length": 89, "alphanum_fraction": 0.5868016498, "include": true, "reason": "import numpy", "num_tokens": 730}
|
"""
References:
https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harris_corner.py
https://github.com/scikit-image/scikit-image/blob/main/skimage/feature/corner.py
"""
import cv2
import numpy as np
from .GaussianFilter import gaussian_filter
import skimage.feature
def img2col(image, block_size):
rows, cols = image.shape
dst_height = rows - block_size[0] + 1
dst_width = cols - block_size[1] + 1
image_array = np.zeros((dst_height * dst_width, block_size[0] * block_size[1]))
row = 0
for i in range(0, dst_height):
for j in range(0, dst_width):
window = np.ravel(image[i: i + block_size[0], j: j + block_size[1]])
image_array[row, :] = window
row += 1
return image_array
def img_convolve(image, filter_kernel):
height, width = image.shape[0], image.shape[1]
k_size = filter_kernel.shape[0]
pad_size = k_size // 2
padding_img = np.pad(image, pad_size, mode="edge")
image_array = img2col(padding_img, (k_size, k_size))
kernel_array = np.ravel(filter_kernel)
convolved_img = np.dot(image_array, kernel_array).reshape(height, width)
return convolved_img
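# img2col turns every k x k window into one row, so the convolution above is a
# single matrix product: (H*W, k*k) @ (k*k,) -> (H*W,), reshaped back to (H, W)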
class HarrisCorner(object):
def __init__(self, k: float, kernel_size: int):
if k in (0.04, 0.06):
self.k = k
self.kernel_size = kernel_size
else:
raise ValueError("invalid k value")
def __str__(self):
return f"Harris Corner Detection with k: {self.k}"
def sobel(self, img):
kernel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
dst_x = np.abs(img_convolve(img, kernel_x))
dst_y = np.abs(img_convolve(img, kernel_y))
dst_x = dst_x * 255 / np.max(dst_x)
dst_y = dst_y * 255 / np.max(dst_y)
return dst_x, dst_y
def gaussian(self, img, k_size=3, sigma=1):
return gaussian_filter(img, k_size, sigma)
def detect(self, img_path):
img = cv2.imread(img_path, 0)
height, width = img.shape
corner_list = []
color_img = img.copy()
color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
# original method
# dy, dx = np.gradient(img)
# scikit-image method
# sobel
sobel_x, sobel_y = self.sobel(img)
ixx = sobel_x ** 2
iyy = sobel_y ** 2
ixy = sobel_x * sobel_y
# gaussian
ixx = self.gaussian(ixx) # use the class wrapper so the k_size/sigma defaults apply
iyy = self.gaussian(iyy)
ixy = self.gaussian(ixy)
k = self.k
offset = self.kernel_size // 2
response = []
for y in range(offset, height-offset):
for x in range(offset, width-offset):
wxx = ixx[y-offset: y+offset+1, x-offset: x+offset+1].sum()
wyy = iyy[y-offset: y+offset+1, x-offset: x+offset+1].sum()
wxy = ixy[y-offset: y+offset+1, x-offset: x+offset+1].sum()
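# per-window structure tensor M = [[wxx, wxy], [wxy, wyy]];
# the Harris response below is R = det(M) - k * trace(M)^2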
# harris method
det = (wxx * wyy) - (wxy ** 2)
trace = wxx + wyy
r = det - k * (trace ** 2)
response.append(r)
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0), 0)
color_img.itemset((y, x, 1), 0)
color_img.itemset((y, x, 2), 255)
# shi_tomasi
# r = ((wxx + wyy) - np.sqrt((wxx - wyy) ** 2 + 4 * wxy ** 2)) / 2
# if r > 0.5:
# corner_list.append([x, y, r])
# color_img.itemset((y, x, 0), 0)
# color_img.itemset((y, x, 1), 0)
# color_img.itemset((y, x, 2), 255)
response = np.asarray(response).reshape((height - 2 * offset, width - 2 * offset))
return response, color_img, corner_list
if __name__ == "__main__":
test_path = "./test_img.jpg"
corner_detect = HarrisCorner(0.04, 3)
response, color_img, corner_list = corner_detect.detect(test_path)
cv2.imwrite("detect.jpg", color_img)
|
{"hexsha": "e2daaba966bc2518a7543a05373b5624347357a3", "size": 4061, "ext": "py", "lang": "Python", "max_stars_repo_path": "HarrisCorner.py", "max_stars_repo_name": "chcorophyll/general_image_process_python", "max_stars_repo_head_hexsha": "0ab3b3da246808c36822d31fa0fd226f8d4079ab", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HarrisCorner.py", "max_issues_repo_name": "chcorophyll/general_image_process_python", "max_issues_repo_head_hexsha": "0ab3b3da246808c36822d31fa0fd226f8d4079ab", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HarrisCorner.py", "max_forks_repo_name": "chcorophyll/general_image_process_python", "max_forks_repo_head_hexsha": "0ab3b3da246808c36822d31fa0fd226f8d4079ab", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6228070175, "max_line_length": 84, "alphanum_fraction": 0.5579906427, "include": true, "reason": "import numpy", "num_tokens": 1159}
|
import datetime
import serial
import csv
import re
import numpy as np
import matplotlib.pyplot as plt
SERIAL_PATH = 'COM6'
SERIAL_BAUD = 115200
ELEMENT_COUNT = 8
WINDOW = 60 * 60 # in seconds
OUT_PREFIX = "toaster_"
TEXT_ALPHA = 0.75
# for testing only
TEST_LINE = "0: 73 2: 79 3: 80 4: 75 5: 81 6: 80 7: 82"
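# Helper sketch (illustration only; the loop below keeps its inline parsing):
# decode one serial line such as TEST_LINE into (channel id, value) pairs.
def parse_line(line):
    return [(int(i), float(v)) for i, v in re.findall(r'(\d+):\s*(\d+\.?\d*)', line)]
# parse_line(TEST_LINE) -> [(0, 73.0), (2, 79.0), (3, 80.0), ...]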
if __name__ == "__main__":
start_time = datetime.datetime.now()
out_filename = OUT_PREFIX + start_time.strftime("%Y%m%d_%H%M%S") + ".csv"
with serial.Serial(SERIAL_PATH, SERIAL_BAUD) as ser, open(out_filename, 'w', newline='') as csv_out:
# with open(out_filename, 'w', newline='') as csv_out:
csvwriter = csv.writer(csv_out)
csvwriter.writerow(['timestamp'] + list(range(0, ELEMENT_COUNT)))
# initialize plot window and static elements
fig, ax = plt.subplots(1, 1, constrained_layout=True)
plots = list(map(lambda n: ax.plot([])[0], range(0, ELEMENT_COUNT)))
print(plots)
avg_line = ax.axhline(0)
min_text = ax.text(0, 0, "", ha='right', va='top', alpha=TEXT_ALPHA)
max_text = ax.text(0, 0, "", ha='right', va='bottom', alpha=TEXT_ALPHA)
avg_text = ax.text(0, 0, "", ha='right', va='center', alpha=TEXT_ALPHA)
ax.set_xlabel("time")
ax.set_ylabel("temp")
plt.ion()
plt.draw()
while True:
line = ser.readline().decode('utf-8')
# line = TEST_LINE
line_time = datetime.datetime.now()
delta_time = line_time.timestamp() - start_time.timestamp()
print("[%s] received: %s" % (line_time.strftime("%H:%M:%S"), line))
match = re.findall(r'(\d+):\s*(\d+\.?\d*)', line)
match = list(map(lambda x: (int(x[0]), float(x[1])), match))
if match:
print("Decoded: %s" % (match))
csvrow = list(map(lambda id: '', range(0, ELEMENT_COUNT)))
for (id, val) in match:
csvrow[id] = str(val)
csvwriter.writerow([line_time.timestamp()] + csvrow)
csv_out.flush()
for (id, val) in match:
if id >= ELEMENT_COUNT:
print("(%s, %.1f) id out of range" % (id, val))
else:
plot = plots[id]
plot.set_label("%s: %.1f" % (id, val))
xdata = plot.get_xdata()
ydata = plot.get_ydata()
# drop samples that have scrolled out of the plotting window
while len(xdata) > 0 and xdata[0] < delta_time - WINDOW:
xdata = xdata[1:]
ydata = ydata[1:]
xdata = np.append(xdata, delta_time)
ydata = np.append(ydata, val)
plot.set_xdata(xdata)
plot.set_ydata(ydata)
match_rev = list(map(lambda x: (x[1], x[0]), match))
match_rev.sort()
match_vals = list(map(lambda x: x[0], match_rev))
avg = sum(match_vals)/len(match_vals)
min_text.set_x(delta_time)
max_text.set_x(delta_time)
avg_text.set_x(delta_time)
min_text.set_y(match_rev[0][0])
max_text.set_y(match_rev[-1][0])
avg_text.set_y(avg)
min_text.set_text("min %s: %.1f" % (match_rev[0][1], match_rev[0][0]))
max_text.set_text("max %s: %.1f" % (match_rev[-1][1], match_rev[-1][0]))
avg_text.set_text("avg(%s): %.1f, spread: %.1f" % (len(match_vals), avg, match_vals[-1] - match_vals[0]))
ax.legend(loc=2) # upper left
ax.set_xlim(delta_time - WINDOW, delta_time + 10)
ax.set_ylim(0, 100)
plt.draw()
plt.pause(0.01)
|
{"hexsha": "4eabd2286f5c5586bcb09908d83150a7136be2dd", "size": 3368, "ext": "py", "lang": "Python", "max_stars_repo_path": "toaster.py", "max_stars_repo_name": "CalSol/toaster-visualizer", "max_stars_repo_head_hexsha": "c381bc1d11e81aa0a162c12e5a272e7972e9c4aa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "toaster.py", "max_issues_repo_name": "CalSol/toaster-visualizer", "max_issues_repo_head_hexsha": "c381bc1d11e81aa0a162c12e5a272e7972e9c4aa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "toaster.py", "max_forks_repo_name": "CalSol/toaster-visualizer", "max_forks_repo_head_hexsha": "c381bc1d11e81aa0a162c12e5a272e7972e9c4aa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6990291262, "max_line_length": 110, "alphanum_fraction": 0.5837292162, "include": true, "reason": "import numpy", "num_tokens": 987}
|
import h5py
import numpy as np
import silx.math.fit
import silx.math.fit.peaks
# fileRead = '/home/esrf/slim/data/ihme10/id15/TiC_Calib/ihme10_TiC_calib.h5'
# filesave = '/home/esrf/slim/easistrain/easistrain/EDD/Results_ihme10_TiC_calib.h5'
# sample = 'TiC_calib'
# dataset = '0001'
# scanNumber = '4'
# horizontalDetector = 'mca2_det0'
# verticalDetector = 'mca2_det1'
# numberOfPeaks = 1
# rangeFit = [680,820]
# doublet = [1]
def splitPseudoVoigt(xData, *params):
return silx.math.fit.sum_splitpvoigt(xData, *params)
def guessParameters(yData, counterOfPeak, doublet):
fwhmGuess = silx.math.fit.peaks.guess_fwhm(yData)
peaksGuess = silx.math.fit.peaks.peak_search(
yData,
fwhmGuess,
sensitivity=1,
begin_index=None,
end_index=None,
debug=False,
relevance_info=False,
) ## index of the peak
if np.size(peaksGuess) > doublet[counterOfPeak]:
# print(peaksGuess[np.argsort(yData[peaksGuess[:].astype(int)])])
peaksGuess = peaksGuess[np.argsort(yData[peaksGuess[:].astype(int)])][
-doublet[counterOfPeak] :
]
print(peaksGuess)
return fwhmGuess, peaksGuess
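# A tiny self-check sketch (illustration only, not used below): guessParameters
# on a synthetic single Gaussian line, with doublet=[1] meaning one peak is
# expected in this fitting window.
def _guess_demo():
    x = np.arange(200)
    y = np.exp(-0.5 * ((x - 120) / 5.0) ** 2)
    # returns an fwhm estimate and the index of the strongest detected peak (~120)
    return guessParameters(y, 0, [1])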
def angleCalibrationEDD(
fileRead,
fileSave,
sample,
dataset,
scanNumber,
horizontalDetector,
verticalDetector,
numberOfPeaks,
doublet,
rangeFit,
):
with h5py.File(fileRead, "r") as h5Read: ## Read the h5 file of raw data
patternHorizontalDetector = h5Read[
sample
+ "_"
+ str(dataset)
+ "_"
+ str(scanNumber)
+ ".1/measurement/"
+ horizontalDetector
][
()
] ## pattern of horizontal detector
patternVerticalDetector = h5Read[
sample
+ "_"
+ str(dataset)
+ "_"
+ str(scanNumber)
+ ".1/measurement/"
+ verticalDetector
][
()
] ## pattern of vertical detector
h5Save = h5py.File(fileSave, "a") ## create/append h5 file to save in
if not "angleCalibration" in h5Save.keys():
angleCalibrationLevel1 = h5Save.create_group(
"angleCalibration"
) ## angleCalibration group
else:
angleCalibrationLevel1 = h5Save["angleCalibration"]
rawDataLevel1_1 = angleCalibrationLevel1.create_group(
"rawData" + "_" + str(dataset) + "_" + str(scanNumber)
) ## rawData subgroup in calibration group
fitLevel1_2 = angleCalibrationLevel1.create_group(
"fit" + "_" + str(dataset) + "_" + str(scanNumber)
) ## fit subgroup in calibration group
fitLevel1_2.create_group("fitParams") ## fit results group for the two detector
fitParamsHD = np.array(())
fitParamsVD = np.array(())
uncertaintyFitParamsHD = np.array(())
uncertaintyFitParamsVD = np.array(())
for i in range(numberOfPeaks):
peakHorizontalDetector = np.transpose(
(
np.arange(rangeFit[2 * i], rangeFit[(2 * i) + 1]),
patternHorizontalDetector[rangeFit[2 * i] : rangeFit[(2 * i) + 1]],
)
) ## peak of the horizontal detector
peakVerticalDetector = np.transpose(
(
np.arange(rangeFit[2 * i], rangeFit[(2 * i) + 1]),
patternVerticalDetector[rangeFit[2 * i] : rangeFit[(2 * i) + 1]],
)
) ## peak of the vertical detector
backgroundHorizontalDetector = silx.math.fit.strip(
data=peakHorizontalDetector[:, 1],
w=5,
niterations=4000,
factor=1,
anchors=None,
) ## background of the horizontal detector
backgroundVerticalDetector = silx.math.fit.strip(
data=peakVerticalDetector[:, 1],
w=5,
niterations=4000,
factor=1,
anchors=None,
) ## background of the vertical detector
fitLevel1_2.create_group(
f"fitLine_{str(i)}"
) ## create group for each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"rawHorizontalDetector", dtype="f", data=peakHorizontalDetector
) ## create dataset for raw data of each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"rawVerticalDetector", dtype="f", data=peakVerticalDetector
) ## create dataset for raw data of each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"backgroundHorizontalDetector",
dtype="f",
data=np.transpose(
(peakHorizontalDetector[:, 0], backgroundHorizontalDetector)
),
) ## create dataset for background of each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"backgroundVerticalDetector",
dtype="f",
data=np.transpose((peakVerticalDetector[:, 0], backgroundVerticalDetector)),
) ## create dataset for background of each calibration peak
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"bgdSubsDataHorizontalDetector",
dtype="f",
data=np.transpose(
(
peakHorizontalDetector[:, 0],
peakHorizontalDetector[:, 1] - backgroundHorizontalDetector,
)
),
) ## create dataset for HD raw data after subst of background
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"bgdSubsDataVerticalDetector",
dtype="f",
data=np.transpose(
(
peakVerticalDetector[:, 0],
peakVerticalDetector[:, 1] - backgroundVerticalDetector,
)
),
) ## create dataset for VD raw data after subst of background
fwhmGuessHD, peaksGuessHD = guessParameters(
peakHorizontalDetector[:, 1] - backgroundHorizontalDetector, i, doublet
) ## guess fit parameters for HD
fwhmGuessVD, peaksGuessVD = guessParameters(
peakVerticalDetector[:, 1] - backgroundVerticalDetector, i, doublet
) ## guess fit parameters for VD
initialGuessHD = np.zeros(5 * doublet[i])
initialGuessVD = np.zeros(5 * doublet[i])
for n in range(doublet[i]):
initialGuessHD[5 * n] = (
    peakHorizontalDetector[:, 1][int(peaksGuessHD[n])]
    - backgroundHorizontalDetector[int(peaksGuessHD[n])]
)  # peak height above background
initialGuessHD[5 * n + 1] = peakHorizontalDetector[:, 0][int(peaksGuessHD[n])]
initialGuessHD[5 * n + 2] = fwhmGuessHD
initialGuessHD[5 * n + 3] = fwhmGuessHD
initialGuessHD[5 * n + 4] = 0.5
initialGuessVD[5 * n] = (
    peakVerticalDetector[:, 1][int(peaksGuessVD[n])]
    - backgroundVerticalDetector[int(peaksGuessVD[n])]
)  # peak height above background
initialGuessVD[5 * n + 1] = peakVerticalDetector[:, 0][int(peaksGuessVD[n])]
initialGuessVD[5 * n + 2] = fwhmGuessVD
initialGuessVD[5 * n + 3] = fwhmGuessVD
initialGuessVD[5 * n + 4] = 0.5
optimal_parametersHD, covarianceHD, infodictHD = silx.math.fit.leastsq(
model=splitPseudoVoigt,
xdata=peakHorizontalDetector[:, 0],
ydata=peakHorizontalDetector[:, 1] - backgroundHorizontalDetector,
p0=initialGuessHD,
sigma=np.sqrt(
np.abs(peakHorizontalDetector[:, 1] - backgroundHorizontalDetector) + 1
),
full_output=True,
max_iter=1000,
) ## fit of the peak of the Horizontal detector
optimal_parametersVD, covarianceVD, infodictVD = silx.math.fit.leastsq(
model=splitPseudoVoigt,
xdata=peakVerticalDetector[:, 0],
ydata=peakVerticalDetector[:, 1] - backgroundVerticalDetector,
p0=initialGuessVD,
sigma=np.sqrt(
np.abs(peakVerticalDetector[:, 1] - backgroundVerticalDetector) + 1
),
full_output=True,
max_iter=1000,
) ## fit of the peak of the Vertical detector
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"fitHorizontalDetector",
dtype="f",
data=np.transpose(
(
peakHorizontalDetector[:, 0],
splitPseudoVoigt(peakHorizontalDetector[:, 0], optimal_parametersHD)
+ backgroundHorizontalDetector,
)
),
) ## fitted data of the horizontal detector
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"fitVerticalDetector",
dtype="f",
data=np.transpose(
(
peakVerticalDetector[:, 0],
splitPseudoVoigt(peakVerticalDetector[:, 0], optimal_parametersVD)
+ backgroundVerticalDetector,
)
),
) ## fitted data of the vertical detector
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"errorHorizontalDetector",
dtype="f",
data=np.transpose(
(
peakHorizontalDetector[:, 0],
np.absolute(
splitPseudoVoigt(
peakHorizontalDetector[:, 0], optimal_parametersHD
)
+ backgroundHorizontalDetector
- peakHorizontalDetector[:, 1]
),
)
),
) ## error of the horizontal detector
fitLevel1_2[f"fitLine_{str(i)}"].create_dataset(
"errorVerticalDetector",
dtype="f",
data=np.transpose(
(
peakVerticalDetector[:, 0],
np.absolute(
splitPseudoVoigt(
peakVerticalDetector[:, 0], optimal_parametersVD
)
+ backgroundVerticalDetector
- peakVerticalDetector[:, 1]
),
)
),
) ## error of the vertical detector
for n in range(doublet[i]):
fitParamsHD = np.append(
fitParamsHD,
np.append(
optimal_parametersHD[5 * n : 5 * n + 5],
[
infodictHD["reduced_chisq"],
100
* np.sum(
np.absolute(
splitPseudoVoigt(
peakHorizontalDetector[:, 0], optimal_parametersHD
)
+ backgroundHorizontalDetector
- peakHorizontalDetector[:, 1]
)
)
/ np.sum(peakHorizontalDetector[:, 1]),
],
),
axis=0,
) ##
fitParamsVD = np.append(
fitParamsVD,
np.append(
optimal_parametersVD[5 * n : 5 * n + 5],
[
infodictVD["reduced_chisq"],
100
* np.sum(
np.absolute(
splitPseudoVoigt(
peakVerticalDetector[:, 0], optimal_parametersVD
)
+ backgroundVerticalDetector
- peakVerticalDetector[:, 1]
)
)
/ np.sum(peakVerticalDetector[:, 1]),
],
),
axis=0,
) ##
uncertaintyFitParamsHD = np.append(
uncertaintyFitParamsHD, infodictHD["uncertainties"], axis=0
) ##
uncertaintyFitParamsVD = np.append(
uncertaintyFitParamsVD, infodictVD["uncertainties"], axis=0
) ##
rawDataLevel1_1.create_dataset(
"horizontalDetector", dtype="f", data=patternHorizontalDetector
) ## save raw data of the horizontal detector
rawDataLevel1_1.create_dataset(
"verticalDetector", dtype="f", data=patternVerticalDetector
) ## save raw data of the vertical detector
fitLevel1_2["fitParams"].create_dataset(
"fitParamsHD",
dtype="f",
data=np.reshape(fitParamsHD, (int(np.size(fitParamsHD) / 7), 7)),
) ## save parameters of the fit of HD
fitLevel1_2["fitParams"].create_dataset(
"fitParamsVD",
dtype="f",
data=np.reshape(fitParamsVD, (int(np.size(fitParamsVD) / 7), 7)),
) ## save parameters of the fit of VD
fitLevel1_2["fitParams"].create_dataset(
"uncertaintyParamsHD", dtype="f", data=uncertaintyFitParamsHD
) ## save uncertainty on the parameters of the fit of HD
fitLevel1_2["fitParams"].create_dataset(
"uncertaintyParamsVD", dtype="f", data=uncertaintyFitParamsVD
) ## save uncertainty on the parameters of the fit of VD
h5Save.close()
return
|
{"hexsha": "995ad727062c721ba44a035d48089aea936c5a10", "size": 13388, "ext": "py", "lang": "Python", "max_stars_repo_path": "easistrain/EDD/angleCalibEDD_OLD.py", "max_stars_repo_name": "woutdenolf/easistrain", "max_stars_repo_head_hexsha": "0484168e33e548af01a5cc649abf815c45b182f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "easistrain/EDD/angleCalibEDD_OLD.py", "max_issues_repo_name": "woutdenolf/easistrain", "max_issues_repo_head_hexsha": "0484168e33e548af01a5cc649abf815c45b182f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "easistrain/EDD/angleCalibEDD_OLD.py", "max_forks_repo_name": "woutdenolf/easistrain", "max_forks_repo_head_hexsha": "0484168e33e548af01a5cc649abf815c45b182f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-04T14:02:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-04T14:02:16.000Z", "avg_line_length": 39.4926253687, "max_line_length": 88, "alphanum_fraction": 0.536375859, "include": true, "reason": "import numpy", "num_tokens": 3025}
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
#coding=utf_8
import numpy as np
import cv2
import os
import sys
#from vai.dpuv1.tools.compile.bin.xfdnn_compiler_caffe import CaffeFrontend as xfdnnCompiler
#from decent import CaffeFrontend as xfdnnQuantizer
import subprocess
from vai.dpuv1.rt.scripts.framework.caffe.xfdnn_subgraph import CaffeCutter as xfdnnCutter
import caffe
from google.protobuf import text_format
from caffe.proto import caffe_pb2
import time
import argparse
def Quantize(prototxt,caffemodel,test_iter=1,calib_iter=1):
os.environ["DECENT_DEBUG"] = "1"
subprocess.call(["vai_q_caffe", "quantize",
"--model", prototxt,
"--weights", caffemodel,
"--calib_iter", str(calib_iter)])
# Standard compiler arguments for XDNNv3
def Getopts():
return {
"bytesperpixels":1,
"dsp":96,
"memory":9,
"ddr":"256",
"cpulayermustgo":True,
"usedeephi":True,
}
name = "inception_v2_ssd"
# Generate hardware instructions for runtime -> compiler.json
def Compile(prototxt="quantize_results/deploy.prototxt",\
caffemodel="quantize_results/deploy.caffemodel",\
quantize_info="quantize_results/quantize_info.txt"):
subprocess.call(["vai_c_caffe",
"--prototxt", prototxt,
"--caffemodel", caffemodel,
"--net_name", name,
"--output_dir", "work",
"--arch", "/opt/vitis_ai/compiler/arch/dpuv1/ALVEO/ALVEO.json",
"--options", "{\"quant_cfgfile\":\"%s\", \
\"pipelineconvmaxpool\":False, \
}" %(quantize_info)])
# Generate a new prototxt with custom python layer in place of FPGA subgraph
def Cut(prototxt):
cutter = xfdnnCutter(
inproto="quantize_results/deploy.prototxt",
trainproto=prototxt,
outproto="xfdnn_auto_cut_deploy.prototxt",
outtrainproto="xfdnn_auto_cut_train_val.prototxt",
cutAfter="data",
xclbin="/opt/xilinx/overlaybins/xdnnv3",
netcfg="work/compiler.json",
quantizecfg="work/quantizer.json",
weights="work/weights.h5",
#profile=True
)
cutter.cut()
##################### Mean and threshold configure #####################
view_threshold = 0.3
conf_threshold = 0.01
#################################################################################
font = cv2.FONT_HERSHEY_SIMPLEX
colors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
def get_labelname(labelmap, labels):
'''
Get label names from a labelmap for the given labels.
:param labelmap: map of label to display name
:param labels: label or list of labels
:return: list of label names
'''
num_labels = len(labelmap.item)
labelnames = []
if type(labels) is not list:
labels = [labels]
for label in labels:
found = False
for i in range(0, num_labels):
if label == labelmap.item[i].label:
found = True
labelnames.append(labelmap.item[i].display_name)
break
assert found, "label %s not found in labelmap" % label
return labelnames
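# A small sketch (illustration only; "labelmap.prototxt" is an assumed path)
# of the protobuf parsing performed below in declare_network:
def _labelmap_demo(labelmap_file="labelmap.prototxt"):
    lm = caffe_pb2.LabelMap()
    with open(labelmap_file, 'r') as f:
        text_format.Merge(str(f.read()), lm)
    return get_labelname(lm, [1])  # display name of label 1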
def declare_network(model_def, model_weights, labelmap_file, args):
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
net = caffe.Net(model_def, model_weights, caffe.TEST)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', np.array(args["img_mean"]))
transformer.set_input_scale('data', args["img_input_scale"])
return net,transformer,labelmap
def detect_one_image(net,transformer,labelmap,image_path,image_resize_height, image_resize_width, mean, is_view=False, image_name=None, fp=None):
'''
Run detection on a single image with the given network.
:param image_path: path to the image file
:param image_name: image name (used when recording results)
:param fp: handle of the results record file
:return: None
'''
assert os.path.exists(image_path)
image = cv2.imread(image_path)
assert image is not None
height, width = image.shape[0:2]
image_resize = cv2.resize(image, (image_resize_width, image_resize_height))
net.blobs['data'].reshape(1, 3, image_resize_height, image_resize_width)
transformed_image = transformer.preprocess('data', image_resize)
net.blobs['data'].data[...] = transformed_image
start = time.time()
detections = net.forward()['detection_out']
end = time.time()
print ("Foward time: ", end - start )
det_label = detections[0, 0, :, 1]
det_conf = detections[0, 0, :, 2]
det_xmin = detections[0, 0, :, 3]
det_ymin = detections[0, 0, :, 4]
det_xmax = detections[0, 0, :, 5]
det_ymax = detections[0, 0, :, 6]
top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_threshold]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_labels = get_labelname(labelmap, top_label_indices)
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
if top_conf.size > 0:
size = len(top_labels)
for i in range(size):
f_xmin = width * top_xmin[i]
int_xmin = int(f_xmin)
f_ymin = height * top_ymin[i]
int_ymin = int(f_ymin)
f_xmax = width * top_xmax[i]
int_xmax = int(f_xmax)
f_ymax = height * top_ymax[i]
int_ymax = int(f_ymax)
if is_view and top_conf[i] >= view_threshold:
color = colors_tableau[int(top_label_indices[i])]
print (str(top_labels[i]),':','xmin',int_xmin,'ymin',int_ymin,'xmax',int_xmax,'ymax', int_ymax)
cv2.rectangle(image, (int_xmin, int_ymin), (int_xmax, int_ymax), color, 1)
cv2.putText(image, str(top_labels[i]), (int_xmin, int_ymin + 10), font, 0.4, (255, 255, 255), 1)
cv2.putText(image, str(top_conf[i]), (int_xmin, int_ymin - 10), font, 0.4, (255, 255, 255), 1)
if image_name is not None and fp is not None:
fp.writelines(image_name + " " + str(top_labels[i]) + " " + str(top_conf[i]) + " " \
+ str(f_xmin) + " " + str(f_ymin) + " " + str(f_xmax) + " " + str(f_ymax) + "\n")
if is_view:
cv2.imwrite("res_det.jpg", image)
def compute_map_of_dataset(net, transformer, labelmap, image_list_file, det_res_file, gt_file, test_image_root, image_resize_height, image_resize_width, mean, compute_map_script_path):
'''
Compute mAP over the dataset.
:param image_list_file: file listing image names
:param det_res_file: output file for detection results
:param gt_file: ground truth file
:return: None
'''
assert os.path.exists(image_list_file)
f_image_list = open(image_list_file, 'r')
lines = f_image_list.readlines()
f_image_list.close()
f_res_record = open(det_res_file, 'w')
for line in lines:
image_name = line.strip()
image_path = test_image_root + image_name + '.jpg'
detect_one_image(net, transformer, labelmap, image_path, image_resize_height, image_resize_width, mean, image_name=image_name, fp=f_res_record)
f_res_record.close()
os.system("python2 " + compute_map_script_path + " -mode detection " + \
" -gt_file " + gt_file + " -result_file " + det_res_file \
+ " -detection_use_07_metric True")
def Detect(deploy_file, caffemodel, image,labelmap_file, args):
net, transformer, labelmap = declare_network(deploy_file, caffemodel, labelmap_file, args)
N, C, H, W = net.blobs['data'].data.shape
detect_one_image(net, transformer, labelmap, image, H, W, np.array(args["img_mean"]), is_view=True)
def Infer(prototxt, caffemodel, args):
net, transformer, labelmap = declare_network(prototxt, caffemodel, args["labelmap_file"], args)
N, C, H, W = net.blobs['data'].data.shape
compute_map_of_dataset(net, transformer, labelmap, args["image_list_file"], args["det_res_file"], args["gt_file"], args["test_image_root"], H, W, np.array(args["img_mean"]), args["compute_map_script_path"])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--prototxt', required=True, type=str,
help='Provide the xfdnn_auto_cut prototxt generated by subgraph or original deploy.prototxt')
parser.add_argument('--caffemodel', required=True, type=str,
help = 'Provide the caffemodel file')
parser.add_argument('--prepare', action="store_true", help='In prepare mode, model preparation will be performed = Quantize + Compile')
parser.add_argument('--qtest_iter', type=int, default=1, help='User can provide the number of iterations to test the quantization')
parser.add_argument('--qcalib_iter', type=int, default=1, help='User can provide the number of iterations to run the quantization')
parser.add_argument('--labelmap_file', type=str, help='Provide the labelmap file')
parser.add_argument('--image', type=str, help='Provide image path')
parser.add_argument('--validate', action="store_true", help='If validation is enabled, the model will be run on the FPGA and the validation set examined')
parser.add_argument('--image_list_file', type=str, help='Provide image_list_file')
parser.add_argument('--test_image_root', type=str, help='images root directory')
parser.add_argument('--det_res_file', type=str, default= 'ssd_det_res.txt', help='Provide detected result file')
parser.add_argument('--gt_file', type=str, help='Ground truth file')
parser.add_argument('--compute_map_script_path', default= './evaluation_py2.py', type=str, help='compute map script path')
parser.add_argument('--img_mean',type=int, nargs=3, default=[104,117,123], # BGR for Caffe
help='image mean values ')
parser.add_argument('--img_input_scale', type=float, default=1.0, help='image input scale value ')
args = vars(parser.parse_args())
if args["prepare"]:
Quantize(args["prototxt"],args["caffemodel"], args["qtest_iter"], args["qcalib_iter"])
Compile()
Cut(args["prototxt"])
if args["image"]:
Detect("xfdnn_auto_cut_deploy.prototxt", args["caffemodel"], args["image"], args["labelmap_file"], args)
if args["validate"]:
if ((args["image_list_file"] == None) or (args["gt_file"] == None) or (args["test_image_root"] == None)):
print ('Provide the arguments for image_list_file, gt_file and test_image_root')
exit(0)
Infer("xfdnn_auto_cut_deploy.prototxt",args["caffemodel"], args)
|
{"hexsha": "3349d687dcd911cb984dcb469fdca4fb155a0b52", "size": 11596, "ext": "py", "lang": "Python", "max_stars_repo_path": "alveo/examples/caffe/ssd-detect/run_ssd.py", "max_stars_repo_name": "abid-k/Vitis-AI", "max_stars_repo_head_hexsha": "80d327afe3b9bf76dbbb384d0da4d2603d256bc3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-16T08:27:59.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-16T08:27:59.000Z", "max_issues_repo_path": "alveo/examples/caffe/ssd-detect/run_ssd.py", "max_issues_repo_name": "abid-k/Vitis-AI", "max_issues_repo_head_hexsha": "80d327afe3b9bf76dbbb384d0da4d2603d256bc3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "alveo/examples/caffe/ssd-detect/run_ssd.py", "max_forks_repo_name": "abid-k/Vitis-AI", "max_forks_repo_head_hexsha": "80d327afe3b9bf76dbbb384d0da4d2603d256bc3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6323529412, "max_line_length": 209, "alphanum_fraction": 0.6497930321, "include": true, "reason": "import numpy", "num_tokens": 3068}
|
[STATEMENT]
lemma subspace_inter: "subspace A \<Longrightarrow> subspace B \<Longrightarrow> subspace (A \<inter> B)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>subspace A; subspace B\<rbrakk> \<Longrightarrow> subspace (A \<inter> B)
[PROOF STEP]
by (simp add: subspace_def)
|
{"llama_tokens": 105, "file": null, "length": 1}
|
"""Generate iset (interpolated set) from a given correlation matrix or list. iset can then be used as a guide for aligning texts."""
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
# import joblib
# import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
# from absl import app, flags
import logzero
from logzero import logger
from tinybee.lowess_pairs import lowess_pairs
from tinybee.dbscan_pairs import dbscan_pairs
from tinybee.gen_row_align import gen_row_align
from tinybee.interpolate_pset import interpolate_pset
# FLAGS = flags.FLAGS
# flags.DEFINE_boolean("debug", False, "print debug messages.", short_name="d")
# fmt: off
def gen_iset(
cmat1: Union[List[List[float]], np.ndarray, pd.DataFrame],
verbose: Union[bool, float] = False,
estimator: str = "dbscan", # vs lowess
) -> List[Tuple[int, int]]:
# fmt: on
"""Generate pset (pair set) from a given correlation matrix or list.
Args:
cmat: correlation (similarity) matrix
verbose: show verbose messages
show plotting (plot_flag set to True)
when set to True or <= 10
Returns:
pair of integers as a guide for aligning
"""
if isinstance(verbose, bool):
if verbose:
verbose = 10
else:
verbose = 20
logzero.loglevel(verbose)
plot_flag = False
if verbose <= 10:
plot_flag = True
# if isinstance(cmat, list):
cmat = np.array(cmat1)
logger.debug("cmat.shape: %s", cmat.shape)
# yhat = lowess_pairs(cmat)
# if not yhat: use yhat = dbscan_pairs(cmat)
if estimator in ["lowess"]:
logger.info("Need to install statsmodels")
yhat = lowess_pairs(cmat)
else:
yhat = dbscan_pairs(cmat)
if plot_flag:
df0 = pd.DataFrame(yhat, columns=["y00", "yargmax", "ymax"])
fig, ax = plt.subplots()
sns.scatterplot(data=df0, x="y00", y="yargmax", size="ymax", sizes=(1, 110))
if "get_ipython" not in globals():
plt.show(block=True)
src_len, tgt_len = cmat.shape
# eliminate points not in range between neighbors
# probably not necessary, already done in dbscan_pairs
pset = gen_row_align(yhat, src_len, tgt_len)
if plot_flag:
df1 = pd.DataFrame(pset, columns=["y00", "yargmax", "ymax"])
fig, ax = plt.subplots()
sns.scatterplot(data=df1, x="y00", y="yargmax", size="ymax", sizes=(1, 110))
if "get_ipython" not in globals():
plt.show(block=True)
iset = interpolate_pset(pset, tgt_len)
if plot_flag:
df2 = pd.DataFrame(iset, columns=["y00", "yargmax"])
fig, ax = plt.subplots()
sns.scatterplot(data=df2, x="y00", y="yargmax")
if "get_ipython" not in globals():
plt.show(block=True)
# return [(1, 1)]
return iset
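# A quick sketch (illustration only; results vary on random input): shapes in,
# guide pairs out. Real cmat values come from upstream similarity scoring.
def _demo_iset():
    cmat = np.random.rand(10, 12)  # 10 source rows x 12 target columns
    return gen_iset(cmat)  # (row, col) guide pairs, interpolated over targets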
_ = '''
def main(argv):
"""Test main."""
logger.info(argv)
if FLAGS.debug:
logzero.loglevel(10)
cmat = joblib.load("data/cmat.lzma")
res = gen_iset(
# [[1., 1.]],
cmat,
verbose=FLAGS.debug
)
# logger.debug("res: %s, %s", res, res[68])
# logger.info("res: %s, %s", res, res[68])
logger.debug("res[68]: %s", res[68])
if __name__ == "__main__":
app.run(main)
# python -m tinybee.gen_iset -d
# '''
|
{"hexsha": "5cb4323d5fe5c9e3b7be3bfa7ca3089e6c4c8918", "size": 3377, "ext": "py", "lang": "Python", "max_stars_repo_path": "tinybee/gen_iset.py", "max_stars_repo_name": "ffreemt/tinybee-aligner", "max_stars_repo_head_hexsha": "5a38b0634df12a197e50ba21efb017d58e4cd02d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tinybee/gen_iset.py", "max_issues_repo_name": "ffreemt/tinybee-aligner", "max_issues_repo_head_hexsha": "5a38b0634df12a197e50ba21efb017d58e4cd02d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tinybee/gen_iset.py", "max_forks_repo_name": "ffreemt/tinybee-aligner", "max_forks_repo_head_hexsha": "5a38b0634df12a197e50ba21efb017d58e4cd02d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8015873016, "max_line_length": 132, "alphanum_fraction": 0.6197808706, "include": true, "reason": "import numpy", "num_tokens": 924}
|
using Bridge
import Base.valtype
abstract type DiffusionDomain end
struct UnboundedDomain <: DiffusionDomain end
bound_satisfied(::UnboundedDomain, x) = true
struct LowerBoundedDomain{T,N} <: DiffusionDomain
bounds::NTuple{N,T}
coords::NTuple{N,Integer}
function LowerBoundedDomain(bounds::NTuple{N,T},
coords::NTuple{N,Integer}) where {N,T}
new{T,N}(bounds, coords)
end
function LowerBoundedDomain(bounds::NTuple{N,T}, coords) where {N,T}
@assert length(coords) == N
@assert all([typeof(c) <: Integer for c in coords])
new{T,N}(bounds, Tuple(coords))
end
function LowerBoundedDomain(bounds::Vector{T}, coords) where T
N = length(bounds)
@assert length(coords) == N
@assert all([typeof(c) <: Integer for c in coords])
new{T,N}(Tuple(bounds), Tuple(coords))
end
end
function bound_satisfied(d::LowerBoundedDomain{T,N}, x) where {T,N}
for i in 1:N
(x[d.coords[i]] < d.bounds[i]) && return false
end
true
end
struct UpperBoundedDomain{T,N} <: DiffusionDomain
bounds::NTuple{N,T}
coords::NTuple{N,Integer}
function UpperBoundedDomain(bounds::NTuple{N,T},
coords::NTuple{N,Integer}) where {T,N}
new{T,N}(bounds, coords)
end
function UpperBoundedDomain(bounds::NTuple{N,T}, coords) where {T,N}
@assert length(coords) == N
@assert all([typeof(c) <: Integer for c in coords])
new{T,N}(bounds, Tuple(coords))
end
function UpperBoundedDomain(bounds::Vector{T}, coords) where T
N = length(bounds)
@assert length(coords) == N
@assert all([typeof(c) <: Integer for c in coords])
new{T,N}(Tuple(bounds), Tuple(coords))
end
end
function bound_satisfied(d::UpperBoundedDomain{T,N}, x) where {T,N}
for i in 1:N
(x[d.coords[i]] > d.bounds[i]) && return false
end
true
end
struct BoundedDomain{T,N1,N2} <: DiffusionDomain
lowBds::LowerBoundedDomain{T,N1}
upBds::UpperBoundedDomain{T,N2}
function BoundedDomain(lowBds::LowerBoundedDomain{T,N1},
upBds::UpperBoundedDomain{T,N2}) where {T,N1,N2}
new{T,N1,N2}(lowBds, upBds)
end
function BoundedDomain(lowBds, lowBdsCoords, upBds, upBdsCoords)
lowBdsObj = LowerBoundedDomain(lowBds, lowBdsCoords)
upBdsObj = UpperBoundedDomain(upBds, upBdsCoords)
T,N1 = valtype(lowBdsObj)
S,N2 = valtype(upBdsObj)
@assert T == S
new{T,N1,N2}(lowBdsObj, upBdsObj)
end
end
bound_satisfied(d, x) = bound_satisfied(d.lowBds, x) && bound_satisfied(d.upBds, x)
valtype(::LowerBoundedDomain{T,N}) where {T,N} = T,N
valtype(::UpperBoundedDomain{T,N}) where {T,N} = T,N
valtype(::BoundedDomain{T,N1,N2}) where {T,N1,N2} = T,N1,N2
DomainSomehowBounded = Union{LowerBoundedDomain,UpperBoundedDomain,BoundedDomain}
domain(::Any) = UnboundedDomain() # by default no restrictions
"""
check_domain_adherence(P::ContinuousTimeProcess, XX::SamplePath)
Check domain calling `domain`
"""
check_domain_adherence(P::ContinuousTimeProcess, XX::SamplePath) =
check_domain_adherence(P, XX , domain(P.Target))
"""
check_domain_adherence(P::Vector{ContinuousTimeProcess},
XX::Vector{SamplePath}, iRange)
Verify whether all paths in the range `iRange`, i.e. `XX[i].yy`, i in `iRange`
fall on the interior of the domain of diffusions `P[i]`, i in `iRange`
"""
function check_domain_adherence(P::Vector{S}, XX::Vector{T}, iRange
) where {S<:ContinuousTimeProcess, T<:SamplePath}
for i in iRange
!check_domain_adherence(P[i], XX[i]) && return false
end
true
end
"""
check_domain_adherence(P::ContinuousTimeProcess, XX::SamplePath,
d::UnboundedDomain)
For unrestricted domains there is nothing to check
"""
function check_domain_adherence(P::ContinuousTimeProcess, XX::SamplePath,
d::UnboundedDomain)
true
end
"""
check_domain_adherence(P::ContinuousTimeProcess, XX::SamplePath,
d::DiffusionDomain)
Verify whether path `XX.yy` falls on the interior of the domain of diffusion `P`
"""
function check_domain_adherence(P::ContinuousTimeProcess, XX::SamplePath,
d::DiffusionDomain)
N = length(XX)
for i in 1:N
!bound_satisfied(d, XX.yy[i]) && return false
end
true
end
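# Usage sketch (illustration only, not part of the original file): a process
# whose first coordinate must stay above 0.0, second coordinate unconstrained.
# d = LowerBoundedDomain((0.0,), (1,))
# bound_satisfied(d, [0.5, -3.0])   # true
# bound_satisfied(d, [-0.5, 1.0])   # false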
|
{"hexsha": "16ca34bbc56904433c8737413ee955f9fe0f19e8", "size": 4504, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/stochastic_process/bounded_diffusion_domain.jl", "max_stars_repo_name": "mmider/BridgeSDEInference.jl", "max_stars_repo_head_hexsha": "c18dbe9c45bba9ef1d19e70deec8754df2c05293", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-07-25T15:29:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T16:02:39.000Z", "max_issues_repo_path": "src/stochastic_process/bounded_diffusion_domain.jl", "max_issues_repo_name": "mmider/BridgeSDEInference.jl", "max_issues_repo_head_hexsha": "c18dbe9c45bba9ef1d19e70deec8754df2c05293", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2019-07-23T19:50:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-20T12:03:33.000Z", "max_forks_repo_path": "src/stochastic_process/bounded_diffusion_domain.jl", "max_forks_repo_name": "mmider/FitzHughNagumo.jl", "max_forks_repo_head_hexsha": "c18dbe9c45bba9ef1d19e70deec8754df2c05293", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-08-03T20:48:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-05T22:39:08.000Z", "avg_line_length": 30.4324324324, "max_line_length": 83, "alphanum_fraction": 0.6440941385, "num_tokens": 1227}
|
[STATEMENT]
lemma length_append [simp]: "length (xs @ ys) = length xs + length ys"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (xs @ ys) = length xs + length ys
[PROOF STEP]
by (induct xs) auto
|
{"llama_tokens": 82, "file": null, "length": 1}
|
@testset "GrpAbFinGen" begin
@testset "Type stuff" begin
@test elem_type(GrpAbFinGen) == GrpAbFinGenElem
@test parent_type(GrpAbFinGenElem) == GrpAbFinGen
end
@testset "Constructor" begin
M1 = matrix(FlintZZ, 2, 3, [1, 2, 3, 4, 5, 6])
G = @inferred AbelianGroup(M1)
@test isa(G, GrpAbFinGen)
@test G.rels == M1
M = FlintZZ[1 2 3; 4 5 6] # fmpz_mat
G = @inferred AbelianGroup(M)
@test isa(G, GrpAbFinGen)
@test G.rels == M1
M = fmpz[1 2 3; 4 5 6]
G = @inferred AbelianGroup(M)
@test isa(G, GrpAbFinGen)
@test G.rels == M1
M = [1 2 3; 4 5 6]
G = @inferred AbelianGroup(M)
@test isa(G, GrpAbFinGen)
@test G.rels == M1
M = fmpz[1, 2, 3, 4, 5, 6]
G = @inferred AbelianGroup(M)
@test isa(G, GrpAbFinGen)
@test G.rels == matrix(FlintZZ, 1, 6, M)
M = [1, 2, 3, 4, 5, 6]
G = @inferred AbelianGroup(M)
@test isa(G, GrpAbFinGen)
@test G.rels == matrix(FlintZZ, 1, 6, M)
M = [3, 0]
G = @inferred DiagonalGroup(M)
@test isa(G, GrpAbFinGen)
M = fmpz[3, 0]
G = @inferred DiagonalGroup(M)
@test isa(G, GrpAbFinGen)
M = matrix(FlintZZ, 1, 2, [3, 0])
G = @inferred DiagonalGroup(M)
@test isa(G, GrpAbFinGen)
N = [3, 5]
G = @inferred DiagonalGroup(N)
@test isa(G, GrpAbFinGen)
@test G.rels == matrix(FlintZZ, 2, 2, [3, 0, 0, 5])
N = fmpz[3, 5]
G = @inferred DiagonalGroup(N)
@test isa(G, GrpAbFinGen)
@test G.rels == matrix(FlintZZ, 2, 2, [3, 0, 0, 5])
N = matrix(FlintZZ, 1, 2, [3, 5])
G = @inferred DiagonalGroup(N)
@test isa(G, GrpAbFinGen)
@test G.rels == matrix(FlintZZ, 2, 2, [3, 0, 0, 5])
@test_throws ErrorException DiagonalGroup(FlintZZ[1 2; 3 4])
@test issnf(M)
end
@testset "String I/O" begin
N = matrix(FlintZZ, 1, 2, [3, 5])
G = DiagonalGroup(N)
@test isa(string(G), String)
end
@testset "Field access" begin
S = DiagonalGroup([3, 0])
@test @inferred issnf(S)
@test @inferred ngens(S) == 2
@test @inferred nrels(S) == 2
@test @inferred rels(S) == matrix(FlintZZ, 2, 2, [3, 0, 0, 0])
G = DiagonalGroup([3, 5])
@test @inferred !issnf(G)
@test @inferred ngens(G) == 2
@test @inferred nrels(G) == 2
@test @inferred rels(G) == matrix(FlintZZ, 2, 2, [3, 0, 0, 5])
end
@testset "Hermite normal form" begin
M = FlintZZ[1 2 3; 4 5 6]
HNF = FlintZZ[1 2 3; 0 3 6]
G = AbelianGroup(M)
Hecke.assure_has_hnf(G)
@test G.hnf == HNF
end
@testset "Smith normal form" begin
M = FlintZZ[16 17 2 ; 19 23 8 ; 16 17 2]
G = AbelianGroup(M)
S, mS = @inferred snf(G)
@test issnf(S)
@test S.snf == fmpz[45, 0]
@test codomain(mS) == G
@test domain(mS) == S
end
@testset "Finiteness" begin
G = DiagonalGroup([3, 15])
@test issnf(G)
@test @inferred isfinite(G)
@test @inferred !isinfinite(G)
G = DiagonalGroup([3, 5])
@test @inferred isfinite(G)
@test @inferred !isinfinite(G)
G = DiagonalGroup([3, 15, 0])
@test issnf(G)
@test @inferred !isfinite(G)
@test @inferred isinfinite(G)
G = DiagonalGroup([3, 5, 0])
@test @inferred !isfinite(G)
@test @inferred isinfinite(G)
end
@testset "Rank" begin
G = DiagonalGroup([3, 15])
@test @inferred rank(G) == 0
G = DiagonalGroup([3, 5])
@test @inferred rank(G) == 0
G = DiagonalGroup([3, 15, 0])
@test @inferred rank(G) == 1
G = DiagonalGroup([3, 5, 0])
@test @inferred rank(G) == 1
end
@testset "Order" begin
G = DiagonalGroup([3, 5])
@test @inferred order(G) == 15
G = DiagonalGroup([3, 15])
@test @inferred order(G) == 45
G = DiagonalGroup([3, 5, 0])
@test_throws ErrorException order(G)
end
@testset "Exponent" begin
G = DiagonalGroup([3, 5])
@test @inferred exponent(G) == 15
G = DiagonalGroup([3, 15])
@test @inferred exponent(G) == 15
end
@testset "Trivial" begin
G = DiagonalGroup([1])
@test @inferred istrivial(G)
G = DiagonalGroup([1, 1, 1])
@test @inferred istrivial(G)
G = DiagonalGroup([3, 3])
@test @inferred !istrivial(G)
G = DiagonalGroup([3, 5])
@test @inferred !istrivial(G)
end
@testset "Isomorphism" begin
b = @inferred isisomorphic(DiagonalGroup(Int[]), DiagonalGroup(Int[]))
@test b
G = DiagonalGroup([2, 3, 5])
H = DiagonalGroup([30])
@test @inferred isisomorphic(G, H)
end
@testset "Direct product" begin
G = DiagonalGroup([5, 3])
H = DiagonalGroup([4])
K = direct_product(G, H)[1]
@test isisomorphic(K, DiagonalGroup([60]))
end
@testset "Torsion" begin
G = DiagonalGroup([5, 4])
@test @inferred istorsion(G)
H, mH = torsion_subgroup(G)
@test order(H) == 20
G = DiagonalGroup([5, 0, 4, 0])
@test @inferred !istorsion(G)
H, mH = torsion_subgroup(G)
@test isisomorphic(H, DiagonalGroup([5, 4]))
end
@testset "Subgroup" begin
@test_throws ErrorException sub(GrpAbFinGenElem[])
G = AbelianGroup(FlintZZ[3 0 0 ; 0 15 0])
g1 = G[1]
g2 = G[2]
g3 = G[3]
S, S_map = @inferred sub([g1, g2, g3])
@test isisomorphic(G, S)
G = AbelianGroup(FlintZZ[3 0 0 ; 0 15 0])
S, mS = snf(G)
s1 = S[1]
s2 = S[2]
s3 = S[3]
H, mH = @inferred sub(S, [s1, s2, s3])
@test isisomorphic(H, G)
G = AbelianGroup(FlintZZ[3 0 0 ; 0 15 0])
g1 = G[1]
H, mH = @inferred sub(G, [g1])
@test isisomorphic(H, DiagonalGroup([3]))
G = AbelianGroup(FlintZZ[3 0 0 ; 0 15 0])
S, mS = snf(G)
s1 = S[1]
H, mH = @inferred sub(S, [s1])
@test isisomorphic(H, DiagonalGroup([3]))
# G contains empty relation
G = AbelianGroup(FlintZZ[3 0 0 ; 0 15 0 ; 0 0 30 ; 0 0 0])
g1 = G[3]
S, mS = @inferred sub(G, [g1])
@test isisomorphic(S, DiagonalGroup([30]))
# n*G
G = DiagonalGroup([6, 6, 12, 5])
H, mH = @inferred sub(G, 2)
@test isisomorphic(H, DiagonalGroup([3, 3, 6, 5]))
H, mH = @inferred sub(G, fmpz(2))
@test isisomorphic(H, DiagonalGroup([3, 3, 6, 5]))
end
@testset "Quotient" begin
G = AbelianGroup(FlintZZ[3 0 0 ; 0 15 0])
Q, mQ = @inferred quo(G, GrpAbFinGenElem[])
@test isisomorphic(Q, G)
g2 = G[2]
Q, mQ = @inferred quo(G, [g2])
@test isisomorphic(Q, DiagonalGroup([3, 0]))
S = DiagonalGroup([3, 15, 0])
@test issnf(S)
g2 = S[2]
Q, mQ = @inferred quo(S, [g2])
@test isisomorphic(Q, DiagonalGroup([3, 0]))
G = DiagonalGroup([6, 6, 12, 5, 0])
H, mH = @inferred quo(G, 2)
@test isisomorphic(H, DiagonalGroup([2, 2, 2, 2]))
H, mH = @inferred quo(G, fmpz(2))
@test isisomorphic(H, DiagonalGroup([2, 2, 2, 2]))
end
@testset "Cyclic" begin
G = DiagonalGroup([3, 5])
@test @inferred iscyclic(G)
G = DiagonalGroup([3, 15])
@test @inferred !iscyclic(G)
end
@testset "p-Sylow subgroup" begin
G = DiagonalGroup([1, 3, 9, 5, 15, 20, 7])
P, mP = psylow_subgroup(G, 3)
@test order(P) == 3^valuation(order(G), 3)
P, mP = psylow_subgroup(G, 5)
@test order(P) == 5^valuation(order(G), 5)
P, mP = psylow_subgroup(G, 11)
@test order(P) == 11^valuation(order(G), 11)
end
@testset "All abelian groups" begin
l = collect(abelian_groups(1))
@test length(l) == 1
l = collect(abelian_groups(2))
@test length(l) == 1
l = collect(abelian_groups(2^6 * 3^3 * 5^2 * 7))
@test length(l) == 66
end
@testset "HomAlg" begin
G = AbelianGroup([3 1; 0 3])
S, mS = snf(G)
H, mH = hom(G,G)
T, p = tensor_product(G, G)
D = direct_product(G, G, task = :none)
for i=1:5
Th = hom(T, T, map(mH, [rand(H) for x = 1:2])) #induced map in tensor product
Dh = hom(D, D, map(mH, rand(H, (2,2)))) #induced map on direct prod
end
C = free_resolution(G)
D = free_resolution(S)
@test isexact(C)
hom(D, C, mS)
E = hom(D, T)
@test 9 == prod(order(x) for x = homology(E))
@test !isexact(E)
E = hom(T, C)
@test !isexact(E)
E = tensor_product(C, T)
@test !isexact(E)
A = AbelianGroup([3 1; 0 3])
B = AbelianGroup([9 2 1; 0 12 1; 0 0 25])
C = DiagonalGroup([3, 4, 0])
@test isisomorphic(hom(tensor_product(A, B, task = :none), C)[1],
hom(A, hom(B, C)[1])[1])
end
end
|
{"hexsha": "b5bea786d5084ff465cf135050036cf863407352", "size": 8341, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/GrpAb/GrpAbFinGen.jl", "max_stars_repo_name": "edgarcosta/Hecke.jl", "max_stars_repo_head_hexsha": "3ba4c63908eaa256150a055491a6387a45b081ec", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/GrpAb/GrpAbFinGen.jl", "max_issues_repo_name": "edgarcosta/Hecke.jl", "max_issues_repo_head_hexsha": "3ba4c63908eaa256150a055491a6387a45b081ec", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/GrpAb/GrpAbFinGen.jl", "max_forks_repo_name": "edgarcosta/Hecke.jl", "max_forks_repo_head_hexsha": "3ba4c63908eaa256150a055491a6387a45b081ec", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9037267081, "max_line_length": 83, "alphanum_fraction": 0.5712744275, "num_tokens": 3400}
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from scipy.io import savemat, loadmat
import numpy as np
import drawdown as dd
from pyPCGA import PCGA
import math
if __name__ == '__main__': # for windows application
# model domain and discretization
# This is a 1D case, therefore should be used to test the 1D scenario
####### BEGINNING OF MODULE 1 ####################
# M1 parameters are: Lx, Ly, Lz, x0, y0, z0, dx, dy, dz, s_true, s_init
x0 = 23 # M1: Origin of x dimension
Lx = 500 # M1: Total length in the x direction
dxx = 0.05 # M1: Discretization (cell length) in the x direction, assumes cells of equal size
# This simulation is 1D, therefore default to y_origin = z_origin = 0, Ly = Lz = 1, dy = dz = 1
y0 = 0 # M1: Origin of y dimension
Ly = 1 # M1: Total length in the y direction
dyy = 1 # M1: Discretization (cell length) in the y direction, assumes cells of equal size
z0 = 0 # M1: Origin of y dimension
Lz = 1 # M1: Total length in the y direction
dzz = 1 # M1: Discretization (cell length) in the z direction, assumes cells of equal size
xmin = np.array([x0])
xmax = np.array([x0 + Lx])
m = int(Lx / dxx + 1)  # number of grid points
N = np.array([m])
dx = np.array([dxx])
x = np.linspace(xmin, xmax, m)
pts = np.copy(x)
s_true = np.loadtxt('true.txt') # M1: input for file "true.txt"
# s_init, three options (drop down menu)
# option 1: user inputs a constant which gets assigned to variable s_constant
s_constant = 1 # M1: User selects constant checkbox from drop down, and inputs number in box
s_init = s_constant * np.ones((m, 1))
# option 2: s_init automatically calculated from s_true, if s_true is provided
# M1: User selects Auto checkbox from drop down, and a check is run to see if s_true was provided
s_init = np.mean(s_true) * np.ones((m, 1))  # M1 file input or constant input
# s_init = np.copy(s_true) # you can try with s_true!
### PLOTTING FOR 1D MODULE 1 #############
fig = plt.figure()
plt.plot(x,s_init,'k-',label='initial')
plt.plot(x,s_true,'r-',label='true')
plt.title('Pumping history')
plt.xlabel('Time (min)')
plt.ylabel(r'Q ($m^3$/min)')
plt.legend()
fig.savefig('best.png')
plt.close(fig)
####### END OF MODULE 1 ####################
# xloc,yloc,zloc are uniformly distributed
#xloc = [xmin:10:xmax]
#import pdb
#pdb.set_trace()
# covarIance kernel and scale parameters
#prior_std = 0.04 #Module 4 (R)
#prior_cov_scale = np.array([200.0]) #M4 lambdas, lx, ly, lz
#def kernel(r): return (prior_std ** 2) * np.exp(-r) # M4Kernel use switch function
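# Worked illustration of the commented-out M4 kernel above (assumed values):
# with prior_std = 0.04, kernel(0.0) = 0.04**2 = 0.0016 is the prior variance,
# and kernel(1.0) = 0.0016 * np.exp(-1) ~= 5.89e-4, i.e. the covariance decays
# exponentially with distance scaled by prior_cov_scale.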
#obs = np.loadtxt('obs.txt') # M3 file input
# prepare interface to run as a function
#def forward_model(s, parallelization, ncores=None):
# params = {}
# model = dd.Model(params)
#
# if parallelization:
# simul_obs = model.run(s, parallelization, ncores)
# else:
# simul_obs = model.run(s, parallelization)
# return simul_obs
#M 4 parameters
#params = {'R': (0.04) ** 2, 'n_pc': 50,
# 'maxiter': 10, 'restol': 0.01,
# 'matvec': 'FFT', 'xmin': xmin, 'xmax': xmax, 'N': N,
# 'prior_std': prior_std, 'prior_cov_scale': prior_cov_scale,
# 'kernel': kernel, 'post_cov': "diag",
# 'precond': True, 'LM': True,
# 'parallel': True, 'linesearch': True,
# 'forward_model_verbose': False, 'verbose': False,
# 'iter_save': True}
# params['objeval'] = False; if True, it will compute the exact objective function
# params['ncores'] = 36; with parallel True, it will use the maximum number of physical cores unless specified
# initialize
#prob = PCGA(forward_model, s_init, pts, params, s_true, obs)
# prob = PCGA(forward_model, s_init, pts, params, s_true, obs, X = X) #if you want to add your own drift X
# run inversion
#s_hat, simul_obs, post_diagv, iter_best = prob.Run()
#post_diagv[post_diagv < 0.] = 0. # just in case
#post_std = np.sqrt(post_diagv)
### BEGINNING OF PLOTTING #############
#fig = plt.figure()
#plt.plot(x,s_hat,'k-',label='estimated')
#plt.plot(x,s_hat + 2.*post_std,'k--',label='95%')
#plt.plot(x,s_hat - 2.*post_std,'k--',label='')
#plt.plot(x,s_true,'r-',label='true')
#plt.title('pumping history')
#plt.xlabel('time (min)')
#plt.ylabel(r's ($m^3$/min)')
#plt.legend()
#fig.savefig('best.png')
#plt.close(fig)
#nobs = prob.obs.shape[0]
#fig = plt.figure()
#plt.title('obs. vs simul.')
#plt.plot(prob.obs, simul_obs, '.')
#plt.xlabel('observation')
#plt.ylabel('simulation')
#minobs = np.vstack((prob.obs, simul_obs)).min(0)
#maxobs = np.vstack((prob.obs, simul_obs)).max(0)
#plt.plot(np.linspace(minobs, maxobs, 20), np.linspace(minobs, maxobs, 20), 'k-')
#plt.axis('equal')
#axes = plt.gca()
#axes.set_xlim([math.floor(minobs), math.ceil(maxobs)])
#axes.set_ylim([math.floor(minobs), math.ceil(maxobs)])
#fig.savefig('obs.png')
# plt.show()
#plt.close(fig)
#fig = plt.figure()
#plt.semilogy(np.linspace(1,len(prob.objvals),len(prob.objvals)), prob.objvals, 'r-')
#plt.xticks(np.linspace(1,len(prob.objvals),len(prob.objvals)))
#plt.title('obj values over iterations')
#plt.axis('tight')
#fig.savefig('obj.png')
#plt.close(fig)
|
{"hexsha": "7640263fc7b109bcd678dac24804066e1d4b8bc6", "size": 5614, "ext": "py", "lang": "Python", "max_stars_repo_path": "pumping_history_identification/example_linear_inversion_testing.py", "max_stars_repo_name": "offglitch/GUI-pyPCGA", "max_stars_repo_head_hexsha": "f76ae8f4e97273ebacf8f0dbd8491e00dc900f75", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-20T01:37:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-04T11:09:31.000Z", "max_issues_repo_path": "pumping_history_identification/example_linear_inversion_testing.py", "max_issues_repo_name": "offglitch/GUI-pyPCGA", "max_issues_repo_head_hexsha": "f76ae8f4e97273ebacf8f0dbd8491e00dc900f75", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pumping_history_identification/example_linear_inversion_testing.py", "max_forks_repo_name": "offglitch/GUI-pyPCGA", "max_forks_repo_head_hexsha": "f76ae8f4e97273ebacf8f0dbd8491e00dc900f75", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0242424242, "max_line_length": 110, "alphanum_fraction": 0.6058069113, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1705}
|
import cv2
import numpy as np
import torch
from torch import Tensor
import torch.nn.functional as F
piece_count = 3
batch_size = 2
padding = 6
def imwrite(filename: str, image: Tensor):
cv2.imwrite(filename, convert_to_cv_8bit(image))
def convert_to_cv_8bit(image: Tensor):
if len(image.shape) == 4:
image = image.squeeze(0)
output = torch.clamp(image / 2 + 0.5, min=0, max=1)
output = (np.transpose(output.cpu().numpy(), (1, 2, 0)) * 255).astype(np.uint8)
return cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
def convert_to_cv_float(image: Tensor):
if len(image.shape) == 4:
image = image.squeeze(0)
output = torch.clamp(image / 2 + 0.5, min=0, max=1)
output = (np.transpose(output.cpu().numpy(), (1, 2, 0)) * 255).astype(np.float32)
return cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
def scale(image: Tensor, aspect_ratio: float=1.0,
extra_scale: float=1.0, even_rounding: bool=False):
if aspect_ratio != 1.0 or extra_scale != 1.0:
unsq_dim = 0
while len(image.shape) < 4:
image = image.unsqueeze(0)
unsq_dim += 1
if even_rounding:
size = even_round(image.shape[2] * extra_scale,
image.shape[3] * aspect_ratio * extra_scale)
else:
size = (int(round(image.shape[2] * extra_scale)),
int(round(image.shape[3] * aspect_ratio * extra_scale)))
image = torch.clamp(F.interpolate(
image, size=size, mode='bicubic', align_corners=True
), min=-1, max=1)
while unsq_dim > 0:
image = image.squeeze(0)
unsq_dim -= 1
return image
def even_round(*args):
rounded = []
for arg in args:
rnd = int(arg)
if rnd % 2 != 0:
rnd += 1
rounded.append(rnd)
return rounded
def make_batch(image: Tensor) -> list:
pieces = cut_image(image)
batches = []
for i in range(len(pieces) // batch_size):
batches.append(torch.cat(pieces[i * batch_size:(i + 1) * batch_size], dim=0))
if len(pieces) % batch_size != 0:
batches.append(torch.cat(pieces[(len(pieces) // batch_size) * batch_size:], dim=0))
return batches
def unbatch(batches: list) -> Tensor:
pieces = []
for batch in batches:
pieces.extend(torch.unbind(batch, dim=0))
for i in range(len(pieces)):
pieces[i] = pieces[i].unsqueeze(0)
return glue_image(pieces)
def cut_image(image: Tensor) -> list:
_, c, h, w = image.shape
h //= piece_count
w //= piece_count
pieces = []
for i in range(piece_count):
for j in range(piece_count):
pieces.append(image[:, :, i * h - teta(i) * padding:(i + 1) * h + teta(piece_count - 1 - i) * padding,
j * w - teta(j) * padding:(j + 1) * w + teta(piece_count - 1 - j) * padding])
return pieces
def glue_image(pieces: list) -> Tensor:
horiz = []
for i in range(len(pieces)):
pieces[i] = pieces[i][:, :, padding * 2 * teta(i // piece_count):
pieces[i].shape[2] - padding * 2 * teta(piece_count - 1 - i // piece_count),
padding * 2 * teta(i % piece_count):
pieces[i].shape[3] - padding * 2 * teta(piece_count - 1 - i % piece_count)]
for i in range(piece_count):
horiz.append(torch.cat(pieces[i * piece_count:(i + 1) * piece_count], 3))
image = torch.cat(horiz, 2)
return image
def teta(x: int) -> int:
if x != 0:
return 1
return 0
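# Round-trip shape sketch (illustration only, assuming a 2x-upscaling model):
# pieces are cut with `padding` overlap, upscaled, then glued; glue_image trims
# 2 * padding because the overlap has doubled in size. Plain bicubic stands in
# for the model here.
def _tile_roundtrip_demo():
    x = torch.rand(1, 3, 3 * 60, 3 * 60) * 2 - 1  # H, W divisible by piece_count
    up = [F.interpolate(p, scale_factor=2, mode='bicubic', align_corners=True)
          for p in cut_image(x)]
    return glue_image(up).shape  # torch.Size([1, 3, 360, 360])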
|
{"hexsha": "dc019bbc344977d9272c86636e6df231650cfaf9", "size": 3591, "ext": "py", "lang": "Python", "max_stars_repo_path": "cm_modules/utils.py", "max_stars_repo_name": "SergejVolkov/SR_base", "max_stars_repo_head_hexsha": "285b40c0bbe9dc46f2bd660dc80ff255b4dc65a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-10T16:29:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-14T16:59:29.000Z", "max_issues_repo_path": "cm_modules/utils.py", "max_issues_repo_name": "SergejVolkov/SR_base", "max_issues_repo_head_hexsha": "285b40c0bbe9dc46f2bd660dc80ff255b4dc65a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cm_modules/utils.py", "max_forks_repo_name": "SergejVolkov/SR_base", "max_forks_repo_head_hexsha": "285b40c0bbe9dc46f2bd660dc80ff255b4dc65a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0625, "max_line_length": 114, "alphanum_fraction": 0.5758841548, "include": true, "reason": "import numpy", "num_tokens": 998}
|
#! -*- coding:utf-8 -*-
from __future__ import print_function
import numpy
def read(inp_file):
f_in = open(inp_file, 'r')
lines = f_in.readlines()
words_map = {}  # word-to-id mapping
char_map = {}  # char-to-id mapping
word_cnt = 0  # number of distinct words
char_cnt = 0  # number of distinct characters
k_chr = 3  # character context window
k_wrd = 5  # word context window
y = []
x_chr = []
x_wrd = []
max_word_len, max_sen_len, num_sent = 0, 0, 20000
for line in lines[:num_sent]:
words = line[:-1].split()
tokens = words[1:]
y.append(int(float(words[0])))
max_sen_len = max(max_sen_len,len(tokens))
for token in tokens:
if token not in words_map:
words_map[token] = word_cnt
word_cnt += 1
max_word_len = max(max_word_len,len(token))
for i in xrange(len(token)):
if token[i] not in char_map:
char_map[token[i]] = char_cnt
char_cnt += 1
for line in lines[:num_sent]:
words = line[:-1].split()
tokens = words[1:]
word_mat = [0] * (max_sen_len+k_wrd-1)
char_mat = numpy.zeros((max_sen_len+k_wrd-1, max_word_len+k_chr-1))
for i in xrange(len(tokens)):
word_mat[(k_wrd/2)+i] = words_map[tokens[i]]
for j in xrange(len(tokens[i])):
char_mat[(k_wrd/2)+i][(k_chr/2)+j] = char_map[tokens[i][j]]
x_chr.append(char_mat)
x_wrd.append(word_mat)
max_word_len += k_chr-1
max_sen_len += k_wrd-1
    # num_sent: number of documents
    # word_cnt: number of distinct words
    # char_cnt: number of distinct characters
    # max_sen_len: maximum document length
    # max_word_len: maximum word length
    # x_chr: input character-id sequences (num_sent * max_sen_len * max_word_len)
    # x_wrd: input word-id sequences (num_sent * max_sen_len)
    # y: 1 or 0 (i.e., positive or negative)
    # print(numpy.array(x_wrd).shape)
    # print(numpy.array(x_chr).shape)
    # print(num_sent)
    # print(max_sen_len)
    # print(max_word_len)
data = (num_sent, char_cnt, word_cnt, max_word_len, max_sen_len,\
k_chr, k_wrd, x_chr, x_wrd, y)
return data
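# Example of consuming the returned tuple (a sketch; the field meanings are
# listed in the comments above):
#   (num_sent, char_cnt, word_cnt, max_word_len, max_sen_len,
#    k_chr, k_wrd, x_chr, x_wrd, y) = read("tweets_clean.txt")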
read("tweets_clean.txt")
|
{"hexsha": "8a3f45a5df3efb9067542192337d40e69ddf7345", "size": 2122, "ext": "py", "lang": "Python", "max_stars_repo_path": "char_load.py", "max_stars_repo_name": "satopirka/CharSCNN-theano", "max_stars_repo_head_hexsha": "2b142b9e494e5c5f29bfd3a5b3b5fb4d24f065de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2017-11-09T07:44:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-20T10:56:57.000Z", "max_issues_repo_path": "char_load.py", "max_issues_repo_name": "satopirka/CharSCNN-theano", "max_issues_repo_head_hexsha": "2b142b9e494e5c5f29bfd3a5b3b5fb4d24f065de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-10-09T03:37:19.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-09T03:37:19.000Z", "max_forks_repo_path": "char_load.py", "max_forks_repo_name": "satopirka/CharSCNN-theano", "max_forks_repo_head_hexsha": "2b142b9e494e5c5f29bfd3a5b3b5fb4d24f065de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-11-09T07:58:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-30T00:04:22.000Z", "avg_line_length": 27.9210526316, "max_line_length": 75, "alphanum_fraction": 0.5777568332, "include": true, "reason": "import numpy", "num_tokens": 715}
|
#!/usr/bin/env python3
# imports go here
import time
import numpy as np
import scipy as sp
import scipy.misc  # make sp.misc importable as an attribute
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
#
# Free Coding session for 2015-06-09
# Written by Matt Warren
#
# lena() was removed from SciPy in 0.17; face() is the bundled replacement
# sample image (on very recent SciPy it also lives in scipy.datasets).
lena = sp.misc.face(gray=True)
X = np.reshape(lena, (-1, 1))
connectivity = grid_to_graph(*lena.shape)
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
                               connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Clustering took %.2fs" % (time.time() - st))
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    # Outline each cluster; levels=[0.5] draws the boundary of the boolean mask
    # (`contours=1` is not a valid plt.contour keyword). nipy_spectral replaces
    # the removed `spectral` colormap.
    plt.contour(label == l, levels=[0.5],
                colors=[plt.cm.nipy_spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
|
{"hexsha": "2ed6ffc3d7cd8d78362ae851e58d4a8706ff894b", "size": 849, "ext": "py", "lang": "Python", "max_stars_repo_path": "2015/06/fc_2015_06_09.py", "max_stars_repo_name": "mfwarren/FreeCoding", "max_stars_repo_head_hexsha": "58ac87f35ad2004a3514782556762ee0ed72c39a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2015/06/fc_2015_06_09.py", "max_issues_repo_name": "mfwarren/FreeCoding", "max_issues_repo_head_hexsha": "58ac87f35ad2004a3514782556762ee0ed72c39a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-04-27T01:43:45.000Z", "max_issues_repo_issues_event_max_datetime": "2015-04-27T01:43:45.000Z", "max_forks_repo_path": "2015/06/fc_2015_06_09.py", "max_forks_repo_name": "mfwarren/FreeCoding", "max_forks_repo_head_hexsha": "58ac87f35ad2004a3514782556762ee0ed72c39a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9705882353, "max_line_length": 103, "alphanum_fraction": 0.7302709069, "include": true, "reason": "import numpy,import scipy", "num_tokens": 218}
|
import sys
sys.path.append('core')
import argparse
import os
import cv2
import glob
import numpy as np
import torch
from PIL import Image
from raft import RAFT
from utils import flow_viz
from utils.utils import InputPadder
from io_list import save_list
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def load_image(imfile):
img = np.array(Image.open(imfile).convert("RGB")).astype(np.uint8)
img = torch.from_numpy(img).permute(2, 0, 1).float()
return img
def load_image_list(image_files):
images = []
for imfile in sorted(image_files):
images.append(load_image(imfile))
images = torch.stack(images, dim=0)
images = images.to(DEVICE)
padder = InputPadder(images.shape)
return padder.pad(images)[0]
# uv1 is a list of (x, y) pixel coordinates
def get_uv2(uv1, flo):
uv2 = []
for uv in uv1:
x = uv[0]
y = uv[1]
pt2 = (int(x+flo[y][x][0]), int(y+flo[y][x][1]))
uv2.append(pt2)
return uv2
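# Sketch: advecting a single pixel through the flow (flo is an H x W x 2 numpy
# array of (dx, dy) displacements, as prepared inside viz below):
#   uv2 = get_uv2([(160, 160)], flo)   # -> [(160 + int(dx), 160 + int(dy))]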
def viz(img1, img2, flo, uv1 = None, filename = 'flow.png'):
img_1 = img1[0].permute(1,2,0).cpu().numpy()
img_2 = img2[0].permute(1, 2, 0).cpu().numpy()
flo = flo[0].permute(1,2,0).cpu().numpy()
# map flow to rgb image
flo_image = flow_viz.flow_to_image(flo)
img_1_u = img_1.astype('uint8').copy()
img_2_u = img_2.astype('uint8').copy()
    # Draw the sample points if provided
    if uv1 is not None:
        uv2 = get_uv2(uv1, flo)
        point_size = 2
        point_color = (0, 255, 0)  # BGR
        thickness = -1  # filled circle (outline thickness could be e.g. 0, 4 or 8)
        for i in range(len(uv2)):
            pt1 = uv1[i]
            pt2 = uv2[i]
            cv2.circle(img_1_u, pt1, point_size, point_color, thickness)
            cv2.circle(img_2_u, pt2, point_size, point_color, thickness)
img_flo = np.concatenate([img_1_u, img_2_u, flo_image], axis=0)
# cv2.imshow('image', img_flo[:, :, [2,1,0]]/255.0)
cv2.imwrite(filename, img_flo)
def demo(args):
model = torch.nn.DataParallel(RAFT(args))
model.load_state_dict(torch.load(args.model, map_location=torch.device('cpu')))
model = model.module
model.to(DEVICE)
model.eval()
with torch.no_grad():
images = glob.glob(os.path.join(args.path, '*.png')) + \
glob.glob(os.path.join(args.path, '*.jpg'))
images = load_image_list(images)
# Get sample points
points1 = []
for i in range(15):
for j in range(20):
# pt1 = (160, 160)
# x = pt1[0]
# y = pt1[1]
x = j * 30
y = i * 30
pt1 = (x, y)
points1.append(pt1)
for i in range(images.shape[0]-1):
image1 = images[i,None]
image2 = images[i+1,None]
flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
flow_up_numpy = flow_up[0].permute(1, 2, 0).cpu().numpy()
viz(image1, image2, flow_up, uv1=points1)
            k = cv2.waitKey(200) & 0xFF
if k == 27:
break
def save_results(args, save_path):
model = torch.nn.DataParallel(RAFT(args))
model.load_state_dict(torch.load(args.model, map_location=torch.device('cpu')))
model = model.module
model.to(DEVICE)
model.eval()
with torch.no_grad():
images = glob.glob(os.path.join(args.path, '*.png')) + \
glob.glob(os.path.join(args.path, '*.jpg'))
save_list(path=f'{save_path}/image.list', list_obj=images)
length = len(images)
# images = load_image_list(images)
image1 = load_image(images[0])[None]
# Get sample points
points1 = []
for i in range(15):
for j in range(20):
# pt1 = (160, 160)
# x = pt1[0]
# y = pt1[1]
x = j * 30
y = i * 30
pt1 = (x, y)
points1.append(pt1)
for i in range(length-1):
# image1 = images[i,None]
# image2 = images[i+1,None]
image2 = load_image(images[i+1])[None]
flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
filename = f'flow_{i}_{i+1}'
np.savez(f'{save_path}/{filename}.npz', image1=images[i], image2=images[i+1], flow=flow_up)
viz(image1, image2, flow_up, uv1=points1, filename=f'{save_path}/{filename}.png')
image1 = image2
print(f'Finish saving {i}/{length}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--task', help="demo or save", type=str, default="demo")
parser.add_argument('--save_path', help="where to save results", type=str, default="./")
parser.add_argument('--model', help="restore checkpoint")
parser.add_argument('--path', help="dataset for evaluation")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
args = parser.parse_args()
if args.task == "demo":
demo(args)
elif args.task == "save":
save_results(args, save_path=args.save_path)
else:
raise RuntimeError("Task {} not supported".format(args.task))
|
{"hexsha": "44145f78e5a98ab5ea950f63bd6b06fdf769f6d9", "size": 5402, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo.py", "max_stars_repo_name": "skylook/RAFT", "max_stars_repo_head_hexsha": "e0a4af12f76b4bba34a8bc1628a820dd5117c88a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demo.py", "max_issues_repo_name": "skylook/RAFT", "max_issues_repo_head_hexsha": "e0a4af12f76b4bba34a8bc1628a820dd5117c88a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo.py", "max_forks_repo_name": "skylook/RAFT", "max_forks_repo_head_hexsha": "e0a4af12f76b4bba34a8bc1628a820dd5117c88a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2827225131, "max_line_length": 112, "alphanum_fraction": 0.5744168826, "include": true, "reason": "import numpy", "num_tokens": 1489}
|
[STATEMENT]
lemma set_restriction_fun_empty_conv: "
((set_restriction_fun P) A = {}) = (\<forall>x\<in>A. \<not> P x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (set_restriction_fun P A = {}) = (\<forall>x\<in>A. \<not> P x)
[PROOF STEP]
unfolding set_restriction_fun_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ({x \<in> A. P x} = {}) = (\<forall>x\<in>A. \<not> P x)
[PROOF STEP]
by blast
|
{"llama_tokens": 186, "file": "List-Infinite_CommonSet_SetIntervalCut", "length": 2}
|
#!/usr/bin/env python3
# This script is used to estimate an accuracy of different face detection models.
# COCO evaluation tool is used to compute an accuracy metrics (Average Precision).
# Script works with different face detection datasets.
import os
import json
from fnmatch import fnmatch
from math import pi
import cv2 as cv
import argparse
import sys
import pymlir
import numpy as np
from tqdm import tqdm
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from dataset_util.widerface.eval_widerface import detect_on_widerface, evaluation
parser = argparse.ArgumentParser(
description='Evaluate OpenCV face detection algorithms '
'using COCO evaluation tool, http://cocodataset.org/#detections-eval')
parser.add_argument('--model', type=str, default='', help="MLIR Model file")
parser.add_argument('--cascade', help='Optional path to trained Haar cascade as '
'an additional model for evaluation')
parser.add_argument('--annotation', help='Path to text file with ground truth annotations')
parser.add_argument('--image_path', help='Path to images root directory')
parser.add_argument('--fddb', help='Evaluate FDDB dataset, http://vis-www.cs.umass.edu/fddb/', action='store_true')
parser.add_argument('--wider', help='Evaluate WIDER FACE dataset, http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/', action='store_true')
parser.add_argument('--matlib', help='Evaluate WIDER FACE dataset with the MATLAB-style evaluation tool', action='store_true')
parser.add_argument("--count", type=int, default=-1)
parser.add_argument("--result", type=str, default='./result', help="Result folder")
args = parser.parse_args()
dataset = {}
dataset['images'] = []
dataset['categories'] = [{ 'id': 0, 'name': 'face' }]
dataset['annotations'] = []
def ellipse2Rect(params):
rad_x = params[0]
rad_y = params[1]
angle = params[2] * 180.0 / pi
center_x = params[3]
center_y = params[4]
pts = cv.ellipse2Poly((int(center_x), int(center_y)), (int(rad_x), int(rad_y)),
int(angle), 0, 360, 10)
rect = cv.boundingRect(pts)
left = rect[0]
top = rect[1]
right = rect[0] + rect[2]
bottom = rect[1] + rect[3]
return left, top, right, bottom
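# Worked sketch: a circle of radius 10 centred at (50, 60) with zero rotation,
# params = [10, 10, 0.0, 50, 60], yields approximately left=40, top=50,
# right=60, bottom=70 (up to the polygon approximation of cv.ellipse2Poly).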
def addImage(imagePath):
assert('images' in dataset)
imageId = len(dataset['images'])
dataset['images'].append({
'id': int(imageId),
'file_name': imagePath
})
return imageId
def addBBox(imageId, left, top, width, height):
assert('annotations' in dataset)
dataset['annotations'].append({
'id': len(dataset['annotations']),
'image_id': int(imageId),
'category_id': 0, # Face
'bbox': [int(left), int(top), int(width), int(height)],
'iscrowd': 0,
'area': float(width * height)
})
def addDetection(detections, imageId, left, top, width, height, score):
detections.append({
'image_id': int(imageId),
'category_id': 0, # Face
'bbox': [int(left), int(top), int(width), int(height)],
'score': float(score)
})
def fddb_dataset(annotations, images):
for d in os.listdir(annotations):
if fnmatch(d, 'FDDB-fold-*-ellipseList.txt'):
with open(os.path.join(annotations, d), 'rt') as f:
lines = [line.rstrip('\n') for line in f]
lineId = 0
while lineId < len(lines):
# Image
imgPath = lines[lineId]
lineId += 1
                    imageId = addImage(os.path.join(images, imgPath) + '.jpg')
# Faces
numFaces = int(lines[lineId])
lineId += 1
for i in range(numFaces):
params = [float(v) for v in lines[lineId].split()]
lineId += 1
left, top, right, bottom = ellipse2Rect(params)
addBBox(imageId, left, top, width=right - left + 1,
height=bottom - top + 1)
def wider_dataset(annotations, images):
with open(annotations, 'rt') as f:
lines = [line.rstrip('\n') for line in f]
lineId = 0
while lineId < len(lines):
# Image
imgPath = lines[lineId]
lineId += 1
imageId = addImage(os.path.join(images, imgPath))
# Faces
numFaces = int(lines[lineId])
lineId += 1
for i in range(numFaces):
params = [int(v) for v in lines[lineId].split()]
lineId += 1
left, top, width, height = params[0], params[1], params[2], params[3]
addBBox(imageId, left, top, width, height)
def detection():
with tqdm(total= args.count if args.count > 0 else len(dataset['images'])) as pbar:
for i in range(len(dataset['images'])):
#sys.stdout.write('\r%d / %d' % (i + 1, len(dataset['images'])))
#sys.stdout.flush()
img = cv.imread(dataset['images'][i]['file_name'])
imageId = int(dataset['images'][i]['id'])
detect(img, imageId)
if (i == args.count):
break
pbar.update(1)
def evaluate():
cocoGt = COCO('annotations.json')
cocoDt = cocoGt.loadRes('detections.json')
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
### Convert to COCO annotations format #########################################
assert(args.fddb or args.wider)
if args.fddb:
fddb_dataset(args.annotation, args.image_path)
elif args.wider:
wider_dataset(args.annotation, args.image_path)
with open('annotations.json', 'wt') as f:
json.dump(dataset, f)
### Obtain detections ##########################################################
detections = []
if args.model :
module = pymlir.module()
module.load(args.model)
def detect(img, imageId):
imgWidth = img.shape[1]
imgHeight = img.shape[0]
input = cv.dnn.blobFromImage(img, 1.0, (300, 300), (104., 177., 123.), False, False)
_ = module.run(input)
all_tensor = module.get_all_tensor()
out = all_tensor['detection_out']
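        # detection_out follows the usual Caffe-SSD layout (an assumption based
        # on how the fields are consumed below): each row out[0, 0, i] is
        # [image_id, label, confidence, xmin, ymin, xmax, ymax], with the box
        # coordinates normalised to [0, 1].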
for i in range(out.shape[2]):
confidence = out[0, 0, i, 2]
left = int(out[0, 0, i, 3] * img.shape[1])
top = int(out[0, 0, i, 4] * img.shape[0])
right = int(out[0, 0, i, 5] * img.shape[1])
bottom = int(out[0, 0, i, 6] * img.shape[0])
x = max(0, min(left, img.shape[1] - 1))
y = max(0, min(top, img.shape[0] - 1))
w = max(0, min(right - x + 1, img.shape[1] - x))
h = max(0, min(bottom - y + 1, img.shape[0] - y))
addDetection(detections, imageId, x, y, w, h, score=confidence)
elif args.cascade:
cascade = cv.CascadeClassifier(args.cascade)
def detect(img, imageId):
srcImgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(srcImgGray)
for rect in faces:
left, top, width, height = rect[0], rect[1], rect[2], rect[3]
addDetection(detections, imageId, left, top, width, height, score=1.0)
if args.matlib:
assert(args.wider and args.matlib)
module = pymlir.module()
module.load(args.model)
def detect(img):
imgWidth = img.shape[1]
imgHeight = img.shape[0]
input = cv.dnn.blobFromImage(img, 1.0, (300, 300), (104., 177., 123.), False, False)
_ = module.run(input)
all_tensor = module.get_all_tensor()
out = all_tensor['detection_out']
ret = np.zeros((out.shape[2], out.shape[3]))
for i in range(out.shape[2]):
confidence = out[0, 0, i, 2]
left = int(out[0, 0, i, 3] * img.shape[1])
top = int(out[0, 0, i, 4] * img.shape[0])
right = int(out[0, 0, i, 5] * img.shape[1])
bottom = int(out[0, 0, i, 6] * img.shape[0])
ret[i][0] = max(0, min(left, img.shape[1] - 1))
ret[i][1] = max(0, min(top, img.shape[0] - 1))
ret[i][2] = max(0, min(right - ret[i][0] + 1, img.shape[1] - ret[i][0]))
ret[i][3] = max(0, min(bottom - ret[i][1] + 1, img.shape[0] - ret[i][1]))
            ret[i][4] = confidence
return ret
annotation_path = os.path.abspath(os.path.join(args.annotation, ".."))
detect_on_widerface(args.image_path, annotation_path, args.result, detect)
evaluation(args.result, 'ssd300_face')
else:
detection()
with open('detections.json', 'wt') as f:
json.dump(detections, f)
evaluate()
def rm(f):
if os.path.exists(f):
os.remove(f)
rm('annotations.json')
rm('detections.json')
|
{"hexsha": "6a6b091021f994cb4f642deb7e880791f9ee5351", "size": 8959, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cvi_toolkit/eval/eval_ssd300_face.py", "max_stars_repo_name": "sophgo/tpu_compiler", "max_stars_repo_head_hexsha": "6299ea0a3adae1e5c206bcb9bedf225d16e636db", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-03-14T11:47:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T01:45:37.000Z", "max_issues_repo_path": "python/cvi_toolkit/eval/eval_ssd300_face.py", "max_issues_repo_name": "sophgo/tpu_compiler", "max_issues_repo_head_hexsha": "6299ea0a3adae1e5c206bcb9bedf225d16e636db", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/cvi_toolkit/eval/eval_ssd300_face.py", "max_forks_repo_name": "sophgo/tpu_compiler", "max_forks_repo_head_hexsha": "6299ea0a3adae1e5c206bcb9bedf225d16e636db", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3291666667, "max_line_length": 137, "alphanum_fraction": 0.5735015069, "include": true, "reason": "import numpy", "num_tokens": 2358}
|
#include <boost/timer.hpp>
|
{"hexsha": "294211880af67e4bbc3227a77a510645ef179ba8", "size": 27, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_timer.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_timer.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_timer.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 13.5, "max_line_length": 26, "alphanum_fraction": 0.7407407407, "num_tokens": 7}
|
#ifndef REF_DESCRIPTORS_IMPL_IPP
#define REF_DESCRIPTORS_IMPL_IPP
#include <ref/Class.hpp>
#include <ref/DescriptorsImpl.hpp>
#include <ref/Holder.hpp>
#include <ref/detail/Name.hpp>
#include <iterator>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <boost/mpl/for_each.hpp>
#include <boost/lexical_cast.hpp>
namespace ref
{
template <typename Descriptor, typename Impl, typename T>
const Descriptor* DescriptorImplBase<Descriptor, Impl, T>::instance()
{
static Impl instance_;
return &instance_;
}
template <typename Descriptor, typename Impl, typename T>
std::string DescriptorImplBase<Descriptor, Impl, T>::getName() const
{
static const std::string name = detail::get_name<T>();
return name;
}
template <typename Descriptor, typename Impl, typename T>
std::string DescriptorImplBase<Descriptor, Impl, T>::getFqn() const
{
static const std::string fqn = detail::get_fqn<T>();
return fqn;
}
template <typename Descriptor, typename Impl, typename T>
std::string DescriptorImplBase<Descriptor, Impl, T>::getXmlTag() const
{
static const std::string tag = detail::convert_to_xmltag(getName());
return tag;
}
// ClassDescriptorImpl
template <typename Class>
const ClassDescriptorImpl<Class>* ClassDescriptorImpl<Class>::instance()
{
static ClassDescriptorImpl instance_;
return &instance_;
}
namespace detail
{
template <typename T>
struct BaseClassDescriptor
{
typedef ClassDescriptorImpl<typename T::base_class> type;
static const ClassDescriptor* get() { return type::instance(); }
};
template <>
struct BaseClassDescriptor<ModelClass>
{
static const ClassDescriptor* get() { return nullptr; }
};
} // namespace detail
template <typename Class>
struct ClassDescriptorImpl<Class>::Initializer
{
ClassDescriptorImpl& d;
Initializer(ClassDescriptorImpl& d_) : d(d_) {}
template <typename Feature>
void operator()(Feature) const
{
const FeatureDescriptor* feature =
FeatureDescriptorImpl<Class, Feature>::instance();
d.m_featureVec.push_back(feature);
d.m_featureMap.insert(std::make_pair(feature->getName(), feature));
}
};
template <typename Class>
ClassDescriptorImpl<Class>::ClassDescriptorImpl()
{
boost::mpl::for_each<typename Class::features_type>(Initializer(*this));
const ClassDescriptor* parent =
detail::BaseClassDescriptor<Class>::get();
if (parent)
{
m_allFeatureVec = parent->getAllFeatureDescriptors();
for (const auto& feature : m_allFeatureVec)
{
m_featureMap.insert(
std::make_pair(feature->getName(), feature));
}
}
std::copy(m_featureVec.begin(), m_featureVec.end(),
std::back_inserter(m_allFeatureVec));
}
namespace detail
{
template <class Class, typename Enabled = void>
struct Create
{
static inline ModelClass* call() { return new Class; }
};
template <class Class>
struct Create<Class,
typename boost::enable_if<
typename boost::is_abstract<Class>::type>::type>
{
static inline ModelClass* call() { return nullptr; }
};
} // namespace detail
template <typename Class>
Holder ClassDescriptorImpl<Class>::create() const
{
return Holder(detail::Create<Class>::call(), this, true);
}
template <typename Class>
void ClassDescriptorImpl<Class>::copy(Holder src, Holder dst) const
{
ModelClass* pSrc = src.get<ModelClass>();
ModelClass* pDst = dst.get<ModelClass>();
assert(pSrc && pDst);
assert(pSrc->getClassDescriptor() && pDst->getClassDescriptor());
if (pSrc == pDst) return;
const ClassDescriptor* classDesc = pSrc->getClassDescriptor();
const FeatureDescriptorVector features =
classDesc->getAllFeatureDescriptors();
for (const auto& feature : features)
{
Holder hSrc = feature->getValue(pSrc);
Holder hDst = feature->getValue(pDst);
feature->getTypeDescriptor()->copy(hSrc, hDst);
}
}
template <typename Class>
const ClassDescriptor*
ClassDescriptorImpl<Class>::getParentClassDescriptor() const
{
return detail::BaseClassDescriptor<Class>::get();
}
template <typename Class>
FeatureDescriptorVector ClassDescriptorImpl<Class>::getFeatureDescriptors()
const
{
return m_featureVec;
}
template <typename Class>
FeatureDescriptorVector
ClassDescriptorImpl<Class>::getAllFeatureDescriptors() const
{
return m_allFeatureVec;
}
template <typename Class>
bool ClassDescriptorImpl<Class>::isAbstract() const
{
typedef typename boost::is_abstract<Class>::type is_abstract;
return is_abstract::value;
}
template <typename Class>
const FeatureDescriptor* ClassDescriptorImpl<Class>::getFeatureDescriptor(
std::string name) const
{
FeatureDescriptorMap::const_iterator it = m_featureMap.find(name);
if (it != m_featureMap.end()) return it->second;
return nullptr;
}
template <typename Class>
Holder ClassDescriptorImpl<Class>::getFeatureValue(ModelClass* obj,
std::string name) const
{
FeatureDescriptorMap::const_iterator it = m_featureMap.find(name);
if (it != m_featureMap.end()) return it->second->getValue(obj);
return Holder();
}
template <typename Class>
ModelClass* ClassDescriptorImpl<Class>::get(Holder h) const
{
return h.get<ModelClass>();
}
template <typename Class>
FeatureValueVector ClassDescriptorImpl<Class>::getFeatureValues(
ModelClass* obj) const
{
FeatureValueVector values;
for (const auto& featureDesc : m_allFeatureVec)
{
Holder featureValue = featureDesc->getValue(obj);
values.push_back(std::make_pair(featureDesc, featureValue));
}
return values;
}
// FeatureDescriptorImpl
template <typename Class, typename Feature>
const TypeDescriptor*
FeatureDescriptorImpl<Class, Feature>::getTypeDescriptor() const
{
return TypeDescriptor::getDescriptor<typename Feature::type>();
}
template <typename Class, typename Feature>
Holder FeatureDescriptorImpl<Class, Feature>::getValue(
ModelClass* obj) const
{
Class* realObj = static_cast<Class*>(obj);
return Holder(&realObj->template get<Feature>(), getTypeDescriptor());
}
template <typename Class, typename Feature>
ModelClass* FeatureDescriptorImpl<Class, Feature>::getObject(Holder h) const
{
assert(h.isValid());
assert(h.descriptor() == getTypeDescriptor());
assert(!h.isContained());
    typedef typename Feature::type type;
    type* value = h.get<type>();
    // Recover the enclosing Feature object from the address of its 'value'
    // member (offsetof trick), then downcast to the concrete model class.
    Feature* feature = reinterpret_cast<Feature*>(
        reinterpret_cast<char*>(value) - offsetof(Feature, value));
    return static_cast<Class*>(feature);
}
template <typename Class, typename Feature>
const ClassDescriptor* FeatureDescriptorImpl<Class, Feature>::getDefinedIn()
const
{
return ClassDescriptorImpl<Class>::instance();
}
// PrimitiveTypeDescriptorImpl
template <typename T>
Holder PrimitiveTypeDescriptorImpl<T>::create() const
{
return Holder(new T, this, true);
}
template <typename T>
void PrimitiveTypeDescriptorImpl<T>::copy(Holder src, Holder dst) const
{
assert(src.descriptor() == this && src.get<T>());
assert(dst.descriptor() == this && dst.get<T>());
T* pSrc = src.get<T>();
T* pDst = dst.get<T>();
if (pSrc != pDst)
{
*pDst = *pSrc;
}
}
template <typename T>
std::string PrimitiveTypeDescriptorImpl<T>::getString(Holder h) const
{
assert(h.descriptor() == this && h.get<T>());
return boost::lexical_cast<std::string>(*h.get<T>());
}
template <>
std::string PrimitiveTypeDescriptorImpl<std::string>::getString(
Holder h) const
{
assert(h.descriptor() == this && h.get<std::string>());
return *h.get<std::string>();
}
template <typename T>
void PrimitiveTypeDescriptorImpl<T>::setString(
Holder h, const std::string& value) const
{
assert(h.descriptor() == this && h.get<T>());
*h.get<T>() = boost::lexical_cast<T>(value);
}
template <>
void PrimitiveTypeDescriptorImpl<std::string>::setString(
Holder h, const std::string& value) const
{
assert(h.descriptor() == this && h.get<std::string>());
*h.get<std::string>() = value;
}
// ListTypeDescriptor
template <typename T>
Holder ListTypeDescriptorImpl<T>::create() const
{
return Holder(new T, this, true);
}
template <typename T>
const TypeDescriptor* ListTypeDescriptorImpl<T>::getValueTypeDescriptor()
const
{
return detail::GetDescriptorType<
typename T::value_type>::type::instance();
}
template <typename T>
void ListTypeDescriptorImpl<T>::copy(Holder src, Holder dst) const
{
assert(src.descriptor() == this && src.get<T>());
assert(dst.descriptor() == this && dst.get<T>());
T* pSrc = src.get<T>();
T* pDst = dst.get<T>();
if (pSrc != pDst)
{
*pDst = *pSrc;
}
}
template <typename T>
std::vector<Holder> ListTypeDescriptorImpl<T>::getValue(Holder h) const
{
const T* t = h.get<T>();
assert(t);
std::vector<Holder> value;
for (auto& i : *t)
{
value.push_back(Holder(&i, getValueTypeDescriptor()));
}
return value;
}
template <typename T>
void ListTypeDescriptorImpl<T>::setValue(
Holder h, const std::vector<Holder>& value) const
{
T* t = h.get<T>();
assert(t);
t->resize(value.size());
auto valueDesc = getValueTypeDescriptor();
for (size_t i = 0; i < value.size(); i++)
{
valueDesc->copy(value[i], Holder(&(*t)[i], valueDesc));
}
}
// SetTypeDescriptor
template <typename T>
Holder SetTypeDescriptorImpl<T>::create() const
{
return Holder(new T, this, true);
}
template <typename T>
const TypeDescriptor* SetTypeDescriptorImpl<T>::getValueTypeDescriptor()
const
{
return detail::GetDescriptorType<
typename T::value_type>::type::instance();
}
template <typename T>
void SetTypeDescriptorImpl<T>::copy(Holder src, Holder dst) const
{
assert(src.descriptor() == this && src.get<T>());
assert(dst.descriptor() == this && dst.get<T>());
T* pSrc = src.get<T>();
T* pDst = dst.get<T>();
if (pSrc != pDst)
{
*pDst = *pSrc;
}
}
template <typename T>
std::vector<Holder> SetTypeDescriptorImpl<T>::getValue(Holder h) const
{
const T* t = h.get<T>();
assert(t);
std::vector<Holder> value;
for (auto& i : *t)
{
value.push_back(Holder(&i, getValueTypeDescriptor()));
}
return value;
}
template <typename T>
void SetTypeDescriptorImpl<T>::setValue(
Holder h, const std::vector<Holder>& value) const
{
T* t = h.get<T>();
assert(t);
t->clear();
auto valueDesc = getValueTypeDescriptor();
for (auto& i : value)
{
typename T::value_type v;
valueDesc->copy(i, Holder(&v, valueDesc));
t->insert(v);
}
}
// MapTypeDescriptor
template <typename T>
Holder MapTypeDescriptorImpl<T>::create() const
{
return Holder(new T, this, true);
}
template <typename T>
void MapTypeDescriptorImpl<T>::copy(Holder src, Holder dst) const
{
assert(src.descriptor() == this && src.get<T>());
assert(dst.descriptor() == this && dst.get<T>());
T* pSrc = src.get<T>();
T* pDst = dst.get<T>();
if (pSrc != pDst)
{
*pDst = *pSrc;
}
}
template <typename T>
const TypeDescriptor* MapTypeDescriptorImpl<T>::getKeyTypeDescriptor() const
{
return detail::GetDescriptorType<
typename T::key_type>::type::instance();
}
template <typename T>
const TypeDescriptor* MapTypeDescriptorImpl<T>::getMappedTypeDescriptor()
const
{
return detail::GetDescriptorType<
typename T::mapped_type>::type::instance();
}
template <typename T>
const TypeDescriptor* MapTypeDescriptorImpl<T>::getValueTypeDescriptor()
const
{
return detail::GetDescriptorType<value_type>::type::instance();
}
template <typename T>
std::vector<Holder> MapTypeDescriptorImpl<T>::getValue(Holder h) const
{
const T* t = h.get<T>();
assert(t);
std::vector<Holder> value;
for (auto& i : *t)
{
value.push_back(Holder(&i, getValueTypeDescriptor()));
}
return value;
}
template <typename T>
void MapTypeDescriptorImpl<T>::setValue(
Holder h, const std::vector<Holder>& value) const
{
T* t = h.get<T>();
assert(t);
t->clear();
auto valueDesc = getValueTypeDescriptor();
for (auto& i : value)
{
value_type v;
valueDesc->copy(i, Holder(&v, valueDesc));
t->insert(v);
}
}
// PairTypeDescriptor
template <typename T>
Holder PairTypeDescriptorImpl<T>::create() const
{
return Holder(new T, this, true);
}
template <typename T>
void PairTypeDescriptorImpl<T>::copy(Holder src, Holder dst) const
{
assert(src.descriptor() == this && src.get<T>());
assert(dst.descriptor() == this && dst.get<T>());
T* pSrc = src.get<T>();
T* pDst = dst.get<T>();
if (pSrc != pDst)
{
*pDst = *pSrc;
}
}
template <typename T>
const TypeDescriptor* PairTypeDescriptorImpl<T>::getFirstTypeDescriptor()
const
{
return detail::GetDescriptorType<
typename T::first_type>::type::instance();
}
template <typename T>
const TypeDescriptor* PairTypeDescriptorImpl<T>::getSecondTypeDescriptor()
const
{
return detail::GetDescriptorType<
typename T::second_type>::type::instance();
}
template <typename T>
std::pair<Holder, Holder> PairTypeDescriptorImpl<T>::getValue(
Holder h) const
{
const T* t = h.get<T>();
assert(t);
Holder first(&t->first, getFirstTypeDescriptor());
Holder second(&t->second, getSecondTypeDescriptor());
return std::make_pair(first, second);
}
// PointerTypeDescriptor
template <typename T>
Holder PointerTypeDescriptorImpl<T>::create() const
{
return Holder(new T, this, true);
}
template <typename T>
void PointerTypeDescriptorImpl<T>::copy(Holder src, Holder dst) const
{
assert(src.descriptor() == this && src.get<T>());
assert(dst.descriptor() == this && dst.get<T>());
T* pSrc = src.get<T>();
T* pDst = dst.get<T>();
if (pSrc != pDst)
{
*pDst = *pSrc;
}
}
namespace detail
{
template <typename T>
struct pointer_traits;
template <typename T>
struct pointer_traits<T*>
{
typedef T element_type;
enum
{
pointer_type = PointerTypeDescriptor::kRaw
};
static T* get(T* t) { return t; }
};
template <typename T>
struct pointer_traits<std::shared_ptr<T> >
{
typedef T element_type;
enum
{
pointer_type = PointerTypeDescriptor::kShared
};
static T* get(std::shared_ptr<T> t) { return t.get(); }
};
template <typename T>
struct pointer_traits<std::weak_ptr<T> >
{
typedef T element_type;
enum
{
pointer_type = PointerTypeDescriptor::kWeak
};
static T* get(std::weak_ptr<T> t) { return t.lock().get(); }
};
template <typename T>
struct pointer_traits<std::unique_ptr<T> >
{
typedef T element_type;
enum
{
pointer_type = PointerTypeDescriptor::kShared
};
static T* get(std::unique_ptr<T> t) { return t.get(); }
};
} // namespace detail
template <typename T>
PointerTypeDescriptor::PointerType
PointerTypeDescriptorImpl<T>::getPointerType() const
{
return static_cast<PointerTypeDescriptor::PointerType>(
detail::pointer_traits<T>::pointer_type);
}
template <typename T>
const TypeDescriptor*
PointerTypeDescriptorImpl<T>::getPointedTypeDescriptor() const
{
return detail::GetDescriptorType<
typename detail::pointer_traits<T>::element_type>::type::instance();
}
template <typename T>
bool PointerTypeDescriptorImpl<T>::isNull(Holder h) const
{
assert(h.descriptor() == this && h.get<T>());
T* ph = h.get<T>();
    // Compare against nullptr: returning the raw pointer itself would answer
    // "is not null", the opposite of this function's name.
    return detail::pointer_traits<T>::get(*ph) == nullptr;
}
template <typename T>
Holder PointerTypeDescriptorImpl<T>::dereference(Holder h) const
{
assert(h.descriptor() == this && h.get<T>());
T* ph = h.get<T>();
return Holder(detail::pointer_traits<T>::get(*ph),
getPointedTypeDescriptor());
}
// UnsupportedTypeDescriptor
template <typename T>
Holder UnsupportedTypeDescriptorImpl<T>::create() const
{
return Holder();
}
template <typename T>
void UnsupportedTypeDescriptorImpl<T>::copy(Holder, Holder) const
{
}
template <typename T>
const TypeDescriptor* TypeDescriptor::getDescriptor()
{
return detail::GetDescriptorType<T>::type::instance();
}
} // namespace ref
#endif // REF_DESCRIPTORS_IMPL_IPP
|
{"hexsha": "60ac3f38a1210e5311fd3ec61d6d12aa5451384b", "size": 18959, "ext": "ipp", "lang": "C++", "max_stars_repo_path": "ref/DescriptorsImpl.ipp", "max_stars_repo_name": "asenac/refcpp", "max_stars_repo_head_hexsha": "6b1ab20e65b3e5159fb2c7dd3b351dcc047516cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2015-02-02T05:25:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T09:56:18.000Z", "max_issues_repo_path": "ref/DescriptorsImpl.ipp", "max_issues_repo_name": "asenac/refcpp", "max_issues_repo_head_hexsha": "6b1ab20e65b3e5159fb2c7dd3b351dcc047516cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2016-02-22T22:31:01.000Z", "max_issues_repo_issues_event_max_datetime": "2016-02-23T14:11:58.000Z", "max_forks_repo_path": "ref/DescriptorsImpl.ipp", "max_forks_repo_name": "asenac/refcpp", "max_forks_repo_head_hexsha": "6b1ab20e65b3e5159fb2c7dd3b351dcc047516cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7404795487, "max_line_length": 80, "alphanum_fraction": 0.5839443009, "num_tokens": 4190}
|
from __future__ import division, print_function, absolute_import
import sklearn
from . import matplotlibhelpers as mplh
import numpy as np
def get_tsne_embedding(affinity_mat, aff_to_dist_mat, perplexity, **kwargs):
    from sklearn import manifold
    tsne = manifold.TSNE(metric='precomputed', perplexity=perplexity,
                         **kwargs)
    dist_mat = aff_to_dist_mat(affinity_mat)
    embedding = tsne.fit_transform(dist_mat)
    return embedding
def color_tsne_embedding_by_clustering(embedding, clusters,
*args, **kwargs):
mplh.scatter_plot(coords=embedding, clusters=clusters,
*args, **kwargs)
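# Minimal usage sketch (assumes a symmetric affinity matrix with values in
# [0, 1]; the affinity-to-distance conversion is supplied by the caller):
#   import numpy as np
#   aff = np.random.rand(50, 50)
#   aff = (aff + aff.T) / 2
#   np.fill_diagonal(aff, 1.0)   # self-affinity 1 -> distance 0
#   emb = get_tsne_embedding(aff, aff_to_dist_mat=lambda a: 1.0 - a,
#                            perplexity=10)
#   emb.shape                    # (50, 2)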
|
{"hexsha": "06c07ab48e7ad2866bbe563350a405ec5085ea77", "size": 703, "ext": "py", "lang": "Python", "max_stars_repo_path": "modisco/visualization/tsne.py", "max_stars_repo_name": "XiaotingChen/tfmodisco", "max_stars_repo_head_hexsha": "17cbafe806942304a02e8134fe10224bdff38b0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2018-05-21T01:47:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T04:15:27.000Z", "max_issues_repo_path": "modisco/visualization/tsne.py", "max_issues_repo_name": "XiaotingChen/tfmodisco", "max_issues_repo_head_hexsha": "17cbafe806942304a02e8134fe10224bdff38b0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2018-06-04T21:39:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T15:24:11.000Z", "max_forks_repo_path": "modisco/visualization/tsne.py", "max_forks_repo_name": "XiaotingChen/tfmodisco", "max_forks_repo_head_hexsha": "17cbafe806942304a02e8134fe10224bdff38b0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2018-06-04T19:51:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T23:27:18.000Z", "avg_line_length": 37.0, "max_line_length": 77, "alphanum_fraction": 0.679943101, "include": true, "reason": "import numpy", "num_tokens": 151}
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from GridCal.Engine.basic_structures import Logger
from GridCal.Engine.Devices.bus import Bus
from GridCal.Engine.Devices.enumerations import BranchType
from GridCal.Engine.Devices.transformer import TransformerType, Transformer2W
from GridCal.Engine.Devices.line import SequenceLineType, Line
from GridCal.Engine.Devices.hvdc_line import HvdcLine
from GridCal.Engine.Devices.underground_line import UndergroundLineType
from GridCal.Engine.Devices.editable_device import EditableDevice, DeviceType, GCProp
from GridCal.Engine.Devices.tower import Tower
# Global sqrt of 3 (bad practice?)
SQRT3 = np.sqrt(3.0)
class BranchTemplate:
def __init__(self, name='BranchTemplate', tpe=BranchType.Branch):
self.name = name
self.tpe = tpe
self.edit_headers = []
self.units = []
self.non_editable_indices = []
self.edit_types = {}
def __str__(self):
return self.name
def get_save_data(self):
dta = list()
for property in self.edit_headers:
dta.append(getattr(self, property))
return dta
class TapChanger:
"""
The **TapChanger** class defines a transformer's tap changer, either onload or
offload. It needs to be attached to a predefined transformer (i.e. a
:ref:`Branch<branch>` object).
The following example shows how to attach a tap changer to a transformer tied to a
voltage regulated :ref:`bus`:
.. code:: ipython3
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.devices import *
from GridCal.Engine.device_types import *
# Create grid
grid = MultiCircuit()
# Create buses
POI = Bus(name="POI",
vnom=100, #kV
is_slack=True)
grid.add_bus(POI)
B_C3 = Bus(name="B_C3",
vnom=10) #kV
grid.add_bus(B_C3)
# Create transformer types
SS = TransformerType(name="SS",
hv_nominal_voltage=100, # kV
lv_nominal_voltage=10, # kV
nominal_power=100, # MVA
copper_losses=10000, # kW
iron_losses=125, # kW
no_load_current=0.5, # %
short_circuit_voltage=8) # %
grid.add_transformer_type(SS)
# Create transformer
X_C3 = Branch(bus_from=POI,
bus_to=B_C3,
name="X_C3",
branch_type=BranchType.Transformer,
template=SS,
bus_to_regulated=True,
vset=1.05)
# Attach tap changer
X_C3.tap_changer = TapChanger(taps_up=16, taps_down=16, max_reg=1.1, min_reg=0.9)
X_C3.tap_changer.set_tap(X_C3.tap_module)
# Add transformer to grid
grid.add_branch(X_C3)
Arguments:
**taps_up** (int, 5): Number of taps position up
**taps_down** (int, 5): Number of tap positions down
**max_reg** (float, 1.1): Maximum regulation up i.e 1.1 -> +10%
**min_reg** (float, 0.9): Maximum regulation down i.e 0.9 -> -10%
Additional Properties:
**tap** (int, 0): Current tap position
"""
def __init__(self, taps_up=5, taps_down=5, max_reg=1.1, min_reg=0.9):
self.max_tap = taps_up
self.min_tap = -taps_down
self.inc_reg_up = (max_reg - 1.0) / taps_up
self.inc_reg_down = (1.0 - min_reg) / taps_down
self.tap = 0
def tap_up(self):
"""
Go to the next upper tap position
"""
if self.tap + 1 <= self.max_tap:
self.tap += 1
def tap_down(self):
"""
        Go to the next lower tap position
"""
if self.tap - 1 >= self.min_tap:
self.tap -= 1
def get_tap(self):
"""
Get the tap voltage regulation module
"""
if self.tap == 0:
return 1.0
elif self.tap > 0:
return 1.0 + self.tap * self.inc_reg_up
elif self.tap < 0:
return 1.0 + self.tap * self.inc_reg_down
def set_tap(self, tap_module):
"""
Set the integer tap position corresponding to a tap value
Attribute:
**tap_module** (float): Tap module centered around 1.0
"""
if tap_module == 1.0:
self.tap = 0
elif tap_module > 1:
self.tap = round((tap_module - 1.0) / self.inc_reg_up)
elif tap_module < 1:
self.tap = -round((1.0 - tap_module) / self.inc_reg_down)
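# Worked example of the tap arithmetic above (illustrative numbers): with
# taps_up=16 and max_reg=1.1, inc_reg_up = (1.1 - 1.0) / 16 = 0.00625, so
# set_tap(1.025) stores tap = round(0.025 / 0.00625) = 4, and get_tap()
# returns 1.0 + 4 * 0.00625 = 1.025 again.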
class Branch(EditableDevice):
"""
    * This class exists for legacy reasons; use the Line or Transformer2W classes instead! *
The **Branch** class represents the connections between nodes (i.e.
:ref:`buses<bus>`) in **GridCal**. A branch is an element (cable, line, capacitor,
transformer, etc.) with an electrical impedance. The basic **Branch** class
includes basic electrical attributes for most passive elements, but other device
types may be passed to the **Branch** constructor to configure it as a specific
type.
For example, a transformer may be created with the following code:
.. code:: ipython3
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Devices import *
from GridCal.Engine.Devices.types import *
# Create grid
grid = MultiCircuit()
# Create buses
POI = Bus(name="POI",
vnom=100, #kV
is_slack=True)
grid.add_bus(POI)
B_C3 = Bus(name="B_C3",
vnom=10) #kV
grid.add_bus(B_C3)
# Create transformer types
SS = TransformerType(name="SS",
hv_nominal_voltage=100, # kV
lv_nominal_voltage=10, # kV
nominal_power=100, # MVA
copper_losses=10000, # kW
iron_losses=125, # kW
no_load_current=0.5, # %
short_circuit_voltage=8) # %
grid.add_transformer_type(SS)
# Create transformer
X_C3 = Branch(bus_from=POI,
bus_to=B_C3,
name="X_C3",
branch_type=BranchType.Transformer,
template=SS,
)
# Add transformer to grid
grid.add_branch(X_C3)
Refer to the :class:`GridCal.Engine.Devices.branch.TapChanger` class for an example
using a voltage regulator.
Arguments:
**bus_from** (:ref:`Bus`): "From" :ref:`bus<Bus>` object
**bus_to** (:ref:`Bus`): "To" :ref:`bus<Bus>` object
**name** (str, "Branch"): Name of the branch
**r** (float, 1e-20): Branch resistance in per unit
**x** (float, 1e-20): Branch reactance in per unit
**g** (float, 1e-20): Branch shunt conductance in per unit
**b** (float, 1e-20): Branch shunt susceptance in per unit
**rate** (float, 1.0): Branch rate in MVA
**tap** (float, 1.0): Branch tap module
**shift_angle** (int, 0): Tap shift angle in radians
**active** (bool, True): Is the branch active?
**tolerance** (float, 0): Tolerance specified for the branch impedance in %
**mttf** (float, 0.0): Mean time to failure in hours
**mttr** (float, 0.0): Mean time to recovery in hours
**r_fault** (float, 0.0): Mid-line fault resistance in per unit (SC only)
**x_fault** (float, 0.0): Mid-line fault reactance in per unit (SC only)
**fault_pos** (float, 0.0): Mid-line fault position in per unit (0.0 = `bus_from`, 0.5 = middle, 1.0 = `bus_to`)
**branch_type** (BranchType, BranchType.Line): Device type enumeration (ex.: :class:`GridCal.Engine.Devices.transformer.TransformerType`)
**length** (float, 0.0): Length of the branch in km
**vset** (float, 1.0): Voltage set-point of the voltage controlled bus in per unit
**temp_base** (float, 20.0): Base temperature at which `r` is measured in °C
**temp_oper** (float, 20.0): Operating temperature in °C
**alpha** (float, 0.0033): Thermal constant of the material in °C
**bus_to_regulated** (bool, False): Is the `bus_to` voltage regulated by this branch?
**template** (BranchTemplate, BranchTemplate()): Basic branch template
"""
def __init__(self, bus_from: Bus = None, bus_to: Bus = None, name='Branch', r=1e-20, x=1e-20, g=1e-20, b=1e-20,
rate=1.0, tap=1.0, shift_angle=0, active=True, tolerance=0, cost=0.0,
mttf=0, mttr=0, r_fault=0.0, x_fault=0.0, fault_pos=0.5,
branch_type: BranchType = BranchType.Line, length=1, vset=1.0,
temp_base=20, temp_oper=20, alpha=0.00330,
bus_to_regulated=False, template=BranchTemplate(), ):
EditableDevice.__init__(self,
name=name,
active=active,
device_type=DeviceType.BranchDevice,
editable_headers={'name': GCProp('', str, 'Name of the branch.'),
'idtag': GCProp('', str, 'Unique ID'),
'bus_from': GCProp('', DeviceType.BusDevice,
'Name of the bus at the "from" side of the branch.'),
'bus_to': GCProp('', DeviceType.BusDevice,
'Name of the bus at the "to" side of the branch.'),
'active': GCProp('', bool, 'Is the branch active?'),
'rate': GCProp('MVA', float, 'Thermal rating power of the branch.'),
'mttf': GCProp('h', float, 'Mean time to failure, '
'used in reliability studies.'),
'mttr': GCProp('h', float, 'Mean time to recovery, '
'used in reliability studies.'),
'R': GCProp('p.u.', float, 'Total resistance.'),
'X': GCProp('p.u.', float, 'Total reactance.'),
'G': GCProp('p.u.', float, 'Total shunt conductance.'),
'B': GCProp('p.u.', float, 'Total shunt susceptance.'),
'tolerance': GCProp('%', float,
'Tolerance expected for the impedance values\n'
'7% is expected for transformers\n'
'0% for lines.'),
'length': GCProp('km', float, 'Length of the branch '
'(not used for calculation)'),
'tap_module': GCProp('', float, 'Tap changer module, '
                                                               'typically a value close to 1.0'),
'angle': GCProp('rad', float, 'Angle shift of the tap changer.'),
'bus_to_regulated': GCProp('', bool, 'Is the bus tap regulated?'),
'vset': GCProp('p.u.', float, 'Objective voltage at the "to" side of '
'the bus when regulating the tap.'),
'temp_base': GCProp('ºC', float, 'Base temperature at which R was '
'measured.'),
'temp_oper': GCProp('ºC', float, 'Operation temperature to modify R.'),
'alpha': GCProp('1/ºC', float, 'Thermal coefficient to modify R,\n'
'around a reference temperature\n'
'using a linear approximation.\n'
'For example:\n'
'Copper @ 20ºC: 0.004041,\n'
'Copper @ 75ºC: 0.00323,\n'
'Annealed copper @ 20ºC: 0.00393,\n'
'Aluminum @ 20ºC: 0.004308,\n'
'Aluminum @ 75ºC: 0.00330'),
'Cost': GCProp('e/MWh', float,
'Cost of overloads. Used in OPF.'),
'r_fault': GCProp('p.u.', float, 'Resistance of the mid-line fault.\n'
'Used in short circuit studies.'),
'x_fault': GCProp('p.u.', float, 'Reactance of the mid-line fault.\n'
'Used in short circuit studies.'),
'fault_pos': GCProp('p.u.', float,
'Per-unit positioning of the fault:\n'
'0 would be at the "from" side,\n'
'1 would be at the "to" side,\n'
'therefore 0.5 is at the middle.'),
'branch_type': GCProp('', BranchType, ''),
'template': GCProp('', BranchTemplate, '')},
non_editable_attributes=['bus_from', 'bus_to', 'template'],
properties_with_profile={'active': 'active_prof',
'rate': 'rate_prof',
'temp_oper': 'temp_oper_prof',
'Cost': 'Cost_prof'})
# connectivity
self.bus_from = bus_from
self.bus_to = bus_to
# List of measurements
self.measurements = list()
# line length in km
self.length = length
# branch impedance tolerance
self.tolerance = tolerance
# short circuit impedance
self.r_fault = r_fault
self.x_fault = x_fault
self.fault_pos = fault_pos
# total impedance and admittance in p.u.
self.R = r
self.X = x
self.G = g
self.B = b
self.mttf = mttf
self.mttr = mttr
self.Cost = cost
self.Cost_prof = None
self.active_prof = None
# Conductor base and operating temperatures in ºC
self.temp_base = temp_base
self.temp_oper = temp_oper
self.temp_oper_prof = None
# Conductor thermal constant (1/ºC)
self.alpha = alpha
# tap changer object
self.tap_changer = TapChanger()
# Tap module
if tap != 0:
self.tap_module = tap
self.tap_changer.set_tap(self.tap_module)
else:
self.tap_module = self.tap_changer.get_tap()
# Tap angle
self.angle = shift_angle
# branch rating in MVA
self.rate = rate
self.rate_prof = None
# branch type: Line, Transformer, etc...
self.branch_type = branch_type
# type template
self.template = template
self.bus_to_regulated = bus_to_regulated
self.vset = vset
# converter for enumerations
self.conv = {'branch': BranchType.Branch,
'line': BranchType.Line,
'transformer': BranchType.Transformer,
'switch': BranchType.Switch,
'reactance': BranchType.Reactance}
self.inv_conv = {val: key for key, val in self.conv.items()}
@property
def R_corrected(self):
"""
Returns a temperature corrected resistance based on a formula provided by:
NFPA 70-2005, National Electrical Code, Table 8, footnote #2; and
https://en.wikipedia.org/wiki/Electrical_resistivity_and_conductivity#Linear_approximation
(version of 2019-01-03 at 15:20 EST).
"""
return self.R * (1 + self.alpha * (self.temp_oper - self.temp_base))
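    # Worked example for R_corrected (illustrative values): with R = 0.01 p.u.,
    # alpha = 0.004 1/ºC, temp_base = 20 ºC and temp_oper = 75 ºC,
    # R_corrected = 0.01 * (1 + 0.004 * (75 - 20)) = 0.0122 p.u.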
def get_weight(self):
return np.sqrt(self.R * self.R + self.X * self.X)
def branch_type_converter(self, val_string):
"""
function to convert the branch type string into the BranchType
:param val_string:
:return: branch type conversion
"""
return self.conv[val_string.lower()]
def copy(self, bus_dict=None):
"""
Returns a copy of the branch
@return: A new with the same content as this
"""
if bus_dict is None:
f = self.bus_from
t = self.bus_to
else:
f = bus_dict[self.bus_from]
t = bus_dict[self.bus_to]
# z_series = complex(self.R, self.X)
# y_shunt = complex(self.G, self.B)
b = Branch(bus_from=f,
bus_to=t,
name=self.name,
r=self.R,
x=self.X,
g=self.G,
b=self.B,
rate=self.rate,
tap=self.tap_module,
shift_angle=self.angle,
active=self.active,
mttf=self.mttf,
mttr=self.mttr,
bus_to_regulated=self.bus_to_regulated,
vset=self.vset,
temp_base=self.temp_base,
temp_oper=self.temp_oper,
alpha=self.alpha,
branch_type=self.branch_type,
template=self.template)
        b.measurements = self.measurements
        if self.active_prof is not None:
            b.active_prof = self.active_prof.copy()
return b
def tap_up(self):
"""
Move the tap changer one position up
"""
self.tap_changer.tap_up()
self.tap_module = self.tap_changer.get_tap()
def tap_down(self):
"""
        Move the tap changer one position down
"""
self.tap_changer.tap_down()
self.tap_module = self.tap_changer.get_tap()
def apply_tap_changer(self, tap_changer: TapChanger):
"""
Apply a new tap changer
Argument:
**tap_changer** (:class:`GridCal.Engine.Devices.branch.TapChanger`): Tap changer object
"""
self.tap_changer = tap_changer
if self.tap_module != 0:
self.tap_changer.set_tap(self.tap_module)
else:
self.tap_module = self.tap_changer.get_tap()
def get_virtual_taps(self):
"""
Get the branch virtual taps
        Virtual taps arise when a transformer's nominal winding voltage differs
        from the nominal voltage of the bus it is connected to.
Returns:
**tap_f** (float, 1.0): Virtual tap at the *from* side
**tap_t** (float, 1.0): Virtual tap at the *to* side
"""
if self.branch_type == BranchType.Transformer and type(self.template) == TransformerType:
# resolve how the transformer is actually connected and set the virtual taps
bus_f_v = self.bus_from.Vnom
bus_t_v = self.bus_to.Vnom
dhf = abs(self.template.HV - bus_f_v)
dht = abs(self.template.HV - bus_t_v)
if dhf < dht:
# the HV side is on the from side
tpe_f_v = self.template.HV
tpe_t_v = self.template.LV
else:
# the HV side is on the to side
tpe_t_v = self.template.HV
tpe_f_v = self.template.LV
tap_f = tpe_f_v / bus_f_v
tap_t = tpe_t_v / bus_t_v
return tap_f, tap_t
else:
return 1.0, 1.0
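    # Worked example for the virtual taps (illustrative values): a 100/10 kV
    # template connected to buses with Vnom = 110 kV and 10 kV gives
    # dhf = 10 < dht = 90, so the HV winding sits on the "from" side and
    # tap_f = 100 / 110 ~ 0.909, tap_t = 10 / 10 = 1.0.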
def apply_template(self, obj, Sbase, logger=Logger()):
"""
Apply a branch template to this object
Arguments:
**obj**: TransformerType or Tower object
**Sbase** (float): Nominal power in MVA
**logger** (list, []): Log list
"""
if type(obj) is TransformerType:
if self.branch_type == BranchType.Transformer:
# get the transformer impedance in the base of the transformer
z_series, zsh = obj.get_impedances()
# Change the impedances to the system base
base_change = Sbase / obj.rating
z_series *= base_change
zsh *= base_change
# compute the shunt admittance
if zsh.real != 0.0 or zsh.imag != 0.0:
y_shunt = 1.0 / zsh
else:
y_shunt = complex(0, 0)
self.R = np.round(z_series.real, 6)
self.X = np.round(z_series.imag, 6)
self.G = np.round(y_shunt.real, 6)
self.B = np.round(y_shunt.imag, 6)
self.rate = obj.rating
if obj != self.template:
self.template = obj
self.branch_type = BranchType.Transformer
else:
raise Exception('You are trying to apply a transformer type to a non-transformer branch')
elif type(obj) is Tower:
if self.branch_type == BranchType.Line:
Vn = self.bus_to.Vnom
Zbase = (Vn * Vn) / Sbase
Ybase = 1 / Zbase
z = obj.z_series() * self.length / Zbase
y = obj.y_shunt() * self.length / Ybase
self.R = np.round(z.real, 6)
self.X = np.round(z.imag, 6)
self.G = np.round(y.real, 6)
self.B = np.round(y.imag, 6)
# get the rating in MVA = kA * kV
self.rate = obj.rating * Vn * SQRT3
if obj != self.template:
self.template = obj
self.branch_type = BranchType.Line
else:
raise Exception('You are trying to apply an Overhead line type to a non-line branch')
elif type(obj) is UndergroundLineType:
Vn = self.bus_to.Vnom
Zbase = (Vn * Vn) / Sbase
Ybase = 1 / Zbase
z = obj.z_series() * self.length / Zbase
y = obj.y_shunt() * self.length / Ybase
self.R = np.round(z.real, 6)
self.X = np.round(z.imag, 6)
self.G = np.round(y.real, 6)
self.B = np.round(y.imag, 6)
# get the rating in MVA = kA * kV
self.rate = obj.rating * Vn * SQRT3
if obj != self.template:
self.template = obj
self.branch_type = BranchType.Line
elif type(obj) is SequenceLineType:
Vn = self.bus_to.Vnom
Zbase = (Vn * Vn) / Sbase
Ybase = 1 / Zbase
self.R = np.round(obj.R * self.length / Zbase, 6)
self.X = np.round(obj.X * self.length / Zbase, 6)
self.G = np.round(obj.G * self.length / Ybase, 6)
self.B = np.round(obj.B * self.length / Ybase, 6)
# get the rating in MVA = kA * kV
self.rate = obj.rating * Vn * SQRT3
if obj != self.template:
self.template = obj
self.branch_type = BranchType.Line
elif type(obj) is BranchTemplate:
# this is the default template that does nothing
pass
else:
            logger.append(self.name + ': the object template type was not recognised')
def get_save_data(self):
"""
Return the data that matches the edit_headers
:return:
"""
data = list()
for name, properties in self.editable_headers.items():
obj = getattr(self, name)
if properties.tpe == BranchType:
obj = self.branch_type.value
elif properties.tpe == BranchTemplate:
if obj is None:
obj = ''
else:
obj = str(obj)
elif properties.tpe not in [str, float, int, bool]:
obj = str(obj)
data.append(obj)
return data
def get_properties_dict(self):
"""
Get json dictionary
:return:
"""
d = {'id': self.idtag,
'type': 'branch',
'phases': 'ps',
'name': self.name,
'from': self.bus_from.idtag,
'to': self.bus_to.idtag,
'active': self.active,
'rate': self.rate,
'r': self.R,
'x': self.X,
'g': self.G,
'b': self.B,
'length': self.length,
'tap_module': self.tap_module,
'bus_to_regulated': self.bus_to_regulated,
'vset': self.vset,
'temp_base': self.temp_base,
'temp_oper': self.temp_oper,
'alpha': self.alpha,
'tap_angle': self.angle,
'branch_type': str(self.branch_type),
'active_profile': [],
'rate_prof': []}
if self.active_prof is not None:
d['active_profile'] = self.active_prof.tolist()
d['rate_prof'] = self.rate_prof.tolist()
return d
def plot_profiles(self, time_series=None, my_index=0, show_fig=True):
"""
Plot the time series results of this object
:param time_series: TimeSeries Instance
:param my_index: index of this object in the simulation
:param show_fig: Show the figure?
"""
if time_series is not None:
fig = plt.figure(figsize=(12, 8))
ax_1 = fig.add_subplot(211)
ax_2 = fig.add_subplot(212)
x = time_series.results.time
# loading
y = time_series.results.loading * 100.0
df = pd.DataFrame(data=y[:, my_index], index=x, columns=[self.name])
ax_1.set_title('Loading', fontsize=14)
ax_1.set_ylabel('Loading [%]', fontsize=11)
df.plot(ax=ax_1)
# losses
y = time_series.results.losses
df = pd.DataFrame(data=y[:, my_index], index=x, columns=[self.name])
ax_2.set_title('Losses', fontsize=14)
ax_2.set_ylabel('Losses [MVA]', fontsize=11)
df.plot(ax=ax_2)
plt.legend()
fig.suptitle(self.name, fontsize=20)
if show_fig:
plt.show()
def get_coordinates(self):
"""
Get the branch defining coordinates
"""
return [self.bus_from.get_coordinates(), self.bus_to.get_coordinates()]
def convert_branch(branch: Branch):
"""
:param branch:
:return:
"""
if branch.branch_type == BranchType.Line:
return Line(bus_from=branch.bus_from,
bus_to=branch.bus_to,
name=branch.name,
r=branch.R,
x=branch.X,
b=branch.B,
rate=branch.rate,
active=branch.active,
tolerance=branch.tolerance,
cost=branch.Cost,
mttf=branch.mttf,
mttr=branch.mttr,
r_fault=branch.r_fault,
x_fault=branch.x_fault,
fault_pos=branch.fault_pos,
length=branch.length,
temp_base=branch.temp_base,
temp_oper=branch.temp_oper,
alpha=branch.alpha,
rate_prof=branch.rate_prof,
Cost_prof=branch.Cost_prof,
active_prof=branch.active_prof,
temp_oper_prof=branch.temp_oper_prof)
elif branch.branch_type == BranchType.Transformer:
return Transformer2W(bus_from=branch.bus_from,
bus_to=branch.bus_to,
name=branch.name,
r=branch.R,
x=branch.X,
b=branch.B,
rate=branch.rate,
active=branch.active,
tolerance=branch.tolerance,
cost=branch.Cost,
mttf=branch.mttf,
mttr=branch.mttr,
tap=branch.tap_module,
shift_angle=branch.angle,
vset=branch.vset,
bus_to_regulated=branch.bus_to_regulated,
temp_base=branch.temp_base,
temp_oper=branch.temp_oper,
alpha=branch.alpha,
template=branch.template,
rate_prof=branch.rate_prof,
Cost_prof=branch.Cost_prof,
active_prof=branch.active_prof,
temp_oper_prof=branch.temp_oper_prof)
else:
return branch
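# --- Hedged usage sketch (editorial addition; the names below are
# illustrative assumptions about the surrounding GridCal API, not confirmed
# signatures) ---
#
#   old = Branch(bus_from=b1, bus_to=b2, name='L1', r=0.01, x=0.05)
#   dev = convert_branch(old)
#   # dev is a Line for BranchType.Line, a Transformer2W for
#   # BranchType.Transformer, and the original branch otherwise.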
|
{"hexsha": "4c3693dd2c62eefd5b842d77d4df0c1627c14c07", "size": 31249, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/GridCal/Engine/Devices/branch.py", "max_stars_repo_name": "vineetjnair9/GridCal", "max_stars_repo_head_hexsha": "5b63cbae45cbe176b015e5e99164a593f450fe71", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/GridCal/Engine/Devices/branch.py", "max_issues_repo_name": "vineetjnair9/GridCal", "max_issues_repo_head_hexsha": "5b63cbae45cbe176b015e5e99164a593f450fe71", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GridCal/Engine/Devices/branch.py", "max_forks_repo_name": "vineetjnair9/GridCal", "max_forks_repo_head_hexsha": "5b63cbae45cbe176b015e5e99164a593f450fe71", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9373522459, "max_line_length": 145, "alphanum_fraction": 0.491279721, "include": true, "reason": "import numpy", "num_tokens": 6710}
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Make an encoding model that uses 'latent' retina information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
import numpy as np
import retina.response_model.python.metric_learning.end_to_end.losses as losses
import retina.response_model.python.metric_learning.end_to_end.response_embedding as resp
import retina.response_model.python.metric_learning.end_to_end.stimulus_embedding as stim
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
def convolutional_encoder(sess, is_training, dimx, dimy):
"""Find latent representation and use it for predicting responses.
TODO(bhaishahster): Subsample retinas in training,
Args:
sess : Tensorflow session.
is_training : Either training or evaluation mode.
dimx: X dimension of the stimulus.
dimy: Y dimension of the stimulus.
Returns:
sr_graph : Container of the embedding parameters and losses.
"""
# Find latent dimensions
layers = FLAGS.resp_layers
  latent_dimensionality = int(layers.split(',')[-2])
# format: window x filters x stride
# NOTE: final layer - filters=1, stride =1 throughout
batch_norm = FLAGS.batch_norm
anchor_model = resp.Convolutional2(time_window=1,
layers=layers,
batch_norm=batch_norm,
is_training=is_training,
reuse_variables=False,
num_cell_types=2,
dimx=dimx, dimy=dimy)
# Use same model to embed negative responses using reuse_variables=True.
neg_model = resp.Convolutional2(time_window=1,
layers=layers,
batch_norm=batch_norm,
is_training=is_training,
reuse_variables=True,
num_cell_types=2,
dimx=dimx, dimy=dimy)
# Get retina parameters from negative responses.
retina_params = tf.reduce_mean(tf.reduce_mean(tf.reduce_mean(neg_model.responses_embed,
0), 0), 0)
## Go from stimulus to response prediction for the population.
# Get stimulus placeholder.
stim_tf = tf.placeholder(tf.float32,
shape=[None, dimx,
dimy, 30]) # batch x X x Y x time_window
# Do 1 layer of convolution on stimulus.
# Set normalization
if batch_norm:
normalizer_fn = slim.batch_norm
else:
normalizer_fn = None
if FLAGS.sr_model == 'convolutional_encoder': #or FLAGS.sr_model == 'convolutional_encoder_2'
pred_fcn = pred_responses_latent
if FLAGS.sr_model == 'convolutional_encoder_2':
pred_fcn = pred_responses_latent_2
response_predicted = pred_fcn(retina_params, stim_tf,
latent_dimensionality,
normalizer_fn, is_training,
reuse_variables=False)
# Predict responses using arbitrary retina parameters.
retina_params_arbitrary = tf.placeholder(tf.float32,
shape=[latent_dimensionality],
name='retina_params_arbitrary')
response_pred_from_arbit_ret_params = pred_fcn(retina_params_arbitrary,
stim_tf,
latent_dimensionality,
normalizer_fn, is_training,
reuse_variables=True)
tfd = tf.expand_dims
  # Poisson negative log-likelihood (up to a constant), averaged over the
  # valid cell locations: sum over cells of (pred - observed * log(pred)).
  loss = (tf.reduce_sum((response_predicted -
                         anchor_model.embed_responses_original *
                         tf.log(response_predicted)) *
                        tfd(anchor_model.embed_locations_original, 0)) /
          tf.reduce_sum(anchor_model.embed_locations_original))
loss_regularization = 0.5 * tf.reduce_sum(tf.square(retina_params))
loss_total = (FLAGS.scale_encode * loss +
FLAGS.scale_regularization * loss_regularization)
train_op = tf.train.AdagradOptimizer(FLAGS.learning_rate).minimize(loss_total)
# Store everything in a graph.
sr_graph = collections.namedtuple('SR_Graph', 'sess train_op anchor_model'
' neg_model'
' loss'
' stim_tf fr_predicted retina_params'
' response_pred_from_arbit_ret_params'
' retina_params_arbitrary')
sr_graph = sr_graph(sess, train_op, anchor_model, neg_model, loss,
stim_tf, response_predicted, retina_params,
response_pred_from_arbit_ret_params, retina_params_arbitrary)
return sr_graph
def pred_responses_latent(retina_params, stim_tf, latent_dimensionality,
normalizer_fn, is_training, reuse_variables=False):
"""Use latent representation and stimulus to predict responses.
Do outer product on first layer.
"""
stim_ld = slim.conv2d(stim_tf, latent_dimensionality,
[1, 1],
stride=1,
scope='stim_downsample',
normalizer_fn=normalizer_fn,
activation_fn=tf.nn.relu,
reuse=reuse_variables)
# Use response embedding to
stim_ret_outer_prod = tf.expand_dims(stim_ld, 4) * retina_params
shape = stim_ret_outer_prod.get_shape().as_list()
stim_ret_merged = tf.reshape(stim_ret_outer_prod,
[-1, shape[1],
shape[2], latent_dimensionality ** 2])
response_predicted, _= stim.convolutional_encode(FLAGS.stim_layers.split(','),
FLAGS.batch_norm,
stim_ret_merged,
is_training,
reuse_variables=reuse_variables)
return response_predicted
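# Editorial note (hedged) on the outer product above, written as shapes:
#   stim_ld:                   [batch, X, Y, L]     with L = latent_dimensionality
#   tf.expand_dims(stim_ld, 4) * retina_params
#                           -> [batch, X, Y, L, L]  every stimulus feature is
#                                                   scaled by every latent dim
#   tf.reshape(...)         -> [batch, X, Y, L*L]   fed to the stimulus CNN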
def pred_responses_latent_2(retina_params, stim_tf, latent_dimensionality,
normalizer_fn, is_training, reuse_variables=False):
"""Use latent representation and stimulus to predict responses
Feature weighting using latent dimensionality.
"""
##
layers = FLAGS.stim_layers.split(',')
n_layers = int(len(layers)/3)
tf.logging.info('Number of layers: %d' % n_layers)
##
# Take latent vector and get scales, shifts for different layers by a fully connected layer
n_scales = np.sum([int(layers[ilayer * 3 + 1]) for ilayer in range(n_layers)])
n_shifts = np.sum([int(layers[ilayer * 3 + 1]) for ilayer in range(n_layers)])
retina_scales = slim.fully_connected(tf.expand_dims(retina_params, 0),
n_scales,
activation_fn=None,
reuse=reuse_variables,
scope='latent_to_scales')
retina_scales = retina_scales[0, :]
retina_shifts = slim.fully_connected(tf.expand_dims(retina_params, 0),
n_scales,
activation_fn=None,
reuse=reuse_variables,
scope='latent_to_shifts')
retina_shifts = retina_shifts[0, :]
#if latent_dimensionality != n_scales:
# raise ValueError('Latent dimensionality must be same as sum of widths of each layer')
activation_fn = tf.nn.softplus
  tf.logging.info('Softplus activation')
# Use slim to define multiple layers of convolution.
net = stim_tf
layer_collection = [net]
latent_idx = 0
for ilayer in range(n_layers):
# Convolve
tf.logging.info('Building stimulus embedding layer: %d, %d, %d'
% (int(layers[ilayer*3 + 1]), int(layers[ilayer*3]),
int(layers[ilayer*3 + 2])))
net = slim.conv2d(net, int(layers[ilayer*3 + 1]),
int(layers[ilayer*3]),
stride=int(layers[ilayer*3 + 2]),
scope='stim_layer_wt_%d' % ilayer,
reuse=reuse_variables,
normalizer_fn=normalizer_fn,
activation_fn=None,
normalizer_params={'is_training': is_training})
# Scale using latent dimensionality after each layer.
#if latent_dimensionality >= latent_idx + 1 + int(layers[ilayer*3 + 1]):
net = (net * retina_scales[latent_idx:
latent_idx + int(layers[ilayer*3 + 1])] +
retina_shifts[latent_idx: latent_idx + int(layers[ilayer*3 + 1])])
if ilayer == n_layers - 1:
net = tf.exp(net) # Put exponential nonlinearity in last layer.
else:
net = activation_fn(net)
'''
print('CAREFUL: REMOVED EXPONENT - resp prediction!!!!!!!')
net = activation_fn(net)
'''
latent_idx += int(layers[ilayer*3 + 1])
layer_collection += [net]
return net
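# Editorial note (hedged): the per-layer retina_scales / retina_shifts above
# amount to feature-wise linear modulation (FiLM-style conditioning),
#   y = gamma(z) * x + beta(z),
# with gamma and beta produced from the retina latent z by a fully connected
# layer; the last layer applies exp() so predicted firing rates stay positive.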
def embed_ei(ei_tf, normalizer_fn, is_training, reuse_variables=False):
layers = FLAGS.ei_layers.split(',')
n_layers = int(len(layers)/3)
tf.logging.info('Number of layers: %d' % n_layers)
activation_fn = tf.nn.softplus
  tf.logging.info('Softplus activation')
# Use slim to define multiple layers of convolution.
net = ei_tf
layer_collection = [net]
latent_idx = 0
for ilayer in range(n_layers):
# Convolve
    tf.logging.info('Building EI embedding layer: %d, %d, %d'
% (int(layers[ilayer*3 + 1]), int(layers[ilayer*3]),
int(layers[ilayer*3 + 2])))
net = slim.conv2d(net, int(layers[ilayer*3 + 1]),
int(layers[ilayer*3]),
stride=int(layers[ilayer*3 + 2]),
scope='ei_layer_wt_%d' % ilayer,
reuse=reuse_variables,
normalizer_fn=normalizer_fn,
activation_fn=None,
normalizer_params={'is_training': is_training})
if ilayer < n_layers - 1: # No rectification for the last layer.
net = activation_fn(net)
#print('REMOVED SOFTPLUS IN FINAL LAYER OF EI EMBEDDING!!')
# Final EI embedding
net = tf.reduce_mean(tf.reduce_mean(tf.reduce_mean(net, 0), 0), 0)
return net
def convolutional_encoder_using_retina_id(sess, is_training, dimx, dimy, n_retinas):
"""Find latent representation and use it for predicting responses.
Use one-hot encoding of retina-id to interpolate responses.
Args:
sess : Tensorflow session.
is_training : Either training or evaluation mode.
dimx: X dimension of the stimulus.
dimy: Y dimension of the stimulus.
n_retinas: Number of retinas
Returns:
sr_graph : Container of the embedding parameters and losses.
"""
# Find latent dimensions
layers = FLAGS.resp_layers
  latent_dimensionality = int(layers.split(',')[-2])
# format: window x filters x stride
# NOTE: final layer - filters=1, stride =1 throughout
batch_norm = FLAGS.batch_norm
anchor_model = resp.Convolutional2(time_window=1,
layers=layers,
batch_norm=batch_norm,
is_training=is_training,
reuse_variables=False,
num_cell_types=2,
dimx=dimx, dimy=dimy)
# Get retina parameters from negative responses.
# get 'retina_params' from 1 hot retina-id.
retina_indicator = tf.placeholder(shape=[n_retinas], dtype=tf.float32)
a_mat = tf.get_variable('a_mat', shape=[latent_dimensionality,
n_retinas])
retina_params = tf.matmul(a_mat, tf.expand_dims(retina_indicator, 1))
retina_params = retina_params[:, 0]
## Go from stimulus to response prediction for the population.
# Get stimulus placeholder.
stim_tf = tf.placeholder(tf.float32,
shape=[None, dimx,
dimy, 30]) # batch x X x Y x time_window
# Do 1 layer of convolution on stimulus.
# Set normalization
if batch_norm:
normalizer_fn = slim.batch_norm
else:
normalizer_fn = None
pred_fcn = pred_responses_latent_2
response_predicted = pred_fcn(retina_params, stim_tf,
latent_dimensionality,
normalizer_fn, is_training,
reuse_variables=False)
# Predict responses using arbitrary retina parameters.
retina_params_arbitrary = tf.placeholder(tf.float32,
shape=[latent_dimensionality],
name='retina_params_arbitrary')
response_pred_from_arbit_ret_params = pred_fcn(retina_params_arbitrary,
stim_tf,
latent_dimensionality,
normalizer_fn, is_training,
reuse_variables=True)
tfd = tf.expand_dims
  # Poisson negative log-likelihood over valid cell locations, as above.
  loss = (tf.reduce_sum((response_predicted -
                         anchor_model.embed_responses_original *
                         tf.log(response_predicted)) *
                        tfd(anchor_model.embed_locations_original, 0)) /
          tf.reduce_sum(anchor_model.embed_locations_original))
loss_regularization = 0.5 * tf.reduce_sum(tf.square(retina_params))
loss_total = (FLAGS.scale_encode * loss +
FLAGS.scale_regularization * loss_regularization)
  loss_arbit_ret_params = (
      tf.reduce_sum((response_pred_from_arbit_ret_params -
                     anchor_model.embed_responses_original *
                     tf.log(response_pred_from_arbit_ret_params)) *
                    tfd(anchor_model.embed_locations_original, 0)) /
      tf.reduce_sum(anchor_model.embed_locations_original))
# Use EIs for blind retina.
str_ei_embedding = ''
ei_params = ()
if FLAGS.use_EIs:
# Get retina parameters from EIs.
# get : retina_params_from_ei
ei_image = tf.placeholder(tf.float32, shape=[None, 64, 32])
# Set normalization
if FLAGS.batch_norm_ei:
normalizer_fn = slim.batch_norm
else:
normalizer_fn = None
retina_params_from_ei = embed_ei(tf.expand_dims(ei_image, 3),
normalizer_fn, is_training,
reuse_variables=False)
# Predict responses from retina params from EIs.
if batch_norm:
normalizer_fn = slim.batch_norm
else:
normalizer_fn = None
response_pred_from_eis = pred_fcn(retina_params_from_ei,
stim_tf,
latent_dimensionality,
normalizer_fn, is_training,
reuse_variables=True)
    loss_from_ei = (
        tf.reduce_sum((response_pred_from_eis -
                       anchor_model.embed_responses_original *
                       tf.log(response_pred_from_eis)) *
                      tfd(anchor_model.embed_locations_original, 0)) /
        tf.reduce_sum(anchor_model.embed_locations_original))
loss_regularization_from_ei = 0.5 * tf.reduce_sum(tf.square(retina_params_from_ei))
loss_match_embeddings = 0.5 * tf.reduce_sum(tf.square(retina_params - retina_params_from_ei))
loss_total_ei = (FLAGS.scale_encode_from_ei * loss_from_ei +
FLAGS.scale_regularization_from_ei * loss_regularization_from_ei +
FLAGS.scale_match_embeddding * loss_match_embeddings)
loss_total = loss_total + loss_total_ei
str_ei_embedding = (' ei_image retina_params_from_ei response_pred_from_eis'
' loss_from_ei loss_regularization_from_ei'
' loss_match_embeddings loss_total_ei')
ei_params = (ei_image, retina_params_from_ei, response_pred_from_eis,
loss_from_ei, loss_regularization_from_ei,
loss_match_embeddings, loss_total_ei)
train_op = tf.train.AdagradOptimizer(FLAGS.learning_rate).minimize(loss_total)
accuracy_tf = (tf.reduce_sum(anchor_model.embed_responses_original *
(response_predicted - tf.reduce_mean(response_predicted, 0)) *
tfd(anchor_model.embed_locations_original, 0)) / tf.reduce_sum(anchor_model.embed_locations_original))
# Store everything in a graph.
sr_graph = collections.namedtuple('SR_Graph', 'sess train_op anchor_model'
' loss loss_arbit_ret_params'
' stim_tf fr_predicted retina_params'
' response_pred_from_arbit_ret_params'
' retina_params_arbitrary retina_indicator a_mat accuracy_tf' + str_ei_embedding)
sr_graph = sr_graph(sess, train_op, anchor_model, loss, loss_arbit_ret_params,
stim_tf, response_predicted, retina_params,
response_pred_from_arbit_ret_params,
retina_params_arbitrary, retina_indicator, a_mat,
accuracy_tf, *ei_params)
return sr_graph
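# Editorial note (hedged): multiplying a_mat by a one-hot retina indicator is
# just a column lookup, i.e. a learned embedding table with one latent vector
# per training retina. A minimal numpy analogue (names are illustrative):
#   a_mat = np.random.randn(latent_dimensionality, n_retinas)
#   retina_params = a_mat[:, retina_id]   # == a_mat @ one_hot(retina_id)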
|
{"hexsha": "3adedf3d5c6631908e61c91553ba220eb45ad442", "size": 18209, "ext": "py", "lang": "Python", "max_stars_repo_path": "response_model/python/metric_learning/end_to_end/encoding_models_experimental.py", "max_stars_repo_name": "googlearchive/rgc-models", "max_stars_repo_head_hexsha": "0dea94bbd54f591d82d95169e33d40bb55b6be94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-18T16:47:09.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-18T16:47:09.000Z", "max_issues_repo_path": "response_model/python/metric_learning/end_to_end/encoding_models_experimental.py", "max_issues_repo_name": "google/rgc-models", "max_issues_repo_head_hexsha": "0dea94bbd54f591d82d95169e33d40bb55b6be94", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "response_model/python/metric_learning/end_to_end/encoding_models_experimental.py", "max_forks_repo_name": "google/rgc-models", "max_forks_repo_head_hexsha": "0dea94bbd54f591d82d95169e33d40bb55b6be94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-12T12:44:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T12:44:17.000Z", "avg_line_length": 41.7637614679, "max_line_length": 174, "alphanum_fraction": 0.5994288539, "include": true, "reason": "import numpy", "num_tokens": 3670}
|
import cv2, pyrebase, socket,struct
import numpy as np
from tracker import CentroidTracker
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(33,GPIO.IN)
GPIO.setup(34,GPIO.IN)
GPIO.setup(36,GPIO.OUT)
GPIO.setup(37,GPIO.OUT)
RollOut = GPIO.PWM(36,300)
PitchOut = GPIO.PWM(37,300)
RollOut.start(50)
PitchOut.start(50)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
print('HOST IP:', host_ip)
port = 9999
socket_address = (host_ip, port)
server_socket.bind(socket_address)
server_socket.listen(5)
print("LISTENING AT:", socket_address)
#=============== Video Streamer ===============#
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host_ip = '192.168.137.33' #RPi ip
port = 9000
client_socket.connect((host_ip, port))
data = b""
payload_size = struct.calcsize("Q")
#==============================================#
tracker = CentroidTracker(maxDisappeared=80)
config = {
############ Firebase details
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
# ========= Variables =========#
ROI_dif = 100
counter = 0
Move_Dir = "null"
FrameCentre = 640
PreArea = 0
# =============================#
# ===== Getting Video =====#
cap = cv2.VideoCapture("rtsp://192.168.137.33:8554")
# cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('TrackAI.avi',fourcc, 3.2, (640,480), True)
# =========================#
# =========== Obj_Detection_Config ===========#
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
# ============================================#
RpiSocket, addr = server_socket.accept()
print("Got connection from", addr)
while True:
    #################################
    RollIN = 50   # placeholder duty cycle; should come from the receiver
    PitchIN = 50  # placeholder duty cycle; should come from the receiver
    # NOTE: The method I used to read PWM signals from the receiver is not
    # even close to accurate, so these default to 50 for now; a more precise
    # edge-timestamping approach is sketched at the end of this file.
    # RollIN reads PWM on pin 33
    # PitchIN reads PWM on pin 34
    #################################
try:
trackstatus = db.child("checkpoint").get().val()
if (trackstatus == "track_proceed"):
if (counter == 0):
Detected_ClassId = 0
                RoiX1 = int(db.child("coordinates").child("Xa1").get().val())
                RoiY1 = int(db.child("coordinates").child("Ya1").get().val())
                RoiX2 = int(db.child("coordinates").child("Xa2").get().val())
                RoiY2 = int(db.child("coordinates").child("Ya2").get().val())
PreArea = abs((RoiX1-RoiX2)*(RoiY1-RoiY2))
# ================ ROI & ObjD Initially =================#
rects = []
success, img = cap.read()
classIds, confs, bbox = net.detect(img, confThreshold=0.4)
bbox = list(bbox)
confs = list(np.array(confs).reshape(1, -1)[0])
confs = list(map(float, confs))
indicies = cv2.dnn.NMSBoxes(bbox, confs, 0.2, 0.1)
if len(classIds) != 0:
i = indicies[0][0]
box = bbox[i]
rects.append(box)
object = tracker.update(rects)
X = object[1][0][0]
Y = object[1][0][1]
W = object[1][0][2]
H = object[1][0][3]
                    X2 = (X + W)
                    Y2 = (Y + H)
                    if abs(RoiX1 - X) <= ROI_dif and abs(RoiY1 - Y) <= ROI_dif and abs(RoiX2 - X2) <= ROI_dif and abs(RoiY2 - Y2) <= ROI_dif:
Detected_ClassId = classIds[0][0]
# =========================================================#
counter = (counter + 1)
elif (counter == 1):
rects = []
success, img = cap.read()
classIds, confs, bbox = net.detect(img, confThreshold=0.4)
if len(classIds) != 0:
if classIds[0][0] == Detected_ClassId:
bbox = list(bbox)
confs = list(np.array(confs).reshape(1, -1)[0])
confs = list(map(float, confs))
indicies = cv2.dnn.NMSBoxes(bbox, confs, 0.2, 0.1)
for i in indicies:
i = i[0]
box = bbox[i]
rects.append(box)
object = tracker.update(rects)
X = object[1][0][0]
Y = object[1][0][1]
W = object[1][0][2]
H = object[1][0][3]
PostArea = (W*H)
#print("ClassID:-", classIds[0][0])
cv2.rectangle(img, (X, Y), (X + W, Y + H), (0, 255, 0), 2)
cX = (X + (X + W)) / 2
# extreme diff = 310
# minimum diff = 150
# RIGHT
if((cX - FrameCentre) >= 150 and (cX - FrameCentre) < 310):
Move_Dir = "right"
RollOut.ChangeDutyCycle(58)
elif((cX - FrameCentre) >= 310):
Move_Dir = "Right"
RollOut.ChangeDutyCycle(32)
# LEFT
elif ((FrameCentre - cX) >= 150 and (FrameCentre - cX) < 310):
Move_Dir = "left"
RollOut.ChangeDutyCycle(42)
elif ((FrameCentre - cX) >= 310):
Move_Dir = "Left"
RollOut.ChangeDutyCycle(68)
else:
RollOut.ChangeDutyCycle(RollIN)
#######################################################################
if(PreArea - PostArea >= 50):
PitchOut.ChangeDutyCycle(58)
elif(PostArea - PreArea >= 50):
PitchOut.ChangeDutyCycle(42)
else:
PitchOut.ChangeDutyCycle(PitchIN)
elif (trackstatus == "track_abort"):
success, img = cap.read()
counter = 0
# out.write(img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
    except Exception:
        # Keep the control loop alive if Firebase, the video stream or the
        # detector throws; errors are intentionally swallowed here.
        pass
cap.release()
# out.release() # save whole video on raspberry pi storage
cv2.destroyAllWindows()
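# --- Hedged sketch (editorial addition, not part of the original script) ----
# The NOTE inside the main loop asks for a more accurate way to read the
# receiver's PWM. One common approach is to timestamp edges with the pigpio
# daemon instead of polling. Everything below is an illustrative assumption:
# pigpio uses BCM numbering, and BCM 13 corresponds to board pin 33.
#
# import pigpio
#
# class PwmReader:
#     """Tracks the high-pulse width (microseconds) on one GPIO."""
#     def __init__(self, pi, gpio):
#         self.pulse_us = 1500                  # neutral RC pulse as default
#         self._rise = None
#         pi.set_mode(gpio, pigpio.INPUT)
#         pi.callback(gpio, pigpio.EITHER_EDGE, self._edge)
#     def _edge(self, gpio, level, tick):
#         if level == 1:                        # rising edge: remember time
#             self._rise = tick
#         elif self._rise is not None:          # falling edge: pulse width
#             self.pulse_us = pigpio.tickDiff(self._rise, tick)
#
# pi = pigpio.pi()
# roll_reader = PwmReader(pi, 13)               # BCM 13 == board pin 33
# RollIN would then be derived from roll_reader.pulse_us (1000-2000 us).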
|
{"hexsha": "39c7f672d824973d4640413f3bd44ca0e3ab17a8", "size": 6922, "ext": "py", "lang": "Python", "max_stars_repo_path": "Raspberry Pi Code/TrackAI main.py", "max_stars_repo_name": "phoenixlamarsacs9/DroneTrackingAI", "max_stars_repo_head_hexsha": "f5babad1cc05622af153c40a46d02c9bb41e2e71", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-05-09T05:19:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T05:24:04.000Z", "max_issues_repo_path": "Raspberry Pi Code/TrackAI main.py", "max_issues_repo_name": "phoenixlamarsacs9/DroneTrackingAI", "max_issues_repo_head_hexsha": "f5babad1cc05622af153c40a46d02c9bb41e2e71", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-05-11T12:32:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T04:50:30.000Z", "max_forks_repo_path": "Raspberry Pi Code/TrackAI main.py", "max_forks_repo_name": "phoenixlamarsacs9/DroneTrackingAI", "max_forks_repo_head_hexsha": "f5babad1cc05622af153c40a46d02c9bb41e2e71", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-05-09T05:21:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T19:50:20.000Z", "avg_line_length": 35.4974358974, "max_line_length": 139, "alphanum_fraction": 0.4605605316, "include": true, "reason": "import numpy", "num_tokens": 1674}
|
import pickle
import os
import matplotlib.pyplot as plt
import numpy as np
varying_rollout_folder = 'code/varying_rollout'
clone_results_subfolder = 'clone_results'
dagger_results_subfolder = 'dagger_results'
expert_results_folder = 'code/expert_results'
out_folder = '3_1_graphs'
envnames = ['Ant-v2','HalfCheetah-v2','Hopper-v2','Humanoid-v2','Reacher-v2','Walker2d-v2']
rollouts = [a for a in range(5,201,5)]
x = np.array( rollouts ,dtype=np.int32)
c_mean_dict = {}
c_std_dict = {}
d_mean_dict = {}
d_std_dict = {}
for envname in envnames:
c_mean_list = []
c_std_list = []
d_mean_list = []
d_std_list = []
for rollout in rollouts:
clone_results_fname = os.path.join(varying_rollout_folder, str(rollout), clone_results_subfolder, envname + '.pkl' )
dagger_results_fname = os.path.join(varying_rollout_folder, str(rollout), dagger_results_subfolder, envname + '.pkl' )
with open(clone_results_fname, 'rb') as f:
clone_results = pickle.loads(f.read())
with open(dagger_results_fname, 'rb') as f:
dagger_results = pickle.loads(f.read())
c_mean_list.append( clone_results['returns']['mean'] )
c_std_list.append( clone_results['returns']['std'] )
d_mean_list.append( dagger_results['returns']['mean'] )
d_std_list.append( dagger_results['returns']['std'] )
c_mean_dict[envname] = np.array(c_mean_list, dtype = np.float32)
c_std_dict[envname] = np.array(c_std_list, dtype = np.float32)
d_mean_dict[envname] = np.array(d_mean_list, dtype = np.float32)
d_std_dict[envname] = np.array(d_std_list, dtype = np.float32)
expert_results_fname = os.path.join(expert_results_folder, 'train_' + envname +'.pkl')
with open(expert_results_fname, 'rb') as f:
expert_results = pickle.loads(f.read())
e_mean = np.array(expert_results['returns']['mean'])
lines0 = plt.errorbar(x, c_mean_dict[envname], c_std_dict[envname],color='r', linewidth=2.0, label='Behavioral Cloning')
lines1 = plt.errorbar(x, d_mean_dict[envname], d_std_dict[envname], color='g', linewidth=2.0, label='Dagger')
lines2 = plt.plot(x, e_mean * np.ones_like(d_mean_dict[envname]) )
plt.setp(lines2, color='b', linewidth=2.0, label='Expert')
plt.ylabel('Reward')
plt.xlabel('# rollouts/ Dagger iterations')
plt.legend()
plt.savefig(os.path.join(out_folder,'%s.png'%(envname)), bbox_inches='tight')
plt.clf()
|
{"hexsha": "fd468615c586b9bb3147989ddb3891f611f333e7", "size": 2521, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw1/3_1_draw_graph.py", "max_stars_repo_name": "hsilva664/Reinforcement_Learning_Course", "max_stars_repo_head_hexsha": "6a250bc017965bec76b6fe909068e40127e62fa7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hw1/3_1_draw_graph.py", "max_issues_repo_name": "hsilva664/Reinforcement_Learning_Course", "max_issues_repo_head_hexsha": "6a250bc017965bec76b6fe909068e40127e62fa7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw1/3_1_draw_graph.py", "max_forks_repo_name": "hsilva664/Reinforcement_Learning_Course", "max_forks_repo_head_hexsha": "6a250bc017965bec76b6fe909068e40127e62fa7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5342465753, "max_line_length": 127, "alphanum_fraction": 0.6739389131, "include": true, "reason": "import numpy", "num_tokens": 684}
|
"""Utility functions module"""
import numpy as np
def distance(p1, p2):
"""Distance between two points"""
return np.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))
def xamtfos(x, sig):
    """Inverted Gaussian bump ('softmax' spelled backwards): a Gaussian PDF
    flipped and shifted so that its minimum value, 1, occurs at x = 0."""
    aux = (1 / (np.sqrt(2 * np.pi * sig ** 2)))
    return -aux * (np.e ** -(x ** 2 / (2 * (sig ** 2)))) + aux + 1
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
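# Hedged check (editorial addition): softmax returns a probability vector,
# e.g. softmax(np.array([1.0, 2.0, 3.0])).sum()  # -> 1.0 up to float error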
def is_in_box(point):
return (
point[0] >= 0 and
point[0] <= 1 and
point[1] >= 0 and
point[1] <= 1
)
|
{"hexsha": "a1d15d29761885e1fc2388aff1c79073f941d839", "size": 607, "ext": "py", "lang": "Python", "max_stars_repo_path": "tracer/utils.py", "max_stars_repo_name": "leopiney/tscf", "max_stars_repo_head_hexsha": "d98fbfe06abbf1d29458ddd147b7f1d99118e4ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tracer/utils.py", "max_issues_repo_name": "leopiney/tscf", "max_issues_repo_head_hexsha": "d98fbfe06abbf1d29458ddd147b7f1d99118e4ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tracer/utils.py", "max_forks_repo_name": "leopiney/tscf", "max_forks_repo_head_hexsha": "d98fbfe06abbf1d29458ddd147b7f1d99118e4ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6785714286, "max_line_length": 67, "alphanum_fraction": 0.5041186161, "include": true, "reason": "import numpy", "num_tokens": 212}
|
#########################################################################
#
# iterators.py - This file is part of the Spectral Python (SPy) package.
#
# Copyright (C) 2014 Thomas Boggs
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# Thomas Boggs, tboggs@users.sourceforge.net
#
'''Runs unit tests for iterators
To run the unit tests, type the following from the system command line:
# python -m spectral.tests.iterators
'''
from __future__ import division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from .spytest import SpyTest
import spectral as spy
class IteratorTest(SpyTest):
    '''Tests iteration over image pixels and ground-truth classes.'''
def setup(self):
self.image = spy.open_image('92AV3C.lan')
self.gt = spy.open_image('92AV3GT.GIS').read_band(0)
def test_iterator_all(self):
'''Iteration over all pixels.'''
from spectral.algorithms.algorithms import iterator
data = self.image.load()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels, 0)
itsum = np.sum(np.array([x for x in iterator(data)]), 0)
assert_allclose(sum, itsum)
def test_iterator_nonzero(self):
'''Iteration over all non-background pixels.'''
from spectral.algorithms.algorithms import iterator
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes > 0], 0)
itsum = np.sum(np.array([x for x in iterator(data, self.gt)]), 0)
assert_allclose(sum, itsum)
def test_iterator_index(self):
'''Iteration over single ground truth index'''
from spectral.algorithms.algorithms import iterator
cls = 5
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes == cls], 0)
itsum = np.sum(np.array([x for x in iterator(data, self.gt, cls)]), 0)
assert_allclose(sum, itsum)
def test_iterator_ij_nonzero(self):
'''Iteration over all non-background pixels.'''
from spectral.algorithms.algorithms import iterator_ij
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes > 0], 0)
itsum = np.sum(np.array([data[ij] for ij in iterator_ij(self.gt)]), 0)
assert_allclose(sum, itsum)
def test_iterator_ij_index(self):
'''Iteration over single ground truth index'''
from spectral.algorithms.algorithms import iterator_ij
cls = 5
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes == cls], 0)
itsum = np.sum(np.array([data[ij] for ij in iterator_ij(self.gt,
cls)]),
0)
assert_allclose(sum, itsum)
def test_iterator_spyfile(self):
'''Iteration over SpyFile object for single ground truth index'''
from spectral.algorithms.algorithms import iterator
cls = 5
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes == cls], 0)
itsum = np.sum(np.array([x for x in iterator(self.image, self.gt, cls)]),
0)
assert_allclose(sum, itsum)
def test_iterator_spyfile_nomemmap(self):
'''Iteration over SpyFile object without memmap'''
from spectral.algorithms.algorithms import iterator
cls = 5
data = self.image.load()
classes = self.gt.ravel()
pixels = data.reshape((-1, data.shape[-1]))
sum = np.sum(pixels[classes == cls], 0)
image = spy.open_image('92AV3C.lan')
itsum = np.sum(np.array([x for x in iterator(image, self.gt, cls)]), 0)
assert_allclose(sum, itsum)
def run():
print('\n' + '-' * 72)
print('Running iterator tests.')
print('-' * 72)
test = IteratorTest()
test.run()
if __name__ == '__main__':
from spectral.tests.run import parse_args, reset_stats, print_summary
parse_args()
reset_stats()
run()
print_summary()
|
{"hexsha": "8ce82f84c98b924d603f7f7d185c64650e7ff050", "size": 5195, "ext": "py", "lang": "Python", "max_stars_repo_path": "ensemble_detectors/src/Algorithm_1_matchfilter/spectral_lib/spectral/tests/iterators.py", "max_stars_repo_name": "satish1901/Methane-detection-from-hyperspectral-imagery", "max_stars_repo_head_hexsha": "741dee02e76931f572cf3e06af8faabe871e8e4a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2020-06-11T21:59:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T03:10:50.000Z", "max_issues_repo_path": "ensemble_detectors/src/Algorithm_1_matchfilter/spectral_lib/spectral/tests/iterators.py", "max_issues_repo_name": "N-NSH/Methane-detection-from-hyperspectral-imagery", "max_issues_repo_head_hexsha": "741dee02e76931f572cf3e06af8faabe871e8e4a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:41:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:41:04.000Z", "max_forks_repo_path": "ensemble_detectors/src/Algorithm_1_matchfilter/spectral_lib/spectral/tests/iterators.py", "max_forks_repo_name": "N-NSH/Methane-detection-from-hyperspectral-imagery", "max_forks_repo_head_hexsha": "741dee02e76931f572cf3e06af8faabe871e8e4a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-01-18T15:57:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T20:51:27.000Z", "avg_line_length": 36.5845070423, "max_line_length": 81, "alphanum_fraction": 0.6051973051, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1205}
|
"""
Here the structure of the network is made in pytorch
"""
from typing import List, Union, Optional
import torch
import os
from logger import logger
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy.stats import norm
class Encoder(nn.Module):
"""
Encodes the data using a CNN
Input => 64x64 image
    Output => prediction logit
              mean vector (z_dim)
              std vector (z_dim), made positive via softplus
"""
def __init__(self, z_dim: int = 20, custom_layers: Optional[nn.Sequential] = None):
super().__init__()
self.z_dim = z_dim
self.layers = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64, 128, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 256, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(256),
nn.Conv2d(256, 512, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(512),
nn.Flatten(),
nn.Linear(512, 1000),
nn.LeakyReLU(),
nn.Linear(1000, z_dim*2+1)
)
def forward(self, input: torch.Tensor):
"""
Perform forward pass of encoder.
"""
out = self.layers(input)
        # return classification logit, mean and softplus-positive std
        return out[:, 0], out[:, 1:self.z_dim+1], F.softplus(out[:, self.z_dim+1:])
class UnFlatten(nn.Module):
def __init__(self, channel_size, image_size):
super(UnFlatten, self).__init__()
self.channel_size = channel_size
self.image_size = image_size
def forward(self, input):
return input.view(-1, self.channel_size, self.image_size, self.image_size)
class Decoder(nn.Module):
"""
    Decodes a latent sample into an image using a CNN
Input => sample vector z_dim
Output => 64x64 image
4 6 13 29 61
"""
def __init__(self, z_dim: int = 20, custom_layers: Optional[nn.Sequential] = None):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(z_dim, 1000),
nn.LeakyReLU(),
nn.Linear(1000, 512*1*1),
UnFlatten(512, 1),
nn.ConvTranspose2d(512, 256, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(256),
nn.ConvTranspose2d(256, 128, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(128),
nn.ConvTranspose2d(128, 64, kernel_size=5, stride=2, output_padding=1),
nn.LeakyReLU(),
nn.BatchNorm2d(64),
nn.ConvTranspose2d(64, 3, kernel_size=5, stride=2, output_padding=1),
nn.Sigmoid()
)
def forward(self, input: torch.Tensor):
"""
        Perform forward pass of decoder.
"""
out = self.layers(input)
return out
class Db_vae(nn.Module):
def __init__(
self,
z_dim: int = 20,
hist_size: int = 1000,
alpha: float = 0.01,
num_bins: int = 10,
device: str = "cpu",
custom_encoding_layers: Optional[nn.Sequential] = None,
custom_decoding_layers: Optional[nn.Sequential] = None
):
super().__init__()
self.device = device
self.z_dim = z_dim
self.encoder = Encoder(z_dim, custom_encoding_layers)
self.decoder = Decoder(z_dim, custom_decoding_layers)
self.target_dist = torch.distributions.normal.Normal(0, 1)
self.c1 = 1
self.c2 = 1
self.c3 = 0.1
self.num_bins = num_bins
self.min_val = -15
self.max_val = 15
self.xlin = np.linspace(self.min_val, self.max_val, self.num_bins).reshape(1,1,self.num_bins)
self.hist = np.zeros((z_dim, self.num_bins))
self.means = torch.Tensor().to(self.device)
self.std = torch.Tensor().to(self.device)
self.alpha = alpha
@staticmethod
def init(path_to_model: str, device: str, z_dim: int):
full_path_to_model = f"results/{path_to_model}/model.pt"
if not os.path.exists(full_path_to_model):
logger.error(
f"Can't find model at {full_path_to_model}",
next_step="Evaluation will stop",
tip="Double check your path to model"
)
raise Exception
model: Db_vae = Db_vae(z_dim=z_dim, device=device)
try:
model.load_state_dict(torch.load(full_path_to_model, map_location=device))
        except:
            logger.error(f"Unable to load model from {full_path_to_model}.",
                         next_step="Model will not initialize",
                         tip="Did you use the right config parameters, or custom layers from the stored model?"
            )
            raise
logger.info(f"Loaded model from {path_to_model}!")
return model
def forward(self, images: torch.Tensor, labels: torch.Tensor):
"""
        Given images and labels, run the classifier and (for face images) the
        VAE, and return the prediction and the per-sample total loss.
"""
pred, mean, std = self.encoder(images)
loss_class = F.binary_cross_entropy_with_logits(pred, labels.float(), reduction='none')
# We only want to calculate the loss towards actual faces
faceslicer = labels == 1
facemean = mean[faceslicer]
facestd = std[faceslicer]
# Get single samples from the distributions with reparametrisation trick
dist = torch.distributions.normal.Normal(facemean, facestd)
z = dist.rsample().to(self.device)
res = self.decoder(z)
# calculate VAE losses
loss_recon = (images[faceslicer] - res)**2
loss_recon = loss_recon.view(loss_recon.shape[0],-1).mean(1)
loss_kl = torch.distributions.kl.kl_divergence(dist, self.target_dist)
loss_kl = loss_kl.view(loss_kl.shape[0],-1).mean(1)
loss_vae = self.c2 * loss_recon + self.c3 * loss_kl
loss_total = self.c1 * loss_class
# Only add loss to positions of faces, rest is zero
zeros = torch.zeros(faceslicer.shape[0]).to(self.device)
zeros[faceslicer] = loss_vae
loss_total = loss_total + zeros
return pred, loss_total
def forward_eval(self, images: torch.Tensor):
"""
        Given images, return the classifier prediction without tracking
        gradients.
"""
with torch.no_grad():
pred, _,_ = self.encoder(images)
return pred
def interpolate(self, images: torch.Tensor, amount: int):
with torch.no_grad():
_, mean, std = self.encoder(images)
mean_1, std_1 = mean[0,:], std[0,:]
mean_2, std_2 = mean[1,:], std[1,:]
all_mean = torch.tensor([]).to(self.device)
all_std = torch.tensor([]).to(self.device)
diff_mean = mean_1 - mean_2
            diff_std = std_1 - std_2
steps_mean = diff_mean / (amount-1)
steps_std = diff_std / (amount-1)
for i in range(amount):
all_mean = torch.cat((all_mean, mean_1 - steps_mean*i))
all_std = torch.cat((all_std, std_1 - steps_std*i))
all_mean = all_mean.view(amount, -1)
all_std = all_std.view(amount, -1)
dist = torch.distributions.normal.Normal(all_mean, all_std)
z = dist.rsample().to(self.device)
recon_images = self.decoder(z)
return recon_images
def build_means(self, input: torch.Tensor):
_, mean, log_std = self.encoder(input)
self.means = torch.cat((self.means, mean))
return
def build_histo(self, input: torch.Tensor):
"""
        Accumulates per-dimension Gaussian histograms of the latent space.
        NOTE:
        Only feed face images into this function.
"""
_, mean, std = self.encoder(input)
self.means = torch.cat((self.means, mean))
self.std = torch.cat((self.std, std))
values = norm.pdf(self.xlin, mean.unsqueeze(-1).cpu(), std.unsqueeze(-1).cpu()).sum(0)
self.hist += values
return
def get_histo_max(self):
probs = torch.zeros_like(self.means[:,0]).to(self.device)
for i in range(self.z_dim):
dist = self.means[:,i].cpu().numpy()
hist, bins = np.histogram(dist, density=True, bins=self.num_bins)
bins[0] = -float('inf')
bins[-1] = float('inf')
bin_idx = np.digitize(dist, bins)
hist = hist + self.alpha
hist /= np.sum(hist)
p = 1.0/(hist[bin_idx-1])
p /= np.sum(p)
probs = torch.max(probs, torch.Tensor(p).to(self.device))
probs /= probs.sum()
return probs
def get_histo_max5(self):
probs = torch.zeros_like(self.means, dtype=float).to(self.device)
for i in range(self.z_dim):
dist = self.means[:,i].cpu().numpy()
hist, bins = np.histogram(dist, density=True, bins=self.num_bins)
bins[0] = -float('inf')
bins[-1] = float('inf')
bin_idx = np.digitize(dist, bins)
hist = hist + self.alpha
hist /= np.sum(hist)
p = 1.0/(hist[bin_idx-1])
p /= np.sum(p)
probs[:,i] = torch.Tensor(p).to(self.device)
probs = probs.sort(1, descending=True)[0][:,:5]
probs = probs.prod(1)
print(probs)
return probs
def get_histo_gaussian(self):
"""
        Returns the re-weighting probabilities of the stored means under the
        accumulated latent histogram.
"""
results = np.empty(self.means.shape[0])
hist_batch_size = 4000
# Iterate in large batches over dataset to prevent memory lockup
for i in range(0, self.means.shape[0], hist_batch_size):
i_end = i + hist_batch_size
if i_end > self.means.shape[0]:
i_end = self.means.shape[0]
mean = self.means[i:i_end, :]
std = self.std[i:i_end, :]
lins = norm.pdf(self.xlin, mean.unsqueeze(-1).cpu(), std.unsqueeze(-1).cpu())
Q = lins * self.hist
Q = Q.sum(-1)
W = 1 / (Q + self.alpha)
# Performing the max value technique, TODO: analyse top 5
results[i:i_end] = W.max(-1)
# # Reset values
self.hist.fill(0)
self.means = torch.Tensor().to(self.device)
self.std = torch.Tensor().to(self.device)
return torch.tensor(results).to(self.device)
def recon_images(self, images: torch.Tensor):
with torch.no_grad():
pred, mean, std = self.encoder(images)
# Get single samples from the distributions with reparametrisation trick
dist = torch.distributions.normal.Normal(mean, std)
z = dist.rsample().to(self.device)
recon_images = self.decoder(z)
# return predictions and the loss
return recon_images
def sample(self, n_samples, z_samples=[]):
"""
Sample n_samples from the model. Return both the sampled images
(from bernoulli) and the means for these bernoullis (as these are
used to plot the data manifold).
"""
with torch.no_grad():
z_samples = torch.randn(n_samples, self.z_dim).to(self.device)
sampled_images = self.decoder(z_samples)
return sampled_images
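# --- Hedged sketch (editorial addition): the re-weighting used above --------
# get_histo_max() weights every face by W(z) ~ 1 / (Q(z) + alpha), where Q is
# the histogram density of its latent mean, so under-represented faces get
# sampled more often. A minimal 1-D numpy analogue (names are illustrative):
def _debias_weights_1d(latent_means, num_bins=10, alpha=0.01):
    """Per-sample sampling weights from a 1-D latent histogram."""
    hist, bins = np.histogram(latent_means, density=True, bins=num_bins)
    bins[0], bins[-1] = -np.inf, np.inf       # catch outliers in edge bins
    idx = np.digitize(latent_means, bins)
    q = hist + alpha                          # smoothed bin densities
    q /= q.sum()
    w = 1.0 / q[idx - 1]                      # rare bins -> large weights
    return w / w.sum()
# e.g. _debias_weights_1d(np.random.randn(1000)).sum() == 1.0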
|
{"hexsha": "5d85349d2e309e5adea8b4afbf0f677a6d087990", "size": 11616, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/vae_model.py", "max_stars_repo_name": "JMitnik/FacialDebiasing", "max_stars_repo_head_hexsha": "9003ab484ef7bdeb372d7e5cdcbe0b162e76dc39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-29T18:48:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-18T09:27:01.000Z", "max_issues_repo_path": "code/vae_model.py", "max_issues_repo_name": "lucasfijen/FacialDebiasing", "max_issues_repo_head_hexsha": "3ecd47f1ebefb56ab6ac778e4747557693bc6d66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/vae_model.py", "max_forks_repo_name": "lucasfijen/FacialDebiasing", "max_forks_repo_head_hexsha": "3ecd47f1ebefb56ab6ac778e4747557693bc6d66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-02-21T10:17:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T22:47:46.000Z", "avg_line_length": 30.0932642487, "max_line_length": 110, "alphanum_fraction": 0.5744662534, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2815}
|
import numpy
import math
import random
random.seed(1337)
def next_poisson(rate):
    # Inverse-transform sample of an exponential distribution: the
    # inter-arrival time of a Poisson process with the given rate.
    return -math.log(1.0 - random.random()) / rate
def next_gaussian(mean, dev):
    return int(numpy.random.normal(mean, dev))
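# Hedged check (editorial addition): next_poisson is an inverse-transform
# sample of an exponential distribution, so the sample mean tends to 1/rate:
#   samples = [next_poisson(2.0) for _ in range(100000)]
#   sum(samples) / len(samples)   # ~0.5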
|
{"hexsha": "3a672dfb0f12a7c191bd57ac299378de1ff216d8", "size": 214, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/math.py", "max_stars_repo_name": "uzum/cran-simulator", "max_stars_repo_head_hexsha": "eb9544ddf7e4312df8d88616cea2252feb270525", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/math.py", "max_issues_repo_name": "uzum/cran-simulator", "max_issues_repo_head_hexsha": "eb9544ddf7e4312df8d88616cea2252feb270525", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/math.py", "max_forks_repo_name": "uzum/cran-simulator", "max_forks_repo_head_hexsha": "eb9544ddf7e4312df8d88616cea2252feb270525", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.4545454545, "max_line_length": 50, "alphanum_fraction": 0.7196261682, "include": true, "reason": "import numpy", "num_tokens": 59}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script visualize the semantic segmentation of ENet.
"""
import os
import numpy as np
from argparse import ArgumentParser
from os.path import join
import argparse
import sys
caffe_root = '/home/czm/CapStone/Capstone/ENet/caffe-enet/'  # Change this to the absolute path of your ENet Caffe directory
sys.path.insert(0, caffe_root + 'python')
import caffe
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
__author__ = 'Timo Sämann'
__university__ = 'Aschaffenburg University of Applied Sciences'
__email__ = 'Timo.Saemann@gmx.de'
__data__ = '24th May, 2017'
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True, help='.prototxt file for inference')
parser.add_argument('--weights', type=str, required=True, help='.caffemodel file')
parser.add_argument('--colours', type=str, required=True, help='label colours')
parser.add_argument('--input_image', type=str, required=True, help='input image path')
parser.add_argument('--out_dir', type=str, default=None, help='output directory in which the segmented images '
'should be stored')
parser.add_argument('--gpu', type=str, default='0', help='0: gpu mode active, else gpu mode inactive')
return parser
if __name__ == '__main__':
parser1 = make_parser()
args = parser1.parse_args()
    # args.gpu is parsed as a string, so compare with '0'; the original
    # integer comparison always fell through to CPU mode
    if args.gpu == '0':
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
net = caffe.Net(args.model, args.weights, caffe.TEST)
input_shape = net.blobs['data'].data.shape
output_shape = net.blobs['deconv6_0_0'].data.shape
label_colours = cv2.imread(args.colours, 1).astype(np.uint8)
input_image = cv2.imread(args.input_image, 1).astype(np.float32)
input_image = cv2.resize(input_image, (input_shape[3], input_shape[2]))
input_image = input_image.transpose((2, 0, 1))
input_image = np.asarray([input_image])
out = net.forward_all(**{net.inputs[0]: input_image})
    prediction = net.blobs['deconv6_0_0'].data[0].argmax(axis=0)
    prediction = np.squeeze(prediction)
    # np.resize tiles the single-channel class map into three identical
    # channels so it can be colour-mapped with cv2.LUT below
    prediction = np.resize(prediction, (3, input_shape[2], input_shape[3]))
    prediction = prediction.transpose(1, 2, 0).astype(np.uint8)
prediction_rgb = np.zeros(prediction.shape, dtype=np.uint8)
label_colours_bgr = label_colours[..., ::-1]
cv2.LUT(prediction, label_colours_bgr, prediction_rgb)
cv2.imshow("ENet", prediction_rgb)
key = cv2.waitKey(0)
if args.out_dir is not None:
input_path_ext = args.input_image.split(".")[-1]
input_image_name = args.input_image.split("/")[-1:][0].replace('.' + input_path_ext, '')
out_path_im = args.out_dir + input_image_name + '_enet' + '.' + input_path_ext
out_path_gt = args.out_dir + input_image_name + '_enet_gt' + '.' + input_path_ext
cv2.imwrite(out_path_im, prediction_rgb)
# cv2.imwrite(out_path_gt, prediction) # label images, where each pixel has an ID that represents the class
|
{"hexsha": "2f00352d38997a5f274625fe06de872f7d17e163", "size": 3100, "ext": "py", "lang": "Python", "max_stars_repo_path": "ENet/scripts/test_segmentation.py", "max_stars_repo_name": "kuonangzhe/Visional_AutoDrive", "max_stars_repo_head_hexsha": "539d9858cbbcc8551343e2d067200c353175e7b2", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ENet/scripts/test_segmentation.py", "max_issues_repo_name": "kuonangzhe/Visional_AutoDrive", "max_issues_repo_head_hexsha": "539d9858cbbcc8551343e2d067200c353175e7b2", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ENet/scripts/test_segmentation.py", "max_forks_repo_name": "kuonangzhe/Visional_AutoDrive", "max_forks_repo_head_hexsha": "539d9858cbbcc8551343e2d067200c353175e7b2", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.632183908, "max_line_length": 116, "alphanum_fraction": 0.6848387097, "include": true, "reason": "import numpy", "num_tokens": 785}
|
import numpy as np
import cv2
from hand_tracker import HandTracker
det = HandTracker('models/palm_detection_without_custom_op.tflite',
'models/hand_landmark_3d.tflite',
'data/anchors.csv',
box_shift=-0.5, box_enlarge=2.6)
in_bgr = cv2.imread('data/test_img1.jpg')
in_rgb = in_bgr[:,:,::-1]
list_keypoints, list_bbox = det(in_rgb)
out_img = np.copy(in_bgr)
# point size
ps = int(np.ceil(min(out_img.shape[0], out_img.shape[1]) / 256))
if list_keypoints is not None:
for idx in range(len(list_keypoints)):
keypoints = list_keypoints[idx]
bbox = list_bbox[idx]
for i in range(4):
j = (i + 1) % 4
p0 = (int(bbox[i,0]+0.5),int(bbox[i,1]+0.5))
p1 = (int(bbox[j,0]+0.5),int(bbox[j,1]+0.5))
cv2.line(out_img, p0, p1, (0,0,255), ps)
for i in range(keypoints.shape[0]):
p = (int(keypoints[i,0]+0.5),int(keypoints[i,1]+0.5))
cv2.circle(out_img, p, ps, (255,0,0), ps)
cv2.imwrite('out.jpg', out_img)
|
{"hexsha": "ce3e4276e30942beb50a47aa0d08457709b7c8c5", "size": 1058, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "yin-zhang/hand_tracking", "max_stars_repo_head_hexsha": "a9495c2639963a22fe6dbe1989c12c6d1f629198", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-15T21:36:08.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-15T21:36:08.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "yin-zhang/hand_tracking", "max_issues_repo_head_hexsha": "a9495c2639963a22fe6dbe1989c12c6d1f629198", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "yin-zhang/hand_tracking", "max_forks_repo_head_hexsha": "a9495c2639963a22fe6dbe1989c12c6d1f629198", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1290322581, "max_line_length": 67, "alphanum_fraction": 0.5879017013, "include": true, "reason": "import numpy", "num_tokens": 333}
|
from pylab import *
from numpy import NaN
def m(a):
z = 0
for n in range(1, 100):
z = z**2 + a
if abs(z) > 2:
return n
return NaN
X = arange(-2, .5, .002)
Y = arange(-1, 1, .002)
Z = zeros((len(Y), len(X)))
for iy, y in enumerate(Y):
    print(iy, "of", len(Y))
for ix, x in enumerate(X):
Z[iy,ix] = m(x + 1j * y)
imshow(Z, cmap = plt.cm.prism, interpolation = 'none', extent = (X.min(), X.max(), Y.min(), Y.max()))
xlabel("Re(c)")
ylabel("Im(c)")
savefig("mandelbrot_python.svg")
show()
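# --- Hedged aside (editorial addition, not in the original script) ----------
# The per-pixel double loop above is clear but slow. A vectorized escape-time
# count over the whole grid, same algorithm, could look like this (function
# name and max_iter are illustrative assumptions):
def mandelbrot_counts(X, Y, max_iter=100):
    C = X[None, :] + 1j * Y[:, None]      # complex grid of c values
    Z = zeros_like(C)
    N = full(C.shape, NaN)                # NaN marks "not escaped yet"
    for n in range(1, max_iter):
        todo = isnan(N)                   # only iterate unescaped points
        Z[todo] = Z[todo] ** 2 + C[todo]
        N[todo & (abs(Z) > 2)] = n        # record first escape iteration
    return N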
|
{"hexsha": "d99e405c1614446538ba570f34f62094396175b2", "size": 504, "ext": "py", "lang": "Python", "max_stars_repo_path": "Task/Mandelbrot-set/Python/mandelbrot-set-3.py", "max_stars_repo_name": "LaudateCorpus1/RosettaCodeData", "max_stars_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_stars_repo_licenses": ["Info-ZIP"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-01-29T20:08:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T06:16:05.000Z", "max_issues_repo_path": "Task/Mandelbrot-set/Python/mandelbrot-set-3.py", "max_issues_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_issues_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_issues_repo_licenses": ["Info-ZIP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Task/Mandelbrot-set/Python/mandelbrot-set-3.py", "max_forks_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_forks_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_forks_repo_licenses": ["Info-ZIP"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-13T04:19:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-13T04:19:31.000Z", "avg_line_length": 19.3846153846, "max_line_length": 101, "alphanum_fraction": 0.5714285714, "include": true, "reason": "from numpy", "num_tokens": 185}
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from numbers import Number
import numpy as np
from pymor.tools import mpi
from pymor.vectorarrays.numpy import NumpyVectorSpace
from pymor.vectorarrays.mpi import MPIVectorSpaceAutoComm
def random_array(dims, length, seed):
if isinstance(dims, Number):
dims = (dims,)
return MPIVectorSpaceAutoComm(tuple(NumpyVectorSpace(dim) for dim in dims)).make_array(
mpi.call(_random_array, dims, length, seed)
)
def _random_array(dims, length, seed):
np.random.seed(seed + mpi.rank)
dim = dims[mpi.rank] if len(dims) > 1 else dims[0]
array = NumpyVectorSpace.make_array(np.random.random((length, dim)))
obj_id = mpi.manage_object(array)
return obj_id
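# A minimal usage sketch (assumes the pyMOR MPI event loop is running on all ranks):
#   U = random_array(30, length=10, seed=0)   # distributed VectorArray of 10 random vectors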
|
{"hexsha": "db8f500401757d8989c42fdb1f1ba821c6cea39c", "size": 917, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pymor/playground/vectorarrays/mpi.py", "max_stars_repo_name": "ManuelMBaumann/pymor", "max_stars_repo_head_hexsha": "9ad226a0a46c7ba30a18bdab27b8bbbfe8f83a31", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pymor/playground/vectorarrays/mpi.py", "max_issues_repo_name": "ManuelMBaumann/pymor", "max_issues_repo_head_hexsha": "9ad226a0a46c7ba30a18bdab27b8bbbfe8f83a31", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pymor/playground/vectorarrays/mpi.py", "max_forks_repo_name": "ManuelMBaumann/pymor", "max_forks_repo_head_hexsha": "9ad226a0a46c7ba30a18bdab27b8bbbfe8f83a31", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-03T15:18:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-03T15:18:24.000Z", "avg_line_length": 32.75, "max_line_length": 91, "alphanum_fraction": 0.7371864776, "include": true, "reason": "import numpy", "num_tokens": 235}
|
function cm = getColorMap(num_colors)
%GETCOLORMAP Return default k-Wave color map.
%
% DESCRIPTION:
% getColorMap returns the default color map used for display and
% visualisation across the k-Wave Toolbox. Zero values are displayed
% as white, positive values are displayed as yellow through red to
% black, and negative values are displayed as light to dark
% blue-greys. If no value for num_colors is provided, cm will have
% 256 colors.
%
% USAGE:
% cm = getColorMap()
% cm = getColorMap(num_colors)
%
% OPTIONAL INPUTS:
% num_colors - number of colors in the color map (default = 256)
%
% OUTPUTS:
% cm - three column color map matrix which can be applied
% using colormap
%
% ABOUT:
% author - Bradley Treeby
% date - 3rd July 2009
% last update - 17th July 2009
%
% This function is part of the k-Wave Toolbox (http://www.k-wave.org)
% Copyright (C) 2009-2014 Bradley Treeby and Ben Cox
%
% See also colormap
% This file is part of k-Wave. k-Wave is free software: you can
% redistribute it and/or modify it under the terms of the GNU Lesser
% General Public License as published by the Free Software Foundation,
% either version 3 of the License, or (at your option) any later version.
%
% k-Wave is distributed in the hope that it will be useful, but WITHOUT ANY
% WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
% FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
% more details.
%
% You should have received a copy of the GNU Lesser General Public License
% along with k-Wave. If not, see <http://www.gnu.org/licenses/>.
% set default values
if nargin == 0
neg_pad = 48;
num_colors = 256;
else
neg_pad = round(48*num_colors/256);
end
% define colour spectrums
neg = bone(num_colors/2 + neg_pad);
neg = neg(1 + neg_pad:end, :);
pos = flipud(hot(num_colors/2));
% create custom colour map
cm = [neg; pos];
|
{"author": "wme7", "repo": "Aero-matlab", "sha": "9430008f2e3b84f28633775a44dff534e780fbac", "save_path": "github-repos/MATLAB/wme7-Aero-matlab", "path": "github-repos/MATLAB/wme7-Aero-matlab/Aero-matlab-9430008f2e3b84f28633775a44dff534e780fbac/K-wave/k-Wave/getColorMap.m"}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 13:17:12 2018
@author: Raj
"""
from ffta.hdf_utils import hdf_utils
from matplotlib import pyplot as plt
def test_pixel(h5_file, param_changes={}, pxls = 1, showplots = True,
verbose=True, clear_filter = False):
"""
Takes a random pixel and does standard processing.
:param h5_file: H5 file to process
:type h5_file: h5Py File, path, Dataset
:param param_changes:
:type param_changes: dict, optional
:param pxls: Number of random pixels to survey
:type pxls: int, optional
:param showplots: Whether to create a new plot or not.
:type showplots: bool, optional
    :param verbose: Whether to print to the command line. Currently for future-proofing
    :type verbose: bool, optional
:param clear_filter: Whether to do filtering (FIR) or not
:type clear_filter: bool, optional
"""
# get_pixel can work on Datasets or the H5_File
if any(param_changes):
hdf_utils.change_params(h5_file, new_vals=param_changes)
parameters = hdf_utils.get_params(h5_file)
cols = parameters['num_cols']
rows = parameters['num_rows']
# Creates random pixels to sample
pixels = []
if pxls == 1:
pixels.append([0,0])
if pxls > 1:
from numpy.random import randint
for i in range(pxls):
pixels.append([randint(0,rows), randint(0,cols)])
# Analyzes all pixels
for rc in pixels:
h5_px = hdf_utils.get_pixel(h5_file, rc=rc)
if clear_filter:
h5_px.clear_filter_flags()
h5_px.analyze()
print(rc, h5_px.tfp)
        if showplots:
plt.plot(h5_px.best_fit, 'r--')
plt.plot(h5_px.cut, 'g-')
return
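# A minimal usage sketch (hypothetical file path and parameter name):
#   test_pixel('ffta_data.h5', param_changes={'trigger': 0.0002}, pxls=4, clear_filter=True)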
|
{"hexsha": "927602feda4e222a55a54b802da89270eac36339", "size": 1610, "ext": "py", "lang": "Python", "max_stars_repo_path": "ffta/analysis/test_pixel.py", "max_stars_repo_name": "GingerLabUW/FFTA", "max_stars_repo_head_hexsha": "576591d6ba23731c26f7dfa90591e94795f1b288", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-05T17:36:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-05T19:37:00.000Z", "max_issues_repo_path": "ffta/analysis/test_pixel.py", "max_issues_repo_name": "GingerLabUW/FFTA", "max_issues_repo_head_hexsha": "576591d6ba23731c26f7dfa90591e94795f1b288", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-08T05:41:18.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-13T01:44:30.000Z", "max_forks_repo_path": "ffta/analysis/test_pixel.py", "max_forks_repo_name": "GingerLabUW/FFTA", "max_forks_repo_head_hexsha": "576591d6ba23731c26f7dfa90591e94795f1b288", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-05T19:20:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-19T20:05:57.000Z", "avg_line_length": 22.3611111111, "max_line_length": 72, "alphanum_fraction": 0.6968944099, "include": true, "reason": "from numpy", "num_tokens": 463}
|
# Digital Signal Processing - Lab 1 - Part 2
# Dimitris Dimos - 03117165
# Christos Dimopoulos - 03117037
import numpy as np
from numpy import random
import librosa
import matplotlib.pyplot as plt
import pywt
import sounddevice as sd
from scipy import signal as sg
import math
plt.close('all')
# 2.1
# QUESTION (a)
# sampling parameters
Fs = 1000
Ts = 1/Fs
start = 0
end = 2
samples = int (end/Ts)
# AWGN
v = random.normal(loc = 0.0, scale = 1.0, size = int (samples))
# noisy sampled signal
n = np.arange(start, end, Ts) / Ts
x = 2 * np.cos(2*np.pi*70*Ts*n) + 3 * np.sin(2*np.pi*140*Ts*n) + 0.15 * v
# sounds like
sd.play(x, 10000)
# looks like
plt.figure(1,
dpi = 200,
figsize = (15.0, 7.0))
plt.plot(n, x, label = 'x[n] = 2cos(2π0.07n) + 3sin(2π0.14n) + 0.15v[0.001n]')
plt.xlabel('Time [n]')
plt.ylabel('x[n]')
plt.title('Sampled noisy signal of Question 2.1')
plt.legend()
plt.show()
# QUESTION (b)
# STFT Parameters
window = 0.04
overlap = 0.02
one = int (window / Ts)
two = int (overlap / Ts)
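# window of 0.04 s at Fs = 1 kHz -> n_fft = 40 samples; hop of 0.02 s -> 20 samples (50% overlap),
# giving 40/2 + 1 = 21 frequency bins and 2000/20 + 1 = 101 frames, matching the plot axes below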
# STFT creation
G = librosa.stft(x, n_fft = one, hop_length = two)
print(G.shape)
# plot STFT
t = np.linspace(0, end, 101)
f = np.linspace(0, Fs/2, 21)
plt.figure(2,
dpi = 200,
figsize = (15.0, 7.0))
plt.pcolormesh(t,f,np.abs(G))
plt.xlabel('Time (sec)')
plt.ylabel('Frequency(Hz)')
plt.title('STFT of the noisy signal')
# QUESTION (c)
# Continuous Wavelet Transform
s = np.power(2, np.linspace(1, math.log(Fs/15.625, 2), 20*5))
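# 100 scales spaced logarithmically from 2^1 to 2^6 = Fs/15.625; pywt returns normalized
# frequencies, which are converted to Hz below via f = freqs * Fs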
coefs,freqs = pywt.cwt(x, s, 'cmor3.0-1.0')
print(coefs.shape)
# Plot CWT - frequency
t = np.linspace(0, samples/Fs, samples)
f = freqs * Fs
plt.figure(3,
dpi = 200,
figsize = (15.0, 7.0))
plt.pcolormesh(t,f,np.abs(coefs))
plt.xlabel('Time (sec)')
plt.ylabel('Frequency(Hz)')
plt.title('CWT of the signal segment')
# Plot CWT - scale
t = np.linspace(0, samples/Fs, samples)
plt.figure(4,
dpi = 200,
figsize = (15.0, 7.0))
plt.pcolormesh(t, s, np.abs(coefs))
plt.xlabel('Time (sec)')
plt.ylabel('Scales')
plt.title('CWT of the signal segment')
# 2.2
# Parameters
Fs = 1000
Ts = 1/Fs
start = 0
end = 2
samples = int (end/Ts)
# QUESTION (a)
# AWGN
v = random.normal(loc = 0.0, scale = 1.0, size = int (samples)) # AWGN
# Diracs
burst1 = sg.unit_impulse(samples, 625)
burst2 = sg.unit_impulse(samples, 800)
# final signal
n = np.arange(start, end, Ts) / Ts
x = 1.7 * (burst1 + burst2) + 0.15*v + 1.7*np.cos(2*np.pi*90*Ts*n)
# which sounds like
sd.play(x, 5000)
# and looks like
plt.figure(5,
dpi = 200,
figsize = (15.0, 7.0))
plt.plot(n, x, label =
'x[n] = 1.7cos(2π0.09n)+0.15v(0.001n)+1.7[δ(0.001n − 0.625)+δ(0.001n − 0.800)]')
plt.xlabel('Time [n]')
plt.ylabel('x[n]')
plt.title('Sampled noisy signal of Question 2.2')
plt.legend()
plt.show()
# QUESTION (b)
# STFT Parameters
window = 0.04
overlap = 0.02
one = int (window / Ts)
two = int (overlap / Ts)
# STFT of noisy signal
G = librosa.stft(x, n_fft = one, hop_length = two)
print(G.shape)
# plot STFT
t = np.linspace(0, end, 101)
f = np.linspace(0, Fs/2, 21)
plt.figure(6,
dpi = 200,
figsize = (15.0, 7.0))
plt.contour(t, f, np.abs(G), 16)
plt.xlabel('Time (sec)')
plt.ylabel('Frequency(Hz)')
plt.title('STFT of the noisy signal')
# QUESTION (c)
# CWT
s = np.power(2, np.linspace(1, math.log(Fs/15.625, 2), 20*5))
coefs,freqs = pywt.cwt(x, s, 'cmor3.0-1.0')
print(coefs.shape)
# plot in relation to frequency
t = np.linspace(0, samples/Fs, samples)
f = freqs * Fs
plt.figure(7,
dpi = 200,
figsize = (15.0, 7.0))
plt.contour(t, f, np.abs(coefs), 16)
plt.xlabel('Time (sec)')
plt.ylabel('Frequency(Hz)')
plt.title('CWT of the signal segment')
# plot in relation to scale
t = np.linspace(0, samples/Fs, samples)
plt.figure(8,
dpi = 200,
figsize = (15.0, 7.0))
plt.contour(t, s, np.abs(coefs), 16)
plt.xlabel('Time (sec)')
plt.ylabel('Scales')
plt.title('CWT of the signal segment')
|
{"hexsha": "7f670f92a83ebbb571f9b75f21c9b90f4bbe1cbe", "size": 3994, "ext": "py", "lang": "Python", "max_stars_repo_path": "labs/Lab1/source/part2.py", "max_stars_repo_name": "d-dimos/dsp_ntua", "max_stars_repo_head_hexsha": "5702d3607682d6bfe6904a290d57ea0caed00b21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-25T14:25:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T14:25:50.000Z", "max_issues_repo_path": "labs/Lab1/source/part2.py", "max_issues_repo_name": "d-dimos/dsp_ntua", "max_issues_repo_head_hexsha": "5702d3607682d6bfe6904a290d57ea0caed00b21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "labs/Lab1/source/part2.py", "max_forks_repo_name": "d-dimos/dsp_ntua", "max_forks_repo_head_hexsha": "5702d3607682d6bfe6904a290d57ea0caed00b21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-24T22:18:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T22:26:05.000Z", "avg_line_length": 20.4820512821, "max_line_length": 89, "alphanum_fraction": 0.6214321482, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1433}
|
import numpy as np
import sys,os
import readgadget
import MAS_library as MASL
import Pk_library as PKL
def generatePk(simroot, outroot, dims, ptypes, MAS, do_RSD, axis, threads, bigbox=True):
    for i in range(0, 50):
for prefix in ["","NCV_0_","NCV_1_"]:
            if prefix in ("NCV_0_", "NCV_1_") and i > 24 and bigbox: continue
for num in [0,1,2]:
                print('Working with snap %s%d' % (prefix, i))
snapshot = simroot+prefix+'%d/snap_%03d'%(i,num)
header = readgadget.header(snapshot)
BoxSize = header.boxsize/1e3 #Mpc/h
redshift = header.redshift
masses = header.massarr*1e10 #Msun/h
fout_dir = outroot+prefix+'%d'%(i)
fout = fout_dir + '/Pk_cdm_z=%d.txt'%(round(redshift))
if os.path.exists(fout): continue
if not os.path.isdir(fout_dir): os.mkdir(fout_dir)
delta_m = MASL.density_field_gadget(snapshot, ptypes, dims, MAS, do_RSD, axis)
                delta_m /= np.mean(delta_m, dtype=np.float64); delta_m -= 1.0  # overdensity: delta = rho/<rho> - 1
Pk = PKL.Pk(delta_m, BoxSize, axis, MAS, threads)
np.savetxt(fout, np.transpose([Pk.k3D, Pk.Pk[:,0]]))
################################ INPUT #################################
simroot = '/simons/scratch/fvillaescusa/Lya_ncv/20Mpc_256/'
outroot = '/home/landerson/src/LyA-InvertPhase/20Mpc_256/'
dims = 256
ptypes = [1]
MAS = 'CIC'
do_RSD = False
axis = 0
threads = 16
########################################################################
generatePk(simroot, outroot, dims, ptypes, MAS, do_RSD, axis, threads, bigbox=False)
################################ INPUT #################################
simroot = '/simons/scratch/fvillaescusa/Lya_ncv/40Mpc_512/'
outroot = '/home/landerson/src/LyA-InvertPhase/40Mpc_512/'
dims = 512
ptypes = [1]
MAS = 'CIC'
do_RSD = False
axis = 0
threads = 16
########################################################################
generatePk(simroot, outroot, dims, ptypes, MAS, do_RSD, axis, threads)
|
{"hexsha": "e0cf405f8a1068ffe0b55cf059447f6de688e443", "size": 2524, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pk.py", "max_stars_repo_name": "andersdot/LyA-InvertPhase", "max_stars_repo_head_hexsha": "142a3c8e11be04c82d1cf61020e8fd54536ecc18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pk.py", "max_issues_repo_name": "andersdot/LyA-InvertPhase", "max_issues_repo_head_hexsha": "142a3c8e11be04c82d1cf61020e8fd54536ecc18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pk.py", "max_forks_repo_name": "andersdot/LyA-InvertPhase", "max_forks_repo_head_hexsha": "142a3c8e11be04c82d1cf61020e8fd54536ecc18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.671641791, "max_line_length": 148, "alphanum_fraction": 0.4473058637, "include": true, "reason": "import numpy", "num_tokens": 604}
|
import cv2
import numpy as np
class BlackBoard():
    """Virtual blackboard: tracks a coloured marker via an HSV mask and draws its path onto a board overlay."""
    def __init__(self,img,i,cor,board):
        self.cor=cor
        self.frame=img
        self.range=np.load('ohyea.npy')  # pre-calibrated HSV lower/upper bounds
        self.board=board
        self.flag=1  # 1 = draw (blue), 0 = erase (black); toggled via Switch()
        self.i=i
        self.draw()
def draw(self):
self.frame=cv2.flip(self.frame,1)
if self.board is None:
self.board=np.zeros_like(self.frame)
mask=self.CreateMask()
contours=self.DetectContour(mask)
self.DrawLine(contours)
self.Display()
def CreateMask(self):
hsv=cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
low=self.range[0]
up=self.range[1]
mask=cv2.inRange(hsv,low,up)
return mask
def DetectContour(self,mask):
contours,hierarchy=cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
return contours
def DrawLine(self, contours):
if contours and cv2.contourArea(max(contours,key=cv2.contourArea))>100:
cv2.circle(self.frame,(10,10),10,(0,255,0),-1)
c=max(contours,key=cv2.contourArea)
x2,y2,w,h=cv2.boundingRect(c)
if self.cor[0][0] == 0 and self.cor[0][1]==0:
self.cor[0] = (x2,y2)
else:
# for i in range(1, len(self.cor)-1):
self.board = cv2.line(self.board, self.cor[0], (x2,y2),[255 * self.flag, 0, 0],10)
self.cor[0]=(x2,y2)
else:
# self.cor[0]=(0,0)
cv2.circle(self.frame,(10,10),10,(0,0,255),-1)
# for i in range(1, len(self.cor) - 1):
# self.board = cv2.line(self.board, self.cor[i], self.cor[i + 1], [255 * self.flag, 0, 0], 10)
return list(self.cor)
def Display(self):
self.frame=cv2.add(self.frame,self.board)
path='./out/frame-'+str(self.i)+'.jpg'
cv2.imwrite(path,self.board)
cv2.imshow('frame',self.frame)
cv2.imshow('Board',self.board)
return list(self.cor)
    def Switch(self, k):
        if k==ord('c'):  # 'c' clears the board
            self.board=None
        if k==ord('e'):  # 'e' toggles draw/erase mode
            self.flag=int(not self.flag)
# if __name__=='__main__':
# BlackBoard()
cv2.destroyAllWindows()
|
{"hexsha": "7c66ed9ba3a969c51f4b0ab67abe2bdbb3dba0ea", "size": 2213, "ext": "py", "lang": "Python", "max_stars_repo_path": "T194/Backend/OpenCV/Process.py", "max_stars_repo_name": "krish232002/Hackathon_5.0", "max_stars_repo_head_hexsha": "78006c04aa902297649487d351c1ae33bac88b00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-22T17:47:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-22T17:47:13.000Z", "max_issues_repo_path": "T194/Backend/OpenCV/Process.py", "max_issues_repo_name": "krish232002/Hackathon_5.0", "max_issues_repo_head_hexsha": "78006c04aa902297649487d351c1ae33bac88b00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "T194/Backend/OpenCV/Process.py", "max_forks_repo_name": "krish232002/Hackathon_5.0", "max_forks_repo_head_hexsha": "78006c04aa902297649487d351c1ae33bac88b00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0298507463, "max_line_length": 110, "alphanum_fraction": 0.5571622232, "include": true, "reason": "import numpy", "num_tokens": 604}
|
#! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
# Copyright Google 2016
# Copyright 2019 The BioNLP-HZAU Kaiyin Zhou
# Time:2019/04/08
"""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
class evalsequence(object):
def __init__(self,labels, predictions, num_classes, weights=None):
self.labels = labels
self.predictions = predictions
self.num_classes = num_classes
self.weights = weights
self.total_cm, self.update_op = self.streaming_confusion_matrix()
def _metric_variable(self,shape, dtype, validate_shape=True, name=None):
"""Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections.
If running in a `DistributionStrategy` context, the variable will be
"tower local". This means:
* The returned object will be a container with separate variables
per replica/tower of the model.
* When writing to the variable, e.g. using `assign_add` in a metric
update, the update will be applied to the variable local to the
replica/tower.
* To get a metric's result value, we need to sum the variable values
across the replicas/towers before computing the final answer.
Furthermore, the final answer should be computed once instead of
in every replica/tower. Both of these are accomplished by
running the computation of the final result value inside
`tf.contrib.distribution_strategy_context.get_tower_context(
).merge_call(fn)`.
Inside the `merge_call()`, ops are only added to the graph once
and access to a tower-local variable in a computation returns
the sum across all replicas/towers.
Args:
shape: Shape of the created variable.
dtype: Type of the created variable.
validate_shape: (Optional) Whether shape validation is enabled for
the created variable.
name: (Optional) String name of the created variable.
Returns:
A (non-trainable) variable initialized to zero, or if inside a
`DistributionStrategy` scope a tower-local variable container.
"""
# Note that synchronization "ON_READ" implies trainable=False.
return variable_scope.variable(
lambda: array_ops.zeros(shape, dtype),
collections=[
ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
],
validate_shape=validate_shape,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM,
name=name)
def streaming_confusion_matrix(self):
"""Calculate a streaming confusion matrix.
Calculates a confusion matrix. For estimation over a stream of data,
the function creates an `update_op` operation.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
Returns:
total_cm: A `Tensor` representing the confusion matrix.
update_op: An operation that increments the confusion matrix.
"""
# Local variable to accumulate the predictions in the confusion matrix.
total_cm = self._metric_variable(
[self.num_classes, self.num_classes], dtypes.float64, name='total_confusion_matrix')
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(self.predictions)
labels = math_ops.to_int64(self.labels)
num_classes = math_ops.to_int64(self.num_classes)
# Flatten the input if its rank > 1.
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
if (self.weights is not None) and (self.weights.get_shape().ndims > 1):
self.weights = array_ops.reshape(self.weights, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
labels, predictions, num_classes, weights=self.weights, dtype=dtypes.float64)
update_op = state_ops.assign_add(total_cm, current_cm)
return (total_cm, update_op)
@property
def precision_score(self):
precisions = []
for i in range(self.num_classes):
            # column sum = total predicted as class i (avoid the deprecated np.sum over a generator)
            colsum = np.sum([self.total_cm[r][i] for r in range(self.num_classes)])
precision = self.total_cm[i][i] / float(colsum + 1e-12)
precisions.append(precision)
return np.mean(precisions)
@property
def recall_score(self):
recalls = []
for i in range(self.num_classes):
            # row sum = total true instances of class i
            rowsum = np.sum(self.total_cm[i])
recall = self.total_cm[i][i] / float(rowsum+1e-12)
recalls.append(recall)
return np.mean(recalls)
@property
def f1_score(self):
fs = []
for i in range(self.num_classes):
            rowsum = np.sum(self.total_cm[i])
            colsum = np.sum([self.total_cm[r][i] for r in range(self.num_classes)])
precision = self.total_cm[i][i] / float(colsum+1e-12)
recall = self.total_cm[i][i] / float(rowsum+1e-12)
f = 2 * precision * recall / (precision + recall+1e-12)
fs.append(f)
return np.mean(fs)
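# A minimal numpy sketch of the macro-averaged metrics above, assuming a plain
# 3x3 confusion matrix `cm` with rows = true labels and columns = predictions:
#   cm = np.array([[5, 1, 0], [2, 3, 1], [0, 0, 4]])
#   precision = np.mean(np.diag(cm) / (cm.sum(axis=0) + 1e-12))
#   recall    = np.mean(np.diag(cm) / (cm.sum(axis=1) + 1e-12))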
|
{"hexsha": "02e3df71d3c6cb25bd6fe0856d84242d7fd41ac9", "size": 6791, "ext": "py", "lang": "Python", "max_stars_repo_path": "tfmetrics/metrics.py", "max_stars_repo_name": "kyzhouhzau/tfmetrics", "max_stars_repo_head_hexsha": "e7111ede6937dc4b27d0c3d1ab3497806940cbd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-16T02:42:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-26T16:55:35.000Z", "max_issues_repo_path": "tfmetrics/metrics.py", "max_issues_repo_name": "kyzhouhzau/tfmetrics", "max_issues_repo_head_hexsha": "e7111ede6937dc4b27d0c3d1ab3497806940cbd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tfmetrics/metrics.py", "max_forks_repo_name": "kyzhouhzau/tfmetrics", "max_forks_repo_head_hexsha": "e7111ede6937dc4b27d0c3d1ab3497806940cbd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-26T16:55:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-26T16:55:28.000Z", "avg_line_length": 48.5071428571, "max_line_length": 116, "alphanum_fraction": 0.6432042409, "include": true, "reason": "import numpy", "num_tokens": 1433}
|
"""shell
pip install -r https://raw.githubusercontent.com/datamllab/automl-in-action-notebooks/master/requirements.txt
"""
import tensorflow as tf
tf.random.set_seed(42)
"""
## Load data
"""
# Import the dataset loading function from sklearn
from sklearn.datasets import fetch_california_housing
# Load the California housing dataset
house_dataset = fetch_california_housing()
# Display the original data
house_dataset.keys()
# Import pandas package to format the data
import pandas as pd
# Extract the features with their names into a DataFrame
data = pd.DataFrame(house_dataset.data, columns=house_dataset.feature_names)
# Extract the target into a pd.Series object named MEDV
target = pd.Series(house_dataset.target, name="MEDV")
# Visualize the first 5 samples of the data
data.head(5)
"""invisible
"""
# Split data into training and test dataset
from sklearn.model_selection import train_test_split
train_data, test_data, train_targets, test_targets = train_test_split(
data, target, test_size=0.2, random_state=42
)
# Check the shapes of the whole dataset and the split training and test sets
print("--Shape of the whole data--\n {}".format(data.shape))
print("\n--Shape of the target vector--\n {}".format(target.shape))
print("\n--Shape of the training data--\n {}".format(train_data.shape))
print("\n--Shape of the testing data--\n {}".format(test_data.shape))
"""invisible
"""
train_data.shape, test_data.shape
"""invisible
"""
train_data
"""
## Data Preparation: normalization
"""
def norm(x, mean, std):
return (x - mean) / std
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
normed_train_data = norm(train_data, mean, std)
normed_test_data = norm(test_data, mean, std)
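# Sanity check (optional): after normalization the training features should have
# roughly zero mean and unit standard deviation:
#   normed_train_data.mean(axis=0), normed_train_data.std(axis=0)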
"""
## Build up an MLP
"""
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential(
[
layers.Dense(64, activation="relu", input_shape=[8]),
layers.Dense(64, activation="relu"),
layers.Dense(1),
]
)
# Now try out the model. Take a batch of 5 examples from the training data and call model.predict on it.
example_batch = normed_train_data[:5]
example_result = model.predict(example_batch)
example_result
"""invisible
"""
model.summary()
"""invisible
"""
# Customize the optimizer configuration (learning rate here)
optimizer = tf.keras.optimizers.RMSprop(0.01)
model.compile(loss="mse", optimizer=optimizer, metrics=["mae", "mse"])
"""
### Train & test the model
"""
model.fit(normed_train_data, train_targets, epochs=300, batch_size=1024, verbose=1)
"""invisible
"""
loss, mae, mse = model.evaluate(normed_test_data, test_targets, verbose=0)
mse
"""
### Tune the number of epochs
"""
# Train the model (in silent mode, verbose=0)
def build_model():
model = keras.Sequential(
[
layers.Dense(
64, activation="relu", input_shape=[normed_train_data.shape[1]]
),
layers.Dense(64, activation="relu"),
layers.Dense(1),
]
)
optimizer = tf.keras.optimizers.RMSprop(0.01)
model.compile(loss="mse", optimizer=optimizer, metrics=["mae", "mse"])
return model
model = build_model()
EPOCHS = 500
history = model.fit(
normed_train_data,
train_targets,
validation_split=0.2,
epochs=EPOCHS,
batch_size=1024,
verbose=1,
)
"""invisible
"""
import pandas as pd
hist = pd.DataFrame(history.history)
hist["epoch"] = history.epoch
hist.tail()
"""invisible
"""
import matplotlib.pyplot as plt
plt.plot(hist["epoch"], hist["mse"], label="train mse")
plt.plot(hist["epoch"], hist["val_mse"], label="val mse")
plt.xlabel("Epochs")
plt.ylabel("MSE")
# Set a title of the current axes.
plt.title("Training and Validation MSE by Epoch")
# show a legend on the plot
plt.legend()
# Display a figure.
plt.show()
"""invisible
"""
import numpy as np
def smooth_curve(values, std=5):
    # Gaussian smoothing: smooth a list of values by convolving with a Gaussian kernel
    width = std * 4
    x = np.linspace(-width, width, 2 * width + 1)
    kernel = np.exp(-((x / std) ** 2))
values = np.array(values)
weights = np.ones_like(values)
smoothed_values = np.convolve(values, kernel, mode="same")
smoothed_weights = np.convolve(weights, kernel, mode="same")
return smoothed_values / smoothed_weights
"""invisible
"""
import matplotlib.pyplot as plt
plt.plot(hist["epoch"], smooth_curve(hist["mse"]), label="train mse")
plt.plot(hist["epoch"], smooth_curve(hist["val_mse"]), label="val mse")
plt.xlabel("Epochs")
plt.ylabel("MSE")
plt.ylim((0, 0.5))
# Set a title of the current axes.
plt.title("Training and Validation MSE by Epoch (smoothed)")
# show a legend on the plot
plt.legend()
# Display a figure.
plt.show()
"""
## Final fit model with full data & test model
"""
model = build_model()
model.fit(normed_train_data, train_targets, epochs=150, batch_size=1024, verbose=1)
"""invisible
"""
loss, mae, mse = model.evaluate(normed_test_data, test_targets, verbose=0)
mse
"""invisible
"""
test_predictions = model.predict(normed_test_data).flatten()
a = plt.axes(aspect="equal")
plt.scatter(test_targets, test_predictions)
plt.xlabel("True Values [MEDV]")
plt.ylabel("Predictions [MEDV]")
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
|
{"hexsha": "2bbbabbd28babf26281e3a7d6aa9608742901af9", "size": 5299, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/py/3.1-House-Price-Prediction-MLP.py", "max_stars_repo_name": "saibaldas/automl-in-action-notebooks", "max_stars_repo_head_hexsha": "4ddd105c7fccf2382091afaf05884ab816ad4b38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-29T19:47:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T19:47:09.000Z", "max_issues_repo_path": "src/py/3.1-House-Price-Prediction-MLP.py", "max_issues_repo_name": "qingquansong/automl-in-action-notebooks", "max_issues_repo_head_hexsha": "4ddd105c7fccf2382091afaf05884ab816ad4b38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/py/3.1-House-Price-Prediction-MLP.py", "max_forks_repo_name": "qingquansong/automl-in-action-notebooks", "max_forks_repo_head_hexsha": "4ddd105c7fccf2382091afaf05884ab816ad4b38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0391304348, "max_line_length": 109, "alphanum_fraction": 0.7035289677, "include": true, "reason": "import numpy", "num_tokens": 1311}
|
import argparse
import os
import nibabel as nib
import numpy as np
import pandas as pd
def find_sample(path):
"""
Args:
path (str): path to mri images
"""
labels_data = {"images": [], "labels": []}
t = 0
for case in os.listdir(path):
if case.startswith("."):
continue
case_folder = os.path.join(path, case)
for person in os.listdir(case_folder):
if person.startswith("."):
continue
person_folder = os.path.join(case_folder, person)
for train in os.listdir(person_folder):
if train.startswith("."):
continue
if train == "t1weighted.nii.gz":
labels_data["images"].append(
os.path.join(person_folder, "t1weighted.nii.gz")
)
if train == "labels.DKT31.manual+aseg.nii.gz":
labels_data["labels"].append(
os.path.join(
person_folder, "labels.DKT31.manual+aseg.nii.gz"
)
)
print(t)
t += 1
return pd.DataFrame(labels_data)
def main(datapath, n_labels):
"""
Args:
datapath (str): path to mri files
n_labels (int): number of labels generated (the n most frequent labels)
"""
# Labels are from https://mindboggle.readthedocs.io/en/latest/labels.html
# DKT protocol minus removed labels
labels = [1002, 1003]
labels.extend([*range(1005, 1032)])
labels.extend([1035, 1036])
labels.extend([2002, 2003])
labels.extend([*range(2005, 2032)])
labels.extend([2034, 2035])
# Non-cortical labels
    labels.extend(
        [
            16, 24, 14, 15, 72, 85, 4, 5, 6, 7, 10, 11, 12, 13,
            17, 18, 25, 26, 28, 30, 91, 43, 44, 45, 46, 49, 50, 51,
            52, 53, 54, 57, 58, 60, 62, 92, 630, 631, 632,
        ]
    )
n_labels = min(n_labels, len(labels))
labels = labels[:n_labels]
dataframe = find_sample(datapath)
for _, row in dataframe.iterrows():
voxel_labels = nib.load(row["labels"]).get_fdata()
new_img = np.zeros([256, 256, 256])
new_img[
: voxel_labels.shape[0],
: voxel_labels.shape[1],
: voxel_labels.shape[2],
] = voxel_labels
segmentation = np.zeros([256, 256, 256])
for j, l in enumerate(labels):
mask = np.equal(l, new_img)
segmentation[mask] = j + 1
nib_seg = nib.Nifti1Image(segmentation, np.eye(4))
nib_seg.to_filename(
row["labels"].split(".nii")[0] + "_labels" + ".nii.gz"
)
dataframe["nii_labels"] = dataframe["labels"].apply(
lambda a: a.split(".nii")[0] + "_labels" + ".nii.gz"
)
dataframe.to_csv("./data/dataset.csv", index=False)
dataframe = dataframe.sample(frac=1, random_state=42)
dataframe.iloc[:70, :].to_csv("./data/dataset_train.csv", index=False)
dataframe.iloc[70:80, :].to_csv("./data/dataset_valid.csv", index=False)
dataframe.iloc[80:, :].to_csv("./data/dataset_infer.csv", index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="folders to files")
parser.add_argument("datapath", type=str, help="dir with image")
parser.add_argument(
"n_labels",
type=int,
help="""number of labels used for segmentation.
The first 62 follow the DKT human labeling protocol
while the next 39 are from Freesurfer""",
)
params = parser.parse_args()
main(params.datapath, params.n_labels)
|
{"hexsha": "1de40bfa3a7690dbdbeaba45e7d3f2941f38a88c", "size": 4129, "ext": "py", "lang": "Python", "max_stars_repo_path": "neuro/scripts/prepare_data.py", "max_stars_repo_name": "ssktotoro/neuro", "max_stars_repo_head_hexsha": "2d6d4cd060ee368d67c3437cac2b64eeb7d7180c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neuro/scripts/prepare_data.py", "max_issues_repo_name": "ssktotoro/neuro", "max_issues_repo_head_hexsha": "2d6d4cd060ee368d67c3437cac2b64eeb7d7180c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-09T06:07:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-12T03:16:38.000Z", "max_forks_repo_path": "neuro/scripts/prepare_data.py", "max_forks_repo_name": "ssktotoro/neuro", "max_forks_repo_head_hexsha": "2d6d4cd060ee368d67c3437cac2b64eeb7d7180c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.711409396, "max_line_length": 79, "alphanum_fraction": 0.4967304432, "include": true, "reason": "import numpy", "num_tokens": 987}
|
/*=============================================================================
Copyright (c) 2002-2015 Joel de Guzman
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
///////////////////////////////////////////////////////////////////////////////
//
// A complex number micro parser.
//
// [ JDG May 10, 2002 ] spirit1
// [ JDG May 9, 2007 ] spirit2
// [ JDG May 12, 2015 ] spirit X3
//
///////////////////////////////////////////////////////////////////////////////
#include <boost/config/warning_disable.hpp>
#include <boost/spirit/home/x3.hpp>
#include <iostream>
#include <string>
#include <complex>
///////////////////////////////////////////////////////////////////////////////
// Our complex number parser/compiler
///////////////////////////////////////////////////////////////////////////////
namespace client
{
template <typename Iterator>
bool parse_complex(Iterator first, Iterator last, std::complex<double>& c)
{
using boost::spirit::x3::double_;
using boost::spirit::x3::_attr;
using boost::spirit::x3::phrase_parse;
using boost::spirit::x3::ascii::space;
double rN = 0.0;
double iN = 0.0;
auto fr = [&](auto& ctx){ rN = _attr(ctx); };
auto fi = [&](auto& ctx){ iN = _attr(ctx); };
bool r = phrase_parse(first, last,
// Begin grammar
(
'(' >> double_[fr]
>> -(',' >> double_[fi]) >> ')'
| double_[fr]
),
// End grammar
space);
if (!r || first != last) // fail if we did not get a full match
return false;
c = std::complex<double>(rN, iN);
return r;
}
}
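// Example parses accepted by the grammar above (assuming well-formed input):
//   "3.5"        -> (3.5,0)
//   "(1.2)"      -> (1.2,0)
//   "(1.2, 3.4)" -> (1.2,3.4)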
////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////
int
main()
{
std::cout << "/////////////////////////////////////////////////////////\n\n";
std::cout << "\t\tA complex number micro parser for Spirit...\n\n";
std::cout << "/////////////////////////////////////////////////////////\n\n";
std::cout << "Give me a complex number of the form r or (r) or (r,i) \n";
std::cout << "Type [q or Q] to quit\n\n";
std::string str;
while (getline(std::cin, str))
{
if (str.empty() || str[0] == 'q' || str[0] == 'Q')
break;
std::complex<double> c;
if (client::parse_complex(str.begin(), str.end(), c))
{
std::cout << "-------------------------\n";
std::cout << "Parsing succeeded\n";
std::cout << "got: " << c << std::endl;
std::cout << "\n-------------------------\n";
}
else
{
std::cout << "-------------------------\n";
std::cout << "Parsing failed\n";
std::cout << "-------------------------\n";
}
}
std::cout << "Bye... :-) \n\n";
return 0;
}
|
{"hexsha": "02e5089b4ec032ba172551c89bd1844193dfdf1d", "size": 3294, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/spirit/example/x3/complex_number.cpp", "max_stars_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_stars_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2019-02-27T06:57:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-29T10:56:19.000Z", "max_issues_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/spirit/example/x3/complex_number.cpp", "max_issues_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_issues_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-04-04T18:00:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-04T18:00:00.000Z", "max_forks_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/spirit/example/x3/complex_number.cpp", "max_forks_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_forks_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2019-08-20T13:45:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T18:23:49.000Z", "avg_line_length": 33.2727272727, "max_line_length": 82, "alphanum_fraction": 0.3557984214, "num_tokens": 686}
|
import unittest
import import_ipynb
import pandas as pd
import pandas.testing as pd_testing
import numpy as np
import numpy.testing as np_testing
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
class Test(unittest.TestCase):
def setUp(self):
import Exercise5_5
self.exercises = Exercise5_5
self.file_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter05/DataSet/taxstats2015.csv'
self.df = pd.read_csv(self.file_url, usecols=['Postcode', 'Average total business income', 'Average total business expenses'])
self.X = self.df[['Average total business income', 'Average total business expenses']]
self.min_max_scaler = MinMaxScaler()
self.min_max_scaler.fit(self.X)
self.X_min_max = self.min_max_scaler.transform(self.X)
self.kmeans = KMeans(random_state=1, n_clusters=4, init='k-means++', n_init=5)
self.kmeans.fit(self.X_min_max)
self.df['cluster8'] = self.kmeans.predict(self.X_min_max)
self.standard_scaler = StandardScaler()
self.X_scaled = self.standard_scaler.fit_transform(self.X)
self.kmeans = KMeans(random_state=1, n_clusters=4, init='k-means++', n_init=5)
self.kmeans.fit(self.X_scaled)
self.df['cluster9'] = self.kmeans.predict(self.X_scaled)
def test_file_url(self):
self.assertEqual(self.exercises.file_url, self.file_url)
def test_df(self):
pd_testing.assert_frame_equal(self.exercises.df, self.df)
def test_X_min_max(self):
np_testing.assert_array_equal(self.exercises.X_min_max, self.X_min_max)
def test_X_scaled(self):
np_testing.assert_array_equal(self.exercises.X_scaled, self.X_scaled)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "91acd8d2c5568ae13258a8a9b75bc58cbb79f598", "size": 1747, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter05/Exercise5.06/Test5.06.py", "max_stars_repo_name": "pmayd/The-Data-Science-Workshop", "max_stars_repo_head_hexsha": "a712f1fdbdf839c8b9288f4d4cdebcf5ebe146c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 162, "max_stars_repo_stars_event_min_datetime": "2020-02-11T08:45:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T13:06:03.000Z", "max_issues_repo_path": "Chapter05/Exercise5.06/Test5.06.py", "max_issues_repo_name": "pmayd/The-Data-Science-Workshop", "max_issues_repo_head_hexsha": "a712f1fdbdf839c8b9288f4d4cdebcf5ebe146c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-03-31T10:05:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:12:16.000Z", "max_forks_repo_path": "Chapter05/Exercise5.06/Test5.06.py", "max_forks_repo_name": "carltyndall/The-Data-Science-Workshop", "max_forks_repo_head_hexsha": "e16b96016314590ffe7294c7186923cfbec7b8b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 193, "max_forks_repo_forks_event_min_datetime": "2019-11-08T05:15:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T09:14:02.000Z", "avg_line_length": 38.8222222222, "max_line_length": 138, "alphanum_fraction": 0.7750429307, "include": true, "reason": "import numpy", "num_tokens": 434}
|
"""Basic Unit tests for Classes in Helper_functions"""
import unittest
import numpy as np
import random
from random import randint, sample
import helper_functions as thehelp
class NewDataFrameTests(unittest.TestCase):
"""Tests the NewDataFrame class within help_functions.py"""
def setUp(self):
"""Set up a random NewDataframe and list to test"""
self.df1 = thehelp.NewDataFrame(np.random.randint(0, 100, size=(10, 4)))
self.list = random.sample(range(0, 100), 10)
def test_null_count(self):
"""Tests null_count Method"""
self.assertEqual(self.df1.null_count().shape, (4, ))
def test_list_2_series(self):
"""Tests list_2_series method"""
        self.assertEqual(self.df1.list_2_series(self.list).shape, (10, 5))
def test_split_dates(self):
"""Test split_dates method"""
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "c5006c9c89de5e11e24ab456000aa265c2f941df", "size": 897, "ext": "py", "lang": "Python", "max_stars_repo_path": "lambdata_Lopez_John/test_helper_functions.py", "max_stars_repo_name": "Lopez-John/lambdata-Lopez-John", "max_stars_repo_head_hexsha": "97f127d16a6f6304b62e9c13b5c8086ca98525bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lambdata_Lopez_John/test_helper_functions.py", "max_issues_repo_name": "Lopez-John/lambdata-Lopez-John", "max_issues_repo_head_hexsha": "97f127d16a6f6304b62e9c13b5c8086ca98525bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lambdata_Lopez_John/test_helper_functions.py", "max_forks_repo_name": "Lopez-John/lambdata-Lopez-John", "max_forks_repo_head_hexsha": "97f127d16a6f6304b62e9c13b5c8086ca98525bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-01T23:06:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-01T23:06:12.000Z", "avg_line_length": 28.935483871, "max_line_length": 80, "alphanum_fraction": 0.6811594203, "include": true, "reason": "import numpy", "num_tokens": 207}
|
import dgl
import numpy as np
from openhgnn.models.MAGNN import mp_instance_sampler, mini_mp_instance_sampler
class MAGNN_sampler():
'''
Description
-----------
    MAGNN sampler based on the author's implementation. The author's implementation only samples 1-layer subgraphs;
    here we implement a MAGNN sampler which can sample n-layer subgraphs based on DGL and PyTorch.
    The MAGNN sampler samples n-layer subgraphs starting from seed nodes of a certain class, using the metapath
    instances of the seed nodes, which contain all neighbors that might be sampled.
    Users can use the MAGNN sampler as a dataset for torch.DataLoader, so that num_workers can be assigned to
    sample subgraphs concurrently.
Parameters
----------
g : dgl.DGLGraph
the dgl graph from which we sample subgraphs.
mask : list or numpy.ndarray
the train/test/test nodes mask, which should be like train mask = [False, True, False, True, False] indicating
that the 1th, 4th nodes are training nodes of g. Note that the mask indicates which part of nodes in g will be
sampled by MAGNN sampler. e.g. if it's the test mask, the sample will be employed on testing nodes of g to
sample subgraphs for testing phase. This param is useful when the graph is too large to be directly validate on.
n_layers : int
the number of layers of each subgraph.
category : any
the class of seed_nodes.
metapath_list : list
the type of all metapaths in graph g. e.g. ['M-A-M', 'M-D-M', ...]
num_samples : int
        the maximum number of metapath instances to sample for each node, i.e. for each node, if the number of
        metapath instances exceeds num_samples, we sample num_samples instances randomly based on a calculated
        probability; otherwise, we sample all instances.
dataset_name : str
the name of the dataset.
Default : 'dblp4MAGNN'
'''
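    # A minimal usage sketch (hypothetical graph/mask variables; collate_fn is the
    # function defined below in this module):
    #   sampler = MAGNN_sampler(g, train_mask, n_layers=2, category='M',
    #                           metapath_list=['M-A-M', 'M-D-M'], num_samples=10)
    #   loader = torch.utils.data.DataLoader(sampler, batch_size=8, num_workers=4,
    #                                        collate_fn=collate_fn)
    #   for subgraph, mini_mp_inst, seed_nodes in loader:
    #       ...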
def __init__(self, g, mask, n_layers, category, metapath_list,
num_samples, dataset_name='dblp4MAGNN'):
self.g = g
self.mask = mask
self.dataset_name = dataset_name
self.metapath_list = metapath_list
self.n_layers = n_layers
self.category = category
self.num_samples = num_samples
self.mp_inst = mp_instance_sampler(g, self.metapath_list, self.dataset_name)
def __getitem__(self, idx):
'''
Parameters
----------
idx : int
The node ids of ntype as self.category, that is, the original seed_nodes is {self.category: idx}.
        Returns
        -------
        dict
            The nids sampled by the MAGNN_sampler from the original graph. The nids are original ids.
        dict
            The metapath instances corresponding to the sampled graph. The nids in the mp instances are original ids.
        dict
            The original seed node, i.e. {self.category: idx}.
        DGLGraph
            The original heterogeneous graph; the subgraph itself is extracted later in collate_fn.
Notes
-----
Sampling neighbors based on seed_nodes and metapath instances mp_inst[i],
we only need to sample mp_inst from all metapath instances with seed_nodes as mp_inst[i][0] and
sample neighbors as mp_inst[i][1, 2, ...]. Here we consider mp_inst[i][0] as dst_nodes.
But the sampled mp_inst above does not contain all the mp_inst of the sampled subgraph.
        For example, if seed_nodes contain M1 and we sample neighbors A1, M2 based on metapath instance M1-A1-M2,
        we would lose metapath instances like M2-A1-M1, because M2 is not one of the seed_nodes while M2-A1-M1 is
        definitely one of the metapath instances of the sampled subgraph.
'''
idx = np.where(self.mask)[0][idx]
_seed_nodes = {self.category: idx}
seed_nodes = {self.category: np.array([idx])}
if self.n_layers < 1:
raise ValueError("Wrong value of number of layers.")
for _ in range(self.n_layers):
mini_mp_inst = mini_mp_instance_sampler(seed_nodes=seed_nodes, mp_instances=self.mp_inst,
num_samples=self.num_samples)
# seed_nodes = {}
for metapath in mini_mp_inst.keys():
_mini_mp_inst = mini_mp_inst[metapath]
for i in range(0, len(metapath)):
if not metapath[i] in seed_nodes.keys():
seed_nodes[metapath[i]] = _mini_mp_inst[:, i]
else:
seed_nodes[metapath[i]] = np.concatenate((seed_nodes[metapath[i]], _mini_mp_inst[:, i]),
axis=0)
for ntype in seed_nodes.keys():
seed_nodes[ntype] = np.unique(seed_nodes[ntype])
for meta, idx in mini_mp_inst.items():
mini_mp_inst[meta] = np.unique(np.concatenate((idx, np.flip(idx, axis=1))), axis=0)
# Here seed_nodes are the nodes sampled from original graph with {self.category: idx} as _seed_nodes
# while mini_mp_inst is corresponding mini metapath instances
return seed_nodes, mini_mp_inst, _seed_nodes, self.g
def __len__(self):
# return self.g.number_of_nodes(self.category)
return len(self.mask[self.mask == 1])
def collate_fn(batch):
'''
Parameters
----------
batch : tuple
The mini batch data, including seed_nodes, mini_mp_inst and g.
The seed_nodes is the nids sampled by the MAGNN_sampler from the original graph.
The mini_mp_inst is the metapath instances corresponding to the sampled graph.
        The g is the original graph, from which the subgraph is extracted here.
Returns
-------
tuple
        the sampled subgraph, the corresponding mini metapath instances, and the seed node ids relabeled to the subgraph
'''
def convert_mp_nids(old_metapath_dict, old_nids):
# convert the old_nids in metapath instances into the new_nids in the subgraph
for meta in old_metapath_dict.keys():
for i, ntype in enumerate(meta):
old_metapath_dict[meta][:, i] = \
np.array(
list(map(lambda x: np.argwhere(old_nids[ntype] == x)[0][0], old_metapath_dict[meta][:, i]))
)
return old_metapath_dict
nids = {}
mini_mp_inst = {}
seed_nodes = {}
ntypes = []
meta_types = []
seed_ntypes = []
[ntypes.extend(list(_batch[0].keys())) for _batch in batch]
[meta_types.extend(list(_batch[1].keys())) for _batch in batch]
[seed_ntypes.extend(list(_batch[2].keys())) for _batch in batch]
ntypes, meta_types, seed_ntypes = set(ntypes), set(meta_types), set(seed_ntypes)
for _batch in batch:
for ntype in ntypes:
if ntype not in _batch[0].keys():
continue
if ntype in nids.keys():
nids[ntype] = np.concatenate((nids[ntype], _batch[0][ntype]), axis=0)
else:
nids[ntype] = _batch[0][ntype]
for meta_type in meta_types:
if meta_type not in _batch[1].keys():
continue
if meta_type in mini_mp_inst.keys():
mini_mp_inst[meta_type] = np.concatenate((mini_mp_inst[meta_type], _batch[1][meta_type]),
axis=0)
else:
mini_mp_inst[meta_type] = _batch[1][meta_type]
for seed_ntype in seed_ntypes:
if seed_ntype not in _batch[2].keys():
continue
_batch[2][seed_ntype] = np.array([_batch[2][seed_ntype]])
if seed_ntype in seed_nodes.keys():
seed_nodes[seed_ntype] = np.concatenate((seed_nodes[seed_ntype], _batch[2][seed_ntype]),
axis=0)
else:
seed_nodes[seed_ntype] = _batch[2][seed_ntype]
for ntype in nids.keys():
nids[ntype] = np.sort(np.unique(nids[ntype], axis=0), axis=0)
for meta_type in mini_mp_inst.keys():
mini_mp_inst[meta_type] = np.unique(mini_mp_inst[meta_type], axis=0)
mini_mp_inst = convert_mp_nids(mini_mp_inst, nids)
_subgraph = dgl.node_subgraph(batch[0][3], nids, store_ids=True)
for seed_ntype in seed_ntypes:
seed_nodes[seed_ntype] = np.unique(seed_nodes[seed_ntype], axis=0)
_nids = _subgraph.nodes[seed_ntype].data[dgl.NID].cpu().numpy()
seed_nodes[seed_ntype] = np.array(
list(map(lambda x: np.argwhere(_nids == x)[0][0], seed_nodes[seed_ntype]))
)
# if seed_nodes = {'A':[0, 2]}, the 0th and 2th nodes of type 'A' are seed_nodes in
# _subgraph
return _subgraph, mini_mp_inst, seed_nodes
|
{"hexsha": "06b607582d2a09e54ba8c1e565d5eb1d7c99dc7c", "size": 8703, "ext": "py", "lang": "Python", "max_stars_repo_path": "openhgnn/sampler/MAGNN_sampler.py", "max_stars_repo_name": "clearhanhui/OpenHGNN", "max_stars_repo_head_hexsha": "9c3b7e7a4bc9b3df38ee6dc7154f950340ceec20", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openhgnn/sampler/MAGNN_sampler.py", "max_issues_repo_name": "clearhanhui/OpenHGNN", "max_issues_repo_head_hexsha": "9c3b7e7a4bc9b3df38ee6dc7154f950340ceec20", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openhgnn/sampler/MAGNN_sampler.py", "max_forks_repo_name": "clearhanhui/OpenHGNN", "max_forks_repo_head_hexsha": "9c3b7e7a4bc9b3df38ee6dc7154f950340ceec20", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4030612245, "max_line_length": 120, "alphanum_fraction": 0.62173963, "include": true, "reason": "import numpy", "num_tokens": 2096}
|
"""
==============
Load converter
==============
This example demonstrates passing a custom converter to `numpy.genfromtxt` to
extract dates from a CSV file.
"""
import dateutil.parser
from matplotlib import cbook
import matplotlib.pyplot as plt
import numpy as np
datafile = cbook.get_sample_data('msft.csv', asfileobj=False)
print('loading', datafile)
data = np.genfromtxt(
datafile, delimiter=',', names=True,
dtype=None, converters={0: dateutil.parser.parse})
fig, ax = plt.subplots()
ax.plot(data['Date'], data['High'], '-')
fig.autofmt_xdate()
plt.show()
|
{"hexsha": "793de7dc92649e76e437b4458e5147cb0df96644", "size": 575, "ext": "py", "lang": "Python", "max_stars_repo_path": "matplotlib-3.4.3/matplotlib-3.4.3/examples/misc/load_converter.py", "max_stars_repo_name": "JohnLauFoo/clc_packages_Yu", "max_stars_repo_head_hexsha": "259f01d9b5c02154ce258734d519ae8995cd0991", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-13T17:21:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-13T17:21:44.000Z", "max_issues_repo_path": "matplotlib-3.4.3/matplotlib-3.4.3/examples/misc/load_converter.py", "max_issues_repo_name": "JohnLauFoo/clc_packages_Yu", "max_issues_repo_head_hexsha": "259f01d9b5c02154ce258734d519ae8995cd0991", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matplotlib-3.4.3/matplotlib-3.4.3/examples/misc/load_converter.py", "max_forks_repo_name": "JohnLauFoo/clc_packages_Yu", "max_forks_repo_head_hexsha": "259f01d9b5c02154ce258734d519ae8995cd0991", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2962962963, "max_line_length": 77, "alphanum_fraction": 0.6956521739, "include": true, "reason": "import numpy", "num_tokens": 136}
|
//
// Created by janw on 02.06.2020.
//
// STL
#include <iostream>
#include <algorithm>
#include <sstream>
#include <string>
#include <boost/iostreams/copy.hpp>
#include <boost/iostreams/filtering_streambuf.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include "laser_slam_ros/visual_view.hpp"
#define PRINT(x) std::cout << #x << " = " << x << std::endl
using std::cout;
using std::endl;
#include <signal.h>
#include <termios.h>
#include <stdio.h>
int getch()
{
static struct termios oldt, newt;
tcgetattr( STDIN_FILENO, &oldt); // save old settings
newt = oldt;
newt.c_lflag &= ~(ICANON); // disable buffering
tcsetattr( STDIN_FILENO, TCSANOW, &newt); // apply new settings
int c = getchar(); // read character (non-blocking)
tcsetattr( STDIN_FILENO, TCSANOW, &oldt); // restore old settings
return c;
}
namespace laser_slam_ros {
VisualView::VisualView()
: isCompressedFlag(false)
{
// ++ids;
// LOG(INFO) << "allocated " << ids;
}
VisualView::VisualView(const laser_slam::LaserScan &iscan,
const laser_slam::Pose &ipose,
const int &ihorRes,
const int &ivertRes,
bool iorganized)
: isCompressedFlag(false),
horRes(ihorRes),
vertRes(ivertRes),
organized(iorganized),
pixelOffsets{0, 6, 12, 18},
// MulRan
// vertAngles{-15.654, -15.062, -14.497, -13.947, -13.489, -12.908, -12.337, -11.803,
// -11.347, -10.78, -10.224, -9.675, -9.239, -8.664, -8.118, -7.574,
// -7.131, -6.575, -6.019, -5.47, -5.037, -4.478, -3.921, -3.382,
// -2.938, -2.274, -1.84, -1.282, -0.842, -0.29, 0.262, 0.805,
// 1.246, 1.799, 2.346, 2.894, 3.338, 3.891, 4.442, 4.989,
// 5.443, 5.977, 6.535, 7.079, 7.54, 8.086, 8.63, 9.189,
// 9.633, 10.182, 10.734, 11.309, 11.751, 12.305, 12.852, 13.442,
// 13.91, 14.444, 15.012, 15.615, 16.073, 16.629, 17.217, 17.84}
// KITTI
vertAngles{-24.8 , -24.37460317, -23.94920635, -23.52380952,
-23.0984127 , -22.67301587, -22.24761905, -21.82222222,
-21.3968254 , -20.97142857, -20.54603175, -20.12063492,
-19.6952381 , -19.26984127, -18.84444444, -18.41904762,
-17.99365079, -17.56825397, -17.14285714, -16.71746032,
-16.29206349, -15.86666667, -15.44126984, -15.01587302,
-14.59047619, -14.16507937, -13.73968254, -13.31428571,
-12.88888889, -12.46349206, -12.03809524, -11.61269841,
-11.18730159, -10.76190476, -10.33650794, -9.91111111,
-9.48571429, -9.06031746, -8.63492063, -8.20952381,
-7.78412698, -7.35873016, -6.93333333, -6.50793651,
-6.08253968, -5.65714286, -5.23174603, -4.80634921,
-4.38095238, -3.95555556, -3.53015873, -3.1047619 ,
-2.67936508, -2.25396825, -1.82857143, -1.4031746 ,
-0.97777778, -0.55238095, -0.12698413, 0.2984127 ,
0.72380952, 1.14920635, 1.57460317, 2. }
{
pose = ipose;
time_ns = iscan.time_ns;
intensity.resize(vertRes, horRes);
intensity.setZero();
range.resize(vertRes, horRes);
range.setZero();
count.resize(vertRes, horRes);
count.setZero();
dirs.resize(vertRes * horRes, Eigen::Vector3f::Zero());
unsigned intDim = 0;
if(iscan.scan.descriptorExists("intensity")){
intDim = iscan.scan.getDescriptorDimension("intensity");
// cout << "intDim = " << intDim << endl;
}
else{
LOG(ERROR) << "No intensity data in laser scan";
}
// cout << "iscan.scan.getNbPoints() = " << iscan.scan.getNbPoints() << endl;
if (organized) {
for (size_t i = 0u; i < iscan.scan.getNbPoints(); ++i) {
float x = iscan.scan.features(0, i);
float y = iscan.scan.features(1, i);
float z = iscan.scan.features(2, i);
float intVal = iscan.scan.descriptors(intDim, i);
float rangeVal = std::sqrt(x * x + y * y + z * z);
if (rangeVal > rangeThresh) {
int vertCoord = i / horRes;
int horCoord = (i - pixelOffsets[vertCoord % 4] + horRes) % horRes;
// int horCoord = getHorCoord(x, y, z);
// int vertCoord = getVertCoord(x, y, z);
// if(horCoord < 0 || horRes <= horCoord || vertCoord < 0 || vertRes <= vertCoord) {
// cout << i << ": (" << x << ", " << y << ", " << z << ")" << endl;
// // cout << "horAngle = " << horAngle << "\tvertAngle = " << vertAngle << "\tintensity = " << intVal << endl;
// cout << "horCoord = " << horCoord << "\tvertCoord = " << vertCoord << endl;
//
// LOG(ERROR) << "Wrong image coordinates";
// throw "Wrong image coordinates";
// }
Eigen::Vector3f dir = Eigen::Vector3f(x, y, z).normalized();
intensity(vertCoord, horCoord) = intVal;
range(vertCoord, horCoord) = rangeVal;
count(vertCoord, horCoord) += 1;
dirs[vertCoord * horRes + horCoord] = dir;
}
}
}
// do interpolation
else {
// std::cout << "interpolating intensity image" << std::endl;
// std::cout << "binning" << std::endl;
std::vector<std::vector<std::vector<int>>> bins(vertRes,
std::vector<std::vector<int>>(horRes, std::vector<int>()));
for (size_t i = 0u; i < iscan.scan.getNbPoints(); ++i) {
float x = iscan.scan.features(0, i);
float y = iscan.scan.features(1, i);
float z = iscan.scan.features(2, i);
float intVal = iscan.scan.descriptors(intDim, i);
float rangeVal = std::sqrt(x * x + y * y + z * z);
if (rangeVal > rangeThresh) {
int horCoord = getHorCoordLow(x, y, z);
int vertCoord = getVertCoordLow(x, y, z);
if(0 <= horCoord && horCoord < horRes && 0 <= vertCoord && vertCoord < vertRes) {
// std::cout << "adding to bin (" << vertCoord << ", " << horCoord << ")" << std::endl;
bins[vertCoord][horCoord].push_back(i);
}
}
}
// std::cout << "interpolating" << std::endl;
for(int r = 0; r < vertRes; ++r) {
for (int c = 0; c < horRes; ++c) {
// PRINT(r);
// PRINT(c);
Eigen::Vector3f dir = getDir(r, c);
float horAngle = getHorAngle(dir(0), dir(1), dir(2));
float vertAngle = getVertAngle(dir(0), dir(1), dir(2));
// PRINT(dir.transpose());
// PRINT(horAngle);
// PRINT(vertAngle);
int nh00 = getClosest(iscan, bins[r][c], dir);
int nh10 = -1;
if (r > 0) {
nh10 = getClosest(iscan, bins[r - 1][c], dir);
}
// there is always a neighboring column
int nh01 = getClosest(iscan, bins[r][(c - 1 + horRes) % horRes], dir);
int nh11 = -1;
if (r > 0) {
nh11 = getClosest(iscan, bins[r - 1][(c - 1 + horRes) % horRes], dir);
}
// PRINT(nh00);
// PRINT(nh10);
// PRINT(nh01);
// PRINT(nh11);
Eigen::Vector4f pt = Eigen::Vector4f::Zero();
if (nh00 >= 0 && nh10 >= 0 && nh01 >= 0 && nh11 >= 0) {
Eigen::Vector4f pt00 = getPoint(iscan, nh00, intDim);
Eigen::Vector4f pt10 = getPoint(iscan, nh10, intDim);
Eigen::Vector4f pt01 = getPoint(iscan, nh01, intDim);
Eigen::Vector4f pt11 = getPoint(iscan, nh11, intDim);
// first interpolate horizontally
Eigen::Vector4f pt0 = interpolateHor(pt01, pt00, horAngle);
Eigen::Vector4f pt1 = interpolateHor(pt11, pt10, horAngle);
// then vertically
pt = interpolateVert(pt0, pt1, vertAngle);
// {
// PRINT(r);
// PRINT(c);
// PRINT(dir.transpose());
// PRINT(horAngle);
// PRINT(vertAngle);
// PRINT(getHorAngle(pt01(0), pt01(1), pt01(2)));
// PRINT(getHorAngle(pt00(0), pt00(1), pt00(2)));
// PRINT(nh00);
// PRINT(nh10);
// PRINT(nh01);
// PRINT(nh11);
// PRINT(pt00.transpose());
// PRINT(pt10.transpose());
// PRINT(pt01.transpose());
// PRINT(pt11.transpose());
// PRINT(pt0.transpose());
// PRINT(pt1.transpose());
// PRINT(pt.transpose());
// while (getch() != 'n') {
//
// }
// }
}
else if (nh00 >= 0 && nh10 >= 0) {
Eigen::Vector4f pt00 = getPoint(iscan, nh00, intDim);
Eigen::Vector4f pt10 = getPoint(iscan, nh10, intDim);
// PRINT(pt00.transpose());
// PRINT(pt10.transpose());
pt = interpolateVert(pt00, pt10, vertAngle);
}
else if (nh01 >= 0 && nh11 >= 0) {
Eigen::Vector4f pt01 = getPoint(iscan, nh01, intDim);
Eigen::Vector4f pt11 = getPoint(iscan, nh11, intDim);
// PRINT(pt01.transpose());
// PRINT(pt11.transpose());
// interpolate vertically
pt = interpolateVert(pt01, pt11, vertAngle);
}
else if (nh00 >= 0 && nh01 >= 0) {
Eigen::Vector4f pt00 = getPoint(iscan, nh00, intDim);
Eigen::Vector4f pt01 = getPoint(iscan, nh01, intDim);
// PRINT(pt00.transpose());
// PRINT(pt01.transpose());
// interpolate horizontally
pt = interpolateHor(pt01, pt00, horAngle);
}
else if (nh10 >= 0 && nh11 >= 0) {
Eigen::Vector4f pt10 = getPoint(iscan, nh10, intDim);
Eigen::Vector4f pt11 = getPoint(iscan, nh11, intDim);
// PRINT(pt10.transpose());
// PRINT(pt11.transpose());
// interpolate horizontally
pt = interpolateHor(pt11, pt10, horAngle);
}
else {
// choose the closest
int nh = getClosest(iscan, std::vector<int>{nh00, nh10, nh01, nh11}, dir);
if (nh >= 0) {
pt = getPoint(iscan, nh, intDim);
}
}
// PRINT(pt.transpose());
if (pt != Eigen::Vector4f::Zero()) {
Eigen::Vector3f dir = pt.head<3>().normalized();
intensity(r, c) = pt(3);
range(r, c) = pt.head<3>().norm();
dirs[r * horRes + c] = dir;
count(r, c) += 1;
}
}
}
}
// buildKdtree();
++ids;
// LOG(INFO) << "allocated " << ids;
}
// VisualView::VisualView(VisualView &&other) :
// pixelOffsets(std::move(other.pixelOffsets)),
// vertAngles(std::move(other.vertAngles)),
// pose(std::move(other.pose)),
// time_ns(std::move(other.time_ns)),
// intensity(std::move(other.intensity)),
// range(std::move(other.range)),
// count(std::move(other.count)),
// dirs(std::move(other.dirs)) {
// ++ids;
// }
// VisualView::VisualView(const VisualView &other) :
// pixelOffsets(other.pixelOffsets),
// vertAngles(other.vertAngles),
// pose(other.pose),
// time_ns(other.time_ns),
// intensity(other.intensity),
// range(other.range),
// count(other.count),
// dirs(other.dirs) {
// ++ids;
// // LOG(INFO) << "allocated " << ids;
// }
// VisualView::~VisualView(){
// --ids;
// // LOG(INFO) << "allocated " << ids;
// }
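// Hedged note (added for readability): angDiff below returns the signed
// difference a1 - a2 wrapped into (-pi, pi], e.g. angDiff(0.1, 2*M_PI - 0.1)
// yields ~0.2 instead of ~-6.08, which keeps horizontal-angle interpolation
// stable across the 0 / 2*pi seam.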
float VisualView::angDiff(const float &a1, const float &a2) const {
float d = a1 - a2;
if (d < -M_PI) {
d += 2 * M_PI;
}
else if(M_PI < d) {
d -= 2 * M_PI;
}
return d;
}
float VisualView::getHorAngle(const float &x, const float &y, const float &z) const {
float horAngle = atan2(-y, x);
if (horAngle < 0) {
horAngle += 2.0 * M_PI;
}
return horAngle;
}
float VisualView::getVertAngle(const float &x, const float &y, const float &z) const {
float horRange = std::sqrt(x*x + y*y);
float vertAngle = atan2(z, horRange);
return vertAngle;
}
int VisualView::getHorCoord(const float &x, const float &y, const float &z) const {
float horAngle = getHorAngle(x, y, z);
// round to nearest integer
int horCoord = int(horAngle / (2 * M_PI) * horRes + 0.5);
return horCoord;
}
int VisualView::getVertCoord(const float &x, const float &y, const float &z) const {
float vertAngle = getVertAngle(x, y, z);
auto it = std::lower_bound(vertAngles.begin(), vertAngles.end(), vertAngle * 180.0 / M_PI);
if (it != vertAngles.begin()){
if(std::abs(*(it-1) - vertAngle * 180.0 / M_PI) < std::abs(*it - vertAngle * 180.0 / M_PI)){
--it;
}
}
// int vertCoord = int((vertRange / 2.0 - vertAngle) / vertRange * (vertRes - 1));
int vertCoord = vertAngles.end() - it - 1;
return vertCoord;
}
int VisualView::getHorCoordLow(const float &x, const float &y, const float &z) const {
float horAngle = getHorAngle(x, y, z);
// floor to integer (no rounding; lower-bound coordinate)
int horCoord = int(horAngle / (2 * M_PI) * horRes);
return horCoord;
}
int VisualView::getVertCoordLow(const float &x, const float &y, const float &z) const {
float vertAngle = getVertAngle(x, y, z);
auto it = std::lower_bound(vertAngles.begin(), vertAngles.end(), vertAngle * 180.0 / M_PI);
// int vertCoord = int((vertRange / 2.0 - vertAngle) / vertRange * (vertRes - 1));
int vertCoord = vertAngles.end() - it - 1;
return vertCoord;
}
Eigen::Vector3f VisualView::getDir(const int &r, const int &c) const {
float horAngle = c * 2 * M_PI / horRes;
float vertAngle = vertAngles[vertAngles.size() - r - 1] * M_PI / 180.0;
Eigen::Quaternionf q = Eigen::AngleAxisf(-horAngle, Eigen::Vector3f::UnitZ())
* Eigen::AngleAxisf(-vertAngle, Eigen::Vector3f::UnitY());
Eigen::Vector3f dir = q * Eigen::Vector3f::UnitX();
return dir;
}
int VisualView::getClosest(const laser_slam::LaserScan &scan,
const std::vector<int> &nhs,
const Eigen::Vector3f &dir) const {
int bestNh = -1;
float bestW = 0.0f;
for (const int &nh : nhs) {
if (nh >= 0) {
float x = scan.scan.features(0, nh);
float y = scan.scan.features(1, nh);
float z = scan.scan.features(2, nh);
Eigen::Vector3f nhDir = Eigen::Vector3f(x, y, z).normalized();
float curW = dir.dot(nhDir);
if (curW > bestW) {
bestNh = nh;
bestW = curW;
}
}
}
return bestNh;
}
Eigen::Vector4f VisualView::getPoint(const laser_slam::LaserScan &scan, const int &idx, const int &intDim) const {
return Eigen::Vector4f(scan.scan.features(0, idx),
scan.scan.features(1, idx),
scan.scan.features(2, idx),
scan.scan.descriptors(intDim, idx));
}
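// Hedged sketch of the scheme used by interpolateVert/interpolateHor below:
// for two points at angles a1, a2 and a query angle a, the weights are
//   w2 = (a - a1) / (a2 - a1),   w1 = (a2 - a) / (a2 - a1),
// and the result is the angle-linear blend (w1*pt1 + w2*pt2) / (w1 + w2);
// the horizontal variant computes the ratios with angDiff to handle
// wrap-around at 2*pi.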
Eigen::Vector4f VisualView::interpolateVert(const Eigen::Vector4f &pt1,
const Eigen::Vector4f &pt2,
const float &vertAngle) const {
float pt1ang = getVertAngle(pt1(0), pt1(1), pt1(2));
float pt2ang = getVertAngle(pt2(0), pt2(1), pt2(2));
float w2 = (vertAngle - pt1ang) / (pt2ang - pt1ang);
float w1 = (pt2ang - vertAngle) / (pt2ang - pt1ang);
if (std::abs(w1 + w2 - 1.0f) > 1e-5) {
std::cout << "abs(w1 + w2 - 1.0f) > 1e-5" << std::endl;
}
if (vertAngle < pt1ang || pt2ang < vertAngle) {
std::cout << "vertAngle < pt1ang || pt2ang < vertAngle" << std::endl;
}
// {
// PRINT(pt1.transpose());
// PRINT(pt2.transpose());
// PRINT(pt1ang);
// PRINT(vertAngle);
// PRINT(pt2ang);
// PRINT(w1);
// PRINT(w2);
// while (getch() != 'm') {
//
// }
// }
return (w1 * pt1 + w2 * pt2) / (w1 + w2);
}
Eigen::Vector4f VisualView::interpolateHor(const Eigen::Vector4f &pt1,
const Eigen::Vector4f &pt2,
const float &horAngle) const {
float pt1ang = getHorAngle(pt1(0), pt1(1), pt1(2));
float pt2ang = getHorAngle(pt2(0), pt2(1), pt2(2));
float w2 = angDiff(horAngle, pt1ang) / angDiff(pt2ang, pt1ang);
float w1 = angDiff(pt2ang, horAngle) / angDiff(pt2ang, pt1ang);
// if (abs(w1 + w2 - 1.0f) > 1e-5 || w1 < 0.0f || w2 < 0.0f) {
// std::cout << "abs(w1 + w2 - 1.0f) > 1e-5 || w1 < 0.0f || w2 < 0.0f" << std::endl;
//
// {
// PRINT(pt1.transpose());
// PRINT(pt2.transpose());
// PRINT(pt1ang);
// PRINT(horAngle);
// PRINT(pt2ang);
// PRINT(w1);
// PRINT(w2);
// while (getch() != 'm') {
//
// }
// }
// }
// if (horAngle < pt1ang || pt2ang < horAngle) {
// std::cout << "horAngle < pt1ang || pt2ang < horAngle" << std::endl;
// }
return (w1 * pt1 + w2 * pt2) / (w1 + w2);
}
std::pair<int, int> VisualView::getClosestDir(const float &x, const float &y, const float &z,
const int &r1,
const int &c1,
const int &r2,
const int &c2) const {
Eigen::Vector3f dir(x, y, z);
dir.normalize();
std::pair<int, int> bestCoord(-1, -1);
float bestDiff = std::numeric_limits<float>::max();
// std::vector<int> idxs;
// std::vector<float> dists;
// kdtree->nearestKSearch(pcl::PointXYZ(dir(0), dir(1), dir(2)), 4, idxs, dists);
//
// for (auto &idx : idxs) {
// int r = idxToCoord.at(idx).first;
// int c = idxToCoord.at(idx).second;
//
// if(range(r, c) > rangeThresh) {
// float diff = 1.0f - dir.dot(dirs[r * horRes + c]);
// if ((diff < 1.0f - cos(dirThresh * M_PI / 180.0)) && diff < bestDiff) {
// bestCoord.first = r;
// bestCoord.second = c;
// bestDiff = diff;
// }
// }
// }
for(int r = r1; r < r2; ++r){
for(int c = c1; c < c2; ++c){
if(range(r, c) > rangeThresh) {
// 1 - cosine(alpha)
float diff = 1.0f - dir.dot(dirs[r * horRes + c]);
if ((diff < 1.0f - cos(dirThresh * M_PI / 180.0)) && diff < bestDiff) {
bestCoord.first = r;
bestCoord.second = c;
bestDiff = diff;
}
}
}
}
// if(bestCoord.first == -1 && bestCoord.second == -1){
// cout << "bestDiff = " << acos(1.0 - bestDiffAll) * 180.0 / M_PI << endl;
// }
return bestCoord;
}
VisualView::MatrixInt
VisualView::getMask(const laser_slam_ros::PointCloud &point_cloud) const {
MatrixInt mask;
mask.resize(vertRes, horRes);
mask.setZero();
for(int p = 0; p < point_cloud.size(); ++p) {
Eigen::Vector3d ptSensor = pose.T_w.inverseTransform(point_cloud.at(p).getVector3fMap().cast<double>());
// narrowing down search area
int horCoordComp = getHorCoord(ptSensor(0), ptSensor(1), ptSensor(2));
int vertCoordComp = getVertCoord(ptSensor(0), ptSensor(1), ptSensor(2));
int r1 = std::max(0, vertCoordComp - 16);
int r2 = std::min(vertRes, vertCoordComp + 16);
int c1 = std::max(0, horCoordComp - 32);
int c2 = std::min(horRes, horCoordComp + 32);
std::pair<int, int> coord = getClosestDir(ptSensor(0), ptSensor(1), ptSensor(2),
r1, c1, r2, c2);
int horCoord = coord.second;
int vertCoord = coord.first;
// {
// Eigen::Vector3f dir = ptSensor.normalized().cast<float>();
// std::vector<int> idxs;
// std::vector<float> dists;
// kdtree.nearestKSearch(pcl::PointXYZ(dir(0), dir(1), dir(2)), 4, idxs, dists);
//
// std::pair<int, int> coordComp(-1, -1);
//
// float bestDiff = std::numeric_limits<float>::max();
// for (int i = 0; i < idxs.size(); ++i){
// int r = idxToCoord.at(idxs[i]).first;
// int c = idxToCoord.at(idxs[i]).second;
//
// if(range(r, c) > rangeThresh) {
// float diff = 1.0f - dir.dot(dirs[r * horRes + c]);
// if ((diff < 1.0f - cos(dirThresh * M_PI / 180.0)) && diff < bestDiff) {
// coordComp.first = r;
// coordComp.second = c;
// bestDiff = diff;
// }
// }
// }
//
// if (coordComp.first != coord.first || coordComp.second != coord.second) {
// LOG(INFO) << "coord = (" << coord.first << ", " << coord.second << "), coordComp = ("
// << coordComp.first << ", " << coordComp.second << "), coordProj = ("
// << vertCoordComp << ", " << horCoordComp << ")";
// LOG(INFO) << "diff = " << 1.0f - dir.dot(dirs[coord.first * horRes + coord.second])
// << ", diffComp = " << 1.0f - dir.dot(dirs[coordComp.first * horRes + coordComp.second]);
// }
// }
// {
// int r1 = std::max(0, vertCoordComp - 12);
// int r2 = std::min(vertRes - 1, vertCoordComp + 12);
// int c1 = std::max(0, horCoordComp - 28);
// int c2 = std::min(horRes - 1, horCoordComp + 28);
//
// std::pair<int, int> coordComp = getClosestDir(ptSensor(0), ptSensor(1), ptSensor(2),
// r1, c1, r2, c2);
// if (coordComp.first != coord.first || coordComp.second != coord.second) {
// LOG(INFO) << "coord = (" << coord.first << ", " << coord.second << "), coordComp = ("
// << coordComp.first << ", " << coordComp.second << "), coordProj = ("
// << vertCoordComp << ", " << horCoordComp << ")";
// }
// }
// cout << p << ": (" << ptSensor(0) << ", " << ptSensor(1) << ", " << ptSensor(2) << ")" << endl;
// cout << "horCoord1 = " << horCoord << "\tvertCoord1 = " << vertCoord << endl;
// cout << "horCoord2 = " << horCoordComp << "\tvertCoord2 = " << vertCoordComp << endl;
if(0 <= horCoord && horCoord < horRes && 0 <= vertCoord && vertCoord < vertRes) {
if(ptSensor.norm() < range(vertCoord, horCoord) + occlusionThresh) {
mask(vertCoord, horCoord) += 1;
}
}
// else{
// cout << "horCoord1 = " << horCoord << "\tvertCoord1 = " << vertCoord << endl;
// cout << "horCoord2 = " << horCoordComp << "\tvertCoord2 = " << vertCoordComp << endl;
// }
}
return mask;
}
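// Hedged usage sketch for the zlib helpers below (assumes a VisualView
// instance `vv`; whether these helpers are static is declared in the header,
// which is not shown here). compressData and decompressData are
// byte-for-byte inverses, which compress()/decompress() rely on when
// round-tripping the intensity/range/count/dirs buffers:
//   std::vector<uint8_t> raw = {1, 2, 3, 4};
//   std::vector<uint8_t> packed = vv.compressData(raw);
//   assert(vv.decompressData(packed) == raw);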
std::vector<uint8_t> VisualView::compressData(const std::vector<uint8_t> &data) {
std::stringstream compressed;
std::stringstream decompressed;
decompressed << std::string((char*)data.data(), data.size());
boost::iostreams::filtering_streambuf<boost::iostreams::input> out;
out.push(boost::iostreams::zlib_compressor());
out.push(decompressed);
boost::iostreams::copy(out, compressed);
std::string compressedStr = compressed.str();
return std::vector<uint8_t>(compressedStr.begin(), compressedStr.end());
}
std::vector<uint8_t> VisualView::decompressData(const std::vector<uint8_t> &dataComp) {
std::stringstream compressed;
std::stringstream decompressed;
compressed << std::string((char*)dataComp.data(), dataComp.size());
boost::iostreams::filtering_streambuf<boost::iostreams::input> in;
in.push(boost::iostreams::zlib_decompressor());
in.push(compressed);
boost::iostreams::copy(in, decompressed);
std::string decompressedStr = decompressed.str();
return std::vector<uint8_t>(decompressedStr.begin(), decompressedStr.end());
}
void VisualView::compress() {
if(intensity.size() > 0){
std::vector<uint8_t> data((uint8_t*)intensity.data(), (uint8_t*)intensity.data() + intensity.size() * sizeof(float));
intensityComp = compressData(data);
intensity.resize(0, 0);
// cout << "data.size() = " << data.size() << endl;
// std::vector<uint8_t> dataComp = compressData(data);
// cout << "dataComp.size() = " << dataComp.size() << endl;
// std::vector<uint8_t> data2 = decompressData(dataComp);
// cout << "data2.size() = " << data2.size() << endl;
// Matrix intensity2 = Eigen::Map<Matrix>((float*)data2.data(), vertRes, horRes);
// if(intensity != intensity2) {
// cout << endl << "Matrices dont match" << endl << endl;
// cout << intensity << endl;
// cout << intensity2 << endl;
// }
// else{
// cout << endl << "Matrices match :)" << endl << endl;
// }
}
if(range.size() > 0){
std::vector<uint8_t> data((uint8_t*)range.data(), (uint8_t*)range.data() + range.size() * sizeof(float));
rangeComp = compressData(data);
range.resize(0, 0);
}
if(count.size() > 0){
std::vector<uint8_t> data((uint8_t*)count.data(), (uint8_t*)count.data() + count.size() * sizeof(int));
countComp = compressData(data);
count.resize(0, 0);
}
if(dirs.size() > 0){
std::vector<uint8_t> data((uint8_t*)dirs.data(), (uint8_t*)dirs.data() + dirs.size() * sizeof(Eigen::Vector3f));
dirsComp = compressData(data);
dirs.clear();
}
// kdtree.reset();
// idxToCoord.clear();
isCompressedFlag = true;
}
void VisualView::decompress() {
if(intensityComp.size() > 0) {
std::vector<uint8_t> data = decompressData(intensityComp);
intensity = Eigen::Map<Matrix>((float*)data.data(), vertRes, horRes);
intensityComp.clear();
// cout << "intensity decompressed" << endl;
}
if(rangeComp.size() > 0) {
std::vector<uint8_t> data = decompressData(rangeComp);
range = Eigen::Map<Matrix>((float*)data.data(), vertRes, horRes);
rangeComp.clear();
// cout << "range decompressed" << endl;
}
if(countComp.size() > 0) {
std::vector<uint8_t> data = decompressData(countComp);
count = Eigen::Map<MatrixInt>((int*)data.data(), vertRes, horRes);
countComp.clear();
// cout << "count decompressed" << endl;
}
if(dirsComp.size() > 0) {
std::vector<uint8_t> data = decompressData(dirsComp);
dirs = std::vector<Eigen::Vector3f>((Eigen::Vector3f*)data.data(), (Eigen::Vector3f*)data.data() + vertRes * horRes);
dirsComp.clear();
// cout << "dirs decompressed" << endl;
}
// buildKdtree();
isCompressedFlag = false;
}
// void VisualView::buildKdtree() {
// LOG(INFO) << "Building kdtree";
//
// idxToCoord.clear();
// kdtree.reset(new pcl::KdTreeFLANN<pcl::PointXYZ>());
//
// pcl::PointCloud<pcl::PointXYZ>::Ptr dirsPointCloud(new pcl::PointCloud<pcl::PointXYZ>());
// for(int r = 0; r < vertRes; ++r) {
// for (int c = 0; c < horRes; ++c) {
// if (count(r, c) > 0) {
// idxToCoord[dirsPointCloud->size()] = std::make_pair(r, c);
// dirsPointCloud->push_back(pcl::PointXYZ(dirs[r * horRes + c](0),
// dirs[r * horRes + c](1),
// dirs[r * horRes + c](2)));
// }
// }
// }
//
// kdtree->setInputCloud(dirsPointCloud);
// }
int VisualView::ids = 0;
}
|
{"hexsha": "e782765a8d11648ae0195df8e7e2e64dd9ccf4bc", "size": 27026, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "laser_slam_ros/src/visual_view.cpp", "max_stars_repo_name": "LRMPUT/laser_slam", "max_stars_repo_head_hexsha": "99c69859ac26bbe13009474f461d14637b89cc4a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "laser_slam_ros/src/visual_view.cpp", "max_issues_repo_name": "LRMPUT/laser_slam", "max_issues_repo_head_hexsha": "99c69859ac26bbe13009474f461d14637b89cc4a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "laser_slam_ros/src/visual_view.cpp", "max_forks_repo_name": "LRMPUT/laser_slam", "max_forks_repo_head_hexsha": "99c69859ac26bbe13009474f461d14637b89cc4a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3252688172, "max_line_length": 123, "alphanum_fraction": 0.5383704581, "num_tokens": 8279}
|
using MAT
using PyPlot
using ADCME
using DelimitedFiles
data = matread("data.mat")["data"]
kid = 3
tid = 1
if length(ARGS)==2
global kid = parse(Int64, ARGS[1])
global tid = parse(Int64, ARGS[2])
end
@info kid, tid
d = data[kid]
n = div(size(d, 1), 5)
H = Variable(1.0)^2
ε = d[1:n, 1]
σ = d[1:n, 2]
function prediction(ε, σ0)
ε, σ0 = convert_to_tensor([ε, σ0], [Float64, Float64])
N = length(ε)
condition = (i, σ)-> (i<=N)
function body(i, taσ)
σl = read(taσ, i-1)
εc = ε[i]
εl = ε[i-1]
σc = H * εc + squeeze(ae(reshape([εl;σl], (1,2)), [20,20,20,1]))
taσ = write(taσ, i, σc)
i+1, taσ
end
i = constant(2, dtype=Int32)
taσ = TensorArray(N)
taσ = write(taσ, 1, σ0)
_, out = while_loop(condition, body, [i, taσ])
set_shape(stack(out), (N,))
end
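# Hedged note on `prediction` above: it rolls the learned constitutive model
# out one step at a time. With σ0 seeding the TensorArray, every step sets
# σ_c = H*ε_c + NN([ε_l, σ_l]), i.e. a linear-elastic term plus a neural
# correction conditioned on the previous strain/stress pair, and the
# while_loop keeps the whole rollout differentiable for BFGS! below.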
function visualize(iter)
full = run(sess, σpred_full)
close("all")
plot(d[:,1], d[:,2])
plot(d[1:end,1], full, "--")
xlabel("Strain")
ylabel("Stress (MPa)")
savefig("figures/strainstress$kid$(tid)_$iter.png")
close("all")
ref = d[1:end,2]
plot(1:n-1,ref[1:n-1], "C1", label="Reference")
plot(1:n-1,full[1:n-1], "C2--", label="Training Prediction")
plot(n:size(d,1),ref[n:end], "C1")
plot(n:size(d,1),full[n:end], "C3--", label="Testing Prediction")
xlabel("Index")
ylabel("Stress (MPa)")
legend()
savefig("figures/stress$kid$(tid)_$iter.png")
writedlm("figures/res$kid$(tid)_$iter.txt", full)
end
σpred_train = prediction(d[1:n, 1], σ[1])
loss = sum((σpred_train - σ)^2)
σpred_full = prediction(d[1:end,1], σ[1])
sess = Session(); init(sess)
for i = 1:20
visualize(i-1)
BFGS!(sess, loss, 100)
end
|
{"hexsha": "8d9739e66111c367bd305f7fd1f9ed42d24c8d27", "size": 1729, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "AdFemResearch/viscoelasticity_experimental_data/nnfit.jl", "max_stars_repo_name": "ADCMEMarket/ADCMEImages", "max_stars_repo_head_hexsha": "d89df7050f53e56f3c509ff737199068410dbbc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2020-10-18T01:33:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T00:13:24.000Z", "max_issues_repo_path": "AdFemResearch/viscoelasticity_experimental_data/nnfit.jl", "max_issues_repo_name": "ADCMEMarket/ADCMEImages", "max_issues_repo_head_hexsha": "d89df7050f53e56f3c509ff737199068410dbbc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-10-19T03:51:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T23:38:46.000Z", "max_forks_repo_path": "AdFemResearch/viscoelasticity_experimental_data/nnfit.jl", "max_forks_repo_name": "ADCMEMarket/ADCMEImages", "max_forks_repo_head_hexsha": "d89df7050f53e56f3c509ff737199068410dbbc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-11-05T11:34:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T19:30:09.000Z", "avg_line_length": 21.6125, "max_line_length": 72, "alphanum_fraction": 0.5702718334, "num_tokens": 638}
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import json
import math
from tqdm import tqdm
from multiprocessing import Queue, Process
import argparse
sys.path.append("../")
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.networks import build_whole_network_batch
from help_utils import tools
from libs.label_name_dict.label_dict import *
from data.lib_coco.PythonAPI.pycocotools.coco import COCO
from data.lib_coco.PythonAPI.pycocotools.cocoeval import COCOeval
def cocoval(detected_json, eval_json):
eval_gt = COCO(eval_json)
eval_dt = eval_gt.loadRes(detected_json)
cocoEval = COCOeval(eval_gt, eval_dt, iouType='bbox')
# cocoEval.params.imgIds = eval_gt.getImgIds()
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
def worker(gpu_id, images, det_net, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
# 1. preprocess img
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
length_limitation=cfgs.IMG_MAX_LENGTH)
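    # The v1d ResNet backbones are normalized to zero mean / unit variance in
    # [0, 1] space; the other backbones only subtract the pixel mean in raw
    # [0, 255] space.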
if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
img_batch = (img_batch / 255 - tf.constant(cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch=None)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
        if restorer is not None:
restorer.restore(sess, restore_ckpt)
print('restore model %d ...' % gpu_id)
for a_img in images:
record = json.loads(a_img)
img_path = os.path.join('/data/yangxue/dataset/COCO/val2017', record['fpath'].split('_')[-1])
raw_img = cv2.imread(img_path)
# raw_img = cv2.imread(record['fpath'])
raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
resized_img, detected_boxes, detected_scores, detected_categories = \
sess.run(
[img_batch, detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: raw_img[:, :, ::-1]}
)
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
scales = [raw_w / resized_w, raw_h / resized_h]
result_dict = {'scales': scales, 'boxes': detected_boxes,
'scores': detected_scores, 'labels': detected_categories,
'image_id': record['ID']}
result_queue.put_nowait(result_dict)
def eval_coco(det_net, real_test_img_list, gpu_ids):
save_path = os.path.join('./eval_coco', cfgs.VERSION)
tools.mkdir(save_path)
fw_json_dt = open(os.path.join(save_path, 'coco_minival.json'), 'w')
coco_det = []
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(gpu_ids.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(5000)
procs = []
for i in range(gpu_num):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=worker, args=(int(gpu_ids.strip().split(',')[i]), split_records, det_net, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
for i in range(nr_records):
res = result_queue.get()
xmin, ymin, xmax, ymax = res['boxes'][:, 0], res['boxes'][:, 1], \
res['boxes'][:, 2], res['boxes'][:, 3]
xmin = xmin * res['scales'][0]
xmax = xmax * res['scales'][0]
ymin = ymin * res['scales'][1]
ymax = ymax * res['scales'][1]
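        # COCO's result json expects [x, y, width, height]; the network emits
        # [xmin, ymin, xmax, ymax], hence the width/height subtraction below.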
boxes = np.transpose(np.stack([xmin, ymin, xmax-xmin, ymax-ymin]))
sort_scores = np.array(res['scores'])
sort_labels = np.array(res['labels'])
sort_boxes = np.array(boxes)
# if len(res['scores']) > cfgs.MAXIMUM_DETECTIONS:
# sort_indx = np.argsort(np.array(res['scores']) * -1)[:cfgs.MAXIMUM_DETECTIONS]
# # print(sort_indx)
# sort_scores = np.array(res['scores'])[sort_indx]
# sort_labels = np.array(res['labels'])[sort_indx]
# sort_boxes = np.array(boxes)[sort_indx]
for j, box in enumerate(sort_boxes):
coco_det.append({'bbox': [float(box[0]), float(box[1]), float(box[2]), float(box[3])],
'score': float(sort_scores[j]), 'image_id': int(res['image_id'].split('.jpg')[0].split('_000000')[-1]),
'category_id': int(classes_originID[LABEL_NAME_MAP[sort_labels[j]]])})
pbar.set_description("Eval image %s" % res['image_id'])
pbar.update(1)
for p in procs:
p.join()
json.dump(coco_det, fw_json_dt)
fw_json_dt.close()
return os.path.join(save_path, 'coco_minival.json')
def eval(num_imgs, eval_data, eval_gt, gpu_ids):
with open(eval_data) as f:
test_img_list = f.readlines()
if num_imgs == np.inf:
real_test_img_list = test_img_list
else:
real_test_img_list = test_img_list[: num_imgs]
faster_rcnn = build_whole_network_batch.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
detected_json = eval_coco(det_net=faster_rcnn, real_test_img_list=real_test_img_list, gpu_ids=gpu_ids)
# save_path = os.path.join('./eval_coco', cfgs.VERSION)
# detected_json = os.path.join(save_path, 'coco_minival.json')
cocoval(detected_json, eval_gt)
def parse_args():
    parser = argparse.ArgumentParser('evaluate the result with the COCO standard')
parser.add_argument('--eval_data', dest='eval_data',
help='evaluate imgs dir, download link: https://drive.google.com/file/d/1Au55e6lqvuTunNBZO2Cj4Kh9XySyM3ZN/view?usp=sharing',
default='/data/yangxue/dataset/COCO/coco_minival2014.odgt', type=str)
parser.add_argument('--eval_gt', dest='eval_gt',
help='eval gt, download link: https://drive.google.com/file/d/1cgyEzdGVfx7zPNUO0lLfm8pu0HfIj3Xv/view?usp=sharing',
default='/data/yangxue/dataset/COCO/instances_minival2014.json',
type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--eval_num', dest='eval_num',
help='the num of eval imgs',
default=np.inf, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(20*"--")
print(args)
print(20*"--")
eval(args.eval_num, # use np.inf to test all the imgs. use 10 to test 10 imgs.
eval_data=args.eval_data,
eval_gt=args.eval_gt,
gpu_ids=args.gpus)
# os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP
# eval(np.inf, # use np.inf to test all the imgs. use 10 to test 10 imgs.
# eval_data='/data/COCO/coco_minival2014.odgt',
# eval_gt='/data/COCO/instances_minival2014.json',
# gpu_ids='0,1,2,3,4,5,6,7')
|
{"hexsha": "13d4ac7a40126b091e63f4633ecb7dba8905745d", "size": 8183, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/eval_coco_multiprocessing.py", "max_stars_repo_name": "DetectionTeamUCAS/RetinaNet_Tensorflow", "max_stars_repo_head_hexsha": "bf05e25a11b420649f1c3f8bc58af66ba7bdf5c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 55, "max_stars_repo_stars_event_min_datetime": "2019-05-23T02:42:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-22T08:18:46.000Z", "max_issues_repo_path": "tools/eval_coco_multiprocessing.py", "max_issues_repo_name": "DetectionTeamUCAS/RetinaNet_Tensorflow", "max_issues_repo_head_hexsha": "bf05e25a11b420649f1c3f8bc58af66ba7bdf5c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-06-05T11:29:14.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-12T11:44:49.000Z", "max_forks_repo_path": "tools/eval_coco_multiprocessing.py", "max_forks_repo_name": "DetectionTeamUCAS/RetinaNet_Tensorflow", "max_forks_repo_head_hexsha": "bf05e25a11b420649f1c3f8bc58af66ba7bdf5c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2019-06-01T01:17:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-29T15:08:02.000Z", "avg_line_length": 34.9700854701, "max_line_length": 148, "alphanum_fraction": 0.6243431504, "include": true, "reason": "import numpy", "num_tokens": 2067}
|
from scipy.stats import multivariate_normal
from scipy.signal import convolve2d
import matplotlib
try:
matplotlib.pyplot.figure()
matplotlib.pyplot.close()
except Exception:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
# the colormap should assign light colors to low values
TERRAIN_CMAP = 'Greens'
DEFAULT_PATH = '/tmp/mujoco_terrains'
STEP = 0.1
def generate_hills(width, height, nhills):
    '''
    @param width float, terrain width
    @param height float, terrain height
    @param nhills int, number of hills to generate; the number actually
        generated is sqrt(nhills)^2, since hilltops are laid out on a grid
    '''
# setup coordinate grid
xmin, xmax = -width/2.0, width/2.0
ymin, ymax = -height/2.0, height/2.0
x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
# generate hilltops
xm, ym = np.mgrid[xmin:xmax:width/np.sqrt(nhills), ymin:ymax:height/np.sqrt(nhills)]
mu = np.c_[xm.flat, ym.flat]
sigma = float(width*height)/(nhills*8)
for i in range(mu.shape[0]):
mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)
# generate hills
sigma = sigma + sigma*np.random.rand(mu.shape[0])
rvs = [ multivariate_normal(mu[i,:], cov=sigma[i]) for i in range(mu.shape[0]) ]
hfield = np.max([ rv.pdf(pos) for rv in rvs ], axis=0)
return x, y, hfield
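# Hedged usage sketch (parameter values are illustrative, not taken from the
# original code): generate a 40x40 field with ~16 hills, then save both the
# height field and a gridded texture for the MuJoCo asset pipeline:
#   x, y, hfield = generate_hills(40.0, 40.0, 16)
#   save_heightfield(x, y, hfield, 'hills.png')
#   save_texture(x, y, hfield, 'hills_texture.png')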
def clear_patch(hfield, box):
''' Clears a patch shaped like box, assuming robot is placed in center of hfield
@param box: rllab.spaces.Box-like
'''
if box.flat_dim > 2:
raise ValueError("Provide 2dim box")
# clear patch
h_center = int(0.5 * hfield.shape[0])
w_center = int(0.5 * hfield.shape[1])
fromrow, torow = w_center + int(box.low[0]/STEP), w_center + int(box.high[0] / STEP)
fromcol, tocol = h_center + int(box.low[1]/STEP), h_center + int(box.high[1] / STEP)
hfield[fromrow:torow, fromcol:tocol] = 0.0
    # convolve to smooth edges somewhat, in case hills were cut off
K = np.ones((10,10)) / 100.0
s = convolve2d(hfield[fromrow-9:torow+9, fromcol-9:tocol+9], K, mode='same', boundary='symm')
hfield[fromrow-9:torow+9, fromcol-9:tocol+9] = s
return hfield
def _checkpath(path_):
if path_ is None:
path_ = DEFAULT_PATH
if not os.path.exists(path_):
os.makedirs(path_)
return path_
def save_heightfield(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure the path + fname match the <file> attribute
of the <asset> element in the env XML where the height field is defined
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP) # terrain_cmap is necessary to make sure tops get light color
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
def save_texture(x, y, hfield, fname, path=None):
'''
@param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure this matches the <texturedir> of the
<compiler> element in the env XML
'''
path = _checkpath(path)
plt.figure()
plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
# for some reason plt.grid does not work here, so generate gridlines manually
for i in np.arange(xmin,xmax,0.5):
plt.plot([i,i], [ymin,ymax], 'k', linewidth=0.1)
for i in np.arange(ymin,ymax,0.5):
plt.plot([xmin,xmax],[i,i], 'k', linewidth=0.1)
plt.savefig(os.path.join(path, fname), bbox_inches='tight')
plt.close()
|
{"hexsha": "5caf03c472b109f9c4a09de43fd631678b177ace", "size": 3671, "ext": "py", "lang": "Python", "max_stars_repo_path": "rllab/envs/mujoco/hill/terrain.py", "max_stars_repo_name": "RussellM2020/maml_gps", "max_stars_repo_head_hexsha": "631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1838, "max_stars_repo_stars_event_min_datetime": "2017-08-10T04:19:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T07:41:19.000Z", "max_issues_repo_path": "rllab/envs/mujoco/hill/terrain.py", "max_issues_repo_name": "RussellM2020/maml_gps", "max_issues_repo_head_hexsha": "631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 120, "max_issues_repo_issues_event_min_datetime": "2016-10-05T09:16:16.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-27T22:57:31.000Z", "max_forks_repo_path": "rllab/envs/mujoco/hill/terrain.py", "max_forks_repo_name": "RussellM2020/maml_gps", "max_forks_repo_head_hexsha": "631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 498, "max_forks_repo_forks_event_min_datetime": "2017-08-16T03:34:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T04:41:32.000Z", "avg_line_length": 36.71, "max_line_length": 125, "alphanum_fraction": 0.6475074911, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1108}
|
import argparse
import datetime
import gym
import numpy as np
import itertools
import torch
from sac import SAC
from dctrain import DiscOptimizer
from tensorboardX import SummaryWriter
from replay_memory import ReplayMemory
from gym_navigation.envs.navigation import ContinuousNavigation2DEnv, ContinuousNavigation2DNREnv
import cv2
import os
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env-name', default="2d-navigation-v0",
help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--policy', default="Gaussian",
help='Policy Type: Gaussian | Deterministic (default: Gaussian)')
parser.add_argument('--eval', type=bool, default=True,
                    help='Evaluates the policy every 100 episodes (default: True)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.05, metavar='G',
                    help='target smoothing coefficient(τ) (default: 0.05)')
parser.add_argument('--lr', type=float, default=0.0003, metavar='G',
help='learning rate (default: 0.0003)')
parser.add_argument('--dclr', type=float, default=0.00001, metavar='G',
help='learning rate of discriminator (default: 0.00001)')
parser.add_argument('--alpha', type=float, default=0.2, metavar='G',
help='Temperature parameter α determines the relative importance of the entropy\
term against the reward (default: 0.2)')
parser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',
                    help='Automatically adjust α (default: False)')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size (default: 256)')
parser.add_argument('--num_steps', type=int, default=1000001, metavar='N',
help='maximum number of steps (default: 1000000)')
parser.add_argument('--hidden_size', type=int, default=256, metavar='N',
help='hidden size (default: 256)')
parser.add_argument('--latent_size', type=int, default=2, metavar='N',
help='latent variable length (default: 2)')
parser.add_argument('--updates_per_step', type=int, default=1, metavar='N',
help='model updates per simulator step (default: 1)')
parser.add_argument('--start_steps', type=int, default=10000, metavar='N',
help='Steps sampling random actions (default: 10000)')
parser.add_argument('--target_update_interval', type=int, default=1, metavar='N',
help='Value target update per no. of updates per step (default: 1)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
                    help='size of replay buffer (default: 1000000)')
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
parser.add_argument('--suffix', type=str, default="",
help='suffix for model path')
args = parser.parse_args()
# Built-in config
bt_conf = dict()
bt_conf['render'] = True # The env has '_render_trajectory' method or not
bt_conf['alpha tuning'] = True # Scheduled alpha decreasing or not
bt_conf['include_r'] = True # Include real reward in training or not
bt_conf['include_sr'] = False # Include pseudo reward in training or not
# Environment
# env = NormalizedActions(gym.make(args.env_name))
env = gym.make(args.env_name)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
env.seed(args.seed)
# Agent
agent = SAC(env.observation_space.shape[0], env.action_space, args)
dc = DiscOptimizer(env.observation_space.shape[0], args)
# agent.load_model(env_name=args.env_name)
# TensorboardX
logdir = 'runs/{}_SAC_{}_{}_{}'.format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
args.env_name, args.policy, "autotune" if args.automatic_entropy_tuning else "")
writer = SummaryWriter(logdir=logdir)
logdir_img = logdir + '/img'
if not os.path.exists(logdir_img):
os.makedirs(logdir_img)
# Memory
memory = ReplayMemory(args.replay_size)
# Training Loop
total_numsteps = 0
updates = 0
l_s = args.latent_size
for i_episode in itertools.count(1):
episode_reward = 0
episode_steps = 0
episode_sr = 0 # pseudo reward
episode_allr = 0 # all rewards
done = False
state = env.reset()
scale_factor = 1
context = np.random.random(l_s)
context = context * 2 * scale_factor - scale_factor # scale to [-a, a)
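    # Hedged note: `context` is the per-episode latent (skill) variable; the
    # discriminator in `dc` is trained to recover it from visited states and
    # pseudo_score below rewards the policy for making that recovery easy,
    # in the spirit of DIAYN-style skill discovery.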
while not done:
if args.start_steps > total_numsteps:
action = env.action_space.sample() # Sample random action
else:
action = agent.select_action(state, context) # Sample action from policy
if len(memory) > args.batch_size:
# Number of updates per step in environment
for i in range(args.updates_per_step):
# Update parameters of all the networks
critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parameters(memory, args.batch_size, updates)
disc_loss = dc.update_parameters(memory, args.batch_size, updates)
writer.add_scalar('loss/disc', disc_loss, updates)
writer.add_scalar('loss/critic_1', critic_1_loss, updates)
writer.add_scalar('loss/critic_2', critic_2_loss, updates)
writer.add_scalar('loss/policy', policy_loss, updates)
writer.add_scalar('loss/entropy_loss', ent_loss, updates)
                writer.add_scalar('entropy_temperature/alpha', alpha, updates)
updates += 1
# Reduce the entropy reward gain
if bt_conf['alpha tuning']:
if updates < 1000000:
agent.adjust_alpha(1.0000046051807898) # reduced to 0.01 times in 1 million steps
next_state, reward, done, _ = env.step(action) # Step
episode_steps += 1
total_numsteps += 1
episode_reward += reward
pseudo_reward = dc.pseudo_score(context, state)
episode_sr += pseudo_reward
all_reward = pseudo_reward + reward
episode_allr += all_reward
# Ignore the "done" signal if it comes from hitting the time horizon.
# (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py)
mask = 1 if episode_steps == env._max_episode_steps else float(not done)
r = 0.
if bt_conf['include_r']:
r += reward
if bt_conf['include_sr']:
r += pseudo_reward
memory.push((context, state, action, r, next_state, mask)) # Append transition to memory
state = next_state
if total_numsteps > args.num_steps:
break
writer.add_scalar('reward/train', episode_reward, i_episode)
writer.add_scalar('reward/train_pseudo', episode_sr, i_episode)
writer.add_scalar('reward/train_all', episode_allr, i_episode)
print("Episode: {}, total numsteps: {}, episode steps: {}, reward: {}, sr: {}, all: {}".format(i_episode,
total_numsteps, episode_steps, round(episode_reward, 2), round(episode_sr, 2), round(episode_allr, 2)))
    if i_episode % 100 == 0 and args.eval:
avg_reward = 0.
avg_sr = 0.
avg_all = 0.
avg_reward_x = 0.
avg_sr_x = 0.
avg_all_x = 0.
episodes = 20
# The test part is not compatible with high dimensional latent variables
# at this time. An example for the 2D case.
c = np.linspace(-1.0, 1.0, num=episodes) * scale_factor
context = np.stack([c for _ in range(l_s)], axis=1)
# Using mean for evaluation
for i in range(episodes):
state = env.reset()
traj = []
traj.append([state, None, 0.0, False])
episode_reward = 0
episode_sr = 0
episode_allr = 0
done = False
while not done:
action = agent.select_action(state, context[i], eval=True)
next_state, reward, done, _ = env.step(action)
episode_reward += reward
traj.append([next_state, action, reward, done])
pseudo_reward = dc.pseudo_score(context[i], state)
episode_sr += pseudo_reward
episode_allr += (pseudo_reward + reward)
state = next_state
avg_reward += episode_reward
avg_sr += episode_sr
avg_all += episode_allr
if bt_conf['render']:
img = env._render_trajectory(traj)
cv2.imwrite("{}/test-{}-{}-{}.png".format(logdir_img, i, context[i][0], scale_factor), img * 255.0)
# Sample actions for evaluation
for i in range(episodes):
state = env.reset()
traj = []
traj.append([state, None, 0.0, False])
episode_reward = 0
episode_sr = 0
episode_allr = 0
done = False
while not done:
action = agent.select_action(state, context[i], eval=False)
next_state, reward, done, _ = env.step(action)
episode_reward += reward
traj.append([next_state, action, reward, done])
pseudo_reward = dc.pseudo_score(context[i], state)
episode_sr += pseudo_reward
episode_allr += (pseudo_reward + reward)
state = next_state
avg_reward_x += episode_reward
avg_sr_x += episode_sr
avg_all_x += episode_allr
if bt_conf['render']:
img = env._render_trajectory(traj)
cv2.imwrite("{}/train-{}-{}-{}.png".format(logdir_img, i, context[i][0], scale_factor), img * 255.0)
avg_reward /= episodes
avg_sr /= episodes
avg_all /= episodes
avg_reward_x /= episodes
avg_sr_x /= episodes
avg_all_x /= episodes
writer.add_scalar('avg_reward/test', avg_reward, i_episode)
writer.add_scalar('avg_reward/test_pseudo', avg_sr, i_episode)
writer.add_scalar('avg_reward/test_all', avg_all, i_episode)
writer.add_scalar('avg_reward_x/test', avg_reward_x, i_episode)
writer.add_scalar('avg_reward_x/test_pseudo', avg_sr_x, i_episode)
writer.add_scalar('avg_reward_x/test_all', avg_all_x, i_episode)
print("----------------------------------------")
print("Test Episodes: {}, Avg. Reward: {}, Avg. SR: {}".format(episodes, round(avg_reward, 2), round(avg_sr, 2)))
print("----------------------------------------")
print("Test Episodes: {}, Avg. Reward_x: {}, Avg. SR_x: {}".format(episodes, round(avg_reward_x, 2), round(avg_sr_x, 2)))
print("----------------------------------------")
print("Current Time: {}".format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")))
print("----------------------------------------")
# Save model
agent.save_model(args.env_name, suffix=args.suffix)
dc.save_model(args.env_name, suffix=args.suffix)
env.close()
|
{"hexsha": "321c1c2b4fa489561092a4d38a5f2715ceb1f72c", "size": 11459, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "Steven-Ho/diayn-cont", "max_stars_repo_head_hexsha": "4f30e5593ae89ed70c78066022049bf40c4c005e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "Steven-Ho/diayn-cont", "max_issues_repo_head_hexsha": "4f30e5593ae89ed70c78066022049bf40c4c005e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-22T13:47:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-08T05:18:50.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "Steven-Ho/diayn-cont", "max_forks_repo_head_hexsha": "4f30e5593ae89ed70c78066022049bf40c4c005e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2432432432, "max_line_length": 134, "alphanum_fraction": 0.6143642552, "include": true, "reason": "import numpy", "num_tokens": 2637}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import random
import string
import sys
import ctypes
import time
import json
import os
import multiprocessing
import yaml
import hashlib
from core.load_modules import load_all_languages
from core.time import now
from core.color import color
def process_conditions(
event,
module_name,
target,
scan_unique_id,
options,
response,
process_number,
module_thread_number,
total_module_thread_number,
request_number_counter,
total_number_of_requests
):
from core.alert import (success_event_info,
verbose_info,
messages)
if 'save_to_temp_events_only' in event.get('response', ''):
from database.db import submit_temp_logs_to_db
submit_temp_logs_to_db(
{
"date": now(model=None),
"target": target,
"module_name": module_name,
"scan_unique_id": scan_unique_id,
"event_name": event['response']['save_to_temp_events_only'],
"port": event.get('ports', ''),
"event": event,
"data": response
}
)
if event['response']['conditions_results'] and 'save_to_temp_events_only' not in event.get('response', ''):
from database.db import submit_logs_to_db
# remove sensitive information before submitting to db
from config import nettacker_api_config
options = copy.deepcopy(options)
for key in nettacker_api_config():
try:
del options[key]
except Exception:
continue
del event['response']['conditions']
del event['response']['condition_type']
event_request_keys = copy.deepcopy(event)
del event_request_keys['response']
submit_logs_to_db(
{
"date": now(model=None),
"target": target,
"module_name": module_name,
"scan_unique_id": scan_unique_id,
"port": event.get('ports') or event.get('port') or (
event.get('url').split(':')[2].split('/')[0] if
type(event.get('url')) == str and len(event.get('url').split(':')) >= 3 and
event.get('url').split(':')[2].split('/')[0].isdigit() else ""
),
"event": " ".join(
yaml.dump(event_request_keys).split()
) + "conditions: " + " ".join(
yaml.dump(event['response']['conditions_results']).split()
),
"json_event": event
}
)
success_event_info(
messages("send_success_event_from_module").format(
process_number,
module_name,
target,
module_thread_number,
total_module_thread_number,
request_number_counter,
total_number_of_requests,
" ".join(
[
color('yellow') + key + color('reset') if ':' in key
else color('green') + key + color('reset')
for key in yaml.dump(event_request_keys).split()
]
),
filter_large_content(
"conditions: " + " ".join(
[
color('purple') + key + color('reset') if ':' in key
else color('green') + key + color('reset')
for key in yaml.dump(event['response']['conditions_results']).split()
]
),
filter_rate=150
)
)
)
verbose_info(
json.dumps(event)
)
return True
else:
del event['response']['conditions']
verbose_info(
messages("send_unsuccess_event_from_module").format(
process_number,
module_name,
target,
module_thread_number,
total_module_thread_number,
request_number_counter,
total_number_of_requests
)
)
verbose_info(
json.dumps(event)
)
return 'save_to_temp_events_only' in event['response']
def filter_large_content(content, filter_rate=150):
from core.alert import messages
if len(content) <= filter_rate:
return content
else:
filter_rate -= 1
filter_index = filter_rate
for char in content[filter_rate:]:
if char == ' ':
return content[0:filter_index] + messages('filtered_content')
else:
filter_index += 1
return content
def get_dependent_results_from_database(target, module_name, scan_unique_id, event_name):
from database.db import find_temp_events
while True:
event = find_temp_events(target, module_name, scan_unique_id, event_name)
if event:
break
time.sleep(0.1)
return json.loads(event.event)['response']['conditions_results']
def find_and_replace_dependent_values(sub_step, dependent_on_temp_event):
if type(sub_step) == dict:
for key in copy.deepcopy(sub_step):
if type(sub_step[key]) not in [str, float, int, bytes]:
sub_step[key] = find_and_replace_dependent_values(
copy.deepcopy(sub_step[key]), dependent_on_temp_event
)
else:
if type(sub_step[key]) == str:
if 'dependent_on_temp_event' in sub_step[key]:
globals().update(locals())
exec('sub_step[key] = {sub_step}'.format(sub_step=sub_step[key]), globals(), {})
if type(sub_step) == list:
value_index = 0
for value in copy.deepcopy(sub_step):
if type(sub_step[value_index]) not in [str, float, int, bytes]:
                sub_step[value_index] = find_and_replace_dependent_values(
                    copy.deepcopy(sub_step[value_index]), dependent_on_temp_event
                )
else:
if type(sub_step[value_index]) == str:
if 'dependent_on_temp_event' in sub_step[value_index]:
globals().update(locals())
exec('sub_step[value_index] = {sub_step}'.format(sub_step=sub_step[value_index]), globals(), {})
value_index += 1
return sub_step
def replace_dependent_values(sub_step, dependent_on_temp_event):
return find_and_replace_dependent_values(sub_step, dependent_on_temp_event)
def reverse_and_regex_condition(regex, reverse):
if regex:
if reverse:
return []
return list(set(regex))
else:
if reverse:
return True
return []
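# Hedged summary of reverse_and_regex_condition above: with matches and
# reverse=True the condition fails (empty list); with matches and
# reverse=False it passes with the deduplicated matches; with no matches,
# reverse=True passes (True) and reverse=False fails (empty list).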
def select_maximum_cpu_core(mode):
if mode == 'maximum':
return int(multiprocessing.cpu_count() - 1) if int(multiprocessing.cpu_count() - 1) >= 1 else 1
elif mode == 'high':
return int(multiprocessing.cpu_count() / 2) if int(multiprocessing.cpu_count() - 1) >= 1 else 1
elif mode == 'normal':
return int(multiprocessing.cpu_count() / 4) if int(multiprocessing.cpu_count() - 1) >= 1 else 1
elif mode == 'low':
return int(multiprocessing.cpu_count() / 8) if int(multiprocessing.cpu_count() - 1) >= 1 else 1
else:
return 1
def wait_for_threads_to_finish(threads, maximum=None, terminable=False, sub_process=False):
while threads:
try:
for thread in threads[:]:
if not thread.is_alive():
threads.remove(thread)
if maximum and len(threads) < maximum:
break
time.sleep(0.01)
except KeyboardInterrupt:
if terminable:
for thread in threads:
terminate_thread(thread)
if sub_process:
for thread in threads:
thread.kill()
return False
return True
def terminate_thread(thread, verbose=True):
"""
kill a thread https://stackoverflow.com/a/15274929
Args:
thread: an alive thread
verbose: verbose mode/boolean
Returns:
True/None
"""
from core.alert import info
if verbose:
info("killing {0}".format(thread.name))
if not thread.is_alive():
return
exc = ctypes.py_object(SystemExit)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(thread.ident),
exc
)
if res == 0:
raise ValueError("nonexistent thread id")
elif res > 1:
# if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
return True
def find_args_value(args_name):
try:
return sys.argv[sys.argv.index(args_name) + 1]
except Exception:
return None
def application_language():
from config import nettacker_global_config
nettacker_global_configuration = nettacker_global_config()
if "-L" in sys.argv:
language = find_args_value('-L') or 'en'
elif "--language" in sys.argv:
language = find_args_value('--language') or 'en'
else:
language = nettacker_global_configuration['nettacker_user_application_config']['language']
if language not in load_all_languages():
language = 'en'
return language
def generate_random_token(length=10):
return "".join(
random.choice(string.ascii_lowercase) for _ in range(length)
)
def re_address_repeaters_key_name(key_name):
return "".join(['[\'' + _key + '\']' for _key in key_name.split('/')[:-1]])
def generate_new_sub_steps(sub_steps, data_matrix, arrays):
original_sub_steps = copy.deepcopy(sub_steps)
steps_array = []
for array in data_matrix:
array_name_position = 0
for array_name in arrays:
for sub_step in sub_steps:
exec(
"original_sub_steps{key_name} = {matrix_value}".format(
key_name=re_address_repeaters_key_name(array_name),
matrix_value='"' + str(array[array_name_position]) + '"' if type(
array[array_name_position]) == int or type(array[array_name_position]) == str else array[
array_name_position]
)
)
array_name_position += 1
steps_array.append(copy.deepcopy(original_sub_steps))
return steps_array
def find_repeaters(sub_content, root, arrays):
    if type(sub_content) == dict and 'nettacker_fuzzer' not in sub_content:
        temporary_content = copy.deepcopy(sub_content)
        original_root = root
        for key in sub_content:
            root = original_root
            root += key + '/'
            temporary_content[key], _root, arrays = find_repeaters(sub_content[key], root, arrays)
        sub_content = copy.deepcopy(temporary_content)
        root = original_root
if (type(sub_content) not in [bool, int, float]) and (
type(sub_content) == list or 'nettacker_fuzzer' in sub_content):
arrays[root] = sub_content
return (sub_content, root, arrays) if root != '' else arrays
def find_and_replace_configuration_keys(module_content, module_inputs):
if type(module_content) == dict:
for key in copy.deepcopy(module_content):
if key in module_inputs:
if module_inputs[key]:
module_content[key] = module_inputs[key]
elif type(module_content[key]) in [dict, list]:
module_content[key] = find_and_replace_configuration_keys(module_content[key], module_inputs)
elif type(module_content) == list:
array_index = 0
for key in copy.deepcopy(module_content):
module_content[array_index] = find_and_replace_configuration_keys(key, module_inputs)
array_index += 1
else:
return module_content
return module_content
class value_to_class:
def __init__(self, value):
self.value = value
def class_to_value(arrays):
original_arrays = copy.deepcopy(arrays)
array_index = 0
for array in arrays:
value_index = 0
for value in array:
if type(value) == value_to_class:
original_arrays[array_index][value_index] = value.value
value_index += 1
array_index += 1
return original_arrays
def generate_and_replace_md5(content):
    # todo: make it better and document it
md5_content = content.split('NETTACKER_MD5_GENERATOR_START')[1].split('NETTACKER_MD5_GENERATOR_STOP')[0]
md5_content_backup = md5_content
if type(md5_content) == str:
md5_content = md5_content.encode()
md5_hash = hashlib.md5(md5_content).hexdigest()
return content.replace(
'NETTACKER_MD5_GENERATOR_START' + md5_content_backup + 'NETTACKER_MD5_GENERATOR_STOP',
md5_hash
)
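# Illustrative example (assumed input, not from the original tests): the marker
# pair delimits the payload whose MD5 digest replaces the whole marked span.
#
#     generate_and_replace_md5(
#         'x=NETTACKER_MD5_GENERATOR_STARTabcNETTACKER_MD5_GENERATOR_STOP'
#     )
#     # -> 'x=900150983cd24fb0d6963f7d28e17f72'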
def arrays_to_matrix(arrays):
import numpy
return numpy.array(
numpy.meshgrid(*[
arrays[array_name] for array_name in arrays
])
).T.reshape(
-1,
len(arrays.keys())
).tolist()
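# A small worked example (hypothetical values): the meshgrid/transpose/reshape
# chain yields the cartesian product of the value lists, one row per combination.
#
#     arrays_to_matrix({'a': [1, 2], 'b': [3, 4]})
#     # -> [[1, 3], [1, 4], [2, 3], [2, 4]]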
def string_to_bytes(string):
return string.encode()
def fuzzer_function_read_file_as_array(filename):
from config import nettacker_paths
return open(
os.path.join(
nettacker_paths()['payloads_path'],
filename
)
).read().split('\n')
def apply_data_functions(data):
original_data = copy.deepcopy(data)
function_results = {}
globals().update(locals())
for data_name in data:
if type(data[data_name]) == str and data[data_name].startswith('fuzzer_function'):
exec(
"fuzzer_function = {fuzzer_function}".format(
fuzzer_function=data[data_name]
),
globals(),
function_results
)
original_data[data_name] = function_results['fuzzer_function']
return original_data
def nettacker_fuzzer_repeater_perform(arrays):
original_arrays = copy.deepcopy(arrays)
for array_name in arrays:
if 'nettacker_fuzzer' in arrays[array_name]:
data = arrays[array_name]['nettacker_fuzzer']['data']
data_matrix = arrays_to_matrix(apply_data_functions(data))
prefix = arrays[array_name]['nettacker_fuzzer']['prefix']
input_format = arrays[array_name]['nettacker_fuzzer']['input_format']
interceptors = copy.deepcopy(arrays[array_name]['nettacker_fuzzer']['interceptors'])
if interceptors:
interceptors = interceptors.split(',')
suffix = arrays[array_name]['nettacker_fuzzer']['suffix']
processed_array = []
for sub_data in data_matrix:
                # map each generated value back to its input name
                formatted_data = dict(zip(data.keys(), sub_data))
interceptors_function = ''
interceptors_function_processed = ''
if interceptors:
interceptors_function += 'interceptors_function_processed = '
for interceptor in interceptors[::-1]:
interceptors_function += '{interceptor}('.format(interceptor=interceptor)
interceptors_function += 'input_format.format(**formatted_data)' + str(
')' * interceptors_function.count('('))
expected_variables = {}
globals().update(locals())
exec(interceptors_function, globals(), expected_variables)
interceptors_function_processed = expected_variables['interceptors_function_processed']
else:
interceptors_function_processed = input_format.format(**formatted_data)
processed_sub_data = interceptors_function_processed
if prefix:
processed_sub_data = prefix + processed_sub_data
if suffix:
processed_sub_data = processed_sub_data + suffix
processed_array.append(copy.deepcopy(processed_sub_data))
original_arrays[array_name] = processed_array
return original_arrays
def expand_module_steps(content):
original_content = copy.deepcopy(content)
for protocol_lib in content:
for sub_step in content[content.index(protocol_lib)]['steps']:
arrays = nettacker_fuzzer_repeater_perform(find_repeaters(sub_step, '', {}))
if arrays:
original_content[content.index(protocol_lib)]['steps'][
original_content[content.index(protocol_lib)]['steps'].index(sub_step)
] = generate_new_sub_steps(sub_step, class_to_value(arrays_to_matrix(arrays)), arrays)
else:
original_content[content.index(protocol_lib)]['steps'][
original_content[content.index(protocol_lib)]['steps'].index(sub_step)
] = [ # minimum 1 step in array
original_content[content.index(protocol_lib)]['steps'][
original_content[content.index(protocol_lib)]['steps'].index(sub_step)
]
]
return original_content
def sort_dictonary(dictionary):
etc_flag = '...' in dictionary
if etc_flag:
del dictionary['...']
sorted_dictionary = {}
for key in sorted(dictionary):
sorted_dictionary[key] = dictionary[key]
if etc_flag:
sorted_dictionary['...'] = {}
return sorted_dictionary
|
{"hexsha": "022bb5a04c4baaf5ef97fbb76cc54e61a1bb46fe", "size": 18186, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/utility.py", "max_stars_repo_name": "cbrunnkvist/Nettacker", "max_stars_repo_head_hexsha": "de2b0f9c64c48105eb2f7150ece9b44ecb8644ff", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-22T12:49:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-22T12:49:41.000Z", "max_issues_repo_path": "core/utility.py", "max_issues_repo_name": "cbrunnkvist/Nettacker", "max_issues_repo_head_hexsha": "de2b0f9c64c48105eb2f7150ece9b44ecb8644ff", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2021-10-04T06:04:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T23:26:14.000Z", "max_forks_repo_path": "core/utility.py", "max_forks_repo_name": "itsdivyanshjain/Nettacker", "max_forks_repo_head_hexsha": "787a9f6005f176ce4ffd0c8aa61f5bc597d8504d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4448897796, "max_line_length": 120, "alphanum_fraction": 0.5880897394, "include": true, "reason": "import numpy", "num_tokens": 3623}
|
#pragma once
#include <cstddef>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/iterator/iterator_facade.hpp>
#include "nifty/graph/detail/andres/grid-graph.hxx"
#include "nifty/tools/runtime_check.hxx"
#include "nifty/tools/for_each_coordinate.hxx"
#include "nifty/graph/undirected_graph_base.hxx"
#include "nifty/graph/detail/adjacency.hxx"
#include "nifty/graph/graph_tags.hxx"
#include "nifty/parallel/threadpool.hxx"
#include "nifty/array/arithmetic_array.hxx"
#include "nifty/xtensor/xtensor.hxx"
namespace nifty{
namespace graph{
namespace detail_graph{
template<std::size_t DIM, bool SIMPLE_NH>
class UndirectedGridGraphIter{
public:
typedef andres::graph::GridGraph<DIM> AGridGraph;
typedef typename AGridGraph::AdjacencyIterator AGridGraphAdjacencyIter;
typedef UndirectedAdjacency<int64_t,int64_t,int64_t,int64_t> NodeAdjacency;
struct UnaryFunction{
typedef NodeAdjacency value_type;
template<class ADJ>
NodeAdjacency operator()(const ADJ & adjacency)const{
return NodeAdjacency(adjacency.vertex(), adjacency.vertex());
}
};
typedef boost::transform_iterator<
UnaryFunction,
typename AGridGraph::AdjacencyIterator,
NodeAdjacency,
NodeAdjacency
> OldAdjacencyIter;
class AdjacencyIter
: public boost::iterator_facade<
AdjacencyIter,
NodeAdjacency,
std::random_access_iterator_tag,
const NodeAdjacency &
>
{
public:
AdjacencyIter(const AGridGraphAdjacencyIter & iter)
: iter_(iter),
adjacency_(){
}
bool equal(const AdjacencyIter & other)const{
return iter_ == other.iter_;
}
void increment(){
++iter_;
}
            // boost::iterator_facade expects `decrement` for bidirectional
            // and random-access traversal
            void decrement(){
                --iter_;
            }
void advance(const std::size_t n){
iter_+=n;
}
std::ptrdiff_t distance_to(const AdjacencyIter & other)const{
return std::distance(iter_, other.iter_);
}
const NodeAdjacency & dereference()const{
adjacency_ = NodeAdjacency(iter_->vertex(), iter_->edge());
return adjacency_;
}
private:
mutable AGridGraphAdjacencyIter iter_;
mutable NodeAdjacency adjacency_;
};
class NodeIter : public boost::counting_iterator<int64_t>{
using boost::counting_iterator<int64_t>::counting_iterator;
using boost::counting_iterator<int64_t>::operator=;
};
class EdgeIter : public boost::counting_iterator<int64_t>{
using boost::counting_iterator<int64_t>::counting_iterator;
using boost::counting_iterator<int64_t>::operator=;
};
};
};
template<std::size_t DIM, bool SIMPLE_NH>
class UndirectedGridGraph;
template<std::size_t DIM>
class UndirectedGridGraph<DIM,true> : public
UndirectedGraphBase<
UndirectedGridGraph<DIM, true>,
typename detail_graph::UndirectedGridGraphIter<DIM,true>::NodeIter,
typename detail_graph::UndirectedGridGraphIter<DIM,true>::EdgeIter,
typename detail_graph::UndirectedGridGraphIter<DIM,true>::AdjacencyIter
>
{
private:
typedef andres::graph::GridGraph<DIM> AndresGridGraphType;
typedef typename AndresGridGraphType::VertexCoordinate AndresVertexCoordinate;
public:
typedef nifty::array::StaticArray<int64_t, DIM> ShapeType;
typedef nifty::array::StaticArray<int64_t, DIM> CoordinateType;
typedef typename detail_graph::UndirectedGridGraphIter<DIM,true>::NodeIter NodeIter;
typedef typename detail_graph::UndirectedGridGraphIter<DIM,true>::EdgeIter EdgeIter;
typedef typename detail_graph::UndirectedGridGraphIter<DIM,true>::AdjacencyIter AdjacencyIter;
typedef ContiguousTag EdgeIdTag;
typedef ContiguousTag NodeIdTag;
typedef SortedTag EdgeIdOrderTag;
typedef SortedTag NodeIdOrderTag;
UndirectedGridGraph()
: gridGraph_(){
}
template<class T>
UndirectedGridGraph(const nifty::array::StaticArray<T, DIM> & shape)
: gridGraph_(){
AndresVertexCoordinate ashape;
std::copy(shape.rbegin(), shape.rend(), ashape.begin());
gridGraph_.assign(ashape);
}
template<class T>
void assign(const nifty::array::StaticArray<T, DIM> & shape){
AndresVertexCoordinate ashape;
std::copy(shape.rbegin(), shape.rend(), ashape.begin());
gridGraph_.assign(ashape);
}
//void assign(const uint64_t numberOfNodes = 0, const uint64_t reserveNumberOfEdges = 0);
// MUST IMPL INTERFACE
int64_t u(const int64_t e)const{
return gridGraph_.vertexOfEdge(e,0);
}
int64_t v(const int64_t e)const{
return gridGraph_.vertexOfEdge(e,1);
}
int64_t findEdge(const int64_t u, const int64_t v)const{
const auto r = gridGraph_.findEdge(u,v);
if(r.first)
return r.second;
else
return -1;
}
int64_t nodeIdUpperBound() const{
return numberOfNodes() == 0 ? 0 : numberOfNodes()-1;
}
int64_t edgeIdUpperBound() const{
return numberOfEdges() == 0 ? 0 : numberOfEdges()-1;
}
uint64_t numberOfEdges() const{
return gridGraph_.numberOfEdges();
}
uint64_t numberOfNodes() const{
return gridGraph_.numberOfVertices();
}
NodeIter nodesBegin()const{
return NodeIter(0);
}
NodeIter nodesEnd()const{
return NodeIter(this->numberOfNodes());
}
EdgeIter edgesBegin()const{
return EdgeIter(0);
}
EdgeIter edgesEnd()const{
return EdgeIter(this->numberOfEdges());
}
AdjacencyIter adjacencyBegin(const int64_t node)const{
return AdjacencyIter(gridGraph_.adjacenciesFromVertexBegin(node));
}
AdjacencyIter adjacencyEnd(const int64_t node)const{
return AdjacencyIter(gridGraph_.adjacenciesFromVertexEnd(node));
}
AdjacencyIter adjacencyOutBegin(const int64_t node)const{
return AdjacencyIter(gridGraph_.adjacenciesFromVertexBegin(node));
}
AdjacencyIter adjacencyOutEnd(const int64_t node)const{
return AdjacencyIter(gridGraph_.adjacenciesFromVertexEnd(node));
}
// optional (with default impl in base)
//std::pair<int64_t,int64_t> uv(const int64_t e)const;
template<class F>
void forEachEdge(F && f)const{
for(uint64_t edge=0; edge< numberOfEdges(); ++edge){
f(edge);
}
}
template<class F>
void forEachNode(F && f)const{
for(uint64_t node=0; node< numberOfNodes(); ++node){
f(node);
}
}
// serialization de-serialization
uint64_t serializationSize() const{
return DIM + 1;
}
template<class ITER>
void serialize(ITER iter) const{
for(auto d=0; d<DIM; ++d){
*iter = gridGraph_.shape(d);
++iter;
}
// simple nh?
*iter = true;
++iter;
}
template<class ITER>
void deserialize(ITER iter);
        /**
         * @brief convert an image with DIM dimension to an edge map
         * @details convert an image with DIM dimension to an edge map
         * by applying a binary functor to the values of a node map at
         * the endpoints of an edge.
         *
         * @param image the input image
         * @param binaryFunctor a binary functor
         * @param[out] edgeMap the result edge map
         */
template<class IMAGE, class BINARY_FUNCTOR, class EDGE_MAP>
void imageToEdgeMap(
const IMAGE & image,
BINARY_FUNCTOR binaryFunctor,
EDGE_MAP & edgeMap
)const{
for(const auto edge : this->edges()){
const auto uv = this->uv(edge);
CoordinateType cU, cV;
nodeToCoordinate(uv.first, cU);
nodeToCoordinate(uv.second, cV);
                const auto uVal = xtensor::read(image, cU.asStdArray());
                const auto vVal = xtensor::read(image, cV.asStdArray());
edgeMap[edge] = binaryFunctor(uVal, vVal);
}
}
// TODO parallelize
        /**
         * @brief convert an affinity map with DIM+1 dimensions to an edge map
         * @details convert an affinity map with DIM+1 dimensions to an edge map
         * by assigning the affinity values to the corresponding edges
         *
         * @param affinities the input affinities
         * @param[out] edgeMap the result edge map
         */
template<class AFFINITIES, class EDGE_MAP>
void affinitiesToEdgeMap(const AFFINITIES & affinities,
EDGE_MAP & edgeMap) const {
NIFTY_CHECK_OP(affinities.shape()[0], ==, DIM, "wrong number of affinity channels")
for(auto d=1; d<DIM+1; ++d){
NIFTY_CHECK_OP(shape(d-1), ==, affinities.shape()[d], "wrong shape")
}
typedef nifty::array::StaticArray<int64_t, DIM+1> AffinityCoordType;
CoordinateType cU, cV;
for(const auto edge : this->edges()){
const auto uv = this->uv(edge);
nodeToCoordinate(uv.first, cU);
nodeToCoordinate(uv.second, cV);
// find the correct affinity edge
AffinityCoordType affCoord;
for(std::size_t d = 0; d < DIM; ++d) {
auto diff = cU[d] - cV[d];
if(diff == 0) {
affCoord[d + 1] = cU[d];
}
else {
// TODO max for different direction convention
affCoord[d + 1] = std::min(cU[d], cV[d]);
affCoord[0] = d;
}
}
edgeMap[edge] = xtensor::read(affinities, affCoord.asStdArray());
}
}
template<class AFFINITIES, class LOCAL_FEATURES,
class LIFTED_UVS, class LIFTED_FEATURES>
std::size_t longRangeAffinitiesToLiftedEdges(const AFFINITIES & affinities,
xt::xexpression<LOCAL_FEATURES> & localFeaturesExp,
xt::xexpression<LIFTED_UVS> & liftedUvsExp,
xt::xexpression<LIFTED_FEATURES> & liftedFeaturesExp,
const std::vector<std::vector<int>> & offsets) const {
auto & localFeatures = localFeaturesExp.derived_cast();
auto & liftedUvs = liftedUvsExp.derived_cast();
auto & liftedFeatures = liftedFeaturesExp.derived_cast();
//
typedef nifty::array::StaticArray<int64_t, DIM+1> AffinityCoordType;
for(auto d=1; d<DIM+1; ++d){
NIFTY_CHECK_OP(shape(d-1), ==, affinities.shape()[d], "wrong shape")
}
std::size_t affLen = affinities.shape()[0];
AffinityCoordType affShape;
affShape[0] = affLen;
for(unsigned d = 0; d < DIM; ++d) {
affShape[d + 1] = shape(d);
}
std::size_t liftedEdgeId = 0;
tools::forEachCoordinate(affShape, [&](const AffinityCoordType & affCoord) {
const auto & offset = offsets[affCoord[0]];
CoordinateType cU, cV;
for(unsigned d = 0; d < DIM; ++d) {
cU[d] = affCoord[d + 1];
cV[d] = affCoord[d + 1] + offset[d];
// range check
if(cV[d] >= shape(d) || cV[d] < 0) {
return;
}
}
const std::size_t u = coordianteToNode(cU);
const std::size_t v = coordianteToNode(cV);
            // findEdge returns -1 for unconnected node pairs, so keep the
            // signed type for the comparison below
            const int64_t e = findEdge(u, v);
            if(e == -1) {
liftedFeatures(liftedEdgeId) = xtensor::read(affinities, affCoord.asStdArray());
liftedUvs(liftedEdgeId, 0) = std::min(u, v);
liftedUvs(liftedEdgeId, 1) = std::max(u, v);
++liftedEdgeId;
} else {
localFeatures(e) = xtensor::read(affinities, affCoord.asStdArray());
}
});
return liftedEdgeId;
}
template<class AFFINITIES, class LOCAL_FEATURES,
class LIFTED_UVS, class LIFTED_FEATURES>
std::size_t longRangeAffinitiesToLiftedEdges(const AFFINITIES & affinities,
LOCAL_FEATURES & localFeatures,
LIFTED_UVS & liftedUvs,
LIFTED_FEATURES & liftedFeatures,
const std::vector<std::vector<int>> & offsets,
const std::vector<int> & strides) const {
//
typedef nifty::array::StaticArray<int64_t, DIM+1> AffinityCoordType;
for(auto d=1; d<DIM+1; ++d){
NIFTY_CHECK_OP(shape(d-1), ==, affinities.shape()[d], "wrong shape")
}
std::size_t affLen = affinities.shape()[0];
AffinityCoordType affShape;
affShape[0] = affLen;
for(unsigned d = 0; d < DIM; ++d) {
affShape[d + 1] = shape(d);
}
std::size_t liftedEdgeId = 0;
tools::forEachCoordinate(affShape, [&](const AffinityCoordType & affCoord) {
const auto & offset = offsets[affCoord[0]];
CoordinateType cU, cV;
for(unsigned d = 0; d < DIM; ++d) {
cU[d] = affCoord[d + 1];
cV[d] = affCoord[d + 1] + offset[d];
// range check
if(cV[d] >= shape(d) || cV[d] < 0) {
return;
}
}
// check if we are in the strides for channels > DIM
if(affCoord[0] > DIM) {
bool inStride = true;
for(unsigned d = 0; d < DIM; ++d) {
if(cU[d] % strides[d] != 0) {
inStride = false;
break;
}
}
if(!inStride) {
return;
}
}
const std::size_t u = coordianteToNode(cU);
const std::size_t v = coordianteToNode(cV);
            // signed on purpose: findEdge returns -1 for unconnected pairs
            const int64_t e = findEdge(u, v);
            if(e == -1) {
liftedFeatures(liftedEdgeId) = xtensor::read(affinities, affCoord.asStdArray());
liftedUvs(liftedEdgeId, 0) = std::min(u, v);
liftedUvs(liftedEdgeId, 1) = std::max(u, v);
++liftedEdgeId;
} else {
localFeatures(e) = xtensor::read(affinities, affCoord.asStdArray());
}
});
return liftedEdgeId;
}
        /**
         * @brief convert an image with DIM dimension to an edge map
         * @details convert an image with DIM dimension to an edge map
         * by taking the values of the image at the
         * interpixel coordinates.
         * The shape of the image must be 2*shape-1
         *
         * @param image the input image
         * @param[out] edgeMap the result edge map
         */
template<class IMAGE, class EDGE_MAP>
void imageToInterpixelEdgeMap(
const IMAGE & image,
EDGE_MAP & edgeMap
)const{
            for(auto d=0; d<DIM; ++d){
                NIFTY_CHECK_OP(shape(d)*2 - 1, ==, image.shape()[d],
                    "wrong shape for image to interpixel edge map")
            }
            for(const auto edge : this->edges()){
                const auto uv = this->uv(edge);
                CoordinateType cU,cV;
                nodeToCoordinate(uv.first, cU);
                nodeToCoordinate(uv.second, cV);
                // in the interpixel grid (shape 2*shape - 1) the edge between
                // u and v sits at coordinate cU + cV, the midpoint in doubled
                // coordinates
                cU += cV;
                edgeMap(edge) = xtensor::read(image, cU.asStdArray());
            }
}
uint64_t shape(const std::size_t d)const{
return gridGraph_.shape(DIM-1-d);
}
// COORDINATE RELATED
CoordinateType nodeToCoordinate(const uint64_t node)const{
CoordinateType ret;
nodeToCoordinate(node, ret);
return ret;
}
template<class NODE_COORDINATE>
void nodeToCoordinate(
const uint64_t node,
NODE_COORDINATE & coordinate
)const{
AndresVertexCoordinate aCoordinate;
gridGraph_.vertex(node, aCoordinate);
for(auto d=0; d<DIM; ++d){
coordinate[d] = aCoordinate[DIM-1-d];
}
}
template<class NODE_COORDINATE>
uint64_t coordianteToNode(const NODE_COORDINATE & coordinate)const{
AndresVertexCoordinate aCoordinate;
for(auto d=0; d<DIM; ++d){
aCoordinate[DIM-1-d] = coordinate[d];
}
return gridGraph_.vertex(aCoordinate);
}
private:
andres::graph::GridGraph<DIM> gridGraph_;
};
} // namespace nifty::graph
} // namespace nifty
|
{"hexsha": "57cce2d39b569d8deee6bc31bd5d95d21855f624", "size": 17237, "ext": "hxx", "lang": "C++", "max_stars_repo_path": "include/nifty/graph/undirected_grid_graph.hxx", "max_stars_repo_name": "konopczynski/nifty", "max_stars_repo_head_hexsha": "dc02ac60febaabfaf9b2ee5a854bb61436ebdc97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/nifty/graph/undirected_grid_graph.hxx", "max_issues_repo_name": "konopczynski/nifty", "max_issues_repo_head_hexsha": "dc02ac60febaabfaf9b2ee5a854bb61436ebdc97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/nifty/graph/undirected_grid_graph.hxx", "max_forks_repo_name": "konopczynski/nifty", "max_forks_repo_head_hexsha": "dc02ac60febaabfaf9b2ee5a854bb61436ebdc97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6856617647, "max_line_length": 98, "alphanum_fraction": 0.5739397807, "num_tokens": 4116}
|
import numpy as np
from ..hardware import Hardware
from ..world import World
async def drive_arc(world: World, hardware: Hardware):
while world.robot.prediction.x < 2:
await hardware.drive(1, np.deg2rad(25))
await hardware.stop()
|
{"hexsha": "afcfdad0276798e91e725317666e3d86004ea84b", "size": 249, "ext": "py", "lang": "Python", "max_stars_repo_path": "rosys/automations/arc.py", "max_stars_repo_name": "zauberzeug/rosys", "max_stars_repo_head_hexsha": "10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-20T08:21:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T08:21:07.000Z", "max_issues_repo_path": "rosys/automations/arc.py", "max_issues_repo_name": "zauberzeug/rosys", "max_issues_repo_head_hexsha": "10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-08T12:46:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T12:46:09.000Z", "max_forks_repo_path": "rosys/automations/arc.py", "max_forks_repo_name": "zauberzeug/rosys", "max_forks_repo_head_hexsha": "10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6363636364, "max_line_length": 54, "alphanum_fraction": 0.7148594378, "include": true, "reason": "import numpy", "num_tokens": 59}
|
from jax import random, vmap, numpy as jnp, value_and_grad
from jax.scipy.special import logsumexp
from jaxns.gaussian_process.utils import squared_norm
def log_tomographic_weight_function_stochastic(key, u, x1, p1, x2, p2):
"""
int w(x) f(x) dx = sum_i w(dx * i) f(dx * i) dx
where,
int w(x) dx = sum_i w(dx * i) dx = 1
Args:
key:
u:
x1: [N, 3]
p1: [N, 3]
x2: [M, 3]
p2: [M, 3]
Returns:
w(dx*i) dx / sum_i w(dx * i) dx
[N, M] shaped
"""
n = u.size ** 2
N = x1.shape[0]
M = x2.shape[0]
    # draw t1 and t2 from independent keys; reusing the same key would make
    # the two parameter samples identical
    key1, key2 = random.split(key)
    t1 = random.uniform(key1, shape=(n, N, 1))
    t2 = random.uniform(key2, shape=(n, M, 1))
# L, N, M
norm_squared = vmap(squared_norm)(x1 + t1 * p1, x2 + t2 * p2)
bins = jnp.concatenate([u, u[-1:] + u[-1] - u[-2]])
# N*M, U
hist = vmap(lambda x: jnp.histogram(x, bins)[0])(jnp.reshape(norm_squared, (n, -1)).T)
# N,M,U
hist = jnp.reshape(hist, (x1.shape[0], x2.shape[0], u.size))
log_hist = jnp.log(hist)
    log_du = jnp.log(jnp.diff(bins))
log_w = log_hist + log_du
# N,M,U
log_w = log_w - logsumexp(log_w, axis=-1, keepdims=True)
log_w = jnp.where(hist == 0., -jnp.inf, log_w)
return log_w
def tomographic_weight_function_stochastic(key, u, x1, p1, x2, p2):
return jnp.exp(log_tomographic_weight_function_stochastic(key, u, x1, p1, x2, p2))
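# A minimal usage sketch (assumed shapes, not part of the original API): rays
# are parametrized as x + t * p with t in [0, 1], and u holds the left edges of
# the squared-separation histogram bins.
#
#     key = random.PRNGKey(0)
#     x1 = random.normal(key, (4, 3))
#     p1 = random.normal(key, (4, 3))
#     x2 = random.normal(key, (5, 3))
#     p2 = random.normal(key, (5, 3))
#     u = jnp.linspace(0., 10., 20)
#     log_w = log_tomographic_weight_function_stochastic(key, u, x1, p1, x2, p2)
#     # log_w has shape [4, 5, 20]: one normalized histogram per ray pair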
def tomographic_weight_function_outer(gamma, x1, x2, p1, p2, S):
def inner(x1, p1):
# M
return vmap(lambda x2, p2: tomographic_weight_function(gamma, x1, x2, p1, p2, S))(x2, p2)
return vmap(inner)(x1, p1) # N, M
def log_tomographic_weight_function_outer(gamma, x1, x2, p1, p2, S):
return vmap(lambda x1,p1:
vmap(lambda x2, p2: log_tomographic_weight_function(gamma, x1, x2, p1, p2, S)
)(x2, p2)
)(x1, p1) # N, M
def tomographic_weight_function(gamma, x1, x2, p1, p2=None, S=25):
return jnp.exp(log_tomographic_weight_function(gamma, x1, x2, p1, p2=p2, S=S))
def log_tomographic_weight_function(gamma, x1, x2, p1, p2=None, S=25):
parabolic = False
if p2 is None:
parabolic = True
p2 = p1
x12 = x1 - x2
A = p1 @ p1
C = p2 @ p2
B = -2. * p1 @ p2
D = 2. * x12 @ p1
E = -2. * x12 @ p2
F = x12 @ x12 - gamma
t1 = jnp.linspace(0., 1., S)[:, None]
H = (D ** 2 - 4. * A * F + (2. * B * D - 4. * A * E) * t1 + (B ** 2 - 4. * A * C) * t1 ** 2)
u = (-D - B * t1)
lower = jnp.clip(0.5 * (u - jnp.sqrt(H)) / A, 0., 1.)
upper = jnp.clip(0.5 * (u + jnp.sqrt(H)) / A, 0., 1.)
diff = (upper - lower) / (S - 1)
if not parabolic:
reg_valid = H >= 0.
cdf = jnp.sum(jnp.where(reg_valid, diff, 0.), axis=0)
else:
cdf = jnp.sum(diff, axis=0)
return jnp.log(jnp.diff(cdf)) - jnp.log(jnp.diff(gamma))
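# Deterministic counterpart sketch (hypothetical inputs): gamma holds the bin
# edges of the squared separation, and the result is a log-density over the
# len(gamma) - 1 bins.
#
#     gamma = jnp.linspace(0.5, 5., 30)
#     x1 = jnp.array([0., 0., 0.])
#     p1 = jnp.array([0., 0., 1.])
#     x2 = jnp.array([1., 0., 0.])
#     p2 = jnp.array([0., 1., 0.])
#     log_w = log_tomographic_weight_function(gamma, x1, x2, p1, p2, S=25)
#     # log_w.shape == (29,)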
def cumulative_tomographic_weight_function(gamma, x1, x2, p1, p2=None, S=25):
parabolic = False
if p2 is None:
parabolic = True
p2 = p1
x12 = x1 - x2
A = p1 @ p1
C = p2 @ p2
B = -2. * p1 @ p2
D = 2. * x12 @ p1
E = -2. * x12 @ p2
F = x12 @ x12 - gamma
t1 = jnp.linspace(0., 1., S)[:, None]
H = (D ** 2 - 4. * A * F + 2. * B * D * t1 - 4. * A * E * t1 + B ** 2 * t1 ** 2 - 4. * A * C * t1 ** 2)
u = (-D - B * t1)
lower = jnp.clip(0.5 * (u - jnp.sqrt(H)) / A, 0., 1.)
upper = jnp.clip(0.5 * (u + jnp.sqrt(H)) / A, 0., 1.)
diff = (upper - lower) / (S - 1)
if not parabolic:
reg_valid = H >= 0.
cdf = jnp.sum(jnp.where(reg_valid, diff, 0.), axis=0)
else:
cdf = jnp.sum(diff, axis=0)
return cdf
def _tomographic_weight_function(gamma, x1, x2, p1, p2=None, S=25):
    from jax import grad
    return vmap(grad(lambda gamma: cumulative_tomographic_weight_function(gamma, x1, x2, p1, p2=p2, S=S)[0]))(gamma)
def cumulative_tomographic_weight_dimensionless_function(gamma_prime, n, w1, w2=None, S=25):
parabolic = False
if w2 is None:
parabolic = True
w2 = w1
A = w1 @ w1
C = w2 @ w2
B = -2. * w1 @ w2
D = 2. * n @ w1
E = -2. * n @ w2
F = 1. - gamma_prime
t1 = jnp.linspace(0., 1., S)[:, None]
H = (D ** 2 - 4. * A * F + 2. * B * D * t1 - 4. * A * E * t1 + B ** 2 * t1 ** 2 - 4. * A * C * t1 ** 2)
u = (-D - B * t1)
lower = jnp.clip(0.5 * (u - jnp.sqrt(H)) / A, 0., 1.)
upper = jnp.clip(0.5 * (u + jnp.sqrt(H)) / A, 0., 1.)
diff = (upper - lower) / (S - 1)
if not parabolic:
reg_valid = H >= 0.
cdf = jnp.sum(jnp.where(reg_valid, diff, 0.), axis=0)
else:
cdf = jnp.sum(diff, axis=0)
return cdf
def log_tomographic_weight_dimensionless_function(gamma_prime, n, w1, w2=None, S=25):
    # differentiate the cumulative weight at the bin midpoints; the density is
    # the gradient of the cdf with respect to gamma_prime
    def density(gamma_prime):
        def cdf(gamma_prime):
            return cumulative_tomographic_weight_dimensionless_function(gamma_prime, n, w1, w2=w2, S=S)[0]
        f, grad = value_and_grad(cdf)(gamma_prime)
        return grad
    w = vmap(density)(0.5 * (gamma_prime[:-1] + gamma_prime[1:]))
    return jnp.log(w)
def cumulative_tomographic_weight_dimensionless_polynomial(Q, gamma_prime, n, w1, w2):
"""
Computes log P(|x1-x2 + t1*p1 - t2*p2|^2 < lambda)
Note, that this is invariant to scaling of the input vectors by a scalar,
P(alpha*|x1-x2 + t1*p1 - t2*p2|^2 < alpha*lambda).
Therefore a dimensionless form is ,
P(|n + t1*w1 - t2*w2|^2 < lambda')
where,
n = x1-x2 / |x1-x2| is a unit vector.
w1 = p1 / |x1-x2|
w2 = p2 / |x1-x2|
lambda' = lambda / |x1-x2|^2
    Args:
        Q: polynomial coefficient matrix, flattened [K, 7]
        gamma_prime: dimensionless squared-distance threshold lambda'
        n: unit vector (x1 - x2) / |x1 - x2|
        w1: p1 / |x1 - x2|
        w2: p2 / |x1 - x2|
    Returns:
        polynomial approximation of the cumulative weight
    """
parabolic = False
if w2 is None:
parabolic = True
w2 = w1
A = w1 @ w1
C = w2 @ w2
B = -2. * w1 @ w2
D = 2. * n @ w1
E = -2. * n @ w2
F = 1. - gamma_prime
param = jnp.asarray([1., A, C, B, D, E, F])
Q = Q.reshape((-1, 7))
coefficients = Q @ param
return jnp.polyval(coefficients, gamma_prime)
def get_polynomial_form():
"""
The polynomial form of log P(|n + t1*w1 - t2*w2|^2 < lambda') is assumed to be:
c_i = Q_ij p_j
log_cdf = c_i g_i = g_i Q_ij p_j = Tr(Q @ (p g))
log_cdf_k = g_ki Q_ij p_kj
Returns:
"""
from jax.scipy.optimize import minimize
from jax import jit, value_and_grad
from jax.lax import scan
import pylab as plt
def generate_single_data(key):
"""
Generate a physical set of:
n = x1-x2/|x1-x2| is a unit vector.
w1 = p1 / |x1-x2|
w2 = p2 / |x1-x2|
lambda' = lambda / |x1-x2|^2
Args:
key:
Returns:
"""
keys = random.split(key, 6)
n = random.normal(keys[0], shape=(3,))
n = n / jnp.linalg.norm(n)
w1 = random.normal(keys[1], shape=(3,))
w1 = w1 / jnp.linalg.norm(w1)
w1 = w1 * random.uniform(keys[2], minval=0., maxval=10.)
w2 = random.normal(keys[3], shape=(3,))
w2 = w2 / jnp.linalg.norm(w2)
w2 = w2 * random.uniform(keys[4], minval=0., maxval=10.)
gamma_prime = jnp.linspace(0., 10., 100) # random.uniform(keys[5],minval=0.,maxval=10.)**2
cdf_ref = cumulative_tomographic_weight_dimensionless_function(gamma_prime, n, w1, w2, S=150) # /h**2
return n, w1, w2, gamma_prime, cdf_ref
data = jit(vmap(generate_single_data))(random.split(random.PRNGKey(12340985), 100))
# print(data[-1])
def loss(Q):
def single_loss(single_datum):
n, w1, w2, gamma_prime, cdf_ref = single_datum
return (vmap(
lambda gamma_prime: cumulative_tomographic_weight_dimensionless_polynomial(Q, gamma_prime, n, w1, w2))(
gamma_prime) - cdf_ref) ** 2
return jnp.mean(vmap(single_loss)(data))
K = 3
Q0 = 0.01 * random.normal(random.PRNGKey(0), shape=(K * 7,))
print(jit(loss)(Q0))
@jit
def do_minimize():
results = minimize(loss, Q0, method='BFGS', options=dict(gtol=1e-8, line_search_maxiter=100))
print(results.message)
return results.x.reshape((K, 7)), results.status, results.fun, results.nfev, results.nit, results.jac
@jit
def do_sgd(key):
def body(state, X):
(Q,) = state
(key,) = X
n, w1, w2, gamma_prime, cdf_ref = generate_single_data(key)
def loss(Q):
return jnp.mean((vmap(
lambda gamma_prime: cumulative_tomographic_weight_dimensionless_polynomial(Q, gamma_prime, n, w1,
w2))(
gamma_prime) - cdf_ref) ** 2) # + 0.1*jnp.mean(Q**2)
f, g = value_and_grad(loss)(Q)
Q = Q - 0.00000001 * g
return (Q,), (f,)
(Q,), (values,) = scan(body, (Q0,), (random.split(key, 1000),))
return Q.reshape((-1, 7)), values
# results = do_minimize()
Q, values = vmap(do_sgd)(random.split(random.PRNGKey(12456), 100))
print('Qmean', Q.mean(0))
print('Qstd', Q.std(0))
f = values.mean(0)
fstd = values.std(0)
plt.plot(jnp.percentile(values, 50, axis=0))
plt.plot(jnp.percentile(values, 85, axis=0), ls='dotted', c='black')
plt.plot(jnp.percentile(values, 15, axis=0), ls='dotted', c='black')
plt.show()
# print(results)
return Q.mean(0)
def gamma_min_max(x1, p1, x2, p2):
"""
Get the minimum and maximum separation squared between two line segments.
    |(x1-x2 + p1 t1 - p2 t2)|^2 smallest to largest over (t1, t2) in (0,1)x(0,1).
    |(x1-x2 + p1 t1 - p2 t2)|^2
     = |x1-x2|^2 + |p1 t1 - p2 t2|^2 + 2 (x1-x2).(p1 t1 - p2 t2)
     = |x1-x2|^2 + |p1|^2 t1^2 + |p2|^2 t2^2 - 2 t1 t2 p1.p2 + 2 (x1-x2).(p1 t1 - p2 t2)
     = A t1^2 + C t2^2 + B t1 t2 + D t1 + E t2 + F
    with the coefficients A..F as defined in the body below.
    Args:
        x1: start point of the first segment
        p1: direction vector of the first segment
        x2: start point of the second segment
        p2: direction vector of the second segment
    Returns:
        (gamma_min, gamma_max): minimum and maximum squared separation
    """
x12 = x1 - x2
A = p1 @ p1
C = p2 @ p2
B = -2. * p1 @ p2
D = 2. * x12 @ p1
E = -2. * x12 @ p2
F = x12 @ x12
disc = B ** 2 - 4. * A * C
end_point_dist = vmap(lambda t1, t2: jnp.sum(jnp.square(x12 + t1 * p1 - t2 * p2)))(
jnp.array([0.,0.,1.,1.]), jnp.array([0.,1.,0.,1.]))
parabolic = disc == 0.
t1 = (2. * C * D - B * E) / disc
t2 = (2. * A * E - B * D) / disc
closest_within_segments = (t1 > 0.) & (t1 < 1.) & (t2 > 0.) & (t2 < 1.)
gamma_min = jnp.where(parabolic | (~closest_within_segments),
jnp.min(end_point_dist),
(C * D ** 2 - B * D * E + A * E ** 2) / disc + F)
gamma_max = jnp.max(end_point_dist)
return gamma_min, gamma_max
def test_gamma_min_max():
x1 = jnp.array([0., 0.])
x2 = jnp.array([1., 0.])
p1 = jnp.array([0., 1.])
p2 = jnp.array([0., 1.])
assert gamma_min_max(x1, p1, x2, p2) == (1., 2.)
x1 = jnp.array([0., 0.])
x2 = jnp.array([1., 0.])
p1 = jnp.array([0., 1.])
p2 = jnp.array([1., 1.])
assert gamma_min_max(x1, p1, x2, p2) == (1., 5.)
|
{"hexsha": "5afac086abdd97d43e642c2eea8fc1509dff3791", "size": 11283, "ext": "py", "lang": "Python", "max_stars_repo_path": "jaxns/gaussian_process/tomographic_kernel/tomographic_kernel_utils.py", "max_stars_repo_name": "fehiepsi/jaxns", "max_stars_repo_head_hexsha": "9cf9366f11ace564e21f938edf4d090fb5de137d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jaxns/gaussian_process/tomographic_kernel/tomographic_kernel_utils.py", "max_issues_repo_name": "fehiepsi/jaxns", "max_issues_repo_head_hexsha": "9cf9366f11ace564e21f938edf4d090fb5de137d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jaxns/gaussian_process/tomographic_kernel/tomographic_kernel_utils.py", "max_forks_repo_name": "fehiepsi/jaxns", "max_forks_repo_head_hexsha": "9cf9366f11ace564e21f938edf4d090fb5de137d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7704485488, "max_line_length": 119, "alphanum_fraction": 0.5317734645, "include": true, "reason": "from jax", "num_tokens": 4123}
|
import xarray
import os
import numpy
import progressbar
import gsw
from ismip6_ocean_forcing.remap.interp1d import remap_vertical
from ismip6_ocean_forcing.remap.descriptor import get_antarctic_descriptor
from ismip6_ocean_forcing.remap.grid import LatLonGridDescriptor, \
LatLon2DGridDescriptor
from ismip6_ocean_forcing.remap.remapper import Remapper
from ismip6_ocean_forcing.remap.res import get_res, get_horiz_res
def remap_model(config, modelFolder):
_fix_units_and_periodicity(config, modelFolder)
_interp_z(config, modelFolder)
_remap(config, modelFolder)
def _fix_units_and_periodicity(config, modelFolder):
modelName = config.get('model', 'name')
tIndexMin = config.getint('output', 'tIndexMin')
tIndexMax = config.getint('output', 'tIndexMax')
if tIndexMax == -1:
tIndexMax = None
else:
tIndexMax += 1
renameDict = {}
for varName in ['lat', 'lon', 'z', 'time', 'temperature', 'salinity',
'z_bnds']:
oldVarName = config.get('model', varName)
renameDict[oldVarName] = varName
inFileNames = {}
outFileNames = {}
bothExist = True
for fieldName in ['temperature', 'salinity']:
inFileNames[fieldName] = \
config.get('model', '{}FileName'.format(fieldName))
outFileNames[fieldName] = \
'{}/{}_{}_periodic.nc'.format(modelFolder, modelName, fieldName)
if not os.path.exists(outFileNames[fieldName]):
bothExist = False
if bothExist:
return
print(' Add a periodic image in longitude and fix units...')
datasets = {}
for fieldName in inFileNames:
inFileName = inFileNames[fieldName]
outFileName = outFileNames[fieldName]
print(' {}'.format(outFileName))
keepList = ['lat', 'lon', 'z', 'time', 'z_bnds', fieldName]
ds = xarray.open_dataset(inFileName)
for name in renameDict:
if name in ds:
ds = ds.rename({name: renameDict[name]})
dropList = []
ds = ds.isel(time=slice(tIndexMin, tIndexMax))
for coord in ds.coords:
if coord not in keepList:
ds = ds.drop(coord)
for var in ds.data_vars:
if var not in keepList:
dropList.append(var)
ds = ds.drop(dropList)
ds.z.attrs['bounds'] = 'z_bnds'
if numpy.amax(ds.z.values) > 0.:
attrs = ds.z.attrs
attrs['positive'] = 'up'
ds['z'] = -ds.z
ds.z.attrs = attrs
attrs = ds.z_bnds.attrs
ds['z_bnds'] = -ds.z_bnds
ds.z_bnds.attrs = attrs
if fieldName == 'temperature':
if ds.temperature.attrs['units'] == 'K':
attrs = ds.temperature.attrs
attrs['units'] = 'degrees C'
ds['temperature'] = ds.temperature - 273.15
ds.temperature.attrs = attrs
if fieldName == 'salinity':
if 'units' not in ds.salinity.attrs:
# Let's hope it's PSU...
ds.salinity.attrs['units'] = 'PSU'
if len(ds.lon.dims) == 1:
lonDim = ds.lon.dims[0]
lonRange = ds.lon[-1].values - ds.lon[0].values
if numpy.abs(lonRange - 360.) > 1e-10:
# Needs a periodic image
ds = _add_periodic_lon(ds, lonDim)
else:
assert(len(ds.lon.dims) == 2)
lonDim = ds.lon.dims[1]
lonRange = ds.lon[0, -1].values - ds.lon[0, 0].values
if numpy.abs(lonRange - 360.) > 1e-10:
# Needs a periodic image
ds = _add_periodic_lon(ds, lonDim)
datasets[fieldName] = ds
dsTemp = _potential_to_in_situ_temperature(datasets['temperature'],
datasets['salinity'])
dsTemp.to_netcdf(outFileNames['temperature'])
datasets['salinity'].to_netcdf(outFileNames['salinity'])
def _add_periodic_lon(ds, lonDim):
nLon = ds.sizes[lonDim]
lonIndices = xarray.DataArray(numpy.append(numpy.arange(nLon), [0]),
dims=('newLon',))
ds.load()
ds = ds.isel({lonDim: lonIndices})
ds = ds.rename({'newLon': lonDim})
return ds
def _interp_z(config, modelFolder):
modelName = config.get('model', 'name')
inFileNames = {}
outFileNames = {}
for fieldName in ['temperature', 'salinity']:
inFileNames[fieldName] = \
'{}/{}_{}_periodic.nc'.format(modelFolder, modelName, fieldName)
outFileNames[fieldName] = \
'{}/{}_{}_interp_z.nc'.format(modelFolder, modelName, fieldName)
remap_vertical(config, inFileNames, outFileNames, extrap=True)
def _remap(config, modelFolder):
res = get_res(config)
hres = get_horiz_res(config)
modelName = config.get('model', 'name')
inFileNames = {}
outFileNames = {}
bothExist = True
for fieldName in ['temperature', 'salinity']:
inFileNames[fieldName] = \
'{}/{}_{}_interp_z.nc'.format(modelFolder, modelName, fieldName)
outFileNames[fieldName] = \
'{}/{}_{}_{}.nc'.format(modelFolder, modelName, fieldName, res)
if not os.path.exists(outFileNames[fieldName]):
bothExist = False
if bothExist:
return
print(' Remapping to {} grid...'.format(res))
for fieldName in inFileNames:
inFileName = inFileNames[fieldName]
outFileName = outFileNames[fieldName]
if os.path.exists(outFileName):
continue
outGridFileName = 'ismip6/{}_grid.nc'.format(hres)
print(' {}'.format(outFileName))
progressDir = '{}/progress_remap_{}'.format(modelFolder, fieldName)
try:
os.makedirs(progressDir)
except OSError:
pass
ds = xarray.open_dataset(inFileName)
if len(ds.lon.dims) == 1:
inDescriptor = LatLonGridDescriptor.read(
inFileName, latVarName='lat', lonVarName='lon')
else:
assert(len(ds.lon.dims) == 2)
inDescriptor = LatLon2DGridDescriptor.read(
inFileName, latVarName='lat', lonVarName='lon')
inDescriptor.regional = True
outDescriptor = get_antarctic_descriptor(outGridFileName)
mappingFileName = '{}/map_{}_to_{}.nc'.format(
modelName.lower(), inDescriptor.meshName,
outDescriptor.meshName)
remapper = Remapper(inDescriptor, outDescriptor, mappingFileName)
remapper.build_mapping_file(method='bilinear')
ds = ds.drop(['lat', 'lon'])
nt = ds.sizes['time']
widgets = [' ', progressbar.Percentage(), ' ',
progressbar.Bar(), ' ', progressbar.ETA()]
bar = progressbar.ProgressBar(widgets=widgets,
maxval=nt).start()
for tIndex in range(nt):
progressFileName = '{}/{}_t_{}.nc'.format(
progressDir, modelName, tIndex)
if os.path.exists(progressFileName):
bar.update(tIndex+1)
continue
dsIn = ds.isel(time=tIndex)
dsOut = remapper.remap(dsIn, renormalizationThreshold=0.1)
for attrName in ['units', 'standard_name', 'long_name']:
if attrName in ds[fieldName].attrs:
dsOut[fieldName].attrs[attrName] = \
ds[fieldName].attrs[attrName]
dsOut.z.attrs = ds.z.attrs
dsOut.to_netcdf(progressFileName)
bar.update(tIndex+1)
bar.finish()
dsOut = xarray.open_mfdataset(
'{}/{}_t_*.nc'.format(progressDir, modelName), concat_dim='time')
dsOut['z_bnds'] = ds.z_bnds
dsOut.to_netcdf(outFileName)
def _potential_to_in_situ_temperature(dsPotTemp, dsSalin):
z = dsPotTemp.z.values
lat = numpy.maximum(dsPotTemp.lat.values, -80.)
lon = dsPotTemp.lon.values
nz = len(z)
ny, nx = lat.shape
nt = dsPotTemp.sizes['time']
dsTemp = dsPotTemp.drop('temperature')
T = numpy.nan*numpy.ones((nt, nz, ny, nx))
for zIndex in range(nz):
pressure = gsw.p_from_z(z[zIndex], lat)
for tIndex in range(nt):
pt = dsPotTemp.temperature[tIndex, zIndex, :, :].values
salin = dsSalin.salinity[tIndex, zIndex, :, :].values
mask = numpy.logical_and(numpy.isfinite(pt), numpy.isfinite(salin))
SA = gsw.SA_from_SP(salin[mask], pressure[mask], lon[mask],
lat[mask])
TSlice = T[tIndex, zIndex, :, :]
CT = gsw.CT_from_pt(SA, pt[mask])
TSlice[mask] = gsw.t_from_CT(SA, CT, pressure[mask])
T[tIndex, zIndex, :, :] = TSlice
dsTemp['temperature'] = (dsPotTemp.temperature.dims, T)
dsTemp['temperature'].attrs = dsPotTemp.temperature.attrs
return dsTemp
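# A scalar sketch of the gsw conversion chain used above (illustrative values):
# potential temperature is referenced to the surface, so the in-situ value at
# depth differs once pressure is accounted for.
#
#     import gsw
#     pressure = gsw.p_from_z(-1000., -70.)          # dbar at 1000 m depth, 70S
#     SA = gsw.SA_from_SP(34.7, pressure, 0., -70.)  # absolute salinity
#     CT = gsw.CT_from_pt(SA, 1.0)                   # conservative temperature
#     t_insitu = gsw.t_from_CT(SA, CT, pressure)     # in-situ temperature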
|
{"hexsha": "3f6540638cc7751e043d6bc9206f5a922a2f2fb2", "size": 8990, "ext": "py", "lang": "Python", "max_stars_repo_path": "ismip6_ocean_forcing/model/remap.py", "max_stars_repo_name": "nicojourdain/ismip6-ocean-forcing", "max_stars_repo_head_hexsha": "2c28be2ce9c0e1e94db275a6a3e668ad4f871343", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ismip6_ocean_forcing/model/remap.py", "max_issues_repo_name": "nicojourdain/ismip6-ocean-forcing", "max_issues_repo_head_hexsha": "2c28be2ce9c0e1e94db275a6a3e668ad4f871343", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ismip6_ocean_forcing/model/remap.py", "max_forks_repo_name": "nicojourdain/ismip6-ocean-forcing", "max_forks_repo_head_hexsha": "2c28be2ce9c0e1e94db275a6a3e668ad4f871343", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4548736462, "max_line_length": 79, "alphanum_fraction": 0.5813125695, "include": true, "reason": "import numpy", "num_tokens": 2191}
|
# -*- coding: utf-8 -*-
"""
PEC cylinder
============
An example of scattering from a perfectly conducting cylinder
"""
import matplotlib.pyplot as plt
import numpy as np
import gyptis as gy
import gyptis.utils.data_download as dd
##############################################################################
# Reference results are taken from :cite:p:`Ruppin2006`.
pmesh = 10
degree = 2
R = 1
kR = np.linspace(0.09, 10, 15)
wl = 2 * gy.pi * R / kR
scs_gyptis = dict()
for polarization in ["TM", "TE"]:
scsnorm = []
for wavelength in wl:
lmin = wavelength / pmesh
Rcalc = R + 1 * R
lbox = Rcalc * 2 * 1.1
geom = gy.BoxPML(
dim=2,
box_size=(lbox, lbox),
pml_width=(wavelength, wavelength),
Rcalc=Rcalc,
)
box = geom.box
cyl = geom.add_circle(0, 0, 0, R)
box = geom.cut(box, cyl)
geom.add_physical(box, "box")
bnds = geom.get_boundaries("box")
cyl_bnds = bnds[1]
geom.add_physical(cyl_bnds, "cyl_bnds", dim=1)
        for pml in geom.pmls:
            geom.set_size(pml, lmin * 0.7)
geom.set_size("box", lmin)
geom.build()
pw = gy.PlaneWave(
wavelength=wavelength, angle=0, dim=2, domain=geom.mesh, degree=degree
)
bcs = {"cyl_bnds": "PEC"}
epsilon = dict(box=1)
mu = dict(box=1)
s = gy.Scattering(
geom,
epsilon,
mu,
pw,
degree=degree,
polarization=polarization,
boundary_conditions=bcs,
)
s.solve()
SCS = s.scattering_cross_section()
SCS_norma = SCS / (2 * R)
scsnorm.append(SCS_norma)
scs_gyptis[polarization] = scsnorm
for polarization in ["TM", "TE"]:
scs_file = dd.download_example_data(
data_file_name=f"scs_pec_{polarization}.csv",
example_dir="scattering",
)
benchmark = np.loadtxt(scs_file, delimiter=",")
p = plt.plot(
benchmark[:, 0],
benchmark[:, 1],
label=f"ref. {polarization}",
)
plt.plot(
kR,
scs_gyptis[polarization],
"o",
c=p[0].get_c(),
label=f"gyptis {polarization}",
)
plt.xlabel(r"$kR$")
plt.ylabel(r"$\sigma_{\rm s}/2R$")
plt.legend()
plt.tight_layout()
|
{"hexsha": "606f3b20a7f94f2dc69227a7b7b60bbdee3e44c3", "size": 2344, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/scattering/plot_scattering2d_pec_cylinder.py", "max_stars_repo_name": "benvial/gyptis", "max_stars_repo_head_hexsha": "3f197fe33d0a0bac72be05a474baff4e450af0ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/scattering/plot_scattering2d_pec_cylinder.py", "max_issues_repo_name": "benvial/gyptis", "max_issues_repo_head_hexsha": "3f197fe33d0a0bac72be05a474baff4e450af0ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/scattering/plot_scattering2d_pec_cylinder.py", "max_forks_repo_name": "benvial/gyptis", "max_forks_repo_head_hexsha": "3f197fe33d0a0bac72be05a474baff4e450af0ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1649484536, "max_line_length": 82, "alphanum_fraction": 0.5290102389, "include": true, "reason": "import numpy", "num_tokens": 656}
|
from numpy import log
def compute_1d_array_entropy(_1d_array):
    # normalize to probabilities; entries are assumed strictly positive,
    # since log(0) would propagate NaN through the sum
    probabilities = _1d_array / _1d_array.sum()
    return -(probabilities * log(probabilities)).sum()
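# Worked example (hypothetical input): for a uniform array the probabilities
# are all 1/n, so the entropy is -sum((1/n) * log(1/n)) = log(n).
#
#     from numpy import full
#     compute_1d_array_entropy(full(4, 2.5))  # == log(4) ~= 1.386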
|
{"hexsha": "a7c805c885022bdd274470e5a7f862662639c073", "size": 170, "ext": "py", "lang": "Python", "max_stars_repo_path": "ccal/compute_1d_array_entropy.py", "max_stars_repo_name": "alex-wenzel/ccal", "max_stars_repo_head_hexsha": "74dfc604d93e6ce9e12f34a828b601618df51faa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ccal/compute_1d_array_entropy.py", "max_issues_repo_name": "alex-wenzel/ccal", "max_issues_repo_head_hexsha": "74dfc604d93e6ce9e12f34a828b601618df51faa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ccal/compute_1d_array_entropy.py", "max_forks_repo_name": "alex-wenzel/ccal", "max_forks_repo_head_hexsha": "74dfc604d93e6ce9e12f34a828b601618df51faa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.8888888889, "max_line_length": 54, "alphanum_fraction": 0.7411764706, "include": true, "reason": "from numpy", "num_tokens": 43}
|
import textwrap
from pathlib import Path
import pyexasol
import pytest
from exasol_udf_mock_python.column import Column
from exasol_udf_mock_python.connection import Connection
from exasol_udf_mock_python.group import Group
from exasol_udf_mock_python.mock_exa_environment import MockExaEnvironment
from exasol_udf_mock_python.mock_meta_data import MockMetaData
from exasol_udf_mock_python.udf_mock_executor import UDFMockExecutor
from exasol_data_science_utils_python.preprocessing.sql.schema.schema_name import SchemaName
from exasol_bucketfs_utils_python.bucketfs_factory import BucketFSFactory
@pytest.fixture(scope="session")
def db_connection():
db_connection = Connection(address=f"localhost:8888", user="sys", password="exasol")
return db_connection
@pytest.fixture(scope="session")
def pyexasol_connection(db_connection):
conn = pyexasol.connect(dsn=db_connection.address, user=db_connection.user, password=db_connection.password)
return conn
@pytest.fixture(scope="session")
def upload_language_container(pyexasol_connection, language_container):
container_connection = Connection(address=f"http://localhost:6583/default/container;bfsdefault",
user="w", password="write")
bucket_fs_factory = BucketFSFactory()
container_bucketfs_location = \
bucket_fs_factory.create_bucketfs_location(
url=container_connection.address,
user=container_connection.user,
pwd=container_connection.password,
base_path=None)
container_path = Path(language_container["container_path"])
alter_session = Path(language_container["alter_session"])
pyexasol_connection.execute(f"ALTER SYSTEM SET SCRIPT_LANGUAGES='{alter_session}'")
pyexasol_connection.execute(f"ALTER SESSION SET SCRIPT_LANGUAGES='{alter_session}'")
with open(container_path, "rb") as container_file:
container_bucketfs_location.upload_fileobj_to_bucketfs(container_file, "ml.tar")
@pytest.fixture(scope="session")
def create_input_table(pyexasol_connection):
pyexasol_connection.execute("""
CREATE OR REPLACE TABLE TEST.ABC(
P1 INTEGER,
P2 INTEGER,
A FLOAT,
B FLOAT,
C FLOAT
)
""")
for i in range(1, 100):
if i % 100 == 0:
print(f"Insert {i}")
values = ",".join([f"({j % 2},{i % 2},{j * 1.0 * i}, {j * 2.0 * i}, {j * 3.0 * i})" for j in range(1, 100)])
pyexasol_connection.execute(f"INSERT INTO TEST.ABC VALUES {values}")
print("COUNT", pyexasol_connection.execute("SELECT count(*) FROM TEST.ABC").fetchall())
def drop_and_create_target_schema(pyexasol_connection):
try:
pyexasol_connection.execute("""
DROP SCHEMA TARGET_SCHEMA CASCADE;
""")
    except Exception:
        # the target schema may not exist yet
        pass
pyexasol_connection.execute("""CREATE SCHEMA TARGET_SCHEMA;""")
def udf_wrapper():
from exasol_udf_mock_python.udf_context import UDFContext
from sklearn.linear_model import SGDRegressor
from numpy.random import RandomState
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_description_based_table_preprocessor_factory import \
ColumnDescriptionBasedTablePreprocessorFactory
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_preprocessor_description import \
ColumnPreprocessorDescription
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.exact_column_name_selector import \
ExactColumnNameSelector
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.normalization.min_max_scaler_factory import \
MinMaxScalerFactory
from exasol_data_science_utils_python.model_utils.udfs.partial_fit_regression_train_udf import PartialFitRegressionTrainUDF
train_udf = PartialFitRegressionTrainUDF()
def run(ctx: UDFContext):
model = SGDRegressor(random_state=RandomState(0), loss="squared_loss", verbose=False,
fit_intercept=True, eta0=0.9, power_t=0.1, learning_rate='invscaling')
table_preprocessor_factory = ColumnDescriptionBasedTablePreprocessorFactory(
input_column_preprocessor_descriptions=[
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("A"),
column_preprocessor_factory=MinMaxScalerFactory()
),
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("B"),
column_preprocessor_factory=MinMaxScalerFactory()
),
],
target_column_preprocessor_descriptions=[
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("C"),
column_preprocessor_factory=MinMaxScalerFactory()
),
]
)
train_udf.run(exa, ctx, model, table_preprocessor_factory)
def test_train_udf_with_mock_random_partitions(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 3
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=3,
split_by_columns=None,
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_split_by_node(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 1
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=True,
number_of_random_partitions=None,
split_by_columns=None,
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_split_by_columns(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 4
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=None,
split_by_columns="P1,P2",
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_random_partitions_and_split_by_columns(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 6
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=3,
split_by_columns="P1",
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_split_by_node_and_random_partitions(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 2
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=True,
number_of_random_partitions=2,
split_by_columns=None
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_split_by_columns_empty_string(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
expected_number_of_base_models = 2
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=2,
split_by_columns="",
)
assert len(fitted_base_models) == expected_number_of_base_models
assert len(unique_base_models) == expected_number_of_base_models
assert len(fitted_combined_models) == 1
assert len(result) == 1
for group in result:
assert len(group.rows) == 1
def test_train_udf_with_mock_multiple_groups(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
number_of_groups = 2
expected_number_of_base_models = 2
result, fitted_base_models, fitted_combined_models, unique_base_models = \
run_mock_test_valid(
db_connection,
pyexasol_connection,
split_by_node=False,
number_of_random_partitions=2,
split_by_columns="",
number_of_groups=number_of_groups
)
unique_model_id_in_base_models = {row[1] for row in fitted_base_models}
assert len(fitted_base_models) == expected_number_of_base_models * number_of_groups
assert len(unique_model_id_in_base_models) == number_of_groups
assert len(unique_base_models) == expected_number_of_base_models * number_of_groups
assert len(fitted_combined_models) == 1 * number_of_groups
assert len(result) == number_of_groups
for group in result:
assert len(group.rows) == 1
def run_mock_test_valid(db_connection,
pyexasol_connection,
split_by_node: bool,
number_of_random_partitions: int,
split_by_columns: str,
number_of_groups: int = 1):
result = run_mock_test(db_connection,
pyexasol_connection,
split_by_node,
number_of_random_partitions,
split_by_columns,
number_of_groups)
fitted_base_models, fitted_combined_models, unique_base_models = get_results(pyexasol_connection, result)
return result, fitted_base_models, fitted_combined_models, unique_base_models
def get_results(pyexasol_connection, result):
fitted_base_models = pyexasol_connection.execute("""
SELECT * FROM TARGET_SCHEMA.FITTED_BASE_MODELS""").fetchall()
print("fitted_base_models", fitted_base_models)
fitted_combined_models = pyexasol_connection.execute("""
SELECT * FROM TARGET_SCHEMA.FITTED_COMBINED_MODEL""").fetchall()
print("fitted_combined_models", fitted_combined_models)
unique_base_models = {row[4] for row in fitted_base_models}
print("result", result)
return fitted_base_models, fitted_combined_models, unique_base_models
def run_mock_test(db_connection,
pyexasol_connection,
split_by_node: bool,
number_of_random_partitions: int,
split_by_columns: str,
number_of_groups: int = 1):
executor = UDFMockExecutor()
meta = MockMetaData(
script_code_wrapper_function=udf_wrapper,
input_type="SET",
input_columns=[
Column("model_connection", str, "VARCHAR(2000000)"),
Column("path_under_model_connection", str, "VARCHAR(2000000)"),
Column("download_retry_seconds", int, "INTEGER"),
Column("db_connection", str, "VARCHAR(2000000)"),
Column("source_schema_name", str, "VARCHAR(2000000)"),
Column("source_table_name", str, "VARCHAR(2000000)"),
Column("columns", str, "VARCHAR(2000000)"),
Column("target_schema_name", str, "VARCHAR(2000000)"),
Column("experiment_name", str, "VARCHAR(2000000)"),
Column("epochs", int, "INTEGER"),
Column("batch_size", int, "INTEGER"),
Column("shuffle_buffer_size", int, "INTEGER"),
Column("split_per_node", bool, "BOOLEAN"),
Column("number_of_random_partitions", int, "INTEGER"),
Column("split_by_columns", str, "VARCHAR(2000000)"),
],
output_type="EMIT",
output_columns=[
Column("job_id", str, "VARCHAR(2000000)"),
Column("model_id", str, "VARCHAR(2000000)"),
Column("model_connection_name", str, "VARCHAR(2000000)"),
Column("path_under_model_connection", str, "VARCHAR(2000000)"),
Column("model_path", str, "VARCHAR(2000000)"),
]
)
model_connection, model_connection_name = \
create_model_connection(pyexasol_connection)
drop_and_create_target_schema(pyexasol_connection)
exa = MockExaEnvironment(meta,
connections={
"MODEL_CONNECTION": model_connection,
"DB_CONNECTION": db_connection
})
groups = [Group([(
model_connection_name,
"my_path_under_model_connection_" + str(i),
60,
"DB_CONNECTION",
"TEST",
"ABC",
"A,B,C",
"TARGET_SCHEMA",
"EXPERIMENT",
10,
100,
10000,
split_by_node,
number_of_random_partitions,
split_by_columns
)]) for i in range(number_of_groups)]
result = list(executor.run(groups, exa))
return result
def test_train_udf(
upload_language_container,
create_input_table,
pyexasol_connection,
db_connection):
model_connection, model_connection_name = \
create_model_connection(pyexasol_connection)
db_connection, db_connection_name = \
create_db_connection(pyexasol_connection, db_connection)
target_schema = SchemaName("TARGET_SCHEMA")
drop_and_create_target_schema(pyexasol_connection)
udf_sql = textwrap.dedent(f"""
CREATE OR REPLACE PYTHON3_DSUP SET SCRIPT {target_schema.fully_qualified()}."TRAIN_UDF"(
model_connection VARCHAR(2000000),
path_under_model_connection VARCHAR(2000000),
download_retry_seconds INTEGER,
db_connection VARCHAR(2000000),
source_schema_name VARCHAR(2000000),
source_table_name VARCHAR(2000000),
columns VARCHAR(2000000),
target_schema_name VARCHAR(2000000),
experiment_name VARCHAR(2000000),
epochs INTEGER,
batch_size INTEGER,
shuffle_buffer_size INTEGER,
split_per_node BOOLEAN,
number_of_random_partitions INTEGER,
split_by_columns VARCHAR(2000000)
)
EMITS (
job_id VARCHAR(2000000),
model_id VARCHAR(2000000),
model_connection_name VARCHAR(2000000),
path_under_model_connection VARCHAR(2000000),
model_path VARCHAR(2000000)
) AS
from sklearn.linear_model import SGDRegressor
from numpy.random import RandomState
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_description_based_table_preprocessor_factory import \
ColumnDescriptionBasedTablePreprocessorFactory
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_preprocessor_description import \
ColumnPreprocessorDescription
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.exact_column_name_selector import \
ExactColumnNameSelector
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.normalization.min_max_scaler_factory import \
MinMaxScalerFactory
from exasol_data_science_utils_python.model_utils.udfs.partial_fit_regression_train_udf import PartialFitRegressionTrainUDF
train_udf = PartialFitRegressionTrainUDF()
def run(ctx):
model = SGDRegressor(random_state=RandomState(0), loss="squared_loss", verbose=False,
fit_intercept=True, eta0=0.9, power_t=0.1, learning_rate='invscaling')
table_preprocessor_factory = ColumnDescriptionBasedTablePreprocessorFactory(
input_column_preprocessor_descriptions=[
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("A"),
column_preprocessor_factory=MinMaxScalerFactory()
),
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("B"),
column_preprocessor_factory=MinMaxScalerFactory()
),
],
target_column_preprocessor_descriptions=[
ColumnPreprocessorDescription(
column_selector=ExactColumnNameSelector("C"),
column_preprocessor_factory=MinMaxScalerFactory()
),
]
)
train_udf.run(exa, ctx, model, table_preprocessor_factory)
""")
pyexasol_connection.execute(udf_sql)
query_udf = f"""
select {target_schema.fully_qualified()}."TRAIN_UDF"(
'{model_connection_name}',
'my_path_under_model_connection',
60,
'{db_connection_name}',
'TEST',
'ABC',
'A,B,C',
'TARGET_SCHEMA',
'EXPERIMENT',
10,
100,
10000,
True,
4,
null
)
"""
pyexasol_connection.execute(query_udf)
fitted_base_models = pyexasol_connection.execute("""
SELECT * FROM TARGET_SCHEMA.FITTED_BASE_MODELS""").fetchall()
print(fitted_base_models)
assert len(fitted_base_models) == 4
fitted_combined_models = pyexasol_connection.execute("""
SELECT * FROM TARGET_SCHEMA.FITTED_COMBINED_MODEL""").fetchall()
print(fitted_combined_models)
assert len(fitted_combined_models) == 1
def create_model_connection(conn):
model_connection = Connection(address=f"http://localhost:6583/default/model;bfsdefault",
user="w", password="write")
model_connection_name = "MODEL_CONNECTION"
return drop_and_create_connection(conn, model_connection, model_connection_name)
def create_db_connection(conn, db_connection):
db_connection_name = "DB_CONNECTION"
return drop_and_create_connection(conn, db_connection, db_connection_name)
def drop_and_create_connection(conn, model_connection, model_connection_name):
    try:
        conn.execute(f"DROP CONNECTION {model_connection_name}")
    except Exception:
        # the connection may not exist yet; ignore and (re)create it below
        pass
conn.execute(
f"CREATE CONNECTION {model_connection_name} TO '{model_connection.address}' USER '{model_connection.user}' IDENTIFIED BY '{model_connection.password}';")
return model_connection, model_connection_name
|
{"hexsha": "3db123e46456ae189a23df426bd125b5a0aff05a", "size": 19875, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/integration_tests/test_train_udf.py", "max_stars_repo_name": "exasol/data-science-utils-python", "max_stars_repo_head_hexsha": "44f4019b30f4945e14a8ee19c1a4f6bed68692d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/integration_tests/test_train_udf.py", "max_issues_repo_name": "exasol/data-science-utils-python", "max_issues_repo_head_hexsha": "44f4019b30f4945e14a8ee19c1a4f6bed68692d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2021-06-04T15:45:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T12:16:34.000Z", "max_forks_repo_path": "tests/integration_tests/test_train_udf.py", "max_forks_repo_name": "exasol/data-science-utils-python", "max_forks_repo_head_hexsha": "44f4019b30f4945e14a8ee19c1a4f6bed68692d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1515151515, "max_line_length": 161, "alphanum_fraction": 0.6855345912, "include": true, "reason": "from numpy", "num_tokens": 4120}
|
# Probability I:
Expected value and indicators. Bayes' theorem. Bayesian estimation.
# 0. Joint PMFs and PDFs.
- A joint PMF $p_{X,Y}$ of the variables $X$ and $Y$ is defined as
\begin{equation}
p_{X,Y}(x,y)=P(X=x,Y=y)
\end{equation}
- The marginal PMFs of $X$ and $Y$ can be obtained from the joint PMF, using
\begin{equation}
p_{X}(x)=\sum_{y}{p_{X,Y}(x,y)}
\end{equation}
and
\begin{equation}
p_{Y}(y)=\sum_{x}{p_{X,Y}(x,y)}
\end{equation}
Analogously, for jointly distributed random variables $X$ and $Y$ with joint PDF $f_{X,Y}$ we have
\begin{equation}
f_{X,Y}(x,y)=f_{Y}(y)f_{X|Y}(x|y)
\end{equation}
\begin{equation}
f_{X}(x)=\int_{-\infty}^{\infty}f_{Y}(y)f_{X|Y}(x|y)\,dy
\end{equation}
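As a quick numerical sanity check (a minimal sketch, not part of the original exercises), we can verify the marginalization formulas on a small, made-up joint PMF:
```python
import numpy as np
# Hypothetical joint PMF p_{X,Y} on a 2x3 grid (rows: values of X, columns: values of Y)
p_xy = np.array([[0.10, 0.20, 0.10],
                 [0.25, 0.15, 0.20]])
assert np.isclose(p_xy.sum(), 1.0)
p_x = p_xy.sum(axis=1)  # marginal of X: sum over y
p_y = p_xy.sum(axis=0)  # marginal of Y: sum over x
print(p_x, p_y)         # [0.4 0.6] [0.35 0.35 0.3]
```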
#### Exercise 1: Plot two joint distribution functions for normally distributed random variables X, Y, first with equal means and variances and then with different means and variances.
Formula for the multivariate normal distribution:
\begin{equation}
f_X (x_1, ...,x_n)=\frac{1}{(2\pi)^{n/2}|\Sigma|^{1/2}} \exp\left(-\frac{1}{2}(x-\mu)^T \Sigma^{-1} (x-\mu)\right)
\end{equation}
```python
import numpy as np
import matplotlib.pyplot as plt
#Using a Gaussian function like the one used previously
def gauss(x,m,s):
    return 1/(np.sqrt(2*np.pi)*s)*np.exp(-(x-m)**2 / (2*s**2))
x_0=np.linspace(-3,3,600)
x=gauss(x_0,0,1)
y=gauss(x_0,0,1)
xx,yy=np.meshgrid(x,y)
img=xx*yy
plt.contourf(x_0,x_0,img)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
```python
#However, doing this does not give us the multivariate normal distribution given the conditionals of x, y.
#Using scipy's multivariate_normal function we can obtain it; this function takes as parameters
#the mean [x,y] and the covariance matrix [[c1,c2],[c3,c4]]. We can then plot the pdf using rv.pdf
from scipy.stats import multivariate_normal
x, y = np.mgrid[-3:3:1.0/100, -3:3:1.0/100]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
plt.contourf(x, y, rv.pdf(pos))
plt.title('2D Gaussian distribution')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
# 1. Expected value and variance.
For a discrete variable $X$, the expected value is defined as
\begin{equation}
E[X] =\sum_{x}{x p_{X} (x)}
\end{equation}
and for a continuous variable:
\begin{equation}
E[X] =\int_{x}{x f_{X} (x)}\,dx
\end{equation}
and the variance is defined as
\begin{equation}
var[X] = E[(X-E[X])^2]
\end{equation}
#### Exercise 2: Numerically obtain the expected value and the variance of a randomly generated Gamma distribution with parameters $1/\lambda=0.5$ and $k=9$. Plot the distribution and its CDF.
```python
#We plot a gamma distribution with the given parameters
shape,scale=9,0.5
g=np.random.gamma(shape,scale,1000)
(n, bins, patches)=plt.hist(g,bins=100,density=True)
plt.title(r'histogram of the $\Gamma(0.5,9)$ distribution')
plt.show()
#We use the bins and n to plot an estimate of the CDF of this distribution
plt.plot(bins[:-1],np.cumsum(n)/sum(n))
plt.title('CDF estimated from bins')
plt.show()
```
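The exercise also asks for the expected value and variance; a direct numerical estimate from the same sample `g` (to be compared with the theoretical values $E[X]=k/\lambda=4.5$ and $var[X]=k/\lambda^2=2.25$) is a one-liner:
```python
# Sample estimates of E[X] and var[X]; theory for Gamma(k=9, scale=0.5):
# mean = k*scale = 4.5 and variance = k*scale**2 = 2.25
print("E[X]  ≈", np.mean(g))
print("var[X] ≈", np.var(g))
```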
# 2. Bayes' theorem
Going back to the fundamentals of probability, let us discuss the total probability theorem, which states: let $A_1, ..., A_n$ be disjoint events that form a partition of the sample space (every possible outcome is included in exactly one of the events $A_1, ..., A_n$) and assume that $P(A_i)>0$ for all $i$. Then, for every event $B$, we have
\begin{equation}
P(B)=P(A_1 \cap B)+...+P(A_n \cap B)
\end{equation}
\begin{equation}
=P(A_1)P(B|A_1)+...+P(A_n)P(B|A_n).
\end{equation}
Intuitively, this theorem says that we are partitioning the sample space into a number of scenarios $A_i$. The probability that $B$ occurs is then a weighted average of its conditional probability under each scenario, where each scenario is weighted according to its probability. This allows us to compute the probability of various events $B$ for which the probabilities $P(B|A_i)$ are known or easy to obtain. The total probability theorem can be applied repeatedly to compute probabilities in experiments that have a sequential character.
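As a small numerical illustration (with made-up probabilities), the weighted average can be computed directly:
```python
import numpy as np
# Hypothetical partition A_1, A_2, A_3 with priors P(A_i) and likelihoods P(B|A_i)
prior = np.array([0.5, 0.3, 0.2])
p_b_given_a = np.array([0.1, 0.4, 0.8])
p_b = np.sum(prior * p_b_given_a)  # total probability theorem
print(p_b)  # 0.05 + 0.12 + 0.16 = 0.33
```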
## Inference and Bayes' rule.
The total probability theorem is usually used together with Bayes' theorem, which relates conditional probabilities of the form $P(A|B)$ with conditional probabilities of the form $P(B|A)$, in which the order of the conditioning is reversed.
#### Bayes' rule
Let $A_1, A_2, ..., A_n$ be disjoint events that form a partition of the sample space, and assume that $P(A_i)>0$ for all $i$. Then, for any event $B$ such that $P(B)>0$, we have
\begin{equation}
P(A_i|B)=\frac{P(A_i)P(B|A_i)}{P(B)}
\end{equation}
\begin{equation}
=\frac{P(A_i)P(B|A_i)}{P(A_1)P(B|A_1)+...+P(A_n)P(B|A_n)}
\end{equation}
Bayes' rule is usually used for inference. There are a number of "causes" that can produce an "effect". We observe the effect and wish to infer the cause. The events $A_1,...,A_n$ are associated with the causes and the event $B$ represents the effect. The probability $P(B|A_i)$ that the effect is observed when the cause $A_i$ is present contributes to a probabilistic model of the cause-effect relationship. Given that the effect $B$ has been observed, we wish to evaluate the probability $P(A_i|B)$ that the cause $A_i$ is present. $P(A_i|B)$ is generally referred to as the posterior probability of the event $A_i$, while $P(A_i)$ is called the prior probability.
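Continuing the small numerical illustration above, the posterior over the causes given that $B$ was observed follows directly from Bayes' rule:
```python
import numpy as np
prior = np.array([0.5, 0.3, 0.2])        # P(A_i), as above
p_b_given_a = np.array([0.1, 0.4, 0.8])  # P(B|A_i), as above
p_b = np.sum(prior * p_b_given_a)
posterior = prior * p_b_given_a / p_b    # P(A_i|B) by Bayes' rule
print(posterior, posterior.sum())        # [0.1515... 0.3636... 0.4848...] 1.0
```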
##### Note:
It is important to recall the concept of independence, which says that when $P(A|B)=P(A)$, A is independent of B. Therefore, by definition:
\begin{equation}
P(A\cap B)=P(A)P(B)
\end{equation}
In the case where $P(B)=0$, $P(A|B)$ is not defined, but the product relation above still holds. The symmetry of this relation also implies that independence is a symmetric property; that is, if A is independent of B, then B is independent of A, and we can say that A and B are independent events.
It is also important to note that two events A and B are conditionally independent given an event C if
\begin{equation}
P(A\cap B| C)=P(A|C)P(B|C)
\end{equation}
In other words, if we know that C occurred, the additional knowledge that B also occurred does not change the probability of A. Independence of two events A and B with respect to the unconditional probability law does not imply conditional independence, and vice versa.
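A tiny enumeration (a sketch with two fair coin tosses, not from the original text) makes the last point concrete: A and B below are independent, but not conditionally independent given C.
```python
import itertools
# Sample space: two fair coin tosses, each outcome with probability 1/4
outcomes = set(itertools.product("HT", repeat=2))
def p(E):
    return len(E) / len(outcomes)
A = {o for o in outcomes if o[0] == "H"}        # first toss is heads
B = {o for o in outcomes if o[1] == "H"}        # second toss is heads
C = {o for o in outcomes if o.count("H") == 1}  # exactly one head
# Unconditional independence holds: P(A ∩ B) = P(A) P(B)
print(p(A & B), p(A) * p(B))  # 0.25 0.25
# Conditional independence given C fails: P(A ∩ B | C) != P(A|C) P(B|C)
print(p(A & B & C) / p(C), (p(A & C) / p(C)) * (p(B & C) / p(C)))  # 0.0 0.25
```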
# 3. Bayesian inference
Statistical inference is the process of extracting information about an unknown variable or model from available data.
Statistical inference differs from probability theory in fundamental ways. Probability is a completely self-contained area of mathematics, based on axioms, as we have already seen. In probabilistic reasoning we assume a fully specified probabilistic model that obeys these axioms. A mathematical method is then used to quantify the consequences of this model or to answer various questions of interest. In particular, every unambiguous question has a unique correct answer.
Statistics is different. For any given problem there may be multiple reasonable methods, with different answers. In general there is no way to obtain the best method, unless strong assumptions are made and additional restrictions are imposed on the inference.
In Bayesian statistics all assumptions are located in one place, in the form of a prior; Bayesian statisticians argue that this way all assumptions are brought to the surface and are open to scrutiny.
Finally, there are practical considerations. In many cases Bayesian methods are computationally intractable. However, with recent computing capabilities, a large part of the community focuses on making Bayesian methods more practical and applicable.
Key concepts:
- In Bayesian statistics, unknown parameters are treated as random variables with known prior distributions.
- In parameter estimation we want to quantify, in a probabilistic sense, how close the estimator values are to the true parameter values.
- In hypothesis testing, the unknown parameter takes one of finitely many values, one per hypothesis. We want to select a hypothesis with a small probability of error.
- The main Bayesian inference methods are:
    - MAP (Maximum a posteriori probability): among the possible parameter values, select one with maximum conditional probability given the data.
    - LMS (Least Mean Squares): select an estimator/function of the data that minimizes the mean squared error between the parameter and its estimate.
    - Linear Least Mean Squares: select an estimator that is a linear function of the data and minimizes the mean squared error between the parameter and its estimate.
In Bayesian inference the quantity of interest is denoted by $\Theta$ and is modeled as a random variable, or as a finite collection of random variables. Here $\Theta$ may represent physical quantities, such as a velocity or a position, or a set of unknown parameters of a probabilistic model. For simplicity, unless explicitly stated otherwise, we view $\Theta$ as a single random variable.
The objective is to extract information about $\Theta$, based on observing a collection of related random variables $X=(X_1,...,X_n)$, called observations, measurements, or the observation vector. For this we assume knowledge of the joint distribution of $\Theta$ and $X$. That is,
- We assume knowledge of the prior distribution $p_\Theta$ or $f_\Theta$, depending on whether $\Theta$ is discrete or continuous.
- We assume a conditional distribution $p_{X|\Theta}$ or $f_{X|\Theta}$, depending on whether $X$ is discrete or continuous.
Once a particular value $x$ of $X$ has been observed, a complete answer to the inference problem is provided by the posterior distribution. This distribution is determined by the appropriate form of Bayes' rule and encapsulates all the knowledge we can have about $\Theta$ given the available information.
### Summary:
- We start with a prior distribution $p_{\Theta}$ or $f_{\Theta}$ for an unknown random variable $\Theta$.
- We have a model $p_{X|\Theta}$ or $f_{X|\Theta}$ of the observation vector $X$.
- After observing the value $x$ of $X$, we form the posterior distribution of $\Theta$, using the appropriate version of Bayes' rule.
##### Exercise:
Suppose we are dealing with the following two hypotheses
\begin{equation}
H_1:p=0.1,\quad H_2:p=0.2
\end{equation}
where $p=0.2$ is based on a sample proportion of 1 in 5.
First assume that the sample size is $n=5$ with $k=1$, where $k$ is the number of yellow balls we draw among the $n$ balls. We can also assume that the prior probabilities are 0.5 and 0.5, i.e. equal for both hypotheses. The posterior probability of each hypothesis can then be updated using Bayes' rule. Compute the probabilities $P(p=0.1)$ and $P(p=0.2)$ for sample sizes 5, 10, 15 and 20, and draw a conclusion.
```python
from scipy.stats import binom
n=[5,10,15,20]
p1=0.1
p2=0.2
k=1
for n_ in n:
    p_1=binom(n_, p1)
    p_2=binom(n_, p2)
    # Unnormalized posteriors: prior (0.5) times the binomial likelihood;
    # dividing by their sum (nm) gives the normalized posterior of each hypothesis.
    P1=p_1.pmf(k)*0.5
    P2=p_2.pmf(k)*0.5
    nm=P1+P2
    print(".............")
    print("P(H1):%lf for n=%d"%(P1/nm,n_))
    print("P(H2):%lf for n=%d"%(P2/nm,n_))
```
.............
P(H1):0.444723 for n=5
P(H2):0.555277 for n=5
.............
P(H1):0.590710 for n=10
P(H2):0.409290 for n=10
.............
P(H1):0.722283 for n=15
P(H2):0.277717 for n=15
.............
P(H1):0.824151 for n=20
P(H2):0.175849 for n=20
# References.
- Wackerly, D. Mathematical Statistics with Applications. 2008.
- Bertsekas, D. Introduction to Probability. 2008.
- Sivia, D.S. Data Analysis: A Bayesian Tutorial. 2006.
|
{"hexsha": "cd1e5f8003001db5f3d5c28f3a595b756c429cfe", "size": 67897, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "7Estadistica/3_ProbabilidadI.ipynb", "max_stars_repo_name": "sergiogaitan/Study_Guides", "max_stars_repo_head_hexsha": "083acd23f5faa6c6bc404d4d53df562096478e7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-09-12T17:16:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-03T01:37:02.000Z", "max_issues_repo_path": "7Estadistica/3_ProbabilidadI.ipynb", "max_issues_repo_name": "sergiogaitan/Study_Guides", "max_issues_repo_head_hexsha": "083acd23f5faa6c6bc404d4d53df562096478e7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "7Estadistica/3_ProbabilidadI.ipynb", "max_forks_repo_name": "sergiogaitan/Study_Guides", "max_forks_repo_head_hexsha": "083acd23f5faa6c6bc404d4d53df562096478e7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-05-22T12:57:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-03T01:37:07.000Z", "avg_line_length": 154.6628701595, "max_line_length": 12980, "alphanum_fraction": 0.8759886298, "converted": true, "num_tokens": 3932}
|
[STATEMENT]
lemma in_synth_GuardK: "[| X \<in> synth G; GuardK n Ks G |] ==> X \<in> guardK n Ks"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>X \<in> synth G; GuardK n Ks G\<rbrakk> \<Longrightarrow> X \<in> guardK n Ks
[PROOF STEP]
by (drule GuardK_synth, auto)
|
{"llama_tokens": 123, "file": null, "length": 1}
|
/**********************************************************************
* Copyright (c) 2008-2015, Alliance for Sustainable Energy.
* All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********************************************************************/
#ifndef OPENSTUDIO_HVACSYSTEMSCONTROLLER_HPP
#define OPENSTUDIO_HVACSYSTEMSCONTROLLER_HPP
#include "../model/Model.hpp"
#include "../model/ModelObject.hpp"
#include "../model/Loop.hpp"
#include "OSDropZone.hpp"
#include "ModelObjectItem.hpp"
#include "ModelObjectVectorController.hpp"
#include <boost/smart_ptr.hpp>
#include "SOConstants.hpp"
#include "../shared_gui_components/OSQObjectController.hpp"
#include "OSItem.hpp"
#include "../shared_gui_components/OSComboBox.hpp"
#include <QAbstractListModel>
#include <QPointer>
namespace openstudio {
namespace model {
class HVACComponent;
class AirLoopHVAC;
class SetpointManagerScheduled;
}
class LoopView;
class HVACSystemsView;
class HVACControlsView;
class LoopListModel;
class HVACGraphicsView;
class HVACLayoutController;
class HVACControlsController;
class MechanicalVentilationView;
class SingleZoneReheatSPMView;
class NoSupplyAirTempControlView;
class NoMechanicalVentilationView;
class ScheduledSPMView;
class SystemAvailabilityVectorController;
class OSDropZone;
class FollowOATempSPMView;
class OAResetSPMView;
class AirLoopHVACUnitaryHeatPumpAirToAirControlView;
class NoControlsView;
class RefrigerationController;
class RefrigerationGridController;
class RefrigerationGridView;
class VRFController;
class HVACSystemsController : public QObject
{
Q_OBJECT
public:
enum SceneType {TOPOLOGY, CONTROLS};
HVACSystemsController(bool isIP, const model::Model & model);
virtual ~HVACSystemsController();
HVACSystemsView * hvacSystemsView() const;
std::shared_ptr<HVACLayoutController> hvacLayoutController() const;
std::shared_ptr<HVACControlsController> hvacControlsController() const;
model::Model model() const;
// Indicates what object or system to display
// This handle may correspond to a Loop, a ModelObject, such as a WaterUseConnection object,
// or it may be NULL when displaying the service hot water overview.
QString currentHandle() const;
void setCurrentHandle(const QString & handle);
boost::optional<model::Loop> currentLoop() const;
void clearSceneSelection();
public slots:
void updateLater();
private slots:
void update();
void addToModel(AddToModelEnum addToModelEnum);
void onAddSystemClicked();
void onRemoveLoopClicked();
void onShowTopologyClicked();
void onShowControlsClicked();
void onShowGridClicked();
void onObjectAdded(const WorkspaceObject&);
void onObjectRemoved(const WorkspaceObject&);
void onObjectChanged();
void onSystemComboBoxIndexChanged(int i);
void toggleUnits(bool displayIP);
private:
std::vector<IddObjectType> systemComboBoxTypes() const;
QPointer<HVACSystemsView> m_hvacSystemsView;
std::shared_ptr<HVACLayoutController> m_hvacLayoutController;
std::shared_ptr<HVACControlsController> m_hvacControlsController;
std::shared_ptr<RefrigerationController> m_refrigerationController;
std::shared_ptr<RefrigerationGridController> m_refrigerationGridController;
std::shared_ptr<VRFController> m_vrfController;
QString m_currentHandle;
bool m_dirty;
model::Model m_model;
bool m_isIP;
signals:
void toggleUnitsClicked(bool displayIP);
};
class HVACControlsController : public QObject
{
Q_OBJECT;
public:
HVACControlsController(HVACSystemsController * hvacSystemsController);
virtual ~HVACControlsController();
HVACControlsView * hvacControlsView() const;
NoControlsView * noControlsView() const;
boost::optional<model::AirLoopHVAC> airLoopHVAC() const;
public slots:
void updateLater();
private slots:
void update();
void onEconomizerComboBoxIndexChanged(int index);
void onVentilationCalcMethodComboBoxIndexChanged(int index);
void onNightCycleComboBoxIndexChanged(int index);
void onControlZoneComboBoxChanged(int index);
void onUnitaryHeatPumpControlZoneChanged(int index);
private:
QPointer<HVACControlsView> m_hvacControlsView;
QPointer<NoControlsView> m_noControlsView;
QPointer<MechanicalVentilationView> m_mechanicalVentilationView;
QPointer<SingleZoneReheatSPMView> m_singleZoneReheatSPMView;
QPointer<ScheduledSPMView> m_scheduledSPMView;
QPointer<NoSupplyAirTempControlView> m_noSupplyAirTempControlView;
QPointer<NoMechanicalVentilationView> m_noMechanicalVentilationView;
QPointer<OSDropZone> m_systemAvailabilityDropZone;
QPointer<OSDropZone> m_supplyAirTempScheduleDropZone;
QPointer<FollowOATempSPMView> m_followOATempSPMView;
QPointer<OAResetSPMView> m_oaResetSPMView;
QPointer<AirLoopHVACUnitaryHeatPumpAirToAirControlView> m_airLoopHVACUnitaryHeatPumpAirToAirControlView;
QPointer<HVACSystemsController> m_hvacSystemsController;
bool m_dirty;
};
class HVACLayoutController : public QObject
{
Q_OBJECT;
public:
HVACLayoutController(HVACSystemsController * hvacSystemsController);
virtual ~HVACLayoutController();
std::shared_ptr<RefrigerationGridController> refrigerationGridController() const;
HVACGraphicsView * hvacGraphicsView() const;
void clearSceneSelection();
public slots:
void updateLater();
private slots:
void update();
void onModelObjectSelected(model::OptionalModelObject & modelObject, bool readOnly);
void addLibraryObjectToModelNode(OSItemId itemid, model::HVACComponent & comp);
void removeModelObject(model::ModelObject & modelObject);
void goToOtherLoop( model::ModelObject & modelObject );
void addLibraryObjectToTopLevel(OSItemId itemid);
void goToServiceWaterScene();
private:
QPointer<HVACGraphicsView> m_hvacGraphicsView;
QPointer<HVACSystemsController> m_hvacSystemsController;
std::shared_ptr<RefrigerationGridController> m_refrigerationGridController;
bool m_dirty;
};
class SystemAvailabilityVectorController : public ModelObjectVectorController
{
Q_OBJECT;
public:
virtual ~SystemAvailabilityVectorController() {}
boost::optional<model::AirLoopHVAC> airLoopHVAC();
void attach(const model::ModelObject& modelObject);
void detach();
public slots:
void reportItemsLater();
void reportItems();
protected:
std::vector<OSItemId> makeVector();
void onReplaceItem(OSItem * currentItem, const OSItemId& replacementItemId);
void onDrop(const OSItemId& itemId);
private:
bool m_reportScheduled;
};
class SupplyAirTempScheduleVectorController : public ModelObjectVectorController
{
Q_OBJECT;
public:
virtual ~SupplyAirTempScheduleVectorController() {}
boost::optional<model::SetpointManagerScheduled> setpointManagerScheduled();
void attach(const model::ModelObject& modelObject);
void detach();
public slots:
void reportItemsLater();
void reportItems();
protected:
std::vector<OSItemId> makeVector();
void onReplaceItem(OSItem * currentItem, const OSItemId& replacementItemId);
void onDrop(const OSItemId& itemId);
private:
bool m_reportScheduled;
};
} // openstudio
#endif // OPENSTUDIO_HVACSYSTEMSCONTROLLER_HPP
|
{"hexsha": "cd98abb27addc1f51225eb7ca899a82cae297490", "size": 7945, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "openstudiocore/src/openstudio_lib/HVACSystemsController.hpp", "max_stars_repo_name": "pepsi7959/OpenstudioThai", "max_stars_repo_head_hexsha": "fb18afb8b983f71dd5eb171e753dac7d9a4b811b", "max_stars_repo_licenses": ["blessing"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2015-06-28T09:06:24.000Z", "max_stars_repo_stars_event_max_datetime": "2015-06-28T09:06:24.000Z", "max_issues_repo_path": "openstudiocore/src/openstudio_lib/HVACSystemsController.hpp", "max_issues_repo_name": "pepsi7959/OpenstudioThai", "max_issues_repo_head_hexsha": "fb18afb8b983f71dd5eb171e753dac7d9a4b811b", "max_issues_repo_licenses": ["blessing"], "max_issues_count": 11.0, "max_issues_repo_issues_event_min_datetime": "2015-05-05T16:16:33.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-10T08:15:50.000Z", "max_forks_repo_path": "openstudiocore/src/openstudio_lib/HVACSystemsController.hpp", "max_forks_repo_name": "pepsi7959/OpenstudioThai", "max_forks_repo_head_hexsha": "fb18afb8b983f71dd5eb171e753dac7d9a4b811b", "max_forks_repo_licenses": ["blessing"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2017-09-23T12:51:13.000Z", "max_forks_repo_forks_event_max_datetime": "2017-09-23T12:51:13.000Z", "avg_line_length": 23.1632653061, "max_line_length": 106, "alphanum_fraction": 0.7740717432, "num_tokens": 1876}
|
"""Code for simulations in [1].
[1] Håvard Kvamme and Ørnulf Borgan. Continuous and Discrete-Time Survival Prediction
with Neural Networks. arXiv preprint arXiv:1910.06724, 2019.
https://arxiv.org/pdf/1910.06724.pdf
"""
import numpy as np
import pandas as pd
import torchtuples as tt
from pycox.simulations import base
_TIMES = np.linspace(0, 100, 1001)
class SimBase(base._SimBase):
times = _TIMES
num_weights = NotImplemented
def __init__(self, covs_per_weight=5, betas=None):
self.covs_per_weight = covs_per_weight
self.betas = betas if betas else self.make_betas()
def make_betas(self, func=lambda m: np.random.normal(0, 1, m)):
return tuple(func(self.covs_per_weight) for _ in range(self.num_weights))
@staticmethod
def _sample_uniform(n):
return np.random.uniform(-1, 1, (n, 1))
def sample_weights(self, n):
return [self._sample_uniform(n) for _ in range(self.num_weights)]
def sample_covs(self, weights):
return [self._conditional_covariate_sampling(beta, weight)
for beta, weight in zip(self.betas, weights)]
def surv_df(self, logit_haz):
assert len(self.times) == (logit_haz.shape[1] + 1), 'Need dims to be correct'
haz = sigmoid(logit_haz)
surv = np.ones((len(self.times), len(haz)))
surv[1:, :] = haz2surv(haz).transpose()
return pd.DataFrame(surv, index=self.times)
@staticmethod
def _conditional_covariate_sampling(beta, weight):
beta, weight = beta.reshape(-1), weight.reshape(-1)
size = len(weight), len(beta)
u = np.random.uniform(-1, 1, size=size)
u[:, 0] = weight
x = np.empty_like(u)
x[:, :-1] = -np.diff(u)/beta[:-1]
x[:, -1] = (u[:, 0] - x[:, :-1].dot(beta[:-1]))/beta[-1]
return x
def sample_event_times(self, logit_haz):
haz = sigmoid(logit_haz)
assert haz.shape[1] == len(self.times)-1, 'Fix dims'
samp = np.random.uniform(0, 1, haz.shape)
hit = np.zeros((len(haz), len(self.times)), 'bool')
hit[:, 1:] = samp < haz
        idx_first = hit.argmax(1)
        durations = self.times[idx_first]
        # hit[:, 0] is always False, so idx_first == 0 means no event was sampled
        # within the time grid; such individuals get a NaN duration (censored later).
        durations[idx_first == 0] = np.nan
        return durations
def simulate(self, n, surv_df=False):
weights = self.sample_weights(n)
return self.simulate_from_weights(weights, surv_df)
def simulate_from_weights(self, weights, surv_df=False):
logit_haz = self.logit_haz(self.times[1:], *weights)
durations = self.sample_event_times(logit_haz)#.astype('float32')
is_nan = np.isnan(durations)
events = np.ones_like(durations)
events[is_nan] = 0.
durations[is_nan] = self.times[-1]
covs = self.sample_covs(weights)
covs = tt.tuplefy(covs).flatten()
covs = np.concatenate(covs, axis=1)#.astype('float32')
surv = self.surv_df(logit_haz) if surv_df is True else None
return dict(covs=covs, durations=durations, events=events, weights=weights,
surv_df=surv)
def covs2weights(self, covs):
return [cov.dot(beta).reshape(-1, 1) for cov, beta in zip(covs, self.betas)]
def covs2surv_df(self, covs):
weights = self.covs2weights(covs)
logit_haz = self.logit_haz(self.times[1:], *weights)
return self.surv_df(logit_haz)
def logit_haz(self, times, *args, **kwargs):
raise NotImplementedError
@staticmethod
def dict2df(data):
"""Make a pd.DataFrame from the dict obtained when simulating.
Arguments:
data {dict} -- Dict from simulation.
Returns:
pd.DataFrame -- A DataFrame
"""
return base.dict2df(data, False)
class SimSin(SimBase):
num_weights = 4
def logit_haz(self, times, a, bb, c, dd):
"""We expect a, bb, c, dd to be Unif[-1, 1] and transform them to
the desired ranges. Use '_logit_haz' to skip this transform.
"""
a = a * 5 # Unif[-5, 5]
idx = ((bb + 1) / 2 * 5).astype('int')
bb = np.arange(-1, 4)[idx] # Unif[{-1, 0, 1, 2, 3}]
c = c * 15 # Unif[-15, 15]
dd = dd * 2 # Unif[-2, 2]
return self._logit_haz(times, a, bb, c, dd)
@staticmethod
def _logit_haz(times, a, bb, c, dd):
b = 2 * np.pi / 100 * np.power(2., bb)
d = dd - 6 - abs(a/2)
return a * np.sin(b*(times + c)) + d
class SimConstHaz(SimBase):
num_weights = 1
def logit_haz(self, times, a):
"""Expect a to be Unit[-1, 1]."""
a = (a + 1) / 2 * 5 - 8 # Unif[-8, -3]
return self._logit_haz(times, a)
@staticmethod
def _logit_haz(times, a):
return a * np.ones((len(a), len(times)))
class SimAcceleratingHaz(SimBase):
num_weights = 1
def logit_haz(self, times, aa):
"""Expect a to be Unit[-1, 1]."""
aa = (aa + 1) / 2 * 6 - 5 # Unif[-5, 1]
a = sigmoid(aa)
return self._logit_haz(times, a)
@staticmethod
def _logit_haz(times, a):
start = -10
return a * times + start
class SimConstHazIndependentOfWeights(SimBase):
"""Constant hazards independent of weights and covariates.
Covariates are simply a column of zeros and can be removed from analysis.
None of the call arguments matter, as they are set in the constructor.
"""
num_weights = 1
def __init__(self, *args, **kwargs):
covs_per_weight = 1
betas = np.array([0.])
super().__init__(covs_per_weight, betas)
def sample_weights(self, n):
return [np.zeros((n, 1))]
def sample_covs(self, weights):
return weights
def covs2weights(self, covs):
return covs
def logit_haz(self, times, a):
return -7. * np.ones((len(a), len(times)))
class SimUniform(SimBase):
num_weights = 1
def __init__(self, s_end=0.2, *args, **kwargs):
self.s_end = s_end
covs_per_weight = 1
betas = np.array([0.])
super().__init__(covs_per_weight, betas)
def logit_haz(self, times, w):
n, m = len(w), len(times)
j = np.arange(1, m+1, dtype='float').reshape(1, -1).repeat(n, axis=0)
return -np.log(m/(1-self.s_end) - j + 1)
def sample_weights(self, n):
return [np.zeros((n, 1))]
def sample_covs(self, weights):
return weights
def covs2weights(self, covs):
return covs
class _SimCombine(SimBase):
sims = NotImplemented
alpha_range = NotImplemented
    _first_pref = NotImplemented  # mixing preference for the first component; set by subclasses
def sample_weights(self, n):
weights = [sim.sample_weights(n) for sim in self.sims]
return [super().sample_weights(n)] + weights
def sample_covs(self, weights):
alpha = weights[0]
covs = [sim.sample_covs(w) for sim, w in zip(self.sims, weights[1:])]
return [super().sample_covs(alpha)] + covs
def logit_haz(self, times, *weights):
alpha = np.concatenate(weights[0], axis=1)
alpha[:, 0] += self._first_pref
alpha = softmax(alpha * self.alpha_range)
logit_haz = 0.
for i, (sim, w) in enumerate(zip(self.sims, weights[1:])):
logit_haz += sim.logit_haz(self.times[1:], *w) * alpha[:, [i]]
return logit_haz
def covs2weights(self, covs):
weights = [sim.covs2weights(cov) for sim, cov in zip(self.sims, covs[1:])]
return [super().covs2weights(covs[0])] + weights
def covs2surv_df(self, covs):
weights = self.covs2weights(covs)
logit_haz = self.logit_haz(self.times[1:], *weights)
return self.surv_df(logit_haz)
class SimSinAccConst(_SimCombine):
def __init__(self, covs_per_weight=5, alpha_range=5., sin_pref=0.6):
self.num_weights = 3
super().__init__(covs_per_weight)
self.alpha_range = alpha_range
self._first_pref = sin_pref
self.sim_sin = SimSin(covs_per_weight)
self.sim_const = SimConstHaz(covs_per_weight)
self.sim_acc = SimAcceleratingHaz(covs_per_weight)
self.sims = [self.sim_sin, self.sim_const, self.sim_acc]
class SimConstAcc(_SimCombine):
def __init__(self, covs_per_weight=5, alpha_range=5., const_pref=2):
self.num_weights = 2
super().__init__(covs_per_weight)
self.alpha_range = alpha_range
self._first_pref = const_pref
self.sim_const = SimConstHaz(covs_per_weight)
self.sim_acc = SimAcceleratingHaz(covs_per_weight)
self.sims = [self.sim_const, self.sim_acc]
class _SimStudyBase:
sim_surv = NotImplemented
sim_censor = NotImplemented
@staticmethod
def _combine_surv_and_censor(surv, censor):
surv['durations_true'], surv['events_true'] = surv['durations'].copy(), surv['events'].copy()
is_censor = censor['durations'] < surv['durations']
surv['durations'][is_censor] = censor['durations'][is_censor]
surv['events'][is_censor] = 0.
return dict(**surv, **{'censor_'+str(k): v for k, v in censor.items()})
def simulate(self, n, surv_df=False, censor_df=False, binary_surv=False):
if binary_surv:
if not (surv_df and censor_df):
raise ValueError("To produce binary_surv, you need to also set surv_df and censor_df to True")
surv = self.sim_surv.simulate(n, surv_df)
censor = self.sim_censor.simulate(n, censor_df)
res = self._combine_surv_and_censor(surv, censor)
if binary_surv:
res['binary_surv_df'] = self.binary_surv(res)
return res
@staticmethod
def dict2df(data, add_true=True, add_censor_covs=False):
"""Make a pd.DataFrame from the dict obtained when simulating.
Arguments:
data {dict} -- Dict from simulation.
Keyword Arguments:
add_true {bool} -- If we should include the true duration and censoring times
(default: {True})
add_censor_covs {bool} -- If we should include the censor covariates as covariates.
(default: {False})
Returns:
pd.DataFrame -- A DataFrame
"""
return base.dict2df(data, add_true, add_censor_covs)
class SimStudyIndepSurvAndCens(_SimStudyBase):
def __init__(self, sim_surv, sim_censor):
self.sim_surv = sim_surv
self.sim_censor = sim_censor
class SimStudySACCensorConst(_SimStudyBase):
"""Simulation study from [1].
It combines three sources to the logit-hazard: A sin function, an increasing function
and a constant function.
See paper for details https://arxiv.org/pdf/1910.06724.pdf.
Keyword Arguments:
covs_per_weight {int} -- Number of covariates per weight (gamma in paper)
(default: {5})
alpha_range {[type]} -- Controls how the mixing between the three logit-hazards.
High alpha is equivalent to picking one of them, while low is equivalent to
a more homogeneous mixing. (default: {5.})
sin_pref {float} -- Preference for the SimSin in the mixing. (default: {0.6})
References:
[1] Håvard Kvamme and Ørnulf Borgan. Continuous and Discrete-Time Survival Prediction
with Neural Networks. arXiv preprint arXiv:1910.06724, 2019.
https://arxiv.org/pdf/1910.06724.pdf
"""
def __init__(self, covs_per_weight=5, alpha_range=5., sin_pref=0.6):
self.sim_surv = SimSinAccConst(covs_per_weight, alpha_range, sin_pref)
self.sim_censor = SimConstHazIndependentOfWeights()
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def haz2surv(haz, eps=1e-7):
return np.exp(np.log((1 - haz) + eps).cumsum(1))
def softmax(x):
exp = np.exp(x)
return exp / exp.sum(1, keepdims=True)
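# Illustrative usage sketch (not part of the original module): simulate the study
# from [1] and build a DataFrame; assumes pycox is installed so `base.dict2df` works.
if __name__ == "__main__":
    np.random.seed(1234)
    sim = SimStudySACCensorConst(covs_per_weight=5)
    data = sim.simulate(500)
    df = sim.dict2df(data)  # static method inherited from _SimStudyBase
    print(df.head())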
|
{"hexsha": "bea01f63f9d2c932769aeef0de988ba77fb1be69", "size": 11784, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycox/simulations/discrete_logit_hazard.py", "max_stars_repo_name": "nikolase90/pycox", "max_stars_repo_head_hexsha": "1c780253da7bab7eba0dc02e1436a68a9b812a66", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-01T11:49:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-01T11:49:31.000Z", "max_issues_repo_path": "pycox/simulations/discrete_logit_hazard.py", "max_issues_repo_name": "ailzy/pycox", "max_issues_repo_head_hexsha": "72900692e3687d607740e975f15b57fa3675391b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-21T14:23:55.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-21T14:23:55.000Z", "max_forks_repo_path": "pycox/simulations/discrete_logit_hazard.py", "max_forks_repo_name": "ailzy/pycox", "max_forks_repo_head_hexsha": "72900692e3687d607740e975f15b57fa3675391b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7610619469, "max_line_length": 110, "alphanum_fraction": 0.6244059742, "include": true, "reason": "import numpy", "num_tokens": 3317}
|
"""
File contains everything required to load, provide expert trajectories to the discriminator.
"""
import random
import glob
import os
import math
import pickle
import itertools
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
def pad_collate(traj_batch):
"""
collate function that pads with zeros for variable lenght data-points.
pass into the dataloader object.
"""
lens = [x.shape[0] for x in traj_batch]
padded_traj = pad_sequence(traj_batch, batch_first=True, padding_value=0)
    # labels used to train the discriminator: expert trajectories get label 0
    # (the "gen_is_high" convention: generator rollouts get the high label, 1)
    labels = torch.zeros(len(lens))
return padded_traj, torch.tensor(lens, dtype=torch.float), labels
class ExpertDataset(Dataset):
"""
dataset that stores the saved traj
"""
def __init__(self, saved_pkl, obs_only: bool = True):
# expert_data = [[obs_1, acts_1], [obs_2, acts_2]...]
with open(saved_pkl, "rb") as f:
self.expert_data = pickle.load(f)
self.obs_only = obs_only
def __len__(self):
return len(self.expert_data)
def __getitem__(self, i):
if self.obs_only:
traj = self.expert_data[i][0]
else:
# squash obs and act together
traj = None # will implement later
return traj
class ExpertDataLoader:
def __init__(
self,
saved_pkl,
obs_only: bool = True,
expert_batch_size: int = 32,
shuffle: bool = True,
):
self.dataset = ExpertDataset(saved_pkl, obs_only)
self.expert_batch_size = expert_batch_size
self.dataloader = DataLoader(
self.dataset,
batch_size=expert_batch_size,
shuffle=shuffle,
num_workers=0,
drop_last=True,
collate_fn=pad_collate,
)
def get_expert_batch(self):
return next(iter(self.dataloader))
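# Illustrative usage sketch (not part of the original module). The pickle path
# "expert_trajs.pkl" is hypothetical; it should contain [[obs_1, acts_1], ...]
# as described in ExpertDataset.
if __name__ == "__main__":
    loader = ExpertDataLoader("expert_trajs.pkl", obs_only=True, expert_batch_size=32)
    padded_traj, lens, labels = loader.get_expert_batch()
    print(padded_traj.shape, lens, labels)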
|
{"hexsha": "b1a5a9929f2da123257a8dbcdcf3138770969fb2", "size": 1943, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/expert_data_loader.py", "max_stars_repo_name": "sen-pai/Reccurent-GAIL", "max_stars_repo_head_hexsha": "67a1938fe584171d95cada5de58c8a516aa3f65f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-11T02:59:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T02:59:11.000Z", "max_issues_repo_path": "utils/expert_data_loader.py", "max_issues_repo_name": "sen-pai/Reccurent-GAIL", "max_issues_repo_head_hexsha": "67a1938fe584171d95cada5de58c8a516aa3f65f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/expert_data_loader.py", "max_forks_repo_name": "sen-pai/Reccurent-GAIL", "max_forks_repo_head_hexsha": "67a1938fe584171d95cada5de58c8a516aa3f65f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2875, "max_line_length": 93, "alphanum_fraction": 0.6387030365, "include": true, "reason": "import numpy", "num_tokens": 452}
|
import json
from datetime import datetime
from decimal import Decimal
import numpy as np
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
"""If input object is an ndarray it will be converted into a dict
holding dtype, shape and the data, base64 encoded.
"""
numpy_types = (
np.bool_,
# np.bytes_, -- python `bytes` class is not json serializable
# np.complex64, -- python `complex` class is not json serializable
# np.complex128, -- python `complex` class is not json serializable
# np.complex256, -- python `complex` class is not json serializable
# np.datetime64, -- python `datetime.datetime` class is not json serializable
np.float16,
np.float32,
np.float64,
# np.float128, -- special handling below
np.int8,
np.int16,
np.int32,
np.int64,
# np.object_ -- should already be evaluated as python native
np.str_,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.void,
)
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, numpy_types):
return obj.item()
elif isinstance(obj, np.float128):
return obj.astype(np.float64).item()
elif isinstance(obj, Decimal):
return str(obj)
elif isinstance(obj, datetime):
return str(obj)
elif obj is np.ma.masked:
return str(np.NaN)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
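# Illustrative usage sketch (not part of the original module): serialize a payload
# containing numpy, Decimal and datetime values with the custom encoder.
if __name__ == "__main__":
    payload = {
        "array": np.arange(3),                # -> [0, 1, 2]
        "flag": np.bool_(True),               # -> true
        "price": Decimal("9.99"),             # -> "9.99"
        "when": datetime(2020, 1, 1, 12, 0),  # -> "2020-01-01 12:00:00"
    }
    print(json.dumps(payload, cls=CustomEncoder))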
|
{"hexsha": "a4f44c39b8d139b23b4989536cc4a3f8c8a6896f", "size": 1738, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/webservice/webmodel/CustomEncoder.py", "max_stars_repo_name": "ngachung/incubator-sdap-nexus", "max_stars_repo_head_hexsha": "38e768694fcc142e2d88283cb1e44e05f88da847", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-11-16T07:36:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-07T00:02:20.000Z", "max_issues_repo_path": "analysis/webservice/webmodel/CustomEncoder.py", "max_issues_repo_name": "ngachung/incubator-sdap-nexus", "max_issues_repo_head_hexsha": "38e768694fcc142e2d88283cb1e44e05f88da847", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2018-01-11T00:50:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T23:08:07.000Z", "max_forks_repo_path": "analysis/webservice/webmodel/CustomEncoder.py", "max_forks_repo_name": "ngachung/incubator-sdap-nexus", "max_forks_repo_head_hexsha": "38e768694fcc142e2d88283cb1e44e05f88da847", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2017-11-16T07:36:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T20:48:46.000Z", "avg_line_length": 35.4693877551, "max_line_length": 90, "alphanum_fraction": 0.5667433832, "include": true, "reason": "import numpy", "num_tokens": 368}
|
import numpy as np
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes.dtypes import ExtensionDtype
import pandas as pd
import pandas._testing as tm
from .base import BaseExtensionTests
class BaseInterfaceTests(BaseExtensionTests):
"""Tests that the basic interface is satisfied."""
# ------------------------------------------------------------------------
# Interface
# ------------------------------------------------------------------------
def test_len(self, data):
assert len(data) == 100
def test_size(self, data):
assert data.size == 100
def test_ndim(self, data):
assert data.ndim == 1
def test_can_hold_na_valid(self, data):
# GH-20761
assert data._can_hold_na is True
def test_contains(self, data, data_missing):
# GH-37867
        # Tests for membership checks. Membership checks for nan-likes are tricky and
        # the settled-on rule is: `nan_like in arr` is True if nan_like is
# arr.dtype.na_value and arr.isna().any() is True. Else the check returns False.
na_value = data.dtype.na_value
# ensure data without missing values
data = data[~data.isna()]
# first elements are non-missing
assert data[0] in data
assert data_missing[0] in data_missing
# check the presence of na_value
assert na_value in data_missing
assert na_value not in data
# the data can never contain other nan-likes than na_value
for na_value_obj in tm.NULL_OBJECTS:
if na_value_obj is na_value:
continue
assert na_value_obj not in data
assert na_value_obj not in data_missing
def test_memory_usage(self, data):
s = pd.Series(data)
result = s.memory_usage(index=False)
assert result == s.nbytes
def test_array_interface(self, data):
result = np.array(data)
assert result[0] == data[0]
result = np.array(data, dtype=object)
expected = np.array(list(data), dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_is_extension_array_dtype(self, data):
assert is_extension_array_dtype(data)
assert is_extension_array_dtype(data.dtype)
assert is_extension_array_dtype(pd.Series(data))
assert isinstance(data.dtype, ExtensionDtype)
def test_no_values_attribute(self, data):
# GH-20735: EA's with .values attribute give problems with internal
# code, disallowing this for now until solved
assert not hasattr(data, "values")
assert not hasattr(data, "_values")
def test_is_numeric_honored(self, data):
result = pd.Series(data)
assert result._mgr.blocks[0].is_numeric is data.dtype._is_numeric
def test_isna_extension_array(self, data_missing):
# If your `isna` returns an ExtensionArray, you must also implement
# _reduce. At the *very* least, you must implement any and all
na = data_missing.isna()
if is_extension_array_dtype(na):
assert na._reduce("any")
assert na.any()
assert not na._reduce("all")
assert not na.all()
assert na.dtype._is_boolean
def test_copy(self, data):
# GH#27083 removing deep keyword from EA.copy
assert data[0] != data[1]
result = data.copy()
data[1] = data[0]
assert result[1] != result[0]
def test_view(self, data):
# view with no dtype should return a shallow copy, *not* the same
# object
assert data[1] != data[0]
result = data.view()
assert result is not data
assert type(result) == type(data)
result[1] = result[0]
assert data[1] == data[0]
# check specifically that the `dtype` kwarg is accepted
data.view(dtype=None)
|
{"hexsha": "6a4ff68b4580f119dabdb09aad427196ff2ff601", "size": 3932, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandas/tests/extension/base/interface.py", "max_stars_repo_name": "CJL89/pandas", "max_stars_repo_head_hexsha": "6210077d32a9e9675526ea896e6d1f9189629d4a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 603, "max_stars_repo_stars_event_min_datetime": "2020-12-23T13:49:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:38:03.000Z", "max_issues_repo_path": "pandas/tests/extension/base/interface.py", "max_issues_repo_name": "CJL89/pandas", "max_issues_repo_head_hexsha": "6210077d32a9e9675526ea896e6d1f9189629d4a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 387, "max_issues_repo_issues_event_min_datetime": "2020-12-15T14:54:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T07:00:21.000Z", "max_forks_repo_path": "pandas/tests/extension/base/interface.py", "max_forks_repo_name": "CJL89/pandas", "max_forks_repo_head_hexsha": "6210077d32a9e9675526ea896e6d1f9189629d4a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2021-03-26T03:12:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T10:15:10.000Z", "avg_line_length": 32.4958677686, "max_line_length": 88, "alphanum_fraction": 0.6177517803, "include": true, "reason": "import numpy", "num_tokens": 877}
|
module MixedModelsSim
using LinearAlgebra
using MixedModels
using PooledArrays
using PrettyTables
using Random
using Statistics
using Tables
using MixedModels: replicate
export
create_re,
create_theta,
createθ,
cyclicshift,
factorproduct,
flatlowertri,
nlevels,
nlevstbl,
#withinitem,
pooled!,
power_table,
simdat_crossed,
update!
export pretty_table, @pt # re-exports
include("utilities.jl")
include("power.jl")
include("simdat.jl")
end # module
|
{"hexsha": "71e891e14d39ab900ed917c94e669272f3131ac6", "size": 505, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MixedModelsSim.jl", "max_stars_repo_name": "RePsychLing/MixedModelsSim.jl", "max_stars_repo_head_hexsha": "0f39f5b088996da40290727744014405420f2953", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-02-03T19:51:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-05T23:17:07.000Z", "max_issues_repo_path": "src/MixedModelsSim.jl", "max_issues_repo_name": "RePsychLing/MixedModelsSim.jl", "max_issues_repo_head_hexsha": "0f39f5b088996da40290727744014405420f2953", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2020-02-14T11:14:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T13:26:21.000Z", "max_forks_repo_path": "src/MixedModelsSim.jl", "max_forks_repo_name": "RePsychLing/MixedModelsSim.jl", "max_forks_repo_head_hexsha": "0f39f5b088996da40290727744014405420f2953", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-14T12:07:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-13T11:01:06.000Z", "avg_line_length": 14.4285714286, "max_line_length": 37, "alphanum_fraction": 0.7267326733, "num_tokens": 130}
|
%%
%% $Id$
%%
%% Copyright 1989-2014 MINES ParisTech
%%
%% This file is part of PIPS.
%%
%% PIPS is free software: you can redistribute it and/or modify it
%% under the terms of the GNU General Public License as published by
%% the Free Software Foundation, either version 3 of the License, or
%% any later version.
%%
%% PIPS is distributed in the hope that it will be useful, but WITHOUT ANY
%% WARRANTY; without even the implied warranty of MERCHANTABILITY or
%% FITNESS FOR A PARTICULAR PURPOSE.
%%
%% See the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public License
%% along with PIPS. If not, see <http://www.gnu.org/licenses/>.
%%
\documentclass{article}
\usepackage{newgen_domain}
\usepackage[backref,pagebackref]{hyperref}
\title{C Parser Global Context}
\author{Nga Nguyen}
\begin{document}
\maketitle
\section{Introduction}
The C parser context is used to store information such as
the scope, type and storage of the current entity, given by the decl\_spec\_list,
which are used later by the declarator to create the entity.
\domain{import entity from "ri.newgen"}
\domain{import type from "ri.newgen"}
\domain{import storage from "ri.newgen"}
\domain{import qualifier from "ri.newgen"}
\domain{c\_parser\_context = scope:string x type:type x storage:storage x qualifiers:qualifier* x typedef:bool x static:bool}
\end{document}
|
{"hexsha": "74dba79b0d3854ed13a64236d1a6da62efca03ed", "size": 1414, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "packages/PIPS/pips/src/Documentation/newgen/c_parser_private.tex", "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_issues_repo_path": "packages/PIPS/pips/src/Documentation/newgen/c_parser_private.tex", "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_forks_repo_path": "packages/PIPS/pips/src/Documentation/newgen/c_parser_private.tex", "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "avg_line_length": 28.28, "max_line_length": 125, "alphanum_fraction": 0.7489391796, "num_tokens": 354}
|
#%%
from os import listdir
from os.path import isfile, join
import cv2
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
# not used in this stub but often useful for finding various files
#project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from scipy import ndimage
#%%
imgDir = "src/data/tests/Team/"
imgs = {}
srcToIndex = {}
srcIndex = []
for f in listdir(imgDir):
if isfile(join(imgDir, f)):
src = join(imgDir, f)
imgs[f] = cv.cvtColor(cv.imread(src), cv.COLOR_BGR2RGB)
srcToIndex[f] = len(srcIndex)
srcIndex.append(f)
def apply_brightness_contrast(input_img, brightness = 0, contrast = 0):
if brightness != 0:
if brightness > 0:
shadow = brightness
highlight = 255
else:
shadow = 0
highlight = 255 + brightness
alpha_b = (highlight - shadow)/255
gamma_b = shadow
buf = cv2.addWeighted(input_img, alpha_b, input_img, 0, gamma_b)
else:
buf = input_img.copy()
if contrast != 0:
f = 131*(contrast + 127)/(127*(131-contrast))
alpha_c = f
gamma_c = 127*(1-f)
buf = cv2.addWeighted(buf, alpha_c, buf, 0, gamma_c)
return buf
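# Minimal usage sketch (parameter values are illustrative, not tuned):
#   boosted = apply_brightness_contrast(imgs[srcIndex[0]], brightness=30, contrast=20)
#   plt.imshow(boosted); plt.show()
# parseTeam below boosts contrast, then OCRs a single text line (--psm 7)
# restricted to capital letters via tessedit_char_whitelist.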
import pytesseract
def parseTeam(img):
    img = apply_brightness_contrast(img, contrast=20)
text = pytesseract.image_to_string(img, config="-c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ -c preserve_interword_spaces=1 --psm 7")
return text, img
errors = np.zeros((len(imgs),1))
for src in imgs.keys():
expected, version = src.split(" ")
expected = expected.replace("_", "")
result, resultImg = parseTeam(imgs[src])
if result != expected:
#plt.imshow(resultImg);
#plt.show();
print("ERR: Expected '{}'; Parsed '{}'".format(expected, result))
errors[srcToIndex[src],0] = 1
sns.heatmap(data=errors, yticklabels=srcIndex, xticklabels=["Errors"])
totalErrors = errors.sum(axis=0)[0]
print("Total errors: ", totalErrors)
accuracy = (1 - (totalErrors / len(imgs)))*100
print("Accuracy: ", accuracy,"%")
|
{"hexsha": "e3a501cb86c283cec902e599dc2511d5000db576", "size": 2377, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/team-detection.py", "max_stars_repo_name": "helldragger/MetaWatch", "max_stars_repo_head_hexsha": "3ef0d7bcf5603b57402f79fff90c84b20516fe82", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-06T16:52:34.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-06T16:52:34.000Z", "max_issues_repo_path": "notebooks/team-detection.py", "max_issues_repo_name": "helldragger/MetaWatch", "max_issues_repo_head_hexsha": "3ef0d7bcf5603b57402f79fff90c84b20516fe82", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/team-detection.py", "max_forks_repo_name": "helldragger/MetaWatch", "max_forks_repo_head_hexsha": "3ef0d7bcf5603b57402f79fff90c84b20516fe82", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0113636364, "max_line_length": 146, "alphanum_fraction": 0.6558687421, "include": true, "reason": "import numpy,from scipy", "num_tokens": 620}
|
from __future__ import absolute_import, division, print_function
import os
import sys
import h5py
import numpy as np
from cctbx import factor_ev_angstrom
from scitbx.array_family import flex
from dxtbx.format.FormatHDF5 import FormatHDF5
from dxtbx.format.FormatStill import FormatStill
class FormatHDF5SaclaRayonix(FormatHDF5, FormatStill):
"""
Class to handle multi-event HDF5 files from Rayonix detector (MX300-HS)
preprocessed by Cheetah SFX pipeline at SACLA.
"""
@staticmethod
def understand(image_file):
with h5py.File(image_file, "r") as h5_handle:
if "metadata/detector" not in h5_handle:
return False
if h5_handle["metadata/detector"][()] != "Rayonix MX300HS":
return False
if any(elem.startswith("tag-") for elem in h5_handle):
return True
return False
def __init__(self, image_file, index=0, reconst_mode=False, **kwargs):
self._raw_data = None
self.index = index
self.image_filename = image_file
super(FormatHDF5SaclaRayonix, self).__init__(image_file, **kwargs)
# This hard-coded value can be overwritten
# by RAYONIX_DISTANCE
self.distance = 240.0 # mm
self.mask = None
# Read metadata if possible
# Read pixel size from the metadata and determine the binning of rayonix
self.read_metadata()
self.PIXEL_SIZE = self.pixelsize_in_um / 1000 # convert um to mm
self.bin_size = int(self.pixelsize_in_um / 39.1)
self.RECONST_SIZE = 7680 // self.bin_size
# Override by environmental variables
if os.getenv("RAYONIX_DISTANCE"):
self.distance = float(os.environ["RAYONIX_DISTANCE"])
def _start(self):
h5_handle = h5py.File(self.image_filename, "r")
self._images = sorted([tag for tag in h5_handle if tag.startswith("tag-")])
self.tag = self._images[self.index]
h5_handle.close()
def read_metadata(self):
h5_handle = h5py.File(self.image_filename, "r")
self.pixelsize_in_um = h5_handle["metadata"]["pixelsize_in_um"][()]
h5_handle.close()
def get_image_file(self, index=None):
return self.image_filename
def set_index(self, index):
assert index < len(self._images)
self.index = index
self.tag = self._images[self.index]
self._raw_data = None
def _detector(self, index=None):
return self._detector_factory.simple(
sensor="CCD",
distance=self.distance,
beam_centre=(
self.RECONST_SIZE / 2 * self.PIXEL_SIZE,
self.RECONST_SIZE / 2 * self.PIXEL_SIZE,
),
fast_direction="+x",
slow_direction="-y",
pixel_size=(self.PIXEL_SIZE, self.PIXEL_SIZE),
image_size=(self.RECONST_SIZE, self.RECONST_SIZE),
trusted_range=(-1, 65535),
# px_mm = px_mm,
mask=[],
)
def _beam(self):
h5_handle = h5py.File(self.image_filename, "r")
eV = h5_handle[self.tag]["photon_energy_ev"][()]
h5_handle.close()
return self._beam_factory.simple(factor_ev_angstrom / eV)
def get_num_images(self):
return len(self._images)
def get_raw_data(self, index=None):
if index is not None and self.index != index:
self.set_index(index)
if self._raw_data is None:
h5_handle = h5py.File(self.image_filename, "r")
data = h5_handle[self.tag]["data"][()].astype(np.int32)
h5_handle.close()
self._raw_data = flex.int(data)
return self._raw_data
def get_detector(self, index=None):
if self._detector_instance is None:
self._detector_instance = self._detector()
return self._detector_instance
def get_static_mask(self):
# This means when the pixel mask is present, trusted region is ignored.
        # The user-provided masks (if any) will be automatically merged.
# see https://github.com/dials/dials/issues/236
return self.mask
def get_beam(self, index=None):
if index is not None and self.index != index:
self.set_index(index)
self._beam_instance = None
if self._beam_instance is None:
self._beam_instance = self._beam()
return self._beam_instance
if __name__ == "__main__":
print(FormatHDF5SaclaRayonix.understand(sys.argv[1]))
FormatHDF5SaclaRayonix(sys.argv[1])
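# Usage sketch (hypothetical file name; assumes a Cheetah-preprocessed
# Rayonix HDF5 file that passes understand()):
#   fmt = FormatHDF5SaclaRayonix("run0123.h5")
#   n_img = fmt.get_num_images()
#   raw = fmt.get_raw_data(0)   # flex.int image for the first "tag-" entry
#   beam = fmt.get_beam(0)      # wavelength derived from photon_energy_ev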
|
{"hexsha": "6712b80305aacfa138f8ba600f72bc546e03fd4f", "size": 4587, "ext": "py", "lang": "Python", "max_stars_repo_path": "format/FormatHDF5SaclaRayonix.py", "max_stars_repo_name": "dials/dx2", "max_stars_repo_head_hexsha": "94a5e2fd048b4133241c5a528ffaf7c504f22f8b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "format/FormatHDF5SaclaRayonix.py", "max_issues_repo_name": "dials/dx2", "max_issues_repo_head_hexsha": "94a5e2fd048b4133241c5a528ffaf7c504f22f8b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "format/FormatHDF5SaclaRayonix.py", "max_forks_repo_name": "dials/dx2", "max_forks_repo_head_hexsha": "94a5e2fd048b4133241c5a528ffaf7c504f22f8b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8541666667, "max_line_length": 83, "alphanum_fraction": 0.6309134511, "include": true, "reason": "import numpy", "num_tokens": 1103}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#%%
DAT = pd.read_fwf("table2", header=None)#, dtype="str")
#%%
DAT.columns = ["0", "1", "2", "3", "intensi", "long", "6", "7", "8", "9", "10", "11", "12", "13", "14"]
DAT
DAT_1 = DAT.apply(pd.to_numeric, errors="coerce")
DAT_1
type(DAT_1["intensi"][14])
filtrado = DAT_1[DAT_1['intensi'].apply(lambda x: type(x) in [int, np.int64, float, np.float64])]
#DAT
#filtrado
#%%
fig = plt.figure(figsize = (15,15))
plt.scatter(DAT_1["long"], DAT_1["intensi"], s=10)
plt.show()
#%%
LAMB = pd.read_fwf("Nave_wavelengths.txt", skiprows=9)#, dtype="str")
LAMB.columns = ["Wavelengths"]
long_nave = np.array(LAMB["Wavelengths"][3:]).astype(float)
long_nave
mag = np.ones(len(long_nave))
np.savetxt("lineas_NAVE.txt",long_nave)
#%%
fig = plt.figure(figsize = (10,10))
lim_inf = 5400
lim_sup = 5500
plt.scatter(long_nave, mag, marker="|", s=10000)
plt.xlim(lim_inf,lim_sup)
plt.xlabel(u"$Wavelength\ [\AA]$")
plt.title(u"$Fe\ I\ lines$")
plt.show()
#%%
encuentra = long_nave[(long_nave < lim_sup) & (long_nave > lim_inf)]
encuentra
#%%
mibisec = np.loadtxt("./lineas/bisec_4592.txt")
plt.figure(figsize=(15,15))
plt.scatter(mibisec[0,0],mibisec[0,1])
plt.scatter(mibisec[2:,0],mibisec[2:,1])
plt.ticklabel_format(useOffset=False)
plt.show()
|
{"hexsha": "2ed517746ac1e812e6cd37b0b46b28c6be9dff31", "size": 1338, "ext": "py", "lang": "Python", "max_stars_repo_path": "Nave.py", "max_stars_repo_name": "juanitopereza/Granulacion", "max_stars_repo_head_hexsha": "07e455fee338f86bf4dc5eae068b677e87a534d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Nave.py", "max_issues_repo_name": "juanitopereza/Granulacion", "max_issues_repo_head_hexsha": "07e455fee338f86bf4dc5eae068b677e87a534d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Nave.py", "max_forks_repo_name": "juanitopereza/Granulacion", "max_forks_repo_head_hexsha": "07e455fee338f86bf4dc5eae068b677e87a534d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.3913043478, "max_line_length": 103, "alphanum_fraction": 0.6591928251, "include": true, "reason": "import numpy", "num_tokens": 475}
|
#ifndef _UTHHFF_CODEPY_BPL_HPP_INCLUDED
#define _UTHHFF_CODEPY_BPL_HPP_INCLUDED
#include <boost/python.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
#define CODEPY_PYTHON_ERROR(TYPE, REASON) \
{ \
PyErr_SetString(PyExc_##TYPE, REASON); \
throw boost::python::error_already_set(); \
}
#define CODEPY_PYTHON_FOREACH(TYPE, NAME, ITERABLE) \
BOOST_FOREACH(TYPE NAME, \
std::make_pair( \
boost::python::stl_input_iterator<TYPE>(ITERABLE), \
boost::python::stl_input_iterator<TYPE>()))
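// Usage sketch (assumes a Python iterable `items` is in scope):
//   long total = 0;
//   CODEPY_PYTHON_FOREACH(long, x, items)
//       total += x;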
namespace codepy
{
template <class T>
class no_compare_indexing_suite :
public boost::python::vector_indexing_suite<T, false, no_compare_indexing_suite<T> >
{
public:
static bool contains(T &container, typename T::value_type const &key)
{ CODEPY_PYTHON_ERROR(NotImplementedError, "containment checking not supported on this container"); }
};
}
#endif
|
{"hexsha": "07ca6305cdf848e0fac209ab4eec7404318d2da2", "size": 931, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "codepy/include/codepy/bpl.hpp", "max_stars_repo_name": "reguly/codepy", "max_stars_repo_head_hexsha": "17801c99622a46bc6ca7308135f8e9235ba3c7f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30.0, "max_stars_repo_stars_event_min_datetime": "2015-04-09T09:49:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-01T01:52:23.000Z", "max_issues_repo_path": "codepy/include/codepy/bpl.hpp", "max_issues_repo_name": "reguly/codepy", "max_issues_repo_head_hexsha": "17801c99622a46bc6ca7308135f8e9235ba3c7f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12.0, "max_issues_repo_issues_event_min_datetime": "2017-04-04T14:45:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T20:33:46.000Z", "max_forks_repo_path": "codepy/include/codepy/bpl.hpp", "max_forks_repo_name": "reguly/codepy", "max_forks_repo_head_hexsha": "17801c99622a46bc6ca7308135f8e9235ba3c7f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24.0, "max_forks_repo_forks_event_min_datetime": "2016-01-01T00:35:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T09:58:40.000Z", "avg_line_length": 21.6511627907, "max_line_length": 107, "alphanum_fraction": 0.7282491944, "num_tokens": 232}
|
subroutine tmulta(
$ np,x,px,y,py,z,g,dv,sx,sy,sz,al,ak0,phi,
$ psi1,psi2,bz,
1 dx,dy,theta,dtheta,
$ eps0,krad,fb1,fb2,mfring,fringe)
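c     Tracks np particles through a bend with multipole components:
c     the element is sliced into ndiv steps, each step sandwiching a
c     thin multipole kick (complex strengths ak0) between bend maps.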
use ffs_flag, only:ndivrad,calpol,photons
use tmacro
use multa
use tbendcom, only:tbrot,tbshift
use kradlib, only:tradke,pxr0,pyr0,zr0,bsi
use photontable,only:tsetpcvt,pcvt
use mathfun
implicit none
integer*4 ,parameter ::ndivmax=2000
real*8 ,parameter ::ampmax=0.05d0,eps00=0.005d0
integer*4 np,mfring,i,n,mfr,ndiv,nmmax,m,m1,k,nmmin
real*8 x(np),px(np),y(np),py(np),z(np),g(np),dv(np),
$ al,phi,psi1,psi2,bz,dx,dy,theta,eps0,fb1,fb2,
$ dtheta,pr,cost,sint,rho0,rhob,
$ sinp1,sinp2,cosp1,cosp2,phin,aln,cosw,sinw,sqwh,sinwp1,
$ eps,w,r,rk(0:nmult),als,ak0r,ak1r,ak1n,
$ phib,phibn
real*8 sx(np),sy(np),sz(np)
complex*16 ak0(0:nmult),ak(0:nmult),akn(0:nmult),
$ cx1,csl,csr,cl,cr,cg,cx
logical*4 fringe,krad
if(bz .ne. 0.d0)then
write(*,*)
$ 'MULT with nonzero ANGLE and BZ is not yet supported.'
call abort
endif
if(gknini)then
call gkninit
endif
cost=cos(theta)
sint=sin(theta)
call tbshift(np,x,px,y,py,z,dx,dy,phi,cost,sint,.true.)
if(dtheta .ne. 0.d0)then
call tbrot(np,x,px,y,py,z,sx,sy,sz,phi,dtheta)
endif
if(eps0 .eq. 0.d0)then
eps=eps00
else
eps=eps00*eps0
endif
nmmax=0
ak=ak0
ak0r=dble(ak(0))
ak1r=dble(ak(1))
ak(0)=dcmplx(0.d0,imag(ak(0)))
ak(1)=dcmplx(0.d0,imag(ak(1)))
do n=nmult,0,-1
if(ak(n) .ne. (0.d0,0.d0))then
nmmax=n
exit
endif
enddo
nmmin=nmmax
do n=0,nmmax-1
if(ak(n) .ne. (0.d0,0.d0))then
nmmin=n
exit
endif
enddo
ndiv=1
do n=nmmin,nmmax
ndiv=max(ndiv,
$int(sqrt(ampmax**(n-1)/6.d0/fact(n-1)/eps*abs(ak(n)*al)))+1)
enddo
phib=phi+ak0r
if(krad)then
ndiv=max(ndiv,ndivrad(phib,ak1r,0.d0,eps0))
endif
ndiv=min(ndivmax,ndiv)
aln=al/ndiv
if(fb1 .ne. 0.d0)then
aln=aln-(phi*fb1)**2/al/48.d0
1 *sin(.5d0*(phi-psi1-psi2))/sin(.5d0*phi)/ndiv
endif
if(fb2 .ne. 0.d0)then
aln=aln-(phi*fb2)**2/al/48.d0
1 *sin(.5d0*(phi-psi1-psi2))/sin(.5d0*phi)/ndiv
endif
rho0=al/phi
rhob=al/phib
phin=phi/ndiv
phibn=phib/ndiv
ak1n=ak1r/ndiv
sinp1=sin(psi1)
cosp1=cos(psi1)
sinp2=sin(psi2)
cosp2=cos(psi2)
do m=nmmin,nmmax
akn(m)=ak(m)/(fact(m+1)*ndiv)
enddo
als=aln*.5d0
if(krad)then
pxr0=px
pyr0=py
zr0=z
if(calpol)then
bsi=0.d0
endif
if(photons)then
call tsetpcvt(l_track,dx,dy,theta,dtheta,phi,al)
endif
endif
do n=1,ndiv
if(n .eq. 1)then
w=phin*.5d0-psi1
cosw=cos(w)
sinw=sin(w)
if(cosw .ge. 0.d0)then
sqwh=sinw**2/(1.d0+cosw)
else
sqwh=1.d0-cosw
endif
sinwp1=sin(phin*.5d0)
mfr=0
if(mfring .eq. 2)then
mfr=0
elseif(mfring .ne. 0)then
mfr=-1
endif
if(krad .and. calpol)then
do concurrent (i=1:np)
cx1=dcmplx(x(i),y(i))
cx=0.d0
do k=nmmax,2,-1
cx=(cx+ak(k))*cx1*aninv(k+1)
enddo
bsi(i)=.5d0*imag(cx*cx1**2)/al
enddo
endif
call tbend(np,x,px,y,py,z,g,dv,sx,sy,sz,
$ als,phibn*.5d0,phin*.5d0,psi1,0.d0,
1 cosp1,sinp1,1.d0,0.d0,
1 ak1n,0.d0,0.d0,0.d0,0.d0,1.d0,0.d0,
$ fb1,fb2,mfr,fringe,cosw,sinw,sqwh,sinwp1,
1 krad,eps0,.false.,1)
pcvt%fr0=als/al
w=phin
cosw=cos(w)
sinw=sin(w)
if(cosw .ge. 0.d0)then
sqwh=sinw**2/(1.d0+cosw)
else
sqwh=1.d0-cosw
endif
sinwp1=sinw
else
call tbend(np,x,px,y,py,z,g,dv,sx,sy,sz,
$ aln,phibn,phin,0.d0,0.d0,
1 1.d0,0.d0,1.d0,0.d0,
1 ak1n,0.d0,0.d0,0.d0,0.d0,1.d0,0.d0,
$ 0.d0,0.d0,0,.false.,cosw,sinw,sqwh,sinwp1,
1 krad,eps0,.false.,2)
als=als+aln
pcvt%fr0=als/al
endif
do i=1,np
pr=(1.d0+g(i))
cx1=dcmplx(x(i),y(i))
csl=(0.d0,0.d0)
csr=(0.d0,0.d0)
r=rho0+x(i)
rk(0)=1.d0+sqrt1(x(i)/rho0)
do k=1,nmult-nmmin
rk(k)=rk(k-1)/r
enddo
do m=nmult,0,-1
cl=(0.d0,0.d0)
cr=(0.d0,0.d0)
do k=max(0,m-nmmax),m-nmmin
m1=m-k
cg=gkn(m1,k)*rk(k)*akn(m1)
cl=cl+(m+1)*cg
cr=cr+(.5d0-k)*cg
enddo
csl=csl*cx1+cl
csr=csr*cx1+cr
enddo
px(i)=px(i)-(dble(csr*cx1)/r+dble(csl))/pr
py(i)=py(i)+imag(csl)/pr
enddo
if(krad)then
pxr0=px
pyr0=py
zr0=z
endif
enddo
w=phin*.5d0-psi2
cosw=cos(w)
sinw=sin(w)
if(cosw .ge. 0.d0)then
sqwh=sinw**2/(1.d0+cosw)
else
sqwh=1.d0-cosw
endif
sinwp1=sinw
mfr=0
if(mfring .eq. 1)then
mfr=0
elseif(mfring .ne. 0)then
mfr=-2
endif
if(krad .and. calpol)then
do i=1,np
cx1=dcmplx(x(i),y(i))
cx=0.d0
do k=nmmax,2,-1
cx=(cx+ak(k))*cx1*aninv(k+1)
enddo
bsi(i)=-imag(.5d0*cx*cx1**2)/al
enddo
endif
call tbend(np,x,px,y,py,z,g,dv,sx,sy,sz,
$ aln*.5d0,phibn*.5d0,phin*.5d0,0.d0,psi2,
1 1.d0,0.d0,cosp2,sinp2,
1 ak1n,0.d0,0.d0,0.d0,0.d0,1.d0,0.d0,
$ fb1,fb2,mfr,fringe,cosw,sinw,sqwh,sinwp1,
1 krad,eps0,.false.,2)
if(dtheta .ne. 0.d0)then
call tbrot(np,x,px,y,py,z,sx,sy,sz,-phi,-dtheta)
endif
call tbshift(np,x,px,y,py,z,-dx,-dy,-phi,cost,-sint,.false.)
return
end
subroutine tmultae(trans,cod,beam,srot,al,ak0,
$ phi,psi1,psi2,apsi1,apsi2,bz,
1 dx,dy,theta,dtheta,
$ eps0,enarad,fringe,fb1,fb2,mfring,l)
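c     Linear-optics counterpart of tmulta: propagates the transfer
c     matrix trans, closed orbit cod and beam matrix beam through the
c     same sliced bend-plus-multipole element.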
use tfstk
use tmacro
use multa
use temw,only:tmulbs
use kradlib, only:tradke
implicit none
integer*4 ndivmax
real*8 ampmax,eps00
parameter (ampmax=0.05d0,eps00=0.005d0,ndivmax=2000)
integer*4 mfring,n,mfr,ndiv,nmmax,m,m1,k,nmmin,l
real*8 trans(6,12),cod(6),beam(42),trans1(6,6),srot(3,9),
$ al,phi,psi1,psi2,bz,dx,dy,theta,eps0,fb1,fb2,
$ dphix,dphiy,dtheta,rho0,
$ phin,aln,eps,r,rk(0:nmult),apsi1,apsi2,
$ psi1n,psi2n,ak0r,ak1r,ak1n,phib,phibn
complex*16 ak(0:nmult),ak0(0:nmult),akn(0:nmult),
$ cx1,csl,csr,cl,cr,cg,
$ csxx,csxy,csyy,cxx,cxy,cyy
logical*4 enarad,fringe
if(bz .ne. 0.d0)then
write(*,*)
$ 'MULT with nonzero ANGLE and BZ is not yet supported.'
call abort
endif
if(gknini)then
call gkninit
endif
call tchge(trans,cod,beam,srot,
$ dx,dy,theta,dtheta,phi,.true.)
if(dtheta .ne. 0.d0)then
dphix= phi*sin(.5d0*dtheta)**2
dphiy= .5d0*phi*sin(dtheta)
cod(2)=cod(2)+dphix
cod(4)=cod(4)+dphiy
else
dphix=0.d0
dphiy=0.d0
endif
if(eps0 .eq. 0.d0)then
eps=eps00
else
eps=eps00*eps0
endif
ak=ak0
ak0r=dble(ak(0))
ak1r=dble(ak(1))
ak(0)=dcmplx(0.d0,imag(ak(0)))
ak(1)=dcmplx(0.d0,imag(ak(1)))
nmmax=0
do n=nmult,0,-1
if(ak(n) .ne. (0.d0,0.d0))then
nmmax=n
exit
endif
enddo
nmmin=nmmax
do n=0,nmmax-1
if(ak(n) .ne. (0.d0,0.d0))then
nmmin=n
exit
endif
enddo
ndiv=1
do n=nmmin,nmmax
ndiv=max(ndiv,
$int(sqrt(ampmax**(n-1)/6.d0/fact(n-1)/eps*abs(ak(n)*al)))+1)
enddo
ndiv=min(ndivmax,ndiv)
psi1n=2.d0*psi1*ndiv
psi2n=2.d0*psi2*ndiv
aln=al/ndiv
if(fb1 .ne. 0.d0 .and. (mfring .eq. 1 .or. mfring .eq. 3))then
aln=aln-(phi*fb1)**2/al/48.d0
1 *sin(.5d0*(phi*(1.d0-psi1-psi2)-apsi1-apsi2))
$ /sin(.5d0*phi)/ndiv
endif
if(fb2 .ne. 0.d0 .and. (mfring .eq. 2 .or. mfring .eq. 3))then
aln=aln-(phi*fb2)**2/al/48.d0
1 *sin(.5d0*(phi*(1.d0-psi1-psi2)-apsi1-apsi2))
$ /sin(.5d0*phi)/ndiv
endif
phib=phi+ak0r
phin=phi/ndiv
phibn=phib/ndiv
ak1n=ak1r/ndiv
rho0=aln/phin
do m=nmmin,nmmax
akn(m)=ak(m)/(fact(m+1)*ndiv)
enddo
call tinitr(trans1)
do n=1,ndiv
if(n .eq. 1)then
mfr=0
if(mfring .eq. 2)then
mfr=0
elseif(mfring .ne. 0)then
mfr=-1
endif
call tbende(trans,cod,beam,srot,aln*.5d0,phibn*.5d0,phin*.5d0,
$ psi1n,0.d0,apsi1,0.d0,ak1n*.5d0,
$ 0.d0,0.d0,0.d0,0.d0,
1 fb1,fb2,mfr,fringe,eps0,enarad,.false.,.false.,l)
else
call tbende(trans,cod,beam,srot,aln,phibn,phin,
$ 0.d0,0.d0,0.d0,0.d0,ak1n,
$ 0.d0,0.d0,0.d0,0.d0,
1 0.d0,0.d0,0,.false.,eps0,enarad,.false.,.false.,l)
endif
cx1=dcmplx(cod(1),cod(3))
csl=(0.d0,0.d0)
csr=(0.d0,0.d0)
csxx=(0.d0,0.d0)
csxy=(0.d0,0.d0)
csyy=(0.d0,0.d0)
r=rho0+cod(1)
rk(0)=sqrt(r/rho0)
do k=1,nmult-nmmin
rk(k)=rk(k-1)/r
enddo
do m=nmult,0,-1
cl=(0.d0,0.d0)
cr=(0.d0,0.d0)
cxx=(0.d0,0.d0)
cxy=(0.d0,0.d0)
cyy=(0.d0,0.d0)
do k=max(0,m-nmmax),m-nmmin
m1=m-k
cg=gkn(m1,k)*rk(k)*akn(m1)
cl=cl+(m+1)*cg
            cr=cr+(.5d0-k)*cg
cxx=cxx+(.25d0*(4*k**2-1)*cx1-(2*k-1)*(m+1)*r)*cg
if(m .eq. 0)then
cxy=cxy+(1-2*k)*cg
else
cxy=cxy+(m+1)*((1-2*k)*cx1+2*m*r)*cg
cyy=cyy+m*(m+1)*cg
endif
enddo
csl=csl*cx1+cl
csr=csr*cx1+cr
csxx=csxx*cx1+cxx
if(m .ne. 0)then
csxy=csxy*cx1+cxy
csyy=csyy*cx1+cyy
else
csxy=csxy+cxy
endif
if(enarad)then
call tradke(trans,cod,beam,srot,aln,0.d0,0.d0)
endif
enddo
c write(*,*)'tmultae ',dble(csr*cx1)/r,dble(csl),nmmin
cod(2)=cod(2)-(dble(csr*cx1)/r+dble(csl))
cod(4)=cod(4)+imag(csl)
trans1(2,1)=-(dble(csxx)/r**2+dble(csyy))
trans1(2,3)=imag(csxy)*.5d0/r
trans1(4,1)=trans1(2,3)
trans1(4,3)=dble(csyy)
call tmultr5(trans,trans1,irad)
call tmulbs(beam,trans1,.true.)
enddo
mfr=0
if(mfring .eq. 1)then
mfr=0
elseif(mfring .ne. 0)then
mfr=-2
endif
call tbende(trans,cod,beam,srot,aln*.5d0,phibn*.5d0,phin*.5d0,
$ 0.d0,psi2n,0.d0,apsi2,ak1n*.5d0,
$ 0.d0,0.d0,0.d0,0.d0,
1 fb1,fb2,mfr,fringe,eps0,enarad,.false.,.false.,l)
if(dtheta .ne. 0.d0)then
cod(2)=cod(2)+dphix
cod(4)=cod(4)+dphiy
endif
call tchge(trans,cod,beam,srot,
$ -dx,-dy,-theta,-dtheta,-phi,.false.)
return
end
|
{"hexsha": "d253a0a24a55310c4c33b4a44a9f173bf0e3c1bf", "size": 11849, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/tmulta.f", "max_stars_repo_name": "noboruatkek/SAD", "max_stars_repo_head_hexsha": "3881a190af8903537774c57b56963ffc94cfc039", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-04-01T15:54:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T16:47:20.000Z", "max_issues_repo_path": "src/tmulta.f", "max_issues_repo_name": "noboruatkek/SAD", "max_issues_repo_head_hexsha": "3881a190af8903537774c57b56963ffc94cfc039", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tmulta.f", "max_forks_repo_name": "noboruatkek/SAD", "max_forks_repo_head_hexsha": "3881a190af8903537774c57b56963ffc94cfc039", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-03-15T08:52:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T08:06:40.000Z", "avg_line_length": 28.6900726392, "max_line_length": 72, "alphanum_fraction": 0.4862013672, "num_tokens": 4972}
|
'''
Functions to select features based on various feature selection settings
Code by Carl Rietschel
https://github.com/carlr67/deephitplus
'''
import pandas as pd
import numpy as np
def apply_features(full_feat_list, feat_list, tr_data, va_data, te_data):
# Takes as input feat_list, an array of feature lists (length: num_Event + 1)
# First element = feature list for shared subnetwork
# Second, third, ... elements = feature lists for cause-specific subnetworks
# Get concatenated list of features
c_feat_list = [inner for outer in feat_list for inner in outer]
x_dim = [len(x) for x in feat_list]
new_tr_data = np.asarray(pd.DataFrame(tr_data, columns=full_feat_list)[c_feat_list])
new_va_data = np.asarray(pd.DataFrame(va_data, columns=full_feat_list)[c_feat_list])
new_te_data = np.asarray(pd.DataFrame(te_data, columns=full_feat_list)[c_feat_list])
DIM = (x_dim)
return DIM, new_tr_data, new_va_data, new_te_data
def get_feat_list(features, num_Event, eval_time, data, full_feat_list, times, labels, param_dict, cv_iter, path_to_immportances):
# Returns the feature list for the dataset depending on a chosen feature selection mode
feature_mode = features.split("_")[0]
if feature_mode in ["all", "sparse", "attentive"]:
feat_list = [full_feat_list for i in range(num_Event + 1)]
elif feature_mode == "filter":
method = features.split("_")[1]
# Import packages for the chosen filter method
if method == "anova":
print("Using ANOVA p-value (in ascending order) for feature selection")
from sklearn.feature_selection import f_classif
elif method == "svm":
print("Using SVM absolute coeffs (in descending order) for feature selection")
from sklearn import svm
elif method == "relieff":
print("Using ReliefF feature importances (in descending order) for feature selection")
from skrebate import ReliefF
output = pd.DataFrame(full_feat_list, columns=["Feature"])
output.set_index("Feature", inplace=True)
dfindex = ['Horizon', 'Event', 'Feature']
dfcols = ['Score']
result = pd.DataFrame(columns = dfindex + dfcols)
result.set_index(keys=dfindex, inplace=True)
event_feat_list = []
for event in range(1, num_Event + 1):
print(" Now computing: Event", event)
for ti in eval_time:
print(" time", ti)
data_for_calc = data
label_for_calc = ((times.flatten() < ti) & (labels.flatten() == event)) * 1
if method == "anova":
# Get the ANOVA p-value
F, pval = f_classif(data_for_calc, label_for_calc)
feature_score = pval
elif method == "svm":
clf = svm.SVC(kernel='linear')
clf.fit(data_for_calc, label_for_calc)
feature_score = np.absolute(clf.coef_).flatten()
elif method == "relieff":
fs = ReliefF(discrete_threshold=10, n_features_to_select=100, n_jobs=-1, n_neighbors=100, verbose=True)
fs.fit(data_for_calc, label_for_calc)
feature_score = fs.feature_importances_
tmp_result = pd.DataFrame({
'Horizon': ti,
'Event': event,
'Feature': full_feat_list,
'Score': feature_score})
tmp_result.set_index(keys=dfindex, inplace=True)
            result = pd.concat([result, tmp_result])
output["Event " + str(event)] = result.groupby(["Event", "Feature"]).mean().loc[event]
eventdf = output["Event " + str(event)]
top = param_dict['top'][event-1]
if method in ["relieff", "svm"]:
ascending = False
            elif method in ["anova"]:
ascending = True
eventdf = eventdf.sort_values(ascending=ascending)
print("Using top {} features from:".format(top))
print(eventdf)
print()
filteredeventdf = eventdf.iloc[0:top].copy()
event_feat_list.append(filteredeventdf.index.values)
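        # The shared subnetwork receives only features selected for every event.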
shared_feat_list = list(set.intersection(*[set(x) for x in event_feat_list]))
feat_list = event_feat_list.copy()
feat_list.insert(0, shared_feat_list)
elif feature_mode == "hybrid":
metric = features.split("_")[1]
cutofftype = features.split("_")[2]
# Load feature importance and get the most important features
result = pd.read_csv(path_to_immportances + '/result_all_VAL-IMPORTANCES_cv' + str(cv_iter) + '.csv')
# For using the p-value that a feature importance > 0
if metric == 'p':
avgdf0 = result.groupby(["Event", "Feature", "Permutation"]).mean()
dim1 = len(avgdf0.index.get_level_values(0).unique())
dim2 = len(avgdf0.index.get_level_values(1).unique())
a = avgdf0.values.reshape((dim1, dim2, -1))
from scipy.stats import ttest_1samp
r = ttest_1samp(a=a, popmean=0, axis=2)
analysisdf = avgdf0.groupby(["Event", "Feature"]).mean()
analysisdf["statistic"] = r[0].flatten()
analysisdf["Importance"] = r[1].flatten()/2 # Divide by 2 to get one-tailed test
avgdf = analysisdf.query("statistic>0") # Only take ones with positive importance
# print(avgdf)
sortascending = True
# For using 'raw' feature importances
elif metric == 'm':
avgdf = result.groupby(["Event", "Feature"]).mean()
sortascending = False
event_feat_list = []
for e in range(num_Event):
event = e + 1
eventdf = avgdf.loc[event]
print("Retrieving feature importances for event {}".format(event))
eventdf.index.names = ["Feature (Event " + str(event) + ")"]
eventdf.sort_values(by="Importance", ascending=sortascending, inplace=True)
if metric == 'm' and cutofftype == 'cut':
cutoff = param_dict['importancecutoff'][e]
filteredeventdf = eventdf.query("Importance >= " + str(cutoff))
print("Using features with raw importance (M) above {}:".format(cutoff))
elif metric == 'p' and cutofftype == 'cut':
cutoff = param_dict['importancecutoff'][e]
filteredeventdf = eventdf.query("Importance <= " + str(cutoff))
print("Using features with importance p-value (P) below {}:".format(cutoff))
elif cutofftype == 'top':
n_top = param_dict['top'][e]
filteredeventdf = eventdf.iloc[0:n_top,:].copy()
print("Using top {} features using metric({}) from:".format(n_top, metric))
print(eventdf)
print()
event_feat_list.append(filteredeventdf.index.values)
shared_feat_list = list(set.intersection(*[set(x) for x in event_feat_list]))
feat_list = event_feat_list.copy()
feat_list.insert(0, shared_feat_list)
return feat_list
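# End-to-end sketch (argument values are hypothetical; arrays must share
# the shapes used above):
#   feat_list = get_feat_list("filter_anova", num_Event, eval_time, data,
#                             full_feat_list, times, labels, param_dict,
#                             cv_iter=0, path_to_immportances="output")
#   DIM, tr, va, te = apply_features(full_feat_list, feat_list,
#                                    tr_data, va_data, te_data)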
|
{"hexsha": "26ed2b7e836781ed4ceb8b8b3671735af3eaf53a", "size": 7297, "ext": "py", "lang": "Python", "max_stars_repo_path": "get_features.py", "max_stars_repo_name": "carlr67/deephitplus", "max_stars_repo_head_hexsha": "0d527b235cfa471a4d97853c599f3ef46837e78a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-06-24T08:45:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T21:30:51.000Z", "max_issues_repo_path": "get_features.py", "max_issues_repo_name": "texchi2/deephitplus", "max_issues_repo_head_hexsha": "22da47d8919a3f5e9bf5190604d1b04e25bc0bd6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "get_features.py", "max_forks_repo_name": "texchi2/deephitplus", "max_forks_repo_head_hexsha": "22da47d8919a3f5e9bf5190604d1b04e25bc0bd6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-12-02T15:14:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-05T13:54:45.000Z", "avg_line_length": 39.2311827957, "max_line_length": 130, "alphanum_fraction": 0.5972317391, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1630}
|
from __future__ import print_function
from keras.models import load_model
import numpy as np
import random
import sys
maxlen = 40  # must match the sequence length used when training the model
num_char_generated = 30000
text = open('magic_cards.txt').read()
chars = sorted(list(set(text)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds + 1e-6) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
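# Temperature reshapes the distribution before sampling: values below 1
# concentrate mass on the most likely characters, values above 1 flatten
# it toward uniform, e.g. sample(preds, 0.2) is far more conservative
# than sample(preds, 1.2).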
print('Loading model...')
model = load_model('output/model.hdf5')
f2 = open('output/text_sample.txt', 'w')
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
f2.write('----- diversity:' + ' ' + str(diversity) + '\n')
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
f2.write('----- Generating with seed: "' + sentence + '"' + '\n---\n')
sys.stdout.write(generated)
for i in range(num_char_generated):
        x = np.zeros((1, maxlen), dtype=int)
for t, char in enumerate(sentence):
x[0, t] = char_indices[char]
preds = model.predict(x, verbose=0)[0][0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
f2.write(generated + '\n')
print()
f2.close()
|
{"hexsha": "25c4383e36fe02b9902d840f8a40139be467d8c0", "size": 2213, "ext": "py", "lang": "Python", "max_stars_repo_path": "text_generator_keras_sample.py", "max_stars_repo_name": "tienthanhdhcn/char-embeddings", "max_stars_repo_head_hexsha": "860c92a0af3b13c525c33d7257ef0204aaf80e1c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 218, "max_stars_repo_stars_event_min_datetime": "2017-04-04T05:22:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T14:01:29.000Z", "max_issues_repo_path": "text_generator_keras_sample.py", "max_issues_repo_name": "AbhayJain0/char-embeddings", "max_issues_repo_head_hexsha": "860c92a0af3b13c525c33d7257ef0204aaf80e1c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-04-04T13:17:34.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-08T18:04:32.000Z", "max_forks_repo_path": "text_generator_keras_sample.py", "max_forks_repo_name": "AbhayJain0/char-embeddings", "max_forks_repo_head_hexsha": "860c92a0af3b13c525c33d7257ef0204aaf80e1c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 170, "max_forks_repo_forks_event_min_datetime": "2017-04-04T05:22:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T13:58:53.000Z", "avg_line_length": 31.6142857143, "max_line_length": 74, "alphanum_fraction": 0.6769091731, "include": true, "reason": "import numpy", "num_tokens": 564}
|