import os
import pandas as pd
import numpy as np
from zipfile import ZipFile
import urllib.request
# Data needs to be saved outside of project folder
base_path = os.environ['HOMEPATH']
data_folder='data'
# URL to download the sentiment140 dataset
data_url='http://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip'
# Functions to download and process data
def change_base_dir(base_dir_path):
    """ Change the working directory of the code """
    if not os.path.exists(base_dir_path):
        print('creating directory', base_dir_path)
        os.makedirs(base_dir_path)
    print('Changing base directory to', base_dir_path)
    os.chdir(base_dir_path)

def download_data(download_url, filename='downloaded_data.zip'):
    """ Download and extract data """
    downloaded_filename = os.path.join('.', filename)
    print('Step 1: Downloading data')
    urllib.request.urlretrieve(download_url, downloaded_filename)
    print('Step 2: Extracting data')
    zipfile = ZipFile(downloaded_filename)
    zipfile.extractall('./')
    zipfile.close()

def extract_tweets_and_labels(filename):
    """ Extract tweets and labels from the downloaded data """
    print('Step 3: Reading the data as a dataframe')
    df = pd.read_csv(filename, header=None, encoding='iso-8859-1')
    df.columns = ['Label', 'TweetId', 'Date', 'Query', 'User', 'Text']
    print('Read {} lines'.format(df.shape[0]))
    print('Discarding neutral tweets')
    df = df[df.Label != 2]
    print('No of lines in the data after filtering neutral tweets: {}'.format(df.shape[0]))
    print('Step 4: Shuffling the data')
    train_length = int(df.shape[0] * 0.8)
    df = df.sample(frac=1)  # reshuffle the data
    df['Text'] = df['Text'].astype(str).apply(lambda x: x.strip())
    print(df.head())
    print('Step 5: Dividing into test and train datasets')
    df_train = df.iloc[:train_length, :]
    df_test = df.iloc[train_length:, :]
    print('Step 6: Exporting the train and test datasets')
    print('Exporting training data: {} rows'.format(df_train.shape[0]))
    export_prefix = 'training'
    df_train[['Label']].to_csv(export_prefix + '_label.csv', header=False, index=False)
    df_train[['Text']].to_csv(export_prefix + '_text.csv', header=False, index=False)
    print('Target distribution in the training data is as follows')
    print('\n', df_train['Label'].value_counts())
    print('Exporting testing data: {} rows'.format(df_test.shape[0]))
    export_prefix = 'testing'
    df_test[['Label']].to_csv(export_prefix + '_label.csv', header=False, index=False)
    df_test[['Text']].to_csv(export_prefix + '_text.csv', header=False, index=False)
    print('Target distribution in the testing data is as follows')
    print('\n', df_test['Label'].value_counts())

# Download and process the data
base_dir_path = os.path.join(base_path, data_folder)
change_base_dir(base_dir_path)
download_data(data_url)
extract_tweets_and_labels('training.1600000.processed.noemoticon.csv')
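
# Illustrative sketch (an assumption, not part of the original pipeline):
# reading the exported splits back for modeling; the file names follow the
# prefixes used above.
def load_split(prefix):
    labels = pd.read_csv(prefix + '_label.csv', header=None, names=['Label'])
    texts = pd.read_csv(prefix + '_text.csv', header=None, names=['Text'])
    return texts.join(labels)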
--- source: maelcamerlynck/MachineLearningSamples-TwitterSentimentPrediction :: code/01_data_acquisition_and_understanding/01_DataPreparation.py (Python, MIT) ---
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# __author__ = 'zd'
import re
import numpy as np
def clean_str(sentence):
"""
清洗数据
:param sentence:
:return:
"""
sentence = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", sentence)
sentence = re.sub(r"\'s", " \'s", sentence)
sentence = re.sub(r"\'ve", " \'ve", sentence)
sentence = re.sub(r"n\'t", " n\'t", sentence)
sentence = re.sub(r"\'re", " \'re", sentence)
sentence = re.sub(r"\'d", " \'d", sentence)
sentence = re.sub(r"\'ll", " \'ll", sentence)
sentence = re.sub(r",", " , ", sentence)
sentence = re.sub(r"!", " ! ", sentence)
sentence = re.sub(r"\(", " \( ", sentence)
sentence = re.sub(r"\)", " \) ", sentence)
sentence = re.sub(r"\?", " \? ", sentence)
sentence = re.sub(r"\s{2,}", " ", sentence)
return sentence.strip().lower()
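# Example: clean_str("I didn't like it (at all)!") returns
# "i did n't like it ( at all ) !"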
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
生成分批次的数据
:param data:
:param batch_size:
:param num_epochs:
:param shuffle:
:return:
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
for epoch in range(num_epochs):
# shuffle the data
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index: end_index]
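
# Minimal usage sketch (illustrative; the sample sentences are assumptions):
if __name__ == '__main__':
    samples = [clean_str(s) for s in ("I didn't like it (at all)!", "Great movie!")]
    for batch in batch_iter(samples, batch_size=2, num_epochs=1):
        print(batch)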
--- source: zhangdddong/beautifulNLP :: 文本分类/rnn_binary/data_utils.py (Python, Apache-2.0) ---
MODULE InitializationModule_Relativistic
USE KindModule, ONLY: &
DP, &
Zero, &
Half
USE ProgramHeaderModule, ONLY: &
ProgramName, &
nDOFX, &
nNodesX, &
iX_B0, &
iX_B1, &
iX_E0, &
iX_E1
USE MeshModule, ONLY: &
MeshX, &
NodeCoordinate
USE UtilitiesModule, ONLY: &
Locate, &
NodeNumberX, &
Interpolate1D_Linear
USE GravitySolutionModule_CFA_Poseidon, ONLY: &
ComputeConformalFactor_Poseidon, &
ComputeLapseAndShift_Poseidon
USE Poseidon_UtilitiesModule, ONLY: &
MultiplyByPsi6, &
DivideByPsi6, &
ComputeMatterSources_Poseidon, &
ComputePressureTensorTrace_Poseidon
USE GeometryFieldsModule, ONLY: &
uGF, &
iGF_Gm_dd_11, &
iGF_Gm_dd_22, &
iGF_Gm_dd_33, &
iGF_Alpha, &
iGF_Psi
USE FluidFieldsModule, ONLY: &
uPF, &
iPF_D, &
iPF_V1, &
iPF_V2, &
iPF_V3, &
iPF_E, &
iPF_Ne, &
uCF, &
iCF_D, &
iCF_S1, &
iCF_S2, &
iCF_S3, &
iCF_E, &
iCF_Ne, &
uAF, &
iAF_P, &
iAF_T, &
iAF_Ye, &
iAF_S, &
iAF_E, &
iAF_Me, &
iAF_Mp, &
iAF_Mn, &
iAF_Xp, &
iAF_Xn, &
iAF_Xa, &
iAF_Xh, &
iAF_Gm, &
uDF
USE Euler_SlopeLimiterModule_Relativistic_TABLE, ONLY: &
ApplySlopeLimiter_Euler_Relativistic_TABLE
USE Euler_PositivityLimiterModule_Relativistic_TABLE, ONLY: &
ApplyPositivityLimiter_Euler_Relativistic_TABLE
USE Euler_UtilitiesModule_Relativistic, ONLY: &
ComputeConserved_Euler_Relativistic, &
ComputeFromConserved_Euler_Relativistic
USE EquationOfStateModule, ONLY: &
ComputeThermodynamicStates_Primitive, &
ApplyEquationOfState
USE ProgenitorModule, ONLY: &
ProgenitorType1D, &
ReadProgenitor1D
IMPLICIT NONE
PRIVATE
PUBLIC :: InitializeFields_Relativistic
CONTAINS
SUBROUTINE InitializeFields_Relativistic &
( ProgenitorFileName_Option )
CHARACTER(LEN=*), INTENT(in), OPTIONAL :: ProgenitorFileName_Option
CHARACTER(LEN=32) :: ProgenitorFileName
ProgenitorFileName = '../Progenitors/WH07_15M_Sun.h5'
IF( PRESENT( ProgenitorFileName_Option ) ) &
ProgenitorFileName = TRIM( ProgenitorFileName_Option )
WRITE(*,*)
WRITE(*,'(A,A)') ' INFO: ', TRIM( ProgramName )
SELECT CASE ( TRIM( ProgramName ) )
CASE( 'GravitationalCollapse' )
CALL InitializeFields_GravitationalCollapse &
( TRIM( ProgenitorFileName ) )
CASE DEFAULT
WRITE(*,*)
WRITE(*,'(A21,A)') 'Invalid ProgramName: ', ProgramName
WRITE(*,'(A)') 'Stopping...'
STOP
END SELECT
END SUBROUTINE InitializeFields_Relativistic
SUBROUTINE InitializeFields_GravitationalCollapse &
( ProgenitorFileName )
CHARACTER(LEN=*), INTENT(in) :: ProgenitorFileName
INTEGER :: iX1, iX2, iX3
INTEGER :: iNodeX1, iNodeX2, iNodeX3, iNodeX
REAL(DP) :: X1
TYPE(ProgenitorType1D) :: P1D
REAL(DP) :: E (nDOFX,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3))
REAL(DP) :: Si(nDOFX,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3),3)
REAL(DP) :: S (nDOFX,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3))
REAL(DP) :: Mg(nDOFX,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3))
INTEGER :: ITER
REAL(DP) :: dAlpha, dPsi
LOGICAL :: CONVERGED
REAL(DP) :: dAl1(nDOFX,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3))
REAL(DP) :: dCF1(nDOFX,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3))
REAL(DP) :: dAl2(nDOFX,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3))
REAL(DP) :: dCF2(nDOFX,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3))
WRITE(*,*)
WRITE(*,'(6x,A,A)') &
'ProgenitorFileName: ', TRIM( ProgenitorFileName )
WRITE(*,*)
CALL ReadProgenitor1D( TRIM( ProgenitorFileName ), P1D )
! --- Initialize Fluid Fields ---
ASSOCIATE &
( R1D => P1D % Radius, &
D1D => P1D % MassDensity, &
V1D => P1D % RadialVelocity, &
T1D => P1D % Temperature, &
Y1D => P1D % ElectronFraction )
DO iX3 = iX_B0(3), iX_E0(3)
DO iX2 = iX_B0(2), iX_E0(2)
DO iX1 = iX_B0(1), iX_E1(1)
DO iNodeX3 = 1, nNodesX(3)
DO iNodeX2 = 1, nNodesX(2)
DO iNodeX1 = 1, nNodesX(1)
X1 = NodeCoordinate( MeshX(1), iX1, iNodeX1 )
iNodeX = NodeNumberX( iNodeX1, iNodeX2, iNodeX3 )
uPF(iNodeX,iX1,iX2,iX3,iPF_D) &
= Interpolate1D( R1D, D1D, SIZE( R1D ), X1 )
uPF(iNodeX,iX1,iX2,iX3,iPF_V1) &
= Interpolate1D( R1D, V1D, SIZE( R1D ), X1 )
uPF(iNodeX,iX1,iX2,iX3,iPF_V2) &
= Zero
uPF(iNodeX,iX1,iX2,iX3,iPF_V3) &
= Zero
uAF(iNodeX,iX1,iX2,iX3,iAF_T) &
= Interpolate1D( R1D, T1D, SIZE( R1D ), X1 )
uAF(iNodeX,iX1,iX2,iX3,iAF_Ye) &
= Interpolate1D( R1D, Y1D, SIZE( R1D ), X1 )
CALL ComputeThermodynamicStates_Primitive &
( uPF(iNodeX,iX1,iX2,iX3,iPF_D ), &
uAF(iNodeX,iX1,iX2,iX3,iAF_T ), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Ye), &
uPF(iNodeX,iX1,iX2,iX3,iPF_E ), &
uAF(iNodeX,iX1,iX2,iX3,iAF_E ), &
uPF(iNodeX,iX1,iX2,iX3,iPF_Ne) )
CALL ApplyEquationOfState &
( uPF(iNodeX,iX1,iX2,iX3,iPF_D ), &
uAF(iNodeX,iX1,iX2,iX3,iAF_T ), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Ye), &
uAF(iNodeX,iX1,iX2,iX3,iAF_P ), &
uAF(iNodeX,iX1,iX2,iX3,iAF_S ), &
uAF(iNodeX,iX1,iX2,iX3,iAF_E ), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Me), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Mp), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Mn), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Xp), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Xn), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Xa), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Xh), &
uAF(iNodeX,iX1,iX2,iX3,iAF_Gm) )
CALL ComputeConserved_Euler_Relativistic &
( uPF(iNodeX,iX1,iX2,iX3,iPF_D ), &
uPF(iNodeX,iX1,iX2,iX3,iPF_V1), &
uPF(iNodeX,iX1,iX2,iX3,iPF_V2), &
uPF(iNodeX,iX1,iX2,iX3,iPF_V3), &
uPF(iNodeX,iX1,iX2,iX3,iPF_E ), &
uPF(iNodeX,iX1,iX2,iX3,iPF_Ne), &
uCF(iNodeX,iX1,iX2,iX3,iCF_D ), &
uCF(iNodeX,iX1,iX2,iX3,iCF_S1), &
uCF(iNodeX,iX1,iX2,iX3,iCF_S2), &
uCF(iNodeX,iX1,iX2,iX3,iCF_S3), &
uCF(iNodeX,iX1,iX2,iX3,iCF_E ), &
uCF(iNodeX,iX1,iX2,iX3,iCF_Ne), &
uGF(iNodeX,iX1,iX2,iX3,iGF_Gm_dd_11), &
uGF(iNodeX,iX1,iX2,iX3,iGF_Gm_dd_22), &
uGF(iNodeX,iX1,iX2,iX3,iGF_Gm_dd_33), &
uAF(iNodeX,iX1,iX2,iX3,iAF_P ) )
END DO
END DO
END DO
END DO
END DO
END DO
END ASSOCIATE ! R1D, etc
! --- Iterate to incorporate gravity in initial conditions ---
CONVERGED = .FALSE.
ITER = 0
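    ! The metric fields (conformal factor Psi and lapse Alpha) depend on the
    ! conserved fluid fields, which in turn depend on the metric, so the two
    ! are iterated to a fixed point: convergence is declared when the largest
    ! relative change in Alpha and Psi falls below 1.0e-13.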
DO WHILE( .NOT. CONVERGED )
ITER = ITER + 1
dAl1 = uGF(:,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3),iGF_Alpha)
dCF1 = uGF(:,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3),iGF_Psi )
CALL MultiplyByPsi6( iX_B1, iX_E1, uGF, uCF )
CALL ComputeMatterSources_Poseidon &
( iX_B0, iX_E0, iX_B1, iX_E1, uGF, uCF, E, Si, Mg )
CALL ComputeConformalFactor_Poseidon &
( iX_B0, iX_E0, iX_B1, iX_E1, E, Si, Mg, uGF )
CALL ComputePressureTensorTrace_Poseidon &
( iX_B0, iX_E0, iX_B1, iX_E1, uGF, uCF, S )
CALL ComputeLapseAndShift_Poseidon &
( iX_B0, iX_E0, iX_B1, iX_E1, E, S, Si, uGF )
dAl2 = uGF(:,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3),iGF_Alpha)
dCF2 = uGF(:,iX_B0(1):iX_E0(1), &
iX_B0(2):iX_E0(2), &
iX_B0(3):iX_E0(3),iGF_Psi )
    dAlpha = MAXVAL( ABS( dAl2 - dAl1 ) / ( Half * ( dAl1 + dAl2 ) ) )
    dPsi   = MAXVAL( ABS( dCF2 - dCF1 ) / ( Half * ( dCF1 + dCF2 ) ) )
DO iX3 = iX_B0(3), iX_E0(3)
DO iX2 = iX_B0(2), iX_E0(2)
DO iX1 = iX_B0(1), iX_E1(1)
CALL ComputeConserved_Euler_Relativistic &
( uPF(:,iX1,iX2,iX3,iPF_D ), uPF(:,iX1,iX2,iX3,iPF_V1), &
uPF(:,iX1,iX2,iX3,iPF_V2), uPF(:,iX1,iX2,iX3,iPF_V3), &
uPF(:,iX1,iX2,iX3,iPF_E ), uPF(:,iX1,iX2,iX3,iPF_Ne), &
uCF(:,iX1,iX2,iX3,iCF_D ), uCF(:,iX1,iX2,iX3,iCF_S1), &
uCF(:,iX1,iX2,iX3,iCF_S2), uCF(:,iX1,iX2,iX3,iCF_S3), &
uCF(:,iX1,iX2,iX3,iCF_E ), uCF(:,iX1,iX2,iX3,iCF_Ne), &
uGF(:,iX1,iX2,iX3,iGF_Gm_dd_11), &
uGF(:,iX1,iX2,iX3,iGF_Gm_dd_22), &
uGF(:,iX1,iX2,iX3,iGF_Gm_dd_33), &
uAF(:,iX1,iX2,iX3,iAF_P) )
END DO
END DO
END DO
IF( MAX( dAlpha, dPsi ) .LT. 1.0e-13_DP ) CONVERGED = .TRUE.
IF( ITER .EQ. 10 )THEN
WRITE(*,*) 'Could not initialize fields. Exiting...'
STOP
END IF
END DO
CALL ApplySlopeLimiter_Euler_Relativistic_TABLE &
( iX_B0, iX_E0, iX_B1, iX_E1, uGF, uCF, uDF )
CALL ApplyPositivityLimiter_Euler_Relativistic_TABLE &
( iX_B0, iX_E0, iX_B1, iX_E1, uGF, uCF )
CALL MultiplyByPsi6( iX_B1, iX_E1, uGF, uCF )
CALL ComputeMatterSources_Poseidon &
( iX_B0, iX_E0, iX_B1, iX_E1, uGF, uCF, E, Si, Mg )
CALL ComputeConformalFactor_Poseidon &
( iX_B0, iX_E0, iX_B1, iX_E1, E, Si, Mg, uGF )
CALL ComputePressureTensorTrace_Poseidon &
( iX_B0, iX_E0, iX_B1, iX_E1, uGF, uCF, S )
CALL ComputeLapseAndShift_Poseidon &
( iX_B0, iX_E0, iX_B1, iX_E1, E, S, Si, uGF )
CALL DivideByPsi6( iX_B1, iX_E1, uGF, uCF )
END SUBROUTINE InitializeFields_GravitationalCollapse
REAL(DP) FUNCTION Interpolate1D( x, y, n, xq )
INTEGER, INTENT(in) :: n
REAL(DP), DIMENSION(n), INTENT(in) :: x, y
REAL(DP), INTENT(in) :: xq
INTEGER :: i
i = Locate( xq, x, n )
IF( i == 0 )THEN
! --- Extrapolate Left ---
Interpolate1D &
= Interpolate1D_Linear( xq, x(1), x(2), y(1), y(2) )
ELSE IF( i == n )THEN
! --- Extrapolate Right ---
Interpolate1D &
= Interpolate1D_Linear( xq, x(n-1), x(n), y(n-1), y(n) )
ELSE
Interpolate1D &
= Interpolate1D_Linear( xq, x(i), x(i+1), y(i), y(i+1) )
END IF
RETURN
END FUNCTION Interpolate1D
END MODULE InitializationModule_Relativistic
--- source: endeve/thornado :: SandBox/GravitationalCollapse_CFA/InitializationModule_Relativistic.f90 (Fortran, BSD-3-Clause) ---
#!/usr/bin/python3
import numpy
import time
import scipy.optimize
from matplotlib import pylab
from frc971.control_loops.python import controls
dt = 0.05
def RungeKutta(f, x, dt):
"""4th order RungeKutta integration of F starting at X."""
a = f(x)
b = f(x + dt / 2.0 * a)
c = f(x + dt / 2.0 * b)
d = f(x + dt * c)
return x + dt * (a + 2.0 * b + 2.0 * c + d) / 6.0
def dynamics(X, U):
"""Calculates the dynamics for a double jointed arm.
Args:
X, numpy.matrix(4, 1), The state. [theta1, omega1, theta2, omega2]
U, numpy.matrix(2, 1), The input. [torque1, torque2]
Returns:
numpy.matrix(4, 1), The derivative of the dynamics.
"""
return numpy.matrix([[X[1, 0]],
[U[0, 0]],
[X[3, 0]],
[U[1, 0]]])
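
# Discretize the continuous dynamics with RK4 under a zero-order hold on U;
# the NMPC below plans over this discrete-time model with timestep dt.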
def discrete_dynamics(X, U):
return RungeKutta(lambda startingX: dynamics(startingX, U), X, dt)
def U_from_array(U_array):
"""Converts the U array from the optimizer to a bunch of column vectors.
Args:
U_array, numpy.array[N] The U coordinates in v, av, v, av, ...
Returns:
numpy.matrix[2, N/2] with [[v, v, v, ...], [av, av, av, ...]]
"""
return numpy.matrix(U_array).reshape((2, -1), order='F')
def U_to_array(U_matrix):
"""Converts the U matrix to the U array for the optimizer.
Args:
U_matrix, numpy.matrix[2, N/2] with [[v, v, v, ...], [av, av, av, ...]]
Returns:
numpy.array[N] The U coordinates in v, av, v, av, ...
"""
return numpy.array(U_matrix.reshape((1, -1), order='F'))
def numerical_jacobian_x(fn, X, U, epsilon=1e-4):
"""Numerically estimates the jacobian around X, U in X.
Args:
fn: A function of X, U.
X: numpy.matrix(num_states, 1), The state vector to take the jacobian
around.
U: numpy.matrix(num_inputs, 1), The input vector to take the jacobian
around.
Returns:
numpy.matrix(num_states, num_states), The jacobian of fn with X as the
variable.
"""
num_states = X.shape[0]
nominal = fn(X, U)
answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_states)))
# It's more expensive, but +- epsilon will be more reliable
for i in range(0, num_states):
dX_plus = X.copy()
dX_plus[i] += epsilon
dX_minus = X.copy()
dX_minus[i] -= epsilon
answer[:, i] = (fn(dX_plus, U) - fn(dX_minus, U)) / epsilon / 2.0
return answer
def numerical_jacobian_u(fn, X, U, epsilon=1e-4):
"""Numerically estimates the jacobian around X, U in U.
Args:
fn: A function of X, U.
X: numpy.matrix(num_states, 1), The state vector to take the jacobian
around.
U: numpy.matrix(num_inputs, 1), The input vector to take the jacobian
around.
Returns:
numpy.matrix(num_states, num_inputs), The jacobian of fn with U as the
variable.
"""
num_inputs = U.shape[0]
nominal = fn(X, U)
answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_inputs)))
for i in range(0, num_inputs):
dU_plus = U.copy()
dU_plus[i] += epsilon
dU_minus = U.copy()
dU_minus[i] -= epsilon
answer[:, i] = (fn(X, dU_plus) - fn(X, dU_minus)) / epsilon / 2.0
return answer
class Cost(object):
def __init__(self):
q_pos = 0.5
q_vel = 1.65
self.Q = numpy.matrix(numpy.diag([
1.0 / (q_pos ** 2.0), 1.0 / (q_vel ** 2.0),
1.0 / (q_pos ** 2.0), 1.0 / (q_vel ** 2.0)]))
self.R = numpy.matrix(numpy.diag([1.0 / (12.0 ** 2.0),
1.0 / (12.0 ** 2.0)]))
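
        # Linearize the discrete dynamics about the origin; dlqr() then yields
        # S, the quadratic cost-to-go used as the terminal cost in cost().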
final_A = numerical_jacobian_x(discrete_dynamics,
numpy.matrix(numpy.zeros((4, 1))),
numpy.matrix(numpy.zeros((2, 1))))
final_B = numerical_jacobian_u(discrete_dynamics,
numpy.matrix(numpy.zeros((4, 1))),
numpy.matrix(numpy.zeros((2, 1))))
#print 'Final A', final_A
#print 'Final B', final_B
K, self.S = controls.dlqr(
final_A, final_B, self.Q, self.R, optimal_cost_function=True)
        print(K)
        print(self.S)
def cost(self, U_array, X):
"""Computes the cost given the inital position and the U array.
Args:
U_array: numpy.array[N] The U coordinates.
X: numpy.matrix[3, 1] The cartesian coordinates of the starting
location.
Returns:
double, The quadratic cost of evaluating U.
"""
X_mod = X.copy()
U_matrix = U_from_array(U_array)
my_cost = 0
for U in U_matrix.T:
# Handle a keep out zone.
penalized_cost = 0.0
if X_mod[0, 0] > 0.5 and X_mod[2, 0] < 0.8:
out_of_bound1 = 0.8 - X_mod[2, 0]
penalized_cost += 1000.0 * (out_of_bound1 ** 2.0 + 0.1 * out_of_bound1)
out_of_bound2 = X_mod[0, 0] - 0.5
penalized_cost += 1000.0 * (out_of_bound2 ** 2.0 + 0.1 * out_of_bound2)
U = U.T
my_cost += U.T * self.R * U + X_mod.T * self.Q * X_mod + penalized_cost
X_mod = discrete_dynamics(X_mod, U)
return my_cost + 0.5 * X_mod.T * self.S * X_mod
# TODO(austin): Add Parker's constraints in.
# TODO(austin): Real dynamics from dad.
# TODO(austin): Look at grid resolution needed. Grid a section in open space
# and look at curvature of the grid. Try motions using the grid.
#
# https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.interpolate.RegularGridInterpolator.html
# Look at a C++ version so we can build a large space.
# TODO(austin): Larger timesteps further out?
if __name__ == '__main__':
X = numpy.matrix([[1.0],
[0.0],
[1.0],
[0.0]])
theta1_array = []
omega1_array = []
theta2_array = []
omega2_array = []
cost_array = []
time_array = []
u0_array = []
u1_array = []
num_steps = 40
cost_obj = Cost()
U_array = numpy.zeros((num_steps * 2))
for i in range(400):
print('Iteration', i)
start_time = time.time()
# Solve the NMPC
U_array, fx, _, _, _ = scipy.optimize.fmin_slsqp(
cost_obj.cost, U_array.copy(), bounds = [(-12, 12), (-12, 12)] * num_steps,
args=(X,), iter=500, full_output=True)
U_matrix = U_from_array(U_array)
# Save variables for plotting.
cost_array.append(fx[0, 0])
u0_array.append(U_matrix[0, 0])
u1_array.append(U_matrix[1, 0])
theta1_array.append(X[0, 0])
omega1_array.append(X[1, 0])
theta2_array.append(X[2, 0])
omega2_array.append(X[3, 0])
time_array.append(i * dt)
# Simulate the dynamics
X = discrete_dynamics(X, U_matrix[:, 0])
U_array = U_to_array(numpy.hstack((U_matrix[:, 1:], numpy.matrix([[0], [0]]))))
        print('Took %f to evaluate' % (time.time() - start_time))
if fx < 1e-05:
print('Cost is', fx, 'after', i, 'cycles, finishing early')
break
# Plot
pylab.figure('trajectory')
pylab.plot(theta1_array, theta2_array, label = 'trajectory')
    fig, ax1 = pylab.subplots()
fig.suptitle('time')
ax1.plot(time_array, theta1_array, label='theta1')
ax1.plot(time_array, omega1_array, label='omega1')
ax1.plot(time_array, theta2_array, label = 'theta2')
ax1.plot(time_array, omega2_array, label='omega2')
ax2 = ax1.twinx()
ax2.plot(time_array, cost_array, 'k', label='cost')
ax1.legend(loc=2)
ax2.legend()
pylab.figure('u')
pylab.plot(time_array, u0_array, label='u0')
pylab.plot(time_array, u1_array, label='u1')
pylab.legend()
pylab.show()
--- source: Ewpratten/frc_971_mirror :: y2018/control_loops/python/arm_mpc.py (Python, BSD-2-Clause) ---
module FineShiftTests
using Test, FineShift, InterpolationKernels
function randu(T::Type{<:AbstractFloat}; max::Real=1, min::Real=0)
_min = T(min)
_max = T(max)
return (_max - _min)*rand(T) + _min
end
function randu(T::Type{<:AbstractFloat}, dims::Dims; max::Real=1, min::Real=0)
A = rand(T, dims)
if max != 1 || min != 0
a = T(max) - T(min)
b = T(min)
@inbounds @simd for i in eachindex(A)
A[i] = a*A[i] + b
end
end
return A
end
function FineShift.fineshift(dims::NTuple{N,Int},
src::AbstractArray{T,N},
ker::Kernel{T},
off::NTuple{N,Real},
adj::Bool = false) where {T<:AbstractFloat,N}
return _fineshift(dims, src, ker, off, adj, Val(N))
end
function _fineshift(dims::NTuple{N,Int},
src::AbstractArray{T,N},
ker::Kernel{T},
off::NTuple{N,Real},
adj::Bool,
::Val{1}) where {T<:AbstractFloat,N}
return fineshift(dims[1], src, ker, off[1], 1, adj)
end
function _fineshift(dims::NTuple{N,Int},
src::AbstractArray{T,N},
ker::Kernel{T},
off::NTuple{N,Real},
adj::Bool,
::Val{D}) where {T<:AbstractFloat,N,D}
return fineshift(dims[D], _fineshift(dims, src, ker, off, adj, Val(D-1)),
ker, off[D], D, adj)
end
function vdot(x::AbstractArray{Tx,N},
y::AbstractArray{Ty,N}) where {Tx<:Real,Ty<:Real,N}
T = float(promote_type(Tx,Ty))
local s::T = 0
@assert axes(x) == axes(y)
@inbounds for i in eachindex(x, y)
s += T(x[i])*T(y[i])
end
return s
end
@testset "Fast correlation code" begin
function splitindices(m::Int, n::Int, S::Int, l::Int)
        # This mimics how the 1D correlation code splits indices into 4
        # different intervals.
p = S - l
i1 = 1 - p
i2 = n - p
i3 = l - 1 + n
I1 = 1:min(i1,m)
I2 = max(i1+1,1):min(i2,m)
I3 = max(i2+1,1):min(i3-1,m)
I4 = max(i3,1):m
J = zeros(Int, S)
if length(I2) > 0
j = first(I2) - l
for k in 1:S-1
J[k+1] = clamp(j+k,1,n)
end
elseif length(I3) > 0
j = first(I3) - l
for k in 1:S-1
J[k+1] = clamp(j+k,1,n)
end
end
for i in I1
checksplitindices(m, n, S, l, i, 1)
end
for i in I2
J[1:S-1] = J[2:S]
J[S] = p + i
checksplitindices(m, n, S, l, i, J)
end
for i in I3
J[1:S-1] = J[2:S]
checksplitindices(m, n, S, l, i, J)
end
for i in I4
checksplitindices(m, n, S, l, i, n)
end
return true
end
function checksplitindices(m::Int, n::Int, S::Int, l::Int,
i::Int, Jp::Vector{Int})
for k = 1:S
j = clamp(i-l+k,1,n)
jp = Jp[k]
jp == j || error("(i,k)=($i,$k) j=$j, not $jp")
end
end
function checksplitindices(m::Int, n::Int, S::Int, l::Int,
i::Int, jp::Int)
for k = 1:S
j = clamp(i-l+k,1,n)
jp == j || error("(i,k)=($i,$k) j=$j, not $jp")
end
end
# For offset t and kernel support size S, l = floor(S+1+t), hence to
# explore all cases we want t in -(S+1):(S+1), that is l in 0:2(S+1)
# with S = 4 (as assumed in the tests) we consider l in -1:10
@testset "(l,m,n) = ($l,$m,$n)" for m in (3,6), n in (2,4,8), l in -1:10
@test splitindices(m, n, 4, l)
end
end
@testset "Fine shift with zero offset" begin
@testset "Dimensions: $dims" for dims in ((100,), (20,30), (10,20,30))
T = Float64
N = length(dims)
ker = CatmullRomSpline{T}()
src = randu(T, dims; min=-1, max=1)
off = ntuple(i -> zero(T), N)
dst = fineshift(dims, src, ker, off)
@test dst == src
end
end
@testset "Direct/adjoint fine shift" begin
@testset "Dimensions: $dims" for dims in ((100,), (20,30), (10,20,30))
T = Float64
N = length(dims)
ker = CatmullRomSpline{T}()
xdims = dims
ydims = ntuple(d -> (isodd(d) ? dims[d] - 1 : dims[d] + 2), N)
x = randu(T, xdims; min=-1, max=1)
y = randu(T, ydims; min=-1, max=1)
off = ntuple(i -> randu(T; min=-3, max=3), N)
@test (vdot(y, fineshift(ydims, x, ker, off, false)) ≈
vdot(x, fineshift(xdims, y, ker, off, true )))
end
end
@testset "Fine shift of a smooth function" begin
function f(x::T) where {T<:AbstractFloat}
q = T(0.03)
r = T(1.2)
return exp(-q*x*x)*cos(x - r)
end
s = 5.3 # offset in number of samples
t = -40:0.05:40
y1 = f.(t)
z1 = fineshift(length(t), y1, CatmullRomSpline(), s)
z2 = f.(t .- s*step(t))
@test maximum(abs.(z1 .- z2)) < 1e-5
end
end # module
--- source: emmt/FineShift.jl :: test/runtests.jl (Julia, MIT) ---
import unittest
import numpy
import chainer
from chainer import cuda
from chainer.testing import attr
from deepmark_chainer.net import inception_v3
class TestInceptionV3(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (1, 3, 299, 299)).astype(numpy.float32)
self.l = inception_v3.InceptionV3()
def check_forward(self, xp):
x = chainer.Variable(xp.asarray(self.x))
self.l(x)
def test_forward_cpu(self):
self.check_forward(numpy)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.cupy)
--- source: delta2323/chainer-deepmark :: tests/net_tests/test_inception_v3.py (Python, MIT) ---
#!/usr/bin/env python3
import numpy as np
import torchvision.datasets as dset
import torchvision.transforms as transforms
data = dset.CIFAR10(root='cifar', train=True, download=True,
transform=transforms.ToTensor()).train_data
data = data.astype(np.float32)/255.
means = []
stdevs = []
for i in range(3):
    # train_data is stored channels-last (N, H, W, C), so take the last axis
    pixels = data[:, :, :, i].ravel()
    means.append(np.mean(pixels))
    stdevs.append(np.std(pixels))
print("means: {}".format(means))
print("stdevs: {}".format(stdevs))
print('transforms.Normalize(mean = {}, std = {})'.format(means, stdevs))
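
# For reference, the widely quoted CIFAR-10 channel statistics are roughly
# mean = [0.491, 0.482, 0.447] and std = [0.247, 0.243, 0.262].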
--- source: zwh930712/densenet.pytorch :: compute-cifar10-mean.py (Python, Apache-2.0) ---
import cv2
import sys
import database
import Gui
class Detector:
def recognize(self):
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
if sys.platform == 'win32':
from imutils.video import WebcamVideoStream
cap = WebcamVideoStream(src=0).start()
else:
from imutils.video.pivideostream import PiVideoStream
cap = PiVideoStream().start()
font = cv2.FONT_HERSHEY_COMPLEX
d = database.Database().getAll()
ls = {}
for doc in d:
ls[doc['id']] = doc['name']
name = 'unknown'
while True:
im = cap.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.2, 5)
for (x, y, w, h) in faces:
cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
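                # LBPH "confidence" is a distance measure: lower values mean
                # a closer match to the trained face.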
if sys.platform == 'win32':
if (conf <= 70):
name = ls.get(Id)
print(name, Id, conf)
else:
name = "who?"
else:
if (conf >= 50):
name = ls.get(Id)
print(name, Id, conf)
else:
name = "who?"
cv2.putText(im, str(name), (x, y + h), font, 1, (255, 255, 255))
cv2.imshow('im', im)
if cv2.waitKey(10) == ord('q'):
break
cap.stop()
cv2.destroyAllWindows()
Gui.Gui()
# n = Detector().recognize()
--- source: napsterstiffler/faceoff :: detector.py (Python, MIT) ---
# Date: Friday 21 July 2017
# Email: nrupatunga@whodat.com
# Name: Nrupatunga
# Description: Training the tracker
from ..helper import config
import argparse
import setproctitle
from ..logger.logger import setup_logger
from ..loader.loader_imagenet import loader_imagenet
from ..loader.loader_alov import loader_alov
from ..train.example_generator import example_generator
from ..network.regressor_train import regressor_train
from ..tracker.tracker_trainer import tracker_trainer
import os
import numpy as np
import time
import logging
import tensorflow as tf
import goturn_net  # assumed importable, as in the GOTURN-Tensorflow repo
from .EntryGenerator import EntryGenerator
setproctitle.setproctitle('TRAIN_TRACKER_IMAGENET_ALOV')
logger = setup_logger(logfile=None)
logger.info('Caffe path = {}'.format(config.CAFFE_PATH))
ap = argparse.ArgumentParser()
ap.add_argument("-imagenet", "--imagenet", required=True, help="Path to ImageNet folder")
ap.add_argument("-alov", "--alov", required=True, help="Path to Alov folder")
ap.add_argument("-init_caffemodel", "--init_caffemodel", required=True, help="Path to caffe Init model")
ap.add_argument("-train_prototxt", "--train_prototxt", required=True, help="train prototxt")
ap.add_argument("-solver_prototxt", "--solver_prototxt", required=True, help="solver prototxt")
ap.add_argument("-lamda_shift", "--lamda_shift", required=True, help="lamda shift")
ap.add_argument("-lamda_scale", "--lamda_scale", required=True, help="lamda scale ")
ap.add_argument("-min_scale", "--min_scale", required=True, help="min scale")
ap.add_argument("-max_scale", "--max_scale", required=True, help="max scale")
ap.add_argument("-gpu_id", "--gpu_id", required=True, help="gpu id")
RANDOM_SEED = 800
GPU_ONLY = True
kNumBatches = 500000
# BATCH_SIZE and NUM_EPOCHS are used by the training loop below but were not
# defined in this file; the values here are placeholder assumptions.
BATCH_SIZE = 50
NUM_EPOCHS = 100
# TODO: create new class from https://towardsdatascience.com/how-to-quickly-build-a-tensorflow-training-pipeline-15e9ae4d78a0
class Dataset(object):
    def __init__(self, generator=EntryGenerator()):
self.next_element = self.build_iterator(generator)
    def build_iterator(self, entry_gen: EntryGenerator):
        batch_size = 10
        prefetch_batch_buffer = 5  # not set in the original; an assumed value
        # output_types must be tf dtypes (the dtypes below are assumptions)
        dataset = tf.data.Dataset.from_generator(
            entry_gen.get_next_entry,
            output_types={EntryGenerator.image: tf.float32, EntryGenerator.target: tf.float32,
                          EntryGenerator.bbox_x1: tf.int64, EntryGenerator.bbox_y1: tf.int64,
                          EntryGenerator.bbox_x2: tf.int64, EntryGenerator.bbox_y2: tf.int64})
        # dataset = dataset.map() - don't even need this
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(prefetch_batch_buffer)
        iterator = dataset.make_one_shot_iterator()
        return iterator.get_next()
def train_image(image_loader, images, tracker_trainer):
"""TODO: Docstring for train_image.
"""
curr_image = np.random.randint(0, len(images))
list_annotations = images[curr_image]
curr_ann = np.random.randint(0, len(list_annotations))
image, bbox = image_loader.load_annotation(curr_image, curr_ann)
tracker_trainer.train(image, image, bbox, bbox)
def train_video(videos, tracker_trainer):
"""TODO: Docstring for train_video.
"""
video_num = np.random.randint(0, len(videos))
video = videos[video_num]
annotations = video.annotations
if len(annotations) < 2:
        logger.info('Error - video {} has only {} annotations'.format(video.video_path, len(annotations)))
ann_index = np.random.randint(0, len(annotations) - 1)
frame_num_prev, image_prev, bbox_prev = video.load_annotation(ann_index)
frame_num_curr, image_curr, bbox_curr = video.load_annotation(ann_index + 1)
tracker_trainer.train(image_prev, image_curr, bbox_prev, bbox_curr)
def main(args):
"""TODO: Docstring for main.
"""
logger.info('Loading training data')
# Load imagenet training images and annotations
# imagenet_folder = os.path.join(args['imagenet'], 'images')
#imagenet_annotations_folder = os.path.join(args['imagenet'], 'gt')
#objLoaderImgNet = loader_imagenet(imagenet_folder, imagenet_annotations_folder, logger)
#train_imagenet_images = objLoaderImgNet.loaderImageNetDet()
# Load alov training images and annotations
alov_folder = os.path.join(args['alov'], 'images')
alov_annotations_folder = os.path.join(args['alov'], 'gt')
objLoaderAlov = loader_alov(alov_folder, alov_annotations_folder, logger)
objLoaderAlov.loaderAlov()
train_alov_videos = objLoaderAlov.get_videos()
# create example generator and setup the network
objExampleGen = example_generator(float(args['lamda_shift']), float(args['lamda_scale']), float(args['min_scale']), float(args['max_scale']), logger)
objRegTrain = regressor_train(args['train_prototxt'], args['init_caffemodel'], int(args['gpu_id']), args['solver_prototxt'], logger)
objTrackTrainer = tracker_trainer(objExampleGen, objRegTrain, logger)
# NEW GOTURN PATH
tracknet = goturn_net.TRACKNET(BATCH_SIZE)
tracknet.build()
    # wrap the generator output into a single input-pipeline object
    # (see the Dataset class above); batch_queue yields one batch per sess.run
    batch_queue = Dataset().next_element
global_step = tf.Variable(0, trainable=False, name="global_step")
train_step = tf.train.AdamOptimizer(0.00001, 0.9).minimize( \
tracknet.loss_wdecay, global_step=global_step)
merged_summary = tf.summary.merge_all()
sess = tf.Session()
train_writer = tf.summary.FileWriter('./train_summary', sess.graph)
init = tf.global_variables_initializer()
init_local = tf.local_variables_initializer()
sess.run(init)
sess.run(init_local)
ckpt_dir = "./checkpoints"
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
start = 0
if ckpt and ckpt.model_checkpoint_path:
start = int(ckpt.model_checkpoint_path.split("-")[1])
logging.info("start by iteration: %d" % (start))
saver = tf.train.Saver()
saver.restore(sess, ckpt.model_checkpoint_path)
assign_op = global_step.assign(start)
sess.run(assign_op)
model_saver = tf.train.Saver(max_to_keep=3)
try:
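        # NOTE: train_box is not defined in this file; it is assumed to be the
        # list of training examples produced by the repo's data-loading code.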
for i in range(start, int(len(train_box) / BATCH_SIZE * NUM_EPOCHS)):
if i % int(len(train_box) / BATCH_SIZE) == 0:
logging.info("start epoch[%d]" % (int(i / len(train_box) * BATCH_SIZE)))
if i > start:
save_ckpt = "checkpoint.ckpt"
last_save_itr = i
model_saver.save(sess, "checkpoints/" + save_ckpt, global_step=i + 1)
print(global_step.eval(session=sess))
cur_batch = sess.run(batch_queue)
start_time = time.time()
[_, loss] = sess.run([train_step, tracknet.loss], feed_dict={tracknet.image: cur_batch[0],
tracknet.target: cur_batch[1],
tracknet.bbox: cur_batch[2]})
logging.debug(
'Train: time elapsed: %.3fs, average_loss: %f' % (time.time() - start_time, loss / BATCH_SIZE))
if i % 10 == 0 and i > start:
summary = sess.run(merged_summary, feed_dict={tracknet.image: cur_batch[0],
tracknet.target: cur_batch[1],
tracknet.bbox: cur_batch[2]})
train_writer.add_summary(summary, i)
except KeyboardInterrupt:
print("get keyboard interrupt")
if (i - start > 1000):
model_saver = tf.train.Saver()
save_ckpt = "checkpoint.ckpt"
            model_saver.save(sess, "checkpoints/" + save_ckpt, global_step=i + 1)


if __name__ == '__main__':
    args = vars(ap.parse_args())
    main(args)
--- source: theplaineric/GOTURN-Tensorflow :: train_pygo/train.py (Python, MIT) ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions for the construction of second-order pose data and regressors
"""
import numpy as np
import pandas as pd
import scipy.stats
from scipy.spatial.distance import pdist, squareform, cdist, euclidean
from sklearn.cluster import AgglomerativeClustering, KMeans
from psypose import utils
import os
def cluster_ID(pose, metric='cosine', linkage='average', overwrite=False, use_cooccurence=True):
"""
Clusters all tracks based on facial encodings attached to each track.
It is recommended to use the cosine metric and average linkage for best results.
Outputs a dictionary where keys are cluster ID's and values are lists of tracks.
"""
if overwrite:
pose.clusters=None
if pose.is_clustered and not overwrite:
raise Exception('Pose object has already been clustered. Set overwrite=True to overwite previous clustering.')
face_data = pose.face_data
pose_data = pose.pose_data
pose.encoding_length = len([i for i in list(face_data.columns) if 'enc' in i])
fr_ids = []
# here, iterating through all rows of the face_rec data frame (all available frames)
# checking if each frame is listed within any of the VIBE tracks
# if overlap frames are detected, the algo will check if the face box is within
# the MEVA bounding box. If True, then that frame will get that track ID
for r in range(len(face_data)):
row = face_data.iloc[r]
frame = int(row['frame'])
track_id_list = []
for t in np.unique(list(pose_data.keys())):
track = pose_data.get(t)
track_frames = track.get('frame_ids')
if frame in track_frames:
track_id_list.append(t)
if len(track_id_list)!=0:
for track_id in track_id_list:
box_loc = np.where(pose_data.get(track_id).get('frame_ids')==frame)[0][0]
box = pose_data.get(track_id).get('bboxes')[box_loc]
if utils.check_match(box, utils.get_bbox(row)):
track_designation = int(track_id)
break
else:
# track designation is 'no_match' if the face is not within body box,
# those rows will be removed.
track_designation = 'no_match'
fr_ids.append(track_designation)
else:
fr_ids.append('no_match')
face_data['track_id'] = fr_ids
pose.face_data = face_data
#removing face encodings with no match
face_data = face_data[face_data['track_id']!='no_match']
# here, I'm going through each unique track and getting an average face encoding
enc_columns = [i for i in face_data.columns if 'enc' in i]
avg_encodings = []
enc_tracks = []
for loc, track in enumerate([int(i) for i in np.unique(face_data['track_id'])]):
tr_df = face_data[face_data['track_id']==track]
avg_encodings.append(np.mean(tr_df[enc_columns].to_numpy(), axis=0))
enc_tracks.append(track)
track_enc_avgs = np.array(avg_encodings)
#print(track_enc_avgs.shape)
track_encoding_avgs = dict(zip(enc_tracks, avg_encodings))
# here, we cluster all of the averaged track encodings.
# tracks containing the same face will be concatenated later
# Here, I'm going to compute the between-track distances using only co-occuring tracks
# I'll first create a co-occurence matrix
n_tracks = len(np.unique(face_data['track_id']))
    # Getting a list of track IDs with faces in them.
opt_tracks = np.unique(face_data['track_id']).tolist()
# Initialize empty co-occurence matrix
cooc = np.zeros((n_tracks, n_tracks))
for i, track1 in enumerate(opt_tracks):
frames_1 = pose_data.get(track1).get('frame_ids')
for j, track2 in enumerate(opt_tracks):
frames_2 = pose_data.get(track2).get('frame_ids')
if len(np.intersect1d(frames_1, frames_2))>0:
cooc[i][j]=True
cooc = cooc[np.tril_indices_from(cooc, k=-1)]
#get per-track average encodings with nested list comprehension (small python flex)
track_encoding_avgs = [np.mean(enc, axis=0) for enc in
[face_data[face_data['track_id']==track][enc_columns].to_numpy()
for track in opt_tracks]]
encoding_averages = dict(zip(opt_tracks, track_encoding_avgs))
pose.track_encoding_avgs = encoding_averages
avg_enc_array = np.empty((len(encoding_averages), pose.encoding_length))
for i, track in enumerate(list(encoding_averages.keys())):
avg_enc_array[i] = encoding_averages[track]
pose.average_encoding_array = avg_enc_array
encoding_dist_mat = squareform(pdist(track_encoding_avgs, metric=metric))
all_track_dist = encoding_dist_mat[np.tril_indices_from(encoding_dist_mat, k=-1)]
inter_track_distances = np.extract(cooc, all_track_dist)
# get the within-track distances
# for each track, a distance matrix of all encodings is made
# the average distances is taken
intra_track_encodings = [face_data[face_data['track_id']==tr][enc_columns] for tr in opt_tracks]
intra_track_distances = np.array([np.mean(pdist(encs, metric=metric)) for encs in intra_track_encodings])
intra_track_distances = intra_track_distances[~np.isnan(intra_track_distances)]
all_distances = np.concatenate((inter_track_distances, intra_track_distances))
all_distances = all_distances.reshape(-1,1)
#dist_cluster = AgglomerativeClustering(linkage='ward', distance_threshold=0, n_clusters=None).fit(all_distances)
kmeans = KMeans(n_clusters=2).fit(all_distances)
#clustered = kmeans.fit_transform(all_distances)
clus0 = all_distances[np.where(kmeans.labels_==0)]
clus1 = all_distances[np.where(kmeans.labels_==1)]
cut = float(max(clus0) + (min(clus1)-max(clus0))/2)
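    # The midpoint between the two 1-D k-means clusters separates within-track
    # ("same face") distances from between-track ones; it is reused below as
    # the agglomerative-clustering distance threshold.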
# This final clustering uses the cut as a distance threshold.
# Tracks in the same cluster will now be consolidated.
final_clust = AgglomerativeClustering(linkage=linkage, distance_threshold=cut,
n_clusters=None, affinity=metric).fit(track_enc_avgs)
pose.face_clustering = final_clust
clusters = []
cluster_tracks = []
enc_tracks = np.array(enc_tracks)
for clus in np.unique(final_clust.labels_):
clusters.append(clus)
tracks = enc_tracks[np.where(final_clust.labels_==clus)[0]]
cluster_tracks.append(tracks)
fc = dict(zip(clusters, cluster_tracks))
cluster_mains = [k for k in sorted(fc, key=lambda k: len(fc[k]), reverse=True)]
t = [fc.get(clust) for clust in cluster_mains]
sorted_dict = dict(zip([i for i in range(0, len(t))], t))
# add cluster ids to face_data
pose.clusters = sorted_dict
pose.is_clustered = True
def name_clusters(pose, character_dict, overwrite_names=False):
pose.character_key = character_dict
    chars = list(character_dict.keys())
names_clustered = {}
for char in chars:
#common_clusters = [k for k, v in character_dict.items() if str(v) == char]
tracks = []
for cluster in character_dict[char]:
cluster_tracks = pose.clusters[cluster]
tracks.extend(cluster_tracks)
names_clustered.update({char:tracks})
names_sorted = sorted(names_clustered, key=lambda k: len(names_clustered[k]), reverse=True)
tracks_sorted = [names_clustered[name] for name in names_sorted]
names_clustered = dict(zip(names_sorted, tracks_sorted))
pose.named_clusters = names_clustered
def imaging_pars(pose, functional = 'a_func_file', TR=2.0):
#make a function that allows you to either manually input imaging parameters
#or provide a sample functional run for parameters (HRF, TR, etc)
#this info will be used to generate the different regressors
pose.TR = TR
def presence_matrix(pose, character_order, hertz=None):
# character order should be an iterable containing strings of character IDs
# that correspond to the labels given in name_clusters()
char_order = np.array(character_order)
# first make an empty array for character appearances
char_auto = np.zeros((pose.n_frames, len(char_order)))
# this is going to label a character presence array with ones and zeros
for i, tracklist in enumerate(list(pose.named_clusters.values())):
character = list(pose.named_clusters.keys())[i]
if character not in char_order:
continue
arr_placement = int(np.where(char_order==character)[0])
for track in tracklist:
track_frames = pose.pose_data.get(track).get('frame_ids')
char_auto[:,arr_placement][track_frames] = 1
char_frame = pd.DataFrame(char_auto, columns=character_order)
char_frame['frame_ids'] = np.arange(pose.n_frames)
pose.full_ID_annotations = char_frame
    if hertz is None:
return char_frame
else:
needed_frames = np.arange(round((1/hertz)*pose.fps), pose.n_frames, round((1/hertz)*pose.fps))
auto_appearances_filt = char_frame.take(needed_frames[:-2], axis=0).reset_index(drop=True)
return auto_appearances_filt
def get_pose_distance(p1, p2):
# p1 and p2 should just be the vectors
# may be the normal static vectors or those derived with numpy.gradient()
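    # the first three components (global/root orientation) are dropped before
    # computing the Euclidean distance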
p1, p2 = p1[3:], p2[3:]
return euclidean(p1, p2)
def synchrony(pose, type='static'):
track_occurence = {}
data = pose.pose_data
for frame in range(pose.framecount):
present_tracks = []
for key, val in data.items():
if frame in val['frame_ids']:
present_tracks.append(key)
track_occurence[frame] = present_tracks
pose.track_occurence = track_occurence
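    # max_distance rescales the mean pairwise pose distance so the synchrony
    # score below lands in [-1, 1]; the constants appear to be empirical bounds.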
if type=='static':
max_distance = 26.096028503877093
elif type=='dynamic':
max_distance = 52.19205700775419
out_vec=[]
for frame in range(pose.framecount):
tracks = track_occurence[frame]
if len(tracks) < 2:
out_vec.append(np.nan)
else:
pose_vectors = []
for track in tracks:
track_data = data[track]
if type=='dynamic':
track_data['pose_gradient'] = np.gradient(track_data['pose'], axis=0)
pose_vectors.append(track_data['pose_gradient'][np.where(track_data['frame_ids']==frame)[0][0]])
elif type=='static':
pose_vectors.append(track_data['pose'][np.where(track_data['frame_ids']==frame)[0][0]])
sync_arr = np.empty((len(pose_vectors), len(pose_vectors)))
for i in range(len(pose_vectors)):
for j in range(len(pose_vectors)):
sync_arr[i][j] = get_pose_distance(pose_vectors[i], pose_vectors[j])
val = np.mean(sync_arr[np.tril_indices_from(sync_arr, k=-1)])
val = -(2*(val/max_distance)) + 1
out_vec.append(val)
return np.array(out_vec)
--- source: scraplab/psypose :: src/psypose/regressors.py (Python, MIT) ---
c**********************************************************************
c IO_INIT_TP.F
c**********************************************************************
c Read in test particle data
c
c Input:
c infile ==> File name to read from (character*80)
c
c Output:
c                 ntp            ==>  number of test particles (int scalar)
c xht,yht,zht ==> initial position in Helio coord
c (real arrays)
c                 vxht,vyht,vzht ==> initial velocity in Helio coord
c (real arrays)
c                 istat           ==>  status of the test particles
c (2d integer array)
c istat(i,1) = 0 active
c istat(i,1) = 1 not
c                 rstat           ==>  status of the test particles
c (2d real array)
c
c
c
c Remarks:
c Authors: Martin Duncan
c Date: 3/2/93
c Last revision: 12/22/95 HFL
subroutine io_init_tp(infile,ntp,xht,yht,zht,vxht,vyht,
& vzht,istat,rstat)
include '../swift.inc'
include 'io.inc'
c... Input
character*(*) infile
c... Output
real*8 xht(NTPMAX),yht(NTPMAX),zht(NTPMAX)
real*8 vxht(NTPMAX),vyht(NTPMAX),vzht(NTPMAX)
real*8 rstat(NTPMAX,NSTATR)
integer istat(NTPMAX,NSTAT)
integer ntp
c... Internal
integer i,j,ierr,ns
c-----
c... Executable code
write(*,*) 'Test particle file called ',infile
call io_open(7,infile,'old','formatted',ierr)
read(7,*) ntp
if(ntp.gt.NTPMAX) then
write(*,*) ' SWIFT ERROR: in io_init_tp: '
write(*,*) ' The number of test bodies,',ntp,','
write(*,*) ' is too large, it must be less than',NTPMAX
call util_exit(1)
endif
write(*,*) ' '
write(*,*) 'ntp : ',ntp
if(ntp.eq.0) then
close(unit = 7)
write(*,*) ' '
return
endif ! <===== NOTE
c... Determine the number of istat and rstat variables. In what follows,
c... we assume that they are the same.
call io_getns(7,ns)
if(ns.ne.NSTAT) then
write(*,*) 'Warning: The size of istat and rstat arrays is '
write(*,*) ' not NSTAT=',NSTAT,', but is ',ns
endif
c Start again:
rewind(7)
read(7,*) ntp
c Read in the x's and v's and istat(*,*)
write(*,*) ' '
do i=1,ntp
read(7,*) xht(i),yht(i),zht(i)
read(7,*) vxht(i),vyht(i),vzht(i)
read(7,*) (istat(i,j),j=1,ns)
read(7,*) (rstat(i,j),j=1,ns)
do j=ns+1,NSTAT
istat(i,j) = 0
enddo
do j=ns+1,NSTATR
rstat(i,j) = 0.0d0
enddo
enddo
close(unit = 7)
write(*,*) ' '
return
end ! io_init_tp.f
c-----------------------------------------------------------------
|
{"hexsha": "f2d089f61b8de7f122c95ff0e726b1dbd10dd4e2", "size": 3062, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/swift_j/io/io_init_tp.f", "max_stars_repo_name": "Simske/exostriker", "max_stars_repo_head_hexsha": "587b0af4c9cadb46637a4ac61a5392a596e966b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 69, "max_stars_repo_stars_event_min_datetime": "2020-01-06T13:31:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T11:23:14.000Z", "max_issues_repo_path": "exostriker/source/swift_j/io/io_init_tp.f", "max_issues_repo_name": "sai-33/Exostriker", "max_issues_repo_head_hexsha": "f59fa51c6bdce3a2ed51d6621fe42bfcd8c2846f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 67, "max_issues_repo_issues_event_min_datetime": "2019-11-30T14:45:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T20:26:06.000Z", "max_forks_repo_path": "exostriker/source/swift_j/io/io_init_tp.f", "max_forks_repo_name": "sai-33/Exostriker", "max_forks_repo_head_hexsha": "f59fa51c6bdce3a2ed51d6621fe42bfcd8c2846f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-01-06T13:44:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T11:23:17.000Z", "avg_line_length": 28.0917431193, "max_line_length": 75, "alphanum_fraction": 0.4474199869, "num_tokens": 852}
|
"""
The inference (retrieval) sample file.
Authors: Hamed Zamani (zamani@cs.umass.edu)
"""
from app_logger import logger
logger = logger(__file__)
from allennlp.common import Params, Tqdm
from allennlp.common.util import prepare_environment
prepare_environment(Params({})) # sets the seeds to be fixed
from config import config
import params
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.data.iterators import BucketIterator
from allennlp.data.tokenizers.word_splitter import JustSpacesWordSplitter
from allennlp.data.tokenizers import WordTokenizer
from data_loading import IrTupleDatasetReader
from allennlp.data.tokenizers.word_filter import StopwordFilter
import pickle as pkl
import time
import numpy as np
import torch
from snrm import SNRM
from inverted_index import MemMappedInvertedIndex
from allennlp.nn.util import move_to_device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
logger.info('PyTorch uses device {}'.format(device))
# TODO extract to utils file, already used in train2 file
def count_zero(tensor: torch.Tensor) -> int:
return (tensor == 0.0).sum().item()
# layer_size is a list containing the size of each layer. It can be set through the 'hidden_x' arguments.
layer_size = [config.get('emb_dim')] # input layer
for i in [config.get('hidden_1'), config.get('hidden_2'), config.get('hidden_3'), config.get('hidden_4'), config.get('hidden_5')]:
if i <= 0:
break
layer_size.append(i)
logger.info('Loading vocabulary')
vocabulary: Vocabulary = Vocabulary.from_files(directory=config.get('vocab_dir_path'))
logger.info('Loading embedding')
token_embedding: Embedding = Embedding.from_params(vocab=vocabulary, params=Params({"pretrained_file": config.get('pre_trained_embedding_file_name'),
"embedding_dim": config.get('emb_dim'),
"trainable": False, # TODO is this ok?
"max_norm": None,
"norm_type": 2.0,
"padding_index": 0}))
word_embedder = BasicTextFieldEmbedder({"tokens": token_embedding})
# The SNRM model.
model = SNRM(word_embeddings= word_embedder,
batch_size=config.get('batch_size'),
max_q_len=config.get('max_q_len'),
max_doc_len=config.get('max_doc_len'),
emb_dim=config.get('emb_dim'),
layer_size=layer_size,
dropout_parameter=config.get('dropout_probability'),
regularization_term=config.get('regularization_term'),
learning_rate=config.get('learning_rate'))
model_load_path = '{0}model-state_{1}.pt'.format(config.get('model_path'), config.get('run_name'))
logger.info('Restoring model parameters from "{}"'.format(model_load_path))
# restore model parameter
model.load_state_dict(torch.load(model_load_path))
# if you saved model on GPU and now want to load it on a CPU:
# model.load_state_dict(torch.load(model_load_path, map_location=device))
model.to(device)
model.eval() # set model in evaluation mode
inverted_index = MemMappedInvertedIndex(layer_size[-1])
inverted_index.load()
logger.info('Initializing document tuple loader and iterator')
query_tuple_loader = IrTupleDatasetReader(lazy=True,
max_text_length=config.get('max_q_len'),
tokenizer = WordTokenizer(word_splitter=JustSpacesWordSplitter(),
word_filter=StopwordFilter()))
# already spacy tokenized, so that it is faster
iterator = BucketIterator(batch_size=config.get('batch_size'), # TODO should we only process one query at a time? would be faster if > 1
sorting_keys=[("text_tokens", "num_tokens")])
iterator.index_with(vocabulary)
num_queries_processed = 0
batch_num = 0
current_timestamp_str = time.strftime("%Y-%m-%d_%H%M%S")
candidate_file_name = config.get('base_path') + config.get('evaluation_result_candidate_file_prefix') + current_timestamp_str + '.txt'
max_retrieval_docs = config.get('num_retrieval_documents_per_query') # we are only interested in the top k document for a query
inverted_index_lat_term_keys = np.array(list(inverted_index.index.keys()))
logger.info('Inverted Index has {} dimensions'.format(len(inverted_index_lat_term_keys)))
with open(candidate_file_name, 'w') as evaluationCandidateFile:
logger.info('Created and writing evaluation candidate file "{}"'.format(candidate_file_name))
for batch in Tqdm.tqdm(iterator(query_tuple_loader.read(config.get('evaluation_query_file')), num_epochs=1)):
batch_num += 1
batch = move_to_device(obj=batch, cuda_device=(0 if torch.cuda.is_available() else -1))
query_ids = batch['id']
        # Gradients are not needed at retrieval time; call the model through
        # __call__ rather than forward() directly.
        with torch.no_grad():
            query_repr, _, _ = model(batch['text_tokens']['tokens'], None, None)
if batch_num % 4 == 0:
zero_elements = count_zero(query_repr)
num_elements = query_repr.numel()
ratio_zero = (zero_elements / num_elements)
logger.info('query_repr batch #{} has zero elements={}, total size={}'.format(batch_num, zero_elements, num_elements))
logger.info('query_repr batch #{} has ratio_zero_elements={:6.5f}'.format(batch_num, ratio_zero))
logger.info('retrieving document scores for query qid={}'.format(repr(query_ids)))
num_queries_in_batch = query_repr.shape[0]
for q in range(num_queries_in_batch):
query_repr_v = query_repr[q]
qid = query_ids[q]
retrieval_scores = dict() # maps doc_id to retrieval score for the query
if q % 10 == 0:
zero_elements = count_zero(query_repr_v)
num_elements = query_repr_v.numel()
ratio_zero = (zero_elements / num_elements)
logger.debug('query_repr qid={} has zero elements={}, total size={}'.format(qid, zero_elements, num_elements))
logger.debug('query_repr qid={} has ratio_zero_elements={:6.5f}'.format(qid, ratio_zero))
sum_docs_processed = 0
            # .cpu() is required before .numpy() when running on the GPU
            indices_query_positive = (query_repr_v > 0).nonzero().flatten().cpu().numpy()
logger.info('Query (qid={}) has {} latent term dimensions > 0'.format(qid, len(indices_query_positive)))
indices_query_positive_in_index = np.intersect1d(indices_query_positive, inverted_index_lat_term_keys, assume_unique=True)
logger.info('Query (qid={}) has {} relevant dimensions in index'.format(qid, len(indices_query_positive_in_index)))
skipped_query_dimensions = 0
for i in indices_query_positive_in_index: # TODO can this be optimized / parallelized?
                # Dimensions absent from the inverted index were already removed
                # above via np.intersect1d, so no membership check is needed here.
if len(inverted_index.index[i]) > (inverted_index.count_documents() * 0.8):
                    logger.warning('Skipping latent term dimension={} for Query (qid={}) - too many docs in dimension ({})'.format(i, qid, len(inverted_index.index[i])))
skipped_query_dimensions += 1
continue
for doc_id in inverted_index.index[i]: # for every doc in the current latent term dimension
sum_docs_processed += 1
if doc_id not in retrieval_scores:
retrieval_scores[doc_id] = 0.
doc_representation_v = inverted_index.get_doc_representation(doc_id)
weight = doc_representation_v[i]
                        # .item() keeps plain floats in the dict instead of 0-dim tensors
                        retrieval_scores[doc_id] += (query_repr_v[i] * weight).item()
                logger.info('Skipped {} / {} latent term dimensions for Query (qid={}), due to too many docs in dimension'.format(skipped_query_dimensions, len(indices_query_positive_in_index), qid))
mean_docs_processed = round(sum_docs_processed / len(query_repr_v), 3)
#logger.debug('processed avg. {} non-distinct docs for query per dimension (whole: {})'.format(str(mean_docs_processed), str(sum_docs_processed)))
logger.info('Obtained a score for {} distinct docs for query qid={}'.format(len(retrieval_scores), qid))
retrieval_result_for_qid = sorted(retrieval_scores.items(), key=lambda x: x[1], reverse=True)
retrieval_result_for_qid = retrieval_result_for_qid[:max_retrieval_docs]
if len(retrieval_result_for_qid) == 0:
logger.warning('Could not retrieve any relevant document for query qid={}'.format(qid))
# writing retrieval result to candidate file
for rank, (doc_id, retrieval_score) in enumerate(retrieval_result_for_qid):
# logger.debug('qid={}\t\tdoc_id={}\tscore={}\trank={}'.format(qid,doc_id,retrieval_score, rank+1))
evaluationCandidateFile.write('{0}\tQ0\t{1}\t{2}\t{3}\t{4}\n'.format(qid, doc_id, rank+1, retrieval_score, config.get('run_name')))
if rank < 10:
logger.debug('{0}\tQ0\t{1}\t{2}\t{3}\t{4}'.format(qid, doc_id, rank+1, retrieval_score, config.get('run_name')))
num_queries_processed += 1
if num_queries_processed == config.get('num_evaluation_queries'):
logger.info('Ending retrieval after processing {} queries'.format(num_queries_processed))
break
logger.info('Processed {} queries for retrieval'.format(num_queries_processed))
if num_queries_processed == config.get('num_evaluation_queries'):
break
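# A compact, self-contained sketch of the latent-term scoring performed in the
# loop above (illustrative only; plain dicts and numpy arrays stand in for the
# MemMappedInvertedIndex class, and the function name is hypothetical):
def score_query_sketch(query_vec, index, doc_reprs):
    """query_vec: 1-D numpy array; index: dim -> iterable of doc_ids;
    doc_reprs: doc_id -> 1-D numpy array. Returns (doc_id, score) pairs,
    best first."""
    scores = {}
    for dim in np.flatnonzero(query_vec > 0):       # only active latent terms
        for doc_id in index.get(dim, ()):           # docs posted in this dim
            scores[doc_id] = scores.get(doc_id, 0.0) + \
                float(query_vec[dim]) * float(doc_reprs[doc_id][dim])
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)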
|
{"hexsha": "e5a5f64a298fd60ec35fceea69fb3c31aa27221f", "size": 10548, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/retrieval.py", "max_stars_repo_name": "Bernhard-Steindl/snrm-extension", "max_stars_repo_head_hexsha": "e4a797ff258a15b690b8838a064fbe790ff22b80", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-19T10:14:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-19T10:14:56.000Z", "max_issues_repo_path": "code/retrieval.py", "max_issues_repo_name": "Bernhard-Steindl/snrm-extension", "max_issues_repo_head_hexsha": "e4a797ff258a15b690b8838a064fbe790ff22b80", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-03-19T15:58:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:53:43.000Z", "max_forks_repo_path": "code/retrieval.py", "max_forks_repo_name": "Bernhard-Steindl/snrm-extension", "max_forks_repo_head_hexsha": "e4a797ff258a15b690b8838a064fbe790ff22b80", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.0050251256, "max_line_length": 195, "alphanum_fraction": 0.6526355707, "include": true, "reason": "import numpy", "num_tokens": 2219}
|
# coding: utf-8
import os
import sys
import struct
import argparse
import numpy as np
sys.path.append("../")
from colmap_process.colmap_read_write_model import *
from colmap_process.colmap_export_geo import *
def read_orb_traj_text(traj_path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesText(const std::string& path)
void Reconstruction::WriteImagesText(const std::string& path)
"""
images = {}
image_id = 0
with open(traj_path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
                image_name = elems[0] + ".png"
                # Each line appears to follow the TUM trajectory format:
                # timestamp tx ty tz qx qy qz qw (camera-to-world pose).
                tvec = np.array(tuple(map(float, elems[1:4])))
                qvec = np.array(tuple(map(float, elems[4:8])))
                # Reorder the quaternion from (qx, qy, qz, qw) to (qw, qx, qy, qz).
                qvec = [qvec[3], qvec[0], qvec[1], qvec[2]]
                # Invert the pose: COLMAP images store the world-to-camera
                # rotation and translation.
                rmat = quaternion_matrix(qvec).T
                tvec = -rmat @ tvec
                qvec = quaternion_from_matrix(rmat)
camera_id = 2048
# image_name = elems[9]
# elems = fid.readline().split()
# xys = np.column_stack([tuple(map(float, elems[0::3])),
# tuple(map(float, elems[1::3]))])
# point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=[], point3D_ids=[])
                image_id += 1
return images
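# Example of a trajectory line consumed by read_orb_traj_text (the values are
# illustrative):
#
#   1403636579.7635 0.10 -0.25 1.50 0.0 0.0 0.0 1.0
#
# i.e. "timestamp tx ty tz qx qy qz qw"; the corresponding image is assumed
# to be named "<timestamp>.png".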
def main():
parser = argparse.ArgumentParser(description='Convert orbslam traj to colmap image model')
parser.add_argument('--traj', help='txt trajectory file')
parser.add_argument('--output', help='colmap image model')
args = parser.parse_args()
images = read_orb_traj_text(args.traj)
write_images_text(images, args.output, False)
#write_model(cameras, images, points3D, path=args.output_model, ext=args.output_format, poseonly=args.poseonly)
if __name__ == "__main__":
main()
|
{"hexsha": "34af82d2e3b98124247b6dd282f0af6f9da2ef92", "size": 2302, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/sfm_toolkits/ezxr_sfm/trajectory_tools/orbtraj2colmap.py", "max_stars_repo_name": "TxT1212/colmap", "max_stars_repo_head_hexsha": "ee87e7c8b9ee7541be62f45121febd431be134a9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/sfm_toolkits/ezxr_sfm/trajectory_tools/orbtraj2colmap.py", "max_issues_repo_name": "TxT1212/colmap", "max_issues_repo_head_hexsha": "ee87e7c8b9ee7541be62f45121febd431be134a9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/sfm_toolkits/ezxr_sfm/trajectory_tools/orbtraj2colmap.py", "max_forks_repo_name": "TxT1212/colmap", "max_forks_repo_head_hexsha": "ee87e7c8b9ee7541be62f45121febd431be134a9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8529411765, "max_line_length": 115, "alphanum_fraction": 0.5595134666, "include": true, "reason": "import numpy", "num_tokens": 548}
|
#!/usr/bin/env python3
import contextlib
import io
import json
import logging
import sys
from multiprocessing import Pool
import msgpack
import numpy as np
import tqdm
import lzma
import bz2
import openforcefield
from openforcefield.topology.molecule import Molecule
from rdkit import Chem
from rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers
from rdkit.Chem.rdMolAlign import AlignMol
logger = logging.getLogger(__name__)
# TODO: convert output to use the logger
def detect_file_total_data_lines(filename, skip_rows=0):
N = -skip_rows
for line in open(filename, "r"):
N += 1
return N
def parse_file(filename, N, line_start=0, line_end=None, skip_rows=0):
"""
    line_start and line_end are inclusive
"""
if line_end is None:
line_end = N
smi = []
with open(filename, "r") as fd:
for lineno, line in enumerate(fd, -skip_rows):
if lineno < line_start:
continue
if lineno > line_end:
continue
spline = line.split()
smi.append(spline[0])
return smi
def process_smiles_to_qcschema(lineno, N, header, filename, **kwargs):
out_lines = ""
out_mols = {}
smi_list = parse_file(
filename, N, line_start=lineno, line_end=lineno, skip_rows=header
)
out_lines += "{:8d} / {:8d} ENTRY: {:s}\n".format(lineno + 1, N, smi_list[0])
mols = expand_smiles_to_qcschema(smi_list[0], **kwargs)
isomers = 0
conformations = 0
entries = 1
for i, (smi, mol_list) in enumerate(mols.items(), 1):
for mol in mol_list:
isomers += 1
conformations += len(mol.conformers)
out_lines += "{:22s}ISOMER {:3d}/{:3d} CONFS: {} SMILES: {:s}\n".format(
"", i, len(mols), len(mol.conformers), smi
)
out_mols.update(mols)
# out_lines += out
return out_mols, out_lines, (entries, isomers, conformations)
def expand_smiles_to_qcschema(
smi,
cutoff=None,
n_confs=1,
unique_smiles=True,
isomer_max=-1,
):
"""
Load a file containing smiles strings, and generate stereoisomers and
conformers for each stereoisomer.
Parameters
----------
input_fnm : str
The input filename to read SMILES from
cutoff : float
During the all-pairwise RMSD calculation, remove
molecules that are less than this cutoff value apart
n_confs : int
The number of conformations to attempt generating
unique_smiles : bool
If stereoisomers are generated, organize molecules by
their unambiguous SMILES string
isomers : int
The number of stereoisomers to keep if multiple are found.
The default of -1 means keep all found.
line_start : int
The line in the input file to start processing
line_end : int
The line in the input file to stop processing (not inclusive)
skip_rows : int
The number of lines at the top of the file to skip before
data begins
output_fid : FileHandle
the file object to write to. Must support the write function
Returns
-------
mols : dict
Keys are the smiles from the input file, and the value is a
list of OpenFF molecules with conformers attached.
output : str
The contents of what was written to output_fid
"""
# TODO: unique_smiles=False is broken as it repeats isomers for some reason
unique_smiles = True
# Initializing
i = 0
rmsd_cutoff = cutoff
# this is the main object returned
molecule_set = {}
ref_smi = smi
try:
# If this fails, probably due to stereochemistry. Catch the
# exception, then enumerate the variations on the SMILES.
mol = Molecule.from_smiles(smi).to_rdkit()
smi_list = [mol]
except openforcefield.utils.toolkits.UndefinedStereochemistryError:
smi_list = list(EnumerateStereoisomers(Chem.MolFromSmiles(smi)))
# Clip the isomers here if a limit was specified
if isomer_max > 0:
smi_list = smi_list[:isomer_max]
for i, mol in enumerate(smi_list):
smi_list[i] = Chem.AddHs(mol)
for atom in smi_list[i].GetAtoms():
atom.SetAtomMapNum(atom.GetIdx() + 1)
smi_list = [
smi
for smi in sorted(
Chem.MolToSmiles(
x,
isomericSmiles=True,
allHsExplicit=True,
canonical=True,
allBondsExplicit=False,
)
for x in smi_list
)
]
if unique_smiles:
# we are collecting molecules by their specific stereoisomer SMILES
for smi in smi_list:
try:
# this is ridiculous; we enumerated stereoisomers previously,
# but we still fail to build the molecule. Silently allow...
# note that this is likely because there is still bond
# stereochemistry
lvl = logging.getLogger("openforcefield").getEffectiveLevel()
logging.getLogger("openforcefield").setLevel(logging.ERROR)
molecule_set[smi] = [Molecule.from_smiles(smi, allow_undefined_stereo=True)]
logging.getLogger("openforcefield").setLevel(lvl)
except openforcefield.utils.toolkits.UndefinedStereochemistryError:
# RDKit was unable to determine chirality? Skip...
pass
else:
mols = []
for smi in smi_list:
mols.append(Molecule.from_smiles(smi))
molecule_set[ref_smi] = mols
    for smi in smi_list:
        # When unique_smiles is False, all stereoisomers were stored above
        # under the original input SMILES key, so look them up by ref_smi.
        if not unique_smiles:
            smi = ref_smi
if smi not in molecule_set:
continue
for mol in molecule_set[smi]:
# Not obvious, but i is the number of unique SMILES strings
# generated (so far) from the input SMILES
i += 1
# attempt to generate n_confs, but the actual number could be
# smaller
f = io.StringIO()
with contextlib.redirect_stderr(f):
with contextlib.redirect_stdout(f):
try:
mol.generate_conformers(n_conformers=n_confs)
except TypeError:
pass
rdmol = mol.to_rdkit()
L = len(mol.conformers)
# This will be used to determined whether it should be pruned
# from the RMSD calculations. If we find it should be pruned
# just once, it is sufficient to avoid it later in the pairwise
# processing.
uniq = list([True] * L)
# This begins the pairwise RMSD pruner
if L > 1:
# The reference conformer for RMSD calculation
for j in range(L - 1):
# A previous loop has determine this specific conformer
# is too close to another, so we can entirely skip it
if not uniq[j]:
continue
# since k starts from j+1, we are only looking at the
# upper triangle of the comparisons (j < k)
for k in range(j + 1, L):
                        # Align conformers k and j in the RDKit copy; the RMSD
                        # returned by AlignMol is superseded below by the mean
                        # per-atom distance between the OpenFF conformers.
                        AlignMol(rdmol, rdmol, k, j)
                        r = np.linalg.norm(
                            mol.conformers[k] - mol.conformers[j], axis=1
                        )
                        rmsd_i = r.mean()
# Flag this conformer for pruning, and also
# prevent it from being used as a reference in the
# future comparisons
if rmsd_i < rmsd_cutoff:
uniq[k] = False
# hack? how to set conformers explicity if different number than
# currently stored?
confs = [
mol.conformers[j] for j, add_bool in enumerate(uniq) if add_bool
]
mol._conformers = confs.copy()
if len(molecule_set) == 0:
molecule_set[ref_smi] = []
return molecule_set
def main():
import argparse
parser = argparse.ArgumentParser(
description="A tool to transform a SMILES string into a QCSchema. Enumerates stereoisomers if the SMILES is ambiguous, and generates conformers."
)
parser.add_argument(
"input",
type=str,
help="Input file containing smiles strings. Assumes that the file is CSV-like, splits on spaces, and the SMILES is the first column",
)
parser.add_argument(
"-c",
"--cutoff",
type=float,
default=9999.0,
help="Prune conformers less than this cutoff using all pairwise RMSD comparisons (in Angstroms)",
)
parser.add_argument(
"-n",
"--max-conformers",
type=int,
default=1,
help="The number of conformations to attempt generating",
)
parser.add_argument(
"-s",
"--line-start",
type=int,
default=0,
help="The line in the input file to start processing",
)
parser.add_argument(
"-e",
"--line-end",
type=int,
help="The line in the input file to stop processing (not inclusive)",
)
parser.add_argument(
"-H",
"--header-lines",
type=int,
default=0,
help=""" The number of lines at the top of the file to skip before data
begins""",
)
parser.add_argument(
"-u",
"--unique-smiles",
action="store_true",
help="""If stereoisomers are generated, organize molecules by their
unambiguous SMILES string""",
)
parser.add_argument(
"-i",
"--isomers",
type=int,
default=-1,
help="""The number of stereoisomers to keep if multiple are found""",
)
parser.add_argument(
"-o", "--output-file", type=str, help="The file to write the output log to"
)
parser.add_argument(
"-f",
"--formatted-out",
type=str,
help="Write all molecules to a formatted output as qc_schema molecules. Assumes singlets! Choose either --json, --qcsubmit, or --msgpack as the the format",
)
parser.add_argument(
"-j",
"--json",
action="store_true",
help="Write the formatted output to qc_schema (json) format.",
)
parser.add_argument(
"--qcsubmit",
action="store_true",
help="Create and ",
)
parser.add_argument(
"-m",
"--msgpack",
action="store_true",
help="Write the formatted output to qc_schema binary message pack (msgpack).",
)
parser.add_argument(
"--ncpus",
type=int,
help="Number of processes to use.",
)
args = parser.parse_args()
if args.output_file is not None:
fid = open(args.output_file, "w")
else:
fid = sys.stdout
start = args.line_start
end = args.line_end
N = detect_file_total_data_lines(args.input, args.header_lines)
if end is None:
end = N
#############################################################################
# Output settings and general description to the log
output = ""
out_line = "# Running entries {:d} to {:d}\n".format(start + 1, end)
output += out_line
out_line = "# Generating max {:d} conformers, prune RMSD {:6.2f}\n".format(
args.max_conformers, args.cutoff
)
output += out_line
if args.unique_smiles:
out_line = "# Collecting molecules using unique SMILES\n"
else:
out_line = "# Collecting molecules by their input SMILES\n"
output += out_line
if args.isomers < 0:
out_line = "# Collecting all stereoisomers found\n"
else:
out_line = "# Collecting at most {:d} stereoisomers\n".format(args.isomers)
output += out_line
fid.write(out_line)
fid.flush()
#############################################################################
kwargs = dict(
cutoff=args.cutoff,
n_confs=args.max_conformers,
unique_smiles=args.unique_smiles,
isomer_max=args.isomers,
)
mols = {}
entries, isomers, conformations = 0, 0, 0
no_mol = []
no_conf = []
if args.ncpus == 1:
for i in tqdm.tqdm(
range(start, end), total=end - start, ncols=80, disable=True
):
fn_args = (i, end, args.header_lines, args.input)
mol, out_lines, counts = process_smiles_to_qcschema(*fn_args, **kwargs)
mols.update(mol)
fid.write(out_lines)
entries += counts[0]
isomers += counts[1]
conformations += counts[2]
if counts[1] == 0:
fid.write("{:22s}Error: Could not build molecule\n".format(""))
no_mol.extend(mol.keys())
elif counts[2] == 0:
fid.write("{:22s}Error: Could not generate any conformers\n".format(""))
no_conf.extend(mol.keys())
fid.write(
"{:22s}Inputs: {:10d} Isomers: {:10d} Conformations: {:10d}\n".format(
"", entries, isomers, conformations
)
)
else:
pool = Pool(processes=args.ncpus)
work = []
for i in range(start, end):
fn_args = (i, end, args.header_lines, args.input)
unit = pool.apply_async(process_smiles_to_qcschema, fn_args, kwargs)
work.append(unit)
for i, unit in tqdm.tqdm(
enumerate(work), total=len(work), ncols=80, disable=True
):
mol, out_lines, counts = unit.get()
mols.update(mol)
fid.write(out_lines)
entries += counts[0]
isomers += counts[1]
conformations += counts[2]
if counts[1] == 0:
fid.write("{:22s}Error: Could not build molecule\n".format(""))
no_mol.extend(mol.keys())
elif counts[2] == 0:
fid.write("{:22s}Error: Could not generate any conformers\n".format(""))
no_conf.extend(mol.keys())
fid.write(
"{:22s}Inputs: {:10d} Isomers: {:10d} Conformations: {:10d}\n".format(
"", entries, isomers, conformations
)
)
pool.close()
fid.write("Totals:\n")
fid.write(" Inputs: {:8d}\n".format(entries))
fid.write(" Isomers: {:8d}\n".format(isomers))
fid.write(" Conformations: {:8d}\n".format(conformations))
if len(no_mol) > 0:
fid.write("\nEntries that could not be built:\n")
for i, m in enumerate(no_mol, 1):
fid.write(" {:8d} {:s}\n".format(i, m))
if len(no_conf) > 0:
fid.write("\nEntries that could not generate conformers:\n")
for i, m in enumerate(no_conf, 1):
fid.write(" {:8d} {:s}\n".format(i, m))
if args.output_file is not None and fid is not sys.stdout:
fid.close()
serialize_method = "json"
serializer = json
if args.msgpack:
serialize_method = "msgpack-ext"
serializer = msgpack
    # args.json is a store_true flag and is never None; build the serialized
    # molecules whenever a formatted output file was requested.
    if args.formatted_out:
        json_mol = {}
for smi in mols:
json_mol[smi] = [
serializer.loads(
mol.to_qcschema(conformer=i).serialize(serialize_method)
)
for mol in mols[smi]
for i in range(mol.n_conformers)
]
if args.formatted_out:
if args.msgpack:
with open(args.formatted_out, "wb") as fid:
msgpack.dump(json_mol, fid)
elif args.json:
opener = open
if args.formatted_out.endswith("bz2"):
opener = bz2.open
elif args.formatted_out.endswith("xz"):
opener = lzma.open
elif args.formatted_out.endswith("lzma"):
opener = lzma.open
with opener(args.formatted_out, "wt") as fid:
json.dump(json_mol, fid, indent=2)
if __name__ == "__main__":
main()
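# A minimal interactive sketch of the core helper (illustrative; the SMILES
# string and cutoff are arbitrary choices):
#
#   mols = expand_smiles_to_qcschema("CC(Cl)Br", cutoff=0.5, n_confs=5)
#   for smi, mol_list in mols.items():
#       for mol in mol_list:
#           print(smi, mol.n_conformers)
#
# Each key is a fully specified stereoisomer SMILES; each value is a list of
# openforcefield Molecule objects with RMSD-pruned conformers attached.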
|
{"hexsha": "e3cd7530b6a199bcf8ea7571d37636ae5d57fbd9", "size": 16600, "ext": "py", "lang": "Python", "max_stars_repo_path": "offsb/ui/smiles/load.py", "max_stars_repo_name": "MobleyLab/openff-spellbook", "max_stars_repo_head_hexsha": "66a9f2add895034da7949701069b11cf0ab3f817", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-09-20T13:53:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-25T20:42:12.000Z", "max_issues_repo_path": "offsb/ui/smiles/load.py", "max_issues_repo_name": "MobleyLab/openff-spellbook", "max_issues_repo_head_hexsha": "66a9f2add895034da7949701069b11cf0ab3f817", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-10-12T07:12:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-22T10:22:17.000Z", "max_forks_repo_path": "offsb/ui/smiles/load.py", "max_forks_repo_name": "MobleyLab/openff-spellbook", "max_forks_repo_head_hexsha": "66a9f2add895034da7949701069b11cf0ab3f817", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-12T00:31:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-30T21:36:10.000Z", "avg_line_length": 30.4587155963, "max_line_length": 166, "alphanum_fraction": 0.5592771084, "include": true, "reason": "import numpy", "num_tokens": 3912}
|
"""
Regression tests.
"""
import numpy as np
import gym
from .verifier import *
from .levelgen import *
from gym_minigrid.minigrid import *
class Level_TestGoToBlocked(RoomGridLevel):
"""
Go to a yellow ball that is blocked with a lot of red balls.
"""
    def __init__(self, room_size=9, seed=None):
        super().__init__(
            num_rows=1,
            num_cols=1,
            room_size=room_size,
            seed=seed
        )
def gen_mission(self):
self.place_agent()
self.start_pos = np.array([3, 3])
self.start_dir = 0
obj = Ball('yellow')
self.grid.set(1, 1, obj)
        # Surround the yellow ball at (1, 1) with red balls, leaving only the
        # agent cell (3, 3) free.
        for i in (1, 2, 3):
            for j in (1, 2, 3):
                if (i, j) not in [(1, 1), (3, 3)]:
                    self.grid.set(i, j, Ball('red'))
self.instrs = GoToInstr(ObjDesc(obj.type, obj.color))
class Level_TestPutNextToBlocked(RoomGridLevel):
"""
Pick up a yellow ball and put it next to a blocked blue ball.
"""
    def __init__(self, room_size=9, seed=None):
        super().__init__(
            num_rows=1,
            num_cols=1,
            room_size=room_size,
            seed=seed
        )
def gen_mission(self):
self.place_agent()
self.start_pos = np.array([3, 3])
self.start_dir = 0
obj1 = Ball('yellow')
obj2 = Ball('blue')
self.place_obj(obj1, (4, 4), (1, 1))
self.place_obj(obj2, (1, 1), (1, 1))
self.grid.set(1, 2, Ball('red'))
self.grid.set(2, 1, Ball('red'))
self.instrs = PutNextInstr(ObjDesc(obj1.type, obj1.color),
ObjDesc(obj2.type, obj2.color))
register_levels(__name__, globals())
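# Usage sketch (illustrative; assumes the standard BabyAI naming scheme, under
# which Level_TestGoToBlocked registers as 'BabyAI-TestGoToBlocked-v0'):
#
#   env = gym.make('BabyAI-TestGoToBlocked-v0')
#   obs = env.reset()
#   print(obs['mission'])  # e.g. "go to the yellow ball"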
|
{"hexsha": "59d5a32b6981fb6394c5d43fcd5fd4bb2cf09d38", "size": 1702, "ext": "py", "lang": "Python", "max_stars_repo_path": "babyai/levels/test_levels.py", "max_stars_repo_name": "MathijsMul/babyai-emergent-guidance", "max_stars_repo_head_hexsha": "9e37535134c89bd019affa51c7f199d1672811b6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "babyai/levels/test_levels.py", "max_issues_repo_name": "MathijsMul/babyai-emergent-guidance", "max_issues_repo_head_hexsha": "9e37535134c89bd019affa51c7f199d1672811b6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "babyai/levels/test_levels.py", "max_forks_repo_name": "MathijsMul/babyai-emergent-guidance", "max_forks_repo_head_hexsha": "9e37535134c89bd019affa51c7f199d1672811b6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0294117647, "max_line_length": 66, "alphanum_fraction": 0.535840188, "include": true, "reason": "import numpy", "num_tokens": 469}
|
[STATEMENT]
lemma fls_deriv_add [simp]: "fls_deriv (f+g) = fls_deriv f + fls_deriv g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fls_deriv (f + g) = fls_deriv f + fls_deriv g
[PROOF STEP]
by (auto intro: fls_eqI simp: algebra_simps)
|
{"llama_tokens": 116, "file": null, "length": 1}
|
[STATEMENT]
lemma if_mred_heap_read_typedD:
"multithreaded_base.init_fin final_expr (J_heap_base.mred addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) (heap_base.heap_read_typed (\<lambda>_ :: 'heap. typeof_addr) heap_read P) heap_write P) t xh ta x'h' \<longleftrightarrow>
if_heap_read_typed final_expr (J_heap_base.mred addr2thread_id thread_id2addr spurious_wakeups empty_heap allocate (\<lambda>_ :: 'heap. typeof_addr) heap_read heap_write P) typeof_addr P t xh ta x'h'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. multithreaded_base.init_fin final_expr (\<lambda>t ((e, l), h) ta ((e', l'), h'). J_heap_base.red (\<lambda>x. x) (\<lambda>x. x) spurious_wakeups empty_heap allocate (\<lambda>_. typeof_addr) (heap_base.heap_read_typed (\<lambda>_. typeof_addr) heap_read P) heap_write (extTA2J P) P t e (h, l) ta e' (h', l')) t xh ta x'h' = if_heap_read_typed final_expr (\<lambda>t ((e, l), h) ta ((e', l'), h'). J_heap_base.red (\<lambda>x. x) (\<lambda>x. x) spurious_wakeups empty_heap allocate (\<lambda>_. typeof_addr) heap_read heap_write (extTA2J P) P t e (h, l) ta e' (h', l')) typeof_addr P t xh ta x'h'
[PROOF STEP]
unfolding multithreaded_base.init_fin.simps
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<exists>tb x m taa x' m'. t = tb \<and> xh = ((Running, x), m) \<and> ta = convert_TA_initial (convert_obs_initial taa) \<and> x'h' = ((Running, x'), m') \<and> (case (x, m) of (x, xa) \<Rightarrow> (case x of (e, l) \<Rightarrow> \<lambda>h ta ((e', l'), h'). J_heap_base.red (\<lambda>x. x) (\<lambda>x. x) spurious_wakeups empty_heap allocate (\<lambda>_. typeof_addr) (heap_base.heap_read_typed (\<lambda>_. typeof_addr) heap_read P) heap_write (extTA2J P) P tb e (h, l) ta e' (h', l')) xa) taa (x', m')) \<or> (\<exists>tb x m. t = tb \<and> xh = ((PreStart, x), m) \<and> ta = \<lbrace>InitialThreadAction\<rbrace> \<and> x'h' = ((Running, x), m)) \<or> (\<exists>x tb m. t = tb \<and> xh = ((Running, x), m) \<and> ta = \<lbrace>ThreadFinishAction\<rbrace> \<and> x'h' = ((Finished, x), m) \<and> final_expr x)) = (((\<exists>tb x m taa x' m'. t = tb \<and> xh = ((Running, x), m) \<and> ta = convert_TA_initial (convert_obs_initial taa) \<and> x'h' = ((Running, x'), m') \<and> (case (x, m) of (x, xa) \<Rightarrow> (case x of (e, l) \<Rightarrow> \<lambda>h ta ((e', l'), h'). J_heap_base.red (\<lambda>x. x) (\<lambda>x. x) spurious_wakeups empty_heap allocate (\<lambda>_. typeof_addr) heap_read heap_write (extTA2J P) P tb e (h, l) ta e' (h', l')) xa) taa (x', m')) \<or> (\<exists>tb x m. t = tb \<and> xh = ((PreStart, x), m) \<and> ta = \<lbrace>InitialThreadAction\<rbrace> \<and> x'h' = ((Running, x), m)) \<or> (\<exists>x tb m. t = tb \<and> xh = ((Running, x), m) \<and> ta = \<lbrace>ThreadFinishAction\<rbrace> \<and> x'h' = ((Finished, x), m) \<and> final_expr x)) \<and> (\<forall>ad al v T. NormalAction (ReadMem ad al v) \<in> set \<lbrace>ta\<rbrace>\<^bsub>o\<^esub> \<longrightarrow> heap_base'.addr_loc_type TYPE('heap) typeof_addr P ad al T \<longrightarrow> heap_base'.conf TYPE('heap) typeof_addr P v T))
[PROOF STEP]
by(subst red_heap_read_typedD) fastforce
|
{"llama_tokens": 1305, "file": "JinjaThreads_MM_JMM_J_Typesafe", "length": 2}
|
from __future__ import annotations
from typing import Tuple, Union
import torch
import numpy as np
from .data import Data
from .data import ACCESSIBLE_KEY
from utils.config import global_config
import copy
class Dataset(object):
"""This is a class for building the dataset in the learning process.
:param buffer_size: The size of the dataset.
:type buffer_size: int
:param state_dim: The dimension of the state data. For example,
the state dimension of a binary image
is (512, 512). The state of a cartpole system is (4,) or 4,
which includes the position, velocity, angle of the
pole, and the angular velocity of the pole.
:type state_dim: Tuple(int) or int
    :param action_dim: The dimension of the action data. For example, the
                       action dimension of a cartpole system is 1,
                       which is the force applied to the cart.
                       The action dimension of a vehicle system is 2,
                       which are the steering angle and the acceleration.
:type action_dim: int
"""
def __init__(self, buffer_size,
                 state_dim: Union[Tuple[int, ...], int],
action_dim: int):
if isinstance(state_dim, int):
state_dim = (state_dim,)
self._buffer_size = buffer_size
self._state_dim = state_dim
self._action_dim = action_dim
self._update_key = 0
        self._total_update = 0  # total number of data points received so far
def update_dataset(self, data: Data):
"""Update the new data into the dataset. If the dataset is full,
then this method will remove the oldest data in the dataset,
and update the new data alternatively.
:param data: New data
:type data: Data
"""
        if self._total_update == 0:  # first update: allocate the buffers
            self._dataset_dict = {}
            for key in data._data_dict:
                if key == "state" or key == "next_state":
                    buffer = torch.zeros((self._buffer_size, *data._data_dict[key].shape[1:]))
                elif key == "action":
                    buffer = torch.zeros((self._buffer_size, data._data_dict[key].shape[1]))
                elif key == "reward":
                    buffer = torch.zeros(self._buffer_size)
                elif key == "done_flag":
                    buffer = torch.zeros(self._buffer_size, dtype=torch.bool)
                else:
                    continue  # keys without a known layout are not buffered
                if global_config.is_cuda:
                    buffer = buffer.cuda()
                self._dataset_dict[key] = buffer
self._total_update += len(data)
# if not exceed the last data in the dataset
if self._update_key+len(data) <= self._buffer_size:
for key in self._dataset_dict:
self._dataset_dict[key][self._update_key:self._update_key + len(data)] = data._data_dict[key]
self._update_key += len(data)
if self._update_key == self._buffer_size:
self._update_key = 0
        else:  # the new data wraps past the end of the buffer
            exceed_number = len(data) + self._update_key - self._buffer_size
            for key in self._dataset_dict:
                self._dataset_dict[key][self._update_key:] = data._data_dict[key][:self._buffer_size-self._update_key]
                # wrap the remainder around to the start of the buffer
                self._dataset_dict[key][:exceed_number] = data._data_dict[key][self._buffer_size-self._update_key:]
            self._update_key = exceed_number
def get_current_buffer_size(self):
"""Get the current data buffer size. If the number of the current data is less than the buffer size,
return current number of data. Otherwise, return the size of the dataset.
:return: [description]
:rtype: [type]
"""
return min(self._total_update, self._buffer_size)
def fetch_all_data(self):
"""Return all the data in the dataset.
:return: All data
:rtype: Data
"""
        # Only the slots that have actually been filled are returned.
        index = list(range(self.get_current_buffer_size()))
return self.fetch_data_by_index(index)
def fetch_data_by_index(self, index: list) -> Data:
"""Return the data by specifying the index. For example, if index = [1,2,5], then three datas in the dataset will be returned.
:param index: The index of the data
:type index: list
:return: Specific data
:rtype: Data
"""
temp_dict = {}
for key in self._dataset_dict:
temp_dict[key] = self._dataset_dict[key][index]
data = Data(**temp_dict)
return data
def fetch_data_randomly(self, num_of_data: int) -> Data:
"""Return the data with random keyes
:param num_of_data: How many data will be returned
:type num_of_data: int
:return: Data with random keyes
:rtype: Data
"""
if self._total_update < self._buffer_size:
if num_of_data > self._total_update:
raise Exception("The current buffer size is %d. Number of random data size is %d." % (self._total_update, num_of_data) +
"The latter must be less or equal than the former.")
index = np.random.choice(
self._total_update, size=num_of_data, replace=False)
else:
if num_of_data > self._buffer_size:
raise Exception("The current buffer size is %d. Number of random data size is %d." % (self._buffer_size, num_of_data) +
"The latter must be less or equal than the former.")
index = np.random.choice(
self._buffer_size, size=num_of_data, replace=False)
return self.fetch_data_by_index(index)
|
{"hexsha": "778b57587bfc1cc24ddf72b88e499b084f2ef830", "size": 6352, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/dataset.py", "max_stars_repo_name": "HKUST-JM/iLQR_Traj_Trac", "max_stars_repo_head_hexsha": "c23101a8ddc208d43c210e4c759f28f9b7fb00b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/dataset.py", "max_issues_repo_name": "HKUST-JM/iLQR_Traj_Trac", "max_issues_repo_head_hexsha": "c23101a8ddc208d43c210e4c759f28f9b7fb00b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/dataset.py", "max_forks_repo_name": "HKUST-JM/iLQR_Traj_Trac", "max_forks_repo_head_hexsha": "c23101a8ddc208d43c210e4c759f28f9b7fb00b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.0289855072, "max_line_length": 136, "alphanum_fraction": 0.6002833753, "include": true, "reason": "import numpy", "num_tokens": 1417}
|
SUBROUTINE MB03BD( JOB, DEFL, COMPQ, QIND, K, N, H, ILO, IHI, S,
$ A, LDA1, LDA2, Q, LDQ1, LDQ2, ALPHAR, ALPHAI,
$ BETA, SCAL, IWORK, LIWORK, DWORK, LDWORK,
$ IWARN, INFO )
C
C SLICOT RELEASE 5.7.
C
C Copyright (c) 2002-2020 NICONET e.V.
C
C PURPOSE
C
C To find the eigenvalues of the generalized matrix product
C
C S(1) S(2) S(K)
C A(:,:,1) * A(:,:,2) * ... * A(:,:,K)
C
C where A(:,:,H) is upper Hessenberg and A(:,:,i), i <> H, is upper
C triangular, using a double-shift version of the periodic
C QZ method. In addition, A may be reduced to periodic Schur form:
C A(:,:,H) is upper quasi-triangular and all the other factors
C A(:,:,I) are upper triangular. Optionally, the 2-by-2 triangular
C matrices corresponding to 2-by-2 diagonal blocks in A(:,:,H)
C are so reduced that their product is a 2-by-2 diagonal matrix.
C
C If COMPQ = 'U' or COMPQ = 'I', then the orthogonal factors are
C computed and stored in the array Q so that for S(I) = 1,
C
C T
C Q(:,:,I)(in) A(:,:,I)(in) Q(:,:,MOD(I,K)+1)(in)
C T (1)
C = Q(:,:,I)(out) A(:,:,I)(out) Q(:,:,MOD(I,K)+1)(out),
C
C and for S(I) = -1,
C
C T
C Q(:,:,MOD(I,K)+1)(in) A(:,:,I)(in) Q(:,:,I)(in)
C T (2)
C = Q(:,:,MOD(I,K)+1)(out) A(:,:,I)(out) Q(:,:,I)(out).
C
C A partial generation of the orthogonal factors can be realized
C via the array QIND.
C
C ARGUMENTS
C
C Mode Parameters
C
C JOB CHARACTER*1
C Specifies the computation to be performed, as follows:
C = 'E': compute the eigenvalues only; A will not
C necessarily be put into periodic Schur form;
C = 'S': put A into periodic Schur form, and return the
C eigenvalues in ALPHAR, ALPHAI, BETA, and SCAL;
C = 'T': as JOB = 'S', but A is put into standardized
C periodic Schur form, that is, the general product
C of the 2-by-2 triangular matrices corresponding to
C a complex eigenvalue is diagonal.
C
C DEFL CHARACTER*1
C Specifies the deflation strategy to be used, as follows:
C = 'C': apply a careful deflation strategy, that is,
C the criteria are based on the magnitudes of
C neighboring elements and infinite eigenvalues are
C only deflated at the top; this is the recommended
C option;
C = 'A': apply a more aggressive strategy, that is,
C elements on the subdiagonal or diagonal are set
C to zero as soon as they become smaller in magnitude
C than eps times the norm of the corresponding
C factor; this option is only recommended if
C balancing is applied beforehand and convergence
C problems are observed.
C
C COMPQ CHARACTER*1
C Specifies whether or not the orthogonal transformations
C should be accumulated in the array Q, as follows:
C = 'N': do not modify Q;
C = 'U': modify (update) the array Q by the orthogonal
C transformations that are applied to the matrices in
C the array A to reduce them to periodic Schur form;
C = 'I': like COMPQ = 'U', except that each matrix in the
C array Q will be first initialized to the identity
C matrix;
C = 'P': use the parameters as encoded in QIND.
C
C QIND INTEGER array, dimension (K)
C If COMPQ = 'P', then this array describes the generation
C of the orthogonal factors as follows:
C If QIND(I) > 0, then the array Q(:,:,QIND(I)) is
C modified by the transformations corresponding to the
C i-th orthogonal factor in (1) and (2).
C If QIND(I) < 0, then the array Q(:,:,-QIND(I)) is
C initialized to the identity and modified by the
C transformations corresponding to the i-th orthogonal
C factor in (1) and (2).
C If QIND(I) = 0, then the transformations corresponding
C to the i-th orthogonal factor in (1), (2) are not applied.
C
C Input/Output Parameters
C
C K (input) INTEGER
C The number of factors. K >= 1.
C
C N (input) INTEGER
C The order of each factor in the array A. N >= 0.
C
C H (input) INTEGER
C Hessenberg index. The factor A(:,:,H) is on entry in upper
C Hessenberg form. 1 <= H <= K.
C
C ILO (input) INTEGER
C IHI (input) INTEGER
C It is assumed that each factor in A is already upper
C triangular in rows and columns 1:ILO-1 and IHI+1:N.
C 1 <= ILO <= IHI <= N, if N > 0;
C ILO = 1 and IHI = 0, if N = 0.
C
C S (input) INTEGER array, dimension (K)
C The leading K elements of this array must contain the
C signatures of the factors. Each entry in S must be either
C 1 or -1.
C
C A (input/output) DOUBLE PRECISION array, dimension
C (LDA1,LDA2,K)
C On entry, the leading N-by-N-by-K part of this array
C must contain the factors in upper Hessenberg-triangular
C form, that is, A(:,:,H) is upper Hessenberg and the other
C factors are upper triangular.
C On exit, if JOB = 'S' and INFO = 0, the leading
C N-by-N-by-K part of this array contains the factors of
C A in periodic Schur form, that is, A(:,:,H) is upper quasi
C triangular and the other factors are upper triangular.
C On exit, if JOB = 'T' and INFO = 0, the leading
C N-by-N-by-K part of this array contains the factors of
C A as for the option JOB = 'S', but the product of the
C triangular factors corresponding to a 2-by-2 block in
C A(:,:,H) is diagonal.
C On exit, if JOB = 'E', then the leading N-by-N-by-K part
C of this array contains meaningless elements in the off-
C diagonal blocks. Consequently, the formulas (1) and (2)
C do not hold for the returned A and Q (if COMPQ <> 'N')
C in this case.
C
C LDA1 INTEGER
C The first leading dimension of the array A.
C LDA1 >= MAX(1,N).
C
C LDA2 INTEGER
C The second leading dimension of the array A.
C LDA2 >= MAX(1,N).
C
C Q (input/output) DOUBLE PRECISION array, dimension
C (LDQ1,LDQ2,K)
C On entry, if COMPQ = 'U', the leading N-by-N-by-K part
C of this array must contain the initial orthogonal factors
C as described in (1) and (2).
C On entry, if COMPQ = 'P', only parts of the leading
C N-by-N-by-K part of this array must contain some
C orthogonal factors as described by the parameters QIND.
C If COMPQ = 'I', this array should not be set on entry.
C On exit, if COMPQ = 'U' or COMPQ = 'I', the leading
C N-by-N-by-K part of this array contains the modified
C orthogonal factors as described in (1) and (2).
C On exit, if COMPQ = 'P', only parts of the leading
C N-by-N-by-K part contain some modified orthogonal factors
C as described by the parameters QIND.
C This array is not referenced if COMPQ = 'N'.
C
C LDQ1 INTEGER
C The first leading dimension of the array Q. LDQ1 >= 1,
C and, if COMPQ <> 'N', LDQ1 >= MAX(1,N).
C
C LDQ2 INTEGER
C The second leading dimension of the array Q. LDQ2 >= 1,
C and, if COMPQ <> 'N', LDQ2 >= MAX(1,N).
C
C ALPHAR (output) DOUBLE PRECISION array, dimension (N)
C On exit, if INFO = 0, the leading N elements of this array
C contain the scaled real parts of the eigenvalues of the
C matrix product A. The i-th eigenvalue of A is given by
C
C (ALPHAR(I) + ALPHAI(I)*SQRT(-1))/BETA(I) * BASE**SCAL(I),
C
C where BASE is the machine base (often 2.0). Complex
C conjugate eigenvalues appear in consecutive locations.
C
C ALPHAI (output) DOUBLE PRECISION array, dimension (N)
C On exit, if INFO = 0, the leading N elements of this array
C contain the scaled imaginary parts of the eigenvalues
C of A.
C
C BETA (output) DOUBLE PRECISION array, dimension (N)
C On exit, if INFO = 0, the leading N elements of this array
C contain indicators for infinite eigenvalues. That is, if
C BETA(I) = 0.0, then the i-th eigenvalue is infinite.
C Otherwise BETA(I) is set to 1.0.
C
C SCAL (output) INTEGER array, dimension (N)
C On exit, if INFO = 0, the leading N elements of this array
C contain the scaling parameters for the eigenvalues of A.
C
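C
C             For example, a finite i-th eigenvalue (BETA(I) = 1.0) can
C             be reconstructed in double complex arithmetic as
C
C                LAMBDA = DCMPLX( ALPHAR(I), ALPHAI(I) )*BASE**SCAL(I),
C
C             where BASE = DLAMCH( 'Base' ).
C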
C Workspace
C
C IWORK INTEGER array, dimension (LIWORK)
C On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK,
C and if IWARN > N, the nonzero absolute values in IWORK(2),
C ..., IWORK(N+1) are indices of the possibly inaccurate
C eigenvalues, as well as of the corresponding 1-by-1 or
C 2-by-2 diagonal blocks of the factors in the array A.
C The 2-by-2 blocks correspond to negative values in IWORK.
C One negative value is stored for each such eigenvalue
C pair. Its modulus indicates the starting index of a
C 2-by-2 block. This is also done for any value of IWARN,
C if a 2-by-2 block is found to have two real eigenvalues.
C On exit, if INFO = -22, IWORK(1) returns the minimum value
C of LIWORK.
C
C LIWORK INTEGER
C The length of the array IWORK. LIWORK >= 2*K+N.
C
C DWORK DOUBLE PRECISION array, dimension (LDWORK)
C On exit, if INFO = 0, DWORK(1) returns the optimal LDWORK,
C and DWORK(2), ..., DWORK(1+K) contain the Frobenius norms
C of the factors of the formal matrix product used by the
C algorithm.
C On exit, if INFO = -24, DWORK(1) returns the minimum value
C of LDWORK.
C
C LDWORK INTEGER
C The length of the array DWORK.
C LDWORK >= K + MAX( 2*N, 8*K ).
C
C Warning Indicator
C
C IWARN INTEGER
C = 0 : no warnings;
C = 1,..,N-1 : A is in periodic Schur form, but the
C algorithm was not able to reveal information
C about the eigenvalues from the 2-by-2
C blocks.
C ALPHAR(i), ALPHAI(i), BETA(i) and SCAL(i),
C can be incorrect for i = 1, ..., IWARN+1;
C = N : some eigenvalues might be inaccurate;
C = N+1 : some eigenvalues might be inaccurate, and
C details can be found in IWORK.
C
C Error Indicator
C
C INFO INTEGER
C             = 0       :  successful exit;
C < 0 : if INFO = -i, the i-th argument had an illegal
C value;
C = 1,..,N : the periodic QZ iteration did not converge.
C A is not in periodic Schur form, but
C ALPHAR(i), ALPHAI(i), BETA(i) and SCAL(i), for
C i = INFO+1,...,N should be correct.
C
C METHOD
C
C A modified version of the periodic QZ algorithm is used [1], [2].
C
C REFERENCES
C
C [1] Bojanczyk, A., Golub, G. H. and Van Dooren, P.
C The periodic Schur decomposition: algorithms and applications.
C In F.T. Luk (editor), Advanced Signal Processing Algorithms,
C Architectures, and Implementations III, Proc. SPIE Conference,
C vol. 1770, pp. 31-42, 1992.
C
C [2] Kressner, D.
C An efficient and reliable implementation of the periodic QZ
C algorithm. IFAC Workshop on Periodic Control Systems (PSYCO
C 2001), Como (Italy), August 27-28 2001. Periodic Control
C Systems 2001 (IFAC Proceedings Volumes), Pergamon.
C
C NUMERICAL ASPECTS
C
C The implemented method is numerically backward stable.
C     The algorithm requires O(K*N**3) floating point operations.
C
C CONTRIBUTOR
C
C D. Kressner, Technical Univ. Berlin, Germany, June 2001.
C
C REVISIONS
C
C V. Sima, Research Institute for Informatics, Bucharest, Romania,
C July 2009, SLICOT Library version of the routine PHGEQZ.
C V. Sima, June 2010, July 2010, Nov. 2010, Sep. 2011, Oct. 2011,
C Jan. 2013, Feb. 2013, July 2013, Sep. 2016, Nov. 2016, Apr. 2018.
C Dec. 2018, Jan. 2019, Feb. 2019, Mar. 2019, Aug.-Sep. 2019, Dec.
C 2019, Jan.-Apr. 2020.
C
C KEYWORDS
C
C Eigenvalues, QZ algorithm, periodic QZ algorithm, orthogonal
C transformation.
C
C ******************************************************************
C
C .. Parameters ..
C .. NITER is the number of consecutive iterations for a deflated ..
C .. subproblem before switching from implicit to explicit shifts...
C .. MCOUNT is, similarly, the maximum number of consecutive ..
C .. iterations before switching from explicit to implicit shifts...
C
INTEGER MCOUNT, NITER
PARAMETER ( MCOUNT = 1, NITER = 10 )
DOUBLE PRECISION ZERO, ONE, TEN
PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0, TEN = 1.0D+1 )
C .. Scalar Arguments ..
CHARACTER COMPQ, DEFL, JOB
INTEGER H, IHI, ILO, INFO, IWARN, K, LDA1, LDA2, LDQ1,
$ LDQ2, LDWORK, LIWORK, N
C .. Array Arguments ..
INTEGER IWORK(*), QIND(*), S(*), SCAL(*)
DOUBLE PRECISION A(LDA1,LDA2,*), ALPHAI(*), ALPHAR(*), BETA(*),
$ DWORK(*), Q(LDQ1,LDQ2,*)
C .. Local Arrays ..
DOUBLE PRECISION MACPAR(5)
C .. Local Scalars ..
LOGICAL ADEFL, ISINF, LCMPQ, LINIQ, LPARQ, LSCHR, LSVD
CHARACTER SHFT
INTEGER AIND, COUNT, COUNTE, I, IERR, IFIRST, IFRSTM,
$ IITER, ILAST, ILASTM, IN, IO, J, J1, JDEF,
$ JITER, JLO, L, LDEF, LM, MAXIT, NTRA, OPTDW,
$ OPTIW, QI, SINV, TITER, ZITER
DOUBLE PRECISION A1, A2, A3, A4, BASE, CS, CS1, CS2, LGBAS, NRM,
$ SAFMAX, SAFMIN, SDET, SMLNUM, SN, SN1, SN2,
$ SVMN, TEMP, TEMP2, TOL, TOLL, ULP, W1, W2
C .. Workspace Pointers ..
INTEGER MAPA, MAPH, MAPQ, PDW, PFREE, PNORM
C .. External Functions ..
LOGICAL LSAME
DOUBLE PRECISION DLAMCH, DLANHS, DLAPY2, DLAPY3
EXTERNAL DLAMCH, DLANHS, DLAPY2, DLAPY3, LSAME
C .. External Subroutines ..
EXTERNAL DLABAD, DLADIV, DLARTG, DLAS2, DLASET, DROT,
$ MA01BD, MB03AB, MB03AF, MB03BA, MB03BB, MB03BC,
$ MB03BF, XERBLA
C .. Intrinsic Functions ..
INTRINSIC ABS, DBLE, INT, LOG, MAX, MIN, MOD, SIGN, SQRT
C
C .. Executable Statements ..
C
C Decode the scalar input parameters.
C
LSVD = LSAME( JOB, 'T' )
LSCHR = LSAME( JOB, 'S' ) .OR. LSVD
LINIQ = LSAME( COMPQ, 'I' )
LCMPQ = LSAME( COMPQ, 'U' ) .OR. LINIQ
LPARQ = LSAME( COMPQ, 'P' )
ADEFL = LSAME( DEFL, 'A' )
IWARN = 0
OPTDW = K + MAX( 2*N, 8*K )
OPTIW = 2*K + N
C
C Check the scalar input parameters.
C
INFO = 0
IF ( .NOT. ( LSCHR .OR. LSAME( JOB, 'E' ) ) ) THEN
INFO = -1
ELSE IF ( .NOT.( ADEFL .OR. LSAME( DEFL, 'C' ) ) ) THEN
INFO = -2
ELSE IF ( .NOT.( LCMPQ .OR. LPARQ .OR. LSAME( COMPQ, 'N' ) ) )
$ THEN
INFO = -3
ELSE IF ( K.LT.1 ) THEN
INFO = -5
ELSE IF ( N.LT.0 ) THEN
INFO = -6
ELSE IF ( H.LT.1 .OR. H.GT.K ) THEN
INFO = -7
ELSE IF ( ILO.LT.1 ) THEN
INFO = -8
ELSE IF ( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN
INFO = -9
ELSE IF ( LDA1.LT.MAX( 1, N ) ) THEN
INFO = -12
ELSE IF ( LDA2.LT.MAX( 1, N ) ) THEN
INFO = -13
ELSE IF ( LDQ1.LT.1 .OR. ( ( LCMPQ .OR. LPARQ )
$ .AND. LDQ1.LT.N ) ) THEN
INFO = -15
ELSE IF ( LDQ2.LT.1 .OR. ( ( LCMPQ .OR. LPARQ )
$ .AND. LDQ2.LT.N ) ) THEN
INFO = -16
ELSE IF ( LIWORK.LT.OPTIW ) THEN
IWORK(1) = OPTIW
INFO = -22
ELSE IF ( LDWORK.LT.OPTDW ) THEN
DWORK(1) = DBLE( OPTDW )
INFO = -24
END IF
C
C Return if there were illegal values.
C
IF ( INFO.NE.0 ) THEN
CALL XERBLA( 'MB03BD', -INFO )
RETURN
END IF
C
C Quick return if possible.
C
IF ( N.EQ.0 ) THEN
DWORK(1) = ONE
IWORK(1) = 1
RETURN
END IF
C
C Compute Maps for accessing A and Q.
C
MAPA = 0
MAPH = 2
MAPQ = K
QI = 0
CALL MB03BA( K, H, S, SINV, IWORK(MAPA+1), IWORK(MAPQ+1) )
C
C Machine Constants.
C
IN = IHI + 1 - ILO
SAFMIN = DLAMCH( 'SafeMinimum' )
SAFMAX = ONE / SAFMIN
ULP = DLAMCH( 'Precision' )
TOLL = TEN*ULP
CALL DLABAD( SAFMIN, SAFMAX )
SMLNUM = SAFMIN*( IN / ULP )
BASE = DLAMCH( 'Base' )
LGBAS = LOG( BASE )
C
MACPAR(2) = DLAMCH( 'Underflow' )
IF ( LSVD ) THEN
MACPAR(1) = DLAMCH( 'ORmax' )
MACPAR(3) = SAFMIN
MACPAR(4) = DLAMCH( 'Epsilon' )
MACPAR(5) = BASE
END IF
IF ( K.GE.INT( LOG( MACPAR(2) ) / LOG( ULP ) ) ) THEN
C
C Start Iteration with a controlled zero shift.
C
ZITER = -1
ELSE
ZITER = 0
END IF
C
C     Initialize IWORK (needed in case of losing accuracy).
C
DO 10 I = 2*K + 1, 2*K + N
IWORK(I) = 0
10 CONTINUE
C
C Compute norms and initialize Q.
C
PNORM = 0
PFREE = K
DO 20 I = 1, K
AIND = IWORK(MAPA+I)
DWORK(I) = DLANHS( 'Frobenius', IN, A(ILO,ILO,AIND), LDA1,
$ DWORK )
J = 0
IF ( LINIQ ) THEN
J = I
ELSE IF ( LPARQ ) THEN
J = -QIND(I)
END IF
IF ( J.NE.0 )
$ CALL DLASET( 'Full', N, N, ZERO, ONE, Q(1,1,J), LDQ1 )
20 CONTINUE
C
C Set Eigenvalues IHI+1:N.
C
DO 30 J = IHI + 1, N
CALL MA01BD( BASE, LGBAS, K, S, A(J,J,1), LDA1*LDA2, ALPHAR(J),
$ BETA(J), SCAL(J) )
ALPHAI(J) = ZERO
30 CONTINUE
C
C If IHI < ILO, skip QZ steps.
C
IF ( IHI.LT.ILO )
$ GO TO 550
C
C MAIN PERIODIC QZ ITERATION LOOP.
C
C Initialize dynamic indices.
C
C Eigenvalues ILAST+1:N have been found.
C Column operations modify rows IFRSTM:whatever.
C Row operations modify columns whatever:ILASTM.
C
C If only eigenvalues are being computed, then
C IFRSTM is the row of the last splitting row above row ILAST;
C this is always at least ILO.
C IITER counts iterations since the last eigenvalue was found,
C to tell when to use an observed zero or exceptional shift.
C MAXIT is the maximum number of QZ sweeps allowed.
C
ILAST = IHI
IF ( LSCHR ) THEN
IFRSTM = 1
ILASTM = N
ELSE
IFRSTM = ILO
ILASTM = IHI
END IF
IITER = 0
TITER = 0
COUNT = 0
COUNTE = 0
MAXIT = 120 * IN
C
DO 540 JITER = 1, MAXIT
C
C Special Case: ILAST = ILO.
C
IF ( ILAST.EQ.ILO )
$ GO TO 390
C
C **************************************************************
C * CHECK FOR DEFLATION *
C **************************************************************
C
C Test 1: Deflation in the Hessenberg matrix.
C
IF ( ADEFL )
$ TOL = MAX( SAFMIN, DWORK(PNORM+1)*ULP )
AIND = IWORK(MAPA+1)
JLO = ILO
DO 40 J = ILAST, ILO + 1, -1
IF ( .NOT.ADEFL ) THEN
TOL = ABS( A(J-1,J-1,AIND) ) + ABS( A(J,J,AIND) )
IF ( TOL.EQ.ZERO )
$ TOL = DLANHS( '1', J-ILO+1, A(ILO,ILO,AIND), LDA1,
$ DWORK )
TOL = MAX( ULP*TOL, SMLNUM )
END IF
IF ( ABS( A(J,J-1,AIND) ).LE.TOL ) THEN
A(J,J-1,AIND) = ZERO
JLO = J
IF ( J.EQ.ILAST )
$ GO TO 390
GO TO 50
END IF
40 CONTINUE
C
50 CONTINUE
C
C Test 2: Deflation in the triangular matrices with index 1.
C
DO 70 LDEF = 2, K
AIND = IWORK(MAPA+LDEF)
IF ( S(AIND).EQ.SINV ) THEN
IF ( ADEFL )
$ TOL = MAX( SAFMIN, DWORK(PNORM+LDEF)*ULP )
DO 60 J = ILAST, JLO, -1
IF ( .NOT.ADEFL ) THEN
IF ( J.EQ.ILAST ) THEN
TOL = ABS( A(J-1,J,AIND) )
ELSE IF ( J.EQ.JLO ) THEN
TOL = ABS( A(J,J+1,AIND) )
ELSE
TOL = ABS( A(J-1,J,AIND) )
$ + ABS( A(J,J+1,AIND) )
END IF
IF ( TOL.EQ.ZERO )
$ TOL = DLANHS( '1', J-JLO+1, A(JLO,JLO,AIND),
$ LDA1, DWORK )
TOL = MAX( ULP*TOL, SMLNUM )
END IF
IF ( ABS( A(J,J,AIND) ).LE.TOL ) THEN
A(J,J,AIND) = ZERO
GO TO 170
END IF
60 CONTINUE
END IF
70 CONTINUE
C
C Test 3: Deflation in the triangular matrices with index -1.
C
DO 90 LDEF = 2, K
AIND = IWORK(MAPA+LDEF)
IF ( S(AIND).NE.SINV ) THEN
IF ( ADEFL )
$ TOL = MAX( SAFMIN, DWORK(PNORM+LDEF)*ULP )
DO 80 J = ILAST, JLO, -1
IF ( .NOT.ADEFL ) THEN
IF ( J.EQ.ILAST ) THEN
TOL = ABS( A(J-1,J,AIND) )
ELSE IF ( J.EQ.JLO ) THEN
TOL = ABS( A(J,J+1,AIND) )
ELSE
TOL = ABS( A(J-1,J,AIND) )
$ + ABS( A(J,J+1,AIND) )
END IF
IF ( TOL.EQ.ZERO )
$ TOL = DLANHS( '1', J-JLO+1, A(JLO,JLO,AIND),
$ LDA1, DWORK )
TOL = MAX( ULP*TOL, SMLNUM )
END IF
IF ( ABS( A(J,J,AIND) ).LE.TOL ) THEN
A(J,J,AIND) = ZERO
GO TO 320
END IF
80 CONTINUE
END IF
90 CONTINUE
C
C Test 4: Controlled zero shift.
C
IF ( ZITER.GE.7 .OR. ZITER.LT.0 ) THEN
C
C Make Hessenberg matrix upper triangular.
C
AIND = IWORK(MAPA+1)
PDW = PFREE + 1
DO 100 J = JLO, ILAST - 1
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS, SN, A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1,
$ A(J+1,J+1,AIND), LDA1, CS, SN )
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
100 CONTINUE
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+1)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+1)) )
END IF
IF ( QI.NE.0 ) THEN
PDW = PFREE + 1
DO 110 J = JLO, ILAST - 1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS, SN )
110 CONTINUE
END IF
C
C Propagate transformations back to A_1.
C
DO 150 L = K, 2, -1
AIND = IWORK(MAPA+L)
PDW = PFREE + 1
IF ( ADEFL )
$ TOL = MAX( SAFMIN, DWORK(PNORM+L)*ULP )
IF ( S(AIND).EQ.SINV ) THEN
DO 120 J = JLO, ILAST - 1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
IF ( SN.NE.ZERO ) THEN
CALL DROT( J+2-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
C
C Check for deflation.
C
IF ( .NOT.ADEFL ) THEN
TOL = ABS( A(J,J,AIND) ) +
$ ABS( A(J+1,J+1,AIND) )
IF ( TOL.EQ.ZERO )
$ TOL = DLANHS( '1', J-JLO+2,
$ A(JLO,JLO,AIND), LDA1,
$ DWORK )
TOL = MAX( ULP*TOL, SMLNUM )
END IF
IF ( ABS( A(J+1,J,AIND) ).LE.TOL ) THEN
CS = ONE
SN = ZERO
A(J+1,J,AIND) = ZERO
END IF
END IF
IF ( SN.NE.ZERO ) THEN
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS, SN,
$ A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1,
$ A(J+1,J+1,AIND), LDA1, CS, SN )
END IF
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
120 CONTINUE
ELSE
DO 130 J = JLO, ILAST - 1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
IF ( SN.NE.ZERO ) THEN
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS, SN )
C
C Check for deflation.
C
IF ( .NOT.ADEFL ) THEN
TOL = ABS( A(J,J,AIND) ) +
$ ABS( A(J+1,J+1,AIND) )
IF ( TOL.EQ.ZERO )
$ TOL = DLANHS( '1', J-JLO+2,
$ A(JLO,JLO,AIND), LDA1,
$ DWORK )
TOL = MAX( ULP*TOL, SMLNUM )
END IF
IF ( ABS( A(J+1,J,AIND) ).LE.TOL ) THEN
CS = ONE
SN = ZERO
A(J+1,J,AIND) = ZERO
END IF
END IF
IF ( SN.NE.ZERO ) THEN
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, -A(J+1,J,AIND), CS, SN,
$ A(J+1,J+1,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( J+1-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
END IF
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
130 CONTINUE
END IF
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+L)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+L)) )
END IF
IF ( QI.NE.0 ) THEN
PDW = PFREE + 1
DO 140 J = JLO, ILAST - 1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
IF ( SN.NE.ZERO )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS,
$ SN )
140 CONTINUE
END IF
150 CONTINUE
C
C Apply the transformations to the right hand side of the
C Hessenberg factor.
C
AIND = IWORK(MAPA+1)
PDW = PFREE + 1
ZITER = 0
DO 160 J = JLO, ILAST - 1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
IF ( SN.NE.ZERO ) THEN
CALL DROT( J+2-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
ELSE
ZITER = -1
END IF
160 CONTINUE
C
C No QZ iteration.
C
GO TO 530
END IF
C
C **************************************************************
C * HANDLE DEFLATIONS *
C **************************************************************
C
C Case I: Deflation occurs in the Hessenberg matrix. The QZ
C iteration is only applied to the JLO:ILAST part.
C
IFIRST = JLO
C
C Go to the periodic QZ steps.
C
GO TO 420
C
C Case II: Deflation occurs in a triangular matrix with index 1.
C
C Do an unshifted periodic QZ step.
C
170 CONTINUE
JDEF = J
AIND = IWORK(MAPA+1)
PDW = PFREE + 1
DO 180 J = JLO, JDEF - 1
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS, SN, A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1, A(J+1,J+1,AIND),
$ LDA1, CS, SN )
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
180 CONTINUE
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+1)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+1)) )
END IF
IF ( QI.NE.0 ) THEN
PDW = PFREE + 1
DO 190 J = JLO, JDEF - 1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS, SN )
190 CONTINUE
END IF
C
C Propagate the transformations through the triangular matrices.
C Due to the zero element on the diagonal of the LDEF-th factor,
C the number of transformations drops by one.
C
DO 230 L = K, 2, -1
AIND = IWORK(MAPA+L)
IF ( L.LT.LDEF ) THEN
NTRA = JDEF - 2
ELSE
NTRA = JDEF - 1
END IF
PDW = PFREE + 1
IF ( S(AIND).EQ.SINV ) THEN
DO 200 J = JLO, NTRA
CS = DWORK(PDW)
SN = DWORK(PDW+1)
CALL DROT( J+2-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS, SN,
$ A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1,
$ A(J+1,J+1,AIND), LDA1, CS, SN )
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
200 CONTINUE
ELSE
DO 210 J = JLO, NTRA
CS = DWORK(PDW)
SN = DWORK(PDW+1)
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS, SN )
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, -A(J+1,J,AIND), CS, SN,
$ A(J+1,J+1,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( J+1-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
210 CONTINUE
END IF
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+L)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+L)) )
END IF
IF ( QI.NE.0 ) THEN
PDW = PFREE + 1
DO 220 J = JLO, NTRA
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS, SN )
220 CONTINUE
END IF
230 CONTINUE
C
C Apply the transformations to the right hand side of the
C Hessenberg factor.
C
AIND = IWORK(MAPA+1)
PDW = PFREE + 1
DO 240 J = JLO, JDEF - 2
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
CALL DROT( J+2-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
240 CONTINUE
C
C Do an unshifted periodic QZ step.
C
PDW = PFREE + 1
DO 250 J = ILAST, JDEF + 1, -1
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, -A(J,J-1,AIND), CS, SN, A(J,J,AIND) )
A(J,J-1,AIND) = ZERO
CALL DROT( J-IFRSTM, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
250 CONTINUE
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+2)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+2)) )
END IF
IF ( QI.NE.0 ) THEN
PDW = PFREE + 1
DO 260 J = ILAST, JDEF + 1, -1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
CALL DROT( N, Q(1,J-1,QI), 1, Q(1,J,QI), 1, CS, SN )
260 CONTINUE
END IF
C
C Propagate the transformations through the triangular matrices.
C
DO 300 L = 2, K
AIND = IWORK(MAPA+L)
IF ( L.GT.LDEF ) THEN
NTRA = JDEF + 2
ELSE
NTRA = JDEF + 1
END IF
PDW = PFREE + 1
IF ( S(AIND).NE.SINV ) THEN
DO 270 J = ILAST, NTRA, -1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
CALL DROT( J+1-IFRSTM, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
TEMP = A(J-1,J-1,AIND)
CALL DLARTG( TEMP, A(J,J-1,AIND), CS, SN,
$ A(J-1,J-1,AIND) )
A(J,J-1,AIND) = ZERO
CALL DROT( ILASTM-J+1, A(J-1,J,AIND), LDA1,
$ A(J,J,AIND), LDA1, CS, SN )
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
270 CONTINUE
ELSE
DO 280 J = ILAST, NTRA, -1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
CALL DROT( ILASTM-J+2, A(J-1,J-1,AIND), LDA1,
$ A(J,J-1,AIND), LDA1, CS, SN )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, -A(J,J-1,AIND), CS, SN,
$ A(J,J,AIND) )
A(J,J-1,AIND) = ZERO
CALL DROT( J-IFRSTM, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
DWORK(PDW) = CS
DWORK(PDW+1) = SN
PDW = PDW + 2
280 CONTINUE
END IF
LM = MOD( L, K ) + 1
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+LM)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+LM)) )
END IF
IF ( QI.NE.0 ) THEN
PDW = PFREE + 1
DO 290 J = ILAST, NTRA, -1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
CALL DROT( N, Q(1,J-1,QI), 1, Q(1,J,QI), 1, CS, SN )
290 CONTINUE
END IF
300 CONTINUE
C
C Apply the transformations to the left hand side of the
C Hessenberg factor.
C
AIND = IWORK(MAPA+1)
PDW = PFREE + 1
DO 310 J = ILAST, JDEF + 2, -1
CS = DWORK(PDW)
SN = DWORK(PDW+1)
PDW = PDW + 2
CALL DROT( ILASTM-J+2, A(J-1,J-1,AIND), LDA1, A(J,J-1,AIND),
$ LDA1, CS, SN )
310 CONTINUE
C
C No QZ iteration.
C
GO TO 530
C
C Case III: Deflation occurs in a triangular matrix with
C index -1.
C
320 CONTINUE
JDEF = J
IF ( JDEF.GT.( ( ILAST - JLO + 1 )/2 ) ) THEN
C
C Chase the zero downwards to the last position.
C
DO 340 J1 = JDEF, ILAST - 1
J = J1
AIND = IWORK(MAPA+LDEF)
TEMP = A(J,J+1,AIND)
CALL DLARTG( TEMP, A(J+1,J+1,AIND), CS, SN,
$ A(J,J+1,AIND) )
A(J+1,J+1,AIND) = ZERO
CALL DROT( ILASTM-J-1, A(J,J+2,AIND), LDA1,
$ A(J+1,J+2,AIND), LDA1, CS, SN )
LM = MOD( LDEF, K ) + 1
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+LM)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+LM)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS, SN )
DO 330 L = 1, K - 1
AIND = IWORK(MAPA+LM)
IF ( LM.EQ.1 ) THEN
CALL DROT( ILASTM-J+2, A(J,J-1,AIND), LDA1,
$ A(J+1,J-1,AIND), LDA1, CS, SN )
TEMP = A(J+1,J,AIND)
CALL DLARTG( TEMP, -A(J+1,J-1,AIND), CS, SN,
$ A(J+1,J,AIND) )
A(J+1,J-1,AIND) = ZERO
CALL DROT( J-IFRSTM+1, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
J = J - 1
ELSE IF ( S(AIND).EQ.SINV ) THEN
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS, SN )
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, -A(J+1,J,AIND), CS, SN,
$ A(J+1,J+1,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( J-IFRSTM+1, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
ELSE
CALL DROT( J-IFRSTM+2, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS, SN,
$ A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1,
$ A(J+1,J+1,AIND), LDA1, CS, SN )
END IF
LM = MOD( LM, K ) + 1
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+LM)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+LM)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS,
$ SN )
330 CONTINUE
AIND = IWORK(MAPA+LDEF)
CALL DROT( J-IFRSTM+1, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
340 CONTINUE
C
C Deflate the last element in the Hessenberg matrix.
C
AIND = IWORK(MAPA+1)
J = ILAST
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, -A(J,J-1,AIND), CS, SN, A(J,J,AIND) )
A(J,J-1,AIND) = ZERO
CALL DROT( J-IFRSTM, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+2)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+2)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J-1,QI), 1, Q(1,J,QI), 1, CS, SN )
DO 350 L = 2, LDEF - 1
AIND = IWORK(MAPA+L)
IF ( S(AIND).NE.SINV ) THEN
CALL DROT( J+1-IFRSTM, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
TEMP = A(J-1,J-1,AIND)
CALL DLARTG( TEMP, A(J,J-1,AIND), CS, SN,
$ A(J-1,J-1,AIND) )
A(J,J-1,AIND) = ZERO
CALL DROT( ILASTM-J+1, A(J-1,J,AIND), LDA1,
$ A(J,J,AIND), LDA1, CS, SN )
ELSE
CALL DROT( ILASTM-J+2, A(J-1,J-1,AIND), LDA1,
$ A(J,J-1,AIND), LDA1, CS, SN )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, -A(J,J-1,AIND), CS, SN,
$ A(J,J,AIND) )
A(J,J-1,AIND) = ZERO
CALL DROT( J-IFRSTM, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
END IF
LM = L + 1
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+LM)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+LM)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J-1,QI), 1, Q(1,J,QI), 1, CS, SN )
350 CONTINUE
AIND = IWORK(MAPA+LDEF)
CALL DROT( J+1-IFRSTM, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
ELSE
C
C Chase the zero upwards to the first position.
C
DO 370 J1 = JDEF, JLO + 1, -1
J = J1
AIND = IWORK(MAPA+LDEF)
TEMP = A(J-1,J,AIND)
CALL DLARTG( TEMP, -A(J-1,J-1,AIND), CS, SN,
$ A(J-1,J,AIND) )
A(J-1,J-1,AIND) = ZERO
CALL DROT( J-IFRSTM-1, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+LDEF)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+LDEF)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J-1,QI), 1, Q(1,J,QI), 1, CS, SN )
LM = LDEF - 1
DO 360 L = 1, K - 1
AIND = IWORK(MAPA+LM)
IF ( LM.EQ.1 ) THEN
CALL DROT( J-IFRSTM+2, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
TEMP = A(J,J-1,AIND)
CALL DLARTG( TEMP, A(J+1,J-1,AIND), CS, SN,
$ A(J,J-1,AIND) )
A(J+1,J-1,AIND) = ZERO
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS, SN )
J = J + 1
ELSE IF ( S(AIND).NE.SINV ) THEN
CALL DROT( ILASTM-J+2, A(J-1,J-1,AIND), LDA1,
$ A(J,J-1,AIND), LDA1, CS, SN )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, -A(J,J-1,AIND), CS, SN,
$ A(J,J,AIND) )
A(J,J-1,AIND) = ZERO
CALL DROT( J-IFRSTM, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
ELSE
CALL DROT( J-IFRSTM+1, A(IFRSTM,J-1,AIND), 1,
$ A(IFRSTM,J,AIND), 1, CS, SN )
TEMP = A(J-1,J-1,AIND)
CALL DLARTG( TEMP, A(J,J-1,AIND), CS, SN,
$ A(J-1,J-1,AIND) )
A(J,J-1,AIND) = ZERO
CALL DROT( ILASTM-J+1, A(J-1,J,AIND), LDA1,
$ A(J,J,AIND), LDA1, CS, SN )
END IF
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+LM)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+LM)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J-1,QI), 1, Q(1,J,QI), 1, CS,
$ SN )
LM = LM - 1
IF ( LM.LE.0 )
$ LM = K
360 CONTINUE
AIND = IWORK(MAPA+LDEF)
CALL DROT( ILASTM-J+1, A(J-1,J,AIND), LDA1, A(J,J,AIND),
$ LDA1, CS, SN )
370 CONTINUE
C
C Deflate the first element in the Hessenberg matrix.
C
AIND = IWORK(MAPA+1)
J = JLO
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS, SN, A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1, A(J+1,J+1,AIND),
$ LDA1, CS, SN )
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+1)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+1)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS, SN )
DO 380 L = K, LDEF + 1, -1
AIND = IWORK(MAPA+L)
IF ( S(AIND).EQ.SINV ) THEN
CALL DROT( J+2-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS, SN,
$ A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1,
$ A(J+1,J+1,AIND), LDA1, CS, SN )
ELSE
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS, SN )
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, -A(J+1,J,AIND), CS, SN,
$ A(J+1,J+1,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( J+1-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS, SN )
END IF
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+L)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+L)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS, SN )
380 CONTINUE
AIND = IWORK(MAPA+LDEF)
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1, A(J+1,J+1,AIND),
$ LDA1, CS, SN )
END IF
C
C No QZ iteration.
C
GO TO 530
C
C Special case: A 1x1 block splits off at the bottom.
C
390 CONTINUE
CALL MA01BD( BASE, LGBAS, K, S, A(ILAST,ILAST,1), LDA1*LDA2,
$ ALPHAR(ILAST), BETA(ILAST), SCAL(ILAST) )
ALPHAI(ILAST) = ZERO
C
C Check for possible loss of accuracy.
C
IF ( BETA(ILAST).NE.ZERO ) THEN
DO 400 L = 1, K
AIND = IWORK(MAPA+L)
TEMP = A(ILAST,ILAST,AIND)
IF ( TEMP.NE.ZERO ) THEN
IF ( ABS( TEMP ).LT.DWORK(L)*TOLL ) THEN
IWARN = N + 1
IWORK(2*K+ILAST) = ILAST
GO TO 410
END IF
END IF
400 CONTINUE
END IF
C
C Go to next block - exit if finished.
C
410 CONTINUE
ILAST = ILAST - 1
IF ( ILAST.LT.ILO )
$ GO TO 550
C
C Reset iteration counters.
C
IITER = 0
TITER = 0
COUNT = 0
COUNTE = 0
IF ( ZITER.NE.-1 )
$ ZITER = 0
IF ( .NOT.LSCHR ) THEN
ILASTM = ILAST
IF ( IFRSTM.GT.ILAST )
$ IFRSTM = ILO
END IF
C
C No QZ iteration.
C
GO TO 530
C
C **************************************************************
C * PERIODIC QZ STEP *
C **************************************************************
C
C It is assumed that IFIRST < ILAST.
C
420 CONTINUE
C
IITER = IITER + 1
ZITER = ZITER + 1
IF( .NOT.LSCHR )
$ IFRSTM = IFIRST
IF ( IFIRST+1.EQ.ILAST ) THEN
C
C Special case -- 2x2 block.
C
J = ILAST - 1
IF ( TITER.LT.2 ) THEN
TITER = TITER + 1
C
C Try to deflate the 2-by-2 problem.
C
PDW = PFREE + 1
DO 430 L = 1, K
DWORK(PDW ) = A(J,J,L)
DWORK(PDW+1) = A(J+1,J,L)
DWORK(PDW+2) = A(J,J+1,L)
DWORK(PDW+3) = A(J+1,J+1,L)
PDW = PDW + 4
430 CONTINUE
IF ( SINV.LT.0 ) THEN
I = IWORK(MAPQ+1)
IWORK(MAPQ+1) = IWORK(MAPA+1)
END IF
CALL MB03BF( K, IWORK(MAPH), S, SINV, DWORK(PFREE+1),
$ 2, 2, ULP )
IF ( SINV.LT.0 )
$ IWORK(MAPQ+1) = I
I = PFREE + 4*( H - 1 )
IF ( ABS( DWORK(I+2) ).LT.
$ ULP*( MAX( ABS( DWORK(I+1) ), ABS( DWORK(I+3) ),
$ ABS( DWORK(I+4) ) ) ) ) THEN
C
C Construct a perfect shift polynomial. This may fail,
C so we try it twice (indicated by TITER).
C
CS1 = ONE
SN1 = ONE
DO 440 L = K, 2, -1
AIND = IWORK(MAPA+L)
TEMP = DWORK(PFREE+AIND*4)
IF ( S(AIND).EQ.SINV ) THEN
CALL DLARTG( CS1*A(J,J,AIND), SN1*TEMP, CS1,
$ SN1, TEMP )
ELSE
CALL DLARTG( CS1*TEMP, SN1*A(J,J,AIND), CS1,
$ SN1, TEMP )
END IF
440 CONTINUE
AIND = IWORK(MAPA+1)
TEMP = DWORK(PFREE+AIND*4)
CALL DLARTG( A(J,J,AIND)*CS1-TEMP*SN1,
$ A(J+1,J,AIND)*CS1, CS1, SN1, TEMP )
GO TO 510
END IF
END IF
C
C Looks like a complex block.
C 1. Compute the product SVD of the triangular matrices
C (optionally).
C
IF ( LSVD ) THEN
CALL MB03BC( K, IWORK(MAPA+1), S, SINV, A(J,J,1), LDA1,
$ LDA2, MACPAR, DWORK(PFREE+1),
$ DWORK(PFREE+K+1), DWORK(PFREE+2*K+1) )
C
C Update factors and transformations.
C
AIND = IWORK(MAPA+1)
CS2 = DWORK(PFREE+1)
SN2 = DWORK(PFREE+K+1)
CALL DROT( ILASTM-IFRSTM+1, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS2, SN2 )
DO 450 L = 2, K
AIND = IWORK(MAPA+L)
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+L)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+L)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS2,
$ SN2 )
CS1 = CS2
SN1 = SN2
CS2 = DWORK(PFREE+L)
SN2 = DWORK(PFREE+K+L)
IF (S(AIND).EQ.SINV) THEN
CALL DROT( ILASTM-J-1, A(J,J+2,AIND), LDA1,
$ A(J+1,J+2,AIND), LDA1, CS1, SN1 )
CALL DROT( J-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS2, SN2 )
ELSE
CALL DROT( ILASTM-J-1, A(J,J+2,AIND), LDA1,
$ A(J+1,J+2,AIND), LDA1, CS2, SN2 )
CALL DROT( J-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
END IF
450 CONTINUE
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+1)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+1)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS2, SN2 )
AIND = IWORK(MAPA+1)
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS2, SN2 )
END IF
C
C 2. Compute complex eigenvalues.
C
CALL MB03BB( BASE, LGBAS, ULP, K, IWORK(MAPA+1), S, SINV,
$ A(J,J,1), LDA1, LDA2, ALPHAR(J), ALPHAI(J),
$ BETA(J), SCAL(J), DWORK(PFREE+1), IERR )
IF ( IERR.EQ.1 ) THEN
C
C The single shift periodic QZ did not converge, set
C IWARN = J to indicate that the eigenvalues are not
C assigned.
C
IWARN = MAX( J, IWARN )
ELSE IF ( IERR.EQ.2 ) THEN
C
C Some computed eigenvalues might be inaccurate.
C
IF ( IWARN.EQ.0 )
$ IWARN = N
END IF
C
C Check for real eigenvalues and possible loss of accuracy.
C Also, set zero or infinite eigenvalues where appropriate.
C
DO 460 L = 1, K
AIND = IWORK(MAPA+L)
IF ( ALPHAI(J).EQ.ZERO .AND. BETA(J).NE.ZERO ) THEN
IF ( ABS( A(J,J,AIND) ).LT.DWORK(L)*TOLL ) THEN
IWARN = N + 1
IWORK(2*K+J) = -J
GO TO 470
END IF
ELSE
A1 = A(J,J,AIND)
A3 = A(J,J+1,AIND)
A4 = A(J+1,J+1,AIND)
NRM = DLAPY3( A1, A3, A4 )
IF ( L.EQ.IWORK(MAPA+1) ) THEN
A2 = A(J+1,J,L)
NRM = DLAPY2( NRM, A2 )
END IF
SDET = ( MAX( ABS( A1 ), ABS( A4 ) )/NRM )
$ *MIN( ABS( A1 ), ABS( A4 ) )*
$ SIGN( ONE, A1 )*SIGN( ONE, A4 )
IF ( L.EQ.IWORK(MAPA+1) )
$ SDET = SDET - ( MAX( ABS( A2 ), ABS( A3 ) )/NRM )
$ *MIN( ABS( A2 ), ABS( A3 ) )*
$ SIGN( ONE, A2 )*SIGN( ONE, A3 )
IF ( ABS( SDET ).LT.DWORK(L)*TOLL ) THEN
C
C Make a more accurate singularity test using SVD.
C
IF ( L.EQ.IWORK(MAPA+1) ) THEN
IF ( ABS( A1 ).GE.ABS( A4 ) ) THEN
CALL DLARTG( A1, A2, CS, SN, TEMP )
A1 = TEMP
TEMP = CS*A3 + SN*A4
A4 = CS*A4 - SN*A3
A3 = TEMP
ELSE
CALL DLARTG( A4, A2, CS, SN, TEMP )
A4 = TEMP
TEMP = CS*A3 + SN*A1
A1 = CS*A1 - SN*A3
A3 = TEMP
END IF
END IF
CALL DLAS2( A1, A3, A4, SVMN, TEMP )
IF ( SVMN.LT.DWORK(L)*TOLL ) THEN
IWARN = N + 1
IWORK(2*K+J) = -J
GO TO 470
END IF
END IF
END IF
460 CONTINUE
C
C Go to next block and reset counters.
C
470 CONTINUE
ILAST = IFIRST - 1
IF ( ILAST.LT.ILO )
$ GO TO 550
IITER = 0
TITER = 0
COUNT = 0
COUNTE = 0
IF ( ZITER.NE.-1 )
$ ZITER = 0
IF ( .NOT.LSCHR ) THEN
ILASTM = ILAST
IF ( IFRSTM.GT.ILAST )
$ IFRSTM = ILO
END IF
GO TO 530
END IF
C
C Now, it is assumed that ILAST-IFIRST+1 >= 3.
C
IF ( COUNT.LT.NITER ) THEN
C
C Use the normal periodic QZ step routine.
C Note that the pointer to IWORK is increased by 1.
C The fact that, for SINV = 1, IWORK(MAPQ+1) = IWORK(MAPA+1)
C is used.
C
COUNT = COUNT + 1
IF ( SINV.LT.0 ) THEN
I = IWORK(MAPQ+1)
IWORK(MAPQ+1) = IWORK(MAPA+1)
END IF
CALL MB03AF( 'Double', K, ILAST-IFIRST+1, IWORK(MAPH), S,
$ SINV, A(IFIRST,IFIRST,1), LDA1, LDA2, CS1,
$ SN1, CS2, SN2 )
IF ( SINV.LT.0 )
$ IWORK(MAPQ+1) = I
ELSE IF ( COUNTE.LT.MCOUNT ) THEN
C
C Compute the two trailing eigenvalues for finding the shifts.
C Deal with special case of infinite eigenvalues, if needed.
C
I = ILAST - 1
IF ( SINV.LT.0 ) THEN
AIND = IWORK(MAPA+1)
A1 = A(I,I,AIND)
A2 = A(I+1,I,AIND)
A3 = A(I,I+1,AIND)
A4 = A(I+1,I+1,AIND)
NRM = DLANHS( 'Frobenius', 2, A(ILO,ILO,AIND), LDA1,
$ DWORK )
SDET = ( MAX( ABS( A1 ), ABS( A4 ) )/NRM )
$ *MIN( ABS( A1 ), ABS( A4 ) )*
$ SIGN( ONE, A1 )*SIGN( ONE, A4 ) -
$ ( MAX( ABS( A2 ), ABS( A3 ) )/NRM )
$ *MIN( ABS( A2 ), ABS( A3 ) )*
$ SIGN( ONE, A2 )*SIGN( ONE, A3 )
ISINF = ABS( SDET ).LT.DWORK(AIND)*TOLL
IF ( ISINF ) THEN
ALPHAR(I) = ONE/DWORK(PNORM+1)
ALPHAR(ILAST) = ONE/DWORK(PNORM+1)
SCAL(I) = 1
SCAL(ILAST) = 1
END IF
IERR = 0
ELSE
ISINF = .FALSE.
END IF
IF ( .NOT.ISINF ) THEN
CALL MB03BB( BASE, LGBAS, ULP, K, IWORK(MAPA+1), S, SINV,
$ A(I,I,1), LDA1, LDA2, ALPHAR(I), ALPHAI(I),
$ BETA(I), SCAL(I), DWORK(PFREE+1), IERR )
IF ( SINV.LT.0 ) THEN
C
C Use the reciprocals of the eigenvalues returned above.
C
IF ( ALPHAI(I).EQ.ZERO ) THEN
ALPHAR(I) = SIGN( ONE, ALPHAR(I) )/
$ MAX( SAFMIN, ABS( ALPHAR(I) ) )
ALPHAR(ILAST) = SIGN( ONE, ALPHAR(ILAST) )/
$ MAX( SAFMIN, ABS( ALPHAR(ILAST) ) )
SCAL(I) = -SCAL(I)
SCAL(ILAST) = -SCAL(ILAST)
ELSE
CALL DLADIV( ONE, ZERO, ALPHAR(ILAST),
$ -ALPHAI(ILAST), ALPHAR(I), ALPHAI(I) )
SCAL(I) = -SCAL(I)
END IF
END IF
C
IF ( IERR.NE.0 ) THEN
C
C Try an exceptional transformation if MB03BB does not
C converge on some special cases.
C
TEMP2 = BASE**SCAL(I)
IF ( ALPHAI(I).NE.ZERO ) THEN
TEMP = ( ABS( ALPHAR(I) ) + ABS( ALPHAI(I) ) )*
$ TEMP2
ELSE
TEMP = MAX( ABS( ALPHAR(ILAST) )*BASE**SCAL(ILAST),
$ ABS( ALPHAR(I) )*TEMP2 )
END IF
IF ( TEMP.LE.SQRT( ULP )*DWORK(PNORM+1) ) THEN
ALPHAR(I) = DWORK(PNORM+1)
SCAL(I) = 1
ALPHAR(ILAST) = DWORK(PNORM+1)
SCAL(ILAST) = 1
IERR = 0
END IF
END IF
END IF
C
IF ( IERR.NE.0 ) THEN
C
C Use the normal periodic QZ step routine.
C
IERR = 0
IN = ILAST - IFIRST + 1
IF ( SINV.LT.0 ) THEN
J1 = IWORK(MAPQ+1)
IWORK(MAPQ+1) = IWORK(MAPA+1)
END IF
CALL MB03AF( 'Double', K, IN, IWORK(MAPH), S, SINV,
$ A(IFIRST,IFIRST,1), LDA1, LDA2, CS1, SN1,
$ CS2, SN2 )
IF ( SINV.LT.0 )
$ IWORK(MAPQ+1) = J1
COUNT = 0
COUNTE = 0
ELSE
C
C              Use explicit shifts.
C
COUNTE = COUNTE + 1
W1 = ALPHAR(I)*BASE**SCAL(I)
C
IF ( ALPHAI(I).NE.ZERO ) THEN
C
C Use complex conjugate shifts.
C
SHFT = 'C'
W2 = ALPHAI(I)*BASE**SCAL(I)
C
ELSE
C
C Two identical real shifts are tried first. If there is
C no convergence after MCOUNT/2 consecutive iterations,
C a single shift is applied. The eigenvalue closer to
C the last element of the current product is used.
C
W2 = ALPHAR(ILAST)*BASE**SCAL(ILAST)
C
CALL MA01BD( BASE, LGBAS, K, S, A(ILAST,ILAST,1),
$ LDA1*LDA2, TEMP, TEMP2, I )
TEMP = TEMP*BASE**I
A1 = ABS( TEMP - W1 )
A2 = ABS( TEMP - W2 )
C
IF ( COUNTE.LE.MAX( 1, MCOUNT/2 ) ) THEN
SHFT = 'D'
IF ( A1.LT.A2 ) THEN
W2 = W1
ELSE
W1 = W2
END IF
ELSE
SHFT = 'S'
IF ( A1.LT.A2 )
$ W2 = W1
END IF
C
END IF
C
C Compute an initial transformation using the selected
C shifts.
C
CALL MB03AB( SHFT, K, ILAST-IFIRST+1, IWORK(MAPA+1), S,
$ SINV, A(IFIRST,IFIRST,1), LDA1, LDA2, W1,
$ W2, CS1, SN1, CS2, SN2 )
END IF
C
IF ( COUNT+COUNTE.GE.NITER+MCOUNT ) THEN
C
C Reset the two counters.
C
COUNT = 0
COUNTE = 0
END IF
END IF
C
C Do the sweeps.
C
IF ( K.GT.1 ) THEN
C
C The propagation of the initial transformation is processed
C here separately.
C
IN = IFIRST + 1
IO = ILAST - 2
J = IFIRST
AIND = IWORK(MAPA+1)
CALL DROT( ILAST-IFRSTM+1, A(IFRSTM,J+1,AIND), 1,
$ A(IFRSTM,J+2,AIND), 1, CS2, SN2 )
CALL DROT( ILAST-IFRSTM+1, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+2)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+2)) )
END IF
IF ( QI.NE.0 ) THEN
CALL DROT( N, Q(1,J+1,QI), 1, Q(1,J+2,QI), 1, CS2, SN2 )
CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS1, SN1 )
END IF
C
C Propagate information from the right to A_k.
C
DO 480 L = 2, K
AIND = IWORK(MAPA+L)
IF ( S(AIND).EQ.SINV ) THEN
CALL DROT( ILASTM-J+1, A(J+1,J,AIND), LDA1,
$ A(J+2,J,AIND), LDA1, CS2, SN2 )
TEMP = A(J+2,J+2,AIND)
CALL DLARTG( TEMP, -A(J+2,J+1,AIND), CS2, SN2,
$ A(J+2,J+2,AIND) )
A(J+2,J+1,AIND) = ZERO
CALL DROT( J-IFRSTM+2, A(IFRSTM,J+1,AIND), 1,
$ A(IFRSTM,J+2,AIND), 1, CS2, SN2 )
C
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS1, SN1 )
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, -A(J+1,J,AIND), CS1, SN1,
$ A(J+1,J+1,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( J-IFRSTM+1, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
C
ELSE
C
CALL DROT( J+3-IFRSTM, A(IFRSTM,J+1,AIND), 1,
$ A(IFRSTM,J+2,AIND), 1, CS2, SN2 )
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, A(J+2,J+1,AIND), CS2, SN2,
$ A(J+1,J+1,AIND) )
A(J+2,J+1,AIND) = ZERO
CALL DROT( ILASTM-J-1, A(J+1,J+2,AIND), LDA1,
$ A(J+2,J+2,AIND), LDA1, CS2, SN2 )
C
CALL DROT( J+2-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS1, SN1,
$ A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1,
$ A(J+1,J+1,AIND), LDA1, CS1, SN1 )
END IF
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+MOD(L,K)+1)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+MOD(L,K)+1)) )
END IF
IF ( QI.NE.0 ) THEN
CALL DROT( N, Q(1,J+1,QI), 1, Q(1,J+2,QI), 1, CS2,
$ SN2 )
CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS1, SN1 )
END IF
480 CONTINUE
C
AIND = IWORK(MAPA+1)
CALL DROT( ILASTM-IFIRST+1, A(J+1,IFIRST,AIND), LDA1,
$ A(J+2,IFIRST,AIND), LDA1, CS2, SN2 )
CALL DROT( ILASTM-IFIRST+1, A(J,IFIRST,AIND), LDA1,
$ A(J+1,IFIRST,AIND), LDA1, CS1, SN1 )
ELSE
IN = IFIRST - 1
IO = ILAST - 3
END IF
C
DO 500 J1 = IN, IO
AIND = IWORK(MAPA+1)
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+1)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+1)) )
END IF
C
C Create a bulge if J1 = IFIRST - 1, otherwise chase the
C bulge.
C
IF ( J1.LT.IFIRST ) THEN
J = J1 + 1
CALL DROT( ILASTM-J+1, A(J+1,J,AIND), LDA1,
$ A(J+2,J,AIND), LDA1, CS2, SN2 )
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS1, SN1 )
ELSE
IF ( K.EQ.1 ) THEN
J = J + 1
ELSE
J = J1
END IF
TEMP = A(J+1,J-1,AIND)
CALL DLARTG( TEMP, A(J+2,J-1,AIND), CS2, SN2,
$ TEMP2 )
TEMP = A(J,J-1,AIND)
CALL DLARTG( TEMP, TEMP2, CS1, SN1, A(J,J-1,AIND) )
A(J+1,J-1,AIND) = ZERO
A(J+2,J-1,AIND) = ZERO
CALL DROT( ILASTM-J+1, A(J+1,J,AIND), LDA1,
$ A(J+2,J,AIND), LDA1, CS2, SN2 )
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS1, SN1 )
END IF
IF ( QI.NE.0 ) THEN
CALL DROT( N, Q(1,J+1,QI), 1, Q(1,J+2,QI), 1, CS2, SN2 )
CALL DROT( N, Q(1,J, QI), 1, Q(1,J+1,QI), 1, CS1, SN1 )
END IF
C
C Propagate information from the right to A_1.
C
DO 490 L = K, 2, -1
AIND = IWORK(MAPA+L)
IF ( S(AIND).EQ.SINV ) THEN
CALL DROT( J+3-IFRSTM, A(IFRSTM,J+1,AIND), 1,
$ A(IFRSTM,J+2,AIND), 1, CS2, SN2 )
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, A(J+2,J+1,AIND), CS2, SN2,
$ A(J+1,J+1,AIND) )
A(J+2,J+1,AIND) = ZERO
CALL DROT( ILASTM-J-1, A(J+1,J+2,AIND), LDA1,
$ A(J+2,J+2,AIND), LDA1, CS2, SN2 )
CALL DROT( J+2-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS1, SN1,
$ A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1,
$ A(J+1,J+1,AIND), LDA1, CS1, SN1 )
ELSE
CALL DROT( ILASTM-J+1, A(J+1,J,AIND), LDA1,
$ A(J+2,J,AIND), LDA1, CS2, SN2 )
TEMP = A(J+2,J+2,AIND)
CALL DLARTG( TEMP, -A(J+2,J+1,AIND), CS2, SN2,
$ A(J+2,J+2,AIND) )
A(J+2,J+1,AIND) = ZERO
CALL DROT( J+2-IFRSTM, A(IFRSTM,J+1,AIND), 1,
$ A(IFRSTM,J+2,AIND), 1, CS2, SN2 )
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS1, SN1 )
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, -A(J+1,J,AIND), CS1, SN1,
$ A(J+1,J+1,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( J+1-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
END IF
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+L)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+L)) )
END IF
IF ( QI.NE.0 ) THEN
CALL DROT( N, Q(1,J+1,QI), 1, Q(1,J+2,QI), 1, CS2,
$ SN2 )
CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS1, SN1 )
END IF
490 CONTINUE
AIND = IWORK(MAPA+1)
LM = MIN( J+3, ILASTM ) - IFRSTM + 1
CALL DROT( LM, A(IFRSTM,J+1,AIND), 1,
$ A(IFRSTM,J+2,AIND), 1, CS2, SN2 )
CALL DROT( LM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
500 CONTINUE
C
C To avoid IF statements, there is an extra piece of code for
C the last step.
C
J = ILAST - 1
TEMP = A(J,J-1,AIND)
CALL DLARTG( TEMP, A(J+1,J-1,AIND), CS1, SN1, A(J,J-1,AIND) )
A(J+1,J-1,AIND) = ZERO
C
510 CONTINUE
C
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS1, SN1 )
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+1)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+1)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS1, SN1 )
C
C Propagate information from the right to A_1.
C
DO 520 L = K, 2, -1
AIND = IWORK(MAPA+L)
IF ( S(AIND).EQ.SINV ) THEN
CALL DROT( J+2-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
TEMP = A(J,J,AIND)
CALL DLARTG( TEMP, A(J+1,J,AIND), CS1, SN1,
$ A(J,J,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( ILASTM-J, A(J,J+1,AIND), LDA1,
$ A(J+1,J+1,AIND), LDA1, CS1, SN1 )
ELSE
CALL DROT( ILASTM-J+1, A(J,J,AIND), LDA1,
$ A(J+1,J,AIND), LDA1, CS1, SN1 )
TEMP = A(J+1,J+1,AIND)
CALL DLARTG( TEMP, -A(J+1,J,AIND), CS1, SN1,
$ A(J+1,J+1,AIND) )
A(J+1,J,AIND) = ZERO
CALL DROT( J+1-IFRSTM, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
END IF
IF ( LCMPQ ) THEN
QI = IWORK(MAPQ+L)
ELSE IF ( LPARQ ) THEN
QI = ABS( QIND(IWORK(MAPQ+L)) )
END IF
IF ( QI.NE.0 )
$ CALL DROT( N, Q(1,J,QI), 1, Q(1,J+1,QI), 1, CS1, SN1 )
520 CONTINUE
AIND = IWORK(MAPA+1)
CALL DROT( ILASTM-IFRSTM+1, A(IFRSTM,J,AIND), 1,
$ A(IFRSTM,J+1,AIND), 1, CS1, SN1 )
C
C End of iteration loop.
C
530 CONTINUE
540 CONTINUE
C
C Drop through = non-convergence.
C
INFO = ILAST
GO TO 580
C
C Successful completion of all QZ steps.
C
550 CONTINUE
C
C Set eigenvalues 1:ILO-1.
C
DO 560 J = 1, ILO - 1
CALL MA01BD( BASE, LGBAS, K, S, A(J,J,1), LDA1*LDA2, ALPHAR(J),
$ BETA(J), SCAL(J) )
ALPHAI(J) = ZERO
560 CONTINUE
C
C     Store information about the split 2-by-2 blocks and possible
C loss of accuracy.
C
DO 570 I = 2, N + 1
IWORK(I) = IWORK(2*K+I-1)
570 CONTINUE
C
580 CONTINUE
C
DO 590 L = K + 1, 2, -1
DWORK(PNORM+L) = DWORK(PNORM+L-1)
590 CONTINUE
C
DWORK(1) = DBLE( OPTDW )
IWORK(1) = OPTIW
C
RETURN
C *** Last line of MB03BD ***
END
|
{"hexsha": "9999bdebfeee56054d19a3cd6c1591ab93241a17", "size": 74462, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/MB03BD.f", "max_stars_repo_name": "bnavigator/SLICOT-Reference", "max_stars_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-11-10T23:47:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T04:34:43.000Z", "max_issues_repo_path": "src/MB03BD.f", "max_issues_repo_name": "RJHKnight/slicotr", "max_issues_repo_head_hexsha": "a7332d459aa0867d3bc51f2a5dd70bd75ab67ec0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-02-07T22:26:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:01:07.000Z", "max_forks_repo_path": "src/MB03BD.f", "max_forks_repo_name": "RJHKnight/slicotr", "max_forks_repo_head_hexsha": "a7332d459aa0867d3bc51f2a5dd70bd75ab67ec0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-11-26T11:06:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T00:37:21.000Z", "avg_line_length": 38.0685071575, "max_line_length": 72, "alphanum_fraction": 0.4147484623, "num_tokens": 23094}
|
function test_failed = test_blockfwt()
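% TEST_BLOCKFWT  Tests blockwise wavelet analysis/synthesis (blockana/blocksyn)
%                against fwt applied to the whole signal, for several block lengths.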
test_failed = 0;
disp('-------------TEST_BLOCKFWT--------------');
L = 567;
W = [1,3];
Lb = [78,64,58,1021];
wa = {'dden3','ana:symorth1'};
ws = {'dden3','syn:symorth1'};
J = [5];
for wId = 1:numel(W)
for lId = 1:numel(L)
f = tester_rand(L(lId),W(wId));
for lbId = 1:numel(Lb)
for waId=1:numel(wa)
Fa = blockframeaccel(frame('fwt',wa{waId},J),Lb(lbId),'segola');
Fs = blockframeaccel(frame('fwt',ws{waId},J),Lb(lbId),'segola');
a = Fa.g.a(1);
m = numel(Fa.g.g{1}.h);
rmax = (a^J-1)/(a-1)*(m-1);
f = postpad(f,L(lId)+rmax);
block(f,'offline','L',Lb(lbId));
colC = {};
colfhat = {};
for ii=1:ceil(L(lId)/Lb(lbId))
fb = blockread();
c = blockana(Fa,fb);
ccell = comp_fwtpack2cell(Fa,c);
colC{end+1} = ccell;
chat = cell2mat(ccell);
fhat = blocksyn(Fs,chat,size(fb,1));
colfhat{end+1} = fhat;
end
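% Compare the collected blockwise coefficients with fwt applied to the whole signal.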
err = 0;
cwhole = fwt(f,wa{waId},J,'zero','cell');
for ii=1:numel(colC{1})
cc{ii} = cell2mat(cellfun(@(cEl) cEl{ii},colC','UniformOutput',0));
Ltmp = min([size(cwhole{ii},1),size(cc{ii},1)]);
err = err + norm(cwhole{ii}(1:Ltmp,:)-cc{ii}(1:Ltmp,:));
end
[test_failed,fail]=ltfatdiditfail(err,test_failed);
fprintf('COEFS L:%3i, W:%3i, Lb=%3i, %s, err=%.4e %s\n',L(lId),W(wId),Lb(lbId),wa{waId},err,fail);
fhat = cell2mat(colfhat.');
fhat = fhat(rmax+1:end,:);
Lcrop = min([size(fhat,1),size(f,1)]);
res = norm(f(1:Lcrop,:)-fhat(1:Lcrop,:));
[test_failed,fail]=ltfatdiditfail(res,test_failed);
fprintf('REC L:%3i, W:%3i, Lb=%3i, %s, err=%.4e %s\n',L(lId),W(wId),Lb(lbId),wa{waId},res,fail);
end
end
end
end
|
{"author": "ltfat", "repo": "ltfat", "sha": "4496a06ad8dddb85cd2e007216b765dc996ef327", "save_path": "github-repos/MATLAB/ltfat-ltfat", "path": "github-repos/MATLAB/ltfat-ltfat/ltfat-4496a06ad8dddb85cd2e007216b765dc996ef327/testing/test_blockfwt.m"}
|
#!/usr/bin/env python
from os.path import join
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import netCDF4 as nc4
from e3sm_case_output import day_str, time_str
NUM_DAYS = 1
TIME_STEP = 1800
assert 86400 % TIME_STEP == 0, "cannot fit even number of time steps in day"
times_per_day = 86400 // TIME_STEP
CASE_NAMES = [
"timestep_ctrl",
# "timestep_MG2_10s",
"timestep_CLUBB_10s_MG2_10s",
"timestep_CLUBB_MG2_10s",
# "timestep_CLUBB_MG2_10s_ftype1",
"timestep_all_10s",
# "timestep_dyn_10s",
# "timestep_presaer_ctrl",
# "timestep_presaer_CLUBB_MG2_10s",
# "timestep_presaer_CLUBB_MG2_10s_ZM_10s",
# "timestep_presaer_cld_10s",
# "timestep_presaer_cld_10s_ftype1",
# "timestep_presaer_all_10s",
]
SHORT_CASE_NAMES = [
"CTRL",
# "MICRO10",
"CLUBB10MICRO10",
"CLUBBMICRO10",
# "CLUBBMICRO10FTYPE1",
"ALL10",
# "DYN10",
# "CTRLPA",
# "CLUBBMICRO10PA",
# "CLUBBMICRO10ZM10PA",
# "CLD10PA",
# "CLD10FTYPE1PA",
# "ALL10PA",
]
STYLES = {
"CTRL": ('k', '-'),
"MICRO10": ('r', '-'),
"CLUBB10MICRO10": ('maroon', '-'),
"CLUBBMICRO10": ('indigo', '-'),
"CLUBBMICRO10FTYPE1": ('indigo', ':'),
"ALL10": ('dimgrey', '-'),
"DYN10": ('y', '-'),
"CTRLPA": ('k', '-'),
"CLUBBMICRO10PA": ('indigo', '-'),
"CLUBBMICRO10ZM10PA": ('saddlebrown', '-'),
"CLD10PA": ('slateblue', '-'),
"CLD10FTYPE1PA": ('slateblue', ':'),
"ALL10PA": ('dimgrey', '-'),
}
OUTPUT_DIRS = ["/p/lustre2/santos36/ACME/{}/run/".format(case)
for case in CASE_NAMES]
suffix = ""
log_file = open("plot_water_budget_col_log{}.txt".format(suffix), 'w')
out_file_template = "{}.cam.h0.0001-01-{}-{}.nc"
def get_out_file_name(icase, day, time):
"""Given a case index, day, and time, return CAM header file name."""
return join(OUTPUT_DIRS[icase],
out_file_template.format(CASE_NAMES[icase],
day_str(day), time_str(time)))
first_file_name = get_out_file_name(0, 1, 0)
first_file = nc4.Dataset(first_file_name, 'r')
ncol = len(first_file.dimensions['ncol'])
nlev = len(first_file.dimensions['lev'])
lat = first_file['lat'][:]
lon = first_file['lon'][:]
lev = first_file['lev'][:]
ilev = first_file['ilev'][:]
# Find columns in box over South America.
min_lat = -20.
max_lat = 10.
min_lon = 280.
max_lon = 315.
column_set = set()
for i in range(ncol):
if min_lon <= lon[i] <= max_lon and min_lat <= lat[i] <= max_lat:
column_set.add(i)
first_file.close()
ncol_sa = len(column_set)
column_list = sorted(list(column_set))
# Max diff in CLDLIQ at surface for CLUBBMICRO10
#ifocus = 28970
# Max CLDLIQ at surface for CLUBBMICRO10
#ifocus = 27898
# Max precipitation in CTRL
#ifocus = 29215
# Max precipitation in CLUBBMICRO10 and CLUBBMICRO10PA
ifocus = 29488
# Max precipitation in ALL10
#ifocus = 29227
# Large oscillations found here:
#ifocus = 3913
#ifocus = -1
if ifocus == -1:
# Look at precip in a particular run.
print("Searching for largest average precipitation.", file=log_file)
itest = 1
precl_total = np.zeros((ncol_sa,))
for day in range(1, NUM_DAYS+1):
for it in range(times_per_day):
test_file_name = get_out_file_name(itest, day, it*TIME_STEP)
test_file = nc4.Dataset(test_file_name, 'r')
for icol in range(ncol_sa):
precl_total[icol] += test_file['PRECL'][0,column_list[icol]]
test_file.close()
test_file_name = get_out_file_name(itest, NUM_DAYS+1, 0)
test_file = nc4.Dataset(test_file_name, 'r')
for icol in range(ncol_sa):
precl_total[icol] += test_file['PRECL'][0,column_list[icol]]
test_file.close()
precl_max = 0.
for icol in range(ncol_sa):
if precl_max < precl_total[icol]:
precl_max = precl_total[icol]
ifocus = column_list[icol]
    assert ifocus != -1, "Maximum precipitation column not found!"
    print("Precipitation maximized at column ", ifocus, " at lat = ",
lat[ifocus], ", lon = ", lon[ifocus], file=log_file)
first_file_name = get_out_file_name(0, 1, 0)
first_file = nc4.Dataset(first_file_name, 'r')
p0 = first_file['P0']
ps = first_file['PS'][0,ifocus]
hyam = first_file['hyam'][:]
hybm = first_file['hybm'][:]
p = hyam * p0 + hybm * ps
first_file.close()
variables = [
{'name': 'RELHUM', 'units': r'%', 'ndim': 2},
{'name': 'CLDLIQ', 'units': r'$g/kg$', 'ndim': 2, 'scale': 1000.},
{'name': 'CLDICE', 'units': r'$g/kg$', 'ndim': 2, 'scale': 1000.},
{'name': 'RAINQM', 'units': r'$g/kg$', 'ndim': 2, 'scale': 1000.},
{'name': 'CMELIQ', 'units': r'$g/kg/d$', 'ndim': 2, 'scale': 86.4e6},
{'name': 'PRAO', 'units': r'$g/kg/d$', 'ndim': 2, 'scale': 86.4e6},
{'name': 'PRCO', 'units': r'$g/kg/d$', 'ndim': 2, 'scale': 86.4e6},
{'name': 'QCSEDTEN', 'units': r'$kg/kg/s$', 'ndim': 2},
{'name': 'QRSEDTEN', 'units': r'$kg/kg/s$', 'ndim': 2},
{'name': 'EVAPPREC', 'units': r'$kg/kg/s$', 'ndim': 2},
{'name': 'T', 'units': r'$K$', 'ndim': 2},
{'name': 'Q', 'units': r'$g/kg$', 'ndim': 2, 'scale': 1000.},
{'name': 'U', 'units': r'$m/s$', 'ndim': 2},
{'name': 'V', 'units': r'$m/s$', 'ndim': 2},
{'name': 'OMEGA', 'units': r'$Pa/s$', 'ndim': 2},
{'name': 'CLOUD', 'units': r'fraction', 'ndim': 2},
{'name': 'DPDLFLIQ', 'units': r'$kg/kg/s$', 'ndim': 2},
{'name': 'DPDLFICE', 'units': r'$kg/kg/s$', 'ndim': 2},
{'name': 'QRL', 'units': r'$K/s$', 'ndim': 2},
{'name': 'QRS', 'units': r'$K/s$', 'ndim': 2},
{'name': 'Z3', 'units': r'$m$', 'ndim': 2},
{'name': 'QCSEVAP', 'units': r'$g/kg/d$', 'ndim': 2, 'scale': 86.4e6},
{'name': 'QISEVAP', 'units': r'$g/kg/d$', 'ndim': 2, 'scale': 86.4e6},
]
def calc_rho(t):
rho = np.zeros(t.shape)
ntimes = t.shape[1]
for i in range(ntimes):
rho[:,i] = p / (287.058 * t[:,i])
return rho
derived_variables = [
{'name': 'LWC', 'units': r'$g/m^3$', 'ndim': 2,
'depends': ['CLDLIQ', 'T'],
'calc': (lambda var_dict: var_dict['CLDLIQ'] * calc_rho(var_dict['T'])),
},
{'name': 'IWC', 'units': r'$g/m^3$', 'ndim': 2,
'depends': ['CLDICE', 'T'],
'calc': (lambda var_dict: var_dict['CLDICE'] * calc_rho(var_dict['T'])),
},
{'name': 'RAINPROD', 'units': r'$g/kg/d$', 'ndim': 2,
'depends': ['PRCO', 'PRAO'],
'calc': (lambda var_dict: var_dict['PRCO'] + var_dict['PRAO']),
},
{'name': 'WINDSPEED', 'units': r'$m/s$', 'ndim': 2,
'depends': ['U', 'V'],
'calc': (lambda var_dict: np.sqrt(var_dict['U']**2 + var_dict['V']**2)),}
]
# Check that dependencies are satisfied.
var_names = [var['name'] for var in variables]
for derived in derived_variables:
for depend in derived['depends']:
assert depend in var_names
ncases = len(CASE_NAMES)
ntimes = NUM_DAYS * times_per_day + 1
out_vars = {}
for icase in range(ncases):
case = SHORT_CASE_NAMES[icase]
print("Processing case ", case)
out_vars[case] = {}
for var in variables:
out_vars[case][var['name']] = np.zeros((nlev, ntimes))
ita = 0
for day in range(1, NUM_DAYS+1):
for it in range(times_per_day):
out_file_name = get_out_file_name(icase, day, it*TIME_STEP)
out_file = nc4.Dataset(out_file_name, 'r')
for var in variables:
varname = var['name']
ndim = var['ndim']
if ndim == 2:
out_vars[case][varname][:,ita] = out_file[varname][0,:,ifocus]
else:
assert False, \
"don't know what to do with ndim={}".format(ndim)
out_file.close()
ita += 1
# Last file is 0-th time of the next day.
out_file_name = get_out_file_name(icase, NUM_DAYS+1, 0)
out_file = nc4.Dataset(out_file_name, 'r')
for var in variables:
varname = var['name']
ndim = var['ndim']
if ndim == 2:
out_vars[case][varname][:,ita] = out_file[varname][0,:,ifocus]
else:
assert False, \
"don't know what to do with ndim={}".format(ndim)
out_file.close()
# Scale variables
for var in variables:
if 'scale' in var:
out_vars[case][var['name']] *= var['scale']
# Calculate derived variables
for derived in derived_variables:
out_vars[case][derived['name']] = derived['calc'](out_vars[case])
PLOT_TOP = 700.
itop = 0
for level in ilev:
if level > PLOT_TOP:
break
itop += 1
plot_ilev = ilev[itop:]
# Assumes Venezuelan time.
TIME_OFFSET = 4.
times = np.linspace(0., TIME_STEP*(ntimes - 1) / 3600., ntimes) - TIME_OFFSET
for var in variables + derived_variables:
name = var['name']
clim_val = [1.e36, -1.e36]
    for icase in range(ncases):
        case = SHORT_CASE_NAMES[icase]
        clim_val[0] = min(clim_val[0], out_vars[case][name][itop:,1:].min())
        clim_val[1] = max(clim_val[1], out_vars[case][name][itop:,1:].max())
for icase in range(ncases):
case = SHORT_CASE_NAMES[icase]
plt.pcolor(times, plot_ilev, out_vars[case][name][itop:,1:])
plt.axis('tight')
plt.xlabel("Time (hr)")
# Bad hard-coding!
if NUM_DAYS == 1:
plt.xticks(np.linspace(-3., 18., 8),
["2100", "0000", "0300", "0600", "0900", "1200", "1500", "1800"])
elif NUM_DAYS == 2:
plt.xticks(np.linspace(-3., 42., 16),
["2100", "0000", "0300", "0600", "0900", "1200", "1500", "1800",
"2100", "0000", "0300", "0600", "0900", "1200", "1500", "1800"])
plt.grid(True, axis='x')
ax = plt.gca()
ylim = ax.get_ylim()
ax.set_ylim([ylim[1], ylim[0]])
plt.ylabel("Pressure (hPa)")
plt.colorbar()
plt.clim(clim_val[0], clim_val[1])
plt.savefig("{}_{}_time_col{}.png".format(name, case, suffix))
plt.close()
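# Output index at 15 hours into the run; used to sample winds for the hodographs.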
htime = (15 * 3600) // TIME_STEP
for icase in range(ncases):
case = SHORT_CASE_NAMES[icase]
hodo_winds = np.zeros((2, 11))
next_km = 1
base_z3 = out_vars[case]['Z3'][nlev-1,htime]
for jlev in range(nlev-1, -1, -1):
this_z3 = out_vars[case]['Z3'][jlev,htime]
next_z3 = out_vars[case]['Z3'][jlev-1,htime]
if next_z3 > (1000.*next_km + base_z3):
weight = (1000.*next_km + base_z3 - this_z3) / (next_z3 - this_z3)
u = out_vars[case]['U'][jlev-1,htime]*weight + \
out_vars[case]['U'][jlev,htime]*(1.-weight)
v = out_vars[case]['V'][jlev-1,htime]*weight + \
out_vars[case]['V'][jlev,htime]*(1.-weight)
hodo_winds[0,next_km] = u
hodo_winds[1,next_km] = v
next_km += 1
if next_km == 11:
break
plt.plot(hodo_winds[0,:], hodo_winds[1,:])
plt.savefig("hodo_{}{}.png".format(case, suffix))
plt.close()
log_file.close()
|
{"hexsha": "4a399b277cb6fc9b92ecbaa0da21903e865fa34d", "size": 10977, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_water_budget_col.py", "max_stars_repo_name": "quantheory/E3SMTimestepStudy", "max_stars_repo_head_hexsha": "63b5517c2a92dfae7af0cd7aa0eb39058967ed06", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plot_water_budget_col.py", "max_issues_repo_name": "quantheory/E3SMTimestepStudy", "max_issues_repo_head_hexsha": "63b5517c2a92dfae7af0cd7aa0eb39058967ed06", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plot_water_budget_col.py", "max_forks_repo_name": "quantheory/E3SMTimestepStudy", "max_forks_repo_head_hexsha": "63b5517c2a92dfae7af0cd7aa0eb39058967ed06", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6717791411, "max_line_length": 88, "alphanum_fraction": 0.5718320124, "include": true, "reason": "import numpy", "num_tokens": 3648}
|
import numpy as np
import itertools
from scipy.interpolate import griddata
from .util import Envelope, norm_array
from typing import List, Tuple
_t_DEM = List[Tuple[float, float, float]]
class DEMObject:
_dem: _t_DEM = None
def __init__(self, dem: _t_DEM):
self._dem = np.asarray(dem)
def __repr__(self):
return f"<DEMObject @ {len(self._dem)}>"
def _axis(self, bound: Envelope, resolution: float):
sx, sy = bound.west(), bound.south()
ex, ey = bound.east(), bound.north()
x_axis = np.arange(sx, ex+resolution, resolution)
y_axis = np.arange(sy, ey+resolution, resolution)
return x_axis, y_axis
    # Build an elevation grid at the given resolution (in meters)
def grid(self, bound: Envelope, resolution: float=10.0):
DEM = self._dem
x_axis, y_axis = self._axis(bound, resolution)
X, Y = np.meshgrid(x_axis, y_axis)
Q = np.vstack([X.ravel(), Y.ravel()]).T
# print(Q)
Z = griddata((DEM[:, 0], DEM[:, 1]), DEM[:, 2], (Q[:, 0], Q[:, 1]), method='linear', fill_value=0)
return np.vstack([Q[:, 0], Q[:, 1], Z]).T
def slope(self, bound: Envelope, resolution: float=10.0):
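        """Return the grid augmented with surface-normal vectors: rows of (x, y, z, nx, ny, nz)."""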
x_axis, y_axis = self._axis(bound, resolution)
grid = self.grid(bound, resolution)
height = grid[:,2]
height = height.reshape((len(y_axis), len(x_axis)))
grad_y, grad_x = np.gradient(height)
vec_x = np.asarray([[resolution, 0.0, x] for x in list(itertools.chain(*grad_x))])
vec_x = np.asarray(list(norm_array(vec_x)))
vec_y = np.asarray([[0.0, resolution, y] for y in list(itertools.chain(*grad_y))])
vec_y = np.asarray(list(norm_array(vec_y)))
slope = np.cross(vec_x, vec_y)
slope = np.asarray(list(norm_array(slope)))
return np.hstack([grid, slope])
|
{"hexsha": "b60fde08b0cd123c28e187415c23bea7a5cc7d6e", "size": 1848, "ext": "py", "lang": "Python", "max_stars_repo_path": "demutil/processor.py", "max_stars_repo_name": "BetaS/pydemutil", "max_stars_repo_head_hexsha": "c4b5cc5c21e09c9513cdd3836f89e307a05ca55d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demutil/processor.py", "max_issues_repo_name": "BetaS/pydemutil", "max_issues_repo_head_hexsha": "c4b5cc5c21e09c9513cdd3836f89e307a05ca55d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demutil/processor.py", "max_forks_repo_name": "BetaS/pydemutil", "max_forks_repo_head_hexsha": "c4b5cc5c21e09c9513cdd3836f89e307a05ca55d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8064516129, "max_line_length": 106, "alphanum_fraction": 0.6071428571, "include": true, "reason": "import numpy,from scipy", "num_tokens": 524}
|
# Copyright 2021 The XMC-GAN Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
def hinge_loss_g(fake_logit: torch.Tensor) -> torch.Tensor:
    return -torch.mean(fake_logit)
def hinge_loss_d(real_logit: torch.Tensor,
                 fake_logit: torch.Tensor) -> torch.Tensor:
    real_loss = torch.mean(F.relu(1.0 - real_logit))
    fake_loss = torch.mean(F.relu(1.0 + fake_logit))
    return real_loss + fake_loss
def hinge_loss(real_logit: torch.Tensor, fake_logit: torch.Tensor):
    generator_loss = -torch.mean(fake_logit)
    real_loss = F.relu(1.0 - real_logit)
    fake_loss = F.relu(1.0 + fake_logit)
    discriminator_loss = torch.mean(real_loss + fake_loss)
    return discriminator_loss, generator_loss
def cross_entropy_loss_with_logits(*, labels: torch.Tensor,
                                   logits: torch.Tensor) -> torch.Tensor:
    """Calculates the cross entropy loss: label is one dimensional, not one hot."""
    logp = F.log_softmax(logits, dim=-1)
    # Gather the log-probability of the true class (labels holds integer indices).
    loglik = torch.gather(logp, 1, labels[:, None])
    return -loglik
def tf_cross_entropy_loss_with_logits(*, labels: torch.Tensor,
                                      logits: torch.Tensor) -> torch.Tensor:
    # Cross entropy for one-hot (or soft) label vectors, mirroring
    # TensorFlow's softmax_cross_entropy_with_logits.
    logp = F.log_softmax(logits, dim=-1)
    loss = -torch.sum(labels * logp, dim=-1)
    return loss
|
{"hexsha": "9741457ce47f02cb4d54fe9786defc20fab05000", "size": 1899, "ext": "py", "lang": "Python", "max_stars_repo_path": "attn_loss/losses.py", "max_stars_repo_name": "StolasIn/Lafite", "max_stars_repo_head_hexsha": "a85ad9eec6de6c90ccba63ad3c43e45b0fe5d371", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "attn_loss/losses.py", "max_issues_repo_name": "StolasIn/Lafite", "max_issues_repo_head_hexsha": "a85ad9eec6de6c90ccba63ad3c43e45b0fe5d371", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "attn_loss/losses.py", "max_forks_repo_name": "StolasIn/Lafite", "max_forks_repo_head_hexsha": "a85ad9eec6de6c90ccba63ad3c43e45b0fe5d371", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8301886792, "max_line_length": 82, "alphanum_fraction": 0.6882569774, "include": true, "reason": "import numpy", "num_tokens": 454}
|
[STATEMENT]
lemma ffd_fbd_conjugation: "(fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
have "(fd\<^sub>\<F> f X \<inter> Y = {}) = (fd\<^sub>\<F> f X \<subseteq> -Y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (fd\<^sub>\<F> f X \<subseteq> - Y)
[PROOF STEP]
by (simp add: disjoint_eq_subset_Compl)
[PROOF STATE]
proof (state)
this:
(fd\<^sub>\<F> f X \<inter> Y = {}) = (fd\<^sub>\<F> f X \<subseteq> - Y)
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(fd\<^sub>\<F> f X \<inter> Y = {}) = (fd\<^sub>\<F> f X \<subseteq> - Y)
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
have "... = (X \<subseteq> bb\<^sub>\<F> f (-Y))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<subseteq> - Y) = (X \<subseteq> bb\<^sub>\<F> f (- Y))
[PROOF STEP]
by (simp add: ffd_fbb_galois)
[PROOF STATE]
proof (state)
this:
(fd\<^sub>\<F> f X \<subseteq> - Y) = (X \<subseteq> bb\<^sub>\<F> f (- Y))
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(fd\<^sub>\<F> f X \<subseteq> - Y) = (X \<subseteq> bb\<^sub>\<F> f (- Y))
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
have "... = (X \<inter> - bb\<^sub>\<F> f (-Y) = {})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (X \<subseteq> bb\<^sub>\<F> f (- Y)) = (X \<inter> - bb\<^sub>\<F> f (- Y) = {})
[PROOF STEP]
by (simp add: disjoint_eq_subset_Compl)
[PROOF STATE]
proof (state)
this:
(X \<subseteq> bb\<^sub>\<F> f (- Y)) = (X \<inter> - bb\<^sub>\<F> f (- Y) = {})
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(X \<subseteq> bb\<^sub>\<F> f (- Y)) = (X \<inter> - bb\<^sub>\<F> f (- Y) = {})
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
have "... = (X \<inter> \<partial> (bb\<^sub>\<F> f (\<partial> Y)) = {})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (X \<inter> - bb\<^sub>\<F> f (- Y) = {}) = (X \<inter> \<partial> (bb\<^sub>\<F> f (\<partial> Y)) = {})
[PROOF STEP]
by (simp add: dual_set_def)
[PROOF STATE]
proof (state)
this:
(X \<inter> - bb\<^sub>\<F> f (- Y) = {}) = (X \<inter> \<partial> (bb\<^sub>\<F> f (\<partial> Y)) = {})
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> \<partial> (bb\<^sub>\<F> f (\<partial> Y)) = {})
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> \<partial> (bb\<^sub>\<F> f (\<partial> Y)) = {})
goal (1 subgoal):
1. (fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
[PROOF STEP]
by (metis (no_types, opaque_lifting) comp_apply fbb_fbd_demorgan invol_dual_var)
[PROOF STATE]
proof (state)
this:
(fd\<^sub>\<F> f X \<inter> Y = {}) = (X \<inter> bd\<^sub>\<F> f Y = {})
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1664, "file": "Transformer_Semantics_Kleisli_Transformers", "length": 16}
|
% ==============================================================================
%
% F P G A
%
% ==============================================================================
\chapter{FPGA} % ------------------------------------------------------------- %
\label{ch:fpga}
% ---------------------------------------------------------------------------- %
% ==============================================================================
%
% O V E R V I E W
%
% ==============================================================================
%<<<
This chapter first presents a rough outline of the FPGA toolchain, and then
provides more specific information on each of the three FPGA subsystems in
Sections~\ref{sec:fpga:adc}, \ref{sec:fpga:logger} and~\ref{sec:fpga:chains},
respectively. Those subsystems are the ADC control logic, a data acquisition
core which writes data to RAM and is responsible for triggering, and the
filter chains that connect the two. Figure~\ref{fig:fpga:structure} shows a
schematic of how these components fit together, and how they are related to
the overall STEMlab system.
\begin{figure}
\centering
\input{images/fpga/system-overview.tikz}
\caption[System Schematic]{%
Schematic of the STEMlab with the three main FPGA subsystems
highlighted in yellow.%
}
\label{fig:fpga:structure}
\end{figure}
%>>>
% ==============================================================================
%
% T O O L C H A I N
%
% ==============================================================================
\section{The Xilinx Toolchain} % <<< ----------------------------------------- %
\label{sec:fpga:toolchain}
% ---------------------------------------------------------------------------- %
The bitstream is compiled using Vivado, Xilinx's own IDE, which covers the
entire workflow around Xilinx FPGAs. It handles the crucial steps reliably and
can be scripted via Tcl. This is very convenient, as a project can be
replicated idempotently\footnote{%
    Idempotence [\ldots] is the property of certain operations in mathematics
    and computer science, that can be applied multiple times without changing
    the result beyond the initial application~\cite{wiki:idempotence}.%
}
whenever a rebuild is needed.
Whilst Vivado offers a GUI to build block designs, this process can be
frustrating to the user due to various ``eccentricities'' of the application.
Therefore, we choose to use its Tcl API to write scripts that create a new
project and instantiate and connect all necessary blocks. This avoids many
errors, as a bug in Vivado's user interface cannot tamper with the project.
It also enables us to use version control tools for the project, as Vivado
projects generate a large number of files which cause conflicts even in very
simple versioning operations. Using Tcl scripts which create and configure the
project, and leaving everything else out of the repository, avoids this
hassle.
Tcl also allows the creation of sub-blocks: One can group blocks together and
insert them multiple times with little effort; a feature which Vivado's
graphical front-end apparently does not offer.
More on the Tcl API and Tcl itself can be found
in~\cite{xilinx:vivado-tcl-command-reference-guide},
\cite{xilinx:vivado-design-suit-user-guide:using-tcl-scripting},
and~\cite{tcl-exchange}.
The final advantage of the Tcl API to be mentioned here is that it allows
creating the entire project and block design, performing synthesis and
implementation, and building the bitstream as well as the board support
package and first stage bootloader, all in a single sequence of automated
tasks without the need for manual intervention. Since this process tends to
take quite a lot of time, that is a significant advantage.
%>>>
% ==============================================================================
%
% A D C C O R E
%
% ==============================================================================
\section{The ADC Core} % <<< ------------------------------------------------- %
\label{sec:fpga:adc}
% ---------------------------------------------------------------------------- %
The ADC core is a simple piece of logic that interfaces with the FPGA pins
which are connected to the STEMlab's ADC. It reads the ADC's unsigned
\SI{14}{\bit} values and converts them to \SI{16}{\bit} signed format by
adding an offset of $2^{13}$ and performing a \SI{2}{\bit} sign extension. The
resulting numbers are then provided over an AXI Stream bus interface, which is
also used by all the filters. The core is taken from the git repository
provided by Pavel Demin~\cite{pita:github:pitaya-notes}. More on his
repository and project can be found in
Section~\ref{subsec:concept:fpga_components}.
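As a compact illustration (the notation is ours, not taken from the core's
documentation), the conversion can be written as
\begin{equation*}
    x_{16} = \operatorname{sext}_{16}\bigl( (x_{14} + 2^{13}) \bmod 2^{14} \bigr),
\end{equation*}
which maps the ADC's mid-scale value $2^{13}$ to zero and the extreme values
\num{0} and $2^{14}-1$ to $-2^{13}$ and $2^{13}-1$, respectively.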
%>>>
% ==============================================================================
%
% L O G G E R C O R E
%
% ==============================================================================
\section{The Logger Core} % <<< ---------------------------------------------- %
\label{sec:fpga:logger}
% ---------------------------------------------------------------------------- %
The logger core (simply referred to as the logger in the following) is a piece
of VHDL code that stores samples it receives from a source into a ring buffer
in RAM. It is packaged as a Vivado IP core and can be seamlessly integrated
into the project. The logger originated from an earlier
project~\cite{huess-schnid}. In addition to logging data to RAM, it can also
be programmed with various triggers. It reads instructions from a BRAM on the
FPGA and iterates over them. Having reached the last one, it issues an IRQ
signal, signaling that the recording is complete.
The logger's original implementation features eight channels with a width of
\SI{14}{\bit}, padded to \SI{16}{\bit} in order to simplify data transmission
in byte-sized chunks. Every two channels require one clock cycle to store
a sample.
In order to take advantage of the fact that additional bits can be ``won'' by
oversampling a signal, this project implements a new configuration, which can
process full \SI{16}{\bit} values, a gain of two bits over the ADC's output.
The penalty for this is one additional clock cycle of delay, since the adders
and comparators cannot match the timing requirements with two additional
carries. Since this project aims to optimize for lower-frequency signals, the
resulting additional delay of \SI{8}{\nano\second} is acceptable and will not
be an issue in practice. The logger core comes with a kernel module that
provides a convenient interface from the ARM core. This avoids having to
manually program the logger core.
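For reference, the \SI{8}{\nano\second} quoted above corresponds to one cycle
of the STEMlab's \SI{125}{\mega\hertz} ADC clock (our assumption, consistent
with the stated delay):
\begin{equation*}
    t_\mathrm{delay} = \frac{1}{f_\mathrm{clk}}
                     = \frac{1}{\SI{125}{\mega\hertz}}
                     = \SI{8}{\nano\second}.
\end{equation*}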
%>>>
% ==============================================================================
%
% F I L T E R C H A I N S
%
% ==============================================================================
\section{The Filter Chains} % <<< -------------------------------------------- %
\label{sec:fpga:chains}
% ---------------------------------------------------------------------------- %
% ==============================================================================
%
% O V E R V I E W
%
% ==============================================================================
%<<<
The filter chains are the most crucial part of the project and also the most
delicate one, as simple mistakes can cost several decibels of SNR and produce
a worse signal at the filter chain's output than at its input, instead of an
improved one. The logical structure of the chains can be seen in
Figure~\ref{fig:fdesign:chain_concept}, and the rationale behind it is
explained in Section~\ref{sec:fdesign:filter_specifications}.
For the detailed implementation, it is advised to inspect the project in
Vivado itself, as the block design is impractically large to reproduce on
paper and is therefore omitted from this report.
This section first gives a few notes on the two most important building blocks
in the FPGA design: the CIC and FIR compilers by Xilinx. After that, two key
points which are particularly challenging when implementing filter chains are
elaborated upon: propagating the correct bits through the cascade, and
adjusting the gain correctly in order to exploit the available bits for
maximum dynamic range.
%>>>
% ==============================================================================
%
% F I L T E R C O M P I L E R S
%
% ==============================================================================
\subsection{Filter Compilers} % <<< ------------------------------------------ %
\label{subsec:fpga:filter_compilers}
% ---------------------------------------------------------------------------- %
The basic building blocks of the filter chains are FIR and CIC filters,
which are based on ready-to-use blocks by Xilinx. Vivado's CIC and FIR
compilers natively utilize the DSP slices to a maximum extent and make it
very easy (at least in theory) to implement a Matlab-designed filter
in hardware. Those IPs are described very thoroughly in the official
documentation~\cite{xilinx:fir-compiler},~\cite{xilinx:cic-compiler}.
The FIR compiler is configured using a set of coefficients in \code{double}
format (exported from Matlab, or any other filter design tool of choice). The
compiler quantizes the coefficients with maximum precision using
\SI{16}{\bit} fixed point numbers (this can be changed from the default but
should be left to the compiler for best results). It does so by determining
the index of the MSB required to represent the biggest coefficient in the set
and using \num{16}\,bits downwards from there.
As an example, take the biggest coefficient to be $c_\mathrm{max} = 0.23$. The
bit at index \num{-2}\footnote{The bit at index $-n$ is the $2^{-n}$ valued
bit.} becomes the sign as its value (\num{0.25}) is not needed to represent
$c_\mathrm{max}$. Thus the number is said to have \num{16}\,bits overall
and \num{17} fractional bits. While this might seem a bit counterintuitive at
first, it simply means that the LSB is the one at index \num{-17} and the
number has \num{16}\,bits, meaning the sign is at bit index \num{-2}.
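In symbols (our own compact summary of the example, not the compiler's
notation):
\begin{equation*}
    2^{-3} \leq c_\mathrm{max} = 0.23 < 2^{-2}
    \quad\Longrightarrow\quad
    \text{sign bit at index } \num{-2}, \quad
    \text{LSB at index } \num{-17}.
\end{equation*}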
The compiler then also takes the specified input bit configuration and
determines the required output configuration to guarantee no overflows and
achieve maximum performance. What is important here is that the output bit
width is the same as the one needed to guarantee no overflows inside the
filter. This means that the user has to be aware of the maximum gain of the
designed filter and determine on their own which bits are important at the
output.
%>>>
% ==============================================================================
%
% B I T P R O P A G A T I O N
%
% ==============================================================================
\subsection{Bit Propagation Through the Filter Chains} % <<< ----------------- %
\label{subsec:fpga:bit_propagation}
% ---------------------------------------------------------------------------- %
For this application, only \SI{16}{\bit} values are stored. However, the
filters generate far wider numbers at their outputs, which means that many
bits are discarded at the output. To make sure that no important bits are
truncated, the chosen input format for the filters is \code{17.7}, resulting
in \num{24} total bits. The MSB should always remain just a sign extension of
the sign actually residing at bit \num{15}. The \num{7} fractional bits are
cut off at the end of the filter chains but are still important for
precision, so that fewer rounding and/or truncation errors are introduced
inside the filter chain.
As values can use up significantly more than \num{24}\,bits inside the filter
due to bit growth, it has to be ensured that no overflows happen, resulting in
greater bit widths at the output of the filter. It is important that the
location of the decimal point is always tracked and remains in its right
place. Figure~\ref{fig:fpga:bitflow} depicts the flow through an example chain
but represents the general case in our block design.
\begin{figure}
\centering
\input{images/fpga/bitflow.tikz}
\caption[Bit Flow in Filter Chain]{%
The flow of the bits in a filter chain. The bits are shifted through
horizontally. Every bit that does not fit into the numerical width of
the next stage will be cut off, starting with those furthest away from
        the sign. In the sign-extend stage, the sign bit is replicated \num{4} times and
handed through to the next stage. The ZERO stage simply holds \num{7}
\code{'0'} bits to pad the number coming from the ADC on its lower end
before it goes into the first filter. Inside the filter the bits can
grow, so those are not shifted and cut off but rather resized towards
the output.%
}
\label{fig:fpga:bitflow}
\end{figure}
%>>>
% ==============================================================================
%
% M A X I M I Z E D Y N A M I C R A N G E
%
% ==============================================================================
\subsection{Ensuring Maximum Dynamic Range} % <<< ---------------------------- %
\label{subsec:fpga:maximize_dynamic_range}
% ---------------------------------------------------------------------------- %
\enlargethispage{2ex}
The challenge of optimally using the available dynamic range is explained here
using a simple example of a sampled sine wave entering the filtering system.
It is important not to discard any MSBs (or signs), because otherwise the
signal will clip or, even worse, overflow and wrap around. Cutting off too few
bits is just as harmful: if the bit count is increased by one to guarantee no
overflow inside the filter, but that bit is never set at the output due to
unity gain, it is effectively lost as it will never be used. This reduces the
maximum theoretically achievable SNR by \SI{6}{\dB}, which is obviously highly
undesirable.
To make sure neither of these faults happens, it is important to keep the
filter's maximum gain at $G \leq 1$, as close to unity as possible.
Furthermore, at the end of each filter chain, the additional bits must not be
carried over, but rather the initial \num{14}\,bits before the decimal point,
along with two fractional bits after the decimal point. This yields the
desired \SI{16}{\bit} value and ensures no ``empty'' bits.
The FIR compiler can avoid those empty bits by normalizing the coefficients
such that the highest gain (i.e.\ the top peaks of its passband ripple)
is at exactly \num{1}. This is called \emph{maximizing the dynamic
range}. Figure~\ref{fig:fpga:dynamicrange} illustrates the issue of losing one
bit.
One can observe that the sine in Case \num{1} (top plot) uses the dynamic
range to a perfect extent as the full-scale sine has an amplitude of \num{31},
the maximum value a \SI{6}{\bit} \code{int} can hold. Case \num{2} (middle
plot) shows a sine that has been scaled to \num{34} and thus requires an
additional bit. But because a \SI{7}{\bit} \code{int} can hold values up to
\num{63}, most of the time the MSB ends up not being used. With \SI{7}{\bit},
the highest $\mathrm{SNR}_\mathrm{max}$ possible would be
\begin{align}
%\mathrm{ENOB} &= \log_2(130) \nonumber\\
%\mathrm{SNR}_\mathrm{max} &= \SI{1.76}{\dB} + \mathrm{ENOB} \cdot \SI{6.02}{\dB} = 44.03
\mathrm{ENOB} &= \log_2(34) = 5.09 \nonumber \\
\mathrm{SNR}_\mathrm{max} &= \SI{1.76}{\dB} + \mathrm{ENOB} \cdot \SI{6.02}{\dB} = 32.39
\end{align}
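For comparison, applying the same formula to the ideal Case \num{1} (a
full-scale amplitude of \num{31} in \SI{6}{\bit}) gives
$\mathrm{ENOB} = \log_2(31) = 4.95$ and thus
$\mathrm{SNR}_\mathrm{max} = 31.58$ (our own arithmetic, following the
formula above).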
In Case \num{3}, the dynamic range is used well and the
$\mathrm{SNR}_\mathrm{max}$ with a \SI{6}{\bit} \code{int} is
\begin{align}
%\mathrm{ENOB} &= \log_2(126) \nonumber\\
%\mathrm{SNR}_\mathrm{max} &= \SI{1.76}{\dB} + ENOB \cdot \SI{6.02}{\dB} = 43.76
\mathrm{ENOB} &= \log_2(30) = 4.91 \nonumber\\
\mathrm{SNR}_\mathrm{max} &= \SI{1.76}{\dB} + \mathrm{ENOB} \cdot \SI{6.02}{\dB} = 31.30
\end{align}
%This yields a difference of only \SI{0.27}{\dB}. This example shows that it is
This yields a difference of only \SI{1.09}{\dB}\footnote{%
While \SI{1.09}{\dB} might still seem rather large, keep in mind that with
the wider numbers running in the actual filter chains, the result is
significantly better than in this illustrative example.%
}.
This example shows that it is well advised to scale the coefficients such that
they don't ripple around \num{1}, but rather that the maximum ripple is
exactly \num{1} and not more.
If it can be ensured that no additional, mostly empty MSB is used, it becomes
possible to use an additional LSB which is always well used, effectively
winning a bit. So in most cases where the coefficients are designed to have
unity gain, close to \SI{6.02}{\dB} can be won by rescaling the
coefficients.
\begin{figure}
\centering
\input{images/fpga/dynamicrange.tikz}
\caption[Good Vs. Bad Use Of Dynamic Range]%
{An illustration of good and bad use of dynamic range}
\label{fig:fpga:dynamicrange}
\end{figure}
%>>>
% ==============================================================================
%
% M E A N E R R O R A N D V A R I A N C E I N C I C F I L T E R
%
% ==============================================================================
\subsection{Errors Due to Truncation in the CIC Filter} % <<< ---------------- %
\label{subsec:fpga:errors_in_cic_filter}
% ---------------------------------------------------------------------------- %
As detailed in Section~\ref{subsubsec:cic:register_growth}, the high gain of
CIC filters generally requires discarding bits at the filter's output. In our
implementation, we discard \SI{17}{\bit} at the output of the $R=25$ CIC
filter, and \SI{26}{\bit} at the output of the $R=125$ filter, in both cases
through truncation. No bits are discarded at the filter's input, and the
CIC compiler ensures that its internal widths are always sufficient for
full precision~\cite{xilinx:cic-compiler}. Therefore, only the last stage
introduces an error.
Hogenauer's formulas are used to determine the mean error and variance of both
CIC filters. Interestingly, the result is identical for both filters. However,
given that only the final truncation introduces an error and that the output
precision is \SI{16}{\bit} in both cases, this does make sense. The results
are:
\begin{align}
\mu_\mathrm{CIC25} &= 0.5 \label{eq:fpga:cic:mu:cic25} \\
\sigma^2_\mathrm{CIC25} &= 0.289\label{eq:fpga:cic:sigmasq:cic25} \\
\mu_\mathrm{CIC125} &= 0.5 \label{eq:fpga:cic:mu:cic125} \\
\sigma^2_\mathrm{CIC125} &= 0.289\label{eq:fpga:cic:sigmasq:cic125}
\end{align}
The calculations are performed by a Matlab script and are therefore not
further explained.
%>>>
%>>>
%^^A vim: foldenable foldcolumn=4 foldmethod=marker foldmarker=<<<,>>>
|
{"hexsha": "a0bfd6b9838afa118092da67e44e9115a69006ac", "size": 19520, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/report/chunks/fpga.tex", "max_stars_repo_name": "alpenwasser/pitaya", "max_stars_repo_head_hexsha": "a6ced99408171ffcd96c9444adfe30d2ba699f48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-03-22T15:26:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-15T20:19:03.000Z", "max_issues_repo_path": "doc/report/chunks/fpga.tex", "max_issues_repo_name": "alpenwasser/pitaya", "max_issues_repo_head_hexsha": "a6ced99408171ffcd96c9444adfe30d2ba699f48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/report/chunks/fpga.tex", "max_forks_repo_name": "alpenwasser/pitaya", "max_forks_repo_head_hexsha": "a6ced99408171ffcd96c9444adfe30d2ba699f48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.7741046832, "max_line_length": 93, "alphanum_fraction": 0.6050717213, "num_tokens": 4885}
|
import copy
import numpy as np
import sys
import warnings
import pandas as pd
sys.path.append('/home/robinmid/repos/hurricanes_hindcasting_remake/analysis')
sys.path.append('/home/robin/repos/hurricanes_hindcasting_remake/analysis')
from analysis.utils import get_index_list, detect_stationarity_and_offset_in_series, WORLD_REGIONS
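# Data layout used throughout this module (inferred from the accessors below):
# axes are (variable, region, sector, lambda, duration, time).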
class DataCapsule:
def __init__(self, _data: np.ndarray, _variables: np.ndarray, _regions: dict, _sectors: dict,
_lambda_axis: np.ndarray, _duration_axis: np.ndarray):
        if type(_data) is not np.ma.core.MaskedArray:
            raise ValueError("_data must be of type np.ma.core.MaskedArray.")
if type(_variables) is not np.ndarray:
raise ValueError("_variables must be of type np.ndarray.")
if type(_lambda_axis) is not np.ndarray:
raise ValueError("_lambda_axis must be of type np.ndarray.")
if type(_duration_axis) is not np.ndarray:
raise ValueError("_duration_axis must be of type np.ndarray.")
self.duration_axis = _duration_axis
self.lambda_axis = _lambda_axis
self.sectors = _sectors
self.regions = _regions
self.variables = _variables
self.data = _data
self.shape = _data.shape
# noinspection DuplicatedCode
class AggrData:
def __init__(self, *args, _base_damage=None, _base_forcing=None, _scaled_scenarios=None, _slope_meta=None):
if len(args) == 1:
if type(args[0]).__name__ == "DataCapsule":
self.data_capsule = args[0]
else:
raise TypeError('Must pass argument of type DataCapsule or six arguments')
elif len(args) == 6:
self.data_capsule = DataCapsule(*args)
else:
raise TypeError('Must pass one argument of type DataCapsule or six arguments')
self.base_damage = _base_damage
self.base_forcing = _base_forcing
self.scaled_scenarios = _scaled_scenarios
self.slope_meta = _slope_meta
def get_vars(self, _vars=None):
if _vars is None:
return self.data_capsule.variables
else:
if not isinstance(_vars, (list, tuple, np.ndarray)):
_vars = [_vars]
_vars = np.array(_vars)
vars_indices = get_index_list(_vars, self.data_capsule.variables)
data_new = self.data_capsule.data[vars_indices, ...]
return AggrData(data_new, self.data_capsule.variables[vars_indices], self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def get_regions(self, _regions=None):
if _regions is None:
return self.data_capsule.regions
else:
if not isinstance(_regions, (list, tuple, np.ndarray)):
_regions = [_regions]
_regions = np.array(_regions)
regions_indices = get_index_list(_regions, list(self.data_capsule.regions.keys()))
regions_new = {}
for _region in _regions:
if _region in self.data_capsule.regions.keys():
regions_new[_region] = self.data_capsule.regions[_region]
data_new = self.data_capsule.data[:, regions_indices, ...]
return AggrData(data_new, self.data_capsule.variables, regions_new, self.data_capsule.sectors,
self.data_capsule.lambda_axis, self.data_capsule.duration_axis,
_base_damage=self.base_damage, _base_forcing=self.base_forcing,
_scaled_scenarios=self.scaled_scenarios, _slope_meta=self.slope_meta)
def get_sectors(self, _sectors=None):
if _sectors is None:
return self.data_capsule.sectors
else:
if not isinstance(_sectors, (list, tuple, np.ndarray)):
_sectors = [_sectors]
_sectors = np.array(_sectors)
sectors_indices = get_index_list(_sectors, list(self.data_capsule.sectors.keys()))
sectors_new = {}
for _sector in _sectors:
if _sector in self.data_capsule.sectors.keys():
sectors_new[_sector] = self.data_capsule.sectors[_sector]
data_new = self.data_capsule.data[:, :, sectors_indices, ...]
return AggrData(data_new, self.data_capsule.variables, self.data_capsule.regions, sectors_new,
self.data_capsule.lambda_axis, self.data_capsule.duration_axis,
_base_damage=self.base_damage, _base_forcing=self.base_forcing,
_scaled_scenarios=self.scaled_scenarios, _slope_meta=self.slope_meta)
def get_lambdavals(self, _lambdavals=None):
if _lambdavals is None:
return self.data_capsule.lambda_axis
else:
if not isinstance(_lambdavals, (list, tuple, np.ndarray)):
_lambdavals = [_lambdavals]
_lambdavals = np.array(_lambdavals)
lambda_indices = get_index_list(_lambdavals, self.data_capsule.lambda_axis)
data_new = self.data_capsule.data[..., lambda_indices, :, :]
return AggrData(data_new, self.data_capsule.variables, self.data_capsule.regions, self.data_capsule.sectors,
self.data_capsule.lambda_axis[lambda_indices],
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def get_durationvals(self, _durationvals=None):
if _durationvals is None:
return self.data_capsule.duration_axis
else:
if not isinstance(_durationvals, (list, tuple, np.ndarray)):
_durationvals = [_durationvals]
_durationvals = np.array(_durationvals)
duration_indices = get_index_list(_durationvals, self.data_capsule.duration_axis)
data_new = self.data_capsule.data[..., duration_indices, :]
return AggrData(data_new, self.data_capsule.variables, self.data_capsule.regions, self.data_capsule.sectors,
self.data_capsule.lambda_axis,
self.data_capsule.duration_axis[duration_indices], _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def get_duration_axis(self):
return self.data_capsule.duration_axis
def get_lambda_axis(self):
return self.data_capsule.lambda_axis
def clip(self, *args):
if len(args) == 1:
_from = 0
_to = args[0]
elif len(args) > 1:
_from = args[0]
_to = args[1]
else:
raise ValueError('Must pass at least one argument _from or both _from and _to')
return AggrData(self.data_capsule.data[..., _from:_to], self.data_capsule.variables, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage, _base_forcing=self.base_forcing,
_scaled_scenarios=self.scaled_scenarios, _slope_meta=self.slope_meta)
@property
def data(self):
return self.get_data()
def get_data(self):
return self.data_capsule.data
@property
def shape(self):
return self.data_capsule.data.shape
@property
def dT_stepwidth(self):
return self.data_capsule.duration_axis[1] - self.data_capsule.duration_axis[0]
@property
def re_stepwidth(self):
return self.data_capsule.lambda_axis[1] - self.data_capsule.lambda_axis[0]
def get_sim_duration(self):
return self.data_capsule.data.shape[-1]
def add_var(self, _data, _name, _inplace=False):
if _data.shape[1:-1] != self.get_data().shape[1:-1]:
raise ValueError('Must pass array same dimensions in region, sector, lambda and duration!')
if _data.shape[-1] < self.get_data().shape[-1]:
            warnings.warn('Attention. New variable is shorter than existing data. Will mask remaining time steps.')
new_dim = np.ma.masked_all((1,) + self.get_data().shape[1:])
new_dim[..., :_data.shape[-1]] = _data
data_new = np.ma.concatenate([self.get_data(), new_dim], axis=0)
vars_new = np.concatenate([self.data_capsule.variables, np.array([_name])])
if _inplace:
self.data_capsule.data = data_new
self.data_capsule.variables = np.concatenate([self.data_capsule.variables, np.array([_name])])
else:
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def drop_var(self, _name, _inplace=False):
if _name not in self.get_vars():
print('Variable {} is not contained in data. Doing nothing.'.format(_name))
return
vars_new = self.get_vars()[self.get_vars() != _name]
data_new = self.get_vars(vars_new).get_data()
if _inplace:
self.data_capsule.data = data_new
self.data_capsule.variables = vars_new
else:
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def drop_region(self, _name, _inplace=False):
if _name not in self.get_regions():
print('Region {} is not contained in data. Doing nothing.'.format(_name))
return
regions_new = copy.deepcopy(self.get_regions())
regions_new.pop(_name)
data_new = self.get_regions(list(regions_new.keys())).get_data()
if _inplace:
self.data_capsule.data = data_new
self.data_capsule.regions = regions_new
else:
return AggrData(data_new, self.data_capsule.variables, regions_new,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def calc_prices(self, vars=None, _inplace=False):
        if vars is None:
vars = ['communicated_possible_production', 'consumption', 'demand', 'direct_loss',
'expected_production', 'incoming_demand', 'production', 'storage', 'total_loss']
add_vars = []
for var in vars:
if var in self.get_vars() and var + "_value" in self.get_vars() and var + "_price" not in self.get_vars():
add_vars.append(var)
if _inplace:
for var in add_vars:
self.add_var(self.get_vars(var + "_value").get_data() / self.get_vars(var).get_data(), var + "_price",
_inplace=True)
else:
data_new = np.ma.masked_all((len(add_vars),) + self.shape[1:])
vars_new = self.get_vars()
for var_idx, var in enumerate(add_vars):
data_new[var_idx, ...] = self.get_vars(var + "_value").get_data() / self.get_vars(var).get_data()
                vars_new = np.concatenate([vars_new, np.array([var + "_price"])])
data_new = np.ma.concatenate([self.get_data(), data_new], axis=0)
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def calc_eff_prod_capacity(self, _inplace=False):
if 'effective_production_capacity' in self.get_vars():
print('Variable \'effective_production_capacity\' already in data. Doing nothing.')
return
if 'production' not in self.get_vars() or 'effective_forcing' not in self.get_vars():
print(
'Variables \'production\' and \'effective_forcing\' needed to calculate production_capacity. Doing nothing.')
return
eff_prod_capacity = self.get_vars('production').get_data() / (
self.get_vars('effective_forcing').get_data() * self.get_vars('production').clip(1).get_data())
if _inplace:
self.add_var(eff_prod_capacity, 'effective_production_capacity', _inplace=True)
else:
data_new = np.ma.masked_all((1,) + self.shape[1:])
data_new[...] = eff_prod_capacity
data_new = np.ma.concatenate([self.get_data(), data_new], axis=0)
vars_new = np.concatenate([self.get_vars(), np.array(['effective_production_capacity'])])
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def calc_eff_forcing(self, _inplace=False):
if 'effective_forcing' in self.get_vars():
print('Variable \'effective_forcing\' already in data. Doing nothing.')
return
if 'forcing' not in self.get_vars() or 'production' not in self.get_vars():
print('Variables \'forcing\' and \'production\' required to calculate effective forcing. Doing nothing.')
return
eff_forcing = np.ma.masked_all((1,) + self.shape[1:])
for r_idx, sub_regions in enumerate(self.get_regions().values()):
if len(sub_regions) > 1 and list(self.get_regions().keys())[r_idx] in sub_regions:
sub_regions.remove(list(self.get_regions().keys())[r_idx])
for s_idx, sub_sectors in enumerate(self.get_sectors().values()):
if len(sub_sectors) > 1 and list(self.get_sectors().keys())[s_idx] in sub_sectors:
sub_sectors.remove(list(self.get_sectors().keys())[s_idx])
original_forcings = self.get_regions(sub_regions).get_sectors(sub_sectors).get_vars(
'forcing').get_data()
if len(sub_regions) > 1 or len(sub_sectors) > 1:
baseline_productions = self.get_regions(sub_regions).get_sectors(sub_sectors).get_vars(
'production').clip(1).get_data()
eff_forcing[0, r_idx, s_idx, ...] = np.sum(original_forcings * baseline_productions, axis=(1, 2),
keepdims=True) / np.sum(baseline_productions,
axis=(1, 2),
keepdims=True)
else:
eff_forcing[0, r_idx, s_idx, ...] = original_forcings
if _inplace:
self.add_var(eff_forcing, 'effective_forcing', _inplace=True)
else:
return self.add_var(eff_forcing, 'effective_forcing', _inplace=False)
def calc_demand_exceedence(self, _inplace=False):
if 'demand_exceedence' in self.get_vars():
print('Variable \'demand_exceedence\' already in data. Doing nothing.')
return
if 'effective_forcing' not in self.get_vars() or 'production' not in self.get_vars() or 'incoming_demand' not in self.get_vars():
print(
'Variables \'effective_forcing\', \'production\' and \'incoming_demand\' required to calculate demand_exceedence. Doing nothing.')
return
demand_exceedence = self.get_vars('incoming_demand').get_data() / (self.get_vars(
'effective_forcing').get_data() * self.get_vars('production').clip(1).get_data()) - 1
if _inplace:
self.add_var(demand_exceedence, 'demand_exceedence', _inplace=True)
else:
data_new = np.ma.masked_all((1,) + self.shape[1:])
data_new[...] = demand_exceedence
data_new = np.ma.concatenate([self.get_data(), data_new], axis=0)
vars_new = np.concatenate([self.get_vars(), np.array(['demand_exceedence'])])
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def calc_change_to_baseline(self, mode, _inplace=False, _aggregate=False):
        print('Calculating {} change to baseline values of all variables. Make sure that t=0 is the baseline '
              'state!'.format(mode))
        baseline_reference = self.get_data()[..., 0:1]
        if mode == 'relative':
            data_new = self.get_data() / baseline_reference
        elif mode == 'absolute':
            data_new = self.get_data() - baseline_reference
        else:
            raise ValueError("{} is not a valid mode. Choose 'relative' or 'absolute'.".format(mode))
if _aggregate:
data_new = data_new.sum(axis=-1, keepdims=True)
if mode == 'relative':
aggregation_time = self.get_data().shape[-1]
data_new /= aggregation_time
vars_new = np.array([v + '_{}_change'.format(mode) for v in self.data_capsule.variables])
if _inplace:
self.data_capsule.data = data_new
self.data_capsule.variables = vars_new
else:
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def calc_demand_production_gap(self, _inplace=False):
if 'demand_production_gap' in self.get_vars():
print('Variable \'demand_production_gap\' already in data. Doing nothing.')
return
if 'effective_forcing' not in self.get_vars() or 'production' not in self.get_vars() or 'incoming_demand' not in self.get_vars():
print(
'Variables \'effective_forcing\', \'production\' and \'incoming_demand\' required to calculate demand_production_gap. Doing nothing.')
return
demand_prod_gap = self.get_vars('incoming_demand').get_data() - self.get_vars('production').get_data()
if _inplace:
self.add_var(demand_prod_gap, 'demand_production_gap', _inplace=True)
else:
data_new = np.ma.masked_all((1,) + self.shape[1:])
data_new[...] = demand_prod_gap
data_new = np.ma.concatenate([self.get_data(), data_new], axis=0)
vars_new = np.concatenate([self.get_vars(), np.array(['demand_production_gap'])])
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def calc_desired_overproduction_capacity(self, _inplace=False):
if 'desired_overproduction_capacity' in self.get_vars():
print('Variable \'desired_overproduction_capacity\' already in data. Doing nothing.')
return
if 'desired_production_capacity' not in self.get_vars() or 'forcing' not in self.get_vars():
print(
'Variables \'desired_production_capacity\' and \'forcing\' required to calculate desired_overproduction_capacity. Doing nothing.')
return
des_overprod_capac = self.get_vars('desired_production_capacity').get_data() - self.get_vars(
'forcing').get_data() + 1
if _inplace:
self.add_var(des_overprod_capac, 'desired_overproduction_capacity', _inplace=True)
else:
data_new = np.ma.masked_all((1,) + self.shape[1:])
data_new[...] = des_overprod_capac
data_new = np.ma.concatenate([self.get_data(), data_new], axis=0)
vars_new = np.concatenate([self.get_vars(), np.array(['desired_overproduction_capacity'])])
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
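    # Aggregate over the time axis: 'relative_difference' yields the
    # percentage deviation from the t=0 baseline, 'absolute_difference' the
    # plain difference, and 'sum' the raw sum over the first _clip steps.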
def aggregate(self, _method, _clip=None, _vars=None):
if self.get_sim_duration() <= 1:
raise ValueError('can only aggregate over more than one time step.')
if _clip is None:
_clip = self.get_sim_duration()
if _vars is None:
_vars = self.get_vars()
data_sum = self.get_vars(_vars).clip(_clip).get_data().sum(axis=-1, keepdims=True)
data_baseline = self.get_vars(_vars).clip(1).get_data() * _clip
if _method == 'relative_difference':
data_new = (data_sum / data_baseline - 1) * 100
elif _method == 'absolute_difference':
data_new = data_sum - data_baseline
elif _method == 'sum':
data_new = data_sum
else:
raise ValueError('{} is not a valid aggregation method.'.format(_method))
vars_new = np.array([v + '_{}_{}'.format(_method, _clip) for v in self.data_capsule.variables])
return AggrData(data_new, vars_new, self.data_capsule.regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def calc_sector_diff(self, sec1, sec2, _inplace=False):
if sec1 not in self.get_sectors() or sec2 not in self.get_sectors():
raise ValueError("Either {} or {} could not be found in sectors.".format(sec1, sec2))
sec1_idx = np.where(np.array(list(self.get_sectors().keys())) == sec1)[0][0]
sec2_idx = np.where(np.array(list(self.get_sectors().keys())) == sec2)[0][0]
sec_data = self.data[:, :, sec1_idx:sec1_idx + 1, ...] - self.data[:, :, sec2_idx:sec2_idx + 1, ...]
data_new = np.concatenate((self.data, sec_data), axis=2)
new_sectors = copy.deepcopy(self.data_capsule.sectors)
diff_sector_list = copy.deepcopy(new_sectors[sec1])
diff_sector_list.remove(sec2)
new_sectors["{}-{}".format(sec1, sec2)] = diff_sector_list
if _inplace:
self.data_capsule.data = data_new
self.data_capsule.sectors = new_sectors
else:
return AggrData(data_new, self.data_capsule.variables, self.data_capsule.regions,
new_sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def aggregate_regions(self, region_list, _name=None, _inplace=False):
for r in region_list:
if r not in self.get_regions():
raise ValueError("{} could not be found in regions.".format(r))
r_data = self.get_regions(region_list).data.sum(axis=1, keepdims=True)
data_new = np.concatenate((self.data, r_data), axis=1)
new_regions = copy.deepcopy(self.data_capsule.regions)
if _name is None:
_name = ''
for r in region_list:
_name += '{}+'.format(r)
_name = _name[:-1]
new_regions[_name] = region_list
if _inplace:
self.data_capsule.data = data_new
self.data_capsule.regions = new_regions
else:
return AggrData(data_new, self.data_capsule.variables, new_regions,
self.data_capsule.sectors, self.data_capsule.lambda_axis,
self.data_capsule.duration_axis, _base_damage=self.base_damage,
_base_forcing=self.base_forcing, _scaled_scenarios=self.scaled_scenarios,
_slope_meta=self.slope_meta)
def have_equal_shape(data1: AggrData, data2: AggrData):
return (np.all(data1.get_vars() == data2.get_vars()) and
            np.all(data1.get_regions() == data2.get_regions()) and
np.all(data1.get_sectors() == data2.get_sectors()) and
np.all(data1.get_durationvals() == data2.get_durationvals()) and
np.all(data1.get_lambdavals() == data2.get_lambdavals()) and
np.all(data1.shape == data2.shape))
# axis order: variable (v), region (r), sector (s), lambda (l), duration (d), then time
def calc_dataset_stationarity_and_offset(_data: AggrData, **kwargs):
df = pd.DataFrame(columns=['variable', 'region', 'sector', 'lambda', 'duration', 'num_seq', 'from', 'to', 'offset'])
for v in _data.get_vars():
for r in _data.get_regions():
for s in _data.get_sectors():
for l in _data.get_lambda_axis():
for d in _data.get_duration_axis():
ts = _data.get_vars(v).get_regions(r).get_sectors(s).get_lambdavals(l).get_durationvals(
d).get_data().flatten()
segment_and_offset, recursion_round = detect_stationarity_and_offset_in_series(ts, **kwargs)
if len(segment_and_offset) == 0:
print("Attention. No stationary segment found for var = {}, r = {}, s = {}, l = {}, "
"d = {}".format(v, r, s, l, d))
elif recursion_round > 0:
print(
"Attention. Stationary segment for var = {}, r = {}, s = {}, l = {}, d = {} only found "
"in recurion round {} with _threshold = . Consider choosing a different _threshold "
"value next time".format(v, r, s, l, d, recursion_round,
kwargs.get('_threshold') * np.power(2, recursion_round)))
for seq_idx in range(len(segment_and_offset)):
df.loc[len(df)] = [v, r, s, l, d, seq_idx] + list(segment_and_offset[seq_idx])
index = pd.MultiIndex.from_frame(df[['variable', 'region', 'sector', 'lambda', 'duration', 'num_seq']])
columns = ['from', 'to', 'offset']
df = pd.DataFrame(df[['from', 'to', 'offset']].to_numpy(), index=index, columns=columns).sort_index()
return df
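# Rebuild aggregate world regions from their member countries, dropping any
# pre-existing aggregates first (WORLD_REGIONS comes from analysis.utils).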
def clean_regions(_data: AggrData):
for r in WORLD_REGIONS.keys():
if r in _data.get_regions():
_data.drop_region(r, _inplace=True)
_data.aggregate_regions(list(set(WORLD_REGIONS[r]) - set(WORLD_REGIONS.keys())), _name=r, _inplace=True)
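# --- Hedged usage sketch (illustrative only, not part of the original module).
# Builds a toy AggrData with the axis layout (variable, region, sector,
# lambda, duration, time); all names below are made up.
if __name__ == '__main__':
    _toy = np.ma.masked_all((1, 2, 1, 3, 4, 5))
    _toy[...] = np.random.rand(*_toy.shape)
    _capsule = DataCapsule(_toy, np.array(['production']),
                           {'USA': ['USA'], 'DEU': ['DEU']}, {'ALL': ['ALL']},
                           np.linspace(0.0, 1.0, 3), np.linspace(0.0, 30.0, 4))
    _aggr = AggrData(_capsule)
    print(_aggr.shape)               # (1, 2, 1, 3, 4, 5)
    print(_aggr.get_sim_duration())  # 5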
|
{"hexsha": "c85c0e327104a4a9acbfd26302b5ed60eaa2c67f", "size": 28710, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/dataformat.py", "max_stars_repo_name": "rmiddelanis/harvey_scaling", "max_stars_repo_head_hexsha": "a94064996fb200c26a90482cc63804dcdc3cf6dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/dataformat.py", "max_issues_repo_name": "rmiddelanis/harvey_scaling", "max_issues_repo_head_hexsha": "a94064996fb200c26a90482cc63804dcdc3cf6dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/dataformat.py", "max_forks_repo_name": "rmiddelanis/harvey_scaling", "max_forks_repo_head_hexsha": "a94064996fb200c26a90482cc63804dcdc3cf6dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.8514851485, "max_line_length": 150, "alphanum_fraction": 0.6118425636, "include": true, "reason": "import numpy", "num_tokens": 6272}
|
C$Procedure PSV2PL ( Point and spanning vectors to plane )
SUBROUTINE PSV2PL ( POINT, SPAN1, SPAN2, PLANE )
C$ Abstract
C
C Make a SPICELIB plane from a point and two spanning vectors.
C
C$ Disclaimer
C
C THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
C CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
C GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
C ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE
C PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
C TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
C WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
C PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
C SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
C SOFTWARE AND RELATED MATERIALS, HOWEVER USED.
C
C IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
C BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
C LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
C INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
C REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
C REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.
C
C RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
C THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
C CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
C ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.
C
C$ Required_Reading
C
C PLANES
C
C$ Keywords
C
C GEOMETRY
C MATH
C PLANE
C
C$ Declarations
INTEGER UBPL
PARAMETER ( UBPL = 4 )
DOUBLE PRECISION POINT ( 3 )
DOUBLE PRECISION SPAN1 ( 3 )
DOUBLE PRECISION SPAN2 ( 3 )
DOUBLE PRECISION PLANE ( UBPL )
C$ Brief_I/O
C
C Variable I/O Description
C -------- --- --------------------------------------------------
C POINT,
C SPAN1,
C SPAN2 I A point and two spanning vectors defining a plane.
C PLANE O An array representing the plane.
C
C$ Detailed_Input
C
C POINT,
C SPAN1,
C SPAN2 are, respectively, a point and two spanning vectors
C that define a geometric plane in three-dimensional
C space. The plane is the set of vectors
C
C POINT + s * SPAN1 + t * SPAN2
C
C where s and t are real numbers. The spanning
C vectors SPAN1 and SPAN2 must be linearly
C independent, but they need not be orthogonal or
C unitized.
C
C$ Detailed_Output
C
C PLANE is a SPICELIB plane that represents the geometric
C plane defined by POINT, SPAN1, and SPAN2.
C
C$ Parameters
C
C None.
C
C$ Exceptions
C
C 1) If SPAN1 and SPAN2 are linearly dependent, then the vectors
C POINT, SPAN1, and SPAN2 do not define a plane. The error
C SPICE(DEGENERATECASE) is signalled.
C
C$ Files
C
C None.
C
C$ Particulars
C
C SPICELIB geometry routines that deal with planes use the `plane'
C data type to represent input and output planes. This data type
C makes the subroutine interfaces simpler and more uniform.
C
C The SPICELIB routines that produce SPICELIB planes from data that
C define a plane are:
C
C NVC2PL ( Normal vector and constant to plane )
C NVP2PL ( Normal vector and point to plane )
C PSV2PL ( Point and spanning vectors to plane )
C
C The SPICELIB routines that convert SPICELIB planes to data that
C define a plane are:
C
C PL2NVC ( Plane to normal vector and constant )
C PL2NVP ( Plane to normal vector and point )
C PL2PSV ( Plane to point and spanning vectors )
C
C Any of these last three routines may be used to convert this
C routine's output, PLANE, to another representation of a
C geometric plane.
C
C$ Examples
C
C 1) Project a vector V orthogonally onto a plane defined by
C POINT, SPAN1, and SPAN2. PROJ is the projection we want; it
C is the closest vector in the plane to V.
C
C CALL PSV2PL ( POINT, SPAN1, SPAN2, PLANE )
C CALL VPRJP ( V, PLANE, PROJ )
C
C
C 2) Find the plane determined by a spacecraft's position vector
C relative to a central body and the spacecraft's velocity
C vector. We assume that all vectors are given in the same
C coordinate system.
C
C C
C C POS is the spacecraft's position, relative to
C C the central body. VEL is the spacecraft's velocity
C C vector. POS is a point (vector, if you like) in
C C the orbit plane, and it is also one of the spanning
C C vectors of the plane.
C C
C CALL PSV2PL ( POS, POS, VEL, PLANE )
C
C$ Restrictions
C
C None.
C
C$ Literature_References
C
C [1] `Calculus and Analytic Geometry', Thomas and Finney.
C
C$ Author_and_Institution
C
C N.J. Bachman (JPL)
C
C$ Version
C
C- SPICELIB Version 1.1.0, 31-AUG-2005 (NJB)
C
C Updated to remove non-standard use of duplicate arguments
C in VMINUS call.
C
C- SPICELIB Version 1.0.1, 10-MAR-1992 (WLT)
C
C Comment section for permuted index source lines was added
C following the header.
C
C- SPICELIB Version 1.0.0, 01-NOV-1990 (NJB)
C
C-&
C$ Index_Entries
C
C point and spanning vectors to plane
C
C-&
C$ Revisions
C
C- SPICELIB Version 1.1.0, 31-AUG-2005 (NJB)
C
C Updated to remove non-standard use of duplicate arguments
C in VMINUS call.
C
C-&
C
C SPICELIB functions
C
DOUBLE PRECISION VDOT
LOGICAL RETURN
LOGICAL VZERO
C
C Local parameters
C
C
C The contents of SPICELIB planes are as follows:
C
C Elements NMLPOS through NMLPOS + 2 contain a unit normal
C vector for the plane.
C
C Element CONPOS contains a constant for the plane; every point
C        X in the plane satisfies
C
C < X, PLANE(NMLPOS) > = PLANE(CONPOS).
C
C The plane constant is the distance of the plane from the
C origin; the normal vector, scaled by the constant, is the
C closest point in the plane to the origin.
C
C
INTEGER NMLPOS
PARAMETER ( NMLPOS = 1 )
INTEGER CONPOS
PARAMETER ( CONPOS = 4 )
C
C Local variables
C
DOUBLE PRECISION TMPVEC ( 3 )
C
C This routine checks in only if an error is discovered.
C
IF ( RETURN () ) THEN
RETURN
END IF
C
C Find the unitized cross product of SPAN1 and SPAN2; this is our
C unit normal vector, or possibly its inverse.
C
CALL UCRSS ( SPAN1, SPAN2, PLANE(NMLPOS) )
IF ( VZERO ( PLANE(NMLPOS) ) ) THEN
CALL CHKIN ( 'PSV2PL' )
CALL SETMSG ( 'Spanning vectors are parallel.' )
CALL SIGERR ( 'SPICE(DEGENERATECASE)' )
CALL CHKOUT ( 'PSV2PL' )
RETURN
END IF
C
C Find the plane constant corresponding to the unit normal
C vector we've found.
C
PLANE(CONPOS) = VDOT ( PLANE(NMLPOS), POINT )
C
C The constant should be the distance of the plane from the
C origin. If the constant is negative, negate both it and the
C normal vector.
C
IF ( PLANE(CONPOS) .LT. 0.D0 ) THEN
PLANE(CONPOS) = -PLANE(CONPOS)
CALL VMINUS ( PLANE(NMLPOS), TMPVEC )
CALL VEQU ( TMPVEC, PLANE(NMLPOS) )
END IF
RETURN
END
|
{"hexsha": "3771f852a1fd37bd0582ac51bca8ed5a3b291688", "size": 7879, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/nasa_f/psv2pl.f", "max_stars_repo_name": "agforero/FTFramework", "max_stars_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-19T21:43:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T02:57:25.000Z", "max_issues_repo_path": "source/nasa_f/psv2pl.f", "max_issues_repo_name": "agforero/fortran-testing-framework", "max_issues_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-07T21:17:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-09T02:18:07.000Z", "max_forks_repo_path": "source/nasa_f/psv2pl.f", "max_forks_repo_name": "agforero/fortran-testing-framework", "max_forks_repo_head_hexsha": "6caf0bc7bae8dc54a62da62df37e852625f0427d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T08:41:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:41:53.000Z", "avg_line_length": 29.073800738, "max_line_length": 72, "alphanum_fraction": 0.6241908872, "num_tokens": 2231}
|
[STATEMENT]
lemma lim_Ref_alloc[simp]: "lim (snd (Ref.alloc x h)) = Suc (lim h)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lim (snd (Ref.alloc x h)) = Suc (lim h)
[PROOF STEP]
unfolding Ref.alloc_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lim (snd (let l = lim h; r = Ref l in (r, Ref.set r x (h\<lparr>lim := l + 1\<rparr>)))) = Suc (lim h)
[PROOF STEP]
by (simp add: Let_def)
|
{"llama_tokens": 184, "file": "Separation_Logic_Imperative_HOL_Tools_Imperative_HOL_Add", "length": 2}
|
struct Point{T<:Real}
x::T
y::T
end
Point(x::T) where {T<:Complex} = Point(real(x), imag(x))
Point(p::Point{T}) where {T<:Real} = Point(p.x, p.y)
function Point(x::AbstractVector{T}) where {T}
@assert length(x) == 2
return Point(x[1], x[2])
end
Base.:+(a::Point, b::Point) = Point(a.x + b.x, a.y + b.y)
Base.:-(a::Point, b::Point) = Point(a.x - b.x, a.y - b.y)
Base.:*(a::Point, f::Vector{T}) where {T<:Number} = Point(a.x * f[1], a.y * f[2])
Base.:/(a::Point, f::T) where {T<:Number} = Point(a.x / f, a.y / f)
Vector(p::Point) = [p.x, p.y]
middle(a::Point, b::Point) = Point(0.5*(a.x + b.x), 0.5*(a.y + b.y))
angle(A::Point) = atan(A.y, A.x)
function angle(A::Point, B::Point)
ϕ = angle(B) - angle(A)
ϕ < -π && return ϕ + 2π
ϕ > π && return ϕ - 2π
return ϕ
end
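# --- Hedged usage sketch (illustrative only, not part of the original file).
# Note: `angle` defined above shadows `Base.angle` within this file's scope.
#   p1 = Point(1.0, 2.0)
#   p2 = Point(3 + 4im)    # constructed from a complex number -> Point(3, 4)
#   middle(p1, p2)         # Point(2.0, 3.0)
#   angle(p1, p2)          # signed angle from p1 to p2, in (-pi, pi]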
|
{"hexsha": "d6e5594e938880388053d2d6a63a5da365c32994", "size": 786, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Points.jl", "max_stars_repo_name": "jwscook/WindingNumbers", "max_stars_repo_head_hexsha": "253c697501e309e814a92244260fedbf9dffcfa5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-09T13:22:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T13:22:18.000Z", "max_issues_repo_path": "src/Points.jl", "max_issues_repo_name": "jwscook/WindingNumbers", "max_issues_repo_head_hexsha": "253c697501e309e814a92244260fedbf9dffcfa5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Points.jl", "max_forks_repo_name": "jwscook/WindingNumbers", "max_forks_repo_head_hexsha": "253c697501e309e814a92244260fedbf9dffcfa5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2, "max_line_length": 81, "alphanum_fraction": 0.5496183206, "num_tokens": 327}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df_main = pd.read_csv('../data/correct_vs_incorrect.csv')
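# Expected columns (inferred from the usage below): 'model', 'dataset',
# 'transformation', 'type' ('correct' or 'incorrect') and 'pcp', the
# percentage change of prediction plotted on the y-axis.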
for model_ in ['code2vec', 'code2seq', 'ggnn']:
print(f'Plotting for {model_}...')
df_model = df_main[df_main['model'] == model_]
df_correct = df_model[df_model['type'] == 'correct']
df_correct = df_correct.drop('type', axis=1)
df_correct = df_correct.rename(columns={'pcp': 'pcp_correct'})
df_incorrect = df_model[df_model['type'] == 'incorrect']
df_incorrect = df_incorrect.drop('type', axis=1)
df_incorrect = df_incorrect.rename(columns={'pcp': 'pcp_incorrect'})
df = pd.merge(df_correct, df_incorrect, how='left', on=['model', 'dataset', 'transformation'])
print(df.head())
df.set_index(['transformation', 'dataset'])[['pcp_correct', 'pcp_incorrect']].plot.bar(rot=55)
plt.xlabel('(Transformation, Java Dataset)', fontsize=16, labelpad=10)
plt.ylabel('Change of Prediction (%)', fontsize=16)
plt.yticks(np.arange(0, 101, 10))
plt.rc('legend', fontsize=12)
plt.tick_params(labelsize=13)
lg = plt.legend()
lg.get_texts()[0].set_text('Correctly predicted method')
lg.get_texts()[1].set_text('Incorrectly predicted method')
plt.gcf().subplots_adjust(bottom=0.25)
plt.savefig('{}_correct_vs_incorrect.png'.format(model_), dpi=400)
|
{"hexsha": "07b35226ecb04b542b9325bc2e46849b70c2fab9", "size": 1349, "ext": "py", "lang": "Python", "max_stars_repo_path": "results/plots/correct_vs_incorrect.py", "max_stars_repo_name": "s1530129650/tnpa-generalizability", "max_stars_repo_head_hexsha": "5b1a508a2a6d7296d7a3f84d08cf2c9bb3490004", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "results/plots/correct_vs_incorrect.py", "max_issues_repo_name": "s1530129650/tnpa-generalizability", "max_issues_repo_head_hexsha": "5b1a508a2a6d7296d7a3f84d08cf2c9bb3490004", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "results/plots/correct_vs_incorrect.py", "max_forks_repo_name": "s1530129650/tnpa-generalizability", "max_forks_repo_head_hexsha": "5b1a508a2a6d7296d7a3f84d08cf2c9bb3490004", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8787878788, "max_line_length": 98, "alphanum_fraction": 0.685693106, "include": true, "reason": "import numpy", "num_tokens": 368}
|
(*===========================================================================
Properties of bit vectors
===========================================================================*)
Require Import ssreflect ssrfun ssrbool eqtype ssrnat seq tuple fintype div zmodp ssralg.
Require Import ZArith.
Require Import tuplehelp bitsrep nathelp.
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
Lemma trivialBits (p q: BITS 0) : p = q.
Proof. by rewrite (tuple0 p) (tuple0 q). Qed.
(*---------------------------------------------------------------------------
Properties of conversion to and from natural numbers.
---------------------------------------------------------------------------*)
Lemma toNatCons n b (p:BITS n) : toNat (consB b p) = b + (toNat p).*2.
Proof. done. Qed.
Lemma toNatNil (p:BITS 0) : toNat p = 0.
Proof. by rewrite (tuple0 p). Qed.
(* toNat is left-inverse to fromNat *)
Lemma toNatK n : cancel (@toNat n) (@fromNat n).
Proof. induction n; first (move => p; apply trivialBits).
+ case/tupleP => b x. rewrite toNatCons/fromNat-/fromNat /= half_bit_double.
rewrite IHn odd_add odd_double. by case b.
Qed.
(* Hence toNat is injective *)
Definition toNat_inj n := can_inj (@toNatK n).
(* toNat result is bounded *)
Lemma toNatBounded n : forall (p: BITS n), toNat p < 2^n.
Proof. induction n. move => p. by rewrite toNatNil.
case/tupleP => [b p].
rewrite expnS mul2n toNatCons.
case b.
+ rewrite ltn_Sdouble. apply IHn.
+ rewrite ltn_double. apply IHn.
Qed.
Lemma toNat_fromNatBounded n : forall m, m < 2^n -> toNat (fromNat (n:=n) m) = m.
Proof. induction n.
+ rewrite expn0. by case.
+ rewrite expnS. move => m. specialize (IHn m./2).
move => LT.
assert (m./2 < 2^n).
rewrite -ltn_double. rewrite -(odd_double_half m) mul2n in LT.
rewrite -(ltn_add2l (odd m)).
by apply ltn_addl.
specialize (IHn H).
rewrite /toNat-/toNat/=.
rewrite /toNat/= in IHn. rewrite IHn.
by rewrite odd_double_half.
Qed.
Lemma fromNatBounded_eq m1 m2 n : m1 < 2^n -> m2 < 2^n ->
(m1==m2) = (fromNat (n:=n) m1 == fromNat m2).
Proof. move => B1 B2.
case E: (m1 == m2);
case E': (#m1 == #m2) => //. by rewrite (eqP E) eq_refl in E'.
rewrite -(toNat_fromNatBounded B1) -(toNat_fromNatBounded B2) in E.
by rewrite (eqP E') eq_refl in E.
Qed.
Lemma fromNatHalf n m : cons_tuple (odd m) (fromNat (n:=n) m./2) = fromNat m.
Proof. done. Qed.
Lemma fromNat_wrap n : forall m, fromNat (n:=n) m = fromNat (n:=n) (m + 2^n).
Proof. induction n => //.
rewrite expnS.
move => m.
case ODD: (odd m); rewrite /fromNat-/fromNat /=ODD odd_add odd_mul/=ODD/= halfD ODD/=.
specialize (IHn m./2). by rewrite odd_mul/= add0n mul2n doubleK IHn.
specialize (IHn m./2). by rewrite add0n mul2n doubleK IHn.
Qed.
Lemma fromNat_wrapMany n c : forall m, fromNat (n:=n) m = fromNat (n:=n) (m + c * 2^n).
Proof. induction c => m. by rewrite mul0n addn0.
rewrite mulSn (addnC (2^n)) addnA fromNat_wrap. rewrite IHc.
by rewrite -addnA (addnC (2^n)) addnA.
Qed.
Lemma toNat_mod n (p:BITS n): toNat p = toNat p %% 2^n.
Proof. rewrite modn_small => //. apply toNatBounded. Qed.
Lemma toNat_fromNat n m : @toNat n (fromNat m) = m %% 2^n.
Proof. have H:= divn_eq m (2^n). rewrite {1}H.
have HH:= @fromNat_wrapMany n (m %/ 2^n) (m %% 2^n). rewrite addnC in HH. rewrite -HH.
rewrite toNat_fromNatBounded. done. apply ltn_pmod. apply expn_gt0. Qed.
Lemma fromNat_succn n : forall b c, @fromNat n b = fromNat c -> @fromNat n (b.+1) = fromNat(c.+1).
Proof. induction n => //.
move => b c EQ. rewrite /fromNat-/fromNat. rewrite /fromNat-/fromNat in EQ.
elim: (splitTuple EQ) => [EQ1 EQ2]. simpl in EQ1. simpl in EQ2.
specialize (IHn _ _ EQ2). rewrite/= !uphalf_half /=EQ1.
case ODD: (odd c). + by rewrite !add1n IHn. + by rewrite !add0n EQ2.
Qed.
Lemma fromNat_addn n : forall a b c, @fromNat n b = fromNat c -> @fromNat n (a+b) = fromNat(a+c).
Proof. induction a => //.
move => b c EQ. rewrite -addn1 -!addnA !add1n. apply IHa. by apply fromNat_succn.
Qed.
Lemma toZp_fromNat n m : toZp (fromNat (n:=n.+1) m) = (m%:R)%R.
Proof. apply val_inj.
rewrite /toZp toNat_fromNat Zp_nat.
rewrite /=Zp_cast; last apply pow2_gt1.
by rewrite modn_mod.
Qed.
Lemma toZpAux_fromNat n c : toZpAux (m:=n.+1) (fromNat (n:=n.+1) c) = (c%:R)%R.
Proof. apply val_inj.
rewrite /toZpAux toNat_fromNat Zp_nat.
rewrite /=Zp_cast; last apply pow2_gt1.
by rewrite modn_mod.
Qed.
Hint Rewrite toZp_fromNat toZpAux_fromNat : ZpHom.
Lemma toNat_droplsb n (p: BITS n.+1) : toNat (droplsb p) = (toNat p)./2.
Proof. case/tupleP: p => [b p]. rewrite /droplsb/splitlsb beheadCons theadCons.
by rewrite toNatCons/= half_bit_double.
Qed.
Lemma toNatCat m n (p : BITS m) (q: BITS n) : toNat (p ## q) = toNat p * 2^n + toNat q.
Proof. induction n. rewrite (tuple0 q). by rewrite expn0 muln1.
case/tupleP: q => [b q].
unfold "##". rewrite catCons. rewrite !toNatCons. unfold "##" in IHn.
rewrite IHn. rewrite expnS. rewrite -!muln2. ring.
Qed.
(*---------------------------------------------------------------------------
Properties of conversion to and from 'Z_(2^n)
---------------------------------------------------------------------------*)
(* This only holds for n.+1 because 'Z_1 actually has two elements - it's
definitionally the same as 'Z_2 in order to force a ring structure. See zmodp
for more details *)
Lemma fromZpK n : cancel (@fromZp n.+1) (@toZp n.+1).
Proof.
move => x. rewrite /toZp/fromZp. rewrite toNat_fromNat modn_small. apply valZpK.
destruct x. simpl. rewrite Zp_cast in i => //.
apply pow2_gt1.
Qed.
Lemma toZpK n : cancel (@toZp n) (@fromZp n).
Proof. case E: (n == 0).
+ rewrite /cancel. rewrite (eqP E). move => x. apply trivialBits.
+ move => x. rewrite /fromZp/toZp/=.
rewrite Zp_cast. by rewrite (modn_small (toNatBounded _)) toNatK.
apply negbT in E. destruct n => //. apply pow2_gt1.
Qed.
Lemma toZp_inj n : injective (@toZp n).
Proof. apply (can_inj (@toZpK _)). Qed.
Lemma fromZp_inj n : injective (@fromZp n.+1).
Proof. apply (can_inj (@fromZpK _)). Qed.
Lemma toZp_eq n (x y: BITS n) : (x == y) = (toZp x == toZp y).
Proof. destruct n. by rewrite (tuple0 x) (tuple0 y).
case E: (toZp x == toZp y).
rewrite (toZp_inj (eqP E)). by rewrite eq_refl.
apply (contraFF (b:=false)) => // => H.
rewrite (eqP H) (eq_refl) in E. done.
Qed.
Corollary toZp_neq n (x y: BITS n) : (x != y) = (toZp x != toZp y).
Proof. by rewrite toZp_eq. Qed.
(*---------------------------------------------------------------------------
Properties of bit get and set
---------------------------------------------------------------------------*)
Lemma setBitThenGetSame n : forall (p: BITS n) i b, i<n -> getBit (setBit p i b) i = b.
Proof.
induction n => //.
case/tupleP => [b' p]. move => i b LT.
destruct i => //.
simpl. rewrite theadCons beheadCons. assert (LT' : i < n) by done.
rewrite /getBit/=. apply IHn; done.
Qed.
Lemma setBitThenGetDistinct n :
forall (p: BITS n) i i' b, i<n -> i'<n -> i<>i' -> getBit (setBit p i b) i' = getBit p i'.
Proof.
induction n => //.
case/tupleP => [b' p]. move => i i' b LT LT' NEQ.
destruct i.
(* i = 0 *) simpl. rewrite beheadCons. destruct i' => //.
(* i <> 0 *)
destruct i' => //.
rewrite /= theadCons beheadCons /getBit/=.
assert (lt : i < n) by done.
assert (lt' : i' < n) by done.
assert (neq' : i <> i') by intuition.
specialize (IHn p _ _ b lt lt' neq'). apply IHn.
Qed.
(*---------------------------------------------------------------------------
Properties of all zeroes and all ones
---------------------------------------------------------------------------*)
Lemma fromNat0 n : #0 = zero n.
Proof. induction n; first apply trivialBits.
+ rewrite /zero /copy. rewrite /zero /copy in IHn. by rewrite /fromNat-/fromNat IHn nseqCons.
Qed.
Lemma toNat_zero n : toNat (zero n) = 0.
Proof. induction n => //. rewrite /toNat/=. rewrite /toNat in IHn. by rewrite IHn. Qed.
Corollary toNat_fromNat0 n : @toNat n #0 = 0.
Proof. by rewrite fromNat0 toNat_zero. Qed.
Lemma msb_zero n : msb (zero n) = false.
Proof. by induction n. Qed.
Lemma toNat_ones_succ n : (toNat (ones n)).+1 = 2^n.
Proof. induction n => //.
rewrite /toNat/=. rewrite /toNat/= in IHn.
by rewrite expnS mul2n addnC addn1 -doubleS IHn.
Qed.
Corollary toNat_ones n : toNat (ones n) = (2^n).-1.
Proof. by rewrite -toNat_ones_succ succnK. Qed.
Lemma msb_ones n : msb (ones n.+1) = true.
Proof. by induction n. Qed.
Lemma toZp_zero n : toZp (zero n) = 0%R.
Proof. rewrite /toZp toNat_zero. by apply val_inj. Qed.
Lemma toZpAux_zero m n : toZpAux (m:=m) (zero n) = 0%R.
Proof. rewrite /toZpAux toNat_zero. by apply val_inj. Qed.
Lemma toZp_ones n : toZp (ones n.+1) = (-1)%R.
Proof. rewrite /toZp toNat_ones. apply val_inj.
rewrite /= Zp_cast; last apply pow2_gt1.
rewrite -subn1. replace (1 %% 2^n.+1) with 1 => //.
by rewrite modn_small; last apply pow2_gt1.
Qed.
Hint Rewrite toZpK fromZpK toZp_zero toZpAux_zero toZp_ones : ZpHom.
(*---------------------------------------------------------------------------
Properties of joinmsb and splitmsb
---------------------------------------------------------------------------*)
Lemma toNat_joinmsb n : forall c (p: BITS n), toNat (joinmsb (c, p)) = c * 2^n + toNat p.
Proof. induction n.
+ move => c p. by rewrite /joinmsb (tuple0 p) expn0 muln1.
+ move => c. case/tupleP => [b p].
rewrite /joinmsb-/joinmsb /splitlsb theadCons beheadCons !toNatCons expnS IHn.
by rewrite doubleD addnCA -mul2n mulnCA.
Qed.
Lemma toNat_joinmsb0 n (p: BITS n) : toNat (joinmsb0 p) = toNat p.
Proof. by rewrite toNat_joinmsb. Qed.
Lemma splitmsb_fromNat n :
forall m, splitmsb (n:=n) (fromNat m) = (odd (m %/ 2^n), fromNat m).
Proof. induction n => m.
+ by rewrite /dropmsb/=beheadCons!theadCons expn0 divn1.
+ rewrite expnS. rewrite /fromNat-/fromNat/=.
rewrite /joinlsb !beheadCons!theadCons fromNatHalf. specialize (IHn m./2). rewrite IHn.
by rewrite -divn2 -divnMA.
Qed.
Corollary dropmsb_fromNat n m : dropmsb (n:=n) (fromNat m) = (fromNat m).
Proof. by rewrite /dropmsb splitmsb_fromNat. Qed.
Corollary toNat_dropmsb n (p: BITS n.+1) : toNat (dropmsb p) = toNat p %% 2^n.
Proof. rewrite -{1}(toNatK p). rewrite dropmsb_fromNat. by rewrite toNat_fromNat. Qed.
Lemma toZp_joinmsb0 n (p: BITS n) : toZp (joinmsb0 p) = toZpAux p.
Proof. apply val_inj.
rewrite /toZp/toZpAux/= Zp_cast; last apply pow2_gt1.
by rewrite toNat_joinmsb0.
Qed.
Lemma toZp_dropmsb n (p: BITS n.+2) : toZp (n:=n.+1) (dropmsb p) = toZpAux (m:=n.+1) p.
Proof.
apply val_inj.
rewrite /toZp/toZpAux/= Zp_cast; last apply pow2_gt1.
rewrite toNat_dropmsb.
by rewrite modn_mod.
Qed.
Hint Rewrite toZp_joinmsb0 toZp_dropmsb : ZpHom.
Lemma splitmsbK n : cancel (@splitmsb n) (@joinmsb n).
Proof. induction n.
+ case/tupleP => [b p]. by rewrite (tuple0 p).
+ case/tupleP => [b p]. rewrite /= beheadCons theadCons. specialize (IHn p).
case E: (splitmsb p) => [b' p'].
rewrite beheadCons theadCons.
rewrite E in IHn. by rewrite IHn.
Qed.
Lemma joinmsbK n : cancel (@joinmsb n) (@splitmsb n).
Proof. induction n.
+ move => [b p]. by rewrite !(tuple0 p) /= theadCons beheadCons.
+ move => [c p]. case/tupleP: p => [b p].
by rewrite /= !theadCons !beheadCons IHn.
Qed.
Corollary dropmsb_joinmsb n b (p:BITS n) : dropmsb (joinmsb (b, p)) = p.
Proof. by rewrite /dropmsb joinmsbK. Qed.
Lemma splitlsbK n : cancel (@splitlsb n) (@joinlsb n).
Proof. case/tupleP => [b p]. by rewrite /splitlsb beheadCons theadCons. Qed.
Lemma joinlsbK n : cancel (@joinlsb n) (@splitlsb n).
Proof. move => [p b]. by rewrite /joinlsb /splitlsb beheadCons theadCons. Qed.
Lemma toNat_joinlsb n (p:BITS n) b : toNat (joinlsb (p, b)) = b + (toNat p).*2.
Proof. done. Qed.
(* Totally ridiculous proof *)
Lemma splitmsb_rev n : forall (b: BITS n.+1) hi (lo:BITS n),
splitmsb b = (hi,lo) -> rev b = hi::rev lo.
Proof. induction n => b hi lo/=.
+ move => [<- <-] {lo}/=. case/tupleP:b => [b u]//=. by rewrite tuple0/=.
+ move => H.
specialize (IHn (behead_tuple b) hi).
destruct (splitmsb (behead_tuple b)).
injection H => [H1 H2] {H}. rewrite H2 {H2} in IHn.
specialize (IHn b1 refl_equal). rewrite -H1/=.
case/tupleP E: b => [b' u]/=. rewrite E/= in IHn.
by rewrite 2!rev_cons IHn rcons_cons.
Qed.
(*---------------------------------------------------------------------------
Properties of concatenation and splitting of bit strings
---------------------------------------------------------------------------*)
Lemma high_catB n2 n1 (p:BITS n1) (q:BITS n2) : high n1 (p ## q) = p.
Proof. induction n2.
- rewrite /high (tuple0 q). by apply catNil.
- case/tupleP: q => x q. rewrite /catB catCons /= beheadCons. apply IHn2.
Qed.
Lemma low_catB n2 n1 (p:BITS n1) (q:BITS n2) : low n2 (p ## q) = q.
Proof. induction n2; first apply trivialBits.
case/tupleP: q => x q. rewrite /catB catCons /= beheadCons. by rewrite IHn2.
Qed.
Lemma low_fromNat n2 n1: forall m, low n2 (fromNat (n:=n2+n1) m) = fromNat (n:=n2) m.
Proof. induction n2 => m //. by rewrite /= /joinlsb !beheadCons !theadCons/= IHn2. Qed.
Lemma split2eta : forall n2 n1 p, let (p1,p2) := split2 n1 n2 p in p = p1 ## p2.
Proof. unfold split2. induction n2.
- move =>n1 p. by rewrite /catB catNil.
- move => n1. case/tupleP => x p. rewrite /= (IHn2 n1 p).
rewrite beheadCons theadCons high_catB low_catB. by rewrite /catB catCons. Qed.
Lemma split2app n2 n1 p1 p2 : split2 n1 n2 (p1 ## p2) = (p1,p2).
Proof. by rewrite /split2 high_catB low_catB. Qed.
Lemma split3app n3 n2 n1 p1 p2 p3 : split3 n1 n2 n3 (p1 ## p2 ## p3) = (p1,p2,p3).
Proof. by rewrite /split3 !split2app. Qed.
Lemma split4app n4 n3 n2 n1 p1 p2 p3 p4 :
split4 n1 n2 n3 n4 (p1 ## p2 ## p3 ## p4) = (p1,p2,p3,p4).
Proof. by rewrite /split4 !split2app. Qed.
Lemma split3eta n3 n2 n1 p: match split3 n1 n2 n3 p with (p1,p2,p3) => p1 ## p2 ## p3 end = p. Proof. rewrite /split3 /=. by rewrite -!split2eta. Qed.
Lemma split4eta n4 n3 n2 n1 p:
match split4 n1 n2 n3 n4 p with (p1,p2,p3,p4) => p1 ## p2 ## p3 ## p4 end = p.
Proof. rewrite /split4 /=. by rewrite -!split2eta. Qed.
Lemma split4eta' n4 n3 n2 n1 p:
let: (p1,p2,p3,p4) := split4 n1 n2 n3 n4 p in p1 ## p2 ## p3 ## p4 = p.
Proof. rewrite /split4 /=. by rewrite -!split2eta. Qed.
Lemma catB_inj n1 n2 (p1 q1: BITS n1) (p2 q2: BITS n2) :
p1 ## p2 = q1 ## q2 -> p1 = q1 /\ p2 = q2.
Proof.
move => EQ.
have H1 := high_catB p1 p2.
have H2 := high_catB q1 q2.
have L1 := low_catB p1 p2.
have L2 := low_catB q1 q2.
split. by rewrite -H1 -H2 EQ.
by rewrite -L1 -L2 EQ.
Qed.
Lemma toNat_low n1 n2 (p: BITS (n1+n2)) : toNat (low n1 p) = toNat p %% 2^n1.
Proof. by rewrite -{1}(toNatK p) low_fromNat toNat_fromNat. Qed.
(*---------------------------------------------------------------------------
Zero and sign extension
---------------------------------------------------------------------------*)
Lemma signExtendK extra n : pcancel (@signExtend extra n) (signTruncate extra).
Proof. move => p. rewrite /signExtend /signTruncate split2app.
case: (msb p).
+ by rewrite /ones eq_refl.
+ by rewrite /zero eq_refl.
Qed.
Lemma signTruncateK extra n p q :
signTruncate extra (n:=n) p = Some q ->
signExtend extra (n:=n) q = p.
Proof. rewrite /signTruncate/signExtend.
rewrite (split2eta p) split2app.
case P: (_ || _) => // H.
have EQ: low n.+1 p = q by congruence. subst.
case M: (msb _).
+ rewrite M andTb andFb orbF in P. by rewrite (eqP P).
+ rewrite M andTb andFb orFb in P. by rewrite (eqP P).
Qed.
Lemma zeroExtendK extra n : pcancel (@zeroExtend extra n) (zeroTruncate extra).
Proof. move => p. by rewrite /zeroExtend/zeroTruncate split2app eq_refl. Qed.
Lemma zeroTruncateK extra n p q :
zeroTruncate extra (n:=n) p = Some q ->
zeroExtend extra (n:=n) q = p.
Proof. rewrite /zeroTruncate/zeroExtend.
rewrite (split2eta p) split2app.
case P: (high extra p == zero extra) => // H.
have EQ: low n p = q by congruence. subst.
by rewrite (eqP P).
Qed.
Lemma toNat_zeroExtend extra n (p: BITS n) : toNat (zeroExtend extra p) = toNat p.
Proof. rewrite /zeroExtend. rewrite toNatCat. by rewrite toNat_zero. Qed.
Lemma toNat_zeroExtendAux extra n (p: BITS n) : toNat (zeroExtendAux extra p) = toNat p.
Proof. induction extra => //. by rewrite /= toNat_joinmsb0 IHextra. Qed.
(*---------------------------------------------------------------------------
Properties of equality
---------------------------------------------------------------------------*)
Lemma iffBool (b1 b2:bool) : (b1 <-> b2) -> b1==b2.
Proof. destruct b1; destruct b2; intuition. Qed.
Lemma bitsEq_nat n {b1 b2: BITS n} : (b1 == b2) = (toNat b1 == toNat b2).
Proof. suff: b1 == b2 <-> (toNat b1 == toNat b2).
move => H. assert (H' := iffBool H). apply (eqP H').
split. move => H. rewrite (eqP H). done.
move => H. assert (EQ:toNat b1 = toNat b2) by apply (eqP H). by rewrite (toNat_inj EQ).
Qed.
|
{"author": "jbj", "repo": "x86proved", "sha": "d314fa6d23c064a2be4bf686ac7da16a591fda01", "save_path": "github-repos/coq/jbj-x86proved", "path": "github-repos/coq/jbj-x86proved/x86proved-d314fa6d23c064a2be4bf686ac7da16a591fda01/src/bitsprops.v"}
|
import numpy as np
import math
from pdb import set_trace
class Landmark():
def __init__(self, id, x, y):
self.id = id
self.x = x
self.y = y
class OdometryData():
def __init__(self, r1, t, r2):
self.r1 = float(r1)
self.t = float(t)
self.r2 = float(r2)
class SensorData():
def __init__(self, id, range, bearing):
self.id = int(id)
self.range = float(range)
self.bearing = float(bearing)
class Data():
def __init__(self):
self.odometry_data = None
self.sensor_data = []
def get_odometry_data(self, data):
self.odometry_data = OdometryData(data[1], data[2], data[3])
def get_sensor_data(self, data):
self.sensor_data.append(SensorData(data[1], data[2], data[3]))
def read_world(filename="data/world.dat"):
data = np.loadtxt(filename)
landmarks = []
for d in data:
landmarks.append(Landmark(int(d[0]), d[1], d[2]))
return landmarks
def read_data(filename="data/sensor_data.dat"):
timesteps = []
with open(filename) as fn:
data = fn.readlines()
for d in data:
txt = d.split()
if txt[0] == "ODOMETRY":
timesteps.append(Data())
timesteps[-1].get_odometry_data(txt)
else:
timesteps[-1].get_sensor_data(txt)
return timesteps
def normalize_angle(phi):
    # wrap phi to [-pi, pi]; a full turn (2*pi) must be added/subtracted,
    # otherwise the angle would be mirrored instead of normalized
    while phi > math.pi:
        phi -= 2 * math.pi
    while phi < -math.pi:
        phi += 2 * math.pi
    return phi
def main():
timesteps = read_data()
print(timesteps[0].odometry_data.r1)
set_trace()
if __name__ == "__main__":
main()
|
{"hexsha": "5270384a9c2ac42561a5fdd08eacdff42e515bf5", "size": 1646, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/utils.py", "max_stars_repo_name": "BolunDai0216/EKFSLAM", "max_stars_repo_head_hexsha": "04c9213ce86cb8e2f2a1781950f08d8d2e755b39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-03T08:33:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-18T00:09:32.000Z", "max_issues_repo_path": "python/utils.py", "max_issues_repo_name": "Abednego97/EKFSLAM", "max_issues_repo_head_hexsha": "04c9213ce86cb8e2f2a1781950f08d8d2e755b39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/utils.py", "max_forks_repo_name": "Abednego97/EKFSLAM", "max_forks_repo_head_hexsha": "04c9213ce86cb8e2f2a1781950f08d8d2e755b39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-25T17:03:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-25T02:23:50.000Z", "avg_line_length": 21.3766233766, "max_line_length": 70, "alphanum_fraction": 0.5874848117, "include": true, "reason": "import numpy", "num_tokens": 432}
|
import pandas as pd
import benchmarks as bm
import cuckoo_search as cs
import particle_swarm_opt as pso
from scipy.stats import ranksums
def cs_tune(opt_func):
lambda_ = [1.1, 1.5, 2, 2.5, 3]
step_size = [0.01, 0.5, 1]
print('| λ | α | Resultado |')
print('|-----|------|-----------|')
for l in lambda_:
cs.LAMBDA = l
for s in step_size:
cs.STEP_SIZE = s
cs.cuckoo_search(opt_func, verbose=False)
data = pd.read_csv('results/results_cs.csv', header=None)
final_mean = data[len(data.columns) - 1].mean()
print(f'|{l:4} |{s:5} | {round(final_mean, 4)} |')
def pso_tune(opt_func):
omega = [0.5, 0.6, 0.7]
phi_g = [0.25, 0.5, 0.75, 1]
phi_p = [0.1, 0.25, 0.5, 0.75, 1]
print('| ω | φ₁ | φ₂ | Resultado |')
print('|-----|------|------|-----------|')
for o in omega:
pso.OMEGA = o
for pg in phi_g:
pso.PHI_G = pg
for pp in phi_p:
pso.PHI_P = pp
pso.particle_swarm_opt(opt_func, verbose=False)
data = pd.read_csv('results/results_pso.csv', header=None)
final_mean = data[len(data.columns) - 1].mean()
print(f'|{o:4} |{pg:5} | {pp:5} |{round(final_mean, 4)} |')
def wilcoxon(opt_func, population_size, dimensions):
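    # Run both optimizers with the same population size and dimensionality and
    # compare their final-iteration results with the Wilcoxon rank-sum test.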
cs.POPULATION_SIZE = population_size
pso.POPULATION_SIZE = population_size
cs.DIMENSIONS = dimensions
pso.DIMENSIONS = dimensions
cs.cuckoo_search(opt_func, verbose=False)
pso.particle_swarm_opt(opt_func, verbose=False)
data_cs = pd.read_csv('results/results_cs.csv', header=None)
last_iter_cs = data_cs[len(data_cs.columns) - 1].to_list()
data_pso = pd.read_csv('results/results_pso.csv', header=None)
last_iter_pso = data_pso[len(data_pso.columns) - 1].to_list()
print(
f'{opt_func.func.__name__} Dimensions: {dimensions}',
f'Population: {population_size}',
ranksums(last_iter_cs, last_iter_pso)
)
if __name__ == "__main__":
# cs_tune(bm.schwefel)
# print('\n\n')
# cs_tune(bm.function_3)
# pso_tune(bm.schwefel)
# print('\n\n')
# pso_tune(bm.function_3)
# cs.cuckoo_search(bm.schwefel)
# pso.particle_swarm_opt(bm.schwefel)
wilcoxon(bm.schwefel, 100, 2)
wilcoxon(bm.function_3, 100, 2)
wilcoxon(bm.schwefel, 100, 10)
wilcoxon(bm.function_3, 100, 10)
|
{"hexsha": "d48781aa7850f3904ea9ac2a9744941759e6b75e", "size": 2448, "ext": "py", "lang": "Python", "max_stars_repo_path": "lab_5/lab_5.py", "max_stars_repo_name": "Fredy/UCSP-Bioinspirada", "max_stars_repo_head_hexsha": "4476ce5c493f47887b61aded12b17d28a1f896a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab_5/lab_5.py", "max_issues_repo_name": "Fredy/UCSP-Bioinspirada", "max_issues_repo_head_hexsha": "4476ce5c493f47887b61aded12b17d28a1f896a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab_5/lab_5.py", "max_forks_repo_name": "Fredy/UCSP-Bioinspirada", "max_forks_repo_head_hexsha": "4476ce5c493f47887b61aded12b17d28a1f896a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6, "max_line_length": 75, "alphanum_fraction": 0.5910947712, "include": true, "reason": "from scipy", "num_tokens": 798}
|
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
print('Loading data...')
# OISST.shape = (1830, 18400)
OISST = np.loadtxt('data/OISST_19811101-20161116.dat')
# PREC.shape = (1688, 9)
PREC = np.loadtxt('data/zones_Prec_weekly_19811101-20140228.dat')
X = OISST[:PREC.shape[0],:]
X = X.reshape(X.shape[0], 80, 230)
Y = PREC
# expected input data shape: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True,
input_shape=(80, 230))) # returns a sequence of vectors of dimension 32
model.add(LSTM(32, return_sequences=True)) # returns a sequence of vectors of dimension 32
model.add(LSTM(32)) # return a single vector of dimension 32
model.add(Dense(9, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X, Y,
          batch_size=64, epochs=5)
|
{"hexsha": "e6f82994f63306157b7b6f16e0574c7ea7b2fc48", "size": 957, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/keras_LSTM_test.py", "max_stars_repo_name": "MajorChina/CPOP", "max_stars_repo_head_hexsha": "03b8056bb3ec6c07500a3ac4fc07cdc495ae8052", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-31T15:58:10.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-31T15:58:10.000Z", "max_issues_repo_path": "test/keras_LSTM_test.py", "max_issues_repo_name": "Meelfy/CPOP", "max_issues_repo_head_hexsha": "03b8056bb3ec6c07500a3ac4fc07cdc495ae8052", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-10-18T08:33:25.000Z", "max_issues_repo_issues_event_max_datetime": "2016-12-04T09:23:15.000Z", "max_forks_repo_path": "test/keras_LSTM_test.py", "max_forks_repo_name": "Meelfy/CPOP", "max_forks_repo_head_hexsha": "03b8056bb3ec6c07500a3ac4fc07cdc495ae8052", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-01-09T09:14:13.000Z", "max_forks_repo_forks_event_max_datetime": "2017-01-09T09:14:13.000Z", "avg_line_length": 27.3428571429, "max_line_length": 91, "alphanum_fraction": 0.696969697, "include": true, "reason": "import numpy", "num_tokens": 264}
|
import unittest
import copy
import tensorbackends
import ctf
import numpy as np
from scipy.fft import fft
from tensorbackends.utils import test_with_backend
from koala import Observable, candecomp, Gate, tensors
from experiments.qft import qft_candecomp
@test_with_backend()
class CanonicalDecomp(unittest.TestCase):
def test_apply_rank_one_gate(self, backend):
qstate = candecomp.random(nsite=4, rank=5, backend=backend)
init_factors = copy.deepcopy(qstate.factors)
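        # a single-qubit gate on site 0 acts on the CP state by right-multiplying
        # factor matrix 0 by the gate, leaving all other factors intact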
qstate.apply_circuit([Gate('H', [], [0])])
diff = qstate.factors[0] - init_factors[0] @ backend.astensor(
tensors.H())
self.assertTrue(
np.isclose(backend.einsum("ab,ab->", diff, diff.conj()), 0.))
def test_qft_with_full_rank(self, backend):
nsite = 8 # maximum 14
debug = False
tb = tensorbackends.get(backend)
qstate = candecomp.random(nsite=nsite, rank=1, backend=backend)
statevector = qstate.get_statevector()
qft_candecomp(qstate, debug=debug)
out_statevector = qstate.get_statevector()
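        # reference value: this test treats the QFT as the unitary
        # (norm="ortho") DFT of the amplitude vector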
if isinstance(statevector.unwrap(), np.ndarray):
out_true = tb.astensor(fft(statevector.ravel(), norm="ortho"))
elif isinstance(statevector.unwrap(), ctf.core.tensor):
out_true = tb.astensor(
fft(statevector.ravel().to_nparray(), norm="ortho"))
self.assertTrue(
np.isclose(tb.norm(out_statevector.ravel() - out_true), 0.))
|
{"hexsha": "4c27f2eb8d78fb38a04cd2f54873bcfdddf65cc4", "size": 1492, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_candecomp.py", "max_stars_repo_name": "LinjianMa/koala", "max_stars_repo_head_hexsha": "a366af5d109cbcec820827702a4ef7cb0a3a02f3", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_candecomp.py", "max_issues_repo_name": "LinjianMa/koala", "max_issues_repo_head_hexsha": "a366af5d109cbcec820827702a4ef7cb0a3a02f3", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_candecomp.py", "max_forks_repo_name": "LinjianMa/koala", "max_forks_repo_head_hexsha": "a366af5d109cbcec820827702a4ef7cb0a3a02f3", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6976744186, "max_line_length": 74, "alphanum_fraction": 0.668230563, "include": true, "reason": "import numpy,from scipy", "num_tokens": 365}
|
#redirect wiki:woodland:ruby tuesday
|
{"hexsha": "45237b33ebe3872fad3aa20be2ec257ca4ba32c9", "size": 37, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Ruby_Tuesday.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Ruby_Tuesday.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Ruby_Tuesday.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.5, "max_line_length": 36, "alphanum_fraction": 0.8378378378, "num_tokens": 11}
|
subroutine ch_noqual
!! ~ ~ ~ PURPOSE ~ ~ ~
!! this subroutine performs in-stream nutrient calculations. No transformations
!! are calculated. New concentrations of the nutrients are calculated based
!! on the loading to the reach from upstream.
!! ~ ~ ~ INCOMING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ai0 |ug chla/mg alg|ratio of chlorophyll-a to algal biomass
!! algae(:) |mg alg/L |algal biomass concentration in reach
!! ammonian(:) |mg N/L |ammonia concentration in reach
!! disolvp(:) |mg P/L |dissolved phosphorus concentration in reach
!! nitraten(:) |mg N/L |nitrate concentration in reach
!! nitriten(:) |mg N/L |nitrite concentration in reach
!! organicn(:) |mg N/L |organic nitrogen concentration in reach
!! organicp(:) |mg P/L |organic phosphorus concentration in reach
!! rch_cbod(:) |mg O2/L |carbonaceous biochemical oxygen demand in
!! |reach
!! rch_dox(:) |mg O2/L |dissolved oxygen concentration in reach
!! rchwtr |m^3 H2O |water stored in reach at beginning of day
!! rtwtr |m^3 H2O |flow out of reach
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ OUTGOING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! algae(:) |mg alg/L |algal biomass concentration in reach
!! ammonian(:) |mg N/L |ammonia concentration in reach
!! chlora(:) |mg chl-a/L |chlorophyll-a concentration in reach
!! disolvp(:) |mg P/L |dissolved phosphorus concentration in reach
!! nitraten(:) |mg N/L |nitrate concentration in reach
!! nitriten(:) |mg N/L |nitrite concentration in reach
!! organicn(:) |mg N/L |organic nitrogen concentration in reach
!! organicp(:) |mg P/L |organic phosphorus concentration in reach
!! rch_cbod(:) |mg O2/L |carbonaceous biochemical oxygen demand in
!! |reach
!! rch_dox(:) |mg O2/L |dissolved oxygen concentration in reach
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ LOCAL DEFINITIONS ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! algcon |mg alg/L |initial algal biomass concentration in reach
!! algin |mg alg/L |algal biomass concentration in inflow
!! ammoin |mg N/L |ammonium N concentration in inflow
!! cbodcon |mg/L |initial carbonaceous biological oxygen demand
!! |concentration in reach
!! cbodin |mg/L |carbonaceous biological oxygen demand
!! |concentration in inflow
!! chlin |mg chl-a/L |chlorophyll-a concentration in inflow
!! disoxin |mg O2/L |dissolved oxygen concentration in inflow
!! dispin |mg P/L |soluble P concentration in inflow
!! jrch |none |reach number
!! nh3con |mg N/L |initial ammonia concentration in reach
!! nitratin |mg N/L |nitrate concentration in inflow
!! nitritin |mg N/L |nitrite concentration in inflow
!! no2con |mg N/L |initial nitrite concentration in reach
!! no3con |mg N/L |initial nitrate concentration in reach
!! o2con |mg O2/L |initial dissolved oxygen concentration in
!! |reach
!! orgncon |mg N/L |initial organic N concentration in reach
!! orgnin |mg N/L |organic N concentration in inflow
!! orgpcon |mg P/L |initial organic P concentration in reach
!! orgpin |mg P/L |organic P concentration in inflow
!! solpcon |mg P/L |initial soluble P concentration in reach
!! wtrin |m^3 H2O |water flowing into reach on day
!! wtrtot |m^3 H2O |inflow + storage water
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ ~ ~ ~ END SPECIFICATIONS ~ ~ ~ ~ ~ ~
use channel_data_module
use channel_module
use hydrograph_module, only : ob, icmd, jrch
implicit none
real :: chlin !mg chl-a/L |chlorophyll-a concentration in inflow
real :: algin !mg alg/L |algal biomass concentration in inflow
real :: orgnin !mg N/L |organic N concentration in inflow
real :: ammoin !mg N/L |ammonium N concentration in inflow
real :: nitratin !mg N/L |nitrate concentration in inflow
real :: nitritin !mg N/L |nitrite concentration in inflow
real :: orgpin !mg P/L |organic P concentration in inflow
real :: dispin !mg P/L |soluble P concentration in inflow
real :: cbodin !mg/L |carbonaceous biological oxygen demand
! |concentration in inflow
real :: disoxin !mg O2/L |dissolved oxygen concentration in inflow
real :: algcon !mg alg/L |initial algal biomass concentration in reach
real :: orgncon !mg N/L |initial organic N concentration in reach
real :: nh3con !mg N/L |initial ammonia concentration in reach
real :: no2con !mg N/L |initial nitrite concentration in reach
real :: no3con !mg N/L |initial nitrate concentration in reach
real :: orgpcon !mg P/L |initial organic P concentration in reach
real :: solpcon !mg P/L |initial soluble P concentration in reach
real :: cbodcon !mg/L |initial carbonaceous biological oxygen demand
! |concentration in reach
real :: o2con !mg O2/L |initial dissolved oxygen concentration in
! |reach
real :: wtrtot !m^3 H2O |inflow + storage water
real :: cinn !mg N/L |effective available nitrogen concentration
      real :: rchwtr       !m^3 H2O       |water stored in reach at beginning of day
      real :: wtrin        !m^3 H2O       |water flowing into reach on day
!! initialize water flowing into reach
wtrin = 0.
wtrin = ob(icmd)%hd(1)%flo
if (rtwtr / 86400. > 0.01 .and. wtrin > 0.01) then
!! concentrations
!! initialize inflow concentrations
chlin = 0.
algin = 0.
orgnin = 0.
ammoin = 0.
nitritin = 0.
nitratin = 0.
orgpin = 0.
dispin = 0.
cbodin = 0.
disoxin = 0.
cinn = 0.
      if (ob(icmd)%hd(1)%chla < 1.e-6) ob(icmd)%hd(1)%chla = 0.0
chlin = 1000. * ob(icmd)%hd(1)%chla / wtrin
algin = 1000. * chlin / ch_nut(jnut)%ai0 !! QUAL2E equation III-1
orgnin = 1000. * ob(icmd)%hd(1)%orgn / wtrin
ammoin = 1000. * ob(icmd)%hd(1)%nh3 / wtrin
nitritin = 1000. * ob(icmd)%hd(1)%no2 / wtrin
nitratin = 1000. * ob(icmd)%hd(1)%no3 / wtrin
orgpin = 1000. * ob(icmd)%hd(1)%sedp / wtrin
dispin = 1000. * ob(icmd)%hd(1)%solp / wtrin
if (ob(icmd)%hd(1)%cbod < 1.e-6) ob(icmd)%hd(1)%cbod = 0.0
cbodin = 1000. * ob(icmd)%hd(1)%cbod / wtrin
if (ob(icmd)%hd(1)%dox < 1.e-6) ob(icmd)%hd(1)%dox = 0.0
disoxin= 1000. * ob(icmd)%hd(1)%dox / wtrin
!! initialize concentration of nutrient in reach
wtrtot = 0.
algcon = 0.
orgncon = 0.
nh3con = 0.
no2con = 0.
no3con = 0.
orgpcon = 0.
solpcon = 0.
cbodcon = 0.
o2con = 0.
if (ch(jrch)%algae < 1.e-6) ch(jrch)%algae = 0.0
if (ch(jrch)%organicn < 1.e-6) ch(jrch)%organicn = 0.0
if (ch(jrch)%ammonian < 1.e-6) ch(jrch)%ammonian = 0.0
if (ch(jrch)%nitriten < 1.e-6) ch(jrch)%nitriten = 0.0
if (ch(jrch)%nitraten < 1.e-6) ch(jrch)%nitraten = 0.0
if (ch(jrch)%organicp < 1.e-6) ch(jrch)%organicp = 0.0
if (ch(jrch)%disolvp < 1.e-6) ch(jrch)%disolvp = 0.0
if (ch(jrch)%rch_cbod < 1.e-6) ch(jrch)%rch_cbod = 0.0
if (ch(jrch)%rch_dox < 1.e-6) ch(jrch)%rch_dox = 0.0
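        !! concentrations at the start of the step are flow-weighted averages of
        !! inflow and stored water: conc = (conc_in*wtrin + conc_rch*rchwtr) / wtrtot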
wtrtot = wtrin + rchwtr
algcon =(algin * wtrin + ch(jrch)%algae * rchwtr) / wtrtot
orgncon = (orgnin * wtrin + ch(jrch)%organicn * rchwtr) /wtrtot
nh3con = (ammoin * wtrin + ch(jrch)%ammonian * rchwtr) / wtrtot
no2con = (nitritin * wtrin + ch(jrch)%nitriten * rchwtr) / wtrtot
no3con = (nitratin * wtrin + ch(jrch)%nitraten * rchwtr) / wtrtot
orgpcon =(orgpin * wtrin + ch(jrch)%organicp * rchwtr) / wtrtot
solpcon = (dispin * wtrin + ch(jrch)%disolvp * rchwtr) / wtrtot
cbodcon= (cbodin * wtrin + ch(jrch)%rch_cbod * rchwtr) / wtrtot
o2con = (disoxin * wtrin + ch(jrch)%rch_dox * rchwtr) / wtrtot
!! calculate algal biomass concentration at end of day
ch(jrch)%algae = 0.
ch(jrch)%algae = algcon
if (ch(jrch)%algae < 1.e-6) ch(jrch)%algae = 0.
!! calculate chlorophyll-a concentration at end of day
ch(jrch)%chlora = 0.
ch(jrch)%chlora = ch(jrch)%algae * ch_nut(jnut)%ai0 / 1000.
!! oxygen calculations
!! calculate carbonaceous biological oxygen demand at end
!! of day
ch(jrch)%rch_cbod = 0.
ch(jrch)%rch_cbod = cbodcon
if (ch(jrch)%rch_cbod < 1.e-6) ch(jrch)%rch_cbod = 0.
!! calculate dissolved oxygen concentration if reach at
!! end of day
ch(jrch)%rch_dox = 0.
ch(jrch)%rch_dox = o2con
if (ch(jrch)%rch_dox < 1.e-6) ch(jrch)%rch_dox = 0.
!! end oxygen calculations
!! nitrogen calculations
!! calculate organic N concentration at end of day
ch(jrch)%organicn = 0.
ch(jrch)%organicn = orgncon
if (ch(jrch)%organicn < 1.e-6) ch(jrch)%organicn = 0.
!! calculate ammonia nitrogen concentration at end of day
ch(jrch)%ammonian = 0.
ch(jrch)%ammonian = nh3con
if (ch(jrch)%ammonian < 1.e-6) ch(jrch)%ammonian = 0.
!! calculate concentration of nitrite at end of day
ch(jrch)%nitriten = 0.
ch(jrch)%nitriten = no2con
if (ch(jrch)%nitriten < 1.e-6) ch(jrch)%nitriten = 0.
!! calculate nitrate concentration at end of day
ch(jrch)%nitraten = 0.
ch(jrch)%nitraten = no3con
if (ch(jrch)%nitraten < 1.e-6) ch(jrch)%nitraten = 0.
!! end nitrogen calculations
!! phosphorus calculations
!! calculate organic phosphorus concentration at end of
!! day
ch(jrch)%organicp = 0.
ch(jrch)%organicp = orgpcon
if (ch(jrch)%organicp < 1.e-6) ch(jrch)%organicp = 0.
!! calculate dissolved phosphorus concentration at end
!! of day (mineral P)
ch(jrch)%disolvp = 0.
ch(jrch)%disolvp = solpcon
if (ch(jrch)%disolvp < 1.e-6) ch(jrch)%disolvp = 0.
!! end phosphorus calculations
else
!! all water quality variables set to zero when no flow
algin = 0.0
chlin = 0.0
orgnin = 0.0
ammoin = 0.0
nitritin = 0.0
nitratin = 0.0
orgpin = 0.0
dispin = 0.0
cbodin = 0.0
disoxin = 0.0
ch(jrch)%algae = 0.0
ch(jrch)%chlora = 0.0
ch(jrch)%organicn = 0.0
ch(jrch)%ammonian = 0.0
ch(jrch)%nitriten = 0.0
ch(jrch)%nitraten = 0.0
ch(jrch)%organicp = 0.0
ch(jrch)%disolvp = 0.0
ch(jrch)%rch_cbod = 0.0
ch(jrch)%rch_dox = 0.0
endif
return
end subroutine ch_noqual
|
{"hexsha": "2fdd84072df96d496c92ed2215ca9f5dc269ed51", "size": 12032, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "source_codes_60.5/ch_noqual.f90", "max_stars_repo_name": "ankwasa/wetlands_swatplus", "max_stars_repo_head_hexsha": "3cdf83cc6a4dc68ce4f53ce1d0ebacd7695b54cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source_codes_60.5/ch_noqual.f90", "max_issues_repo_name": "ankwasa/wetlands_swatplus", "max_issues_repo_head_hexsha": "3cdf83cc6a4dc68ce4f53ce1d0ebacd7695b54cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source_codes_60.5/ch_noqual.f90", "max_forks_repo_name": "ankwasa/wetlands_swatplus", "max_forks_repo_head_hexsha": "3cdf83cc6a4dc68ce4f53ce1d0ebacd7695b54cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.9362549801, "max_line_length": 87, "alphanum_fraction": 0.5342420213, "num_tokens": 3877}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 26 15:53:14 2021
@author: vader
"""
import imageio as io
import numpy as np
import torch.utils.data as data
import torch
import torch.nn as nn
import torch.optim as optim
from torchsummary import summary
from helper import *
from dataset import get_dataset
import cv2
dataset='HyRANK2'
checkpoint=None
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
leak=True
if dataset=='HyRANK1':
img,gt,Erato,Kirki,Nefeli=get_dataset(dataset)
train_gt,val_gt=sample_gt(gt, 0.8)
train_dataset = HY_dataset(img, train_gt)
train_loader = data.DataLoader(
train_dataset,
batch_size=4,
        pin_memory=(device.type == "cuda"),
shuffle=True
)
if leak==True:
gt1=io.imread('Erato_GT.tif')
gt2=io.imread('Kirki_GT.tif')
gt3=io.imread('Nefeli_GT.tif')
val1_dataset = HY_dataset(Erato, gt1)
val2_dataset = HY_dataset(Kirki, gt2)
val3_dataset = HY_dataset(Nefeli, gt3)
val_dataset=data.ConcatDataset([val1_dataset,val2_dataset,val3_dataset])
val_loader = data.DataLoader(
val_dataset,
batch_size=2048,
shuffle=True,
            pin_memory=(device.type == "cuda")
)
else:
val_dataset = HY_dataset(img, val_gt)
val_loader = data.DataLoader(
val_dataset,
batch_size=2048,
shuffle=True,
            pin_memory=(device.type == "cuda")
)
n_bands=img.shape[2]
n_classes=len(np.unique(gt))
model=KarankEtAl(n_bands,n_classes)
weights = torch.ones(n_classes)
weights = weights.to(device)
weights[torch.LongTensor([0])] = 0.
epochs=20
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.CrossEntropyLoss(weight=weights)
elif dataset=='HyRANK2':
img1,img2,img3,img4,img5,gt1,gt2,gt5=get_dataset(dataset)
train_gt1,val_gt1=sample_gt(gt1, 0.8)
train_gt2,val_gt2=sample_gt(gt2, 0.8)
train_gt5,val_gt5=sample_gt(gt5, 0.8)
train_dataset1 = HY_dataset(img1, train_gt1)
train_dataset2 = HY_dataset(img2, train_gt2)
train_dataset3 = HY_dataset(img5, train_gt5)
train_dataset=data.ConcatDataset([train_dataset1,train_dataset2,train_dataset3])
train_loader = data.DataLoader(
train_dataset,
batch_size=512,
        pin_memory=(device.type == "cuda"),
shuffle=True
)
if leak==True:
gt3=io.imread('GT3.tif')
gt4=io.imread('GT4.tif')
val1_dataset = HY_dataset(img3, gt3)
val2_dataset = HY_dataset(img4, gt4)
val_dataset=data.ConcatDataset([val1_dataset,val2_dataset])
val_loader = data.DataLoader(
val_dataset,
batch_size=2048,
shuffle=True,
            pin_memory=(device.type == "cuda")
)
else:
val1_dataset = HY_dataset(img1, val_gt1)
val2_dataset = HY_dataset(img2, val_gt2)
val3_dataset = HY_dataset(img5, val_gt5)
val_dataset=data.ConcatDataset([val1_dataset,val2_dataset,val3_dataset])
val_loader = data.DataLoader(
val_dataset,
batch_size=2048,
shuffle=True,
            pin_memory=(device.type == "cuda")
)
n_bands=img1.shape[2]
n_classes=18
model=KarankEtAl(n_bands,n_classes)
weights = torch.ones(n_classes)
weights = weights.to(device)
weights[torch.LongTensor([0])] = 0.
epochs=20
optimizer = optim.Adam(model.parameters(), lr=0.00001)
criterion = nn.CrossEntropyLoss(weight=weights)
with torch.no_grad():
for input, _ in train_loader:
break
summary(model.to(device), input.size()[1:])
if checkpoint is not None:
model.load_state_dict(torch.load(checkpoint))
try:
train(model,optimizer,criterion,train_loader,epoch=epochs,
dataset=dataset,val_loader=val_loader,device=device)
except KeyboardInterrupt:
# Allow the user to stop the training
pass
if dataset=='HyRANK1':
for img,name in [(img,'Train'),(Erato,'Erato'),(Kirki,'Kirki'),(Nefeli,'Nefeli')]:
top, bottom, left, right = [10]*4
img=cv2.copyMakeBorder(img,top,bottom,left,right,cv2.BORDER_CONSTANT,value=[0])
probabilities = test(model, img, n_classes)
prediction = np.argmax(probabilities, axis=-1)
prediction = prediction.astype(np.int8)
io.imsave(name+'NN'+'.tif', prediction)
elif dataset=='HyRANK2':
for img,name in [(img1,'image1'),(img2,'image2'),(img5,'image5'),(img3,'image3'),(img4,'image4')]:
top, bottom, left, right = [10]*4
img=cv2.copyMakeBorder(img,top,bottom,left,right,cv2.BORDER_CONSTANT,value=[0])
probabilities = test(model, img, n_classes)
prediction = np.argmax(probabilities, axis=-1)
prediction = prediction.astype(np.int8)
io.imsave(name+'NN'+'.tif', prediction)
|
{"hexsha": "d6f2b1a63a33000ce74c20765e3172e3d491d49e", "size": 5302, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "giorgosouz/HSI-classification-makantasis-cnn", "max_stars_repo_head_hexsha": "95f18274d7cb67babb971db71f358a73dee2affc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "giorgosouz/HSI-classification-makantasis-cnn", "max_issues_repo_head_hexsha": "95f18274d7cb67babb971db71f358a73dee2affc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "giorgosouz/HSI-classification-makantasis-cnn", "max_forks_repo_head_hexsha": "95f18274d7cb67babb971db71f358a73dee2affc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3684210526, "max_line_length": 102, "alphanum_fraction": 0.5926065636, "include": true, "reason": "import numpy", "num_tokens": 1292}
|
import os
from typing import Tuple, List, Dict
import numpy as np
import cv2
import random
import pandas as pd
"""
When an image has it's mask predicted,A user can then click on the save csv file and the mask (or multiple masks)
will have it's properties calculated and then saved to a csv file.
Additionally a user can select a mask or multiple masks and send them to the analyze page and in this page,
The user can choose:
1- The maximum area and/or minimum area to filter dimples.
2- To show the internal contour and/or external contours*.
3- Show the centroid in the image and calculate it
4- number of the bins to divide the area to.
This script in order to handle these options and print them out to a csv file.
*If the user chooses not to show internal and external contours then this means he want an empty black image.
"""
def find_max_area(image: np.ndarray) -> float:
"""
the function finds contours,calculates area for each contour and returns the maximum value.
:param image: image 2D array of contours.
:return: value of the contour with maximum area.
"""
contours, hierarchy = cv2.findContours(image, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
return max(list(map(lambda x: cv2.contourArea(x), contours)))
def random_color():
"""returns a tuple of three random integers between 0-255 in order to get a random color each time."""
return (random.randint(0, 255),
random.randint(0, 255), random.randint(0, 255))
def findIntervals(areas: list, num_of_bins: int) -> List[pd.Interval]:
"""
:param areas: a list of contour areas.
:param num_of_bins: the number of intervals.
:return: a list containing Intervals of class pd.Intervals, The number may be less than num_of_bins because
of duplicated intervals.
"""
intervals = list(dict(pd.qcut(areas, num_of_bins, duplicates="drop", precision=4).value_counts()).keys())
for i in range(intervals.__len__()):
intervals[i] = pd.Interval(round(intervals[i].left, 2), round(intervals[i].right, 2), closed="both")
return intervals
def fitToIntervals(areas: list, num_of_bins: int):
"""
fitting the area of each contour to the suitable interval.
:param areas: a list of contour areas.
:param num_of_bins: the number of intervals.
:return: a list containing each suitable interval for a given area.
"""
intervals = findIntervals(areas=areas, num_of_bins=num_of_bins)
interval_ranges = []
for area in areas:
interval_ranges.append(
str([interval for interval in intervals if area in interval][0]))
return interval_ranges
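# Example with illustrative values: fitToIntervals([1.0, 2.0, 3.0, 4.0], num_of_bins=2)
# returns ['[1.0, 2.5]', '[1.0, 2.5]', '[2.5, 4.0]', '[2.5, 4.0]'], since
# pd.qcut splits the four areas at their median.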
def saveAnalysisToCSV(image_analysis: Dict, file_name: str, path: str):
"""
Saving the given dictionary into a csv file with the given file name.
:param image_analysis: a dictionary containing the properties we wanted to analyze in a prediction.
    :param file_name: the name of the image; it is used for the name of the csv file, e.g. image_analysis.csv
:param path: folder path.
"""
df = pd.DataFrame(image_analysis)
if not os.path.exists(f"{path}/files/csv_files/"):
os.makedirs(f"{path}/files/csv_files/")
df = df.sort_values(by='area', ascending=False)
df.to_csv(f"{path}/files/csv_files/{file_name}_analysis.csv", index=False)
def saveImagesAnalysisToCSV(images_analysis: list, file_names: list, path: str):
"""
If given a list of dictionaries then we save each image analysis with the corresponding file name into csv files.
:param images_analysis: a list of dictionaries containing the properties we wanted to analyze in a prediction.
:param file_names: a list of file name that correspond to the given images analysis
:param path: folder path.
"""
for index in range(images_analysis.__len__()):
file_name = file_names[index].split('.')[0]
saveAnalysisToCSV(images_analysis[index], file_name, path)
def calc_centroid(cnt: np.ndarray) -> Tuple[int, int]:
"""
The function calculates the centroid of a given contour.
:param cnt: 3D numpy array of the contour shape.
:return cx,cy: the coordinates of the centroid.
"""
cx, cy = 0, 0
try:
moment = cv2.moments(cnt)
# Calculate centroid
cx = int(moment['m10'] / moment['m00'])
cy = int(moment['m01'] / moment['m00'])
except ZeroDivisionError:
print("There was a contour who had an area equal to 0")
return cx, cy
def calc_ratio(cnt: np.ndarray) -> Tuple[float, tuple]:
"""
The function calculates the centroid of a given contour.
:param cnt: 3D numpy array of the contour shape.
:return ratio: float, the ratio of the minor and major axes of an ellipse that fits the given contour
"""
ellipse = cv2.fitEllipse(cnt)
(_, _), (d1, d2), angle = ellipse
semi_major_axis = max(d1, d2)
semi_minor_axis = min(d1, d2)
return (semi_minor_axis / semi_major_axis), ellipse
def calc_depth(image: np.ndarray, cnt: np.ndarray) -> Tuple[float, float]:
    """
    calculating the depth of a dimple using the contour we obtained from the mask of the original image.
    :param image: 3D numpy array, the original BGR image; it is converted to gray scale inside the function.
    :param cnt: 3D numpy array, a contour.
    :return: tuple of two floats: the depth normalized by the local (per-contour) average intensity,
    followed by the depth normalized by the global average intensity of the image.
    """
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
global_avg = np.average(image)
img1 = np.zeros(image.shape, dtype=np.uint8)
img2 = img1.copy()
# drawing the contour which will basically give the edges of the object
cv2.drawContours(img1, [cnt], -1, 255, 2)
# drawing inside the edges
cv2.fillPoly(img2, [cnt], 255)
# adding the filled poly with the drawn contour gives bigger object that
# contains the both the edges and the insides of the object
img1 = img1 + img2
res = np.bitwise_and(img1, image)
# cropping the ROI (the object we want)
x, y, w, h = cv2.boundingRect(cnt)
# (de)increased values in order to get nonzero borders or lost pixels
# avoiding edges
x = x if x == 0 else x - 1
y = y if y == 0 else y - 1
h = (y + h + 1) if (y + h + 1) <= image.shape[0] else y + h
w = (x + w + 1) if (x + w + 1) <= image.shape[1] else x + w
# cropping the contour
res1 = res[y:h, x:w]
# taking the min(none-zero) and max value in order to calculate the delta
non_zero_res = res1[np.nonzero(res1)]
min_value = np.min(non_zero_res)
max_value = np.max(non_zero_res)
delta = abs(max_value - min_value)
# the avg non-zero value in the given contour and can be called
# the local average pixel because this is calculated per contour
local_avg = np.average(non_zero_res)
# returning the depths
return round(delta / local_avg, 5), round(delta / global_avg, 5)
def analyze(images: dict
, flags: dict
, min_max_values: dict
, original_images: dict
, num_of_bins=15
) -> Tuple[dict, dict]:
"""
    Analyzes black and white images, finds contours in these images,
    and calculates different properties for each contour.
:param images: contains values as np.ndarray which are the images and the keys are the image names.
:param flags: a dictionary that contains the following flags:<br>
1- show_ex_contours: boolean, if the user wants to quantify the external contours.<br>
2- show_in_contours: boolean, if the user wants to quantify the internal contours.<br>
3- calc_centroid: boolean, if the user wants to calculate the centroid.<br>
:param min_max_values: a dictionary that contains requested min and max value for each image.
:param num_of_bins: the number of intervals.
:param original_images: dictionary, where the key is the image name identical to that on in images dict, and value
is 3D numpy array.
:returns: two dictionaries, the first dictionary values are the drawn images based on the given options and <br>
the second dictionary values are image analysis for each given image,the keys are image names.
"""
assert images.__len__() != 0, "Function received empty images list."
images_analysis = {}
drawn_images = {}
# pixel_to_um = 5
images_names = list(images.keys())
for name in images_names:
contours, hierarchy = cv2.findContours(images[name], cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
drawn_image = np.zeros(images[name].shape + (3,), dtype=np.uint8)
image_analysis = {"contour_index": [], "contour_type": [], "area": [], "ratio": [], "local": [], "global": []}
if flags["calc_centroid"]:
image_analysis["centroid"] = []
image_analysis["interval_range"] = []
min_limit, max_limit = min_max_values[name]
for i in range(len(contours)):
cnt = contours[i]
hier = hierarchy[0][i][3]
cnt_area = cv2.contourArea(cnt)
cnt_color = random_color()
if min_limit < cnt_area < max_limit:
cnt_ratio, ellipse = calc_ratio(cnt)
cnt_local_depth, cnt_global_depth = calc_depth(image=original_images[name],
cnt=cnt)
if flags["show_in_contours"] and flags["show_ex_contours"]:
image_analysis["contour_index"].append(i)
cv2.drawContours(drawn_image, [cnt], -1, cnt_color, 2)
if hier == -1:
image_analysis["contour_type"].append("external")
else:
image_analysis["contour_type"].append("internal")
if flags["calc_centroid"]:
cx, cy = calc_centroid(cnt)
image_analysis["centroid"].append((cx, cy))
cv2.circle(
drawn_image, (cx, cy), radius=3,
color=cnt_color, thickness=-1)
if flags["show_ellipses"]:
cv2.ellipse(drawn_image, ellipse, (0, 0, 255), 3)
image_analysis["area"].append(cnt_area)
image_analysis["ratio"].append(cnt_ratio)
image_analysis["local"].append(cnt_local_depth)
image_analysis["global"].append(cnt_global_depth)
elif flags["show_in_contours"] and not flags["show_ex_contours"]:
if hier != -1:
image_analysis["contour_index"].append(i)
cv2.drawContours(drawn_image, [cnt], -1, cnt_color, 2)
image_analysis["contour_type"].append("internal")
if flags["calc_centroid"]:
cx, cy = calc_centroid(cnt)
image_analysis["centroid"].append((cx, cy))
cv2.circle(
drawn_image, (cx, cy), radius=3,
color=cnt_color, thickness=-1)
if flags["show_ellipses"]:
cv2.ellipse(drawn_image, ellipse, (0, 0, 255), 3)
image_analysis["area"].append(cnt_area)
image_analysis["ratio"].append(cnt_ratio)
image_analysis["local"].append(cnt_local_depth)
image_analysis["global"].append(cnt_global_depth)
elif flags["show_ex_contours"] and not flags["show_in_contours"]:
if hier == -1:
image_analysis["contour_index"].append(i)
cv2.drawContours(drawn_image, [cnt], -1, cnt_color, 2)
image_analysis["contour_type"].append("external")
if flags["calc_centroid"]:
cx, cy = calc_centroid(cnt)
image_analysis["centroid"].append((cx, cy))
cv2.circle(
drawn_image, (cx, cy), radius=3,
color=cnt_color, thickness=-1)
if flags["show_ellipses"]:
cv2.ellipse(drawn_image, ellipse, (0, 0, 255), 3)
image_analysis["area"].append(cnt_area)
image_analysis["ratio"].append(cnt_ratio)
image_analysis["local"].append(cnt_local_depth)
image_analysis["global"].append(cnt_global_depth)
else:
if flags["show_ellipses"]:
cv2.ellipse(drawn_image, ellipse, (0, 0, 255), 3)
if flags["show_in_contours"] or flags["show_ex_contours"]:
image_analysis["interval_range"] = fitToIntervals(areas=image_analysis["area"], num_of_bins=num_of_bins)
drawn_images[name] = drawn_image
images_analysis[name] = image_analysis
return drawn_images, images_analysis
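if __name__ == "__main__":
    # Minimal usage sketch of the pipeline above. The file names and the area
    # thresholds are hypothetical placeholders, not part of the project.
    mask = cv2.imread("mask.png", cv2.IMREAD_GRAYSCALE)
    original = cv2.imread("original.png")
    if mask is not None and original is not None:
        demo_flags = {"show_in_contours": True, "show_ex_contours": True,
                      "calc_centroid": True, "show_ellipses": False}
        drawn, analysis = analyze(images={"mask.png": mask},
                                  flags=demo_flags,
                                  min_max_values={"mask.png": (50.0, 5000.0)},
                                  original_images={"mask.png": original},
                                  num_of_bins=15)
        saveImagesAnalysisToCSV(list(analysis.values()), list(analysis.keys()),
                                path=".")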
|
{"hexsha": "394c1d44736a1c37a84fa5c914c3bfe24a7b6d02", "size": 13166, "ext": "py", "lang": "Python", "max_stars_repo_path": "BackEnd/analyze_dimples.py", "max_stars_repo_name": "Mohamab29/FracSegment", "max_stars_repo_head_hexsha": "2cca4e69a82ca3e6733d3b29e0e093fff5e55fe2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BackEnd/analyze_dimples.py", "max_issues_repo_name": "Mohamab29/FracSegment", "max_issues_repo_head_hexsha": "2cca4e69a82ca3e6733d3b29e0e093fff5e55fe2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BackEnd/analyze_dimples.py", "max_forks_repo_name": "Mohamab29/FracSegment", "max_forks_repo_head_hexsha": "2cca4e69a82ca3e6733d3b29e0e093fff5e55fe2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3092105263, "max_line_length": 118, "alphanum_fraction": 0.6232720644, "include": true, "reason": "import numpy", "num_tokens": 3012}
|
import numpy as np
"""
This model implements a muscle-tendon complex (MTC) for use in MuJoCo. The dynamics in this model are not
continuous, so integration error accumulates over time and the system may become unstable if the timestep is
too large. It is recommended to set the timestep below 5e-4 to get decent results.
The model is created based on Song's and Geyer's 2015 paper:
Song, S. and Geyer, H., 2015. A neural circuitry that emphasizes spinal feedback generates diverse behaviours of human
locomotion. The Journal of physiology, 593(16), pp.3493-3511.
V0.1
Passed basic tests. There are slight differences compared to the SimMechanics model.
V0.2
1. Verified against the SimMechanics model. Differences can be ignored in most cases.
2. Changed the integration method from forward Euler to trapezoidal.
3. Muscle force, vce, etc. might vibrate/jitter in some cases if the timestep is not low enough.
   This needs to be improved in the next version.
"""
class MuscleTendonComplex:
def __init__(self, paraMuscle, stateMuscle, paraMusAttach, offsetCorr, timestep, nameMuscle, angJoi):
self.frcmax, self.vmax, self.eref, self.lslack, self.lopt, self.tau, self.w, self.c, self.N, self.K = paraMuscle
self.stim, self.act, self.lmtc, self.lce, self.vce, self.frcmtc = stateMuscle
self.timestep = timestep
self.nameMuscle = nameMuscle
self.angJoi = angJoi
self.offsetCorr = offsetCorr
self.r, self.phiref, self.phimaxref, self.rho, self.dirAng, self.phiScale = paraMusAttach
self.MR = 0.01
self.typeMuscle = self.angJoi.size
nJoi = self.typeMuscle
self.levelArm = np.zeros(nJoi)
tmpL = np.zeros(nJoi)
for i in range(0, nJoi):
if self.offsetCorr[i] == 0:
tmpL[i] = self.dirAng[i] * (self.angJoi[i] - self.phiref[i]) * self.r[i] * self.rho[i]
self.levelArm[i] = self.r[i]
elif self.offsetCorr[i] == 1:
tmp1 = np.sin((self.phiref[i] - self.phimaxref[i]) * self.phiScale[i])
tmp2 = np.sin((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])
tmpL[i] = self.dirAng[i] * (tmp2 - tmp1) * self.r[i] * self.rho[i] / self.phiScale[i]
self.levelArm[i] = np.cos((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i]) * self.r[i]
else:
                raise ValueError('Invalid muscle lever arm offset correction type.')
self.lmtc = self.lslack + self.lopt + np.sum(tmpL)
self.lce = self.lmtc - self.lslack
self.lse = self.lmtc - self.lce
# unitless parameters
self.Lse = self.lse / self.lslack
self.Lce = self.lce / self.lopt
self.actsubstep = (self.stim - self.act) * self.timestep / 2.0 / self.tau + self.act
self.lcesubstep = self.vce * self.timestep / 2.0 + self.lce
        # running averages used for logging/diagnostics
self.lce_avg = self.lce
self.vce_avg = self.vce
self.frcmtc_avg = 0
self.act_avg = self.act
self.frame = 0
# self.Fse = 0.0
# self.Fbe = 0.0
# self.Fpe = 0.0
# self.Fce = 0.0
def stepUpdateState(self, angJoi):
"""
Muscle Tendon Complex Dynamics
update muscle states based on the muscle dynamics
Muscle state stim has to be updated outside before this function is called
"""
        # update lmtc and lever arm based on the geometry
self.angJoi = angJoi
nJoi = self.typeMuscle
tmpL = np.zeros(nJoi)
for i in range(0, nJoi):
if self.offsetCorr[i] == 0:
tmpL[i] = self.dirAng[i] * (self.angJoi[i] - self.phiref[i]) * self.r[i] * self.rho[i]
self.levelArm[i] = self.r[i]
elif self.offsetCorr[i] == 1:
tmp1 = np.sin((self.phiref[i] - self.phimaxref[i]) * self.phiScale[i])
tmp2 = np.sin((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])
tmpL[i] = self.dirAng[i] * (tmp2 - tmp1) * self.r[i] * self.rho[i] / self.phiScale[i]
self.levelArm[i] = np.cos((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i]) * self.r[i]
else:
                raise ValueError('Invalid muscle lever arm offset correction type.')
self.lmtc = self.lslack + self.lopt + np.sum(tmpL)
# update muscle activation
# integration, forward-Euler method
# self.act = (self.stim - self.act) * self.timestep / self.tau + self.act
# integration, trapezoidal method, 2-step
self.act = (self.stim - self.actsubstep) * self.timestep / 2.0 / self.tau + self.actsubstep
self.actsubstep = (self.stim - self.act) * self.timestep / 2.0 / self.tau + self.act
# update lce and lse based on the lmtc
# integration, forward-Euler method
# self.lce = self.vce * self.timestep + self.lce
# integration, trapezoidal method, 2-step
self.lce = self.vce * self.timestep / 2.0 + self.lcesubstep
self.lcesubstep = self.vce * self.timestep / 2.0 + self.lce
self.lse = self.lmtc - self.lce
self.Lse = self.lse / self.lslack
self.Lce = self.lce / self.lopt
# Serial Elastic element (tendon) force-length relationship
if self.Lse > 1.0:
Fse = np.power((self.Lse - 1.0) / self.eref, 2)
else:
Fse = 0.0
# Parallel Elasticity PE
if self.Lce > 1.0:
Fpe = np.power((self.Lce - 1.0) / self.w, 2)
else:
Fpe = 0.0
# update frcmtc
self.frcmtc = Fse * self.frcmax
#self.frcmtc = np.clip(self.frcmtc, 0, self.frcmax)
# Buffer Elasticity BE
if (self.Lce - (1.0 - self.w)) < 0:
Fbe = np.power((self.Lce - (1.0 - self.w)) / (self.w / 2), 2)
else:
Fbe = 0.0
# Contractile Element force-length relationship
tmp = np.power(np.absolute(self.Lce - 1.0) / self.w, 3)
Fce = np.exp(tmp * np.log(self.c))
#Fv = (Fse + Fbe) / (Fpe + Fce * self.act)
if (Fpe + Fce * self.act) < 1e-10: # avoid numerical error
if (Fse + Fbe) < 1e-10:
Fv = 1.0
else:
Fv = (Fse + Fbe) / 1e-10
else:
Fv = (Fse + Fbe) / (Fpe + Fce * self.act)
# Contractile Element inverse force-velocity relationship
if Fv <= 1.0:
# Concentric
v = (Fv - 1) / (Fv * self.K + 1.0)
elif Fv <= self.N:
            # eccentric
tmp = (Fv - self.N) / (self.N - 1.0)
v = (tmp + 1.0) / (1.0 - tmp * 7.56 * self.K)
else:
            # eccentric overshoot
v = ((Fv - self.N) * 0.01 + 1)
self.vce = v * self.lopt * self.vmax
v_frac = self.vce / self.vmax
mr_scale = self.act * np.absolute(self.frcmax*self.vmax) *self.timestep
if self.vce <= 1:
self.MR = 0.01 - 0.11*(v_frac) + 0.06*np.exp(-8*v_frac)
else:
self.MR = 0.23 - 0.16*np.exp(-8*v_frac)
self.MR *= mr_scale
self.frame += 1
self.lce_avg = (self.lce_avg*(self.frame - 1) + self.lce) / self.frame
self.vce_avg = (self.vce_avg*(self.frame - 1) + self.vce) / self.frame
self.frcmtc_avg = (self.frcmtc_avg*(self.frame - 1) + self.frcmtc) / self.frame
self.act_avg = (self.act_avg*(self.frame - 1) + self.act) / self.frame
#self.MR = np.exp(-self.MR)
# print(self.MR, np.exp(-self.MR))
# self.Fv = Fv
# self.Fse = Fse
# self.Fbe = Fbe
# self.Fpe = Fpe
# self.Fce = Fce
def reset_state(self):
self.frame = 0
self.lce_avg = 0
self.frcmtc_avg = 0
self.act_avg = 0
self.vce_avg = 0
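# Illustrative sketch (not used by the class): the two-step trapezoidal update
# applied above to the activation dynamics da/dt = (stim - act) / tau, written
# out in isolation so the scheme is easy to compare against forward Euler.
# All argument values below are placeholders.
def _trapezoid_activation_demo(stim=1.0, act=0.0, tau=0.01, dt=5e-4, steps=200):
    substep = (stim - act) * dt / 2.0 / tau + act  # half step, as in __init__
    for _ in range(steps):
        act = (stim - substep) * dt / 2.0 / tau + substep
        substep = (stim - act) * dt / 2.0 / tau + act
    return act  # converges towards stim once steps * dt >> tau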
|
{"hexsha": "4fda88e07a7d5763add5321ef0cfe0319e550f49", "size": 7802, "ext": "py", "lang": "Python", "max_stars_repo_path": "mushroom_rl/environments/mujoco_envs/humanoid_gait/_external_simulation/mtc_model.py", "max_stars_repo_name": "PuzeLiu/mushroom-rl", "max_stars_repo_head_hexsha": "99942b425e66b4ddcc26009d7105dde23841e95d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 344, "max_stars_repo_stars_event_min_datetime": "2020-01-10T09:45:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:48:28.000Z", "max_issues_repo_path": "mushroom_rl/environments/mujoco_envs/humanoid_gait/_external_simulation/mtc_model.py", "max_issues_repo_name": "AmmarFahmy/mushroom-rl", "max_issues_repo_head_hexsha": "2625ee7f64d5613b3b9fba00f0b7a39fece88ca5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2020-01-23T03:00:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T17:14:22.000Z", "max_forks_repo_path": "mushroom_rl/environments/mujoco_envs/humanoid_gait/_external_simulation/mtc_model.py", "max_forks_repo_name": "AmmarFahmy/mushroom-rl", "max_forks_repo_head_hexsha": "2625ee7f64d5613b3b9fba00f0b7a39fece88ca5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 93, "max_forks_repo_forks_event_min_datetime": "2020-01-10T21:17:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T17:58:52.000Z", "avg_line_length": 41.5, "max_line_length": 120, "alphanum_fraction": 0.5697257114, "include": true, "reason": "import numpy", "num_tokens": 2423}
|
import gsw
import mixsea as mx
import numpy as np
from munch import Munch
from tqdm import tqdm
import utils
dvn = Munch(
{
"time": "time",
"C": "C",
"SP": "S",
"t": "T",
"lon": "lon",
"lat": "lat",
"depth": "depth",
}
)
def generate_CTD_Munch_from_list(
dlist, vn=dvn, depth_min=1.0, depth_max=300.0, depth_spacing=1.0
):
"""Handles the case of a list of dictionary-like objects."""
ctdlist = []
for d in dlist:
ctd_ = generate_CTD_Munch(
d[vn.time],
d[vn.depth],
d[vn.lon],
d[vn.lat],
d[vn.SP],
d[vn.t],
depth_min,
depth_max,
depth_spacing,
)
ctdlist.append(ctd_)
# Stack all the ctds together
ctd = Munch()
ctd.time = np.concatenate([ctd_.time for ctd_ in ctdlist], axis=0)
ctd.depth = ctdlist[0].depth
ctd.lon = np.concatenate([ctd_.lon for ctd_ in ctdlist], axis=0)
ctd.lat = np.concatenate([ctd_.lat for ctd_ in ctdlist], axis=0)
ctd.SP = np.concatenate([ctd_.SP for ctd_ in ctdlist], axis=1)
ctd.t = np.concatenate([ctd_.t for ctd_ in ctdlist], axis=1)
return ctd
def generate_CTD_Munch(
time, depth, lon, lat, SP, t, depth_min=1.0, depth_max=300.0, depth_spacing=1.0
):
"""Quality control CTD quantities and place into a common Munch structure.
Assumes input data is 1D with size N or M, or 2D with size N*M,
where M denotes profiles and N depths.
Parameters
----------
time : numpy array
        Time as MATLAB datenum, size M.
depth : numpy array
Depth (m), size N.
lon : numpy array
Longitude, size M.
lat : numpy array
Latitude, size M.
SP : numpy array
Practical salinity, size N*M.
t : numpy array
Temperature, size N*M.
depth_min : float, optional
Minimum depth (m).
depth_max : float, optional
Maximum depth (m).
depth_spacing : float, optional
Depth spacing (m).
"""
use = np.isfinite(time)
# First remove data if there is no valid timestamp.
time = time[use]
SP = SP[:, use]
t = t[:, use]
lon = lon[use]
lat = lat[use]
# Interpolate to a common depth grid if necessary.
depth_ = np.arange(depth_min, depth_max + depth_spacing, depth_spacing)
size_equal = depth_.size == depth.size
if size_equal:
all_close = np.allclose(depth, depth_)
else:
all_close = False
if size_equal and all_close:
pass
else:
SP = utils.interp_fill_valid_2D(depth_, depth, SP)
t = utils.interp_fill_valid_2D(depth_, depth, t)
depth = depth_
ctd = Munch()
ctd.time = time
ctd.depth = depth
ctd.lon = lon
ctd.lat = lat
ctd.SP = SP
ctd.t = t
return ctd
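# `utils.interp_fill_valid_2D` lives in the accompanying utils module. As a
# point of reference only, a minimal sketch of such a column-wise interpolation
# is given below; the real helper may fill gaps or extrapolate differently, so
# treat this as an assumption, not the project's implementation.
def _interp_2D_sketch(depth_new, depth_old, var):
    """Linearly interpolate each profile (column) of var onto depth_new.

    Assumes depth_old is monotonically increasing.
    """
    out = np.full((depth_new.size, var.shape[1]), np.nan)
    for i in range(var.shape[1]):
        valid = np.isfinite(var[:, i])
        if valid.sum() > 1:  # need at least two points to interpolate
            out[:, i] = np.interp(depth_new, depth_old[valid], var[valid, i])
    return out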
def apply_thermodynamics(ctd):
ctd.p, ctd.SA, ctd.CT, ctd.sig0, p_mid, ctd.N2 = common_thermodynamics(
ctd.depth, ctd.lon, ctd.lat, ctd.SP, ctd.t
)
ctd.p_mid = p_mid[:, 0]
return ctd
def apply_adiabatic_level(ctd, bin_width=20.0):
ctd.N2_ref = adiabatic_level_2D(ctd.p, ctd.SP, ctd.t, ctd.lon, ctd.lat, bin_width)
return ctd
def common_thermodynamics(depth, lon, lat, SP, t):
"""Wrapper for various thermodynamic calculations.
Assumes input data is 1D with size N or M, or 2D with size N*M,
where M denotes profiles and N depths.
Parameters
----------
depth : numpy array
Depth (m), size N.
lon : numpy array
Longitude, size M.
lat : numpy array
Latitude, size M.
SP : numpy array
Practical salinity, size N*M.
t : numpy array
Temperature, size N*M.
"""
p = gsw.p_from_z(-depth, np.mean(lat))
SA = gsw.SA_from_SP(SP, p[:, np.newaxis], lon[np.newaxis, :], lat[np.newaxis, :])
CT = gsw.CT_from_t(SA, t, p[:, np.newaxis])
sig0 = gsw.pot_rho_t_exact(SA, t, p[:, np.newaxis], 0)
N2, p_mid = gsw.Nsquared(SA, CT, p[:, np.newaxis], lat[np.newaxis, :])
return p, SA, CT, sig0, p_mid, N2
def depth_max(depth, mask):
"""Calculate the maximum depth of valid data.
Assumes mask is 2D with size N*M, where M denotes profiles and N depths.
Parameters
----------
depth : numpy array
Depth (m), size N.
mask : numpy array of boolean
Mask where True is valid data, size N*M.
"""
dmax = np.full((mask.shape[1]), np.nan)
for i in range(mask.shape[1]):
dmax[i] = np.max(depth[mask[:, i]])
return dmax
def adiabatic_level_2D(p, SP, t, lon, lat, bin_width=20.0):
"""Adiabatically level 2D data.
Assumes input data is 1D with size N or M, or 2D with size N*M,
where M denotes profiles and N depths.
Parameters
----------
p : numpy array
Pressure (dbar), size N.
SP : numpy array
Practical salinity, size N*M.
t : numpy array
Temperature, size N*M.
lon : numpy array
Longitude, size M.
lat : numpy array
Latitude, size M.
bin_width : float, optional
Bin width (dbar).
"""
print("Adiabatically levelling profiles")
N2_ref = np.full_like(t, np.nan)
for i in tqdm(range(t.shape[1])):
N2_ref[:, i] = mx.nsq.adiabatic_leveling(
p, SP[:, i], t[:, i], lon[i], lat[i], bin_width=bin_width
)
return N2_ref
def regrid_vmp_to_ctd(vmp, ctd, time_win=60.0):
"""Add some CTD variabiles to the VMP Munch."""
ctd.eps1 = utils.regrid_profiles(ctd.time, vmp.time, vmp.eps1)
ctd.Lo1 = utils.regrid_profiles(ctd.time, vmp.time, vmp.Lo1)
ctd.Kv1 = utils.regrid_profiles(ctd.time, vmp.time, vmp.Kv1)
try:
ctd.eps2 = utils.regrid_profiles(ctd.time, vmp.time, vmp.eps2)
ctd.Lo2 = utils.regrid_profiles(ctd.time, vmp.time, vmp.Lo2)
ctd.Kv2 = utils.regrid_profiles(ctd.time, vmp.time, vmp.Kv2)
except AttributeError:
pass
return ctd
|
{"hexsha": "34e3f344d033a41811677450fa3df0aca182c6ef", "size": 6163, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/CTD.py", "max_stars_repo_name": "jessecusack/LeConte_postprocessing", "max_stars_repo_head_hexsha": "1f1a1dc0541033fc83d5953b0a06c6695ae8036d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-12T18:33:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T18:33:16.000Z", "max_issues_repo_path": "code/CTD.py", "max_issues_repo_name": "jessecusack/LeConte_postprocessing", "max_issues_repo_head_hexsha": "1f1a1dc0541033fc83d5953b0a06c6695ae8036d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/CTD.py", "max_forks_repo_name": "jessecusack/LeConte_postprocessing", "max_forks_repo_head_hexsha": "1f1a1dc0541033fc83d5953b0a06c6695ae8036d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3376068376, "max_line_length": 86, "alphanum_fraction": 0.5786143112, "include": true, "reason": "import numpy", "num_tokens": 1793}
|
[STATEMENT]
lemma (in intruder_model) term_variants_pred_wf_trms:
assumes "term_variants_pred P s t"
and "\<And>f g. g \<in> set (P f) \<Longrightarrow> arity f = arity g"
and "wf\<^sub>t\<^sub>r\<^sub>m s"
shows "wf\<^sub>t\<^sub>r\<^sub>m t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf\<^sub>t\<^sub>r\<^sub>m t
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
term_variants_pred P s t
?g2 \<in> set (P ?f2) \<Longrightarrow> arity ?f2 = arity ?g2
wf\<^sub>t\<^sub>r\<^sub>m s
goal (1 subgoal):
1. wf\<^sub>t\<^sub>r\<^sub>m t
[PROOF STEP]
apply (induction rule: term_variants_pred.induct, simp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>T S g f. \<lbrakk>length T = length S; \<And>i. i < length T \<Longrightarrow> term_variants_pred P (T ! i) (S ! i); \<And>i. \<lbrakk>i < length T; \<And>g f. g \<in> set (P f) \<Longrightarrow> arity f = arity g; wf\<^sub>t\<^sub>r\<^sub>m (T ! i)\<rbrakk> \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m (S ! i); g \<in> set (P f); \<And>g f. g \<in> set (P f) \<Longrightarrow> arity f = arity g; wf\<^sub>t\<^sub>r\<^sub>m (Fun f T)\<rbrakk> \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m (Fun g S)
2. \<And>T S f. \<lbrakk>length T = length S; \<And>i. i < length T \<Longrightarrow> term_variants_pred P (T ! i) (S ! i); \<And>i. \<lbrakk>i < length T; \<And>g f. g \<in> set (P f) \<Longrightarrow> arity f = arity g; wf\<^sub>t\<^sub>r\<^sub>m (T ! i)\<rbrakk> \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m (S ! i); \<And>g f. g \<in> set (P f) \<Longrightarrow> arity f = arity g; wf\<^sub>t\<^sub>r\<^sub>m (Fun f T)\<rbrakk> \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m (Fun f S)
[PROOF STEP]
by (metis (no_types) wf_trmI wf_trm_arity in_set_conv_nth wf_trm_param_idx)+
|
{"llama_tokens": 785, "file": "Automated_Stateful_Protocol_Verification_Term_Variants", "length": 3}
|
[STATEMENT]
lemma Gets_B_knows_K:
"\<lbrakk> Gets B \<lbrace>Crypt (shrK B) \<lbrace>Number Tk, Agent A, Key K\<rbrace>,
Crypt K \<lbrace>Agent A, Number Ta\<rbrace>\<rbrace> \<in> set evs;
evs \<in> bankerb_gets \<rbrakk>
\<Longrightarrow> Key K \<in> analz (knows B evs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Gets B \<lbrace>Crypt (shrK B) \<lbrace>Number Tk, Agent A, Key K\<rbrace>, Crypt K \<lbrace>Agent A, Number Ta\<rbrace>\<rbrace> \<in> set evs; evs \<in> bankerb_gets\<rbrakk> \<Longrightarrow> Key K \<in> analz (knows B evs)
[PROOF STEP]
apply (force dest: Gets_imp_knows_analz)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 292, "file": null, "length": 2}
|
import os
import sys
import numpy
import tensorflow as tf
from joblib import Parallel, delayed
from sklearn.metrics import mean_absolute_error
from model.helper import HPLogger, NumpyEncoder
sys.path.append("..")
import json
import pandas
from sklearn.model_selection import KFold
from skopt import gp_minimize, dump
from skopt.space import Integer, Real
from skopt.utils import use_named_args
from model.dnn import DNNRegressor, MonotonicBatchDNNRegressor, IntervalRegressorMAE, \
IntervalRegressorMAEMonotonic
import time
from model.transformer import IntervalTargetTransformer, MeanTransformer
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
SCENARIO_1 = True
SCENARIO_2 = True
TUNE_INTERVAL = True
TUNE_BASELINE_MEAN = True
# monotonic additions
TUNE_BASELINE_MEAN_MONO = True
TUNE_INTERVAL_MONO = True
CV = 5
REPETITIONS = 2
N_JOBS = 10
SAMPLE_SIZE = 7500
ITERATIONS_PER_SPACE = 125
LOG_PATH = 'hp_tuning/%s/'
RES_PATH = 'hp_tuning/tuning_results.json'
dnn_param_names = ['num_dense_layers', 'num_input_nodes', 'num_dense_nodes', 'ratio_dropout', 'penalty_weight']
def prepare_params(**params):
reg_params = {}
trans_params = {}
for param in params:
if param in dnn_param_names:
reg_params[param] = params[param]
else:
trans_params[param] = params[param]
return {'reg': reg_params,
'trans': trans_params}
def score_fold(X_train, y_train, X_test, y_test, model_creator, params):
# create model
params['batch_size'] = 64
params['max_epochs'] = 1000
model = model_creator()
model.set_params(**params)
# train model with params
model.fit(X_train, y_train)
    # evaluate the model on the test set with mean absolute error
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
return mae
def scorer(params, X_trans, y_trans, X, y, y_mean, model_creater, transform_creater):
tf.keras.backend.clear_session()
kf = KFold(n_splits=5, shuffle=True, random_state=42)
# apply transform if needed
transform = transform_creater()
if transform is not None:
transform.set_params(**params['trans'])
X_trans, y_trans = transform.transform(X, y)
X_trans = pandas.DataFrame(X_trans)
y_trans = pandas.DataFrame(y_trans)
process_params = []
for train, test in kf.split(X):
X_train = pandas.DataFrame(X_trans.iloc[train]).reset_index(drop=True)
y_train = pandas.DataFrame(y_trans.iloc[train]).reset_index(drop=True)
        # held-out fold used to score the trained model
X_test = pandas.DataFrame(X.iloc[test]).reset_index(drop=True)
y_test = pandas.DataFrame(y_mean.iloc[test]).reset_index(drop=True)
for _ in range(REPETITIONS):
process_params.append([
X_train, y_train, X_test, y_test, model_creater, params['reg']
])
split_results = Parallel(n_jobs=N_JOBS)(delayed(score_fold)(*params) for params in process_params)
results_array = numpy.array(split_results).reshape(-1, REPETITIONS)
return numpy.mean(results_array, axis=1)
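# Illustrative sketch of the joblib fan-out pattern used in `scorer` above:
# each `delayed(f)(*args)` builds a lazy call and `Parallel` executes them in
# worker processes. `_add` and the toy task list are placeholders.
def _add(a, b):
    return a + b

def _parallel_demo():
    tasks = [(i, i + 1) for i in range(4)]
    return Parallel(n_jobs=2)(delayed(_add)(*t) for t in tasks)  # [1, 3, 5, 7]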
def tune_model(X_trans: pandas.DataFrame, y_trans: pandas.DataFrame, X, y, model_creater, transform_creater, search_space, log_path: str):
# create log folder if it not exists
dir_path = os.path.dirname(log_path + 'mock.file')
if not os.path.exists(dir_path):
os.makedirs(dir_path)
    log_columns = [elem.name for elem in search_space] + ['time', 'std', 'split_scores', 'score']
log_monitor_path = log_path + "log.csv"
monitor = HPLogger(log_monitor_path, log_columns)
param_list = []
# mean y for the test set
y_mean = y.mean(axis=1)
@use_named_args(search_space)
def evaluate(**params):
        # prepare params
p = prepare_params(**params)
# apply params to transform and model
start = time.time()
result_splits = scorer(p, X_trans, y_trans, X, y, y_mean, model_creater, transform_creater)
score = numpy.mean(result_splits)
variance = numpy.std(result_splits)
log_row = {
**p['reg'],
**p['trans'],
'time': time.time() - start,
'split_scores': result_splits,
'std': variance,
'score': score
}
monitor.write_row(log_row)
param_list.append((p, score))
return score
total_iterations = ITERATIONS_PER_SPACE * len(search_space)
initial_points = int(total_iterations * 0.15)
print(total_iterations)
print(initial_points)
result = gp_minimize(evaluate, search_space,
n_calls=total_iterations,
n_initial_points=initial_points,
n_jobs=-1,
verbose=True,
random_state=27,
model_queue_size=10)
del result.specs['args']['func']
dump(result, filename=log_path + 'result.opt', store_objective=False)
return param_list
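# Minimal self-contained example of the skopt pattern used in `tune_model`
# (named dimensions + `use_named_args` + `gp_minimize`); the quadratic
# objective and the bounds are illustrative placeholders only.
def _gp_minimize_demo():
    space = [Real(low=-2.0, high=2.0, name='x'),
             Real(low=-2.0, high=2.0, name='y')]

    @use_named_args(space)
    def objective(x, y):
        return (x - 1.0) ** 2 + (y + 0.5) ** 2

    result = gp_minimize(objective, space, n_calls=20, random_state=0)
    return result.x, result.fun  # best point and best score found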
def tune(dataset_path, log_path, res_path):
X = pandas.read_csv(dataset_path % "X_train.csv")
y = pandas.read_csv(dataset_path % "y_train.csv")
X.fillna(0, inplace=True)
    # subsample and shuffle X and y (the random sample also serves as a shuffle)
X = X.sample(n=SAMPLE_SIZE, random_state=27021996)
y = y.reindex(X.index)
# reset indices
X = X.reset_index(drop=True)
y = y.reset_index(drop=True)
    num_datasets = 1  # X['dataset'].nunique()
print("num_datasets: %d" % num_datasets)
search_space_nn = [
Integer(low=1, high=3, name='num_dense_layers'),
Integer(low=10, high=350, name='num_input_nodes'),
Integer(low=5, high=200, name='num_dense_nodes'),
Real(low=0.0, high=0.5, name='ratio_dropout')
]
monotonic_increasing = ['cpu_Frequency', 'cpu_Turbo Clock', 'cpu_Multiplier',
'gpu_Base Clock', 'gpu_Boost Clock', 'gpu_Bandwidth']
monotonic_decreasing = ['resolution', 'setting']
search_space_mon_penalty = [Real(low=0.0, high=100.0, name='penalty_weight')]
tuning_result_dict = {}
if TUNE_INTERVAL_MONO:
# monotonic increasing features
monotonic_increasing_indices = [X.columns.get_loc(name) for name in monotonic_increasing]
monotonic_decreasing_indices = [X.columns.get_loc(name) for name in monotonic_decreasing]
# monotonic decreasing features
def model_creater():
MonotonicBatchDNNRegressor.mon_increasing = monotonic_increasing_indices
MonotonicBatchDNNRegressor.mon_decreasing = monotonic_decreasing_indices
return IntervalRegressorMAEMonotonic()
# pre transform since the intervals are independent of the params
trans = IntervalTargetTransformer()
X_trans, y_trans = trans.transform(X, y)
X_trans = pandas.DataFrame(X_trans)
y_trans = pandas.DataFrame(y_trans)
def transform_creater():
return None
res_list = tune_model(X_trans, y_trans, X, y, model_creater, transform_creater, search_space_nn + search_space_mon_penalty,
log_path % 'interval_monotonic')
tuning_result_dict['interval_monotonic'] = sorted(res_list, key=lambda tuple: tuple[1])[0][0]
if TUNE_BASELINE_MEAN_MONO:
# monotonic increasing features
monotonic_increasing_indices = [X.columns.get_loc(name) for name in monotonic_increasing]
monotonic_decreasing_indices = [X.columns.get_loc(name) for name in monotonic_decreasing]
# monotonic decreasing features
MonotonicBatchDNNRegressor.mon_increasing = monotonic_increasing_indices
MonotonicBatchDNNRegressor.mon_decreasing = monotonic_decreasing_indices
def model_creater():
MonotonicBatchDNNRegressor.mon_increasing = monotonic_increasing_indices
MonotonicBatchDNNRegressor.mon_decreasing = monotonic_decreasing_indices
return MonotonicBatchDNNRegressor()
trans = MeanTransformer()
X_trans, y_trans = trans.transform(X, y)
X_trans = pandas.DataFrame(X_trans)
y_trans = pandas.DataFrame(y_trans)
def transform_creater():
return None
res_list = tune_model(X_trans, y_trans, X, y, model_creater, transform_creater, search_space_nn + search_space_mon_penalty,
log_path % 'mean_monotonic')
tuning_result_dict['mean_monotonic'] = sorted(res_list, key=lambda tuple: tuple[1])[0][0]
# interval
if TUNE_INTERVAL:
def model_creater():
return IntervalRegressorMAE()
# pre transform since the intervals are independent of the params
trans = IntervalTargetTransformer()
X_trans, y_trans = trans.transform(X, y)
X_trans = pandas.DataFrame(X_trans)
y_trans = pandas.DataFrame(y_trans)
def transform_creater():
return None
res_list = tune_model(X_trans, y_trans, X, y, model_creater, transform_creater, search_space_nn, log_path % 'interval')
tuning_result_dict['interval'] = sorted(res_list, key=lambda tuple: tuple[1])[0][0]
# baseline mean
if TUNE_BASELINE_MEAN:
def model_creater():
return DNNRegressor()
trans = MeanTransformer()
X_trans, y_trans = trans.transform(X, y)
X_trans = pandas.DataFrame(X_trans)
y_trans = pandas.DataFrame(y_trans)
def transform_creater():
return None
res_list = tune_model(X_trans, y_trans, X,y, model_creater, transform_creater, search_space_nn, log_path % 'mean')
tuning_result_dict['mean'] = sorted(res_list, key=lambda tuple: tuple[1])[0][0]
# dump result dict to json
with open(res_path, 'w') as result_file:
result_file.write(json.dumps(tuning_result_dict, cls=NumpyEncoder, indent=4))
if __name__ == '__main__':
# tune scenario 1
if SCENARIO_1:
tune(dataset_path='../data/case_study/scenario_1/%s', log_path='hp_tuning/scenario_1/%s/',
res_path='hp_tuning/scenario_1.json')
# tune scenario 2
if SCENARIO_2:
tune(dataset_path='../data/case_study/scenario_2/%s', log_path='hp_tuning/scenario_2/%s/',
             res_path='hp_tuning/scenario_2.json')
|
{"hexsha": "4634f07cea336cb5b2f8a5ac26294ff65af45552", "size": 10317, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/experiment/hp_tuning.py", "max_stars_repo_name": "svpeeters/performance_prediction", "max_stars_repo_head_hexsha": "713e78441d59a5dafccaa43858a0478a29b43e2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/experiment/hp_tuning.py", "max_issues_repo_name": "svpeeters/performance_prediction", "max_issues_repo_head_hexsha": "713e78441d59a5dafccaa43858a0478a29b43e2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/experiment/hp_tuning.py", "max_forks_repo_name": "svpeeters/performance_prediction", "max_forks_repo_head_hexsha": "713e78441d59a5dafccaa43858a0478a29b43e2f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7156862745, "max_line_length": 139, "alphanum_fraction": 0.6746147136, "include": true, "reason": "import numpy", "num_tokens": 2440}
|
import torch
from torch.utils.data import Dataset, DataLoader
import scipy.sparse
from contextualized_topic_models.models.ctm import CTM
import pickle, os
from tqdm import tqdm
from utils import load_model
import numpy as np
def get_posteriors(teacher_dataset, teacher_model, contextual_size=512, batch_size=25, num_workers=10, epoch=99):
tp = pickle.load(open(os.path.join(teacher_model, "tp.pkl"), "rb"))
ctm = load_model(teacher_model, len(tp.vocab), contextual_size, epoch=epoch)
ctm.model.zero_grad()
posterior_variances = []
posterior_means = []
posterior_log_variances = []
loader = DataLoader(teacher_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
for batch_samples in tqdm(loader):
X_bow = batch_samples['X_bow']
X_bow = X_bow.reshape(X_bow.shape[0], -1)
X_contextual = batch_samples['X_contextual']
prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, \
estimated_labels =\
ctm.model(X_bow.cuda(), X_contextual.cuda())
posterior_variances.append(posterior_variance)
posterior_means.append(posterior_mean)
posterior_log_variances.append(posterior_log_variance)
posterior_variances = torch.cat(posterior_variances)
posterior_means = torch.cat(posterior_means)
posterior_log_variances = torch.cat(posterior_log_variances)
return posterior_variances.cpu().detach().numpy(), posterior_means.cpu().detach().numpy(), posterior_log_variances.cpu().detach().numpy()
class CTMDatasetPosteriors(Dataset):
"""Class to load BoW and the contextualized embeddings."""
def __init__(self, X_contextual, X_bow, idx2token, posterior_variance, posterior_mean, posterior_log_variance):
if X_bow.shape[0] != len(X_contextual):
raise Exception("Wait! BoW and Contextual Embeddings have different sizes! "
"You might want to check if the BoW preparation method has removed some documents. ")
self.X_bow = X_bow
self.X_contextual = X_contextual
self.idx2token = idx2token
self.posterior_variance = posterior_variance
self.posterior_mean = posterior_mean
self.posterior_log_variance = posterior_log_variance
def __len__(self):
"""Return length of dataset."""
return self.X_bow.shape[0]
def __getitem__(self, i):
"""Return sample from dataset at index i."""
        if scipy.sparse.issparse(self.X_bow[i]):
            X_bow = torch.FloatTensor(self.X_bow[i].todense())
        else:
            X_bow = torch.FloatTensor(self.X_bow[i])
        X_contextual = torch.FloatTensor(self.X_contextual[i])
posterior_variance = torch.FloatTensor(self.posterior_variance[i])
posterior_mean = torch.FloatTensor(self.posterior_mean[i])
posterior_log_variance = torch.FloatTensor(self.posterior_log_variance[i])
return_dict = {'X_bow': X_bow, 'X_contextual': X_contextual,
'posterior_variance' : posterior_variance,
'posterior_mean': posterior_mean,
'posterior_log_variance': posterior_log_variance}
return return_dict
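# Illustrative only: wiring the dataset above into a DataLoader. All shapes
# and random tensors below are placeholders, not real CTM inputs.
def _dataset_demo():
    n_docs, vocab, ctx_dim, n_topics = 8, 100, 512, 10
    ds = CTMDatasetPosteriors(
        X_contextual=np.random.rand(n_docs, ctx_dim).astype('float32'),
        X_bow=np.random.rand(n_docs, vocab).astype('float32'),
        idx2token={i: str(i) for i in range(vocab)},
        posterior_variance=np.ones((n_docs, n_topics), dtype='float32'),
        posterior_mean=np.zeros((n_docs, n_topics), dtype='float32'),
        posterior_log_variance=np.zeros((n_docs, n_topics), dtype='float32'))
    batch = next(iter(DataLoader(ds, batch_size=4)))
    return batch['X_bow'].shape  # torch.Size([4, 100])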
class StudentZeroShotTM(CTM):
def __init__(self, **kwargs):
inference_type = "zeroshot"
super().__init__(**kwargs, inference_type=inference_type)
def _train_epoch(self, loader):
"""Train epoch."""
self.model.train()
train_loss = 0
samples_processed = 0
print("Using teacher posterior")
for batch_samples in loader:
# batch_size x vocab_size
X_bow = batch_samples['X_bow']
X_bow = X_bow.reshape(X_bow.shape[0], -1)
X_contextual = batch_samples['X_contextual']
teacher_posterior_variance = batch_samples['posterior_variance']
teacher_posterior_mean = batch_samples['posterior_mean']
teacher_posterior_log_variance = batch_samples['posterior_log_variance']
if self.USE_CUDA:
X_bow = X_bow.cuda()
X_contextual = X_contextual.cuda()
teacher_posterior_variance = teacher_posterior_variance.cuda()
teacher_posterior_mean = teacher_posterior_mean.cuda()
teacher_posterior_log_variance = teacher_posterior_log_variance.cuda()
# forward pass
self.model.zero_grad()
prior_mean, prior_variance, posterior_mean, posterior_variance,\
posterior_log_variance, word_dists, estimated_labels = self.model(X_bow, X_contextual)
# backward pass
kl_loss, rl_loss = self._loss(
X_bow, word_dists, teacher_posterior_mean, teacher_posterior_variance,
posterior_mean, posterior_variance, posterior_log_variance)
loss = self.weights["beta"]*kl_loss + rl_loss
loss = loss.sum()
loss.backward()
self.optimizer.step()
# compute train loss
samples_processed += X_bow.size()[0]
train_loss += loss.item()
train_loss /= samples_processed
return samples_processed, train_loss
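# `self._loss` is inherited from the CTM base class and receives the teacher
# posterior above. For reference, the closed-form KL divergence between two
# diagonal Gaussians is sketched below; this illustrates the distillation
# idea, not necessarily the exact reduction CTM._loss performs.
def _gaussian_kl_sketch(mean_s, var_s, mean_t, var_t):
    """KL( N(mean_s, diag var_s) || N(mean_t, diag var_t) ) per sample."""
    log_ratio = torch.log(var_t) - torch.log(var_s)
    return 0.5 * torch.sum(
        log_ratio + (var_s + (mean_s - mean_t) ** 2) / var_t - 1.0, dim=1)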
|
{"hexsha": "e41c9c0e2dbbc8da1c1fda2a10acc595caf75034", "size": 5416, "ext": "py", "lang": "Python", "max_stars_repo_path": "knowledge_distillation.py", "max_stars_repo_name": "EMBEDDIA/media_eval_vctm", "max_stars_repo_head_hexsha": "17111065f4a4435485d018cf81586a00cea8afff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "knowledge_distillation.py", "max_issues_repo_name": "EMBEDDIA/media_eval_vctm", "max_issues_repo_head_hexsha": "17111065f4a4435485d018cf81586a00cea8afff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "knowledge_distillation.py", "max_forks_repo_name": "EMBEDDIA/media_eval_vctm", "max_forks_repo_head_hexsha": "17111065f4a4435485d018cf81586a00cea8afff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.095890411, "max_line_length": 141, "alphanum_fraction": 0.6632200886, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1147}
|
# Use stepwise regression and PCA to confirm discriminating questions between groups
rm(list = ls())
source("../project_support.r")
# Check whether the additional libraries are installed and install any that are missing
packages <- c("MASS", "factoextra", "ggfortify")
install.packages(setdiff(packages, rownames(installed.packages())))
# Load additional libraries
# Loading MASS masks dplyr::select, which breaks code that relies on it,
# so run this script separately from the main analysis
library(MASS)
library(factoextra)
library(ggfortify)
# Load additional functions
# Perform stepwise regression
stepwise_regression <- function(data, cluster1, cluster2, value_cluster1, value_cluster2) {
clusters <- data %>%
mutate(Cluster = case_when(Cluster %in% cluster1 ~ value_cluster1,
Cluster %in% cluster2 ~ value_cluster2)) %>%
filter(!is.na(Cluster)) %>%
mutate(Cluster = as.numeric(as.character(Cluster))) %>%
dplyr::select(-ID, -`Entry ID`, -`Branching question`, -`Entry name`, -`Entry source`, -`Entry description`, -`Entry tags`, -Expert, -`Region ID`, -`Region name`, -`Region description`, -`Region tags`, -label, -elite, -non_elite, -religious_specialist)
clusters[clusters == "{01}"] <- "3"
clusters <- clusters %>%
mutate_if(is.character, as.numeric)
# Remove non changing columns
non_changing <- lapply(clusters, unique)
non_changing <- non_changing[lengths(non_changing) == 1]
non_changing <- names(non_changing)
clusters <- clusters %>%
dplyr::select(-all_of(non_changing))
# Fit the full model
full_model <- lm(Cluster ~ ., data = clusters)
# Stepwise regression model
step_model <- stepAIC(full_model, direction = "both", trace = FALSE)
}
# Extract questions selected by stepwise regression
extract_questions <- function(lm, questions){
# Extract Questions IDs from lm call
x = lm$call
x = as.character(x)
x = x[2]
x = unlist(str_split(x, "\\+"))
x = gsub("Cluster ~ `", "", x)
x = gsub("`", "", x)
x = gsub("\n", "", x)
x = str_trim(x)
x = as.numeric(x)
# Extract question metadata
used_questions = questions %>%
filter(`Question ID` %in% x)
}
# Load data
data <- read_csv("./input/b_f_r4_50_50.csv")
raw_data <- read_csv("./input/drh.csv")
# Extract all questions from drh data
questions <- raw_data %>%
dplyr::select(`Question ID`, Question, `Question description`, `Parent question`, `Parent question ID`) %>%
distinct()
# Stepwise regression
step_model <- stepwise_regression(data = data, cluster1 = c("1.1", "1.2"), cluster2 = c("2.1.1", "2.1.2", "2.2"), value_cluster1 = 1, value_cluster2 = 2)
# Extract used questions
used_questions <- extract_questions(step_model, questions)
# Remove metadata
data_sub <- data %>%
dplyr::select(-ID, -`Entry ID`, -`Branching question`, -`Entry name`, -`Entry source`, -`Entry description`, -`Entry tags`, -Expert, -`Region ID`, -`Region name`, -`Region description`, -`Region tags`, -label, -elite, -non_elite, -religious_specialist)
data_sub[data_sub == "{01}"] <- "3"
data_sub <- data_sub %>%
mutate(Cluster = as.factor(Cluster)) %>%
mutate_if(is.character, as.numeric)
# Perform PCA
pca <- prcomp(dplyr::select(data_sub, -Cluster))
# Plot PCA
autoplot(pca, data = data_sub, colour = 'Cluster') +
theme_minimal() +
scale_color_manual(values = c("#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2"))
# Visualize variables
fviz_pca_var(pca, col.var = "contrib",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07")
)
# Extract the results for variables
pca_var <- get_pca_var(pca)
# Select variables that are most contributing to PC1 and PC2
pc1 <- tibble(`Question ID` = names(pca_var$contrib[,1]), contrib = pca_var$contrib[,1]) %>%
mutate(`Question ID` = as.numeric(`Question ID`)) %>%
left_join(questions) %>%
filter(contrib > 1) %>%
arrange(desc(contrib))
pc2 <- tibble(`Question ID` = names(pca_var$contrib[,2]), contrib = pca_var$contrib[,2]) %>%
mutate(`Question ID` = as.numeric(`Question ID`)) %>%
left_join(questions) %>%
filter(contrib > 1) %>%
arrange(desc(contrib))
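# Optional sanity check (illustrative): inspect how much variance each
# principal component explains before interpreting the PC1/PC2 contributions
fviz_eig(pca, addlabels = TRUE)
summary(pca)$importance[, 1:2]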
|
{"hexsha": "38da3e667577230f2d0f9419e15aaafb12787184", "size": 4112, "ext": "r", "lang": "R", "max_stars_repo_path": "05_cluster_comparison/confirmatory_analysis.r", "max_stars_repo_name": "religionhistory/religion_taxonomy", "max_stars_repo_head_hexsha": "8ddac68fe76da2337d5d4c4ab6787a90cb56c59c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "05_cluster_comparison/confirmatory_analysis.r", "max_issues_repo_name": "religionhistory/religion_taxonomy", "max_issues_repo_head_hexsha": "8ddac68fe76da2337d5d4c4ab6787a90cb56c59c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "05_cluster_comparison/confirmatory_analysis.r", "max_forks_repo_name": "religionhistory/religion_taxonomy", "max_forks_repo_head_hexsha": "8ddac68fe76da2337d5d4c4ab6787a90cb56c59c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.389380531, "max_line_length": 257, "alphanum_fraction": 0.6770428016, "num_tokens": 1174}
|
import pandas as pd
import os
import numpy as np
import logging
import urllib
import zipfile
from pathlib import Path
AMPLIGRAPH_ENV_NAME = 'AMPLIGRAPH_DATA_HOME'
REMOTE_DATASET_SERVER = 'https://s3-eu-west-1.amazonaws.com/ampligraph/datasets/'
DATASET_FILE_NAME = {'WN18': 'wn18.zip',
'WN18RR': 'wn18RR.zip',
'FB15K': 'fb15k.zip',
'FB15K_237': 'fb15k-237.zip',
'YAGO3_10': 'YAGO3-10.zip',
}
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def _clean_data(X, throw_valid=False):
    """Drop test (and optionally validation) triples whose entities never appear in the training set."""
train = X["train"]
valid = X["valid"]
test = X["test"]
train_ent = set(train.flatten())
valid_ent = set(valid.flatten())
test_ent = set(test.flatten())
    # keep unseen entities in the validation set (only filter the test set)
if not throw_valid:
train_valid_ent = set(train.flatten()) | set(valid.flatten())
ent_test_diff_train_valid = test_ent - train_valid_ent
idxs_test = []
if len(ent_test_diff_train_valid) > 0:
count_test = 0
c_if = 0
for row in test:
tmp = set(row)
if len(tmp & ent_test_diff_train_valid) != 0:
idxs_test.append(count_test)
c_if += 1
count_test = count_test + 1
filtered_test = np.delete(test, idxs_test, axis=0)
logging.debug("fit validation case: shape test: {0} \
- filtered test: {1}: {2} triples \
with unseen entties removed" \
.format(test.shape, filtered_test.shape, c_if))
return {'train': train, 'valid': valid, 'test': filtered_test}
    # also filter unseen entities from the validation set
else:
# for valid
ent_valid_diff_train = valid_ent - train_ent
idxs_valid = []
if len(ent_valid_diff_train) > 0:
count_valid = 0
c_if = 0
for row in valid:
tmp = set(row)
if len(tmp & ent_valid_diff_train) != 0:
idxs_valid.append(count_valid)
c_if += 1
count_valid = count_valid + 1
filtered_valid = np.delete(valid, idxs_valid, axis=0)
logging.debug("not fitting validation case: shape valid: {0} \
- filtered valid: {1}: {2} triples \
with unseen entties removed" \
.format(valid.shape, filtered_valid.shape, c_if))
# for test
ent_test_diff_train = test_ent - train_ent
idxs_test = []
if len(ent_test_diff_train) > 0:
count_test = 0
c_if = 0
for row in test:
tmp = set(row)
if len(tmp & ent_test_diff_train) != 0:
idxs_test.append(count_test)
c_if += 1
count_test = count_test + 1
filtered_test = np.delete(test, idxs_test, axis=0)
logging.debug("not fitting validation case: shape test: {0} \
- filtered test: {1}: {2} triples \
with unseen entties removed" \
.format(test.shape, filtered_test.shape, c_if))
return {'train': train, 'valid': filtered_valid, 'test': filtered_test}
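# The row filtering above can also be expressed without explicit Python loops;
# an equivalent vectorized sketch (illustrative only, not used by this module):
def _rows_with_unseen(triples, unseen_entities):
    """Boolean mask of triples that touch any entity in `unseen_entities`."""
    unseen = np.array(sorted(unseen_entities))
    return np.isin(triples, unseen).any(axis=1)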
def _get_data_home(data_home=None):
"""Get to location of the dataset folder to use.
Automatically determine the dataset folder to use.
If data_home is provided this location a check is
performed to see if the path exists and creates one if it does not.
If data_home is None the AMPLIGRAPH_ENV_NAME dataset is used.
If AMPLIGRAPH_ENV_NAME is not set the a default environment ~/ampligraph_datasets is used.
Parameters
----------
data_home : str
The path to the folder that contains the datasets.
Returns
-------
str
The path to the dataset directory
"""
if data_home is None:
data_home = os.environ.get(AMPLIGRAPH_ENV_NAME, os.path.join('~', 'ampligraph_datasets'))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
logger.debug('data_home is set to {}'.format(data_home))
return data_home
def _unzip_dataset(source, destination):
"""Unzip a file from a source location to a destination.
Parameters
----------
source : str
The path to the zipped file
destination : str
The destination directory to unzip the files to.
"""
# TODO - add error checking
with zipfile.ZipFile(source, 'r') as zip_ref:
logger.debug('Unzipping {} to {}'.format(source, destination))
zip_ref.extractall(destination)
os.remove(source)
def _fetch_remote_data(url, download_dir, data_home):
"""Download a remote datasets.
Parameters
----------
url : str
The url of the dataset to download.
dataset_dir : str
The location to downlaod the file to.
data_home : str
The location to save the dataset.
"""
file_path = '{}.zip'.format(download_dir)
if not Path(file_path).exists():
urllib.request.urlretrieve(url, file_path)
# TODO - add error checking
_unzip_dataset(file_path, data_home)
def _fetch_dataset(dataset_name, data_home=None, url=None):
"""Get a dataset.
Gets the directory of a dataset. If the dataset is not found
it is downloaded automatically.
Parameters
----------
dataset_name : str
The name of the dataset to download.
data_home : str
The location to save the dataset to.
url : str
The url to download the dataset from.
Returns
------
str
The location of the dataset.
"""
data_home = _get_data_home(data_home)
dataset_dir = os.path.join(data_home, dataset_name)
if not os.path.exists(dataset_dir):
if url is None:
msg = 'No dataset at {} and no url provided.'.format(dataset_dir)
logger.error(msg)
raise Exception(msg)
_fetch_remote_data(url, dataset_dir, data_home)
return dataset_dir
def load_from_csv(directory_path, file_name, sep='\t', header=None):
"""Load a knowledge graph from a csv file
Loads a knowledge graph serialized in a csv file as:
.. code-block:: text
subj1 relationX obj1
subj1 relationY obj2
subj3 relationZ obj2
subj4 relationY obj2
...
.. note::
The function filters duplicated statements.
.. note::
It is recommended to use :meth:`ampligraph.evaluation.train_test_split_no_unseen` to split custom
knowledge graphs into train, validation, and test sets. Using this function will lead to validation, test sets
that do not include triples with entities that do not occur in the training set.
Parameters
----------
directory_path: str
folder where the input file is stored.
file_name : str
file name
sep : str
The subject-predicate-object separator (default \t).
header : int, None
The row of the header of the csv file. Same as pandas.read_csv header param.
Returns
-------
triples : ndarray , shape [n, 3]
the actual triples of the file.
Examples
--------
>>> from ampligraph.datasets import load_from_csv
>>> X = load_from_csv('folder', 'dataset.csv', sep=',')
>>> X[:3]
array([['a', 'y', 'b'],
['b', 'y', 'a'],
['a', 'y', 'c']],
dtype='<U1')
"""
logger.debug('Loading data from {}.'.format(file_name))
df = pd.read_csv(os.path.join(directory_path, file_name),
sep=sep,
header=header,
names=None,
dtype=str)
logger.debug('Dropping duplicates.')
df = df.drop_duplicates()
return df.values
def load_dataset(dataset_name=None, url=None, data_home=None, train_name='train.txt', valid_name='valid.txt',
test_name='test.txt'):
if dataset_name is None:
if url is None:
raise ValueError('The dataset name or url must be provided to load a dataset.')
dataset_name = url[url.rfind('/') + 1:url.rfind('.')]
dataset_path = _fetch_dataset(dataset_name, data_home, url)
train = load_from_csv(dataset_path, train_name)
valid = load_from_csv(dataset_path, valid_name)
test = load_from_csv(dataset_path, test_name)
return {'train': train, 'valid': valid, 'test': test}
def _load_core_dataset(dataset_key, data_home=None):
return load_dataset(url='{}{}'.format(REMOTE_DATASET_SERVER, DATASET_FILE_NAME[dataset_key]), data_home=data_home)
def load_wn18():
"""Load the WN18 dataset
WN18 is a subset of Wordnet. It was first presented by :cite:`bordes2013translating`.
The WN18 dataset is loaded from file if it exists at the ``AMPLIGRAPH_DATA_HOME`` location.
    If ``AMPLIGRAPH_DATA_HOME`` is not set, the default ``~/ampligraph_datasets`` is checked.
If the dataset is not found at either location it is downloaded and placed in ``AMPLIGRAPH_DATA_HOME``
or ``~/ampligraph_datasets``.
The dataset is divided in three splits:
- ``train``: 141,442 triples
- ``valid`` 5,000 triples
- ``test`` 5,000 triples
========= ========= ======= ======= ============ ===========
Dataset Train Valid Test Entities Relations
========= ========= ======= ======= ============ ===========
WN18 141,442 5,000 5,000 40,943 18
========= ========= ======= ======= ============ ===========
.. warning::
The dataset includes a large number of inverse relations, and its use in experiments has been deprecated.
Use WN18RR instead.
Returns
-------
splits : dict
The dataset splits {'train': train, 'valid': valid, 'test': test}. Each split is an ndarray of shape [n, 3].
Examples
--------
>>> from ampligraph.datasets import load_wn18
>>> X = load_wn18()
>>> X['test'][:3]
array([['06845599', '_member_of_domain_usage', '03754979'],
['00789448', '_verb_group', '01062739'],
['10217831', '_hyponym', '10682169']], dtype=object)
"""
return _load_core_dataset('WN18', data_home=None)
def load_wn18rr(clean_unseen=True):
""" Load the WN18RR dataset
The dataset is described in :cite:`DettmersMS018`.
The WN18RR dataset is loaded from file if it exists at the ``AMPLIGRAPH_DATA_HOME`` location.
    If ``AMPLIGRAPH_DATA_HOME`` is not set, the default ``~/ampligraph_datasets`` is checked.
If the dataset is not found at either location it is downloaded and placed in ``AMPLIGRAPH_DATA_HOME``
or ``~/ampligraph_datasets``.
It is divided in three splits:
- ``train``
- ``valid``
- ``test``
========= ========= ======= ======= ============ ===========
Dataset Train Valid Test Entities Relations
========= ========= ======= ======= ============ ===========
WN18RR 86,835 3,034 3,134 40,943 11
========= ========= ======= ======= ============ ===========
.. warning:: WN18RR's validation set contains 198 unseen entities over 210 triples.
The test set has 209 unseen entities, distributed over 210 triples.
Parameters
----------
clean_unseen : bool
If ``True``, filters triples in validation and test sets that include entities not present in the training set.
Returns
-------
splits : dict
The dataset splits: {'train': train, 'valid': valid, 'test': test}. Each split is an ndarray of shape [n, 3].
Examples
-------
>>> from ampligraph.datasets import load_wn18rr
>>> X = load_wn18rr()
>>> X["valid"][0]
array(['02174461', '_hypernym', '02176268'], dtype=object)
"""
if clean_unseen:
return _clean_data(_load_core_dataset('WN18RR', data_home=None), throw_valid=True)
    else:
        return _load_core_dataset('WN18RR', data_home=None)
def load_fb15k():
"""Load the FB15k dataset
FB15k is a split of Freebase, first proposed by :cite:`bordes2013translating`.
The FB15k dataset is loaded from file if it exists at the ``AMPLIGRAPH_DATA_HOME`` location.
    If ``AMPLIGRAPH_DATA_HOME`` is not set, the default ``~/ampligraph_datasets`` is checked.
If the dataset is not found at either location it is downloaded and placed in ``AMPLIGRAPH_DATA_HOME``
or ``~/ampligraph_datasets``.
The dataset is divided in three splits:
- ``train``
- ``valid``
- ``test``
========= ========= ======= ======= ============ ===========
Dataset Train Valid Test Entities Relations
========= ========= ======= ======= ============ ===========
FB15K 483,142 50,000 59,071 14,951 1,345
========= ========= ======= ======= ============ ===========
.. warning::
The dataset includes a large number of inverse relations, and its use in experiments has been deprecated.
Use FB15k-237 instead.
Returns
-------
splits : dict
The dataset splits: {'train': train, 'valid': valid, 'test': test}. Each split is an ndarray of shape [n, 3].
Examples
--------
>>> from ampligraph.datasets import load_fb15k
>>> X = load_fb15k()
>>> X['test'][:3]
array([['/m/01qscs',
'/award/award_nominee/award_nominations./award/award_nomination/award',
'/m/02x8n1n'],
['/m/040db', '/base/activism/activist/area_of_activism', '/m/0148d'],
['/m/08966',
'/travel/travel_destination/climate./travel/travel_destination_monthly_climate/month',
'/m/05lf_']], dtype=object)
"""
return _load_core_dataset('FB15K', data_home=None)
def load_fb15k_237(clean_unseen=True):
"""Load the FB15k-237 dataset
FB15k-237 is a reduced version of FB15K. It was first proposed by :cite:`toutanova2015representing`.
The FB15k-237 dataset is loaded from file if it exists at the ``AMPLIGRAPH_DATA_HOME`` location.
    If ``AMPLIGRAPH_DATA_HOME`` is not set, the default ``~/ampligraph_datasets`` is checked.
If the dataset is not found at either location it is downloaded and placed in ``AMPLIGRAPH_DATA_HOME``
or ``~/ampligraph_datasets``.
The dataset is divided in three splits:
- ``train``
- ``valid``
- ``test``
========= ========= ======= ======= ============ ===========
Dataset Train Valid Test Entities Relations
========= ========= ======= ======= ============ ===========
FB15K-237 272,115 17,535 20,466 14,541 237
========= ========= ======= ======= ============ ===========
.. warning:: FB15K-237's validation set contains 8 unseen entities over 9 triples. The test set has 29 unseen entities,
distributed over 28 triples.
Parameters
----------
clean_unseen : bool
If ``True``, filters triples in validation and test sets that include entities not present in the training set.
Returns
-------
splits : dict
The dataset splits: {'train': train, 'valid': valid, 'test': test}. Each split is an ndarray of shape [n, 3].
Examples
--------
>>> from ampligraph.datasets import load_fb15k_237
>>> X = load_fb15k_237()
>>> X["train"][2]
array(['/m/07s9rl0', '/media_common/netflix_genre/titles', '/m/0170z3'],
dtype=object)
"""
if clean_unseen:
return _clean_data(_load_core_dataset('FB15K_237', data_home=None), throw_valid=True)
    else:
        return _load_core_dataset('FB15K_237', data_home=None)
def load_yago3_10():
""" Load the YAGO3-10 dataset
The dataset is a split of YAGO3 :cite:`mahdisoltani2013yago3`, and has been first presented in :cite:`DettmersMS018`.
The YAGO3-10 dataset is loaded from file if it exists at the ``AMPLIGRAPH_DATA_HOME`` location.
    If ``AMPLIGRAPH_DATA_HOME`` is not set, the default ``~/ampligraph_datasets`` is checked.
If the dataset is not found at either location it is downloaded and placed in ``AMPLIGRAPH_DATA_HOME``
or ``~/ampligraph_datasets``.
It is divided in three splits:
- ``train``
- ``valid``
- ``test``
========= ========= ======= ======= ============ ===========
Dataset Train Valid Test Entities Relations
========= ========= ======= ======= ============ ===========
YAGO3-10 1,079,040 5,000 5,000 123,182 37
========= ========= ======= ======= ============ ===========
Returns
-------
splits : dict
The dataset splits: {'train': train, 'valid': valid, 'test': test}. Each split is an ndarray of shape [n, 3].
Examples
-------
>>> from ampligraph.datasets import load_yago3_10
>>> X = load_yago3_10()
>>> X["valid"][0]
array(['Mikheil_Khutsishvili', 'playsFor', 'FC_Merani_Tbilisi'], dtype=object)
"""
return _load_core_dataset('YAGO3_10', data_home=None)
def load_all_datasets():
load_wn18()
load_wn18rr()
load_fb15k()
load_fb15k_237()
load_yago3_10()
def load_from_rdf(folder_name, file_name, format='nt', data_home=None):
"""Load an RDF file
Loads an RDF knowledge graph using rdflib_ APIs.
Multiple RDF serialization formats are supported (nt, ttl, rdf/xml, etc).
The entire graph will be loaded in memory, and converted into an rdflib `Graph` object.
.. _rdflib: https://rdflib.readthedocs.io/
.. warning::
Large RDF graphs should be serialized to ntriples beforehand and loaded with ``load_from_ntriples()`` instead.
.. note::
It is recommended to use :meth:`ampligraph.evaluation.train_test_split_no_unseen` to split custom
knowledge graphs into train, validation, and test sets. Using this function will lead to validation, test sets
that do not include triples with entities that do not occur in the training set.
Parameters
----------
folder_name: str
base folder where the file is stored.
file_name : str
file name
format : str
The RDF serialization format (nt, ttl, rdf/xml - see rdflib documentation)
Returns
-------
triples : ndarray , shape [n, 3]
the actual triples of the file.
"""
logger.debug('Loading rdf data from {}.'.format(file_name))
data_home = _get_data_home(data_home)
from rdflib import Graph
g = Graph()
g.parse(os.path.join(data_home, folder_name, file_name), format=format, publicID='http://test#')
return np.array(g)
def load_from_ntriples(folder_name, file_name, data_home=None):
"""Load RDF ntriples
Loads an RDF knowledge graph serialized as ntriples, without building an RDF graph in memory.
This function should be preferred over ``load_from_rdf()``, since it does not load the graph into an rdflib model
(and it is therefore faster by order of magnitudes). Nevertheless, it requires a ntriples_ serialization
as in the example below:
.. _ntriples: https://www.w3.org/TR/n-triples/.
.. code-block:: text
_:alice <http://xmlns.com/foaf/0.1/knows> _:bob .
_:bob <http://xmlns.com/foaf/0.1/knows> _:alice .
.. note::
It is recommended to use :meth:`ampligraph.evaluation.train_test_split_no_unseen` to split custom
knowledge graphs into train, validation, and test sets. Using this function will lead to validation, test sets
that do not include triples with entities that do not occur in the training set.
Parameters
----------
folder_name: str
base folder where the file is stored.
file_name : str
file name
Returns
-------
triples : ndarray , shape [n, 3]
the actual triples of the file.
"""
logger.debug('Loading rdf ntriples from {}.'.format(file_name))
data_home = _get_data_home(data_home)
df = pd.read_csv(os.path.join(data_home, folder_name, file_name),
sep=' ',
header=None,
names=None,
dtype=str,
usecols=[0, 1, 2])
    return df.values
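# Minimal usage sketch (illustrative): load a benchmark and inspect the split
# shapes; the exact counts depend on the `clean_unseen` filtering.
def _load_demo():
    X = load_wn18rr(clean_unseen=True)
    return {name: split.shape for name, split in X.items()}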
|
{"hexsha": "2d81566b5a0a4ffbe4ffc4f350f1574c59d322b9", "size": 20523, "ext": "py", "lang": "Python", "max_stars_repo_path": "ampligraph/datasets/datasets.py", "max_stars_repo_name": "pyvandenbussche/AmpliGraph", "max_stars_repo_head_hexsha": "ac515bc34c64becc04385797c8b9f0d93b20b58d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-04T17:16:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-08T20:42:36.000Z", "max_issues_repo_path": "ampligraph/datasets/datasets.py", "max_issues_repo_name": "rezacsedu/AmpliGraph", "max_issues_repo_head_hexsha": "94a5c31187f1267782e95a8d3380ccd1a53221bd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ampligraph/datasets/datasets.py", "max_forks_repo_name": "rezacsedu/AmpliGraph", "max_forks_repo_head_hexsha": "94a5c31187f1267782e95a8d3380ccd1a53221bd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-09T20:03:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-09T20:03:02.000Z", "avg_line_length": 32.8368, "max_line_length": 123, "alphanum_fraction": 0.5894849681, "include": true, "reason": "import numpy", "num_tokens": 4996}
|
# -*- coding: utf-8 -*-
import birl
import utils
import numpy as np
import matplotlib.pyplot as plt
#calculate the policy loss between the hypothesis return and the map return
def calculate_policy_loss(config, hyp_params, map_params):
#calculate reward for optimal placement under hyp_reward
hyp_obj_weights, hyp_abs_weights = hyp_params
hyp_reward_fn = utils.RbfComplexReward(config, hyp_obj_weights, hyp_abs_weights)
#get optimal placement under the hypothesis reward function and new configuration
hyp_placement, hyp_return = hyp_reward_fn.estimate_best_placement()
#calculate reward for map placement under hyp_reward
map_obj_weights, map_abs_weights = map_params
map_reward_fn = utils.RbfComplexReward(config, map_obj_weights, map_abs_weights)
#get optimal placement under map reward function and new configuration
map_placement, _ = map_reward_fn.estimate_best_placement()
map_return = hyp_reward_fn.get_reward(map_placement)
return hyp_return - map_return
def calculate_placement_loss(config, hyp_params, map_params):
"""Return the Euclidean distance between the optimal placements under the hypothesis and map reward functions."""
#calculate reward for optimal placement under hyp_reward
hyp_obj_weights, hyp_abs_weights = hyp_params
hyp_reward_fn = utils.RbfComplexReward(config, hyp_obj_weights, hyp_abs_weights)
#active_utils.visualize_reward(hyp_reward_fn, "hypothesis reward")
#get optimal placement under the hypothesis reward function and new configuration
hyp_placement, _ = hyp_reward_fn.estimate_best_placement()
#calculate reward for map placement under hyp_reward
map_obj_weights, map_abs_weights = map_params
map_reward_fn = utils.RbfComplexReward(config, map_obj_weights, map_abs_weights)
#active_utils.visualize_reward(map_reward_fn, "map reward")
#get optimal placement under map reward function and new configuration
map_placement, _ = map_reward_fn.estimate_best_placement()
#print "placement loss", np.linalg.norm(hyp_placement - map_placement)
#plt.show()
return np.linalg.norm(hyp_placement - map_placement)
def get_best_placement(config, map_params):
"""Return the optimal placement under the map reward function for the given configuration."""
#calculate reward for map placement under hyp_reward
map_obj_weights, map_abs_weights = map_params
map_reward_fn = utils.RbfComplexReward(config, map_obj_weights, map_abs_weights)
#active_utils.visualize_reward(map_reward_fn, "map reward")
#get optimal placement under map reward function and new configuration
map_placement, _ = map_reward_fn.estimate_best_placement()
return map_placement
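# A minimal usage sketch (weights and config below are hypothetical placeholders;
# assumes utils.RbfComplexReward takes (config, object_weights, abstract_weights) as above):
# hyp_params = (hyp_obj_weights, hyp_abs_weights)
# map_params = (map_obj_weights, map_abs_weights)
# print('policy loss:', calculate_policy_loss(config, hyp_params, map_params))
# print('placement loss:', calculate_placement_loss(config, hyp_params, map_params))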
|
{"hexsha": "27e6231ce357de44ac8b72e4e41de64435377c77", "size": 2502, "ext": "py", "lang": "Python", "max_stars_repo_path": "gaze_birl/complexreward.py", "max_stars_repo_name": "asaran/gaze-LfD", "max_stars_repo_head_hexsha": "964635d9bf7b208abe35d40b2bf791b05b8a0c3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-16T15:35:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T15:35:58.000Z", "max_issues_repo_path": "gaze_birl/complexreward.py", "max_issues_repo_name": "asaran/gaze-LfD", "max_issues_repo_head_hexsha": "964635d9bf7b208abe35d40b2bf791b05b8a0c3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gaze_birl/complexreward.py", "max_forks_repo_name": "asaran/gaze-LfD", "max_forks_repo_head_hexsha": "964635d9bf7b208abe35d40b2bf791b05b8a0c3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.1153846154, "max_line_length": 85, "alphanum_fraction": 0.7965627498, "include": true, "reason": "import numpy", "num_tokens": 532}
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from monai.networks.layers.convutils import gaussian_1d
class TestGaussian1d(unittest.TestCase):
def test_gaussian(self):
np.testing.assert_allclose(
gaussian_1d(0.5, 8),
torch.tensor(
[
0.0000e00,
2.9802e-07,
1.3496e-03,
1.5731e-01,
6.8269e-01,
1.5731e-01,
1.3496e-03,
2.9802e-07,
0.0000e00,
]
),
rtol=1e-4,
)
np.testing.assert_allclose(
gaussian_1d(1, 1),
torch.tensor([0.24173, 0.382925, 0.24173]),
rtol=1e-4,
)
def test_wrong_sigma(self):
with self.assertRaises(ValueError):
gaussian_1d(-1, 10)
with self.assertRaises(ValueError):
gaussian_1d(1, -10)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "326cd2cd5ad61a5c91a233f81f8955d02545948f", "size": 1609, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_gaussian.py", "max_stars_repo_name": "JZK00/MONAI", "max_stars_repo_head_hexsha": "49e693c4e7df83dc1f8ab87349373de9263188a9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-12T02:13:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-14T11:46:21.000Z", "max_issues_repo_path": "tests/test_gaussian.py", "max_issues_repo_name": "JZK00/MONAI", "max_issues_repo_head_hexsha": "49e693c4e7df83dc1f8ab87349373de9263188a9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-17T12:41:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-29T15:20:37.000Z", "max_forks_repo_path": "tests/test_gaussian.py", "max_forks_repo_name": "JZK00/MONAI", "max_forks_repo_head_hexsha": "49e693c4e7df83dc1f8ab87349373de9263188a9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2545454545, "max_line_length": 74, "alphanum_fraction": 0.578620261, "include": true, "reason": "import numpy", "num_tokens": 399}
|
import numpy as np
from MyDQN.logger import Logger
from MyDQN import vrep
import time
import random
import cv2 as cv
image_pix = 84  # input image dimensions: image_pix * image_pix, grayscale
class EnvGrasp(object):
def __init__(self):
self.total_success = 0
self.total_try = 0
self.logger = Logger('./logs_grasp')
self.work_space = np.asarray([[-0.7, -0.3], [-0.2, 0.2], [0.1, 0.4]])
self.object_work_space = np.asarray([[-0.6, -0.4], [-0.1, 0.1], [0.01, 0.31]])
vrep.simxFinish(-1)
self.sim_client = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5) # Connect to V-REP on port 19997
self.current_state = [] # 2d
self.target = [] # 3d
self.current_handle = 0
self.current_position = [0, 0, 0] # 3d
self.pre_distance = 0
self.correct_count = 0
if self.sim_client == -1:
print('Failed to connect to simulation (V-REP remote API server). Exiting.')
exit()
else:
print('Connected to simulation.')
def get_state(self):
sim_ret, self.current_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target',
vrep.simx_opmode_blocking)
sim_ret, self.current_position = vrep.simxGetObjectPosition(self.sim_client, self.current_handle, -1,
vrep.simx_opmode_blocking)
self.current_position = np.asarray(self.current_position)
# print('current position : ', self.current_position, 'target position : ', self.target)
self.current_state = self.current_position - self.target
frame = self.get_sensor_data()
return self.current_state[0:2], frame, self.current_position
def get_reward(self):
done = 0
if self.current_position[0] < self.work_space[0][0] or self.current_position[0] > self.work_space[0][1] \
or self.current_position[1] < self.work_space[1][0] or self.current_position[1] > self.work_space[1][1] \
or self.current_position[2] < self.work_space[2][0] or self.current_position[2] > self.work_space[2][1]:
print('arm is out of workspace!')
done = 1
self.correct_count = 0
return -1, done
distance = np.linalg.norm(self.current_state)
# print('distance : ', distance, ' predistance : ', self.pre_distance)
reward = -distance
# print(distance)
if distance < self.pre_distance:
reward += 0.3
if distance < 0.015:
self.correct_count += 1
print('reached !')
reward += 0.5
else:
self.correct_count = 0
if self.correct_count >= 5:
# grasp
success = self.grasp()
if success:
print('grasp success ! ')
reward = 1
else:
print('grasp failed !')
reward = -1
self.correct_count = 0
done = 1
self.pre_distance = distance
return reward, done
def reset(self):
sim_ret, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target',
vrep.simx_opmode_blocking)
vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
time.sleep(1)
sim_ret, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1,
vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.47, 0, 0.3),
vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, 0, np.pi / 2),
vrep.simx_opmode_blocking)
while gripper_position[2] > 0.4: # V-REP bug requiring multiple starts and stops to restart
vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
time.sleep(1)
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1,
vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.47, 0, 0.3),
vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, 0, np.pi / 2),
vrep.simx_opmode_blocking)
# print('started!')
target_x = random.random() * (self.object_work_space[0][1] - self.object_work_space[0][0]) + self.object_work_space[0][0]
target_y = random.random() * (self.object_work_space[1][1] - self.object_work_space[1][0]) + self.object_work_space[1][0]
# target_z = random.random() * (self.work_space[2][1] - self.work_space[2][0]) + self.work_space[2][0]
target_z = 0.075
self.target = [target_x, target_y, target_z]
# print()
# vrep.simxPauseCommunication(self.sim_client, False)
# orientation_y = -random.random() * np.pi + np.pi / 2
# print()
# vrep.simxPauseCommunication(self.sim_client, False)
# object_orientation = [-np.pi / 2, orientation_y, -np.pi / 2]
object_orientation = [0, 0, 0]
curr_mesh_file = '/home/chen/stl-model/cup.obj'
curr_shape_name = 'shape001'
object_color = [0, 0.6, 1]
ret_resp, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(self.sim_client,
'remoteApiCommandServer',
vrep.sim_scripttype_childscript,
'importShape',
[0, 0, 255, 0],
self.target + object_orientation + object_color,
[curr_mesh_file,
curr_shape_name],
bytearray(),
vrep.simx_opmode_blocking)
self.target = np.asarray(self.target)
sim_ret, self.current_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target',
vrep.simx_opmode_blocking)
sim_ret, self.current_position = vrep.simxGetObjectPosition(self.sim_client, self.current_handle, -1,
vrep.simx_opmode_blocking)
self.pre_distance = np.linalg.norm(self.target[0:2] - self.current_position[0:2])
self.current_state, frame, pos = self.get_state()
return self.current_state, frame, pos
def open_gripper(self):
gripper_motor_velocity = 0.5
gripper_motor_force = 20
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint',
vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity,
vrep.simx_opmode_blocking)
gripper_fully_opened = False
while gripper_joint_position < 0.0536: # Block until gripper is fully open
sim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
if new_gripper_joint_position <= gripper_joint_position:
return gripper_fully_opened
gripper_joint_position = new_gripper_joint_position
gripper_fully_opened = True
return gripper_fully_opened
def move_to(self, tool_position):
sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target', vrep.simx_opmode_blocking)
# UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, UR5_target_handle, -1,
vrep.simx_opmode_blocking)
# print(UR5_target_position)
move_direction = np.asarray(tool_position)
move_magnitude = np.linalg.norm(move_direction)
if move_magnitude == 0 or np.isnan(move_magnitude):  # guard against zero or NaN magnitude
move_step = [0, 0, 0]
num_move_steps = 0
print('magnitude error~!', move_magnitude, tool_position)
else:
move_step = 0.005 * move_direction / move_magnitude
num_move_steps = int(np.floor(move_magnitude / 0.005))
# print('move direction : ', move_direction, 'move magnitude : ', move_magnitude, 'move step : ', move_step, 'num : ', num_move_steps)
for step_iter in range(num_move_steps):
vrep.simxSetObjectPosition(self.sim_client, UR5_target_handle, -1, (UR5_target_position[0] +
move_step[0] * min(step_iter,
num_move_steps),
UR5_target_position[1] +
move_step[1] * min(step_iter,
num_move_steps),
UR5_target_position[2]),
vrep.simx_opmode_blocking)
def move_down(self, direction=1):
sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target', vrep.simx_opmode_blocking)
# UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, UR5_target_handle, -1,
vrep.simx_opmode_blocking)
# print(UR5_target_position)
# time.sleep(200)
if direction == 1:
move_direction = np.asarray([0, 0, 0.039 - UR5_target_position[2]])
move_magnitude = UR5_target_position[2] - 0.039
else:
move_direction = np.asarray([0, 0, 0.301 - UR5_target_position[2]])
move_magnitude = 0.301 - UR5_target_position[2]
if move_magnitude == 0 or np.isnan(move_magnitude):  # guard against zero or NaN magnitude
move_step = [0, 0, 0]
num_move_steps = 0
print('magnitude error~!', move_magnitude)
else:
move_step = 0.02 * move_direction / move_magnitude
num_move_steps = int(np.floor(move_magnitude / 0.02))
# print('move_direction : ', move_direction, 'move_magnitude : ', move_magnitude)
# print('move step : ', move_step, 'num : ', num_move_steps)
for step_iter in range(num_move_steps):
vrep.simxSetObjectPosition(self.sim_client, UR5_target_handle, -1, (UR5_target_position[0],
UR5_target_position[1],
UR5_target_position[2] +
move_step[2] * (step_iter+1)),
vrep.simx_opmode_blocking)
def close_gripper(self):
gripper_motor_velocity = -0.5
gripper_motor_force = 100
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint',
vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity,
vrep.simx_opmode_blocking)
gripper_fully_closed = False
while gripper_joint_position > -0.047: # Block until gripper is fully closed
sim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
# print(gripper_joint_position)
if new_gripper_joint_position >= gripper_joint_position:
return gripper_fully_closed
gripper_joint_position = new_gripper_joint_position
gripper_fully_closed = True
return gripper_fully_closed
def grasp(self):
self.total_try += 1
self.open_gripper()
self.move_down()
self.close_gripper()
self.move_down(-1) # move up
time.sleep(1)
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint',
vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle,
vrep.simx_opmode_blocking)
if 0.055 >= gripper_joint_position >= -0.04:
success = True
else:
success = False
if success:
self.total_success += 1
accuracy = float(self.total_success) / self.total_try
info = {'grasp success rate': accuracy}
for tag, value in info.items():
self.logger.scalar_summary(tag, value, step=self.total_try)
return success
def get_sensor_data(self, is_save=False):
sim_ret, cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_hand', vrep.simx_opmode_blocking)
# Get color image from simulation
sim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.sim_client, cam_handle, 0,
vrep.simx_opmode_blocking)
color_img = np.asarray(raw_image)
# print(color_img.shape)
color_img.shape = (resolution[1], resolution[0], 3)
# print(color_img)
color_img = color_img.astype(np.float64) / 255  # np.float alias was removed in NumPy 1.24
color_img[color_img < 0] += 1
# color_img *= 255
color_img = np.fliplr(color_img)
test_show_image = color_img * 255
test_show_image = test_show_image.astype(np.uint8)
test_show_image = cv.cvtColor(test_show_image, cv.COLOR_BGR2GRAY)
if is_save:
# print(test_show_image.shape)
# test_show_image = Image.fromarray(test_show_image)
# test_show_image.show()
cv.imwrite('/home/chen/PycharmProjects/Reinforcement/VisualGrasp/image/' + 'grasp_success' + str(time.asctime(time.localtime(time.time()))) + '.png', test_show_image)
resize_color = cv.resize(test_show_image, (image_pix, image_pix))
resize_color = np.asarray(resize_color)
# cv.imwrite('t.png', resize_color)
resize_color = resize_color.astype(np.float64) / 255.
# np.set_printoptions(threshold=np.inf, suppress=True)
# f = open('test.txt', 'w')
# print(resize_color, file=f)
# f.close()
# print(resize_color.shape)
return resize_color
def step(self, action):
# action : [x_ratio, y_ratio] ~ [-1, 1]
move_magnitude = 0.05
action = np.append(action, 0)
action = action * move_magnitude
self.move_to(action)
self.current_state, frame, pos = self.get_state()
reward, done = self.get_reward()
return reward, self.current_state, done, frame, pos
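# A minimal interaction sketch (requires a running V-REP/CoppeliaSim instance
# serving the matching UR5 + RG2 scene on port 19997):
# env = EnvGrasp()
# state, frame, pos = env.reset()
# reward, state, done, frame, pos = env.step(np.array([0.5, -0.2]))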
|
{"hexsha": "864b0cfa4ffd17fc0064ffd6ad54af2bf0affe39", "size": 17611, "ext": "py", "lang": "Python", "max_stars_repo_path": "environment.py", "max_stars_repo_name": "gouxiangchen/DRL-grasp", "max_stars_repo_head_hexsha": "daf4878e297b4762bd8eee71dca226826784dad1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-10-13T13:01:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T02:24:34.000Z", "max_issues_repo_path": "environment.py", "max_issues_repo_name": "gouxiangchen/DRL-grasp", "max_issues_repo_head_hexsha": "daf4878e297b4762bd8eee71dca226826784dad1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-08T03:13:28.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-08T03:13:28.000Z", "max_forks_repo_path": "environment.py", "max_forks_repo_name": "gouxiangchen/DRL-grasp", "max_forks_repo_head_hexsha": "daf4878e297b4762bd8eee71dca226826784dad1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.8562691131, "max_line_length": 178, "alphanum_fraction": 0.5499403782, "include": true, "reason": "import numpy", "num_tokens": 3798}
|
from os import listdir
import json
import pickle
import os, errno
import pandas as pd
from numpy import array
from pandas import DataFrame
from typing import cast
import io
from pathlib import Path
class MpFileUtil:
def save_pickle(self, dir_name: str, file_name: str, obj: object):
self.make_dir(dir_name)
path = os.path.join(dir_name, file_name)
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_pickle(self, dir_name: str, file_name: str) -> object:
path = os.path.join(dir_name, file_name)
with open(path, 'rb') as f:
meta_data = pickle.load(f)
return meta_data
def save_to_hd5_store(self, store: pd.HDFStore, store_name: str, obj: object) -> str:
store[store_name] = obj
store.close()
return store_name
def save_hd5_to_new_store(self, file_name: str, store_name: str, obj: object) -> (pd.HDFStore, str):
store = pd.HDFStore(file_name)
store[store_name] = obj
store.close()
return store, store_name
def load_hd5(self, store_name: str, store: pd.HDFStore) -> object:
obj = store[store_name]
store.close()
return obj
def df_to_csv(self, file_name: str, df: DataFrame, compression: bool = False):
if compression:
compression_opts = dict(method='zip',
archive_name=file_name + '.csv')
df.to_csv(file_name + '.zip', index=False,
compression=compression_opts)
else:
df.to_csv(file_name + '.csv', index=False)
# conda install openpyxl
def df_to_excel(self, file_name: str, df: DataFrame):
df.to_excel(file_name + ".xlsx", sheet_name='dataframe')
def close_stores(self, stores: list):
for store in stores:
if isinstance(store, pd.HDFStore):
store = cast(pd.HDFStore, store)
if store.is_open:
print('save closing store:', store.filename)
store.close()
def write_tsv_files_np(self, strings: list, weights: array, dir: str = 'output',
metadata_filename: str = 'metadata.tsv',
vectors_filename: str = 'vectors.tsv'):
vecs = self.to_path(dir, vectors_filename)
meta = self.to_path(dir, metadata_filename)
out_v = io.open(vecs, 'w', encoding='utf-8')
out_m = io.open(meta, 'w', encoding='utf-8')
for text, vec in zip(strings, weights):
out_m.write(text + "\n")
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_v.close()
out_m.close()
def write_metadata_tsv_file(self, tokenizer_items, dir: str = 'output',
metadata_filename: str = 'metadata.tsv', add_unknown: bool = True):
meta = self.to_path(dir, metadata_filename)
out_m = io.open(meta, 'w', encoding='utf-8')
# add 1 entry for "unknown" words in Embedding Layer
if add_unknown:
out_m.write("Z\n")
for item in tokenizer_items:
out_m.write(item[0] + "\n")
out_m.close()
return True
def read_from_csv(self, path: str):
csv = pd.read_csv(path, na_values='None')
return csv
def close_file_handle(self, fh: io.TextIOWrapper):
fh.close()
def delete_files(self, files: list, absolute: bool = True):
for file in files:
if absolute:
os.system('rm -r ' + file)
else:
os.system('rm -r ./' + file)
return True
def silentremove(self, filenames: list):
for filename in filenames:
try:
os.remove('./' + filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT:  # errno.ENOENT: no such file or directory
raise
return True
def make_dir(self, dirname):
os.makedirs(dirname, exist_ok=True)
return dirname
def to_path(self, dirname, filename):
os.makedirs(dirname, exist_ok=True)
return os.path.join(dirname, filename)
def is_file(self, dirname, filename):
path = os.path.join(dirname, filename)
my_file = Path(path)
return my_file.is_file()
def is_dir(self, dirname):
my_file = Path(dirname)
return my_file.is_dir()
def write_to_file(self, inputs: str = '', directory: str = 'data', filename: str = 'file.csv'):
file_path = self.to_path(directory, filename)
out_path = io.open(file_path, 'w', encoding='utf-8')
out_path.write(inputs)
out_path.close()
def load_json_file(self, dirname, filename):
filepath = self.to_path(dirname, filename)
json1_file = open(filepath)
json1_str = json1_file.read()
json1_data = json.loads(json1_str)
return json1_data
def list_all_files_in_dir(self, dirname):
onlyfiles = [f for f in listdir(dirname) if self.is_file(dirname, f)]
return onlyfiles
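# A small usage sketch (directory and file names are hypothetical):
# util = MpFileUtil()
# util.save_pickle('output', 'demo.pkl', {'a': 1})
# print(util.load_pickle('output', 'demo.pkl'))  # -> {'a': 1}
# print(util.list_all_files_in_dir('output'))    # -> ['demo.pkl']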
|
{"hexsha": "e10615ef9542ad59884be970ec141f6068cabed1", "size": 5169, "ext": "py", "lang": "Python", "max_stars_repo_path": "mp_scripts/mp_util/MpFileUtil.py", "max_stars_repo_name": "mpfuff/trackformer", "max_stars_repo_head_hexsha": "d8d970718d3a4adfa89c1430e34dcfaed65b080f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mp_scripts/mp_util/MpFileUtil.py", "max_issues_repo_name": "mpfuff/trackformer", "max_issues_repo_head_hexsha": "d8d970718d3a4adfa89c1430e34dcfaed65b080f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mp_scripts/mp_util/MpFileUtil.py", "max_forks_repo_name": "mpfuff/trackformer", "max_forks_repo_head_hexsha": "d8d970718d3a4adfa89c1430e34dcfaed65b080f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.923566879, "max_line_length": 104, "alphanum_fraction": 0.5904430257, "include": true, "reason": "from numpy", "num_tokens": 1219}
|
#ifndef MPLLIBS_SAFE_PRINTF_IMPL_MATCHES_HPP
#define MPLLIBS_SAFE_PRINTF_IMPL_MATCHES_HPP
// Copyright Abel Sinkovics (abel@sinkovics.hu) 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <mpllibs/safe_printf/v1/impl/any_type.hpp>
#include <mpllibs/metamonad/metafunction.hpp>
#include <mpllibs/metamonad/unbox.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/type_traits/remove_cv.hpp>
#include <boost/mpl/bool.hpp>
namespace mpllibs
{
namespace safe_printf
{
namespace v1
{
namespace impl
{
template <class A, class B>
struct matches_impl : boost::is_same<A, B> {};
template <class A>
struct matches_impl<A, any_type> : boost::mpl::true_ {};
template <class B>
struct matches_impl<any_type, B> : boost::mpl::true_ {};
template <class A, class B>
struct matches_recurse :
matches_impl<
typename boost::remove_cv<A>::type,
typename boost::remove_cv<B>::type
>
{};
template <class A, class B>
struct matches_impl<A*, B*> : matches_recurse<A, B> {};
template <class A, class B>
struct matches_impl<A[], B[]> : matches_recurse<A, B> {};
template <class A, class B>
struct matches_impl<A*, B[]> : matches_recurse<A, B> {};
template <class A, class B>
struct matches_impl<A[], B*> : matches_recurse<A, B> {};
MPLLIBS_METAFUNCTION(matches, (A)(B))
((
matches_recurse<
typename metamonad::unbox<A>::type,
typename metamonad::unbox<B>::type
>
));
}
}
}
}
#endif
|
{"hexsha": "cfc488705b41892634d9404b3774a81afadf42c1", "size": 1801, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "mpllibs/safe_printf/v1/impl/matches.hpp", "max_stars_repo_name": "sabel83/mpllibs", "max_stars_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 70.0, "max_stars_repo_stars_event_min_datetime": "2015-01-15T09:05:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T15:49:31.000Z", "max_issues_repo_path": "mpllibs/safe_printf/v1/impl/matches.hpp", "max_issues_repo_name": "sabel83/mpllibs", "max_issues_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2015-06-18T19:25:34.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-13T19:49:51.000Z", "max_forks_repo_path": "mpllibs/safe_printf/v1/impl/matches.hpp", "max_forks_repo_name": "sabel83/mpllibs", "max_forks_repo_head_hexsha": "8e245aedcf658fe77bb29537aeba1d4e1a619a19", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2015-07-10T08:18:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T07:17:57.000Z", "avg_line_length": 25.7285714286, "max_line_length": 65, "alphanum_fraction": 0.6118822876, "num_tokens": 456}
|
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmcv.runner import BaseModule
from ..builder import NECKS
import torch
from torch import nn
from ..losses import SmoothL1Loss
from ..losses import FocalLoss
import matplotlib.pyplot as plt
from torch.nn.parameter import Parameter
from torch.nn.modules.loss import _Loss
import numpy as np
import cv2
def gaussian_kernel(size, sigma):
x, y = np.mgrid[-size:size+1, -size:size+1]
kernel = np.exp(-0.5*(x*x+y*y)/(sigma*sigma))
kernel /= kernel.sum()
return kernel
class SSIM_Loss(_Loss):
def __init__(self, in_channels, size=11, sigma=1.5, size_average=True):
super(SSIM_Loss, self).__init__(size_average)
#assert in_channels == 1, 'Only support single-channel input'
self.in_channels = in_channels
self.size = int(size)
self.sigma = sigma
self.size_average = size_average
kernel = gaussian_kernel(self.size, self.sigma)
self.kernel_size = kernel.shape
weight = np.tile(kernel, (in_channels, 1, 1, 1))
self.weight = Parameter(torch.from_numpy(weight).float(), requires_grad=False)
def forward(self, input, target, mask=None):
#_assert_no_grad(target)
mean1 = F.conv2d(input, self.weight, padding=self.size, groups=self.in_channels)
mean2 = F.conv2d(target, self.weight, padding=self.size, groups=self.in_channels)
mean1_sq = mean1*mean1
mean2_sq = mean2*mean2
mean_12 = mean1*mean2
sigma1_sq = F.conv2d(input*input, self.weight, padding=self.size, groups=self.in_channels) - mean1_sq
sigma2_sq = F.conv2d(target*target, self.weight, padding=self.size, groups=self.in_channels) - mean2_sq
sigma_12 = F.conv2d(input*target, self.weight, padding=self.size, groups=self.in_channels) - mean_12
C1 = 0.01**2
C2 = 0.03**2
ssim = ((2*mean_12+C1)*(2*sigma_12+C2)) / ((mean1_sq+mean2_sq+C1)*(sigma1_sq+sigma2_sq+C2))
if self.size_average:
out = 1 - ssim.mean()
else:
out = 1 - ssim.view(ssim.size(0), -1).mean(1)
return out
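# A short usage sketch (random tensors with illustrative shapes):
# criterion = SSIM_Loss(in_channels=1, size=5)
# loss = criterion(torch.rand(2, 1, 64, 64), torch.rand(2, 1, 64, 64))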
def gaussian2D(radius_x, radius_y, sigma_x=1, sigma_y=1, dtype=torch.float32, device='cpu'):
"""Generate 2D gaussian kernel.
Args:
radius_x (int): Radius of the gaussian kernel along x.
radius_y (int): Radius of the gaussian kernel along y.
sigma_x (int): Sigma of the gaussian function along x. Default: 1.
sigma_y (int): Sigma of the gaussian function along y. Default: 1.
dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.
device (str): Device of gaussian tensor. Default: 'cpu'.
Returns:
h (Tensor): Gaussian kernel with a
``(2 * radius_y + 1) * (2 * radius_x + 1)`` shape.
"""
x = torch.arange(
-radius_x, radius_x + 1, dtype=dtype, device=device).view(1, -1)
y = torch.arange(
-radius_y, radius_y + 1, dtype=dtype, device=device).view(-1, 1)
# h = (-(x * x + y * y) / (2 * sigma_x * sigma_y)).exp()
h = (-((x * x / (2 * sigma_x * sigma_x)) + (y * y / (2 * sigma_y * sigma_y)))).exp()
h[h < torch.finfo(h.dtype).eps * h.max()] = 0
return h
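# Example (illustrative values): radius_x=3, radius_y=2 gives a (2*2+1) x (2*3+1) = 5 x 7 kernel.
# g = gaussian2D(3, 2, sigma_x=1.0, sigma_y=1.5)  # g.shape == (5, 7)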
def gen_gaussian_target(heatmap, center, radius_x, radius_y, k=1):
"""Generate 2D gaussian heatmap.
Args:
heatmap (Tensor): Input heatmap, the gaussian kernel will cover on
it and maintain the max value.
center (list[int]): Coord of gaussian kernel's center.
radius_x (int): Radius of the gaussian kernel along x.
radius_y (int): Radius of the gaussian kernel along y.
k (int): Coefficient of gaussian kernel. Default: 1.
Returns:
out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
"""
radius_x = int(radius_x)
radius_y = int(radius_y)
diameter_x = 2 * radius_x + 1
diameter_y = 2 * radius_y + 1
gaussian_kernel = gaussian2D(
radius_x, radius_y, sigma_x=diameter_x / 6, sigma_y=diameter_y / 6, dtype=heatmap.dtype, device=heatmap.device)
x, y = center
x = int(x)
y = int(y)
height, width = heatmap.shape[:2]
left, right = min(x, radius_x), min(width - x, radius_x + 1)
top, bottom = min(y, radius_y), min(height - y, radius_y + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian_kernel[radius_y - top:radius_y + bottom,
radius_x - left:radius_x + right]
out_heatmap = heatmap
torch.max(
masked_heatmap,
masked_gaussian * k,
out=out_heatmap[y - top:y + bottom, x - left:x + right])
return out_heatmap
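# Example (illustrative sizes): paint a gaussian bump with radii (3, 5)
# centred at (x=10, y=8) onto an empty 32x32 heatmap.
# hm = torch.zeros(32, 32)
# hm = gen_gaussian_target(hm, (10, 8), radius_x=3, radius_y=5)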
def gen_rect_target(heatmap, center, radius_x, radius_y, k=1):
"""Generate a 2D rectangular (box) heatmap.
Args:
heatmap (Tensor): Input heatmap; the box kernel will cover part of
it and maintain the max value.
center (list[int]): Coord of the box center.
radius_x (int): Half-width of the box along x.
radius_y (int): Half-height of the box along y.
k (int): Coefficient of the box kernel. Default: 1.
Returns:
out_heatmap (Tensor): Updated heatmap covered by the box kernel.
"""
radius_x = int(radius_x)
radius_y = int(radius_y)
diameter_x = 2 * radius_x + 1
diameter_y = 2 * radius_y + 1
x, y = center
x = int(x)
y = int(y)
height, width = heatmap.shape[:2]
left, right = min(x, radius_x), min(width - x, radius_x + 1)
top, bottom = min(y, radius_y), min(height - y, radius_y + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
# masked_rect = torch.ones(bottom+top, right+left, device=heatmap.device).to(torch.long)
masked_rect = torch.ones(bottom+top, right+left, device=heatmap.device)
out_heatmap = heatmap
torch.max(
masked_heatmap,
masked_rect * k,
out=out_heatmap[y - top:y + bottom, x - left:x + right])
return out_heatmap
@NECKS.register_module()
class ExtraMask(BaseModule):
def __init__(self,
in_channels,
num_levels,
with_mask_pooling=False,
with_mask_cac=False,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(ExtraMask, self).__init__(init_cfg)
self.with_mask_pooling = with_mask_pooling
self.with_mask_cac = with_mask_cac
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.maskconv1 = ConvModule(
self.in_channels,
self.in_channels // 4,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=dict(type='BN', requires_grad=True)
)
self.maskconv2 = ConvModule(
self.in_channels // 4,
self.in_channels // 16,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=dict(type='BN', requires_grad=True)
)
self.maskconv3 = ConvModule(
self.in_channels // 16,
1,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=dict(type='BN', requires_grad=True)
)
# no upsampling here; directly use the feature map from before downsampling
# self.upsamplev1 = ConvModule(
# 1,
# 16,
# 1,
# padding=0,
# norm_cfg=dict(type='BN', requires_grad=True)
# )
# self.upsamplev2 = ConvModule(
# 16,
# 64,
# 1,
# padding=0,
# norm_cfg=dict(type='BN', requires_grad=True)
# )
'''
if with_mask_pooling:
self.maskROIConv = nn.Sequential(
ConvModule(
64,
16,
1,
padding=0,
norm_cfg=dict(type='BN', requires_grad=True)),
ConvModule(
16,
16,
3,
padding=2,
stride=1,
dilation=2,
norm_cfg=dict(type='BN', requires_grad=True)),
ConvModule(
16,
64,
1,
padding=0,
norm_cfg=dict(type='BN', requires_grad=True))
)
self.mask_upsample = ConvModule(
64,
256,
1,
padding=0,
norm_cfg=dict(type='BN', requires_grad=True))
if with_mask_cac:
self.spatial_attention_conv=nn.Sequential(nn.Conv2d(in_channels*2, in_channels, 1), nn.ReLU(), nn.Conv2d(in_channels,2,3, padding=1))
# self.channel_attention_conv=nn.Sequential(nn.AdaptiveAvgPool2d((1,1)), nn.Conv2d(in_channels*2, in_channels, 1), nn.ReLU(), nn.Conv2d(in_channels, in_channels*2, 1))
'''
# self.loss_mask = SmoothL1Loss(beta=1.0 / 9.0, loss_weight=1.0)
self.loss_mask = torch.nn.MSELoss()
# self.loss_mask = FocalLoss()
# self.ssim = SSIM_Loss(in_channels=1, size=5)
def forward(self, inputs):
"""Forward function."""
inputs, gt_bboxes = inputs
assert len(inputs) == self.num_levels
# apply mask supervision to every FPN level
outs_upsample = []
loss_mask = []
loss_ssim = []
for i in range(self.num_levels):
out = inputs[i]
mask1 = self.maskconv1(out)
mask2 = self.maskconv2(mask1)
mask3 = self.maskconv3(mask2)
# plt.imshow(mask3.squeeze(1)[0].cpu().detach().numpy())
# plt.savefig('3.jpg')
# a = input("aaaa")
# mask = F.interpolate(mask3, size=[mask_size[0] * 4, mask_size[1] * 4], mode='nearest')
if gt_bboxes is not None:
mask_size = out.size()[2:]
heatmaps = []
# x = 0
for gt_bbox in gt_bboxes:
# heatmap = torch.zeros([mask_size[0], mask_size[1]], device=gt_bboxes[0].device).to(torch.long)
heatmap = torch.zeros([mask_size[0], mask_size[1]], device=gt_bboxes[0].device)
# center = (gt_bbox / 16)
center = gt_bbox / (2 ** (i + 2))
Ws = center[:, 2] - center[:, 0]
Hs = center[:, 3] - center[:, 1]
center = center[:, :2] + (center[:, 2:] - center[:, :2]) / 2
center = torch.clamp(center, 0)
for cen, w, h in zip(center, Ws, Hs):
# heatmap = gen_gaussian_target(heatmap, cen, w/2, h/2)
# heatmap = gen_gaussian_target(heatmap, cen, w/4, h/4)
heatmap = gen_rect_target(heatmap, cen, w/2, h/2)
# optionally smooth the heatmap with a gaussian kernel:
# heatmap = heatmap.cpu().numpy()
# heatmap = cv2.GaussianBlur(heatmap,(3,3),0)
# heatmap = torch.from_numpy(heatmap).to(device=gt_bboxes[0].device)
heatmaps.append(heatmap)
# plt.imshow(heatmap.cpu().numpy())
# plt.savefig(str(x)+'.jpg')
# heatmaps = torch.stack(heatmaps).flatten(0)
heatmaps = torch.stack(heatmaps)
# loss_mask.append(self.loss_mask(mask3.squeeze(1).flatten(0).unsqueeze(1), heatmaps))
loss_mask.append(self.loss_mask(mask3.squeeze(1), heatmaps))
# loss_ssim.append(self.ssim(mask3, heatmaps.unsqueeze(1)))
# upsample1 = self.upsamplev1(mask3)
# upsample2 = self.upsamplev2(upsample1)
if self.with_mask_pooling:
outs_upsample.append(out)
# maskROI = self.maskROIConv(mask1) + mask1
# maskROI = self.mask_upsample(maskROI)
# if self.with_mask_cac:
# fusion_feature = torch.cat([out, maskROI], dim=1)
# '''
# channel_attention_conv = F.sigmoid(self.channel_attention_conv(fusion_feature))
# feats_post = channel_attention_conv * fusion_feature
# feats_x, feats_mask = torch.split(feats_post, [256, 256], 1)
# outs_upsample.append(feats_x + feats_mask)
# '''
# spatial_attention_conv = F.sigmoid(self.spatial_attention_conv(fusion_feature))
# feats_post = spatial_attention_conv[:, 0, None, :, :] * out + spatial_attention_conv[:, 1, None, :, :] * maskROI
# outs_upsample.append(feats_post)
# # channel_attention_conv = F.sigmoid(self.channel_attention_conv(fusion_feature))
# # feats_post = channel_attention_conv * fusion_feature
# # feats_x, feats_mask = torch.split(channel_attention_conv, [256, 256], 1)
# # spatial_attention_conv = F.sigmoid(self.spatial_attention_conv(fusion_feature))
# # feats_post = spatial_attention_conv[:, 0, None, :, :] * out * feats_x + spatial_attention_conv[:, 1, None, :, :] * maskROI * feats_mask
# # outs_upsample.append(feats_post)
# else:
# outs_upsample.append(maskROI)
# outs[i] = torch.cat([out, upsample2], dim=1)
loss_mask = sum(loss_mask)
# loss_ssim = sum(loss_ssim) * 0.01
if self.with_mask_pooling:
if gt_bboxes is not None:
if self.with_mask_cac:
return tuple(outs_upsample), None, dict(loss_mask=loss_mask)
else:
return inputs, tuple(outs_upsample), dict(loss_mask=loss_mask)
else:
if self.with_mask_cac:
return tuple(outs_upsample), None, None
else:
return inputs, tuple(outs_upsample), None
else:
return inputs, None, None
|
{"hexsha": "928bc996579c0f89ecd2424e75ac54ec03b8023e", "size": 14171, "ext": "py", "lang": "Python", "max_stars_repo_path": "mmdet/models/necks/extra_mask.py", "max_stars_repo_name": "w-sugar/mmdetection", "max_stars_repo_head_hexsha": "3f263c496c99827e4c90eb2e2f2be92f061f3b66", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mmdet/models/necks/extra_mask.py", "max_issues_repo_name": "w-sugar/mmdetection", "max_issues_repo_head_hexsha": "3f263c496c99827e4c90eb2e2f2be92f061f3b66", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mmdet/models/necks/extra_mask.py", "max_forks_repo_name": "w-sugar/mmdetection", "max_forks_repo_head_hexsha": "3f263c496c99827e4c90eb2e2f2be92f061f3b66", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9183098592, "max_line_length": 183, "alphanum_fraction": 0.5506315715, "include": true, "reason": "import numpy", "num_tokens": 3521}
|
import numpy as np
__all__ = ['permute']
def permute(a):
"""
Yields all unique permutations of the list `a`, in lexicographic order.
The function is based on one written by John Lettman:
TCHS Computer Information Systems. My thanks to him.
"""
a.sort() # Sort.
## Output the first input sorted.
yield list(a)
i = 0
first = 0
alen = len(a)
## "alen" could also be used for the reference to the last element.
while(True):
i = alen - 1
while(True):
i -= 1 # i--
if(a[i] < a[(i + 1)]):
j = alen - 1
while(not (a[i] < a[j])): j -= 1 # j--
a[i], a[j] = a[j], a[i] # swap(a[j], a[i])
t = a[(i + 1):alen]
t.reverse()
a[(i + 1):alen] = t
# Output current.
yield list(a)
break # next.
if(i == first):
a.reverse()
# yield list(a)
return
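# Example: unique permutations of [1, 1, 2], generated in lexicographic order.
# list(permute([1, 1, 2]))  # -> [[1, 1, 2], [1, 2, 1], [2, 1, 1]]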
|
{"hexsha": "f6ddd2f28a9a6d5c63f4144d2022f2b81953adac", "size": 1037, "ext": "py", "lang": "Python", "max_stars_repo_path": "interpolation/smolyak/util.py", "max_stars_repo_name": "gboehl/interpolation.py", "max_stars_repo_head_hexsha": "25520556804dd104c5931c8a6bedfff65420025f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2015-03-16T04:15:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T13:35:27.000Z", "max_issues_repo_path": "interpolation/smolyak/util.py", "max_issues_repo_name": "gboehl/interpolation.py", "max_issues_repo_head_hexsha": "25520556804dd104c5931c8a6bedfff65420025f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "interpolation/smolyak/util.py", "max_forks_repo_name": "gboehl/interpolation.py", "max_forks_repo_head_hexsha": "25520556804dd104c5931c8a6bedfff65420025f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-08-08T09:16:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-02T08:30:49.000Z", "avg_line_length": 20.74, "max_line_length": 71, "alphanum_fraction": 0.4349083896, "include": true, "reason": "import numpy", "num_tokens": 273}
|
import os
import random
import sys
import time
import imageio
import numpy as np
import skimage
import torch
import torchvision
from torch import nn
from torchvision import datasets, transforms
from spn.experiments.RandomSPNs_layerwise.distributions import RatNormal
from spn.experiments.RandomSPNs_layerwise.rat_spn import RatSpn, RatSpnConfig
def one_hot(vector):
result = np.zeros((vector.size, vector.max() + 1))
result[np.arange(vector.size), vector] = 1
return result
def time_delta_now(t_start: float) -> str:
"""
Convert a timestamp into a human readable timestring.
Args:
t_start (float): Timestamp.
Returns:
Human readable timestring.
"""
a = t_start
b = time.time() # current epoch time
c = b - a # seconds
days = round(c // 86400)
hours = round(c // 3600 % 24)
minutes = round(c // 60 % 60)
seconds = round(c % 60)
millisecs = round(c % 1 * 1000)
return f"{days} days, {hours} hours, {minutes} minutes, {seconds} seconds, {millisecs} milliseconds"
def count_params(model: torch.nn.Module) -> int:
"""
Count the number of parameters in a model.
Args:
model (torch.nn.Module): PyTorch model.
Returns:
int: Number of learnable parameters.
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_mnist_loaders(use_cuda, device, batch_size):
"""
Get the MNIST pytorch data loader.
Args:
use_cuda: Use cuda flag.
"""
kwargs = {"num_workers": 8, "pin_memory": True} if use_cuda else {}
test_batch_size = batch_size
transformer = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
# Train data loader
train_loader = torch.utils.data.DataLoader(
datasets.MNIST("../data", train=True, download=True, transform=transformer),
batch_size=batch_size,
shuffle=True,
**kwargs,
)
# Test data loader
test_loader = torch.utils.data.DataLoader(
datasets.MNIST("../data", train=False, transform=transformer),
batch_size=test_batch_size,
shuffle=True,
**kwargs,
)
return train_loader, test_loader
def make_spn(S, I, R, D, dropout, device) -> RatSpn:
"""Construct the RatSpn"""
# Setup RatSpnConfig
config = RatSpnConfig()
config.F = 28 ** 2
config.R = R
config.D = D
config.I = I
config.S = S
config.C = 10
config.dropout = dropout
config.leaf_base_class = RatNormal
config.leaf_base_kwargs = {}
# Construct RatSpn from config
model = RatSpn(config)
model = model.to(device)
model.train()
print("Using device:", device)
return model
def run_torch(n_epochs=100, batch_size=256):
"""Run the torch code.
Args:
n_epochs (int, optional): Number of epochs.
batch_size (int, optional): Batch size.
"""
from torch import optim
from torch import nn
assert len(sys.argv) == 2, "Usage: train.mnist cuda/cpu"
dev = sys.argv[1]
if dev == "cpu":
device = torch.device("cpu")
use_cuda = False
else:
device = torch.device("cuda:0")
use_cuda = True
torch.backends.cudnn.benchmark = True  # enable the cuDNN autotuner
model = make_spn(S=10, I=10, D=3, R=5, device=device, dropout=0.0)
model.train()
print(model)
print("Number of pytorch parameters: ", count_params(model))
# Define optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
train_loader, test_loader = get_mnist_loaders(use_cuda, batch_size=batch_size, device=device)
log_interval = 100
lmbda = 1.0
for epoch in range(n_epochs):
if epoch > 20:
# lmbda = lmbda_0 + lmbda_rel * (0.95 ** (epoch - 20))
lmbda = 0.5
t_start = time.time()
running_loss = 0.0
running_loss_ce = 0.0
running_loss_nll = 0.0
for batch_index, (data, target) in enumerate(train_loader):
# Send data to correct device
data, target = data.to(device), target.to(device)
data = data.view(data.shape[0], -1)
# Reset gradients
optimizer.zero_grad()
# Inference
output = model(data)
# Compute loss
loss_ce = loss_fn(output, target)
loss_nll = -output.sum() / (data.shape[0] * 28 ** 2)
loss = (1 - lmbda) * loss_nll + lmbda * loss_ce
# Backprop
loss.backward()
optimizer.step()
# scheduler.step()
# Log stuff
running_loss += loss.item()
running_loss_ce += loss_ce.item()
running_loss_nll += loss_nll.item()
if batch_index % log_interval == (log_interval - 1):
pred = output.argmax(1).eq(target).sum().cpu().numpy() / data.shape[0] * 100
print(
"Train Epoch: {} [{: >5}/{: <5} ({:.0f}%)]\tLoss_ce: {:.6f}\tLoss_nll: {:.6f}\tAccuracy: {:.0f}%".format(
epoch,
batch_index * len(data),
60000,
100.0 * batch_index / len(train_loader),
running_loss_ce / log_interval,
running_loss_nll / log_interval,
pred,
),
end="\r",
)
running_loss = 0.0
running_loss_ce = 0.0
running_loss_nll = 0.0
with torch.no_grad():
set_seed(0)
# samples = model.sample(n=25)
samples = model.sample(class_index=list(range(10)) * 5)
save_samples(samples, iteration=epoch)
t_delta = time_delta_now(t_start)
print("Train Epoch: {} took {}".format(epoch, t_delta))
if epoch % 5 == 4:
print("Evaluating model ...")
evaluate_model(model, device, train_loader, "Train")
evaluate_model(model, device, test_loader, "Test")
def evaluate_model(model: torch.nn.Module, device, loader, tag) -> float:
"""
Description for method evaluate_model.
Args:
model (nn.Module): PyTorch module.
device: Execution device.
loader: Data loader.
tag (str): Tag for information.
Returns:
float: Tuple of loss and accuracy.
"""
model.eval()
loss_ce = 0
loss_nll = 0
correct = 0
criterion = nn.CrossEntropyLoss(reduction="sum")
with torch.no_grad():
for data, target in loader:
data, target = data.to(device), target.to(device)
data = data.view(data.shape[0], -1)
output = model(data)
loss_ce += criterion(output, target).item() # sum up batch loss
loss_nll += -output.sum()
pred = output.argmax(dim=1)
correct += (pred == target).sum().item()
loss_ce /= len(loader.dataset)
loss_nll /= len(loader.dataset) * 28 ** 2  # average NLL per pixel, matching the training normalization
accuracy = 100.0 * correct / len(loader.dataset)
print(
"{} set: Average loss_ce: {:.4f} Average loss_nll: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format(
tag, loss_ce, loss_nll, correct, len(loader.dataset), accuracy
)
)
def ensure_dir(path: str):
"""
Ensure that a directory exists.
For 'foo/bar/baz.csv' the directories 'foo' and 'bar' will be created if not already present.
Args:
path (str): Directory path.
"""
d = os.path.dirname(path)
if not os.path.exists(d):
os.makedirs(d)
def plot_samples(x: torch.Tensor, path):
"""
Plot a single sample witht the target and prediction in the title.
Args:
x (torch.Tensor): Batch of input images. Has to be shape: [N, C, H, W].
"""
# Normalize in valid range
for i in range(x.shape[0]):
x[i, :] = (x[i, :] - x[i, :].min()) / (x[i, :].max() - x[i, :].min())
tensors = torchvision.utils.make_grid(x, nrow=10, padding=1).cpu()
arr = tensors.permute(1, 2, 0).numpy()
arr = skimage.img_as_ubyte(arr)
imageio.imwrite(path, arr)
def save_samples(samples, iteration: int):
d = "results/samples/"
ensure_dir(d)
plot_samples(samples.view(-1, 1, 28, 28), path=os.path.join(d, f"mnist-{iteration:03}.png"))
def set_seed(seed: int):
"""
Set the seed globally for python, numpy and torch.
Args:
seed (int): Seed.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
run_torch(100, 100)
|
{"hexsha": "104fd439833356053f5015a995dfaf4e2deecb36", "size": 8654, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/spn/experiments/RandomSPNs_layerwise/train_mnist.py", "max_stars_repo_name": "steven-lang/SPFlow", "max_stars_repo_head_hexsha": "be7492d4229857454b4e23596be7ba71d7af5960", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 199, "max_stars_repo_stars_event_min_datetime": "2018-11-13T10:37:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T17:07:28.000Z", "max_issues_repo_path": "src/spn/experiments/RandomSPNs_layerwise/train_mnist.py", "max_issues_repo_name": "steven-lang/SPFlow", "max_issues_repo_head_hexsha": "be7492d4229857454b4e23596be7ba71d7af5960", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2018-11-30T13:40:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-22T21:05:07.000Z", "max_forks_repo_path": "src/spn/experiments/RandomSPNs_layerwise/train_mnist.py", "max_forks_repo_name": "steven-lang/SPFlow", "max_forks_repo_head_hexsha": "be7492d4229857454b4e23596be7ba71d7af5960", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 78, "max_forks_repo_forks_event_min_datetime": "2018-11-13T10:37:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T21:34:13.000Z", "avg_line_length": 28.4671052632, "max_line_length": 125, "alphanum_fraction": 0.5812341114, "include": true, "reason": "import numpy", "num_tokens": 2215}
|
[STATEMENT]
lemma length_filter_conv_size_filter_mset: "length (filter P xs) = size (filter_mset P (mset xs))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (filter P xs) = size (filter_mset P (mset xs))
[PROOF STEP]
by (induction xs) auto
|
{"llama_tokens": 99, "file": null, "length": 1}
|
\chapter{Conclusion and Further Work}
\label{ch:5}
\section{Conclusion}
This synopsis provides a detailed description of a practical implementation of an online bidding system that provides secure key exchange and agreement. We have implemented the system for:
\begin{itemize}
\item capturing or uploading an image;
\item showing the status on the Home and Profile pages;
\item processing the image;
\item signing the image;
\item creating transactions;
\item creating a block on the server;
\item adding the block to the chain.
\end{itemize}
\section{Further Work}
The next target is to create a standalone system that will perform the following tasks:
\begin{itemize}
\item building a peer-to-peer network for Android;
\item connecting to the Online Verifier service;
\item sending and receiving data messages between peers;
\item improving the Verification Service;
\item connecting peers within the mobile application;
\item estimating risks and reconfiguring consensus protocols.
\end{itemize}
|
{"hexsha": "9f0f20b84cd9d5ff88154af2b9a7459ae45368f2", "size": 967, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/thesis/doc_src/conclusion.tex", "max_stars_repo_name": "TheScienceUniverse/BlockChain", "max_stars_repo_head_hexsha": "03c33fabfa702fad7f6e3a9de5757a2ccda2d909", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/thesis/doc_src/conclusion.tex", "max_issues_repo_name": "TheScienceUniverse/BlockChain", "max_issues_repo_head_hexsha": "03c33fabfa702fad7f6e3a9de5757a2ccda2d909", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/thesis/doc_src/conclusion.tex", "max_forks_repo_name": "TheScienceUniverse/BlockChain", "max_forks_repo_head_hexsha": "03c33fabfa702fad7f6e3a9de5757a2ccda2d909", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1923076923, "max_line_length": 181, "alphanum_fraction": 0.8179937952, "num_tokens": 204}
|
"""
Embedding pipeline
This script will take a collected list of SMILES, and generate all of the
vector embeddings and perform transformations to prepare it for analysis.
Because we're dealing with potentially large datasets, it's important to
be mindful of the amount of memory you have access to, particularly for the
K-means step! If you have memory issues, I suggest changing over to the
dask_ml versions of sklearn algorithms for this step.
"""
USE_DASK = False
import h5py
import numpy as np
import pandas as pd
from joblib import parallel_backend
from umda import smi_vec, EmbeddingModel
from dask import array as da
from dask.distributed import Client, LocalCluster
if USE_DASK:
from dask_ml.decomposition import IncrementalPCA
from dask_ml.cluster import KMeans
from dask_ml.preprocessing import StandardScaler
else:
from sklearn.decomposition import IncrementalPCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from loguru import logger
from joblib import dump
logger.add("embedding.log")
# for stochastic reproducibility
seed = 42
rng = np.random.default_rng(seed)
logger.info("Loading mol2vec model.")
m2v_model = smi_vec.load_model("../models/mol2vec_model.pkl")
# number of feature dimensions for the embedding model
embedding_dim = 300
pca_dim = 70
n_clusters = 20
n_workers = 8
h5_target = f"../data/processed/smiles_embeddings_{embedding_dim}.h5"
output_target = f"../data/processed/pipeline_embeddings_{pca_dim}.h5"
# h5_file = h5py.File(f"../data/processed/smiles_embeddings_{embedding_dim}.h5", "a")
def train_fit_model(data: np.ndarray, model, dask: bool = False, n_jobs: int = 8):
"""
This function just helps simplify the main code by handling various contexts.
If `dask` is being used, we use the dask backend for computation as well
as making sure that the result is actually computed.
"""
if dask:
backend = "dask"
else:
backend = "threading"
with parallel_backend(backend, n_jobs):
model.fit(data)
transform = model.transform(data)
if dask:
transform = transform.compute()
# if we are fitting a clustering model we grab the labels
labels = getattr(model, "labels_", None)
if dask and labels is not None:
labels = labels.compute()
return (model, transform, labels)
RERUN = True
logger.info(f"mol2vec embedding dimension size: {embedding_dim}")
logger.info(f"PCA reduced dimensionality size: {pca_dim}")
logger.info(f"Will perform vectorization? {RERUN}")
if RERUN:
logger.info("Reading in list of SMILES")
df = pd.read_pickle("../data/processed/combined_smiles.pkl.bz2")
smi_list = df["Raw"].tolist()
logger.info("Beginning vectorization of SMILES.")
with h5py.File(h5_target, "a") as embeddings_file:
for key in ["labels", "smiles", "vectors"]:
try:
del embeddings_file[key]
except KeyError:
pass
smi_vec.serial_smi_vectorization(smi_list, m2v_model, embeddings_file, vec_length=embedding_dim)
embeddings_file.create_dataset("labels", data=df["Labels"].values)
embeddings_file = h5py.File(h5_target, "r")
output_file = h5py.File(output_target, "a")
if USE_DASK:
client = Client(threads_per_worker=2, n_workers=8)
vectors = da.from_array(embeddings_file["vectors"])
else:
vectors = embeddings_file["vectors"][:]
scaler = StandardScaler()
pca_model = IncrementalPCA(n_components=pca_dim)
kmeans = KMeans(n_clusters=n_clusters, random_state=seed)
# preprocess the embeddings
vectors = scaler.fit_transform(vectors)
logger.info("Beginning PCA dimensionality reduction")
# perform PCA dimensionality reduction
# (pca_model was already instantiated above with n_components=pca_dim)
pca_model, transformed, _ = train_fit_model(vectors, pca_model, USE_DASK, n_workers)
# save both the reduced dimension vector and the full
output_file["pca"] = transformed
output_file["explained_variance"] = pca_model.explained_variance_ratio_
logger.info("Saving the trained PCA model.")
dump(pca_model, "../models/pca_model.pkl")
logger.info("Performing K-means clustering on dataset")
# (kmeans was already instantiated above with the same parameters)
kmeans, _, labels = train_fit_model(output_file["pca"], kmeans, USE_DASK, n_workers)
output_file["cluster_ids"] = labels
dump(kmeans, "../models/kmeans_model.pkl")
logger.info("Combining the models into a pipeline")
pipe = make_pipeline(scaler, pca_model, kmeans)
dump(pipe, "../models/embedding_pipeline.pkl")
output_file.close()
embeddings_file.close()
# generate a convenient wrapper for all the functionality
embedder = EmbeddingModel(m2v_model, transform=pipe)
dump(embedder, "../models/EmbeddingModel.pkl")
|
{"hexsha": "e0ff4d0a249727927cb360fed0f5203caf7ceea6", "size": 4811, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/embedding_pipeline.py", "max_stars_repo_name": "laserkelvin/umda", "max_stars_repo_head_hexsha": "ec6c77c2ffb179f221fa9914e3e07cdeb4383572", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-08-03T19:08:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T19:57:09.000Z", "max_issues_repo_path": "scripts/embedding_pipeline.py", "max_issues_repo_name": "laserkelvin/umda", "max_issues_repo_head_hexsha": "ec6c77c2ffb179f221fa9914e3e07cdeb4383572", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/embedding_pipeline.py", "max_forks_repo_name": "laserkelvin/umda", "max_forks_repo_head_hexsha": "ec6c77c2ffb179f221fa9914e3e07cdeb4383572", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4097222222, "max_line_length": 104, "alphanum_fraction": 0.7453751819, "include": true, "reason": "import numpy", "num_tokens": 1153}
|
module Get_kurtosis
export get_kurtosis
using Distributions, Statistics, Dierckx, SeisIO
"""
get_kurtosis(data::SeisChannel,kurtsis_tw_sparse::Float64; timewinlength::Float64=60)
compute kurtosis at each timewindow
# Input:
- `data::SeisData` : SeisData from SeisIO
- `kurtosis_tw_sparse::Float64` : time length of span for kurtosis time window
- `timewinlength::Float64` : time window to calculate kurtosis
kurtosis evaluation following Baillard et al.(2013)
"""
function get_kurtosis(data::SeisChannel, kurtosis_tw_sparse::Float64; timewinlength::Float64=60.0)
#convert window lengths from seconds to samples
TimeWin = trunc(Int,timewinlength * data.fs)
data.misc["kurtosis"] = fast_kurtosis_series(data.x, TimeWin, kurtosis_tw_sparse)
return data
end
"""
fast_kurtosis_series(v::RealArray, TimeWin::Int64)
fast compute kurtosis series at each timewindow
# Input:
- `v::RealArray` : SeisData from SeisIO
- `N::Int64` : time window length to calculate kurtosis
kurtosis evaluation following Baillard et al.(2013)
"""
function fast_kurtosis_series(v::Array{Float64, 1}, TN::Int64, kurtosis_tw_sparse::Float64)
    n = length(v)
    kurt = zeros(n)
    if n < TN error("Kurtosis time window is larger than data length. Decrease time window.") end
    cm2 = 0.0 # empirical 2nd centered moment (variance)
    cm4 = 0.0 # empirical 4th centered moment
# first term
Trace = @views v[1:TN]
m0 = mean(Trace)
cm2 = Statistics.varm(Trace, m0, corrected=false)
cm4 = fourthmoment(Trace, m0, corrected=false)
# fill first part with kurtosis at TN
kurt[1:TN] .= (cm4 / (cm2 * cm2)) - 3.0
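    # Slide the window one sample at a time: the mean is updated incrementally,
    # m1 = m0 - (v[k-TN+1] - v[k+1])/TN, i.e. the oldest sample leaves and the
    # newest one enters, avoiding a full recomputation of the mean at each step.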
@simd for k = TN:n-1
diff1 = @inbounds @views (v[k-TN+1] - v[k+1])/TN
m1 = m0 - diff1
Trace = @views v[k-TN+2:k+1]
cm2 = Statistics.varm(Trace, m1, corrected=false)
cm4 = fourthmoment(Trace, m1, corrected=false) #sum(xi - m)^4 / N
kurt[k+1] = (cm4 / (cm2 * cm2)) - 3.0
m0 = m1
end
return kurt
end
#---following functions are modified from Statistics.jl---#
centralizedabs4fun(m) = x -> abs2.(abs2.(x - m))
centralize_sumabs4(A::AbstractArray, m) =
mapreduce(centralizedabs4fun(m), +, A)
centralize_sumabs4(A::AbstractArray, m, ifirst::Int, ilast::Int) =
Base.mapreduce_impl(centralizedabs4fun(m), +, A, ifirst, ilast)
function centralize_sumabs4!(R::AbstractArray{S}, A::AbstractArray, means::AbstractArray) where S
# following the implementation of _mapreducedim! at base/reducedim.jl
lsiz = Base.check_reducedims(R,A)
isempty(R) || fill!(R, zero(S))
isempty(A) && return R
if Base.has_fast_linear_indexing(A) && lsiz > 16 && !has_offset_axes(R, means)
nslices = div(length(A), lsiz)
ibase = first(LinearIndices(A))-1
for i = 1:nslices
@inbounds R[i] = centralize_sumabs4(A, means[i], ibase+1, ibase+lsiz)
ibase += lsiz
end
return R
end
indsAt, indsRt = Base.safe_tail(axes(A)), Base.safe_tail(axes(R)) # handle d=1 manually
keep, Idefault = Broadcast.shapeindexer(indsRt)
if Base.reducedim1(R, A)
i1 = first(Base.axes1(R))
@inbounds for IA in CartesianIndices(indsAt)
IR = Broadcast.newindex(IA, keep, Idefault)
r = R[i1,IR]
m = means[i1,IR]
@simd for i in axes(A, 1)
r += abs2(abs2(A[i,IA] - m))
end
R[i1,IR] = r
end
else
@inbounds for IA in CartesianIndices(indsAt)
IR = Broadcast.newindex(IA, keep, Idefault)
@simd for i in axes(A, 1)
R[i,IR] += abs2(abs2(A[i,IA] - means[i,IR]))
end
end
end
return R
end
function fourthmoment!(R::AbstractArray{S}, A::AbstractArray, m::AbstractArray; corrected::Bool=true) where S
if isempty(A)
fill!(R, convert(S, NaN))
else
rn = div(length(A), length(R)) - Int(corrected)
centralize_sumabs4!(R, A, m)
R .= R .* (1 // rn)
end
return R
end
"""
fourthmoment(v, m; dims, corrected::Bool=true)
Compute the fourth central moment of a collection `v` with known mean(s) `m`,
optionally over the given dimensions. `m` may contain means for each dimension of
`v`. If `corrected` is `true`, then the sum is scaled with `n-1`,
whereas the sum is scaled with `n` if `corrected` is `false` where `n = length(v)`.
!!! note
If array contains `NaN` or [`missing`](@ref) values, the result is also
`NaN` or `missing` (`missing` takes precedence if array contains both).
Use the [`skipmissing`](@ref) function to omit `missing` entries and compute the
variance of non-missing values.
"""
fourthmoment(A::AbstractArray, m::AbstractArray; corrected::Bool=true, dims=:) = _fourthmoment(A, m, corrected, dims)
_fourthmoment(A::AbstractArray{T}, m, corrected::Bool, region) where {T} =
fourthmoment!(Base.reducedim_init(t -> abs2(t)/2, +, A, region), A, m; corrected=corrected)
fourthmoment(A::AbstractArray, m; corrected::Bool=true) = _fourthmoment(A, m, corrected, :)
function _fourthmoment(A::AbstractArray{T}, m, corrected::Bool, ::Colon) where T
n = length(A)
n == 0 && return oftype((abs2(zero(T)) + abs2(zero(T)))/2, NaN)
return centralize_sumabs4(A, m) / (n - Int(corrected))
end
end
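# Usage sketch (illustrative; assumes `S` is a SeisChannel obtained via SeisIO,
# e.g. with read_data, and that this module has been loaded):
#   using SeisIO
#   S = read_data("sac", "example.sac")[1]
#   S = get_kurtosis(S, 3.0, timewinlength=60.0)
#   S.misc["kurtosis"]  # kurtosis series, same length as S.x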
|
{"hexsha": "fc80cd9bf640b6d7e4c305debcbf7c734c5db5be", "size": 6245, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/get_kurtosis.jl", "max_stars_repo_name": "jaredbryan881/SeisRemoveEQ.jl", "max_stars_repo_head_hexsha": "589d8d5ff07963b75341db50f9365b98944e83d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-18T00:12:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-18T00:12:30.000Z", "max_issues_repo_path": "src/get_kurtosis.jl", "max_issues_repo_name": "jaredbryan881/SeisRemoveEQ.jl", "max_issues_repo_head_hexsha": "589d8d5ff07963b75341db50f9365b98944e83d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/get_kurtosis.jl", "max_forks_repo_name": "jaredbryan881/SeisRemoveEQ.jl", "max_forks_repo_head_hexsha": "589d8d5ff07963b75341db50f9365b98944e83d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0423280423, "max_line_length": 117, "alphanum_fraction": 0.6293034428, "num_tokens": 1982}
|
#=
Each complex should have collection of cells per dimension:
- cells::Dict{Int,Vector{C}} or Vector{Vector{C}}
=#
abstract type AbstractComplex end
#
# AbstractComplex Public Interface
#
"""Return a complex boundary given element and dimension"""
boundary(cplx::AbstractComplex, i::Integer, d::Int, ::Type{PID}) where {PID} =
throw(MethodError(boundary, (typeof(cplx),Int,Int,PID)))
boundary(cplx::AbstractComplex, i::Integer, d::Int) = boundary(cplx, i, d, Int)
boundary(cplx::AbstractComplex, cell::AbstractCell, ::Type{PID}) where {PID} =
boundary(cplx, hash(cell), dim(cell), PID)
boundary(cplx::AbstractComplex, cell::AbstractCell) = boundary(cplx, hash(cell), dim(cell), Int)
"""Return a complex coboundary given element and dimension"""
coboundary(cplx::AbstractComplex, i::Integer, d::Int, ::Type{PID}) where {PID} =
throw(MethodError(coboundary, (typeof(cplx),Int,Int,PID)))
coboundary(cplx::AbstractComplex, i::Integer, d::Int) = coboundary(cplx, i, d, Int)
coboundary(cplx::AbstractComplex, cell::AbstractCell, ::Type{PID}) where {PID} =
coboundary(cplx, hash(cell), dim(cell), PID)
coboundary(cplx::AbstractComplex, cell::AbstractCell) = coboundary(cplx, hash(cell), dim(cell), Int)
"""
faces(cplx::AbstractComplex, cellidx::Integer) -> Vector{Integer}
Return an index collection of faces of a cell element with an identifier `cellidx` in the complex `cplx`.
"""
faces(cplx::AbstractComplex, cellidx::Integer) = throw(MethodError(faces, (typeof(cplx),Integer)))
"""
cofaces(cplx::AbstractComplex, cellidx::Integer) -> Vector{Integer}
Return an index collection of cofaces of a cell element with an identifier `cellidx` in the complex `cplx`.
"""
cofaces(cplx::AbstractComplex, cellidx::Integer) = throw(MethodError(cofaces, (typeof(cplx),Integer)))
"""
Return a complex cell type
"""
eltype(cplx::AbstractComplex) = throw(MethodError(eltype, (typeof(cplx),)))
"""
cells(cplx::AbstractComplex) -> Dict{Int, AbstractVector{AbstractCell}}
Return a cell collection per dimension (increasing)
"""
cells(cplx::AbstractComplex) = throw(MethodError(cells, (typeof(cplx),)))
"""
cells(cplx::AbstractComplex, d::Int) -> AbstractVector{AbstractCell}
Return a cell collection for the dimension `d`.
"""
cells(cplx::AbstractComplex, d::Int) = throw(MethodError(cells, (typeof(cplx),Int)))
"""
push!(cplx::AbstractComplex, cell::AbstractCell; recursive=false) -> Vector{AbstractCell}
Insert a `cell` to a complex `cplx`, and returns an array of inserted cell(s). If `recursive=true` is passed then all faces of the `cell` are also added to the complex `cplx`.
"""
push!(cplx::AbstractComplex, c::AbstractCell; recursive=false) =
throw(MethodError(push!, (typeof(cplx),typeof(c))))
#
# Public Methods
#
"""Return a number of cells per dimension"""
size(cplx::AbstractComplex) = (map(length, cells(cplx))...,)
"""Return a dimension of the complex"""
dim(cplx::AbstractComplex) = length(size(cplx))-1
"""Return a total number of cells in the complex"""
length(cplx::AbstractComplex) = sum(size(cplx))
"""Return a number of the cell in the complex of a dimension `d` (0-based)"""
function size(cplx::AbstractComplex, d::Int)
sz = size(cplx)
szlen = length(sz)
(d < 0 || d >= szlen) && return 0
return sz[d+1]
end
"""
    position(complex, index, dimension)
Return the position of the cell in the order of cells of the same dimension of the `complex` given its `index` and `dimension`.
"""
function position(cplx::AbstractComplex, idx::Integer, d::Int)
dcells = cells(cplx, d)
length(dcells) == 0 && return 0
cidx = findfirst(c->hash(c) == idx, dcells)
return cidx === nothing ? 0 : cidx
end
"""
position(complex, cell)
Return the position of the `cell` in the order of cells of the same dimension of the `complex`.
"""
position(cplx::AbstractComplex, c::AbstractCell) = position(cplx, hash(c), dim(c))
"""
cplx[idx, d]
Return a `d`-dimensional cell given its index `idx` and dimension `d`.
"""
function getindex(cplx::AbstractComplex, idx::Integer, d::Int)
cidx = position(cplx, idx, d)
cidx == 0 && return nothing
return cells(cplx, d)[cidx]
end
"""
boundary(cplx, ch)
Return the chain `ch` boundary in the complex `cplx`.
"""
function boundary(cplx::AbstractComplex, ch::Chain{IX,R}) where {R, IX<:Integer}
d = dim(ch)
cc = Chain(d-1, IX, R)
for (elem, coef) in ch
append!(cc, coef * boundary(R, cplx[elem, d]))
end
return simplify(cc)
end
"""
coboundary(cplx, ch)
Return the chain `ch` coboundary in the complex `cplx`.
"""
function coboundary(cplx::AbstractComplex, ch::Chain{IX,R}) where {R, IX<:Integer}
d = dim(ch)
cc = Chain(d+1, IX, R)
# δₙ₋₁(c)(σ) = c(∂ₙ(σ))
for (elem, coef) in ch
append!(cc, coef * coboundary(cplx, elem, d, R))
end
return simplify(cc)
end
"""
boundary(complex, d, PID)
Generate a boundary matrix from the cell `complex` of dimension `d` using coefficients in `PID`.
"""
function boundary(cplx::AbstractComplex, d::Int, ::Type{PID}) where {PID}
csize = size(cplx)
rows = d > 0 ? csize[d] : 0
cols = d <= dim(cplx) ? csize[d+1] : 0
bm = spzeros(PID, rows, cols)
if d>=0 && d <= dim(cplx)
for c in cells(cplx, d)
i = position(cplx, hash(c), d)
for (elem, coef) in boundary(PID, c)
j = position(cplx, elem, d-1)
bm[j, i] = coef
end
end
end
return bm
end
"""
cell in cplx
Checks if the `cell` is in the complex `cplx`
"""
in(c::AbstractCell, cplx::AbstractComplex) = position(cplx, c) > 0
"""
cochain(complex, d, coefficients)
Return a cochain of the dimension `d` for the `complex` with PID `coefficients`.
"""
function cochain(cplx::AbstractComplex, d::Int, coefs::Vector{PID}) where {PID}
cs = cells(cplx, d)
@assert length(cs) == length(coefs) "Number of coefficients must match number of cells of dimension $d"
return Chain(d, coefs, map(c->hash(c), cs))
end
cochain(cplx::AbstractComplex, d::Int, coef::PID) where {PID} =
cochain(cplx, d, fill(coef, size(cplx, d)))
"""
adjacency_matrix(cplx, [T=Int])
Construct an adjacency matrix of type `T` from a 1-skeleton (1D subcomplex) of the complex `cplx`.
"""
function adjacency_matrix(cplx::AbstractComplex, ::Type{T}) where {T<:Real}
C0 = map(hash, cells(cplx, 0))
N = length(C0)
adj = spzeros(T,N,N)
for c in cells(cplx, 1)
i, j = map(h->findfirst(isequal(h), C0), vertices(c))
adj[i, j] = adj[j, i] = one(T)
end
return adj
end
adjacency_matrix(cplx::AbstractComplex) = adjacency_matrix(cplx, Int)
"""
showchain(cplx::AbstractComplex, ch::AbstractChain)
Display the chain `ch` with elements from the complex `cplx`.
"""
function showchain(cplx::AbstractComplex, ch::AbstractChain)
d = dim(ch)
R = valtype(ch)
pos = one(R)
print("[$d]: ")
if iszero(ch)
print("0")
else
for (i,(id,v)) in enumerate(ch.cells)
val = abs(v)
if sign(v) == pos
i != 1 && print(" + ")
else
print(" - ")
end
splx = strip(repr("text/plain", cplx[id, d]), ['\"'])
print("$val⋅$splx")
end
end
end
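# Hypothetical usage sketch (assumes a concrete AbstractComplex implementation,
# e.g. a simplicial complex type defined elsewhere in the package; only the
# interface defined in this file is exercised):
#   cplx = ...                   # any AbstractComplex subtype
#   dim(cplx)                    # dimension of the complex
#   size(cplx, 1)                # number of 1-cells
#   B1 = boundary(cplx, 1, Int)  # sparse boundary matrix for d = 1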
|
{"hexsha": "b09b5aaabaecc15c6ff4d4f7d641b126954a819a", "size": 7282, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/complex.jl", "max_stars_repo_name": "wildart/ComputationalHomology.jl", "max_stars_repo_head_hexsha": "ccf8e4b2aa133d795a76da1197858c7e9f68b261", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-10-31T10:31:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T03:28:14.000Z", "max_issues_repo_path": "src/complex.jl", "max_issues_repo_name": "wildart/ComputationalHomology.jl", "max_issues_repo_head_hexsha": "ccf8e4b2aa133d795a76da1197858c7e9f68b261", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-01-24T16:16:22.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-30T03:12:59.000Z", "max_forks_repo_path": "src/complex.jl", "max_forks_repo_name": "wildart/ComputationalHomology.jl", "max_forks_repo_head_hexsha": "ccf8e4b2aa133d795a76da1197858c7e9f68b261", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-03-08T03:15:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T02:46:59.000Z", "avg_line_length": 31.5238095238, "max_line_length": 175, "alphanum_fraction": 0.6520186762, "num_tokens": 2081}
|
import os
import numpy as np
import pandas as pd
from tqdm import tqdm as tqdmn
from time import time
import multiprocessing
from joblib import Parallel, delayed
import csv
import geopandas as gpd
import sys
import argparse
import pickle
from sklearn.metrics import confusion_matrix, classification_report
from skimage import io
from sklearn.model_selection import StratifiedKFold
from scipy.special import binom
import keras
from keras.layers import Dense , GlobalAveragePooling2D, Concatenate, Input, Lambda, Multiply
from keras import backend as K
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard, CSVLogger
from keras.models import Model, load_model
from keras import metrics
from efficientnet.keras import EfficientNetB0 as EfficientNet
from aerial_training_utils import generate_full_idINSPIRE, my_preprocessor, fmeasure,recall,precision, fbeta_score
# Global paths
DATA_BASE_DIR = "../data/"
OUTPUT_BASE_DIR = "../results/"
AERIAL_DIR = DATA_BASE_DIR + "aerial_data/"
CENSUS_DIR = DATA_BASE_DIR + 'census_data/'
UA_DIR = DATA_BASE_DIR + "UA_data/"
IMG_OUTPUT_DIR = OUTPUT_BASE_DIR + "imagery_out/"
MODEL_OUTPUT_DIR = OUTPUT_BASE_DIR + "model_data/"
MAX_NB_JOBS = 1 #min(multiprocessing.cpu_count(),40) # Change to speed up extraction in case multicore architecture is available
# Argument Parsing
parser = argparse.ArgumentParser()
parser.add_argument('-city','--city',
help = 'City to study', type = str, default='Paris')
parser.add_argument('-lr','--learning_rate',
help = 'Learning Rate', type=float, default=8e-5)
parser.add_argument('-epochs','--num_epochs',
help = '# Epochs', type=int, default=3)
parser.add_argument('-spe','--samples_per_epoch',
help = '# Samples Per Epoch', type=int, default=100)
parser.add_argument('-lr_pat','--learning_rate_patience',
help = '# Epochs to wait to diminish the lr', type=int,default=2)
parser.add_argument('-lr_dec','--learning_rate_decay',
help = 'Decay with which to diminish the lr', type=float,default=.25)
parser.add_argument('-cv','--cv_folds',
help = '# folds for cross-validation', type=int,default=2)
# Global variables
args = parser.parse_args()
city = args.city
PATIENCE_BEFORE_LOWERING_LR = args.learning_rate_patience
MAX_EPOCH = args.num_epochs
INITIAL_LR = args.learning_rate
CV_FOLDS = args.cv_folds
NB_SAMPLES_EPOCHS = args.samples_per_epoch
LR_DECAY = args.learning_rate_decay
NB_SES_CLASSES = 5
PATIENCE_BEFORE_STOPPING = 25
TRAIN_TEST_FRAC = .8
VAL_SPLIT = .25
BATCH_SIZE = 10
IMG_SIZE = (800, 800)
INPUT_SHAPE = (IMG_SIZE[0], IMG_SIZE[1], 3)
CPU_COUNT = multiprocessing.cpu_count()
CPU_FRAC = .7
CPU_USE = int(CPU_FRAC*CPU_COUNT)
# Create log directory
if not os.path.isdir(MODEL_OUTPUT_DIR):
os.mkdir(MODEL_OUTPUT_DIR);
os.mkdir(MODEL_OUTPUT_DIR+"logs/");
# Generate Income Classes
full_im_df_ua = generate_full_idINSPIRE(UA_DIR, AERIAL_DIR, CENSUS_DIR, IMG_OUTPUT_DIR)
city_assoc = pd.read_csv(AERIAL_DIR + "city_assoc.csv")
full_im_df_ua = pd.merge(full_im_df_ua,city_assoc,on="idINSPIRE");
full_im_df_ua = full_im_df_ua[full_im_df_ua.FUA_NAME == city]
## Define Income Percentiles
val_min = lambda x : np.percentile(x,0)
val_per20 = lambda x : np.percentile(x,20)
val_per40 = lambda x : np.percentile(x,40)
val_per60 = lambda x : np.percentile(x,60)
val_per80 = lambda x : np.percentile(x,80)
val_max = lambda x : np.percentile(x,100)
val_min.__name__ = 'qmin'
val_per20.__name__ = 'q20'
val_per40.__name__ = 'q40'
val_per60.__name__ = 'q60'
val_per80.__name__ = 'q80'
val_max.__name__ = 'qmax'
ses_city_intervals = full_im_df_ua.groupby("FUA_NAME")[["income"]].agg(
[val_min,val_per20,val_per40,val_per60,val_per80,val_max]
)
# Generate Income classes for each city
df_cities = []
for city in list(ses_city_intervals.index):
    city_df_new = full_im_df_ua[full_im_df_ua.FUA_NAME==city].copy()
    city_df_new.dropna(subset=["income"],inplace=True)
    income = city_df_new.income
    class_thresholds = ses_city_intervals.loc[city]["income"].values
x_to_class = np.digitize(income,class_thresholds)
x_to_class[x_to_class==np.max(x_to_class)] = NB_SES_CLASSES
city_df_new["treated_citywise_income"] = [ str(y-1) for y in x_to_class ]
df_cities.append(city_df_new)
full_im_df_ua = gpd.GeoDataFrame(pd.concat(df_cities,axis=0),
crs=full_im_df_ua.crs).sort_index()
# Generating Stratified k-fold Generators
full_im_df_ua = full_im_df_ua.sample(frac=1).reset_index(drop=True)
skf = StratifiedKFold(n_splits=CV_FOLDS)
for fold_id,(train_index, test_index) in enumerate(
skf.split(full_im_df_ua.path2im,full_im_df_ua.treated_citywise_income)):
#
print("City: {} Fold {}/{}".format(city,fold_id+1,CV_FOLDS))
train_im_df = full_im_df_ua.iloc[train_index]
test_im_df = full_im_df_ua.iloc[test_index]
train_test_split = train_im_df.shape[0]
train_image_count = int(train_test_split*(1-VAL_SPLIT))
val_image_count = int(train_test_split*VAL_SPLIT)
test_image_count = test_im_df.shape[0]
train_datagen = ImageDataGenerator(preprocessing_function=my_preprocessor,
horizontal_flip=True,validation_split=VAL_SPLIT,
vertical_flip=True)
test_datagen = ImageDataGenerator(preprocessing_function=my_preprocessor)
train_generator = train_datagen.flow_from_dataframe(
train_im_df,
directory=IMG_OUTPUT_DIR,
x_col="path2im",
y_col="treated_citywise_income",
target_size=IMG_SIZE,
color_mode ="rgb",
shuffle=True,
batch_size=BATCH_SIZE,
interpolation="bicubic",
subset="training",
class_mode='categorical')
val_generator = train_datagen.flow_from_dataframe(
dataframe=train_im_df,
directory=IMG_OUTPUT_DIR,
x_col="path2im",
y_col="treated_citywise_income",
target_size=IMG_SIZE,
color_mode ="rgb",
shuffle=True,
batch_size=BATCH_SIZE,
interpolation="bicubic",
subset="validation",
class_mode='categorical')
test_generator = test_datagen.flow_from_dataframe(
dataframe=test_im_df,
directory=IMG_OUTPUT_DIR,
x_col="path2im",
y_col="treated_citywise_income",
target_size=IMG_SIZE,
color_mode ="rgb",
shuffle=False,
batch_size=1,
interpolation="bicubic",
class_mode='categorical')
# Model Definition
base_model = EfficientNet(weights='imagenet',
include_top=False,input_shape=INPUT_SHAPE,)
x=GlobalAveragePooling2D()(base_model.output)
ses_predictions = Dense(1, activation='sigmoid',name="ses_output")(x)
binomized_ses_pred = Concatenate(axis=-1)(Lambda(lambda x:[
Multiply()([binom(NB_SES_CLASSES-1, k)*K.pow(x,k),K.pow(1-x,NB_SES_CLASSES-1-k)])
for k in range(NB_SES_CLASSES)])(ses_predictions))
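    # The single sigmoid output p is expanded into NB_SES_CLASSES ordinal class
    # probabilities via the binomial pmf, P(k) = C(N-1,k) * p^k * (1-p)^(N-1-k)
    # for k = 0..N-1, so adjacent income classes receive correlated mass.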
# Model Compilation
model = Model(inputs=base_model.input,outputs=binomized_ses_pred)
model.compile(optimizer=Adam(lr=INITIAL_LR), loss="categorical_crossentropy",
metrics=[fmeasure,recall,precision])
model_checkpoint = ModelCheckpoint(MODEL_OUTPUT_DIR +\
"fold_{}-lastbest-0.hdf5".format(fold_id),
verbose=1, save_best_only=True)
early_stopping = EarlyStopping(patience=PATIENCE_BEFORE_STOPPING,
restore_best_weights=True)
tensorboard = TensorBoard(log_dir=MODEL_OUTPUT_DIR+\
"logs/fold_{}-{}".format(fold_id,time()),
histogram_freq=0, write_graph=False, write_images=False,
update_freq = 10)
reduce_lr = ReduceLROnPlateau('loss', factor=0.25,
patience=PATIENCE_BEFORE_LOWERING_LR, min_lr=1e-8)
csv_logger = CSVLogger(MODEL_OUTPUT_DIR +\
"fold_{}-training_metrics.csv".format(fold_id))
# Model Training
global_epoch = 0
restarts = 0
last_best_losses = []
last_best_epochs = []
while global_epoch < MAX_EPOCH:
history = model.fit_generator(
generator=train_generator,
steps_per_epoch = NB_SAMPLES_EPOCHS,
epochs=MAX_EPOCH - global_epoch,
validation_data=val_generator,
validation_steps = 100,
workers=10,
verbose=1,
callbacks=[tensorboard, model_checkpoint,
early_stopping, reduce_lr, csv_logger],
shuffle=True
)
del history.model
pickle.dump(history,
open(MODEL_OUTPUT_DIR +\
"fold_{}-lastbest_history-{}.p".format(fold_id,restarts),"wb"))
last_best_losses.append(min(history.history['val_loss']))
last_best_local_epoch = history.history['val_loss']\
.index(min(history.history['val_loss']))
last_best_epochs.append(global_epoch + last_best_local_epoch)
if early_stopping.stopped_epoch == 0:
print("Completed training after {} epochs.".format(MAX_EPOCH))
break
else:
global_epoch = global_epoch +\
early_stopping.stopped_epoch - PATIENCE_BEFORE_STOPPING + 1
print("Early stopping triggered after local epoch {} \
(global epoch {}).".format( early_stopping.stopped_epoch, global_epoch))
print("Restarting from last best val_loss at local epoch {} \
(global epoch {}).".format(early_stopping.stopped_epoch -
PATIENCE_BEFORE_STOPPING, global_epoch
- PATIENCE_BEFORE_STOPPING))
restarts = restarts + 1
model.compile(optimizer=Adam(lr=INITIAL_LR/ 2 ** restarts),
loss="categorical_crossentropy",
metrics=[fmeasure,recall,precision])
model_checkpoint = ModelCheckpoint(MODEL_OUTPUT_DIR +\
"fold_{}-lastbest{}.hdf5".format(fold_id,restarts),
monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
# Save last best model info
with open(MODEL_OUTPUT_DIR +\
"fold_{}-last_best_models.csv".format(fold_id), 'w', newline='') as file:
writer = csv.writer(file, delimiter=',')
writer.writerow(['Model file', 'Global epoch', 'Validation loss'])
for i in range(restarts + 1):
writer.writerow(["fold_{}-lastbest-{}.hdf5".format(fold_id,i),
last_best_epochs[i], last_best_losses[i]])
# Load the last best model
dic_load_model = {
"precision":precision,
"recall":recall,
"fbeta_score":fbeta_score,
"fmeasure":fmeasure,
"binom":binom,
"Multiply":Multiply,
"Concatenate":Concatenate,
"Lambda":Lambda,
"NB_SES_CLASSES":NB_SES_CLASSES,
}
model = load_model(
MODEL_OUTPUT_DIR + "fold_{}-lastbest-{}.hdf5".format(
fold_id,last_best_losses.index(min(last_best_losses))),
custom_objects=dic_load_model)
# Evaluate model on test subset for kth fold
ses_predictions = model.predict_generator(test_generator,test_image_count,
workers=10, verbose=1)
y_true_ses = test_generator.classes
y_pred_ses = np.argmax(ses_predictions, axis=1)
# Generate and print classification metrics and confusion matrix
print(classification_report(y_true_ses, y_pred_ses))
ses_report = classification_report(y_true_ses, y_pred_ses, output_dict=True)
with open(MODEL_OUTPUT_DIR + 'fold_{}-ses_classification_report.csv'.format(fold_id),
'w') as f:
for key in ses_report.keys():
f.write("%s,%s\n" % (key, ses_report[key]))
ses_conf_arr = confusion_matrix(y_true_ses, y_pred_ses)
print(ses_conf_arr)
np.savetxt(MODEL_OUTPUT_DIR + "fold_{}-ses_confusion_matrix.csv".format(fold_id),
ses_conf_arr, delimiter=",")
# Clear model from GPU after each iteration
K.clear_session()
|
{"hexsha": "77fc71d89af89b708b36d32b4b4b327d5fec26f4", "size": 12651, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/efficientnet_training.py", "max_stars_repo_name": "lisette-espin/SESEfficientCAM", "max_stars_repo_head_hexsha": "54dcce7abf7395ca8c017620541204d667da1b1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-04-14T08:25:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T07:12:12.000Z", "max_issues_repo_path": "code/efficientnet_training.py", "max_issues_repo_name": "lisette-espin/SESEfficientCAM", "max_issues_repo_head_hexsha": "54dcce7abf7395ca8c017620541204d667da1b1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/efficientnet_training.py", "max_forks_repo_name": "lisette-espin/SESEfficientCAM", "max_forks_repo_head_hexsha": "54dcce7abf7395ca8c017620541204d667da1b1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-04-14T08:24:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T12:37:42.000Z", "avg_line_length": 40.6784565916, "max_line_length": 128, "alphanum_fraction": 0.6608173267, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2963}
|
(* A carrier type for regular predicates. *)
From larith Require Import A_setup B1_utils C2_order C1_norm D1_automaton.
Section A_regular_predicate.
Variable letter : Set.
Variable P : list letter -> Prop.
(* P is regular iff its domain can be decided using a finite automaton. *)
(* An optional proof of determinism may be provided (for optimization). *)
(* We also request that the automaton states are comparable. *)
Record regular := Regular {
r_fsa : automaton letter;
r_size : nat;
r_finite : Finite r_fsa r_size;
r_spec : ∀w, Language r_fsa w <-> P w;
r_det : option (Deterministic r_fsa);
r_cmp : state r_fsa -> state r_fsa -> comparison;
r_ord : Order r_cmp;
}.
(* Regular predicates over a finite alphabet can be decided. *)
Variable alphabet : list letter.
Hypothesis full_alphabet : ∀c, In c alphabet.
Hypothesis is_regular : regular.
Theorem regular_dec :
(Σ w, P w) + {∀w, ¬P w}.
Proof.
destruct is_regular as [A size fin spec _ cmp ord].
edestruct Language_inhabited_dec with (A:=A).
apply full_alphabet. eapply cmp_dec, ord. apply fin.
- left; destruct s as [w H]; exists w; apply spec, H.
- right; intros; rewrite <-spec; apply n.
Defined.
End A_regular_predicate.
Arguments regular {_}.
(* Replace predicate with an equivalent one. *)
Theorem regular_ext {letter : Set} (P Q : list letter -> Prop) :
regular P -> (∀w, P w <-> Q w) -> regular Q.
Proof.
intros [A size fin spec det cmp ord] H.
eapply Regular with (r_fsa:=A). apply fin.
intros; rewrite <-H; apply spec. apply det. apply ord.
Defined.
(* Change the alphabet. *)
Theorem regular_proj {letter letter' : Set} P Q (pr : letter' -> letter) :
regular P -> (∀w, P (map pr w) <-> Q w) -> regular Q.
Proof.
intros [A size fin spec det cmp ord] H.
pose(B := Automata.proj _ A _ (λ c, [pr c])).
eapply Regular with (r_fsa:=B).
- apply fin.
- intros. rewrite <-H, <-spec.
unfold B; rewrite Automata.proj_spec.
unfold Automata.Image; rewrite map_map_singleton. split.
+ intros [v [H1 H2]]. apply Forall2_In_singleton in H1; congruence.
+ intros Hfw; exists (map pr w); split.
now apply Forall2_In_singleton. easy.
- destruct det.
+ apply Some, Automata.proj_det; [apply d|easy].
+ apply None.
- apply ord.
Defined.
Section Closure_under_logical_operations.
Variable letter : Set.
Variable P Q : list letter -> Prop.
Theorem regular_conjunction :
regular P -> regular Q -> regular (λ w, P w /\ Q w).
Proof.
intros [A sizeA finA specA detA cmpA ordA];
intros [B sizeB finB specB detB cmpB ordB].
eapply Regular with (r_fsa:=Automata.prod _ A B)(r_cmp:=cmp_pair _ _ cmpA cmpB).
- apply Automata.prod_size. apply finA. apply finB.
- intros; rewrite Automata.prod_spec, specA, specB; reflexivity.
- destruct detA, detB. apply Some, Automata.prod_det; easy. all: apply None.
- apply Order_pair; easy.
Defined.
Theorem regular_negation :
regular P -> regular (λ w, ¬P w).
Proof.
intros [A size fin spec [det|] cmp ord].
- (* A is deterministic. *)
eapply Regular with (r_fsa:=Automata.compl _ A).
+ apply fin.
+ intros; etransitivity.
apply Automata.compl_spec, det.
split; apply contra, spec.
+ apply Some, det.
+ apply ord.
- (* A is not deterministic; use the powerset construction. *)
pose(leb := cmp_leb _ cmp);
assert(Hleb := Linear_order_cmp_leb _ cmp ord);
assert(dec := cmp_dec cmp ord).
eapply Regular with (r_fsa:=Automata.compl _ (Automata.pow _ A leb Hleb)).
+ apply Automata.pow_size. apply dec. apply fin.
+ intros; etransitivity.
apply Automata.compl_spec, Automata.pow_det.
rewrite Automata.pow_spec.
split; apply contra, spec.
+ apply Some, Automata.pow_det.
+ eapply Order_sig. apply Order_list, ord.
apply Automata.pow_state_pir, dec.
Defined.
End Closure_under_logical_operations.
|
{"author": "bergwerf", "repo": "linear_integer_arithmetic", "sha": "123b0b02accfbbc3407033b43d74fac5288bf073", "save_path": "github-repos/coq/bergwerf-linear_integer_arithmetic", "path": "github-repos/coq/bergwerf-linear_integer_arithmetic/linear_integer_arithmetic-123b0b02accfbbc3407033b43d74fac5288bf073/D2_regular.v"}
|
from keras.applications.resnet50 import ResNet50, preprocess_input as res_preprocess_input
from keras.preprocessing import image
import cv2
from keras.applications.inception_v3 import InceptionV3, preprocess_input as incep_preprocess_input
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dense
from keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
dog_names = [item[20:-1] for item in sorted(glob("dogImages/train/*/"))]
def face_detector(img_path):
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray)
return len(faces) > 0
# define ResNet50 model
ResNet50_model = ResNet50(weights='imagenet')
def path_to_tensor(img_path):
# loads RGB image as PIL.Image.Image type
img = image.load_img(img_path, target_size=(224, 224))
# convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
x = image.img_to_array(img)
# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
return np.expand_dims(x, axis=0)
def ResNet50_predict_labels(img_path):
# returns prediction vector for image located at img_path
img = res_preprocess_input(path_to_tensor(img_path))
return np.argmax(ResNet50_model.predict(img))
def dog_detector(img_path):
prediction = ResNet50_predict_labels(img_path)
return ((prediction <= 268) & (prediction >= 151))
bottleneck_features = np.load('bottleneck_features/DogInceptionV3Data.npz')
train_inception = bottleneck_features['train']
inception_model = Sequential()
inception_model.add(GlobalAveragePooling2D(input_shape=train_inception.shape[1:]))
inception_model.add(Dense(64, activation='relu'))
inception_model.add(Dense(133, activation='softmax'))
inception_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
inception_model.load_weights('saved_models/weights.best.inceptionV3.hdf5')
# extract bottleneck features
# build the InceptionV3 feature extractor once, instead of on every call
inception_base = InceptionV3(weights='imagenet', include_top=False)
def incept_predict_breed(img_path):
    bottleneck_feature = inception_base.predict(incep_preprocess_input(path_to_tensor(img_path)))
    predicted_vector = inception_model.predict(bottleneck_feature)
    # return dog breed that is predicted by the model
    return dog_names[np.argmax(predicted_vector)]
def dog_or_human(img_path):
if face_detector(img_path):
print('You look like a human')
elif dog_detector(img_path):
print('You look like a dog!')
breed=incept_predict_breed(img_path)
print('Specifically, you look like a',breed.replace('_',' '))
else:
print('I don\'t know what you are...')
plt.imshow(mpimg.imread(img_path))
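# Example usage (hypothetical image path):
# dog_or_human('images/sample_dog.jpg')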
|
{"hexsha": "2571dcfe59327cd9e91a18630f4b06d156216b7c", "size": 2999, "ext": "py", "lang": "Python", "max_stars_repo_path": "face_dog.py", "max_stars_repo_name": "mccormd1/Dog_Breed_CNN", "max_stars_repo_head_hexsha": "f915dc555075140ebe83ba38e737e5d8ac2a3e6a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "face_dog.py", "max_issues_repo_name": "mccormd1/Dog_Breed_CNN", "max_issues_repo_head_hexsha": "f915dc555075140ebe83ba38e737e5d8ac2a3e6a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "face_dog.py", "max_forks_repo_name": "mccormd1/Dog_Breed_CNN", "max_forks_repo_head_hexsha": "f915dc555075140ebe83ba38e737e5d8ac2a3e6a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4487179487, "max_line_length": 133, "alphanum_fraction": 0.7539179727, "include": true, "reason": "import numpy", "num_tokens": 718}
|
import networkx as nx
# Main
G = nx.DiGraph()
G.add_edges_from([("IDLE", "Ruch do ladunku"),
("Ruch do ladunku", "Zgloszenie problemu"),
("Ruch do ladunku", "Zaladowanie ladunku"),
("Zgloszenie problemu", "IDLE"),
("Zaladowanie ladunku", "Ruch do magazynu"),
("Ruch do magazynu", "Odlozenie ladunku"),
("Ruch do magazynu", "Czekanie na zwolnienie miejsca"),
("Odlozenie ladunku", "IDLE"),
("Czekanie na zwolnienie miejsca", "Odlozenie ladunku")])
edge_labels = {("IDLE", "Ruch do ladunku"): "Nowy ladunek",
("Ruch do ladunku", "Zgloszenie problemu"): "Nie wykryto ladunku",
("Ruch do ladunku", "Zaladowanie ladunku"): "Wykrycie ladunku",
("Zgloszenie problemu", "IDLE"): "Anulowanie zadania",
("Zaladowanie ladunku", "Ruch do magazynu"): "Otrzymanie punktu skladowania",
("Ruch do magazynu", "Odlozenie ladunku"): "Wykrycie wolnego miejsca",
("Ruch do magazynu", "Czekanie na zwolnienie miejsca"): "Brak wolnego miejsca",
("Odlozenie ladunku", "IDLE"): "Potwierdzenie wykonania zadania",
("Czekanie na zwolnienie miejsca", "Odlozenie ladunku"): "Wykrycie wolnego miejsca"}
pos = nx.planar_layout(G)
# Navigation
G_nav = nx.MultiDiGraph()
G_nav.add_edges_from([("Czekanie na nowy ladunek", "Planowanie trasy i ruch"),
("Planowanie trasy i ruch", "Czekanie na nowy ladunek"),
("Planowanie trasy i ruch", "Zatrzymanie robota"),
("Zatrzymanie robota", "Planowanie trasy i ruch"),
("Zatrzymanie robota", "Czekanie na nowy ladunek")])
edge_labels_nav = [{("Czekanie na nowy ladunek", "Planowanie trasy i ruch"): "Nowy ladunek",
("Zatrzymanie robota", "Planowanie trasy i ruch"): "Brak kolizji"},
{("Planowanie trasy i ruch", "Zatrzymanie robota"): "Wykrycie kolizji",
("Planowanie trasy i ruch", "Czekanie na nowy ladunek"): "Wykonanie ruchu",
("Zatrzymanie robota", "Czekanie na nowy ladunek"): "Przerwanie zadania"}]
pos_nav = nx.planar_layout(G_nav)
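# Rendering sketch (assumes matplotlib is installed; not part of the original
# script). Draws the main state graph with its transition labels:
# import matplotlib.pyplot as plt
# nx.draw_networkx(G, pos)
# nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
# plt.show()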
|
{"hexsha": "91f03e7c47a414d4054c6d1511a753899bd289f8", "size": 2287, "ext": "py", "lang": "Python", "max_stars_repo_path": "agv/scripts/graphs.py", "max_stars_repo_name": "MicWeg/TS_proj", "max_stars_repo_head_hexsha": "70534063f454762b691799829c3c958cef087cf5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "agv/scripts/graphs.py", "max_issues_repo_name": "MicWeg/TS_proj", "max_issues_repo_head_hexsha": "70534063f454762b691799829c3c958cef087cf5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-31T12:20:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-31T12:20:58.000Z", "max_forks_repo_path": "agv/scripts/graphs.py", "max_forks_repo_name": "MicWeg/TS_proj", "max_forks_repo_head_hexsha": "70534063f454762b691799829c3c958cef087cf5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.1860465116, "max_line_length": 99, "alphanum_fraction": 0.5837341495, "include": true, "reason": "import networkx", "num_tokens": 700}
|
module modC
use modA
use modB, only : foo, bar
end module modC
|
{"hexsha": "3aaeda66a8ca0c9a7073aef7499156ddcf4ed92b", "size": 67, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/test_fortranfile/moduleC.f90", "max_stars_repo_name": "ZedThree/fort_depend.py", "max_stars_repo_head_hexsha": "ea2caf0010765f00de142d168b05665499ffe1ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2015-07-28T16:14:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T23:12:57.000Z", "max_issues_repo_path": "tests/test_fortranfile/moduleC.f90", "max_issues_repo_name": "ZedThree/fort_depend.py", "max_issues_repo_head_hexsha": "ea2caf0010765f00de142d168b05665499ffe1ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2015-01-04T20:31:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-20T08:55:47.000Z", "max_forks_repo_path": "tests/test_fortranproject/moduleC.f90", "max_forks_repo_name": "ZedThree/fort_depend.py", "max_forks_repo_head_hexsha": "ea2caf0010765f00de142d168b05665499ffe1ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2015-01-07T14:49:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T22:54:15.000Z", "avg_line_length": 13.4, "max_line_length": 27, "alphanum_fraction": 0.7014925373, "num_tokens": 25}
|
\chapter{Evaluation and Discussion}
\label{chap:eval}
\ldots
|
{"hexsha": "14541cda38ce7db4c8fba56d35dc81cbbe2b95db", "size": 63, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "TeX/chapters/chapter6.tex", "max_stars_repo_name": "Kirillfedoseev/dod-gamedev", "max_stars_repo_head_hexsha": "4192447627e204aebea57898540acd7ccf37b756", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TeX/chapters/chapter6.tex", "max_issues_repo_name": "Kirillfedoseev/dod-gamedev", "max_issues_repo_head_hexsha": "4192447627e204aebea57898540acd7ccf37b756", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TeX/chapters/chapter6.tex", "max_forks_repo_name": "Kirillfedoseev/dod-gamedev", "max_forks_repo_head_hexsha": "4192447627e204aebea57898540acd7ccf37b756", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.5, "max_line_length": 35, "alphanum_fraction": 0.7619047619, "num_tokens": 18}
|
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx
from networkx.algorithms import bipartite
from networkx.testing.utils import assert_edges_equal
class TestBiadjacencyMatrix:
@classmethod
def setupClass(cls):
global np, sp, sparse, np_assert_equal
try:
import numpy as np
import scipy as sp
import scipy.sparse as sparse
np_assert_equal=np.testing.assert_equal
except ImportError:
raise SkipTest('SciPy sparse library not available.')
def test_biadjacency_matrix_weight(self):
G=nx.path_graph(5)
G.add_edge(0,1,weight=2,other=4)
X=[1,3]
Y=[0,2,4]
M = bipartite.biadjacency_matrix(G,X,weight='weight')
assert_equal(M[0,0], 2)
M = bipartite.biadjacency_matrix(G, X, weight='other')
assert_equal(M[0,0], 4)
def test_biadjacency_matrix(self):
tops = [2,5,10]
bots = [5,10,15]
for i in range(len(tops)):
G = bipartite.random_graph(tops[i], bots[i], 0.2)
top = [n for n,d in G.nodes(data=True) if d['bipartite']==0]
M = bipartite.biadjacency_matrix(G, top)
assert_equal(M.shape[0],tops[i])
assert_equal(M.shape[1],bots[i])
def test_biadjacency_matrix_order(self):
G=nx.path_graph(5)
G.add_edge(0,1,weight=2)
X=[3,1]
Y=[4,2,0]
M = bipartite.biadjacency_matrix(G,X,Y,weight='weight')
assert_equal(M[1,2], 2)
@raises(nx.NetworkXError)
def test_null_graph(self):
bipartite.biadjacency_matrix(nx.Graph(),[])
@raises(nx.NetworkXError)
def test_empty_graph(self):
bipartite.biadjacency_matrix(nx.Graph([(1,0)]),[])
@raises(nx.NetworkXError)
def test_duplicate_row(self):
bipartite.biadjacency_matrix(nx.Graph([(1,0)]),[1,1])
@raises(nx.NetworkXError)
def test_duplicate_col(self):
bipartite.biadjacency_matrix(nx.Graph([(1,0)]),[0],[1,1])
@raises(nx.NetworkXError)
def test_format_keyword(self):
bipartite.biadjacency_matrix(nx.Graph([(1,0)]),[0],format='foo')
def test_from_biadjacency_roundtrip(self):
B1 = nx.path_graph(5)
M = bipartite.biadjacency_matrix(B1, [0,2,4])
B2 = bipartite.from_biadjacency_matrix(M)
assert_true(nx.is_isomorphic(B1,B2))
def test_from_biadjacency_weight(self):
M = sparse.csc_matrix([[1,2],[0,3]])
B = bipartite.from_biadjacency_matrix(M)
assert_edges_equal(B.edges(),[(0,2),(0,3),(1,3)])
B = bipartite.from_biadjacency_matrix(M, edge_attribute='weight')
e = [(0,2,{'weight':1}),(0,3,{'weight':2}),(1,3,{'weight':3})]
assert_edges_equal(B.edges(data=True),e)
def test_from_biadjacency_multigraph(self):
M = sparse.csc_matrix([[1,2],[0,3]])
B = bipartite.from_biadjacency_matrix(M, create_using=nx.MultiGraph())
assert_edges_equal(B.edges(),[(0,2),(0,3),(0,3),(1,3),(1,3),(1,3)])
|
{"hexsha": "6a1ec296813ac09dda8b71a6223eeb087492d744", "size": 3181, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/networkx/algorithms/bipartite/tests/test_matrix.py", "max_stars_repo_name": "MarletteFunding/aws-kube-codesuite", "max_stars_repo_head_hexsha": "ab4e5ce45416b83bffb947ab8d234df5437f4fca", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 184, "max_stars_repo_stars_event_min_datetime": "2017-12-20T21:50:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T13:24:58.000Z", "max_issues_repo_path": "src/networkx/algorithms/bipartite/tests/test_matrix.py", "max_issues_repo_name": "MarletteFunding/aws-kube-codesuite", "max_issues_repo_head_hexsha": "ab4e5ce45416b83bffb947ab8d234df5437f4fca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2020-03-24T18:07:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:12:27.000Z", "max_forks_repo_path": "src/networkx/algorithms/bipartite/tests/test_matrix.py", "max_forks_repo_name": "MarletteFunding/aws-kube-codesuite", "max_forks_repo_head_hexsha": "ab4e5ce45416b83bffb947ab8d234df5437f4fca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 136, "max_forks_repo_forks_event_min_datetime": "2018-01-09T22:52:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T13:26:18.000Z", "avg_line_length": 34.956043956, "max_line_length": 78, "alphanum_fraction": 0.6252750707, "include": true, "reason": "import numpy,import scipy,import networkx,from networkx", "num_tokens": 906}
|
from __future__ import absolute_import, division, print_function
import math
import os
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data import random_split
from scipy.stats import linregress
from ray import tune
a = 270.
b = 108.
d = 0.154
gamma = 0.641/1000
tau_s = 100
tau_noise = 2.
Jll = Jrr = 0.2609
Jlr = Jrl = 0.0497
J_ext = 0.00052
I_o = 0.3255
beta = 1
sigma_noise = I_o / 16.275
mu_o = 30
ndt = 0
threshold = 15
dt = 1
def wangwong(coherence, Jii=0.2609, Jij=0.0497, beta=1, ndt=0):
"""
Run a single trial of the Wang & Wong (2006) model.
Parameters
----------
    coherence: float - dot-motion coherence in percent (the code scales it by 1/100). Negative values for rightward movement.
Jii: float - self-excitatory coupling strength. Default: 0.2609 (Wang & Wong, 2006)
Jij: float - mutual-inhibitory coupling strength. Default: 0.0497 (Wang & Wong, 2006)
beta: float - common background current modulation. Default: 1.
ndt: int - non-decision time in ms. Default: 0.
    Returns
    -------
    choice, time: tuple - choice: True if the left population crosses threshold first, False if the right one does, -1 if undecided; time: reaction time in ms between 0 and 2500 (plus `ndt`)
"""
# Input from stimulus
I_mot_l = J_ext * mu_o * (1 + coherence *1. / 100)
I_mot_r = J_ext * mu_o * (1 - coherence *1. / 100)
sl = random.random() * 0.1
sr = random.random() * 0.1
I_n1 = random.random() * 0.1
I_n2 = random.random() * 0.1
for i in range(2500):
I_l = Jii * sl - Jij*sr + I_mot_l + beta*I_o + I_n1
I_r = Jii * sr - Jij*sl + I_mot_r + beta*I_o + I_n2
r_l = (a*I_l - b)/(1 - np.exp(-d*(a*I_l - b)))
r_r = (a*I_r - b)/(1 - np.exp(-d*(a*I_r - b)))
sl += (-sl*1./ tau_s + (1 - sl) * gamma * r_l)*dt
sr += (-sr*1./ tau_s + (1 - sr) * gamma * r_r)*dt
I_n1 += (- I_n1 + random.random() * math.sqrt(tau_noise) * sigma_noise)*dt / tau_noise
I_n2 += (- I_n2 + random.random() * math.sqrt(tau_noise) * sigma_noise)*dt / tau_noise
sl_hz = sl *1./ ((1 - sl) * gamma * tau_s)
sr_hz = sr *1./ ((1 - sr) * gamma * tau_s)
        if threshold - sl_hz < 1e-9 or threshold - sr_hz < 1e-9:
            time = i + ndt
            choice = sl_hz > sr_hz
            break
    else:
        # loop finished without a break: no decision was reached
        time = i
        choice = -1
    return choice, time
def accuracy(outcomes):
"""
Compute accuracy.
Parameters
----------
outcomes: array-like - list of tuples or list of list number of outcomes x (choice, reaction time).
Returns
-------
Accuracy: float - fraction of correct choices over decisions made (non-decisions not counted).
If there are no decisions, returns -1.
"""
correct = np.array([choice[0] == 1 for choice in outcomes])
incorrect = np.array([choice[0] == 0 for choice in outcomes])
decided = np.sum(correct) + np.sum(incorrect)
if decided != 0:
return np.sum(correct)/decided
else:
return -1
def reaction_times(outcomes, agg_func = np.mean, correct = True):
"""
Compute statistics of reaction time. Default: average reaction time.
Parameters
----------
outcomes: array-like - list of tuples or list of list number of outcomes x (choice, reaction time).
agg_func: function - statistic to compute for reaction times. Default: np.mean
correct: bool - statistic to be calculated for correct choices. Default: True.
Returns
-------
statistic: statistic defined by agg_func applied to the reaction times.
"""
return agg_func([outcome[1] for outcome in outcomes if outcome[0] == correct])
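# Minimal usage sketch (hypothetical coherence value of 12.8%):
# outcomes = [wangwong(12.8) for _ in range(200)]
# print("accuracy:", accuracy(outcomes))
# print("mean RT (correct):", reaction_times(outcomes))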
class PRNet(nn.Module):
def __init__(self, input_size, l1, l2, l3, output_size):
super(PRNet, self).__init__()
self.input_size = input_size
self.fc1 = nn.Linear(self.input_size, l1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(l1, l2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(l2, l3)
self.relu3 = nn.ReLU()
self.output = nn.Linear(l3, output_size)
def forward(self, x):
output = self.relu1(self.fc1(x))
output = self.relu2(self.fc2(output))
output = self.relu3(self.fc3(output))
output = self.output(output)
return output
def load_data(data_dir, parameter, coherence=all):
features = np.load(str(data_dir / f'features_{parameter}_{coherence}.npy'), allow_pickle = True)
parameters = np.load(str(data_dir / f'parameters_{parameter}_{coherence}.npy'), allow_pickle = True)
n_pars = parameters.shape[1]
features = features[:,:,:]
valid = np.multiply(features[:,:,0] < .999, features[:,:,1] < 0.2)
cond = [all(valid[i]) for i in range(len(valid))]
raw_data = np.concatenate((np.expand_dims(features[cond][:,:,0], 2), features[cond][:,:,2:]), 2).astype(float)
parameters = parameters[cond]
data = np.reshape(raw_data,(raw_data.shape[0], -1))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
X = torch.Tensor(data).to(device).float()
y = torch.Tensor(parameters).to(device).float()
train_size = int(0.8 * len(data))
trainset = TensorDataset(X[:train_size], y[:train_size])
testset = TensorDataset(X[train_size:], y[train_size:])
return trainset, testset
def train(model, loss_function, optimizer, train_data, test_data, num_epochs=25):
for epoch in range(0, num_epochs):
print(f'Starting epoch {epoch+1}')
current_loss = 0.0
for i, (inputs, targets) in enumerate(train_data, 0):
optimizer.zero_grad()
outputs = model(inputs)
train_loss = loss_function(outputs, targets)
train_loss.backward()
optimizer.step()
current_loss += train_loss.item()
model.eval()
with torch.no_grad():
test_outputs = model(test_data[:][0])
test_loss = loss_function(test_outputs, test_data[:][1])
model.train()
print(f'Epoch {epoch}:\n'
f'Train loss: {current_loss:.4f}\n'
f'Test loss: {test_loss:.4f}\n'
f'----------')
print('Training process has finished. Saving the model...')
return model.state_dict()
def train_tune(config, checkpoint_dir = None, data_dir=None):
train_set, test_set = load_data(data_dir, "3_pars", "all")
test_abs = int(len(train_set) * 0.8)
train_subset, val_subset = random_split(train_set, [test_abs, len(train_set) - test_abs])
in_dim = train_set[0][0].shape[0]
out_dim = train_set[0][1].shape[0]
model = PRNet(in_dim, config['l1'], config['l2'], config['l3'], out_dim)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
model.to(device)
loss_function = torch.nn.MSELoss()
optimizer = torch.optim.RMSprop(model.parameters(), lr=config['lr'], momentum = config['mom'])
if checkpoint_dir:
        model_state, optimizer_state = torch.load(os.path.join(checkpoint_dir, 'checkpoint'))
        model.load_state_dict(model_state)
        optimizer.load_state_dict(optimizer_state)
trainloader = DataLoader(train_subset, batch_size=int(config['batch_size']), shuffle=True)
valloader = DataLoader(val_subset, batch_size=int(config['batch_size']), shuffle=True)
for epoch in range(50):
epoch_steps = 0
current_loss = 0.0
for i, (inputs, targets) in enumerate(trainloader, 0):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = model(inputs)
train_loss = loss_function(outputs, targets)
train_loss.backward()
optimizer.step()
current_loss += train_loss.item()
epoch_steps += 1
if i % 100 == 99: # print every 100 mini-batches
print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1,
current_loss / epoch_steps))
current_loss = 0.0
model.eval()
R = np.zeros(out_dim)
val_loss = 0.0
val_steps = 0
for i, (inputs, targets) in enumerate(valloader, 0):
with torch.no_grad():
inputs, targets = inputs.to(device), targets.to(device)
test_outputs = model(inputs)
loss = loss_function(test_outputs, targets)
for r in range(len(R)):
try:
linreg = linregress(test_outputs.cpu().numpy()[:,r], targets.cpu().numpy()[:,r])
R[r] += linreg.rvalue
except ValueError:
continue
val_loss += loss.detach().cpu().numpy()
val_steps += 1
model.train()
with tune.checkpoint_dir(epoch) as checkpoint_dir:
path = checkpoint_dir + "checkpoint"
torch.save((model.state_dict(), optimizer.state_dict()), path)
if val_steps != 0:
tune.report(loss = val_loss/val_steps,
R2_1 = (R[0]/val_steps)**2,
R2_2 = (R[1]/val_steps)**2,
R2_3 = (R[2]/val_steps)**2
)
else:
tune.report(loss=np.nan, R2_1=np.nan, R2_2=np.nan, R2_3=np.nan)
print("Finished Training")
|
{"hexsha": "87cdde8bbd5615b44ef58e5c244b56b12aea7476", "size": 9688, "ext": "py", "lang": "Python", "max_stars_repo_path": "wwparrecdl/wwparrecdl.py", "max_stars_repo_name": "Emalude/wwparrecdl", "max_stars_repo_head_hexsha": "47b87a277c33b82594bb975be4e2407b9b122e25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wwparrecdl/wwparrecdl.py", "max_issues_repo_name": "Emalude/wwparrecdl", "max_issues_repo_head_hexsha": "47b87a277c33b82594bb975be4e2407b9b122e25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wwparrecdl/wwparrecdl.py", "max_forks_repo_name": "Emalude/wwparrecdl", "max_forks_repo_head_hexsha": "47b87a277c33b82594bb975be4e2407b9b122e25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2332155477, "max_line_length": 116, "alphanum_fraction": 0.5799958712, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2536}
|
import math
import centrosome.outline
import numpy
import numpy.testing
import pytest
import skimage.measure
import skimage.segmentation
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import (
EXPERIMENT,
COLTYPE_FLOAT,
C_LOCATION,
)
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.workspace
cellprofiler_core.preferences.set_headless()
import plugins.measureobjectintensitymultichannel as momc
IMAGE_NAME = "MyImage"
OBJECT_NAME = "MyObjects"
N_CHANNELS = 4
@pytest.fixture(scope="function")
def image():
return cellprofiler_core.image.Image()
@pytest.fixture(scope="function")
def measurements():
return cellprofiler_core.measurement.Measurements()
@pytest.fixture(scope="function")
def module():
module = momc.MeasureObjectIntensityMultichannel()
module.images_list.value = IMAGE_NAME
module.objects_list.value = OBJECT_NAME
return module
@pytest.fixture(scope="function")
def objects(image):
objects = cellprofiler_core.object.Objects()
objects.parent_image = image
return objects
@pytest.fixture(scope="function")
def workspace(image, measurements, module, objects):
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add(IMAGE_NAME, image)
object_set = cellprofiler_core.object.ObjectSet()
object_set.add_objects(objects, OBJECT_NAME)
return cellprofiler_core.workspace.Workspace(
cellprofiler_core.pipeline.Pipeline(),
module,
image_set,
object_set,
measurements,
image_set_list,
)
def test_init():
x = momc.MeasureObjectIntensityMultichannel()
def assert_features_and_columns_match(measurements, module):
object_names = [
x
for x in measurements.get_object_names()
if x
not in (
"Image",
EXPERIMENT,
)
]
features = [
[f for f in measurements.get_feature_names(object_name) if f != "Exit_Status"]
for object_name in object_names
]
columns = module.get_measurement_columns(None)
assert sum([len(f) for f in features]) == len(columns)
for column in columns:
index = object_names.index(column[0])
assert column[1] in features[index]
assert column[2] == COLTYPE_FLOAT
def test_supplied_measurements(module):
"""Test the get_category / get_measurements, get_measurement_images functions"""
module.images_list.value = "MyImage"
module.objects_list.value = "MyObjects1, MyObjects2"
expected_categories = tuple(
sorted(
[
momc.INTENSITY,
C_LOCATION,
]
)
)
assert (
tuple(sorted(module.get_categories(None, "MyObjects1"))) == expected_categories
)
assert module.get_categories(None, "Foo") == []
measurements = module.get_measurements(None, "MyObjects1", momc.INTENSITY)
assert len(measurements) == len(momc.ALL_MEASUREMENTS)
measurements = module.get_measurements(None, "MyObjects1", C_LOCATION)
assert len(measurements) == len(momc.ALL_LOCATION_MEASUREMENTS)
assert all([m in momc.ALL_LOCATION_MEASUREMENTS for m in measurements])
assert (
module.get_measurement_images(
None,
"MyObjects1",
momc.INTENSITY,
momc.MAX_INTENSITY,
)
== ["MyImage"]
)
def test_get_measurement_columns(module):
"""test the get_measurement_columns method"""
module.images_list.value = "MyImage"
module.objects_list.value = "MyObjects1, MyObjects2"
module.nchannels.value = N_CHANNELS
columns = module.get_measurement_columns(None)
assert len(columns) == N_CHANNELS * 2 * (
len(momc.ALL_MEASUREMENTS) + len(momc.ALL_LOCATION_MEASUREMENTS)
)
for column in columns:
assert column[0] in ("MyObjects1", "MyObjects2")
        assert column[2] == COLTYPE_FLOAT
category = column[1].split("_")[0]
assert category in (
momc.INTENSITY,
C_LOCATION,
)
if category == momc.INTENSITY:
assert column[1][column[1].find("_") + 1 :] in [
m + "_MyImage" + f"_c{c+1}"
for m in momc.ALL_MEASUREMENTS
for c in range(N_CHANNELS)
]
else:
assert column[1][column[1].find("_") + 1 :] in [
m + "_MyImage" + f"_c{c+1}"
for m in momc.ALL_LOCATION_MEASUREMENTS
for c in range(N_CHANNELS)
]
def test_zero(image, measurements, module, objects, workspace):
"""Make sure we can process a blank image"""
image.pixel_data = numpy.zeros((10, 10, N_CHANNELS))
objects.segmented = numpy.zeros((10, 10))
module.nchannels.value = N_CHANNELS
module.run(workspace)
for category, features in (
(
momc.INTENSITY,
momc.ALL_MEASUREMENTS,
),
(
C_LOCATION,
momc.ALL_LOCATION_MEASUREMENTS,
),
):
for meas_name in features:
for c in range(N_CHANNELS):
feature_name = "%s_%s_%s_c%s" % (category, meas_name, "MyImage", c + 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
                assert numpy.prod(data.shape) == 0, (
"Got data for feature %s" % feature_name
)
assert_features_and_columns_match(measurements, module)
def test_masked(image, measurements, module, objects, workspace):
"""Make sure we can process a completely masked image
Regression test of IMG-971
"""
image.pixel_data = numpy.zeros((10, 10, N_CHANNELS))
image.mask = numpy.zeros((10, 10), bool)
objects.segmented = numpy.ones((10, 10), int)
module.nchannels.value = N_CHANNELS
module.run(workspace)
for meas_name in momc.ALL_MEASUREMENTS:
for c in range(N_CHANNELS):
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
meas_name,
"MyImage",
c + 1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
            assert numpy.prod(data.shape) == 1
assert numpy.all(numpy.isnan(data) | (data == 0))
assert_features_and_columns_match(measurements, module)
def test_one(image, measurements, module, objects, workspace):
"""Check measurements on a 3x3 square of 1's"""
data = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
image.pixel_data = data.astype(float)
objects.segmented = data.astype(int)
module.nchannels.value = 1
module.run(workspace)
for category, meas_name, value in (
(
momc.INTENSITY,
momc.INTEGRATED_INTENSITY,
9,
),
(
momc.INTENSITY,
momc.MEAN_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.STD_INTENSITY,
0,
),
(
momc.INTENSITY,
momc.MIN_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.MAX_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.INTEGRATED_INTENSITY_EDGE,
8,
),
(
momc.INTENSITY,
momc.MEAN_INTENSITY_EDGE,
1,
),
(
momc.INTENSITY,
momc.STD_INTENSITY_EDGE,
0,
),
(
momc.INTENSITY,
momc.MIN_INTENSITY_EDGE,
1,
),
(
momc.INTENSITY,
momc.MAX_INTENSITY_EDGE,
1,
),
(
momc.INTENSITY,
momc.MASS_DISPLACEMENT,
0,
),
(
momc.INTENSITY,
momc.LOWER_QUARTILE_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.MEDIAN_INTENSITY,
1,
),
(
momc.INTENSITY,
momc.UPPER_QUARTILE_INTENSITY,
1,
),
(
C_LOCATION,
momc.LOC_CMI_X,
3,
),
(
C_LOCATION,
momc.LOC_CMI_Y,
2,
),
):
feature_name = "%s_%s_%s_c%s" % (category, meas_name, "MyImage", 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
        assert numpy.prod(data.shape) == 1
assert data[0] == value, "%s expected %f != actual %f" % (
meas_name,
value,
data[0],
)
def test_one_masked(image, measurements, module, objects, workspace):
"""Check measurements on a 3x3 square of 1's"""
img = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
mask = img > 0
image.pixel_data = img.astype(float)
image.mask = mask
objects.segmented = img.astype(int)
module.run(workspace)
for meas_name, value in (
(momc.INTEGRATED_INTENSITY, 9),
(momc.MEAN_INTENSITY, 1),
(momc.STD_INTENSITY, 0),
(momc.MIN_INTENSITY, 1),
(momc.MAX_INTENSITY, 1),
(momc.INTEGRATED_INTENSITY_EDGE, 8),
(momc.MEAN_INTENSITY_EDGE, 1),
(momc.STD_INTENSITY_EDGE, 0),
(momc.MIN_INTENSITY_EDGE, 1),
(momc.MAX_INTENSITY_EDGE, 1),
(momc.MASS_DISPLACEMENT, 0),
(momc.LOWER_QUARTILE_INTENSITY, 1),
(momc.MEDIAN_INTENSITY, 1),
(momc.MAD_INTENSITY, 0),
(momc.UPPER_QUARTILE_INTENSITY, 1),
):
feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, meas_name, "MyImage", 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
        assert numpy.prod(data.shape) == 1
assert data[0] == value, "%s expected %f != actual %f" % (
meas_name,
value,
data[0],
)
def test_intensity_location(image, measurements, module, objects, workspace):
data = (
numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 2, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
]
).astype(float)
/ 2.0
)
image.pixel_data = data
labels = (data != 0).astype(int)
objects.segmented = labels
module.run(workspace)
for feature, value in (
(momc.LOC_MAX_X, 5),
(momc.LOC_MAX_Y, 2),
):
feature_name = "%s_%s_%s_c%s" % (C_LOCATION, feature, "MyImage", 1)
values = measurements.get_current_measurement(OBJECT_NAME, feature_name)
assert len(values) == 1
assert values[0] == value
def test_mass_displacement(image, measurements, module, objects, workspace):
"""Check the mass displacement of three squares"""
labels = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
data = numpy.zeros(labels.shape, dtype=float)
    #
    # object 1 has its single bright pixel in a corner,
    # at distance sqrt(8) from the object's center
    #
    data[1, 1] = 1
    #
    # object 2 has its single bright pixel on the top edge,
    # at distance 2 from the object's center
    #
    data[7, 3] = 1
    #
    # object 3 has its single bright pixel on the left edge,
    # at distance 2 from the object's center
    #
    data[15, 1] = 1
image.pixel_data = data
objects.segmented = labels
module.run(workspace)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.MASS_DISPLACEMENT,
"MyImage",
1,
)
mass_displacement = measurements.get_current_measurement("MyObjects", feature_name)
    assert numpy.prod(mass_displacement.shape) == 3
numpy.testing.assert_almost_equal(mass_displacement[0], math.sqrt(8.0))
numpy.testing.assert_almost_equal(mass_displacement[1], 2.0)
numpy.testing.assert_almost_equal(mass_displacement[2], 2.0)
def test_mass_displacement_masked(image, measurements, module, objects, workspace):
"""Regression test IMG-766 - mass displacement of a masked image"""
labels = numpy.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 2, 2, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0],
]
)
data = numpy.zeros(labels.shape, dtype=float)
    #
    # object 1 has its single bright pixel in a corner,
    # at distance sqrt(8) from the object's center
    #
    data[1, 1] = 1
    #
    # object 2 has its single bright pixel on the top edge,
    # at distance 2 from the object's center
    #
    data[7, 3] = 1
    #
    # object 3 has its single bright pixel on the left edge,
    # at distance 2 from the object's center
    #
    data[15, 1] = 1
mask = numpy.zeros(data.shape, bool)
mask[labels > 0] = True
image.pixel_data = data
image.mask = mask
objects.segmented = labels
module.run(workspace)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.MASS_DISPLACEMENT,
"MyImage",
1,
)
mass_displacement = measurements.get_current_measurement("MyObjects", feature_name)
    assert numpy.prod(mass_displacement.shape) == 3
numpy.testing.assert_almost_equal(mass_displacement[0], math.sqrt(8.0))
numpy.testing.assert_almost_equal(mass_displacement[1], 2.0)
numpy.testing.assert_almost_equal(mass_displacement[2], 2.0)
def test_quartiles_uniform(image, measurements, module, objects, workspace):
"""test quartile values on a 250x250 square filled with uniform values"""
labels = numpy.ones((250, 250), int)
numpy.random.seed(0)
data = numpy.random.uniform(size=(250, 250))
image.pixel_data = data
objects.segmented = labels
module.run(workspace)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.LOWER_QUARTILE_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 0.25, 2)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.MEDIAN_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 0.50, 2)
feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, momc.MAD_INTENSITY, "MyImage", 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 0.25, 2)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.UPPER_QUARTILE_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 0.75, 2)
def test_quartiles_one_pixel(image, module, objects, workspace):
"""Regression test a bug that occurs in an image with one pixel"""
    labels = numpy.zeros((10, 20), int)  # integer labels, as in the other tests
labels[2:7, 3:8] = 1
labels[5, 15] = 2
numpy.random.seed(0)
data = numpy.random.uniform(size=(10, 20))
image.pixel_data = data
objects.segmented = labels
# Crashes when pipeline runs in measureobjectintensity.py revision 7146
module.run(workspace)
def test_quartiles_four_objects(image, measurements, module, objects, workspace):
"""test quartile values on a 250x250 square with 4 objects"""
labels = numpy.ones((250, 250), int)
labels[125:, :] += 1
labels[:, 125:] += 2
numpy.random.seed(0)
data = numpy.random.uniform(size=(250, 250))
#
# Make the distributions center around .5, .25, 1/6 and .125
#
data /= labels.astype(float)
image.pixel_data = data
objects.segmented = labels
module.run(workspace)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.LOWER_QUARTILE_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 1.0 / 4.0, 2)
numpy.testing.assert_almost_equal(data[1], 1.0 / 8.0, 2)
numpy.testing.assert_almost_equal(data[2], 1.0 / 12.0, 2)
numpy.testing.assert_almost_equal(data[3], 1.0 / 16.0, 2)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.MEDIAN_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 1.0 / 2.0, 2)
numpy.testing.assert_almost_equal(data[1], 1.0 / 4.0, 2)
numpy.testing.assert_almost_equal(data[2], 1.0 / 6.0, 2)
numpy.testing.assert_almost_equal(data[3], 1.0 / 8.0, 2)
feature_name = "%s_%s_%s_c%s" % (
momc.INTENSITY,
momc.UPPER_QUARTILE_INTENSITY,
"MyImage",
1,
)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 3.0 / 4.0, 2)
numpy.testing.assert_almost_equal(data[1], 3.0 / 8.0, 2)
numpy.testing.assert_almost_equal(data[2], 3.0 / 12.0, 2)
numpy.testing.assert_almost_equal(data[3], 3.0 / 16.0, 2)
feature_name = "%s_%s_%s_c%s" % (momc.INTENSITY, momc.MAD_INTENSITY, "MyImage", 1)
data = measurements.get_current_measurement("MyObjects", feature_name)
numpy.testing.assert_almost_equal(data[0], 1.0 / 4.0, 2)
numpy.testing.assert_almost_equal(data[1], 1.0 / 8.0, 2)
numpy.testing.assert_almost_equal(data[2], 1.0 / 12.0, 2)
numpy.testing.assert_almost_equal(data[3], 1.0 / 16.0, 2)
def test_median_intensity_masked(image, measurements, module, objects, workspace):
numpy.random.seed(37)
labels = numpy.ones((10, 10), int)
mask = numpy.ones((10, 10), bool)
mask[:, :5] = False
pixel_data = numpy.random.uniform(size=(10, 10, N_CHANNELS)).astype(numpy.float32)
pixel_data[~mask, :] = 1
image.pixel_data = pixel_data
image.mask = mask
objects.segmented = labels
expected = [
numpy.sort(pixel_data[mask, c])[numpy.sum(mask) // 2] for c in range(N_CHANNELS)
]
module.nchannels.value = N_CHANNELS
module.run(workspace)
assert isinstance(measurements, cellprofiler_core.measurement.Measurements)
for c, exp in enumerate(expected):
values = measurements.get_current_measurement(
OBJECT_NAME,
"_".join((momc.INTENSITY, momc.MEDIAN_INTENSITY, IMAGE_NAME, f"c{c+1}")),
)
assert len(values) == 1
assert exp == values[0]
|
{"hexsha": "252619ad658e63876da84af2ee734d29fc0c1e19", "size": 20306, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_measureobjectintensitymultichannel.py", "max_stars_repo_name": "BodenmillerGroup/ImcPluginsCP", "max_stars_repo_head_hexsha": "a53bb7e1dea60b859d57677ea9a15281fa84d493", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-06-04T16:59:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-14T08:20:44.000Z", "max_issues_repo_path": "tests/test_measureobjectintensitymultichannel.py", "max_issues_repo_name": "BodenmillerGroup/ImcPluginsCP", "max_issues_repo_head_hexsha": "a53bb7e1dea60b859d57677ea9a15281fa84d493", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2018-02-28T23:20:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-17T15:02:01.000Z", "max_forks_repo_path": "tests/test_measureobjectintensitymultichannel.py", "max_forks_repo_name": "BodenmillerGroup/ImcPluginsCP", "max_forks_repo_head_hexsha": "a53bb7e1dea60b859d57677ea9a15281fa84d493", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2017-11-23T03:01:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T22:40:01.000Z", "avg_line_length": 25.1623296159, "max_line_length": 88, "alphanum_fraction": 0.5704225352, "include": true, "reason": "import numpy", "num_tokens": 5875}
|
module Section10 where
open import Section9 public
-- 10. Conclusions
-- ===============
--
-- We have defined a calculus of proof trees for simply typed λ-calculus with explicit substitutions
-- and we have proved that this calculus is sound and complete with respect to Kripke
-- models. A decision algorithm for convertibility based on the technique of “normalisation by
-- evaluation” has also been proven correct.
--
-- One application of the results for proof trees is the soundness and completeness for a
-- formulation of an implicitly-typed λ-calculus with explicit substitutions.
--
-- An important aspect of this work is that it has been carried out on a machine; actually,
-- the problem was partly chosen because it seemed possible to do the formalization in a nice
-- way using ALF. It is often emphasised that a proof is machine checked, but this is the very
-- minimum one can ask of a proof system. Another important aspect is that the system helps
-- you to develop your proof, and I feel that ALF is on the right way: this work was not done
-- first by pen and paper and then typed into the machine, but was from the very beginning
-- carried out in interaction with the system.
--
-- NOTE: The above paragraphs apply to Agda, as well.
--
--
-- Appendix
-- --------
--
-- (…)
--
--
-- Acknowledgement
-- ---------------
--
-- The author wants to thank the editor Olivier Danvy for having had the patience with delays
-- and still being supportive. She is also grateful to Bernd Grobauer for help with improving
-- the presentation. The comments of the anonymous referees have also been very valuable.
--
--
-- References
-- ----------
--
-- 1. Abadi, M., Cardelli, L., Curien, P.-L., and Lévy, J.-J.
-- Explicit substitutions.
-- Journal of Functional Programming, 1(4) (1991) 375–416.
--
-- 2. Berger, U.
-- Program extraction from normalization proofs.
-- In Proceedings of TLCA’93, LNCS, Vol. 664, Springer Verlag, Berlin, 1993, pp. 91–106.
--
-- 3. Berger, U. and Schwichtenberg, H.
-- An inverse of the evaluation functional for typed λ-calculus.
-- In Proceedings of the 6th Annual IEEE Symposium on Logic in Comp. Sci., Amsterdam, 1991, pp. 203–211.
--
-- 4. Coquand, Th.
-- Pattern matching with dependent types.
-- In Proceedings of the 1992 Workshop on Types for Proofs and Programs, B. Nordström, K. Petersson, and
-- G. Plotkin (Eds.). Dept. of Comp. Sci. Chalmers Univ. of Technology and Göteborg Univ.
-- Available at (…), pp. 66–79.
--
-- 5. Coquand, Th. and Dybjer, P.
-- Intuitionistic model constructions and normalisation proofs.
-- Math. Structures Comp. Sci., 7(1) (1997) 75–94.
--
-- 6. Coquand, Th. and Gallier, J.
-- A proof of strong normalization for the theory of constructions using a Kripke-like interpretation.
-- In Proceedings of the First Workshop in Logical Frameworks, G. Huet and G. Plotkin (Eds.).
-- Available at (…), pp. 479–497.
--
-- 7. Coquand, Th., Nordström, B., Smith, J., and von Sydow, B.
-- Type theory and programming.
-- EATCS, 52 (1994) 203–228.
--
-- 8. Curien, P.-L.
-- An abstract framework for environment machines.
-- Theoretical Comp. Sci., 82 (1991) 389–402.
--
-- 9. Friedman, H.
-- Equality between functionals.
-- In Logic Colloquium, Symposium on Logic, held at Boston, 1972–1973, LNCS, Vol. 453,
-- Springer-Verlag, Berlin, 1975, pp. 22–37.
--
-- 10. Gandy, R.O.
-- On the axiom of extensionality—Part I.
-- The Journal of symbolic logic, 21 (1956) 36–48.
--
-- 11. Kripke S.A.
-- Semantical analysis of intuitionistic logic I.
-- In Formal Systems and Recursive Functions, J.N. Crossley and M.A.E. Dummet (Eds.).
-- North-Holland, Amsterdam, 1965, pp. 92–130.
--
-- 12. Magnusson, L. and Nordström, B.
-- The ALF proof editor and its proof engine.
-- In Types for Proofs and Programs, LNCS, Vol. 806, Springer-Verlag, Berlin, 1994, pp. 213–237.
--
-- 13. Martin-Löf, P.
-- Substitution calculus.
-- Handwritten notes, Göteborg, 1992.
--
-- 14. Mitchell, J.C.
-- Type systems for programming languages.
-- In Handbook of Theoretical Comp. Sci., Volume B: Formal Models and Semantics, J. van Leeuwen (Ed.).
-- Elsevier and MIT Press, Amsterdam, 1990, pp. 365–458.
--
-- 15. Mitchell, J.C. and Moggi, E.
-- Kripke-style models for typed lambda calculus.
-- Annals for Pure and Applied Logic, 51 (1991) 99–124.
--
-- 16. Nordström, B., Petersson, K., and Smith, J.
--    Programming in Martin-Löf’s Type Theory. An Introduction.
-- Oxford University Press, Oxford, UK, 1990.
--
-- 17. Scott, D.S.
--    Relating theories of the lambda calculus.
-- In To H.B. Curry: Essays on Combinatory Logic, Lambda Calculus, and Formalism.
-- Academic Press, New York, 1980, pp. 403–450.
--
-- 18. Statman, R.
-- Logical relation and the typed λ-calculus.
-- Information and Control, 65 (1985) 85–97.
--
-- 19. Streicher, T.
-- Semantics of Type Theory.
-- Birkhäuser, Basel, 1991.
--
-- 20. Tasistro, A.
-- Formulation of Martin-Löf’s theory of types with explicit substitutions.
-- Licentiate thesis, Dept. of Comp. Sci. Chalmers Univ. of Technology and Göteborg Univ., 1993.
|
{"hexsha": "ee8bb292d1a7eae4a9ba7689e60c596c9931c544", "size": 5237, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/Section10.agda", "max_stars_repo_name": "mietek/coquand", "max_stars_repo_head_hexsha": "7c000654c4f97024d2939c412702f64dc821d4ec", "max_stars_repo_licenses": ["X11"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-03-27T01:29:58.000Z", "max_stars_repo_stars_event_max_datetime": "2017-09-07T12:44:40.000Z", "max_issues_repo_path": "src/Section10.agda", "max_issues_repo_name": "mietek/coquand", "max_issues_repo_head_hexsha": "7c000654c4f97024d2939c412702f64dc821d4ec", "max_issues_repo_licenses": ["X11"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Section10.agda", "max_forks_repo_name": "mietek/coquand", "max_forks_repo_head_hexsha": "7c000654c4f97024d2939c412702f64dc821d4ec", "max_forks_repo_licenses": ["X11"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9770992366, "max_line_length": 108, "alphanum_fraction": 0.6761504678, "num_tokens": 1494}
|
//
// MIT License
//
// © ESI Group, 2015
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
#include "inendi/PVRangeSubSampler.h"
#include <pvcop/db/array.h>
#include <pvcop/db/algo.h>
#include <pvcop/types/datetime_us.h>
#include <pvkernel/core/inendi_bench.h> // for BENCH_END, BENCH_START
#include <inendi/PVPlotted.h>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <numeric>
#include <math.h>
Inendi::PVRangeSubSampler::PVRangeSubSampler(
const pvcop::db::array& time,
const std::vector<pvcop::core::array<value_type>>& timeseries,
const PVRush::PVNraw& nraw,
const pvcop::db::selection& sel,
const pvcop::db::array* split /* = nullptr */,
size_t sampling_count /*= 2048*/)
: _original_time(time)
, _time(std::cref(time))
, _timeseries(timeseries)
, _nraw(nraw)
, _sel(sel)
, _split(split)
, _minmax()
{
set_sampling_count(
sampling_count); // should be the number of horizontal visible pixels in the plot
BENCH_START(sort);
if (not _time.get().is_sorted()) { // FIXME
_sorted_indexes = time.parallel_sort();
_sort = _sorted_indexes.to_core_array();
}
BENCH_END(sort, "sort", _time.get().size(), sizeof(uint64_t), _time.get().size(),
sizeof(uint64_t));
_minmax = pvcop::db::algo::minmax(_time);
_last_params = SamplingParams(0, 0, _minmax, 0, 0);
set_sampling_mode<SAMPLING_MODE::MEAN>();
allocate_internal_structures();
}
void Inendi::PVRangeSubSampler::set_sampling_count(size_t sampling_count)
{
_sampling_count = sampling_count;
_reset = true;
}
pvcop::db::array Inendi::PVRangeSubSampler::ratio_to_minmax(zoom_f ratio1, zoom_f ratio2) const
{
return _time.get().ratio_to_minmax(ratio1, ratio2, _minmax);
}
std::pair<Inendi::PVRangeSubSampler::zoom_f, Inendi::PVRangeSubSampler::zoom_f>
Inendi::PVRangeSubSampler::minmax_to_ratio(const pvcop::db::array& minmax) const
{
return _time.get().minmax_to_ratio(minmax, _minmax);
}
void Inendi::PVRangeSubSampler::subsample(zoom_f first_ratio,
zoom_f last_ratio,
zoom_f min_ratio /*= 0*/,
zoom_f max_ratio /*= 0*/)
{
const value_type min = min_ratio * std::numeric_limits<value_type>::max();
const value_type max = max_ratio * std::numeric_limits<value_type>::max();
subsample(ratio_to_minmax(first_ratio, last_ratio), min, max);
}
void Inendi::PVRangeSubSampler::subsample(const pvcop::db::array& minmax,
uint32_t min /*= 0*/,
uint32_t max /*= 0*/)
{
auto [first, past_end] = _time.get().equal_range(minmax, _sorted_indexes);
subsample(first, past_end - 1, minmax, min, max);
}
void Inendi::PVRangeSubSampler::subsample(size_t first,
size_t last,
const pvcop::db::array& minmax,
uint32_t min /*= 0*/,
uint32_t max /*= 0*/)
{
if (last == 0) {
last = _time.get().size() - 1;
}
if (max == 0) {
max = std::numeric_limits<uint32_t>::max();
}
// Resample every selected timeseries if params have changed
if (SamplingParams(first, last, minmax.copy(), min, max) != _last_params) {
_timeseries_to_subsample =
std::vector<size_t>(_selected_timeseries.begin(), _selected_timeseries.end());
}
_last_params = SamplingParams(first, last, minmax.copy(), min, max);
#ifdef INENDI_DEVELOPER_MODE
pvlogger::info() << "PVRangeSubSampler::subsample(first:" << first << ", last:" << last
<< ",minmax: " << minmax.at(0) << " .. " << minmax.at(1) << ", min:" << min
<< ", max:" << max << ", _sampling_count:" << _sampling_count
<< ", _reset:" << _reset
<< ", _timeseries_to_subsample.size():" << _timeseries_to_subsample.size()
<< ")\n";
#endif
if (_reset) {
allocate_internal_structures();
_reset = false;
}
_time.get().histogram(first, last, minmax, _sorted_indexes, _histogram);
_compute_ranges_reduction_f(first, last, min, max);
_subsampled.emit();
_valid = true;
}
void Inendi::PVRangeSubSampler::set_selected_timeseries(
const std::unordered_set<size_t>& selected_timeseries)
{
_timeseries_to_subsample.clear();
std::copy_if(selected_timeseries.begin(), selected_timeseries.end(),
std::back_inserter(_timeseries_to_subsample), [this](size_t index) {
return _selected_timeseries.find(index) == _selected_timeseries.end();
});
_selected_timeseries = selected_timeseries;
}
void Inendi::PVRangeSubSampler::resubsample()
{
subsample(_last_params.first, _last_params.last, _last_params.minmax, _last_params.min,
_last_params.max);
_timeseries_to_subsample =
std::vector<size_t>(_selected_timeseries.begin(), _selected_timeseries.end());
}
void Inendi::PVRangeSubSampler::resubsample(const std::unordered_set<size_t>& timeseries)
{
_timeseries_to_subsample.clear();
std::copy(timeseries.begin(), timeseries.end(), std::back_inserter(_timeseries_to_subsample));
subsample(_last_params.first, _last_params.last, _last_params.minmax, _last_params.min,
_last_params.max);
}
void Inendi::PVRangeSubSampler::set_split_column(const pvcop::db::array* split)
{
_split_count = 1;
_split = split;
if (_split) {
		// reset groups/extents in place: explicit destruction followed by placement new
		_split_groups.~groups();
new (&_split_groups) pvcop::db::groups();
_split_extents.~extents();
new (&_split_extents) pvcop::db::extents();
_split->group(_split_groups, _split_extents);
_split_count = _split_extents.size();
const pvcop::db::array& min_times = _time.get().group_min(_split_groups, _split_extents);
_shifted_time = _time.get().subtract(min_times, _split_groups);
_time = std::cref(_shifted_time);
} else {
_time = std::cref(_original_time);
}
	_minmax = _time.get().minmax(); // minmax() already returns a temporary; no std::move needed
_last_params = SamplingParams(0, 0, _minmax, 0, 0);
_sorted_indexes = _time.get().parallel_sort();
_sort = _sorted_indexes.to_core_array();
_ts_matrix.resize(_timeseries.size() * _split_count);
for (auto& vec : _ts_matrix) {
vec.resize(_histogram.size());
}
}
void Inendi::PVRangeSubSampler::allocate_internal_structures()
{
assert(_sampling_count >= 2);
// range values count
_histogram = std::vector<size_t>(_sampling_count);
assert(_timeseries.size() > 0);
assert(_histogram.size() > 0);
// matrix of average values
set_split_column(_split);
_timeseries_to_subsample.clear();
std::copy(_selected_timeseries.begin(), _selected_timeseries.end(),
std::back_inserter(_timeseries_to_subsample));
}
bool Inendi::PVRangeSubSampler::valid() const
{
return _valid;
}
|
{"hexsha": "44969f44cb41a54e28844093d63496b737a0b32d", "size": 7803, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libinendi/src/PVRangeSubSampler.cpp", "max_stars_repo_name": "inendi-inspector/inspector", "max_stars_repo_head_hexsha": "9b9a00222d8a73cb0817ca56790ee9155db61cc4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libinendi/src/PVRangeSubSampler.cpp", "max_issues_repo_name": "inendi-inspector/inspector", "max_issues_repo_head_hexsha": "9b9a00222d8a73cb0817ca56790ee9155db61cc4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libinendi/src/PVRangeSubSampler.cpp", "max_forks_repo_name": "inendi-inspector/inspector", "max_forks_repo_head_hexsha": "9b9a00222d8a73cb0817ca56790ee9155db61cc4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3461538462, "max_line_length": 95, "alphanum_fraction": 0.678200692, "num_tokens": 2006}
|
import numpy as np
from nayzakflow.utils import _onehot
def sigmoid(z):
return (1/(1+np.exp(-1*z)))
def _diff_sigmoid(z):
return sigmoid(z)*(1-sigmoid(z))
def tanh(z):
return np.tanh(z)
def _diff_tanh(z):
return 1-np.square(tanh(z))
def relu(z):
return np.maximum(0,z)
def _diff_relu(z):
a= np.zeros_like(z,dtype='int')
a[z>0] = 1
return a
def leaky_relu(z):
return np.maximum(z,0.1*z)
def _diff_leaky_relu(z):
a= np.zeros_like(z,dtype='int')+0.1
a[z>0] = 1
return a
def identity(z):
return z
def _diff_identity(z):
return 1
def softmax(z):
    # subtract the per-column max before exponentiating for numerical stability;
    # this leaves the result unchanged but avoids overflow for large logits
    exp = np.exp(z - z.max(axis=0, keepdims=True))
    tot = exp.sum(axis=0)
    return exp / tot
def _diff_softmax(z, y):
    # derivative of cross-entropy w.r.t. the logits; equals softmax(z) - onehot(y)
    yhat_r = softmax(z)
    onehotY = _onehot(y, z.shape[0])
    one_yi = onehotY * -1 * (1 - yhat_r)  # true-class entries: -(1 - yhat)
    rest = (1 - onehotY) * yhat_r         # remaining entries: yhat
    return one_yi + rest
def get_activations():
return {"identity":identity, "relu":relu, "softmax":softmax,"tanh":tanh, "sigmoid":sigmoid, "leaky_relu":leaky_relu}
def get_activations_diff():
return {"identity":_diff_identity, "relu":_diff_relu, "softmax":_diff_softmax,"tanh":_diff_tanh, "sigmoid":_diff_sigmoid, "leaky_relu":_diff_leaky_relu}
|
{"hexsha": "310ad38cdbe7d3c09f484abd69024541aaa8edff", "size": 1185, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/nayzakflow/nn/activation.py", "max_stars_repo_name": "Ahmed-Salah6011/NayzakFlow", "max_stars_repo_head_hexsha": "1fd6bf442a03524d9995e8c77f93c324713c1a35", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "build/lib/nayzakflow/nn/activation.py", "max_issues_repo_name": "Ahmed-Salah6011/NayzakFlow", "max_issues_repo_head_hexsha": "1fd6bf442a03524d9995e8c77f93c324713c1a35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/lib/nayzakflow/nn/activation.py", "max_forks_repo_name": "Ahmed-Salah6011/NayzakFlow", "max_forks_repo_head_hexsha": "1fd6bf442a03524d9995e8c77f93c324713c1a35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.1129032258, "max_line_length": 156, "alphanum_fraction": 0.6514767932, "include": true, "reason": "import numpy", "num_tokens": 394}
|
[STATEMENT]
lemma strip_bot_acom[simp]: "strip(\<bottom>\<^sub>c c) = c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. strip (\<bottom>\<^sub>c c) = c
[PROOF STEP]
by(simp add: bot_acom_def)
|
{"llama_tokens": 85, "file": "Abs_Int_ITP2012_Abs_Int0", "length": 1}
|
[STATEMENT]
lemma iso_botf: "mono \<bottom>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mono \<bottom>
[PROOF STEP]
by (simp add: monoI)
|
{"llama_tokens": 65, "file": "Transformer_Semantics_Isotone_Transformers", "length": 1}
|
import logging
import datetime
import time
import ray
import cupy
from ray.util.collective.collective_group import nccl_util
from ray.util.collective.collective_group.base_collective_group \
import BaseGroup
from ray.util.collective.types import AllReduceOptions, \
BarrierOptions, Backend, ReduceOptions, BroadcastOptions, \
AllGatherOptions, ReduceScatterOptions
from ray.util.collective.const import get_nccl_store_name
logger = logging.getLogger(__name__)
# TODO(Hao):
# (1) stream management, instead of using the default stream,
# using a dedicate stream
# (2) communicator management and support num_gpus > 2 per actor.
class Rendezvous:
"""A rendezvous class for different actor/task processes to meet.
To initialize an NCCL collective communication group, different
actors/tasks spawned in Ray in a collective group needs to meet
each other to synchronize the NCCLUniqueID. This class guarantees
they meet via the NCCLUniqueIDStore, initialized on the rank=0
process.
Args:
group_name (str): the unique user-specified group name.
"""
def __init__(self, group_name):
if not group_name:
raise ValueError("Invalid group name.")
self._group_name = group_name
self._store_name = None
self._store = None
def meet(self, timeout_s=180):
"""Meet at the named actor store.
Args:
timeout_s: timeout in seconds.
Return:
None
"""
if timeout_s <= 0:
raise ValueError("The 'timeout' argument must be positive. "
"Got '{}'.".format(timeout_s))
self._store_name = get_nccl_store_name(self._group_name)
timeout_delta = datetime.timedelta(seconds=timeout_s)
elapsed = datetime.timedelta(seconds=0)
start_time = datetime.datetime.now()
while elapsed < timeout_delta:
try:
logger.debug("Trying to meet at the store '{}'".format(
self._store_name))
self._store = ray.get_actor(self._store_name)
except ValueError:
logger.debug("Failed to meet at the store '{}'."
"Trying again...".format(self._store_name))
time.sleep(1)
elapsed = datetime.datetime.now() - start_time
continue
logger.debug("Successful rendezvous!")
break
if not self._store:
raise RuntimeError("Unable to meet other processes "
"at the rendezvous store.")
@property
def store(self):
return self._store
def get_nccl_id(self, timeout_s=180):
"""Get the NCCLUniqueID from the store through Ray.
Args:
timeout_s: timeout in seconds.
Return:
str: the NCCLUniqueID if successful.
"""
if not self._store:
raise ValueError("Rendezvous store is not setup.")
uid = None
timeout_delta = datetime.timedelta(seconds=timeout_s)
elapsed = datetime.timedelta(seconds=0)
start_time = datetime.datetime.now()
while elapsed < timeout_delta:
uid = ray.get(self._store.get_id.remote())
if not uid:
time.sleep(1)
elapsed = datetime.datetime.now() - start_time
continue
break
if not uid:
raise RuntimeError(
"Unable to get the NCCLUniqueID from the store.")
return uid
class NCCLGroup(BaseGroup):
def __init__(self, world_size, rank, group_name):
"""Init an NCCL collective group."""
super(NCCLGroup, self).__init__(world_size, rank, group_name)
self._nccl_uid = None
# TODO(Hao): change this to a be a cache
self._nccl_comm = None
if nccl_util.get_nccl_build_version() < 2000:
raise RuntimeError("NCCL in Ray requires NCCL >= 2.0.")
# TODO(Hao): check version here
if nccl_util.get_nccl_runtime_version() < 2704:
logger.warning("NCCL send/recv calls requires NCCL>=2.7.4")
self._rendezvous = Rendezvous(self.group_name)
self._rendezvous.meet()
# Setup the nccl uid using the store
self._init_nccl_unique_id()
# Setup a tensor for barrier calls
self._barrier_tensor = cupy.array([1])
def _init_nccl_unique_id(self):
"""Init the NCCLUniqueID required for creating NCCL communicators."""
self._nccl_uid = self._rendezvous.get_nccl_id()
@property
def nccl_uid(self):
return self._nccl_uid
def destroy_group(self):
"""Destroy the group and release the NCCL communicators safely."""
if self._nccl_comm is not None:
self.barrier()
# We also need a barrier call here.
stream = self._get_cuda_stream()
stream.synchronize()
# destroy the communicator
self._nccl_comm.destroy()
self._nccl_comm = None
super(NCCLGroup, self).destroy_group()
@classmethod
def backend(cls):
return Backend.NCCL
def allreduce(self, tensor, allreduce_options=AllReduceOptions()):
"""AllReduce the tensor across the collective group following options.
Args:
tensor: the tensor to be reduced, each tensor locates on a GPU
allreduce_options:
Returns:
"""
# obtain the communicator
comm = self._get_nccl_communicator()
# obtain the stream: using default stream by now
# TODO(Hao): implement a simple stream manager here
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
reduce_op = nccl_util.get_nccl_reduce_op(allreduce_options.reduceOp)
# in-place allreduce
comm.allReduce(ptr, ptr, n_elems, dtype, reduce_op, stream.ptr)
def barrier(self, barrier_options=BarrierOptions()):
"""Blocks until all processes reach this barrier.
Args:
barrier_options:
Returns:
"""
self.allreduce(self._barrier_tensor)
def reduce(self, tensor, reduce_options=ReduceOptions()):
"""Reduce tensor to a destination process following options.
Args:
tensor: the tensor to be reduced.
reduce_options: reduce options
Returns:
None
"""
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
reduce_op = nccl_util.get_nccl_reduce_op(reduce_options.reduceOp)
# in-place reduce
comm.reduce(ptr, ptr, n_elems, dtype, reduce_op,
reduce_options.root_rank, stream.ptr)
def broadcast(self, tensor, broadcast_options=BroadcastOptions()):
"""Broadcast tensor to all other processes following options.
Args:
tensor: the tensor to be broadcasted.
broadcast_options: broadcast options.
Returns:
None
"""
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
# in-place broadcast
comm.broadcast(ptr, ptr, n_elems, dtype, broadcast_options.root_rank,
stream.ptr)
def allgather(self,
tensor_list,
tensor,
allgather_options=AllGatherOptions()):
"""Allgather tensors across the group into a list of tensors.
Args:
tensor_list: the tensor list to store the results.
tensor: the tensor to be allgather-ed across the group.
allgather_options: allgather options.
Returns:
None
"""
_check_inputs_compatibility_for_scatter_gather(tensor, tensor_list)
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
send_ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
flattened = _flatten_for_scatter_gather(tensor_list, copy=False)
recv_ptr = nccl_util.get_tensor_ptr(flattened)
comm.allGather(send_ptr, recv_ptr, n_elems, dtype, stream.ptr)
for i, t in enumerate(tensor_list):
nccl_util.copy_tensor(t, flattened[i])
def reducescatter(self,
tensor,
tensor_list,
reducescatter_options=ReduceScatterOptions()):
"""Reducescatter a list of tensors across the group.
Args:
tensor: the output after reducescatter (could be unspecified).
tensor_list: the list of tensor to be reduce and scattered.
reducescatter_options: reducescatter options.
Returns:
None
"""
_check_inputs_compatibility_for_scatter_gather(tensor, tensor_list)
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor_list[0])
n_elems = nccl_util.get_tensor_n_elements(tensor_list[0])
reduce_op = nccl_util.get_nccl_reduce_op(
reducescatter_options.reduceOp)
# get the send_ptr
flattened = _flatten_for_scatter_gather(tensor_list, copy=True)
send_ptr = nccl_util.get_tensor_ptr(flattened)
recv_ptr = nccl_util.get_tensor_ptr(tensor)
comm.reduceScatter(send_ptr, recv_ptr, n_elems, dtype, reduce_op,
stream.ptr)
def _get_nccl_communicator(self):
"""Create or use a cached NCCL communicator for the collective task.
"""
# TODO(Hao): later change this to use device keys and query from cache.
# TODO(Hao): implement a thin wrapper
if not self._nccl_comm:
self._nccl_comm = nccl_util.create_nccl_communicator(
self.world_size, self.nccl_uid, self.rank)
return self._nccl_comm
@staticmethod
def _get_cuda_stream():
"""Obtain an idle stream from a stream pool for the collective task."""
# TODO: implement a simple stream manager.
return cupy.cuda.Stream.null
# def _collective_call(self, *args):
# """Private method to encapsulate all collective calls"""
# pass
def _flatten_for_scatter_gather(tensor_list, copy=False):
"""Flatten the tensor for gather/scatter operations.
Args:
tensor_list: the list of tensors to be scattered/gathered.
copy: whether the copy the tensors in tensor_list into the buffer.
Returns:
The flattened tensor buffer.
"""
if not tensor_list:
raise RuntimeError("Received an empty list.")
t = tensor_list[0]
# note we need a cupy dtype here.
dtype = nccl_util.get_cupy_tensor_dtype(t)
buffer_shape = [len(tensor_list)] + nccl_util.get_tensor_shape(t)
buffer = cupy.empty(buffer_shape, dtype=dtype)
if copy:
for i, tensor in enumerate(tensor_list):
nccl_util.copy_tensor(buffer[i], tensor)
return buffer
def _check_inputs_compatibility_for_scatter_gather(tensor, tensor_list):
"""Check the compatibility between tensor input and tensor list inputs."""
if not tensor_list:
raise RuntimeError("Got empty list of tensors.")
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
shape = nccl_util.get_tensor_shape(tensor)
for t in tensor_list:
# check dtype
dt = nccl_util.get_nccl_tensor_dtype(t)
if dt != dtype:
raise RuntimeError("All tensor operands to scatter/gather must "
"have the same dtype. Got '{}' and '{}'"
"".format(dt, dtype))
# Note: typically CCL libraries only requires they have the same
# number of elements;
# Here we make it more strict -- we require exact shape match.
if nccl_util.get_tensor_shape(t) != shape:
raise RuntimeError("All tensor operands to scatter/gather must "
"have the same shape.")
|
{"hexsha": "4341f8e672ba78742e82ad41b1c123e770976c49", "size": 12598, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/ray/util/collective/collective_group/nccl_collective_group.py", "max_stars_repo_name": "coreylowman/ray", "max_stars_repo_head_hexsha": "d95c8b8a418ef35154ffedecbad1812fcc171db9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-05T09:40:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-05T09:40:58.000Z", "max_issues_repo_path": "python/ray/util/collective/collective_group/nccl_collective_group.py", "max_issues_repo_name": "coreylowman/ray", "max_issues_repo_head_hexsha": "d95c8b8a418ef35154ffedecbad1812fcc171db9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/ray/util/collective/collective_group/nccl_collective_group.py", "max_forks_repo_name": "coreylowman/ray", "max_forks_repo_head_hexsha": "d95c8b8a418ef35154ffedecbad1812fcc171db9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6883852691, "max_line_length": 79, "alphanum_fraction": 0.633275123, "include": true, "reason": "import cupy", "num_tokens": 2695}
|
[STATEMENT]
lemma pdevs_val_degree_cong:
assumes "b = d"
assumes "\<And>i. i < degree b \<Longrightarrow> a i = c i"
shows "pdevs_val a b = pdevs_val c d"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pdevs_val a b = pdevs_val c d
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
b = d
?i < degree b \<Longrightarrow> a ?i = c ?i
goal (1 subgoal):
1. pdevs_val a b = pdevs_val c d
[PROOF STEP]
by (auto simp: pdevs_val_sum)
|
{"llama_tokens": 203, "file": "Affine_Arithmetic_Affine_Form", "length": 2}
|
// Copyright 2022 DeepMind Technologies Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "func_wrap.h"
#include <string>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <Eigen/Eigen>
#include "func_traits.h"
namespace {
struct BoxedDouble {
double value;
};
struct BoxedInt {
int value;
};
} // namespace
namespace mujoco::util {
template <> struct wrapped<BoxedDouble> {
static BoxedDouble unwrap(const std::string& wrapped) {
return BoxedDouble{std::stod(wrapped)};
}
};
template <> struct wrapped<BoxedInt> {
static BoxedInt unwrap(int wrapped) {
return BoxedInt{wrapped};
}
};
template <typename T, int N> struct wrapped<T(*)[N]> {
using Array = T[N];
static Array* unwrap(Eigen::Ref<Eigen::Vector<T, N>> wrapped) {
return reinterpret_cast<Array*>(wrapped.data());
}
};
template <typename T, int N> struct wrapped<const T(*)[N]> {
using Array = const T[N];
static Array* unwrap(const Eigen::Vector<T, N>& wrapped) {
return reinterpret_cast<Array*>(wrapped.data());
}
};
} // namespace mujoco::util
namespace {
using ::mujoco::util::func_arg_t;
using ::mujoco::util::UnwrapArgs;
using ::mujoco::util::ReturnArrayArg0;
double add(BoxedDouble x, float y, BoxedInt z) {
return x.value + y + z.value;
}
void add_array4(double (*out)[4], const double (*x)[4], const double (*y)[4]) {
for (int i = 0; i < 4; ++i) {
(*out)[i] = (*x)[i] + (*y)[i];
}
}
TEST(FuncWrapTest, UnwrapArgs) {
{
auto wrapped_add = UnwrapArgs(add);
static_assert(std::is_same_v<
func_arg_t<decltype(wrapped_add), 0>, const std::string&
>);
static_assert(std::is_same_v<
func_arg_t<decltype(wrapped_add), 1>, float
>);
static_assert(std::is_same_v<
func_arg_t<decltype(wrapped_add), 2>, int
>);
// Use binary powers so that we can do exact floating point comparison.
EXPECT_EQ(wrapped_add("1.6e+1", 5e-1, 2), 18.5);
}
{
Eigen::Vector4d out;
UnwrapArgs(add_array4)(out, {1, 3, 5, 7}, {2, 6, 9, 11});
EXPECT_THAT(out, ::testing::ElementsAre(3, 9, 14, 18));
}
}
TEST(FuncWrapTest, ReturnArrayArg0) {
auto out = UnwrapArgs(ReturnArrayArg0(add_array4))({1, 3, 5, 7},
{2, 6, 9, 11});
static_assert(std::is_same_v<decltype(out), Eigen::Vector4d>);
EXPECT_THAT(out, ::testing::ElementsAre(3, 9, 14, 18));
}
} // namespace
|
{"hexsha": "47346dba74374883c41388bfb9726743a603f5e2", "size": 2944, "ext": "cc", "lang": "C++", "max_stars_repo_path": "python/mujoco/util/func_wrap_test.cc", "max_stars_repo_name": "mfkiwl/mujoco", "max_stars_repo_head_hexsha": "21338e61784eebea1ae850a1cdb3bcf049de5924", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1575.0, "max_stars_repo_stars_event_min_datetime": "2021-10-18T15:09:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:13:23.000Z", "max_issues_repo_path": "python/mujoco/util/func_wrap_test.cc", "max_issues_repo_name": "mfkiwl/mujoco", "max_issues_repo_head_hexsha": "21338e61784eebea1ae850a1cdb3bcf049de5924", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 142.0, "max_issues_repo_issues_event_min_datetime": "2021-10-18T20:45:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:46:00.000Z", "max_forks_repo_path": "python/mujoco/util/func_wrap_test.cc", "max_forks_repo_name": "mfkiwl/mujoco", "max_forks_repo_head_hexsha": "21338e61784eebea1ae850a1cdb3bcf049de5924", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 133.0, "max_forks_repo_forks_event_min_datetime": "2021-10-18T15:37:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T09:29:09.000Z", "avg_line_length": 28.5825242718, "max_line_length": 79, "alphanum_fraction": 0.659986413, "num_tokens": 833}
|
%----------------------------------------------------------------------------------------
%----------------------------------------------------------------------------------------
% =====================================================================================================
%
% EDA - Exploratory Data Analysis
%
% =====================================================================================================
\section{Missing \& Duplicate Observations}
\label{sec:EDA}
We observe an initial 97 samples in the \textit{crime\_v2.csv} file, as well as all 25 columns listed in the code book above. An initial pass through the data reveals two obvious data-collection errors: (\textbf{a}) 6 empty rows at the tail of the file (see \ref{fig:Empty Rows}), and (\textbf{b}) one row that has been duplicated for county 193 (see \ref{fig:Duplicate Row}): \\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\caption{6 rows with missing values at tail of file}
\includegraphics[width=\linewidth]{images/EDA_empty_rows.jpg}
\label{fig:Empty Rows}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\hfill
\begin{subfigure}[b]{1.0\textwidth}
\centering
\caption{Row for county 193 duplicated}
\includegraphics[width=\linewidth]{images/EDA_duplicate_rows.jpg}
\label{fig:Duplicate Row}
\end{subfigure}
\caption{EDA : Duplicated and Missing Rows}
\label{fig:EDA Duplicate and Missing Rows}
\end{figure}
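The cleaning itself is mechanical; a minimal sketch, assuming the file is loaded with pandas (an assumption, since this report does not show its analysis toolchain):
\begin{verbatim}
import pandas as pd
crime = pd.read_csv("crime_v2.csv")
crime = crime.dropna(how="all")    # drop the 6 empty rows at the tail
crime = crime.drop_duplicates()    # drop the duplicated row for county 193
assert len(crime) == 90
\end{verbatim}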
\pagebreak
\section{Descriptive Statistics}
\label{sec:Descriptive Statistics}
%\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Following removal of six empty rows and one of the duplicate rows, we are left with 90 observations useful for analysis. The descriptive statistics for the variables of interest were captured:\\
\begin{table}[!htbp]
\centering
\small
\caption{EDA : Descriptive Statistics}
\label{EDA - Descriptive Statistics}
\begin{tabular}{@{\extracolsep{5pt}}lccccccc}
\\[-1.8ex]\hline
\hline \\[-1.8ex]
Statistic & \multicolumn{1}{c}{N} & \multicolumn{1}{c}{Mean} & \multicolumn{1}{c}{St. Dev.} & \multicolumn{1}{c}{Min} & \multicolumn{1}{c}{Pctl(25)} & \multicolumn{1}{c}{Pctl(75)} & \multicolumn{1}{c}{Max} \\
\hline \\[-1.8ex]
county & 90 & 100.60 & 58.32 & 1 & 51.5 & 150.5 & 197 \\
year & 90 & 87.00 & 0.00 & 87 & 87 & 87 & 87 \\
crmrte & 90 & 0.03 & 0.02 & 0.01 & 0.02 & 0.04 & 0.10 \\
prbarr & 90 & 0.30 & 0.14 & 0.09 & 0.20 & 0.34 & 1.09 \\
prbconv & 90 & 0.55 & 0.35 & 0.07 & 0.34 & 0.59 & 2.12 \\
prbpris & 90 & 0.41 & 0.08 & 0.15 & 0.36 & 0.46 & 0.60 \\
avgsen & 90 & 9.69 & 2.83 & 5.38 & 7.38 & 11.47 & 20.70 \\
polpc & 90 & 0.002 & 0.001 & 0.001 & 0.001 & 0.002 & 0.01 \\
density & 90 & 1.44 & 1.52 & 0.0000 & 0.55 & 1.57 & 8.83 \\
taxpc & 90 & 38.16 & 13.11 & 25.69 & 30.73 & 41.01 & 119.76 \\
west & 90 & 0.24 & 0.43 & 0 & 0 & 0 & 1 \\
central & 90 & 0.38 & 0.49 & 0 & 0 & 1 & 1 \\
urban & 90 & 0.09 & 0.29 & 0 & 0 & 0 & 1 \\
pctmin80 & 90 & 25.71 & 16.98 & 1.28 & 10.02 & 38.18 & 64.35 \\
wcon & 90 & 285.35 & 47.75 & 193.64 & 250.75 & 314.98 & 436.77 \\
wtuc & 90 & 410.91 & 77.36 & 187.62 & 374.33 & 440.68 & 613.23 \\
wtrd & 90 & 210.92 & 33.87 & 154.21 & 190.71 & 224.28 & 354.68 \\
wfir & 90 & 321.62 & 54.00 & 170.94 & 285.56 & 342.63 & 509.47 \\
wser & 90 & 275.34 & 207.40 & 133.04 & 229.34 & 277.65 & 2,177.07 \\
wmfg & 90 & 336.03 & 88.23 & 157.41 & 288.60 & 359.89 & 646.85 \\
wfed & 90 & 442.62 & 59.95 & 326.10 & 398.78 & 478.26 & 597.95 \\
wsta & 90 & 357.74 & 43.29 & 258.33 & 329.27 & 383.15 & 499.59 \\
wloc & 90 & 312.28 & 28.13 & 239.17 & 297.23 & 328.78 & 388.09 \\
mix & 90 & 0.13 & 0.08 & 0.02 & 0.08 & 0.15 & 0.47 \\
pctymle & 90 & 0.08 & 0.02 & 0.06 & 0.07 & 0.08 & 0.25 \\
\hline \\[-1.8ex]
\end{tabular}
\end{table}
From the descriptive statistics, we note several potential areas of concern and interest. First, the $\textcolor{Blue}{county}$ variable appears to be the EPA FIPS code for \href{https://en.wikipedia.org/wiki/List_of_counties_in_North_Carolina}{North Carolina counties}. The values are odd numbered only and from Figure \ref{fig:LocationCorrectedMap} (below) we can see that the Central/West/East indicators provided in the data geographically aligns using these values as FIPS codes.\\
Additionally, the variables $\textcolor{Blue}{wser}$, $\textcolor{Blue}{density}$, $\textcolor{Blue}{polpc}$, $\textcolor{Blue}{taxpc}$, and $\textcolor{Blue}{pctymle}$ all appear to have distributions that suggest potential outliers. We will explore these variables to see if there may be more issues in the data collection that we can address.
\pagebreak
\section{Outlier Analysis}
\label{sec:Outliers}
\subsection{Weekly Wage, Service Industry [WSER]}
We start with $\textcolor{Blue}{wser}$, which appears to be the \textit{average weekly income for service industry workers}. There exists a single large maximum value of $2177.07$, which lies well outside the distribution of the other values (see \ref{fig:EDA WSER variable uncorrected}). The remaining values fall in the range of 133--348, so it seems very unlikely that only one county has a value in the 2,000+ weekly range (\$104,000 per year) for the service industry. The county tied to this record is 185, which is the FIPS code for \href{https://en.wikipedia.org/wiki/Warren_County,_North_Carolina}{Warren County, North Carolina}.\\
This value appears to be the result of a decimal placement issue, where the likely real value is $217.7068$, based on a survey of the counties surrounding Warren County: Vance County ($347.6609$), Franklin County ($239.2233$), Nash County ($305.7612$), Halifax County ($172.6281$), and Northampton County ($213.5822$). Given these surrounding county wages for service industry professionals, we come to a regional mean average of $\$255.7711$:\\
\textmathbf{
\begin{equation*}
\begin{aligned}
\mu_{regional\_wser} &= \frac{Vance + Franklin + Nash + Halifax + Northampton}{n}\\
&= \frac{\textcolor{Purple}{347.6609 + 239.2233 + 305.7612 + 172.6281 + 213.5822}}{5} = \frac{1278.856}{5}\\
\therefore &= \textcolor{OrangeRed}{255.7711}
\end{aligned}
\end{equation*}
}
Given these results, we elect to remediate the large outlier in $\textcolor{Blue}{wser}$ by multiplying the value by $0.1$. The impact to distribution of values is depicted in \ref{fig:EDA WSER variable corrected} below:\\
\begin{figure}[!ht]
\begin{subfigure}[t]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth,height=3in]{images/EDA_wser_uncorrected.jpg}
\caption{WSER with uncorrected, large outlier}
\label{fig:EDA WSER variable uncorrected}
\end{subfigure}
\hfill
\begin{subfigure}[t]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth,height=3in]{images/EDA_wser_corrected.jpg}
\caption{WSER with large outlier corrected}
\label{fig:EDA WSER variable corrected}
\end{subfigure}
\caption{Outliers : Weekly Wage, Service Industry (WSER)}
\label{fig:EDA WSER Outlier Treatment}
\end{figure}
\pagebreak
\subsection{People per Square Mile [DENSITY]}
Observation 79 (county = 173, \href{http://www.swaincountync.gov/}{Swain County}) is currently listed with a $\textcolor{Blue}{density}$ of 0.0000203422 people per square mile. By a wide margin, this is the lowest value in the dataset (see \ref{fig:EDA DENSITY variable uncorrected}) and appears to be a mistake. According to Wikipedia, \href{https://en.wikipedia.org/wiki/Swain_County,_North_Carolina}{Swain County} has a landmass of 541 square miles, which at that density would imply only 0.011 people living in the entire county, an impossibility. \\
According to \href{https://www.google.com/publicdata/explore?ds=kf7tgg1uo9ude_&met_y=population&idim=county:37173&hl=en&dl=en}{U.S. Census Bureau records}, Swain County, North Carolina had a population of 10,932 in 1987. Upon reviewing $\textcolor{Blue}{density}$ more closely, along with the Census Bureau population records and the square-mile landmass reported on Wikipedia, it appears that \textcolor{OrangeRed}{\textit{this variable is actually in units of 100 people per square mile}}. Even with that adjustment, the data for Swain County would equal only 1.1 people in the entire county, which is still clearly incorrect.\\
Based on the adjusted population of 109.32 (in units of 100 persons), the correct $\textcolor{Blue}{density}$ value for Swain County in 1987 should be $0.202070$. We adjust accordingly (see \ref{fig:EDA DENSITY variable corrected}).\\
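For concreteness, the arithmetic behind this corrected value, using the Census population and Wikipedia landmass cited above, is:\\
\textmathbf{
\begin{equation*}
\begin{aligned}
density_{Swain} &= \frac{population \div 100}{square\ miles} = \frac{\textcolor{Purple}{10932 \div 100}}{\textcolor{Purple}{541}} = \frac{109.32}{541}\\
\therefore &= \textcolor{OrangeRed}{0.202070}
\end{aligned}
\end{equation*}
}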
\vspace*{0.5in}
\begin{figure}[!ht]
\begin{subfigure}[t]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth,height=3.5in]{images/EDA_density_uncorrected.jpg}
\caption{log(density) with uncorrected, small outlier}
\label{fig:EDA DENSITY variable uncorrected}
\end{subfigure}
\hfill
\begin{subfigure}[t]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth,height=3.5in]{images/EDA_density_corrected.jpg}
\caption{log(density) with small outlier, corrected}
\label{fig:EDA DENSITY variable corrected}
\end{subfigure}
\caption{Outliers : People per Square Mile (DENSITY)}
\label{fig:EDA DENSITY Outlier Untreated and Treated}
\end{figure}
\pagebreak
\subsection{Police per Capita [POLPC]}
There are a total of five data points in the $\textcolor{Blue}{polpc}$ variable that qualify as anomalous (\href{https://en.wikipedia.org/wiki/Outlier}{IQR Rule}), but one stands well above the others and warrants additional scrutiny. The entry for county 115 (\href{https://www.madisoncountync.gov/}{Madison County}) has a value of $0.00905433$ (see \ref{fig:EDA POLPC variable uncorrected}), significantly higher than the values in all other counties. According to the \href{https://www.google.com/publicdata/explore?ds=kf7tgg1uo9ude_&met_y=population&idim=county:37173&hl=en&dl=en#!ctype=l&strail=false&bcs=d&nselm=h&met_y=population&scale_y=lin&ind_y=false&rdim=country&idim=county:37115&ifdim=country&hl=en_US&dl=en&ind=false}{U.S. Census}, Madison County, NC had a population of only 17,051 residents in 1987, making it one of the smaller counties in the state overall. Madison County covers only 452 square miles of geography and is located in the northwest portion of the state, directly bordering Tennessee.\\
At this per capita level, Madison County would have $154.38538$ officers covering just 17,051 people. The mean of the $\textcolor{Blue}{polpc}$ variable is $0.00162543$ (excluding Madison County); with this value substituted for Madison, we would have a more realistic level of $\approx 27.715$ law enforcement officers, in line with other counties in the 20k-and-below population range. We substitute the mean for Madison County to address this apparent mistake; \ref{fig:EDA POLPC variable corrected} reflects the data distribution following the adjustment.\\
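For concreteness, the arithmetic behind the officer counts cited above is:\\
\textmathbf{
\begin{equation*}
\begin{aligned}
officers_{reported} &= polpc \times population = \textcolor{Purple}{0.00905433 \times 17051} = \textcolor{OrangeRed}{154.38538}\\
officers_{substituted} &= \textcolor{Purple}{0.00162543 \times 17051} \approx \textcolor{OrangeRed}{27.715}
\end{aligned}
\end{equation*}
}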
\vspace*{0.5in}
\begin{figure}[!ht]
\begin{subfigure}[b]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth]{images/EDA_polpc_uncorrected.jpg}
\caption{POLPC with uncorrected, large outlier}
\label{fig:EDA POLPC variable uncorrected}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth]{images/EDA_polpc_corrected.jpg}
\caption{POLPC with large outlier corrected}
\label{fig:EDA POLPC variable corrected}
\end{subfigure}
\caption{Outliers : Police per Capita (POLPC)}
\label{fig:EDA POLPC Outlier Treatment}
\end{figure}
\pagebreak
\subsection{Tax Revenue per Capita [TAXPC]}
Next, we analyze a single large outlier found in the $\textcolor{Blue}{taxpc}$ variable. According to the code book (ref: \ref{fig:Code Book}), $\textcolor{Blue}{taxpc}$ is the tax revenue per capita; while the typical range is 25--75, county 55 (\href{https://www.darenc.com/}{Dare County}) has a large value of $119.76$ per person. In 1987, Dare County had a population of 19,580 according to \href{https://www.google.com/publicdata/explore?ds=kf7tgg1uo9ude_&met_y=population&idim=county:37055:37053&hl=en&dl=en}{U.S. Census records}. The tax rate in Dare County is roughly the same as in other counties at 2\% with a total state + county combined rate of 6.75\%.\\
Based on the historical tax rates and the increasing burden we see in NC taxes from 1981--1987 \href{https://www.ncleg.gov/DocumentSites/committees/FiscalModernization/Comission\%20Meetings/Nov\%2028\%20and\%2029/Nov\%2028\%20Presentations/History\%20of\%20State\%20and\%20Local\%20Taxes\%20in\%20NC\%20Paper.pdf}{(reference)}, we cannot conclude that the value reported in the data is incorrect. As such, we elect not to treat this value and leave it as-is for the purposes of our analysis.\\
\vspace*{0.5in}
\begin{figure}[!ht]
\centering
\includegraphics{images/EDA_taxpc_uncorrected.jpg}
\caption{Outliers : Tax Revenue per Capita (TAXPC)}
\label{fig:EDA TAXPC variable uncorrected}
\end{figure}
\pagebreak
\subsection{Percentage of Demographic as Young Males [PCTYMLE]}
We analyze a single large outlier found in the $\textcolor{Blue}{pctymle}$ variable. According to the code book (ref: \ref{fig:Code Book}), $\textcolor{Blue}{pctymle}$ is the percentage of the county population made up of young males. County 133 (\href{https://www.onslowcountync.gov/}{Onslow County}) has a relatively large value of almost 25\%, warranting further investigation. According to Wikipedia, \href{https://en.wikipedia.org/wiki/Onslow_County,_North_Carolina}{Onslow County} is home to the U.S. Marine Corps Base \href{https://www.lejeune.marines.mil/}{Camp Lejeune}.\\
Though women have been permitted to join the U.S. Marines since 1918, historically they have made up less than 10\% of all U.S. Marine Corps roles \href{https://en.wikipedia.org/wiki/Women_in_the_United_States_Marines}{(reference)}. It was not until calendar year 2016 that women were allowed to serve in all roles. In addition, contemporary demographic statistics report that the median age of Onslow County is 25 \href{https://en.wikipedia.org/wiki/Onslow_County,_North_Carolina}{(reference)}. Given these data, \textcolor{OrangeRed}{\textit{we conclude that this large value is accurate}} and elect to leave it as-is for the purposes of this analysis.
\vspace*{0.5in}
\begin{figure}[!ht]
\centering
\includegraphics{images/EDA_pctymle_uncorrected.jpg}
\caption{Outliers : Percentage of Demographic as Young Males (PCTYMLE)}
\label{fig:EDA PCTYMLE variable uncorrected}
\end{figure}
\pagebreak
\section{Location Errata}
The use of one-hot encoding for location variables $\textcolor{Blue}{west}$ and $\textcolor{Blue}{central}$ is potentially problematic, as it allows for the possibility of insert/update anomalies. That is, the form of the data permits impossible assignments to more than one location. The dataset we were provided contains only one such anomaly, for \href{http://www.gastongov.com/}{Gaston County} (FIPS code 71). This county has inadvertently been assigned to both the "Central" and "West" groups (see figures \ref{fig:EDA Location - Gaston County} and \ref{fig:EDA Location map - Gaston County}).\\
\begin{figure}[!ht]
\begin{subfigure}[b]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth]{images/EDA_location_incorrect_category.jpg}
\caption{Gaston County assigned to two location codes}
\label{fig:EDA Location - Gaston County}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth]{images/EDA_location_map.jpg}
\caption{Gaston and surrounding counties}
\label{fig:EDA Location map - Gaston County}
\end{subfigure}
\caption{EDA : Location Category of Gaston County}
\label{fig:Local Errata}
\end{figure}
\href{http://www.gastongov.com/}{Gaston County} is surrounded by only three North Carolina counties: \href{https://www.clevelandcounty.com/main/}{Cleveland County} to the West, \href{https://www.lincolncounty.org/}{Lincoln County} to the North, and \href{https://www.mecknc.gov/Pages/Home.aspx}{Mecklenburg County} to the East. In this case, all of the surrounding counties are labeled as members of the "Central" category, so \textcolor{OrangeRed}{we correct the value for Gaston by removing it from the "West" category, leaving it assigned to "Central"}. Following this correction, all county region assignments appear geographically consistent (see \ref{fig:LocationCorrectedMap}).\\
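A minimal sketch of this check and correction in Python/pandas; the dataframe name \texttt{df} and column names are assumptions from the code book:
\begin{verbatim}
# Sketch only; 'df' and column names are assumptions.
anomalies = df[(df['west'] == 1) & (df['central'] == 1)]  # impossible dual assignment
df.loc[df['county'] == 71, 'west'] = 0                    # keep Gaston in "Central" only
\end{verbatim}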
\begin{figure}[!ht]
\centering
\includegraphics[scale=0.9]{images/EDA_location_map_correct.jpg}
\caption{EDA : North Carolina Geographic Boundaries (West, Central, East)}
\label{fig:LocationCorrectedMap}
\end{figure}
\pagebreak
\section{Crime Rate by County}
Earlier, we identified the $\textcolor{Blue}{county}$ variable as the FIPS code for North Carolina counties (ref \ref{sec:Descriptive Statistics}). We can use this information to identify counties missing from our dataset, as well as to plot crime rates at a county level to see if there are any overt geographic patterns in crime rate.\\
We received data for only 90 of the 100 counties in North Carolina; the missing counties are shown in Figure \ref{fig:MissingCountiesList}, and are identified geographically in Figure \ref{fig:MissingCountiesGraph} [in grey].
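Mechanically, the missing counties can be recovered from the FIPS convention noted earlier (North Carolina county FIPS codes are the odd integers 1 through 199). A minimal sketch in Python; the dataframe name \texttt{df} is an assumption:
\begin{verbatim}
# Sketch only; 'df' is an assumption.
nc_fips = set(range(1, 200, 2))                # all 100 NC county FIPS codes (odd 1..199)
missing = sorted(nc_fips - set(df['county']))  # the 10 counties absent from the data
\end{verbatim}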
\begin{figure}[!ht]
\small
\begin{minipage}[t]{0.3\textwidth}
\caption{EDA : Missing Counties}
\begin{tabular}[t]{{p{1.0cm}p{3.0cm}}}
\toprule
\textbf{FIPS} & \textbf{County} \\
\midrule
29 & Camden \\
31 & Carteret \\
43 & Clay \\
73 & Gates \\
75 & Graham \\
95 & Hyde \\
103 & Jones \\
121 & Mitchell \\
177 & Tyrrell \\
199 & Yancey \\
\bottomrule
\end{tabular}
\label{fig:MissingCountiesList}
\end{minipage} \hfill
\begin{minipage}[t]{0.7\textwidth}
\centering
\caption{EDA : North Carolina Crime by County, 1987}
\begin{subfigure}[t]{1.0\textwidth}
\centering
\includegraphics[width=\linewidth]{images/EDA_crmrte_by_county.jpg}
\end{subfigure}
\label{fig:MissingCountiesGraph}
\end{minipage}
\end{figure}
No strong crime rate patterns are apparent from a purely visual geographic positioning perspective; however, it is noteworthy that crime rates, grouped West/Central/East, \textit{in general} tend to increase moving from west to east. \\
\begin{figure}[!ht]
\label{fig:EDA : County Top and Bottom 10 Crime Rates}
\begin{minipage}[t]{0.5\textwidth}
\centering
\caption{EDA : Top 10 Counties by Crime Rate}
\begin{tabular}[t]{{p{1.0cm}p{3.0cm}p{2.5cm}}}
\toprule
\textbf{FIPS} & \textbf{County} & \textbf{Crime Rate} \\
\midrule
119 & Mecklenburg & $9.897 \%$ \\
51 & Cumberland & $8.838 \%$ \\
129 & New Hanover & $8.350 \%$ \\
55 & Dare & $7.902 \%$ \\
181 & Vance & $7.295 \%$ \\
63 & Durham & $7.066 \%$ \\
65 & Edgecombe & $6.588 \%$ \\
135 & Orange & $6.290 \%$ \\
67 & Forsyth & $6.142 \%$ \\
81 & Guilford & $6.045 \%$ \\
\bottomrule
\end{tabular}
\end{minipage} \hfill
\begin{minipage}[t]{0.5\textwidth}
\centering
\caption{EDA : Bottom 10 Counties by Crime Rate}
\begin{tabular}[t]{{p{1.0cm}p{3.0cm}p{2.5cm}}}
\toprule
\textbf{FIPS} & \textbf{County} & \textbf{Crime Rate} \\
\midrule
117 & Martin & $0.553 \%$ \\
9 & Ashe & $1.062 \%$ \\
185 & Warren & $1.087 \%$ \\
39 & Cherokee & $1.192 \%$ \\
169 & Stokes & $1.210 \%$ \\
137 & Pamlico & $1.267 \%$ \\
5 & Alleghany & $1.296 \%$ \\
173 & Swain & $1.399 \%$ \\
53 & Currituck & $1.407 \%$ \\
197 & Yadkin & $1.419 \%$ \\
\bottomrule
\end{tabular}
\end{minipage}
\end{figure}
\pagebreak
\section{Frequency Distribution (Natural \& Log)}
%\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
After identifying and, when appropriate, treating outliers, we move on to consider the distribution of our data. Here, we provide a histogram view of the raw and log-transformed data for all variables, excluding those that are binary or of no regression value (i.e. $\textcolor{Blue}{west}$, $\textcolor{Blue}{central}$, $\textcolor{Blue}{urban}$, $\textcolor{Blue}{county}$, and $\textcolor{Blue}{year}$). We evaluate log transformations for their common utility in improving explanatory power and their tendency to normalize distributions.\\
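Before turning to the plots, a minimal sketch of how one such raw/log histogram pair can be produced (Python/matplotlib; the dataframe name \texttt{df} and the variable choice are illustrative):
\begin{verbatim}
# Sketch only; 'df' is an assumption. Applies to strictly positive variables.
import numpy as np
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.hist(df['crmrte'], bins=30)          # raw distribution
ax2.hist(np.log(df['crmrte']), bins=30)  # log-transformed distribution
plt.show()
\end{verbatim}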
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_crmrte.jpg}
\caption{EDA : Histogram of CRMRTE and log(CRMRTE)}
\label{fig:EDA Histogram CRMRTE}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_avgsen.jpg}
\caption{EDA : Histogram of AVGSEN and log(AVGSEN)}
\label{fig:EDA Histogram AVGSEN}
\end{subfigure}
\caption{EDA : Distribution of Variables CRMRTE and AVGSEN}
\label{fig:CRMRTE and AVGSEN Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_density.jpg}
\caption{EDA : Histogram of DENSITY and log(DENSITY)}
\label{fig:EDA Histogram DENSITY}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_mix.jpg}
\caption{EDA : Histogram of MIX and log(MIX)}
\label{fig:EDA Histogram MIX}
\end{subfigure}
\caption{EDA : Distribution of Variables DENSITY and MIX}
\label{fig:DENSITY and MIX Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_pctmin80.jpg}
\caption{EDA : Histogram of PCTMIN80 and log(PCTMIN80)}
\label{fig:EDA Histogram PCTMIN80}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_pctymle.jpg}
\caption{EDA : Histogram of PCTYMLE and log(PCTYMLE)}
\label{fig:EDA Histogram PCTYMLE}
\end{subfigure}
\caption{EDA : Distribution of Variables PCTMIN80 and PCTYMLE}
\label{fig:PCTMIN80 and PCTYMLE Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_polpc.jpg}
\caption{EDA : Histogram of POLPC and log(POLPC)}
\label{fig:EDA Histogram POLPC}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_prbarr.jpg}
\caption{EDA : Histogram of PRBARR and log(PRBARR)}
\label{fig:EDA Histogram PRBARR}
\end{subfigure}
\caption{EDA : Distribution of Variables POLPC and PRBARR}
\label{fig:POLPC and PRBARR Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_prbconv.jpg}
\caption{EDA : Histogram of PRBCONV and log(PRBCONV)}
\label{fig:EDA Histogram PRBCONV}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_prbpris.jpg}
\caption{EDA : Histogram of PRBPRIS and log(PRBPRIS)}
\label{fig:EDA Histogram PRBPRIS}
\end{subfigure}
\caption{EDA : Distribution of Variables PRBCONV and PRBPRIS}
\label{fig:PRBCONV and PRBPRIS Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_taxpc.jpg}
\caption{EDA : Histogram of TAXPC and log(TAXPC)}
\label{fig:EDA Histogram TAXPC}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wcon.jpg}
\caption{EDA : Histogram of WCON and log(WCON)}
\label{fig:EDA Histogram WCON}
\end{subfigure}
\caption{EDA : Distribution of Variables TAXPC and WCON}
\label{fig:TAXPC and WCON Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wfed.jpg}
\caption{Histogram of WFED}
\label{fig:EDA Histogram WFED}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wfir.jpg}
\caption{Histogram of WFIR}
\label{fig:EDA Histogram WFIR}
\end{subfigure}
\caption{EDA : Distribution of Variables WFED and WFIR}
\label{fig:WFED and WFIR Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wloc.jpg}
\caption{Histogram of WLOC}
\label{fig:EDA Histogram WLOC}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wmfg.jpg}
\caption{Histogram of WMFG}
\label{fig:EDA Histogram WMFG}
\end{subfigure}
\caption{EDA : Distribution of Variables WLOC and WMFG}
\label{fig:WLOC and WMFG Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wser.jpg}
\caption{Histogram of WSER}
\label{fig:EDA Histogram WSER}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wsta.jpg}
\caption{Histogram of WSTA}
\label{fig:EDA Histogram WSTA}
\end{subfigure}
\caption{EDA : Distribution of Variables WSER and WSTA}
\label{fig:WSER and WSTA Histogram}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
Histograms Continued.\\
\begin{figure}[!ht]
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wtrd.jpg}
\caption{Histogram of WTRD}
\label{fig:EDA Histogram WTRD}
\end{subfigure}\vspace{3mm}% or \hspace{0.3\textwidth}
\begin{subfigure}[b]{1.0\textwidth}
\centering
\includegraphics[width=0.9\textwidth,height=0.30\textheight]{images/EDA_histograms_wtuc.jpg}
\caption{Histogram of WTUC}
\label{fig:EDA Histogram WTUC}
\end{subfigure}
\caption{EDA : Distribution of Variables WTRD and WTUC}
\label{fig:WTRD and WTUC Histogram}
\end{figure}
\pagebreak
%\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
\section{Correlation}
We assess the correlation between dependent and independent variables, excluding all binary and identifier attributes. Intersections with low statistical significance are removed from the plot to improve the visibility of significant relationships. The dependent variable $\textcolor{Blue}{crmrte}$ shows its strongest Pearson $r$ correlations with the potential regressors $\textcolor{Blue}{density}$ (0.73), $\textcolor{Blue}{wfed}$ (0.49), and $\textcolor{Blue}{polpc}$ (0.48).\\
Superficially, the relationship between $\textcolor{Blue}{polpc}$ and $\textcolor{Blue}{crmrte}$ seems as though it might be causal in the opposite direction - that is, the more crime present, the more police the county hires. In this sense, $\textcolor{Blue}{polpc}$ might be better thought of as a dependent variable.\\
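A minimal sketch of the significance filtering described above (Python/SciPy; the dataframe name \texttt{df}, the variable pair, and the 0.05 threshold are illustrative assumptions):
\begin{verbatim}
# Sketch only; names and threshold are assumptions.
from scipy.stats import pearsonr
r, p = pearsonr(df['crmrte'], df['density'])
if p >= 0.05:
    r = float('nan')  # blank out statistically insignificant cells before plotting
\end{verbatim}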
\begin{figure}[!ht]
\centering
\includegraphics[width=0.9\textwidth]{images/EDA_correlation.jpg}
\caption{EDA : Correlation of Independent and Dependent Variables}
\label{fig:EDA Correlation Matrix1}
\end{figure}
\pagebreak
\textbf{\textcolor{OrangeRed}{EXPLORATORY DATA ANALYSIS - CONTD.}}\\
We also assess the correlation between log transforms of the variables of interest. Feature intersections with low statistical significance are again removed from the plot for better visibility. Generally, the log transformation has minimal impact on the correlations relative to the non-transformed variables.\\
\begin{figure}[!ht]
\centering
\includegraphics[width=0.9\textwidth]{images/EDA_log_correlation.jpg}
\caption{EDA : Correlation of Independent and Dependent Variables (Log Transformed)}
\label{fig:EDA Correlation Matrix2}
\end{figure}
|
{"hexsha": "d2709c88109bc83c902e16afec3dc5a5c0e3da6c", "size": 29592, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/eda.tex", "max_stars_repo_name": "cbenge509/w203_Final", "max_stars_repo_head_hexsha": "4f8400353d1a3b8aa8f7483951ee7e4b5683f203", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/eda.tex", "max_issues_repo_name": "cbenge509/w203_Final", "max_issues_repo_head_hexsha": "4f8400353d1a3b8aa8f7483951ee7e4b5683f203", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/eda.tex", "max_forks_repo_name": "cbenge509/w203_Final", "max_forks_repo_head_hexsha": "4f8400353d1a3b8aa8f7483951ee7e4b5683f203", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.32, "max_line_length": 1031, "alphanum_fraction": 0.7232022168, "num_tokens": 9638}
|
\documentclass[11pt]{article}
\usepackage{hyperref, graphicx, floatrow}
\usepackage[letterpaper, margin=1.25in]{geometry}
\setlength{\parskip}{\baselineskip}
\setlength{\parindent}{0pt}
\title{Lhyra: Learned HYbrid Recursive Algorithms}
\author{
Josh Gruenstein\\\texttt{jgru@mit.edu} \and Lior Hirschfeld\\\texttt{liorh@mit.edu} \and
Benjamin Spector\\\texttt{spectorb@mit.edu}
}
\date{6.890 Spring 2019 Final Project}
\begin{document}
\maketitle
\section{Introduction}
A large number of computer science problems lend themselves naturally to recursive solutions. Of these, if more than one known algorithm exists, choosing between them may be difficult. Even if asymptotic worst-case run time is known, this may have no bearing on real-world use cases, which oftentimes operate on small inputs or data that is not representative of the entire space. Generally, a decision tree is chosen manually by an expert, who benchmarks the performance of each algorithm and selects whichever appears to perform most efficiently. A clear example of this is in the C++ STL, in which the standard algorithm is Introsort, which calls Quicksort until hitting a certain recursion depth, where it switches to Heapsort. This approach is far from ideal, not only because it is so time-consuming, but also because decision making will be restricted only to those features which the expert tested most rigorously.
Instead, we propose that this process should be automated through the introduction of machine learning. For this project, we designed and implemented Lhyra, a framework designed to automatically find efficient recursive trees given arbitrary solvers, data distributions, and optimization criteria. Lhyra was largely inspired by Professor Kraska's work on learned B-Trees and the Recursive Model Index \cite{kraska}, as we believed that, through high-level abstractions, it might be possible to apply a similar methodology to a large number of recursive data structures and algorithms. Below, we discuss Lhyra's components in more detail and present our results when applying Lhyra to two common problems, \textsc{Closest Pair} and \textsc{Sorting}.
\section{Framework}
A Lhyra instance is described by the following components:
\begin{itemize}
\item \textbf{Bag of Solvers $S$.} Each solver $s_i \in S$ is equipped to solve an instance of the problem at hand, potentially by calling the Lhyra instance on sub-problems. Each solver $s_i$ may also have a set of hyper-parameters $h_{ij}$.
\item \textbf{Data Generator / Training Data Set $D$.} In the training phase, the problems in this set / generated by this generator are used by the optimizer to learn how to select a solver $s_i$ from $S$ given problem features.
\item \textbf{Feature Extractor $F(x)$.} Our goal is to learn how to select the ideal solver based on a given input, thus we need a nice way of extracting useful features about an input. For example, a feature extractor for a sorting Lhyra instance may return a vector containing the length of the list, number of unique elements, and how sorted it already is. Defaults to a vector of length 1 containing the depth of the recursion.
\item \textbf{Optimizer $O$.} Given a feature extractor, a training data set, and a bag of solvers, the optimizer learns how to select a solver given a vector of problem features. Optimizers should default to minimizing computation time and/or memory, but can take an arbitrary cost function $C(x,y)$ that produces some cost to be minimized given a problem and solution.
\end{itemize}
Once initialized, a call to a Lhyra instance solves a problem $x$ through the following steps (sketched in code after the list):
\begin{enumerate}
\item Compute $F(x)$.
\item Pass $F(x)$ into the solver selector learned by the optimizer, which returns a parametrized solver $s_{ih}$.
\item Compute $s_{ih}(x)$.
\begin{enumerate}
\item For each sub-problem $y_i$ identified by $s_{ih}$, repeat the above process.
\item Combine the sub-problems and return a result.
\end{enumerate}
\end{enumerate}
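The following is a minimal sketch of this dispatch loop in Python; the attribute and method names (\texttt{extractor}, \texttt{optimizer.select}) are illustrative rather than the actual Lhyra API:
\begin{verbatim}
# Hedged sketch of the solve loop above; names are illustrative, not the real API.
def lhyra_solve(lhyra, x):
    features = lhyra.extractor(x)              # step 1: compute F(x)
    solver = lhyra.optimizer.select(features)  # step 2: select a parametrized solver
    # step 3: the solver handles each sub-problem by calling back into lhyra_solve
    return solver(x, lambda y: lhyra_solve(lhyra, y))
\end{verbatim}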
An ideal solver selector and feature extractor would be fast enough for the benefits gained by ideal solver selection to outweigh the additional overhead. After implementing Lhyra in both Python and C++, we found relative performance to be heavily impacted by language choice and implementation specifics.
\subsection{Optimizers}
The optimizer carries the responsibility of selecting the ideal solver, and training the model to perform said selection process. An ideal Lhyra optimizer would satisfy the following conditions:
\begin{itemize}
\item \textbf{Applicability.} The optimizer must learn using data that is accessible. For example, gradient descent may be impossible if the relationship between input and cost is unclear.
\item \textbf{Flexibility.} Given a feature vector, the optimizer must be able to apply a sufficiently complex matching to extract meaningful data for solver selection.
\item \textbf{Speed.} The optimizer must predict quickly enough that its overhead does not outweigh gains from selecting the proper solver. This component can be ignored if time is not an optimization criterion.
\end{itemize}
Although each optimizer we experimented with used a different training process, we decided to use a common overall framework in generating feedback times (our optimizer cost function $C$). We chose to measure the entire time to execute a sub-problem, including recursive calls to (potentially different) solvers selected by Lhyra. At face value, this is an odd choice compared to the far more straightforward method of simply measuring a solver's own execution time, not including that of its children. It introduces strange cyclical dependencies, where Lhyra's choice of solver must depend on an ingrained understanding of its own decision making. This also forces Lhyra's ``training'' process to be online, as timing measurements are only useful using the most up-to-date model parameters.
However, this is exactly the property we would want in an ideal optimizer. Consider a solver which did minimal work, instead deciding to punt nearly all of the computation to its children. A naive optimizer would unduly favor such a solver, and fail to recognize useful sequential relationships in solvers: for example, that solver B might be best following solver A. Although this methodology is harder to train, it is ideal presuming the model is able to converge at all (which we empirically observe with some optimizers).
\subsubsection{Neural Network Classifiers}
A natural optimizer architecture might feed our feature vector through a neural network, which outputs a one-hot vector representing solver selection. As we can make this neural network arbitrarily large, this system is guaranteed to satisfy our condition of flexibility. However, it suffers in other respects. As our classifier gets larger, classification latency would suffer. Techniques designed to speed up neural network execution (such as GPUs) largely rely on batching for speed and do not significantly improve latency for single inferences, limiting the size of potential networks.
Training is also a challenge. Gradient descent is incompatible with this architecture, because there exists no discernible relationship between network weights and cost: without exhausting all available options, it is impossible to determine whether the ideal solver was selected. We attempted to train a single perceptron classifier using REINFORCE \cite{reinforce}, a member of the policy gradients family that uses reward signals to estimate a classifier gradient. However, we found our classifier was unable to converge on reasonable solutions, as the estimated gradients were far too noisy.
We were able to obtain somewhat reasonable results training the same model from above using naive hill-climbing, randomly perturbing weights and keeping perturbations that led to performance improvements. However, our results were outperformed by better optimizers (such as our linear regressor), and we do not think this training method would scale to different and more complex problems due to its fragility.
\subsubsection{Linear Regressors}
An alternative architecture trains a set of linear regressors, where each predicts execution costs for a call-tree starting at a certain solver type. For any given feature vector, the optimizer predicts with each regressor and selects the solver corresponding to the regressor with smallest output. Each regressor can be easily trained through linear least-squares.
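A minimal sketch of this selection rule (Python; each regressor is assumed to expose a Scikit-learn-style \texttt{predict} method, which is an assumption about the interface rather than a description of our implementation):
\begin{verbatim}
# Sketch only; one fitted cost regressor per solver is assumed.
import numpy as np
def select_solver(regressors, features):
    costs = [reg.predict([features])[0] for reg in regressors]  # predicted call-tree cost
    return int(np.argmin(costs))  # index of the solver with the cheapest predicted cost
\end{verbatim}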
The primary concern with this method is that it lacks flexibility. With only a single perceptron, a linear regressor lacks the ability to extract nonlinear relationships between features. For both \textsc{Closest Pair} and \textsc{Sorting}, we did not find this problematic, but this deficiency should be considered when solver selection is more complex. One could imagine remedying this issue either with automatic feature extraction or a more powerful (non-linear and non-convex) regressor.
Even with a convergent optimizer, we struggled with overhead from running our regressors. For example, in order to make the overhead on recursive calls in our Python implementation net-positive, we had to perform micro-optimizations such as swapping out Scikit-learn's \texttt{LinearRegression} \cite{sklearn} class's inference methods for our own.
\section{Experiments}
In order to develop and test Lhyra, we built implementations in both Python and C++. We will show here results from C++ benchmarks, as we found relative performance measurement to be difficult in Python due to overhead such as garbage collection.
\subsection{Closest Pair of Points}
Consider the \textsc{Closest Pair} problem, in which, given a set of $n$ points $P$, you must identify a pair of distinct points $(p_i,p_j)$ minimizing the distance $||p_i - p_j||_2$. This is a common problem, but not quite common enough for hyper-optimized hand-coded adaptive implementations to exist. However, there are two common algorithms for solving this problem.
\begin{enumerate}
\item \textbf{Brute force.} In $O(n^2)$ time, exhaustively enumerate all possible pairs of points, and choose the one with the smallest distance (sketched below).
\item \textbf{Divide \& conquer.} Partition the points by the median $x$ coordinate, and recurse on the points to the left and right of the dividing line, $P_l$ and $P_r$ respectively. Then, for each point within $\min(d(P_l),d(P_r))$ of the dividing line, enumerate all possible pairs to see if one has a shorter distance than $d(P_r)$ or $d(P_l)$, where $d$ is the smallest pair distance in that set. It can be shown geometrically that the number of pairs within this distance of the dividing line is $O(n)$ \cite{clrs}, so the algorithm overall is $O(n\log n)$.
\end{enumerate}
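As an illustration, a minimal sketch of the brute-force solver in Python (\texttt{math.dist} requires Python 3.8+; this is not the project's actual implementation):
\begin{verbatim}
# Sketch only; not the project's implementation.
import math
from itertools import combinations
def brute_force(points):
    # O(n^2): examine every pair, keep the pair with the smallest distance
    return min(combinations(points, 2), key=lambda pair: math.dist(*pair))
\end{verbatim}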
We implemented a Lhyra instance with the above two solvers, using our linear regressor-based optimizer and extracting set size $n$ as our only feature. With only seconds of training, Lhyra was able to substantially outperform both algorithms for nearly all $n$ by learning to use the divide-and-conquer method at high $n$, and the brute-force method at lower $n$.
\begin{figure}[!h]
\centering
\begin{floatrow}
\ffigbox{
\caption{\textsc{Closest Pair} training curve}
}{
\includegraphics[width=8cm]{images/training_times_points}
}
\ffigbox{
\caption{\textsc{Closest Pair} relative performance}
}{%
\includegraphics[width=8cm]{images/relative_plot_points}
}
\end{floatrow}
\end{figure}
\subsection{List Sorting}
A more complex recursive problem is \textsc{Sorting}, producing an ordered list from a potentially unordered one. We chose the following common sorting methods for our bag of solvers: Insertion Sort ($O(n^2)$), Merge Sort ($O(n \log n)$), and Quicksort ($O(n \log n)$ expected). Due to their ubiquity, we will not review how these algorithms work.
We implemented a Lhyra instance with the three solvers, the same optimizer as above, and two features: the list length $n$, and sortedness $n \cdot s$, calculated in $O(1)$ time by randomly sampling 10 elements from the list and checking their orderedness $s$, then multiplying by $n$ to normalize it to our other feature. Below are results from this instance using purely random lists, showing that Lhyra dramatically outperforms the fastest individual algorithm, Quicksort, at large $n$, and matches Insertion Sort at small $n$.
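One plausible reading of the sortedness feature in code (Python; the exact sampling scheme used in the project may differ from this sketch):
\begin{verbatim}
# Sketch only; the project's exact sampling scheme may differ.
import random
def sortedness(lst, samples=10):
    idx = sorted(random.sample(range(len(lst)), min(samples, len(lst))))
    pairs = list(zip(idx, idx[1:]))
    s = sum(lst[i] <= lst[j] for i, j in pairs) / max(len(pairs), 1)
    return len(lst) * s  # n * s, normalized to the length feature
\end{verbatim}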
\begin{figure}[!ht]
\centering
\begin{floatrow}
\ffigbox{
\caption{Random \textsc{Sorting} training curve}
}{
\includegraphics[width=8cm]{images/training_times_unsorted_lists}
}
\ffigbox{
\caption{Random \textsc{Sorting} relative performance}
}{%
\includegraphics[width=8cm]{images/relative_plot_unsorted_lists}
}
\end{floatrow}
\end{figure}
However, Lhyra can perform even better with interesting and changing data distributions. Take the following example, where 80\% of lists are nearly sorted.
\begin{figure}[!ht]
\centering
\begin{floatrow}
\ffigbox{
\caption{Partially-sorted \textsc{Sorting} training curve}
}{
\includegraphics[width=8cm]{images/training_times_mostly_sorted_lists}
}
\ffigbox{
\caption{Partially-sorted \textsc{Sorting} relative performance}
}{%
\includegraphics[width=8cm]{images/relative_plot_mostly_sorted_lists}
}
\end{floatrow}
\end{figure}
Lhyra learns that our implementation of Insertion Sort is $O(n)$ on sorted lists, and dynamically exploits that knowledge to widen its performance gap against other solvers.
\section{Discussion}
% parametrizing solvers: random variables, etc
% learned feature extraction
One potential future optimization is pruning the \textit{Optimizer}. Shrinking the network's size would save time with each prediction, perhaps resulting in a significant speedup over many recursive calls. This process would also inform us of useless features. If any edges attached to the input are deleted from the network during pruning, then we can automatically remove the corresponding feature from the \textit{Feature Extractor}, resulting in additional performance gains.
A natural extension of Lhyra is introducing learned feature extraction. This process may involve combining the \textit{Feature Extractor} and \textit{Optimizer} components. Instead of passing in a feature vector to the \textit{Optimizer}, it receives raw data as input. For example, if operating on a graph, we could make use of Message Passing Neural Networks, which have recently been shown to dramatically improve performance over traditional features on tasks like molecular property prediction \cite{GilmerSRVD17}. Clearly, this process would require more computation, so this method would likely only be worthwhile on tasks where cost is lessened dramatically by good solver selection.
An application of Lhyra which we have not yet explored is online learning in production environments. If the data distribution changes over time, Lhyra could continuously use evaluation data to train its solver picker. This is actually doable for both \textsc{Sorting} and \textsc{Closest Pair}, as training for the whole dataset occurs in seconds, so miniature iterative updates could easily be done in production while remaining net-positive.
We would also like to investigate Lhyra's effectiveness at optimizing criteria other than time. One option would be to explore polynomial time approximation algorithms to NP-hard problems. In this scenario, cost could be a weighted function of evaluation time and accuracy, depending on the priorities of the user. It is also likely that for these more difficult problems, the benefits of Lhyra could be far greater in magnitude than they are in \textsc{Sorting} and \textsc{Closest Pair}, because optimization for time on already-fast problems constrains the types of features and complexity of optimizer that Lhyra can use. For problems relating to protein-folding, for example, Lhyra could use more complex features, models, and solvers, and operate on more diverse data distributions, and possibly yield big gains for a problem which is useful and hard.
In conclusion, we have designed and implemented a general framework for learned optimization of recursive functions. We have shown that Lhyra can learn models for simple problems which provide substantial improvements over individual algorithms, and also demonstrated the generality and extensibility of our approach, with an eye towards future work. We believe Lhyra constitutes a useful development in the field of learning-augmented algorithms.
\bibliographystyle{unsrt}
\bibliography{ref}
\appendix
\section{Code and Reproducibility}
Our research codebase is available under the MIT license at the below Github repository. Our code structure is described in the file \texttt{README.md}.
\begin{center}
\url{https://github.com/joshuagruenstein/lhyra}
\end{center}
In order to maximize reproducibility, we bundled our codebase into a Docker container, and set up Continuous Integration through Travis CI to auto-generate plots whenever we push to our Github repository. Thus, we have a transparent, reproducible pipeline for obtaining results with Lhyra. You can check out the figures generated by our most recent commit by going to the following link, and navigating to the \texttt{i.imgur.com} links at the bottom of the console output.
\begin{center}
\url{https://travis-ci.org/joshuagruenstein/lhyra}
\end{center}
\end{document}
|
{"hexsha": "5bf445ccb25ff68e95bdb8f6439b5e9e7a2ef486", "size": 17720, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/main.tex", "max_stars_repo_name": "joshuagruenstein/lhyra", "max_stars_repo_head_hexsha": "a8798ff92c44c3188525cc59b21f9525db68b4a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paper/main.tex", "max_issues_repo_name": "joshuagruenstein/lhyra", "max_issues_repo_head_hexsha": "a8798ff92c44c3188525cc59b21f9525db68b4a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/main.tex", "max_forks_repo_name": "joshuagruenstein/lhyra", "max_forks_repo_head_hexsha": "a8798ff92c44c3188525cc59b21f9525db68b4a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 90.4081632653, "max_line_length": 924, "alphanum_fraction": 0.7832957111, "num_tokens": 3883}
|
# Computes a^b for a non-negative integer exponent b using
# exponentiation by squaring: O(log b) multiplications.
function pow(a, b)
    k = b   # remaining exponent
    t = 1   # accumulated result
    p = a   # running square: a, a^2, a^4, ...
    while k > 0
        if k % 2 == 0
            k ÷= 2     # exponent is even: halve it...
            p *= p     # ...and square the running power
        else
            k -= 1     # exponent is odd: peel off one factor...
            t *= p     # ...into the result
        end
    end
    return t
end
|
{"hexsha": "92ba64f5b5b23aead20ac14612b51769e1d7cd2b", "size": 215, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "task_7_1.jl", "max_stars_repo_name": "Litger45/julia-algorithms-2", "max_stars_repo_head_hexsha": "ff8f650b314cc920e0d35238509a0838d4b0a5ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "task_7_1.jl", "max_issues_repo_name": "Litger45/julia-algorithms-2", "max_issues_repo_head_hexsha": "ff8f650b314cc920e0d35238509a0838d4b0a5ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "task_7_1.jl", "max_forks_repo_name": "Litger45/julia-algorithms-2", "max_forks_repo_head_hexsha": "ff8f650b314cc920e0d35238509a0838d4b0a5ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.3333333333, "max_line_length": 20, "alphanum_fraction": 0.2744186047, "num_tokens": 75}
|
#!python3
import numpy as np
from magLabUtilities.signalutilities.signals import SignalThread, Signal, SignalBundle
from magLabUtilities.datafileutilities.timeDomain import importFromXlsx
from magLabUtilities.signalutilities.interpolation import Legendre, nearestPoint
from magLabUtilities.signalutilities.hysteresis import HysteresisSignalBundle, XExpOfHGedney071720
from magLabUtilities.optimizerutilities.costFunctions import rmsNdNorm
from magLabUtilities.optimizerutilities.parameterSpaces import GridNode
from magLabUtilities.optimizerutilities.gradientDescent import GradientDescent
from magLabUtilities.signalutilities.calculus import finiteDiffDerivative, integralIndexQuadrature
from magLabUtilities.uiutilities.plotting.hysteresis import MofHXofMPlotter
from datetime import datetime
import json
class CostEvaluator:
def __init__(self, dataFP, tuneHistoryFP):
# Import data
self.fp = dataFP
self.refBundle = HysteresisSignalBundle(importFromXlsx(self.fp, '9.4k', 1, 'A,B', dataColumnNames=['H','M']))
# Re-parameterize data by arc length
refBundleArray = np.vstack([self.refBundle.signals['H'].dependentThread.data, self.refBundle.signals['H'].independentThread.data, self.refBundle.signals['M'].independentThread.data])
refBundleArray = SignalBundle.arcLengthND(refBundleArray, totalArcLength=5.0)
self.refBundle = HysteresisSignalBundle.fromSignalBundleArray(refBundleArray, ['H', 'M'])
# Re-sample data for more even arc length
interpolator = Legendre(interpRadius=100, legendreOrder=3)
tThread = SignalThread(np.linspace(0.0, 5.0, 1000))
self.refBundle.signals['M'] = interpolator.interpolate(self.refBundle.signals['M'], tThread)
self.refBundle.signals['H'] = interpolator.interpolate(self.refBundle.signals['H'], tThread)
# Find indices of reversals
self.pMAmpIndex = np.argmax(self.refBundle.signals['M'].independentThread.data[0:int(self.refBundle.signals['M'].independentThread.data.shape[0]/2)])
self.nMAmpIndex = np.argmin(self.refBundle.signals['M'].independentThread.data)
# Take the derivative of the data
xThread = SignalThread(finiteDiffDerivative( \
fNum=self.refBundle.signals['M'].independentThread.data, \
fDenom=self.refBundle.signals['H'].independentThread.data, \
windowRadius=1, \
discontinuousPoints=[self.pMAmpIndex, self.nMAmpIndex], \
differenceMode='centralDifference'))
self.refBundle.addSignal('X', Signal.fromThreadPair(xThread, self.refBundle.signals['M'].dependentThread))
self.tuneHistoryFP = tuneHistoryFP
self.plotter = MofHXofMPlotter()
self.plotter.addMofHPlot(self.refBundle, 'Data')
self.plotter.addXofMPlot(self.refBundle, 'Data')
def runCostFunction(self, gridNode:GridNode) -> GridNode:
# xInit:float, hCoercive:float, hNuc:float, mNuc:float, mSat:float, hCoop:float, hAnh:float
xInit = gridNode.coordList[0]
hCoercive = gridNode.coordList[1]
hNuc = gridNode.coordList[2]
mNuc = gridNode.coordList[3]
mSat = gridNode.coordList[4]
hCoop = gridNode.coordList[5]
hAnh = gridNode.coordList[6]
# Configure input H-threads
virginH = Signal.fromThreadPair(SignalThread(self.refBundle.signals['H'].independentThread.data[0:self.pMAmpIndex]), SignalThread(self.refBundle.signals['H'].dependentThread.data[0:self.pMAmpIndex]))
pRevH = Signal.fromThreadPair(SignalThread(self.refBundle.signals['H'].independentThread.data[self.pMAmpIndex:self.nMAmpIndex]), SignalThread(self.refBundle.signals['H'].dependentThread.data[self.pMAmpIndex:self.nMAmpIndex]))
nRevH = Signal.fromThreadPair(SignalThread(self.refBundle.signals['H'].independentThread.data[self.nMAmpIndex:]), SignalThread(self.refBundle.signals['H'].dependentThread.data[self.nMAmpIndex:]))
# Configure Xexp generator
xExpGen = XExpOfHGedney071720(xInit=xInit, hCoercive=hCoercive, hNuc=hNuc, mNuc=mNuc, mSat=mSat, hCoop=hCoop, hAnh=hAnh)
# Evaluate Xexp along loop
hRev = np.amin(self.refBundle.signals['H'].independentThread.data)
mRev = np.amin(self.refBundle.signals['M'].independentThread.data)
virginX = xExpGen.evaluate(hSignal=virginH, hRev=0.0, mRev=0.0, curveRegion='virgin')
hRev = np.amax(virginX.signals['H'].independentThread.data)
mRev = np.amax(virginX.signals['M'].independentThread.data)
pRevX = xExpGen.evaluate(hSignal=pRevH, hRev=hRev, mRev=mRev, curveRegion='reversal')
hRev = np.amin(pRevX.signals['H'].independentThread.data)
mRev = np.amin(pRevX.signals['M'].independentThread.data)
nRevX = xExpGen.evaluate(hSignal=nRevH, hRev=hRev, mRev=mRev, curveRegion='reversal')
# Compile curve regions into one signalBundle
testBundle = HysteresisSignalBundle.fromSignalBundleSequence([virginX, pRevX, nRevX])
refMatrix = self.refBundle.sample(tThread=self.refBundle.signals['H'].dependentThread, signalInterpList=[('M',nearestPoint),('H',nearestPoint)])
testMatrix = testBundle.sample(tThread=self.refBundle.signals['H'].dependentThread, signalInterpList=[('M',nearestPoint),('H',nearestPoint)])
tWeightMatrix = np.vstack([self.refBundle.signals['H'].dependentThread.data, np.hstack([np.ones(200), np.ones(800)])])
gridNode.loss = rmsNdNorm(refMatrix, testMatrix, tWeightMatrix)
gridNode.data = testBundle
return gridNode
def gradientStep(self, newCenterGridNode):
self.plotter.addMofHPlot(newCenterGridNode.data, 'Model')
self.plotter.addXofMPlot(newCenterGridNode.data, 'Model')
self.plotter.addXRevofMPlot(newCenterGridNode.data, 'Xrev')
# with open(tuneHistoryFP, 'a') as tuneHistoryFile:
# tuneHistoryFile.write(str(datetime.fromtimestamp(datetime.timestamp(datetime.now()))) + '\n')
# tuneHistoryFile.write(str(newCenterGridNode.coordList) + '\n')
# tuneHistoryFile.write('Error: %s\n' % str(newCenterGridNode.loss))
# tuneHistoryFile.write(json.dumps(newCenterGridNode.data) + '\n')
print(newCenterGridNode.loss)
print('Switching to node: %s' % str(newCenterGridNode.coordList))
if __name__ == '__main__':
# xInit = gridNode.coordList[0]
# hCoercive = gridNode.coordList[1]
# hNuc = gridNode.coordList[2]
# mNuc = gridNode.coordList[3]
# mSat = gridNode.coordList[4]
# hCoop = gridNode.coordList[5]
# hAnh = gridNode.coordList[6]
parameterList = [
{ 'name':'xInit',
'initialValue':69.0,
'stepSize':3,
'testGridLocalIndices':[0]
# 'testGridLocalIndices':[-1,0,1]
},
{ 'name':'hCoercive',
'initialValue':680.0,
'stepSize':25.0,
'testGridLocalIndices':[0]
# 'testGridLocalIndices':[-1,0,1]
},
{ 'name':'hNuc',
'initialValue':11974.0,
'stepSize':200,
# 'testGridLocalIndices':[0]
'testGridLocalIndices':[-1,0,1]
},
{ 'name':'mNuc',
'initialValue':1.5221e6,
'stepSize':0.03e6,
# 'testGridLocalIndices':[0]
'testGridLocalIndices':[-1,0,1]
},
{ 'name':'mSat',
'initialValue':1.66e6,
'stepSize':0.01e6,
'testGridLocalIndices':[0]
# 'testGridLocalIndices':[-1,0,1]
},
{
'name':'hCoop',
'initialValue':660.0,
'stepSize':50.0,
# 'testGridLocalIndices':[0]
'testGridLocalIndices':[-1,0,1]
},
{
'name':'hAnh',
'initialValue':4300.0,
'stepSize':50.0,
# 'testGridLocalIndices':[0]
'testGridLocalIndices':[-1,0,1]
}
]
fp = './tests/workflowTests/datafiles/CarlData.xlsx'
tuneHistoryFP = './tests/workflowTests/datafiles/tuneHistory01.txt'
costEvaluator = CostEvaluator(fp, tuneHistoryFP)
tuner = GradientDescent(parameterList, costEvaluator.runCostFunction, costEvaluator.gradientStep)
    tuner.tune(numIterations=np.inf, maxThreads=8)  # np.inf; np.infty is a deprecated alias
print('done')
|
{"hexsha": "935d8a5d2c4d6751a50750f11897ed5499f6daa1", "size": 9198, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/workflowTests/fit_XExpGedney071720.py", "max_stars_repo_name": "MarkTravers/magLabUtilities", "max_stars_repo_head_hexsha": "e116c8cb627cd82c3b8ba651dd6979b66e568632", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/workflowTests/fit_XExpGedney071720.py", "max_issues_repo_name": "MarkTravers/magLabUtilities", "max_issues_repo_head_hexsha": "e116c8cb627cd82c3b8ba651dd6979b66e568632", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/workflowTests/fit_XExpGedney071720.py", "max_forks_repo_name": "MarkTravers/magLabUtilities", "max_forks_repo_head_hexsha": "e116c8cb627cd82c3b8ba651dd6979b66e568632", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.7777777778, "max_line_length": 234, "alphanum_fraction": 0.6116547075, "include": true, "reason": "import numpy", "num_tokens": 2213}
|
module Types where
import Level
open import Data.Unit as Unit renaming (tt to ∗)
open import Data.List as List
open import Data.Product
open import Categories.Category using (Category)
open import Function
open import Relation.Binary.PropositionalEquality as PE hiding ([_]; subst)
open import Relation.Binary using (module IsEquivalence; Setoid; module Setoid)
open ≡-Reasoning
open import Common.Context as Context
-- open import Categories.Object.BinaryCoproducts ctx-cat
-- Codes
mutual
data TermCtxCode : Set where
emptyC : TermCtxCode
cCtxC : (γ : TermCtxCode) → TypeCode γ → TermCtxCode
TyCtxCode : Set
data TypeCode (δ : TyCtxCode) (γ : TermCtxCode) : Set where
closeAppTyC : TypeCode δ γ
data TyFormerCode (γ : TermCtxCode) : Set where
univ : TyFormerCode γ
abs : (A : TypeCode γ) → (TyFormerCode (cCtxC γ A)) → TyFormerCode γ
TyCtxCode = Ctx (Σ TermCtxCode TyFormerCode)
TyVarCode : TyCtxCode → {γ : TermCtxCode} → TyFormerCode γ → Set
TyVarCode δ {γ} T = Var δ (γ , T)
emptyTy : TyCtxCode
emptyTy = []
{-
ctxTyFormer : (γ : TermCtxCode) → TyFormerCode γ → TyFormerCode emptyC
ctxTyFormer = ?
-}
data AppTypeCode (δ : TyCtxCode) (γ : TermCtxCode) : Set where
varC : (T : TyFormerCode γ) → (x : TyVarCode δ T) → AppTypeCode δ γ
appTyC : (T : TyFormerCode γ) → AppTypeCode δ γ T
μC : (γ₁ : TermCtxCode) → (t : TypeCode (ctxTyFormer γ univ ∷ δ) γ₁)
→ AppTypeCode δ γ
{- (T : TyFormerCode) → (A : TypeCode δ γ univ)
→ (B : TypeCode δ γ (cCtxC γ A T)) → (t : TermCode γ A)
→ Type Δ Γ (subst B t) -}
{-
-- Just one constructor/destructor for now
μ : (Γ Γ₁ : TermCtx) → (t : Type (ctxTyFormer Γ univ ∷ Δ) Γ₁ univ)
→ Type Δ Γ (ctxTyFormer Γ univ)
ν : (Γ Γ₁ : TermCtx) → (t : Type (ctxTyFormer Γ univ ∷ Δ) Γ₁ univ)
→ Type Δ Γ (ctxTyFormer Γ univ)
-}
{-
mutual
data TermCtx : Set where
empty : TermCtx
cCtx : (Γ : TermCtx) → TypeCode Γ → TermCtx
data TypeCode (Γ : TermCtx) : Set where
appTy : TypeCode Γ
Type : (Γ : TermCtx) → TypeCode Γ → Set
data Term : (Γ : TermCtx) → TypeCode Γ → Set where
data TyFormer (Γ : TermCtx) : Set where
univ : TyFormer Γ
abs : (A : TypeCode Γ) → (TyFormer (cCtx Γ A)) → TyFormer Γ
subst : {Γ : TermCtx} → {A : TypeCode Γ}
→ TyFormer (cCtx Γ A) → Term Γ A → TyFormer Γ
subst = {!!}
Type Γ appTy = Σ (TypeCode Γ) (λ A →
Σ (AppType emptyTy Γ (abs A univ)) (λ B →
Term Γ A))
ctxTyFormer : (Γ : TermCtx) → TyFormer Γ → TyFormer
ctxTyFormer empty T = T
ctxTyFormer (cCtx Γ A) T = ctxTyFormer Γ (abs Γ A)
TyCtx : Set
TyCtx = Ctx (Σ TermCtx TyFormer)
TyVar : TyCtx → {Γ : TermCtx} → TyFormer Γ → Set
TyVar Δ {Γ} T = Var Δ (Γ , T)
emptyTy : TyCtx
emptyTy = []
-- | Type syntax
data AppType (Δ : TyCtx) : (Γ : TermCtx) → TyFormer Γ → Set where
var : (Γ : TermCtx) → (T : TyFormer Γ) → (x : TyVar Δ T) → AppType Δ Γ T
appTy : (Γ : TermCtx) → (T : TyFormer) → (A : Type Δ Γ univ)
→ (B : Type Δ Γ (cCtx Γ A T)) → (t : Term Γ)
→ Type Δ Γ (subst B t)
-- Just one constructor/destructor for now
μ : (Γ Γ₁ : TermCtx) → (t : Type (ctxTyFormer Γ univ ∷ Δ) Γ₁ univ)
→ Type Δ Γ (ctxTyFormer Γ univ)
ν : (Γ Γ₁ : TermCtx) → (t : Type (ctxTyFormer Γ univ ∷ Δ) Γ₁ univ)
→ Type Δ Γ (ctxTyFormer Γ univ)
-}
{-
succ' : ∀{Δ} (x : TyVar Δ) → TyVar (∗ ∷ Δ)
succ' = Context.succ ∗
-}
{-
-- | Congruence for types
data _≅T_ {Γ Γ' : Ctx} : Type Γ → Type Γ' → Set where
unit : unit ≅T unit
var : ∀{x : TyVar Γ} {x' : TyVar Γ'} → (x ≅V x') → var x ≅T var x'
_⊕_ : ∀{t₁ t₂ : Type Γ} {t₁' t₂' : Type Γ'} →
(t₁ ≅T t₁') → (t₂ ≅T t₂') →
(t₁ ⊕ t₂) ≅T (t₁' ⊕ t₂')
_⊗_ : ∀{t₁ t₂ : Type Γ} {t₁' t₂' : Type Γ'} →
(t₁ ≅T t₁') → (t₂ ≅T t₂') →
(t₁ ⊗ t₂) ≅T (t₁' ⊗ t₂')
μ : ∀{t : Type (∗ ∷ Γ)} {t' : Type (∗ ∷ Γ')} →
(t ≅T t') →
(μ t) ≅T (μ t')
_⇒_ : ∀{t₁ t₁' : Type []} {t₂ : Type Γ} {t₂' : Type Γ'} →
(t₁ ≅T t₁') → (t₂ ≅T t₂') →
(t₁ ⇒ t₂) ≅T (t₁' ⇒ t₂')
ν : ∀{t : Type (∗ ∷ Γ)} {t' : Type (∗ ∷ Γ')} →
(t ≅T t') →
(ν t) ≅T (ν t')
Trefl : ∀ {Γ : Ctx} {t : Type Γ} → t ≅T t
Trefl {t = unit} = unit
Trefl {t = var x} = var e.refl
where
module s = Setoid
module e = IsEquivalence (s.isEquivalence ≅V-setoid)
Trefl {t = t₁ ⊕ t₂} = Trefl ⊕ Trefl
Trefl {t = μ t} = μ Trefl
Trefl {t = t ⊗ t₁} = Trefl ⊗ Trefl
Trefl {t = t ⇒ t₁} = Trefl ⇒ Trefl
Trefl {t = ν t} = ν Trefl
Tsym : ∀ {Γ Γ' : Ctx} {t : Type Γ} {t' : Type Γ'} → t ≅T t' → t' ≅T t
Tsym unit = unit
Tsym (var u) = var (Vsym u)
Tsym (u₁ ⊕ u₂) = Tsym u₁ ⊕ Tsym u₂
Tsym (u₁ ⊗ u₂) = Tsym u₁ ⊗ Tsym u₂
Tsym (μ u) = μ (Tsym u)
Tsym (u₁ ⇒ u₂) = Tsym u₁ ⇒ Tsym u₂
Tsym (ν u) = ν (Tsym u)
Ttrans : ∀ {Γ₁ Γ₂ Γ₃ : Ctx} {t₁ : Type Γ₁} {t₂ : Type Γ₂} {t₃ : Type Γ₃} →
t₁ ≅T t₂ → t₂ ≅T t₃ → t₁ ≅T t₃
Ttrans unit unit = unit
Ttrans (var u₁) (var u₂) = var (Vtrans u₁ u₂)
Ttrans (u₁ ⊕ u₂) (u₃ ⊕ u₄) = Ttrans u₁ u₃ ⊕ Ttrans u₂ u₄
Ttrans (u₁ ⊗ u₂) (u₃ ⊗ u₄) = Ttrans u₁ u₃ ⊗ Ttrans u₂ u₄
Ttrans (μ u₁) (μ u₂) = μ (Ttrans u₁ u₂)
Ttrans (u₁ ⇒ u₂) (u₃ ⇒ u₄) = Ttrans u₁ u₃ ⇒ Ttrans u₂ u₄
Ttrans (ν u₁) (ν u₂) = ν (Ttrans u₁ u₂)
≡→≅T : ∀ {Γ : Ctx} {t₁ t₂ : Type Γ} →
t₁ ≡ t₂ → t₁ ≅T t₂
≡→≅T {Γ} {t₁} {.t₁} refl = Trefl
-- Note: makes the equality homogeneous in Γ
≅T-setoid : ∀ {Γ} → Setoid _ _
≅T-setoid {Γ} = record
{ Carrier = Type Γ
; _≈_ = _≅T_
; isEquivalence = record
{ refl = Trefl ; sym = Tsym ; trans = Ttrans }
}
-- | Ground type
GType = Type []
unit′ : GType
unit′ = unit
-- | Variable renaming in types
rename : {Γ Δ : TyCtx} → (ρ : Γ ▹ Δ) → Type Γ → Type Δ
rename ρ unit = unit
rename ρ (var x) = var (ρ ∗ x)
rename ρ (t₁ ⊕ t₂) = rename ρ t₁ ⊕ rename ρ t₂
rename {Γ} {Δ} ρ (μ t) = μ (rename ρ' t)
where
ρ' : (∗ ∷ Γ) ▹ (∗ ∷ Δ)
ρ' = ctx-id {[ ∗ ]} ⧻ ρ
rename ρ (t₁ ⊗ t₂) = rename ρ t₁ ⊗ rename ρ t₂
rename ρ (t₁ ⇒ t₂) = t₁ ⇒ rename ρ t₂
rename {Γ} {Δ} ρ (ν t) = ν (rename ρ' t)
where
ρ' : (∗ ∷ Γ) ▹ (∗ ∷ Δ)
ρ' = ctx-id {[ ∗ ]} ⧻ ρ
-------------------------
---- Generating structure on contexts (derived from renaming)
weaken : {Γ : TyCtx} (Δ : TyCtx) → Type Γ -> Type (Δ ∐ Γ)
weaken {Γ} Δ = rename {Γ} {Δ ∐ Γ} (i₂ {Δ} {Γ})
exchange : (Γ Δ : TyCtx) → Type (Γ ∐ Δ) -> Type (Δ ∐ Γ)
exchange Γ Δ = rename [ i₂ {Δ} {Γ} , i₁ {Δ} {Γ} ]
contract : {Γ : TyCtx} → Type (Γ ∐ Γ) -> Type Γ
contract = rename [ ctx-id , ctx-id ]
-- weaken-id-empty-ctx : (Δ : TyCtx) (t : GType) → weaken {[]} Δ t ≡ t
-- weaken-id-empty-ctx = ?
Subst : TyCtx → TyCtx → Set
Subst Γ Δ = TyVar Γ → Type Δ
id-subst : ∀{Γ : TyCtx} → Subst Γ Γ
id-subst x = var x
update : ∀{Γ Δ : TyCtx} → Subst Γ Δ → Type Δ → (Subst (∗ ∷ Γ) Δ)
update σ a zero = a
update σ _ (succ′ _ x) = σ x
single-subst : ∀{Γ : TyCtx} → Type Γ → (Subst (∗ ∷ Γ) Γ)
single-subst a zero = a
single-subst _ (succ′ _ x) = var x
lift : ∀{Γ Δ} → Subst Γ Δ → Subst (∗ ∷ Γ) (∗ ∷ Δ)
lift σ = update (weaken [ ∗ ] ∘ σ) (var zero)
-- | Simultaneous substitution
subst : {Γ Δ : TyCtx} → (σ : Subst Γ Δ) → Type Γ → Type Δ
subst σ unit = unit
subst σ (var x) = σ x
subst σ (t₁ ⊕ t₂) = subst σ t₁ ⊕ subst σ t₂
subst {Γ} {Δ} σ (μ t) = μ (subst (lift σ) t)
subst σ (t₁ ⊗ t₂) = subst σ t₁ ⊗ subst σ t₂
subst σ (t₁ ⇒ t₂) = t₁ ⇒ subst σ t₂
subst {Γ} {Δ} σ (ν t) = ν (subst (lift σ) t)
subst₀ : {Γ : TyCtx} → Type Γ → Type (∗ ∷ Γ) → Type Γ
subst₀ {Γ} a = subst (update id-subst a)
rename′ : {Γ Δ : TyCtx} → (ρ : Γ ▹ Δ) → Type Γ → Type Δ
rename′ ρ = subst (var ∘ (ρ ∗))
-- | Unfold lfp
unfold-μ : (Type [ ∗ ]) → GType
unfold-μ a = subst₀ (μ a) a
-- | Unfold gfp
unfold-ν : (Type [ ∗ ]) → GType
unfold-ν a = subst₀ (ν a) a
--------------------------------------------------
---- Examples
Nat : Type []
Nat = μ (unit ⊕ x)
where x = var zero
Str-Fun : {Γ : TyCtx} → Type Γ → Type (∗ ∷ Γ)
Str-Fun a = (weaken [ ∗ ] a ⊗ x)
where x = var zero
Str : {Γ : TyCtx} → Type Γ → Type Γ
Str a = ν (Str-Fun a)
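-- Editor's sketch (illustrative, not part of the original development):
-- since Nat : Type [] and Str : Type Γ → Type Γ, the type of streams of
-- naturals is Str Nat : Type [], and unfold-str specialises to
-- unfold-ν (Str-Fun Nat) ≅T (Nat ⊗ Str Nat).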
lemma : ∀ {Γ : Ctx} {a b : Type Γ} {σ : Subst Γ Γ} →
subst (update σ b) (weaken [ ∗ ] a) ≅T subst σ a
lemma {a = unit} = unit
lemma {a = var x} = Trefl
lemma {a = a₁ ⊕ a₂} = lemma {a = a₁} ⊕ lemma {a = a₂}
lemma {a = μ a} = μ {!!}
lemma {a = a₁ ⊗ a₂} = lemma {a = a₁} ⊗ lemma {a = a₂}
lemma {a = a₁ ⇒ a₂} = Trefl ⇒ lemma {a = a₂}
lemma {a = ν a} = ν {!!}
lift-id-is-id-ext : ∀ {Γ : Ctx} (x : TyVar (∗ ∷ Γ)) →
(lift (id-subst {Γ})) x ≡ id-subst x
lift-id-is-id-ext zero = refl
lift-id-is-id-ext (succ′ ∗ x) = refl
lift-id-is-id : ∀ {Γ : Ctx} → lift (id-subst {Γ}) ≡ id-subst
lift-id-is-id = η-≡ lift-id-is-id-ext
id-subst-id : ∀ {Γ : Ctx} {a : Type Γ} →
subst id-subst a ≅T a
id-subst-id {a = unit} = unit
id-subst-id {a = var x} = var Vrefl
id-subst-id {a = a ⊕ a₁} = id-subst-id ⊕ id-subst-id
id-subst-id {a = μ a} =
μ (Ttrans (≡→≅T (cong (λ u → subst u a) lift-id-is-id)) id-subst-id)
id-subst-id {a = a ⊗ a₁} = id-subst-id ⊗ id-subst-id
id-subst-id {a = a ⇒ a₁} = Trefl ⇒ id-subst-id
id-subst-id {a = ν a} =
ν (Ttrans (≡→≅T (cong (λ u → subst u a) lift-id-is-id)) id-subst-id)
lemma₂ : ∀ {Γ : Ctx} {a b : Type Γ} →
subst (update id-subst b) (weaken [ ∗ ] a) ≅T a
lemma₂ {Γ} {a} {b} = Ttrans (lemma {Γ} {a} {b} {σ = id-subst}) id-subst-id
unfold-str : ∀{a : Type []} → (unfold-ν (Str-Fun a)) ≅T (a ⊗ Str a)
unfold-str {a} = lemma₂ ⊗ Trefl
LFair : {Γ : TyCtx} → Type Γ → Type Γ → Type Γ
LFair a b = ν (μ ((w a ⊗ x) ⊕ (w b ⊗ y)))
where
x = var zero
y = var (succ zero)
Δ = ∗ ∷ [ ∗ ]
w = weaken Δ
-}
|
{"hexsha": "3d9d0de31464a2482b9e2b691f17ab084ad8c6d1", "size": 9854, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "TypeTheory/FibDataTypes/Types.agda", "max_stars_repo_name": "hbasold/Sandbox", "max_stars_repo_head_hexsha": "8fc7a6cd878f37f9595124ee8dea62258da28aa4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TypeTheory/FibDataTypes/Types.agda", "max_issues_repo_name": "hbasold/Sandbox", "max_issues_repo_head_hexsha": "8fc7a6cd878f37f9595124ee8dea62258da28aa4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TypeTheory/FibDataTypes/Types.agda", "max_forks_repo_name": "hbasold/Sandbox", "max_forks_repo_head_hexsha": "8fc7a6cd878f37f9595124ee8dea62258da28aa4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.79375, "max_line_length": 79, "alphanum_fraction": 0.5254718896, "num_tokens": 4223}
|
module Model
!*******************************************************************************
!
! This contains the five main subroutines of UVAFME:
!
! BioGeoClimate: computes daily and yearly site- and plot-level weather and
! soil dynamics
!
! Canopy: computes the plot-level LAI and light availability
!
! Growth: computes annual tree growth and branch thinning
!
! Mortality: determines which trees die and adds their components to the
! soil
!
! Renewal: updates the seed and seedling banks for each species and
! regenerates new trees
!
!*******************************************************************************
use Parameters
use Constants
use Soil
use Site
use Species
use Tree
use Random
use Climate
use Input
implicit none
contains
!:.........................................................................:
subroutine BioGeoClimate(site, year)
!
! Computes daily weather data and annual sums of weather and soil
! characteristics
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 05/01/05 Y. Xiaodong Original Code
! 01/01/12 K. Holcomb Updated to OOP structure
! 10/10/16 A. C. Foster Updated for soil/plot overhaul
! and permafrost updates
! 05/01/17 A. C. Foster Updated for moss and nutrient
! updates
!
! Data dictionary: constants
real, parameter :: MIN_GROW_TEMP = 5.0 ! Minimum temperature for growing season (degC)
real, parameter :: MAX_DRY_PARM = 1.0001 ! Threshold for below wilting point
real, parameter :: DRY_THRESH = 0.8 ! Threshold for droughty conditions
real, parameter :: MIN_FLOOD_PARM = 0.8 ! Threshold for flooded conditions
integer, parameter :: N_MEM = 3 ! Number of days to calculate temperature "memory"
! Last Julian Day in each month
integer, dimension(12), parameter :: MODAYS = [31, 59, 90, 120, 151, &
181, 212, 243, 273, 304, 334, 365]
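! Editor's note (worked example): MODAYS holds cumulative day-of-year
! boundaries for a 365-day year, so a month counter m advances once the
! Julian day j first exceeds MODAYS(m); e.g. j = 32 > MODAYS(1) = 31
! moves m from 1 (January) to 2 (February).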
! Data dictionary: calling arguments
integer, intent(in) :: year ! Year of simulation
type(SiteData), intent(inout) :: site ! Site object
! Data dictionary: local variables
real, dimension(NTEMPS, 2) :: tdd ! Thawing degree-days (>0degC)
real, dimension(NTEMPS, 2) :: fdd ! Freezing degree-days (<0degC)
real, dimension(NTEMPS) :: tmin ! Monthly minimum temperature (degC)
real, dimension(NTEMPS) :: tmax ! Monthly maximum temperature (degC)
real, dimension(NTEMPS) :: prcp ! Monthly precipitation (cm)
real, dimension(NTEMPS) :: tmean ! Monthly average temperature (degC)
real, dimension(NTEMPS) :: cld ! Monthly cloudiness (tenths of sky covered)
real, dimension(NTEMPS) :: rh ! Monthly relative humidity (%)
real, dimension(NTEMPS) :: wind ! Monthly wind speed
real, dimension(NTEMPS) :: strikes ! Monthly lightning (strikes/km2/day)
real, dimension(NTEMPS) :: tmptmin ! Temporary variable for calculating actual tmin (degC)
real, dimension(NTEMPS) :: tmptmax ! Temporary variable for calculating actual tmax (degC)
real, dimension(NTEMPS) :: tmpprec ! Temporary variable for calculating actual prcp (cm)
real, dimension(NTEMPS) :: tmpcld ! Temporary variable for calculating actual cld (tenths of sky)
real, dimension(DAYS_PER_YEAR) :: daytemp ! Daily temperature (degC)
real, dimension(DAYS_PER_YEAR) :: daytemp_min ! Daily minimum temperature (degC)
real, dimension(DAYS_PER_YEAR) :: daytemp_max ! Daily maximum temperature (degC)
real, dimension(DAYS_PER_YEAR) :: daycld ! Daily cloud cover (tenths of sky covered)
real, dimension(DAYS_PER_YEAR) :: dayprecip ! Daily precipitation (cm)
real, dimension(DAYS_PER_YEAR) :: sun ! Surface solar radiation (cal/cm2/day)
real, dimension(DAYS_PER_YEAR) :: st ! Horizontal surface solar radiation (cal/cm2/day)
real, dimension(DAYS_PER_YEAR) :: exrad ! Top of atmosphere solar radiation (cal/cm2/day)
real, dimension(DAYS_PER_YEAR) :: pot_ev_day ! Potential evapotranspiration (cm)
character(len = MAX_CHAR) :: message ! Error message
real :: rain ! Annual precipitation (cm)
real :: rain_n ! Annual N deposition (tN)
real :: temp_f ! Factor for creating temperature randomness
real :: prcp_f ! Factor for creating precipitation randomness
real :: cld_f ! Factor for creating cloud cover randomness
real :: temp_max ! Maximum temperature of warmest month (degC)
real :: temp_min ! Minimum temperature of warmest month (degC)
real :: daytemp_mem ! Average temperature over last N_MEM days
real :: tmean_max ! Maximum average temperature - for finding warmest month
real :: n_avail ! Plant-available nitrogen (tN/ha)
real :: pet ! Potential evapotranspiration (cm)
real :: e1 ! Saturation vapor pressure at tmin of warmest month
real :: e2 ! Saturation vapor pressure at tmax of warmest month
real :: aet ! Annual actual evapotranspiration (cm)
real :: aet_mm ! Annual actual evapotranspiration (mm)
real :: growdays ! Growing season length (days)
real :: soildays ! Soil degree-days (>0degC)
real :: flooddays ! Proportion of growing season with flooded conditions
real :: wpdays ! Proportion of growing season below wilting point
real :: drydays ! Proportion of growing season with drought conditions
real :: degday ! Growing degree-days (>5degC)
real :: outwater ! Runoff (cm)
real :: tot_sun ! Annual surface solar radiation (cal/cm2/day)
real :: tot_st ! Annual horizontal surface solar radiation (cal/cm2/day)
real :: cfs ! Ratio of surface:horizontal surface solar radiation
real :: act_ev_day ! Actual evapotranspiration (cm)
real :: tcum ! Cumulative thawing degree-days (>0degC)
real :: fcum ! Cumulative freezing degree-days (<0degC)
real :: amlt ! Last year's active layer depth (m)
real :: xmlt ! Thaw depth (m)
real :: xfrz ! Freezing depth (m)
real :: zh ! Soil layer depth
real :: alff ! Available light on the forest floor (0-1)
real :: pc_germ ! Effect of temperature on germination (0-1)
real :: aow0_ByMin ! Organic layer moisture scaled by wilting point
real :: saw0_ByFC ! Mineral layer moisture scaled by field capacity
real :: saw0_BySAT ! Mineral layer moisture scaled by saturation capacity
real :: saw0_ByWP ! Mineral layer moisture scaled by wilting point
real :: saw0_ByFC_sum ! Sum of mineral layer moisture scaled by field capacity
real :: aow0_ByMin_sum ! Sum of organic layer moisture scaled by wilting point
real :: saw0_BySAT_sum ! Sum of mineral layer moisture scaled by saturation capacity
real :: tmpstep1 ! Temporary variable for implementing linear climate change
real :: tmpstep2 ! Temporary variable for implementing linear climate change
real :: tmp ! Temporary variable for implementing linear climate change
integer :: gcm_year ! Year of climate change simulation
integer :: siteid ! Site ID
integer :: warmest_month ! Warmest month
integer :: hrise ! Hour of sunrise
integer :: i, j, m, ip ! Looping indices
integer :: l ! Looping index (soil layers)
! Initialize accumulators
rain = 0.0
rain_n = 0.0
tmean_max = RNVALID
! Set site ID - in case we need to warn user
siteid = site%site_id
! Check for and implement climate change
! The user is expected to input decr_by values as positive
if (linear_cc) then
! Using linear climate change
if (year .ge. site%gcm_year .and. year .le. &
(site%gcm_year + gcm_duration)) then
site%accum_tmin = site%accum_tmin + tmin_change
site%accum_tmax = site%accum_tmax + tmax_change
do m = 1, NTEMPS
tmpstep1 = site%precip(m) + site%accum_precip(m)
tmpstep2 = tmpstep1 * precip_change
site%accum_precip(m) = site%accum_precip(m) + tmpstep2
end do
endif
else if (use_gcm) then
! Using climate change from input file - figure out which year
! we are in the file
gcm_year = start_gcm + year - site%gcm_year
if (gcm_year .ge. start_gcm .and. gcm_year .le. end_gcm) then
! Read in climate change data
call read_gcm_climate(site%site_id, gcm_year, start_gcm, tmin, &
tmax, prcp)
if (site%site_id .ne. INVALID) then
! Update climate values
site%tmin = tmin
site%tmax = tmax
site%precip = prcp*MM_TO_CM
! Readjust for altitude if needed
if (adjust_altitude) then
call adjustForAltitude(site)
end if
else
! Problem reading in climate data - tell user, but don't
! update
write(message, '(A, I6, A)') "Bad climate data for site ", &
siteid, " - using historical climate."
call warning(message)
end if
endif
endif
! Generate current year's weather from distributions of input climate
! data
do i = 1, NTEMPS
if (linear_cc) then
! Adjust for linear climate change
tmptmin(i) = site%tmin(i) + site%accum_tmin
tmptmax(i) = site%tmax(i) + site%accum_tmax
tmpprec(i) = site%precip(i) + site%accum_precip(i)
tmpcld(i) = site%cld(i)
else
tmptmin(i) = site%tmin(i)
tmptmax(i) = site%tmax(i)
tmpprec(i) = site%precip(i)
tmpcld(i) = site%cld(i)
endif
! Calculate climate fluctuations
temp_f = clim_nrand(0.0, 1.0)
prcp_f = clim_nrand(0.0, 1.0)
cld_f = clim_nrand(0.0, 1.0)
! Clamp fluctuations to [-1, 1]
prcp_f = max(-1.0, min(prcp_f, 1.0))
temp_f = max(-1.0, min(temp_f, 1.0))
cld_f = max(-1.0, min(cld_f, 1.0))
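! Editor's note: because the random draws are clamped to [-1, 1], each
! simulated monthly value stays within one standard deviation of its
! input mean, e.g. tmin(i) is confined to tmptmin(i) +/- tmin_std(i).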
! Adjust monthly climate vars with std and random numbers
if (use_gcm .and. (year .ge. site%gcm_year .and. year .le. &
(site%gcm_year + gcm_duration))) then
! Use the input values directly for temperature and
! precipitation
tmin(i) = tmptmin(i)
tmax(i) = tmptmax(i)
prcp(i) = max(tmpprec(i), 0.0)
! Adjust for std
cld(i) = max(tmpcld(i) + cld_f*site%cld_std(i), 0.0)
else
! Adjust for std
tmin(i) = tmptmin(i) + temp_f*site%tmin_std(i)
tmax(i) = tmptmax(i) + temp_f*site%tmax_std(i)
! Can't be less than 0.0
cld(i) = max(tmpcld(i) + cld_f*site%cld_std(i), 0.0)
prcp(i) = max(tmpprec(i) + prcp_f*site%precip_std(i), 0.0)
end if
! Accumulate precipitation and N deposition
rain = rain + prcp(i)
rain_n = rain_n + prcp(i)*PRCP_N
! Get mean monthly temperature for warmest month calculation
tmean(i) = (site%tmin(i) + site%tmax(i))/2.0
end do
! Find warmest month of this year
do i = 1, NTEMPS
tmean_max = max(tmean_max, tmean(i))
if (tmean_max .eq. tmean(i)) warmest_month = i
end do
! Get tmax and tmin of warmest month
temp_max = site%tmax(warmest_month)
temp_min = site%tmin(warmest_month)
! Calculate e2 and e1 (used for PET calculation)
e1 = esat(temp_min)
e2 = esat(temp_max)
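! Editor's note: e1 and e2 bracket the diurnal range of saturation vapor
! pressure in the warmest month; pot_evap below presumably uses their
! difference as a proxy for atmospheric drying power.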
! Convert monthly weather data into daily weather data
call cov365_state(tmin, daytemp_min)
call cov365_state(tmax, daytemp_max)
call cov365_integr(prcp, dayprecip)
call cov365_state(cld, daycld)
! Initialize accumulators
pet = 0.0
tot_sun = 0.0
tot_st = 0.0
m = 1
! Calculate mean daily temperature, solar radiation, and PET
do i = 1, DAYS_PER_YEAR
! Mean daily temperature (degC)
daytemp(i) = 0.5*(daytemp_min(i) + daytemp_max(i))
! Calculate solar radiation (cal/cm2/day)
call solar_rad(i, site%latitude, site%slope, site%aspect, &
daycld(i), exrad(i), sun(i), st(i), hrise)
! Accumulate surface and horizontal surface radiation
tot_sun = tot_sun + sun(i) ! Actual surface
tot_st = tot_st + st(i) ! Horizontal surface
! Calculate PET (cm)
pot_ev_day(i) = pot_evap(daytemp(i), sun(i), site%altitude, e2, e1)
! Accumulate PET (cm)
pet = pet + pot_ev_day(i)
end do
! Calculate ratio of actual surface to horizontal surface radiation
cfs = tot_sun/tot_st
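! Editor's note (illustrative): cfs > 1 indicates the sloped surface
! received more annual radiation than a horizontal surface would have
! (e.g. equator-facing slopes), and cfs < 1 the opposite; the permafrost
! routine below takes it as a scaling on surface energy input.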
! Calculate freezing and thawing degree days for permafrost subroutine
tdd = 0.0
fdd = 0.0
m = 1
do j = 1, DAYS_PER_YEAR
if (j .gt. MODAYS(m)) m = m + 1
if (tmean(m) > epsilon(1.0) .and. daytemp(j) > epsilon(1.0)) then
tdd(m, 1) = tdd(m, 1) + daytemp(j)
end if
if (tmean(m) <= epsilon(1.0) .and. daytemp(j) <= epsilon(1.0)) then
fdd(m, 1) = fdd(m, 1) + abs(daytemp(j))
end if
end do
! Calculate cumulative freezing and thawing degree days
tcum = 0.0
fcum = 0.0
do m = 12, 1, -1
if (fdd(m, 1) .gt. 0.0) fcum = fcum + fdd(m, 1)
if (fdd(m, 1) .eq. 0.0) exit
end do
do m = 1, 12
if (tdd(m, 1) .eq. 0.0) tcum = 0.0
tcum = tcum + tdd(m, 1)
tdd(m, 2) = tcum
if (fdd(m, 1) .eq. 0.0) fcum = 0.0
fcum = fcum + fdd(m, 1)
fdd(m, 2) = fcum
end do
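! Editor's note (worked example): the backward loop seeds fcum with the
! freezing degree-days of the trailing cold months (e.g. Nov-Dec), so
! January's cumulative value fdd(1,2) already carries last winter's
! freeze; within the forward loop, a month with zero tdd (fdd) resets
! the thaw (freeze) accumulator.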
! Loop through each plot to calculate soil dynamics
do ip = 1, site%numplots
! Initialize accumulators
aet = 0.0
degday = 0.0
growdays = 0.0
soildays = 0.0
flooddays = 0.0
drydays = 0.0
outwater = 0.0
wpdays = 0.0
aow0_ByMin_sum = 0.0
saw0_ByFC_sum = 0.0
saw0_BySAT_sum = 0.0
! Store depth of thaw from previous year (m)
amlt = min(site%plots(ip)%soil%active, site%plots(ip)%soil%A_depth)
site%plots(ip)%amlt = amlt
! Reset
site%plots(ip)%soil%active = 0.0
site%plots(ip)%soil%z_freeze = 0.0
! Initialize freeze and thaw depths
xmlt = 0.0
xfrz = site%plots(ip)%soil%M_depth + &
site%plots(ip)%soil%O_depth + site%plots(ip)%soil%A_depth
! Calculate light on the forest floor
alff = 1.0*exp(-0.25*site%plots(ip)%cla/plotsize)
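! Editor's note (worked example): this is Beer-Lambert extinction with
! coefficient 0.25; a cumulative leaf area of 2 m2 per m2 of plot gives
! alff = exp(-0.25*2) ~ 0.61, i.e. about 61% of light reaches the floor.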
do l = 1, 2
! Calculate drainage conditions
site%plots(ip)%soil%z_drain(l) = &
(site%plots(ip)%soil%sat(l)*(1.0 - amlt) + &
site%plots(ip)%soil%fc(l)*(amlt - 0.32))/(1.0 - 0.32)
! Must be between field capacity and saturation capacity
site%plots(ip)%soil%z_drain(l) = &
min(site%plots(ip)%soil%z_drain(l), &
site%plots(ip)%soil%sat(l))
site%plots(ip)%soil%z_drain(l) = &
max(site%plots(ip)%soil%z_drain(l), &
site%plots(ip)%soil%fc(l))
! Set soil to fully saturated at onset of year
if (l .eq. 1) then
site%plots(ip)%soil%wc(l) = &
site%plots(ip)%soil%z_drain(l)*H2O_D/ &
site%plots(ip)%soil%O_bulk_dens
zh = site%plots(ip)%soil%O_depth + &
site%plots(ip)%soil%M_depth
else
site%plots(ip)%soil%wc(l) = &
site%plots(ip)%soil%z_drain(l)*H2O_D/ &
site%plots(ip)%soil%A_bulk_dens
zh = site%plots(ip)%soil%A_depth
end if
! Soil is completely frozen at start of year
site%plots(ip)%soil%H2Oice(l) = &
site%plots(ip)%soil%z_drain(l)*zh
site%plots(ip)%soil%water(l) = 0.0
site%plots(ip)%soil%d_melt(l) = 0.0
site%plots(ip)%soil%d_freeze(l) = zh
site%plots(ip)%soil%minWC = site%plots(ip)%soil%wc(2)
end do
! Loop on days - thaw depths are calculated monthly so have to
! increment the months as well
m = 1
do j = 1, DAYS_PER_YEAR
if (j .gt. MODAYS(m)) m = m + 1
! Calculate freeze/thaw depths (xfrz, xmlt) and maximum depths
! of freeze and thaw
call permf(site%plots(ip)%soil, m, 1, alff, tdd, fdd, cfs, xfrz)
call permf(site%plots(ip)%soil, m, 2, alff, tdd, fdd, cfs, xmlt)
! Update maximum depths (z_freeze and active)
site%plots(ip)%soil%z_freeze = max((xfrz - &
site%plots(ip)%soil%M_depth - &
site%plots(ip)%soil%O_depth), site%plots(ip)%soil%z_freeze)
site%plots(ip)%soil%active = max((xmlt - &
site%plots(ip)%soil%M_depth - &
site%plots(ip)%soil%O_depth), site%plots(ip)%soil%active)
! Calculate soil water dynamics for the day
call moist(site%plots(ip)%soil, site%site_id, ip, year, j, &
daytemp(j), dayprecip(j), pot_ev_day(j), &
site%leaf_area_ind, site%slope, amlt, xmlt, xfrz, tdd, m, &
act_ev_day, aow0_ByMin, saw0_ByFC, saw0_ByWP, saw0_BySAT)
! Update minimum water content for the year
site%plots(ip)%soil%minWC = min(site%plots(ip)%soil%minWC, &
site%plots(ip)%soil%wc(2))
! Accumulate variables
outwater = outwater + site%plots(ip)%soil%runoff
aet = act_ev_day + aet
! Compute degday, dry days, flood days, and growing season
! length (days)
if (daytemp(j) .ge. MIN_GROW_TEMP) then
! Growing degree-days
degday = degday + (daytemp(j) - MIN_GROW_TEMP)
! Growing season length
growdays = growdays + 1.0
! For averaging values
saw0_ByFC_sum = saw0_ByFC_sum + saw0_ByFC
saw0_BySAT_sum = saw0_BySAT_sum + saw0_BySAT
aow0_ByMin_sum = aow0_ByMin_sum + aow0_ByMin
if (saw0_ByFC .lt. DRY_THRESH) then
drydays = drydays + 1.0
end if
if (aow0_ByMin .lt. MAX_DRY_PARM) then
wpdays = wpdays + 1.0
end if
if (saw0_BySAT .gt. MIN_FLOOD_PARM) then
flooddays = flooddays + 1.0
endif
end if
! Accumulate soil degree-days
if (daytemp(j) .ge. 0.0) then
soildays = soildays + (daytemp(j) - 0.0)
end if
end do
! Convert drydays, flooddays, and wpdays to proportion of growing
! season
if (growdays .eq. 0) then
drydays = 0.0
flooddays = 0.0
wpdays = 0.0
else
tmp = max(min(rain/pet, 1.0), min(aet/pet, 1.0))
drydays = ((drydays/growdays) + (1.0 - tmp))/2.0
flooddays = flooddays/growdays
wpdays = wpdays/growdays
endif
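! Editor's note (worked example): drydays blends the day count with an
! annual moisture deficit. With 30 dry days in a 120-day season,
! rain/pet = 0.6 and aet/pet = 0.5, tmp = 0.6 and
! drydays = (0.25 + 0.4)/2 = 0.325.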
! Convert aet to mm for decomposition
aet_mm = aet*10.0
call moss(site%plots(ip)%soil, alff, site%plots(ip)%cla, &
site%plots(ip)%soil%dec_fuel, drydays, site%site_id, ip, year)
call soiln(site%plots(ip)%soil, aet_mm, site%plots(ip)%cla, &
soildays, flooddays, n_avail)
! Set fan to 0.0 (used last year's fan for this year's soiln
! calculation)
site%plots(ip)%soil%fan = 0.0
! Set plot-level attributes for year-end values
site%plots(ip)%soil%avail_N = n_avail + rain_n
site%plots(ip)%saw0_ByFC = saw0_ByFC_sum/growdays
site%plots(ip)%aow0_ByMin = aow0_ByMin_sum/growdays
site%plots(ip)%saw0_BySAT = saw0_BySAT_sum/growdays
site%plots(ip)%soil%runoff = outwater
site%plots(ip)%act_evap_day = aet
site%plots(ip)%flood_days = flooddays
site%plots(ip)%dry_days = drydays
site%plots(ip)%wilt_days = wpdays
end do
! Calculate site's base aridity (mean over the first 10 years) and
! yearly aridity
if (year .le. 9) then
if (year .eq. 0) then
site%aridity_base = min(rain/pet, 1.0)
else if (year .gt. 0 .and. year .lt. 9) then
site%aridity_base = site%aridity_base + min(rain/pet, 1.0)
else if (year .eq. 9) then
site%aridity_base = (site%aridity_base + min(rain/pet, 1.0))/10.0
endif
end if
site%aridity = min(rain/pet, 1.0)
! Set site-level attributes to yearly sums of climate values
site%deg_days = degday
site%grow_days = growdays
site%pot_evap_day = pet
site%solar = tot_sun
site%rain = rain
end subroutine BioGeoClimate
!:.........................................................................:
subroutine Canopy(site)
!
! Calculates plot-level leaf area, LAI, and light environment
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 05/01/05 Y. Xiaodong Original Code
! 01/01/12 K. Holcomb Updated to OOP structure
! Data dictionary: constants
real, parameter :: XT = -0.40 ! Light extinction coefficient
! Data dictionary: calling arguments
type(SiteData), intent(inout) :: site ! Site object
! Data dictionary: local variables
real, dimension(maxheight) :: la_dec ! Leaf area experienced by deciduous plants (m2)
real, dimension(maxheight) :: la_con ! Leaf area experienced by evergreen plants (m2)
real, dimension(maxheight) :: cla_dec ! Cumulative leaf area experienced by deciduous plants (m2)
real, dimension(maxheight) :: cla_con ! Cumulative leaf area experienced by evergreen plants (m2)
real :: forht ! Tree height (m)
real :: canht ! Clear branch bole height (m)
real :: tla ! Tree leaf area (m2)
real :: tla_adj ! Leaf area per 1-m height bin (m2)
integer :: ntrees ! Number of trees on plot
integer :: iht ! Tree height (m)
integer :: cl ! Canopy length (m)
integer :: i, ip ! Looping indices
integer :: ih, it ! Looping indices (height bins, trees)
integer :: m ! Month index
! Initialize accumulators
site%leaf_area_ind = 0.0
site%lai_array = 0.0
! Loop through plots to calculate leaf area of each tree and LAI of
! stand
do ip = 1, site%numplots
! Get number of trees on plot
ntrees = site%plots(ip)%numtrees
if (ntrees .eq. 0) then
! Full light conditions and no nutrient pressure
site%plots(ip)%con_light = 1.0
site%plots(ip)%dec_light = 1.0
site%plots(ip)%fc_nutr = 1.0
else
! Initialize leaf area and canopy biomass arrays
la_dec = 0.0
la_con = 0.0
cla_dec = 0.0
cla_con = 0.0
do it = 1, ntrees
! Total tree height (m)
forht = max(site%plots(ip)%trees(it)%forska_ht, 2.0)
! Integer of tree height (m)
iht = min(int(forht), maxheight)
! Clear branch bole height (m)
canht = max(site%plots(ip)%trees(it)%canopy_ht, 1.0)
! Calculate leaf area (m2)
tla = leaf_area(site%plots(ip)%trees(it))
! Accumulate site leaf area
site%leaf_area_ind = site%leaf_area_ind + tla
! Calculate canopy depth and divide leaf area and biomass
! into 1-m sections
cl = max(int(forht) - int(canht) + 1, 1)
tla_adj = tla/float(cl)
! Fill temporary arrays with leaf area/biomass
if (site%plots(ip)%trees(it)%conifer) then
! Leaf area experienced by evergreens is reduced for deciduous
! plants (the 0.8 factor below): this accounts for the part of
! each year without deciduous leaves
do ih = int(canht), int(forht)
la_dec(ih) = la_dec(ih) + tla_adj
la_con(ih) = la_con(ih) + tla_adj
end do
else
do ih = int(canht), int(forht)
la_dec(ih) = la_dec(ih) + tla_adj
la_con(ih) = la_con(ih) + tla_adj*0.8
end do
end if
end do
! Calculate cumulative leaf area from top down
cla_dec(maxheight) = la_dec(maxheight)
cla_con(maxheight) = la_con(maxheight)
do ih = 1, maxheight - 1
! Reduced leaf area from deciduous plants means a brighter
! light environment for evergreens
! Leaf area for deciduous plants is unreduced because evergreen
! and deciduous foliage are both present when deciduous leaves are on
cla_dec(maxheight - ih) = cla_dec(maxheight - ih + 1) + &
la_dec(maxheight - ih)
cla_con(maxheight - ih) = cla_con(maxheight - ih + 1) + &
la_con(maxheight - ih)
end do
! Calculate light environment
do ih = 1, maxheight - 1
site%plots(ip)%con_light(ih) = exp(XT*cla_con(ih + 1)/ &
plotsize)
site%plots(ip)%dec_light(ih) = exp(XT*cla_dec(ih + 1)/ &
plotsize)
end do
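! Editor's note (worked example): light transmission follows
! exp(XT*CLA/plotsize) with XT = -0.40; a cumulative leaf area index of
! 2 above height ih gives exp(-0.8) ~ 0.45, so a crown top at that
! height sees about 45% of full light.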
end if ! end if any trees
! Save plot attributes
site%plots(ip)%cla = cla_dec(1)
site%lai_array = site%lai_array + la_dec
end do !end plot loop
! Get average LAI (m2/m2) for site
site%leaf_area_ind = site%leaf_area_ind/float(site%numplots)/plotsize
site%lai_array = site%lai_array/float(site%numplots)/plotsize
end subroutine Canopy
!:.........................................................................:
subroutine Growth(site)
!
! Calculates annual growth and branch thinning of each tree
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 05/01/05 Y. Xiaodong Original Code
! 01/01/12 K. Holcomb Updated to OOP structure
! 10/10/16 A. C. Foster Updated for soil/plot overhaul
! and permafrost updates
!
! Data dictionary: constants
integer, parameter :: MCOUNT = 2 ! Consecutive years of growth stress before mortality flag
! Data dictionary: calling arguments
type(SiteData), intent(inout) :: site ! Site object
! Data dictionary: local variables
real, dimension(size(site%species)) :: recr_trees ! Number of reproductively active trees
real, dimension(maxcells*maxcells) :: diam ! Tree diameter (dbh)
real, dimension(maxcells*maxcells) :: shade ! Shade stress at top of tree's canopy (0-1)
real, dimension(maxcells*maxcells) :: can_shade ! Shade stress at bottom of tree's canopy (0-1)
real, dimension(maxcells*maxcells) :: biomC ! Woody biomass (tC)
real, dimension(maxcells*maxcells) :: bleaf ! Leaf biomass (tC)
real :: ht ! Tree height (m)
real :: canht ! Clear branch bole height(m)
real :: envstress ! Growth stress factor (0-1)
real :: totbiomC ! Total biomass on plot (tC)
real :: NPP ! Net primary production (tC)
real :: d_leafb ! Change in leaf biomass (tC)
real :: N_used ! Nitrogen used by plant growth (tN)
real :: N_req ! Nitrogen required for plant growth (tN)
real :: Npavail ! Percent N available of required N
real :: dt ! Diameter increment (cm)
real :: mindt ! Minimum diameter increment before "stressed" (cm)
real :: dbiomC ! Change in woody biomass from previous year (tC)
real :: leafbm ! Leaf biomass (tC)
real :: bct ! Total woody biomass, including roots (tC)
real :: bcr ! Root biomass (tC)
real :: bcs ! Stem biomass (tC)
real :: bcbr ! Total branch biomass (tC)
real :: d_bc ! Woody biomass lost to branch thinning (tC)
real :: d_bcs ! Stem biomass lost to branch thinning (tC)
real :: d_bcr ! Root biomass lost to branch thinning (tC)
real :: d_bcbr ! Total branch biomass lost to branch thinning (tC)
real :: d_bctw ! Twig biomass lost to branch thinning (tC)
real :: d_bcsb ! Small branch biomass lost to branch thinning (tC)
real :: d_bclb ! Large branch biomass lost to branch thinning (tC)
integer :: hc ! Canopy height (m)
integer :: h ! Tree height (m)
integer :: ntrees ! Number of trees on plot
integer :: num_species ! Number of species in site
integer :: it, is, ip ! Looping indices
integer :: lc ! Litter class
! Get number of species at site
num_species = size(site%species)
! Initialize recruiting trees accumulator
recr_trees = 0.0
plot: do ip = 1, site%numplots
! Initialize accumulators
N_used = 0.0
N_req = 0.0
NPP = 0.0
totbiomC = 0.0
site%plots(ip)%mature(:) = 0
site%plots(ip)%avail_spec = 0.0
! Calculate species-level response to drought, over-saturation,
! temperature, and permafrost
do is = 1, num_species
call temp_rsp(site%species(is), site%deg_days, &
site%plots(ip)%fc_gdd(is))
call drought_rsp(site%species(is), site%plots(ip)%dry_days, &
site%plots(ip)%fc_drought(is))
call perm_rsp(site%species(is)%perm_tol, &
site%plots(ip)%soil%active, site%plots(ip)%fc_perm(is))
end do
! Get number of trees
ntrees = site%plots(ip)%numtrees
numtrees: if (ntrees .gt. 0) then
stress: do it = 1, ntrees
! Get species index and update tree
is = site%plots(ip)%trees(it)%species_index
!call update_tree(site%plots(ip)%trees(it), site%species(is))
! Save diameter here
diam(it) = site%plots(ip)%trees(it)%diam_bht
! Convenience variables to reduce table lookups
canht = site%plots(ip)%trees(it)%canopy_ht
ht = site%plots(ip)%trees(it)%forska_ht
! Calculate if species is able to regenerate
site%plots(ip)%avail_spec(is) = max(kron(diam(it) - &
site%species(is)%max_diam*site%species(is)%dbh_min), &
site%plots(ip)%avail_spec(is))
if (site%plots(ip)%trees(it)%tree_age .ge. &
site%species(is)%recr_age .and. &
site%plots(ip)%trees(it)%diam_bht .gt. 10.0) then
site%plots(ip)%mature(is) = site%plots(ip)%mature(is) + 1
end if
! Get canopy and total tree height as integers
h = max(int(ht), 1)
hc = max(int(canht), 1)
! Get leaf biomass and maximum possible DBH growth
call leaf_biomass_c(site%plots(ip)%trees(it))
call max_growth(site%plots(ip)%trees(it))
! Save current value of leaf biomass
bleaf(it) = site%plots(ip)%trees(it)%leaf_bm
! Calculate shading effect on tree
if (site%plots(ip)%trees(it)%conifer) then
shade(it) = light_rsp(site%species(is), &
site%plots(ip)%con_light(h))
can_shade(it) = light_rsp(site%species(is), &
site%plots(ip)%con_light(hc))
else
shade(it) = light_rsp(site%species(is), &
site%plots(ip)%dec_light(h))
can_shade(it) = light_rsp(site%species(is), &
site%plots(ip)%dec_light(hc))
end if
! Calculate environmental stress (excluding nutrients)
call env_stress(site%plots(ip)%trees(it), shade(it), &
site%plots(ip)%fc_gdd(is), &
site%plots(ip)%fc_drought(is), &
site%plots(ip)%fc_perm(is), envstress)
! Increment tree diameter using potential DBH growth
site%plots(ip)%trees(it)%diam_bht = &
site%plots(ip)%trees(it)%diam_bht + &
site%plots(ip)%trees(it)%diam_opt*envstress
! Compute total height for new diameter
call forska_height(site%plots(ip)%trees(it))
! Update clear branch bole height and leaf biomass with
! new height
call stem_shape(site%plots(ip)%trees(it))
call leaf_biomass_c(site%plots(ip)%trees(it))
! Calculate leaf and fine root growth N requirement
if (site%species(is)%conifer) then
N_req = N_req + &
(site%plots(ip)%trees(it)%leaf_bm - &
bleaf(it)*(1.0 - CON_LEAF_RATIO))/CON_LEAF_C_N
else
N_req = N_req + &
site%plots(ip)%trees(it)%leaf_bm/DEC_LEAF_C_N
end if
! Store old value of woody biomass
biomC(it) = site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC
! Compute new value
call biomass_c(site%plots(ip)%trees(it))
call biomass_n(site%plots(ip)%trees(it))
! Calculate woody growth N requirement
N_req = N_req + ((site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC) - biomC(it))/STEM_C_N
end do stress
! Convert N_req to tonnes N/ha
N_req = max(N_req*HEC_TO_M2/plotsize, epsilon(1.0))
! Calculate percent available N
Npavail = site%plots(ip)%soil%avail_N/N_req
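! Editor's note (illustrative): if 0.02 tN/ha is available against a
! requirement of 0.04 tN/ha, Npavail = 0.5, and each species' fc_nutr
! below reflects how well it tolerates that 50% shortfall
! (via poor_soil_rsp).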
! Calculate species-level response to available N
do is = 1, num_species
site%plots(ip)%fc_nutr(is) = poor_soil_rsp(Npavail, &
site%species(is)%lownutr_tol)
end do
! Calculate actual DBH growth and N used
grow: do it = 1, ntrees
! Get species index and update tree
is = site%plots(ip)%trees(it)%species_index
!call update_tree(site%plots(ip)%trees(it), site%species(is))
! Calculate environmental stress - including nutrients
call env_stress(site%plots(ip)%trees(it), shade(it), &
site%plots(ip)%fc_gdd(is), &
site%plots(ip)%fc_drought(is), &
site%plots(ip)%fc_perm(is), &
envstress, site%plots(ip)%fc_nutr(is))
! Calculate actual diameter increment growth
dt = site%plots(ip)%trees(it)%diam_opt*envstress
! Increment old diameter
site%plots(ip)%trees(it)%diam_bht = diam(it) + dt
! Get check value for age and growth-related mortality
mindt = min(site%species(is)%max_diam/ &
site%species(is)%max_age*0.1, site%species(is)%dbh_min)
! Check for possible mortality age/growth stress mortality
if (dt .le. site%species(is)%dbh_min) then
! Diameter growth is below minimum level, increment
! mortality counter
site%plots(ip)%trees(it)%mort_count = &
site%plots(ip)%trees(it)%mort_count + 1
if (site%plots(ip)%trees(it)%mort_count .ge. MCOUNT) then
! Tree has been stressed for too many years,
! turn on mortality flag
site%plots(ip)%trees(it)%mort_marker = .true.
else
! Still possible to live - keep the stress counter but
! leave the mortality flag off
site%plots(ip)%trees(it)%mort_marker = .false.
endif
else
! Reset mortality counter and set flag to false
site%plots(ip)%trees(it)%mort_count = 0
site%plots(ip)%trees(it)%mort_marker = .false.
endif
! Compute actual new height and diameter
call forska_height(site%plots(ip)%trees(it))
call stem_shape(site%plots(ip)%trees(it))
! Update biomass, saving leaf biomass into convenience
! variable
call leaf_biomass_c(site%plots(ip)%trees(it))
call biomass_c(site%plots(ip)%trees(it))
call biomass_n(site%plots(ip)%trees(it))
leafbm = site%plots(ip)%trees(it)%leaf_bm
! Calculate change in biomass from previous year
dbiomC = (site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC) - biomC(it)
! Calculate C and N used
NPP = NPP + dbiomC
N_used = N_used + dbiomC/STEM_C_N
! Update NPP and N_used from leaf biomass
if (site%plots(ip)%trees(it)%conifer) then
! Conifers don't have to put all of their leaf biomass
! back on
NPP = NPP + leafbm - bleaf(it)*(1.0 - CON_LEAF_RATIO)
N_used = N_used + (leafbm - &
bleaf(it)*(1.0 - CON_LEAF_RATIO))/CON_LEAF_C_N
! Accumulate total biomass
totbiomC = totbiomC + site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC + leafbm
else
NPP = NPP + leafbm
N_used = N_used + leafbm/DEC_LEAF_C_N
! Accumulate total woody biomass (no leaves)
totbiomC = totbiomC + site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC
end if
! Calculate stand age as maximum tree age
site%plots(ip)%stand_age = max(site%plots(ip)%stand_age, &
site%plots(ip)%trees(it)%tree_age)
! Get updated tree height and clear branch bole height
ht = site%plots(ip)%trees(it)%forska_ht
canht = site%plots(ip)%trees(it)%canopy_ht
hc = max(int(canht), 1)
! Check for lower branch thinning
! This will increase clear branch bole height
branchfall: if (dt <= site%species(is)%dbh_min) then
! Tree is growth-stressed and will drop some branches
! Increment clear branch bole height
hc = hc + 1
htcheck: if (hc < int(ht)) then
! Clear branch bole height is still less than tree
! height
! So we can update it without any issues
site%plots(ip)%trees(it)%canopy_ht = float(hc) + &
0.01
! Update diameter at clear branch bole height
call stem_shape(site%plots(ip)%trees(it))
! Save old woody biomass
bct = site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC
! Save old root, twig, stem C biomass
bcr = site%plots(ip)%trees(it)%rootC
bcbr = site%plots(ip)%trees(it)%branchC
bcs = site%plots(ip)%trees(it)%stemC
! Update biomass C and N given new clear branch
! bole height
call biomass_c(site%plots(ip)%trees(it))
call biomass_n(site%plots(ip)%trees(it))
! How much wood litter did we lose?
d_bc = bct - (site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC)
d_bcr = bcr - site%plots(ip)%trees(it)%rootC
d_bcbr = bcbr - site%plots(ip)%trees(it)%branchC
d_bcs = bcs - site%plots(ip)%trees(it)%stemC
! Divide branch biomass into twigs, large branches,
! and small branches (Thonicke et al. 2010)
d_bctw = d_bcbr*PERC_BRANCHES(1)
d_bcsb = d_bcbr*PERC_BRANCHES(2)
d_bclb = d_bcbr*PERC_BRANCHES(3)
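! Editor's note (illustrative): PERC_BRANCHES is defined in the
! Constants module; e.g. with fractions (0.4, 0.35, 0.25) a 1 tC branch
! loss would split into 0.4 tC twigs, 0.35 tC small and 0.25 tC large
! branches. The actual fractions are set elsewhere.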
! Add litter loss to litter pools
! Here we convert to dry biomass because soiln
! subroutine calculates weight loss not C loss
! Roots
site%plots(ip)%soil%litter(IROOT) = &
site%plots(ip)%soil%litter(IROOT) + d_bcr/B_TO_C
! Twigs
site%plots(ip)%soil%litter(ITW) = &
site%plots(ip)%soil%litter(ITW) + d_bctw/B_TO_C
! Small branches
site%plots(ip)%soil%litter(ISBR) = &
site%plots(ip)%soil%litter(ISBR) + d_bcsb/B_TO_C
! Large branches
site%plots(ip)%soil%litter(ILBR) = &
site%plots(ip)%soil%litter(ILBR) + d_bclb/B_TO_C
! Boles of trees with DBH < 10 cm go into small wood,
! otherwise into large wood
if (site%plots(ip)%trees(it)%diam_bht .gt. &
10.0) then
site%plots(ip)%soil%litter(ILBL) = &
site%plots(ip)%soil%litter(ILBL) + &
d_bcs/B_TO_C
else
site%plots(ip)%soil%litter(ISBL) = &
site%plots(ip)%soil%litter(ISBL) + &
d_bcs/B_TO_C
end if
! Save previous value of leaf biomass
leafbm = site%plots(ip)%trees(it)%leaf_bm
! Update leaf bm and get difference
call leaf_biomass_c(site%plots(ip)%trees(it))
d_leafb = leafbm - site%plots(ip)%trees(it)%leaf_bm
! Add that litter to correct leaf litter class
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + d_leafb/B_TO_C
end if htcheck
end if branchfall
end do grow
end if numtrees
! Update plot-level soil characteristics
N_used = N_used*HEC_TO_M2/plotsize ! tN/ha
site%plots(ip)%NPP = NPP*HEC_TO_M2/plotsize ! tC/ha
site%plots(ip)%soil%N_used = N_used
site%plots(ip)%soil%avail_N = max(0.0, &
site%plots(ip)%soil%avail_N - site%plots(ip)%soil%N_used)
end do plot
end subroutine Growth
!:.........................................................................:
subroutine Mortality(site, year)
!
! Determines which trees die by age, stress, or disturbances, and adds
! their biomass to the appropriate soil litter pools
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 05/01/05 Y. Xiaodong Original Code
! 01/01/12 K. Holcomb Updated to OOP structure
! 10/10/16 A. C. Foster Updated for soil/plot overhaul
! and permafrost updates
! 03/01/17 A. C. Foster Updated for fire updates
!
! Data dictionary: constants
real, parameter :: SBR_RT = 0.1 ! Rate of consumption of small branches
real, parameter :: LBR_RT = 0.1 ! Rate of consumption of large branches
real, parameter :: BL_RT = 0.05 ! Rate of consumption of boles
! Data dictionary: calling arguments
type(SiteData), intent(inout) :: site ! Site object
integer, intent(in) :: year ! Simulation year
! Data dictionary: local variables
integer, dimension(:), allocatable :: dbh_ind ! Index of sorted tree DBH array
real, dimension(:), allocatable :: dbh ! Tree DBH (cm)
real :: totbiomC ! Plot-wide biomass (tC)
real :: NPP_loss ! Loss of NPP from mortality
real :: fan ! N volatilized by fires (tN)
real :: consRoot ! Proportion roots consumed by fire (0-1)
real :: wind_prob ! Random number for windthrow
real :: fire_prob ! Random number for fire
real :: fire_p ! Modified fire probability
real :: biomC ! Total tree biomass (tC)
real :: leaf_bm ! Leaf biomass (tC)
real :: bcr ! Root biomass (tC)
real :: N_cons ! Proportion of N consumed by fire (0-1)
real :: burn ! Amount of tree burned (tC)
real :: bctw ! Twig biomass (tC)
real :: bcs ! Stem biomass (tC)
real :: bcbr ! Total branch biomass (tC)
real :: bcsb ! Small branch biomass (tC)
real :: bclb ! Large branch biomass (tC)
real :: av_fuel ! Available fuel for fire consumption
integer :: num_species ! Number of species on site
integer :: it, ip ! Looping indices
integer :: dt ! Counter for dead trees
integer :: lt ! Counter for live trees
integer :: is ! Species index
integer :: trow ! Row of tree
integer :: tcol ! Column of tree
logical :: age_survive ! Does tree survive age check?
logical :: growth_survive ! Does tree survive stress check?
logical :: fire_survive ! Does tree survive fire?
logical :: wind_survive ! Does tree survive windthrow?
integer :: snum ! Tree growth stressor
integer :: lc ! Litter class
! Get number of species on site
num_species = size(site%species)
plot: do ip = 1, site%numplots
! Initialize accumulators
site%plots(ip)%num_dead = 0
totbiomC = 0.0
NPP_loss = 0.0
fan = 0.0
site%plots(ip)%d_type = 0.0
! Set fire and wind to 0
site%plots(ip)%fire = 0
site%plots(ip)%wind = 0
! Get random number for fire and wind throw
wind_prob = urand()
fire_prob = urand()
! Increase or decrease fire probability based on aridity
if (year >= 10) then
if (site%aridity .lt. site%aridity_base) then
fire_p = site%fire_prob + ((site%aridity_base - &
site%aridity)/site%aridity_base)*site%fire_prob
else if (site%aridity .gt. site%aridity_base) then
fire_p = site%fire_prob - ((site%aridity - &
site%aridity_base)/site%aridity_base)*site%fire_prob
else
fire_p = site%fire_prob
end if
else
fire_p = site%fire_prob
endif
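! Editor's note (worked example): a drier-than-baseline year raises the
! fire probability proportionally, e.g. aridity_base = 0.8 and
! aridity = 0.6 give fire_p = fire_prob*(1 + 0.25) = 1.25*fire_prob;
! a wetter year lowers it symmetrically.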
treatments: if (fire_prob < fire_p) then
! We have a fire on the plot
fntrees: if (site%plots(ip)%numtrees > 0) then
site%plots(ip)%fire = 1
! Calculate and consume available litter fuels
call forest_fuels(site%plots(ip)%soil, &
site%plots(ip)%dry_days, av_fuel, N_cons, consRoot)
! Kill trees that died by fire, age, or low growth - only
! copy surviving trees, rest go into soil or burn
! Initialize dead and live tree counters
dt = 0
lt = 0
fireloop: do it = 1, site%plots(ip)%numtrees
! Get species index
is = site%plots(ip)%trees(it)%species_index
! Get leaf biomass
call leaf_biomass_c(site%plots(ip)%trees(it))
leaf_bm = site%plots(ip)%trees(it)%leaf_bm
! Check for growth and age survival
call growth_survival(site%plots(ip)%trees(it), &
growth_survive)
call age_survival(site%plots(ip)%trees(it), &
age_survive)
! Check for fire survival
call fire_survival(site%plots(ip)%trees(it), av_fuel, &
fire_survive)
fdeathcheck: if (growth_survive .and. &
age_survive .and. fire_survive) then
! Tree survives
lt = lt + 1 ! Increment live tree counter
! Increment tree age
site%plots(ip)%trees(it)%tree_age = &
site%plots(ip)%trees(it)%tree_age + 1
! Copy tree to front of list
call copy_tree(site%plots(ip)%trees(lt), &
site%plots(ip)%trees(it))
! Calculate leaf litter
if (site%species(is)%conifer) then
! Conifers only drop some of their needles
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leaf_bm*CON_LEAF_RATIO/B_TO_C
! Accumulate NPP losses
NPP_loss = NPP_loss + leaf_bm*CON_LEAF_RATIO
else
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leaf_bm/B_TO_C
! Accumulate NPP losses
NPP_loss = NPP_loss + leaf_bm
end if
else fdeathcheck
! Tree dies from something (need to check which)
dt = dt + 1 ! Increment dead tree counter
! Copy to dead tree list for output
call copy_tree(site%plots(ip)%deadtrees(dt), &
site%plots(ip)%trees(it))
! Set cells of that tree to unfilled
trow = site%plots(ip)%trees(it)%row
tcol = site%plots(ip)%trees(it)%col
site%plots(ip)%cells(trow, tcol) = 0
! Get most limiting growth factor
snum = site%plots(ip)%trees(it)%stressor
firecheck: if (.not. fire_survive) then
! Died by fire; fire consumes some of each
! litter class. Also calculate N volatilized
! by tree burning
! Add biomass to array of dead biomass
site%plots(ip)%d_type(IFIRE) = &
site%plots(ip)%d_type(IFIRE) + &
site%plots(ip)%trees(it)%biomC + leaf_bm
! Update "stressor"
site%plots(ip)%deadtrees(dt)%stressor = IFIRE
! Roots - fire consumption from Bonan (1989)
bcr = site%plots(ip)%trees(it)%rootC
burn = bcr*consRoot
bcr = bcr - burn
site%plots(ip)%soil%litter(IROOT) = &
site%plots(ip)%soil%litter(IROOT) + &
bcr/B_TO_C
! Accumulate volatilized N
fan = fan + burn*litter_params(IROOT, 2)* &
(1.0 - N_cons)
! Branch biomass
bcbr = site%plots(ip)%trees(it)%branchC
! Convert branch litter into twigs, small
! branches, and large branches (Thonicke et al. 2010)
bctw = bcbr*PERC_BRANCHES(1)
bcsb = bcbr*PERC_BRANCHES(2)
bclb = bcbr*PERC_BRANCHES(3)
! Twigs
burn = bctw*(site%plots(ip)%trees(it)%CK)
bctw = bctw - burn
site%plots(ip)%soil%litter(ITW) = &
site%plots(ip)%soil%litter(ITW) + &
bctw/B_TO_C
! Accumulate volatilized N
fan = fan + burn*litter_params(ITW, 2)* &
(1.0 - N_cons)
! Small branches
burn = bcsb*(SBR_RT*site%plots(ip)%trees(it)%CK)
bcsb = bcsb - burn
site%plots(ip)%soil%litter(ISBR) = &
site%plots(ip)%soil%litter(ISBR) + &
bcsb/B_TO_C
! Accumulate volatilized N
fan = fan + burn*litter_params(ISBR, 2)* &
(1.0 - N_cons)
! Large branches
burn = bclb*(LBR_RT*site%plots(ip)%trees(it)%CK)
bclb = bclb - burn
site%plots(ip)%soil%litter(ILBR) = &
site%plots(ip)%soil%litter(ILBR) + &
bclb/B_TO_C
! Accumulate volatilized N
fan = fan + burn*litter_params(ILBR, 2)* &
(1.0 - N_cons)
! Stems
bcs = site%plots(ip)%trees(it)%stemC
burn = bcs*(BL_RT*site%plots(ip)%trees(it)%CK)
bcs = bcs - burn
! Small boles (DBH < 10) vs. large boles
if (site%plots(ip)%trees(it)%diam_bht &
> 10.0) then
site%plots(ip)%soil%litter(ILBL) = &
site%plots(ip)%soil%litter(ILBL) + &
bcs/B_TO_C
! Accumulate volatilized N
fan = fan + burn*litter_params(ILBL, 2)* &
(1.0 - N_cons)
else
site%plots(ip)%soil%litter(ISBL) = &
site%plots(ip)%soil%litter(ISBL) + &
bcs/B_TO_C
! Accumulate volatilized N
fan = fan + burn*litter_params(ISBL, 2)* &
(1.0 - N_cons)
end if
! Leaves
lc = site%species(is)%litter_class
burn = leaf_bm*(site%plots(ip)%trees(it)%CK)
leaf_bm = leaf_bm - burn
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + leaf_bm/B_TO_C
! Accumulate volatilized N
fan = fan + burn*litter_params(lc, 2)* &
(1.0 - N_cons)
else if (.not. growth_survive .or. &
.not. age_survive) then firecheck
! Died from growth or age-related stress, all
! litter goes into soil
! Add biomass to array of dead biomass
site%plots(ip)%d_type(snum) = &
site%plots(ip)%d_type(snum) + &
site%plots(ip)%trees(it)%biomC + leaf_bm
! Add total tree litter components to all
! litter categories
! Roots
bcr = site%plots(ip)%trees(it)%rootC
site%plots(ip)%soil%litter(IROOT) = &
site%plots(ip)%soil%litter(IROOT) + &
bcr/B_TO_C
! Branches
bcbr = site%plots(ip)%trees(it)%branchC
! Convert branch litter into twigs, small
! branches, and large branches
bctw = bcbr*PERC_BRANCHES(1)
bcsb = bcbr*PERC_BRANCHES(2)
bclb = bcbr*PERC_BRANCHES(3)
! Twigs
site%plots(ip)%soil%litter(ITW) = &
site%plots(ip)%soil%litter(ITW) + &
bctw/B_TO_C
! Small branches
site%plots(ip)%soil%litter(ISBR) = &
site%plots(ip)%soil%litter(ISBR) + &
bcsb/B_TO_C
! Large branches
site%plots(ip)%soil%litter(ILBR) = &
site%plots(ip)%soil%litter(ILBR) + &
bclb/B_TO_C
! Stems
bcs = site%plots(ip)%trees(it)%stemC
! Small boles (DBH < 10) vs. large boles
if (site%plots(ip)%trees(it)%diam_bht &
> 10.0) then
site%plots(ip)%soil%litter(ILBL) = &
site%plots(ip)%soil%litter(ILBL) + &
bcs/B_TO_C
else
site%plots(ip)%soil%litter(ISBL) = &
site%plots(ip)%soil%litter(ISBL) + &
bcs/B_TO_C
end if
! Leaves
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leaf_bm/B_TO_C
end if firecheck
! Get biomass of tree
biomC = site%plots(ip)%trees(it)%biomC
! Accumulate NPP losses
NPP_loss = NPP_loss + biomC + leaf_bm
end if fdeathcheck
end do fireloop
! Set number of live and dead trees
site%plots(ip)%numtrees = lt
site%plots(ip)%num_dead = dt
end if fntrees
else if (wind_prob < site%wind_prob) then treatments
! We have a windthrow event
! Set fire to 0 and wind to 1
site%plots(ip)%fire = 0
site%plots(ip)%wind = 1
! Set wind counter for regeneration
site%plots(ip)%windCount = 3
wntrees: if (site%plots(ip)%numtrees > 0) then
! Initialize counters for live and dead trees
lt = 0
dt = 0
windloop: do it = 1, site%plots(ip)%numtrees
! Get species index and update tree
is = site%plots(ip)%trees(it)%species_index
!call update_tree(site%plots(ip)%trees(it), &
! site%species(is))
! Get leaf biomass
call leaf_biomass_c(site%plots(ip)%trees(it))
leaf_bm = site%plots(ip)%trees(it)%leaf_bm
! Check for growth and age mortality
call growth_survival(site%plots(ip)%trees(it), &
growth_survive)
call age_survival(site%plots(ip)%trees(it), &
age_survive)
! Check for wind survival
call wind_survival(site%plots(ip)%trees(it), &
wind_survive)
wdeathcheck: if (growth_survive .and. age_survive &
.and. wind_survive) then
! Tree survives
lt = lt + 1 ! Increment live tree counter
! Increment tree age
site%plots(ip)%trees(it)%tree_age = &
site%plots(ip)%trees(it)%tree_age + 1
! Copy tree object to top of list
call copy_tree(site%plots(ip)%trees(lt), &
site%plots(ip)%trees(it))
! Calculate leaf litter
if (site%species(is)%conifer) then
! Evergreens only lose some of their leaves
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leaf_bm*CON_LEAF_RATIO/B_TO_C
! Accumulate NPP losses
NPP_loss = NPP_loss + leaf_bm*CON_LEAF_RATIO
else
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leaf_bm/B_TO_C
! Accumulate NPP losses
NPP_loss = NPP_loss + leaf_bm
end if
else wdeathcheck
! Tree dies
dt = dt + 1 ! Increment dead tree counter
! Copy to list of dead trees for output
call copy_tree(site%plots(ip)%deadtrees(dt), &
site%plots(ip)%trees(it))
! Set cells of that tree to unfilled
trow = site%plots(ip)%trees(it)%row
tcol = site%plots(ip)%trees(it)%col
site%plots(ip)%cells(trow, tcol) = 0
! Get most limiting growth factor
snum = site%plots(ip)%trees(it)%stressor
windcheck: if (.not. growth_survive .or. &
.not. age_survive) then
! Tree died from low growth or age
! Add biomass to mortality array
site%plots(ip)%d_type(snum) = &
site%plots(ip)%d_type(snum) + &
site%plots(ip)%trees(it)%biomC + leaf_bm
else windcheck
! Died by windthrow
! Add biomass to mortality array
site%plots(ip)%d_type(IWIND) = &
site%plots(ip)%d_type(IWIND) + &
site%plots(ip)%trees(it)%biomC + leaf_bm
site%plots(ip)%deadtrees(dt)%stressor = IWIND
end if windcheck
! Add total tree litter components to all
! litter categories
! Roots
bcr = site%plots(ip)%trees(it)%rootC
site%plots(ip)%soil%litter(IROOT) = &
site%plots(ip)%soil%litter(IROOT) + bcr/B_TO_C
! Branches
bcbr = site%plots(ip)%trees(it)%branchC
! Convert branch litter into twigs, small
! branches, and large branches
bctw = bcbr*PERC_BRANCHES(1)
bcsb = bcbr*PERC_BRANCHES(2)
bclb = bcbr*PERC_BRANCHES(3)
! Twigs
site%plots(ip)%soil%litter(ITW) = &
site%plots(ip)%soil%litter(ITW) + bctw/B_TO_C
! Small branches
site%plots(ip)%soil%litter(ISBR) = &
site%plots(ip)%soil%litter(ISBR) + bcsb/B_TO_C
! Large branches
site%plots(ip)%soil%litter(ILBR) = &
site%plots(ip)%soil%litter(ILBR) + bclb/B_TO_C
! Stems
bcs = site%plots(ip)%trees(it)%stemC
! Small boles (DBH < 10) vs. large boles
if (site%plots(ip)%trees(it)%diam_bht > 10.0) then
site%plots(ip)%soil%litter(ILBL) = &
site%plots(ip)%soil%litter(ILBL) + &
bcs/B_TO_C
else
site%plots(ip)%soil%litter(ISBL) = &
site%plots(ip)%soil%litter(ISBL) + &
bcs/B_TO_C
end if
! Leaves
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + leaf_bm/B_TO_C
! Accumulate NPP losses
NPP_loss = NPP_loss + &
site%plots(ip)%trees(it)%biomC + leaf_bm
end if wdeathcheck
end do windloop
! Set number of live and dead trees
site%plots(ip)%numtrees = lt
site%plots(ip)%num_dead = dt
end if wntrees
else treatments
! No disturbances - just check for age and growth stress
! mortality
! Set fire and wind to 0
site%plots(ip)%fire = 0
site%plots(ip)%wind = 0
gntrees: if (site%plots(ip)%numtrees > 0) then
! Initialize counters for live and dead trees
lt = 0
dt = 0
deathloop: do it = 1, site%plots(ip)%numtrees
! Get species index and update tree
is = site%plots(ip)%trees(it)%species_index
!call update_tree(site%plots(ip)%trees(it), &
! site%species(is))
! Get leaf biomass
call leaf_biomass_c(site%plots(ip)%trees(it))
leaf_bm = site%plots(ip)%trees(it)%leaf_bm
! Check for age and growth survival
call growth_survival(site%plots(ip)%trees(it), &
growth_survive)
call age_survival(site%plots(ip)%trees(it), &
age_survive)
deathcheck: if (growth_survive .and. age_survive) then
! Tree survives
lt = lt + 1 ! Increment live tree counter
! Increment tree age
site%plots(ip)%trees(it)%tree_age = &
site%plots(ip)%trees(it)%tree_age + 1
! Copy tree to top of list
call copy_tree(site%plots(ip)%trees(lt), &
site%plots(ip)%trees(it))
! Calculate litterfall
if (site%species(is)%conifer) then
! Conifers only drop some of their leaves
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leaf_bm*CON_LEAF_RATIO/B_TO_C
! Accumulate NPP losses
NPP_loss = NPP_loss + leaf_bm*CON_LEAF_RATIO
else
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leaf_bm/B_TO_C
! Accumulate NPP losses
NPP_loss = NPP_loss + leaf_bm
end if
else deathcheck
! Tree dies
dt = dt + 1
! Copy to list of dead trees for output
call copy_tree(site%plots(ip)%deadtrees(dt), &
site%plots(ip)%trees(it))
! Set cells of that tree to unfilled
trow = site%plots(ip)%trees(it)%row
tcol = site%plots(ip)%trees(it)%col
site%plots(ip)%cells(trow, tcol) = 0
! Get most limiting growth factor
snum = site%plots(ip)%trees(it)%stressor
! Add biomass to mortality array
site%plots(ip)%d_type(snum) = &
site%plots(ip)%d_type(snum) + &
site%plots(ip)%trees(it)%biomC + leaf_bm
! Add total tree litter components to all
! litter categories
! Roots
bcr = site%plots(ip)%trees(it)%rootC
site%plots(ip)%soil%litter(IROOT) = &
site%plots(ip)%soil%litter(IROOT) + bcr/B_TO_C
! Branches
bcbr = site%plots(ip)%trees(it)%branchC
! Convert branch litter into twigs, small
! branches, and large branches
bctw = bcbr*PERC_BRANCHES(1)
bcsb = bcbr*PERC_BRANCHES(2)
bclb = bcbr*PERC_BRANCHES(3)
! Twigs
site%plots(ip)%soil%litter(ITW) = &
site%plots(ip)%soil%litter(ITW) + bctw/B_TO_C
! Small branches
site%plots(ip)%soil%litter(ISBR) = &
site%plots(ip)%soil%litter(ISBR) + bcsb/B_TO_C
! Large branches
site%plots(ip)%soil%litter(ILBR) = &
site%plots(ip)%soil%litter(ILBR) + bclb/B_TO_C
! Stems
bcs = site%plots(ip)%trees(it)%stemC
! Small boles (DBH < 10) vs. large boles
if (site%plots(ip)%trees(it)%diam_bht > 10.0) then
site%plots(ip)%soil%litter(ILBL) = &
site%plots(ip)%soil%litter(ILBL) + &
bcs/B_TO_C
else
site%plots(ip)%soil%litter(ISBL) = &
site%plots(ip)%soil%litter(ISBL) + &
bcs/B_TO_C
end if
! Leaves
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + leaf_bm/B_TO_C
! Accumulate NPP losses
NPP_loss = NPP_loss + &
site%plots(ip)%trees(it)%biomC + leaf_bm
end if deathcheck
end do deathloop
! Set number of live and dead trees
site%plots(ip)%num_dead = dt
site%plots(ip)%numtrees = lt
end if gntrees
end if treatments
! Update volatilized N (tN/ha)
site%plots(ip)%soil%fan = fan/plotsize/M2_TO_HEC
! Update NPP (tC/ha)
site%plots(ip)%NPP = site%plots(ip)%NPP - &
NPP_loss/plotsize/M2_TO_HEC
end do plot
end subroutine Mortality
!:.........................................................................:
subroutine Renewal(site, year)
!
! Calculates the seedling and seed banks of each species and
! regenerates new trees and shrubs
!
! Record of revisions:
! Date Programmer Description of change
! ==== ========== =====================
! 05/01/05 Y. Xiaodong Original Code
! 01/01/12 K. Holcomb Updated to OOP structure
! 10/10/16 A. C. Foster Updated for soil/plot overhaul
! and permafrost updates
!
! Data dictionary: calling arguments
type(SiteData), intent(inout) :: site ! Site object
integer, intent(in) :: year ! Year of simulation
! Data dictionary: constants
real, parameter :: regmin = 0.01 ! Minimum threshold for regeneration
! Data dictionary: local variables
real, dimension(size(site%species)) :: regstress ! Regeneration stress factor (0-1)
real, dimension(site%num_trees) :: probt ! Probability of regeneration (trees)
integer, dimension(site%num_trees) :: tree_sp ! Id locations of tree species
integer, dimension(maxcells*maxcells) :: r_empty ! Rows of empty cells
integer, dimension(maxcells*maxcells) :: c_empty ! Columns of empty cells
integer, dimension(:), allocatable :: locs ! Locations of empty cells
real :: NPP ! Net primary productivity (tC/ha)
real :: N_used ! N used in regeneration (tN/ha)
real :: tregmax ! Maximum growth stress factor for trees
real :: fc_org ! Effect of organic layer depth on regeneration
real :: germinants ! Number of seedlings regenerating (#/m2)
real :: probtsum ! Sum of probability of regeneration for trees
real :: rand ! Random number for determining species (uniform)
real :: dbh ! Tree dbh (cm)
real :: leafbm ! Leaf biomass (tC)
real :: shade ! Shade stress (0-1)
real :: envstress ! Growth stress factor (0-1)
integer :: num_species ! Number of species at site
integer :: new_trees ! Number of trees regenerated
integer :: numtrees ! Number of trees on plot
integer :: max_trenew ! Max number of trees to renew
integer :: ntrenew ! Number of trees to renew
integer :: org_tol ! Ability to regenerate on deep soil
integer :: n_empty ! Number of empty cells on plot
integer :: ht ! Tree height (m)
integer :: lc ! Litter class
integer :: is, ip, it ! Looping indices
integer :: stp ! Species counter
integer :: t, r, c ! Looping indices
integer :: irenew, i ! Looping indices
! Get number of species at site
num_species = size(site%species)
plot: do ip = 1, site%numplots
! Initialize accumulators
new_trees = 0
NPP = 0.0
N_used = 0.0
tregmax = 0.0
numtrees = site%plots(ip)%numtrees
navail: if (site%plots(ip)%soil%avail_N > epsilon(1.0)) then
! Check to make sure we aren't still waiting on wind counter
windcheck: if (site%plots(ip)%windCount == 0) then
! Calculate species-level environmental stressors for
! the plot
do is = 1, num_species
! First get minimum of gdd, nutrient, and drought
! stress
regstress(is) = min(site%plots(ip)%fc_gdd(is), &
site%plots(ip)%fc_nutr(is), &
site%plots(ip)%fc_drought(is))
if (site%plots(ip)%numtrees .eq. 0) then
! We don't have to worry about shade stress
regstress(is) = regstress(is)* &
site%plots(ip)%fc_perm(is)
else
! Need to consider shading
if (site%species(is)%conifer) then
regstress(is) = min(regstress(is), &
light_rsp(site%species(is), &
site%plots(ip)%con_light(1)))* &
site%plots(ip)%fc_perm(is)
else
regstress(is) = min(regstress(is), &
light_rsp(site%species(is), &
site%plots(ip)%dec_light(1)))* &
site%plots(ip)%fc_perm(is)
end if
end if
! Check for enough mature trees
if (site%plots(ip)%mature(is) <= 5 .and. &
site%species(is)%conifer) then
regstress(is) = regstress(is)*0.5
end if
! Can't regenerate if below minimum
if (regstress(is) <= site%species(is)%dbh_min) then
regstress(is) = 0.0
end if
! Calculate maximum regrowth capacity across all species
tregmax = max(tregmax, regstress(is))
end do
                    ! Compute the maximum number of trees and shrubs to renew
max_trenew = max(min(int(maxtrees*tregmax) - numtrees, &
maxtrees), 0)
! Compute actual number of renewable trees and shrubs
ntrenew = min(max_trenew, maxtrees - numtrees)
! Update the seed bank size and seedling bank size (#/m2)
seedbank: do is = 1, num_species
! Get species-level regeneration response to fire
call fire_rsp(site%species(is), site%plots(ip)%fire, &
site%plots(ip)%fc_fire(is))
site%plots(ip)%seedbank(is) = &
site%plots(ip)%seedbank(is) + &
site%species(is)%invader + &
site%species(is)%seed_num* &
site%plots(ip)%avail_spec(is)* &
site%plots(ip)%fc_fire(is)
! We don't allow seedling regeneration the first
! year of a fire
firecheck: if (site%plots(ip)%fire == 0) then
                        ! Put seeds into seedling bank if the regeneration
                        ! stress factor is high enough
seedcheck: if (regstress(is) > site%species(is)%dbh_min) then
! Ability to reproduce on moss-covered soil
org_tol = site%species(is)%org_tol
fc_org = exp(ORG_GF(org_tol)* &
(site%plots(ip)%soil%O_depth + &
site%plots(ip)%soil%M_depth))
! Calculate the number of new seedlings
germinants = site%plots(ip)%seedbank(is)* &
fc_org
! Remove new seedlings from seedbank
site%plots(ip)%seedbank(is) = &
site%plots(ip)%seedbank(is) - germinants
! Add germinants to seedling bank
site%plots(ip)%seedling(is) = &
site%plots(ip)%seedling(is) + germinants
! Decrease seedbank for survival
site%plots(ip)%seedbank(is) = &
site%plots(ip)%seedbank(is)* &
site%species(is)%seed_surv
else seedcheck
! No new seedlings from seedbank
! Decrease seedbank for survival
site%plots(ip)%seedbank(is) = &
site%plots(ip)%seedbank(is)* &
site%species(is)%seed_surv
endif seedcheck
! Add seedlings from layering
if (site%species(is)%layering) then
if ((site%plots(ip)%soil%M_depth + &
site%plots(ip)%soil%O_depth) > 0.05) then
site%plots(ip)%seedling(is) = &
site%plots(ip)%seedling(is)*1.8
end if
end if
! Add seedlings from sprouts
site%plots(ip)%seedling(is) = &
site%plots(ip)%seedling(is) + &
site%species(is)%sprout_num* &
site%plots(ip)%avail_spec(is)
! Convert seedling bank to #/plot
site%plots(ip)%seedling(is) = &
site%plots(ip)%seedling(is)*plotsize
end if firecheck
end do seedbank
! Calculate probability of regeneration
probtsum = 0.0
stp = 1
do is = 1, num_species
probt(stp) = site%plots(ip)%seedling(is)*regstress(is)
probtsum = probtsum + probt(stp)
tree_sp(stp) = is
stp = stp + 1
end do
else windcheck
! We are still waiting after a windthrow event
! Decrease counter
site%plots(ip)%windCount = max(0, &
site%plots(ip)%windCount - 1)
! Set probsum to 0
probtsum = 0.0
end if windcheck
! After setting seed and seedling banks
! Calculate cumulative probability of regeneration for trees
if (probtsum .gt. epsilon(1.0)) then
do is = 1, site%num_trees
probt(is) = probt(is)/probtsum
end do
do is = 2, site%num_trees
probt(is) = probt(is - 1) + probt(is)
end do
else
ntrenew = 0
end if
! Get current number of trees on plot
it = site%plots(ip)%numtrees
renew: if (ntrenew >= 1) then
! Count number of unfilled cells
n_empty = count(site%plots(ip)%cells(:,:) == 0)
nempty: if (n_empty > 0) then
! Some cells are unfilled - can place trees
allocate(locs(n_empty))
! Loop through whole rows and columns and fill
! r_empty and c_empty with empty cell indices -
! keeping them together with the same 't' index
t = 1
do while (t <= n_empty)
do r = 1, maxcells
do c = 1, maxcells
if (site%plots(ip)%cells(r, c) == 0) then
r_empty(t) = r
c_empty(t) = c
locs(t) = t
t = t + 1
end if
end do
end do
end do
! Shuffle locations array so we can randomly place
! new trees
call shuffle(locs)
ntrees: if (ntrenew >= 1) then
! Renew trees
tree_renew: do irenew = 1, ntrenew
! Determine species of new tree
rand = urand()
is = 1
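                            ! Inverse-CDF sampling: probt now holds the
                            ! cumulative species distribution, so advance
                            ! through it until rand falls below the running
                            ! total; if rounding carries us past the last
                            ! entry, restart from a uniformly drawn species
                            ! with a fresh random number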
do while (rand .gt. probt(is))
is = is + 1
if (is .gt. site%num_trees) then
is = 1 + int(urand(0.0, &
real(site%num_trees)))
rand = urand()
endif
end do
! Increment new tree counter
new_trees = new_trees + 1
is = tree_sp(is)
! Decrement seedling bank of species
site%plots(ip)%seedling(is) = &
max(0.0, site%plots(ip)%seedling(is) - 1.0)
! Increment number of plants and
! initialize new tree
it = it + 1
call initialize_tree(site%plots(ip)%trees(it), &
site%species(is), is)
! Grab the r and c values of that index
r = r_empty(locs(irenew))
c = c_empty(locs(irenew))
! Set tree location to that value
site%plots(ip)%trees(it)%row = r
site%plots(ip)%trees(it)%col = c
! Set this to filled in cells array
site%plots(ip)%cells(r,c) = 1
! Get dbh value of new tree
! (must be between 0.5 and 2.5 cm)
dbh = 1.5 + nrand(0.0, 1.0)
if (dbh >= 2.5) dbh = 2.5
if (dbh <= 0.5) dbh = 0.5
site%plots(ip)%trees(it)%diam_bht = dbh
! Set clear branch bole height
site%plots(ip)%trees(it)%canopy_ht = 1.0
! Get other characteristics
call forska_height(site%plots(ip)%trees(it))
call stem_shape(site%plots(ip)%trees(it))
call biomass_c(site%plots(ip)%trees(it))
call biomass_n(site%plots(ip)%trees(it))
call leaf_biomass_c(site%plots(ip)%trees(it))
! Leaf biomass
leafbm = site%plots(ip)%trees(it)%leaf_bm
! Tree height
ht = max(int(site%plots(ip)%trees(it)%forska_ht), 1)
!calculate shading effect on tree
if (site%plots(ip)%trees(it)%conifer) then
shade = light_rsp(site%species(is), &
site%plots(ip)%con_light(ht))
else
shade = light_rsp(site%species(is), &
site%plots(ip)%dec_light(ht))
end if
!calculate environmental stressors
call env_stress(site%plots(ip)%trees(it), &
shade, site%plots(ip)%fc_gdd(is), &
site%plots(ip)%fc_drought(is), &
site%plots(ip)%fc_perm(is), &
envstress, site%plots(ip)%fc_nutr(is))
! Add leaf litter to soil and update NPP and
! N used
if (site%species(is)%conifer) then
NPP = NPP + leafbm* &
(1.0 - CON_LEAF_RATIO) + &
site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC
N_used = N_used + leafbm/CON_LEAF_C_N + &
site%plots(ip)%trees(it)%biomN
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leafbm*CON_LEAF_RATIO/B_TO_C
else
NPP = NPP + leafbm + &
site%plots(ip)%trees(it)%biomC + &
site%plots(ip)%trees(it)%rootC
N_used = N_used + &
site%plots(ip)%trees(it)%biomN + &
leafbm/DEC_LEAF_C_N
lc = site%species(is)%litter_class
site%plots(ip)%soil%litter(lc) = &
site%plots(ip)%soil%litter(lc) + &
leafbm/B_TO_C
end if
end do tree_renew
end if ntrees
end if nempty
end if renew
! Set new number of trees
site%plots(ip)%numtrees = it
! Decrease seedling bank for survivability
! Also convert to #/m2
do is = 1, num_species
site%plots(ip)%seedling(is) = site%plots(ip)%seedling(is)* &
site%species(is)%seedling_surv/plotsize
end do
end if navail
! Update site and soil variables
N_used = N_used*HEC_TO_M2/plotsize
site%plots(ip)%NPP = site%plots(ip)%NPP + NPP*HEC_TO_M2/plotsize
! Convert to tonnes/ha
do i = 1, 18
site%plots(ip)%soil%litter(i) = &
site%plots(ip)%soil%litter(i)*HEC_TO_M2/plotsize
end do
! Deallocate locs array
if (allocated(locs)) deallocate(locs)
end do plot
end subroutine Renewal
!:.........................................................................:
end module Model
|
{"hexsha": "574c57d88f95477110a4b6bbffe0d42cfa781221", "size": 107022, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/Model.f90", "max_stars_repo_name": "jinyun1tang/UVAFME_model", "max_stars_repo_head_hexsha": "c8b847f07aca934d4c008f3f2cf67855e98274fc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-12T13:36:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-03T22:37:20.000Z", "max_issues_repo_path": "src/Model.f90", "max_issues_repo_name": "jinyun1tang/UVAFME_model", "max_issues_repo_head_hexsha": "c8b847f07aca934d4c008f3f2cf67855e98274fc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-30T21:26:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-30T21:27:49.000Z", "max_forks_repo_path": "src/Model.f90", "max_forks_repo_name": "UVAFME/UVAFME_model", "max_forks_repo_head_hexsha": "d11edfc6d9d0f839361f803ffdee6e97104f59cd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-22T16:32:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-08T16:22:08.000Z", "avg_line_length": 47.9704168534, "max_line_length": 123, "alphanum_fraction": 0.4115508961, "num_tokens": 22116}
|
# ************
# File: Bounds.py
# Top contributors (to current version):
# Panagiotis Kouvaros (panagiotis.kouvaros@gmail.com)
# This file is part of the Venus project.
# Copyright: 2019-2021 by the authors listed in the AUTHORS file in the
# top-level directory.
# License: BSD 2-Clause (see the file LICENSE in the top-level directory).
# Description: class for bounds of nodes.
# ************
import numpy as np
class Bounds:
def __init__(self, lower=None, upper=None):
self.lower = lower
self.upper = upper
def normalise(self, mean, std):
"""
Normalises the bounds
Arguments:
mean: normalisation mean
std: normalisation standard deviation
        Returns:
None
"""
self.lower = ( self.lower - mean ) / std
self.upper = ( self.upper - mean ) / std
def clip(self, min_value, max_value):
"""
Clips the bounds
Arguments:
min_value: valid lower bound
max_value: valid upper bound
Returns:
None
"""
self.lower = np.clip(self.lower, min_value, max_value)
self.upper = np.clip(self.upper, min_value, max_value)
def copy(self):
"""
Returns:
a copy of the calling bounds object
"""
return Bounds(self.lower.copy(), self.upper.copy())
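if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; not part of the Venus API):
    # normalise, clip and copy a pair of bounds.
    b = Bounds(lower=np.array([-2.0, 0.0]), upper=np.array([1.0, 3.0]))
    b.normalise(mean=0.5, std=2.0)
    b.clip(-1.0, 1.0)
    c = b.copy()
    print(c.lower, c.upper)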
|
{"hexsha": "ce946230d546cf92046a3edad4f588ce8de41711", "size": 1405, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Bounds.py", "max_stars_repo_name": "pkouvaros/venus2_vnncomp21", "max_stars_repo_head_hexsha": "57e9608041d230b5d78c4f2afb890b81035436a1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Bounds.py", "max_issues_repo_name": "pkouvaros/venus2_vnncomp21", "max_issues_repo_head_hexsha": "57e9608041d230b5d78c4f2afb890b81035436a1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Bounds.py", "max_forks_repo_name": "pkouvaros/venus2_vnncomp21", "max_forks_repo_head_hexsha": "57e9608041d230b5d78c4f2afb890b81035436a1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3015873016, "max_line_length": 74, "alphanum_fraction": 0.5779359431, "include": true, "reason": "import numpy", "num_tokens": 320}
|
module RowMajorArrays
export RowMajorArray
using LinearAlgebra
"""
Wrapper of a column-major array (e.g. `Array` or `CuArray`) to make it a row-major array.
The default constructor takes a column-major array as input, which is then interpreted as a row-major array.
This causes an implicit transpose of the input array, e.g. a `(2, 3)` column-major array is converted to a
`(3, 2)` row-major array.
For more than 2 dimensions, the order of the dimensions is reversed, e.g. a `(2, 3, 4, 5)` column-major array
is converted to a `(5, 4, 3, 2)` RowMajorArray.
Example:
a = [1 2 3; 4 5 6] # 2x3 (column major) array
a_r = RowMajorArray(a) # 3x2 row-major array
"""
struct RowMajorArray{T, N, A <: AbstractArray{T, N}} <: AbstractArray{T, N}
data:: A
end
"""
Constructor which allows modification of array eltype.
Example:
a = [1 2 3; 4 5 6] # eltype: Int64
a_r = RowMajorArray{Float64}(a) # eltype Float64
"""
function RowMajorArray{T}(a:: AbstractArray{X, N}) where {X, T, N}
a_converted = convert.(T, a)
RowMajorArray{T, N, typeof(a_converted)}(a_converted)
end
# initializing with undefined fields
RowMajorArray{T, N, A}(::UndefInitializer, I::Vararg{<: Int, N}) where {T, N, A <: AbstractArray} = RowMajorArray(A(undef, reverse(I)...))
import Base
# getindex and setindex! methods
Base.getindex(a:: RowMajorArray, I::Vararg{<: Int, N}) where {N} = getindex(a.data, reverse(I)...)
Base.getindex(a:: RowMajorArray, i:: Int) = getindex(a.data, i) # linear indexing is done row-major here to have optimal memory alignment
Base.setindex!(a:: RowMajorArray, v, i:: Int) = setindex!(a.data, v, i)
Base.setindex!(a:: RowMajorArray, v, I::Vararg{<: Int, N}) where {N} = setindex!(a.data, v, reverse(I)...)
# standard function definitions
Base.size(a:: RowMajorArray) = reverse(size(a.data))
Base.size(a:: RowMajorArray, dim:: Int) = reverse(size(a.data))[dim]
Base.:(==)(a1:: RowMajorArray, a2:: RowMajorArray) = a1.data == a2.data
# for display for 2d RowMajorArrays, transpose data for printing so that the dimensions are printed in the right order
Base.show_nd(io::IO, a::RowMajorArray{T, 2, A}, print_matrix::Function, label_slices::Bool) where {T, A} = Base.show_nd(io, collect(transpose(a)), print_matrix, label_slices)
forward_methods = (:length, :eltype)
for m in forward_methods
@eval Base.$m(a:: RowMajorArray) = Base.$m(a.data)
end
Base.similar(a:: RowMajorArray{T, N, A}) where {T, N, A} = similar(a, T, size(a))
Base.similar(a:: RowMajorArray{T, N, A}, dims:: Dims) where {T, N, A} = RowMajorArray{T, N, A}(A(undef, reverse(dims)...))
function Base.similar(a:: RowMajorArray{X, N, A}, ::Type{T}, dims:: Dims) where {X, T, N, A}
data = similar(a.data, T, reverse(dims))
RowMajorArray(data)
end
# defining broadcasting
Base.BroadcastStyle(::Type{<: RowMajorArray}) = Broadcast.ArrayStyle{RowMajorArray}()
# see https://docs.julialang.org/en/v1/manual/interfaces/#Selecting-an-appropriate-output-array
function Base.similar(bc:: Broadcast.Broadcasted{Broadcast.ArrayStyle{RowMajorArray}}, ::Type{ElType}) where ElType
    # Scan the broadcast inputs for the first RowMajorArray:
    A = find_row_major_array(bc)
    # Use it to allocate a RowMajorArray output of the appropriate eltype and size
similar(A, ElType, size(A))
end
# utility function for similar
# `A = find_row_major_array(As)` returns the first RowMajorArray among the arguments.
find_row_major_array(bc::Base.Broadcast.Broadcasted) = find_row_major_array(bc.args)
find_row_major_array(args::Tuple) = find_row_major_array(find_row_major_array(args[1]), Base.tail(args))
find_row_major_array(a::RowMajorArray) = a
find_row_major_array(::Tuple{}) = nothing
find_row_major_array(a::RowMajorArray, rest) = a
find_row_major_array(::Any, rest) = find_row_major_array(rest)
# constructor methods
# separate from `Base.zeros`, etc. - execute with qualified module name, e.g. `RowMajorArrays.zeros`, etc.
forward_methods = (:zeros, :ones, :rand, :randn)
for m in forward_methods
@eval $m(T:: Type, I::Vararg{<: Int, N}) where {N} = RowMajorArray(Base.$m(T, reverse(I)...))
@eval $m(I::Vararg{<: Int, N}) where {N} = RowMajorArray(Base.$m(reverse(I)...))
end
fill(x, I::Vararg{<: Int, N}) where {N} = RowMajorArray(Base.fill(x, reverse(I)...))
fill(x, I::Tuple) = RowMajorArray(Base.fill(x, reverse(I)))
# operations
forward_methods = (:(+), :(-))
for m in forward_methods
@eval Base.$m(a1:: RowMajorArray, a2:: RowMajorArray) = RowMajorArray(Base.$m(a1.data, a2.data))
end
# Matrix multiplication - very crude implementation by converting to standard arrays.
# This should not be too slow for large matrices because converting is O(n²) whereas matrix multiplication is O(n³).
# Ideally, this should be replaced with calls to OpenBLAS row-major functions
Base.:(*)(a1:: RowMajorArray, a2:: RowMajorArray) = RowMajorArray(collect(transpose(a1.data)) * collect(transpose(a2.data)))
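# Illustrative usage (a sketch following the docstring examples above; not
# executed at load time):
#   a = RowMajorArray([1 2 3; 4 5 6])   # 3x2 row-major array
#   b = RowMajorArrays.ones(Int, 3, 2)  # module-level constructor
#   c = a .+ b                          # broadcasting returns a RowMajorArray
#   @assert c[1, 2] == a[1, 2] + 1      # indices are reversed internally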
end
|
{"hexsha": "74c3281e2e873fbd08121665c1fcee9e8e3c4217", "size": 4876, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/RowMajorArrays.jl", "max_stars_repo_name": "lungben/RowMajorArrays.jl", "max_stars_repo_head_hexsha": "1091427e7584e37cf48db4eb27ebd6bd55a357c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/RowMajorArrays.jl", "max_issues_repo_name": "lungben/RowMajorArrays.jl", "max_issues_repo_head_hexsha": "1091427e7584e37cf48db4eb27ebd6bd55a357c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/RowMajorArrays.jl", "max_forks_repo_name": "lungben/RowMajorArrays.jl", "max_forks_repo_head_hexsha": "1091427e7584e37cf48db4eb27ebd6bd55a357c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3220338983, "max_line_length": 174, "alphanum_fraction": 0.7095980312, "num_tokens": 1434}
|
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""Utilities for bounding box manipulation and GIoU."""
import numpy as np
import torch
# rewrite for temporal localization setting
def prop_cl_to_se(x):
c, l = x.unbind(-1)
b = [(c - 0.5 * l), (c + 0.5 * l)]
return torch.stack(b, dim=-1).clamp(0, 1)
def prop_se_to_cl(x):
s, e = x.unbind(-1)
b = [(s + e) / 2, (e - s)]
return torch.stack(b, dim=-1)
def prop_relative_to_absolute(x, base, window_size, interval):
s, e = x.unbind(-1)
num_samples = s.shape[1]
base = base.unsqueeze(1).repeat(1, num_samples).cuda()
b = [s * window_size * interval + base, e * window_size * interval + base]
return torch.stack(b, dim=-1)
def segment_tiou(box_a, box_b):
# gt: [N, 2], detections: [M, 2]
N = box_a.shape[0]
M = box_b.shape[0]
tiou = torch.zeros((N, M)).to(box_a.device)
for i in range(N):
inter_max_xy = torch.min(box_a[i, 1], box_b[:, 1])
inter_min_xy = torch.max(box_a[i, 0], box_b[:, 0])
inter = torch.clamp((inter_max_xy - inter_min_xy), min=0)
# calculate union
union = (box_b[:, 1] - box_b[:, 0]) + (box_a[i, 1] -
box_a[i, 0]) - inter
tiou[i, :] = inter / union
return tiou # (N, M)
def pairwise_temporal_iou(candidate_segments, target_segments):
"""Compute intersection over union between segments.
Args:
candidate_segments (np.ndarray): 1-dim/2-dim array in format
[init, end]/[m x 2:=[init, end]].
target_segments (np.ndarray): 2-dim array in format
[n x 2:=[init, end]].
Returns:
t_iou (np.ndarray): 1-dim array [n] /
2-dim array [n x m] with IoU ratio.
"""
candidate_segments_ndim = candidate_segments.ndim
if target_segments.ndim != 2 or candidate_segments_ndim not in [1, 2]:
raise ValueError('Dimension of arguments is incorrect')
if candidate_segments_ndim == 1:
candidate_segments = candidate_segments[np.newaxis, :]
n, m = target_segments.shape[0], candidate_segments.shape[0]
t_iou = np.empty((n, m), dtype=np.float32)
for i in range(m):
candidate_segment = candidate_segments[i, :]
tt1 = np.maximum(candidate_segment[0], target_segments[:, 0])
tt2 = np.minimum(candidate_segment[1], target_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clip(0)
# Segment union.
segments_union = ((target_segments[:, 1] - target_segments[:, 0]) +
(candidate_segment[1] - candidate_segment[0]) -
segments_intersection)
# Compute overlap as the ratio of the intersection
# over union of two segments.
t_iou[:, i] = (segments_intersection.astype(float) / segments_union)
if candidate_segments_ndim == 1:
t_iou = np.squeeze(t_iou, axis=1)
return t_iou
def generalized_prop_iou(props1, props2):
"""rewritten Generalized IoU from https://giou.stanford.edu/ to work under
temporal localization setting.
The props should be in [start, end] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
return segment_tiou(props1, props2)
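if __name__ == "__main__":
    # Illustrative sanity check (not part of the original module):
    # identical segments have tIoU 1, half-overlapping unit segments 1/3.
    a = torch.tensor([[0.0, 1.0]])
    b = torch.tensor([[0.0, 1.0], [0.5, 1.5]])
    print(segment_tiou(a, b))  # tensor([[1.0000, 0.3333]])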
|
{"hexsha": "649bc01829801897837c8a209707c3f3a9306756", "size": 3499, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/box_ops.py", "max_stars_repo_name": "tony2016uestc/RTD-Action", "max_stars_repo_head_hexsha": "ae51cf06d1c138bff3a41a3d014939bd409550a8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 64, "max_stars_repo_stars_event_min_datetime": "2021-02-04T02:58:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:10:49.000Z", "max_issues_repo_path": "util/box_ops.py", "max_issues_repo_name": "sparkstj/RTD-Action", "max_issues_repo_head_hexsha": "e80e56ed0d98f7f0c5f1ecedfbac839a62b03e75", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-03-10T12:17:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T07:56:06.000Z", "max_forks_repo_path": "util/box_ops.py", "max_forks_repo_name": "sparkstj/RTD-Action", "max_forks_repo_head_hexsha": "e80e56ed0d98f7f0c5f1ecedfbac839a62b03e75", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-04-07T13:01:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T11:45:22.000Z", "avg_line_length": 34.6435643564, "max_line_length": 78, "alphanum_fraction": 0.6047442126, "include": true, "reason": "import numpy", "num_tokens": 934}
|
from __future__ import print_function
import sys, os, math, re
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.abspath('..'))
import sasmodels
from sasmodels import generate, core
from sasmodels.direct_model import DirectModel, call_profile
from sasmodels.data import empty_data1D, empty_data2D
try:
from typing import Dict, Any
except ImportError:
pass
else:
from matplotlib.axes import Axes
from sasmodels.kernel import KernelModel
from sasmodels.modelinfo import ModelInfo
def plot_1d(model, opts, ax):
# type: (KernelModel, Dict[str, Any], Axes) -> None
"""
Create a 1-D image.
"""
q_min, q_max, nq = opts['q_min'], opts['q_max'], opts['nq']
q_min = math.log10(q_min)
q_max = math.log10(q_max)
q = np.logspace(q_min, q_max, nq)
data = empty_data1D(q)
calculator = DirectModel(data, model)
Iq1D = calculator()
ax.plot(q, Iq1D, color='blue', lw=2, label=model.info.name)
ax.set_xlabel(r'$Q \/(\AA^{-1})$')
ax.set_ylabel(r'$I(Q) \/(\mathrm{cm}^{-1})$')
ax.set_xscale(opts['xscale'])
ax.set_yscale(opts['yscale'])
#ax.legend(loc='best')
def plot_2d(model, opts, ax):
# type: (KernelModel, Dict[str, Any], Axes) -> None
"""
Create a 2-D image.
"""
qx_max, nq2d = opts['qx_max'], opts['nq2d']
q = np.linspace(-qx_max, qx_max, nq2d) # type: np.ndarray
data2d = empty_data2D(q, resolution=0.0)
calculator = DirectModel(data2d, model)
Iq2D = calculator() #background=0)
Iq2D = Iq2D.reshape(nq2d, nq2d)
if opts['zscale'] == 'log':
Iq2D = np.log(np.clip(Iq2D, opts['vmin'], np.inf))
ax.imshow(Iq2D, interpolation='nearest', aspect=1, origin='lower',
extent=[-qx_max, qx_max, -qx_max, qx_max], cmap=opts['colormap'])
ax.set_xlabel(r'$Q_x \/(\AA^{-1})$')
ax.set_ylabel(r'$Q_y \/(\AA^{-1})$')
def plot_profile_inset(model_info, ax):
p = ax.get_position()
width, height = 0.4*(p.x1-p.x0), 0.4*(p.y1-p.y0)
left, bottom = p.x1-width, p.y1-height
inset = plt.gcf().add_axes([left, bottom, width, height])
x, y, labels = call_profile(model_info)
inset.plot(x, y, '-')
inset.locator_params(nbins=4)
#inset.set_xlabel(labels[0])
#inset.set_ylabel(labels[1])
inset.text(0.99, 0.99, "profile",
horizontalalignment="right",
verticalalignment="top",
transform=inset.transAxes)
def figfile(model_info):
# type: (ModelInfo) -> str
return model_info.id + '_autogenfig.png'
def make_figure(model_info, opts):
# type: (ModelInfo, Dict[str, Any]) -> None
"""
Generate the figure file to include in the docs.
"""
model = core.build_model(model_info)
fig_height = 3.0 # in
fig_left = 0.6 # in
fig_right = 0.5 # in
fig_top = 0.6*0.25 # in
    fig_bottom = 0.6*0.75 # in
if model_info.parameters.has_2d:
plot_height = fig_height - (fig_top+fig_bottom)
plot_width = plot_height
fig_width = 2*(plot_width + fig_left + fig_right)
aspect = (fig_width, fig_height)
ratio = aspect[0]/aspect[1]
ax_left = fig_left/fig_width
ax_bottom = fig_bottom/fig_height
ax_height = plot_height/fig_height
ax_width = ax_height/ratio # square axes
fig = plt.figure(figsize=aspect)
ax2d = fig.add_axes([0.5+ax_left, ax_bottom, ax_width, ax_height])
plot_2d(model, opts, ax2d)
ax1d = fig.add_axes([ax_left, ax_bottom, ax_width, ax_height])
plot_1d(model, opts, ax1d)
#ax.set_aspect('square')
else:
plot_height = fig_height - (fig_top+fig_bottom)
plot_width = (1+np.sqrt(5))/2*fig_height
fig_width = plot_width + fig_left + fig_right
ax_left = fig_left/fig_width
ax_bottom = fig_bottom/fig_height
ax_width = plot_width/fig_width
ax_height = plot_height/fig_height
aspect = (fig_width, fig_height)
fig = plt.figure(figsize=aspect)
ax1d = fig.add_axes([ax_left, ax_bottom, ax_width, ax_height])
plot_1d(model, opts, ax1d)
if model_info.profile:
plot_profile_inset(model_info, ax1d)
# Save image in model/img
path = os.path.join('model', 'img', figfile(model_info))
plt.savefig(path, bbox_inches='tight')
#print("figure saved in",path)
def copy_if_newer(src, dst):
from os.path import dirname, exists, getmtime
import shutil
if not exists(dst):
path = dirname(dst)
if not exists(path):
os.makedirs(path)
shutil.copy2(src, dst)
elif getmtime(src) > getmtime(dst):
shutil.copy2(src, dst)
def link_sources(model_info):
from os.path import basename, dirname, realpath, join as joinpath
# List source files in reverse order of dependency.
model_file = basename(model_info.filename)
sources = list(reversed(model_info.source + [model_file]))
# Copy files to src dir under models directory. Need to do this
# because sphinx can't link to an absolute path.
root = dirname(dirname(realpath(__file__)))
src = joinpath(root, "sasmodels", "models")
dst = joinpath(root, "doc", "model", "src")
for path in sources:
copy_if_newer(joinpath(src, path), joinpath(dst, path))
# Link to local copy of the files
downloads = [":download:`%s <src/%s>`"%(path, path) for path in sources]
# Could do syntax highlighting on the model files by creating a rst file
    # beside each source file, containing a literalinclude of that file:
#
# src/path.rst:
#
# .. {{ path.replace('/','_') }}:
#
# .. literalinclude:: {{ src/path }}
# :language: {{ "python" if path.endswith('.py') else "c" }}
# :linenos:
#
# and link to it using
#
# colors = [":ref:`%s`"%(path.replace('/','_')) for path in sources]
#
# Probably need to dump all the rst files into an index.rst to build them.
# Link to github repo (either the tagged sasmodels version or master)
url = "https://github.com/SasView/sasmodels/blob/v%s"%sasmodels.__version__
#url = "https://github.com/SasView/sasmodels/blob/master"%sasmodels.__version__
links = ["`%s <%s/sasmodels/models/%s>`_"%(path, url, path) for path in sources]
sep = "\n$\\ \\star\\ $ " # bullet
body = "\n**Source**\n"
#body += "\n" + sep.join(links) + "\n\n"
body += "\n" + sep.join(downloads) + "\n\n"
return body
def gen_docs(model_info):
# type: (ModelInfo) -> None
"""
Generate the doc string with the figure inserted before the references.
"""
# Load the doc string from the module definition file and store it in rst
docstr = generate.make_doc(model_info)
# Auto caption for figure
captionstr = '\n'
captionstr += '.. figure:: img/' + figfile(model_info) + '\n'
captionstr += '\n'
if model_info.parameters.has_2d:
captionstr += ' 1D and 2D plots corresponding to the default parameters of the model.\n'
else:
captionstr += ' 1D plot corresponding to the default parameters of the model.\n'
captionstr += '\n'
# Add figure reference and caption to documentation (at end, before References)
    pattern = r'\*\*REFERENCE'
match = re.search(pattern, docstr.upper())
sources = link_sources(model_info)
insertion = captionstr + sources
if match:
docstr1 = docstr[:match.start()]
docstr2 = docstr[match.start():]
docstr = docstr1 + insertion + docstr2
else:
print('------------------------------------------------------------------')
print('References NOT FOUND for model: ', model_info.id)
print('------------------------------------------------------------------')
docstr += insertion
    with open(sys.argv[2], 'w') as fid:
        fid.write(docstr)
def process_model(path):
# type: (str) -> None
"""
Generate doc file and image file for the given model definition file.
"""
# Load the model file
model_name = os.path.basename(path)[:-3]
model_info = core.load_model_info(model_name)
# Plotting ranges and options
opts = {
'xscale' : 'log',
'yscale' : 'log' if not model_info.structure_factor else 'linear',
'zscale' : 'log' if not model_info.structure_factor else 'linear',
'q_min' : 0.001,
'q_max' : 1.0,
'nq' : 1000,
'nq2d' : 1000,
'vmin' : 1e-3, # floor for the 2D data results
'qx_max' : 0.5,
#'colormap' : 'gist_ncar',
'colormap' : 'nipy_spectral',
#'colormap' : 'jet',
}
# Generate the RST file and the figure. Order doesn't matter.
gen_docs(model_info)
make_figure(model_info, opts)
if __name__ == "__main__":
process_model(sys.argv[1])
|
{"hexsha": "aea810a916557bbdf01b77952029b5172c0c9688", "size": 8879, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/genmodel.py", "max_stars_repo_name": "zattala/sasmodels", "max_stars_repo_head_hexsha": "a547aa73d43145b3bd34770b0ea27ba8882170a3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/genmodel.py", "max_issues_repo_name": "zattala/sasmodels", "max_issues_repo_head_hexsha": "a547aa73d43145b3bd34770b0ea27ba8882170a3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/genmodel.py", "max_forks_repo_name": "zattala/sasmodels", "max_forks_repo_head_hexsha": "a547aa73d43145b3bd34770b0ea27ba8882170a3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4147286822, "max_line_length": 99, "alphanum_fraction": 0.6138078612, "include": true, "reason": "import numpy", "num_tokens": 2481}
|
import numpy as np
import os
from reader import read_cifar10
class Input(object):
def __init__(self, is_training, batch_num=128):
self.is_training = is_training
self.batch_num = batch_num
r = read_cifar10(os.getcwd()+'/cifar10_dataset', is_training=is_training)
d, l = r.load_data()
if self.is_training is True:
self.train_data = np.transpose(
d[:49000].astype('float32').reshape(-1, 3, 32, 32), (0, 2, 3, 1))
self.train_labels = l[:49000]
self.val_data = np.transpose(
d[49000:].astype('float32').reshape(-1, 3, 32, 32), (0, 2, 3, 1))
self.val_data = self._normalize(self.val_data)
self.val_labels = l[49000:]
self.num_data = self.train_data.shape[0]
self.epoch_size = np.ceil(self.num_data/float(batch_num)).astype('int32')
else:
self.eval_data = np.transpose(
d.astype('float32').reshape(-1, 3, 32, 32), (0, 2, 3, 1))
self.eval_data = self._normalize(self.eval_data)
self.eval_labels = l
def _normalize(self, x):
mean = np.mean(x, axis=0)
#mean = np.mean(x, axis=(1,2,3)).reshape(-1,1,1,1)
#std = np.std(x, axis=(1,2,3)).reshape(-1,1,1,1)
x -= mean
#x /= np.maximum(std, 1.0/np.sqrt(x.shape[1:]))
return x
def _brighten_and_contrast(self, x):
gain = 0.5 + np.random.rand(x.shape[0], 1, 1, 1)
bias = np.random.randint(-40, 40, (x.shape[0], 1, 1, 1))
x = x * gain + bias
x = np.clip(x, 0.0, 255.0)
return x
def _flippen(self, x):
        flip_idx = np.random.permutation(x.shape[0])[:x.shape[0]//2]
x[flip_idx] = x[flip_idx,:,::-1,:]
return x
def _augmenting(self, x, idx):
x = self.train_data[idx]
# random cropping
'''
xx = np.random.randint(5)
yy = np.random.randint(5)
x = np.pad(x, ((0,),(2,),(2,),(0,)), 'constant')[:, xx:xx+32, yy:yy+32, :]
'''
x = self._flippen(x)
#x = self._brighten_and_contrast(x)
x = self._normalize(x)
        for i in range(x.shape[0]):
x[i] = self._random_crop(x[i])
return x
def batch(self):
        try:
            idx = next(self.batch_idx_iter)
        except (AttributeError, StopIteration):
            print("Start a new epoch!")
            self.batch_idx = np.array_split(np.random.permutation(self.num_data),
                                            self.epoch_size)
            self.batch_idx_iter = iter(self.batch_idx)
            idx = next(self.batch_idx_iter)
x = self.train_data[idx]
y = self.train_labels[idx]
x = self._augmenting(x, idx)
return x, y
def _random_crop(self, x):
xx = np.random.randint(9)
yy = np.random.randint(9)
x = np.pad(x, ((4,),(4,),(0,)), 'constant')[xx:xx+32, yy:yy+32, :]
return x
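if __name__ == "__main__":
    # Illustrative check mirroring the `_random_crop` logic on dummy data
    # (avoids needing the CIFAR-10 files on disk).
    dummy = np.zeros((32, 32, 3), dtype='float32')
    xx = np.random.randint(9)
    yy = np.random.randint(9)
    crop = np.pad(dummy, ((4,), (4,), (0,)), 'constant')[xx:xx+32, yy:yy+32, :]
    assert crop.shape == (32, 32, 3)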
|
{"hexsha": "2a6bd74f2ad982dfffd1885b75a9a1360470fdd4", "size": 2716, "ext": "py", "lang": "Python", "max_stars_repo_path": "Input.py", "max_stars_repo_name": "pianomania/cifar10", "max_stars_repo_head_hexsha": "823b86421fbe8be7f94e693b11f71dfb35e22b74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Input.py", "max_issues_repo_name": "pianomania/cifar10", "max_issues_repo_head_hexsha": "823b86421fbe8be7f94e693b11f71dfb35e22b74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Input.py", "max_forks_repo_name": "pianomania/cifar10", "max_forks_repo_head_hexsha": "823b86421fbe8be7f94e693b11f71dfb35e22b74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6226415094, "max_line_length": 79, "alphanum_fraction": 0.5865243004, "include": true, "reason": "import numpy", "num_tokens": 869}
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (Input, InputSpec, Layer,
Activation, BatchNormalization,
Conv2D, Conv2DTranspose, Add,
Concatenate, Flatten, Reshape)
from tensorflow.keras.regularizers import l2
from nets.resnet import Backbone
from tensorflow.keras import initializers
class relative_to_abslolue(Layer):
def __init__(self, name=None, **kwargs):
super(relative_to_abslolue, self).__init__(name=name, **kwargs)
def build(self, input_shape):
self.input_spec = InputSpec(shape=input_shape)
self.b, self.h, self.w, self.c= self.input_spec.shape
self.ct = tf.cast(tf.transpose(tf.meshgrid(tf.range(0, self.h), tf.range(0, self.w))), tf.float32)+0.5
def get_config(self):
config = super(relative_to_abslolue, self).get_config()
return config
def call(self, pred_ltrb):
'''
        The 4 values in pred_ltrb are (y1, x1, y2, x2): the predicted
        relative distances from each cell centre to the top-left and
        bottom-right corners of the box, i.e. ltrb (left-top-right-bottom).
        This function converts the predicted relative positions into
        absolute positions.
        Below, a box whose relative distances (y1,x1,y2,x2) are obtained at
        cell (cy,cx) is converted to the absolute corners
        (cy-y1,cx-x1,cy+y2,cx+x2):
(cy-y1,cx-x1)
----------------------------------
| ↑ |
| | |
| |y1 |
| | |
|←------(cx,cy)-----------------→|
| x1 | x2 |
| | |
| | |
| |y2 |
| | |
| | |
| ↓ |
----------------------------------(cx+x2,cy+y2)
'''
        # ct : w*h*2; the 2 values are cy=ct[0], cx=ct[1]
locations = tf.concat((self.ct - pred_ltrb[:, :, :, :2], self.ct + pred_ltrb[:, :, :, 2:]), axis=-1)
locations = tf.divide(locations, [self.h, self.w, self.h, self.w])
return locations
@classmethod
def from_config(cls, config):
return cls(**config)
class input_anchor(Layer):
def __init__(self, name=None, anchorsize=None, **kwargs):
super(input_anchor, self).__init__(name=name, **kwargs)
self.anchorsize = anchorsize
def build(self, input_shape):
self.input_spec = InputSpec(shape=input_shape)
self.b, self.h, self.w, self.c= self.input_spec.shape
        self.dhw = tf.cast(tf.divide(self.anchorsize,2),tf.float32) # 0.5*dh, 0.5*dw
self.dct = tf.cast(tf.transpose(tf.meshgrid(tf.range(0, self.h), tf.range(0, self.w))), tf.float32)+0.5
def get_config(self):
config = super(input_anchor, self).get_config()
return config
def call(self, pred_ltrb):
'''
        Decodes anchor-relative offsets into absolute box corners
        (SSD-style decoding; note the computation here differs from
        relative_to_abslolue above).
        The 4 values in pred_ltrb are (dy, dx, dh, dw) per cell:
            (cy, cx) = cell_centre + (dy, dx) * anchorsize/2
            (h,  w ) = exp((dh, dw)) * anchorsize/2
            corners  = (cy-h, cx-w, cy+h, cx+w)
        The resulting corners are normalised by the feature map
        height and width.
'''
        # dct : w*h*2 grid of cell centres; the 2 values are cy=dct[0], cx=dct[1]
cxy = self.dct + pred_ltrb[:, :, :, :2]*self.dhw
hw = tf.exp(pred_ltrb[:, :, :, 2:])*self.dhw
locations = tf.concat((cxy-hw, cxy+hw), axis=-1)
locations = tf.divide(locations, [self.h, self.w, self.h, self.w])
return locations
@classmethod
def from_config(cls, config):
return cls(**config)
def count_anchor_size(output_layers=4, min_size=0.2,max_size=0.9):
anchor_size = []
for i in range(output_layers):
anchor_size.append(min_size+(max_size-min_size)*i/(output_layers-1))
return anchor_size
def faster_onenet_head(input_tensor = Input(shape=(300, 300, 3)), num_classes=20, prior_prob=0.01, backbone='resnet50'):
    # ---------------------------------#
    #   A typical input size is [300,300,3]
    # ---------------------------------#
    # The net dict holds the whole SSD-style structure; feature layers
    # can be looked up by layer name
net = Backbone(input_tensor, backbone_name=backbone)
bias_value = -np.log((1 - prior_prob) / prior_prob)
anchorsize = [[0.2, 0.2],
[0.5, 0.5],
[0.8, 0.8]]
num_anchors = len(anchorsize)
cls_concate_list = []
loc_concate_list = []
# conv
net['final_conv'] = Conv2D(64, 3, padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=l2(5e-4),
bias_initializer=initializers.Constant(value=bias_value),
name='final_conv')(net['o4'])
net['final_bn'] = BatchNormalization(name='final_bn')(net['final_conv'])
net['final_relu'] = Activation('relu', name='final_relu')(net['final_bn'])
for i in range(1, num_anchors+1):
# cls header (10*10*20)
net['cls{}_conv'.format(i)] = Conv2D(num_classes, 3, padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=l2(5e-4),
bias_initializer=initializers.Constant(value=bias_value),
name='cls{}_conv'.format(i))(net['final_relu'])
net['cls{}_flatten'.format(i)] = Flatten(name='cls{}_flatten'.format(i))(net['cls{}_conv'.format(i)])
cls_concate_list.append(net['cls{}_flatten'.format(i)])
# loc1 header (10*10*4)
net['loc{}_offsets'.format(i)] = Conv2D(4, 3, padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=l2(5e-4),
name='loc{}_offsets'.format(i))(net['final_relu'])
net['loc{}_pred'.format(i)] = input_anchor(name='loc{}_pred'.format(i), anchorsize=anchorsize[i-1])(net['loc{}_offsets'.format(i)])
net['loc{}_flatten'.format(i)] = Flatten(name='loc{}_flatten'.format(i))(net['loc{}_pred'.format(i)])
loc_concate_list.append(net['loc{}_flatten'.format(i)])
net['cls_concate'] = Concatenate(axis=1, name='cls_concate')(cls_concate_list)
net['loc_concate'] = Concatenate(axis=1, name='loc_concate')(loc_concate_list)
net['cls_pred'] = Reshape((-1, num_classes), name='cls_pred')(net['cls_concate'])
net['cls_pred'] = Activation('sigmoid', name='cls_pred_final')(net['cls_pred'])
net['loc_pred'] = Reshape((-1, 4), name='loc_pred')(net['loc_concate'])
return net, num_anchors
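if __name__ == "__main__":
    # Illustrative decode of zero offsets on a 2x2 grid (a sketch assuming
    # only TensorFlow is available; the full head also needs the project's
    # Backbone). With zero offsets, every cell predicts a degenerate box
    # at its own normalised centre.
    zeros = tf.zeros((1, 2, 2, 4))
    print(relative_to_abslolue()(zeros))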
|
{"hexsha": "0b2567c0892ca7ddeea8e01994b26f42f4d7d3a2", "size": 7309, "ext": "py", "lang": "Python", "max_stars_repo_path": "nets/head.py", "max_stars_repo_name": "simon108018/Faster-OneNet-tf2", "max_stars_repo_head_hexsha": "091d17ba851c8bbe01090f7d9693aacb5e74fc94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nets/head.py", "max_issues_repo_name": "simon108018/Faster-OneNet-tf2", "max_issues_repo_head_hexsha": "091d17ba851c8bbe01090f7d9693aacb5e74fc94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nets/head.py", "max_forks_repo_name": "simon108018/Faster-OneNet-tf2", "max_forks_repo_head_hexsha": "091d17ba851c8bbe01090f7d9693aacb5e74fc94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.9685534591, "max_line_length": 139, "alphanum_fraction": 0.4898070872, "include": true, "reason": "import numpy", "num_tokens": 2011}
|
/-
Copyright (c) 2020 Dany Fabian. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Dany Fabian
-/
import tactic.split_ifs
/-!
# Unfold cases tactic
In Lean, pattern matching expressions are not atomic parts of the syntax, but
rather they are compiled down into simpler terms that are later checked by the kernel.
This allows Lean to have a minimalistic kernel but can occasionally lead an explosion
of cases that need to be considered. What looks like one case in the `match` expression
can in fact be compiled into many different cases that all need to proved by case analysis.
This tactic automates the process by allowing us to write down an equation `f x = y`
where we know that `f x = y` is provably true, but does not hold definitionally. In that
case the `unfold_cases` tactic will continue unfolding `f` and introducing `cases` where
necessary until the left hand side becomes definitionally equal to the right hand side.
Consider a definition as follows:
```lean
def myand : bool → bool → bool
| ff _ := ff
| _ ff := ff
| _ _ := tt
```
The equation compiler generates 4 equation lemmas for us:
```lean
myand ff ff = ff
myand ff tt = ff
myand tt ff = ff
myand tt tt = tt
```
This is not in line with what one might expect looking at the definition.
Whilst it is provably true, that `∀ x, myand ff x = ff` and `∀ x, myand x ff = ff`,
we do not get these stronger lemmas from the compiler for free but must in fact
prove them using `cases` or some other local reasoning.
In other words, the following does not constitute a proof that Lean accepts.
```lean
example : ∀ x, myand ff x = ff :=
begin
intros, refl
end
```
However, you can use `unfold_cases { refl }` to prove `∀ x, myand ff x = ff` and
`∀ x, myand x ff = ff`. For definitions with many cases, the savings can be very
significant.
The term that gets generated for the above definition looks like this:
```lean
λ (a a_1 : bool),
a.cases_on
(a_1.cases_on (id_rhs bool ff) (id_rhs bool ff))
(a_1.cases_on (id_rhs bool ff) (id_rhs bool tt))
```
When the tactic tries to prove the goal `∀ x, myand ff x = ff`, it starts by `intros`,
followed by unfolding the definition:
```lean
⊢ ff.cases_on
(x.cases_on (id_rhs bool ff) (id_rhs bool ff))
(x.cases_on (id_rhs bool ff) (id_rhs bool tt)) = ff
```
At this point, it can make progress using `dsimp`. But then it gets stuck:
```lean
⊢ bool.rec (id_rhs bool ff) (id_rhs bool ff) x = ff
```
Next, it can introduce a case split on `x`. At this point, it has to prove two
goals:
```lean
⊢ bool.rec (id_rhs bool ff) (id_rhs bool ff) ff = ff
⊢ bool.rec (id_rhs bool ff) (id_rhs bool ff) tt = ff
```
Now, however, both goals can be discharged using `refl`.
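Putting the steps together, the motivating statement can now be proved in one line:
```lean
example : ∀ x, myand ff x = ff :=
begin
  unfold_cases { refl },
end
```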
-/
namespace tactic
open expr
namespace unfold_cases
/--
Given an equation `f x = y`, this tactic tries to infer an expression on
which a case distinction can be performed in order to make progress.
Pre-condition: assumes that the outer-most application cannot be beta-reduced
(e.g. `whnf` or `dsimp`).
-/
meta def find_splitting_expr : expr → tactic expr
| `(@ite _ %%cond %%dec_inst _ _ = _) := pure `(@decidable.em %%cond %%dec_inst)
| `(%%(app x y) = _) := pure y
| e := fail!"expected an expression of the form: f x = y. Got:\n{e}"
/--
Tries to finish the current goal using the `inner` tactic. If the tactic
fails, it tries to find an expression on which to perform a case distinction
and calls itself recursively.
The order of operations is significant. Because the unfolding can potentially
be infinite, it is important to apply the `inner` tactic at every step.
Notice, that if the `inner` tactic succeeds, the recursive unfolding is stopped.
-/
meta def unfold_cases_core (inner : interactive.itactic) : tactic unit :=
inner <|>
(do split_ifs [], all_goals unfold_cases_core, skip) <|>
do
tgt ← target,
e ← find_splitting_expr tgt,
focus1 $ do
cases e,
all_goals $ (dsimp_target >> unfold_cases_core) <|> skip,
skip
/--
Given a target of the form `⊢ f x₁ ... xₙ = y`, unfolds `f` using a delta reduction.
-/
meta def unfold_tgt : expr → tactic unit
| `(%%l@(app _ _) = %%r) :=
match l.get_app_fn with
| const n ls := delta_target [n]
| e := fail!"couldn't unfold:\n{e}"
end
| e := fail!"expected an expression of the form: f x = y. Got:\n{e}"
end unfold_cases
namespace interactive
open unfold_cases
/--
This tactic unfolds the definition of a function or `match` expression.
Then it recursively introduces case distinctions. The choice of which expression
to case on is driven by the pattern matching expression.
A typical use case is using `unfold_cases { refl }` to collapse cases that need to be
considered in a pattern matching.
```lean
have h : foo x = y, by unfold_cases { refl },
rw h,
```
The tactic expects a goal in the form of an equation, possibly universally quantified.
We can prove a theorem, even if the various case do not directly correspond to the
function definition. Here is an example application of the tactic:
```lean
def foo : ℕ → ℕ → ℕ
| 0 0 := 17
| (n+2) 17 := 17
| 1 0 := 23
| 0 (n+18) := 15
| 0 17 := 17
| 1 17 := 17
| _ (n+18) := 27
| _ _ := 15
example : ∀ x, foo x 17 = 17 :=
begin
unfold_cases { refl },
end
```
The compiler generates 57 cases for `foo`. However, when we look at the definition, we see
that whenever the function is applied to `17` in the second argument, it returns `17`.
Proving this property consists of merely considering all the cases, eliminating invalid ones
and applying `refl` on the ones which remain.
Further examples can be found in `test/unfold_cases.lean`.
-/
meta def unfold_cases (inner : itactic) : tactic unit := focus1 $ do
tactic.intros,
tgt ← target,
unfold_tgt tgt,
try dsimp_target,
unfold_cases_core inner
add_tactic_doc
{ name := "unfold_cases",
category := doc_category.tactic,
decl_names := [`tactic.interactive.unfold_cases],
tags := ["induction", "case bashing"] }
end interactive
end tactic
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/tactic/unfold_cases.lean"}
|
[STATEMENT]
lemma isOK_check [simp]:
"isOK (check b e) = b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. isOK (check b e) = b
[PROOF STEP]
by (simp add: check_def)
|
{"llama_tokens": 77, "file": "Certification_Monads_Check_Monad", "length": 1}
|
import numpy as np
import cv2
def blur_background(img, mask):
mask[mask < 0.25] = 0
mask[mask >= 0.25] = 1
mask = mask.astype(np.uint8)
person = img * mask[:,:,np.newaxis]
kernel = np.ones((5,5), np.float32)/25
    blurred = cv2.filter2D(img, -1, kernel)  # blur the whole image
    mask = np.logical_not(mask)
    back = blurred * mask[:,:,np.newaxis]
result = back + person
return result
def change_back(src, dst, mask, p):
#print('test')
print('clone_test.jpg - processing')
mask[mask < 0.75] = 0
mask[mask >= 0.75] = 255
mask = mask.astype(np.uint8)
res = cv2.seamlessClone(src, dst, mask, p, cv2.NORMAL_CLONE)
print('clone_test.jpg - saving')
crop_coords = (max(p[0] - 120, 0), max(p[1] - 160, 0))
im_croped = res[crop_coords[1]:crop_coords[1] + 320, crop_coords[0]:crop_coords[0] + 240]
cv2.imwrite('static/results/clone_test.jpg', im_croped)
print('clone_test.jpg - saved')
print(np.array(res))
return im_croped#cv2.imread('clone_test.jpg')
#return new_im + (dst * np.logical_not(new_mask)[:,:,np.newaxis])
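if __name__ == "__main__":
    # Illustrative smoke test of blur_background on random data
    # (change_back needs real images and a clone point, so it is skipped here).
    img = np.random.randint(0, 255, (64, 64, 3)).astype(np.float32)
    mask = np.random.rand(64, 64).astype(np.float32)
    out = blur_background(img, mask)
    assert out.shape == img.shape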
|
{"hexsha": "92ce3f27b346eab088e897ac5f2e70aed7e63be1", "size": 1020, "ext": "py", "lang": "Python", "max_stars_repo_path": "server/filters.py", "max_stars_repo_name": "hoopoe/picsart_hackathon", "max_stars_repo_head_hexsha": "d65aeb1432890626e778b9b0232ce3c2f6d4906f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "server/filters.py", "max_issues_repo_name": "hoopoe/picsart_hackathon", "max_issues_repo_head_hexsha": "d65aeb1432890626e778b9b0232ce3c2f6d4906f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "server/filters.py", "max_forks_repo_name": "hoopoe/picsart_hackathon", "max_forks_repo_head_hexsha": "d65aeb1432890626e778b9b0232ce3c2f6d4906f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9090909091, "max_line_length": 91, "alphanum_fraction": 0.6588235294, "include": true, "reason": "import numpy", "num_tokens": 341}
|
*
* ------------------------------------------------------------------
* S E T U P m
* ------------------------------------------------------------------
*
SUBROUTINE SETUPm(ish,j1,j2,JA,JB,na,nb)
IMPLICIT DOUBLE PRECISION(A-H,O-Z)
*
COMMON/MEDEFN/IHSH,NJ(16),LJ(16),NOSH(16,2),J1QN(31,3,2),IJFUL(16)
CAB POINTER(QNOC,NOCCSH(1)),(QNELCSH,NELCSH(8,1)),
CAB : (QNOCORB,NOCORB(8,1)),(QJ1,J1QNRD(15,1))
CAB POINTER(QIAJCMP,IAJCMP(1)),(QLJCOMP,LJCOMP(1)),
CAB : (QNJCOMP,NJCOMP(1)),(QIAJCLD,IAJCLD(1)),
CAB : (QLJCLSD,LJCLSD(1))
POINTER(QNOC,NOCCSH(*)),(QNELCSH,NELCSH(8,*)),
: (QNOCORB,NOCORB(8,*)),(QJ1,J1QNRD(15,*))
POINTER(QIAJCMP,IAJCMP(*)),(QLJCOMP,LJCOMP(*)),
: (QNJCOMP,NJCOMP(*)),(QIAJCLD,IAJCLD(*)),
: (QLJCLSD,LJCLSD(*))
COMMON /NDIMS/ QNOC,QNELCSH,QNOCORB,QJ1,NCFG
COMMON /NON30/ QIAJCMP,QNJCOMP,QLJCOMP,QIAJCLD,QLJCLSD,MAXORB
*
* Inserts the current subshell i1 into the left and right
* coupling tree at position ish.
* j1, j2 : location in the configurations of the shell
* na, nb : occupied or not: =1 not occupied, =2 occupied
*
if (na .eq. 2) then
I1 = nocorb(j1,ja)
else
I1 = nocorb(j2,jb)
end if
*
* This code allows a maximum of 8 open shells but at
* most two shells may differ in the present case. So the
* resultant coupling will be stored starting after location 10.
* A resultant coupling will not occur for the first shell (ISH=1)
*
I2HSH = 10 + ISH - 1
*
* --- FIRST CONSIDER THE L.H.S. (I=1) OF THE MATRIX ELEMENT. NCC=1
* MEANS UNOCCUPIED, REPRESENTED BY A DUMMY SINGLET S SHELL, AND THE
* ADDITIONAL SET OF COUPLING QUANTUM NUMBERS WILL BE THE SAME AS THE
* LAST SET OF COUPLING QUANTUM NUMBERS ALREADY OBTAINED.
* NCC=2 MEANS OCCUPIED. THEN ALL THE NEW QUANTUM NUMBERS (BOTH FOR
* THE SHELL AND FOR THE COUPLING OF THIS SHELL TO THE RESULTANT OF
* THE PREVIOUS ONES) ARE DEFINED IN THE CORRESPONDING J1QNRD ARRAY.
* NOSH - THE NUMBER OF ELECTRONS IN THIS SHELL, IS DEFINED BY THE
* APPROPRIATE ENTRY IN NELCSH . THE R.H.S. IS THEN CONSIDERED
* SIMILARLY (I=2)
*
I=1
25 if (i .eq. 1) then
ic = j1
jc = ja
NCC = na
else
ic = j2
jc = jb
NCC = nb
end if
if (NCC .eq. 1 ) then
* .. shell is not occupied
NOSH(ISH,I)=0
J1QN(ISH,1,I)=0
J1QN(ISH,2,I)=1
J1QN(ISH,3,I)=1
IF(ISH .eq. 2) then
J1QN(I2HSH,1,I)=0
J1QN(I2HSH,2,I)=J1QN(1,2,I)
J1QN(I2HSH,3,I)=J1QN(1,3,I)
else if (ish .gt. 2) then
DO 27 K=1,3
J1QN(I2HSH,K,I)=J1QN(I2HSH-1,K,I)
27 CONTINUE
end if
else
* .. shell is occupied
NOSH(ISH,I)=NELCSH(IC,JC)
JD = J1QNRD(IC,JC)
J1QN(ISH,1,I)=MOD(JD,64)
JD = JD/64
J1QN(ISH,2,I) = MOD(JD,64)
J1QN(ISH,3,I) = JD/64
*
IF (ISH .gt. 1) THEN
*
* .. a resultant coupling is present
*
* IS THIS THE FIRST OCCUPIED SHELL OF THIS CONFIGURATION, THOUGH NOT
* THE FIRST OF THE OTHER CONFIGURATION. IF SO, THE INTERMEDIATE
* COUPLING FORMED HAS THE SAME L,S VALUES AS THIS OCCUPIED SHELL,
* SINCE WE COUPLE THE SHELL TO A DUMMY SINGLET S.
*
IF(IC .eq.1) THEN
I2 = 1
ELSE
I2 = NOCCSH(JC)+IC-1
END IF
JD = J1QNRD(I2,JC)
IF (IC .eq. 1) THEN
J1QN(I2HSH,1,I) = 0
ELSE
J1QN(I2HSH,1,I) = MOD(JD,64)
END IF
JD = JD/64
J1QN(I2HSH,2,I) = MOD(JD, 64)
J1QN(I2HSH,3,I) = JD/64
END IF
end if
*
I=I+1
If (i .eq. 2) GO TO 25
*
* --- SET THE NJ AND LJ VALUES OF THE OCCUPIED SHELLS
*
24 NJ(ISH)=NJCOMP(I1)
IJFUL(ISH)=I1
LJ(ISH)=LJCOMP(I1)
*
END
|
{"hexsha": "3dfea6a8744867f3f24e3f2ea2f8f6856c87c86a", "size": 3894, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/appl/density/setupm.f", "max_stars_repo_name": "mansour2014/ATSP2K_plus", "max_stars_repo_head_hexsha": "30842b9f086d1e497aeb778e2a352d1e8e520ec3", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-21T14:03:39.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-21T14:03:39.000Z", "max_issues_repo_path": "src/appl/density/setupm.f", "max_issues_repo_name": "mzmansour/ATSP2K_plus", "max_issues_repo_head_hexsha": "30842b9f086d1e497aeb778e2a352d1e8e520ec3", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/appl/density/setupm.f", "max_forks_repo_name": "mzmansour/ATSP2K_plus", "max_forks_repo_head_hexsha": "30842b9f086d1e497aeb778e2a352d1e8e520ec3", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9180327869, "max_line_length": 72, "alphanum_fraction": 0.5495634309, "num_tokens": 1476}
|
import Test
import PredictMD
a = PredictMD.version()
Test.@test( typeof(a) == VersionNumber )
Test.@test( typeof(a) === VersionNumber )
Test.@test( a != VersionNumber(0) )
Test.@test( a > VersionNumber(0) )
Test.@test( a > VersionNumber("0.1.0") )
Test.@test( a < VersionNumber("123456789.0.0") )
|
{"hexsha": "3ce823f9c988dac416e589824a16c0b6fe61140c", "size": 299, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/unit/base/test_get_version_number.jl", "max_stars_repo_name": "UnofficialJuliaMirror/PredictMD.jl-3e7d7328-36f8-4388-bd01-4613c92c7370", "max_stars_repo_head_hexsha": "7987993b5900e658c3aa9c568a9ed7fe38e82f11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-05-24T14:59:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T08:17:39.000Z", "max_issues_repo_path": "test/unit/base/test_get_version_number.jl", "max_issues_repo_name": "UnofficialJuliaMirror/PredictMD.jl-3e7d7328-36f8-4388-bd01-4613c92c7370", "max_issues_repo_head_hexsha": "7987993b5900e658c3aa9c568a9ed7fe38e82f11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 383, "max_issues_repo_issues_event_min_datetime": "2018-04-12T21:53:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-24T15:53:37.000Z", "max_forks_repo_path": "test/unit/base/test_get_version_number.jl", "max_forks_repo_name": "UnofficialJuliaMirror/PredictMD.jl-3e7d7328-36f8-4388-bd01-4613c92c7370", "max_forks_repo_head_hexsha": "7987993b5900e658c3aa9c568a9ed7fe38e82f11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-05-06T23:16:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:49:49.000Z", "avg_line_length": 24.9166666667, "max_line_length": 48, "alphanum_fraction": 0.6755852843, "num_tokens": 93}
|
function DCV=plsldadcv(X,y,A,K,method,OPT,order)
%+++ K-fold double cross validation Cross-validation for PLS-LDA
%+++ Input: X: m x n (Sample matrix)
% y: m x 1 (measured property)
% A: The max PC for cross-validation
% K: fold. when K = m, it is leave-one-out CV
% method: pretreatment method. Contains: autoscaling, center etc.
% OPT: =1 Print process.
% =0 No print.
% pareto,minmax,center or none.
%+++ Order: =1 sorted,default. For CV partition.
% =0 random.
%+++ Output: Structural data: CV
%+++ Hongdong Li, Oct. 16, 2008.
%+++ Revised in Jan.12, 2009.
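%+++ Example (illustrative):
%      DCV = plsldadcv(X, y, 10, 5, 'autoscaling', 0, 1);
%      fprintf('DCV error rate: %g, chosen nLV: %d\n', DCV.error, DCV.optPC);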
if nargin<7;order=1;end;
if nargin<6;OPT=1;end;
if nargin<5;method='autoscaling';end;
if nargin<4;K=10;end;
if nargin<3;A=2;end;
check=0; %+++ status variable: 1 means Inf was encountered in the inner CV
if order==1
[y,indexyy]=sort(y);
X=X(indexyy,:);
else
indexyy=randperm(length(y));
X=X(indexyy,:);
y=y(indexyy);
end
A=min([size(X,1)-ceil(length(y)/K) size(X,2) A]);
[Mx,Nx]=size(X);
groups = 1+rem(0:Mx-1,K);
yytest=[];yp=[];nLV=zeros(K,1);
for group=1:K
testk = find(groups==group); calk = find(groups~=group);
Xcal=X(calk,:);ycal=y(calk);
Xtest=X(testk,:);ytest=y(testk);
CV=plsldacv(Xcal,ycal,A,K,method,0,order);
  if CV.check==1;check=1;break;end;
LDA=plslda(Xcal,ycal,CV.optPC,method);
ypred=plsldaval(LDA,Xtest,ytest);
yytest=[yytest;ytest];
  yp=[yp;ypred];
nLV(group)=CV.optPC;
if OPT==1;fprintf('The %dth outer loop finished.\n',group);end;
end
%+++ Find the most frequently chosen nLV.
uniLV=unique(nLV);
freq=zeros(1,length(uniLV));
for j=1:length(uniLV); freq(j)=sum(nLV==uniLV(j)); end
[maxf,maxindex]=max(freq);
optPC=uniLV(maxindex(1));
%+++ output
if check==0
F=roccurve(yytest,yp,0);
error=sum(sign(yp)~=yytest)/Mx;
DCV.method=method;
DCV.check=check;
DCV.error=error;
DCV.Sensitivity=F.sensitivity;
DCV.Specificity=F.specificity;
DCV.nLV=nLV;
DCV.optPC=optPC;
elseif check==1
DCV.method=method;
DCV.check=check;
end
|
{"author": "viggin", "repo": "domain-adaptation-toolbox", "sha": "2a991816a0ac39043b526c2b0cbe01bc844d8890", "save_path": "github-repos/MATLAB/viggin-domain-adaptation-toolbox", "path": "github-repos/MATLAB/viggin-domain-adaptation-toolbox/domain-adaptation-toolbox-2a991816a0ac39043b526c2b0cbe01bc844d8890/plslda/plsldadcv.m"}
|
pdf_file<-"pdf/timeseries_daily.pdf"
cairo_pdf(bg="grey98", pdf_file,width=10,height=8.27)
par(cex.axis=1.1,omi=c(1,0.5,0.95,0.5),mai=c(0.1,1.25,0.1,0.2),mgp=c(5,1,0),family="Lato Light",las=1)
# Import data
christmas<-read.csv(file="myData/allyears.calendar.byday.dat.a",head=F,sep=" ",dec=".")
attach(christmas)
# Create chart
plot(axes=F,type="n",xlab="",ylab="number of deaths",V1,V2)
# other elements
axis(1,tck=-0.01,col="grey",cex.axis=0.9,at=V1[c(1,length(V1))],labels=c("1 July","30 June"))
axis(2,at=py<-pretty(V2),labels=format(py,big.mark=","),cex.axis=0.9,col=par("bg"),col.ticks="grey81",lwd.ticks=0.5,tck=-0.025)
points(V1,V2,type="l")
points(lowess(V2,f=1/5),type="l",lwd=25,col=rgb(255,97,0,70,maxColorValue=255))
text(123,V2[179],"Christmas",cex=1.1)
arrows(157,V2[179],172,V2[179],length=0.10,angle=10,code=0,lwd=2,col=rgb(100,100,100,100,maxColorValue=255))
arrows(192,V2[185],220,V2[185],length=0.10,angle=10,code=0,lwd=2,col=rgb(100,100,100,100,maxColorValue=255))
text(240,V2[185],"New Year",cex=1.1)
# Titling
mtext("Death risk on Christmas and New Year 1979-2004 (USA)",3,line=1.5,adj=0,cex=2,family="Lato Black",outer=T)
mtext("Number of deaths before reaching the emergeny room, sums of years per day",3,line=-0.2,adj=0,cex=1.35,font=3,col="black",outer=T)
mtext("Source: David Phillips, Gwendolyn E. Barker, Kimberly E. Brewer, Christmas and New Year as risk factors for death, Social Science & Medicine 71 (2010) 1463-1471",1,line=3,adj=1,cex=0.75,font=3,outer=T)
dev.off()
|
{"hexsha": "861657d5c1ba66f6481ab26566f028bb485402e0", "size": 1515, "ext": "r", "lang": "R", "max_stars_repo_path": "src/scripts/timeseries_daily.r", "max_stars_repo_name": "wilsonify/data-visualization", "max_stars_repo_head_hexsha": "4a4295a59f666625f4a47b2ad6a6f1eb06f9e8d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/scripts/timeseries_daily.r", "max_issues_repo_name": "wilsonify/data-visualization", "max_issues_repo_head_hexsha": "4a4295a59f666625f4a47b2ad6a6f1eb06f9e8d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/scripts/timeseries_daily.r", "max_forks_repo_name": "wilsonify/data-visualization", "max_forks_repo_head_hexsha": "4a4295a59f666625f4a47b2ad6a6f1eb06f9e8d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.34375, "max_line_length": 208, "alphanum_fraction": 0.704290429, "num_tokens": 605}
|
import numpy as np
import torch
def num_params(model):
parameters = filter(lambda p: p.requires_grad, model.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1000000
print('Trainable Parameters: %.3f million' % parameters)
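# A minimal usage sketch (hypothetical model, not part of this module):
#   model = torch.nn.Linear(128, 64)
#   num_params(model)  # prints: Trainable Parameters: 0.008 million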
# for mulaw encoding and decoding in torch tensors, modified from: https://github.com/pytorch/audio/blob/master/torchaudio/transforms.py
def mulaw_quantize(x, quantization_channels=256):
"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1
    Args:
        x (numpy.ndarray or torch.Tensor): Input signal, scaled to [-1, 1]
        quantization_channels (int): Number of channels. default: 256
    """
mu = quantization_channels - 1
if isinstance(x, np.ndarray):
x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
elif isinstance(x, (torch.Tensor, torch.LongTensor)):
if isinstance(x, torch.LongTensor):
x = x.float()
mu = torch.FloatTensor([mu])
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
return x_mu
def inv_mulaw_quantize(x_mu, quantization_channels=256, cuda=False):
"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
    Args:
        x_mu (numpy.ndarray or torch.Tensor): Mu-law encoded signal
        quantization_channels (int): Number of channels. default: 256
        cuda (bool): If True, move the mu constant to the GPU. default: False
    """
mu = quantization_channels - 1.
if isinstance(x_mu, np.ndarray):
x = ((x_mu) / mu) * 2 - 1.
x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
if isinstance(x_mu, (torch.LongTensor, torch.cuda.LongTensor)):
x_mu = x_mu.float()
if cuda:
mu = (torch.FloatTensor([mu])).cuda()
else:
mu = torch.FloatTensor([mu])
x = ((x_mu) / mu) * 2 - 1.
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
return x
def test_inv_mulaw():
    # Requires a CUDA-capable device, since the tensor is moved to the GPU
    wav = torch.rand(5, 5000)
    wav = wav.cuda()
    de_quant = inv_mulaw_quantize(wav, 512, True)
    assert de_quant.shape == wav.shape
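def demo_mulaw_roundtrip():
    """A minimal round-trip sketch on a hypothetical signal: decoding should
    invert encoding up to quantization error."""
    x = np.sin(np.linspace(0, 2 * np.pi, 100))   # signal already in [-1, 1]
    encoded = mulaw_quantize(x, 256)             # integers in [0, 255]
    decoded = inv_mulaw_quantize(encoded, 256)   # floats back in [-1, 1]
    assert np.abs(x - decoded).max() < 0.05      # bounded quantization error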
|
{"hexsha": "3d973b369f519a24553029de4b465469cb2ef863", "size": 2494, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "Edresson/WaveRNN-Pytorch", "max_stars_repo_head_hexsha": "f4bcfc0b84ad077eeb5011e24c80349c702f57dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-25T06:38:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-26T03:20:25.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "Edresson/WaveRNN-Pytorch", "max_issues_repo_head_hexsha": "f4bcfc0b84ad077eeb5011e24c80349c702f57dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "Edresson/WaveRNN-Pytorch", "max_forks_repo_head_hexsha": "f4bcfc0b84ad077eeb5011e24c80349c702f57dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.223880597, "max_line_length": 136, "alphanum_fraction": 0.6359262229, "include": true, "reason": "import numpy", "num_tokens": 691}
|
///////////////////////////////////////////////////////////////////////////////
// statistics::survival::model::example::model::exponential.cpp //
// //
// Copyright 2009 Erwann Rogard. Distributed under the Boost //
// Software License, Version 1.0. (See accompanying file //
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) //
///////////////////////////////////////////////////////////////////////////////
#include <stdexcept>
#include <string> //needed?
#include <vector>
#include <limits>
#include <ostream>
#include <fstream>
#include <algorithm>
#include <iterator>
// #include <boost/archive/binary_oarchive.hpp>
// #include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/arithmetic/equal.hpp>
#include <boost/assign/std/vector.hpp>
#include <boost/iterator/range_cycle.hpp>
#include <boost/range.hpp>
#include <boost/assert.hpp>
#include <boost/foreach.hpp>
#include <boost/assign/std/vector.hpp>
#include <boost/iterator/range_cycle.hpp>
#include <boost/standard_distribution/distributions/normal.hpp>
#include <boost/statistics/survival/data/include.hpp>
#include <boost/statistics/survival/model/models/exponential/include.hpp>
#include <libs/statistics/survival/model/example/exponential.h>
// Must come after the model to be used
#include <boost/statistics/model/include.hpp>
#include <libs/statistics/survival/model/example/exponential.h>
void example_exponential(std::ostream& out){
out << "-> example_model_exponential : ";
// Steps shown in this example:
//
// Loads the first batch of a set of records
// Creates events at given time
// Evaluates the likelihoods and posteriors
using namespace boost;
using namespace statistics;
namespace surv = survival;
// [ Types ]
// Value
typedef double val_;
typedef std::vector<val_> vals_;
typedef surv::constant<val_> const_;
// I/O
typedef boost::archive::text_oarchive oa_;
typedef boost::archive::text_iarchive ia_;
// Records
typedef surv::data::record<val_> record_;
typedef std::vector<record_> records_;
typedef range_iterator<records_>::type it_record_;
// Events
typedef surv::data::event<val_> event_;
typedef std::vector<event_> events_;
typedef range_iterator<events_>::type it_event_;
// Covariates
typedef val_ x_;
typedef vals_ r_x_;
typedef range_cycle<> range_cycle_;
typedef range_cycle_::apply<r_x_>::type x_cycle_;
// Model
typedef surv::model::exponential::model<val_> model_;
typedef val_ par_;
typedef vals_ pars_;
// [ Constants ]
const val_ entry_bound = const_::inf_;
const val_ par = 2.0;
const char* batches_path
= "/Users/erwann/projets/2009/Xcode/survival/build/Release/batches";
// [ Variables ]
long n_record;
    // [ Load first batch of records ]
records_ records;
{
std::ifstream ifs(batches_path);
if(ifs.good()){
ia_ ia(ifs);
ia >> records;
}else{
std::string str = "error opening : ";
str.append( batches_path );
throw std::runtime_error(str);
}
ifs.close();
}
n_record = boost::size( records );
// [ Events ]
events_ events;
events.reserve( size(records) );
surv::data::events(
begin(records),
end(records),
entry_bound,
std::back_inserter(events)
);
// [ Covariates ]
r_x_ r_x;
{
using namespace boost::assign;
r_x += -0.5, 0.5;
}
x_cycle_ x_cycle = range_cycle_::make(r_x,0,n_record);
out << "size(x_cycle) = " << size(x_cycle) << std::endl;
BOOST_ASSERT( size(x_cycle)>=size(events) );
// Resize x_cycle to a size that matches that of events
x_cycle.advance_end( - (size(x_cycle) - size(events)) );
BOOST_ASSERT( size(x_cycle) == size(events) );
// Model
model_ model;
// Pars
pars_ pars;
{
using namespace assign;
pars += -2.0, -1.0, 0.0, 1.0, 2.0;
}
// [ Likelihood ]
typedef math::normal_distribution<val_> mprior_;
mprior_ mprior;
out << '(';
out << model::log_likelihood<val_>(
model::make_model_data(
model,
r_x[0],
events[0]
),
par
);
out << ',';
out << model::log_likelihood<val_>(
model::make_model_data(
model,
r_x[1],
events[1]
),
par
) << ')';
// [ Likelihoods ]
vals_ lls;
model::log_likelihoods<val_>(
model::make_model_dataset(model,r_x,events),
boost::begin(pars),
boost::end(pars),
std::back_inserter(lls)
);
// [ Prior ]
vals_ lprs;
math::transform<math::fun_wrap::log_unnormalized_pdf_>(
mprior,
boost::begin(pars),
boost::end(pars),
std::back_inserter(lprs)
);
// [ Posteriors ]
vals_ lpos;
model::log_posteriors2<val_>(
model::make_prior_model_dataset(mprior,model,r_x,events),
boost::begin(pars),
boost::end(pars),
std::back_inserter(lpos)
);
// Consistency check
typedef range_iterator<vals_>::type it_val_;
{
it_val_ i_lpr = boost::begin(lprs);
it_val_ i_lpo = boost::begin(lpos);
out << std::endl;
out << "log(prior,likelihood,posterior)" << std::endl;
for(
it_val_ i_ll = boost::begin(lls);
i_ll< boost::end(lls);
i_ll++,i_lpr++,i_lpo++
){
out << '(';
val_ lpr = *i_lpr; out << lpr << ',';
val_ ll = *i_ll; out << ll << ',';
val_ lpo = *i_lpo; out << lpo << ')' << std::endl;
val_ lpo2 = lpr + ll;
BOOST_ASSERT(
arithmetic_tools::equal(
lpo,
lpo2
)
);
}
}
// Consistency check2
{
vals_ lpr2s ( size(pars) );
model::log_posteriors<val_>(
model::make_prior_model_dataset(mprior,model,r_x,events),
boost::begin(pars),
boost::end(pars),
boost::begin(lls), //subtracted
boost::begin(lpr2s)
);
it_val_ i_lpr = boost::begin(lprs);
out << std::endl;
out << "log(prior,prior2)" << std::endl;
for(
it_val_ i_lpr2 = boost::begin(lpr2s);
i_lpr2< boost::end(lpr2s);
i_lpr2++,i_lpr++
){
out << '(';
val_ lpr = *i_lpr; out << lpr << ',';
val_ lpr2 = *i_lpr2; out << lpr2 << ')' << std::endl;
BOOST_ASSERT(
arithmetic_tools::equal(
lpr,
lpr2
)
);
}
}
out << "<-" << std::endl;
}
|
{"hexsha": "5e223b905511bf37ac7f19bef8444239600e0728", "size": 7513, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "survival_model copy/libs/statistics/survival/model/example/exponential.cpp", "max_stars_repo_name": "rogard/boost_sandbox_statistics", "max_stars_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "survival_model copy/libs/statistics/survival/model/example/exponential.cpp", "max_issues_repo_name": "rogard/boost_sandbox_statistics", "max_issues_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "survival_model copy/libs/statistics/survival/model/example/exponential.cpp", "max_forks_repo_name": "rogard/boost_sandbox_statistics", "max_forks_repo_head_hexsha": "16aacbc716a31a9f7bb6c535b1c90dc343282a23", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0077220077, "max_line_length": 79, "alphanum_fraction": 0.5229602023, "num_tokens": 1801}
|
from itertools import permutations
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from distancematrix.generator import ZNormEuclidean
from distancematrix.consumer import MatrixProfileLR
from distancematrix.calculator import AnytimeCalculator
from distancematrix.ostinato import find_consensus_motif, CMResult
class TestOstinato(TestCase):
def test_exact_match(self):
# Each series contains a shifted/scaled version of [1, 1, 0, 2, 2]
series_list = np.array([
np.array([0.04, 0.45, 0.45, 0.00, 0.90, 0.90, 0.74, 0.72, 0.48, 0.82, 0.49, 0.36, 0.02, 0.37, 0.21]),
np.array([0.08, 0.19, 0.25, 0.59, 0.50, 0.72, 0.16, 0.45, 1.49, 1.49, 0.49, 2.49, 2.49, 0.92, 0.16]),
np.array([0.29, 0.42, 0.96, 1.68, 1.68, 1.00, 2.36, 2.36, 0.14, 0.22, 0.51, 0.45, 0.01, 0.66, 0.53]),
np.array([0.84, 0.01, 0.01, 0.00, 0.02, 0.02, 0.51, 0.53, 0.91, 0.94, 0.47, 0.36, 0.28, 0.15, 0.08])
])
correct_subseq_idx = [1, 8, 3, 1]
for perm in permutations(range(len(series_list))):
perm = list(perm) # Tuple to list for indexing
calc_result = find_consensus_motif(series_list[perm], 5)
bf_result = find_consensus_motif_bruteforce(series_list[perm], 5)
npt.assert_almost_equal(bf_result.radius, 0)
npt.assert_equal(bf_result.series_index, 0)
npt.assert_equal(bf_result.subseq_index, correct_subseq_idx[perm[0]])
npt.assert_almost_equal(calc_result.radius, 0)
npt.assert_equal(calc_result.series_index, 0)
npt.assert_equal(calc_result.subseq_index, correct_subseq_idx[perm[0]])
def test_near_match(self):
# Fourth series contains shifted/scaled [1, 1, 1, 2, 2],
# all other series contain shifted/scaled versions with slight noise.
series_list = np.array([
np.array([0.04, 0.40, 0.50, 0.45, 0.90, 0.90, 0.74, 0.72, 0.48, 0.82, 0.49, 0.36, 0.02, 0.37, 0.21]),
np.array([0.08, 0.19, 0.25, 0.59, 0.50, 0.72, 0.16, 0.45, 1.53, 1.44, 1.49, 2.49, 2.49, 0.92, 0.16]),
np.array([0.29, 0.42, 0.96, 1.68, 1.78, 1.58, 2.36, 2.36, 0.14, 0.22, 0.51, 0.45, 0.01, 0.66, 0.53]),
np.array([0.84, 0.01, 0.01, 0.01, 0.02, 0.02, 0.51, 0.53, 0.91, 0.94, 0.47, 0.36, 0.28, 0.15, 0.08])
])
for perm in permutations(range(len(series_list))):
perm = list(perm) # Tuple to list for indexing
calc_result = find_consensus_motif(series_list[perm], 5)
bf_result = find_consensus_motif_bruteforce(series_list[perm], 5)
npt.assert_almost_equal(calc_result.radius, bf_result.radius)
npt.assert_equal(bf_result.series_index, perm.index(3))
npt.assert_equal(calc_result.series_index, perm.index(3))
npt.assert_equal(bf_result.subseq_index, 1)
npt.assert_equal(calc_result.subseq_index, 1)
def test_on_random_data(self):
data = np.array([
[0.292, 0.183, 0.509, 0.128, 0.718, 0.054, 0.7, 0.532, 0.178, 0.076, 0.46, 0.027, 0.882, 0.288, 0.746],
[0.57, 0.539, 0.239, 0.328, 0.784, 0.614, 0.288, 0.696, 0.12, 0.337, 0.54, 0.401, 0.589, 0.461, 0.666],
[0.454, 0.487, 0.687, 0.981, 0.24, 0.863, 0.458, 0.203, 0.798, 0.917, 0.336, 0.562, 0.266, 0.325, 0.818],
[0.749, 0.886, 0.095, 0.335, 0.247, 0.403, 0.063, 0.047, 0.804, 0.976, 0.836, 0.065, 0.27, 0.59, 0.747],
[0.196, 0.924, 0.968, 0.19, 0.999, 0.31, 0.908, 0.576, 0.521, 0.246, 0.444, 0.319, 0.781, 0.628, 0.183],
[0.136, 0.444, 0.115, 0.954, 0.231, 0.876, 0.566, 0.886, 0.898, 0.287, 0.544, 0.365, 0.108, 0.345, 0.03],
[0.813, 0.324, 0.465, 0.459, 0.565, 0.28, 0.334, 0.169, 0.479, 0.957, 0.621, 0.026, 0.998, 0.732, 0.365],
[0.176, 0.072, 0.288, 0.915, 0.867, 0.215, 0.566, 0.555, 0.602, 0.943, 0.786, 0.404, 0.271, 0.579, 0.362],
[0.7, 0.113, 0.159, 0.701, 0.476, 0.216, 0.359, 0.613, 0.358, 0.871, 0.888, 0.668, 0.604, 0.574, 0.555],
[0.745, 0.298, 0.213, 0.669, 0.303, 0.737, 0.93, 0.998, 0.529, 0.215, 0.839, 0.666, 0.669, 0.583, 0.168]])
calc_result = find_consensus_motif(data, 5)
bf_result = find_consensus_motif_bruteforce(data, 5)
npt.assert_almost_equal(calc_result.radius, bf_result.radius)
npt.assert_equal(calc_result.series_index, bf_result.series_index)
npt.assert_equal(calc_result.subseq_index, bf_result.subseq_index)
def find_consensus_motif_bruteforce(series_list, m) -> CMResult:
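    """Brute-force reference for the consensus motif: for every candidate series,
    compute its z-normalized matrix profile against each other series, take the
    elementwise maximum over those profiles (the radius of each subsequence),
    and return the subsequence whose radius is smallest across all series.

    :param series_list: iterable of 1D arrays (one time series each)
    :param m: subsequence length
    :return: CMResult(radius, series_index, subseq_index)
    """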
result = CMResult(np.inf, -1, -1)
for series_idx, series in enumerate(series_list):
radii = np.zeros(len(series) - m + 1)
for series2_idx, series2 in enumerate(series_list):
if series_idx == series2_idx:
continue
calc = AnytimeCalculator(m, series, series2)
calc.add_generator(0, ZNormEuclidean())
mp_cons = calc.add_consumer([0], MatrixProfileLR())
calc.calculate_columns()
mp = mp_cons.matrix_profile()
radii = np.maximum(radii, mp)
subseq_idx = np.argmin(radii)
subseq_radius = radii[subseq_idx]
if subseq_radius < result.radius:
result = CMResult(subseq_radius, series_idx, subseq_idx)
return result
|
{"hexsha": "799f617e6997b6bc93470e3c50cab4f680e3ffef", "size": 5371, "ext": "py", "lang": "Python", "max_stars_repo_path": "distancematrix/tests/test_ostinato.py", "max_stars_repo_name": "IDLabResearch/seriesdistancematrix", "max_stars_repo_head_hexsha": "c0e666d036f24184511e766cee9fdfa55f41df97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-11-22T14:34:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-04T19:23:55.000Z", "max_issues_repo_path": "distancematrix/tests/test_ostinato.py", "max_issues_repo_name": "predict-idlab/seriesdistancematrix", "max_issues_repo_head_hexsha": "c0e666d036f24184511e766cee9fdfa55f41df97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-28T07:59:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-28T07:59:03.000Z", "max_forks_repo_path": "distancematrix/tests/test_ostinato.py", "max_forks_repo_name": "IDLabResearch/seriesdistancematrix", "max_forks_repo_head_hexsha": "c0e666d036f24184511e766cee9fdfa55f41df97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-03-02T12:39:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-22T13:36:25.000Z", "avg_line_length": 50.1962616822, "max_line_length": 118, "alphanum_fraction": 0.6017501396, "include": true, "reason": "import numpy", "num_tokens": 2167}
|
import cv2
from matplotlib import pyplot as plt
import numpy as np
import easygui
import imutils
import easyocr
def read_in_image():
"""
Function that reads a user selected image
:return: image that was selected
"""
easygui.msgbox(
"Select an image with a registration plate to begin...",
title="License Plate Extractor",
ok_button="Select Image")
# Read in the path (will be used later to identify image)
return cv2.imread(easygui.fileopenbox())
def edge_detection(img):
"""
Function that detects edges in an image
:param img: Takes in original user selected image
:return: image with highlighted edges and a grayscale version of the original image
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
pixel_diameter = 11
sigmaColor, sigmaSpace = 17, 17
threshold1 = 30
threshold2 = 200
filtered_image = cv2.bilateralFilter(gray, pixel_diameter, sigmaColor, sigmaSpace) # Noise reduction
edged = cv2.Canny(filtered_image, threshold1, threshold2) # Edge detection
# plt.imshow(cv2.cvtColor(edged, cv2.COLOR_BGR2RGB))
return edged, gray
def find_contours(edged_image):
"""
Function to detect contours from the provided edged image
:param edged_image: Image with highlighted edges
:return: Sorted contours array
"""
contours = cv2.findContours(edged_image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
return sorted(contours, key=cv2.contourArea, reverse=True)[:10]
def identify_polygonal_curves(contours):
    """
    Function that detects polygonal curves in the detected contours
    :param contours: Sorted contours array
    :return: Location of the polygonal curves where length of it is 4 (e.g. square or rectangle)
    """
    location = polygonal_curves = None
    for contour in contours:
        polygonal_curves = cv2.approxPolyDP(contour, 10, True)
        if len(polygonal_curves) == 4:
            location = polygonal_curves
            break
    if location is None:
        raise ValueError("No four-sided contour found - unable to locate a license plate")
    return location, polygonal_curves
def identify_and_mask_contours(location, gray, img):
    """
    Function that draws the located plate contour onto a blank mask
    :param location: location array which contains the polygonal curves (forming rectangle)
    :param gray: grayscale version of originally selected image
    :param img: original image selected by user (kept for call-site compatibility)
    :return: binary mask that is white over the license plate region
    """
    h, w = gray.shape
    mask = np.zeros((h, w), np.uint8)
    cv2.drawContours(mask, [location], 0, 255, -1)  # fill the plate region with white
    return mask
def extract_x_y(mask, gray):
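    """
    Function that crops the grayscale image to the bounding box of the masked plate
    :param mask: binary mask that is white over the license plate region
    :param gray: grayscale version of originally selected image
    :return: cropped grayscale license plate region
    """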
    white_pixels = mask == 255
    x, y = np.where(white_pixels)
    # Bounding box of the white (plate) region
    x1, x2 = x.min(), x.max()
    y1, y2 = y.min(), y.max()
    return gray[x1:x2+1, y1:y2+1]
def extract_text_from_plate(cropped_image):
"""
Extracts english text from the image using easyOCR library
:param cropped_image: cropped license plate of the grayscaled image
:return: license plate number as a string
"""
reader = easyocr.Reader(['en'])
result = reader.readtext(cropped_image)
license_plate = ''
for item in result:
print(str(item[1]) + '\n')
license_plate += str(item[1]) + ' '
return license_plate
def draw_rect_text_on_img(img, license_plate, polygonal_curves):
"""
Function that draws a rectangle around the license plate and the plate's number
:param img: original image selected by user
:param license_plate: registration plate number
    :param polygonal_curves: corner points of the detected license plate
"""
font = cv2.FONT_HERSHEY_SIMPLEX
text_location = (polygonal_curves[0][0][0], polygonal_curves[1][0][1])
color = (0, 255, 0) # Green font
offset = 60
rectangle_location = (polygonal_curves[0][0], polygonal_curves[2][0])
# Draw a rectangle on the image where the license plate is located
paint_image = cv2.rectangle(img, rectangle_location[0], rectangle_location[1], color, 5)
paint_image = cv2.putText(img, text=license_plate, org=(text_location[0], text_location[1] + offset), fontFace=font, fontScale=1, color=color, thickness=2, lineType=cv2.LINE_AA)
plt.imshow(cv2.cvtColor(paint_image, cv2.COLOR_BGR2RGB))
plt.show()
# Read in image
img = read_in_image()
# Detect the edges in the image
edged_image, gray = edge_detection(img)
# Locate the contours in the edged image
contours = find_contours(edged_image)
# Identify the polygonal curves in the contour locations
location, polygonal_curves = identify_polygonal_curves(contours)
# Identify the contours, draw them and create a reverse mask
masked_img = identify_and_mask_contours(location, gray, img)
# Extract coordinates of the license plate and crop it
cropped = extract_x_y(masked_img, gray)
# Extract text from the cropped license plate
license_plate = extract_text_from_plate(cropped)
# Draw rectangle and license plate's contents onto the image
draw_rect_text_on_img(img, license_plate, polygonal_curves)
|
{"hexsha": "980674b8cc3c93525296474b2ac3ffbc55e245af", "size": 5354, "ext": "py", "lang": "Python", "max_stars_repo_path": "LicensePlateExtractor.py", "max_stars_repo_name": "Ares2k/Reg-Plate-Extractor", "max_stars_repo_head_hexsha": "4a16139479687de95eb95e6b962bf666e1a3b4a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LicensePlateExtractor.py", "max_issues_repo_name": "Ares2k/Reg-Plate-Extractor", "max_issues_repo_head_hexsha": "4a16139479687de95eb95e6b962bf666e1a3b4a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LicensePlateExtractor.py", "max_forks_repo_name": "Ares2k/Reg-Plate-Extractor", "max_forks_repo_head_hexsha": "4a16139479687de95eb95e6b962bf666e1a3b4a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2546583851, "max_line_length": 182, "alphanum_fraction": 0.6849084796, "include": true, "reason": "import numpy", "num_tokens": 1330}
|