text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
#pragma once
#include <Eigen/Dense>
using namespace Eigen;
// Logistic regression trained with stochastic gradient descent (SGD).
// Declaration only -- the member comments below are the contract; confirm
// behavior details against the implementation file.
class lrsgd
{
public:
lrsgd();
~lrsgd() {};
VectorXf sigmoid(VectorXf& a); //element-wise logistic function 1/(1+exp(-a))
void lr_objective(float& cost, VectorXf& grad, VectorXf& theta); //evaluate LR cost and gradient at theta (results returned via cost/grad)
void fit(void); //presumably runs the SGD training loop over X,y -- TODO confirm in .cpp
void generate_data(MatrixXf& X, VectorXi& y); //fill X,y with synthetic training data
int num_iter; //max number of iterations
VectorXf theta; //logistic regression weights
float lambda; //regularization parameter
float cost; //LR objective
VectorXf grad; //gradient of LR objective
int tau0; //learning rate parameter
int kappa; //learning rate parameter
VectorXf eta; //learning rate schedule
MatrixXf X; //input data n x d
VectorXi y; //input labels n x 1
};
|
{"hexsha": "ebcbe36e769fac34b1da05d76f0c1f9ee2553b53", "size": 729, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "machine_learning/logreg/logreg.hpp", "max_stars_repo_name": "vishalbelsare/cpp", "max_stars_repo_head_hexsha": "772178d911e8f90c23e9d3c1d8d32482bc397fc5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39.0, "max_stars_repo_stars_event_min_datetime": "2017-11-14T03:20:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-06T09:46:17.000Z", "max_issues_repo_path": "machine_learning/logreg/logreg.hpp", "max_issues_repo_name": "kunalyadav684/cpp", "max_issues_repo_head_hexsha": "3ce14b012acb2dcdf91459fb677de4bd0cb46170", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-10-01T22:30:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-01T22:30:50.000Z", "max_forks_repo_path": "machine_learning/logreg/logreg.hpp", "max_forks_repo_name": "kunalyadav684/cpp", "max_forks_repo_head_hexsha": "3ce14b012acb2dcdf91459fb677de4bd0cb46170", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16.0, "max_forks_repo_forks_event_min_datetime": "2018-02-07T22:44:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-19T10:18:16.000Z", "avg_line_length": 25.1379310345, "max_line_length": 65, "alphanum_fraction": 0.6598079561, "num_tokens": 190}
|
[STATEMENT]
lemma lcmof_leastUpper:
fixes G (structure)
assumes carr[simp]: "a \<in> carrier G" "b \<in> carrier G"
shows "(x \<in> carrier G \<and> x lcmof a b) = least (division_rel G) x (Upper (division_rel G) {a, b})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x \<in> carrier G \<and> x lcmof a b) = is_lub (division_rel G) x {a, b}
[PROOF STEP]
by (auto simp: islcm_def least_def Upper_def elem_def)
|
{"llama_tokens": 182, "file": null, "length": 1}
|
import numpy as np
import cv2
from id.trafficmon.objecttracking import ObjectTrackingAbstract
__author__ = 'Luqman'
class OpticalFlowHS(ObjectTrackingAbstract):
    """
    class OpticalFlowHS
    implementation of classical Horn-Schunck optical flow (Horn, 1981)
    """
    def __init__(self):
        ObjectTrackingAbstract.__init__(self, "OpticalFlowHS")

    def pyramid(self, image, n):
        """Build an image pyramid with at most ``n`` downsampling steps.

        Args:
            image: source image; 2-D (grayscale) or 3-D (colour) array.
            n: maximum number of downsampling steps.

        Returns:
            List of images, finest first. Building stops early once another
            halving would make either spatial dimension smaller than 8 px.
        """
        cur_img = np.copy(image)
        image_pyramid = [cur_img]
        for _ in range(n):
            # shape[:2] works for both grayscale (2-D) and colour (3-D)
            # arrays; the previous 3-way unpack broke on grayscale input.
            height, width = cur_img.shape[:2]
            if (height / 2 < 8) or (width / 2 < 8):
                break
            next_img = self.image_downsampling(cur_img)
            image_pyramid.append(next_img)
            cur_img = next_img
        return image_pyramid

    @staticmethod
    def image_downsampling(image):
        """Downsample by a factor of two (Gaussian blur + decimation).

        A hand-rolled 2x2 averaging implementation lived here once but did
        not work; OpenCV's pyrDown is used instead (maybe revisit later).
        """
        return cv2.pyrDown(image)

    @staticmethod
    def resample_flow(flow, obj_shape):
        """Resample a flow field towards ``obj_shape`` via pyrUp/pyrDown.

        NOTE(review): cv2 ``dstsize`` is (width, height) while numpy shapes
        are (rows, cols); confirm the ordering of ``obj_shape`` at the
        callers. pyrUp/pyrDown also only support ~2x size changes.
        """
        if obj_shape[0] > flow.shape[0]:
            result = cv2.pyrUp(flow, dstsize=obj_shape)
        else:
            result = cv2.pyrDown(flow, dstsize=obj_shape)
        return result

    def compute_flow(self, image):
        """Compute the optical-flow field for ``image`` (still a stub).

        The pyramid is built but not yet used; flow estimation is not
        implemented, so an all-zero field shaped like ``image`` is returned.
        """
        image_pyramid = self.pyramid(image, 5)
        flow = np.zeros_like(image)
        return flow

    def partial_derivative(self, image, prev_flow):
        # init flow -- not implemented yet
        pass

    def bilinear_interpolation(self):
        # not implemented yet
        pass
|
{"hexsha": "84a37a1420a7b629009b1bee6c16de1fb4ab5af9", "size": 2436, "ext": "py", "lang": "Python", "max_stars_repo_path": "id/trafficmon/objecttracking/OpticalFlow.py", "max_stars_repo_name": "umanium/trafficmon", "max_stars_repo_head_hexsha": "86c138bda3c8a3e38fff273e5d61610acee123b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "id/trafficmon/objecttracking/OpticalFlow.py", "max_issues_repo_name": "umanium/trafficmon", "max_issues_repo_head_hexsha": "86c138bda3c8a3e38fff273e5d61610acee123b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "id/trafficmon/objecttracking/OpticalFlow.py", "max_forks_repo_name": "umanium/trafficmon", "max_forks_repo_head_hexsha": "86c138bda3c8a3e38fff273e5d61610acee123b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7073170732, "max_line_length": 90, "alphanum_fraction": 0.6149425287, "include": true, "reason": "import numpy", "num_tokens": 652}
|
#!/usr/bin/python3
"""Plot per-node (APM1..APM6) and system-wide resource metrics from CSV files.

Expects APM<n>_metrics.csv rows of [time, cpu%, mem%] and system_metrics.csv
rows of [time, rx_kbps, tx_kbps, disk_write_kbps, disk_capacity].
"""
import matplotlib.pyplot as plt
import numpy as np
import csv

# One fixed line colour per APM node: APM<n> gets APM_COLORS[n - 1].
APM_COLORS = ['blue', 'black', 'red', 'green', 'yellow', 'cyan']
# Shared x-axis tick positions (seconds).
x_ticks = np.arange(0, 900, 10)


def _read_csv_column(path, col):
    """Read `path` and return (x, y): column 0 verbatim, column `col` as float."""
    x, y = [], []
    with open(path, 'r') as file:
        for row in csv.reader(file, delimiter=','):
            x.append(row[0])
            y.append(float(row[col]))
    return x, y


def _plot_apm_metric(col, ylabel, title):
    """Plot column `col` of every APM<n>_metrics.csv on the current figure."""
    plt.xticks(x_ticks)
    for n in range(1, len(APM_COLORS) + 1):
        x, y = _read_csv_column('APM' + str(n) + '_metrics.csv', col)
        plt.plot(x, y, color=APM_COLORS[n - 1], linestyle='solid', marker='o',
                 label='APM' + str(n))
    plt.xlabel('Seconds')
    plt.ylabel(ylabel)
    plt.title(title)
    plt.legend()


def _plot_system_metric(col, color, label):
    """Plot column `col` of system_metrics.csv on the current figure."""
    x, y = _read_csv_column('system_metrics.csv', col)
    plt.plot(x, y, color=color, linestyle='solid', marker='o', label=label)


# CPU UTILIZATION (figure 1, the implicit current figure)
_plot_apm_metric(1, '% Usage', 'CPU Utilization')

# MEMORY UTILIZATION
plt.figure(2)
_plot_apm_metric(2, '% Memory', 'Memory Utilization')

# BANDWIDTH UTILIZATION
plt.figure(3)
plt.xticks(x_ticks)
_plot_system_metric(1, 'orange', 'RX Data Rate (kb/s)')
_plot_system_metric(2, 'grey', 'TX Data Rate (kb/s)')
plt.xlabel('Seconds')
plt.ylabel('Amount of Bandwidth (kb/s)')
plt.title('Network Bandwidth Utilization')
plt.legend()

# DISK ACCESS RATES
plt.figure(4)
plt.xticks(x_ticks)
_plot_system_metric(3, 'blue', 'Disk 1')
plt.xlabel('Seconds')
plt.ylabel('Disk Access Writes (kb/s)')
plt.title('Disk Access Rates (kb/s)')
plt.legend()

# DISK UTILIZATION
plt.figure(5)
plt.xticks(x_ticks)
_plot_system_metric(4, 'blue', 'Disk 1')
plt.xlabel('Seconds')
plt.ylabel('Disk Capacity')
plt.title('Disk Utilization')
plt.legend()

# Finally, show the plots
plt.show()
|
{"hexsha": "d6b5474536e9465d594c0a08338620bdb5d7acd9", "size": 3629, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python Tutorials/Graphing/Lab4.py", "max_stars_repo_name": "GarrettMaury7921/OpenAI_Tutorials", "max_stars_repo_head_hexsha": "6db988c249565264c94efda65739b2e87007b87e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python Tutorials/Graphing/Lab4.py", "max_issues_repo_name": "GarrettMaury7921/OpenAI_Tutorials", "max_issues_repo_head_hexsha": "6db988c249565264c94efda65739b2e87007b87e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python Tutorials/Graphing/Lab4.py", "max_forks_repo_name": "GarrettMaury7921/OpenAI_Tutorials", "max_forks_repo_head_hexsha": "6db988c249565264c94efda65739b2e87007b87e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5649350649, "max_line_length": 92, "alphanum_fraction": 0.6398456875, "include": true, "reason": "import numpy", "num_tokens": 1104}
|
c###findf.for
SUBROUTINE FINDF(K)
C--------------------------------
C
C THIS ROUTINE DOES AREA COVERAGE FOR A SPECIFIED FREQUENCY
C (FIND ALL MODES FOR AN OPERATING FREQUENCY)
C INSERTS PENETRATION ANGLES INTO THE ANGLE TABLE AND COMPUTES ALL
C RAY PATH PARAMETERS FOR EACH ANGLE AT THE FREQUENCY "FREQ"
C
C FREQ GIVEN OPERATING FREQUENCY - MHZ-
C GHOP GIVEN GROUND DISTANCE -RADIANS-
C DELPEN(3,5) PENETRATION ANGLE FOR FREQUENCY FMHZ -DEGREES-
C
C
C DELMOD(6,5) TAKE OFF ANGLE AT FMHZ,GHOP -DEGREES-
C HPMOD (6,5) VIRTUAL HEIGHT AT FMHZ,GHOP -KM-
C HTMOD (6,5) TRUE HEIGHT AT FMHZ,GHOP -KM-
C DSKPKM SKIP DISTANCE AT FMHZ,GHOP -KM-
C DELSKP TAKE OFF ANGLE FOR SKIP DISTANCE -DEGREES-
C HPSKP VIRTUAL HEIGHT FOR SKIP DISTANCE -KM-
C HTSKP TRUE HEIGHT FOR SKIP DISTANCE -KM-
C NANG IS THE HIGHEST ANGLE NUMBER (PRESET IN SUBROUTINE SANG)
C
C K IS THE SAMPLE AREA
C ICUSP IS THE INSERT CUSP INDEX
C (=-1 FOR NOT IN, =0 FOR ONE SIDE IN, =1 FOR FINISHED)
C IH IS THE HEIGHT INDEX FOR COMMON/RAYS/ (FROM 1 TO 30)
C ILOW IS THE LOWER LIMIT (IH) FOR LAYER
C IHIGH IS THE UPPER LIMIT (IH) FOR LAYER
C IA IS THE ANGLE INDEX FOR COMMON/RAYS/ (1 TO NANG .LE. 40)
C IAF IS THE ANGLE INDEX FOR COMMON/REFLX/ (1 TO 45)
C IFOB IS IN KHZ
C
C LONG PATH PARAMETERS,SEE SUBR LNGPAT.
COMMON /DON /ALATD, AMIN, AMIND, BTR, BTRD, DLONG, DMP, ERTR, GCD,
1 GCDKM, PMP, PWR, TLAT, TLATD, TLONG, TLONGD, RSN, SIGTR, RLAT,
2 RLATD,RLONG,RLONGD,BRTD,FLUX,ULAT,ULATD,ULONG,ULONGD,SSN,D90R,
3 D50R,D10R,D90S,D50S,D10S
COMMON/FRQ/FREA(13),FREL(29),FREQ,JMODE,ITXRCP(2)
COMMON/REFLX/DELFX(45,3),HPFLX(45,3),HTFLX(45,3),GDFLX(45,3),FVFLX
A (45,3),DSKPKM(3),DELSKP(3),HPSKP(3),HTSKP(3),DMAXKM(3),FVSKP(3)
B ,ISKP(3),IMODE(45,3),AFFLX(45,3),DELPEN(3,5),GML(45,3),FHP(45,3)
COMMON/LOSX/ANDVX(45,3),ADVX(45,3),AOFX(45,3),ARFX(45,3),GRLOSX(45
A ,3),TGAINX(45,3),TLSKM(45,3),EFFlp(45),IAFTXR(3)
COMMON /CON /D2R, DCL, GAMA, PI, PI2, PIO2, R2D, RZ, VOFL
COMMON /RON /CLAT(5), CLONG(5), GLAT(5), RD(5), FI(3,5), YI(3,5),
1HI(3,5), HPRIM(30,5), HTRUE(30,5), FVERT(30,5),KM,KFX, AFAC(30,5),
2HTR(50,3), FNSQ(50,3)
COMMON/INFORM/INFO,IHSHR,IHLNG
COMMON/RAYS/ANG(40),IFOB(40,30,5),NANG
DIMENSION ITYPE(3)
CHARACTER ITF*1
DATA ITYPE/1,2,3/
C.....OPERATING FREQUENCY IN KHZ (THE IFOB TABLE IS STORED IN KHZ)
JFHZ = 1000. * FREQ
DMAXKM (K) = 0.
DSKPKM (K) = 10000.
C.....CLEAR THE REFLECTION TABLES FOR THIS SAMPLE AREA
DO 100 IA = 1, 45
HPFLX (IA, K) = 0.
DELFX (IA,K) = 0.
100 GDFLX (IA, K) = 0.
FC2 = FI (3, K) * FI (3, K)
C
C FIND PENETRATION ANGLES
C
CALL PENANG(K)
ITF=CHAR(12)
C.....OPTIONAL DIAGNOSTIC PAGE HEADER (INFO BIT 8 OR 32 SET)
IF(IAND(INFO,32).GT.0.OR.IAND(INFO,8).GT.0)THEN
WRITE(99,'(A1,/,/,18X,8H FREQ ,F7.3,8H NANG ,I4,8H AMIND ,
1 F6.2,/,31X,12HCONTROL AREA,I4,/,16X,20HPENETRATION ANGLES ,
2 3F8.3)')ITF,FREQ,NANG,AMIND,K,(delpen(ia,k),ia=1,3)
WRITE(99,'(/,A,A,/)')' IDX CUSP DISTANCE ANGLE V',
1'IRTUAL M.CORR TRUE MODE FVERT COLL-ADJ'
ENDIF
IA = 0
IAF = 1
C
C SET LAYER
C
C.....SET FOR E LAYER
ICUSP = - 1
IL = 1
IH = 1
ILOW = 1
IHIGH = 10
GO TO 275
C.....SET FOR F LAYER
225 IH = 11
ILOW = 11
IF (FI (2, K))235, 235, 245
C.....SET FOR F2 LAYER ONLY
235 IL = 3
ICUSP = - 1
IHIGH = 30
236 CONTINUE
GO TO 275
C.....SET FOR F1 LAYER
245 IL = 2
ICUSP = - 1
IHIGH = 20
GO TO 236
C.....SET FOR F2 LAYER
255 IL = 3
ICUSP = - 1
ILOW = IHIGH + 1
IHIGH = 30
IH = 21
GO TO 236
265 GO TO (225, 255, 400), IL
C.....START OF SEARCH
275 CONTINUE
C.....CHECK TO SEE IF ANY MODES FROM THIS LAYER
IF (DELPEN (IL, K))265, 265, 285
C.....CHECK IF PENETRATED ALL LAYERS
285 IF (DELPEN (IL, K) - 89.99)295, 295, 400
C.....INCREMENT ANGLE
295 IA = IA + 1
C.....STOP IF THERE ARE MORE HOPS THAN REASONABLE
IF(IA-NANG) 300,300,400
300 CONTINUE
C.....CHECK TO SEE IF LAYER WAS PENETRATED
IF (DELPEN (IL, K) - ANG (IA))345, 345, 305
C.....SEARCH FOR FREQUENCY
305 CONTINUE
IF(IFOB(IA,ILOW,K) - JFHZ) 306, 325, 325
306 IF(IH - IHIGH) 315, 275, 275
315 IF (IFOB (IA, IH, K) - JFHZ)335, 325, 330
C.....EXACT FREQUENCY TO THREE PLACES (IN MHZ)
325 DELFX (IAF, K) = ANG (IA)
HTFLX (IAF, K) = HTRUE (IH, K)
AFFLX(IAF,K)=AFAC(IH,K)
FV = FVERT (IH, K)
HP = HPRIM (IH, K)
IMODE (IAF, K) = ITYPE (IL)
GO TO 375
C.....INCREMENT HEIGHT INDEX
330 IH = IH + 1
GO TO 305
335 IF (IFOB (IA, IH + 1, K) - JFHZ)330, 340, 340
C.....BEGIN INTERPOLATION
340 SLOPD = IFOB (IA, IH + 1, K) - IFOB (IA, IH, K)
SLOPD = AMAX1 (1., SLOPD)
SLOPE = JFHZ - IFOB (IA, IH, K)
SLOPE = SLOPE / SLOPD
HTFLX (IAF, K) = HTRUE (IH, K) + SLOPE * (HTRUE (IH + 1, K) - HTRU
1E (IH, K))
FV = FVERT (IH, K) + SLOPE * (FVERT (IH + 1, K) - FVERT (IH, K))
DELFX (IAF, K) = ANG (IA)
HP = HPRIM (IH, K) + SLOPE * (HPRIM (IH + 1, K) - HPRIM (IH, K))
AFFLX (IAF, K) = AFAC (IH, K) + SLOPE * (AFAC (IH + 1, K) - AFAC (
1IH, K))
IMODE (IAF, K) = ITYPE (IL)
C.....END INTERPOLATION
GO TO 375
C.....BEGIN INSERT OF CUSP
345 DELFX (IAF, K) = DELPEN (IL, K)
HTFLX (IAF, K) = HTRUE (IHIGH, K)
AFFLX( IAF,K ) = AFAC( IHIGH, K)
FV = FVERT (IHIGH, K)
HP = HPRIM (IHIGH, K)
C.....KEEP ANGLE COUNT CORRECT
IA = IA - 1
ICUSP = 0
IMODE (IAF, K) = ITYPE (IL)
C.....END OF INSERT CUSP
GO TO 375
C.....F2 IS THE LAST LAYER
350 IF (IL - 3)355, 400, 400
C.....IS NEXT LAYER POSSIBLE
C
C.....BEGIN INSERT CUSP FOR NEXT LAYER
355 IF (DELPEN (IL, K) - 89.9)360, 400, 400
360 DELFX (IAF, K) = DELFX (IAF - 1, K) + .001
HTFLX (IAF, K) = HTRUE (IHIGH + 1, K)
AFFLX(IAF,K) = AFAC(IHIGH+1,K)
FV = FVERT (IHIGH + 1, K)
HP = HPRIM (IHIGH + 1, K)
ICUSP = 1
IF (FI (2, K))365, 365, 370
365 IMODE (IAF, K) = ITYPE (3)
GO TO 375
370 IMODE (IAF, K) = ITYPE (IL + 1)
C.....END OF INSERT CUSP FOR NEXT LAYER
375 CONTINUE
C CORRECT MARTYN S THEOREM
C.....MARTYN"S THEOREM ASSUMES FLAT IONOSPHERE
C.....THIS IS A CORRECTION FOR A SPHERICAL IONOSPHERE
DEL = DELFX (IAF, K) * D2R
RCOSD = RZ * COS (DEL)
XFSQ = FREQ * FREQ / FC2
HT = HTFLX (IAF, K)
XMUT = 1. - FV * FV / (FREQ * FREQ)
XHP = (HP - HT) / RZ
SPH = XFSQ * XMUT * XHP * (HT + 2. * (RZ + HT) * XHP)
c.....SPH=amin1(SPH,60.)
CANCEL IF(IAND(INFO,8).GT.0)THEN
CANCEL WRITE(99,'(2(A,I2),3(A,F7.2))')' "FINDF" LAYER=',IL,
CANCEL 1' IAF=',IAF,' FREQ=',FREQ,' HP=',HP,' MARTYN CORR=',SPH
CANCEL ENDIF
HP = HP + SPH
PHE = RCOSD / (RZ + HP)
PHE = ASIN (PHE)
GDR = 2. * RZ * (PIO2 - DEL - PHE)
C.....GROUND DISTANCE (KM)
GDFLX (IAF, K) = GDR
HPFLX (IAF, K) = HP
FVFLX (IAF, K) = FV
C.....BEGIN TO FIND SKIP DISTANCE (MINIMUM)
IF (DSKPKM (K) - GDR)385, 385, 380
380 DSKPKM (K) = GDR
DELSKP (K) = DELFX (IAF, K)
HTSKP (K) = HT
HPSKP (K) = HP
FVSKP (K) = FV
ISKP (K) = ITYPE (IL)
SKPSPH=SPH
ICUSPSKP=ICUSP
C.....END OF FINDING SKIP DISTANCE
385 IF (DMAXKM (K) - GDR)390, 390, 395
390 IF(DELFX(IAF,K) - AMIND ) 395,391,391
C.....FIND MAXIMUM DISTANCE
391 DMAXKM(K) = GDR
ICUSPMAX=ICUSP
395 CONTINUE
C.....INCREMENT INDEX FOR COMMON/REFLX/ (MAXIMUM IS 45)
C NOTE(REVIEW): THE FORMAT STRING BELOW CARRIES AN EXTRA ')' AFTER
C THE FINAL F7.2. MOST COMPILERS STOP AT THE MATCHING ')' AND IGNORE
C IT, BUT IT SHOULD BE REMOVED.
IF(IAND(INFO,32).GT.0.OR.IAND(INFO,8).GT.0)THEN
WRITE(99,'(2I5,F11.1,F7.2,F8.2,F6.2,F9.2,I4,F8.3,F7.2))')IAF,
1 ICUSP,GDFLX(IAF,K),DELFX(IAF,K),HPFLX(IAF,K),SPH,HTFLX(IAF,K),
2 IMODE(IAF,K),FVFLX(IAF,K),AFFLX(IAF,K)
ENDIF
C.....REMEMBER LAST FILLED ANGLE INDEX FOR THIS AREA
IAFTXR(K)=IAF
IAF = IAF + 1
IF(IAF - 45) 396, 396, 400
396 IF(ICUSP) 275, 350, 265
400 CONTINUE
C.....END OF INSERT CUSP (IE CUSP FINISHED)
C.....DIAGNOSTIC SUMMARY OF SKIP AND MAXIMUM DISTANCES
IF(IAND(INFO,32).GT.0.OR.IAND(INFO,8).GT.0)THEN
WRITE(99,'(/,5H SKIP,I5,F11.1,F7.2,F8.2,F6.2,F9.2,I4,F8.3,/,A,
1 I5,F11.1)')ICUSPSKP,DSKPKM(K),DELSKP(K),HPSKP(K),SKPSPH,HTSKP(K),
2 ISKP(K),FVSKP(K),' MAX ',ICUSPMAX,DMAXKM(K)
ENDIF
RETURN
END
C--------------------------------
|
{"hexsha": "e0b886614176047f481b578117d6c74960befa3f", "size": 8494, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "src/voacapw/findf.for", "max_stars_repo_name": "jawatson/voacapl", "max_stars_repo_head_hexsha": "6861fbad041ff7a56b8875c735c9242c81a94320", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2015-11-19T16:11:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T02:20:21.000Z", "max_issues_repo_path": "src/voacapw/findf.for", "max_issues_repo_name": "jawatson/voacapl", "max_issues_repo_head_hexsha": "6861fbad041ff7a56b8875c735c9242c81a94320", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2015-08-17T11:46:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-05T20:14:16.000Z", "max_forks_repo_path": "src/voacapw/findf.for", "max_forks_repo_name": "jawatson/voacapl", "max_forks_repo_head_hexsha": "6861fbad041ff7a56b8875c735c9242c81a94320", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-02-04T10:18:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-05T08:16:58.000Z", "avg_line_length": 34.6693877551, "max_line_length": 73, "alphanum_fraction": 0.5482693666, "num_tokens": 3703}
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Self-Supervised GAN with auxiliary rotation loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import logging
from compare_gan.architectures.arch_ops import linear
from compare_gan.gans import loss_lib
from compare_gan.gans import modular_gan
from compare_gan.gans import penalty_lib
from compare_gan.gans import utils
import gin
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
NUM_ROTATIONS = 4
# pylint: disable=not-callable
@gin.configurable(blacklist=["kwargs"])
class SSGAN(modular_gan.ModularGAN):
  """Self-Supervised GAN.
  http://arxiv.org/abs/1811.11212
  """
  def __init__(self,
               self_supervision="rotation_gan",
               rotated_batch_size=gin.REQUIRED,
               weight_rotation_loss_d=1.0,
               weight_rotation_loss_g=0.2,
               **kwargs):
    """Creates a new Self-Supervised GAN.
    Args:
      self_supervision: One of [rotation_gan, rotation_only, None]. When it is
        rotation_only, no GAN loss is used, degenerates to a pure rotation
        model.
      rotated_batch_size: The total number images per batch for the rotation
        loss. This must be a multiple of (4 * #CORES) since we consider 4
        rotations of each images on each TPU core. For GPU training #CORES is 1.
      weight_rotation_loss_d: Weight for the rotation loss for the discriminator
        on real images.
      weight_rotation_loss_g: Weight for the rotation loss for the generator
        on fake images.
      **kwargs: Additional arguments passed to `ModularGAN` constructor.
    """
    super(SSGAN, self).__init__(**kwargs)
    self._self_supervision = self_supervision
    self._rotated_batch_size = rotated_batch_size
    self._weight_rotation_loss_d = weight_rotation_loss_d
    self._weight_rotation_loss_g = weight_rotation_loss_g
    # To save memory ModularGAN supports feeding real and fake samples
    # separately through the discriminator. SSGAN does not support this to
    # avoid additional complexity in create_loss().
    assert not self._deprecated_split_disc_calls, \
        "Splitting discriminator calls is not supported in SSGAN."
  def discriminator_with_rotation_head(self, x, y, is_training):
    """Discriminator network with augmented auxiliary predictions.
    Args:
      x: an input image tensor.
      y: Tensor with label indices.
      is_training: boolean, whether or not it is a training call.
    Returns:
      real_probs: the [0, 1] probability tensor of x being real images.
      real_scores: the unbounded score tensor of x being real images.
      rotation_scores: the categorical probablity of x being rotated in one of
        the four directions.
    """
    real_probs, real_scores, final = self.discriminator(
        x=x, y=y, is_training=is_training)
    use_sn = self._discriminator._spectral_norm  # pylint: disable=protected-access
    # Extra linear head on top of the discriminator's final features that
    # classifies which of the 4 rotations was applied to the input.
    with tf.variable_scope("discriminator_rotation", reuse=tf.AUTO_REUSE):
      rotation_scores = linear(tf.reshape(final, (tf.shape(x)[0], -1)),
                               NUM_ROTATIONS,
                               scope="score_classify",
                               use_sn=use_sn)
    return real_probs, real_scores, rotation_scores
  def create_loss(self, features, labels, params, is_training=True):
    """Build the loss tensors for discriminator and generator.
    This method will set self.d_loss and self.g_loss.
    Args:
      features: Optional dictionary with inputs to the model ("images" should
        contain the real images and "z" the noise for the generator).
      labels: Tensor will labels. These are class indices. Use
        self._get_one_hot_labels(labels) to get a one hot encoded tensor.
      params: Dictionary with hyperparameters passed to TPUEstimator.
        Additional TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
        `tpu_context`. `batch_size` is the batch size for this core.
      is_training: If True build the model in training mode. If False build the
        model for inference mode (e.g. use trained averages for batch norm).
    Raises:
      ValueError: If set of meta/hyper parameters is not supported.
    """
    images = features["images"]  # Input images.
    generated = features["generated"]  # Fake images.
    if self.conditional:
      y = self._get_one_hot_labels(labels)
      sampled_y = self._get_one_hot_labels(features["sampled_labels"])
    else:
      y = None
      sampled_y = None
      all_y = None
    # Batch size per core (static; `.value` is the TF1.x Dimension API).
    bs = images.shape[0].value
    num_replicas = params["context"].num_replicas if "context" in params else 1
    assert self._rotated_batch_size % num_replicas == 0
    # Rotated batch size per core.
    rotated_bs = self._rotated_batch_size // num_replicas
    assert rotated_bs % 4 == 0
    # Number of images to rotate. Each images gets rotated 3 times.
    num_rotated_examples = rotated_bs // 4
    logging.info("num_replicas=%s, bs=%s, rotated_bs=%s, "
                 "num_rotated_examples=%s, params=%s",
                 num_replicas, bs, rotated_bs, num_rotated_examples, params)
    # Augment the images with rotation.
    if "rotation" in self._self_supervision:
      # Put all rotation angles in a single batch, the first batch_size are
      # the original up-right images, followed by rotated_batch_size * 3
      # rotated images with 3 different angles.
      assert num_rotated_examples <= bs, (num_rotated_examples, bs)
      images_rotated = utils.rotate_images(
          images[-num_rotated_examples:], rot90_scalars=(1, 2, 3))
      generated_rotated = utils.rotate_images(
          generated[-num_rotated_examples:], rot90_scalars=(1, 2, 3))
      # Labels for rotation loss (unrotated and 3 rotated versions). For
      # NUM_ROTATIONS=4 and num_rotated_examples=2 this is:
      # [0, 0, 1, 1, 2, 2, 3, 3]
      rotate_labels = tf.constant(
          np.repeat(np.arange(NUM_ROTATIONS, dtype=np.int32),
                    num_rotated_examples))
      rotate_labels_onehot = tf.one_hot(rotate_labels, NUM_ROTATIONS)
      all_images = tf.concat([images, images_rotated,
                              generated, generated_rotated], 0)
      if self.conditional:
        y_rotated = tf.tile(y[-num_rotated_examples:], [3, 1])
        # NOTE(review): this tiles the *real* labels `y` for the rotated
        # fake images rather than `sampled_y`, even though the concat below
        # pairs those images with the sampled-label stream. Looks
        # inconsistent -- confirm intent against upstream compare_gan.
        sampled_y_rotated = tf.tile(y[-num_rotated_examples:], [3, 1])
        all_y = tf.concat([y, y_rotated, sampled_y, sampled_y_rotated], 0)
    else:
      all_images = tf.concat([images, generated], 0)
      if self.conditional:
        all_y = tf.concat([y, sampled_y], axis=0)
    # Compute discriminator output for real and fake images in one batch.
    d_all, d_all_logits, c_all_logits = self.discriminator_with_rotation_head(
        all_images, y=all_y, is_training=is_training)
    d_real, d_fake = tf.split(d_all, 2)
    d_real_logits, d_fake_logits = tf.split(d_all_logits, 2)
    c_real_logits, c_fake_logits = tf.split(c_all_logits, 2)
    # Separate the true/fake scores from whole rotation batch.
    d_real_logits = d_real_logits[:bs]
    d_fake_logits = d_fake_logits[:bs]
    d_real = d_real[:bs]
    d_fake = d_fake[:bs]
    self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
        d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits,
        d_fake_logits=d_fake_logits)
    penalty_loss = penalty_lib.get_penalty_loss(
        x=images, x_fake=generated, y=y, is_training=is_training,
        discriminator=self.discriminator, architecture=self._architecture)
    self.d_loss += self._lambda * penalty_loss
    # Add rotation augmented loss.
    if "rotation" in self._self_supervision:
      # Take an equal number of examples for every rotation angle.
      assert len(c_real_logits.shape.as_list()) == 2, c_real_logits.shape
      assert len(c_fake_logits.shape.as_list()) == 2, c_fake_logits.shape
      c_real_logits = c_real_logits[- rotated_bs:]
      c_fake_logits = c_fake_logits[- rotated_bs:]
      preds_onreal = tf.cast(tf.argmax(c_real_logits, -1), rotate_labels.dtype)
      accuracy = tf.reduce_mean(
          tf.cast(tf.equal(rotate_labels, preds_onreal), tf.float32))
      c_real_probs = tf.nn.softmax(c_real_logits)
      c_fake_probs = tf.nn.softmax(c_fake_logits)
      # Cross-entropy against the rotation labels (1e-10 guards log(0)).
      c_real_loss = - tf.reduce_mean(
          tf.reduce_sum(rotate_labels_onehot * tf.log(c_real_probs + 1e-10), 1))
      c_fake_loss = - tf.reduce_mean(
          tf.reduce_sum(rotate_labels_onehot * tf.log(c_fake_probs + 1e-10), 1))
      # rotation_only: zero out the GAN losses, keep only rotation terms.
      if self._self_supervision == "rotation_only":
        self.d_loss *= 0.0
        self.g_loss *= 0.0
      self.d_loss += c_real_loss * self._weight_rotation_loss_d
      self.g_loss += c_fake_loss * self._weight_rotation_loss_g
    else:
      c_real_loss = 0.0
      c_fake_loss = 0.0
      accuracy = tf.zeros([])
    self._tpu_summary.scalar("loss/c_real_loss", c_real_loss)
    self._tpu_summary.scalar("loss/c_fake_loss", c_fake_loss)
    self._tpu_summary.scalar("accuracy/d_rotation", accuracy)
    self._tpu_summary.scalar("loss/penalty", penalty_loss)
|
{"hexsha": "d51f36b29a6e3f8c7947a64ead69957fd88e98fc", "size": 9724, "ext": "py", "lang": "Python", "max_stars_repo_path": "DiffAugment-biggan-imagenet/compare_gan/gans/ssgan.py", "max_stars_repo_name": "Rian-T/data-efficient-gans", "max_stars_repo_head_hexsha": "1d7e58be909524c86c863b5ab7c9d591afc00d1d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1902, "max_stars_repo_stars_event_min_datetime": "2018-02-20T20:11:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:40:10.000Z", "max_issues_repo_path": "DiffAugment-biggan-imagenet/compare_gan/gans/ssgan.py", "max_issues_repo_name": "Rian-T/data-efficient-gans", "max_issues_repo_head_hexsha": "1d7e58be909524c86c863b5ab7c9d591afc00d1d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 88, "max_issues_repo_issues_event_min_datetime": "2020-06-20T01:57:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:45:04.000Z", "max_forks_repo_path": "DiffAugment-biggan-imagenet/compare_gan/gans/ssgan.py", "max_forks_repo_name": "Rian-T/data-efficient-gans", "max_forks_repo_head_hexsha": "1d7e58be909524c86c863b5ab7c9d591afc00d1d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 355, "max_forks_repo_forks_event_min_datetime": "2018-03-22T14:22:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-11T13:35:09.000Z", "avg_line_length": 42.8370044053, "max_line_length": 83, "alphanum_fraction": 0.6983751543, "include": true, "reason": "import numpy", "num_tokens": 2377}
|
-- Math 52: Week 5
import .utils
open classical
-- The following lemmas may be useful for the next proof.
-- mul_lt_mul_of_pos_left (a b c : ℝ) : a < b → 0 < c → c * a < c * b
-- mul_lt_mul_of_pos_right (a b c : ℝ) : a < b → 0 < c → a * c < b * c
-- Lakins 2.1.2: For all real numbers a and b, if 0 < a < b, then a² < b².
-- Proof still to be written (placeholder `sorry`).
theorem L212 : ∀ (a b : ℝ), 0 < a ∧ a < b → a * a < b * b :=
begin
sorry
end
-- The following lemmas may be useful for the next proof.
-- mul_le_mul_of_nonneg_left (a b c : ℝ) : a ≤ b → 0 ≤ c → c * a ≤ c * b
-- mul_le_mul_of_nonneg_right (a b c : ℝ) : a ≤ b → 0 ≤ c → a * c ≤ b * c
-- mul_le_mul_of_nonpos_left (a b c : ℝ) : b ≤ a → c ≤ 0 → c * a ≤ c * b
-- mul_le_mul_of_nonpos_right (a b c : ℝ) : b ≤ a → c ≤ 0 → a * c ≤ b * c
-- Lakins 2.1.6: For all real numbers x, 0 ≤ x².
-- Proof still to be written (placeholder `sorry`).
theorem L216 : ∀ (x : ℝ), 0 ≤ x * x :=
begin
sorry
end
-- The following lemmas may be useful in the following proof.
-- div_le_of_le_mul_of_pos (a b c : ℝ) : a ≤ b * c → c > 0 → a / c ≤ b
-- le_div_of_mul_le_of_pos (a b c : ℝ) : a * c ≤ b → c > 0 → a ≤ b / c
-- Lakins 2.1.11: For all real numbers x and y, if x ≤ y then x ≤ (x + y)/2 ≤ y.
-- Proof still to be written (placeholder `sorry`).
theorem L2111 : ∀ (x y : ℝ), x ≤ y → x ≤ (x + y)/2 ∧ (x + y)/2 ≤ y :=
begin
sorry
end
-- The following lemmas may be useful in the next proof.
-- ne_of_lt (a b : ℝ) : a < b → a ≠ b
-- mul_pos (a b : ℝ) : a > 0 → b > 0 → a * b > 0
-- mul_neg_of_pos_of_neg (a b : ℝ) : a > 0 → b < 0 → a * b < 0
-- mul_neg_of_neg_of_pos (a b : ℝ) : a < 0 → b > 0 → a * b < 0
-- mul_pos_of_neg_of_neg (a b : ℝ) : a < 0 → b < 0 → a * b > 0
-- Lakins 2.1.7: For all real numbers x and y, if xy = 0, then x = 0 or y = 0.
-- Proof still to be written (placeholder `sorry`).
-- NOTE: L219 below depends on this theorem, so it must be completed first.
theorem L217 : ∀ (x y : ℝ), x * y = 0 → x = 0 ∨ y = 0 :=
begin
sorry
end
-- This is a really tricky proof!
-- Lakins 2.1.9: For all real numbers x and y, if x² = y², then x = y or x = −y; i.e., x = ±y.
-- Strategy: (x - y) * (x + y) rewrites to x*x - y*y, which is 0 by the
-- hypothesis H; L217 then gives that one factor vanishes, and each case is
-- turned into the desired equality with eq_of_sub_eq_zero.
theorem L219 : ∀ (x y : ℝ), x * x = y * y → x = y ∨ x = -y :=
begin
intros x y H,
-- key factorisation fact, proved by the calc chain below via L217
have L : x - y = 0 ∨ x + y = 0,
begin
apply L217,
calc (x - y) * (x + y)
= x * (x + y) - y * (x + y) : by rw sub_mul ...
= (x * x + x * y) - y * (x + y) : by rw mul_add ...
= (x * x + x * y) - (y * x + y * y) : by rw mul_add ...
= ((x * x + x * y) - y * x) - y * y : by rw sub_sub ...
= ((x * x + x * y) - x * y) - y * y : by ac_refl ...
= x * x - y * y : by rw add_sub_cancel ...
= x * x - x * x : by rw H ...
= 0 : by rw sub_self,
end,
cases L,
-- case x - y = 0: conclude x = y
{ left,
apply eq_of_sub_eq_zero,
assumption
},
-- case x + y = 0: rewrite x - (-y) = 0 and conclude x = -y
{ right,
apply eq_of_sub_eq_zero,
rw sub_neg_eq_add,
assumption,
},
end
|
{"author": "UVM-M52", "repo": "week-5-maddiehutchinson", "sha": "7fd99c56b0a9a313ed1b462e9a8e50c0d66857aa", "save_path": "github-repos/lean/UVM-M52-week-5-maddiehutchinson", "path": "github-repos/lean/UVM-M52-week-5-maddiehutchinson/week-5-maddiehutchinson-7fd99c56b0a9a313ed1b462e9a8e50c0d66857aa/src/week05.lean"}
|
import sys
sys.path.append('..')
import numpy as np
import apis
from apis import apis_system
from apis import apis_basic
from .hvdc import calculate_dc_line_power
def init_powerflow_solution():
    '''
    Initial power flow solution with flat start.

    Builds the scheduled complex power injection per bus and the flat-start
    voltage profile that the iterative solver begins from.

    Args: None
    Rets:
        (1) S, array, node complex power injection in pu (on system SBASE).
        (2) Um, array, initialized node voltage magnitude in pu.
        (3) Ua, array, node voltage phase angle in rad.
    '''
    buses = apis.get_all_devices('BUS')
    # Flat start: |V| = 1.0 pu and angle = 0 rad for every bus, then
    # overridden below where a bus carries a scheduled value.
    Um, Ua = np.ones(len(buses)), np.zeros(len(buses))
    S = np.zeros(len(buses), dtype=complex)
    for bus in buses:
        IDE = apis.get_device_data(bus, 'BUS', 'IDE')
        if IDE == 1:
            # PQ bus: reset the stored voltage/angle to the flat-start values.
            apis.set_device_data(bus, 'BUS', 'VM', 1.0)
            apis.set_device_data(bus, 'BUS', 'VA', 0.0)
        if IDE == 2:
            # PV bus: keep the scheduled magnitude, zero the stored angle.
            apis.set_device_data(bus, 'BUS', 'VA', 0.0)
            i = apis_system.get_bus_num_after_renumber(bus)
            VM = apis.get_device_data(bus, 'BUS', 'VM')
            Um[i] = VM
        if IDE == 3:
            # Slack bus: keep both the scheduled magnitude and angle.
            i = apis_system.get_bus_num_after_renumber(bus)
            VM = apis.get_device_data(bus, 'BUS', 'VM')
            VA = apis.get_device_data(bus, 'BUS', 'VA')
            Um[i] = VM
            Ua[i] = apis_basic.convert_deg_to_rad(VA)
    # Loads draw power from their bus: negative injection.
    loads = apis.get_all_devices('LOAD')
    for load in loads:
        i = apis_system.get_bus_num_after_renumber(load)
        PL = apis.get_device_data(load, 'LOAD', 'PL')
        QL = apis.get_device_data(load, 'LOAD', 'QL')
        S[i] = - PL - 1j * QL + S[i]
    # Conventional, wind and PV generation inject active power only here;
    # reactive output is determined later by the power flow solution.
    generators = apis.get_all_devices('GENERATOR')
    for generator in generators:
        i = apis_system.get_bus_num_after_renumber(generator)
        PG = apis.get_device_data(generator, 'GENERATOR', 'PG')
        S[i] = PG + S[i]
    wt_gens = apis.get_all_devices('WT GENERATOR')
    for wt_gen in wt_gens:
        PG = apis.get_device_data(wt_gen, 'WT GENERATOR', 'PG')
        i = apis_system.get_bus_num_after_renumber(wt_gen)
        S[i] = PG + S[i]
    pv_units = apis.get_all_devices('PV UNIT')
    for pv_unit in pv_units:
        i = apis_system.get_bus_num_after_renumber(pv_unit)
        PG = apis.get_device_data(pv_unit, 'PV UNIT', 'PG')
        S[i] = PG + S[i]
    # HVDC links: the rectifier terminal absorbs P and Q; the inverter
    # terminal delivers P while still absorbing Q.
    hvdcs = apis.get_all_devices('HVDC')
    for hvdc in hvdcs:
        VM = apis.get_device_data(hvdc[0], 'BUS', 'VM')
        BASKV = apis.get_device_data(hvdc[0], 'BUS', 'BASKV')
        Vtr = BASKV * VM  # rectifier-side terminal voltage in kV
        VM = apis.get_device_data(hvdc[1], 'BUS', 'VM')
        BASKV = apis.get_device_data(hvdc[1], 'BUS', 'BASKV')
        Vti = BASKV * VM  # inverter-side terminal voltage in kV
        Pacr, Qacr, Paci, Qaci = calculate_dc_line_power(hvdc, Vtr, Vti)  # Calculate the initial injection power of HVDC
        i = apis_system.get_bus_num_after_renumber(hvdc[0])
        j = apis_system.get_bus_num_after_renumber(hvdc[1])
        S[i] = - Pacr - 1j * Qacr + S[i]
        S[j] = Paci - 1j * Qaci + S[j]
    SBASE = apis_system.get_system_base_data('SBASE')
    S = S / SBASE  # Convert power to per-unit on the system MVA base
    return S, Um, Ua
def update_powerflow(S, Um, Ua):
    '''
    Update component data after solving power flow.

    Fills in the injections the solver left unknown (Q at PV/slack buses,
    P at the slack bus) from the admittance matrix and the solved voltages,
    then writes voltages and generator outputs back to the devices.

    Args:
        (1) S, array, node complex power injection in pu.
        (2) Um, array, solved node voltage magnitude in pu.
        (3) Ua, array, node voltage phase angle in rad.
    Rets: None
    '''
    PQ_num = apis_system.get_system_bus_number('PQ')
    PV_num = apis_system.get_system_bus_number('PV')
    Y_mat = apis_system.get_system_Y_network_matrix('basic')
    buses = apis.get_all_devices('BUS')
    # Buses are assumed renumbered in the order [PQ..., PV..., slack...],
    # which is what the two range() starts below rely on.
    for i in range(PQ_num, len(buses)):  # reactive injection of PV and slack buses
        ang_d = Ua[i] - Ua
        S[i] = S[i].real + 1j * Um[i] * np.sum(Um * (Y_mat[i, :].real * np.sin(ang_d) - Y_mat[i, :].imag * np.cos(ang_d)))
    for i in range(PQ_num+PV_num, len(buses)):  # active injection of the slack bus(es)
        ang_d = Ua[i] - Ua
        S[i] = 1j * S[i].imag + Um[i] * np.sum(Um * (Y_mat[i, :].real * np.cos(ang_d) + Y_mat[i, :].imag * np.sin(ang_d)))
    Ua = apis_basic.convert_rad_to_deg(Ua)  # device data stores angles in degrees
    for bus in buses:  # write solved voltage magnitude and angle back to each bus
        i = apis_system.get_bus_num_after_renumber(bus)
        apis.set_device_data(bus, 'BUS', 'VM', Um[i])
        apis.set_device_data(bus, 'BUS', 'VA', Ua[i])
    generators = apis.get_all_devices('GENERATOR')
    SBASE = apis_system.get_system_base_data('SBASE')
    for generator in generators:  # write solved generator outputs (MVA base units)
        i = apis_system.get_bus_num_after_renumber(generator)
        apis.set_device_data(generator, 'GENERATOR', 'QG', S[i].imag * SBASE)
        # NOTE(review): the generator handle is queried as a 'BUS' device here;
        # this works only if generator ids coincide with bus ids — confirm.
        IDE = apis.get_device_data(generator, 'BUS', 'IDE')
        if IDE == 3:
            # The slack-bus generator also picks up the solved active power.
            apis.set_device_data(generator, 'GENERATOR', 'PG', S[i].real * SBASE)
    return
def show_powerflow_result():
    '''
    Print a per-bus report of the solved power flow: voltage magnitude,
    angle, load and generation (the latter two in per-unit on SBASE).
    Args: None
    Rets: None
    '''
    print('-------------------------------------')
    print('Power flow solution reports:')
    print('BUS, VOLTAGE/pu, ANGLE/deg, LOAD/pu, GENERATE/pu')
    buses = apis.get_all_devices('BUS')
    loads = apis.get_all_devices('LOAD')
    generators = apis.get_all_devices('GENERATOR')
    wt_generators = apis.get_all_devices('WT GENERATOR')
    pv_units = apis.get_all_devices('PV UNIT')
    SBASE = apis_system.get_system_base_data('SBASE')

    def complex_power(device, model, p_field, q_field):
        # Read one device's (P, Q) pair and convert it to per-unit.
        p = apis.get_device_data(device, model, p_field)
        q = apis.get_device_data(device, model, q_field)
        return (p + 1j * q) / SBASE

    for bus in buses:
        VM = apis.get_device_data(bus, 'BUS', 'VM')
        VA = apis.get_device_data(bus, 'BUS', 'VA')
        load_power = 0 + 1j * 0
        if bus in loads:
            load_power = complex_power(bus, 'LOAD', 'PL', 'QL')
        # Later device categories deliberately override earlier ones,
        # matching the original report's precedence.
        gen_power = 0 + 1j * 0
        if bus in generators:
            gen_power = complex_power(bus, 'GENERATOR', 'PG', 'QG')
        if bus in wt_generators:
            gen_power = complex_power(bus, 'WT GENERATOR', 'PG', 'QG')
        if bus in pv_units:
            gen_power = complex_power(bus, 'PV UNIT', 'PG', 'QG')
        print('#{}, {:.4f}, {:.3f}, {:.3f}, {:.3f}'.format(bus, VM, VA, load_power, gen_power))
    return
|
{"hexsha": "0824f72b7bbd3081fe2180d3f0d66718bd08bbde", "size": 6815, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/powerflow/branch.py", "max_stars_repo_name": "yanzhaochang/PSATools-Python", "max_stars_repo_head_hexsha": "7524d7eeed26db9fba93c0ea03a7c8c0bfee7410", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-29T10:36:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-01T06:54:10.000Z", "max_issues_repo_path": "src/powerflow/branch.py", "max_issues_repo_name": "yanzhaochang/PSATools-Python", "max_issues_repo_head_hexsha": "7524d7eeed26db9fba93c0ea03a7c8c0bfee7410", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/powerflow/branch.py", "max_forks_repo_name": "yanzhaochang/PSATools-Python", "max_forks_repo_head_hexsha": "7524d7eeed26db9fba93c0ea03a7c8c0bfee7410", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-01T06:54:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-01T06:54:24.000Z", "avg_line_length": 39.3930635838, "max_line_length": 123, "alphanum_fraction": 0.5684519442, "include": true, "reason": "import numpy", "num_tokens": 1898}
|
[STATEMENT]
lemma sturm_meta_spec: "(\<And>x::real. P x) \<Longrightarrow> P x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>x. P x) \<Longrightarrow> P x
[PROOF STEP]
by simp
|
{"llama_tokens": 79, "file": "Sturm_Sequences_Sturm_Method", "length": 1}
|
#--------------------------
# Tensorflow Keras imports
#--------------------------
import os
import warnings
import logging
from distutils.util import strtobool
from packaging import version
import re
os.environ['NUMEXPR_MAX_THREADS'] = '8' # suppress warning from NumExpr on machines with many CPUs
# TensorFlow
SUPPRESS_DEP_WARNINGS = strtobool(os.environ.get('SUPPRESS_DEP_WARNINGS', '1'))
if SUPPRESS_DEP_WARNINGS: # 2021-11-12: copied this here to properly suppress TF/CUDA warnings in Kaggle notebooks, etc.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
DISABLE_V2_BEHAVIOR = strtobool(os.environ.get('DISABLE_V2_BEHAVIOR', '0'))
try:
if DISABLE_V2_BEHAVIOR:
# TF2-transition
ACC_NAME = 'acc'
VAL_ACC_NAME = 'val_acc'
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.compat.v1 import keras
print('Using DISABLE_V2_BEHAVIOR with TensorFlow')
else:
# TF2
ACC_NAME = 'accuracy'
VAL_ACC_NAME = 'val_accuracy'
import tensorflow as tf
from tensorflow import keras
K = keras.backend
# suppress autograph warnings
tf.autograph.set_verbosity(1)
if version.parse(tf.__version__) < version.parse('2.0'):
raise Exception('As of v0.8.x, ktrain needs TensorFlow 2. Please upgrade TensorFlow.')
os.environ['TF_KERAS'] = '1' # to use keras_bert package below with tf.Keras
except ImportError:
warnings.warn('TensorFlow is not installed. You can still use ktrain\'s scikit-learn models and pretrained PyTorch models: '+\
'text.zsl.ZeroShotClassifier, text.translation.Translator, text.summarization.TransformerSummarizer, '+\
'text.speech.Transcriber, and text.eda.TopicModel. To train neural network models, you will need to install TensorFlow: '+\
'pip install tensorflow')
keras = None
K = None
# for TF backwards compatibility (e.g., support for TF 2.3.x):
try:
MobileNetV3Small = keras.applications.MobileNetV3Small
pre_mobilenetv3small = keras.applications.mobilenet_v3.preprocess_input
HAS_MOBILENETV3 = True
except:
HAS_MOBILENETV3 = False
#----------------------------------------------------------
# standards
#----------------------------------------------------------
#import warnings # imported above
import sys
import os
import os.path
import re
import operator
from collections import Counter
from distutils.version import StrictVersion
import tempfile
import pickle
from abc import ABC, abstractmethod
import math
import itertools
import csv
import copy
import glob
import codecs
import urllib.request
import zipfile
import gzip
import shutil
import string
import random
import json
import mimetypes
#----------------------------------------------------------
# external dependencies
#----------------------------------------------------------
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import rgb2hex
plt.ion() # interactive mode
import sklearn
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.manifold import TSNE
from sklearn.preprocessing import LabelEncoder
#from sklearn.externals import joblib
import joblib
from scipy import sparse # utils
from scipy.sparse import csr_matrix
import pandas as pd
try:
# fastprogress >= v0.2.0
from fastprogress.fastprogress import master_bar, progress_bar
except:
# fastprogress < v0.2.0
from fastprogress import master_bar, progress_bar
import requests
# verify=False added to avoid headaches from some corporate networks
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# text processing
import syntok.segmenter as segmenter
# multilingual text processing
import langdetect
import jieba
import cchardet as chardet
# 'bert' text classification model
try:
import keras_bert
from keras_bert import Tokenizer as BERT_Tokenizer
except ImportError:
warnings.warn("keras_bert (and/or its TensorFlow dependency) is not installed. keras_bert is only needed only for 'bert' text classification model")
# transformers for models in 'text' module
logging.getLogger("transformers").setLevel(logging.ERROR)
try:
import transformers
except ImportError:
warnings.warn("transformers not installed - needed by various models in 'text' module")
try:
from PIL import Image
PIL_INSTALLED = True
except:
PIL_INSTALLED = False
SG_ERRMSG = 'ktrain currently uses a forked version of stellargraph v0.8.2. '+\
'Please install with: '+\
'pip install https://github.com/amaiya/stellargraph/archive/refs/heads/no_tf_dep_082.zip'
ALLENNLP_ERRMSG = 'To use ELMo embedings, please install allenlp:\n' +\
'pip install allennlp'
# ELI5
KTRAIN_ELI5_TAG = '0.10.1-1'
# Suppress Warnings
def set_global_logging_level(level=logging.ERROR, prefices=[""]):
    """
    Override logging levels of different modules based on their name as a prefix.
    It needs to be invoked after the modules have been loaded so that their loggers have been initialized.

    Args:
        - level: desired level. e.g. logging.INFO. Optional. Default is logging.ERROR
        - prefices: list of one or more str prefices to match (e.g. ["transformers", "torch"]). Optional.
          Default is `[""]` to match all active loggers.
          The match is a case-sensitive `module_name.startswith(prefix)`
    """
    # Anchored alternation over all prefixes, e.g. '^(?:transformers|torch)'.
    matcher = re.compile('^(?:%s)' % '|'.join(prefices))
    for logger_name in list(logging.root.manager.loggerDict):
        if matcher.match(logger_name):
            logging.getLogger(logger_name).setLevel(level)
# Silence noisy third-party output unless the user opted out via the
# SUPPRESS_DEP_WARNINGS environment variable (parsed at the top of this file).
if SUPPRESS_DEP_WARNINGS:
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # hide TensorFlow C++ INFO/WARNING/ERROR logs
    warnings.simplefilter(action='ignore', category=FutureWarning)
    # elevate warnings to errors for debugging dependencies
    #warnings.simplefilter('error', FutureWarning)
# Quiet the loggers of known-chatty dependencies (applied unconditionally).
set_global_logging_level(logging.ERROR, ["transformers", "nlp", "torch", "tensorflow", "tensorboard", "wandb", 'mosestokenizer', 'shap'])
|
{"hexsha": "f52ec79dde08035d4ad963f47fe0d7fddf97aab4", "size": 6415, "ext": "py", "lang": "Python", "max_stars_repo_path": "ktrain/imports.py", "max_stars_repo_name": "rubensmau/ktrain", "max_stars_repo_head_hexsha": "de32b87b9d3996f35882e7ca18273356c72d342c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ktrain/imports.py", "max_issues_repo_name": "rubensmau/ktrain", "max_issues_repo_head_hexsha": "de32b87b9d3996f35882e7ca18273356c72d342c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ktrain/imports.py", "max_forks_repo_name": "rubensmau/ktrain", "max_forks_repo_head_hexsha": "de32b87b9d3996f35882e7ca18273356c72d342c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2361809045, "max_line_length": 152, "alphanum_fraction": 0.7116134061, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1466}
|
simulationMode = False
if not simulationMode:
import TensorflowProcessingModule as TPM
from imutils.video.pivideostream import PiVideoStream
import MathModule as MM
import math, time, copy
import cv2
import numpy as np
from multiprocessing import Process, RawValue, RawArray
#module for analyzing the camera image and detecting the ball
class ImageProcessor:
    """Analyzes the camera image in a separate process: locates the board,
    tracks the ball with a neural network, and maintains an obstacle map.
    Results are shared with other processes via RawValue/RawArray."""
    # camera parameters
    camera_resolution = (256, 256)
    camera_framerate = 40
    corner_detecton_area = (0.09, 0.09, 0.11, 0.11)  # rectangle in which the board edge is searched; it is replicated for each corner of the image
    detection_image_resolution = (200, 200)
    detection_image_resolution_cropped = (-1, -1)  # actual warped-frame size, filled in at runtime
    # size of the obstacle bitmap
    obstacle_map_size = 60
    obstacle_map_update_delta = 40  # update the obstacle map every N frames
    def __init__(self, _simulationCommunicator=None):
        print("ImageProcessor object created")
        self.simulationCommunicator = _simulationCommunicator
        # result values of the image processing, shared between processes
        self.result_x = RawValue('f', 0.0)
        self.result_y = RawValue('f', 0.0)
        self.key = RawValue('i', 0)  # control flag; -666 requests shutdown
        self.corners = np.zeros((4, 2), np.int32)  # positions of the board corners
        self.obstacle_map = RawArray('i', ImageProcessor.obstacle_map_size**2)
        self.obstacle_map_update_counter = 0
    def getBallPosition(self):  # returns the ball position (normalized to [0, 1])
        if simulationMode: return self.simulationCommunicator.getBallPosition()
        return (self.result_x.value, self.result_y.value)
    def StartProcessing(self):  # starts the process that analyzes the image
        print("Starting image processing")
        self.process = Process(target=ImageProcessor.ProcessImage, args=(self,))
        self.process.daemon = True
        self.process.start()
        #ImageProcessor.ProcessImage(self)
    def StopProcessing(self):  # issues the command to stop image processing
        print("Stopping image processing")
        self.key.value = -666
        self.process.terminate()
    def ProcessImage(self):  # processes the image: grabs a frame from the camera and runs the analysis on it
        # buffer for sharing the obstacle map with other processes
        self.obstacle_map_np = np.frombuffer(self.obstacle_map, dtype=np.int32).reshape(ImageProcessor.obstacle_map_size**2)
        # ball tracker parameters (window centered in the detection image)
        self.ballTracker_pos = [ImageProcessor.detection_image_resolution[0]//2, ImageProcessor.detection_image_resolution[1]//2]
        self.ballTracker_size = 40
        self.ballTracker_result = [0, 0]
        if not simulationMode:
            self.tensorflowProcessor = TPM.TensorflowProcessor()
            videoStream = PiVideoStream(resolution=ImageProcessor.camera_resolution, framerate=ImageProcessor.camera_framerate).start()  # start the thread that reads consecutive frames from the camera
        else:
            videoStream = self.simulationCommunicator
        time.sleep(1)
        self.frame_original = videoStream.read()
        lastTime = time.time()
        a = 190  # frame counter primed near the report threshold so the first FPS report comes quickly
        lastID = 0
        saveCounter = 0
        saveCount = 0  # number of frames to dump to disk (0 = disabled)
        while True:
            if self.key.value == -666: break  # shutdown requested
            # simple counter of frames processed per second
            a = a + 1
            if a > 200:
                if ImageProcessor.detection_image_resolution_cropped[0] == -1:
                    # lazily record the actual frame size once a frame exists
                    ImageProcessor.detection_image_resolution_cropped = (np.size(self.frame_original, 0), np.size(self.frame_original, 1))
                print(str(a * 1.0 / (time.time() - lastTime)))
                lastTime = time.time()
                a = 0
            # synchronize frame grabbing with the camera frequency:
            # spin until the stream hands out a new frame object
            while True:
                frameGrabbed = videoStream.read()
                ID = id(frameGrabbed)
                if ID != lastID:
                    self.frame_original = frameGrabbed
                    lastID = ID
                    break
                elif not simulationMode:
                    time.sleep(0.01)
            # frame intended for debugging
            #self.frame_debug = copy.copy(self.frame_original)
            if not simulationMode:
                newCorners = ImageProcessor.FindBoardCorners(self)  # find the positions of the board corners
                for i in range(4):
                    self.corners[i] = (MM.lerp(self.corners[i][0], newCorners[i][0], 0.3), MM.lerp(self.corners[i][1], newCorners[i][1], 0.3))  # smooth the corner-position updates
            else: self.corners = self.simulationCommunicator.FindBoardCorners()
            ImageProcessor.ChangePerspective(self)  # warp the detected board so it looks like a square
            if not simulationMode: ImageProcessor.UpdateBallTracker(self)  # update the ball tracker
            else:
                pos = self.simulationCommunicator.getBallPosition()
                self.ballTracker_result[0] = pos[0] * ImageProcessor.detection_image_resolution_cropped[0]
                self.ballTracker_result[1] = pos[1] * ImageProcessor.detection_image_resolution_cropped[1]
            # publish the found ball position in the process-shared variables
            self.result_x.value = self.ballTracker_result[0] / ImageProcessor.detection_image_resolution_cropped[0]
            self.result_y.value = self.ballTracker_result[1] / ImageProcessor.detection_image_resolution_cropped[1]
            ImageProcessor.UpdateObstacleMap(self)
            #cv2.imshow("Frame debug", self.frame_debug)
            if saveCounter < saveCount:
                cv2.imwrite("Frame" + str(saveCounter) + ".png", self.frame_original)
                saveCounter += 1
            cv2.imshow("Frame Casted", self.frame_original)
            key = cv2.waitKey(1) & 0xFF
            #if key == ord("q"):
            #    break
        videoStream.stop()
    # updates the ball tracker: crop the tracker window, run the neural
    # network on it, and re-center the window on the detected ball
    def UpdateBallTracker(self):
        # keep the tracker window inside the (cropped) detection image
        self.ballTracker_pos[0] = MM.clamp(self.ballTracker_pos[0], 0, ImageProcessor.detection_image_resolution_cropped[0] - self.ballTracker_size)
        self.ballTracker_pos[1] = MM.clamp(self.ballTracker_pos[1], 0, ImageProcessor.detection_image_resolution_cropped[1] - self.ballTracker_size)
        self.ballTracker_pos[0] = int(self.ballTracker_pos[0])
        self.ballTracker_pos[1] = int(self.ballTracker_pos[1])
        # prepare the camera frame for analysis
        tracker_frame = self.frame_original[self.ballTracker_pos[1]:self.ballTracker_pos[1]+self.ballTracker_size,
                                            self.ballTracker_pos[0]:self.ballTracker_pos[0]+self.ballTracker_size]
        tracker_frame = cv2.cvtColor(tracker_frame, cv2.COLOR_BGR2GRAY)
        #cv2.imshow("Tracker", tracker_frame)
        #cv2.imshow("Tracker denoised", cv2.fastNlMeansDenoising(tracker_frame, None, 10, 7, 21))
        # analyze the frame with the neural network
        result = self.tensorflowProcessor.getBallPosition(tracker_frame) * (self.ballTracker_size-1)
        self.ballTracker_result[0] = self.ballTracker_pos[0] + result[0]
        self.ballTracker_result[1] = self.ballTracker_pos[1] + result[1]
        # visually mark the ball position
        cv2.circle(self.frame_original, tuple(np.round(self.ballTracker_result).astype("int")), 1, (0, 255, 0), -1)
        # update the tracker position (re-center the window on the ball)
        self.ballTracker_pos[0] = round(self.ballTracker_result[0]) - self.ballTracker_size // 2
        self.ballTracker_pos[1] = round(self.ballTracker_result[1]) - self.ballTracker_size // 2
    # finds the positions of the board edges by running the corner network
    # on a small patch around each image corner
    def FindBoardCorners(self):
        corners = np.zeros((4, 2), dtype=np.int32)
        corner_detection_area_pixels = [round(self.corner_detecton_area[0] * self.camera_resolution[0]),
                                        round(self.corner_detecton_area[1] * self.camera_resolution[1]),
                                        round(self.corner_detecton_area[2] * self.camera_resolution[0]),
                                        round(self.corner_detecton_area[3] * self.camera_resolution[1])]
        for i in range(4):
            flipX = False
            flipY = False
            detectionArea = copy.copy(corner_detection_area_pixels)  # top-left corner by default
            if i == 1 or i == 2:
                detectionArea[0] = self.camera_resolution[0] - detectionArea[0] - detectionArea[2]
                flipX = True
            if i == 3 or i == 2:
                detectionArea[1] = self.camera_resolution[1] - detectionArea[1] - detectionArea[3]
                flipY = True
            rect = (detectionArea[0], detectionArea[1], detectionArea[0] + detectionArea[2], detectionArea[1] + detectionArea[3])
            #cv2.rectangle(self.frame_debug, (rect[0], rect[1]), (rect[2], rect[3]), (0, 255, 0), 1);
            img = self.frame_original[rect[1]:rect[3], rect[0]:rect[2]]
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, (30, 30), interpolation=cv2.INTER_NEAREST)
            # mirror the patch so the network always sees a top-left-style corner
            if flipX and flipY: img = cv2.flip(img, -1)
            elif flipX: img = cv2.flip(img, 1)
            elif flipY: img = cv2.flip(img, 0)
            #cv2.imshow("Corner " + str(i), img)
            result = self.tensorflowProcessor.getCornerPosition(img)
            # mirror the prediction back into the original orientation
            if flipX: result[0] = 1.0 - result[0]
            if flipY: result[1] = 1.0 - result[1]
            corners[i] = (round(result[0] * detectionArea[2]) + detectionArea[0], round(result[1] * detectionArea[3]) + detectionArea[1])
            #cv2.circle(self.frame_debug, tuple(corners[i]), 1, (0, 0, 255), -1)
        return corners
    # changes the perspective of the camera image so the tilted board is not
    # visible, i.e. the board appears as an axis-aligned square
    def ChangePerspective(self):
        pts = np.array(self.corners, np.float32)
        res = self.detection_image_resolution
        enlarge = 3  # inset of the target quad, in pixels
        pts2 = np.float32([[enlarge,enlarge],[res[0]-enlarge,enlarge],[res[0]-enlarge, res[1]-enlarge], [enlarge, res[1]-enlarge]])
        M = cv2.getPerspectiveTransform(pts, pts2)
        self.frame_original = cv2.warpPerspective(self.frame_original, M, res)
        # paint over the white fields in the corners of the board
        for x in (0, res[0]):
            for y in (0, res[1]):
                cv2.circle(self.frame_original, (x, y), 10, (0, 0, 0), -1)
    # updates the obstacle map of the board (every obstacle_map_update_delta frames)
    def UpdateObstacleMap(self):
        self.obstacle_map_update_counter += 1
        if self.obstacle_map_update_counter >= ImageProcessor.obstacle_map_update_delta:
            self.obstacle_map_update_counter = 0
            frame = cv2.resize(self.frame_original, (ImageProcessor.obstacle_map_size, ImageProcessor.obstacle_map_size), interpolation=cv2.INTER_NEAREST)
            frame = np.int32(frame)
            # 2*R - G - B per pixel (BGR channel order): high for reddish
            # pixels — presumably obstacles are red; confirm with hardware.
            frame = 2 * frame[...,2] - frame[...,1] - frame[...,0]
            np.copyto(self.obstacle_map_np, frame.ravel())
            #self.obstacle_map = frame[...,2].ravel()
|
{"hexsha": "2702ded874a778118a2f2e4c08e01dbcf139ffa0", "size": 11360, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonCode/ImageProcessingModule.py", "max_stars_repo_name": "janw23/Ballance", "max_stars_repo_head_hexsha": "f085d2d03c31a8e3be74d4c82300f571cc3cad65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-16T21:24:24.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-19T15:24:03.000Z", "max_issues_repo_path": "PythonCode/ImageProcessingModule.py", "max_issues_repo_name": "janw23/Ballance", "max_issues_repo_head_hexsha": "f085d2d03c31a8e3be74d4c82300f571cc3cad65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonCode/ImageProcessingModule.py", "max_forks_repo_name": "janw23/Ballance", "max_forks_repo_head_hexsha": "f085d2d03c31a8e3be74d4c82300f571cc3cad65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.3913043478, "max_line_length": 194, "alphanum_fraction": 0.6245598592, "include": true, "reason": "import numpy", "num_tokens": 2898}
|
import pandas as pd
import numpy as np
from dataclasses import dataclass, InitVar, field
from enum import Enum, Flag, auto, unique
from functools import reduce
print("\nWarning: pre-loading selected modules, see your config", end='')
|
{"hexsha": "9948661f9f157cc1a137345ce1e15902114b75be", "size": 236, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ipython/profile_default/startup/00-import_selected_modules.py", "max_stars_repo_name": "dpopchev/linux", "max_stars_repo_head_hexsha": "b5348b9543276b06366497681924441ef4f43005", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ipython/profile_default/startup/00-import_selected_modules.py", "max_issues_repo_name": "dpopchev/linux", "max_issues_repo_head_hexsha": "b5348b9543276b06366497681924441ef4f43005", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ipython/profile_default/startup/00-import_selected_modules.py", "max_forks_repo_name": "dpopchev/linux", "max_forks_repo_head_hexsha": "b5348b9543276b06366497681924441ef4f43005", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2222222222, "max_line_length": 73, "alphanum_fraction": 0.7796610169, "include": true, "reason": "import numpy", "num_tokens": 54}
|
# used from predict.py
import pandas as pd
from collections import OrderedDict
import numpy as np
import random
from sato.extract.helpers import utils
from sherlock.features.bag_of_characters import extract_bag_of_characters_features
from sherlock.features.bag_of_words import extract_bag_of_words_features
from sherlock.features.word_embeddings import extract_word_embeddings_features
from sherlock.features.paragraph_vectors import infer_paragraph_embeddings_features
n_samples = 1000
vec_dim = 400
def extract_sherlock_features(df_dic):
    '''
    Extract Sherlock feature vectors for every column of one table.

    Args:
        df_dic: dict with keys
            'df' (pd.DataFrame): the table to featurize,
            'locator': identifier of the table's source,
            'dataset_id': identifier of the table.
    Rets:
        pd.DataFrame with one row per column: locator/dataset/field metadata
        plus the bag-of-characters, word-embedding, paragraph-vector and
        bag-of-words feature sets. Columns whose extraction fails keep only
        their metadata.
    '''
    df, locator, dataset_id = df_dic['df'], df_dic['locator'], df_dic['dataset_id']
    # One ordered feature dict per column, pre-created so metadata is present
    # even for columns whose feature extraction fails below.
    all_field_features = [OrderedDict() for _ in range(len(df.columns))]
    for field_order, field_name in enumerate(df.columns):
        v = df[field_name]
        field_id = field_order
        features = all_field_features[field_order]
        features['locator'] = locator
        features['dataset_id'] = dataset_id
        features['field_id'] = '{}:{}'.format(dataset_id, field_id)  # field id in the filtered table
        features['header'] = field_name
        features['header_c'] = utils.canonical_header(field_name)
        n_values = len(v)
        try:
            try:
                # Probe the column; if slicing up to the last valid index
                # raises (e.g. an all-NaN column), skip this field.
                # NOTE(review): the probed value is otherwise unused, and
                # `v[:v.last_valid_index()]` may drop the last valid entry
                # (label vs positional slicing) — kept as-is to preserve the
                # original skip behavior; confirm the intent.
                field_values = list(v[:v.last_valid_index()])
            except Exception as e:
                field_values = v
                continue
            # Sample (with replacement) if more than `n_samples` values.
            if n_values > n_samples:
                n_values = n_samples
                v = random.choices(v, k=n_values)
            raw_sample = pd.Series(v).astype(str)
            f_ch = extract_bag_of_characters_features(raw_sample, n_values)
            f_word = extract_word_embeddings_features(raw_sample)
            f_par = infer_paragraph_embeddings_features(raw_sample, vec_dim)
            f_stat = extract_bag_of_words_features(raw_sample)
            # Merge the four feature sets into this column's row.
            # (Fix: loop variables renamed so they no longer shadow the
            # column Series `v`.)
            for feature_set in [f_ch, f_word, f_par, f_stat]:
                for feat_name, feat_value in feature_set.items():
                    features[feat_name] = feat_value
        except Exception as e:
            print('Single field exception:', e)
            continue
    return pd.DataFrame(all_field_features)
|
{"hexsha": "35569439a3077dcea88b8a06490fd25423a989a6", "size": 2385, "ext": "py", "lang": "Python", "max_stars_repo_path": "sato/extract/feature_extraction/sherlock_features.py", "max_stars_repo_name": "VIDA-NYU/sato", "max_stars_repo_head_hexsha": "8fb51787b36114df13f54c1acd11df12a66ad3e4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sato/extract/feature_extraction/sherlock_features.py", "max_issues_repo_name": "VIDA-NYU/sato", "max_issues_repo_head_hexsha": "8fb51787b36114df13f54c1acd11df12a66ad3e4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sato/extract/feature_extraction/sherlock_features.py", "max_forks_repo_name": "VIDA-NYU/sato", "max_forks_repo_head_hexsha": "8fb51787b36114df13f54c1acd11df12a66ad3e4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1363636364, "max_line_length": 122, "alphanum_fraction": 0.679245283, "include": true, "reason": "import numpy", "num_tokens": 492}
|
#include <Server.h>
#include <Session.h>
#include <Database.h>
#include <iterator>
#include <boost/asio.hpp>
#include <signal.h>
namespace NHttpProxy {
// Construct a server error carrying a human-readable description;
// simply forwards the message to std::runtime_error.
TServerError::TServerError(const std::string& message)
    : std::runtime_error(message)
{}
// Pimpl for TServer: owns the Boost.Asio event loop, the listening
// acceptor, signal handling, and the set of live client sessions.
class TServer::TImpl {
public:
    TImpl()
        : IOContext_(1)          // concurrency hint: single-threaded io_context
        , Signals_(IOContext_)
        , Acceptor_(IOContext_)
    {
        // Shut the server down gracefully on the usual termination signals.
        Signals_.add(SIGINT);
        Signals_.add(SIGTERM);
        Signals_.add(SIGQUIT);
    }

    // Resolve host:port and bind the acceptor to the first resolved
    // endpoint. Throws TServerError when resolution yields no endpoints.
    void Bind(const std::string& host, const std::string& port) {
        boost::asio::ip::tcp::resolver resolver(IOContext_);
        boost::asio::ip::tcp::resolver::query query(host, port);
        auto endpoints = resolver.resolve(query);
        if (endpoints.empty()) {
            throw TServerError("Couldn't resolve " + host + ":" + port);
        }
        boost::asio::ip::tcp::endpoint endpoint = *endpoints.begin();
        Acceptor_.open(endpoint.protocol());
        Acceptor_.set_option(
            boost::asio::ip::tcp::acceptor::reuse_address(true)
        );
        Acceptor_.bind(endpoint);
    }

    // Start listening and run the event loop. Returns after a termination
    // signal has closed the acceptor and torn down every session.
    void Run() {
        Signals_.async_wait(
            [this](...) {
                // Signal received: refuse new connections, stop and drop
                // all active sessions.
                Acceptor_.close();
                for (TSession& session : Sessions_) {
                    session.Stop();
                }
                Sessions_.clear();
            }
        );
        Acceptor_.listen();
        AsyncAccept();
        IOContext_.run();
    }

private:
    // Arm one asynchronous accept; the handler re-arms itself after each
    // connection until the acceptor is closed by the signal handler.
    void AsyncAccept() {
        Acceptor_.async_accept(
            [this](boost::system::error_code ec, boost::asio::ip::tcp::socket socket) {
                if (!Acceptor_.is_open()) {
                    return;  // shutting down — do not re-arm
                }
                if (!ec) {
                    Serve(std::move(socket));
                }
                AsyncAccept();
            }
        );
    }

    // Create a session for an accepted socket and start it. Sessions live in
    // a std::list so the iterator captured by the end-callback stays valid
    // until that element is erased.
    // NOTE(review): the end-callback destroys the very session it is invoked
    // from; TSession must call it only when it no longer touches its own
    // state afterwards — confirm against Session.cpp.
    void Serve(boost::asio::ip::tcp::socket socket) {
        Sessions_.emplace_back(std::move(socket), IOContext_, Database_);
        Sessions_.back().SetEndCallback(
            [this, it = std::prev(Sessions_.end())]() {
                Sessions_.erase(it);
            }
        );
        Sessions_.back().Start();
    }

    boost::asio::io_context IOContext_;
    boost::asio::signal_set Signals_;
    boost::asio::ip::tcp::acceptor Acceptor_;
    std::list<TSession> Sessions_;   // live sessions; list keeps iterators stable
    TDatabase Database_;
};
// Public facade: all work is delegated to the pimpl.
TServer::TServer()
    : Impl_(new TImpl())
{}

// Special members are defined out of line so they are instantiated here,
// where TImpl is complete (presumably Impl_ is a smart pointer declared in
// Server.h with an incomplete TImpl — confirm against the header).
TServer::~TServer() = default;
TServer::TServer(TServer&&) = default;
TServer& TServer::operator=(TServer&&) = default;

// Thin forwarding wrappers over the pimpl.
void TServer::Bind(const std::string& host, const std::string& port) {
    Impl_->Bind(host, port);
}

void TServer::Run() {
    Impl_->Run();
}
}
|
{"hexsha": "8c1c61df83b8f41ff8a8beb6db653a0f52aab3d1", "size": 2700, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "individual/lib/Server.cpp", "max_stars_repo_name": "vasalf/NetworksLab2020HSE", "max_stars_repo_head_hexsha": "b1971981921ae6288e741a578182bfd5279d792f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "individual/lib/Server.cpp", "max_issues_repo_name": "vasalf/NetworksLab2020HSE", "max_issues_repo_head_hexsha": "b1971981921ae6288e741a578182bfd5279d792f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "individual/lib/Server.cpp", "max_forks_repo_name": "vasalf/NetworksLab2020HSE", "max_forks_repo_head_hexsha": "b1971981921ae6288e741a578182bfd5279d792f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6842105263, "max_line_length": 87, "alphanum_fraction": 0.5425925926, "num_tokens": 621}
|
[STATEMENT]
lemma [code_unfold]:
fixes literal :: Literal and clause :: Clause
shows "literal el clause = List.member clause literal"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. literal el clause = List.member clause literal
[PROOF STEP]
by (auto simp add: member_def)
|
{"llama_tokens": 91, "file": "SATSolverVerification_SatSolverCode", "length": 1}
|
'''
This is the homework for SJTU IE308 Image Processing by Prof. Yi Xu.
Copyright by Sizhe Wei, Dec 2019; ID: 517021910796
HW No.2: BM3D Denoising Implementation & False Color Transfer
If you have any questions, feel free to contact me at sizhewei@sjtu.edu.cn
'''
import cv2
import numpy
import math
import numpy.matlib
cv2.setUseOptimized(True)
# BM3D parameter initialization
sigma = 25  # assumed standard deviation of the additive Gaussian noise
Threshold_Hard3D = 2.7 * sigma  # hard-thresholding level for the 3D transform coefficients
Step1_Blk_Size = 4  # block (patch) side length in step 1
Step1_Blk_Step = 1  # sliding step between reference blocks (set to 1 here)
Step1_Search_Step = 1  # stride used when scanning the search window
Match_threshold_1 = 125 * Step1_Blk_Size ** 2  # maximum block distance to count as "similar" in step 1
Step1_max_matched_cnt = 16  # maximum number of blocks kept in one 3D group
Step1_Search_Window = 15  # candidate blocks are searched in an NS*NS neighborhood centered on the reference block
Step2_Blk_Size = 4  # step-2 (Wiener) block side length
Step2_Blk_Step = 1  # sliding step between reference blocks in step 2
Step2_Search_Step = 1  # search stride in step 2
Match_threshold_2 = 220. / 16 * Step2_Blk_Size ** 2  # similarity threshold for step-2 block matching
Step2_max_matched_cnt = 32  # maximum group size in step 2
Step2_Search_Window = 25  # step-2 search window side length
Kaiser_Beta = 1.5  # beta of the Kaiser window used for aggregation weighting
def init(img, _blk_size, _Kaiser_Beta):
    """Allocate the accumulation buffers and the 2-D Kaiser window.

    Returns a tuple (estimate accumulator, weight accumulator, Kaiser
    window): the accumulators are zero float matrices with the shape of
    ``img``; the window is the outer product of a length-``_blk_size``
    1-D Kaiser window (shape parameter ``_Kaiser_Beta``) with itself.
    """
    buffer_shape = img.shape
    estimate_acc = numpy.matrix(numpy.zeros(buffer_shape, dtype=float))
    weight_acc = numpy.matrix(numpy.zeros(buffer_shape, dtype=float))
    # 2-D separable Kaiser window used to taper each aggregated block.
    win_1d = numpy.matrix(numpy.kaiser(_blk_size, _Kaiser_Beta))
    kaiser_2d = numpy.array(win_1d.T * win_1d)
    return estimate_acc, weight_acc, kaiser_2d
def Locate_blk(i, j, blk_step, block_Size, width, height):
    """Return the top-left corner of the (i, j)-th reference block.

    The nominal corner is (i*blk_step, j*blk_step); when a block of side
    ``block_Size`` placed there would stick out of a width x height
    image, the corner is pulled back so the block lies fully inside.
    """
    point_x = i * blk_step
    if point_x + block_Size >= width:
        point_x = width - block_Size
    point_y = j * blk_step
    if point_y + block_Size >= height:
        point_y = height - block_Size
    return numpy.array((point_x, point_y), dtype=int)
def Define_SearchWindow(_noisyImg, _BlockPoint, _WindowSize, Blk_Size):
    """
    Return the top-left corner of the search window, clamped to the image.

    The window of side ``_WindowSize`` is centered on the block whose
    top-left corner is ``_BlockPoint``; when it would leave the image it
    is shifted back inside.

    Bug fix: the lower/right clamp of the SECOND coordinate previously
    compared against ``shape[0]`` (the first image dimension) for both
    axes, which is wrong for non-square images; it now uses ``shape[1]``
    for the second axis.
    """
    point_x = _BlockPoint[0]  # reference corner, first axis
    point_y = _BlockPoint[1]  # reference corner, second axis
    # Corners of the centered window (may be fractional; the final
    # int conversion truncates toward zero, as before).
    LX = point_x + Blk_Size / 2 - _WindowSize / 2  # up-left x
    LY = point_y + Blk_Size / 2 - _WindowSize / 2  # up-left y
    RX = LX + _WindowSize  # down-right x
    RY = LY + _WindowSize  # down-right y
    # Clamp the window so it stays fully inside the image.
    if LX < 0:
        LX = 0
    elif RX > _noisyImg.shape[0]:
        LX = _noisyImg.shape[0] - _WindowSize
    if LY < 0:
        LY = 0
    elif RY > _noisyImg.shape[1]:
        LY = _noisyImg.shape[1] - _WindowSize
    return numpy.array((LX, LY), dtype=int)
def Step1_fast_match(_noisyImg, _BlockPoint):
    """fast matching"""
    '''
    * return most similar block including itself
    *_noisyImg
    *_BlockPoint: the corrdinate of current point
    '''
    (present_x, present_y) = _BlockPoint  # current coordinate
    Blk_Size = Step1_Blk_Size
    Search_Step = Step1_Search_Step
    Threshold = Match_threshold_1
    max_matched = Step1_max_matched_cnt
    Window_size = Step1_Search_Window
    # Output buffers: positions of the group members and their 2-D-DCT data.
    blk_positions = numpy.zeros((max_matched, 2), dtype=int)  # to record the location of similar blocks
    Final_similar_blocks = numpy.zeros((max_matched, Blk_Size, Blk_Size), dtype=float)  # to record the result
    # Slot 0 always holds the reference block itself, in the DCT domain.
    img = _noisyImg[present_x: present_x + Blk_Size, present_y: present_y + Blk_Size]
    dct_img = cv2.dct(img.astype(numpy.float64))  # dct
    Final_similar_blocks[0, :, :] = dct_img  # save the reference block
    blk_positions[0, :] = _BlockPoint
    # Candidate search window around the reference block.
    Window_location = Define_SearchWindow(_noisyImg, _BlockPoint, Window_size, Blk_Size)
    blk_num = (Window_size - Blk_Size) / Search_Step  # number of candidate positions per axis
    blk_num = int(blk_num)
    (present_x, present_y) = Window_location
    similar_blocks = numpy.zeros((blk_num ** 2, Blk_Size, Blk_Size), dtype=float)
    m_Blkpositions = numpy.zeros((blk_num ** 2, 2), dtype=int)
    Distances = numpy.zeros(blk_num ** 2, dtype=float)  # record the similarity
    # Raster-scan the search window; distance is the per-pixel squared L2
    # distance between DCT coefficients.  present_x/present_y are mutated
    # in place, so statement order in this loop matters.
    matched_cnt = 0
    for i in range(blk_num):
        for j in range(blk_num):
            tem_img = _noisyImg[present_x: present_x + Blk_Size, present_y: present_y + Blk_Size]
            dct_Tem_img = cv2.dct(tem_img.astype(numpy.float64))
            # dct, then l2-norm
            m_Distance = numpy.linalg.norm((dct_img - dct_Tem_img)) ** 2 / (Blk_Size ** 2)
            # keep candidates below the threshold; a zero distance
            # (typically the reference block itself) is skipped because
            # the reference is already stored at slot 0
            if m_Distance < Threshold and m_Distance > 0:
                similar_blocks[matched_cnt, :, :] = dct_Tem_img
                m_Blkpositions[matched_cnt, :] = (present_x, present_y)
                Distances[matched_cnt] = m_Distance
                matched_cnt += 1
            present_y += Search_Step
        present_x += Search_Step
        present_y = Window_location[1]
    # keep only the filled prefix of the distance buffer
    Distances = Distances[:matched_cnt]
    # sort candidates by ascending distance
    Sort = Distances.argsort()
    # Count includes the reference block at slot 0, hence the +1; it is
    # capped at max_matched so the group never exceeds the buffers.
    if matched_cnt < max_matched:
        Count = matched_cnt + 1
    else:
        Count = max_matched
    # copy the best candidates into the output group (slot 0 already holds
    # the reference); positions go to blk_positions in the same order
    if Count > 0:
        for i in range(1, Count):
            Final_similar_blocks[i, :, :] = similar_blocks[Sort[i - 1], :, :]
            blk_positions[i, :] = m_Blkpositions[Sort[i - 1], :]
    return Final_similar_blocks, blk_positions, Count
def Step1_3DFiltering(_similar_blocks):
    '''
    * 3D Filtering: for every pixel position (i, j), take the vector across
    * the stacked (already 2-D-DCT'd) blocks, DCT it, hard-threshold the
    * coefficients against Threshold_Hard3D, and inverse-DCT it in place.
    * Returns the filtered group and the count of surviving non-zero
    * coefficients (used later as the aggregation weight).
    '''
    statis_nonzero = 0  # running count of non-zero coefficients
    m_Shape = _similar_blocks.shape
    for i in range(m_Shape[1]):
        for j in range(m_Shape[2]):
            # print _similar_blocks[:, i, j], type(_similar_blocks[:, i, j])
            tem_Vct_Trans = cv2.dct(_similar_blocks[:, i, j])
            # hard threshold: zero out small transform coefficients
            tem_Vct_Trans[numpy.abs(tem_Vct_Trans[:]) < Threshold_Hard3D] = 0.
            statis_nonzero += tem_Vct_Trans.nonzero()[0].size
            # NOTE(review): the [0] index relies on the 2-D shape cv2
            # returns for this 1-D input — verify with your OpenCV build.
            _similar_blocks[:, i, j] = cv2.idct(tem_Vct_Trans)[0]
    return _similar_blocks, statis_nonzero
def Aggregation_hardthreshold(_similar_blocks, blk_positions, m_basic_img, m_wight_img, _nonzero_num, Count, Kaiser):
    """Scatter the filtered blocks back into the estimate accumulators.

    Each block is inverse-DCT'd, weighted by the Kaiser window and by the
    inverse of the group's non-zero coefficient count, and added into
    ``m_basic_img`` at its recorded position; the same weight map is added
    into ``m_wight_img`` so the caller can normalize afterwards.
    """
    blk_h = _similar_blocks.shape[1]
    blk_w = _similar_blocks.shape[2]
    # Guard against a zero coefficient count (division by zero below).
    nonzero = _nonzero_num if _nonzero_num >= 1 else 1
    weight = (1. / (sigma ** 2 * nonzero)) * Kaiser
    for idx in range(Count):
        top = blk_positions[idx, 0]
        left = blk_positions[idx, 1]
        patch = weight * cv2.idct(_similar_blocks[idx, :, :])
        m_basic_img[top:top + blk_h, left:left + blk_w] += patch
        m_wight_img[top:top + blk_h, left:left + blk_w] += weight
def BM3D_step1(_noisyImg):
    """Step 1: Basic estimate — block matching, hard-threshold 3D filtering
    and Kaiser-weighted aggregation over the whole image."""
    # Initialization
    (width, height) = _noisyImg.shape  # width = row, height = col
    block_Size = Step1_Blk_Size  # block size
    blk_step = Step1_Blk_Step  # step
    Width_num = (width - block_Size) / blk_step
    Height_num = (height - block_Size) / blk_step
    # empty accumulator image, empty weight accumulator, Kaiser window
    Basic_img, m_Wight, m_Kaiser = init(_noisyImg, Step1_Blk_Size, Kaiser_Beta)
    # Slide a reference block over the image; the +2 extends the range so
    # border blocks (clamped by Locate_blk) are also processed.
    for i in range(int(Width_num + 2)):
        for j in range(int(Height_num + 2)):
            # m_blockPoint reference point: up-left corner, clamped inside the image
            m_blockPoint = Locate_blk(i, j, blk_step, block_Size, width, height)  # ensure the point is inside the image
            Similar_Blks, Positions, Count = Step1_fast_match(_noisyImg, m_blockPoint)  # Count = number of grouped blocks
            Similar_Blks, statis_nonzero = Step1_3DFiltering(Similar_Blks)  # group after hard-threshold filtering
            Aggregation_hardthreshold(Similar_Blks, Positions, Basic_img, m_Wight, statis_nonzero, Count, m_Kaiser)
    # Normalize the accumulated estimate by the accumulated weights.
    Basic_img[:, :] /= m_Wight[:, :]
    basic = numpy.matrix(Basic_img, dtype=int)
    # NOTE(review): astype returns a NEW array and this call discards its
    # result, so the function actually returns the int-typed matrix above
    # rather than a uint8 one — confirm whether that is intended.
    basic.astype(numpy.uint8)
    return basic
'''
Color Image BM3D
def BM3D_step1(_noisyImg):
yImg, uImg, vImg = cv2.split(_noisyImg)
# yImg.astype(numpy.int)
# uImg.astype(numpy.int)
# vImg.astype(numpy.int)
(width, height) = yImg.shape # width = row, height = col
block_Size = Size_Step1_blk
blk_step = Step1_Blk_Step
Width_num = (width - block_Size) / blk_step
Height_num = (height - block_Size) / blk_step
Basic_img, m_Wight, m_Kaiser = init(yImg, Size_Step1_blk, Beta_Kaiser)
Basic_img_U, m_Wight_U, m_Kaiser_U = init(yImg, Size_Step1_blk, Beta_Kaiser)
Basic_img_V, m_Wight_V, m_Kaiser_V = init(yImg, Size_Step1_blk, Beta_Kaiser)
for i in range(int(Width_num + 2)):
for j in range(int(Height_num + 2)):
m_blockPoint = Locate_blk(i, j, blk_step, block_Size, width, height)
Similar_Blks, Positions, Count = Step1_fast_match(yImg, m_blockPoint)
Similar_Blks_U = numpy.zeros(Similar_Blks.shape)
Similar_Blks_V = numpy.zeros(Similar_Blks.shape)
for cnt in range(Count):
Similar_Blks_U[cnt] = uImg[Positions[cnt, 0]: Positions[cnt, 0] + block_Size, Positions[cnt, 1]: Positions[cnt, 1] + block_Size]
Similar_Blks_V[cnt] = vImg[Positions[cnt, 0]: Positions[cnt, 0] + block_Size, Positions[cnt, 1]: Positions[cnt, 1] + block_Size]
Similar_Blks, statis_nonzero = Step1_3DFiltering(Similar_Blks)
Similar_Blks_U, statis_nonzero_U = Step1_3DFiltering(Similar_Blks_U)
Similar_Blks_V, statis_nonzero_V = Step1_3DFiltering(Similar_Blks_V)
Aggregation_hardthreshold(Similar_Blks, Positions, Basic_img, m_Wight, statis_nonzero, Count, m_Kaiser)
Aggregation_hardthreshold(Similar_Blks_U, Positions, Basic_img_U, m_Wight_U, statis_nonzero_U, Count, m_Kaiser_U)
Aggregation_hardthreshold(Similar_Blks_V, Positions, Basic_img_V, m_Wight_V, statis_nonzero_V, Count, m_Kaiser_V)
Basic_img[:, :] /= m_Wight[:, :]
Basic_img_U[:, :] /= m_Wight_U[:, :]
Basic_img_V[:, :] /= m_Wight_V[:, :]
basic_Y = numpy.matrix(Basic_img, dtype=int)
basic_U = numpy.matrix(Basic_img_U, dtype=int)
basic_V = numpy.matrix(Basic_img_V, dtype=int)
basic_Y = basic_Y.astype(numpy.uint8)
basic_U = basic_U.astype(numpy.uint8)
basic_V = basic_V.astype(numpy.uint8)
basic = cv2.merge((basic_Y, basic_U, basic_V))
return basic
'''
def Step2_fast_match(_Basic_img, _noisyImg, _BlockPoint):
    '''
    * Block matching for the Wiener step: similarity is measured on the
    * BASIC estimate in the spatial domain, and for every accepted position
    * both the DCT of the basic block and the DCT of the corresponding
    * noisy block are collected.
    '''
    (present_x, present_y) = _BlockPoint
    Blk_Size = Step2_Blk_Size
    Threshold = Match_threshold_2
    Search_Step = Step2_Search_Step
    max_matched = Step2_max_matched_cnt
    Window_size = Step2_Search_Window
    # Output buffers: positions, basic-estimate DCT blocks, noisy DCT blocks.
    blk_positions = numpy.zeros((max_matched, 2), dtype=int)
    Final_similar_blocks = numpy.zeros((max_matched, Blk_Size, Blk_Size), dtype=float)
    Final_noisy_blocks = numpy.zeros((max_matched, Blk_Size, Blk_Size), dtype=float)
    # Slot 0: the reference block from the basic estimate ...
    img = _Basic_img[present_x: present_x + Blk_Size, present_y: present_y + Blk_Size]
    dct_img = cv2.dct(img.astype(numpy.float32))
    Final_similar_blocks[0, :, :] = dct_img
    # ... and its counterpart from the noisy image.
    n_img = _noisyImg[present_x: present_x + Blk_Size, present_y: present_y + Blk_Size]
    dct_n_img = cv2.dct(n_img.astype(numpy.float32))
    Final_noisy_blocks[0, :, :] = dct_n_img
    blk_positions[0, :] = _BlockPoint
    Window_location = Define_SearchWindow(_noisyImg, _BlockPoint, Window_size, Blk_Size)
    blk_num = (Window_size - Blk_Size) / Search_Step
    blk_num = int(blk_num)
    (present_x, present_y) = Window_location
    similar_blocks = numpy.zeros((blk_num ** 2, Blk_Size, Blk_Size), dtype=float)
    m_Blkpositions = numpy.zeros((blk_num ** 2, 2), dtype=int)
    Distances = numpy.zeros(blk_num ** 2, dtype=float)
    # Raster-scan the window; the distance is computed in the spatial
    # domain (the DCT-domain variant is left commented out), and the DCT
    # is only taken for blocks that pass the threshold.
    matched_cnt = 0
    for i in range(blk_num):
        for j in range(blk_num):
            tem_img = _Basic_img[present_x: present_x + Blk_Size, present_y: present_y + Blk_Size]
            # dct_Tem_img = cv2.dct(tem_img.astype(numpy.float32))
            # m_Distance = numpy.linalg.norm((dct_img - dct_Tem_img)) ** 2 / (Blk_Size ** 2)
            m_Distance = numpy.linalg.norm((img - tem_img)) ** 2 / (Blk_Size ** 2)
            if m_Distance < Threshold and m_Distance > 0:
                dct_Tem_img = cv2.dct(tem_img.astype(numpy.float32))
                similar_blocks[matched_cnt, :, :] = dct_Tem_img
                m_Blkpositions[matched_cnt, :] = (present_x, present_y)
                Distances[matched_cnt] = m_Distance
                matched_cnt += 1
            present_y += Search_Step
        present_x += Search_Step
        present_y = Window_location[1]
    Distances = Distances[:matched_cnt]
    Sort = Distances.argsort()
    # Count includes the reference block at slot 0.
    if matched_cnt < max_matched:
        Count = matched_cnt + 1
    else:
        Count = max_matched
    if Count > 0:
        for i in range(1, Count):
            Final_similar_blocks[i, :, :] = similar_blocks[Sort[i - 1], :, :]
            blk_positions[i, :] = m_Blkpositions[Sort[i - 1], :]
            # Fetch the noisy block at the matched position.
            # NOTE(review): the reference noisy block above was transformed
            # in float32 but these use float64 — confirm intended.
            (present_x, present_y) = m_Blkpositions[Sort[i - 1], :]
            n_img = _noisyImg[present_x: present_x + Blk_Size, present_y: present_y + Blk_Size]
            Final_noisy_blocks[i, :, :] = cv2.dct(n_img.astype(numpy.float64))
    return Final_similar_blocks, Final_noisy_blocks, blk_positions, Count
def Step2_3DFiltering(_Similar_Bscs, _Similar_Imgs):
    '''
    * Wiener 3D filtering: for every pixel position (i, j), the DCT
    * spectrum of the basic-estimate group defines an empirical Wiener
    * shrinkage coefficient, which is then applied to the DCT of the
    * noisy group.  The filtered result overwrites _Similar_Bscs in place.
    * Returns the filtered group and the per-position weight map.
    '''
    m_Shape = _Similar_Bscs.shape
    Wiener_wight = numpy.zeros((m_Shape[1], m_Shape[2]), dtype=float)
    for i in range(m_Shape[1]):
        for j in range(m_Shape[2]):
            tem_vector = _Similar_Bscs[:, i, j]
            tem_Vct_Trans = numpy.matrix(cv2.dct(tem_vector))
            # Empirical Wiener coefficient: |S|^2 / (|S|^2 + sigma^2),
            # where |S|^2 is the energy of the basic-estimate spectrum.
            Norm_2 = numpy.float64(tem_Vct_Trans.T * tem_Vct_Trans)
            m_weight = Norm_2 / (Norm_2 + sigma ** 2)
            Wiener_wight[i, j] = m_weight
            #if m_weight != 0: Wiener_wight[i, j] = 1. / (m_weight ** 2 * sigma ** 2)
            # else:
            # Wiener_wight[i, j] = 10000
            # RES=IDCT(WEIGHT(DCT(NOISE_BLOCK)))
            tem_vector = _Similar_Imgs[:, i, j]
            tem_Vct_Trans = m_weight * cv2.dct(tem_vector)
            # NOTE(review): the [0] index relies on the 2-D shape cv2
            # returns for this 1-D input — verify with your OpenCV build.
            _Similar_Bscs[:, i, j] = cv2.idct(tem_Vct_Trans)[0]
    return _Similar_Bscs, Wiener_wight
def Aggregation_Wiener(_Similar_Blks, _Wiener_wight, blk_positions, m_basic_img, m_wight_img, Count, Kaiser):
    """Accumulate the Wiener-filtered blocks and their weights.

    Every block is inverse-DCT'd, multiplied elementwise by the Wiener
    weight map and the Kaiser window, and added at its recorded position
    in ``m_basic_img``; the combined weight map is added to
    ``m_wight_img`` for the caller's final normalization.
    """
    blk_h = _Similar_Blks.shape[1]
    blk_w = _Similar_Blks.shape[2]
    combined_weight = _Wiener_wight * Kaiser
    for idx in range(Count):
        top = blk_positions[idx, 0]
        left = blk_positions[idx, 1]
        restored = _Wiener_wight * cv2.idct(_Similar_Blks[idx, :, :]) * Kaiser
        m_basic_img[top:top + blk_h, left:left + blk_w] += restored
        m_wight_img[top:top + blk_h, left:left + blk_w] += combined_weight
def BM3D_2nd_step(_basicImg, _noisyImg):
    '''Step 2. Final estimate: Wiener collaborative filtering guided by the
    step-1 basic estimate.'''
    (width, height) = _noisyImg.shape
    block_Size = Step2_Blk_Size
    blk_step = Step2_Blk_Step
    Width_num = (width - block_Size) / blk_step
    Height_num = (height - block_Size) / blk_step
    # Accumulators and Kaiser window for the aggregation stage.
    m_img, m_Wight, m_Kaiser = init(_noisyImg, block_Size, Kaiser_Beta)
    # Slide a reference block over the image (+2 covers the borders).
    for i in range(int(Width_num + 2)):
        for j in range(int(Height_num + 2)):
            m_blockPoint = Locate_blk(i, j, blk_step, block_Size, width, height)
            Similar_Blks, Similar_Imgs, Positions, Count = Step2_fast_match(_basicImg, _noisyImg, m_blockPoint)
            Similar_Blks, Wiener_wight = Step2_3DFiltering(Similar_Blks, Similar_Imgs)
            Aggregation_Wiener(Similar_Blks, Wiener_wight, Positions, m_img, m_Wight, Count, m_Kaiser)
    # Normalize by the accumulated weights.
    m_img[:, :] /= m_Wight[:, :]
    Final = numpy.matrix(m_img, dtype=int)
    # NOTE(review): astype returns a new array; its result is discarded
    # here, so the int-typed matrix is what gets returned — confirm
    # whether a uint8 result was intended.
    Final.astype(numpy.uint8)
    return Final
# add noise:
def Gauss_noise(img, sigma=25):
    """Return ``img`` plus zero-mean Gaussian noise scaled by ``sigma``.

    The noise is standard-normal (numpy.matlib.randn) multiplied by
    ``sigma``; the input image is not modified.
    """
    return img + sigma * numpy.matlib.randn(img.shape)
# psnr
def PSNR(img1, img2):
    """Peak signal-to-noise ratio (dB) between two images, assuming an
    8-bit peak value of 255.

    The difference is cast to int64 and squared, then averaged into the
    mean squared error (the variable is named RMSE but holds the MSE).
    NOTE: the subtraction happens BEFORE the int64 cast, so unsigned
    inputs can wrap — callers in this file pass signed/float matrices.

    Bug fix: identical images previously raised ZeroDivisionError; the
    function now returns ``float('inf')`` in that case.
    """
    D = numpy.array(img1 - img2, dtype=numpy.int64)
    D[:, :] = D[:, :] ** 2
    RMSE = D.sum() / img1.size
    if RMSE == 0:  # identical images: infinite PSNR instead of a crash
        return float('inf')
    psnr = 10 * math.log10(float(255. ** 2) / RMSE)
    return psnr
'''
# Color Image BM3D
def Gauss_noise_color(img, sigma=0.001):
mean = 0
image = numpy.array(img / 255, dtype=float)
noise = numpy.random.normal(mean, sigma ** 0.5, image.shape)
out = image + noise
if out.min() < 0:
low_clip = -1.
else:
low_clip = 0.
out = numpy.clip(out, low_clip, 1.0)
out = numpy.uint8(out * 255)
return out
def PSNR(img1, img2):
D = numpy.array(numpy.int64(img1) - numpy.int64(img2), dtype=numpy.int64)
print(D)
D[:, :, :] = D[:, :, :] ** 2
RMSE = D.sum() / img1.size
psnr = 10 * math.log10(float(255. ** 2) / RMSE)
return psnr
'''
if __name__ == '__main__':
    cv2.setUseOptimized(True)
    # Load the test image as grayscale; keep a clean copy as the reference.
    img_name = "images/eGrass_xs.jpg"
    ori = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)
    cv2.imwrite("results/ori.jpg", ori)
    # Degrade with additive Gaussian noise, then denoise in two BM3D steps.
    img = Gauss_noise(ori)
    cv2.imwrite("results/noise.jpg", img)
    print('The PSNR After adding noise %f' % PSNR(ori, img))
    # Step 1: basic estimate (hard-threshold collaborative filtering).
    e1 = cv2.getTickCount()
    Basic_img = BM3D_step1(img)
    e2 = cv2.getTickCount()
    time = (e2 - e1) / cv2.getTickFrequency()
    print ("The Processing time of the Step 1 is %f s" % time)
    cv2.imwrite("results/Basic3.jpg", Basic_img)
    print ("The PSNR between the two img of the First step is %f" % PSNR(ori, Basic_img))
    # Step 2: final estimate (Wiener collaborative filtering).
    Final_img = BM3D_2nd_step(Basic_img, img)
    e3 = cv2.getTickCount()
    time = (e3 - e2) / cv2.getTickFrequency()
    print ("The Processing time of the Step 2 is %f s" % time)
    cv2.imwrite("results/Final3.jpg", Final_img)
    print ("The PSNR between the two img of the Second step is %f" % PSNR(ori, Final_img))
    time = (e3 - e1) / cv2.getTickFrequency()
    print ("The total Processing time is %f s" % time)
'''
Color Image BM3D
'''
# if __name__ == '__main__':
# cv2.setUseOptimized(True)
# img_name = "images/eGrass.jpg"
# ori = cv2.imread(img_name,1)
# cv2.imwrite("results/ori.jpg", ori)
# # print(ori[:,:,0])
# # print("-----------------------")
# oriYUV = cv2.cvtColor(ori, cv2.COLOR_BGR2YUV)
# yImg, uImg, vImg = cv2.split(oriYUV)
# noiBGR = Gauss_noise_color(ori)
# # print(noiBGR[:,:,0])
# noiYUV = cv2.cvtColor(noiBGR,cv2.COLOR_BGR2YUV)
# cv2.imwrite("results/noise.jpg", noiBGR)
# # cv2.imshow('noiYUV-Y',noiYUV[:,:,0])
# # cv2.waitKey(0)
# # print("-----------------------")
# # print(noiBGR[:,:,0]-ori[:,:,0])
# # print(noiYUV[:,:,0]-oriYUV[:,:,0])
# print('The PSNR After add noise %f' % PSNR(ori, noiBGR))
# e1 = cv2.getTickCount()
# Basic_img = BM3D_step1(noiYUV)
# e2 = cv2.getTickCount()
# time = (e2 - e1) / cv2.getTickFrequency()
# Basic_img_test = cv2.cvtColor(Basic_img, cv2.COLOR_YUV2BGR)
# cv2.imwrite("results/Basic_test.jpg", Basic_img_test)
|
{"hexsha": "dd36d33f761f75bbae4f5de8335a6da57c2611b6", "size": 19180, "ext": "py", "lang": "Python", "max_stars_repo_path": "Codework_and_Images/bm3d/bm3d.py", "max_stars_repo_name": "SizheWei/bm3d", "max_stars_repo_head_hexsha": "e6f3a084cafb2bc87b3860dd80eb0aa42398f51b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-12-09T09:12:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-21T08:00:01.000Z", "max_issues_repo_path": "Codework_and_Images/bm3d/bm3d.py", "max_issues_repo_name": "SizheWei/bm3d", "max_issues_repo_head_hexsha": "e6f3a084cafb2bc87b3860dd80eb0aa42398f51b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-12-24T04:00:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-27T02:48:37.000Z", "max_forks_repo_path": "Codework_and_Images/bm3d/bm3d.py", "max_forks_repo_name": "SizheWei/bm3d", "max_forks_repo_head_hexsha": "e6f3a084cafb2bc87b3860dd80eb0aa42398f51b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6817288802, "max_line_length": 144, "alphanum_fraction": 0.6509906152, "include": true, "reason": "import numpy", "num_tokens": 5720}
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
def find_matching_points(img1, img2, method='sift', match_method='bf', plot=False):
    '''
    Find matching keypoints in img1 and img2.

    method: 'orb' or 'sift' feature detector ('sift' needs opencv-contrib).
    match_method: 'bf' (brute-force) or 'flann' matcher.
    plot: if True, display and save a visualization of the 10 best matches.
    Returns two (N, 2) float arrays of corresponding point coordinates,
    sorted by ascending match distance.
    '''
    if method == 'orb':
        # Initiate ORB detector
        detector = cv2.ORB_create()
        norm = cv2.NORM_HAMMING
    elif method == 'sift':
        # Initiate SIFT detector
        detector = cv2.xfeatures2d.SIFT_create()
        #Since SIFT returns a detectorType() of CV32F (=float)
        #you cannot use any Hamming-distance as matcher.
        #Hamming-distance works only for binary feature-types like ORB
        norm = cv2.NORM_L2
    # find the keypoints and descriptors
    kp1, des1 = detector.detectAndCompute(img1,None)
    kp2, des2 = detector.detectAndCompute(img2,None)
    print('{},{} keypoints found in img1, img2.'.format(len(kp1), len(kp2)))
    # define matcher matching the detector's descriptor norm
    if match_method == 'bf':
        matcher = cv2.BFMatcher(norm, crossCheck=True)
    elif match_method == 'flann':
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks=50) # or pass empty dictionary
        matcher = cv2.FlannBasedMatcher(index_params, search_params)
    # Match descriptors.
    matches = matcher.match(des1,des2)
    # Sort them in the order of their distance (best matches first).
    matches = sorted(matches, key = lambda x:x.distance)
    print('{} matches found.'.format(len(matches)))
    if (plot):
        # Draw first 10 matches.
        img3 = img1.copy()
        img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], img3, flags=2)
        plt.imshow(img3),plt.show()
        # NOTE(review): assumes an 'output' directory already exists
        cv2.imwrite('output/matches_' + method + '.jpg',img3)
    # Extract the matched keypoint coordinates, aligned pairwise.
    points1 = [kp1[match.queryIdx].pt for match in matches]
    points2 = [kp2[match.trainIdx].pt for match in matches]
    return np.array(points1), np.array(points2)
def registration(img1, img2, method='sift', match_method='bf', plot=False):
    """Warp img2 into img1's frame using a RANSAC-estimated homography.

    Matching keypoints are found between the two images, a homography
    mapping img2's points onto img1's is fitted with RANSAC (5.0 px
    reprojection threshold), and img2 is warped with it.  Returns the
    aligned image with img2's width and height.
    """
    src_pts, dst_pts = find_matching_points(img2, img1, method, match_method, plot)
    homography, _inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    height = img2.shape[0]
    width = img2.shape[1]
    return cv2.warpPerspective(img2, homography, (width, height))
|
{"hexsha": "5f5d4317913e35398061494faa90d952ac0308fe", "size": 2319, "ext": "py", "lang": "Python", "max_stars_repo_path": "registration.py", "max_stars_repo_name": "duembgen/NIRdehazing", "max_stars_repo_head_hexsha": "b985762a5a80f20874751abd7f4b3683e4f2e8b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2018-06-25T09:34:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-24T01:20:04.000Z", "max_issues_repo_path": "registration.py", "max_issues_repo_name": "majedelhelou/NIRdehazing", "max_issues_repo_head_hexsha": "96e81fe9e0c3b892b35ca2ff691ee35fc4d4c415", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "registration.py", "max_forks_repo_name": "majedelhelou/NIRdehazing", "max_forks_repo_head_hexsha": "96e81fe9e0c3b892b35ca2ff691ee35fc4d4c415", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-08-09T07:26:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-10T14:24:30.000Z", "avg_line_length": 36.8095238095, "max_line_length": 83, "alphanum_fraction": 0.6511427339, "include": true, "reason": "import numpy", "num_tokens": 642}
|
import Flux
# Position-wise feed-forward block: two 1x1 convolutions applied
# independently at every position of the input.
struct PointwiseFeedForward
    FirstFilter::Flux.Conv   # expansion: dims -> 4*dims with ReLU
    SecondFilter::Flux.Conv  # projection: 4*dims -> dims, linear
end
# Convenience constructor: build the expand/contract 1x1 conv pair for a
# model width of `dims`, moving both layers to the GPU when available.
PointwiseFeedForward(dims::Integer) = PointwiseFeedForward(
    Flux.Conv((1, 1), dims => 4 * dims, Flux.relu) |> Flux.gpu,
    Flux.Conv((1, 1), 4 * dims => dims) |> Flux.gpu
)
# Register the struct's fields as trainable parameters with Flux.
Flux.@treelike PointwiseFeedForward
# Apply the layer to a 3-D input: destructure size(x) as (T, D, N), insert
# a singleton second dimension so Conv sees (T, 1, D, N), run both
# filters, then drop the singleton dimension again.
function (m::PointwiseFeedForward)(x::AbstractArray{T, 3} where T)
    # x = permutedims(repeat(x, outer=[1, 1, 1, 1]), [1, 4, 2, 3])
    (T, D, N) = size(x)
    x = reshape(x, (T, 1, D, N))
    x = m.SecondFilter(m.FirstFilter(x))
    return dropdims(x, dims=2)
end
|
{"hexsha": "a74580f6ceab5c8e56395231d25a67bbc61126d5", "size": 583, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FeedForward.jl", "max_stars_repo_name": "brainsqueeze/SequenceModels.jl", "max_stars_repo_head_hexsha": "990d8217150807fcc8989eac562af3d63153f43c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/FeedForward.jl", "max_issues_repo_name": "brainsqueeze/SequenceModels.jl", "max_issues_repo_head_hexsha": "990d8217150807fcc8989eac562af3d63153f43c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-08-21T11:22:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-25T14:50:46.000Z", "max_forks_repo_path": "src/FeedForward.jl", "max_forks_repo_name": "brainsqueeze/SequenceModels.jl", "max_forks_repo_head_hexsha": "990d8217150807fcc8989eac562af3d63153f43c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.15, "max_line_length": 67, "alphanum_fraction": 0.6432246998, "num_tokens": 205}
|
"""
Bookcrossing dataset transformation methods
"""
import os
import shutil
import urllib
import zipfile
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
def download_bookcrossing(url='http://www2.informatik.uni-freiburg.de/~cziegler/BX/BX-CSV-Dump.zip',
                          zip_path='bookcrossing.zip'):
    """
    Download the BookCrossing CSV dump and unpack it into ./bookcrossing.

    :param url: location of the zipped dataset
    :param zip_path: temporary path for the downloaded archive (deleted
        after extraction)
    :return: None
    """
    # Bug fix: the module only does ``import urllib``, which does not
    # guarantee the ``urllib.request`` submodule is loaded; import it
    # explicitly so ``urlretrieve`` always resolves.
    import urllib.request
    zip_path, _ = urllib.request.urlretrieve(url, zip_path)
    zipfile.ZipFile(zip_path, "r").extractall('bookcrossing')
    os.remove(zip_path)
def bookcrossing_converting():
    """
    Download the BookCrossing ratings, remap user/item identifiers to
    dense integer indices and split into train/test sets.

    :return: (train, test, n_users, n_items) — two DataFrames with columns
        user_id/item_id/rating plus the distinct user and item counts.
    """
    download_bookcrossing()
    # The BX dump is ';'-separated and latin1-encoded.
    train_data = pd.read_csv('bookcrossing/BX-Book-Ratings.csv', delimiter=';', encoding='latin1')
    # Remap arbitrary user ids / ISBNs to contiguous integer indices.
    curusers = list(set(train_data["User-ID"]))
    users_uuid_int_dict = dict(zip(curusers, range(len(curusers))))
    curitems = list(set(train_data["ISBN"]))
    items_uuid_int_dict = dict(zip(curitems, range(len(curitems))))
    train_data["User-ID"] = train_data["User-ID"].apply(lambda x: users_uuid_int_dict[x])
    train_data["ISBN"] = train_data["ISBN"].apply(lambda x: items_uuid_int_dict[x])
    train_data["Book-Rating"] = train_data["Book-Rating"].apply(lambda x: int(x))
    # Remove the extracted dump once it has been loaded into memory.
    shutil.rmtree('bookcrossing')
    data = pd.DataFrame({'user_id': train_data["User-ID"],
                         'item_id': train_data["ISBN"],
                         'rating': train_data["Book-Rating"]})
    # NOTE(review): the split is unseeded and therefore not reproducible
    # between runs — pass random_state if determinism is needed.
    train, test = train_test_split(data, test_size=0.2)
    return train, test, len(np.unique(data.user_id)), len(np.unique(data.item_id))
|
{"hexsha": "9b054ddf19c9b5e044e4e0e560e6b5c6c8626906", "size": 1622, "ext": "py", "lang": "Python", "max_stars_repo_path": "rec_tool/transformations/bookcrossing.py", "max_stars_repo_name": "Ilyushin/rec-tool", "max_stars_repo_head_hexsha": "f9e3338ee19f93c5dee483feb5301958591abf80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-01T06:09:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-01T06:09:29.000Z", "max_issues_repo_path": "rec_tool/transformations/bookcrossing.py", "max_issues_repo_name": "Ilyushin/rec-tool", "max_issues_repo_head_hexsha": "f9e3338ee19f93c5dee483feb5301958591abf80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-17T04:23:37.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-06T16:24:05.000Z", "max_forks_repo_path": "rec_tool/transformations/bookcrossing.py", "max_forks_repo_name": "Ilyushin/rec-tool", "max_forks_repo_head_hexsha": "f9e3338ee19f93c5dee483feb5301958591abf80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-26T18:46:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-29T09:37:05.000Z", "avg_line_length": 31.8039215686, "max_line_length": 100, "alphanum_fraction": 0.6707768187, "include": true, "reason": "import numpy", "num_tokens": 400}
|
# Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
#
# This work is made available
# under the Nvidia Source Code License (1-way Commercial).
# To view a copy of this license, visit
# https://nvlabs.github.io/Dancing2Music/License.txt
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
# Alias the tensor namespace: use CUDA tensor types when a GPU is
# available, otherwise fall back to CPU tensors (T.FloatTensor is used
# below when constructing learned initial hidden states).
if torch.cuda.is_available():
  T = torch.cuda
else:
  T = torch
###########################################################
##########
########## Stage 1: Movement
##########
###########################################################
class InitPose_Enc(nn.Module):
  """Encoder mapping an initial pose vector to the two parameter heads of
  a Gaussian over the initial-pose latent code.

  forward(pose) returns (mean head output, std head output), each of
  size dim_z_init.
  """
  def __init__(self, pose_size, dim_z_init):
    super(InitPose_Enc, self).__init__()
    hidden = 64
    # Shared trunk: two Linear + LayerNorm + LeakyReLU stages.
    self.enc = nn.Sequential(
      nn.Linear(pose_size, hidden),
      nn.LayerNorm(hidden),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(hidden, hidden),
      nn.LayerNorm(hidden),
      nn.LeakyReLU(0.2, inplace=True),
    )
    # Separate linear heads for the distribution parameters.
    self.mean = nn.Sequential(
      nn.Linear(hidden, dim_z_init),
    )
    self.std = nn.Sequential(
      nn.Linear(hidden, dim_z_init),
    )
  def forward(self, pose):
    features = self.enc(pose)
    return self.mean(features), self.std(features)
class InitPose_Dec(nn.Module):
  """Decoder mapping an initial-pose latent code back to a pose vector
  of size pose_size (the structural mirror of InitPose_Enc)."""
  def __init__(self, pose_size, dim_z_init):
    super(InitPose_Dec, self).__init__()
    hidden = 64
    # Two hidden Linear + LayerNorm + LeakyReLU stages, then a linear
    # output projection to pose space.
    self.dec = nn.Sequential(
      nn.Linear(dim_z_init, hidden),
      nn.LayerNorm(hidden),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(hidden, hidden),
      nn.LayerNorm(hidden),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(hidden, pose_size),
    )
  def forward(self, z_init):
    return self.dec(z_init)
class Movement_Enc(nn.Module):
  """GRU encoder over a pose sequence producing the mean/std heads of a
  Gaussian over the movement latent code.

  When ``bidirection`` is True, the last forward hidden state and the
  first backward hidden state are concatenated before the heads.
  ``length`` is accepted for interface compatibility but unused here.
  """
  def __init__(self, pose_size, dim_z_movement, length, hidden_size, num_layers, bidirection=False):
    super(Movement_Enc, self).__init__()
    self.hidden_size = hidden_size
    self.bidirection = bidirection
    if bidirection:
      self.num_dir = 2
    else:
      self.num_dir = 1
    self.recurrent = nn.GRU(pose_size, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=bidirection)
    # Learned initial hidden state, repeated across the batch in forward.
    self.init_h = nn.Parameter(torch.randn(num_layers*self.num_dir, 1, hidden_size).type(T.FloatTensor), requires_grad=True)
    if bidirection:
      self.mean = nn.Sequential(
        nn.Linear(hidden_size*2,dim_z_movement),
      )
      self.std = nn.Sequential(
        nn.Linear(hidden_size*2,dim_z_movement),
      )
    else:
      self.mean = nn.Sequential(
        nn.Linear(hidden_size,dim_z_movement),
      )
      self.std = nn.Sequential(
        nn.Linear(hidden_size,dim_z_movement),
      )
  def getFeature(self, poses):
    """Run the GRU over (batch, time, pose) and return one summary
    feature per sequence."""
    num_samples = poses.shape[0]
    h_t = [self.init_h.repeat(1, num_samples, 1)]
    output, hidden = self.recurrent(poses, h_t[0])
    if self.bidirection:
      output = torch.cat((output[:,-1,:self.hidden_size], output[:,0,self.hidden_size:]), 1)
    else:
      output = output[:,-1,:]
    return output
  def forward(self, poses):
    # forward previously duplicated getFeature's body verbatim; delegate
    # to it instead so the two code paths cannot drift apart.
    feature = self.getFeature(poses)
    return self.mean(feature), self.std(feature)
class StandardPose_Dec(nn.Module):
  """GRU decoder generating a fixed-length pose sequence from the
  initial-pose and movement latent codes.

  forward(z_init, z_movement) returns a (batch, length, pose_size)
  tensor.
  """
  def __init__(self, pose_size, dim_z_init, dim_z_movement, length, hidden_size, num_layers):
    super(StandardPose_Dec, self).__init__()
    self.length = length
    self.pose_size = pose_size
    self.hidden_size = hidden_size
    self.num_layers = num_layers
    # Project both latent codes into the stacked initial hidden state.
    self.z2init = nn.Sequential(
      nn.Linear(dim_z_init+dim_z_movement, num_layers*hidden_size)
    )
    # The movement code is fed to the GRU at every time step.
    self.recurrent = nn.GRU(dim_z_movement, hidden_size, num_layers=num_layers, batch_first=True)
    # Per-step readout from hidden state to pose vector.
    self.pose_g = nn.Sequential(
      nn.Linear(hidden_size, hidden_size),
      nn.LayerNorm(hidden_size),
      nn.ReLU(True),
      nn.Linear(hidden_size, pose_size)
    )
  def forward(self, z_init, z_movement):
    batch = z_movement.size(0)
    # Initial GRU hidden state from both latent codes.
    flat_h = self.z2init(torch.cat((z_init, z_movement), 1))
    h0 = flat_h.view(self.num_layers, batch, self.hidden_size)
    # Repeat the movement code along the time axis.
    step_inputs = z_movement.unsqueeze(1).repeat(1, self.length, 1)
    hidden_seq, _ = self.recurrent(step_inputs, h0)
    flat_hidden = hidden_seq.contiguous().view(-1, self.hidden_size)
    flat_poses = self.pose_g(flat_hidden)
    return flat_poses.view(batch, self.length, self.pose_size)
class StandardPose_Dis(nn.Module):
  """MLP discriminator scoring an entire (flattened) pose sequence."""
  def __init__(self, pose_size, length):
    super(StandardPose_Dis, self).__init__()
    self.pose_size = pose_size
    self.length = length
    width = 1024
    layers = [
      nn.Linear(length * pose_size, width),
      nn.LayerNorm(width),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width, width // 2),
      nn.LayerNorm(width // 2),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width // 2, width // 4),
      nn.LayerNorm(width // 4),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width // 4, 1),
    ]
    self.main = nn.Sequential(*layers)
  def forward(self, pose_seq):
    """Return one realism score per sequence, shape (batch,)."""
    flat = pose_seq.view(-1, self.pose_size * self.length)
    return self.main(flat).squeeze()
###########################################################
##########
########## Stage 2: Dance
##########
###########################################################
class Dance_Enc(nn.Module):
  """Encode a sequence of movement latent distributions into a dance latent."""
  def __init__(self, dim_z_movement, dim_z_dance, hidden_size, num_layers, bidirection=False):
    super(Dance_Enc, self).__init__()
    self.hidden_size = hidden_size
    self.bidirection = bidirection
    self.num_dir = 2 if bidirection else 1
    # Input at each step is the concatenation of a movement's mean and std.
    self.recurrent = nn.GRU(2 * dim_z_movement, hidden_size, num_layers=num_layers,
                            batch_first=True, bidirectional=bidirection)
    # Learnable initial hidden state, repeated over the batch in forward().
    self.init_h = nn.Parameter(torch.randn(num_layers * self.num_dir, 1, hidden_size).type(T.FloatTensor),
                               requires_grad=True)
    # Bidirectional features are twice as wide (fwd + bwd halves concatenated).
    feat_dim = hidden_size * 2 if bidirection else hidden_size
    self.mean = nn.Sequential(
      nn.Linear(feat_dim, dim_z_dance),
    )
    self.std = nn.Sequential(
      nn.Linear(feat_dim, dim_z_dance),
    )
  def forward(self, movements_mean, movements_std):
    """Return (mean, std) of the dance latent, each (batch, dim_z_dance)."""
    movements = torch.cat((movements_mean, movements_std), 2)
    batch = movements.shape[0]
    h0 = self.init_h.repeat(1, batch, 1)
    output, _ = self.recurrent(movements, h0)
    if self.bidirection:
      feat = torch.cat((output[:, -1, :self.hidden_size], output[:, 0, self.hidden_size:]), 1)
    else:
      feat = output[:, -1, :]
    return self.mean(feat), self.std(feat)
class Dance_Dec(nn.Module):
  """Decode a dance latent into a sequence of movement latent distributions."""
  def __init__(self, dim_z_dance, dim_z_movement, hidden_size, num_layers):
    super(Dance_Dec, self).__init__()
    self.num_layers = num_layers
    self.hidden_size = hidden_size
    self.dim_z_movement = dim_z_movement
    # Dance code -> initial hidden state of the GRU.
    self.z2init = nn.Sequential(
      nn.Linear(dim_z_dance, num_layers * hidden_size)
    )
    self.recurrent = nn.GRU(dim_z_dance, hidden_size, num_layers=num_layers, batch_first=True)
    # Shared trunk before the two distribution heads.
    self.movement_g = nn.Sequential(
      nn.Linear(hidden_size, hidden_size),
      nn.LayerNorm(hidden_size),
      nn.ReLU(True),
    )
    self.mean = nn.Sequential(
      nn.Linear(hidden_size, dim_z_movement),
    )
    self.std = nn.Sequential(
      nn.Linear(hidden_size, dim_z_movement),
    )
  def forward(self, z_dance, length=3):
    """Return (mean, std), each of shape (batch * length, dim_z_movement)."""
    h0 = self.z2init(z_dance)
    h0 = h0.view(self.num_layers, h0.size(0), self.hidden_size)
    # Feed the dance code at every generated time step.
    steps = z_dance.view(z_dance.size(0), 1, z_dance.size(1)).repeat(1, length, 1)
    hidden_seq, _ = self.recurrent(steps, h0)
    trunk = self.movement_g(hidden_seq.contiguous().view(-1, self.hidden_size))
    return self.mean(trunk), self.std(trunk)
class DanceAud_Dis2(nn.Module):
  """Joint discriminator over a movement-latent sequence and an audio feature."""
  def __init__(self, aud_size, dim_z_movement, length=3):
    super(DanceAud_Dis2, self).__init__()
    self.aud_size = aud_size
    self.dim_z_movement = dim_z_movement
    self.length = length
    width = 1024
    # Movement branch: flattened (mean, std) sequence -> 30-d embedding.
    self.movementd = nn.Sequential(
      nn.Linear(dim_z_movement * 2 * length, width),
      nn.LayerNorm(width),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width, width // 2),
      nn.LayerNorm(width // 2),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width // 2, width // 4),
      nn.LayerNorm(width // 4),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width // 4, 30),
    )
    # Audio branch: raw audio feature -> 30-d embedding.
    self.audd = nn.Sequential(
      nn.Linear(aud_size, 30),
      nn.LayerNorm(30),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(30, 30),
      nn.LayerNorm(30),
      nn.LeakyReLU(0.2, inplace=True),
    )
    # Joint head on the concatenated embeddings.
    self.jointd = nn.Sequential(
      nn.Linear(60, 1)
    )
  def forward(self, movements, aud):
    """Return (score, None); score has shape (batch,)."""
    if len(movements.shape) == 3:
      movements = movements.view(movements.shape[0], movements.shape[1] * movements.shape[2])
    joint = torch.cat((self.movementd(movements), self.audd(aud)), 1)
    return self.jointd(joint).squeeze(), None
class DanceAud_Dis(nn.Module):
  """Discriminator over a single movement latent (mean and std concatenated).

  The `aud` argument is currently unused; it is kept for interface parity
  with the other discriminators.
  """
  def __init__(self, aud_size, dim_z_movement, length=3):
    super(DanceAud_Dis, self).__init__()
    self.aud_size = aud_size
    self.dim_z_movement = dim_z_movement
    self.length = length
    width = 1024
    self.movementd = nn.Sequential(
      nn.Linear(dim_z_movement * 2, width),
      nn.LayerNorm(width),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width, width // 2),
      nn.LayerNorm(width // 2),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width // 2, width // 4),
      nn.LayerNorm(width // 4),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width // 4, 30),
    )
  def forward(self, movements, aud):
    """Return a 30-d embedding per sample (squeezed)."""
    return self.movementd(movements).squeeze()
class DanceAud_InfoDis(nn.Module):
  """InfoGAN-style discriminator: realism score plus audio-feature regression."""
  def __init__(self, aud_size, dim_z_movement, length):
    super(DanceAud_InfoDis, self).__init__()
    self.aud_size = aud_size
    self.dim_z_movement = dim_z_movement
    self.length = length
    width = 1024
    # Shared trunk over the flattened movement-latent sequence.
    self.movementd = nn.Sequential(
      nn.Linear(dim_z_movement * 6, width * 2),
      nn.LayerNorm(width * 2),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width * 2, width),
      nn.LayerNorm(width),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width, width // 2),
      nn.LayerNorm(width // 2),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width // 2, width // 4),
      nn.LayerNorm(width // 4),
      nn.LeakyReLU(0.2, inplace=True),
    )
    # Realism head.
    self.dis = nn.Sequential(
      nn.Linear(width // 4, 1)
    )
    # Audio-feature regression head.
    self.reg = nn.Sequential(
      nn.Linear(width // 4, aud_size)
    )
  def forward(self, movements, aud):
    """Return (score (batch,), predicted audio feature (batch, aud_size))."""
    flat = movements.view(movements.shape[0], movements.shape[1] * movements.shape[2])
    shared = self.movementd(flat)
    return self.dis(shared).squeeze(), self.reg(shared)
class Dance2Style(nn.Module):
  """Regress an audio style feature from a dance latent via a small MLP."""
  def __init__(self, dim_z_dance, aud_size):
    super(Dance2Style, self).__init__()
    self.aud_size = aud_size
    self.dim_z_dance = dim_z_dance
    width = 512
    # Three Linear/LayerNorm/LeakyReLU stages with halving widths, then the output head.
    dims = [dim_z_dance, width, width // 2, width // 4]
    blocks = []
    for d_in, d_out in zip(dims[:-1], dims[1:]):
      blocks += [nn.Linear(d_in, d_out), nn.LayerNorm(d_out), nn.LeakyReLU(0.2, inplace=True)]
    blocks.append(nn.Linear(dims[-1], aud_size))
    self.main = nn.Sequential(*blocks)
  def forward(self, zdance):
    """Return the predicted audio feature, shape (batch, aud_size)."""
    return self.main(zdance)
###########################################################
##########
########## Audio
##########
###########################################################
class AudioClassifier_rnn(nn.Module):
  """GRU classifier over sequences; the final hidden output feeds an MLP head."""
  def __init__(self, dim_z_motion, hidden_size, pose_size, cls, num_layers=1, h_init=2):
    super(AudioClassifier_rnn, self).__init__()
    self.dim_z_motion = dim_z_motion
    self.hidden_size = hidden_size
    self.pose_size = pose_size
    self.h_init = h_init
    self.num_layers = num_layers
    # Learnable initial hidden state, repeated across the batch at call time.
    self.init_h = nn.Parameter(torch.randn(1, 1, self.hidden_size).type(T.FloatTensor), requires_grad=True)
    self.recurrent = nn.GRU(pose_size, hidden_size, num_layers=num_layers, batch_first=True)
    self.classifier = nn.Sequential(
      nn.Linear(hidden_size, hidden_size),
      nn.ReLU(True),
      nn.Linear(hidden_size, cls)
    )
  def _last_hidden(self, seq):
    # Run the GRU and keep only the final time step's output.
    out, _ = self.recurrent(seq, self.init_h.repeat(1, seq.shape[0], 1))
    return out[:, -1, :]
  def forward(self, poses):
    """Return class logits of shape (batch, cls)."""
    return self.classifier(self._last_hidden(poses))
  def get_style(self, auds):
    """Return the final GRU output as a style feature, (batch, hidden_size)."""
    return self._last_hidden(auds)
class Audstyle_Enc(nn.Module):
  """Encode an audio feature (with sampled noise) into a style latent distribution."""
  def __init__(self, aud_size, dim_z, dim_noise=30):
    super(Audstyle_Enc, self).__init__()
    self.dim_noise = dim_noise
    width = 64
    self.enc = nn.Sequential(
      nn.Linear(aud_size + dim_noise, width),
      nn.LayerNorm(width),
      nn.LeakyReLU(0.2, inplace=True),
      nn.Linear(width, width * 2),
      nn.LayerNorm(width * 2),
      nn.LeakyReLU(0.2, inplace=True),
    )
    self.mean = nn.Sequential(
      nn.Linear(width * 2, dim_z),
    )
    self.std = nn.Sequential(
      nn.Linear(width * 2, dim_z),
    )
  def forward(self, aud):
    """Return (mean, std) of the style latent, each (batch, dim_z).

    NOTE: the noise tensor is created directly on the GPU via `.cuda()`,
    so this module requires CUDA at call time.
    """
    noise = torch.randn(aud.shape[0], self.dim_noise).cuda()
    feat = self.enc(torch.cat((aud, noise), 1))
    return self.mean(feat), self.std(feat)
|
{"hexsha": "f1d84875073abb4bac6836d0ff13df50c664e68a", "size": 14704, "ext": "py", "lang": "Python", "max_stars_repo_path": "networks.py", "max_stars_repo_name": "Neoutlier/Dancing2Music", "max_stars_repo_head_hexsha": "b1b51ed5319415238869a9bc78a34d4d90662dc0", "max_stars_repo_licenses": ["BSD-Source-Code"], "max_stars_count": 466, "max_stars_repo_stars_event_min_datetime": "2019-11-02T20:31:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:02:09.000Z", "max_issues_repo_path": "networks.py", "max_issues_repo_name": "NVlabs/Dance2Music", "max_issues_repo_head_hexsha": "7ff1d95f9f3d3585e29ee7e4ca5a3a45e29db6de", "max_issues_repo_licenses": ["BSD-Source-Code"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2019-11-08T02:32:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T09:27:57.000Z", "max_forks_repo_path": "networks.py", "max_forks_repo_name": "NVlabs/Dance2Music", "max_forks_repo_head_hexsha": "7ff1d95f9f3d3585e29ee7e4ca5a3a45e29db6de", "max_forks_repo_licenses": ["BSD-Source-Code"], "max_forks_count": 77, "max_forks_repo_forks_event_min_datetime": "2019-11-07T08:27:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T19:02:45.000Z", "avg_line_length": 32.3164835165, "max_line_length": 126, "alphanum_fraction": 0.6342491839, "include": true, "reason": "import numpy", "num_tokens": 3992}
|
// This file is part of the dune-xt project:
// https://zivgitlab.uni-muenster.de/ag-ohlberger/dune-community/dune-xt
// Copyright 2009-2021 dune-xt developers and contributors. All rights reserved.
// License: Dual licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
// or GPL-2.0+ (http://opensource.org/licenses/gpl-license)
// with "runtime exception" (http://www.dune-project.org/license.html)
// Authors:
// René Fritze (2020)
// Tobias Leibner (2019 - 2020)
#ifndef DUNE_XT_LA_CONTAINER_MATRIX_MARKET_HH
#define DUNE_XT_LA_CONTAINER_MATRIX_MARKET_HH
#include <fstream>
#include <iostream>
#include <string>
#include <boost/algorithm/string/constants.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <dune/xt/common/exceptions.hh>
#include <dune/xt/common/string.hh>
#include <dune/xt/common/type_traits.hh>
#include <dune/xt/la/container/pattern.hh>
namespace Dune::XT::LA {
namespace internal {
/**
 * \brief Parses the data section of a matrix market file in *array* (dense) format.
 *
 * On entry, \a curr_line must hold the size line 'num_rows num_cols'. Each following
 * non-blank line holds one entry ('value' for real, 'real imag' for complex matrices),
 * given in column-major order. For the symmetric/skew-symmetric/hermitian qualifiers
 * only the lower triangular part (strictly lower in the skew-symmetric case) is stored
 * in the file; the upper triangle is reconstructed at the end.
 *
 * \throws Dune::IOError                if the size line is malformed or entries are missing
 * \throws Dune::InvalidStateException  on non-square non-general matrices or malformed entries
 */
template <class MatrixType>
MatrixType read_matrix_market_array_format(std::ifstream& matrix_file,
                                           std::string& curr_line,
                                           const bool field_qualifier_is_complex,
                                           const bool is_general,
                                           const bool is_symmetric,
                                           const bool is_skew_symmetric)
{
  using M = XT::Common::MatrixAbstraction<MatrixType>;
  using RealType = typename M::RealType;
  using ScalarType = typename M::ScalarType;
  // dense matrix format, first line contains 'num_rows num_cols', each following line contains one matrix entry,
  // entries are in column-major order
  // parse first line
  std::vector<std::string> matrix_dimensions;
  boost::algorithm::split(
      matrix_dimensions, curr_line, boost::algorithm::is_space(), boost::algorithm::token_compress_on);
  DUNE_THROW_IF(matrix_dimensions.size() != 2, Dune::IOError, "Could not read matrix dimensions!");
  const auto rows = XT::Common::from_string<size_t>(matrix_dimensions[0]);
  const auto cols = XT::Common::from_string<size_t>(matrix_dimensions[1]);
  MatrixType ret = M::create(rows, cols, 0., XT::LA::dense_pattern(rows, cols));
  DUNE_THROW_IF(!is_general && rows != cols,
                Dune::InvalidStateException,
                "You're trying to create a non-square symmetric/skew-symmetric/hermitian matrix!");
  // read entries
  // number of values per data line: 'real imag' for complex matrices, a single value otherwise
  const size_t expected_entry_size = field_qualifier_is_complex ? 2 : 1;
  std::vector<std::string> curr_entry(expected_entry_size);
  // general: all rows*cols entries; symmetric/hermitian: lower triangle incl. diagonal;
  // skew-symmetric: strictly lower triangle (the diagonal is implicitly zero)
  const size_t expected_num_entries =
      is_general ? rows * cols : (is_skew_symmetric ? (rows * (rows - 1)) / 2 : (rows * (rows - 1)) / 2 + rows);
  for (size_t ii = 0; ii < expected_num_entries; ++ii) {
    size_t curr_row, curr_col;
    if (is_general) {
      // column-major: walk down each column before moving to the next
      curr_row = ii % rows;
      curr_col = ii / rows;
    } else {
      // (skew-)symmetric or hermitian
      // only (strictly) lower triangular part is given
      // In the symmetric/hermitian case, first column has rows entries, second one rows - 1 entries, ...
      // In the skew-symmetric case, the diagonal is also omitted, so each column has one entry less
      // Map the linear index ii to (row, col) by subtracting the (shrinking) column lengths.
      size_t col_size = is_skew_symmetric ? rows - 1 : rows;
      curr_col = 0;
      curr_row = ii;
      while (curr_row >= col_size) {
        curr_row -= col_size;
        curr_col += 1;
        col_size -= 1;
      }
      // shift from the triangle-local row index to the actual matrix row
      curr_row += is_skew_symmetric ? curr_col + 1 : curr_col;
    }
    // get next line, skip blank lines
    do {
      std::getline(matrix_file, curr_line);
      XT::Common::trim(curr_line);
    } while (curr_line.empty() && matrix_file.good());
    DUNE_THROW_IF(!matrix_file.good(), Dune::IOError, "There were not enough entries for this matrix!");
    boost::algorithm::split(curr_entry, curr_line, boost::algorithm::is_space(), boost::algorithm::token_compress_on);
    DUNE_THROW_IF(
        !(curr_entry.size() == expected_entry_size), Dune::InvalidStateException, "Invalid entry encountered!");
    ScalarType entry = XT::Common::create_real_or_complex_number<ScalarType>(
        XT::Common::from_string<RealType>(curr_entry[0]),
        XT::Common::from_string<RealType>(field_qualifier_is_complex ? curr_entry[1] : "0."));
    M::set_entry(ret, curr_row, curr_col, entry);
  } // ii
  if (!is_general) {
    // fill in upper triangular part
    for (size_t ii = 0; ii < rows; ++ii) {
      for (size_t jj = 0; jj < ii; ++jj) {
        if (is_symmetric)
          M::set_entry(ret, jj, ii, M::get_entry(ret, ii, jj));
        else if (is_skew_symmetric)
          M::set_entry(ret, jj, ii, -1. * M::get_entry(ret, ii, jj));
        else // format is hermitian
          M::set_entry(ret, jj, ii, XT::Common::conj(M::get_entry(ret, ii, jj)));
      } // jj
    } // ii
  }
  return ret;
}
/**
 * \brief Parses the data section of a matrix market file in *coordinate* (sparse) format.
 *
 * On entry, \a curr_line must hold the size line 'num_rows num_cols num_nonzeros'.
 * Each following non-blank line holds one entry 'row col value' (respectively
 * 'row col real imag' for complex matrices) with 1-based indices. For the
 * symmetric/skew-symmetric/hermitian qualifiers only the (strictly) lower
 * triangular part may be given; mirrored entries are filled in automatically.
 *
 * \throws Dune::IOError                if the size line is malformed or entries are missing
 * \throws Dune::InvalidStateException  on malformed, zero-based or out-of-place entries
 */
template <class MatrixType>
MatrixType read_matrix_market_coordinate_format(std::ifstream& matrix_file,
                                                std::string& curr_line,
                                                const bool field_qualifier_is_complex,
                                                const bool is_general,
                                                const bool is_symmetric,
                                                const bool is_skew_symmetric)
{
  using M = XT::Common::MatrixAbstraction<MatrixType>;
  using RealType = typename M::RealType;
  using ScalarType = typename M::ScalarType;
  // coordinate format, first line contains 'num_rows num_cols num_nonzeros', each following line contains one matrix
  // entry in the format 'row_index col_index entry', indices are 1-based
  std::vector<std::string> matrix_dimensions;
  boost::algorithm::split(
      matrix_dimensions, curr_line, boost::algorithm::is_space(), boost::algorithm::token_compress_on);
  DUNE_THROW_IF(matrix_dimensions.size() != 3, Dune::IOError, "Could not read matrix dimensions!");
  const auto rows = XT::Common::from_string<size_t>(matrix_dimensions[0]);
  const auto cols = XT::Common::from_string<size_t>(matrix_dimensions[1]);
  const auto nnz = XT::Common::from_string<size_t>(matrix_dimensions[2]);
  // read entries
  std::vector<std::tuple<size_t, size_t, ScalarType>> entries(nnz);
  // values per data line: 'row col real imag' for complex matrices, 'row col value' otherwise
  const size_t expected_entry_size = field_qualifier_is_complex ? 4 : 3;
  std::vector<std::string> curr_entry(expected_entry_size);
  for (size_t ii = 0; ii < nnz; ++ii) {
    // get next line, skip blank lines
    do {
      std::getline(matrix_file, curr_line);
      XT::Common::trim(curr_line);
    } while (curr_line.empty() && matrix_file.good());
    DUNE_THROW_IF(!matrix_file.good(), Dune::IOError, "There were not enough entries for this matrix!");
    boost::algorithm::split(curr_entry, curr_line, boost::algorithm::is_space(), boost::algorithm::token_compress_on);
    DUNE_THROW_IF(
        !(curr_entry.size() == expected_entry_size), Dune::InvalidStateException, "Invalid entry encountered!");
    ScalarType entry = XT::Common::create_real_or_complex_number<ScalarType>(
        XT::Common::from_string<RealType>(curr_entry[2]),
        XT::Common::from_string<RealType>(field_qualifier_is_complex ? curr_entry[3] : "0."));
    entries[ii] = std::make_tuple(
        XT::Common::from_string<size_t>(curr_entry[0]), XT::Common::from_string<size_t>(curr_entry[1]), entry);
  }
  // first pass: build the sparsity pattern (mirrored for non-general matrices)
  SparsityPatternDefault pattern(rows);
  for (size_t ii = 0; ii < nnz; ++ii) {
    // entries in matrix market format are 1-based
    const size_t row = std::get<0>(entries[ii]);
    const size_t col = std::get<1>(entries[ii]);
    DUNE_THROW_IF(
        row == 0 || col == 0, Dune::InvalidStateException, "Indices in matrix market format have to be 1-based!");
    DUNE_THROW_IF(
        (!is_general) && row < col,
        Dune::InvalidStateException,
        "Only provide (strictly) lower triangular portion of matrix for symmetric/hermitian/skew-symmetric format!");
    DUNE_THROW_IF(
        is_skew_symmetric && row == col,
        Dune::InvalidStateException,
        "Only provide (strictly) lower triangular portion of matrix for symmetric/hermitian/skew-symmetric format!");
    pattern.insert(row - 1, col - 1);
    if (!is_general)
      pattern.insert(col - 1, row - 1);
  }
  pattern.sort();
  // second pass: create the matrix and set the values (and their mirrored counterparts)
  MatrixType ret = M::create(rows, cols, 0., pattern);
  for (size_t ii = 0; ii < nnz; ++ii) {
    const size_t row = std::get<0>(entries[ii]) - 1;
    const size_t col = std::get<1>(entries[ii]) - 1;
    const ScalarType entry = std::get<2>(entries[ii]);
    M::set_entry(ret, row, col, entry);
    if (!is_general) {
      if (is_symmetric)
        M::set_entry(ret, col, row, entry);
      else if (is_skew_symmetric)
        M::set_entry(ret, col, row, -entry);
      else // format is hermitian
        M::set_entry(ret, col, row, XT::Common::conj(entry));
    }
  } // ii
  return ret;
}
} // namespace internal
/**
 * \brief Reads a matrix from \a filename in matrix market format.
 *
 * Supports the 'array' (dense) and 'coordinate' (sparse) formats with 'real' or
 * 'complex' field qualifier and the symmetry qualifiers 'general', 'symmetric',
 * 'skew-symmetric' and 'hermitian'.
 *
 * \throws Dune::IOError                if the file cannot be opened or the header is invalid
 * \throws Dune::NotImplemented         for unsupported object/format/field/symmetry qualifiers
 * \throws Dune::InvalidStateException  when reading a complex matrix into a real matrix type
 */
template <class MatrixType>
MatrixType read_matrix_market(const std::string& filename)
{
  using M = XT::Common::MatrixAbstraction<MatrixType>;
  using ScalarType = typename M::ScalarType;
  constexpr bool scalartype_is_complex = XT::Common::is_complex<ScalarType>::value;
  std::ifstream matrix_file(filename);
  DUNE_THROW_IF(!matrix_file.is_open(), Dune::IOError, "Opening matrix file for reading failed!");
  std::string curr_line;
  static const std::string matrix_market_prefix = "%%MatrixMarket";
  // Search for matrix market header
  // Find line that starts with %%MatrixMarket, compare returns 0 if match is found
  while (curr_line.compare(0, matrix_market_prefix.size(), matrix_market_prefix) && matrix_file.good()) {
    std::getline(matrix_file, curr_line);
    XT::Common::trim(curr_line);
  }
  DUNE_THROW_IF(!matrix_file.good(), Dune::IOError, "File is not a valid matrix market file!");
  std::vector<std::string> tokens;
  boost::algorithm::split(tokens, curr_line, boost::algorithm::is_space(), boost::algorithm::token_compress_on);
  // The header line has the form '%%MatrixMarket object format field [symmetry]'. Check the
  // token count before indexing (previously a truncated header caused out-of-range access).
  DUNE_THROW_IF(tokens.size() < 4, Dune::IOError, "Matrix market header line is incomplete!");
  DUNE_THROW_IF(
      !(tokens[0] == matrix_market_prefix), Dune::IOError, "There has to be whitespace after %%MatrixMarket!");
  const std::string object_str = XT::Common::to_lower(tokens[1]);
  const std::string format_str = XT::Common::to_lower(tokens[2]);
  const std::string field_qualifier = XT::Common::to_lower(tokens[3]);
  const std::string symmetry_qualifier = tokens.size() >= 5 ? XT::Common::to_lower(tokens[4]) : "";
  const bool field_qualifier_is_complex = (field_qualifier == "complex");
  const bool is_general = (symmetry_qualifier == "general");
  const bool is_symmetric = (symmetry_qualifier == "symmetric");
  const bool is_skew_symmetric = (symmetry_qualifier == "skew-symmetric");
  DUNE_THROW_IF(
      !(object_str == "matrix"), Dune::NotImplemented, "Only matrix market matrix files are supported by now!");
  DUNE_THROW_IF(!(format_str == "coordinate" || format_str == "array"),
                Dune::NotImplemented,
                "Only coordinate and array format are supported!");
  DUNE_THROW_IF(!(field_qualifier == "real" || field_qualifier == "complex"),
                Dune::NotImplemented,
                "Only real and complex matrices are supported by now!");
  DUNE_THROW_IF(field_qualifier_is_complex && !scalartype_is_complex,
                Dune::InvalidStateException,
                "You are trying to read a complex matrix into a real matrix type!");
  DUNE_THROW_IF(!is_general && !is_symmetric && !is_skew_symmetric && (symmetry_qualifier != "hermitian"),
                Dune::NotImplemented,
                "Symmetry qualifier has to be 'general', 'symmetric', 'skew-symmetric' or 'hermitian'");
  // Skip comments (comments start with %)
  std::getline(matrix_file, curr_line);
  XT::Common::trim(curr_line);
  while ((!(curr_line.compare(0, 1, "%")) || curr_line.empty()) && matrix_file.good()) {
    std::getline(matrix_file, curr_line);
    XT::Common::trim(curr_line);
  }
  DUNE_THROW_IF(!matrix_file.good(), Dune::IOError, "File only contains header and comments, no data found!");
  // curr_line now holds the size line; dispatch on the storage format
  if (format_str == "array")
    return internal::read_matrix_market_array_format<MatrixType>(
        matrix_file, curr_line, field_qualifier_is_complex, is_general, is_symmetric, is_skew_symmetric);
  return internal::read_matrix_market_coordinate_format<MatrixType>(
      matrix_file, curr_line, field_qualifier_is_complex, is_general, is_symmetric, is_skew_symmetric);
}
/**
 * \brief Writes \a mat to \a filename in matrix market format.
 *
 * Dense matrix types are written in 'array' format (all entries, column-major),
 * sparse types in 'coordinate' format (pattern entries only, 1-based indices).
 * The symmetry qualifier is always 'general'.
 *
 * \param precision  number of significant digits used for the entries
 * \throws Dune::IOError if the file cannot be opened for writing
 */
template <class MatrixType>
void write_matrix_market(const MatrixType& mat, const std::string& filename, const int precision = 20)
{
  using M = XT::Common::MatrixAbstraction<MatrixType>;
  using ScalarType = typename M::ScalarType;
  constexpr bool scalartype_is_complex = XT::Common::is_complex<ScalarType>::value;
  std::ofstream matrix_file(filename);
  DUNE_THROW_IF(!matrix_file.is_open(), Dune::IOError, "Opening matrix file for writing failed!");
  matrix_file.precision(precision);
  static constexpr bool is_dense = (M::storage_layout == Common::StorageLayout::dense_column_major
                                    || M::storage_layout == Common::StorageLayout::dense_row_major);
  const std::string format_str = is_dense ? "array" : "coordinate";
  const std::string field_qualifier = scalartype_is_complex ? "complex" : "real";
  const std::string symmetry_qualifier = "general";
  // Write header ("\n" instead of std::endl throughout: no need to flush after every
  // line, the stream is flushed once when matrix_file goes out of scope)
  matrix_file << "%%MatrixMarket matrix " << format_str << " " << field_qualifier << " " << symmetry_qualifier
              << "\n";
  // Write dimensions (the coordinate format additionally needs the number of non-zeros)
  matrix_file << mat.rows() << " " << mat.cols() << (is_dense ? "" : (" " + XT::Common::to_string(mat.non_zeros())))
              << "\n";
  // Write data
  if (is_dense) {
    // write all entries in column-major order
    for (size_t jj = 0; jj < mat.cols(); ++jj) {
      for (size_t ii = 0; ii < mat.rows(); ++ii) {
        const ScalarType& entry = M::get_entry(mat, ii, jj);
        const std::string entry_str = scalartype_is_complex ? (XT::Common::to_string(std::real(entry), precision) + " "
                                                               + XT::Common::to_string(std::imag(entry), precision))
                                                            : XT::Common::to_string(entry, precision);
        matrix_file << entry_str << "\n";
      } // ii
    } // jj
  } else {
    // write non-zero entries in coordinate format (indices are 1-based)
    const auto& pattern = mat.pattern();
    for (size_t ii = 0; ii < mat.rows(); ++ii) {
      for (const auto& jj : pattern.inner(ii)) {
        const ScalarType& entry = M::get_entry(mat, ii, jj);
        const std::string entry_str = scalartype_is_complex ? (XT::Common::to_string(std::real(entry), precision) + " "
                                                               + XT::Common::to_string(std::imag(entry), precision))
                                                            : XT::Common::to_string(entry, precision);
        matrix_file << ii + 1 << " " << jj + 1 << " " << entry_str << "\n";
      } // jj
    } // ii
  }
} // ... write_matrix_market(...)
} // namespace Dune::XT::LA
#endif // DUNE_XT_LA_CONTAINER_MATRIX_MARKET_HH
|
{"hexsha": "2fff57ef690d0b7a223974fec44fed1c0d5dcb74", "size": 15094, "ext": "hh", "lang": "C++", "max_stars_repo_path": "dune/xt/la/container/matrix-market.hh", "max_stars_repo_name": "dune-community/dune-xt", "max_stars_repo_head_hexsha": "da921524c6fff8d60c715cb4849a0bdd5f020d2b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-02-08T04:08:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-01T18:54:14.000Z", "max_issues_repo_path": "dune/xt/la/container/matrix-market.hh", "max_issues_repo_name": "dune-community/dune-xt", "max_issues_repo_head_hexsha": "da921524c6fff8d60c715cb4849a0bdd5f020d2b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 35.0, "max_issues_repo_issues_event_min_datetime": "2019-08-19T12:06:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-27T08:20:39.000Z", "max_forks_repo_path": "dune/xt/la/container/matrix-market.hh", "max_forks_repo_name": "dune-community/dune-xt", "max_forks_repo_head_hexsha": "da921524c6fff8d60c715cb4849a0bdd5f020d2b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-02-08T04:09:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T04:09:34.000Z", "avg_line_length": 49.8151815182, "max_line_length": 119, "alphanum_fraction": 0.6528421889, "num_tokens": 3751}
|
import numpy as np
import os, sys
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from torch.autograd import Variable
from torch.nn import Parameter
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm
from ..proj_utils.plot_utils import *
from ..proj_utils.torch_utils import *
from torch.multiprocessing import Pool
import scipy
import time, json
import random
TINY = 1e-8
def train_nd(dataset_train, dataset_test, model_root, mode_name, img_encoder, vs_model, args):
    """Train the visual-semantic model with a pairwise ranking loss.

    Args:
        dataset_train, dataset_test: datasets yielding (images, ..., txt_code, txt_len, ...)
            tuples (6 fields for train, 8 for test).
        model_root, mode_name: checkpoints are written to model_root/mode_name.
        img_encoder: frozen image feature extractor (kept in eval mode).
        vs_model: visual-semantic embedding model being trained.
        args: namespace with lr, maxepoch, batch_size, margin, epoch_decay,
            reuse_weights, load_from_epoch, display_freq, verbose_per_iter,
            save_freq, device_id.
    """
    dataloader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True)
    dataloader_test = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=True)
    lr = args.lr
    tot_epoch = args.maxepoch
    train_num = len(dataset_train)
    test_num = len(dataset_test)
    number_example = train_num + test_num
    # Probability of drawing a batch from the training split (proportional to its size).
    prob_use_train = float(train_num) / number_example
    transform = get_transform(img_encoder)
    updates_per_epoch = int(number_example / args.batch_size)
    ''' configure optimizer '''
    optimizer = optim.Adam(vs_model.parameters(), lr=args.lr, betas=(0.5, 0.999))
    model_folder = os.path.join(model_root, mode_name)
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    ''' load model '''
    if args.reuse_weights:
        weightspath = os.path.join(model_folder, 'W_epoch{}.pth'.format(args.load_from_epoch))
        if os.path.exists(weightspath):
            weights_dict = torch.load(weightspath, map_location=lambda storage, loc: storage)
            print('reload weights from {}'.format(weightspath))
            vs_model.load_state_dict(weights_dict)
            start_epoch = args.load_from_epoch + 1
        else:
            print('{} do not exist!!'.format(weightspath))
            raise NotImplementedError
    else:
        start_epoch = 1
    loss_plot = plot_scalar(name="loss", env=mode_name, rate=args.display_freq)
    lr_plot = plot_scalar(name="lr", env=mode_name, rate=args.display_freq)
    for epoch in range(start_epoch, tot_epoch):
        start_timer = time.time()
        # halve the learning rate every args.epoch_decay epochs
        if epoch % args.epoch_decay == 0:
            lr = lr / 2
            set_lr(optimizer, lr)
        for it in range(updates_per_epoch):
            vs_model.train()
            img_encoder.eval()
            # BUGFIX: the comparison was inverted (>=), which drew from the TRAIN
            # split with probability 1 - prob_use_train. Draw from train with
            # probability prob_use_train instead. Also use the Python-3 next()
            # builtin instead of the removed .next() method.
            # NOTE: re-creating the iterator each step reshuffles per draw and is
            # slow for large datasets; kept to preserve the original sampling.
            if np.random.random() < prob_use_train:
                images, _, _, txt_code, txt_len, _ = next(iter(dataloader_train))
            else:
                images, _, _, txt_code, txt_len, _, _, _ = next(iter(dataloader_test))
            img_224 = pre_process(images, transform)
            img_224 = to_device(img_224, volatile=True)
            txt_code = to_device(txt_code, volatile=True)
            # image features come from the frozen encoder; detach via .data and
            # re-wrap so gradients only flow through vs_model
            img_feat = img_encoder(img_224)
            img_feat = img_feat.squeeze(-1).squeeze(-1)
            img_feat = to_device(img_feat.data, requires_grad=True)
            sent_emb, img_emb = vs_model(txt_code, img_feat)
            cost = PairwiseRankingLoss(img_emb, sent_emb, args.margin)
            optimizer.zero_grad()
            cost.backward()
            optimizer.step()
            cost_val = cost.cpu().data.numpy().mean()
            loss_plot.plot(cost_val)
            lr_plot.plot(lr)
            end_timer = time.time() - start_timer
            if it % args.verbose_per_iter == 0:
                print('[epoch %d/%d iter %d]: lr = %.6f cost_val = %.5f' % (epoch, tot_epoch, it, lr, cost_val))
                sys.stdout.flush()
        if epoch % args.save_freq == 0:
            # move to CPU before serializing so the checkpoint loads anywhere
            vs_model = vs_model.cpu()
            torch.save(vs_model.state_dict(), os.path.join(model_folder, 'W_epoch{}.pth'.format(epoch)))
            print('save weights at {}'.format(model_folder))
            vs_model = vs_model.cuda(args.device_id)
        print('epoch {}/{} finished [time = {}s] ...'.format(epoch, tot_epoch, end_timer))
def PairwiseRankingLoss(im, sent, margin):
    """Bidirectional max-margin ranking loss over an image/sentence batch.

    Args:
        im, sent: (batch, dim) embeddings where row i of `im` matches row i of `sent`.
        margin: hinge margin.

    Returns:
        Scalar tensor: sum of hinge losses over all non-matching pairs in both
        ranking directions (matching pairs on the diagonal are excluded).
    """
    # image-sentence score matrix; the diagonal holds the matching pairs
    scores = torch.mm(im, sent.transpose(1, 0))
    diagonal = scores.diag()
    batch_size = scores.size()[0]
    # hinge losses for both ranking directions (vectorized: replaces the old
    # Variable zeros tensors and the O(n) Python loop that zeroed the diagonal)
    cost_s = torch.clamp((margin - diagonal).expand_as(scores) + scores, min=0)
    cost_im = torch.clamp((margin - diagonal).expand_as(scores).transpose(1, 0) + scores, min=0)
    # mask out the matching pairs themselves
    off_diag = 1.0 - torch.eye(batch_size, dtype=scores.dtype, device=scores.device)
    return ((cost_s + cost_im) * off_diag).sum()
def resize_images(img, dst_shape):
    # Resize a single HxWxC image to dst_shape = (height, width).
    # NOTE(review): scipy.misc.imresize was deprecated in scipy 1.0 and removed
    # in 1.3, so this requires an old scipy (with Pillow installed); consider
    # migrating to PIL.Image.resize or skimage.transform.resize.
    tmp = scipy.misc.imresize(img, dst_shape)
    return tmp
def get_transform(img_encoder):
    """Build the torchvision preprocessing pipeline for `img_encoder`.

    Converts an HxWxC image array to a CHW tensor and normalizes it with the
    encoder's own channel statistics (`img_encoder.mean` / `img_encoder.std`).
    """
    # the unused img_tensor_list accumulator was removed
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=img_encoder.mean, std=img_encoder.std),
    ])
def _process(inputs):
    """Resize one image to 299x299 and apply the transform (helper for pre_process).

    `inputs` is a (image, transform) pair; returns the transformed tensor.
    """
    image, trans = inputs
    resized = resize_images(image, (299, 299))
    return trans(resized)
def pre_process(images, trans=None):
    """Map generator output in [-1, 1] (NCHW) to a batch of encoder-ready tensors.

    Rescales to [0, 255], converts to NHWC, resizes each image to 299x299 and
    applies `trans` (ToTensor + normalization) per image.

    Returns:
        Stacked tensor of shape (batch, C, 299, 299).
    """
    images = (images + 1) / 2 * 255
    images = images.permute(0, 2, 3, 1)
    # process each image individually, then stack back into a batch
    # (the redundant second loop copying `targets` into a list was removed)
    tensors = [_process((images[idx], trans)) for idx in range(images.shape[0])]
    return torch.stack(tensors, 0)
|
{"hexsha": "8892c27e328788c7cde3f51068e89f534f6cf790", "size": 5801, "ext": "py", "lang": "Python", "max_stars_repo_path": "gan/neuralDist/trainNeuralDist.py", "max_stars_repo_name": "pedroryzewski/TCC", "max_stars_repo_head_hexsha": "ba5167928d5a14dc0c6e144d0927c050090950a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gan/neuralDist/trainNeuralDist.py", "max_issues_repo_name": "pedroryzewski/TCC", "max_issues_repo_head_hexsha": "ba5167928d5a14dc0c6e144d0927c050090950a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gan/neuralDist/trainNeuralDist.py", "max_forks_repo_name": "pedroryzewski/TCC", "max_forks_repo_head_hexsha": "ba5167928d5a14dc0c6e144d0927c050090950a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5297619048, "max_line_length": 113, "alphanum_fraction": 0.6412687468, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1361}
|
export snr, smooth, smooth!, abs_max, abs_max!, standardize, standardize!
export mad, std_threshold
"""
snr(A)
Signal to noise ratio of cross-correlations in matrix `A`.
Follows method of Clarke et. al, 2011. Measures SNR at each point.
"""
function snr(A::AbstractArray)
Nrows,Ncols = size(A)
A_mean = mean(A,dims=2)
# calculate noise and envelope functions
sigma = mean(A.^2,dims=2) .- A_mean.^2
sigma .= sqrt.(sigma./ (Ncols-1))
s = abs.(A_mean .+ im .* hilbert(A_mean))
return s ./ sigma
end
snr(C::CorrData) = snr(C.corr)
"""
smooth(A, half_win)
Smooth array `A` with half-window `half_win` (defaults to 3).
"""
function smooth!(A::AbstractArray, half_win::Int=3, dims::Int=1)
T = eltype(A)
window_len = 2 * half_win + 1
csumsize = tuple(collect(size(A)) .+ [i==1 ? 2 * (window_len - 1) + 1 : 0 for i in 1:ndims(A)]...)
csum = similar(A,T,csumsize)
csum[1:window_len,:] .= zero(T)
csum[end - window_len + 1:end,:] .= zero(T)
csum[window_len+1:end-window_len + 1,:] .= A
csum .= cumsum(csum,dims=dims)
weight = similar(A,T,size(A,1))
weight[1:half_win] = T.(window_len ÷ 2 + 1:window_len - 1)
weight[half_win + 1: end - half_win] .= T(window_len)
weight[end-half_win:end] = T.(window_len:-1:window_len ÷ 2 + 1)
A[:,:] .= (csum[window_len+half_win+1:end-half_win,:] .- csum[half_win+1:end-window_len-half_win,:]) ./ weight
return nothing
end
"""
    smooth(A, half_win=3, dims=1)

Non-mutating version of [`smooth!`](@ref): smooths a copy of `A` and returns it.
"""
function smooth(A::AbstractArray, half_win::Int=3, dims::Int=1)
    out = deepcopy(A)
    smooth!(out, half_win, dims)
    return out
end
"""
abs_max!(A)
Returns array `A` divided by its absolute maximum value.
"""
function abs_max!(A::AbstractArray)
maxabs = maximum(abs.(A),dims=1)
if any(maxabs .== 0)
throw(DomainError("All zero column leads to NaN"))
end
A ./= maxabs
return nothing
end
# Non-mutating variant: normalize a copy and return it.
function abs_max(A::AbstractArray)
    out = deepcopy(A)
    abs_max!(out)
    return out
end
# CorrData convenience overloads operating on the correlation matrix.
abs_max!(C::CorrData) = abs_max!(C.corr)
abs_max(C::CorrData) = (out = deepcopy(C); abs_max!(out); return out)
"""
standardize!(A)
Demean and standardize array `A` to unit std.
"""
function standardize!(A::AbstractArray)
A .-= mean(A,dims=1)
A ./= std(A,dims=1)
return nothing
end
standardize(A::AbstractArray) = (U = deepcopy(A);standardize!(U);return U)
standardize!(C::CorrData) = standardize!(C.corr)
standardize(C::CorrData) = (U = deepcopy(C);standardize!(U);return U)
"""
mad(A)
Median Absolute Deviation of array `A`.
MAD = median(|Xi- median(A)|)
"""
function mad(A::AbstractArray)
return median(abs.(A .- median(A,dims=1)),dims=1)
end
mad(C::CorrData) = mad(C.corr)
"""
std_threshold(A,thresh)
Returns indices of cols of `A` where max(abs.A[:,col]))/std(A[:,col]) < `max_std`.
"""
function std_threshold(A::AbstractArray, max_std::Real)
stds = std(A,dims=1)[1,:]
maxs = maximum(abs.(A),dims=1)[1,:]
threshs = maxs ./ stds
ind = []
for ii = 1:length(threshs)
if threshs[ii] < max_std
append!(ind,ii)
end
end
return ind
end
|
{"hexsha": "691666aa8eeb7fc041ae0be3065515a9ba6e3eae", "size": 3025, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/tools.jl", "max_stars_repo_name": "Denolle-Lab/SeisNoise.jl", "max_stars_repo_head_hexsha": "0eba10ca056ddbc73da50a137b4d2c13cea956ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2019-07-03T19:02:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T12:54:32.000Z", "max_issues_repo_path": "src/tools.jl", "max_issues_repo_name": "Denolle-Lab/SeisNoise.jl", "max_issues_repo_head_hexsha": "0eba10ca056ddbc73da50a137b4d2c13cea956ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2019-11-30T14:55:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T23:08:30.000Z", "max_forks_repo_path": "src/tools.jl", "max_forks_repo_name": "Denolle-Lab/SeisNoise.jl", "max_forks_repo_head_hexsha": "0eba10ca056ddbc73da50a137b4d2c13cea956ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-07-01T18:31:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T02:42:25.000Z", "avg_line_length": 28.2710280374, "max_line_length": 114, "alphanum_fraction": 0.6323966942, "num_tokens": 985}
|
import re

from astropy import units as u
from astropy.coordinates import SkyCoord, AltAz, Angle
def azToDirection(az):  # azimut to direction
    """Convert an azimuth in degrees to a cardinal/intercardinal direction name.

    Accepts any real-valued azimuth (values < 0 or >= 360 are normalized with
    a modulo; the original single-step +/-360 wrap raised UnboundLocalError
    for inputs such as 720 or -400).

    :param az: azimuth in degrees (anything float-convertible)
    :return: one of 'North', 'North-East', 'East', 'South-East', 'South',
             'South-West', 'West', 'North-West'
    """
    az = float(az) % 360  # normalize to [0, 360)
    directions = ('North', 'North-East', 'East', 'South-East',
                  'South', 'South-West', 'West', 'North-West')
    # each sector is 45 degrees wide, centered on its direction name
    return directions[int(((az + 22.5) % 360) // 45)]
def SpTypeInPickles(insptype):
    """Wrap `insptype` in a highlighted <abbr> tag when its spectral type is
    present in the Pickles Library of Stellar Spectra; otherwise return the
    input unchanged."""
    match = re.search(r'[OBA]\d+[IV]+s?[a-z]*', insptype.replace(' ', ''))
    if match is None:
        # no recognizable O/B/A spectral type in the string
        return insptype
    sptype = match.group()
    picklesdb = ['o5v', 'o8iii', 'o9v', 'a0i', 'a0iii', 'a0iv', 'a0v', 'a2i', 'a2v', 'a3iii', 'a3v', 'a47iv', 'a5iii', 'a5v', 'a7iii', 'a7v', 'b0i', 'b0v', 'b12iii', 'b1i', 'b1v', 'b2ii', 'b2iv', 'b3i', 'b3iii', 'b3v', 'b57v', 'b5i', 'b5ii', 'b5iii', 'b6iv', 'b8i', 'b8v', 'b9iii', 'b9v']
    if sptype.lower() not in picklesdb:
        return insptype
    return '<abbr class="good" title="The spectral type ' + sptype.upper() + ' is in the Pickles Library of Stellar Spectra">' + insptype + '</abbr>'
def starload(star, target, maxseparation, altaz):  # Compute an OrderedDict : separation, alt, az
    """Enrich a star record with separation from `target`, Alt/Az and airmass.

    Mutates and returns `star` (a dict-like row), or returns None when the star
    is rejected: too reddened, outside the declination band, farther than
    `maxseparation` degrees, or more than 10 degrees below the horizon.

    NOTE(review): the `altaz` parameter (an AltAz frame) is reassigned below to
    the transformed coordinate — consider a distinct local name.
    """
    # try:
    maxebv = 5  # maximum accepted colour excess E(B-V)
    star['Name'] = star['Name'].strip()
    # normalize missing/negative reddening to zero
    if (star['EB-V'] is None or float(star['EB-V']) < 0): star['EB-V'] = '0'
    if (maxebv is not None and float(star['EB-V']) > maxebv): return None  # maximum Eb-v
    star['Sky'] = SkyCoord(star['RA_dec'], star['de_dec'], unit='deg')
    # cheap declination-band rejection before the full angular-separation test
    # (assumes star['de_dec'] is a string of degrees — TODO confirm)
    if (not Angle(star['de_dec'] + 'd').is_within_bounds((target.sky.dec.deg - maxseparation) * u.deg, (target.sky.dec.deg + maxseparation) * u.deg)): return None
    star['Separation'] = target.sky.separation(star['Sky']).deg
    if star['Separation'] > maxseparation: return None
    altaz = star['Sky'].transform_to(altaz)
    star['Alt'] = float(altaz.alt.to_string(decimal=True))
    star['Az'] = float(altaz.az.to_string(decimal=True))
    # reject stars well below the horizon
    if (star['Alt'] <= -10): return None
    star['Delta'] = star['Alt'] - target.alt
    star['secz'] = float(altaz.secz)  # airmass (secant of zenith angle)
    # trim overly long B-V strings to 3 decimals for display
    if len(star['B-V']) > 5: star['B-V'] = str(round(float(star['B-V']), 3))
    # except:
    #     return None
    return star
|
{"hexsha": "d393986883dcc6098350bc5698987cda4f06bdf5", "size": 2491, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/functions.py", "max_stars_repo_name": "serge-golovanow/SpectroStars", "max_stars_repo_head_hexsha": "19e1ca2fbecd8dd814ceea18a54c54603b9efbf4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-03-27T17:39:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-31T00:09:07.000Z", "max_issues_repo_path": "lib/functions.py", "max_issues_repo_name": "serge-golovanow/SpectroStars", "max_issues_repo_head_hexsha": "19e1ca2fbecd8dd814ceea18a54c54603b9efbf4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/functions.py", "max_forks_repo_name": "serge-golovanow/SpectroStars", "max_forks_repo_head_hexsha": "19e1ca2fbecd8dd814ceea18a54c54603b9efbf4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.0, "max_line_length": 289, "alphanum_fraction": 0.5997591329, "include": true, "reason": "from astropy", "num_tokens": 889}
|
'''
wrapper for change detector that is called by 3D solver based on tomostream
'''
from roi_utils.roi import roi_search, load_seg_nn, roi_search_subtraction, rescale_vol_for_NN
from roi_utils.patches import Patches
from roi_utils.voxel_processing import modified_autocontrast
from roi_utils.ADet4RoI import roi_search_ADet
import os
import numpy as np
detect_flag = True  # global toggle: whether change detection is active
p_fpath = '/data/2021-12/streaming_rois'  # output folder for RoI csv/hdf5 dumps
from datetime import datetime as dt
N_SELECT_MAX = 1  # number of RoIs kept after ranking by importance
CYLINDER_WIDTH = 0.6  # cylindrical mask width ratio passed to the patch filter
CYLINDER_HEIGHT = 0.7  # cylindrical mask height ratio passed to the patch filter
SEARCH_DOWNSAMPLING = 2  # isotropic downsampling factor applied before RoI search
#CHECK# this volume passed is a numpy array
def change_detector(vol_t1, vol_t2, model, mode_flag = 1):
    """Locate regions of change between two reconstructed volumes.

    Parameters
    ----------
    vol_t1, vol_t2 : np.ndarray
        Volumes at two time steps; must have identical shapes.
    model : object
        Segmentation model forwarded to roi_search (used when mode_flag == 1).
    mode_flag : int
        0 -> subtraction-based, 1 -> segmentation-based,
        2 -> anomaly-detection-based RoI search.

    Returns
    -------
    np.ndarray
        Change-center coordinates scaled back to the full-resolution grid, or
        the (downsampled) volume center when nothing is detected.
    """
    # some things to keep track of
    orig_shape = vol_t2.shape  # fixed: previously bound the whole array, not its shape
    upsample_fac = SEARCH_DOWNSAMPLING
    sbin = tuple([slice(None, None, SEARCH_DOWNSAMPLING)] * 3)
    vol_t1 = vol_t1[sbin]
    vol_t2 = vol_t2[sbin]
    # to-do: if projection shape changes within gui, this will happen often.
    # Should we return without error?
    print_out = "%s not identical to %s" % (str(vol_t1.shape), str(vol_t2.shape))
    assert vol_t1.shape == vol_t2.shape, print_out

    # adjust contrast, then rescale both volumes to [0, 255]
    h = modified_autocontrast(vol_t1, s = 0.01, normalize_sampling_factor = 4)
    vol_t1 = np.clip(vol_t1, *h)
    h = modified_autocontrast(vol_t2, s = 0.01, normalize_sampling_factor = 4)
    vol_t2 = np.clip(vol_t2, *h)
    min_, max_ = vol_t1[::4, ::4, ::4].min(), vol_t1[::4, ::4, ::4].max()
    vol_t1 = 255.0 * (vol_t1 - min_) / (max_ - min_ + 1.e-12)
    min_, max_ = vol_t2[::4, ::4, ::4].min(), vol_t2[::4, ::4, ::4].max()
    vol_t2 = 255.0 * (vol_t2 - min_) / (max_ - min_ + 1.e-12)

    if mode_flag == 0:
        centers, importance, bbox_start, bbox_width, RoIs = roi_search_subtraction(vol_t1, vol_t2)  # subtraction based
    elif mode_flag == 1:
        centers, importance, bbox_start, bbox_width, RoIs = roi_search(vol_t1, vol_t2, model=model, mbsz=8)  # segmentation based
    elif mode_flag == 2:
        centers, importance, bbox_start, bbox_width, RoIs = roi_search_ADet(vol_t2)  # model has been hard coded
    else:
        # fixed: `pass` here left the result variables unbound and crashed below
        raise ValueError("mode_flag must be 0, 1 or 2, got %r" % (mode_flag,))

    # Zliu - add line here to dump csv for trouble-shooting
    RoIs.to_csv(os.path.join(p_fpath, dt.now().strftime("%Y%m%d-%H%M%S") + ".csv"), index=False)

    if len(centers) == 0:
        # no detections: fall back to the (downsampled) volume center
        return np.asarray([[np.asarray(vol_t2.shape) // 2]])

    p = Patches(vol_t1.shape, initialize_by = "data", points = bbox_start, widths = bbox_width)
    p.add_features(importance, names = ["importance"])
    p.add_features(centers * upsample_fac, names = ["cent_z", "cent_y", "cent_x"])
    # rescale coordinates back to original shape
    #p = p.rescale(upsample_fac, orig_shape)
    p = p.filter_by_cylindrical_mask(mask_ratio = CYLINDER_WIDTH, height_ratio = CYLINDER_HEIGHT)
    change_locations = p.select_by_feature(N_SELECT_MAX, ife = 0)
    patches_fname = os.path.join(p_fpath, dt.now().strftime("%Y%m%d-%H%M%S") + ".hdf5")
    p.dump(patches_fname)
    print("Done saving roi file to: %s" % patches_fname)

    # Nothing below this line should be edited!
    #bboxes = p.slices()
    #for bbox in bboxes:
    #    vol_t2[tuple(bbox)] = 0.3*vol_t2[tuple(bbox)]
    change_locations = p.centers()
    return np.asarray(change_locations) * upsample_fac
if __name__ == "__main__":
    # Ad-hoc debugging entry point: drops into pdb with a synthetic volume.
    import pdb; pdb.set_trace()
    # rec = np.zeros((250, 245, 245))
    rec = np.random.normal(0, 1.0, (753, 511, 511))
|
{"hexsha": "b80bb9d7442ba7c2982b6dabd1a39508ee1e8d40", "size": 3405, "ext": "py", "lang": "Python", "max_stars_repo_path": "scratchpad/tomostream_roi/tomostream/change_detector.py", "max_stars_repo_name": "arshadzahangirchowdhury/TomoEncoders", "max_stars_repo_head_hexsha": "9c2b15fd515d864079f198546821faee5d78df17", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scratchpad/tomostream_roi/tomostream/change_detector.py", "max_issues_repo_name": "arshadzahangirchowdhury/TomoEncoders", "max_issues_repo_head_hexsha": "9c2b15fd515d864079f198546821faee5d78df17", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scratchpad/tomostream_roi/tomostream/change_detector.py", "max_forks_repo_name": "arshadzahangirchowdhury/TomoEncoders", "max_forks_repo_head_hexsha": "9c2b15fd515d864079f198546821faee5d78df17", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2234042553, "max_line_length": 128, "alphanum_fraction": 0.6798825257, "include": true, "reason": "import numpy", "num_tokens": 1053}
|
import re
import collections.abc
import datetime
import hashlib
import logging
import os
import time
import random
from copy import deepcopy
from concurrent.futures import (
ProcessPoolExecutor,
as_completed,
ThreadPoolExecutor,
)
import github3
import networkx as nx
import requests
from xonsh.lib.collections import ChainDB, _convert_to_dict
from .all_feedstocks import get_all_feedstocks
from .utils import parse_meta_yaml, setup_logger
from .git_utils import (
refresh_pr,
is_github_api_limit_reached,
close_out_labels,
)
logger = logging.getLogger("conda_forge_tick.make_graph")

# Splits a requirement spec on whitespace or pin operators (>, <, =, [) so the
# first token is the bare package name; raw string fixes the invalid "\[" escape.
pin_sep_pat = re.compile(r" |>|<|=|\[")

NUM_GITHUB_THREADS = 4  # worker threads used for GitHub API calls
def get_attrs(name, i):
    """Fetch and parse a feedstock's recipe, returning its graph-node payload.

    Downloads ``meta.yaml`` for feedstock `name` from conda-forge on GitHub and
    extracts requirements, package name/version, source url and hash type.

    :param name: feedstock name (without the ``-feedstock`` suffix)
    :param i: position of this feedstock in the work list (logging only)
    :return: dict of node attributes; ``sub_graph["bad"]`` carries an error
             string when fetching or parsing failed.
    """
    sub_graph = {
        "time": time.time(),
        "feedstock_name": name,
        # All feedstocks start out as good
        "bad": False,
    }
    logger.info((i, name))
    r = requests.get(
        "https://raw.githubusercontent.com/"
        "conda-forge/{}-feedstock/master/recipe/"
        "meta.yaml".format(name)
    )
    if r.status_code != 200:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning(
            "Something odd happened when fetching recipe "
            "{}: {}".format(name, r.status_code)
        )
        sub_graph["bad"] = "make_graph: {}".format(r.status_code)
        return sub_graph
    text = r.content.decode("utf-8")
    sub_graph["raw_meta_yaml"] = text
    # render for both platforms and layer the results
    yaml_dict = ChainDB(
        *[parse_meta_yaml(text, arch=arch) for arch in ["osx", "linux"]]
    )
    if not yaml_dict:
        logger.warning(
            "Something odd happened when parsing recipe " "{}".format(name)
        )
        sub_graph["bad"] = "make_graph: Could not parse"
        return sub_graph
    sub_graph["meta_yaml"] = _convert_to_dict(yaml_dict)
    # TODO: Write schema for dict
    req = yaml_dict.get("requirements", set())
    if req:
        build = list(
            req.get("build", []) if req.get("build", []) is not None else []
        )
        host = list(
            req.get("host", []) if req.get("host", []) is not None else []
        )
        run = list(
            req.get("run", []) if req.get("run", []) is not None else []
        )
        req = build + host + run
        # strip version pins, keep lower-cased package names only
        req = set(
            pin_sep_pat.split(x)[0].lower() for x in req if x is not None
        )
    sub_graph["req"] = req
    keys = [("package", "name"), ("package", "version")]
    missing_keys = [k[1] for k in keys if k[1] not in yaml_dict.get(k[0], {})]
    source = yaml_dict.get("source", [])
    if isinstance(source, collections.abc.Mapping):
        source = [source]
    source_keys = set()
    for s in source:
        if not sub_graph.get("url"):
            sub_graph["url"] = s.get("url")
        source_keys |= s.keys()
    if "url" not in source_keys:
        missing_keys.append("url")
    if missing_keys:
        logger.warning(
            "Recipe {} doesn't have a {}".format(name, ", ".join(missing_keys))
        )
        sub_graph["bad"] = "make_graph: missing {}".format(
            ", ".join(missing_keys)
        )
    for k in keys:
        if k[1] not in missing_keys:
            sub_graph[k[1]] = yaml_dict[k[0]][k[1]]
    # first hash algorithm mentioned in the source section, if any
    k = next(iter((source_keys & hashlib.algorithms_available)), None)
    if k:
        sub_graph["hash_type"] = k
    return sub_graph
def make_graph(names, gx=None):
    """Build or refresh the feedstock dependency graph.

    New feedstocks are fetched first, then existing ones ordered by staleness.
    Edges point from a requirement to the package that depends on it.

    :param names: iterable of feedstock names to (re)process
    :param gx: existing ``nx.DiGraph`` to update, or None to start fresh
    :return: the updated graph
    """
    logger.info("reading graph")
    if gx is None:
        gx = nx.DiGraph()
    new_names = [name for name in names if name not in gx.nodes]
    old_names = [name for name in names if name in gx.nodes]
    # refresh the stalest existing nodes first
    old_names = sorted(old_names, key=lambda n: gx.nodes[n].get("time", 0))
    total_names = new_names + old_names
    logger.info("start loop")
    with ProcessPoolExecutor(max_workers=20) as pool:
        futures = {
            pool.submit(get_attrs, name, i): name
            for i, name in enumerate(total_names)
        }
        for f in as_completed(futures):
            name = futures[f]
            try:
                sub_graph = f.result()
            except Exception as e:
                # logger.warn is a deprecated alias of logger.warning
                logger.warning("Error adding {} to the graph: {}".format(name, e))
            else:
                if name in new_names:
                    gx.add_node(name, **sub_graph)
                else:
                    gx.nodes[name].update(**sub_graph)
    gx2 = deepcopy(gx)
    # `.node` was removed from networkx Graph objects; iterate nodes with data
    for node, attrs in gx2.nodes(data=True):
        for dep in attrs.get("req", []):
            if dep not in gx.nodes:
                # requirement with no feedstock of its own (e.g. archived)
                gx.add_node(dep, archived=True, time=time.time())
            gx.add_edge(dep, node)
    return gx
def update_graph_pr_status(gx: nx.DiGraph) -> nx.DiGraph:
    """Refresh the stored PR JSON of every node from the GitHub API.

    Requires ``USERNAME``/``PASSWORD`` in the environment. Stops early when the
    GitHub API rate limit is reached; any other per-PR exception is re-raised.

    :param gx: the feedstock graph with ``PRed_json`` node attributes
    :return: the same graph, with PR JSON updated in place
    """
    gh = github3.login(os.environ["USERNAME"], os.environ["PASSWORD"])
    futures = {}
    node_ids = list(gx.nodes)
    # this makes sure that github rate limits are dispersed
    random.shuffle(node_ids)
    with ThreadPoolExecutor(max_workers=NUM_GITHUB_THREADS) as pool:
        for node_id in node_ids:
            node = gx.nodes[node_id]
            prs = node.get("PRed_json", {})
            for migrator, pr_json in prs.items():
                # allow for false
                if pr_json:
                    future = pool.submit(refresh_pr, pr_json, gh)
                    futures[future] = (node_id, migrator)
        for f in as_completed(futures):
            name, muid = futures[f]
            try:
                res = f.result()
                if res:
                    gx.nodes[name]["PRed_json"][muid].update(**res)
                    logger.info("Updated json for {}: {}".format(name, res["id"]))
            except github3.GitHubError as e:
                logger.critical("GITHUB ERROR ON FEEDSTOCK: {}".format(name))
                # stop submitting work once the API quota is exhausted
                if is_github_api_limit_reached(e, gh):
                    break
            except Exception as e:
                logger.critical("ERROR ON FEEDSTOCK: {}: {}".format(name, muid))
                raise
    return gx
def close_labels(gx: nx.DiGraph) -> nx.DiGraph:
    """Close out labeled PRs and drop their records from the graph.

    For every stored PR, asks GitHub (via ``close_out_labels``) whether it
    should be closed; on success removes the migrator id from ``PRed`` and its
    JSON from ``PRed_json``. Stops early on GitHub rate limiting; any other
    per-PR exception is re-raised.

    :param gx: the feedstock graph with ``PRed``/``PRed_json`` node attributes
    :return: the same graph, mutated in place
    """
    gh = github3.login(os.environ["USERNAME"], os.environ["PASSWORD"])
    futures = {}
    node_ids = list(gx.nodes)
    # this makes sure that github rate limits are dispersed
    random.shuffle(node_ids)
    with ThreadPoolExecutor(max_workers=NUM_GITHUB_THREADS) as pool:
        for node_id in node_ids:
            node = gx.nodes[node_id]
            prs = node.get("PRed_json", {})
            for migrator, pr_json in prs.items():
                # allow for false
                if pr_json:
                    future = pool.submit(close_out_labels, pr_json, gh)
                    futures[future] = (node_id, migrator)
        for f in as_completed(futures):
            name, muid = futures[f]
            try:
                res = f.result()
                if res:
                    # `.node` was removed from networkx; use `.nodes` as the
                    # rest of this module already does
                    gx.nodes[name]["PRed"].remove(muid)
                    del gx.nodes[name]["PRed_json"][muid]
                    logger.info(
                        "Closed and removed PR and branch for "
                        "{}: {}".format(name, res["id"])
                    )
            except github3.GitHubError as e:
                logger.critical("GITHUB ERROR ON FEEDSTOCK: {}".format(name))
                if is_github_api_limit_reached(e, gh):
                    break
            except Exception as e:
                logger.critical("ERROR ON FEEDSTOCK: {}: {}".format(name, muid))
                raise
    return gx
def main(args=None):
    """CLI entry point: rebuild the graph, refresh PR state, persist to disk.

    :param args: unused; accepted for a uniform CLI entry-point signature
    """
    setup_logger(logger)
    names = get_all_feedstocks(cached=True)
    # load the previously pickled graph and update it in place
    gx = nx.read_gpickle("graph.pkl")
    gx = make_graph(names, gx)
    gx = update_graph_pr_status(gx)
    gx = close_labels(gx)
    logger.info("writing out file")
    nx.write_gpickle(gx, "graph.pkl")


if __name__ == "__main__":
    main()
|
{"hexsha": "6f672b510f766ec63db2fdf2e1688577b203bee1", "size": 7570, "ext": "py", "lang": "Python", "max_stars_repo_path": "conda_forge_tick/make_graph.py", "max_stars_repo_name": "sdvillal/cf-scripts", "max_stars_repo_head_hexsha": "026a90a0a0e4e5ef09c3f0cb8e1a8659977250b2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "conda_forge_tick/make_graph.py", "max_issues_repo_name": "sdvillal/cf-scripts", "max_issues_repo_head_hexsha": "026a90a0a0e4e5ef09c3f0cb8e1a8659977250b2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "conda_forge_tick/make_graph.py", "max_forks_repo_name": "sdvillal/cf-scripts", "max_forks_repo_head_hexsha": "026a90a0a0e4e5ef09c3f0cb8e1a8659977250b2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2809917355, "max_line_length": 79, "alphanum_fraction": 0.5733157199, "include": true, "reason": "import networkx", "num_tokens": 1812}
|
\documentclass{article}
\usepackage{jobapp}
%% Contact info
\SetName{PHILLIP FRY}
\SetProfessionalTitle{A Real Person}
\SetAddress{1600 Pennsylvania Avenue, N.W. \\ Washington, DC 20500}
\SetPhone{(555) 555-5555}
\SetEmail{p.fry@dev.null}
\begin{document}
\section*{Skills \& Expertise}
\ResumeLayout
{\textbf{Vehicles}}
{Spaceships, Cars, Jetpacks}
\ResumeLayout
{\textbf{Partners}}
{Leela, Michelle, Amy, Mildred }
\medskip
\section*{Work Experience}
\WorkExperience
{Planet Express}
{New New York}
{Delivery Pilot}
{Jan 3000 -- present}
{
\item Deliver customer goods in a timely manner with high quality assurance.
\item Pilot interplanetary transportation vessels. Manager of crew and robot assets.
}
\WorkExperience
{Pizza}
{New New York}
{Delivery Specialist}
{May 1999 -- Jan 3000}
{
\item Delivered pizzas.
}
\medskip
\section*{Education}
\ResumeLayout
{1995}
{\textbf{Bachelor of Science}, Stuff \hfill \textit{University of New New York}}
\ResumeLayout
{1991}
{\textbf{GED}, Grade 12 \hfill \textit{New New York High School}}
\medskip
\section*{Awards}
\ResumeLayout
{Universe Gamma}
{Tentacle Pope of Universe Gamma \hfill \textit{https://en.wikipedia.org}}
\ResumeLayout
{Boyfriend}
{Of Leela \hfill \textit{https://en.wikipedia.org}}
\medskip
\section*{Recognition}
\ResumeLayout
{1999--3000}
{\textbf{Oldest Living Human} \hfill \textit{Earth}}
\end{document}
|
{"hexsha": "523660cac8c5352fee0406478c84693f559b014d", "size": 1619, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "example.tex", "max_stars_repo_name": "cahna/JobApp", "max_stars_repo_head_hexsha": "74d369b016e050c25d151155e3018e99726271d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example.tex", "max_issues_repo_name": "cahna/JobApp", "max_issues_repo_head_hexsha": "74d369b016e050c25d151155e3018e99726271d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example.tex", "max_forks_repo_name": "cahna/JobApp", "max_forks_repo_head_hexsha": "74d369b016e050c25d151155e3018e99726271d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6984126984, "max_line_length": 94, "alphanum_fraction": 0.641136504, "num_tokens": 468}
|
export l_bfgs_rcst
export Steighaug
export BhaskaraTop
export LimitedMemory
"""
l_bfgs_rcst(nlp; options...) - Ainda não chegamos nesse!
Este método é chamado L-BFGS com região de confiança por Steihaug-Toint.
Tenta-se resolver B_k d = - ∇f(xₖ) usando Gradientes Conjugados.
Se em algum momento a direção ficar maior que a região de confiança,
ela é truncada. Bₖ é a aproximação de memória limitada de BFGS.
Porém, deve-se guardar apenas os últimos p vetores sₖ e yₖ,
e não criar a matriz Bₖ explicitamente.
Options:
- atol: absolute tolerance for the first order condition (default: 1e-6)
- rtol: relative tolerance for the first order condition (default: 1e-6)
- max_eval: maximum number of [functions] evaluations, use ≤ 0 for unlimited (default: 1000)
- max_iter: maximum number of iterations, use ≤ 0 for unlimited (default: 0)
- max_time: maximum elapsed time in seconds, use ≤ 0 for unlimited (default: 10)
Disclaimers for the developer:
- nlp should be the only mandatory argument
- these five options are the current default for other JSO-compliant solvers
- always return a GenericExecutionStats
"""
"""
    Steighaug(gx, B, Δ)

Approximately solve the trust-region subproblem
`min gx'd + d'B d / 2  s.t. ‖d‖ ≤ Δ` with the Steihaug-Toint truncated
conjugate-gradient method. Returns the step `d`.
"""
function Steighaug(gx, B, Δ)
    # ϵ = 1e-4
    ϵ = min(0.5,(norm(gx))^0.5)*norm(gx) # tolerance suggested by Algorithm 7.1 (Nocedal & Wright)
    m = length(gx)
    zx = zeros(m)
    r = gx
    d = -r
    rx = r
    z = zx
    normr = norm(r)
    if normr < ϵ
        return z
    end
    # iterate while still inside the trust region
    k=0
    # NOTE(review): `||` keeps iterating while EITHER the residual is large OR
    # k < m; the usual stopping rule is `&&` — confirm this is intentional.
    while normr > ϵ || k < m # CG takes at most m (number of variables) directions
        dotdBd = dot(d,B*d)
        if dotdBd ≤ 0
            # stop if dⱼ is a direction of nonpositive curvature:
            # find τ ≥ 0 such that pk satisfies ‖pk‖ = Δk,
            # i.e. the intersection of the direction with the trust-region boundary
            m1, m2 = BhaskaraTop(z, d, Δ)
            return z + m1*d
        end
        dotrr = dot(r,r)
        α = dotrr/dotdBd
        zx = z + α*d
        if norm(zx) ≥ Δ
            # stop if zⱼ₊₁ violates the trust-region bound:
            # find τ ≥ 0 such that pk satisfies ‖pk‖ = Δk,
            # i.e. the intersection of the direction with the trust-region boundary
            m1, m2 = BhaskaraTop(z, d, Δ)
            return z + m1*d
        end
        rx = r + α*B*d # Conjugate Gradient residual update
        # if α = 0 => rx = r, β = 1 (just below)
        if norm(rx) < ϵ
            return zx
        end
        β = dot(rx,rx)/dotrr
        d = -rx+ β*d
        r = rx
        normr = norm(r)
        z = zx
        k+=1
    end
    if k >= m
        #@error ("did not reach the boundary; direction is a multiple of itself")
        # NOTE(review): reaching here discards the current iterate `z` and
        # returns a zero step even when the residual test converged — confirm
        # that returning `z` was not intended instead.
        return zeros(m)
    end
end
"""
    BhaskaraTop(z, d, Δ)

Solve `‖z + τ d‖ = Δ` for `τ` with the quadratic formula and return the pair
`(m1, m2)` where `m1 ≥ m2`. Emits a warning and returns `nothing` when the
discriminant is negative (no real intersection with the trust-region boundary).
"""
function BhaskaraTop(z, d, Δ)
    # coefficients of ‖z + τ d‖² - Δ² = a τ² + b τ + c
    a = dot(d, d)
    b = 2dot(z, d)
    c = dot(z, z) - Δ^2
    disc = b^2 - 4 * a * c
    if disc < 0
        @warn("Δ<0")
        return nothing
    end
    root = sqrt(disc)
    t1 = (-b + root) / 2a
    t2 = (-b - root) / 2a
    return max(t1, t2), min(t1, t2)
end
# L-BGFS-RC-Steighaug-Toint
"""
    l_bfgs_rcst(nlp; atol=1e-6, rtol=1e-6, max_eval=1000, max_iter=0, max_time=10.0)

Limited-memory BFGS with a Steihaug-Toint trust-region subproblem solver.
At each iteration `Bₖ d = -∇f(xₖ)` is solved approximately by truncated CG
(`Steighaug`); `Bₖ` is updated with BFGS (dense for the first `m` iterations,
compact limited-memory form afterwards). Returns a `GenericExecutionStats`.
"""
function l_bfgs_rcst(
    nlp::AbstractNLPModel;
    atol::Real = 1e-6,
    rtol::Real = 1e-6,
    max_eval::Int = 1000,
    max_iter::Int = 0,
    max_time::Float64 = 10.0
)
    if !unconstrained(nlp)
        error("Problem is not unconstrained")
    end
    # given the starting point
    x = copy(nlp.meta.x0)
    f(x) = obj(nlp, x)
    ∇f(x) = grad(nlp, x)
    fx = f(x)
    ∇fx = ∇f(x)
    # initial Hessian approximation
    n = length(x)
    # trust region radius
    Δ = 1.0
    B = Matrix(1.0*I, n, n)
    # one warm-up step to pick the scaled initial matrix γI
    s = Steighaug(∇fx, B, Δ)
    y = ∇f(x.+s) - ∇fx
    γ = dot(y,y)/dot(s,y) # see Nocedal & Wright p. 178
    B = Matrix(γ*I, n, n)
    # allocate circular buffers for the limited-memory pairs (s, y)
    if n < 5
        m_vetores = n
    else
        m_vetores = 5
    end
    S = zeros(n, m_vetores)
    Y = zeros(n, m_vetores)
    # convergence tolerance
    ϵ = atol + rtol * norm(∇fx)
    # parameters η and r
    η = 0.01 #∈ (0,1e-3)
    r = 0.001 #∈ (0,1)
    t₀ = time()
    iter = 0
    Δt = time() - t₀
    solved = norm(∇fx) < ϵ # First order stationary
    tired = neval_obj(nlp) ≥ max_eval > 0 ||
        iter ≥ max_iter > 0 ||
        Δt ≥ max_time > 0
    status = :unknown
    # log_header is up for some rewrite in the future. For now, it simply prints the column names with some spacing
    @info log_header(
        [:iter, :fx, :ngx, :nf, :Δt],
        [Int, Float64, Float64, Int, Float64],
        hdr_override=Dict(:fx => "f(x)", :ngx => "‖∇f(x)‖", :nf => "#f")
    )
    # log_row uses the type information of each value, thus we use `Any` here.
    @info log_row(
        Any[iter, fx, norm(∇fx), neval_obj(nlp), Δt]
    )
    # main trust-region loop
    while !(solved || tired)
        # compute sₖ by solving the subproblem
        s = Steighaug(∇fx, B, Δ)
        y = ∇f(x.+s) - ∇fx
        if norm(y) < 10e-18
            status=:small_step
        end
        # actual vs. predicted reduction ratio
        ared = fx - f(x.+s)
        pred = -(dot(∇fx, s) + 1/2 * dot(s, B*s))
        ρ = ared/pred
        if ρ < η
            # poor model agreement: shrink the trust region, reject the step
            Δ = Δ/2
            if Δ < 10e-50
                status =:user
            end
        else
            # accept the step
            x = x + s
            fx = f(x)
            ∇fx = ∇f(x)
            if ρ > 0.75 && norm(s) > 0.8 * Δ
                Δ = 2*Δ
                if Δ > 10e50 # keeps the radius from growing without bound
                    status =:not_desc
                end
            end
        end
        if status != :unknown #small_step
            break
        end
        if iter < m_vetores
            # dense BFGS updates until the memory buffers are full
            yBs = y .-B*s
            if abs(dot(s,yBs)) > r*norm(s,2)*norm(yBs,2)
                # (6.26) - skip the update when the denominator is too small
                B = B + (yBs*yBs')/dot(yBs,s)
            end
            S[:,iter+1] = s
            Y[:,iter+1] = y
        else
            yBs = y .-B*s
            if abs(dot(s,yBs)) > r*norm(s,2)*norm(yBs,2) # also guards against both being zero
                # (6.26) - skip the update when the denominator is too small
                # limited-memory compact update - LBFGS
                δₖ = dot(y,y)/dot(s, y)
                B = LimitedMemory(δₖ, S, Y, iter, n, m_vetores)
            end
            # overwrite the oldest column of the circular buffers
            cpatu = mod(iter, m_vetores) + 1
            S[:,cpatu] = s
            Y[:,cpatu] = y
        end
        iter+= 1
        Δt = time() - t₀
        solved = norm(∇fx) < ϵ # First order stationary
        tired = neval_obj(nlp) ≥ max_eval > 0|| iter ≥ max_iter > 0 ||
            Δt ≥ max_time > 0 # Excess time, iteration, evaluations
        @info log_row(
            Any[iter, fx, norm(∇fx), neval_obj(nlp), Δt]
        )
    end
    if solved
        status = :first_order
    elseif tired
        if neval_obj(nlp) ≥ max_eval > 0
            status = :max_eval
        elseif iter ≥ max_iter > 0
            status = :max_iter
        elseif Δt ≥ max_time > 0
            status = :max_time
        end
    end
    return GenericExecutionStats(
        status,
        nlp,
        solution=x,
        objective=f(x),
        dual_feas=norm(∇fx),
        elapsed_time=Δt,
        iter=iter
    )
end
"""
    LimitedMemory(δₖ, S, Y, iter, n, m)

Form the compact-representation limited-memory BFGS matrix
`Bₖ = δₖ I - [δₖSₖ Yₖ] M⁻¹ [δₖSₖ Yₖ]'` with `M = [δₖSₖ'Sₖ Lₖ; Lₖ' -Dₖ]`
(Nocedal & Wright compact form). `S`, `Y` are circular buffers of the last
`m` pairs; `iter` locates the oldest column.
"""
function LimitedMemory(δₖ, S, Y, iter, n, m)
    Dₖ = zeros(m,m)
    Lₖ = zeros(m,m)
    B1 = δₖ.*I(n)
    # rotate the circular buffers so the oldest pair comes first
    inicio = mod(iter, m) + 1
    Sₖ= [S[:, inicio:m] S[:, 1:inicio-1]]
    Yₖ = [Y[:, inicio:m] Y[:, 1:inicio-1]]
    # Sₖ= [S[:, inicio-1:-1:1] S[:, m:-1:inicio] ] #sey_inverso
    # Yₖ = [Y[:, inicio-1:-1:1] Y[:, m:-1:inicio] ]
    δS = δₖ.*Sₖ
    B2 = [δS Yₖ]
    # NOTE(review): Lₖ and Dₖ are filled from the UNROTATED S, Y columns while
    # B2 uses the rotated Sₖ, Yₖ — confirm the two orderings are meant to differ.
    for i=1:m, j=1:m
        if i>j
            Lₖ[i,j] = dot(S[:,i], Y[:,j])
        end
        if i==j
            Dₖ[i,j] = dot(S[:,i], Y[:,j])
        end
    end
    # B3 = inv([δS'*Sₖ Lₖ; Lₖ' -Dₖ])
    B4 = [δS';Yₖ']
    # solve with `\\` instead of forming the explicit inverse
    return B1 - B2*(([δS'*Sₖ Lₖ; Lₖ' -Dₖ])\B4)
end
"""
    Unrolling(B, S, Y, iter, n, m)

Update `B` by unrolling the BFGS formula (Nocedal & Wright, p. 184) over the
stored pairs in `S` and `Y`. `iter` is kept for interface compatibility; the
rotated views it selected (`Sₖ`, `Yₖ`) were dead code and have been removed.

NOTE(review): the inner sum runs over `j = 1:m`, which includes `j = i`
(where `a[:,i]` is still zero but `b[:,i]` is already set) — confirm this
matches the intended `j = 1:i-1` recursion of the textbook formula.
"""
function Unrolling(B, S, Y, iter, n, m)
    # Unrolling the BFGS formula - Nocedal p.184
    a = zeros(n,m)
    b = zeros(n,m)
    for i = 1:m
        b[:,i] = Y[:,i]/sqrt(dot(Y[:,i], S[:,i]))
        a[:,i] = B*S[:,i] + sum(dot(b[:,j],S[:,i])*b[:,j] -
                                    dot(a[:,j],S[:,i])*a[:,j]
                                        for j=1:m)
        a[:,i] = a[:,i]/sqrt(dot(S[:,i], a[:,i]))
    end
    return B + sum(b[:,i]*b[:,i]' - a[:,i]*a[:,i]' for i=1:m)
end
|
{"hexsha": "f9fe58f57bdbefaaedbf7914347524dd7f37c9ce", "size": 8224, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/l_bfgs_rcst.jl", "max_stars_repo_name": "FKrukoski/Projeto2Solvers.jl", "max_stars_repo_head_hexsha": "8d48093e9bc1fca9470f568c2415df7bdbaa1672", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/l_bfgs_rcst.jl", "max_issues_repo_name": "FKrukoski/Projeto2Solvers.jl", "max_issues_repo_head_hexsha": "8d48093e9bc1fca9470f568c2415df7bdbaa1672", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/l_bfgs_rcst.jl", "max_forks_repo_name": "FKrukoski/Projeto2Solvers.jl", "max_forks_repo_head_hexsha": "8d48093e9bc1fca9470f568c2415df7bdbaa1672", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-21T15:00:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T15:00:49.000Z", "avg_line_length": 26.358974359, "max_line_length": 115, "alphanum_fraction": 0.5171449416, "num_tokens": 3007}
|
import category_theory.category category_theory.epi_mono tactic-- this transitively imports
open category_theory
universes v u
variables (C : Type u) [category.{v} C]
/-Prove Lemma 1.2.11 by proving either (i) or (i') and either (ii) or (ii'),
then arguing by duality. Conclude that the monomorphisms in any category
define a subcategory of that category and dually that the epimorphisms also
define a subcategory. -/
/-- Lemma 1.2.11(i): if `f : X ⟶ Y` and `g : Y ⟶ Z` are monomorphisms, then so
is the composite `f ≫ g : X ⟶ Z`. -/
lemma mono_comp_mono (X Y Z : C) (f : X ⟶ Y) (g : Y ⟶ Z) [hg : mono g] [hf : mono f]:
  mono (f ≫ g) :=
begin
  constructor, -- `mono` is a one-field structure, so this reduces to its cancellation property
  intros Z h k hcomp,
  -- reassociate so the outer morphism `g` can be cancelled first
  repeat {rw ← category.assoc at hcomp},
  rw cancel_mono g at hcomp,
  rw cancel_mono f at hcomp,
  exact hcomp,
end
/-- Lemma 1.2.11(ii): if `f : X ⟶ Y` and `g : Y ⟶ Z` are morphisms such that
`f ≫ g` is monic, then `f` is monic. -/
lemma comp_mono_mono (X Y Z : C) (f : X ⟶ Y) (g : Y ⟶ Z) [hfg : mono (f ≫ g)]:
  mono f :=
begin
  constructor,
  intros Z h k hf,
  -- reduce equality after `f` to equality after the monomorphism `f ≫ g`
  rw ← cancel_mono (f ≫ g),
  repeat {rw ← category.assoc},
  rw hf,
end
|
{"author": "agusakov", "repo": "riehl-solutions", "sha": "3cba620de377649e9269c53e17a65cfccd7db651", "save_path": "github-repos/lean/agusakov-riehl-solutions", "path": "github-repos/lean/agusakov-riehl-solutions/riehl-solutions-3cba620de377649e9269c53e17a65cfccd7db651/src/category_theory_stuff/solutions/chapter_1/section_2.lean"}
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import pickle
import random
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from threading import Semaphore, Condition
from typing import Any, Dict, List, Optional, TYPE_CHECKING
import numpy as np
import torch
import yaml
from mephisto.operations.registry import register_mephisto_abstraction
from mephisto.abstractions.blueprint import SharedTaskState
from mephisto.abstractions.blueprints.parlai_chat.parlai_chat_blueprint import (
ParlAIChatBlueprint,
SharedParlAITaskState,
ParlAIChatBlueprintArgs,
)
from omegaconf import DictConfig, MISSING
from parlai.crowdsourcing.tasks.model_chat.bot_agent import TurkLikeAgent
from parlai.crowdsourcing.tasks.model_chat.utils import (
ImageStack,
get_context_generator,
)
from parlai.tasks.blended_skill_talk.agents import ContextGenerator
if TYPE_CHECKING:
from mephisto.data_model.task import TaskRun
def get_task_path():
    """Return the absolute directory containing this module."""
    this_file = os.path.realpath(__file__)
    return os.path.dirname(this_file)
# Mephisto blueprint registry keys for the two task variants in this module
BLUEPRINT_TYPE = 'model_chat_blueprint'
IMAGE_CHAT_BLUEPRINT_TYPE = 'model_image_chat_blueprint'
@dataclass
class SharedBaseModelChatTaskState(SharedParlAITaskState):
    """
    Base shared-state class from which all model-chat tasks inherit.
    """

    # Map from model name to a pre-loaded shared model object
    shared_models: Dict[str, Any] = field(default_factory=dict)
@dataclass
class SharedModelChatTaskState(SharedBaseModelChatTaskState):
    """
    Shared state for the text-only model-chat task.
    """

    # BST-style context generator (personas / previous utterances); None disables it.
    context_generator: Optional[ContextGenerator] = None
    # Conversations still required, keyed by model name (values are ints once parsed).
    conversations_needed: Dict[str, Any] = field(default_factory=dict)
    # Per-model count of conversations collected so far.
    run_statistics: Dict[str, int] = field(default_factory=dict)
    # Onboarding tallies; presumably outcome -> count — confirm against the
    # onboarding world, which receives this dict via onboarding_world_opt.
    onboard_statistics: Dict[str, int] = field(default_factory=dict)
    # Guards concurrent updates to the statistics dicts across worker threads.
    statistics_condition: Optional[Condition] = None
@dataclass
class SharedModelImageChatTaskState(SharedBaseModelChatTaskState):
    """
    Shared state for the image-chat task.

    Both fields default to None and are populated in
    ModelImageChatBlueprint.__init__, so they are typed as Optional here
    (previously they were annotated non-Optional while defaulting to None,
    which is inconsistent with SharedModelChatTaskState and breaks type
    checking).
    """

    # Image/context dicts loaded from the pickled image-context file.
    image_contexts: Optional[List[Dict[str, Any]]] = None
    # Stack tracking how many HITs each image/model combination has received.
    image_stack: Optional[ImageStack] = None
@dataclass
class BaseModelChatBlueprintArgs(ParlAIChatBlueprintArgs):
    """
    Arguments shared by all model-chat blueprints (text-only and image chat).

    Each field's metadata["help"] documents the individual option.  Defaults of
    the form "${mephisto...}" are Hydra interpolations resolved at config-load
    time; MISSING defaults must be supplied by the launching config.
    """

    _group: str = field(
        default="BaseModelChatBlueprint",
        metadata={'help': "Args that are common to all model-chat tasks"},
    )
    custom_source_dir: str = field(
        default=os.path.join(get_task_path(), 'frontend'),
        metadata={"help": "Path to frontend code"},
    )
    num_turns: int = field(default=6, metadata={"help": 'minimum number of turns'})
    random_seed: int = field(
        default=42, metadata={"help": 'Seed for random operations'}
    )
    annotation_question: str = field(
        default='Does this comment require any annotations? (Check all that apply)',
        metadata={
            "help": "The string displayed above the checkboxes for each annotation in the task."
        },
    )
    model_opt_path: str = field(
        default="${mephisto.blueprint.task_config_path}/model_opts.yaml",
        metadata={"help": "Path to YAML of opts for each model"},
    )
    task_model_parallel: bool = field(
        default=True,
        metadata={
            "help": 'Whether to load models to be used with model_parallel True.'
        },
    )
    max_resp_time: int = field(
        default=180, metadata={"help": "time limit for entering a dialog message"}
    )
    chat_data_folder: str = field(
        default=MISSING,
        metadata={"help": "Folder in which to save collected conversation data"},
    )
    check_acceptability: bool = field(
        default=False,
        metadata={
            "help": "Check worker's responses against several metrics of acceptability"
        },
    )
    context_seed: int = field(
        default=MISSING,
        metadata={"help": "Set seed for pulling the context info (for testing)"},
    )
    task_config_path: str = field(
        default=os.path.join(get_task_path(), 'task_config'),
        metadata={"help": "Base path to pull task configuration information"},
    )
    task_description_file: str = field(
        default="${mephisto.blueprint.task_config_path}/task_description.html",
        metadata={"help": "Path to file of HTML to show on the task-description page"},
    )
    left_pane_text_path: str = field(
        default="${mephisto.blueprint.task_config_path}/left_pane_text.html",
        metadata={
            "help": "Path to file of HTML to show on the left-hand pane of the chat window"
        },
    )
    annotations_config_path: str = field(
        default="",
        metadata={
            "help": 'Path to JSON of annotation categories. Set to "" to disable annotations'
        },
    )
    final_rating_question: str = field(
        default='Please rate your partner on a scale of 1-5.',
        metadata={"help": "Text to show when asking worker to make their final rating"},
    )
    max_concurrent_responses: int = field(
        default=1,
        metadata={"help": "Limit on the number of models that can generate at once"},
    )
    override_opt: Dict[str, Any] = field(
        default_factory=dict,
        metadata={
            "help": "Additional args to pass to initialize the context generator "
            "in order to override the parlai parser defaults."
        },
    )
class BaseModelChatBlueprint(ParlAIChatBlueprint, ABC):
    """
    This Blueprint uses somewhat specialized arguments for turn annotations, manages
    their validation, and also has specialized data storage for the result format.
    It also has options for the onboarding data answers and the annotation bucket
    definitions.
    """

    ArgsClass = BaseModelChatBlueprintArgs
    SharedStateClass = SharedBaseModelChatTaskState

    @classmethod
    def assert_task_args(
        cls, args: "DictConfig", shared_state: "SharedTaskState"
    ) -> None:
        """
        Ensure that arguments are properly configured to launch this task.

        Validates that the task-description and left-pane HTML files exist, that
        a usable chat-data folder was given, and (when annotations are enabled)
        that the annotation config file exists.  Raises AssertionError or
        ValueError on misconfiguration.
        """
        super().assert_task_args(args, shared_state)
        assert (
            args.blueprint.get("task_description_file", None) is not None
        ), "Must provide a task description file"
        full_path = os.path.expanduser(args.blueprint.task_description_file)
        assert os.path.exists(
            full_path
        ), f"Target task description path {full_path} doesn't exist"
        assert (
            args.blueprint.get("left_pane_text_path", None) is not None
        ), "Must provide a left pane text file"
        full_path = os.path.expanduser(args.blueprint.left_pane_text_path)
        assert os.path.exists(
            full_path
        ), f"Target left pane text path {full_path} doesn't exist"
        if args.blueprint.get("chat_data_folder") == '':
            raise ValueError('Must provide a valid chat data folder')
        assert '~' not in args.blueprint.chat_data_folder, (
            f'"~" can\'t currently be parsed in the chat data folder path '
            f'{args.blueprint.chat_data_folder}'
        )
        # Currently Hydra overrides the tilde key at lower levels as described here: https://hydra.cc/docs/next/advanced/override_grammar/basic/#grammar
        # Thus the TILDE key cannot be used in replacement for $HOME variable
        # Some hacky solution can probably be achieved but won't be good code so for now this assert is written as a placeholder

        # Annotations are optional: only validate the config path when one is set.
        if args.blueprint.get("annotations_config_path", "") != "":
            full_path = os.path.expanduser(args.blueprint.annotations_config_path)
            assert os.path.exists(
                full_path
            ), f"Target annotation config path {full_path} doesn't exist"

    def __init__(
        self, task_run: "TaskRun", args: "DictConfig", shared_state: "SharedTaskState"
    ):
        # Default conversation initialization
        super().__init__(task_run, args=args, shared_state=shared_state)
        # Seed all three RNG sources so context sampling and model behavior
        # are reproducible across runs.
        random.seed(self.args.blueprint.random_seed)
        np.random.seed(self.args.blueprint.random_seed)
        torch.manual_seed(self.args.blueprint.random_seed)

        # Load task configuration data beyond the task description, as the super does
        # that
        left_pane_path = os.path.expanduser(args.blueprint.left_pane_text_path)
        with open(left_pane_path, "r") as left_pane_file:
            self.left_pane_text = left_pane_file.read()
        self.annotations_config: Optional[str] = None
        if args.blueprint.get("annotations_config_path", "") != "":
            annotations_config_path = os.path.expanduser(
                args.blueprint.annotations_config_path
            )
            with open(annotations_config_path, "r") as annotations_config_file:
                self.annotations_config = annotations_config_file.read()

        # Initialize models
        shared_state.shared_models = self._get_shared_models(args)

        # Limits the number of models that can generate at once
        semaphore = Semaphore(args.blueprint.max_concurrent_responses)

        # Move shared state into the world opt, so that it can be used by the world
        shared_state.onboarding_world_opt.update(
            {'skip_onboarding': self.annotations_config is None}
        )
        # The onboarding checks how well workers annotate conversations, so it should be
        # skipped if we are not annotating
        shared_state.world_opt.update(
            {
                'block_qualification': args.blueprint.block_qualification,
                'annotations_config': self.annotations_config,
                'semaphore': semaphore,
                'shared_bot_agents': shared_state.shared_models,
                'num_turns': args.blueprint.num_turns,
                'max_resp_time': args.blueprint.max_resp_time,
                'is_sandbox': args.provider.requester_name == 'MOCK_REQUESTER',
                'check_acceptability': args.blueprint.check_acceptability,
                'chat_data_folder': args.blueprint.chat_data_folder,
            }
        )

    @abstractmethod
    def _get_shared_models(self, args: "DictConfig") -> Dict[str, dict]:
        """
        Return a dictionary whose values are the shared models.
        """

    def get_frontend_args(self) -> Dict[str, Any]:
        """
        Specifies what options within a task_config should be forwarded to the client
        for use by the task's frontend.
        """
        # Annotation buckets are only loaded when annotations are enabled.
        if self.args.blueprint.get('annotations_config_path', '') != '':
            with open(
                self.args.blueprint.annotations_config_path, "r", encoding="utf-8-sig"
            ) as f:
                annotation_buckets = json.loads(f.read())
        else:
            annotation_buckets = None
        return {
            "min_num_turns": self.args.blueprint.num_turns,
            "task_description": self.full_task_description,
            "task_title": self.args.task.get('task_title', None),
            "annotation_question": self.args.blueprint.annotation_question,
            "annotation_buckets": annotation_buckets,
            "onboarding_data": getattr(self, 'onboard_task_data', None),
            "left_pane_text": self.left_pane_text,
            "frame_height": '650px',
            "final_rating_question": self.args.blueprint.final_rating_question,
            "block_mobile": True,
        }
@dataclass
class ModelChatBlueprintArgs(BaseModelChatBlueprintArgs):
    """
    Arguments for the text-only model-chat task (ModelChatBlueprint).

    Extends the base args with options controlling conversation starts,
    per-model conversation quotas, and onboarding.
    """

    _blueprint_type: str = BLUEPRINT_TYPE
    _group: str = field(
        default="ModelChatBlueprint",
        metadata={
            'help': "This task runs conversations between a human and one of a set of "
            "provided models, asking workers to evaluate individual turns and "
            "the overall model quality."
        },
    )
    conversation_start_mode: str = field(
        default='hi',
        metadata={
            "help": 'Whether to show "Hi!" or two previous utterances (as in BlendedSkillTalk) at the beginning of the conversation',
            "choices": ['hi', 'bst'],
        },
    )
    include_persona: bool = field(
        default=False, metadata={"help": "Show persona to the bot"}
    )
    conversations_needed_string: str = field(
        default=MISSING,
        metadata={
            "help": 'Number of convos needed for each model. For example: "modelA:50,modelB:20"'
        },
    )
    max_onboard_time: int = field(
        default=300, metadata={"help": "time limit accepting onboarding"}
    )
    onboard_task_data_path: str = field(
        default="${mephisto.blueprint.task_config_path}/onboard_task_data.json",
        metadata={
            "help": "Path to JSON containing settings for running onboarding. Not used if not annotating model responses"
        },
    )
    world_file: str = field(
        default=os.path.join(get_task_path(), 'worlds.py'),
        metadata={"help": "Path to file containing parlai world"},
    )
@register_mephisto_abstraction()
class ModelChatBlueprint(BaseModelChatBlueprint):
    """
    Blueprint for model chat without images.
    This blueprint subclasses BaseModelChatBlueprint to provide logic for keeping track
    of how many more conversations are needed per model; this logic is not shared with
    other model-chat blueprints.
    """

    ArgsClass = ModelChatBlueprintArgs
    SharedStateClass = SharedModelChatTaskState
    BLUEPRINT_TYPE = BLUEPRINT_TYPE

    @classmethod
    def assert_task_args(
        cls, args: "DictConfig", shared_state: "SharedTaskState"
    ) -> None:
        """
        Ensure that arguments are properly configured to launch this task.

        Derives the per-model conversation quota either from
        shared_state.conversations_needed (when pre-filled) or by parsing
        conversations_needed_string, and sets blueprint.num_conversations
        before deferring to the base-class checks.
        """
        # A conversations-needed dict may be supplied directly on shared_state;
        # otherwise it must be parseable from the "modelA:50,modelB:20" string.
        if (
            not isinstance(shared_state.conversations_needed, dict)
            or len(shared_state.conversations_needed) == 0
        ):
            assert (
                args.blueprint.get('conversations_needed_string', None) is not None
            ), (
                "Must provide a string of needed conversations per model if not providing "
                "a conversations needed dict"
            )
            try:
                conversations_needed = {}
                parts = args.blueprint.conversations_needed_string.split(',')
                for part in parts:
                    model_name, num_string = part.split(':')
                    conversations_needed[model_name] = int(num_string)
            except Exception as e:
                raise Exception(
                    "Could not create conversations needed dict from given string. "
                    f"Error was {e}.\n"
                    "Be sure the format is like modelA:50,modelB:20"
                )
        else:
            conversations_needed = shared_state.conversations_needed
        # The total quota determines how many conversations get launched.
        args.blueprint.num_conversations = sum(conversations_needed.values())
        super().assert_task_args(args=args, shared_state=shared_state)
        if args.blueprint.get("annotations_config_path", "") != "":
            # We are going to do annotations, so check for the presence of an onboarding
            # data file that will be used to onboard users into knowing how to do the
            # annotations properly
            assert (
                args.blueprint.get("onboard_task_data_path", None) is not None
            ), "Must provide an onboarding data file"
            full_path = os.path.expanduser(args.blueprint.onboard_task_data_path)
            assert os.path.exists(
                full_path
            ), f"Target onboarding data path {full_path} doesn't exist"

    def __init__(
        self, task_run: "TaskRun", args: "DictConfig", shared_state: "SharedTaskState"
    ):
        # NOTE(review): this re-parses conversations_needed_string even when
        # assert_task_args accepted a pre-filled shared_state dict — confirm
        # callers always provide the string arg when constructing directly.
        conversations_needed = self._process_conversations_needed(args)
        self.conversations_needed = conversations_needed
        shared_state.conversations_needed = conversations_needed
        args.blueprint.num_conversations = sum(conversations_needed.values())
        super().__init__(task_run=task_run, args=args, shared_state=shared_state)
        if args.blueprint.get("annotations_config_path", "") != "":
            # We are going to do annotations, so load the onboarding data file that will
            # be used to onboard users into knowing how to do the annotations properly
            onboard_task_data_path = os.path.expanduser(
                args.blueprint.onboard_task_data_path
            )
            with open(onboard_task_data_path, "r") as onboard_task_data_file:
                self.onboard_task_data = json.load(onboard_task_data_file)
        else:
            self.onboard_task_data = None
        # Start every model's completed-conversation counter at zero.
        run_statistics = {r: 0 for (r, v) in self.conversations_needed.items()}
        shared_state.run_statistics = run_statistics
        # A context generator is only needed for persona / BST-style starts.
        context_generator: Optional[ContextGenerator] = None
        if (
            args.blueprint.include_persona
            or args.blueprint.conversation_start_mode == 'bst'
        ):
            context_generator = get_context_generator(args.blueprint.override_opt)
        shared_state.context_generator = context_generator
        # Lock for editing run statistics between threads
        statistics_condition = Condition()
        # Move shared state into the world and onboarding opts, such that these
        # can be used by the worlds
        shared_state.onboarding_world_opt.update(
            {
                'onboard_statistics': shared_state.onboard_statistics,
                'statistics_condition': statistics_condition,
                'max_onboard_time': args.blueprint.max_onboard_time,
                'onboard_task_data': self.onboard_task_data,
                'onboarding_qualification': args.blueprint.onboarding_qualification,
            }
        )
        shared_state.world_opt.update(
            {
                'conversations_needed': conversations_needed,
                'run_statistics': shared_state.run_statistics,
                'context_generator': context_generator,
                'statistics_condition': statistics_condition,
                'conversation_start_mode': args.blueprint.conversation_start_mode,
                'include_persona': args.blueprint.include_persona,
            }
        )

    def _process_conversations_needed(self, args: "DictConfig") -> Dict[str, int]:
        """
        Set the number of conversations needed.

        Parses "modelA:50,modelB:20" into {"modelA": 50, "modelB": 20}.
        """
        conversations_needed_string = args.blueprint.conversations_needed_string
        conversations_needed = {}
        parts = conversations_needed_string.split(',')
        for part in parts:
            model_name, num_string = part.split(':')
            conversations_needed[model_name] = int(num_string)
        return conversations_needed

    def _get_shared_models(self, args: "DictConfig") -> Dict[str, dict]:
        """
        Load opts from the model-opt YAML and build shared bot agents, but only
        for models that still need conversations collected.
        """
        with open(args.blueprint.model_opt_path) as f:
            all_model_opts = yaml.safe_load(f.read())
        active_model_opts = {
            model: opt
            for model, opt in all_model_opts.items()
            if self.conversations_needed[model] > 0
        }
        return TurkLikeAgent.get_bot_agents(args=args, model_opts=active_model_opts)
@dataclass
class ModelImageChatBlueprintArgs(BaseModelChatBlueprintArgs):
    """
    Arguments for the image-chat task (ModelImageChatBlueprint).

    Extends the base args with the image-context file, the per-combination HIT
    count, and the on-disk stack used to balance image/model combinations.
    """

    _blueprint_type: str = IMAGE_CHAT_BLUEPRINT_TYPE
    _group: str = field(
        default="ModelImageChatBlueprint",
        metadata={
            'help': "This task runs conversations between a human and one of a set of "
            "provided models, asking workers chat about a provided image."
        },
    )
    evals_per_image_model_combo: int = field(
        default=1,
        metadata={
            "help": "The number of HITs to perform per combination of image and model"
        },
    )
    image_context_path: str = field(
        default="${mephisto.blueprint.task_config_path}/image_contexts",
        metadata={
            "help": "Path to pickle file containing images and the context information that goes with each one"
        },
    )
    num_conversations: int = field(
        default=10, metadata={'help': 'The number of conversations to collect'}
    )
    stack_folder: str = field(
        default=os.path.join(get_task_path(), 'image_stack'),
        metadata={
            "help": 'Folder in which to save backups of the stack of which image-and-model combinations have had HITs launched'
        },
    )
    world_file: str = field(
        default=os.path.join(get_task_path(), 'worlds_image_chat.py'),
        metadata={"help": "Path to file containing ParlAI world for image chat"},
    )
@register_mephisto_abstraction()
class ModelImageChatBlueprint(BaseModelChatBlueprint):
    """
    Subclass of BaseModelChatBlueprint to show the speakers an image on the first turn.
    The image is drawn from a stack that keeps track of how many HITs have been launched
    for a given combination of image and model.
    """

    ArgsClass = ModelImageChatBlueprintArgs
    SharedStateClass = SharedModelImageChatTaskState
    BLUEPRINT_TYPE = IMAGE_CHAT_BLUEPRINT_TYPE

    @classmethod
    def assert_task_args(
        cls, args: "DictConfig", shared_state: "SharedTaskState"
    ) -> None:
        """
        Ensure that arguments are properly configured to launch this task.

        Beyond the base-class checks, verifies that the image-context pickle
        and the model-opt YAML both exist on disk.
        """
        super().assert_task_args(args=args, shared_state=shared_state)
        image_context_path = os.path.expanduser(args.blueprint.image_context_path)
        assert os.path.exists(
            image_context_path
        ), f"The image context path {image_context_path} doesn't exist!"
        model_opt_path = os.path.expanduser(args.blueprint.model_opt_path)
        assert os.path.exists(
            model_opt_path
        ), f"The model opt path {model_opt_path} doesn't exist!"

    def __init__(
        self, task_run: "TaskRun", args: "DictConfig", shared_state: "SharedTaskState"
    ):
        super().__init__(task_run=task_run, args=args, shared_state=shared_state)
        # NOTE(review): pickle.load executes arbitrary code on load — only use
        # trusted, task-owned image-context files here.
        with open(args.blueprint.image_context_path, 'rb') as f:
            shared_state.image_contexts = pickle.load(f)
        # Create the stack to keep track of how many workers have seen which
        # combinations of images and models
        image_opt = {
            'evals_per_image_model_combo': args.blueprint.evals_per_image_model_combo,
            'num_images': len(shared_state.image_contexts),
            'models': list(shared_state.shared_models.keys()),
            'stack_folder': args.blueprint.stack_folder,
        }
        shared_state.image_stack = ImageStack(image_opt)
        shared_state.world_opt.update(
            {
                'image_contexts': shared_state.image_contexts,
                'image_stack': shared_state.image_stack,
            }
        )

    def _get_shared_models(self, args: "DictConfig") -> Dict[str, dict]:
        """
        Load per-model opts from the model-opt YAML and build all shared bot
        agents (image chat uses every configured model).
        """
        with open(args.blueprint.model_opt_path) as f:
            model_opts = yaml.safe_load(f.read())
        return TurkLikeAgent.get_bot_agents(args=args, model_opts=model_opts)
|
{"hexsha": "22e5d087398964c1be75899619889bfe28bdc2af", "size": 22886, "ext": "py", "lang": "Python", "max_stars_repo_path": "parlai/crowdsourcing/tasks/model_chat/model_chat_blueprint.py", "max_stars_repo_name": "Uzornd/ParlAI", "max_stars_repo_head_hexsha": "5c07877f493db75847029ed9906aabc48c4d9f54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-09T06:14:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T06:14:06.000Z", "max_issues_repo_path": "parlai/crowdsourcing/tasks/model_chat/model_chat_blueprint.py", "max_issues_repo_name": "Uzornd/ParlAI", "max_issues_repo_head_hexsha": "5c07877f493db75847029ed9906aabc48c4d9f54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-07-22T04:20:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-29T13:37:12.000Z", "max_forks_repo_path": "parlai/crowdsourcing/tasks/model_chat/model_chat_blueprint.py", "max_forks_repo_name": "Uzornd/ParlAI", "max_forks_repo_head_hexsha": "5c07877f493db75847029ed9906aabc48c4d9f54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0104895105, "max_line_length": 152, "alphanum_fraction": 0.657039238, "include": true, "reason": "import numpy", "num_tokens": 4765}
|
# Demo script: exercises the dwl.FloatingBaseSystem Python bindings on the HyQ
# quadruped model (URDF + YARF config), printing model properties, state
# conversions, and branch-state manipulation.
from __future__ import print_function
# This lets us use the python3-style print() function even in python2. It should have no effect if you're already running python3.
import os
import dwl
import numpy as np

# Configure the printing
np.set_printoptions(suppress=True)

# Construct an instance of the FloatingBaseSystem class, which wraps the C++ class.
fbs = dwl.FloatingBaseSystem()
ws = dwl.WholeBodyState()

# Initializing the URDF model and whole-body state; model files are located
# relative to this script's directory.
fpath = os.path.dirname(os.path.abspath(__file__))
fbs.resetFromURDFFile(fpath + "/../hyq.urdf", fpath + "/../../config/hyq.yarf")
ws.setJointDoF(fbs.getJointDoF())

# The robot state: zero base pose/velocity/acceleration, bent-leg joint posture.
ws.setBasePosition(np.array([0., 0., 0.]))
ws.setBaseRPY(np.array([0., 0., 0.]))
ws.setBaseVelocity_W(np.array([0., 0., 0.]))
ws.setBaseRPYVelocity_W(np.array([0., 0., 0.]))
ws.setBaseAcceleration_W(np.array([0., 0., 0.]))
ws.setBaseRPYAcceleration_W(np.array([0., 0., 0.]))
ws.setJointPosition(0.75, fbs.getJointId("lf_hfe_joint"))
ws.setJointPosition(-1.5, fbs.getJointId("lf_kfe_joint"))
ws.setJointPosition(-0.75, fbs.getJointId("lh_hfe_joint"))
ws.setJointPosition(1.5, fbs.getJointId("lh_kfe_joint"))
ws.setJointPosition(0.75, fbs.getJointId("rf_hfe_joint"))
ws.setJointPosition(-1.5, fbs.getJointId("rf_kfe_joint"))
ws.setJointPosition(-0.75, fbs.getJointId("rh_hfe_joint"))
ws.setJointPosition(1.5, fbs.getJointId("rh_kfe_joint"))

# Getting the total mass of the system. Note that you could also get the mass of a specific
# body (e.g. sys.getBodyMass(body_name))
print("Total mass: ", fbs.getTotalMass())
print("lf_upperleg mass: ", fbs.getBodyMass("lf_upperleg"))
print("The gravity acceleration is ", fbs.getGravityAcceleration())

# Getting the CoM of the floating-base body. Note that you could also get the CoM of a
# specific body (e.g. sys.getBodyCoM(body_name))
print("Floating-base CoM: ", fbs.getFloatingBaseCoM().transpose())
print("lf_upperleg CoM: ", fbs.getBodyCoM("lf_upperleg").transpose())

# Getting the number of system DoF, floating-base Dof, joints and end-effectors
print("Total DoF: ", fbs.getSystemDoF())
print("Floating-base DoF: ", fbs.getFloatingBaseDoF());
print("Number of joint: ", fbs.getJointDoF())
print("Number of end-effectors: ", fbs.getNumberOfEndEffectors())
print("The floating-base body name: ", fbs.getFloatingBaseName())
print("lf_kfe_joint ID: ", fbs.getJointId("lf_kfe_joint"))
print("The CoM position: ", fbs.getSystemCoM(ws.base_pos, ws.joint_pos).transpose())
print("The CoM velocity: ", fbs.getSystemCoMRate(ws.base_pos, ws.joint_pos,
                                                 ws.base_vel, ws.joint_vel).transpose())

# Getting the floating-base information
for i in range(0,6):
    base_joint = fbs.getFloatingBaseJoint(i)
    if (base_joint.active):
        print("Base joint[", base_joint.id, "] = ", base_joint.name)
for name in fbs.getFloatingJointNames():
    print("The base joint names are ", name)

# Getting the joint names and ids
joints = fbs.getJoints()
for name in joints:
    print("Joint[", joints[name], "] = ", name)
for name in fbs.getJointNames():
    print("The joint names are ", name)

# Getting the end-effector names and ids
contacts = fbs.getEndEffectors()
print("The number of feet are ", fbs.getNumberOfEndEffectors(dwl.FOOT))
for name in contacts:
    print("End-effector[", contacts[name], "] = ", name)

# Getting the joint limits (commented lines show equivalent accessor styles)
joint_lim = fbs.getJointLimits()
for key in joint_lim:
    # The lower limit
    print(key, "lower limit =", fbs.getLowerLimit(joint_lim[key]))
    # print(key, "lower limit =", fbs.getLowerLimit(key))
    # print(key, "lower limit =", joint_lim[key].lower)
    # The upper limit
    print(key, "upper limit =", fbs.getUpperLimit(joint_lim[key]))
    # print(key, "upper limit =", fbs.getUpperLimit(key))
    # print(key, "upper limit =", joint_lim[key].upper)
    # The velocity limit
    print(key, "velocity limit =", fbs.getVelocityLimit(joint_lim[key]))
    # print(key, "velocity limit =", fbs.getVelocityLimit(key))
    # print(key, "velocity limit =", joint_lim[key].velocity)
    # The effort limit
    print(key, "effort limit =", fbs.getEffortLimit(joint_lim[key]))
    # print(key, "effort limit =", fbs.getEffortLimit(key))
    # print(key, "effort limit =", joint_lim[key].effort)

# Getting the default posture
joint_pos0 = fbs.getDefaultPosture()
print("The nominal joint positions = ", joint_pos0.transpose())

# Converting between whole-body state to generalized state, and viceverse
base_state = np.zeros(6)
joint_state = joint_pos0
generalized_state = fbs.toGeneralizedJointState(base_state, joint_state)
print("Converting the whole-body state to generalized state")
print("Base state = ", base_state.transpose())
print("Joint state = ", joint_state.transpose())
print("The generalized state = ", generalized_state.transpose())
new_base_state = np.zeros(6)
new_joint_state = np.zeros(fbs.getJointDoF())
# fromGeneralizedJointState fills the two output arrays in place.
fbs.fromGeneralizedJointState(new_base_state, new_joint_state, generalized_state);
print("New base state = ", new_base_state.transpose())
print("New joint state = ", new_joint_state.transpose())

# Setting up the branch states: write a per-leg joint sub-vector into the full
# joint vector, then read it back.
joint_pos = ws.getJointPosition()
lf_branch_pos = np.array([0.5, 0.75, 1.5]);
fbs.setBranchState(joint_pos, lf_branch_pos, "lf_foot");
ws.setJointPosition(joint_pos)
print("Setting up the lf_foot branch position = ", lf_branch_pos.transpose())
print("Joint_position = ", ws.getJointPosition().transpose())
print("The lf_foot branch position = ", fbs.getBranchState(ws.getJointPosition(), "lf_foot").transpose())
|
{"hexsha": "48893f398e94989c34633abff9d9adef53fd4b0a", "size": 5445, "ext": "py", "lang": "Python", "max_stars_repo_path": "sample/python/FloatingBaseSystem.py", "max_stars_repo_name": "EricWang1hitsz/dwl", "max_stars_repo_head_hexsha": "b45e6babfcace928af2601f3d453e3f27e2e76c8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2018-01-18T08:26:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T03:31:22.000Z", "max_issues_repo_path": "sample/python/FloatingBaseSystem.py", "max_issues_repo_name": "EricWang1hitsz/dwl", "max_issues_repo_head_hexsha": "b45e6babfcace928af2601f3d453e3f27e2e76c8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-09-27T16:06:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-27T03:31:11.000Z", "max_forks_repo_path": "sample/python/FloatingBaseSystem.py", "max_forks_repo_name": "EricWang1hitsz/dwl", "max_forks_repo_head_hexsha": "b45e6babfcace928af2601f3d453e3f27e2e76c8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2018-01-24T14:07:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T03:31:25.000Z", "avg_line_length": 38.8928571429, "max_line_length": 130, "alphanum_fraction": 0.740312213, "include": true, "reason": "import numpy", "num_tokens": 1488}
|
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Jens Wagemaker, Aaron Anderson
-/
import ring_theory.coprime.basic
import ring_theory.principal_ideal_domain
/-!
# Divisibility over ℕ and ℤ
This file collects results for the integers and natural numbers that use abstract algebra in
their proofs or cases of ℕ and ℤ being examples of structures in abstract algebra.
## Main statements
* `nat.prime_iff`: `nat.prime` coincides with the general definition of `prime`
* `nat.irreducible_iff_prime`: a non-unit natural number is only divisible by `1` iff it is prime
* `nat.factors_eq`: the multiset of elements of `nat.factors` is equal to the factors
given by the `unique_factorization_monoid` instance
* ℤ is a `normalization_monoid`
* ℤ is a `gcd_monoid`
## Tags
prime, irreducible, natural numbers, integers, normalization monoid, gcd monoid,
greatest common divisor, prime factorization, prime factors, unique factorization,
unique factors
-/
/-- `nat.prime` coincides with the general `prime` predicate of commutative monoids. -/
theorem nat.prime_iff {p : ℕ} : p.prime ↔ prime p :=
begin
  split; intro h,
  -- Forward: a `nat.prime` is nonzero, not a unit, and satisfies Euclid's lemma.
  { refine ⟨h.ne_zero, ⟨_, λ a b, _⟩⟩,
    { rw nat.is_unit_iff, apply h.ne_one },
    { apply h.dvd_mul.1 } },
  -- Backward: a `prime` is at least 2, and any divisor `m` is `1` or `p` itself.
  { refine ⟨_, λ m hm, _⟩,
    { cases p, { exfalso, apply h.ne_zero rfl },
      cases p, { exfalso, apply h.ne_one rfl },
      exact (add_le_add_right (zero_le p) 2 : _ ) },
    { cases hm with n hn,
      cases h.2.2 m n (hn ▸ dvd_rfl) with hpm hpn,
      { right, apply nat.dvd_antisymm (dvd.intro _ hn.symm) hpm },
      { left,
        cases n, { exfalso, rw [hn, mul_zero] at h, apply h.ne_zero rfl },
        apply nat.eq_of_mul_eq_mul_right (nat.succ_pos _),
        rw [← hn, one_mul],
        apply nat.dvd_antisymm hpn (dvd.intro m _),
        rw [mul_comm, hn], }, } }
end
/-- On `ℕ`, `irreducible` and `prime` coincide. -/
theorem nat.irreducible_iff_prime {p : ℕ} : irreducible p ↔ prime p :=
begin
  -- Primes are always irreducible; for the converse, reduce to `nat.prime`.
  refine ⟨λ h, _, prime.irreducible⟩,
  rw ← nat.prime_iff,
  refine ⟨_, λ m hm, _⟩,
  -- An irreducible natural number is at least 2 (it is neither 0 nor a unit).
  { cases p, { exfalso, apply h.ne_zero rfl },
    cases p, { exfalso, apply h.not_unit is_unit_one, },
    exact (add_le_add_right (zero_le p) 2 : _ ) },
  -- A divisor `m` of `p` gives a factorization; irreducibility makes one factor a unit.
  { cases hm with n hn,
    cases h.is_unit_or_is_unit hn with um un,
    { left, rw nat.is_unit_iff.1 um, },
    { right, rw [hn, nat.is_unit_iff.1 un, mul_one], } }
end
namespace nat
/-- `ℕ` is a `wf_dvd_monoid`: strict divisibility is well-founded.  The proof
embeds `ℕ` into `with_top ℕ` (sending `0` to `⊤`, since everything divides `0`)
and pulls back the well-founded `<` relation along this map. -/
instance : wf_dvd_monoid ℕ :=
⟨begin
  apply rel_hom.well_founded _ (with_top.well_founded_lt nat.lt_wf),
  refine ⟨λ x, if x = 0 then ⊤ else x, _⟩,
  intros a b h,
  cases a,
  -- `0` strictly divides nothing, so this case is vacuous.
  { exfalso, revert h, simp [dvd_not_unit] },
  cases b,
  -- Any successor maps below `⊤ = image of 0`.
  {simp [succ_ne_zero, with_top.coe_lt_top]},
  cases dvd_and_not_dvd_iff.2 h with h1 h2,
  simp only [succ_ne_zero, with_top.coe_lt_coe, if_false],
  -- A proper divisor is ≤ and not equal, hence strictly smaller.
  apply lt_of_le_of_ne (nat.le_of_dvd (nat.succ_pos _) h1) (λ con, h2 _),
  rw con,
end⟩
/-- `ℕ` has unique factorization, since irreducibles are prime. -/
instance : unique_factorization_monoid ℕ :=
⟨λ _, nat.irreducible_iff_prime⟩
end nat
/-- `ℕ` is a `gcd_monoid`, with `gcd`/`lcm` given by `nat.gcd`/`nat.lcm`. -/
instance : gcd_monoid ℕ :=
{ gcd := nat.gcd,
  lcm := nat.lcm,
  gcd_dvd_left := nat.gcd_dvd_left ,
  gcd_dvd_right := nat.gcd_dvd_right,
  dvd_gcd := λ a b c, nat.dvd_gcd,
  gcd_mul_lcm := λ a b, by rw [nat.gcd_mul_lcm],
  lcm_zero_left := nat.lcm_zero_left,
  lcm_zero_right := nat.lcm_zero_right }
/-- `ℕ` is a `normalized_gcd_monoid`: every natural number is already
normalized (`normalize_eq`), so both normalization laws are trivial. -/
instance : normalized_gcd_monoid ℕ :=
{ normalize_gcd := λ a b, normalize_eq _,
  normalize_lcm := λ a b, normalize_eq _,
  .. (infer_instance : gcd_monoid ℕ),
  .. (infer_instance : normalization_monoid ℕ) }
/-- The `gcd_monoid` gcd on `ℕ` is definitionally `nat.gcd`. -/
lemma gcd_eq_nat_gcd (m n : ℕ) : gcd m n = nat.gcd m n := rfl
/-- The `gcd_monoid` lcm on `ℕ` is definitionally `nat.lcm`. -/
lemma lcm_eq_nat_lcm (m n : ℕ) : lcm m n = nat.lcm m n := rfl
namespace int
section normalization_monoid
instance : normalization_monoid ℤ :=
{ norm_unit := λa:ℤ, if 0 ≤ a then 1 else -1,
norm_unit_zero := if_pos (le_refl _),
norm_unit_mul := assume a b hna hnb,
begin
cases hna.lt_or_lt with ha ha; cases hnb.lt_or_lt with hb hb;
simp [mul_nonneg_iff, ha.le, ha.not_le, hb.le, hb.not_le]
end,
norm_unit_coe_units := assume u, (units_eq_one_or u).elim
(assume eq, eq.symm ▸ if_pos zero_le_one)
(assume eq, eq.symm ▸ if_neg (not_le_of_gt $ show (-1:ℤ) < 0, by dec_trivial)), }
lemma normalize_of_nonneg {z : ℤ} (h : 0 ≤ z) : normalize z = z :=
show z * ↑(ite _ _ _) = z, by rw [if_pos h, units.coe_one, mul_one]
lemma normalize_of_neg {z : ℤ} (h : z < 0) : normalize z = -z :=
show z * ↑(ite _ _ _) = -z,
by rw [if_neg (not_le_of_gt h), units.coe_neg, units.coe_one, mul_neg_one]
lemma normalize_coe_nat (n : ℕ) : normalize (n : ℤ) = n :=
normalize_of_nonneg (coe_nat_le_coe_nat_of_le $ nat.zero_le n)
theorem coe_nat_abs_eq_normalize (z : ℤ) : (z.nat_abs : ℤ) = normalize z :=
begin
by_cases 0 ≤ z,
{ simp [nat_abs_of_nonneg h, normalize_of_nonneg h] },
{ simp [of_nat_nat_abs_of_nonpos (le_of_not_ge h), normalize_of_neg (lt_of_not_ge h)] }
end
lemma nonneg_of_normalize_eq_self {z : ℤ} (hz : normalize z = z) : 0 ≤ z :=
calc 0 ≤ (z.nat_abs : ℤ) : coe_zero_le _
... = normalize z : coe_nat_abs_eq_normalize _
... = z : hz
lemma nonneg_iff_normalize_eq_self (z : ℤ) : normalize z = z ↔ 0 ≤ z :=
⟨nonneg_of_normalize_eq_self, normalize_of_nonneg⟩
lemma eq_of_associated_of_nonneg {a b : ℤ} (h : associated a b) (ha : 0 ≤ a) (hb : 0 ≤ b) : a = b :=
dvd_antisymm_of_normalize_eq (normalize_of_nonneg ha) (normalize_of_nonneg hb) h.dvd h.symm.dvd
end normalization_monoid
section gcd_monoid
instance : gcd_monoid ℤ :=
{ gcd := λa b, int.gcd a b,
lcm := λa b, int.lcm a b,
gcd_dvd_left := assume a b, int.gcd_dvd_left _ _,
gcd_dvd_right := assume a b, int.gcd_dvd_right _ _,
dvd_gcd := assume a b c, dvd_gcd,
gcd_mul_lcm := λ a b, by
{ rw [← int.coe_nat_mul, gcd_mul_lcm, coe_nat_abs_eq_normalize],
exact normalize_associated (a * b) },
lcm_zero_left := assume a, coe_nat_eq_zero.2 $ nat.lcm_zero_left _,
lcm_zero_right := assume a, coe_nat_eq_zero.2 $ nat.lcm_zero_right _}
instance : normalized_gcd_monoid ℤ :=
{ normalize_gcd := λ a b, normalize_coe_nat _,
normalize_lcm := λ a b, normalize_coe_nat _,
.. int.normalization_monoid,
.. (infer_instance : gcd_monoid ℤ) }
lemma coe_gcd (i j : ℤ) : ↑(int.gcd i j) = gcd_monoid.gcd i j := rfl
lemma coe_lcm (i j : ℤ) : ↑(int.lcm i j) = gcd_monoid.lcm i j := rfl
lemma nat_abs_gcd (i j : ℤ) : nat_abs (gcd_monoid.gcd i j) = int.gcd i j := rfl
lemma nat_abs_lcm (i j : ℤ) : nat_abs (gcd_monoid.lcm i j) = int.lcm i j := rfl
end gcd_monoid
-- Every integer equals its absolute value up to a unit (`±1`).
lemma exists_unit_of_abs (a : ℤ) : ∃ (u : ℤ) (h : is_unit u), (int.nat_abs a : ℤ) = u * a :=
begin
  cases (nat_abs_eq a) with h,
  { use [1, is_unit_one], rw [← h, one_mul], },
  { use [-1, is_unit_one.neg], rw [ ← neg_eq_iff_neg_eq.mp (eq.symm h)],
    simp only [neg_mul_eq_neg_mul_symm, one_mul] }
end
-- `int.gcd` is definitionally the `nat.gcd` of the absolute values.
lemma gcd_eq_nat_abs {a b : ℤ} : int.gcd a b = nat.gcd a.nat_abs b.nat_abs := rfl
-- Bézout characterisation: gcd is 1 iff the integers are coprime.
lemma gcd_eq_one_iff_coprime {a b : ℤ} : int.gcd a b = 1 ↔ is_coprime a b :=
begin
  split,
  { intro hg,
    -- forward: assemble Bézout coefficients from `nat.gcd_a`/`nat.gcd_b`,
    -- corrected by the sign units from `exists_unit_of_abs`
    obtain ⟨ua, hua, ha⟩ := exists_unit_of_abs a,
    obtain ⟨ub, hub, hb⟩ := exists_unit_of_abs b,
    use [(nat.gcd_a (int.nat_abs a) (int.nat_abs b)) * ua,
    (nat.gcd_b (int.nat_abs a) (int.nat_abs b)) * ub],
    rw [mul_assoc, ← ha, mul_assoc, ← hb, mul_comm, mul_comm _ (int.nat_abs b : ℤ),
    ← nat.gcd_eq_gcd_ab, ←gcd_eq_nat_abs, hg, int.coe_nat_one] },
  { rintro ⟨r, s, h⟩,
    -- reverse: a common prime divisor would divide the Bézout combination 1
    by_contradiction hg,
    obtain ⟨p, ⟨hp, ha, hb⟩⟩ := nat.prime.not_coprime_iff_dvd.mp hg,
    apply nat.prime.not_dvd_one hp,
    rw [←coe_nat_dvd, int.coe_nat_one, ← h],
    exact dvd_add ((coe_nat_dvd_left.mpr ha).mul_left _)
    ((coe_nat_dvd_left.mpr hb).mul_left _) }
end
-- Integer coprimality reduces to `ℕ`-coprimality of the absolute values.
lemma coprime_iff_nat_coprime {a b : ℤ} : is_coprime a b ↔ nat.coprime a.nat_abs b.nat_abs :=
by rw [←gcd_eq_one_iff_coprime, nat.coprime_iff_gcd_eq_one, gcd_eq_nat_abs]
-- If `a` and `b` have gcd 1 and their product is a square, `a` is `±` a square.
lemma sq_of_gcd_eq_one {a b c : ℤ} (h : int.gcd a b = 1) (heq : a * b = c ^ 2) :
  ∃ (a0 : ℤ), a = a0 ^ 2 ∨ a = - (a0 ^ 2) :=
begin
  have h' : is_unit (gcd_monoid.gcd a b), { rw [← coe_gcd, h, int.coe_nat_one], exact is_unit_one },
  obtain ⟨d, ⟨u, hu⟩⟩ := exists_associated_pow_of_mul_eq_pow h' heq,
  use d,
  rw ← hu,
  -- the ambient unit is `±1`, giving the two sign cases
  cases int.units_eq_one_or u with hu' hu'; { rw hu', simp }
end
-- Same statement phrased with `is_coprime`.
lemma sq_of_coprime {a b c : ℤ} (h : is_coprime a b) (heq : a * b = c ^ 2) :
  ∃ (a0 : ℤ), a = a0 ^ 2 ∨ a = - (a0 ^ 2) := sq_of_gcd_eq_one (gcd_eq_one_iff_coprime.mpr h) heq
-- The euclidean-domain gcd on `ℤ` agrees with `int.gcd` after `nat_abs`.
lemma nat_abs_euclidean_domain_gcd (a b : ℤ) :
  int.nat_abs (euclidean_domain.gcd a b) = int.gcd a b :=
begin
  -- divisibility antisymmetry in `ℕ`, transported through the coercion
  apply nat.dvd_antisymm; rw ← int.coe_nat_dvd,
  { rw int.nat_abs_dvd,
    exact int.dvd_gcd (euclidean_domain.gcd_dvd_left _ _) (euclidean_domain.gcd_dvd_right _ _) },
  { rw int.dvd_nat_abs,
    exact euclidean_domain.dvd_gcd (int.gcd_dvd_left _ _) (int.gcd_dvd_right _ _) }
end
end int
-- For natural numbers, irreducibility coincides with primality.
theorem irreducible_iff_nat_prime : ∀(a : ℕ), irreducible a ↔ nat.prime a
| 0 := by simp [nat.not_prime_zero]
| 1 := by simp [nat.prime, one_lt_two]
| (n + 2) :=
  have h₁ : ¬n + 2 = 1, from dec_trivial,
  begin
    simp [h₁, nat.prime, irreducible_iff, (≥), nat.le_add_left 2 n, (∣)],
    refine forall_congr (assume a, forall_congr $ assume b, forall_congr $ assume hab, _),
    by_cases a = 1; simp [h],
    split,
    { assume hb, simpa [hb] using hab.symm },
    { assume ha, subst ha,
      -- cancel `n + 2` on the left of `hab`
      have : n + 2 > 0, from dec_trivial,
      refine nat.eq_of_mul_eq_mul_left this _,
      rw [← hab, mul_one] }
  end
-- A natural number is prime iff its coercion to `ℤ` is prime.
lemma nat.prime_iff_prime_int {p : ℕ} : p.prime ↔ _root_.prime (p : ℤ) :=
⟨λ hp, ⟨int.coe_nat_ne_zero_iff_pos.2 hp.pos, mt int.is_unit_iff_nat_abs_eq.1 hp.ne_one,
  λ a b h, by rw [← int.dvd_nat_abs, int.coe_nat_dvd, int.nat_abs_mul, hp.dvd_mul] at h;
    rwa [← int.dvd_nat_abs, int.coe_nat_dvd, ← int.dvd_nat_abs, int.coe_nat_dvd]⟩,
  λ hp, nat.prime_iff.2 ⟨int.coe_nat_ne_zero.1 hp.1,
    mt nat.is_unit_iff.1 $ λ h, by simpa [h, not_prime_one] using hp,
    λ a b, by simpa only [int.coe_nat_dvd, (int.coe_nat_mul _ _).symm] using hp.2.2 a b⟩⟩
/-- Maps an associate class of integers consisting of `-n, n` to `n : ℕ` -/
def associates_int_equiv_nat : associates ℤ ≃ ℕ :=
begin
  -- forward map: `nat_abs` of the canonical representative;
  -- inverse map: `associates.mk` on the coercion
  refine ⟨λz, z.out.nat_abs, λn, associates.mk n, _, _⟩,
  { refine (assume a, quotient.induction_on' a $ assume a,
      associates.mk_eq_mk_iff_associated.2 $ associated.symm $ ⟨norm_unit a, _⟩),
    show normalize a = int.nat_abs (normalize a),
    rw [int.coe_nat_abs_eq_normalize, normalize_idem] },
  { intro n, dsimp, rw [associates.out_mk ↑n,
      ← int.coe_nat_abs_eq_normalize, int.nat_abs_of_nat, int.nat_abs_of_nat] }
end
-- A prime `p : ℕ` dividing `m * n` over `ℤ` divides one of the absolute values.
lemma int.prime.dvd_mul {m n : ℤ} {p : ℕ}
  (hp : nat.prime p) (h : (p : ℤ) ∣ m * n) : p ∣ m.nat_abs ∨ p ∣ n.nat_abs :=
begin
  apply (nat.prime.dvd_mul hp).mp,
  rw ← int.nat_abs_mul,
  exact int.coe_nat_dvd_left.mp h
end
-- Variant of `int.prime.dvd_mul` stated entirely over `ℤ`.
lemma int.prime.dvd_mul' {m n : ℤ} {p : ℕ}
  (hp : nat.prime p) (h : (p : ℤ) ∣ m * n) : (p : ℤ) ∣ m ∨ (p : ℤ) ∣ n :=
begin
  rw [int.coe_nat_dvd_left, int.coe_nat_dvd_left],
  exact int.prime.dvd_mul hp h
end
-- A prime dividing a power divides the base (via the `ℕ` statement).
lemma int.prime.dvd_pow {n : ℤ} {k p : ℕ}
  (hp : nat.prime p) (h : (p : ℤ) ∣ n ^ k) : p ∣ n.nat_abs :=
begin
  apply @nat.prime.dvd_of_dvd_pow _ _ k hp,
  rw ← int.nat_abs_pow,
  exact int.coe_nat_dvd_left.mp h
end
-- Variant of `int.prime.dvd_pow` stated over `ℤ`.
lemma int.prime.dvd_pow' {n : ℤ} {k p : ℕ}
  (hp : nat.prime p) (h : (p : ℤ) ∣ n ^ k) : (p : ℤ) ∣ n :=
begin
  rw int.coe_nat_dvd_left,
  exact int.prime.dvd_pow hp h
end
-- A prime dividing `2 * m ^ 2` is either `2` itself or divides `m`.
lemma prime_two_or_dvd_of_dvd_two_mul_pow_self_two {m : ℤ} {p : ℕ}
  (hp : nat.prime p) (h : (p : ℤ) ∣ 2 * m ^ 2) : p = 2 ∨ p ∣ int.nat_abs m :=
begin
  cases int.prime.dvd_mul hp h with hp2 hpp,
  { apply or.intro_left,
    -- `p ∣ 2` and `2 ≤ p` force `p = 2`
    exact le_antisymm (nat.le_of_dvd zero_lt_two hp2) (nat.prime.two_le hp) },
  { apply or.intro_right,
    rw [sq, int.nat_abs_mul] at hpp,
    exact (or_self _).mp ((nat.prime.dvd_mul hp).mp hpp)}
end
open unique_factorization_monoid
-- The UFD `normalized_factors` of a natural number coincide with `nat.factors`.
theorem nat.factors_eq {n : ℕ} : normalized_factors n = n.factors :=
begin
  cases n, { simp },
  rw [← multiset.rel_eq, ← associated_eq_eq],
  -- uniqueness of factorizations, with both sides shown to factor `n`
  apply factors_unique (irreducible_of_normalized_factor) _,
  { rw [multiset.coe_prod, nat.prod_factors (nat.succ_pos _)],
    apply normalized_factors_prod (nat.succ_ne_zero _) },
  { apply_instance },
  { intros x hx,
    rw [nat.irreducible_iff_prime, ← nat.prime_iff],
    exact nat.prime_of_mem_factors hx }
end
-- Factoring a product of irreducibles recovers exactly that multiset.
lemma nat.factors_multiset_prod_of_irreducible
  {s : multiset ℕ} (h : ∀ (x : ℕ), x ∈ s → irreducible x) :
  normalized_factors (s.prod) = s :=
begin
  rw [← multiset.rel_eq, ← associated_eq_eq],
  apply unique_factorization_monoid.factors_unique irreducible_of_normalized_factor h
    (normalized_factors_prod _),
  -- the product is nonzero since no factor is zero (zero is not irreducible)
  rw [ne.def, multiset.prod_eq_zero_iff],
  intro con,
  exact not_irreducible_zero (h 0 con),
end
namespace multiplicity
-- Finiteness of multiplicity over `ℤ` reduces to the `ℕ` case via `nat_abs`.
lemma finite_int_iff_nat_abs_finite {a b : ℤ} : finite a b ↔ finite a.nat_abs b.nat_abs :=
by simp only [finite_def, ← int.nat_abs_dvd_iff_dvd, int.nat_abs_pow]
-- Explicit criterion: `a` is not a unit (`|a| ≠ 1`) and `b ≠ 0`.
lemma finite_int_iff {a b : ℤ} : finite a b ↔ (a.nat_abs ≠ 1 ∧ b ≠ 0) :=
by rw [finite_int_iff_nat_abs_finite, finite_nat_iff, pos_iff_ne_zero, int.nat_abs_ne_zero]
-- Decidability of the multiplicity domain over `ℕ` and `ℤ`,
-- obtained from the decidable finiteness criteria above.
instance decidable_nat : decidable_rel (λ a b : ℕ, (multiplicity a b).dom) :=
λ a b, decidable_of_iff _ finite_nat_iff.symm
instance decidable_int : decidable_rel (λ a b : ℤ, (multiplicity a b).dom) :=
λ a b, decidable_of_iff _ finite_int_iff.symm
end multiplicity
-- Induction principle for `ℕ` along multiplication by primes.
lemma induction_on_primes {P : ℕ → Prop} (h₀ : P 0) (h₁ : P 1)
  (h : ∀ p a : ℕ, p.prime → P a → P (p * a)) (n : ℕ) : P n :=
begin
  apply unique_factorization_monoid.induction_on_prime,
  exact h₀,
  { intros n h,
    -- the only unit in `ℕ` is `1`
    rw nat.is_unit_iff.1 h,
    exact h₁, },
  { intros a p _ hp ha,
    exact h p a (nat.prime_iff.2 hp) ha, },
end
-- Every integer is associated to (the coercion of) its absolute value.
lemma int.associated_nat_abs (k : ℤ) : associated k k.nat_abs :=
associated_of_dvd_dvd (int.coe_nat_dvd_right.mpr dvd_rfl) (int.nat_abs_dvd.mpr dvd_rfl)
-- Primality over `ℤ` is primality of the absolute value over `ℕ`.
lemma int.prime_iff_nat_abs_prime {k : ℤ} : prime k ↔ nat.prime k.nat_abs :=
(int.associated_nat_abs k).prime_iff.trans nat.prime_iff_prime_int.symm
-- Integers are associated iff they have the same absolute value.
theorem int.associated_iff_nat_abs {a b : ℤ} : associated a b ↔ a.nat_abs = b.nat_abs :=
begin
  rw [←dvd_dvd_iff_associated, ←int.nat_abs_dvd_iff_dvd,
    ←int.nat_abs_dvd_iff_dvd, dvd_dvd_iff_associated],
  exact associated_iff_eq,
end
-- ...equivalently, they are equal up to sign.
lemma int.associated_iff {a b : ℤ} : associated a b ↔ (a = b ∨ a = -b) :=
begin
  rw int.associated_iff_nat_abs,
  exact int.nat_abs_eq_nat_abs_iff,
end
namespace int
-- The subgroup of multiples is insensitive to the sign of the generator.
lemma zmultiples_nat_abs (a : ℤ) :
  add_subgroup.zmultiples (a.nat_abs : ℤ) = add_subgroup.zmultiples a :=
le_antisymm
  (add_subgroup.zmultiples_subset (mem_zmultiples_iff.mpr (dvd_nat_abs.mpr (dvd_refl a))))
  (add_subgroup.zmultiples_subset (mem_zmultiples_iff.mpr (nat_abs_dvd.mpr (dvd_refl a))))
-- The ideal generated by `a` equals the one generated by `|a|`.
lemma span_nat_abs (a : ℤ) : ideal.span ({a.nat_abs} : set ℤ) = ideal.span {a} :=
by { rw ideal.span_singleton_eq_span_singleton, exact (associated_nat_abs _).symm }
-- For odd exponents (`bit1 k`) a sign can be absorbed into the base,
-- so a coprime factor of an odd power is itself an odd power.
theorem eq_pow_of_mul_eq_pow_bit1_left {a b c : ℤ}
  (hab : is_coprime a b) {k : ℕ} (h : a * b = c ^ (bit1 k)) : ∃ d, a = d ^ (bit1 k) :=
begin
  obtain ⟨d, hd⟩ := exists_associated_pow_of_mul_eq_pow' hab h,
  replace hd := hd.symm,
  rw [associated_iff_nat_abs, nat_abs_eq_nat_abs_iff, ←neg_pow_bit1] at hd,
  obtain rfl|rfl := hd; exact ⟨_, rfl⟩,
end
-- Mirror image of the previous theorem for the right factor.
theorem eq_pow_of_mul_eq_pow_bit1_right {a b c : ℤ}
  (hab : is_coprime a b) {k : ℕ} (h : a * b = c ^ (bit1 k)) : ∃ d, b = d ^ (bit1 k) :=
eq_pow_of_mul_eq_pow_bit1_left hab.symm (by rwa mul_comm at h)
-- Both factors at once.
theorem eq_pow_of_mul_eq_pow_bit1 {a b c : ℤ}
  (hab : is_coprime a b) {k : ℕ} (h : a * b = c ^ (bit1 k)) :
  (∃ d, a = d ^ (bit1 k)) ∧ (∃ e, b = e ^ (bit1 k)) :=
⟨eq_pow_of_mul_eq_pow_bit1_left hab h, eq_pow_of_mul_eq_pow_bit1_right hab h⟩
end int
|
{"author": "jjaassoonn", "repo": "projective_space", "sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce", "save_path": "github-repos/lean/jjaassoonn-projective_space", "path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/ring_theory/int/basic.lean"}
|
import numpy as np
import halotools.empirical_models as htem
import halotools.sim_manager as htsm
import galtab
def test_bolshoi_zheng_placeholder_weights(use_jax=False):
    """Regression test of placeholder weight histograms against known values.

    Builds a Zheng07 HOD model on the cached Bolshoi catalog, tabulates the
    galaxies, and checks central/satellite weight histograms and placeholder
    fractions against previously recorded values.

    Parameters
    ----------
    use_jax : bool
        When True, assemble the model from the jax-backed occupation
        components and use the jax tabulator.
    """
    redshift = 0
    threshold = -21
    halocat = htsm.CachedHaloCatalog(simname="bolshoi", redshift=redshift,
                                     halo_finder="rockstar")
    if use_jax:
        model = htem.HodModelFactory(
            centrals_occupation=galtab.jax.JaxZheng07Cens(
                threshold=threshold, redshift=redshift),
            satellites_occupation=galtab.jax.JaxZheng07Sats(
                threshold=threshold, redshift=redshift),
            centrals_profile=htem.TrivialPhaseSpace(redshift=redshift),
            satellites_profile=htem.NFWPhaseSpace(redshift=redshift)
        )
        gt = galtab.jax.GalaxyTabulator(halocat, model)
    else:
        model = htem.PrebuiltHodModelFactory("zheng07", threshold=threshold,
                                             redshift=redshift)
        # bug fix: this branch previously constructed galtab.jax.GalaxyTabulator,
        # making the non-jax path identical to the jax one; use the plain
        # tabulator (consistent with test_bolshoi_zheng_cic below)
        gt = galtab.GalaxyTabulator(halocat, model)
    # Test that the histogram of central weights and satellite weights
    # matches known values exactly
    is_central = gt.galaxies["gal_type"] == "centrals"
    weights = gt.calc_weights(model)
    weights_hist_cens = np.histogram(weights[is_central], bins=10)[0]
    weights_hist_sats = np.histogram(weights[~is_central], bins=10)[0]
    assert np.all(np.isclose(
        weights_hist_cens,
        [19756, 4879, 2868, 2099, 1714, 1461, 1323, 1279, 1452, 4025],
        atol=2)), f"known values != {weights_hist_cens}"
    assert np.all(np.isclose(
        weights_hist_sats,
        [13505, 3774, 1796, 1137, 837, 1099, 1002, 1173, 1811, 3912],
        atol=2)), f"known values != {weights_hist_sats}"
    # Test that the fraction of halos with central/satellite placeholders
    # matches known values exactly
    nhalo = len(gt.halocat.halo_table)
    ph_frac = len(set(gt.galaxies["halo_id"])) / nhalo
    cen_ph_frac = len(set(gt.galaxies["halo_id"][is_central])) / nhalo
    sat_ph_frac = len(set(gt.galaxies["halo_id"][~is_central])) / nhalo
    assert ph_frac == cen_ph_frac == 0.029876569752093796, \
        f"known value != {ph_frac} or {cen_ph_frac}"
    assert sat_ph_frac == 0.017895521220218313, \
        f"known value != {sat_ph_frac}"
    # Test histogram of satellite weights after changing alpha
    # (the old comment said "logM1", but the code updates alpha)
    model.param_dict.update(dict(alpha=0.9))
    weights = gt.calc_weights(model)
    new_weights_hist_sats = np.histogram(weights[~is_central], bins=10)[0]
    assert np.all(np.isclose(
        new_weights_hist_sats,
        [11297, 4471, 2310, 1545, 2438, 3890, 2324, 1027, 475, 269],
        atol=2)), f"known values != {new_weights_hist_sats}"
def test_bolshoi_zheng_cic(use_jax=False):
    """Check counts-in-cylinders predictions against recorded values
    for Zheng07 models at thresholds -19 and -20.5.

    Parameters
    ----------
    use_jax : bool
        Build the models from jax-backed components when True.
    """
    redshift = 0
    halocat = htsm.CachedHaloCatalog(simname="bolshoi", redshift=redshift,
                                     halo_finder="rockstar")
    if use_jax:
        def build_jax_model(threshold):
            # assemble a jax-backed Zheng07 HOD at the given threshold
            return htem.HodModelFactory(
                centrals_occupation=galtab.jax.JaxZheng07Cens(
                    threshold=threshold, redshift=redshift),
                satellites_occupation=galtab.jax.JaxZheng07Sats(
                    threshold=threshold, redshift=redshift),
                centrals_profile=htem.TrivialPhaseSpace(redshift=redshift),
                satellites_profile=htem.NFWPhaseSpace(redshift=redshift)
            )
        model19 = build_jax_model(-19)
        model205 = build_jax_model(-20.5)
        gt = galtab.jax.GalaxyTabulator(halocat, model19, seed=1)
    else:
        model19 = htem.PrebuiltHodModelFactory("zheng07", threshold=-19)
        model205 = htem.PrebuiltHodModelFactory("zheng07", threshold=-20.5)
        gt = galtab.GalaxyTabulator(halocat, model19, seed=1)
    bin_edges = np.concatenate([[-0.5, 2.5, 5.5], np.geomspace(9.5, 100.5, 4)])
    cic_kwargs = dict(proj_search_radius=2.0, cylinder_half_length=10.0)
    predictor = gt.tabulate_cic(bin_edges=bin_edges, **cic_kwargs)
    cic19 = predictor.predict(model19)
    cic205 = predictor.predict(model205)
    expected19 = [0.05488518, 0.06851228, 0.05155291,
                  0.02285464, 0.00497551, 0.00077832]
    expected205 = [1.63377928e-01, 9.66691666e-02, 3.42786433e-02,
                   6.50972054e-03, 3.83935374e-04, 2.79730971e-05]
    assert np.all(np.isclose(cic19, expected19)), f"known values != {cic19}"
    assert np.all(np.isclose(cic205, expected205)), f"known values != {cic205}"
|
{"hexsha": "704084b5da172b351eb33282feb40f74c05d4a06", "size": 4726, "ext": "py", "lang": "Python", "max_stars_repo_path": "galtab/tests/test_galtab.py", "max_stars_repo_name": "AlanPearl/hodtab", "max_stars_repo_head_hexsha": "d10e1ba2c314a068665d2cfc1bfc4c4f9a16c10f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "galtab/tests/test_galtab.py", "max_issues_repo_name": "AlanPearl/hodtab", "max_issues_repo_head_hexsha": "d10e1ba2c314a068665d2cfc1bfc4c4f9a16c10f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "galtab/tests/test_galtab.py", "max_forks_repo_name": "AlanPearl/hodtab", "max_forks_repo_head_hexsha": "d10e1ba2c314a068665d2cfc1bfc4c4f9a16c10f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3577981651, "max_line_length": 79, "alphanum_fraction": 0.6582733813, "include": true, "reason": "import numpy", "num_tokens": 1420}
|
import os
import re
from collections import defaultdict
import numpy as np
from utensor_cgen.frontend import FrontendSelector
from utensor_cgen.frontend.base import Parser
from utensor_cgen.ir.base import OperationInfo, TensorInfo, uTensorGraph
from utensor_cgen.ir.converter import (AttrValueConverter,
GenericTensorConverterMixin)
from utensor_cgen.legalizer import Legalizer
from utensor_cgen.utils import topologic_order_graph
from utensor_cgen.logger import logger
from .tflite_flatbuffer.ActivationFunctionType import ActivationFunctionType
from .tflite_flatbuffer.BuiltinOperator import BuiltinOperator
from .tflite_flatbuffer.CustomOptionsFormat import CustomOptionsFormat
from .tflite_flatbuffer.FullyConnectedOptionsWeightsFormat import \
FullyConnectedOptionsWeightsFormat
from .tflite_flatbuffer.Model import Model
_CUSTOM_OPTION_FORMAT_MAP = {v: k for k, v in CustomOptionsFormat.__dict__.items()}
@FrontendSelector.register(target_exts=[".tflite"])
class TFLiteParser(Parser):
  """Frontend parser that converts a ``.tflite`` flatbuffer model
  into a :class:`uTensorGraph`.
  """

  # tflite TensorType enum value -> numpy dtype
  _TENSOR_NP_TYPE = {
    0: np.dtype("float32"),
    1: np.dtype("float16"),
    2: np.dtype("int32"),
    3: np.dtype("uint8"),
    4: np.dtype("uint64"),
    5: np.dtype("str"),
    6: np.dtype("bool"),
    7: np.dtype("int16"),
    8: np.dtype("cdouble"),
    9: np.dtype("int8"),
  }
  # BuiltinOperator enum value -> op-type name
  _BUILTIN_OPS = {v: k for k, v in BuiltinOperator.__dict__.items()}

  def parse(self, tflite_file, output_nodes=None, model_name=None):
    """Parse ``tflite_file`` into a legalized ``uTensorGraph``.

    :param tflite_file: path to the ``.tflite`` flatbuffer file
    :param output_nodes: optional list of output node names; discovered from
      the flatbuffer when not given
    :param model_name: optional graph name; defaults to the file basename
    :return: the legalized ``uTensorGraph``
    """
    if output_nodes is None:
      output_nodes = []
    if model_name:
      graph_name = model_name
    else:
      graph_name, _ = os.path.splitext(
        os.path.basename(tflite_file)
      )
    with open(tflite_file, "rb") as fid:
      buf = bytearray(fid.read())
    fb_model = Model.GetRootAsModel(buf, 0)
    ugraph = uTensorGraph(
      name=graph_name,
      output_nodes=output_nodes,
      lib_name="tflite",
      ops_info={},
    )
    self._build_graph(fb_model, ugraph)
    ugraph = Legalizer.legalize(ugraph)
    return ugraph

  def _build_graph(self, fb_model, ugraph):
    """Populate ``ugraph`` (in place) from the flatbuffer model."""
    # tensors are addressed by index
    tensor_names_map = self._build_tensor_map(fb_model, ugraph)
    self._build_param_ops(fb_model, ugraph, tensor_names_map)
    # find and set input nodes
    self._build_input_ops(fb_model, ugraph, tensor_names_map)
    self._build_intermediate_ops(fb_model, ugraph, tensor_names_map)
    self._set_output_ops(fb_model, ugraph, tensor_names_map)
    self._prepare_quant_params(ugraph)
    topologic_order_graph(ugraph)

  def _build_tensor_map(self, fb_model, ugraph):
    """Build a ``{tensor index: TensorInfo}`` map for the (single) subgraph."""
    tensor_names_map = {}
    subgraph = self._get_tflm_get_subgraph(fb_model)
    for idx in range(0, subgraph.TensorsLength()):
      tensor = subgraph.Tensors(idx)
      tensor_name = tensor.Name().decode('utf8')
      # bug fix: the original test was `tensor_name is "" or None`, which
      # parses as `(tensor_name is "") or None` -- an unreliable identity
      # comparison against a literal; use truthiness to detect unnamed tensors
      if not tensor_name:
        tensor_name = "tensor_" + str(idx)
      dtype = self._TENSOR_NP_TYPE[tensor.Type()]
      attributes = dict()
      quant_params = tensor.Quantization()
      # keep quantization parameters only when both zero points and scales exist
      if quant_params is not None and \
          quant_params.ZeroPointLength() and \
          quant_params.ScaleLength():
        attributes["quantization_zeros"] = quant_params.ZeroPointAsNumpy()
        attributes["quantization_scales"] = quant_params.ScaleAsNumpy()
      if isinstance(tensor.ShapeAsNumpy(), np.ndarray):
        shape = tensor.ShapeAsNumpy().tolist()
      else:
        # NOTE(review): hard-coded buffer index 12 as shape fallback looks
        # suspicious -- confirm why this particular buffer is used
        shape = list(fb_model.Buffers(12).DataAsNumpy().shape)
      tensor_names_map[idx] = TensorInfo(
        name=self._format_tensor_name("", tensor_name, 0),
        op_name="",
        dtype=dtype,
        shape=shape,
        attributes=attributes,
        ugraph=ugraph,
      )
    return tensor_names_map

  def _build_param_ops(self, fb_model, ugraph, tensor_names_map):
    """Const tensors are identified by buffer_index == 0. These tensors are converted to Const Op and added to ugraph
    """
    subgraph = self._get_tflm_get_subgraph(fb_model)
    for idx in range(0, subgraph.TensorsLength()):
      tensor = subgraph.Tensors(idx)
      buffer_index = tensor.Buffer()
      # buffer_index == 0 if intermediate
      if buffer_index == 0:
        continue
      node_name = re.sub(r':\d+', '', tensor_names_map[idx].name) + "_Const"
      dtype = tensor_names_map[idx].dtype
      buffer_array = fb_model.Buffers(buffer_index).DataAsNumpy()
      if isinstance(buffer_array, int):
        continue  # somehow, sometimes, the buffer contains no data, likely to be an intermediate tensor
      buffer_content = fb_model.Buffers(buffer_index).DataAsNumpy().view(dtype).reshape(
        tensor_names_map[idx].shape
      )
      OperationInfo(
        name=node_name,
        input_tensors=[],
        output_tensors=[tensor_names_map[idx]],
        op_type="Const",
        lib_name="tflm",
        ugraph=ugraph,
        op_attr={
          "value": AttrValueConverter.GenericType(
            value_name="tensor",
            value=GenericTensorConverterMixin.GenericType(
              np_array=buffer_content
            ),
          )
        },
      )
      self._set_tensor_node(idx, node_name, tensor_names_map)

  def _build_input_ops(self, fb_model, ugraph, tensor_names_map):
    """Find placeholders
    Attach placeholders to input tensors
    Note this method will update inputs **inplace**
    """
    subgraph = self._get_tflm_get_subgraph(fb_model)
    subgraph_inputs_indexi = subgraph.InputsAsNumpy()
    for index in subgraph_inputs_indexi:
      node_name = tensor_names_map[index].name + "_Placeholder"
      self._set_tensor_node(index, node_name, tensor_names_map)
      OperationInfo(
        name=node_name,
        input_tensors=[],
        output_tensors=[tensor_names_map[index]],
        op_type="Placeholder",
        ugraph=ugraph,
        lib_name="tflm",
        op_attr={},
      )

  def _build_intermediate_ops(self, fb_model, ugraph, tensor_names_map):
    """Build all intermediate nodes
    """
    subgraphs_len = fb_model.SubgraphsLength()
    assert subgraphs_len == 1, "only 1 subgraph is supported"
    subgraph = fb_model.Subgraphs(0)
    for i in range(0, subgraph.OperatorsLength()):
      # topological order, op-index defined by schema
      # BuiltinOperator: https://github.com/tensorflow/tensorflow/blob/031804922d8f4d18b61e3ad077f9f1b69273ff21/tensorflow/lite/schema/schema_v3.fbs#L71
      op = subgraph.Operators(i)
      op_type = _get_op_type(op, fb_model)
      node_name = str(i) + "_" + op_type
      input_tensor_names = [
        tensor_names_map[input_index] for input_index in op.InputsAsNumpy()
      ]
      output_tensor_names = [
        tensor_names_map[output_index]
        for output_index in op.OutputsAsNumpy()
      ]
      # per-op option parsing; unknown ops fall back to a warning parser
      op_attr = _OP_DATA_FUNC_MAP[op_type](op, fb_model)
      OperationInfo(
        name=node_name,
        input_tensors=input_tensor_names,
        output_tensors=output_tensor_names,
        op_type=self._format_op_type(op_type),
        ugraph=ugraph,
        lib_name="tflm",
        op_attr=op_attr,
      )
      for tensor_index in op.OutputsAsNumpy():
        self._set_tensor_node(tensor_index, node_name, tensor_names_map)

  def _set_output_ops(self, fb_model, ugraph, tensor_names_map):
    """identfy output nodes in fb_mdel
    sets output_nodes in ugraph
    Note this method will update ugraph **inplace**
    """
    subgraph = self._get_tflm_get_subgraph(fb_model)
    subgraph_outputs_indexi = subgraph.OutputsAsNumpy()  # tensor indexi
    output_node_names = set()
    for index in subgraph_outputs_indexi:
      output_node_names.add(tensor_names_map[index].op_name)
    ugraph.output_nodes = list(output_node_names)

  def _get_tflm_get_subgraph(self, fb_model):
    """Return the model's single subgraph (multiple subgraphs unsupported)."""
    subgraphs_len = fb_model.SubgraphsLength()
    assert subgraphs_len == 1, "only 1 subgraph is supported"
    subgraph = fb_model.Subgraphs(0)
    return subgraph

  def _set_tensor_node(self, idx, name, tensor_names_map):
    """Record tensor ``idx`` as an output of op ``name`` (must be unset)."""
    assert tensor_names_map[idx].op_name == ""
    tensor_names_map[idx].op_name = name

  @staticmethod
  def _prepare_quant_params(ugraph):
    # spec: https://www.tensorflow.org/lite/performance/quantization_spec
    # bias zero points of conv-like ops are stored as int32; the three
    # previously duplicated loops are consolidated here
    for op_type in ('DepthwiseConv2d', 'FullyConnected', 'Conv2d'):
      for op_info in ugraph.get_ops_by_type(op_type):
        bias = op_info.input_tensors[2]
        if 'quantization_zeros' in bias.attributes:
          zp = bias.attributes['quantization_zeros']
          bias.attributes['quantization_zeros'] = zp.astype(np.dtype('int32'))

  def _format_node_name(self, node_name, op_type, op_cnt):
    """Sanitize (or synthesize) a node name: no '.', ':' or '/' characters."""
    if node_name == "":
      node_name = "{}_{}".format(op_type, op_cnt)
    return re.sub(r"[\.:/]", "_", node_name)

  def _format_tensor_name(self, name, node_name, offset):
    """Return ``name`` if already of the form ``ident:N``; else ``node_name:offset``."""
    if re.match(r"[a-zA-Z][a-zA-Z0-9]*:[0-9]+", name):
      return name
    return "{}:{}".format(node_name, offset)

  def _format_op_type(self, op_type):
    """Convert an UPPER_SNAKE op name (e.g. ``FULLY_CONNECTED``) to CamelCase."""
    return ''.join(map(lambda s: s.capitalize(), op_type.split('_')))
# helper functions for parsing op data (will be stored in op_attr)
def class_option2str(obj, idx):
  """Render an enum-like class value as ``"<idx> (<NAME>)"``.

  ``obj`` is a flatbuffer enum class whose class attributes map names to
  integer values; ``idx`` is one of those values.
  """
  reverse_lookup = dict((value, key) for key, value in obj.__dict__.items())
  return "{} ({})".format(idx, reverse_lookup[idx])
def fully_connected_op_data(op, fb_mdel):
  """Collect FULLY_CONNECTED builtin (or custom) options into a dict."""
  options = {}
  if op.CustomOptionsLength() >= 1:
    # custom options: store the raw payload under the format's name
    options[_CUSTOM_OPTION_FORMAT_MAP[op.CustomOptionsFormat()]] = \
      op.CustomOptionsAsNumpy()
    return options
  from .tflite_flatbuffer.FullyConnectedOptions import FullyConnectedOptions
  parsed = FullyConnectedOptions()
  raw = op.BuiltinOptions()
  parsed.Init(raw.Bytes, raw.Pos)
  options["FusedActivationFunction"] = class_option2str(
    ActivationFunctionType, parsed.FusedActivationFunction()
  )
  options["w_formats"] = class_option2str(
    FullyConnectedOptionsWeightsFormat, parsed.WeightsFormat()
  )
  return options

def depthwise_conv2d_op_data(op, fb_mdel):
  """Collect DEPTHWISE_CONV_2D builtin (or custom) options into a dict."""
  options = {}
  if op.CustomOptionsLength() >= 1:
    options[_CUSTOM_OPTION_FORMAT_MAP[op.CustomOptionsFormat()]] = \
      op.CustomOptionsAsNumpy()
    return options
  from .tflite_flatbuffer.DepthwiseConv2DOptions import DepthwiseConv2DOptions
  parsed = DepthwiseConv2DOptions()
  raw = op.BuiltinOptions()
  parsed.Init(raw.Bytes, raw.Pos)
  options["Padding"] = parsed.Padding()
  options["StrideW"] = parsed.StrideW()
  options["StrideH"] = parsed.StrideH()
  options["DepthMultiplier"] = parsed.DepthMultiplier()
  options["FusedActivationFunction"] = class_option2str(
    ActivationFunctionType, parsed.FusedActivationFunction()
  )
  options["DilationWFactor"] = parsed.DilationWFactor()
  options["DilationHFactor"] = parsed.DilationHFactor()
  return options
def conv_2d_op_data(op, fb_model):
  """Collect CONV_2D builtin (or custom) options into a dict."""
  options = {}
  if op.CustomOptionsLength() >= 1:
    options[_CUSTOM_OPTION_FORMAT_MAP[op.CustomOptionsFormat()]] = \
      op.CustomOptionsAsNumpy()
    return options
  from .tflite_flatbuffer.Conv2DOptions import Conv2DOptions
  parsed = Conv2DOptions()
  raw = op.BuiltinOptions()
  parsed.Init(raw.Bytes, raw.Pos)
  options["Padding"] = parsed.Padding()
  options["StrideW"] = parsed.StrideW()
  options["StrideH"] = parsed.StrideH()
  options["FusedActivationFunction"] = class_option2str(
    ActivationFunctionType, parsed.FusedActivationFunction()
  )
  options["DilationWFactor"] = parsed.DilationWFactor()
  options["DilationHFactor"] = parsed.DilationHFactor()
  return options

def reshape_op_data(op, fb_mdel):
  """Collect RESHAPE builtin (or custom) options; exposes the target shape."""
  options = {}
  if op.CustomOptionsLength() >= 1:
    options[_CUSTOM_OPTION_FORMAT_MAP[op.CustomOptionsFormat()]] = \
      op.CustomOptionsAsNumpy()
    return options
  from .tflite_flatbuffer.ReshapeOptions import ReshapeOptions
  parsed = ReshapeOptions()
  raw = op.BuiltinOptions()
  parsed.Init(raw.Bytes, raw.Pos)
  options["new_shape"] = list(parsed.NewShapeAsNumpy())
  return options
def dequantize_op_data(op, fb_mdel):
  """Collect DEQUANTIZE options; returns {} when no builtin payload exists."""
  options = {}
  if op.CustomOptionsLength() >= 1:
    options[_CUSTOM_OPTION_FORMAT_MAP[op.CustomOptionsFormat()]] = \
      op.CustomOptionsAsNumpy()
    return options
  from .tflite_flatbuffer.DequantizeOptions import DequantizeOptions
  raw = op.BuiltinOptions()
  # some models carry no builtin payload for this op
  if raw is None:
    return options
  parsed = DequantizeOptions()
  parsed.Init(raw.Bytes, raw.Pos)
  options['builtin'] = parsed
  return options

def quantize_op_data(op, fb_mdel):
  """Collect QUANTIZE options; returns {} when no builtin payload exists."""
  options = {}
  if op.CustomOptionsLength() >= 1:
    options[_CUSTOM_OPTION_FORMAT_MAP[op.CustomOptionsFormat()]] = \
      op.CustomOptionsAsNumpy()
    return options
  from .tflite_flatbuffer.QuantizeOptions import QuantizeOptions
  raw = op.BuiltinOptions()
  # some models carry no builtin payload for this op
  if raw is None:
    return options
  parsed = QuantizeOptions()
  parsed.Init(raw.Bytes, raw.Pos)
  options['builtin'] = parsed
  return options
def pool2d_op_data(op, fb_mdel):
  """Collect pooling (e.g. MAX_POOL_2D) builtin (or custom) options."""
  options = {}
  if op.CustomOptionsLength() >= 1:
    options[_CUSTOM_OPTION_FORMAT_MAP[op.CustomOptionsFormat()]] = \
      op.CustomOptionsAsNumpy()
    return options
  from .tflite_flatbuffer.Pool2DOptions import Pool2DOptions
  parsed = Pool2DOptions()
  raw = op.BuiltinOptions()
  parsed.Init(raw.Bytes, raw.Pos)
  options["Padding"] = parsed.Padding()
  options["StrideW"] = parsed.StrideW()
  options["StrideH"] = parsed.StrideH()
  options["FilterWidth"] = parsed.FilterWidth()
  options["FilterHeight"] = parsed.FilterHeight()
  options["FusedActivationFunction"] = class_option2str(
    ActivationFunctionType, parsed.FusedActivationFunction()
  )
  return options

def argmax_op_data(op, fb_mdel):
  """Collect ARG_MAX builtin (or custom) options; exposes the output dtype."""
  options = {}
  if op.CustomOptionsLength() >= 1:
    options[_CUSTOM_OPTION_FORMAT_MAP[op.CustomOptionsFormat()]] = \
      op.CustomOptionsAsNumpy()
    return options
  from .tflite_flatbuffer.ArgMaxOptions import ArgMaxOptions
  parsed = ArgMaxOptions()
  raw = op.BuiltinOptions()
  parsed.Init(raw.Bytes, raw.Pos)
  options["OutputType"] = parsed.OutputType()
  return options
def default_op_data(op, fb_mdel):
  """Fallback option parser: warn once per op and return no attributes."""
  logger.warning('the op data parser is missing for %s', _get_op_type(op, fb_mdel))
  return {}
# dispatch table: builtin op-type name -> option parser; op types without a
# dedicated parser fall back to `default_op_data`, which warns and returns {}
_OP_DATA_FUNC_MAP = defaultdict(lambda: default_op_data)
_OP_DATA_FUNC_MAP["QUANTIZE"] = quantize_op_data
_OP_DATA_FUNC_MAP["DEPTHWISE_CONV_2D"] = depthwise_conv2d_op_data
_OP_DATA_FUNC_MAP["CONV_2D"] = conv_2d_op_data
_OP_DATA_FUNC_MAP["MAX_POOL_2D"] = pool2d_op_data
_OP_DATA_FUNC_MAP["RESHAPE"] = reshape_op_data
_OP_DATA_FUNC_MAP["FULLY_CONNECTED"] = fully_connected_op_data
_OP_DATA_FUNC_MAP["DEQUANTIZE"] = dequantize_op_data
_OP_DATA_FUNC_MAP["ARG_MAX"] = argmax_op_data
def _get_op_type(op, fb_model):
  """Resolve an operator's builtin type name via the model's operator-code table."""
  operator_code = fb_model.OperatorCodes(op.OpcodeIndex())
  return TFLiteParser._BUILTIN_OPS[operator_code.BuiltinCode()]
|
{"hexsha": "d7229a35c23cf079e8934b813e719dd62e54a998", "size": 15662, "ext": "py", "lang": "Python", "max_stars_repo_path": "utensor_cgen/frontend/tflite.py", "max_stars_repo_name": "uTensor/utensor_cgen", "max_stars_repo_head_hexsha": "eccd6859028d0b6a350dced25ea72ff02faaf9ad", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2018-01-06T12:57:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-03T09:48:32.000Z", "max_issues_repo_path": "utensor_cgen/frontend/tflite.py", "max_issues_repo_name": "uTensor/utensor_cgen", "max_issues_repo_head_hexsha": "eccd6859028d0b6a350dced25ea72ff02faaf9ad", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 101, "max_issues_repo_issues_event_min_datetime": "2018-01-16T19:24:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-10T19:39:33.000Z", "max_forks_repo_path": "utensor_cgen/frontend/tflite.py", "max_forks_repo_name": "uTensor/utensor_cgen", "max_forks_repo_head_hexsha": "eccd6859028d0b6a350dced25ea72ff02faaf9ad", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 32, "max_forks_repo_forks_event_min_datetime": "2018-02-15T19:39:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-26T22:32:05.000Z", "avg_line_length": 34.8044444444, "max_line_length": 152, "alphanum_fraction": 0.7134465585, "include": true, "reason": "import numpy", "num_tokens": 3952}
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torch.optim import Adam
import torch.nn.functional as functional
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from config import Config
from dataset import Vocabulary, DualNovelDataSet
config = Config()
class SeqAutoEncoder(object):
    """
    Sequence autoencoder used to measure the semantic difference
    between sentences (via the MSE of their encodings).
    """

    def __init__(self):
        """Set up datasets, encoder/decoder models, optimizer and hyper-parameters."""
        # datasets
        self.vocabulary = Vocabulary(config.vocab_file)
        self.pad = self.vocabulary.word2id['<pad>']
        self.go = self.vocabulary.word2id['<go>']
        self.eos = self.vocabulary.word2id['<eos>']
        self.unk = self.vocabulary.word2id['<unk>']
        # NOTE(review): train and test sets are constructed identically --
        # confirm whether DualNovelDataSet should receive a split argument
        self.train_set = DualNovelDataSet()
        self.test_set = DualNovelDataSet()
        # models: collect only trainable parameters for the optimizer
        self.encoder = RNNEncoder(config.encoder_num_layers, config.encoder_bidirectional)
        self.decoder = RNNDecoder(config.decoder_num_layers, config.decoder_bidirectional)
        self.trainable_variables = []
        for k, v in self.encoder.state_dict(keep_vars=True).items():
            if v.requires_grad:
                self.trainable_variables.append(v)
        for k, v in self.decoder.state_dict(keep_vars=True).items():
            if v.requires_grad:
                self.trainable_variables.append(v)
        # optimizer and hyper-parameters
        self.learning_rate = config.ae_learning_rate
        self.beta1 = config.ae_beta1
        self.beta2 = config.ae_beta2
        self.optimizer = Adam(self.trainable_variables, self.learning_rate, (self.beta1, self.beta2))
        self.criterion = nn.CrossEntropyLoss()
        self.mse_loss = nn.MSELoss(reduction='mean')
        self.batch_size = config.ae_batch_size
        self.epochs = config.ae_epochs
        self.num_workers = config.ae_num_workers

    def set_training(self, train_mode):
        """Switch encoder and decoder between train/eval mode.

        Args:
            train_mode: bool, True for training mode
        """
        self.encoder.train(mode=train_mode)
        self.decoder.train(mode=train_mode)

    def train(self, verbose=False, graph=False):
        """Train the autoencoder.

        Args:
            verbose: print the mean loss after each epoch
            graph: show a loss curve after training finishes
        """
        loss_list = []
        for epoch in range(self.epochs):
            epoch_loss = self.run_epoch(test=False)
            loss_list += epoch_loss
            if verbose:
                print('\n[TRAIN] Epoch {}, mean loss {}'.format(epoch, np.mean(epoch_loss)))
        if graph:
            plt.figure()
            plt.plot([x for x in range(len(loss_list))], loss_list)
            plt.xlabel('step')
            plt.ylabel('loss')
            plt.grid()
            plt.title('Training loss')
            plt.show()

    def run_epoch(self, test=False):
        """Run one epoch in train or test mode.

        Args:
            test: bool, run in test mode when True

        Returns:
            test = True: mean loss over the epoch (float)
            test = False: list of per-step training losses
        """
        loss_list = []
        # bug fix: the two branches were inverted -- test mode previously
        # iterated the *training* set with train-mode modules, and the other
        # branch called `self.encoder.train(False)` twice so the decoder's
        # mode was never set
        if test:
            loader = DataLoader(self.test_set, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
            self.set_training(False)
        else:
            loader = DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
            self.set_training(True)
        with tqdm(loader) as pbar:
            for data in pbar:
                sentences, labels = self.preprocess_data(data)
                batch_size = sentences.shape[0]
                encoder_state = self.encoder.init_hidden(batch_size)
                encoder_output, encoder_hidden = self.encoder(sentences, encoder_state)
                # NOTE(review): decoder input is the <go> id broadcast over an
                # embedding-dim tensor -- confirm this matches RNNDecoder's contract
                decoder_input = self.go * torch.ones((batch_size, config.max_sentence_length, config.embedding_dim))
                decoder_output, decoder_hidden = self.decoder(decoder_input, encoder_hidden)
                loss = self.criterion(decoder_output, sentences.reshape(-1))
                if not test:
                    self.optimizer.zero_grad()
                    loss.backward()
                    self.optimizer.step()
                loss_list.append(loss.item())
        return np.mean(loss_list) if test else loss_list

    def mean_difference(self, sentences_0, sentences_1):
        """Mean semantic difference of two sentence batches.

        Sentences are encoded; the difference is the MSE of the encodings.

        Args:
            sentences_0: LongTensor of shape [batch_size, max_seq_len]
            sentences_1: same as above

        Returns:
            mean semantic difference (scalar tensor)
        """
        with torch.no_grad():
            assert sentences_0.shape[0] == sentences_1.shape[0]
            batch_size = sentences_0.shape[0]
            encoder_state = self.encoder.init_hidden(batch_size)
            _, hidden_0 = self.encoder(sentences_0, encoder_state)
            _, hidden_1 = self.encoder(sentences_1, encoder_state)
            return self.mse_loss(hidden_0, hidden_1)

    def save_model(self):
        """Save encoder/decoder parameters to the configured paths."""
        torch.save(self.encoder.state_dict(), config.encoder_model_path)
        torch.save(self.decoder.state_dict(), config.decoder_model_path)

    def load_model(self):
        """Load encoder/decoder parameters from the configured paths (CPU-mapped)."""
        self.encoder.load_state_dict(torch.load(config.encoder_model_path, map_location=lambda storage, loc: storage))
        self.decoder.load_state_dict(torch.load(config.decoder_model_path, map_location=lambda storage, loc: storage))

    def preprocess_data(self, data):
        """Preprocess a DualNovelDataSet sample containing both styles (0 and 1).

        Args:
            data: one sample from the DataLoader; see the unpacking below

        Returns:
            sentences: Tensor of shape [batch_size * 2, max_len]
            label: Tensor of shape [batch_size * 2] (0 for style 0, 1 for style 1)
        """
        (bare_0, go_0, eos_0, len_0), (bare_1, go_1, eos_1, len_1) = data
        batch_size = bare_0.shape[0]
        label_0 = torch.zeros(batch_size)
        label_1 = torch.ones(batch_size)
        sentences = torch.cat([bare_0, bare_1], dim=0)
        label = torch.cat([label_0, label_1], dim=0)
        if config.gpu:
            sentences = sentences.cuda()
            label = label.cuda()
        return sentences, label
class RNNEncoder(nn.Module):
    """RNN-based sequence encoder.

    An Embedding layer followed by a (possibly bidirectional) multi-layer
    GRU.
    """

    def __init__(self, num_layers, bidirectional):
        """Build the encoder.

        Args:
            num_layers: number of stacked GRU layers.
            bidirectional: bool, whether to use a bidirectional GRU.
        """
        super(RNNEncoder, self).__init__()
        self.vocabulary = Vocabulary(config.vocab_file)
        self.embedding_dim = config.embedding_dim
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.directions = 2 if bidirectional else 1
        self.embedding = nn.Embedding(self.vocabulary.vocab_size, self.embedding_dim)
        self.gru = nn.GRU(
            self.embedding_dim, self.embedding_dim, num_layers,
            batch_first=True, bidirectional=bidirectional
        )
        if config.gpu:
            self.embedding = self.embedding.cuda()
            self.gru = self.gru.cuda()

    def forward(self, inputs, hidden):
        """Encode a batch of token sequences.

        Args:
            inputs: LongTensor [batch_size, max_seq_len] of token ids.
            hidden: Tensor [num_layers * directions, batch_size,
                embedding_dim], the initial hidden state.

        Returns:
            output: Tensor [batch_size, max_seq_len, embedding_dim].
            hidden: final hidden state, same shape as the input hidden.
        """
        # embedded: [batch_size, max_seq_len, embedding_dim]
        embedded = self.embedding(inputs)
        return self.gru(embedded, hidden)

    def init_hidden(self, batch_size):
        """Zero initial hidden state for a batch of the given size.

        Args:
            batch_size: batch size, which fixes the state's second dim.

        Returns:
            Tensor of shape [num_layers * directions, batch_size,
            embedding_dim], on GPU when config.gpu is set.
        """
        shape = (self.num_layers * self.directions, batch_size, self.embedding_dim)
        state = torch.zeros(shape)
        return state.cuda() if config.gpu else state
class RNNDecoder(nn.Module):
    """RNN-based decoder module.

    A multi-layer GRU followed by a fully connected layer that projects
    hidden states onto the vocabulary. The decoder needs no init_hidden of
    its own because its initial state comes from the encoder.
    """

    def __init__(self, num_layers, bidirectional):
        """Build the decoder.

        Args:
            num_layers: number of stacked GRU layers.
            bidirectional: bool, whether to use a bidirectional GRU.
        """
        super(RNNDecoder, self).__init__()
        self.vocabulary = Vocabulary(config.vocab_file)
        self.embedding_dim = config.embedding_dim
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.directions = 2 if self.bidirectional else 1
        # Bug fix: this table was declared as
        # nn.Embedding(embedding_dim, embedding_dim); an embedding must have
        # one row per vocabulary entry. NOTE(review): forward() never uses
        # this layer (its inputs are already embedding vectors), so it looks
        # like dead code -- confirm before relying on or removing it.
        self.embedding = nn.Embedding(self.vocabulary.vocab_size, self.embedding_dim)
        if config.gpu:
            self.embedding = self.embedding.cuda()
        self.gru = nn.GRU(
            self.embedding_dim, self.embedding_dim, num_layers, batch_first=True, bidirectional=bidirectional
        )
        if config.gpu:
            self.gru = self.gru.cuda()
        # Projects (possibly concatenated bidirectional) GRU outputs onto
        # the vocabulary.
        self.dense = nn.Linear(self.embedding_dim * self.directions, self.vocabulary.vocab_size)
        if config.gpu:
            self.dense = self.dense.cuda()

    def forward(self, inputs, hidden):
        """Decode a batch of embedded inputs.

        Args:
            inputs: Tensor [batch_size, max_seq_len, embedding_dim] of
                embedding vectors (typically a sequence of <GO> embeddings).
            hidden: Tensor [num_layers * directions, batch_size,
                embedding_dim], the initial hidden state from the encoder.

        Returns:
            output_logits: Tensor [batch_size * max_seq_len, vocab_size] of
                per-token vocabulary scores (first two dims flattened).
            output_probs: softmax of output_logits, i.e. token probabilities.
        """
        output, hidden = self.gru(inputs, hidden)
        # Flatten [batch, max_len, dim] -> [batch * max_len, dim] so the
        # dense layer scores every position at once.
        output = output.reshape(output.size(0) * output.size(1), output.size(2))
        output_logits = self.dense(output)
        output_probs = functional.softmax(output_logits, dim=1)
        return output_logits, output_probs
def train_autoencoder():
    """Train a SeqAutoEncoder, save it, then print the mean semantic
    difference between one batch of style-0 and style-1 sentences."""
    model = SeqAutoEncoder()
    model.train(verbose=True, graph=True)
    model.save_model()
    loader = DataLoader(model.train_set, batch_size=16, shuffle=True)
    for batch in loader:
        (sen_0, _, _, _), (sen_1, _, _, _) = batch
        print(model.mean_difference(sen_0, sen_1))
        break
if __name__ == '__main__':
    # Script entry point: train the sentence autoencoder end to end.
    train_autoencoder()
|
{"hexsha": "9d757ddcea374b48451a0be014e63fb5edcaa26b", "size": 11129, "ext": "py", "lang": "Python", "max_stars_repo_path": "autoencoder.py", "max_stars_repo_name": "RunTimeError2/TextStyleTransfer_PTO", "max_stars_repo_head_hexsha": "870ee837696d8eddffed6a3fd162ea1ad403ce44", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-09T07:15:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T07:15:39.000Z", "max_issues_repo_path": "autoencoder.py", "max_issues_repo_name": "RunTimeError2/TextStyleTransfer_PTO", "max_issues_repo_head_hexsha": "870ee837696d8eddffed6a3fd162ea1ad403ce44", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autoencoder.py", "max_forks_repo_name": "RunTimeError2/TextStyleTransfer_PTO", "max_forks_repo_head_hexsha": "870ee837696d8eddffed6a3fd162ea1ad403ce44", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.926035503, "max_line_length": 120, "alphanum_fraction": 0.5996046365, "include": true, "reason": "import numpy", "num_tokens": 2849}
|
import time
t0t=time.time()
from os.path import join
import os
import numpy as n
import glob
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import astropy.io.fits as fits
from scipy.interpolate import interp1d
from scipy.stats import norm as gaussD
import GalaxySpectrumFIREFLY as gs
# Environment variable pointing at the data root directory.
env = 'DATA_DIR'
# Directory holding the simulated GAMA mock spectra (gal_*.dat files).
spec_dir = join( os.environ[env], "spm", "GAMAmock")
filenames = n.array(glob.glob(os.path.join(spec_dir, "gal_*.dat")))
filenames.sort()
print "N files=",len(filenames)
# Directory with the FIREFLY stellar-population fit outputs.
stellarpop_dir = join( os.environ[env],
 "spm", "GAMAmock", 'stellarpop')
# Output directories for the combined FITS files and the QA figures.
out_dir = join( os.environ[env], "spm", "GAMAmock", 'results-SNR')
im_dir = os.path.join(os.environ[env], "spm", "GAMAmock", 'images-SNR')
# One entry per fit variant: file suffix, IMF, stellar library, and the
# multiplicative factor that was applied to the spectral noise.
# NOTE(review): error_factors is (1., 0.5, 0.01, 0.05) while the suffixes
# "-emf01"/"-emf005" would suggest 0.1/0.05 -- confirm the pairing.
suffixes = n.array(["-miles-kr.fits","-emf05-miles-kr.fits","-emf01-miles-kr.fits","-emf005-miles-kr.fits"])
imfs = n.array(["Kroupa", "Kroupa", "Kroupa", "Kroupa"])
libs = n.array(["miles", "miles", "miles", "miles"])
error_factors = n.array([1., 0.5, 0.01, 0.05])
# Map an input spectrum path to its output FITS file / figure path.
path_2_out = lambda filename : os.path.join(out_dir, os.path.basename(filename)[:-4]+".fits")
path_2_im = lambda filename : os.path.join(im_dir, os.path.basename(filename)[:-4]+".SNR.png")
def create_tbhdu(sp_cha, imf, lib, error_factor):
    """Pack one FIREFLY fit into a binary-table HDU.

    Args:
        sp_cha: opened FITS HDU list of the stellar-population fit.
        imf: name of the initial mass function used for the fit.
        lib: name of the stellar library used for the fit.
        error_factor: multiplicative factor applied to the spectral noise.

    Returns:
        astropy.io.fits.BinTableHDU with the model spectrum as columns and
        the fit parameters copied into the header.
    """
    hdr = sp_cha[1].header
    wave_col = fits.Column(name='wavelength', format='D', unit='Angstrom', array=sp_cha[1].data['wavelength'])
    flux_col = fits.Column(name='model_flux', format='D', unit='1e-17 erg/cm2/s', array=sp_cha[1].data['firefly_model'])
    tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs([wave_col, flux_col]))
    tbhdu.header['HIERARCH SNR_multiplicative_value'] = error_factor
    tbhdu.header['HIERARCH library'] = lib
    tbhdu.header['HIERARCH IMF'] = imf
    # Copy the fit parameters (value, upper, lower bounds), in the same
    # order as before so the FITS header layout is unchanged.
    copied = [
        ('age_lightW', 'age_lightW_mean'),
        ('age_lightW_up', 'age_lightW_mean_up'),
        ('age_lightW_low', 'age_lightW_mean_low'),
        ('metallicity_lightW', 'metallicity_lightW_mean'),
        ('metallicity_lightW_up', 'metallicity_lightW_mean_up'),
        ('metallicity_lightW_low', 'metallicity_lightW_mean_low'),
        ('age_massW', 'age_massW_mean'),
        ('age_massW_up', 'age_massW_mean_up'),
        ('age_massW_low', 'age_massW_mean_low'),
        ('metallicity_massW', 'metallicity_massW_mean'),
        ('metallicity_massW_up', 'metallicity_massW_mean_up'),
        ('metallicity_massW_low', 'metallicity_massW_mean_low'),
        ('EBV', 'EBV'),
        ('stellar_mass', 'stellar_mass_mean'),
        ('stellar_mass_up', 'stellar_mass_mean_up'),
        ('stellar_mass_low', 'stellar_mass_mean_low'),
        ('ssp_number', 'ssp_number'),
    ]
    for out_key, in_key in copied:
        tbhdu.header['HIERARCH ' + out_key] = hdr[in_key]
    # Forward every remaining keyword (header position 33 onwards) verbatim.
    for el in hdr[33:]:
        tbhdu.header['HIERARCH ' + el] = hdr[el]
    return tbhdu
def create_hdu_list(filename):
model_hdus = n.array([create_tbhdu(fits.open(os.path.join(stellarpop_dir, os.path.basename(filename)[:-4]+suff)), imf, lib, error_factor) for imf, lib, suff, error_factor in zip(imfs, libs, suffixes, error_factors)])
print "N hdus=",len(model_hdus)
#print model_hdus[0].header
prihdr = fits.Header()
prihdr['file'] = os.path.basename(filename)[:-4]
prihdr['models'] = 'Maraston_2011'
prihdr['fitter'] = 'FIREFLY'
return prihdr, model_hdus
def create_figure_add_chi2(filename, model_hdus):
    """Make the three-panel QA figure for one spectrum and annotate HDUs.

    Top panel: observed spectrum vs. the first model. Middle panel:
    distribution of (data-model)/error residuals for every model, compared
    with a unit Gaussian. Bottom panel: light-weighted age vs. stellar
    mass with asymmetric error bars.

    Side effect: adds 'chi2', 'ndof' and 'medianSNRpp' keywords to each
    model HDU header. The figure is written to path_2_im(filename).

    Args:
        filename: path to the simulated galaxy spectrum (.dat file).
        model_hdus: array of BinTableHDU as built by create_hdu_list.

    Returns:
        model_hdus, with the chi2-related header keywords filled in.
    """
    sp=gs.GalaxySpectrumFIREFLY(filename, milky_way_reddening=False)
    sp.openGAMAsimulatedSpectrum()
    # interpolators so data and models can be compared on the model grid
    spec = interp1d(sp.restframe_wavelength, sp.flux)
    err = interp1d(sp.restframe_wavelength, sp.error)
    wl_data_min = n.min(sp.restframe_wavelength)
    wl_data_max = n.max(sp.restframe_wavelength)
    # now creates the figure per model
    fig = p.figure(0, figsize = (7, 10), frameon=False)#, tight_layout=True)
    rect = 0.2, 0.15, 0.85, 0.95
    #ax = fig.add_axes(rect, frameon=False)
    # panel with the spectrum
    fig.add_subplot(3,1,1)
    p.plot(sp.restframe_wavelength[::2], sp.flux[::2], 'k', rasterized =True, alpha=0.5, label='data')
    p.plot(model_hdus[0].data['wavelength'], model_hdus[0].data['model_flux'], label='model')
    p.yscale('log')
    mean_data = n.median(sp.flux)
    p.ylim((mean_data/8., mean_data*8.))
    p.xlabel('Wavelength [Angstrom]')
    p.ylabel(r'Flux [$f_\lambda$ $10^{-17}$ erg/cm2/s/A]')
    p.title(os.path.basename(filename))
    # second panel distribution of residuals
    fig.add_subplot(3,1,2)
    for hdu in model_hdus:
        #print hdu
        # restrict the model to the observed wavelength range
        ok_model = (hdu.data['wavelength']>wl_data_min)&(hdu.data['wavelength']<wl_data_max)
        wl_model = hdu.data['wavelength'][ok_model]
        #print spec(wl_model),hdu.data['model_flux'][ok_model],err(wl_model)
        chi2s=(spec(wl_model)-hdu.data['model_flux'][ok_model])/err(wl_model)
        p.hist(chi2s, bins = n.arange(-2,2,0.1), normed = True, histtype='step', label="SNRpp="+str(n.round(hdu.header['SNR_multiplicative_value']*n.median(spec(wl_model)/err(wl_model)),3))+", EBV="+str(n.round(hdu.header['EBV'],3))+r", $\chi^2=$"+str(n.round(n.sum(chi2s**2.)/(len(chi2s)-2.),4)))
        p.ylim((-0.02,1.02))
        #p.yscale('log')
        p.xlabel('(data-model)/error')
        p.ylabel('Normed distribution')
        # record the goodness-of-fit of this model in its own header
        hdu.header['chi2'] = n.sum(chi2s**2.)
        hdu.header['ndof'] = len(chi2s)-2.
        hdu.header['medianSNRpp'] = n.median(spec(wl_model)/err(wl_model))
    # reference unit Gaussian for the residual distributions
    p.plot(n.arange(-2,2,0.005), gaussD.pdf(n.arange(-2,2,0.005)), 'k--', label=r'N(0,1)', lw=0.5)
    p.grid()
    p.legend(frameon=False, loc=0, fontsize=8)
    fig.add_subplot(3,1,3)
    # (age, mass, +/- errors) for every model, transposed for errorbar()
    tpl = n.transpose(n.array([ [
        hdu.header['age_lightW'],
        hdu.header['stellar_mass'],
        hdu.header['age_lightW_up']-hdu.header['age_lightW'],
        hdu.header['age_lightW']-hdu.header['age_lightW_low'],
        hdu.header['stellar_mass_up']-hdu.header['stellar_mass'],
        hdu.header['stellar_mass']-hdu.header['stellar_mass_low']]
        for hdu in model_hdus ]))
    p.errorbar(tpl[0], tpl[1], xerr=[tpl[2], tpl[3]], yerr=[tpl[4], tpl[5]], barsabove=True, fmt='o')
    #p.axvline(prihdr['age_universe'], color='r', ls='dashed')
    # annotate models from lowest to highest stellar mass
    idsUP = n.argsort(tpl[1])
    iterList = model_hdus[idsUP]
    for jj, hdu in enumerate(iterList):
        # NOTE(review): wl_model here is left over from the last iteration
        # of the residuals loop above -- presumably all models share the
        # same wavelength grid; confirm.
        p.annotate("SNRpp="+str(n.round(hdu.header['SNR_multiplicative_value']*n.median(spec(wl_model)/err(wl_model)),3))+r", $\log(Z/Z_\odot)=$"+str(n.round(hdu.header['metallicity_lightW'],4)),
            xy = (hdu.header['age_lightW'], hdu.header['stellar_mass']), xycoords='data',
            xytext=(0.85, (jj+0.5)/len(iterList)), textcoords='axes fraction',
            arrowprops=dict(facecolor='black', shrink=0.05, width=0.2, headwidth=3),
            horizontalalignment='right', verticalalignment='top', fontsize=9)
    p.ylabel(r'$\log_{10}(M/[M_\odot])$')
    p.xlabel(r'$\log_{10}(age/[yr])$')
    #p.ylim((9,12.5))
    p.grid()
    p.savefig(path_2_im(filename))
    p.clf()
    return model_hdus
def write_summary(filename):
t0t = time.time()
prihdr, model_hdus = create_hdu_list(filename)
bla = create_figure_add_chi2(filename, model_hdus)
model_hdus_list = bla.tolist()
prihdu = fits.PrimaryHDU(header=prihdr)
model_hdus_list.insert(0,prihdu)
thdulist = fits.HDUList( model_hdus_list )
path_2_out_file = path_2_out(filename)
if os.path.isfile(path_2_out_file ):
os.remove(path_2_out_file )
thdulist.writeto( path_2_out_file )
print time.time()-t0t, "seconds"
# Build one combined FITS summary (and QA figure) per input spectrum.
for filename in filenames:
    write_summary(filename)
|
{"hexsha": "70ac254fe73b4cc60cf93afb21c4beec904fb417", "size": 8527, "ext": "py", "lang": "Python", "max_stars_repo_path": "spm/bin_simulated_spectra/combine_model_spectra_GAMA_SNR.py", "max_stars_repo_name": "AndresSixtos/pyeBOSS", "max_stars_repo_head_hexsha": "4750908c8bc409633bef8f790133e3a1f3f0c9e4", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-05-23T13:03:27.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-23T13:03:27.000Z", "max_issues_repo_path": "spm/bin_simulated_spectra/combine_model_spectra_GAMA_SNR.py", "max_issues_repo_name": "AndresSixtos/pyeBOSS", "max_issues_repo_head_hexsha": "4750908c8bc409633bef8f790133e3a1f3f0c9e4", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spm/bin_simulated_spectra/combine_model_spectra_GAMA_SNR.py", "max_forks_repo_name": "AndresSixtos/pyeBOSS", "max_forks_repo_head_hexsha": "4750908c8bc409633bef8f790133e3a1f3f0c9e4", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-09-26T11:17:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-14T06:09:18.000Z", "avg_line_length": 47.904494382, "max_line_length": 291, "alphanum_fraction": 0.6386771432, "include": true, "reason": "import numpy,from scipy,import astropy", "num_tokens": 2575}
|
#ifndef _PS_
#define _PS_
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <unistd.h>
#include "../Parameter_files/COSMOLOGY.H"
#include "../Parameter_files/INIT_PARAMS.H"
#include <gsl/gsl_interp.h>
#include <gsl/gsl_spline.h>
#include "cosmo_progs.c"
#include "misc.c"
#include "../Parameter_files/Variables.h"
/* New in v1.1 */
#define ERFC_NPTS (int) 75
#define ERFC_PARAM_DELTA (float) 0.1
static double log_erfc_table[ERFC_NPTS], erfc_params[ERFC_NPTS];
static gsl_interp_accel *erfc_acc;
static gsl_spline *erfc_spline;
#define NR_END 1
#define FREE_ARG char*
#define MM 7
#define NSTACK 50
#define FUNC(x,y,z,xx,yy) ((*func)(x,y,z,xx,yy))
#define FUNC2(x1,x2,x3,x4,x5,x6) ((*func)(x1,x2,x3,x4,x5,x6))
#define EPS2 3.0e-11
#define SWAP(a,b) tempr=(a);(a)=(b);(b)=tempr
#define NGaussLegendre 40 //defines the number of points in the Gauss-Legendre quadrature integration
#define SPLINE_NPTS (int) 250
#define NGLhigh 100
#define NGLlow 100
#define Nhigh 200
#define Nlow 100
#define NMass 200
static double log_MFspline_table[SPLINE_NPTS], MFspline_params[SPLINE_NPTS];
static double log_MFspline_table_densgtr1[SPLINE_NPTS], MFspline_params_densgtr1[SPLINE_NPTS];
static gsl_interp_accel *MFspline_acc, *MFspline_densgtr1_acc;
static gsl_spline *MF_spline, *MF_spline_densgtr1;
static double Fcoll_spline_params[SPLINE_NPTS], log_Fcoll_spline_table[SPLINE_NPTS];
static gsl_interp_accel *Fcoll_spline_acc;
static gsl_spline *Fcoll_spline;
/* Parameter bundle threaded through the GSL integrands of the
   conditional collapsed-fraction integrals (see FgtrM_general /
   FgtrConditionallnM). Field meanings are inferred from the matching
   function signatures -- confirm against the implementations. */
struct parameters_gsl_int_{
    double z_obs;      // observation redshift
    double Mval;       // mass scale of the conditioning region
    double M_Feed;     // feedback mass scale
    double alpha_pl;   // power-law slope
    double del_traj_1; // overdensity of the first trajectory point
    double del_traj_2; // overdensity of the second trajectory point
};

/* Reduced parameter set for the unconditional Sheth-Tormen integrals
   (see FgtrM_st_PL). */
struct parameters_gsl_ST_int_{
    double z_obs;    // observation redshift
    double M_Feed;   // feedback mass scale
    double alpha_pl; // power-law slope
};
unsigned long *lvector(long nl, long nh);
void free_lvector(unsigned long *v, long nl, long nh);
float *vector(long nl, long nh);
void free_vector(float *v, long nl, long nh);
void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]);
void splint(float xa[], float ya[], float y2a[], int n, float x, float *y);
void gauleg(float x1, float x2, float x[], float w[], int n);
double FgtrlnM_general(double lnM, void *params);
double FgtrM_general(float z, float M1, float M_Max, float M2, float MFeedback, float alpha, float delta1, float delta2);
float FgtrConditionalM_second(float z, float M1, float M2, float MFeedback, float alpha, float delta1, float delta2);
float dNdM_conditional_second(float z, float M1, float M2, float delta1, float delta2);
float FgtrConditionallnM(float M1, struct parameters_gsl_int_ parameters_gsl_int);
float GaussLegengreQuad_Fcoll(int n, float z, float M2, float MFeedback, float alpha, float delta1, float delta2);
float *Overdense_spline_gsl,*Overdense_spline_GL_high,*Fcoll_spline_gsl,*Fcoll_spline_GL_high,*xi_low,*xi_high,*wi_high,*wi_low;
float *second_derivs_low_GL,*second_derivs_high_GL,*Overdense_spline_GL_low,*Fcoll_spline_GL_low;
float *Mass_Spline, *Sigma_Spline, *dSigmadm_Spline, *second_derivs_sigma, *second_derivs_dsigma;
void initialiseSplinedSigmaM(float M_Min, float M_Max);
void initialiseGL_Fcoll(int n_low, int n_high, float M_Min, float M_Max);
void initialiseGL_FcollDblPl(int n_low, int n_high, float M_Min, float M_feedback, float M_Max);
void initialiseFcoll_spline(float z, float Mmin, float Mmax, float Mval, float MFeedback, float alphapl);
double dFdlnM_st_PL (double lnM, void *params);
double FgtrM_st_PL(double z, double Mmin, double MFeedback, double alpha_pl);
double sigma_norm, R, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR;
/***** FUNCTION PROTOTYPES *****/
double init_ps(); /* initialize global variables, MUST CALL THIS FIRST!!! returns R_CUTOFF */
void free_ps(); /* deallocates the gsl structures from init_ps */
double splined_erfc(double); /* returns erfc for x>=0, using cubic spline in logy-x space */
double deltolindel(float del, float z); /* converts a non-linear overdensity, del, at z to a linear overdensity at z=0 */
double lindeltodel(float lindel, float z); /* converts a linear overdensity, del, at z=0 to a non-linear overdensity at redshift z */
double power_in_k(double k); /* Returns the value of the linear power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */
double power_in_vcb(double k); /*JBM: Returns the value of the DM-b relative velocity power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */
double RtoM(double); /* R in Mpc, M in Msun */
double MtoR(double); /* R in Mpc, M in Msun */
double M_J_WDM(); /* returns the "effective Jeans mass" corresponding to the gas analog of WDM ; eq. 10 in BHO 2001 */
double sheth_delc(double del, double sig);
double dNdM_st(double z, double M);
double dNdM(double z, double M);
double dnbiasdM(double M, float z, double M_o, float del_o); /* dnbiasdM */
double FgtrM(double z, double M); //calculates the fraction of mass contained in haloes with mass > M at redshift z
double FgtrM_st(double z, double M); //calculates the fraction of mass contained in haloes with mass > M at redshift z, with Sheth-Tormen correction
double FgtrM_bias(double z, double M, double del_bias, double sig_bias); //calculates the fraction of mass contained in haloes with mass > M at redshift z, in regions with a linear overdensity of del_bias, and standard deviation sig_bias
double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias);/* Uses sigma parameters instead of Mass for scale */
double FgtrM_bias_BL08(double z, double M, double del_bias, double sig_bias); // as above, but this version uses the hybrid perscription of Barkana & Loeb 2004 (specifically the separate integral version of eq. 2 in Barkana & Loeb 2008)
double dicke(double z); //calculates the dicke growth function at redshift z
double ddickedz(double z); /* Redshift derivative of the growth function at z */
double ddickedt(double z); /* Time derivative of the growth function at z */
double sigma_z0(double M); //calculates sigma at z=0 (no dicke)
double dsigmasqdm_z0(double M); //calculates d(sigma^2)/dm at z=0 (i.e. does not include dicke growth)
double TFmdm(double k); //Eisenstien & Hu power spectrum transfer function
void TFset_parameters();
float get_R_c(); // returns R_CUTOFF
double get_M_min_ion(float z);
/***************************************/
/* Returns the minimum source mass for ionizing sources, according to user specifications */
/* Selection logic (settings from ANAL_PARAMS.H):
   - ION_M_MIN < 0: threshold from the virial temperature ION_Tvir_MIN,
     with mean molecular weight 1.22 (neutral IGM, Tvir < 1e4 K) or
     0.6 (ionized IGM);
   - ION_Tvir_MIN < 0: use the mass threshold ION_M_MIN directly;
   - both positive: configuration error, returns -1.
   The result is floored at the WDM effective Jeans mass when P_CUTOFF
   is set. */
double get_M_min_ion(float z){
  double MMIN;

  if (ION_M_MIN < 0){ // use the virial temperature for Mmin
    if (ION_Tvir_MIN < 9.99999e3) // neutral IGM
       MMIN = TtoM(z, ION_Tvir_MIN, 1.22);
    else // ionized IGM
       MMIN = TtoM(z, ION_Tvir_MIN, 0.6);
  }
  else if (ION_Tvir_MIN < 0){ // use the mass
    MMIN = ION_M_MIN;
  }
  else{
    fprintf(stderr, "You have to \"turn-off\" either the ION_M_MIN or \
the ION_Tvir_MIN option in ANAL_PARAMS.H\nAborting...\n");
    return -1;
  }

  // check for WDM: never allow sources below the effective Jeans mass
  if (P_CUTOFF && ( MMIN < M_J_WDM()))
     MMIN = M_J_WDM();

  //  printf("Mmin is %e\n", MMIN);
  return MMIN;
}
/* Returns the minimum source mass [Msun] for X-ray sources at redshift z,
   using the X_RAY_Tvir_MIN virial temperature threshold with the mean
   molecular weight for a neutral (mu = 1.22) or ionized (mu = 0.6) IGM.
   The result is floored at the WDM effective Jeans mass when P_CUTOFF
   is in use. */
double get_M_min_xray(float z){
  double mu, min_mass, m_jeans;

  mu = (X_RAY_Tvir_MIN < 9.99999e3) ? 1.22 : 0.6;
  min_mass = TtoM(z, X_RAY_Tvir_MIN, mu);

  if (P_CUTOFF){
    m_jeans = M_J_WDM();
    if (min_mass < m_jeans)
      min_mass = m_jeans;
  }
  return min_mass;
}
/* "Effective Jeans mass" [Msun] of the gas analog of warm dark matter;
   eq. 10 in Barkana+ 2001, scaled by an ad-hoc fudge factor.
   Returns 0 when no small-scale power cutoff (P_CUTOFF) is in use. */
double M_J_WDM(){
  const double fudge = 60;
  double z_eq;

  if (!P_CUTOFF)
    return 0;
  z_eq = 3600*(OMm-OMb)*hlittle*hlittle/0.15;
  return fudge*3.06e8 * (1.5/g_x) * sqrt((OMm-OMb)*hlittle*hlittle/0.15)
    * pow(M_WDM, -4) * pow(z_eq/3000.0, 1.5);
}
/* Converts a non-linear overdensity del at redshift z into the
   corresponding linear overdensity extrapolated to z=0: a fitting
   formula in (1+del), divided by the growth factor. */
double deltolindel(float del, float z){
  float onepdel = 1.0+del;
  double fit = 1.68647 - 1.35*pow(onepdel,-2/3.0)
    + 0.78785*pow(onepdel,-0.58661) - 1.12431*pow(onepdel,-0.5);
  return fit/dicke(z);
}
/* converts a linear overdensity, del, at z=0 to a non-linear overdensity at redshift z
   by bisecting deltolindel(); the answer is capped at the
   virialization overdensity delcrit. */
double lindeltodel(float lindel, float z){
  float prev_lindelguess, delcrit, delguess;
  float lindelguess, delmin, delmax, epsilon = 1.0e-7;
  // set the critical density corresponding to virialization
  // this will be maximum allowed del
  delcrit = Deltac_nonlinear(z)*rho_critz(z)/(OMm*RHOcrit*pow(1+z, 3)) - 1;

  // initial bisection bracket in non-linear overdensity
  // NOTE(review): the upper bracket of 500 is hard-coded; confirm it
  // always exceeds delcrit for the redshifts of interest.
  delmin = -1;
  delmax = 500;
  prev_lindelguess = -1e10;
  while (1){
    delguess = 0.5*(delmax+delmin);
    lindelguess = deltolindel(delguess, z);
    //fprintf(stderr, "%e\t%e\n", delmin, delmax);
    //  fprintf(stderr, "%e\t%e\t%e\n\n", delguess, lindelguess, lindel);
    // converged: relative or absolute tolerance met, or the guesses can
    // no longer be distinguished at float resolution
    if ((fabs((lindelguess-lindel)/lindel) < epsilon ) ||
        (fabs(lindelguess-lindel) < epsilon ) ||
        (fabs(prev_lindelguess - lindelguess) < TINY ))// close enough, or resolution loop
      return delguess;

    // shrink the bracket around the target linear overdensity
    if (lindelguess > lindel)
      delmax = delguess;
    else
      delmin = delguess;

    // check if we are above delcrit (see above)
    if (delmin > delcrit){
      //      printf("exced max at lindel=%e\n", lindel);
      return delcrit;
    }

    prev_lindelguess = lindelguess;
  }
}
/* Converts a filter scale R [Mpc] to the enclosed mass M [Msun],
   according to the M<->R mapping chosen by FILTER in
   ../Parameter_files/COSMOLOGY.H. Returns -1 (after a message on
   stderr) for an unknown filter. */
double RtoM(double R){
  switch (FILTER){
  case 0: // top hat: M = (4/3) PI <rho> R^3
    return (4.0/3.0)*PI*pow(R,3)*(OMm*RHOcrit);
  case 1: // gaussian: M = (2 PI)^1.5 <rho> R^3
    return pow(2*PI, 1.5) * OMm*RHOcrit * pow(R, 3);
  default: // filter not defined
    fprintf(stderr, "No such filter = %i.\nResults are bogus.\n", FILTER);
    return -1;
  }
}
/* Inverse of RtoM: converts a mass M [Msun] to the filter scale R
   [Mpc], according to the M<->R mapping chosen by FILTER in
   ../Parameter_files/COSMOLOGY.H. Returns -1 (after a message on
   stderr) for an unknown filter. */
double MtoR(double M){
  switch (FILTER){
  case 0: // top hat: M = (4/3) PI <rho> R^3
    return pow(3*M/(4*PI*OMm*RHOcrit), 1.0/3.0);
  case 1: // gaussian: M = (2 PI)^1.5 <rho> R^3
    return pow( M/(pow(2*PI, 1.5) * OMm * RHOcrit), 1.0/3.0 );
  default: // filter not defined
    fprintf(stderr, "No such filter = %i.\nResults are bogus.\n", FILTER);
    return -1;
  }
}
/* Multiplicity function, equation (5) of Jenkins et al. (2001), for
   linear overdensity del and mass variance sigsq. Prints an error and
   returns 0 when del is negative. */
double f_jenkins(float del, double sigsq){
  if (del < 0){
    fprintf(stderr, "ERROR: In function f_jenkins del_o must be less than del_1 = del_crit/dicke(z)!\nAborting...\n");
    return 0;
  }
  return sqrt(2/PI) * del/sqrt(sigsq) * pow(E, -0.5*del*del/sigsq);
}
/* Accessor for the global power-spectrum cutoff radius R_CUTOFF
   (set by init_ps). */
float get_R_c(){
  return R_CUTOFF;
}
/* Sheth et al. ellipsoidal-collapse correction to the critical
   collapse overdensity del, given the mass variance sig. */
double sheth_delc(double del, double sig){
  double ratio = sig*sig/(SHETH_a*del*del);
  return sqrt(SHETH_a)*del*(1 + SHETH_b*pow(ratio, SHETH_c));
}
/* dnbiasdM
   Conditional (biased) mass function: number density per unit mass of
   haloes of mass M at redshift z, inside a region of mass M_o and
   linear overdensity del_o (extended Press-Schechter).
   Returns -1 if M is not strictly below M_o, and 0 if the effective
   overdensity is negative (both with a message on stderr). */
double dnbiasdM(double M, float z, double M_o, float del_o){
  double sigsq, del, sig_one, sig_o;

  // the halo must be strictly smaller than its host region
  if ((M_o-M) < TINY){
    fprintf(stderr, "WARNING: In function dnbiasdM: M must be less than M_o!\nAborting...\n");
    return -1;
  }
  // effective linear collapse barrier seen inside the biased region
  del = Deltac/dicke(z) - del_o;
  if (del < 0){ fprintf(stderr, "ERROR: In function dnbiasdM: del_o must be less than del_1 = del_crit/dicke(z)!\nAborting...\n"); return 0; }

  // variance difference between the halo scale and the region scale
  sig_o = sigma_z0(M_o);
  sig_one = sigma_z0(M);
  sigsq = sig_one*sig_one - sig_o*sig_o;

  return -(RHOcrit*OMm)/M /sqrt(2*PI) *del*pow(sigsq,-1.5)*pow(E, -0.5*del*del/sigsq)*dsigmasqdm_z0(M);
}
/*
 FUNCTION dNdM_st(z, M)
 Sheth-Tormen halo mass function (Press-Schechter with the ellipsoidal
 collapse correction) at redshift z and dark matter halo mass M (in
 solar masses).

 Returns the number density of haloes per unit mass, for the mass
 range M to M+dM, in comoving Mpc^-3 Msun^-1.

 Reference: Sheth, Mo, Tormen 2001
 */
double dNdM_st(double z, double M){
  double growth, sig, dsigdm, nuhat;

  growth = dicke(z);
  sig = sigma_z0(M) * growth;
  dsigdm = dsigmasqdm_z0(M) * growth*growth/(2.0*sig);
  nuhat = sqrt(SHETH_a) * Deltac / sig;

  return (-OMm*RHOcrit/M) * (dsigdm/sig) * sqrt(2/PI)*SHETH_A
    * (1+ pow(nuhat, -2*SHETH_p)) * nuhat * pow(E, -nuhat*nuhat/2.0);
}
/*
 FUNCTION dNdM(z, M)
 Press-Schechter halo mass function at redshift z and dark matter halo
 mass M (in solar masses).

 Returns the number density of haloes per unit mass, for the mass
 range M to M+dM, in comoving Mpc^-3 Msun^-1.

 Reference: Padmanabhan, pg. 214
 */
double dNdM(double z, double M){
  double growth, sig, dsigdm;

  growth = dicke(z);
  sig = sigma_z0(M) * growth;
  dsigdm = dsigmasqdm_z0(M) * (growth*growth/(2*sig));

  return (-OMm*RHOcrit/M) * sqrt(2/PI) * (Deltac/(sig*sig)) * dsigdm
    * pow(E, -(Deltac*Deltac)/(2*sig*sig));
}
/*
 FUNCTION FgtrM_st(z, M)
 Computes the fraction of mass contained in haloes with mass > M at
 redshift z, with the Sheth-Tormen correction.
 dFdlnM_st is its GSL integrand in log mass: M^2 dN/dM evaluated at
 M = exp(lnM), with the redshift passed through params.
 */
double dFdlnM_st (double lnM, void *params){
  double redshift = *(double *)params;
  double mass = exp(lnM);
  return dNdM_st(redshift, mass) * mass * mass;
}
/* Sheth-Tormen collapsed fraction: integrates dFdlnM_st from ln(M) to
   ln(max(1e16, 100 M)) with a GSL adaptive quadrature (QAG, 41-point
   Gauss-Kronrod rule), then normalizes by the mean matter density. */
double FgtrM_st(double z, double M){
  //  printf("Calculating ST coll fraction: M=%.2le, z=%.2le \n",M,z);
  double result, error, lower_limit, upper_limit;
  gsl_function F;
  //  double rel_tol = 0.001; //<- relative tolerance
  double rel_tol = 0.01; //<- relative tolerance
  gsl_integration_workspace * w
  = gsl_integration_workspace_alloc (1000);

  F.function = &dFdlnM_st;
  F.params = &z;
  lower_limit = log(M);
  upper_limit = log(FMAX(1e16, M*100));

  // alternative Gauss-Kronrod rules, kept for reference:
  //  gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error);
  //  gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS51, w, &result, &error);
  gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS41, w, &result, &error);
  //  gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS31, w, &result, &error);
  //  gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS21, w, &result, &error);
  //  gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS15, w, &result, &error);
  gsl_integration_workspace_free (w);

  // convert the mass integral into a mass fraction
  return result / (OMm*RHOcrit);
}
/*
 FUNCTION FgtrM(z, M)
 Press-Schechter collapsed fraction: the fraction of mass contained in
 haloes with mass > M at redshift z, i.e. erfc of the critical
 overdensity in units of sqrt(2)*sigma(M).
 */
double FgtrM(double z, double M){
  double delta_crit = Deltac/dicke(z); // regular spherical collapse delta
  return splined_erfc(delta_crit / (sqrt(2)*sigma_z0(M)));
}
/*
 Fraction of mass contained in haloes with mass > M at redshift z,
 inside a region with linear overdensity del_bias and standard
 deviation sig_bias (extended Press-Schechter).
 When the biased region's variance is not below the halo-scale
 variance the result is ill-defined; a tiny positive value is returned
 instead of aborting.
 */
double FgtrM_bias(double z, double M, double del_bias, double sig_bias){
  double sig_halo, del_eff, sig_eff;

  sig_halo = sigma_z0(M);
  if (!(sig_bias < sig_halo)) // biased region is smaller than the halo!
    return 0.000001;

  del_eff = Deltac/dicke(z) - del_bias;
  sig_eff = sqrt(sig_halo*sig_halo - sig_bias*sig_bias);
  return splined_erfc(del_eff / (sqrt(2)*sig_eff));
}
/* Same as FgtrM_bias, but parameterized directly by the small-scale
   sigma instead of the mass. Returns a tiny positive value when the
   biased region's deviation is not below sigsmallR. */
double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias){
  double del_eff, sig_eff;

  if (!(sig_bias < sigsmallR)) // biased region is smaller than the halo!
    return 0.000001;

  del_eff = Deltac/dicke(z) - del_bias;
  sig_eff = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias);
  return splined_erfc(del_eff / (sqrt(2)*sig_eff));
}
/*
 Calculates the fraction of mass contained in haloes with mass > M at redshift z, in regions with a linear overdensity of del_bias, and standard deviation sig_bias.
 This version uses the hybrid prescription of Barkana & Loeb 2004 (specifically the separate
 integral version of eq. 2 in Barkana & Loeb 2008): the biased
 Press-Schechter fraction is rescaled by the unconditional
 Sheth-Tormen / Press-Schechter ratio.
 */
double FgtrM_bias_BL08(double z, double M, double del_bias, double sig_bias){
  return FgtrM_st(z, M) / FgtrM(z, M) * FgtrM_bias(z, M, del_bias, sig_bias);
}
/*
 FUNCTION dicke(z)
 Computes the dicke growth function at redshift z, i.e. the z dependance part of sigma

 References: Peebles, "Large-Scale...", pg.53 (eq. 11.16). Includes omega<=1
 Nonzero Lambda case from Liddle et al, astro-ph/9512102, eqs. 6-8.
 and quintessence case from Wang et al, astro-ph/9804015

 Normalized to dicke(z=0)=1
 Returns -1 when no supported cosmology matches the global parameters.
 */
double dicke(double z){
  double omegaM_z, dick_z, dick_0, x, x_0;
  double tiny = 1e-4;

  // Einstein-de Sitter: D(z) = 1/(1+z)
  if (fabs(OMm-1.0) < tiny){ //OMm = 1 (Einstein de-Sitter)
    return 1.0/(1.0+z);
  }
  else if ( (OMl > (-tiny)) && (fabs(OMl+OMm+OMr-1.0) < 0.01) && (fabs(wl+1.0) < tiny) ){
    //this is a flat, cosmological CONSTANT universe, with only lambda, matter and radiation
    //it is taken from liddle et al.
    omegaM_z = OMm*pow(1+z,3) / ( OMl + OMm*pow(1+z,3) + OMr*pow(1+z,4) );
    dick_z = 2.5*omegaM_z / ( 1.0/70.0 + omegaM_z*(209-omegaM_z)/140.0 + pow(omegaM_z, 4.0/7.0) );
    dick_0 = 2.5*OMm / ( 1.0/70.0 + OMm*(209-OMm)/140.0 + pow(OMm, 4.0/7.0) );
    return dick_z / (dick_0 * (1.0+z));
  }
  else if ( (OMtot < (1+tiny)) && (fabs(OMl) < tiny) ){ //open, zero lambda case (peebles, pg. 53)
    x_0 = 1.0/(OMm+0.0) - 1.0;
    dick_0 = 1 + 3.0/x_0 + 3*log(sqrt(1+x_0)-sqrt(x_0))*sqrt(1+x_0)/pow(x_0,1.5);
    x = fabs(1.0/(OMm+0.0) - 1.0) / (1+z);
    dick_z = 1 + 3.0/x + 3*log(sqrt(1+x)-sqrt(x))*sqrt(1+x)/pow(x,1.5);
    return dick_z/dick_0;
  }
  else if ( (OMl > (-tiny)) && (fabs(OMtot-1.0) < tiny) && (fabs(wl+1) > tiny) ){
    // quintessence case (w != -1): not implemented in this build
    fprintf(stderr, "IN WANG\n");
    return -1;
  }

  // no supported cosmology matched: signal the error to the caller
  fprintf(stderr, "No growth function!!! Output will be fucked up.");
  return -1;
}
/* Redshift derivative of the growth function at z, d(dicke)/dz,
 computed by a forward finite difference. */
double ddicke_dz(double z){
    float dz = 1e-10; // finite-difference step (kept float, matching ddickedt)
    // Cleanup: removed six never-used local variables from the original.
    return (dicke(z+dz)-dicke(z))/dz;
}
/* Time derivative of the growth function at z:
 d(dicke)/dt = [d(dicke)/dz] / (dt/dz), via a forward finite difference. */
double ddickedt(double z){
    float dz = 1e-10; // finite-difference step in redshift
    // Lazy non-analytic form. BUGFIX/cleanup: the analytic EdS and flat-Lambda
    // expressions that used to follow this unconditional return were unreachable
    // dead code (and declared several unused locals); they have been removed.
    return (dicke(z+dz)-dicke(z))/dz/dtdz(z);
}
/*
 JBM:
 Reads the z=0 matter (CDM+baryons) transfer function from CLASS.
 flag = 0  : initialize the interpolator (read CLASS_FILENAME, build the spline).
 flag = -1 : free the interpolator's memory.
 any other : interpolate T_m at the requested k.
 Returns T(k)/k^2 (divided by k^2 to agree with the old-fashioned convention),
 0 on successful init/free, or -1 on file errors.
 Similar to the built-in function "double T_RECFAST(float z, int flag)".
 */
double TFm_CLASS(double k, int flag)
{
    static double kclass[CLASS_LENGTH], Tmclass[CLASS_LENGTH];
    static gsl_interp_accel *acc_class;
    static gsl_spline *spline_class;
    float trash, currk, currTm;
    double ans;
    int i;
    FILE *F;

    if (flag == 0) { // Initialize vectors and read file
        if ( !(F=fopen(CLASS_FILENAME, "r")) ){
            fprintf(stderr, "TFm_CLASS: Unable to open file: %s for reading\nAborting\n", CLASS_FILENAME);
            return -1;
        }
        for (i=0;i<CLASS_LENGTH;i++) {
            // Columns: k, T_m, T_vcb (third column unused here).
            if (fscanf(F, "%e %e %e ", &currk, &currTm, &trash) != 3){
                fprintf(stderr, "TFm_CLASS: read error in %s at row %d\n", CLASS_FILENAME, i);
                fclose(F);
                return -1;
            }
            kclass[i] = currk;
            Tmclass[i] = currTm;
            // The k column must be strictly increasing for the spline below.
            // BUGFIX: check i>0 FIRST -- the old condition read kclass[i-1]
            // before the i>0 guard, touching kclass[-1] on the first row.
            if(i>0 && kclass[i]<=kclass[i-1]){
                printf("WARNING, Tk table not ordered \n");
                printf("k=%.1le kprev=%.1le \n\n",kclass[i],kclass[i-1]);
            }
        }
        fclose(F);

        // Set up spline table
        acc_class = gsl_interp_accel_alloc ();
        spline_class = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH);
        gsl_spline_init(spline_class, kclass, Tmclass, CLASS_LENGTH);
        return 0;
    }

    if (flag == -1) { // free the interpolator
        gsl_spline_free (spline_class);
        gsl_interp_accel_free(acc_class);
        spline_class = NULL; // avoid dangling pointers on a later re-init
        acc_class = NULL;
        return 0;
    }

    if (k > kclass[CLASS_LENGTH-1]) { // k>kmax: clamp to the last tabulated value
        fprintf(stderr, "Called TFm_CLASS with k=%f, larger than kmax!\n", k);
        //JBM:we just set it to the last value, since sometimes it wants large k for R<<cell_size, which does not matter much.
        // BUGFIX: was Tmclass[CLASS_LENGTH], one past the end of the array.
        return (Tmclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]);
    }

    // In-range: spline interpolation.
    ans = gsl_spline_eval (spline_class, k, acc_class);
    return ans/k/k;
    //JBM:we have to divide by k^2 to agree with the old-fashioned convention.
}
/*
 JBM:
 Reads the z=zkin relative-velocity (DM-baryon) transfer function from CLASS.
 Same flag convention and caveats as TFm_CLASS:
 flag = 0 initializes, flag = -1 frees, anything else interpolates.
 Returns T_vcb(k)/k^2, 0 on init/free, -1 on file errors,
 and 0 for k above the tabulated maximum.
 */
double TFvcb_CLASS(double k, int flag)
{
    static double kclass_vcb[CLASS_LENGTH], Tvclass_vcb[CLASS_LENGTH];
    static gsl_interp_accel *acc_vcb;
    static gsl_spline *spline_vcb;
    double trash, currk, currTv;
    double ans;
    int i;
    FILE *F;

    if (flag == 0) { // Initialize vectors and read file
        if ( !(F=fopen(CLASS_FILENAME, "r")) ){
            fprintf(stderr, "TFvcb_CLASS: Unable to open file: %s for reading\nAborting\n", CLASS_FILENAME);
            return -1;
        }
        for (i=0;i<CLASS_LENGTH;i++) {
            // Columns: k, T_m (unused here), T_vcb.
            if (fscanf(F, "%le %le %le ", &currk, &trash, &currTv) != 3){
                fprintf(stderr, "TFvcb_CLASS: read error in %s at row %d\n", CLASS_FILENAME, i);
                fclose(F);
                return -1;
            }
            kclass_vcb[i] = currk;
            Tvclass_vcb[i] = currTv;
            // BUGFIX: check i>0 FIRST -- the old condition read kclass_vcb[-1]
            // on the first row before evaluating the i>0 guard.
            if(i>0 && kclass_vcb[i]<=kclass_vcb[i-1]){
                printf("WARNING, T_vcb table not ordered \n");
            }
        }
        fclose(F);

        // Set up spline table
        acc_vcb = gsl_interp_accel_alloc ();
        spline_vcb = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH);
        gsl_spline_init(spline_vcb, kclass_vcb, Tvclass_vcb, CLASS_LENGTH);
        return 0;
    }

    if (flag == -1) { // free the interpolator
        gsl_spline_free (spline_vcb);
        gsl_interp_accel_free(acc_vcb);
        spline_vcb = NULL; // avoid dangling pointers on a later re-init
        acc_vcb = NULL;
        return 0;
    }

    if (k > kclass_vcb[CLASS_LENGTH-1]) { // k>kmax: the velocity transfer is negligible there
        fprintf(stderr, "Called TFvcb_CLASS with k=%f, bailing out!\n", k);
        return 0;
    }

    // In-range: spline interpolation.
    ans = gsl_spline_eval (spline_vcb, k, acc_vcb);
    return ans/k/k;
    //JBM:we have to divide by k^2 to agree with the old-fashioned convention.
}
/*
FUNCTION sigma_z0(M)
Returns the standard deviation of the normalized, density excess (delta(x)) field,
smoothed on the comoving scale of M (see filter definitions for M<->R conversion).
The sigma is evaluated at z=0, with the time evolution contained in the dicke(z) factor,
i.e. sigma(M,z) = sigma_z0(m) * dicke(z)
normalized so that sigma_z0(M->8/h Mpc) = SIGMA8 in ../Parameter_files/COSMOLOGY.H
NOTE: volume is normalized to = 1, so this is equvalent to the mass standard deviation
M is in solar masses
References: Padmanabhan, pg. 210, eq. 5.107
*/
/*
 Integrand for sigma_z0(M): k^2 * P(k) * W^2(kR), where P(k) is the chosen
 (unnormalized) linear power spectrum and W is the smoothing window.
 Reads the globals POWER_SPECTRUM, POWER_INDEX, FILTER, sigma_norm, and the
 smoothing radius R (set by the caller before integrating).
 */
double dsigma_dk(double k, void *params){
double p, w, T, gamma, q, aa, bb, cc, kR;
// get the power spectrum.. choice of 5:
if (POWER_SPECTRUM == 0){ // Eisenstein & Hu
T = TFmdm(k);
// check if we should cuttoff power spectrum according to Bode et al. 2000 transfer function
if (P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
p = pow(k, POWER_INDEX) * T * T;
}
else if (POWER_SPECTRUM == 1){ // BBKS
// Shape parameter with the Sugiyama (1995) baryon correction.
gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
q = k / (hlittle*gamma);
T = (log(1.0+2.34*q)/(2.34*q)) *
pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
p = pow(k, POWER_INDEX) * T * T;
}
else if (POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
gamma = 0.25;
aa = 6.4/(hlittle*gamma);
bb = 3.0/(hlittle*gamma);
cc = 1.7/(hlittle*gamma);
p = pow(k, POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
}
else if (POWER_SPECTRUM == 3){ // Peebles, pg. 626
gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
aa = 8.0 / (hlittle*gamma);
bb = 4.7 / pow(hlittle*gamma, 2);
p = pow(k, POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
}
else if (POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
aa = 1.7/(hlittle*gamma);
bb = 9.0/pow(hlittle*gamma, 1.5);
cc = 1.0/pow(hlittle*gamma, 2);
p = pow(k, POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2);
}
else if (POWER_SPECTRUM == 5){ // JBM: CLASS
T = TFm_CLASS(k, 1); //read from z=0 output of CLASS
//JBM: flag = 1 here always, since now we have to have initialized the interpolator for CLASS
p = pow(k, POWER_INDEX) * T * T;
}
else{
fprintf(stderr, "No such power spectrum defined: %i\nOutput is bogus.\n", POWER_SPECTRUM);
p = 0;
}
// now get the value of the window function
// NOTE: only use top hat for SIGMA8 normalization
// (sigma_norm < 0 signals that init_ps is normalizing to SIGMA8, which is
// defined with a top-hat window regardless of the FILTER choice).
kR = k*R;
if ( (FILTER == 0) || (sigma_norm < 0) ){ // top hat
if ( (kR) < 1.0e-4 ){ w = 1.0;} // w converges to 1 as (kR) -> 0
else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));}
}
else if (FILTER == 1){ // gaussian of width 1/R
w = pow(E, -kR*kR/2.0);
}
else {
fprintf(stderr, "No such filter: %i\nOutput is bogus.\n", FILTER);
w=0;
}
return k*k*p*w*w;
}
/*
 sigma(M) of the z=0 density field smoothed on the mass scale M (Msun);
 see the header comment above dsigma_dk. Scaled by the global sigma_norm,
 which init_ps sets so that sigma_z0(8/h Mpc) = SIGMA8.
 */
double sigma_z0(double M){
    double integral, abserr, k_lo, k_hi;
    gsl_function integrand;
    //OLD: double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
    double rel_tol = 0.01; // relative tolerance for the quadrature
    gsl_integration_workspace *wsp = gsl_integration_workspace_alloc(1000);

    // dsigma_dk reads the smoothing radius through the file-scope global R.
    R = MtoR(M);

    // Limits: effectively k=0 up to KTOP (the CLASS table has a maximum k),
    // and no further than ~350/R where the window has killed the integrand.
    k_lo = FMAX(1.0e-99/R, KBOT);
    k_hi = FMIN(350.0/R, KTOP);

    integrand.function = &dsigma_dk;
    gsl_integration_qag(&integrand, k_lo, k_hi, 0, rel_tol,
                        1000, GSL_INTEG_GAUSS15, wsp, &integral, &abserr);
    gsl_integration_workspace_free(wsp);

    return sigma_norm * sqrt(integral);
}
/*
Returns the value of the linear power spectrum DENSITY (i.e. <|delta_k|^2>/V)
at a given k mode linearly extrapolated to z=0
*/
/*
 Linear power spectrum density <|delta_k|^2>/V at mode k, extrapolated to z=0.
 Same power-spectrum menu as dsigma_dk, but with no window function, and with
 the sigma_norm^2 normalization (set by init_ps) and 2*pi^2 volume factor applied.
 */
double power_in_k(double k){
double p, T, gamma, q, aa, bb, cc;
// get the power spectrum.. choice of 5:
if (POWER_SPECTRUM == 0){ // Eisenstein & Hu
T = TFmdm(k);
// check if we should cuttoff power spectrum according to Bode et al. 2000 transfer function
if (P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
p = pow(k, POWER_INDEX) * T * T;
//p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05
}
else if (POWER_SPECTRUM == 1){ // BBKS
// Shape parameter with the Sugiyama (1995) baryon correction.
gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
q = k / (hlittle*gamma);
T = (log(1.0+2.34*q)/(2.34*q)) *
pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
p = pow(k, POWER_INDEX) * T * T;
}
else if (POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
gamma = 0.25;
aa = 6.4/(hlittle*gamma);
bb = 3.0/(hlittle*gamma);
cc = 1.7/(hlittle*gamma);
p = pow(k, POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
}
else if (POWER_SPECTRUM == 3){ // Peebles, pg. 626
gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
aa = 8.0 / (hlittle*gamma);
bb = 4.7 / pow(hlittle*gamma, 2);
p = pow(k, POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
}
else if (POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
aa = 1.7/(hlittle*gamma);
bb = 9.0/pow(hlittle*gamma, 1.5);
cc = 1.0/pow(hlittle*gamma, 2);
p = pow(k, POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2);
}
else if (POWER_SPECTRUM == 5){ // JBM: CLASS
T = TFm_CLASS(k, 1); //read from z=0 output of CLASS
//JBM: flag = 1 here always, since now we have to have initialized the interpolator for CLASS
p = pow(k, POWER_INDEX) * T * T;
}
else{
fprintf(stderr, "No such power spectrum defined: %i\nOutput is bogus.\n", POWER_SPECTRUM);
p = 0;
}
// Apply the SIGMA8 normalization and the 2*pi^2 volume convention.
return p*TWOPI*PI*sigma_norm*sigma_norm;
}
/*
 JBM: Linear power spectrum of the DM-baryon relative velocity at kinematic
 decoupling (zkin = 1010). Only defined when POWER_SPECTRUM == 5, since the
 velocity transfer function comes from the CLASS output file.
 */
double power_in_vcb(double k){
    double p, T;

    if (POWER_SPECTRUM != 5){
        // Any non-CLASS spectrum has no velocity transfer function available.
        fprintf(stderr, "Cannot get P_cb unless using CLASS: %i\n Set USE_RELATIVE_VELOCITIES 0 or use CLASS.\n", POWER_SPECTRUM);
        p = 0;
    }
    else {
        T = TFvcb_CLASS(k, 1.0); // flag=1: interpolator was initialized in init_ps
        p = pow(k, POWER_INDEX) * T * T;
    }

    // Same normalization and 2*pi^2 volume factor as power_in_k.
    return p*TWOPI*PI*sigma_norm*sigma_norm;
}
/*
 FUNCTION dsigmasqdm_z0(M) integrand:
 k^2 * P(k) * d(W^2)/dm = k^2 * P(k) * 2*W*(dW/dR)*(dR/dm), in units of Msun^-1.
 Must use the SAME P(k) as dsigma_dk/power_in_k so that sigma and d(sigma^2)/dm
 are mutually consistent. Reads the globals POWER_SPECTRUM, FILTER, R, d2fact.
 */
double dsigmasq_dm(double k, void *params){
    double p, w, T, gamma, q, aa, bb, cc, dwdr, drdm, kR;

    // get the power spectrum.. choice of 5:
    if (POWER_SPECTRUM == 0){ // Eisenstein & Hu ApJ, 1999, 511, 5
        T = TFmdm(k);
        // check if we should cuttoff power spectrum according to Bode et al. 2000 transfer function
        if (P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
        p = pow(k, POWER_INDEX) * T * T;
        //p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05
    }
    else if (POWER_SPECTRUM == 1){ // BBKS
        gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
        q = k / (hlittle*gamma);
        T = (log(1.0+2.34*q)/(2.34*q)) *
            pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
        p = pow(k, POWER_INDEX) * T * T;
    }
    else if (POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
        gamma = 0.25;
        aa = 6.4/(hlittle*gamma);
        bb = 3.0/(hlittle*gamma);
        cc = 1.7/(hlittle*gamma);
        p = pow(k, POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
    }
    else if (POWER_SPECTRUM == 3){ // Peebles, pg. 626
        gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
        aa = 8.0 / (hlittle*gamma);
        // BUGFIX (consistency): was 4.7/(hlittle*gamma); dsigma_dk and power_in_k
        // both use 4.7/pow(hlittle*gamma, 2) for this spectrum.
        bb = 4.7 / pow(hlittle*gamma, 2);
        p = pow(k, POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
    }
    else if (POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
        gamma = OMm * hlittle * pow(E, -OMb - OMb/OMm);
        aa = 1.7/(hlittle*gamma);
        bb = 9.0/pow(hlittle*gamma, 1.5);
        cc = 1.0/pow(hlittle*gamma, 2);
        // BUGFIX (consistency): was pow(bb*k, 1.5); dsigma_dk and power_in_k
        // both use bb*pow(k, 1.5) for this spectrum.
        p = pow(k, POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2);
    }
    else if (POWER_SPECTRUM == 5){ // JBM: CLASS
        T = TFm_CLASS(k, 1); //read from z=0 output of CLASS
        //JBM: flag = 1 here always, since now we have to have initialized the interpolator for CLASS
        p = pow(k, POWER_INDEX) * T * T;
    }
    else{
        fprintf(stderr, "No such power spectrum defined: %i\nOutput is bogus.\n", POWER_SPECTRUM);
        p = 0;
    }

    // now get the value of the window function and its derivative
    kR = k * R;
    if (FILTER == 0){ // top hat
        if ( (kR) < 1.0e-4 ){ w = 1.0; } // w converges to 1 as (kR) -> 0
        else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));}

        // now do d(w^2)/dm = 2 w dw/dr dr/dm
        if ( (kR) < 1.0e-10 ){ dwdr = 0;} // dW/dR -> 0 as kR -> 0
        else{ dwdr = 9*cos(kR)*k/pow(kR,3) + 3*sin(kR)*(1 - 3/(kR*kR))/(kR*R);}
        //3*k*( 3*cos(kR)/pow(kR,3) + sin(kR)*(-3*pow(kR, -4) + 1/(kR*kR)) );}
        // dwdr = -1e8 * k / (R*1e3);

        // dR/dM for a top-hat of mass M = (4/3) pi rho R^3.
        drdm = 1.0 / (4.0*PI * OMm*RHOcrit * R*R);
    }
    else if (FILTER == 1){ // gaussian of width 1/R
        w = pow(E, -kR*kR/2.0);
        dwdr = - k*kR * w;
        // dR/dM for the Gaussian mass assignment M = (2 pi)^{3/2} rho R^3.
        drdm = 1.0 / (pow(2*PI, 1.5) * OMm*RHOcrit * 3*R*R);
    }
    else {
        fprintf(stderr, "No such filter: %i\nOutput is bogus.\n", FILTER);
        w=0;
    }

    // d2fact is a legacy scaling from 21cmmc (set to 1 in dsigmasqdm_z0).
    return k*k*p*2*w*dwdr*drdm * d2fact;
}
double dsigmasqdm_z0(double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
//OLD: double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
double rel_tol = 0.01; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (3000);
double kstart, kend;
R = MtoR(M);
// now lets do the integral for sigma and scale it with sigma_norm
kstart = FMAX(1.0e-99/R,KBOT);
kend = FMIN(350.0/R, KTOP);
lower_limit = kstart;//log(kstart);
upper_limit = kend;//log(kend);
//OLD: d2fact = M*10000/sigma_z0(M);
d2fact = 1.0;
//JBM:This is an irrelevant scaling to make te integral converge that was in 21cmmc originally, but it can be set to one and it works better.
//printf("dsigma/dm -> R=%.2le, from M=%.2le \n",R, M);
long unsigned int trash;
F.function = &dsigmasq_dm;
// gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error);
//OLD: gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS15, w, &result, &error);
gsl_integration_qng (&F, lower_limit, upper_limit, 3000, rel_tol, &result, &error, &trash);
gsl_integration_workspace_free (w);
return sigma_norm * sigma_norm * result /d2fact;
}
/*
 FUNCTION TFmdm is the power spectrum transfer function from Eisenstein & Hu ApJ, 1999, 511, 5
 (mixed dark matter fit). Uses the fit parameters omhh, theta_cmb, alpha_nu,
 beta_c, sound_horizon, f_nu, N_nu precomputed by TFset_parameters().
 Returns the dimensionless transfer function T(k).
 */
double TFmdm(double k){
double q, gamma_eff, q_eff, TF_m, q_nu;
// Dimensionless wavenumber q = k * theta_cmb^2 / (Omega_m h^2)  [E&H eq. 5].
q = k*pow(theta_cmb,2)/omhh;
// Effective shape including the sound-horizon suppression [E&H eq. 16].
gamma_eff=sqrt(alpha_nu) + (1.0-sqrt(alpha_nu))/(1.0+pow(0.43*k*sound_horizon, 4));
q_eff = q/gamma_eff;
// Master CDM+baryon transfer fit [E&H eqs. 18-19].
TF_m= log(E+1.84*beta_c*sqrt(alpha_nu)*q_eff);
TF_m /= TF_m + pow(q_eff,2) * (14.4 + 325.0/(1.0+60.5*pow(q_eff,1.11)));
// Neutrino free-streaming correction [E&H eqs. 23-24].
q_nu = 3.92*q/sqrt(f_nu/N_nu);
TF_m *= 1.0 + (1.2*pow(f_nu,0.64)*pow(N_nu,0.3+0.6*f_nu)) /
(pow(q_nu,-1.6)+pow(q_nu,0.8));
// printf("%f %e %f %f %f %f\n",omhh,f_nu,f_baryon,N_nu,y_d,alpha_nu);
// printf("%f %f %f %f\n", beta_c,sound_horizon,theta_cmb,z_equality);
//printf("%f %e %f %f %f\n\n",q, k, gamma_eff, q_nu, TF_m);
return TF_m;
}
/*
 Precomputes the Eisenstein & Hu (1999) transfer-function fit parameters from
 the cosmology globals (omhh, theta_cmb, OMb, hlittle, f_nu, f_baryon), filling
 the globals z_equality, y_d, sound_horizon, alpha_nu, beta_c used by TFmdm().
 Equation numbers below refer to Eisenstein & Hu ApJ, 1999, 511, 5.
 */
void TFset_parameters(){
double z_drag, R_drag, R_equality, p_c, p_cb, f_c, f_cb, f_nub, k_equality;
// Matter-radiation equality redshift and scale [eqs. 1-3].
z_equality = 25000*omhh*pow(theta_cmb, -4) - 1.0;
k_equality = 0.0746*omhh/(theta_cmb*theta_cmb);
// Drag epoch (baryon decoupling) fit [eq. 2].
z_drag = 0.313*pow(omhh,-0.419) * (1 + 0.607*pow(omhh, 0.674));
z_drag = 1 + z_drag*pow(OMb*hlittle*hlittle, 0.238*pow(omhh, 0.223));
z_drag *= 1291 * pow(omhh, 0.251) / (1 + 0.659*pow(omhh, 0.828));
// Ratio of equality to drag epochs [eq. 4].
y_d = (1 + z_equality) / (1.0 + z_drag);
// Baryon-to-photon momentum density at drag and equality [eq. 5].
R_drag = 31.5 * OMb*hlittle*hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_drag);
R_equality = 31.5 * OMb*hlittle*hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_equality);
// Sound horizon at the drag epoch [eq. 4 of E&H 1998 / eq. 6].
sound_horizon = 2.0/3.0/k_equality * sqrt(6.0/R_equality) *
log( (sqrt(1+R_drag) + sqrt(R_drag+R_equality)) / (1.0 + sqrt(R_equality)) );
// Growth exponents and species fractions [eqs. 11, 14].
p_c = -(5 - sqrt(1 + 24*(1 - f_nu-f_baryon)))/4.0;
p_cb = -(5 - sqrt(1 + 24*(1 - f_nu)))/4.0;
f_c = 1 - f_nu - f_baryon;
f_cb = 1 - f_nu;
f_nub = f_nu+f_baryon;
// Small-scale suppression alpha_nu [eq. 15] and beta_c [eq. 21].
alpha_nu = (f_c/f_cb) * (2*(p_c+p_cb)+5)/(4*p_cb+5.0);
alpha_nu *= 1 - 0.553*f_nub+0.126*pow(f_nub,3);
alpha_nu /= 1-0.193*sqrt(f_nu)+0.169*f_nu;
alpha_nu *= pow(1+y_d, p_c-p_cb);
alpha_nu *= 1+ (p_cb-p_c)/2.0 * (1.0+1.0/(4.0*p_c+3.0)/(4.0*p_cb+7.0))/(1.0+y_d);
beta_c = 1.0/(1.0-0.949*f_nub);
}
/*
 Initializes the power-spectrum machinery:
 - starts the CLASS interpolators when POWER_SPECTRUM == 5,
 - sets the WDM cutoff scale R_CUTOFF (Barkana et al. 2001, eq. 4),
 - fills the Eisenstein & Hu fit globals via TFset_parameters(),
 - normalizes sigma_norm so that sigma_z0(8/h Mpc) = SIGMA8.
 Returns R_CUTOFF (comoving Mpc).
 */
double init_ps(){
    double result, error, lower_limit, upper_limit;
    gsl_function F;
    // double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
    double rel_tol = 0.01; //<- relative tolerance
    gsl_integration_workspace * w
        = gsl_integration_workspace_alloc (1000);
    double kstart, kend;

    //JBM: we start the CLASS transfer-function interpolators if using CLASS:
    if (POWER_SPECTRUM == 5){
        TFm_CLASS(1.0, 0);
        TFvcb_CLASS(1.0, 0);
    }

    // Set cuttoff scale for WDM (eq. 4 in Barkana et al. 2001) in comoving Mpc
    R_CUTOFF = 0.201*pow((OMm-OMb)*hlittle*hlittle/0.15, 0.15)*pow(g_x/1.5, -0.29)*pow(M_WDM, -1.15);
    // fprintf(stderr, "For M_DM = %.2e keV, R_CUTOFF is: %.2e comoving Mpc\n", M_WDM, R_CUTOFF);
    // if (!P_CUTOFF) fprintf(stderr, "But you have selected CDM, so this is ignored\n");
    // BUGFIX: commenting out the fprintf above previously left a dangling
    // `if (!P_CUTOFF)` whose body became the omhh assignment below, so omhh
    // was never set when P_CUTOFF was enabled. The assignment is now unconditional.
    omhh = OMm*hlittle*hlittle;
    theta_cmb = T_cmb / 2.7;

    // Translate Parameters into forms GLOBALVARIABLES form
    f_nu = OMn/OMm;
    f_baryon = OMb/OMm;
    if (f_nu < TINY) f_nu = 1e-10;
    if (f_baryon < TINY) f_baryon = 1e-10;

    TFset_parameters();

    // Normalize to SIGMA8: sigma_norm < 0 forces the top-hat window inside
    // dsigma_dk during this normalization integral.
    sigma_norm = -1;
    R = 8.0/hlittle;
    kstart = FMAX(1.0e-99/R, KBOT);
    kend = FMIN(350.0/R, KTOP);
    lower_limit = kstart;
    upper_limit = kend;
    F.function = &dsigma_dk;
    gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
                         1000, GSL_INTEG_GAUSS61, w, &result, &error);
    gsl_integration_workspace_free (w);
    sigma_norm = SIGMA8/sqrt(result); //takes care of volume factor

    /* initialize the lookup table for erfc */
    /*
     for (i=0; i<=ERFC_NPTS; i++){
     erfc_params[i] = i*ERFC_PARAM_DELTA;
     log_erfc_table[i] = log(erfcc(erfc_params[i]));
     }
     // Set up spline table
     erfc_acc = gsl_interp_accel_alloc ();
     erfc_spline = gsl_spline_alloc (gsl_interp_cspline, ERFC_NPTS);
     gsl_spline_init(erfc_spline, erfc_params, log_erfc_table, ERFC_NPTS);
     */

    return R_CUTOFF;
}
/* Tears down the power-spectrum machinery initialized by init_ps():
 frees the CLASS transfer-function interpolators when they were built. */
void free_ps(){
    /* gsl_spline_free (erfc_spline);
     gsl_interp_accel_free(erfc_acc);
     */
    //JBM: we free the interpolators if using CLASS (flag = -1 frees):
    if (POWER_SPECTRUM != 5)
        return;
    TFm_CLASS(1.0, -1);
    TFvcb_CLASS(1.0, -1);
}
/*
 erfc(x) wrapper used by the collapsed-fraction routines.
 Negative arguments return 1 (the x -> -infinity limit is 2, but callers
 treat x < 0 as "fully collapsed" here -- note the early return below).
 The spline-based fast path after the first return is deliberately disabled.
 */
double splined_erfc(double x){
if (x < 0){
// fprintf(stderr, "WARNING: Negative value %e passed to splined_erfc. Returning 1\n", x);
return 1;
}
// Direct evaluation; everything below this return is intentionally dead code.
return erfcc(x); // the interpolation below doesn't seem to be stable in Ts.c
if (x > ERFC_PARAM_DELTA*(ERFC_NPTS-1))
return erfcc(x);
else
return exp(gsl_spline_eval(erfc_spline, x, erfc_acc));
}
/* Integrand for the conditional collapsed fraction in ln(M) variables:
 mass-weighted conditional mass function times the (M/MFeedback)^alpha
 efficiency, with the 1/sqrt(2 pi) normalization. M1 is ln(mass). */
float FgtrConditionalM_second(float z, float M1, float M2, float MFeedback, float alpha, float delta1, float delta2) {
    double mass = exp(M1);
    double cond_mf = dNdM_conditional_second(z, M1, M2, delta1, delta2);
    return mass * pow(mass/MFeedback, alpha) * cond_mf / sqrt(2.*PI);
}
/*
 Conditional mass function dN/dM for a trajectory from (M2, delta2) to
 (M1, delta1), with M1 and M2 passed as ln(mass). Evaluates sigma(M) and
 d(sigma^2)/dM from the cubic-spline tables built in initialiseSplinedSigmaM
 (the -1 pointer offsets convert the 0-indexed tables to the 1-indexed
 Numerical Recipes convention used by splint).
 */
float dNdM_conditional_second(float z, float M1, float M2, float delta1, float delta2){
float sigma1, sigma2, dsigmadm, dicke_growth,dsigma_val;
// Arguments arrive as ln(M); convert to linear mass for the spline lookups.
M1 = exp(M1);
M2 = exp(M2);
dicke_growth = dicke(z);
splint(Mass_Spline-1,Sigma_Spline-1,second_derivs_sigma-1,(int)NMass,M1,&(sigma1));
splint(Mass_Spline-1,Sigma_Spline-1,second_derivs_sigma-1,(int)NMass,M2,&(sigma2));
// From here on sigma1/sigma2 hold the VARIANCES sigma^2, not sigma.
sigma1 = sigma1*sigma1;
sigma2 = sigma2*sigma2;
// dSigmadm_Spline stores log10(-d(sigma^2)/dM); undo the log and the sign.
splint(Mass_Spline-1,dSigmadm_Spline-1,second_derivs_dsigma-1,(int)NMass,M1,&(dsigma_val));
dsigmadm = -pow(10.,dsigma_val)/(2.0*sigma1); // This is actually sigma1^{2} as calculated above, however, it should just be sigma1. It cancels with the same factor below. Why I have decided to write it like that I don't know!
if((sigma1 > sigma2)) {
// Standard conditional-MF expression in the variance difference sigma1 - sigma2.
return -(( delta1 - delta2 )/dicke_growth)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*dicke_growth*dicke_growth*( sigma1 - sigma2 ) ) ) )/(pow( sigma1 - sigma2, 1.5));
}
else if(sigma1==sigma2) {
// Degenerate scales: regularize the vanishing variance difference with 1e-6.
return -(( delta1 - delta2 )/dicke_growth)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*dicke_growth*dicke_growth*( 1.e-6 ) ) ) )/(pow( 1.e-6, 1.5));
}
else {
// sigma1 < sigma2: M1 above the conditioning scale -- no haloes.
return 0.;
}
}
void gauleg(float x1, float x2, float x[], float w[], int n)
//Given the lower and upper limits of integration x1 and x2, and given n, this routine returns arrays x[1..n] and w[1..n] of length n
//(1-indexed, Numerical Recipes convention), containing the abscissas and weights of the Gauss-Legendre n-point quadrature formula.
//Newton iteration convergence is controlled by the EPS2 tolerance macro.
{
int m,j,i;
double z1,z,xm,xl,pp,p3,p2,p1;
// Midpoint and half-length map the roots from [-1,1] onto [x1,x2].
m=(n+1)/2;
xm=0.5*(x2+x1);
xl=0.5*(x2-x1);
for (i=1;i<=m;i++) {
//High precision is a good idea for this routine.
//The roots are symmetric in the interval, so we only have to find half of them.
//Loop over the desired roots.
z=cos(3.141592654*(i-0.25)/(n+0.5));
//Starting with the above approximation to the ith root, we enter the main loop of refinement by Newton's method.
do {
p1=1.0;
p2=0.0;
for (j=1;j<=n;j++) {
//Loop up the recurrence relation to get the Legendre polynomial evaluated at z.
p3=p2;
p2=p1;
p1=((2.0*j-1.0)*z*p2-(j-1.0)*p3)/j;
}
//p1 is now the desired Legendre polynomial. We next compute pp, its derivative, by a standard relation involving also p2,
//the polynomial of one lower order.
pp=n*(z*p1-p2)/(z*z-1.0);
z1=z;
z=z1-p1/pp;
} while (fabs(z-z1) > EPS2);
// Scale the root into [x1,x2]; its mirror image fills the symmetric slot.
x[i]=xm-xl*z;
x[n+1-i]=xm+xl*z;
// Weight from the standard Gauss-Legendre formula; symmetric counterpart is equal.
w[i]=2.0*xl/((1.0-z*z)*pp*pp);
w[n+1-i]=w[i];
}
}
/* Numerical Recipes fatal-error handler: prints the message and exits. */
void nrerror(char error_text[])
{
    // One write keeps the three lines contiguous even with interleaved output.
    fprintf(stderr,
            "Numerical Recipes run-time error...\n%s\n...now exiting to system...\n",
            error_text);
    exit(1);
}
/* Allocate a float vector with NR-style subscript range v[nl..nh].
 The returned pointer is offset so that v[nl] is the first valid element. */
float *vector(long nl, long nh)
{
    size_t count = (size_t)(nh - nl + 1 + NR_END);
    float *storage = (float *)malloc(count * sizeof(float));
    if (storage == NULL)
        nrerror("allocation failure in vector()");
    return storage - nl + NR_END;
}
void free_vector(float *v, long nl, long nh)
/* free a float vector allocated with vector() */
{
free((FREE_ARG) (v+nl-NR_END));
}
void spline(float x[], float y[], int n, float yp1, float ypn, float y2[])
/*Given arrays x[1..n] and y[1..n] containing a tabulated function, i.e., yi = f(xi), with
x1 <x2 < :: : < xN, and given values yp1 and ypn for the first derivative of the interpolating
function at points 1 and n, respectively, this routine returns an array y2[1..n] that contains
the second derivatives of the interpolating function at the tabulated points xi. If yp1 and/or
ypn are equal to 1e30 or larger, the routine is signaled to set the corresponding boundary
condition for a natural spline, with zero second derivative on that boundary.
NOTE: this is the Numerical Recipes routine, locally patched with na/nb/check
search loops that step past repeated (degenerate) x values instead of dividing
by zero; the patch assumes a wider distinct value exists within the array. */
{
int i,k;
float p,qn,sig,un,*u;
int na,nb,check;
u=vector(1,n-1);
if (yp1 > 0.99e30) // The lower boundary condition is set either to be "natural"
y2[1]=u[1]=0.0;
else { // or else to have a specified first derivative.
y2[1] = -0.5;
u[1]=(3.0/(x[2]-x[1]))*((y[2]-y[1])/(x[2]-x[1])-yp1);
}
for (i=2;i<=n-1;i++) { //This is the decomposition loop of the tridiagonal algorithm.
sig=(x[i]-x[i-1])/(x[i+1]-x[i-1]); //y2 and u are used for temporary
// Degenerate-abscissa patch: widen the stencil (alternately right then left)
// until x[i+na] != x[i-nb], so sig never divides by zero.
na = 1;
nb = 1;
check = 0;
while(((float)(x[i+na*1]-x[i-nb*1])==(float)0.0)) {
check = check + 1;
if(check%2==0) {
na = na + 1;
}
else {
nb = nb + 1;
}
sig=(x[i]-x[i-1])/(x[i+na*1]-x[i-nb*1]);
}
p=sig*y2[i-1]+2.0; //storage of the decomposed
y2[i]=(sig-1.0)/p; // factors.
u[i]=(y[i+1]-y[i])/(x[i+1]-x[i]) - (y[i]-y[i-1])/(x[i]-x[i-1]);
u[i]=(6.0*u[i]/(x[i+1]-x[i-1])-sig*u[i-1])/p;
// Second degenerate patch: if either neighboring interval is zero-width,
// recompute u[i] over a widened stencil as above.
if(((float)(x[i+1]-x[i])==(float)0.0) || ((float)(x[i]-x[i-1])==(float)0.0)) {
na = 0;
nb = 0;
check = 0;
while((float)(x[i+na*1]-x[i-nb])==(float)(0.0) || ((float)(x[i+na]-x[i-nb*1])==(float)0.0)) {
check = check + 1;
if(check%2==0) {
na = na + 1;
}
else {
nb = nb + 1;
}
}
u[i]=(y[i+1]-y[i])/(x[i+na*1]-x[i-nb]) - (y[i]-y[i-1])/(x[i+na]-x[i-nb*1]);
u[i]=(6.0*u[i]/(x[i+na*1]-x[i-nb*1])-sig*u[i-1])/p;
}
}
if (ypn > 0.99e30) //The upper boundary condition is set either to be "natural"
qn=un=0.0;
else { //or else to have a specified first derivative.
qn=0.5;
un=(3.0/(x[n]-x[n-1]))*(ypn-(y[n]-y[n-1])/(x[n]-x[n-1]));
}
y2[n]=(un-qn*u[n-1])/(qn*y2[n-1]+1.0);
for (k=n-1;k>=1;k--) { //This is the backsubstitution loop of the tridiagonal
y2[k]=y2[k]*y2[k+1]+u[k]; //algorithm.
}
free_vector(u,1,n-1);
}
/*
 Cubic-spline interpolation (Numerical Recipes). Given ordered tables
 xa[1..n], ya[1..n] and the second-derivative array y2a[1..n] produced by
 spline(), stores in *y the interpolated value at x. The xa's must be distinct.
 */
void splint(float xa[], float ya[], float y2a[], int n, float x, float *y)
{
    void nrerror(char error_text[]);
    int lo, hi, mid;
    float h, a, b;

    // Bisection search: shrink [lo,hi] until it brackets x. Optimal for random
    // query order; sequential callers could cache the last bracket instead.
    lo = 1;
    hi = n;
    while (hi - lo > 1) {
        mid = (hi + lo) >> 1;
        if (xa[mid] > x)
            hi = mid;
        else
            lo = mid;
    }

    h = xa[hi] - xa[lo];
    if (h == 0.0) nrerror("Bad xa input to routine splint"); // duplicate abscissas

    // Evaluate the cubic polynomial on the bracketing interval.
    a = (xa[hi] - x)/h;
    b = (x - xa[lo])/h;
    *y = a*ya[lo] + b*ya[hi] + ((a*a*a - a)*y2a[lo] + (b*b*b - b)*y2a[hi])*(h*h)/6.0;
}
unsigned long *lvector(long nl, long nh)
/* allocate an unsigned long vector with subscript range v[nl..nh];
 the returned pointer is offset so v[nl] is the first valid element */
{
    unsigned long *v;
    // FIX (consistency): allocate sizeof(unsigned long) -- the element type --
    // instead of sizeof(long). The sizes are equal on conforming compilers,
    // but the element type should be spelled out, matching vector() above.
    v = (unsigned long *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(unsigned long)));
    if(!v) nrerror("allocation failure in lvector()");
    return v - nl + NR_END;
}
void free_lvector(unsigned long *v, long nl, long nh)
/* free an unsigned long vector allocated with lvector() */
{
free((FREE_ARG) (v+nl-NR_END));
}
double FgtrlnM_general(double lnM, void *params) {
struct parameters_gsl_int_ vals = *(struct parameters_gsl_int_ *)params;
float z = vals.z_obs;
float M2 = vals.Mval;
float MFeedback = vals.M_Feed;
float alpha = vals.alpha_pl;
float delta1 = vals.del_traj_1;
float delta2 = vals.del_traj_2;
return FgtrConditionalM_second(z,lnM,M2,MFeedback,alpha,delta1,delta2);
}
/*
 Conditional collapsed fraction between ln-masses M1 and M_Max for a trajectory
 at (M2, delta2), integrating FgtrlnM_general with GSL QAG. Only defined for
 delta1 == Deltac (the collapse barrier); returns 0 otherwise.
 Returns 1 when delta2 > delta1 (the region has already collapsed).
 */
double FgtrM_general(float z, float M1, float M_Max, float M2, float MFeedback, float alpha, float delta1, float delta2) {
    double result, error, lower_limit, upper_limit;
    double rel_tol = 0.01;
    int size = 1000;

    // BUGFIX: the original only returned inside the delta1==Deltac branch and
    // fell off the end of the function otherwise (undefined return value).
    if((float)delta1 != (float)Deltac) {
        fprintf(stderr, "FgtrM_general: called with delta1 != Deltac; returning 0.\n");
        return 0.;
    }

    // Above the barrier everything has collapsed; check BEFORE integrating
    // (the original ran the full quadrature and then discarded the result).
    if(delta2 > delta1) {
        return 1.;
    }

    gsl_function Fx;
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (size);
    struct parameters_gsl_int_ parameters_gsl_int = {
        .z_obs = z,
        .Mval = M2,
        .M_Feed = MFeedback,
        .alpha_pl = alpha,
        .del_traj_1 = delta1,
        .del_traj_2 = delta2
    };
    Fx.function = &FgtrlnM_general;
    Fx.params = &parameters_gsl_int;
    lower_limit = M1;     // integration runs in ln(M)
    upper_limit = M_Max;

    gsl_integration_qag (&Fx, lower_limit, upper_limit, 0, rel_tol, size, GSL_INTEG_GAUSS61, w, &result, &error);
    gsl_integration_workspace_free (w);

    return result;
}
/* Conditional-Fcoll integrand used by the Gauss-Legendre quadrature.
 Identical in form to FgtrConditionalM_second, but takes the parameter
 struct by value instead of separate arguments. M1 is ln(mass). */
float FgtrConditionallnM(float M1, struct parameters_gsl_int_ parameters_gsl_int) {
    double mass = exp(M1);
    double cond_mf = dNdM_conditional_second(parameters_gsl_int.z_obs, M1,
                                             parameters_gsl_int.Mval,
                                             parameters_gsl_int.del_traj_1,
                                             parameters_gsl_int.del_traj_2);
    return mass * pow(mass/parameters_gsl_int.M_Feed, parameters_gsl_int.alpha_pl) * cond_mf / sqrt(2.*PI);
}
/*
 Gauss-Legendre quadrature of the conditional collapsed fraction, using the
 precomputed 1-indexed abscissas/weights xi_low[1..n], wi_low[1..n]
 (see initialiseGL_Fcoll). Returns 1 when delta2 > delta1 (region collapsed).
 */
float GaussLegengreQuad_Fcoll(int n, float z, float M2, float MFeedback, float alpha, float delta1, float delta2)
{
    int node;
    float acc = 0.0;
    struct parameters_gsl_int_ parameters_gsl_int = {
        .z_obs = z,
        .Mval = M2,
        .M_Feed = MFeedback,
        .alpha_pl = alpha,
        .del_traj_1 = delta1,
        .del_traj_2 = delta2
    };

    // Above the barrier the collapsed fraction saturates at unity.
    if (delta2 > delta1)
        return 1.;

    // Weighted sum over the n quadrature nodes (arrays are 1-indexed).
    for (node = 1; node <= n; node++)
        acc += wi_low[node] * FgtrConditionallnM(xi_low[node], parameters_gsl_int);

    return acc;
}
/*
FUNCTION FgtrM_st(z, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
Uses Sheth-Torman correction
*/
double dFdlnM_st_PL (double lnM, void *params){
struct parameters_gsl_ST_int_ vals = *(struct parameters_gsl_ST_int_ *)params;
double M = exp(lnM);
float z = vals.z_obs;
float MFeedback = vals.M_Feed;
float alpha = vals.alpha_pl;
return dNdM_st(z, M) * M * M * pow((M/MFeedback),alpha);
}
double FgtrM_st_PL(double z, double Mmin, double MFeedback, double alpha_pl){
double result_lower, result_upper, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.01; //<- relative tolerance
gsl_integration_workspace * w_lower
= gsl_integration_workspace_alloc (1000);
gsl_integration_workspace * w_upper
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_ST_int_ parameters_gsl_ST_lower = {
.z_obs = z,
.M_Feed = MFeedback,
.alpha_pl = alpha_pl,
};
F.function = &dFdlnM_st_PL;
F.params = ¶meters_gsl_ST_lower;
lower_limit = log(Mmin);
upper_limit = log(1e16);
gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w_lower, &result_lower, &error);
gsl_integration_workspace_free (w_lower);
return (result_lower) / (OMm*RHOcrit);
}
/*
 Builds the global cubic-spline tables for sigma(M) and log10(-d(sigma^2)/dM)
 on NMass log-spaced points in [M_Min, M_Max], used by dNdM_conditional_second.
 Allocates the global arrays (Mass_Spline etc.); the -1 pointer offsets adapt
 the 0-indexed arrays to the 1-indexed Numerical Recipes spline() convention.
 */
void initialiseSplinedSigmaM(float M_Min, float M_Max)
{
int i;
float Mass;
Mass_Spline = calloc(NMass,sizeof(float));
Sigma_Spline = calloc(NMass,sizeof(float));
dSigmadm_Spline = calloc(NMass,sizeof(float));
second_derivs_sigma = calloc(NMass,sizeof(float));
second_derivs_dsigma = calloc(NMass,sizeof(float));
printf("Initializing mass spline \n");
for(i=0;i<NMass;i++) {
// Log-spaced mass grid between M_Min and M_Max.
Mass_Spline[i] = pow(10., log10(M_Min) + (float)i/(NMass-1)*( log10(M_Max) - log10(M_Min) ) );
Sigma_Spline[i] = sigma_z0(Mass_Spline[i]);
// Tabulated in log10 of the (positive) magnitude; the sign is restored on lookup.
dSigmadm_Spline[i] = log10(-dsigmasqdm_z0(Mass_Spline[i]));
}
// Natural-spline second derivatives (boundary first derivatives set to 0).
spline(Mass_Spline-1,Sigma_Spline-1,NMass,0,0,second_derivs_sigma-1);
spline(Mass_Spline-1,dSigmadm_Spline-1,NMass,0,0,second_derivs_dsigma-1);
}
/*
 Fills the global Gauss-Legendre abscissa/weight arrays (xi_low/wi_low with
 n_low nodes, xi_high/wi_high with n_high nodes) for quadrature in ln(M)
 over [M_Min, M_Max]; used by GaussLegengreQuad_Fcoll.
 */
void initialiseGL_Fcoll(int n_low, int n_high, float M_Min, float M_Max)
{
//calculates the weightings and the positions for Gauss-Legendre quadrature.
gauleg(log(M_Min),log(M_Max),xi_low,wi_low,n_low);
gauleg(log(M_Min),log(M_Max),xi_high,wi_high,n_high);
}
/*
 * Builds the two interpolation tables for the collapsed fraction Fcoll
 * as a function of overdensity at redshift z (for the mass range
 * [Mmin, Mmax], smoothing mass Mval, feedback pivot MFeedback and
 * power-law slope alphapl):
 *
 *   1. a GSL cubic spline in log10(1+delta) over the "small" overdensity
 *      range (-1, 1.5], filled via Gauss-Legendre quadrature
 *      (GaussLegengreQuad_Fcoll);
 *   2. an NR-style spline table over the "large" range [1.5, Deltac],
 *      filled via FgtrM_general.
 *
 * Both tables are consumed by FcollSpline().
 */
void initialiseFcoll_spline(float z, float Mmin, float Mmax, float Mval, float MFeedback, float alphapl)
{
    double overdense_val,overdense_small_low,overdense_small_high,overdense_large_low,overdense_large_high;
    int i;

    /* The two regimes meet at delta = 1.5; the small-delta table starts
       just above the physical floor delta = -1. */
    overdense_large_high = Deltac;
    overdense_large_low = 1.5;
    overdense_small_high = 1.5;
    overdense_small_low = -1. + 9.e-8;

    Fcoll_spline_acc = gsl_interp_accel_alloc ();
    Fcoll_spline = gsl_spline_alloc (gsl_interp_cspline, SPLINE_NPTS);

    for (i=0;i<SPLINE_NPTS;i++){
        /* Sample uniformly in log10(1+delta). */
        overdense_val = log10(1.+overdense_small_low) + (float)i/(SPLINE_NPTS-1.)*(log10(1.+overdense_small_high) - log10(1.+overdense_small_low));

        log_Fcoll_spline_table[i] = log10(GaussLegengreQuad_Fcoll(NGLlow,z,log(Mval),MFeedback,alphapl,Deltac,pow(10.,overdense_val)-1.));
        Fcoll_spline_params[i] = overdense_val;

        /* Clamp so a vanishing Fcoll (log10 -> -inf) cannot poison the
           spline fit. */
        if(log_Fcoll_spline_table[i]<-40.) {
            log_Fcoll_spline_table[i] = -40.;
        }
    }
    gsl_spline_init(Fcoll_spline, Fcoll_spline_params, log_Fcoll_spline_table, SPLINE_NPTS);

    for(i=0;i<Nhigh;i++) {
        /* High-overdensity table is sampled linearly in delta. */
        Overdense_spline_GL_high[i] = overdense_large_low + (float)i/((float)Nhigh-1.)*(overdense_large_high - overdense_large_low);
        Fcoll_spline_GL_high[i] = FgtrM_general(z,log(Mmin),log(Mmax),log(Mval),MFeedback,alphapl,Deltac,Overdense_spline_GL_high[i]);

        /* Guard against (unphysical) negative quadrature results. */
        if(Fcoll_spline_GL_high[i]<0.) {
            Fcoll_spline_GL_high[i]=pow(10.,-40.0);
        }
    }
    /* -1 offsets: spline() appears to expect 1-indexed (NR-style)
       arrays — TODO confirm. */
    spline(Overdense_spline_GL_high-1,Fcoll_spline_GL_high-1,Nhigh,0,0,second_derivs_high_GL-1);
}
/*
 * Evaluates the precomputed collapsed-fraction tables (see
 * initialiseFcoll_spline) at a given overdensity and writes the result
 * through *splined_value. Piecewise behaviour:
 *   delta <  -1      -> 0 (below the physical floor)
 *   -1 <= delta < 1.5 -> GSL cubic spline in log10(1+delta), de-logged
 *   1.5 <= delta < Deltac -> NR-style spline table
 *   delta >= Deltac  -> 1 (everything has collapsed)
 */
void FcollSpline(float Overdensity, float *splined_value)
{
    float result;

    if (Overdensity < -1.) {
        result = 0;
    }
    else if (Overdensity < 1.5) {
        /* Table is stored as log10(Fcoll) vs log10(1+delta). */
        result = gsl_spline_eval(Fcoll_spline, log10(Overdensity + 1.), Fcoll_spline_acc);
        result = pow(10., result);
    }
    else if (Overdensity < Deltac) {
        splint(Overdense_spline_GL_high-1, Fcoll_spline_GL_high-1, second_derivs_high_GL-1, (int)Nhigh, Overdensity, &result);
    }
    else {
        result = 1.;
    }

    *splined_value = result;
}
#endif
|
{"hexsha": "38bb9b2f8a04e9bc4060d300282b7b180a9060e2", "size": 55391, "ext": "c", "lang": "C", "max_stars_repo_path": "public_21CMvFAST_MC/Cosmo_c_files/ps.c", "max_stars_repo_name": "JulianBMunoz/21cmvFAST", "max_stars_repo_head_hexsha": "14c053c301a7f10081071e815281f9c3879efa6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2019-04-18T11:31:34.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-15T03:29:14.000Z", "max_issues_repo_path": "public_21CMvFAST_MC/Cosmo_c_files/ps.c", "max_issues_repo_name": "JulianBMunoz/21cmvFAST", "max_issues_repo_head_hexsha": "14c053c301a7f10081071e815281f9c3879efa6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-12-17T05:27:25.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-18T19:59:55.000Z", "max_forks_repo_path": "public_21CMvFAST_MC/Cosmo_c_files/ps.c", "max_forks_repo_name": "JulianBMunoz/21cmvFAST", "max_forks_repo_head_hexsha": "14c053c301a7f10081071e815281f9c3879efa6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-11-14T13:54:46.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-08T17:16:53.000Z", "avg_line_length": 34.8371069182, "max_line_length": 238, "alphanum_fraction": 0.6357531007, "num_tokens": 18928}
|
from lcc.utils.data_analysis import to_PAA, normalize
import numpy as np
class SAX(object):
    """
    This class manages symbolic representation of data series via the
    Symbolic Aggregate approXimation (SAX) method. It translates a
    series of data to a word, which can then be compared with other
    such words in symbolic distance space.

    Attributes
    -----------
    word_size : int
        Number of letters in transformed word
    alphabet_size : int
        Size of alphabet counted from A (3 means A, B, C)
    scaling_factor : int, float
        Scaling factor can be used to scale result dissimilarity of
        two words created from light curves of different lengths
    beta : list
        Breakpoints for given alphabet size
    """

    MIN_ALPH_SIZE = 3
    MAX_ALPH_SIZE = 20
    A_OFFSET = ord('a')

    def __init__(self, word_size=8, alphabet_size=10, scaling_factor=1):
        """
        Parameters
        -----------
        word_size : int
            Number of letters in transformed word
        alphabet_size : int
            Size of alphabet counted from A (3 means A, B, C)
        scaling_factor : int, float
            Scaling factor can be used to scale result dissimilarity of
            two words created from light curves of different lengths

        Raises
        ------
        DictionarySizeIsNotSupported
            If alphabet_size is outside [MIN_ALPH_SIZE, MAX_ALPH_SIZE]
        """
        if (alphabet_size < self.MIN_ALPH_SIZE or
                alphabet_size > self.MAX_ALPH_SIZE):
            raise DictionarySizeIsNotSupported("%i " % alphabet_size)
        self.word_size = word_size
        self.alphabet_size = alphabet_size
        # Breakpoints dividing the standard normal distribution into
        # `alphabet_size` equiprobable regions.
        self.beta = self._getBreakpoints()[str(int(self.alphabet_size))]
        self.build_letter_compare_dict()
        self.scaling_factor = scaling_factor

    def to_letter_rep(self, x):
        """
        Function takes a series of data, x, and transforms it
        to a string representation.

        Note: overwrites ``self.scaling_factor`` with
        sqrt(len(x) / word_size) as a side effect.

        Parameters
        ----------
        x : list, iterable
            Data series

        Returns
        -------
        str
            SAX word
        list
            Indices
        """
        paaX, indices = to_PAA(normalize(x), self.word_size)
        self.scaling_factor = np.sqrt(len(x) / self.word_size)
        return self.alphabetize(paaX), indices

    def alphabetize(self, paaX):
        """
        Converts the Piecewise Aggregate Approximation of x
        to a series of letters.

        Parameters
        ---------
        paaX : list, iterable
            Data series (list of numbers)

        Returns
        -------
        str
            SAX word
        """
        alphabetizedX = ''
        for value in paaX:
            # The first breakpoint exceeding the value fixes the letter;
            # values above all breakpoints map to the last letter.
            for j, breakpoint in enumerate(self.beta):
                if value < breakpoint:
                    alphabetizedX += chr(self.A_OFFSET + j)
                    break
            else:
                alphabetizedX += chr(self.A_OFFSET + len(self.beta))
        return alphabetizedX

    def compare_strings(self, sA, sB):
        """
        Compares two strings based on individual letter distances
        (the SAX MINDIST measure, scaled by ``scaling_factor``).

        Parameters
        ----------
        sA : str
            Word to compare
        sB : str
            Word to compare

        Returns
        -------
        float
            Dissimilarity of two words

        Raises
        ------
        Exception
            If the two words have different lengths
        """
        if len(sA) != len(sB):
            raise Exception("StringsAreDifferentLength")
        mindist = 0.0
        for la, lb in zip(sA, sB):
            mindist += self.compare_letters(la, lb) ** 2
        return self.scaling_factor * np.sqrt(mindist)

    def compare_letters(self, la, lb):
        """
        Compare two letters based on letter distance return distance between

        Parameters
        ---------
        la : str
            First letter
        lb : str
            Second letter

        Returns
        -------
        float
            Distance between two letters
        """
        return self.compare_dict[la + lb]

    def build_letter_compare_dict(self):
        """
        Builds up the lookup table to determine numeric distance
        between two letters given an alphabet size.

        Returns
        -------
        None
        """
        number_rep = list(range(0, int(self.alphabet_size)))
        letters = [chr(x + self.A_OFFSET) for x in number_rep]
        self.compare_dict = {}
        for i in range(0, len(letters)):
            for j in range(0, len(letters)):
                if np.abs(number_rep[i] - number_rep[j]) <= 1:
                    # Adjacent (or identical) letters are considered
                    # indistinguishable in SAX.
                    self.compare_dict[letters[i] + letters[j]] = 0
                else:
                    high_num = np.max([number_rep[i], number_rep[j]]) - 1
                    low_num = np.min([number_rep[i], number_rep[j]])
                    self.compare_dict[
                        letters[i] + letters[j]] = self.beta[high_num] - self.beta[low_num]

    def _sliding_window(self, x, window_size, overlapping_fraction=None):
        """
        Transforms overlapping windows of `x` into SAX words.

        Parameters
        ----------
        x : list, iterable
            Data series
        window_size : int
            Length of each window
        overlapping_fraction : float, optional
            Fraction of each window shared with the next one
            (defaults to 0.01)

        Returns
        -------
        list
            SAX word per window
        list
            (start, end) index pair per window

        Raises
        ------
        OverlapSpecifiedIsNotSmallerThanWindowSize
            If the requested overlap leaves no room to advance
        """
        self.windowSize = window_size
        if not overlapping_fraction:
            overlapping_fraction = 0.01
        overlap = self.windowSize * overlapping_fraction
        move_size = int(self.windowSize - overlap)
        if move_size < 1:
            raise OverlapSpecifiedIsNotSmallerThanWindowSize
        # BUG FIX: a leftover hard-coded ``move_size = 5`` used to overwrite
        # the computed step here, silently ignoring `overlapping_fraction`
        # and defeating the validation above; it has been removed.
        ptr = 0
        n = len(x)
        window_indices = []
        string_rep = []
        while ptr < n - self.windowSize + 1:
            this_sub_range = x[ptr:ptr + self.windowSize]
            this_string_rep, _ = self.to_letter_rep(this_sub_range)
            string_rep.append(this_string_rep)
            window_indices.append((ptr, ptr + self.windowSize))
            ptr += move_size
        return string_rep, window_indices

    def _getBreakpoints(self):
        # Precomputed N(0, 1) quantiles keyed by alphabet size.
        return {'3': [-0.43, 0.43],
                '4': [-0.67, 0, 0.67],
                '5': [-0.84, -0.25, 0.25, 0.84],
                '6': [-0.97, -0.43, 0, 0.43, 0.97],
                '7': [-1.07, -0.57, -0.18, 0.18, 0.57, 1.07],
                '8': [-1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15],
                '9': [-1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22],
                '10': [-1.28, -0.84, -0.52, -0.25, 0, 0.25, 0.52, 0.84, 1.28],
                '11': [-1.34, -0.91, -0.6, -0.35, -0.11, 0.11, 0.35, 0.6, 0.91,
                       1.34],
                '12': [-1.38, -0.97, -0.67, -0.43, -0.21, 0, 0.21, 0.43, 0.67,
                       0.97, 1.38],
                '13': [-1.43, -1.02, -0.74, -0.5, -0.29, -0.1, 0.1, 0.29, 0.5,
                       0.74, 1.02, 1.43],
                '14': [-1.47, -1.07, -0.79, -0.57, -0.37, -0.18, 0, 0.18, 0.37,
                       0.57, 0.79, 1.07, 1.47],
                '15': [-1.5, -1.11, -0.84, -0.62, -0.43, -0.25, -0.08, 0.08,
                       0.25, 0.43, 0.62, 0.84, 1.11, 1.5],
                '16': [-1.53, -1.15, -0.89, -0.67, -0.49, -0.32, -0.16, 0,
                       0.16, 0.32, 0.49, 0.67, 0.89, 1.15, 1.53],
                '17': [-1.56, -1.19, -0.93, -0.72, -0.54, -0.38, -0.22, -0.07,
                       0.07, 0.22, 0.38, 0.54, 0.72, 0.93, 1.19, 1.56],
                '18': [-1.59, -1.22, -0.97, -0.76, -0.59, -0.43, -0.28, -0.14,
                       0, 0.14, 0.28, 0.43, 0.59, 0.76, 0.97, 1.22, 1.59],
                '19': [-1.62, -1.25, -1, -0.8, -0.63, -0.48, -0.34, -0.2,
                       -0.07, 0.07, 0.2, 0.34, 0.48, 0.63, 0.8, 1, 1.25, 1.62],
                '20': [-1.64, -1.28, -1.04, -0.84, -0.67, -0.52, -0.39, -0.25,
                       -0.13, 0, 0.13, 0.25, 0.39, 0.52, 0.67, 0.84, 1.04,
                       1.28, 1.64]
                }
class DictionarySizeIsNotSupported(ValueError):
    """Raised by SAX when the requested alphabet size is outside the
    supported range [SAX.MIN_ALPH_SIZE, SAX.MAX_ALPH_SIZE]."""
    pass
class OverlapSpecifiedIsNotSmallerThanWindowSize(ValueError):
    """Raised by SAX._sliding_window when the requested overlap leaves a
    step size smaller than one sample, so the window could never advance."""
    pass
|
{"hexsha": "6a23fa50d5b12cccbb528df83af52255473f4508", "size": 8047, "ext": "py", "lang": "Python", "max_stars_repo_path": "lcc/stars_processing/utilities/sax.py", "max_stars_repo_name": "mavrix93/LightCurvesClassifier", "max_stars_repo_head_hexsha": "a0a51f033cb8adf45296913f0de0aa2568e0530c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-06-07T12:16:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-28T10:06:44.000Z", "max_issues_repo_path": "lcc/stars_processing/utilities/sax.py", "max_issues_repo_name": "mavrix93/LightCurvesClassifier", "max_issues_repo_head_hexsha": "a0a51f033cb8adf45296913f0de0aa2568e0530c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-03-10T21:52:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T15:02:35.000Z", "max_forks_repo_path": "lcc/stars_processing/utilities/sax.py", "max_forks_repo_name": "mavrix93/LightCurvesClassifier", "max_forks_repo_head_hexsha": "a0a51f033cb8adf45296913f0de0aa2568e0530c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-05-21T16:20:24.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-10T11:06:42.000Z", "avg_line_length": 33.2520661157, "max_line_length": 91, "alphanum_fraction": 0.5025475332, "include": true, "reason": "import numpy", "num_tokens": 2356}
|
(*
Copyright 2014 Cornell University
Copyright 2015 Cornell University
Copyright 2016 Cornell University
Copyright 2017 Cornell University
This file is part of VPrl (the Verified Nuprl project).
VPrl is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VPrl is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VPrl. If not, see <http://www.gnu.org/licenses/>.
Websites: http://nuprl.org/html/verification/
http://nuprl.org/html/Nuprl2Coq
https://github.com/vrahli/NuprlInCoq
Authors: Vincent Rahli
*)
Require Export bar_induction_cterm.
Require Export subst_tacs.
Require Export per_props_equality.
Require Export lsubstc_vars.
(**
Bar induction, where
X is the proposition
B is the bar
ext(s,n,t) = \m. if m=n then t else s m
<<
H |- squash (X 0 (norm c 0))
By bar_induction B i a s x m n t
H, n:nat, s: nat_n -> CNTerm |- B n s in Type(i) // B is a well-formed predicate on finite sequences
H, s: nat -> CNTerm |- squash(exists n:nat. B n s) // B is a bar
H, n:nat, s: nat_n -> CNTerm, m: B n s |- X n s // Base case: the conclusion is true at the bar
H, n:nat, s: nat_n -> CNTerm, x: (forall m: CNTerm. X (n + 1) (ext(s,n,m))) |- X n s // induction case
>>
*)
(* Rule object for the bar-induction rule described in the comment above:
   the main sequent is the conclusion, the list holds the four subgoals,
   and there are no side-conditions. *)
Definition rule_bar_induction_nout {o}
           (f X c B e : @NTerm o)
           (s n m v x : NVar)
           (i : nat)
           (H : barehypotheses) :=
  mk_rule
    (* Conclusion: squash (X 0 (norm c 0)) *)
    (mk_bseq H (mk_conclax (mk_squash (mk_apply2 X mk_zero (mk_seq2kseq c (mk_nat 0) v)))))
    [ (* Subgoal 1: B is a well-formed predicate on finite sequences *)
      mk_bseq (snoc (snoc H (mk_hyp n mk_tnat))
                    (mk_hyp s (mk_natk2nout (mk_var n))))
              (mk_conclax (mk_member (mk_apply2 B (mk_var n) (mk_var s)) (mk_uni i))),
      (* Subgoal 2: B is a bar *)
      mk_bseq (snoc H (mk_hyp s mk_nat2nout))
              (mk_conclax (mk_squash
                             (mk_exists mk_tnat
                                        n
                                        (mk_apply2 B (mk_var n) (mk_var s))))),
      (* Subgoal 3 (base case): the conclusion is true at the bar *)
      mk_bseq (snoc (snoc (snoc H (mk_hyp n mk_tnat))
                          (mk_hyp s (mk_natk2nout (mk_var n))))
                    (mk_hyp m (mk_apply2 B (mk_var n) (mk_var s))))
              (mk_concl (mk_apply2 X (mk_var n) (mk_var s)) e),
      (* Subgoal 4 (induction case): X propagates back from extensions *)
      mk_bseq (snoc (snoc (snoc H (mk_hyp n mk_tnat))
                          (mk_hyp s (mk_natk2nout (mk_var n))))
                    (mk_hyp x (mk_all
                                 mk_nout
                                 m
                                 (mk_squash (mk_apply2 X (mk_plus1 (mk_var n)) (mk_update_seq (mk_var s) (mk_var n) (mk_var m) v))))))
              (mk_conclax (mk_apply2 X (mk_var n) (mk_var s)))
    ]
    [].
Lemma rule_bar_induction_nout_true {o} :
forall lib (f X c B e : @NTerm o)
(s n m v x : NVar)
(i : nat)
(H : @barehypotheses o)
(dxv : x <> v)
(dsv : s <> v)
(dnv : n <> v)
(dnv : m <> v)
(dnm : n <> m)
(dsm : s <> m)
(nvc : !LIn v (free_vars c))
(nnB : !LIn n (free_vars B))
(nsB : !LIn s (free_vars B)),
rule_true lib (rule_bar_induction_nout f X c B e s n m v x i H).
Proof.
unfold rule_bar_induction_nout, rule_true, closed_type_baresequent, closed_extract_baresequent; simpl.
intros.
clear cargs.
(* We prove the well-formedness of things *)
destseq; allsimpl.
dLin_hyp.
destruct Hyp as [wf1 hyp_wfd].
destruct Hyp0 as [wf2 hyp_bar].
destruct Hyp1 as [wf3 hyp_imp].
destruct Hyp2 as [wf4 hyp_ind].
destseq; allsimpl; proof_irr; GC.
unfold closed_extract; simpl.
exists (@covered_axiom o (nh_vars_hyps H)).
(* We prove some simple facts on our sequents *)
assert (s <> n
# s <> x
# n <> x
# !LIn x (free_vars c)
# !LIn s (free_vars c)
# !LIn n (free_vars c)
# !LIn x (free_vars X)
# !LIn s (free_vars X)
# !LIn n (free_vars X)
# !LIn m (free_vars X)
# !LIn x (vars_hyps H)
# !LIn s (vars_hyps H)
# !LIn n (vars_hyps H)) as vhyps.
{ clear hyp_wfd hyp_bar hyp_ind hyp_imp.
dwfseq.
assert (forall x : NVar, LIn x (free_vars c) -> x <> v -> LIn x (vars_hyps H)) as imp.
{ introv h1 h2.
apply cg.
repeat (first [rw remove_nvars_cons_r|rw remove_nvars_app_r]).
allrw memvar_singleton.
allrw <- beq_var_refl.
allrw remove_nvars_nil_r; allrw app_nil_r.
rw in_remove_nvars; rw in_single_iff; sp. }
sp; GC;
try (complete (discover; allapply @subset_hs_vars_hyps; sp)). }
destruct vhyps as [ nsn vhyps ].
destruct vhyps as [ nsx vhyps ].
destruct vhyps as [ nnx vhyps ].
destruct vhyps as [ nxc vhyps ].
destruct vhyps as [ nsc vhyps ].
destruct vhyps as [ nnc vhyps ].
destruct vhyps as [ nxX vhyps ].
destruct vhyps as [ nsX vhyps ].
destruct vhyps as [ nnX vhyps ].
destruct vhyps as [ nmX vhyps ].
destruct vhyps as [ nxH vhyps ].
destruct vhyps as [ nsH nnH ].
(* done with proving these simple facts *)
vr_seq_true.
lsubst_tac.
pose proof (lsubstc_mk_seq2kseq c 0 v w3 s1 c3) as sc1.
repeat (autodimp sc1 hyp).
exrepnd.
rw sc1.
pose proof (lsubstc_mk_seq2kseq c 0 v w3 s2 c7) as sc2.
autodimp sc2 hyp.
exrepnd.
rw sc2.
clear sc1 sc2.
clear_irr.
clear_wf_hyps.
rw @tequality_mkc_squash.
rw @member_mkc_squash.
assert (!LIn n (dom_csub s1)) as nns1.
{ apply similarity_dom in sim; repnd.
rw sim0; auto. }
assert (!LIn n (dom_csub s2)) as nns2.
{ apply similarity_dom in sim; repnd.
rw sim; auto. }
assert (!LIn s (dom_csub s1)) as nss1.
{ apply similarity_dom in sim; repnd.
rw sim0; auto. }
assert (!LIn s (dom_csub s2)) as nss2.
{ apply similarity_dom in sim; repnd.
rw sim; auto. }
assert (!LIn x (dom_csub s1)) as nxs1.
{ apply similarity_dom in sim; repnd.
rw sim0; auto. }
assert (!LIn x (dom_csub s2)) as nxs2.
{ apply similarity_dom in sim; repnd.
rw sim; auto. }
assert (wf_term B) as wB.
{ clear hyp_wfd.
allrw @wf_member_iff2.
allrw <- @wf_apply2_iff; sp.
}
assert (cover_vars B s1 # cover_vars B s2) as cB.
{ clear hyp_wfd.
allrw @covered_member.
allrw @covered_apply2; repnd.
allrw @vars_hyps_snoc; allsimpl.
apply covered_snoc_implies in ct6; auto.
apply covered_snoc_implies in ct6; auto.
dands.
- eapply s_cover_typ1;[exact ct6|exact sim].
- eapply s_cover_typ1;[exact ct6|].
apply similarity_sym in sim;[exact sim|]; auto.
}
destruct cB as [cB1 cB2].
assert (forall k seq1 seq2 s1a s2a cB1 cB2,
similarity lib s1a s2a H
-> hyps_functionality lib s1a H
-> eq_kseq_nout lib seq1 seq2 k
-> tequality
lib
(mkc_apply2 (lsubstc B wB s1a cB1) (mkc_nat k) seq1)
(mkc_apply2 (lsubstc B wB s2a cB2) (mkc_nat k) seq2)) as Bfunc.
{ introv sim0 hf0 eqk.
vr_seq_true in hyp_wfd.
pose proof (hyp_wfd
(snoc (snoc s1a (n,mkc_nat k)) (s,seq1))
(snoc (snoc s2a (n,mkc_nat k)) (s,seq2)))
as h; clear hyp_wfd.
repeat (autodimp h hyp).
{ apply hyps_functionality_snoc2; simpl; auto.
{ introv equ' sim'.
apply similarity_snoc in sim'; simpl in sim'.
exrepnd; subst; ginv; inj.
eapply tequality_respects_alphaeqc_left;
[apply alphaeqc_sym; apply lsubstc_mk_natk2nout_sp2; auto;
apply similarity_dom in sim'3; repnd; rw sim'0; auto
|].
eapply tequality_respects_alphaeqc_right;
[apply alphaeqc_sym; apply lsubstc_mk_natk2nout_sp2; auto;
apply similarity_dom in sim'3; repnd; rw sim'3; auto
|].
allrw @lsubstc_mkc_tnat.
apply equality_int_nat_implies_cequivc in sim'1.
eapply tequality_respects_cequivc_right;
[apply implies_cequivc_natk2nout; exact sim'1|].
eauto 3 with slow.
}
apply hyps_functionality_snoc2; simpl; auto.
introv equ' sim'.
allrw @lsubstc_mkc_tnat.
apply tnat_type.
}
{ assert (@wf_term o (mk_natk2nout (mk_var n))) as wfn.
{ apply wf_term_mk_natk2nout; auto. }
assert (cover_vars (mk_natk2nout (mk_var n)) (snoc s1a (n,mkc_nat k))) as cvn.
{ apply cover_vars_mk_natk2nout.
apply cover_vars_var.
rw @dom_csub_snoc.
rw in_snoc; simpl; sp. }
sim_snoc.
dands; auto.
{ pose proof (cover_vars_mk_tnat s1a) as cvs1.
pose proof (@wf_tnat o) as wftn.
sim_snoc.
dands; auto.
allrw @lsubstc_mkc_tnat.
apply equality_in_tnat_nat.
}
eapply alphaeqc_preserving_equality;
[|apply alphaeqc_sym;
apply lsubstc_mk_natk2nout_sp2; auto];
auto.
apply similarity_dom in sim0; repnd.
rw sim1; auto.
}
exrepnd.
lsubst_tac.
apply tequality_in_uni_implies_tequality in h0; auto.
apply member_if_inhabited in h1.
auto.
}
pose proof (bar_induction_cterm_meta
lib
(fun_sim_eq lib s1 H B wB)
(fun_sim_eq lib s1 H X w0)
(lsubstc B wB s1 cB1)
(lsubstc X w0 s1 c0)
(lsubstc c wt s1 ct3)
v)
as bi.
repeat (autodimp bi hyp);
[idtac
|idtac
|idtac
|pose proof (bi (lsubstc X w0 s2 c5) (seq2kseq (lsubstc c wt s2 ct4) 0 v)) as h;
allrw <- @mkc_zero_eq;
repeat (autodimp h hyp);[apply eq_kseq_nout_seq2kseq_0|idtac|repnd; dands; complete auto];
exists s2 c5;
dands; complete auto].
- intros seq1 iss.
vr_seq_true in hyp_bar.
pose proof (hyp_bar
(snoc s1 (s,seq1))
(snoc s1 (s,seq1)))
as hf; clear hyp_bar.
repeat (autodimp hf hyp).
{ apply hyps_functionality_snoc2; simpl; auto.
introv equ' sim'.
allrw @lsubstc_mk_nat2nout.
apply type_nat2nout.
}
{ assert (@wf_term o mk_nat2nout) as wfn.
{ apply wf_term_mk_nat2nout; auto. }
assert (cover_vars mk_nat2nout s1) as cvn.
{ apply cover_vars_mk_nat2nout. }
sim_snoc.
dands; auto.
{ eapply similarity_refl; eauto. }
allrw @lsubstc_mk_nat2nout.
auto.
}
exrepnd.
clear hf0.
lsubst_tac.
apply equality_in_mkc_squash in hf1; exrepnd.
clear hf0 hf2.
allunfold @mk_exists.
lsubst_tac.
allrw @lsubstc_mkc_tnat.
apply inhabited_product in hf1; exrepnd.
clear hf2.
apply member_tnat_implies_computes in hf1; exrepnd.
exists k.
introv eqs fse.
unfold fun_sim_eq in fse; exrepnd; subst.
repeat substc_lsubstc_vars3.
lsubst_tac.
clear_wf_hyps.
proof_irr.
pose proof (Bfunc k seq1 (seq2kseq seq1 k v) s1 s1 cB1 cB1) as h.
repeat (autodimp h hyp); eauto 3 with slow.
{ eapply similarity_refl; eauto. }
eapply inhabited_type_cequivc in hf3;
[|apply implies_cequivc_apply2;
[apply cequivc_refl
|apply computes_to_valc_implies_cequivc;eauto
|apply cequivc_refl]
].
eapply inhabited_type_tequality in hf3;[|eauto].
dands; auto.
- intros k seq1 iss sb C seq2 eqs fse.
clear iss.
unfold fun_sim_eq in fse; exrepnd; subst.
unfold nout_on_seq in sb.
rename fse0 into sim0.
assert (cover_vars B s0) as cB0.
{ eapply similarity_cover_vars;[exact sim0|]; auto. }
pose proof (sb (lsubstc B wB s0 cB0) seq2) as h; clear sb.
repeat (autodimp h hyp).
{ exists s0 cB0; dands; auto. }
repnd.
unfold inhabited_type in h0; exrepnd.
rename h1 into mem.
rename h into teq.
vr_seq_true in hyp_imp.
pose proof (hyp_imp
(snoc (snoc (snoc s1 (n,mkc_nat k)) (s,seq1)) (m,t))
(snoc (snoc (snoc s0 (n,mkc_nat k)) (s,seq2)) (m,t)))
as hf.
repeat (autodimp hf hyp).
{ apply hyps_functionality_snoc2; simpl; auto.
{ introv equ' sim'.
apply similarity_snoc in sim'; simpl in sim'.
exrepnd; subst; ginv; inj.
apply similarity_snoc in sim'3; simpl in sim'3.
exrepnd; subst; ginv; inj.
lsubst_tac.
allrw @lsubstc_mkc_tnat.
apply equality_int_nat_implies_cequivc in sim'2.
eapply alphaeqc_preserving_equality in sim'1;
[|apply lsubstc_mk_natk2nout_sp2; auto].
eapply tequality_respects_cequivc_right;
[apply implies_cequivc_apply2;
[apply cequivc_refl
|exact sim'2
|apply cequivc_refl]
|].
auto.
}
apply hyps_functionality_snoc2; simpl; auto.
{ introv equ' sim'.
apply similarity_snoc in sim'; simpl in sim'.
exrepnd; subst; ginv; cpx.
assert (!LIn n (dom_csub s2a)) as nns2a.
{ apply similarity_dom in sim'3; repnd.
rw sim'3; auto. }
eapply tequality_respects_alphaeqc_left;
[apply alphaeqc_sym;
apply lsubstc_mk_natk2nout_sp2; auto
|].
eapply tequality_respects_alphaeqc_right;
[apply alphaeqc_sym;
apply lsubstc_mk_natk2nout_sp2; auto
|].
rw @lsubstc_mkc_tnat in sim'1.
apply equality_int_nat_implies_cequivc in sim'1.
eapply tequality_respects_cequivc_right;
[apply implies_cequivc_natk2nout; exact sim'1|].
eauto 3 with slow.
}
apply hyps_functionality_snoc2; simpl; auto.
introv equ' sim'.
allrw @lsubstc_mkc_tnat.
apply tnat_type.
}
{ assert (wf_term (mk_apply2 B (mk_var n) (mk_var s))) as wfn.
{ apply wf_apply2; eauto 3 with slow. }
assert (cover_vars (mk_apply2 B (mk_var n) (mk_var s)) (snoc (snoc s1 (n,mkc_nat k)) (s,seq1))) as cvn.
{ apply cover_vars_apply2.
repeat (rw @cover_vars_var_iff).
repeat (rw @dom_csub_snoc); simpl.
repeat (rw in_snoc).
dands; tcsp.
repeat (apply cover_vars_snoc_weak); auto. }
sim_snoc.
dands; auto.
{ assert (@wf_term o (mk_natk2nout (mk_var n))) as wfk.
{ apply wf_term_mk_natk2nout; auto. }
assert (cover_vars (mk_natk2nout (mk_var n)) (snoc s1 (n,mkc_nat k))) as cvk.
{ apply cover_vars_mk_natk2nout.
apply cover_vars_var_iff.
repeat (rw @dom_csub_snoc); simpl.
repeat (rw in_snoc); sp. }
sim_snoc.
dands; auto.
{ assert (@wf_term o mk_tnat) as wft.
{ eauto 3 with slow. }
assert (cover_vars mk_tnat s1) as cvt.
{ apply cover_vars_mk_tnat. }
sim_snoc.
dands; auto.
allrw @lsubstc_mkc_tnat.
eauto 3 with slow.
}
eapply alphaeqc_preserving_equality;
[|apply alphaeqc_sym; apply lsubstc_mk_natk2nout_sp2; auto].
auto.
}
{ lsubst_tac; auto. }
}
exrepnd.
lsubst_tac.
apply inhabited_type_if_equality in hf1.
auto.
- intros k seq1 iss ind C seq2 eqs fse.
clear iss.
unfold fun_sim_eq in fse; exrepnd; subst.
vr_seq_true in hyp_ind.
pose proof (hyp_ind
(snoc (snoc (snoc s1 (n,mkc_nat k)) (s,seq1)) (x,lam_axiom))
(snoc (snoc (snoc s0 (n,mkc_nat k)) (s,seq2)) (x,lam_axiom)))
as hf; clear hyp_ind.
repeat (autodimp hf hyp).
{ apply hyps_functionality_snoc2; simpl; auto.
{ introv equ' sim'.
apply similarity_snoc in sim'; simpl in sim'.
exrepnd; subst; ginv; inj.
apply similarity_snoc in sim'3; simpl in sim'3.
exrepnd; subst; ginv; inj.
allunfold @mk_all.
lsubst_tac.
allrw @lsubstc_mkc_tnat.
allrw @lsubstc_mk_nout.
apply equality_int_nat_implies_cequivc in sim'2.
eapply alphaeqc_preserving_equality in sim'1;
[|apply lsubstc_mk_natk2nout_sp2; auto].
apply tequality_function; dands.
{ apply type_mkc_nout. }
introv en.
repeat substc_lsubstc_vars3.
lsubst_tac.
apply equality_in_nout in en.
exrepnd; spcast.
apply tequality_mkc_squash.
eapply tequality_respects_cequivc_left;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_lsubstc_mk_plus1_sp1;auto
|apply cequivc_lsubstc_mk_update_seq_sp0;auto]
|].
assert (!LIn n (dom_csub s2a0)) as nin2.
{ apply similarity_dom in sim'4; repnd.
rw sim'4; auto. }
assert (!LIn s (dom_csub s2a0)) as nis2.
{ apply similarity_dom in sim'4; repnd.
rw sim'4; auto. }
eapply tequality_respects_cequivc_right;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_lsubstc_mk_plus1_sp2; auto;
apply cequivc_sym;exact sim'2
|apply cequivc_lsubstc_mk_update_seq_sp3;auto;
apply cequivc_sym;eauto
]
|].
eapply tequality_respects_cequivc_left;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_refl
|apply implies_cequivc_update_seq_nout;eauto
]
|].
eapply tequality_respects_cequivc_right;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_refl
|apply implies_cequivc_update_seq_nout;eauto
]
|].
pose proof (ind (cterm2cnterm u en1)) as h; clear ind.
unfold nout_on_upd_seq in h.
unfold nout_on_seq in h; repnd.
allunfold @cnout_cterm.
allrw @cnterm2cterm2cnterm.
pose proof (h (lsubstc X w0 s2a0 c28) (update_seq_nout t2 k u v)) as q; clear h.
repeat (autodimp q hyp); repnd; auto;[|].
{ apply eq_kseq_nout_update2; auto. }
{ exists s2a0 c28; dands; auto. }
}
apply hyps_functionality_snoc2; simpl; auto.
{ introv equ' sim'.
apply similarity_snoc in sim'; simpl in sim'.
exrepnd; subst; ginv; inj.
assert (!LIn n (dom_csub s2a)) as nns2a.
{ apply similarity_dom in sim'3; repnd.
rw sim'3; auto. }
eapply tequality_respects_alphaeqc_left;
[apply alphaeqc_sym;
apply lsubstc_mk_natk2nout_sp2; auto
|].
eapply tequality_respects_alphaeqc_right;
[apply alphaeqc_sym;
apply lsubstc_mk_natk2nout_sp2; auto
|].
rw @lsubstc_mkc_tnat in sim'1.
apply equality_int_nat_implies_cequivc in sim'1.
eapply tequality_respects_cequivc_right;
[apply implies_cequivc_natk2nout; exact sim'1|].
eauto 3 with slow.
}
apply hyps_functionality_snoc2; simpl; auto.
introv equ' sim'.
allrw @lsubstc_mkc_tnat.
apply tnat_type.
}
{ assert (wf_term (mk_all mk_nout m
(mk_squash
(mk_apply2 X (mk_plus1 (mk_var n))
(mk_update_seq (mk_var s) (mk_var n) (mk_var m) v))))) as wa.
{ apply wf_function; auto.
apply wf_squash.
apply wf_apply2; auto. }
assert (cover_vars (mk_all mk_nout m
(mk_squash
(mk_apply2 X (mk_plus1 (mk_var n))
(mk_update_seq (mk_var s) (mk_var n) (mk_var m) v))))
(snoc (snoc s1 (n, mkc_nat k)) (s, seq1))) as ca.
{ apply cover_vars_function; dands; auto.
{ apply cover_vars_mk_tnat. }
apply cover_vars_upto_squash.
apply cover_vars_upto_apply2; dands; auto.
{ repeat (rw @csub_filter_snoc).
allrw memvar_singleton.
boolvar;tcsp;GC;[].
repeat (apply cover_vars_upto_snoc_weak).
apply cover_vars_upto_csub_filter_disjoint; auto.
apply disjoint_singleton_r; auto. }
{ apply cover_vars_upto_add; dands; eauto 3 with slow.
repeat (rw @csub_filter_snoc).
allrw memvar_singleton.
boolvar;tcsp;GC;[].
apply cover_vars_upto_var; simpl.
repeat (rw @dom_csub_snoc).
repeat (rw in_snoc;simpl).
sp. }
{ unfold mk_update_seq.
apply cover_vars_upto_lam.
rw @csub_filter_swap.
rw <- @csub_filter_app_r; simpl.
repeat (rw @csub_filter_snoc).
allrw memvar_cons; simpl.
boolvar;tcsp;GC;[].
apply cover_vars_upto_int_eq; dands.
{ apply cover_vars_upto_var; simpl.
repeat (rw @dom_csub_snoc).
repeat (rw in_snoc;simpl).
sp. }
{ apply cover_vars_upto_var; simpl.
repeat (rw @dom_csub_snoc).
repeat (rw in_snoc;simpl).
sp. }
{ apply cover_vars_upto_var; simpl.
repeat (rw @dom_csub_snoc).
repeat (rw in_snoc;simpl).
sp. }
{ apply cover_vars_upto_apply; dands.
{ apply cover_vars_upto_var; simpl.
repeat (rw @dom_csub_snoc).
repeat (rw in_snoc;simpl).
sp. }
{ apply cover_vars_upto_var; simpl.
repeat (rw @dom_csub_snoc).
repeat (rw in_snoc;simpl).
sp. }
}
}
}
sim_snoc.
dands; auto.
{ assert (@wf_term o (mk_natk2nout (mk_var n))) as wfk.
{ apply wf_term_mk_natk2nout; auto. }
assert (cover_vars (mk_natk2nout (mk_var n)) (snoc s1 (n,mkc_nat k))) as cvk.
{ apply cover_vars_mk_natk2nout.
apply cover_vars_var_iff.
repeat (rw @dom_csub_snoc); simpl.
repeat (rw in_snoc); sp. }
sim_snoc.
dands; auto.
{ assert (@wf_term o mk_tnat) as wft.
{ eauto 3 with slow. }
assert (cover_vars mk_tnat s1) as cvt.
{ apply cover_vars_mk_tnat. }
sim_snoc.
dands; auto.
allrw @lsubstc_mkc_tnat.
eauto 3 with slow.
}
eapply alphaeqc_preserving_equality;
[|apply alphaeqc_sym; apply lsubstc_mk_natk2nout_sp2; auto].
auto.
}
{ unfold mk_all.
lsubst_tac.
allrw @lsubstc_mk_nout.
apply equality_in_function.
dands; auto.
{ apply type_mkc_nout. }
{ introv en.
repeat substc_lsubstc_vars3.
lsubst_tac.
apply equality_in_nout in en; exrepnd; spcast.
apply tequality_mkc_squash.
eapply tequality_respects_cequivc_left;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_lsubstc_mk_plus1_sp1;auto
|apply cequivc_lsubstc_mk_update_seq_sp0;auto]
|];[].
eapply tequality_respects_cequivc_right;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_lsubstc_mk_plus1_sp2; auto;
apply cequivc_sym;exact sim'2
|apply cequivc_lsubstc_mk_update_seq_sp0;auto]
|];[].
eapply tequality_respects_cequivc_left;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_refl
|apply implies_cequivc_update_seq_nout;eauto
]
|].
eapply tequality_respects_cequivc_right;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_refl
|apply implies_cequivc_update_seq_nout;eauto
]
|].
pose proof (ind (cterm2cnterm u en1)) as h; clear ind.
unfold nout_on_upd_seq in h.
unfold nout_on_seq in h; repnd.
allunfold @cnout_cterm.
allrw @cnterm2cterm2cnterm.
pose proof (h (lsubstc X w0 s1 c0) (update_seq_nout seq1 k u v)) as q; clear h.
repeat (autodimp q hyp); repnd; auto;[|].
{ apply eq_kseq_nout_update2; auto.
eapply equality_refl; eauto. }
{ exists s1 c0; dands; auto.
eapply similarity_refl; eauto. }
}
{ introv en.
repeat substc_lsubstc_vars3.
eapply equality_respects_cequivc_left;
[apply cequivc_sym;apply cequivc_mkc_apply_lam_axiom|].
eapply equality_respects_cequivc_right;
[apply cequivc_sym;apply cequivc_mkc_apply_lam_axiom|].
clear_wf_hyps.
proof_irr.
lsubst_tac.
apply equality_in_mkc_squash; dands; spcast;
try (apply computes_to_valc_refl; eauto 3 with slow).
apply equality_in_nout in en; exrepnd; spcast.
eapply inhabited_type_cequivc;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_lsubstc_mk_plus1_sp1;auto
|apply cequivc_lsubstc_mk_update_seq_sp0;auto]
|];[].
eapply inhabited_type_cequivc;
[apply cequivc_sym;
apply implies_cequivc_apply2;
[apply cequivc_refl
|apply cequivc_refl
|apply implies_cequivc_update_seq_nout;eauto
]
|];[].
pose proof (ind (cterm2cnterm u en1)) as h; clear ind.
unfold nout_on_upd_seq in h.
unfold nout_on_seq in h; repnd.
allunfold @cnout_cterm.
allrw @cnterm2cterm2cnterm.
pose proof (h (lsubstc X w0 s1 c0) (update_seq_nout seq1 k u v)) as q; clear h.
repeat (autodimp q hyp); repnd; auto;[|].
{ apply eq_kseq_nout_update2; auto.
eapply equality_refl; eauto. }
{ exists s1 c0; dands; auto.
eapply similarity_refl; eauto. }
}
}
}
exrepnd.
lsubst_tac.
apply inhabited_type_if_equality in hf1.
dands; auto.
Qed.
|
{"author": "vrahli", "repo": "NuprlInCoq", "sha": "0c3d7723836d3f615ea47f56e58b2ea6173e7d98", "save_path": "github-repos/coq/vrahli-NuprlInCoq", "path": "github-repos/coq/vrahli-NuprlInCoq/NuprlInCoq-0c3d7723836d3f615ea47f56e58b2ea6173e7d98/bar_induction/bar_induction_cterm2.v"}
|
import numpy as np
from utils import peak_skewness, peak_kurtosis
class Gene:
    """
    Gene object: stores a gene's genomic-marker signal tracks and its
    expression value, and computes peak statistics (total signal, width,
    height, kurtosis, skewness, coverage) over a genomic window.
    """

    def __init__(self, gene_id, celltype, label, chr, start, end, step=10,
                 signal=None, exp=None, cur_signal=None):
        """
        :param gene_id: unique gene identifier
        :param celltype: biosample term name
        :param label: gene label, positive or control, 1 or 0
        :param chr: chromosome name
        :param start: gene start coordinate
        :param end: gene end coordinate
        :param step: bin size (bp) of the signal tracks
        :param signal: dict, save the tracks of genomic markers
        :param exp: gene expression
        :param cur_signal: dict, save the tracks of genomic markers based
            on the current parameters
        """
        self.gene_id = gene_id
        self.celltype = celltype
        self.label = label
        self.chr = chr
        self.start = start
        self.end = end
        self.step = step
        # Fresh dicts per instance when none are supplied.
        self.signal = signal if signal is not None else {}
        self.exp = exp
        self.cur_signal = cur_signal if cur_signal is not None else {}

    @classmethod
    def load_signal(cls, gene_id, celltype, label, chr, start, end, **kwargs):
        """
        Build a Gene pre-loaded with signal tracks.

        :param kwargs: key: genomic marker type, eg. H3K4me3..,
            value: numpy array of genomic tracks; first row is genome
            index, 2nd row is genome value, step need to 10.
        :return: gene object
        """
        gene_obj = cls(gene_id, celltype, label, chr, start, end)
        gene_obj.signal.update(kwargs)
        return gene_obj

    @classmethod
    def load_exp(cls, gene_id, celltype, label, chr, start, end, exp):
        """
        Build a Gene pre-loaded with an expression value.

        :param exp: RNA expression
        :return: gene object
        """
        gene_obj = cls(gene_id, celltype, label, chr, start, end)
        gene_obj.exp = exp
        return gene_obj

    def add_exp(self, exp):
        """Attach an expression value."""
        self.exp = exp

    def add_signal(self, signals):
        """Merge a dict of marker tracks into this gene's signal dict."""
        self.signal.update(signals)

    def get_exp(self):
        """Return the stored expression value (may be None)."""
        return self.exp

    def get_signal(self, marker, start, end, step):
        """
        Slice a marker track to the genomic window [start, end).

        :param marker: type of genomic marker, eg. H3K4me3
        :param start: start pos, genome coordinate
        :param end: end pos, genome coordinate
        :param step: genome bin size
        :return: 2 x n array (bin-index row, value row), or None when the
            window is inverted
        """
        if end < start:
            print("start position need to be smaller than end position")
            return None
        marker_tracks = self.signal[marker]
        # Convert coordinates to bin indices.  `//` keeps floor division
        # consistent across Python 2/3 (assumes integer coordinates --
        # TODO confirm).  Round the end bin up when not bin-aligned.
        if end % step == 0:
            start = start // step
            end = end // step
        else:
            start = start // step
            end = end // step + 1
        start_index = np.searchsorted(marker_tracks[0, :], start)
        end_index = np.searchsorted(marker_tracks[0, :], end)
        return marker_tracks[:, start_index:end_index]

    def update_cur_signal(self, marker, start, end, step):
        """Cache the current window's slice of `marker` in cur_signal."""
        self.cur_signal[marker] = self.get_signal(marker, start, end, step)
        return

    def _windowed_values(self, marker, start, end, step):
        """Refresh cur_signal for `marker` and return the cached slice,
        or None when the slice is missing or empty."""
        self.update_cur_signal(marker, start, end, step)
        cur_values = self.cur_signal[marker]
        if cur_values is None or len(cur_values) == 0 or len(cur_values[1, :]) == 0:
            return None
        return cur_values

    def get_total_signal(self, marker, start, end, height, step):
        """Sum of above-threshold values in the window, scaled by bin size."""
        cur_values = self._windowed_values(marker, start, end, step)
        if cur_values is None:
            return 0
        return cur_values[1, cur_values[1, :] > height].sum() * step

    def get_total_width(self, marker, start, end, height, step):
        """Total bp covered by above-threshold bins in the window."""
        cur_values = self._windowed_values(marker, start, end, step)
        if cur_values is None:
            return 0
        return cur_values[1, cur_values[1, :] > height].shape[0] * step

    def get_height(self, marker, start, end, height, step):
        """Peak maximum in the window, or 0 when it stays below `height`."""
        cur_values = self._windowed_values(marker, start, end, step)
        if cur_values is None:
            return 0
        max_height = cur_values[1, :].max()
        return max_height if max_height >= height else 0

    def get_kurtosis(self, marker, start, end, height, step):
        """Kurtosis of the above-threshold peak (0 when window is empty)."""
        cur_values = self._windowed_values(marker, start, end, step)
        if cur_values is None:
            return 0
        return peak_kurtosis(cur_values[:, cur_values[1, :] > height])

    def get_skewness(self, marker, start, end, height, step):
        """Skewness of the above-threshold peak (0 when window is empty)."""
        cur_values = self._windowed_values(marker, start, end, step)
        if cur_values is None:
            return 0
        return peak_skewness(cur_values[:, cur_values[1, :] > height])

    def get_coverage(self, marker, start, end, height, step):
        """Fraction of the window [start, end) covered by above-threshold bins."""
        cur_values = self._windowed_values(marker, start, end, step)
        if cur_values is None:
            return 0
        total_width = cur_values[1, cur_values[1, :] > height].shape[0] * step
        return total_width * 1. / (end - start)
|
{"hexsha": "1d30f43350287fd67eed217088296d5b807eeafe", "size": 5784, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/gene.py", "max_stars_repo_name": "bxia888/CEFCIG", "max_stars_repo_head_hexsha": "5431d3bba91847c99f9facc67d1996b08705e8e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-09T13:52:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-09T13:52:09.000Z", "max_issues_repo_path": "src/gene.py", "max_issues_repo_name": "bxia888/CEFCIG", "max_issues_repo_head_hexsha": "5431d3bba91847c99f9facc67d1996b08705e8e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gene.py", "max_forks_repo_name": "bxia888/CEFCIG", "max_forks_repo_head_hexsha": "5431d3bba91847c99f9facc67d1996b08705e8e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-10-12T21:45:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-14T00:03:36.000Z", "avg_line_length": 36.3773584906, "max_line_length": 115, "alphanum_fraction": 0.594571231, "include": true, "reason": "import numpy", "num_tokens": 1435}
|
#!/usr/bin/env
"""
ctd.py
Seabird CNV only
Built using Anaconda packaged Python:
Original code reference:
--------------
purpose: Some classes and functions to work with CTD data.
author: Filipe P. A. Fernandes
e-mail: ocefpaf@gmail
web: http://ocefpaf.tiddlyspot.com/
created: 22-Jun-2012
modified: Fri 19 Jul 2013 06:24:21 PM BRT
Todo
----
Read <xml> information too (sensor and cal information)
History
-------
Found bug in interp2sfc that wasn't allowing pandas frame to be indexed properly
Initially pressure field was hardcoded to prDM, now prSM is also available
"""
from __future__ import absolute_import
# Standard library.
import warnings, datetime, os, sys
# Scientific stack.
import numpy as np
from pandas import Series, DataFrame
from pandas import read_table, concat
from netCDF4 import Dataset
__all__ = ['CTD',
'interp2sfc',
'from_cnv',
'from_netCDF',
'rosette_summary']
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
# Leading-zero integer literals (01, 13 written as 01) are a SyntaxError
# under Python 3; plain 1/13 are equivalent in Python 2.
__created__ = datetime.datetime(2014, 1, 13)
__modified__ = datetime.datetime(2014, 1, 13)
__version__ = "0.4.0"
__status__ = "Development"
class CTD(DataFrame):
    """DataFrame subclass carrying CTD cast metadata (position, header,
    configuration, time, serial number) alongside the tabular sensor data."""

    def __init__(self, data=None, index=None, columns=None, name=None,
                 longitude=None, latitude=None, header=None, SFC_EXTEND=None,
                 time_str=None, serial=None, config=None, dtype=None, copy=False):
        super(CTD, self).__init__(data=data, index=index,
                                  columns=columns, dtype=dtype,
                                  copy=copy)
        self.longitude = longitude
        self.latitude = latitude
        self.header = header
        self.SFC_EXTEND = SFC_EXTEND
        self.time_str = time_str
        self.serial = serial
        self.config = config
        self.name = name

    def __reduce__(self):
        # Support pickling by reconstructing through the constructor.
        # The argument tuple must follow __init__'s positional order
        # (data, index, columns, name, longitude, latitude, header,
        # SFC_EXTEND, time_str, serial, config, dtype, copy); the previous
        # version passed `longitude` in the `name` slot, shifting every
        # metadata attribute by one position on unpickling.
        return self.__class__, (
            DataFrame(self),   # copy-constructs the tabular data
            None,              # index (already embedded in the data copy)
            None,              # columns (likewise)
            self.name,
            self.longitude,
            self.latitude,
            self.header,
            self.SFC_EXTEND,
            self.time_str,
            self.serial,
            self.config,
            None,              # dtype
            False,             # copy
        )
def remove_above_water(cast):
    """Drop scans recorded above the sea surface (negative pressure index)."""
    below_surface = cast.index >= 0
    return cast[below_surface]
def remove_flagged(cast):
    """Return the cast without the scans whose 'flag' column is set.

    NOTE(review): the result comes from DataFrame.drop, so custom CTD
    attributes (e.g. time_str) are not carried over -- confirm whether
    callers rely on them ("fails becasue dataframe attributes dont copy?").
    """
    flagged_labels = cast.index[cast['flag'] == True]  # noqa: E712 -- elementwise comparison
    return cast.drop(flagged_labels)
def interp2sfc(cast, pressure_key='prDM'):
    """Extrapolate a cast to the surface by replicating the shallowest
    scan in 1 dbar steps until the pressure value passes zero.

    :param cast: DataFrame of the cast containing `pressure_key`
    :param pressure_key: pressure column name ('prDM' or 'prSM')
    :return: (cast, min_val_report) where min_val_report is the original
             shallowest pressure (0.0 when the column is missing/empty)

    Horribly inefficient for large arrays (one concat per dbar).
    """
    try:
        min_val_report = cast[pressure_key].values.min()
    except Exception:  # missing column or empty cast -> nothing to extend
        min_val_report = 0.0
    min_val = min_val_report
    while min_val > 0.0:
        print('Extrapolating to surface %s' % min_val)
        cast = concat([cast.head(n=1), cast], ignore_index=True)
        min_val = min_val - 1.
        # .loc avoids chained assignment (cast[key][0] = ...), which can
        # silently write to a copy in modern pandas.
        cast.loc[0, pressure_key] = min_val  # revalue at each copy
    return (cast, min_val_report)
def from_cnv(fname, compression=None, below_water=False, lon=None,
             lat=None, pressure_varname='prDM'):
    """
    DataFrame constructor to open Seabird CTD CNV-ASCII format.

    :param fname: path to the .cnv file
    :param compression: unused; kept for interface compatibility
    :param below_water: if True, drop scans above the surface
    :param lon, lat: fallback position when no NMEA header lines exist
    :param pressure_varname: pressure column name ('prDM' or 'prSM')

    Examples
    --------
    >>> from ctd import DataFrame
    >>> cast = DataFrame.from_cnv('../test/data/CTD_big.cnv.bz2',
    ...                           compression='bz2')
    >>> downcast, upcast = cast.split()
    >>> fig, ax = downcast['t090c'].plot()
    >>> ax.grid(True)
    """
    f = open(fname)
    header, config, names, PMELheader = [], [], [], []
    has_NMEA = False
    skiprows = None
    for k, line in enumerate(f.readlines()):
        line = line.strip()
        if '# name' in line:  # Get columns names.
            name, unit = line.split('=')[1].split(':')
            name, unit = map(normalize_names, (name, unit))
            names.append(name)
        if line.startswith('*'):  # Get header.
            header.append(line)
        if line.startswith('#'):  # Get configuration file.
            config.append(line)
        if line.startswith('@'):  # Get PMEL Header.
            PMELheader.append(line)
        if 'NMEA Latitude' in line:
            hemisphere = line[-1]
            lat = line.strip(hemisphere).split('=')[1].strip()
            lat = np.float_(lat.split())
            if hemisphere == 'S':
                lat = -(lat[0] + lat[1] / 60.)
            elif hemisphere == 'N':
                lat = lat[0] + lat[1] / 60.
            else:
                raise ValueError("Latitude not recognized.")
        if 'NMEA Longitude' in line:
            hemisphere = line[-1]
            lon = line.strip(hemisphere).split('=')[1].strip()
            lon = np.float_(lon.split())
            if hemisphere == 'W':
                lon = -(lon[0] + lon[1] / 60.)
            elif hemisphere == 'E':
                lon = lon[0] + lon[1] / 60.
            else:
                # was: "Latitude not recognized." (copy/paste error)
                raise ValueError("Longitude not recognized.")
        if 'NMEA UTC' in line:
            # was: line.strip(hemisphere)... -- NameError when the file
            # carries no NMEA position lines, and could eat time digits.
            time_str = line.split('=')[-1].strip()
            has_NMEA = True
        if '* System UTC' in line:
            systime_str = line.split('=')[-1].strip()
        elif '* System UpLoad Time' in line:
            systime_str = line.split('=')[-1].strip()
        if line == '*END*':  # Get end of header.
            skiprows = k + 1
            if not has_NMEA:  # fall back to system time / unknown position
                time_str = systime_str
                lon = -999.9
                lat = -999.9
            break
    if skiprows is None:
        # was: NameError from an undefined `skiprows` below
        f.close()
        raise ValueError('No *END* header marker found in %s' % fname)
    f.seek(0)
    cast = read_table(f, header=None, index_col=None, names=names,
                      skiprows=skiprows, delim_whitespace=True)
    f.close()
    (cast, min_value) = interp2sfc(cast, pressure_key=pressure_varname)
    cast.set_index(pressure_varname, drop=False, inplace=True)
    cast.index.name = 'Pressure [dbar]'
    # Cast name: parent directory stem + file stem, e.g. 'tn265_ctd001'.
    name = basename(fname)[0].split('/')[-2] + '_' + basename(fname)[1]
    print(name)
    dtypes = dict(bpos=int, pumps=bool, flag=bool)
    for column in cast.columns:
        if column in dtypes:
            cast[column] = cast[column].astype(dtypes[column])
        else:
            try:
                cast[column] = cast[column].astype(float)
            except ValueError:
                warnings.warn('Could not convert %s to float.' % column)
    if below_water:
        cast = remove_above_water(cast)
    # TODO: Return interp2sfc min_value as "SFC_EXTEND" attribute
    return CTD(cast, longitude=lon, latitude=lat, name=name, header=header,
               config=config, SFC_EXTEND=min_value, time_str=time_str)
def rosette_summary(fname):
    """
    Make a BTL (bottle) file from a ROS (bottle log) file.

    Reading the .ros through from_cnv gives full control over the
    averaging process and the step at which it is applied, so the data
    need not be re-read by the SBE software after pre-processing.

    NOTE: Do not run LoopEdit on the upcast!

    Examples
    --------
    >>> fname = '../test/data/CTD/g01l01s01.ros'
    >>> ros = rosette_summary(fname)
    >>> ros = ros.groupby(ros.index).mean()
    >>> np.int_(ros.pressure.values)
    array([835, 806, 705, 604, 503, 404, 303, 201, 151, 100, 51, 1])
    """
    summary = from_cnv(fname)
    summary['pressure'] = summary.index.values.astype(float)
    summary['nbf'] = summary['nbf'].astype(int)
    summary.set_index('nbf', drop=True, inplace=True, verify_integrity=False)
    return summary
def archive_btl(fname):
    """Todo: ingest companion .btl file for nc archiving"""
    # Intentionally a no-op placeholder until the .btl reader is written.
    pass
"""------------------------- Routines (from utilites) --------------------------------"""
class DataTimes(object):
    """
    Convert a time string of the form 'mmm dd yyyy hh:mm:ss' into a
    python ordinal date or an EPIC date (two integer/float time keys).

    Example
    -------
    >>>import ctd
    >>>timeinstance = ctd.DataTimes(time_str='jan 01 2013 12:00:00')
    >>> EPIC_Day, EPIC_time = timeinstance.get_EPIC_date()

    Reference
    ---------
    PMEL-EPIC Conventions (misprint) says 2400000
    http://www.epic.noaa.gov/epic/eps-manual/epslib_ch5.html#SEC57 says:
    May 23 1968 is 2440000 and July4, 1994 is 2449538
    """

    # EPIC epoch anchor: May 23 1968 corresponds to Julian day 2440000.
    ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
    ref_time_epic = 2440000
    offset = ref_time_epic - ref_time_py

    def __init__(self, time_str='jan 01 0001 00:00:00'):
        self.time_str = time_str
        self.date_time = datetime.datetime.strptime(time_str, '%b %d %Y %H:%M:%S')

    def _day_fraction(self):
        """Fraction of the day elapsed at self.date_time (in [0, 1))."""
        return ((self.date_time.hour / (24.)) +
                (self.date_time.minute / (24. * 60.)) +
                (self.date_time.second / (24. * 60. * 60.)))

    def get_python_date(self):
        """Python ordinal date including the fractional day."""
        whole_days = self.date_time.toordinal()
        return whole_days + self._day_fraction()

    def get_EPIC_date(self):
        """(EPIC julian day, milliseconds since midnight) pair."""
        epic_day = self.date_time.toordinal() + DataTimes.offset
        msec = self._day_fraction() * (24. * 60. * 60. * 1000.)
        return (epic_day, int(msec))
def normalize_names(name):
    """Trim surrounding whitespace, then surrounding '*' characters,
    from a CNV header token."""
    return name.strip().strip('*')
def basename(fname):
    """Return filename without path.

    Examples
    --------
    >>> fname = '../data/tn265/ctd001.cnv' #unix
    >>> fname = 'c:\data\tn265\ctd001.cnv' #windows
    >>> basename(fname)
    """
    posix_style = fname.replace('\\', '/')  # normalize windows separators
    directory, leaf = os.path.split(posix_style)
    stem, extension = os.path.splitext(leaf)
    return directory, stem, extension
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
|
{"hexsha": "0c2070ae3d9b7cb6c93c7f0cec5db8ccc5b6701d", "size": 9958, "ext": "py", "lang": "Python", "max_stars_repo_path": "OnCruiseRoutines/CTD_Vis/ctd.py", "max_stars_repo_name": "shaunwbell/AtSeaPrograms", "max_stars_repo_head_hexsha": "839ee4dc1cf7a85bce1de82b04379c6d1670c414", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-08-03T17:03:10.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-03T17:03:10.000Z", "max_issues_repo_path": "OnCruiseRoutines/CTD_Vis/ctd.py", "max_issues_repo_name": "NOAA-PMEL/AtSeaPrograms", "max_issues_repo_head_hexsha": "839ee4dc1cf7a85bce1de82b04379c6d1670c414", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-04-13T17:13:13.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-13T17:14:11.000Z", "max_forks_repo_path": "OnCruiseRoutines/CTD_Vis/ctd.py", "max_forks_repo_name": "shaunwbell/AtSeaPrograms", "max_forks_repo_head_hexsha": "839ee4dc1cf7a85bce1de82b04379c6d1670c414", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-15T06:22:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-15T06:22:23.000Z", "avg_line_length": 32.6491803279, "max_line_length": 90, "alphanum_fraction": 0.5721028319, "include": true, "reason": "import numpy", "num_tokens": 2539}
|
function jed = ymdf_to_jed_islamic_b ( y, m, d, f )

%*****************************************************************************80
%
%% YMDF_TO_JED_ISLAMIC_B converts an Islamic B YMDF date to a JED.
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    13 March 2013
%
%  Author:
%
%    John Burkardt
%
%  Reference:
%
%    Edward Richards,
%    Algorithm E,
%    Mapping Time, The Calendar and Its History,
%    Oxford, 1999, pages 323-324.
%
%  Parameters:
%
%    Input, integer Y, M, D, real F, the YMDF date.
%
%    Output, real JED, the corresponding Julian Ephemeris Date,
%    or -1.0 when the input is not a legal Islamic date.
%

%
%  Reject illegal Islamic dates.
%
  [ y, m, d, ierror ] = ymd_check_islamic ( y, m, d );

  if ( ierror ~= 0 )
    jed = -1.0;
    return
  end
%
%  Shift the calendar date onto the computational calendar.
%
  yc = y + 5519 - floor ( ( 12 - m ) / 12 );
  mc = mod ( m + 11, 12 );
  dc = d - 1;
%
%  Accumulate whole days contributed by the years and months, then add
%  the day number, the epoch offset, and the fractional day.
%
  year_days  = floor ( ( 10631 * yc + 14 ) / 30 );
  month_days = floor ( ( 2951 * mc + 51 ) / 100 );

  jed = year_days + month_days + dc - 7664 - 0.5 + f;

  return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/calpak/ymdf_to_jed_islamic_b.m"}
|
// Copyright (c) 2012 - 2017 Object Computing, Inc.
// All rights reserved.
// See the file license.txt for licensing information.
#define BOOST_TEST_NO_MAIN LiquibookTest
#include <boost/test/unit_test.hpp>
#include "ut_utils.h"
#include "changed_checker.h"
#include <book/order_book.h>
#include <simple/simple_order.h>
#include <simple/simple_order_book.h>
#include <memory>
namespace liquibook {
using book::DepthLevel;
using book::OrderBook;
using book::OrderTracker;
using simple::SimpleOrder;
typedef std::shared_ptr<SimpleOrder> SimpleOrderPtr;
// Order book specialization that dispatches matching-engine callbacks
// directly onto the owned SimpleOrder objects.
class SharedPtrOrderBook : public OrderBook<SimpleOrderPtr>
{
  virtual void perform_callback(OrderBook<SimpleOrderPtr>::TypedCallback& cb)
  {
    if (cb.type == TypedCallback::cb_order_accept) {
      cb.order->accept();
    } else if (cb.type == TypedCallback::cb_order_fill) {
      const Cost traded_cost = cb.price * cb.quantity;
      cb.order->fill(cb.quantity, traded_cost, 0);
      cb.matched_order->fill(cb.quantity, traded_cost, 0);
    } else if (cb.type == TypedCallback::cb_order_cancel) {
      cb.order->cancel();
    } else if (cb.type == TypedCallback::cb_order_replace) {
      cb.order->replace(cb.delta, cb.price);
    }
    // All other callback types are intentionally ignored.
  }
};
typedef FillCheck<SimpleOrderPtr> SharedFillCheck;
BOOST_AUTO_TEST_CASE(TestSharedPointerBuild)
{
  SharedPtrOrderBook book;

  // One resting bid and two resting asks.
  SimpleOrderPtr sell_far(new SimpleOrder(false, 1252, 100));
  SimpleOrderPtr sell_near(new SimpleOrder(false, 1251, 100));
  SimpleOrderPtr buy_cross(new SimpleOrder(true, 1251, 100));
  SimpleOrderPtr buy_rest(new SimpleOrder(true, 1250, 100));

  // None of these additions should trade.
  BOOST_CHECK(add_and_verify(book, buy_rest, false));
  BOOST_CHECK(add_and_verify(book, sell_near, false));
  BOOST_CHECK(add_and_verify(book, sell_far, false));

  // Book depth after the quiet builds.
  BOOST_CHECK_EQUAL(1, book.bids().size());
  BOOST_CHECK_EQUAL(2, book.asks().size());

  // The crossing buy fills completely against the near ask.
  {
    SharedFillCheck buyer_check(buy_cross, 100, 125100);
    SharedFillCheck seller_check(sell_near, 100, 125100);
    BOOST_CHECK(add_and_verify(book, buy_cross, true, true));
  }

  // Filled orders are gone; one level remains on each side.
  BOOST_CHECK_EQUAL(1, book.bids().size());
  BOOST_CHECK_EQUAL(1, book.asks().size());
}
BOOST_AUTO_TEST_CASE(TestSharedCancelBid)
{
  SharedPtrOrderBook book;

  SimpleOrderPtr sell_far(new SimpleOrder(false, 1252, 100));
  SimpleOrderPtr sell_near(new SimpleOrder(false, 1251, 100));
  SimpleOrderPtr buy_only(new SimpleOrder(true, 1250, 100));

  // Populate the book; nothing crosses.
  BOOST_CHECK(add_and_verify(book, buy_only, false));
  BOOST_CHECK(add_and_verify(book, sell_near, false));
  BOOST_CHECK(add_and_verify(book, sell_far, false));

  BOOST_CHECK_EQUAL(1, book.bids().size());
  BOOST_CHECK_EQUAL(2, book.asks().size());

  // Cancelling the lone bid empties that side of the book.
  BOOST_CHECK(cancel_and_verify(book, buy_only, simple::os_cancelled));

  BOOST_CHECK_EQUAL(0, book.bids().size());
  BOOST_CHECK_EQUAL(2, book.asks().size());
}
} // namespace
|
{"hexsha": "d1f1322653dc32ea8fa3cc05b7f7dc5c3a1685c6", "size": 3041, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/unit/ut_order_book_shared_ptr.cpp", "max_stars_repo_name": "coinderex/liquibook", "max_stars_repo_head_hexsha": "110695cf0a234888555323c94d8b65edb45fe8cf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 550.0, "max_stars_repo_stars_event_min_datetime": "2015-01-05T23:44:03.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-04T01:48:13.000Z", "max_issues_repo_path": "test/unit/ut_order_book_shared_ptr.cpp", "max_issues_repo_name": "swtcfoundation/liquibook", "max_issues_repo_head_hexsha": "110695cf0a234888555323c94d8b65edb45fe8cf", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 22.0, "max_issues_repo_issues_event_min_datetime": "2015-04-24T15:35:43.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-09T04:54:49.000Z", "max_forks_repo_path": "test/unit/ut_order_book_shared_ptr.cpp", "max_forks_repo_name": "swtcfoundation/liquibook", "max_forks_repo_head_hexsha": "110695cf0a234888555323c94d8b65edb45fe8cf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 249.0, "max_forks_repo_forks_event_min_datetime": "2015-01-20T14:14:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-18T13:55:21.000Z", "avg_line_length": 28.4205607477, "max_line_length": 77, "alphanum_fraction": 0.71719829, "num_tokens": 791}
|
#include <boost/phoenix/statement/while.hpp>
|
{"hexsha": "7b177457ab074ecc679ae6fcd23015b111d5b3af", "size": 45, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_phoenix_statement_while.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_phoenix_statement_while.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_phoenix_statement_while.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 22.5, "max_line_length": 44, "alphanum_fraction": 0.8, "num_tokens": 11}
|
#ifndef GRAPHCLASS
#define GRAPHCLASS
#include "graph_node.hpp"
#include <Eigen/Dense>
#include <memory>
#include <stddef.h>
#include <vector>
// Undirected graph whose costs live on the vertices (node weights).
// Built either randomly or from a weights file, and queried for a
// least-cost path between two node indices via find_path().
class UndirectedGraph {
public:
  // constructors
  UndirectedGraph() = delete;
  // Random graph: node weights in [lower_weight, upper_weight], edges
  // present with probability vertex_prob; retries up to max_trials times
  // (presumably until a connected graph is produced -- confirm in .cpp).
  UndirectedGraph(const size_t num_nodes, const double lower_weight,
                  const double upper_weight, const double vertex_prob,
                  const int max_trials); // random initialization
  UndirectedGraph(
      const size_t num_nodes, const double lower_weight,
      const double upper_weight, const double vertex_prob,
      const std::string path_to_weights_file); // vertex initialization based on
                                               // given weights file
  // destructors
  ~UndirectedGraph() = default;

  // methods
  double get_shortest_path_costs() const;
  double get_avg_vertex_costs() const;
  std::vector<size_t> get_shortest_path_idxs() const;
  void print_shortest_path_idxs() const;
  void print_weight_matrix() const;
  bool is_connected() const;
  // Computes the least-cost path from start_idx to finish_idx and stores
  // the result in shortest_path_* members.
  void find_path(const int &start_idx, const int &finish_idx);

private:
  void create_random_graph();
  void create_graph_based_on_file();
  void calculate_avg_vertex_weights();
  bool valid_start_end_nodes(const int &start_node_idx,
                             const int &end_node_idx);

  double lower_vertex_weight_;   // lower bound for random node weights
  double upper_vertex_weight_;   // upper bound for random node weights
  double vertex_prob_;           // edge-presence probability
  double avg_vertex_cost_;       // cached average node weight
  double shortest_path_cost_;    // cost of the most recent find_path result
  size_t num_nodes_;
  size_t num_edges_;
  bool dijkstra_run_;            // true once find_path has executed
  Eigen::MatrixXd
      vertex_weights_; // every row and column describes a node, the values
                       // within the matrix are the vertex weights
  // connection_matrix_(i, j) is true when nodes i and j are adjacent.
  Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic> connection_matrix_;
  std::vector<std::shared_ptr<GraphNode>> graph_nodes_;
  std::vector<std::shared_ptr<GraphNode>> shortest_path_nodes_;
  std::vector<int> shortest_path_idxs_;
};
#endif /* GRAPHCLASS */
|
{"hexsha": "9a67827514109914706659c064585d9c2a471304", "size": 1941, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/graph_utils/graph_class.hpp", "max_stars_repo_name": "jweber94/dijkstras_shortest_path", "max_stars_repo_head_hexsha": "ec175081895b5bca924c7c98d403fa61db59b992", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/graph_utils/graph_class.hpp", "max_issues_repo_name": "jweber94/dijkstras_shortest_path", "max_issues_repo_head_hexsha": "ec175081895b5bca924c7c98d403fa61db59b992", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/graph_utils/graph_class.hpp", "max_forks_repo_name": "jweber94/dijkstras_shortest_path", "max_forks_repo_head_hexsha": "ec175081895b5bca924c7c98d403fa61db59b992", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.35, "max_line_length": 80, "alphanum_fraction": 0.7161257084, "num_tokens": 416}
|
import numpy as np
def deprojectVis(data, incl=0., PA=0., offset=[0., 0.], wsc=1.):
    """Deproject interferometric visibilities for a source at inclination
    `incl` and position angle `PA`, with a phase-center offset.

    :param data: tuple of (u, v, real, imag) arrays
    :param incl: inclination [degrees]
    :param PA: position angle [degrees]
    :param offset: (dx, dy) phase-center offset (sequence of length 2)
    :param wsc: unused weight scale; kept for interface compatibility
    :return: (rhop, realp, imagp): deprojected uv distance and phases
    """
    # - read in, parse data
    u, v, real, imag = data

    # - convert keywords into relevant units
    inclr = np.radians(incl)
    PAr = np.radians(PA)
    # np.asarray is required: `1e3 * [0., 0.]` raises TypeError for the
    # (default) plain-list offset argument.
    offr = 1e3 * np.asarray(offset) * np.pi / (180. * 3600.)

    # - change to an appropriate coordinate system
    up = (u * np.cos(PAr) + v * np.sin(PAr)) * np.cos(inclr)
    vp = (-u * np.sin(PAr) + v * np.cos(PAr))
    rhop = np.sqrt(up ** 2 + vp ** 2)

    # - phase shifts
    # NOTE(review): a true phase rotation would mix real and imag
    # (re' = re*cos - im*sin; im' = re*sin + im*cos); the original
    # independent scaling is preserved here -- confirm vs. derivation.
    realp = real * np.cos(-2. * np.pi * (offr[0] * u + offr[1] * v))
    imagp = imag * np.sin(-2. * np.pi * (offr[0] * u + offr[1] * v))

    # package for return
    return rhop, realp, imagp
|
{"hexsha": "32b44303f7e2350a5f1e97020161e314d05160d8", "size": 689, "ext": "py", "lang": "Python", "max_stars_repo_path": "deprojectVis.py", "max_stars_repo_name": "seanandrews/discrete-SB", "max_stars_repo_head_hexsha": "28571ebcef2c01cc2403eff6d3183beb7bf3810e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deprojectVis.py", "max_issues_repo_name": "seanandrews/discrete-SB", "max_issues_repo_head_hexsha": "28571ebcef2c01cc2403eff6d3183beb7bf3810e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deprojectVis.py", "max_forks_repo_name": "seanandrews/discrete-SB", "max_forks_repo_head_hexsha": "28571ebcef2c01cc2403eff6d3183beb7bf3810e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5, "max_line_length": 64, "alphanum_fraction": 0.5732946299, "include": true, "reason": "import numpy", "num_tokens": 248}
|
# import numpy as np
import pandas as pd
class PandasUtil():
    """Helpers that coerce pandas Series to canonical dtypes."""

    def __init__(self, datetime_format=None):
        # Optional strptime-style format applied when parsing timestamps.
        self.datetime_format = datetime_format

    def fix_string(self, values):
        """Coerce every value to str."""
        return values.astype(str)

    def fix_bool(self, values):
        """Coerce every value to bool."""
        return values.astype(bool)

    def fix_float(self, values):
        """Parse values as numbers (raises on unparseable input)."""
        return pd.to_numeric(values)

    def fix_int(self, values):
        """Coerce every value to int (truncates floats)."""
        return values.astype(int)

    def fix_timestamp(self, values):
        """Parse values as datetimes; unparseable entries become NaT and
        any timezone information is discarded."""
        fmt = self.datetime_format
        if fmt:
            parsed = pd.to_datetime(values, format=fmt, errors="coerce")
        else:
            parsed = pd.to_datetime(values, errors="coerce")
        # Drop the timezone if any
        return parsed.dt.tz_localize(None)
|
{"hexsha": "0eba977abb2f03a7d418f095454d843b1753ccba", "size": 785, "ext": "py", "lang": "Python", "max_stars_repo_path": "bqsqoop/utils/pandas_util.py", "max_stars_repo_name": "therako/bqsqoop", "max_stars_repo_head_hexsha": "43bb120a34811bfc62dc49095c39c68be9a49aba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bqsqoop/utils/pandas_util.py", "max_issues_repo_name": "therako/bqsqoop", "max_issues_repo_head_hexsha": "43bb120a34811bfc62dc49095c39c68be9a49aba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-07-27T12:52:06.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-12T13:40:08.000Z", "max_forks_repo_path": "bqsqoop/utils/pandas_util.py", "max_forks_repo_name": "therako/bqsqoop", "max_forks_repo_head_hexsha": "43bb120a34811bfc62dc49095c39c68be9a49aba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1666666667, "max_line_length": 69, "alphanum_fraction": 0.6356687898, "include": true, "reason": "import numpy", "num_tokens": 165}
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Conversion of < to ≤, along with a number of properties
------------------------------------------------------------------------
-- Possible TODO: Prove that a conversion ≤ → < → ≤ returns a
-- relation equivalent to the original one (and similarly for
-- < → ≤ → <).
open import Relation.Binary
module Relation.Binary.StrictToNonStrict
  {a ℓ₁ ℓ₂} {A : Set a}
  (_≈_ : Rel A ℓ₁) (_<_ : Rel A ℓ₂)
  where

open import Relation.Nullary
open import Relation.Binary.Consequences
open import Function
open import Data.Product
open import Data.Sum
open import Data.Empty

------------------------------------------------------------------------
-- Conversion

-- _<_ can be turned into _≤_ as follows:
-- x ≤ y is a disjoint union: either a strict proof or an equality proof.

_≤_ : Rel A _
x ≤ y = (x < y) ⊎ (x ≈ y)

------------------------------------------------------------------------
-- The converted relations have certain properties
-- (if the original relations have certain other properties)

-- _≈_ is included in _≤_ via the right injection.
reflexive : _≈_ ⇒ _≤_
reflexive = inj₂

-- Antisymmetry: an equality witness on either side suffices; two strict
-- proofs in opposite directions contradict asymmetry of _<_.
antisym : IsEquivalence _≈_ →
          Transitive _<_ →
          Irreflexive _≈_ _<_ →
          Antisymmetric _≈_ _≤_
antisym eq trans irrefl = as
  where
  module Eq = IsEquivalence eq

  as : Antisymmetric _≈_ _≤_
  as (inj₂ x≈y) _          = x≈y
  as (inj₁ _)   (inj₂ y≈x) = Eq.sym y≈x
  as (inj₁ x<y) (inj₁ y<x) =
    ⊥-elim (trans∧irr⟶asym {_≈_ = _≈_} Eq.refl trans irrefl x<y y<x)

-- Transitivity: case split on which side carries a strict proof,
-- transporting _<_ along _≈_ with the Respects₂ assumption.
trans : IsEquivalence _≈_ → _<_ Respects₂ _≈_ →
        Transitive _<_ → Transitive _≤_
trans eq <-resp-≈ <-trans = tr
  where
  module Eq = IsEquivalence eq

  tr : Transitive _≤_
  tr (inj₁ x<y) (inj₁ y<z) = inj₁ $ <-trans x<y y<z
  tr (inj₁ x<y) (inj₂ y≈z) = inj₁ $ proj₁ <-resp-≈ y≈z x<y
  tr (inj₂ x≈y) (inj₁ y<z) = inj₁ $ proj₂ <-resp-≈ (Eq.sym x≈y) y<z
  tr (inj₂ x≈y) (inj₂ y≈z) = inj₂ $ Eq.trans x≈y y≈z

-- _≤_ respects _≈_ on both sides, handled componentwise.
≤-resp-≈ : IsEquivalence _≈_ → _<_ Respects₂ _≈_ → _≤_ Respects₂ _≈_
≤-resp-≈ eq <-resp-≈ = ((λ {_ _ _} → resp₁) , (λ {_ _ _} → resp₂))
  where
  module Eq = IsEquivalence eq

  resp₁ : ∀ {x y' y} → y' ≈ y → x ≤ y' → x ≤ y
  resp₁ y'≈y (inj₁ x<y') = inj₁ (proj₁ <-resp-≈ y'≈y x<y')
  resp₁ y'≈y (inj₂ x≈y') = inj₂ (Eq.trans x≈y' y'≈y)

  resp₂ : ∀ {y x' x} → x' ≈ x → x' ≤ y → x ≤ y
  resp₂ x'≈x (inj₁ x'<y) = inj₁ (proj₂ <-resp-≈ x'≈x x'<y)
  resp₂ x'≈x (inj₂ x'≈y) = inj₂ (Eq.trans (Eq.sym x'≈x) x'≈y)

-- Totality of _≤_ follows from trichotomy of _<_.
total : Trichotomous _≈_ _<_ → Total _≤_
total <-tri x y with <-tri x y
... | tri< x<y x≉y x≯y = inj₁ (inj₁ x<y)
... | tri≈ x≮y x≈y x≯y = inj₁ (inj₂ x≈y)
... | tri> x≮y x≉y x>y = inj₂ (inj₁ x>y)

-- Decidability of _≤_ from decidability of both components.
decidable : Decidable _≈_ → Decidable _<_ → Decidable _≤_
decidable ≈-dec <-dec x y with ≈-dec x y | <-dec x y
... | yes x≈y | _       = yes (inj₂ x≈y)
... | no  x≉y | yes x<y = yes (inj₁ x<y)
... | no  x≉y | no  x≮y = no helper
  where
  helper : x ≤ y → ⊥
  helper (inj₁ x<y) = x≮y x<y
  helper (inj₂ x≈y) = x≉y x≈y

-- Decidability of _≤_ from a trichotomy proof alone.
decidable' : Trichotomous _≈_ _<_ → Decidable _≤_
decidable' compare x y with compare x y
... | tri< x<y _   _ = yes (inj₁ x<y)
... | tri≈ _   x≈y _ = yes (inj₂ x≈y)
... | tri> x≮y x≉y _ = no helper
  where
  helper : x ≤ y → ⊥
  helper (inj₁ x<y) = x≮y x<y
  helper (inj₂ x≈y) = x≉y x≈y
|
{"hexsha": "85434fdcf388404262c64bc5c5e2f7b2251b5305", "size": 3212, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "agda-stdlib-0.9/src/Relation/Binary/StrictToNonStrict.agda", "max_stars_repo_name": "qwe2/try-agda", "max_stars_repo_head_hexsha": "9d4c43b1609d3f085636376fdca73093481ab882", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-10-20T15:52:05.000Z", "max_stars_repo_stars_event_max_datetime": "2016-10-20T15:52:05.000Z", "max_issues_repo_path": "agda-stdlib-0.9/src/Relation/Binary/StrictToNonStrict.agda", "max_issues_repo_name": "qwe2/try-agda", "max_issues_repo_head_hexsha": "9d4c43b1609d3f085636376fdca73093481ab882", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agda-stdlib-0.9/src/Relation/Binary/StrictToNonStrict.agda", "max_forks_repo_name": "qwe2/try-agda", "max_forks_repo_head_hexsha": "9d4c43b1609d3f085636376fdca73093481ab882", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8846153846, "max_line_length": 72, "alphanum_fraction": 0.5261519303, "num_tokens": 1311}
|
#=
6 digit numbers
From
Gunnar Blom, Lars Holst, Dennis Sandell:
"Problems and Snapshots from the World of Probability"
Page 19f, Problem 2.5 Problems concerning random numbers
Given the 6 digits numbers:
a) Problem 1
find the probability that at least one of the digits 0..9
appears exactly twice.
Answer: 2943/4000 ~ 0.7358
b) Problem 2
find the probability that at least two of the digits 0..9
appears exactly once.
Answer: 1179/1250 ~ 0.9432
=#
using Turing # , StatsPlots, DataFrames
include("jl_utils.jl")  # project helpers (e.g. make_hash) -- defined elsewhere
# Note that we collect the interesting data to post process it.
# This is because we cannot add non random values in the chain.
# Global accumulator: one Bool per model evaluation for problem 1.
k_occ_coll1 = []
# occ_coll = []
# Model: draw n digits uniformly from 0..9 and record (in the global
# k_occ_coll1) whether at least m of the digits 0..9 occur exactly k times.
@model function six_digits(n, k, m)
    # d = TArray{Int}(undef, n)
    d = Vector{Int}(undef, n)
    for i in 1:n
        d[i] ~ DiscreteUniform(0,9)
    end
    # Number of occurrences of each digit in digits
    occ = make_hash(d)
    # push!(occ_coll, occ)
    # Number of digits that occurs exactly k times
    # (true iff >= m digits appear exactly k times in this draw)
    k_occ = sum(values(occ).|>x->x==k)>=m
    push!(k_occ_coll1, k_occ)
end
# 1) Find the probability that at least one of the digits 0..9
# appears exactly twice.
println("1) Find the probability that at least one of the digits 0..9\nappears exactly twice.")
model = six_digits(6,2,1)
num_chains = 4
num_samples = 10_000
# NOTE(review): 1000 samples are hard-coded here although num_samples is
# defined above (the second experiment uses num_samples) -- confirm intended.
chains = sample(model, MH(), MCMCThreads(), 1000, num_chains)
# It seems that MH() is the only sampler that can handle this
# chains = sample(model, MH(), 10000) #
# chains = sample(model, IS(), MCMCThreads(), num_samples, num_chains)
# chains = sample(model, PG(20), MCMCThreads(), 1000, num_chains)
# chains = sample(model, SMC(1000), MCMCThreads(), 1000, num_chains)
# NUTS, HMC, and HMCDA throws this error:
# chains = sample(model, NUTS(), 1000) # Error
# chains = sample(model, HMC(0.1, 5), 1000) # Same error as NUTS
# chains = sample(model, HMCDA(0.15, 0.65), 1000) # Same errors as NUTS
# display(chains)
# display(group(chains,:d))
# println("occ_coll:$occ_coll")
# Estimate = fraction of model evaluations where the event occurred.
println("mean: $(sum(k_occ_coll1)/length(k_occ_coll1))")
display(sort(make_hash(k_occ_coll1)))
# Note: Since we are updating an external array we have to
# build a new model here...
# 2) Find the probability that at least two of the digits 0..9
# appears exactly once.
# Global accumulator for problem 2 (one Bool per model evaluation).
k_occ_coll2 = []
# Model for problem 2: draw n digits uniformly from 0..9 and record (in the
# global k_occ_coll2) whether at least m of the digits 0..9 occur exactly k
# times. Rewritten in the modern `@model function` form for consistency with
# six_digits above (the old `@model name(args) = begin` style is deprecated).
@model function six_digits_2(n, k, m)
    # d2 = TArray{Int}(undef, n)
    d2 = Vector{Int}(undef, n)
    for i in 1:n
        d2[i] ~ DiscreteUniform(0,9)
    end
    # Number of occurrences of each digit in digits
    occ2 = make_hash(d2)
    # Number of digits that occurs exactly k times
    k_occ2 = sum(values(occ2).|>x->x==k)>=m
    push!(k_occ_coll2, k_occ2)
end
println("\n2) Find the probability that at least two of the digits 0..9\nappears exactly once.")
model2 = six_digits_2(6,1,2)
num_chains = 4
num_samples = 10_000
chains = sample(model2, MH(), MCMCThreads(), num_samples, num_chains)
# Estimate = fraction of model evaluations where the event occurred.
println("mean: $(sum(k_occ_coll2)/length(k_occ_coll2))")
display(sort(make_hash(k_occ_coll2)))
|
{"hexsha": "a17849009f77187cb2d8952aa17d3ede7a5d6e9a", "size": 3139, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/turing/6_digit_numbers.jl", "max_stars_repo_name": "Wikunia/hakank", "max_stars_repo_head_hexsha": "030bc928d2efe8dcbc5118bda3f8ae9575d0fd13", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia/turing/6_digit_numbers.jl", "max_issues_repo_name": "Wikunia/hakank", "max_issues_repo_head_hexsha": "030bc928d2efe8dcbc5118bda3f8ae9575d0fd13", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia/turing/6_digit_numbers.jl", "max_forks_repo_name": "Wikunia/hakank", "max_forks_repo_head_hexsha": "030bc928d2efe8dcbc5118bda3f8ae9575d0fd13", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6132075472, "max_line_length": 96, "alphanum_fraction": 0.6629499841, "num_tokens": 952}
|
# Parameters for the two-state (peace/war) complete-markets example.
beta = .96              # discount factor
y = [1.0, 2.0]          # government expenditures in peace / war
b0 = 0.0                # initial government debt
P = [0.8 0.2;
     0.4 0.6]           # Markov transition matrix over {peace, war}
cp = ConsumptionProblem(beta, y, b0, P)
Q = beta*P              # Arrow-security price matrix
N_simul = 150
c_bar, b1, b2 = consumption_complete(cp)
debt_complete = [b1, b2]
println("P = ", P)
println("Q= ", Q, "\n")
println("Govt expenditures in peace and war =", y)
println("Constant tax collections = ", c_bar)
println("Govt assets in two states = ", debt_complete)
msg = """
Now let's check the government's budget constraint in peace and war.
Our assumptions imply that the government always purchases 0 units of the
Arrow peace security.
"""
println(msg)
AS1 = Q[1,2] * b2       # spending on the war Arrow security while in peace
println("Spending on Arrow war security in peace = ", AS1)
AS2 = Q[2,2]*b2         # spending on the war Arrow security while in war
println("Spending on Arrow war security in war = ", AS2)
println("\n")
println("Government tax collections plus asset levels in peace and war")
TB1 = c_bar + b1
println("T+b in peace = ", TB1)
TB2 = c_bar + b2
println("T+b in war = ", TB2)
println("\n")
println("Total government spending in peace and war")
G1 = y[1] + AS1
G2 = y[2] + AS2
println("total govt spending in peace = ", G1)
println("total govt spending in war = ", G2)
println("\n")
println("Let's see ex post and ex ante returns on Arrow securities")
# BUGFIX: `1./Q` is ambiguous syntax in Julia 1.x (parsed as the float
# literal `1.` followed by `/`, which is rejected); the intent -- per the
# original `#reciprocal(Q)` comment -- is the elementwise reciprocal, so
# use the explicit broadcast form.
Pi = 1 ./ Q
exret = Pi              # ex post (realized) gross returns
println("Ex post returns to purchase of Arrow securities = $exret")
exant = Pi.*P           # ex ante expected returns
println("Ex ante returns to purchase of Arrow securities = $exant")
|
{"hexsha": "e099082e7b3f45a8f32a3c8f25039eb5bbc5359b", "size": 1449, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "smoothing/war_peace_example.jl", "max_stars_repo_name": "parkjt0506/QuantEconPractice", "max_stars_repo_head_hexsha": "2d83848dab7ed8d40efc9bbcf1e73aed7e5e532f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-06T04:09:21.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-06T04:09:21.000Z", "max_issues_repo_path": "smoothing/war_peace_example.jl", "max_issues_repo_name": "zhouweimin-econ/QuantEcon.lectures.code", "max_issues_repo_head_hexsha": "a8a17e753857e0157f18337264114ce7cb23e841", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "smoothing/war_peace_example.jl", "max_forks_repo_name": "zhouweimin-econ/QuantEcon.lectures.code", "max_forks_repo_head_hexsha": "a8a17e753857e0157f18337264114ce7cb23e841", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-04-21T21:41:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T22:09:19.000Z", "avg_line_length": 24.9827586207, "max_line_length": 74, "alphanum_fraction": 0.6570048309, "num_tokens": 435}
|
import numpy as np
from pyscf import gto
from pyscf.dft import rks
from pyscf.pbc import gto as pbcgto
from pyscf.pbc.dft import rks as pbcrks
def test_ke_cutoff(pseudo=None):
    """Compare periodic RKS energies with and without a kinetic-energy cutoff.

    For a single He atom centred in cubic boxes of increasing edge length L,
    run an RKS calculation twice per box: once with ``cell.ke_cutoff = 10``
    and once with no cutoff, and print the converged energies alongside the
    maximum kinetic energy representable on the FFT grid.

    The basic idea is that for a fixed KE cutoff the basis functions do not
    change too much as the box volume changes, so the energy dependence on
    box size should be smaller when a KE cutoff is employed.

    Args:
        pseudo: optional pseudopotential name assigned to ``cell.pseudo``.
    """
    eke_cut = []   # SCF energies with ke_cutoff = 10
    eno_cut = []   # SCF energies without a cutoff
    max_ke = []    # max kinetic energy on the plane-wave grid, per box
    Ls = [5, 10, 15, 20, 25, 30, 40, 50]
    for L in Ls:
        cell = pbcgto.Cell()
        cell.unit = 'B'
        cell.a = np.diag([L, L, L])
        cell.gs = np.array([20, 20, 20])
        cell.atom = [['He', (L/2., L/2., L/2.)]]
        cell.basis = {'He': [[0, (0.8, 1.0)],
                             [0, (1.0, 1.0)],
                             [0, (1.2, 1.0)]]}
        cell.pseudo = pseudo
        cell.ke_cutoff = 10
        cell.build()
        mf = pbcrks.RKS(cell)
        max_ke.append(np.max(0.5*np.einsum('gi,gi->g', cell.Gv, cell.Gv)))
        eke_cut.append(mf.scf())
        cell.ke_cutoff = None
        cell.build()
        mf = pbcrks.RKS(cell)
        eno_cut.append(mf.scf())
    # BUGFIX: Python-2-only `print` statements replaced with `print(...)`
    # calls that work under both Python 2 and Python 3.
    for i, L in enumerate(Ls):
        print("Ke Cutoff, L: %d, %f, %f" % (L, eke_cut[i], max_ke[i]))
    # Ke Cutoff, L: 5, -2.468773, 947.482023
    # Ke Cutoff, L: 10, -2.466350, 236.870506
    # Ke Cutoff, L: 15, -2.465358, 105.275780
    # Ke Cutoff, L: 20, -2.462961, 59.217626
    # Ke Cutoff, L: 25, -2.421159, 37.899281
    # Ke Cutoff, L: 30, -2.263560, 26.318945
    # Ke Cutoff, L: 40, -2.278470, 14.804407
    # Ke Cutoff, L: 50, -3.386092, 9.474820
    for i, L in enumerate(Ls):
        print("No Cutoff, L: %d, %f, %f" % (L, eno_cut[i], max_ke[i]))
    # No Cutoff, L: 5, -2.610023, 947.482023
    # No Cutoff, L: 10, -2.603423, 236.870506
    # No Cutoff, L: 15, -2.601986, 105.275780
    # No Cutoff, L: 20, -2.570535, 59.217626
    # No Cutoff, L: 25, -2.447315, 37.899281
    # No Cutoff, L: 30, -2.262098, 26.318945
    # No Cutoff, L: 40, -2.275831, 14.804407
    # No Cutoff, L: 50, -3.386092, 9.474820
|
{"hexsha": "dca0e7da041ca1b7148c65a6194a921723d6a5c6", "size": 2167, "ext": "py", "lang": "Python", "max_stars_repo_path": "pbc/tests/test_ke_cutoff.py", "max_stars_repo_name": "gmwang18/pyscf", "max_stars_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pbc/tests/test_ke_cutoff.py", "max_issues_repo_name": "gmwang18/pyscf", "max_issues_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pbc/tests/test_ke_cutoff.py", "max_forks_repo_name": "gmwang18/pyscf", "max_forks_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2837837838, "max_line_length": 74, "alphanum_fraction": 0.5482233503, "include": true, "reason": "import numpy", "num_tokens": 841}
|
# -*- coding: utf-8 -*-
""" gutils/numpy_/test/test_numpy_ """
import unittest
import numpy as np
from scipy import linalg
from gutils.numpy_.numpy_ import colnorms_squared_new, normcols, LabelMatrixManager, \
scale_using_general_min_max_values, split_numpy_array
class MatrixMixin:
    """Mixin that equips each test with a fresh random 3x3 matrix."""

    def setUp(self):
        # random_sample is the generator rand() delegates to, so the
        # produced values are identical draw-for-draw
        self.matrix = np.random.random((3, 3))
class Test_colnorms_squared_new(MatrixMixin, unittest.TestCase):
    """colnorms_squared_new must match the per-column sum of squares."""

    def test_function(self):
        expected = np.sum(self.matrix ** 2, axis=0)
        actual = colnorms_squared_new(self.matrix)
        self.assertTrue(np.array_equal(expected, actual))
class Test_normcols(MatrixMixin, unittest.TestCase):
    """normcols must divide each column by its Euclidean norm."""

    def test_normcols(self):
        expected = self.matrix / linalg.norm(self.matrix, axis=0)
        self.assertTrue(np.array_equal(expected, normcols(self.matrix)))
class Test_LabelMatrixManager(unittest.TestCase):
    """Round-trip tests between 1-D label vectors and 2-D one-hot matrices."""

    def setUp(self):
        # one label per sample, plus its (classes x samples) one-hot encoding
        self.labels_1d = np.array([0, 1, 0, 2, 2])
        self.labels_2d = np.array([[1, 0, 1, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 1]])

    def test_get_2d_matrix_from_1d_array(self):
        result = LabelMatrixManager.get_2d_matrix_from_1d_array(self.labels_1d, 3)
        self.assertTrue(np.array_equal(result, self.labels_2d))

    def test_get_2d_matrix_from_1d_array_with_empty_labels(self):
        # classes 1 and 4 never appear, so their one-hot rows stay all-zero
        source = np.array([0, 0, 3, 2, 3, 0, 5])
        expected = np.array([
            [1, 1, 0, 0, 0, 1, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0],
            [0, 0, 1, 0, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 1],
        ])
        result = LabelMatrixManager.get_2d_matrix_from_1d_array(source, 6)
        self.assertTrue(np.array_equal(result, expected))

    def test_get_1d_array_from_2d_matrix(self):
        result = LabelMatrixManager.get_1d_array_from_2d_matrix(self.labels_2d)
        self.assertTrue(np.array_equal(result, self.labels_1d))
class Test_scale_using_general_min_max_values(unittest.TestCase):
    """Tests for scale_using_general_min_max_values on 1-D and 2-D inputs."""

    def setUp(self):
        self.vector = np.array([1, 2, 3])
        self.matrix = np.array([[4, 5, 6], [7, 8, 9]])

    def test_vector_default_args(self):
        # default behaviour: min-max scale using the array's own extrema
        scaled_vector = (self.vector - self.vector.min()) / (self.vector.max() - self.vector.min())
        self.assertTrue(np.array_equal(
            scaled_vector,
            scale_using_general_min_max_values(self.vector)
        ))

    def test_vector_dtype(self):
        # BUGFIX: removed a dead `scaled_vector` computation that was never
        # used -- this test only checks the returned dtype.
        self.assertEqual(
            np.float32,
            scale_using_general_min_max_values(self.vector, dtype=np.float32).dtype
        )

    def test_vector_range(self):
        feats_range = [10, 20]
        scaled_vector = (self.vector - self.vector.min()) / (self.vector.max() - self.vector.min())
        scaled_vector *= feats_range[1] - feats_range[0]
        scaled_vector += feats_range[0]
        self.assertTrue(np.array_equal(
            scaled_vector,
            scale_using_general_min_max_values(self.vector, feats_range=feats_range)
        ))

    def test_vector_min_max_range(self):
        # scale using externally supplied extrema instead of the array's own
        min_val = -10
        max_val = 10
        feats_range = [10, 20]
        scaled_vector = (self.vector - min_val) / (max_val - min_val)
        scaled_vector *= feats_range[1] - feats_range[0]
        scaled_vector += feats_range[0]
        self.assertTrue(np.array_equal(
            scaled_vector,
            scale_using_general_min_max_values(self.vector, min_val, max_val, feats_range)
        ))

    def test_matrix_default_args(self):
        scaled_matrix = (self.matrix - self.matrix.min()) / (self.matrix.max() - self.matrix.min())
        self.assertTrue(np.array_equal(
            scaled_matrix,
            scale_using_general_min_max_values(self.matrix)
        ))

    def test_matrix_dtype(self):
        # BUGFIX: removed a dead `scaled_matrix` computation (same issue as
        # test_vector_dtype).
        self.assertEqual(
            np.float64,
            scale_using_general_min_max_values(self.matrix, dtype=np.float64).dtype
        )

    def test_matrix_range(self):
        feats_range = [10, 20]
        scaled_matrix = (self.matrix - self.matrix.min()) / (self.matrix.max() - self.matrix.min())
        scaled_matrix *= feats_range[1] - feats_range[0]
        scaled_matrix += feats_range[0]
        self.assertTrue(np.array_equal(
            scaled_matrix,
            scale_using_general_min_max_values(self.matrix, feats_range=feats_range)
        ))

    def test_matrix_min_max_range(self):
        min_val = -10
        max_val = 10
        feats_range = [10, 20]
        scaled_matrix = (self.matrix - min_val) / (max_val - min_val)
        scaled_matrix *= feats_range[1] - feats_range[0]
        scaled_matrix += feats_range[0]
        self.assertTrue(np.array_equal(
            scaled_matrix,
            scale_using_general_min_max_values(self.matrix, min_val, max_val, feats_range)
        ))
class Test_split_numpy_array(unittest.TestCase):
    """Tests for split_numpy_array(array, fraction, axis, shuffle)."""

    def test_1D_array(self):
        array = np.array(np.random.rand(10))
        bit1, bit2 = split_numpy_array(array, .3, 0, False)
        # BUGFIX: was assertTrue(expected, actual) -- the second argument of
        # assertTrue is the failure message, so the check always passed for a
        # truthy first argument; assertEqual actually compares the tuples.
        self.assertEqual((3, 7), (bit1.shape[0], bit2.shape[0]))
        self.assertTrue(np.array_equal(bit1, array[:3]))
        self.assertTrue(np.array_equal(bit2, array[3:]))

    def test_1D_array_with_shuffle(self):
        array = np.array(np.random.rand(10))
        bit1, bit2 = split_numpy_array(array, .3, 0, True)
        self.assertEqual((3, 7), (bit1.shape[0], bit2.shape[0]))
        # shuffled output should (almost surely) differ from a plain slice
        self.assertFalse(np.array_equal(bit1, array[:3]))
        self.assertFalse(np.array_equal(bit2, array[3:]))

    def test_2D_array_axis_0(self):
        axis = 0
        array_2D = np.random.rand(20, 10)
        bit1, bit2 = split_numpy_array(array_2D, .3, axis, False)
        self.assertEqual((6, 14), (bit1.shape[axis], bit2.shape[axis]))
        self.assertEqual((6, 10), bit1.shape)
        self.assertEqual((14, 10), bit2.shape)
        self.assertTrue(np.array_equal(bit1, array_2D[:6, :]))
        self.assertTrue(np.array_equal(bit2, array_2D[6:, :]))

    def test_2D_array_axis_0_with_shuffle(self):
        axis = 0
        array_2D = np.random.rand(20, 10)
        bit1, bit2 = split_numpy_array(array_2D, .3, axis, True)
        self.assertEqual((6, 14), (bit1.shape[axis], bit2.shape[axis]))
        self.assertEqual((6, 10), bit1.shape)
        self.assertEqual((14, 10), bit2.shape)
        self.assertFalse(np.array_equal(bit1, array_2D[:6, :]))
        self.assertFalse(np.array_equal(bit2, array_2D[6:, :]))

    def test_2D_array_axis_1(self):
        axis = 1
        array_2D = np.random.rand(20, 10)
        bit1, bit2 = split_numpy_array(array_2D, .3, axis, False)
        self.assertEqual((3, 7), (bit1.shape[axis], bit2.shape[axis]))
        self.assertEqual((20, 3), bit1.shape)
        self.assertEqual((20, 7), bit2.shape)
        self.assertTrue(np.array_equal(bit1, array_2D[:, :3]))
        self.assertTrue(np.array_equal(bit2, array_2D[:, 3:]))

    def test_2D_array_axis_1_with_shuffle(self):
        axis = 1
        array_2D = np.random.rand(20, 10)
        bit1, bit2 = split_numpy_array(array_2D, .3, axis, True)
        self.assertEqual((3, 7), (bit1.shape[axis], bit2.shape[axis]))
        self.assertEqual((20, 3), bit1.shape)
        self.assertEqual((20, 7), bit2.shape)
        self.assertFalse(np.array_equal(bit1, array_2D[:, :3]))
        self.assertFalse(np.array_equal(bit2, array_2D[:, 3:]))
# Run the full suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "55a699d0076a64d7c62c627952478dc8df5329c1", "size": 7536, "ext": "py", "lang": "Python", "max_stars_repo_path": "gutils/numpy_/test/test_numpy_.py", "max_stars_repo_name": "giussepi/gutils", "max_stars_repo_head_hexsha": "f43a207a4a10884096a5d92d81183c2213169c14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gutils/numpy_/test/test_numpy_.py", "max_issues_repo_name": "giussepi/gutils", "max_issues_repo_head_hexsha": "f43a207a4a10884096a5d92d81183c2213169c14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gutils/numpy_/test/test_numpy_.py", "max_forks_repo_name": "giussepi/gutils", "max_forks_repo_head_hexsha": "f43a207a4a10884096a5d92d81183c2213169c14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7609756098, "max_line_length": 99, "alphanum_fraction": 0.6239384289, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2034}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 30 20:35:22 2019
@author: icbab
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.font_manager as fm
# Register a Korean font so the Hangul titles/labels below render correctly.
# NOTE(review): assumes the .ttf exists at this absolute path -- confirm.
font_location = 'C:/HANDotum.ttf'
# ex - 'C:/asiahead4.ttf'
font_name = fm.FontProperties(fname = font_location).get_name()
matplotlib.rc('font', family = font_name)
# Draw 100 correlated 2-D points ("exam scores"): standard-normal samples
# sheared by S, then scaled and shifted to be centred near (40, 40).
D = np.random.randn(2, 100)
S = np.array([
    [3, 1],
    [1, 1]
])
D = np.dot(S, D)
D = D * 10 + 40
# Map into the valid score range [0, 100]: reflect negatives with abs(),
# then cap at 100. Vectorized replacement for the original O(n*m)
# element-by-element double loop (identical results).
D = np.minimum(np.abs(D), 100)
plt.scatter(D[0, :], D[1, :])
plt.title('시험 점수의 분포')
plt.xlabel('국어 점수')
plt.ylabel('영어 점수')
plt.ylim([0, 100])
# positional form works on every Matplotlib version (the `b=` keyword was
# deprecated in 3.5 and removed in 3.8)
plt.grid(True)
plt.savefig('C:/angeloyeo.github.io/pics/2019-07-27_PCA/pic1.png', dpi = 300)
plt.show()
# Same scatter again, overlaid with two candidate projection axes (red
# dashed: slope 1; blue dash-dot: slope 4/6) for the PCA illustration.
plt.scatter(D[0, :], D[1, :])
plt.plot([0, 100], [0, 100], 'r--', linewidth = 2)
plt.plot([0, 100], [0, 100*4/6], 'b-.', linewidth = 2)
plt.title('시험 점수의 분포')
plt.xlabel('국어 점수')
plt.ylabel('영어 점수')
plt.ylim([0, 100])
# positional form works on every Matplotlib version (the `b=` keyword was
# deprecated in 3.5 and removed in 3.8)
plt.grid(True)
plt.savefig('C:/angeloyeo.github.io/pics/2019-07-27_PCA/pic2.png', dpi= 300)
plt.show()
|
{"hexsha": "d91d1d6f6990117bc6b0adebe63a66031e2c19e9", "size": 1222, "ext": "py", "lang": "Python", "max_stars_repo_path": "pics/2019-07-27_PCA/score_dist.py", "max_stars_repo_name": "reporeferences/angeloyeo.github.io", "max_stars_repo_head_hexsha": "c249e860665b918d1144f9cc5093dbe24b384224", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2019-06-16T00:33:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T05:25:37.000Z", "max_issues_repo_path": "pics/2019-07-27_PCA/score_dist.py", "max_issues_repo_name": "reporeferences/angeloyeo.github.io", "max_issues_repo_head_hexsha": "c249e860665b918d1144f9cc5093dbe24b384224", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pics/2019-07-27_PCA/score_dist.py", "max_forks_repo_name": "reporeferences/angeloyeo.github.io", "max_forks_repo_head_hexsha": "c249e860665b918d1144f9cc5093dbe24b384224", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2019-12-11T11:28:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T02:27:05.000Z", "avg_line_length": 21.0689655172, "max_line_length": 77, "alphanum_fraction": 0.5801963993, "include": true, "reason": "import numpy", "num_tokens": 456}
|
[STATEMENT]
lemma forward_UV_lists_arg_min_ex:
"\<lbrakk>finite xs; ys \<noteq> {};
ys = {x. set x = xs \<and> distinct x \<and> take 1 x = [r] \<and> forward x \<and> (\<forall>xs \<in> Y. sublist xs x)}\<rbrakk>
\<Longrightarrow> \<exists>y \<in> ys. \<forall>z \<in> ys. (f :: 'a list \<Rightarrow> real) y \<le> f z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>finite xs; ys \<noteq> {}; ys = {x. set x = xs \<and> distinct x \<and> take 1 x = [r] \<and> forward x \<and> (\<forall>xs\<in>Y. sublist xs x)}\<rbrakk> \<Longrightarrow> \<exists>y\<in>ys. \<forall>z\<in>ys. f y \<le> f z
[PROOF STEP]
using forward_UV_lists_finite forward_UV_lists_arg_min_ex_aux
[PROOF STATE]
proof (prove)
using this:
finite ?xs \<Longrightarrow> finite {x. set x = ?xs \<and> distinct x \<and> take 1 x = [?r] \<and> forward x \<and> (\<forall>xs\<in>?Y. sublist xs x)}
\<lbrakk>finite ?ys; ?ys \<noteq> {}; ?ys = {x. set x = ?xs \<and> distinct x \<and> take 1 x = [?r] \<and> forward x \<and> (\<forall>xs\<in>?Y. sublist xs x)}\<rbrakk> \<Longrightarrow> \<exists>y\<in>?ys. \<forall>z\<in>?ys. ?f y \<le> ?f z
goal (1 subgoal):
1. \<lbrakk>finite xs; ys \<noteq> {}; ys = {x. set x = xs \<and> distinct x \<and> take 1 x = [r] \<and> forward x \<and> (\<forall>xs\<in>Y. sublist xs x)}\<rbrakk> \<Longrightarrow> \<exists>y\<in>ys. \<forall>z\<in>ys. f y \<le> f z
[PROOF STEP]
by auto
|
{"llama_tokens": 583, "file": "Query_Optimization_IKKBZ_Optimality", "length": 2}
|
import os
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, classification_report
from plot_cm import plot_cm
def pat_meta_info(pro_data_dir):
    """Print summary statistics for the head-and-neck CT contrast datasets.

    Reads the annotation/clinical CSV files and the train/val/test patient
    split files from ``pro_data_dir`` and prints, to stdout:
      * CT artifact counts per split (from ContrastAnnotation_HN.csv),
      * patient demographics / staging / HPV status (clinical_meta_data.csv),
      * CT scanner model, kVp, slice-thickness and pixel-spacing statistics,
      * contrast-bolus labels taken from the metadata, compared against the
        expert labels, including confusion matrices saved via ``plot_cm``.

    Args:
        pro_data_dir: directory containing the pre-processed CSV files.

    Returns:
        None. Results are printed; confusion-matrix plots are written by
        ``plot_cm`` to a hard-coded ``save_dir``.
    """
    #-----------------------------------------
    # CT scan artifacts
    #-----------------------------------------
    df = pd.read_csv(os.path.join(pro_data_dir, 'ContrastAnnotation_HN.csv'))
    df.drop_duplicates(subset=['Patient ID'], keep='last', inplace=True)
    df.dropna(subset=['Artifact-OP'], inplace=True)
    print('annotation data with no duplicates:', df.shape[0])
    train_data = pd.read_csv(os.path.join(pro_data_dir, 'train_pat_df.csv'))
    val_data = pd.read_csv(os.path.join(pro_data_dir, 'val_pat_df.csv'))
    test_data = pd.read_csv(os.path.join(pro_data_dir, 'test_pat_df.csv'))
    tot_data = pd.concat([train_data, val_data, test_data])
    datas = [tot_data, train_data, val_data, test_data]
    names = ['tot', 'train', 'val', 'test']
    ## artifacts for train, val, test and tot dataset
    for name, data in zip(names, datas):
        pat_ids = []
        artifacts = []
        for patientid, artifact, note in zip(df['Patient ID'], df['Artifact-OP'], df['Notes']):
            ## use consistent ID
            # map the four institutions' raw IDs onto the naming used in
            # the split files (CHUM###, CHUS###, PMH###, MDACC###)
            if patientid[3:7] == 'CHUM':
                pat_id = 'CHUM' + patientid[-3:]
            elif patientid[3:7] == 'CHUS':
                pat_id = 'CHUS' + patientid[-3:]
            elif patientid[:3] == 'OPC':
                pat_id = 'PMH' + patientid[-3:]
            elif patientid[:5] == 'HNSCC':
                pat_id = 'MDACC' + patientid[-3:]
            ## find very severe artifacts
            if note == 'really bad artifact':
                artifact = 'very bad'
            else:
                artifact = artifact
            ## append artifacts
            if pat_id in data['ID'].to_list():
                pat_ids.append(pat_id)
                artifacts.append(artifact)
        df_af = pd.DataFrame({'ID': pat_ids, 'Artifact-OP': artifacts})
        print('----------------------------')
        print(name)
        print('----------------------------')
        print('data size:', df_af.shape[0])
        print('data with artifact:', df_af.loc[df_af['Artifact-OP'].isin(['Bad', 'Yes', 'Minimal'])].shape[0])
        print(df_af['Artifact-OP'].value_counts())
        print(df_af['Artifact-OP'].value_counts(normalize=True).round(3))
    #-----------------------------------------
    # clean and group metadata
    #-----------------------------------------
    df = pd.read_csv(os.path.join(pro_data_dir, 'clinical_meta_data.csv'))
    print('\nmeta data size:', df.shape[0])
    df.drop_duplicates(subset=['patientid'], keep='last', inplace=True)
    print('meta data with no duplicates:', df.shape[0])
    ## combine HPV info from tow cols
    # concatenating the two HPV columns as strings yields values like
    # 'Positivenan' / 'nannegative', which are normalised below
    hpvs = []
    df['hpv'] = df.iloc[:, 8].astype(str) + df.iloc[:, 9].astype(str)
    for hpv in df['hpv']:
        if hpv in ['nannan', 'Unknownnan', 'Nnan', 'Not testednan', 'no tissuenan']:
            hpv = 'unknown'
        elif hpv in [' positivenan', 'Pnan', '+nan', 'nanpositive', 'Positivenan',
                     'Positive -Strongnan', 'Positive -focalnan']:
            hpv = 'positive'
        elif hpv in [' Negativenan', 'Negativenan', '-nan', 'nannegative']:
            hpv = 'negative'
        hpvs.append(hpv)
    df['hpv'] = hpvs
    ## overall stage
    # collapse English/French spellings of the AJCC stage into I..IV
    stages = []
    for stage in df['ajccstage']:
        if stage in ['I', 'Stade I']:
            stage = 'I'
        elif stage in ['II', 'Stade II', 'StageII']:
            stage = 'II'
        elif stage in ['III', 'Stade III', 'Stage III']:
            stage = 'III'
        elif stage in ['IVA', 'IV', 'IVB', 'Stade IVA', 'Stage IV', 'Stade IVB']:
            stage = 'IV'
        stages.append(stage)
    df['ajccstage'] = stages
    ## primary cancer sites
    sites = []
    for site in df['diseasesite']:
        if site in ['Oropharynx']:
            site = site
        elif site in ['Larynx', 'Hypopharynx', 'Nasopharynx']:
            site = 'Larynx/Hypopharynx/Nasopharynx'
        elif site in ['Oral cavity']:
            site = site
        else:
            site = 'Unknown/Other'
        sites.append(site)
    df['diseasesite'] = sites
    ## sex
    df['gender'].replace(['F'], 'Female', inplace=True)
    df['gender'].replace(['M'], 'Male', inplace=True)
    #-----------------------------------------
    # patient meta data
    #-----------------------------------------
    ## actual patient data with images
    train_data = pd.read_csv(os.path.join(pro_data_dir, 'train_pat_df.csv'))
    val_data = pd.read_csv(os.path.join(pro_data_dir, 'val_pat_df.csv'))
    test_data = pd.read_csv(os.path.join(pro_data_dir, 'test_pat_df.csv'))
    print('train data:', train_data.shape[0])
    print('val data:', val_data.shape[0])
    print('test data:', test_data.shape[0])
    ## print contrast info in train, val, test sets
    datas = [train_data, val_data, test_data]
    names = ['train', 'val', 'test']
    for data, name in zip(datas, names):
        print('\n')
        print('----------------------------')
        print(name)
        print('----------------------------')
        print(data['label'].value_counts())
        print(data['label'].value_counts(normalize=True).round(3))
    ## find patient metadata
    datas = [train_data, val_data, test_data]
    metas = []
    for data in datas:
        ids = []
        genders = []
        ages = []
        tcats = []
        stages = []
        sites = []
        ncats = []
        hpvs = []
        ## find meta info
        for patientid, gender, age, t_cat, ajccstage, site, n_cat, hpv in zip(
            df['patientid'], df['gender'], df['ageatdiag'], df['t-category'],
            df['ajccstage'], df['diseasesite'], df['n-category'], df['hpv']):
            ## 4 datasets
            if patientid[3:7] == 'CHUM':
                pat_id = 'CHUM' + patientid[-3:]
            elif patientid[3:7] == 'CHUS':
                pat_id = 'CHUS' + patientid[-3:]
            elif patientid[:3] == 'OPC':
                pat_id = 'PMH' + patientid[-3:]
            elif patientid[:5] == 'HNSCC':
                pat_id = 'MDACC' + patientid[-3:]
            if pat_id in data['ID'].to_list():
                #print(pat_id)
                ids.append(patientid)
                genders.append(gender)
                ages.append(age)
                tcats.append(t_cat)
                stages.append(ajccstage)
                sites.append(site)
                ncats.append(n_cat)
                hpvs.append(hpv)
        ## create new df for train, val, test meta info
        meta = pd.DataFrame(
            {'id': ids,
             'gender': genders,
             'age': ages,
             't_stage': tcats,
             'stage': stages,
             'site': sites,
             'n_stage': ncats,
             'hpv': hpvs}
            )
        metas.append(meta)
    ## concat 3 datasets to 1 big dataset
    all_meta = pd.concat([metas[0], metas[1], metas[2]])
    metas.append(all_meta)
    ## print meta info
    # NOTE(review): the loop variable below shadows the clinical-meta `df`;
    # this is safe here only because the next section re-reads `df` from disk.
    datasets = ['train', 'val', 'test', 'all']
    for df, dataset in zip(metas, datasets):
        print('\n')
        print('----------------------------')
        print(dataset)
        print('----------------------------')
        print('patient number:', df.shape[0])
        print('\n')
        print(df['gender'].value_counts())
        print(df['gender'].value_counts(normalize=True).round(3))
        print('\n')
        print(df['t_stage'].value_counts())
        print(df['t_stage'].value_counts(normalize=True).round(3))
        print('\n')
        print(df['stage'].value_counts())
        print(df['stage'].value_counts(normalize=True).round(3))
        print('\n')
        print(df['site'].value_counts())
        print(df['site'].value_counts(normalize=True).round(3))
        print('\n')
        print(df['n_stage'].value_counts())
        print(df['n_stage'].value_counts(normalize=True).round(3))
        print('\n')
        print(df['hpv'].value_counts())
        print(df['hpv'].value_counts(normalize=True).round(3))
        print('\n')
        print('mediam age:', df['age'].median())
        print('age max:', df['age'].max())
        print('age min:', df['age'].min())
        print('---------------------------------------------')
    #------------------------------------------------------------
    # CT meata data
    #------------------------------------------------------------
    df = pd.read_csv(os.path.join(pro_data_dir, 'clinical_meta_data.csv'))
    df.drop_duplicates(subset=['patientid'], keep='last', inplace=True)
    print(df.shape[0])
    print(all_meta.shape[0])
    # keep only the metadata rows whose patient appears in the splits
    df0 = df[~df['patientid'].isin(all_meta['id'].to_list())]
    df = df[~df['patientid'].isin(df0['patientid'].to_list())]
    print('patient not in list:', df.shape[0])
    ## combine CT scanner and model names
    IDs = []
    for manufacturer, model in zip(df['manufacturer'], df['manufacturermodelname']):
        ID = str(manufacturer) + ' ' + str(model)
        IDs.append(ID)
    df['ID'] = IDs
    #print(df['manufacturer'].value_counts())
    print('-------------------')
    print('CT scanner')
    print('-------------------')
    #print(df['manufacturermodelname'].value_counts())
    #print(df['manufacturermodelname'].value_counts(normalize=True).round(3))
    print(df['ID'].value_counts())
    print(df['ID'].value_counts(normalize=True).round(3))
    print(df.shape[0])
    ## KVP
    print('\n')
    print('-------------------')
    print('KVP')
    print('-------------------')
    print('kvp mean:', df['kvp'].mean().round(3))
    print('kvp median:', df['kvp'].median())
    print('kvp mode:', df['kvp'].mode())
    print('kvp std:', df['kvp'].std().round(3))
    print('kvp min:', df['kvp'].min())
    print('kvp max:', df['kvp'].max())
    ## slice thickness
    print('\n')
    print('-------------------')
    print('slice thickness')
    print('-------------------')
    print('thk mean:', df['slicethickness'].mean().round(3))
    print('thk median:', df['slicethickness'].median())
    print('thk mode:', df['slicethickness'].mode())
    print('thk std:', df['slicethickness'].std().round(3))
    print('thk min:', df['slicethickness'].min())
    print('thk max:', df['slicethickness'].max())
    print(df['slicethickness'].value_counts())
    print(df['slicethickness'].shape[0])
    ## spatial resolution
    print('\n')
    print(df['rows'].value_counts())
    ## pixel spacing
    # values look like "['0.97', '0.97']" strings; take the first number
    pixels = []
    for pixel in df['pixelspacing']:
        pixel = pixel.split("'")[1]
        pixel = float(pixel)
        pixels.append(pixel)
    df['pixel'] = pixels
    df['pixel'].round(3)
    print('\n')
    print('-------------------')
    print('pixel size')
    print('-------------------')
    print('pixel mean:', df['pixel'].mean().round(3))
    print('pixel median:', df['pixel'].median().round(3))
    print('pixel mode:', df['pixel'].mode().round(3))
    print('pixel std:', df['pixel'].std().round(3))
    print('pixel min:', df['pixel'].min().round(3))
    print('pixel max:', df['pixel'].max().round(3))
    data = pd.concat([train_data, val_data, test_data])
    #-----------------------------------------------------------------
    # contrast information from mata data
    #----------------------------------------------------------------
    df = pd.read_csv(os.path.join(pro_data_dir, 'clinical_meta_data.csv'))
    print('\n')
    print('-----------------------------------')
    print('contrast information from meta dta')
    print('-----------------------------------')
    print(df['contrastbolusagent'].value_counts())
    print(df['contrastbolusagent'].value_counts(normalize=True).round(3))
    list_contrast = set(df['contrastbolusagent'].to_list())
    print('contrast agents bolus number:', len(list_contrast))
    print(list_contrast)
    # sentinel 2 = "contrast unknown" (missing in the metadata); handled
    # explicitly in the comparisons below
    df['contrastbolusagent'] = df['contrastbolusagent'].fillna(2)
    pat_ids = []
    contrasts = []
    for patientid, contrast in zip(df['patientid'], df['contrastbolusagent']):
        if patientid[3:7] == 'CHUM':
            pat_id = 'CHUM' + patientid[-3:]
        elif patientid[3:7] == 'CHUS':
            pat_id = 'CHUS' + patientid[-3:]
        elif patientid[:3] == 'OPC':
            pat_id = 'PMH' + patientid[-3:]
        elif patientid[:5] == 'HNSCC':
            pat_id = 'MDACC' + patientid[-3:]
        if pat_id in data['ID'].to_list():
            pat_ids.append(pat_id)
            ## change contrast annotation in meta data
            # 0 = no contrast, 1 = contrast, 2 = unknown
            if contrast in ['N', 'n', 'NO']:
                contrast = 0
            elif contrast == 2:
                contrast = contrast
            else:
                contrast = 1
            contrasts.append(contrast)
    df = pd.DataFrame({'ID': pat_ids, 'contrast': contrasts})
    ## match metadata annotations with clinical expert
    ids = []
    contrasts = []
    labels = []
    for ID, label in zip(data['ID'], data['label']):
        for pat_id, contrast in zip(df['ID'], df['contrast']):
            if pat_id == ID and contrast != 2 and contrast != label:
                ids.append(pat_id)
                contrasts.append(contrast)
                labels.append(label)
    print('\n')
    print('-----------------------------------')
    print('contrast information from meta dta')
    print('-----------------------------------')
    print('mismatch ID:', ids)
    print('mismatch label:', labels)
    print('mismatch label:', contrasts)
    print('mismatch number:', len(contrasts))
    print('total patient:', df['contrast'].shape[0])
    print(df['contrast'].value_counts())
    print(df['contrast'].value_counts(normalize=True).round(3))
    ## print contrast info in train, val, test sets
    datas = [train_data, val_data, test_data]
    names = ['train', 'val', 'test']
    conss = []
    for data, name in zip(datas, names):
        cons = []
        IDs = []
        labels = []
        for ID, label in zip(data['ID'], data['label']):
            for pat_id, con in zip(df['ID'], df['contrast']):
                if pat_id == ID:
                    cons.append(con)
                    labels.append(label)
                    IDs.append(pat_id)
        df_con = pd.DataFrame({'ID': IDs, 'label': labels, 'contrast': cons})
        conss.append(df_con)
    names = ['train', 'val', 'test']
    for name, con in zip(names, conss):
        print('\n')
        print('----------------------------')
        print(name)
        print('----------------------------')
        print(con['contrast'].value_counts())
        print(con['contrast'].value_counts(normalize=True).round(3))
        #print(con['label'])
    #--------------------------------------------------------------------
    # calculate confusion matrix, accuracy and AUC for contrast metadata
    #--------------------------------------------------------------------
    for name, con in zip(['val', 'test'], [conss[1], conss[2]]):
        # unknown metadata labels (2) are counted as disagreements: flip
        # them to the opposite of the expert label
        contrasts = []
        for contrast, label in zip(con['contrast'], con['label']):
            if contrast == 2 and label == 0:
                contrast = 1
            elif contrast == 2 and label == 1:
                contrast = 0
            else:
                contrast = contrast
            contrasts.append(contrast)
        con['contrast'] = contrasts
        cm = confusion_matrix(con['label'], con['contrast'])
        cm_norm = cm.astype('float')/cm.sum(axis=1)[:, np.newaxis]
        cm_norm = np.around(cm_norm, 2)
        print('\n')
        print(name)
        print(cm_norm)
        print(cm)
        # per-class TP/FP/FN/TN derived from the confusion matrix
        FP = cm.sum(axis=0) - np.diag(cm)
        FN = cm.sum(axis=1) - np.diag(cm)
        TP = np.diag(cm)
        TN = cm.sum() - (FP + FN + TP)
        ACC = (TP + TN)/(TP + FP + FN + TN)
        TPR = TP/(TP + FN)
        TNR = TN/(TN + FP)
        AUC = (TPR + TNR)/2
        report = classification_report(con['label'], con['contrast'])
        print('AUC:', np.around(AUC[1], 3))
        print('ACC:', np.around(ACC[1], 3))
        print('report:', report)
        # plot confusion matrix
        save_dir = '/mnt/aertslab/USERS/Zezhong/contrast_detection/metadata'
        for cm0, cm_type in zip([cm, cm_norm], ['raw', 'norm']):
            plot_cm(
                cm0=cm0,
                cm_type=cm_type,
                level=name,
                save_dir=save_dir
            )
# Script entry point: run the patient-metadata contrast analysis on the
# preprocessed data directory (path is machine-specific).
if __name__ == '__main__':
    pro_data_dir = '/home/bhkann/zezhong/git_repo/IV-Contrast-CNN-Project/pro_data'
    pat_meta_info(pro_data_dir)
|
{"hexsha": "776fbf74cafd94ce127050ccea0e383062295e21", "size": 17319, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/HN_patient_meta.py", "max_stars_repo_name": "xmuyzz/IVContrast", "max_stars_repo_head_hexsha": "f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-23T09:05:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T20:18:18.000Z", "max_issues_repo_path": "src/utils/HN_patient_meta.py", "max_issues_repo_name": "xmuyzz/IVContrast", "max_issues_repo_head_hexsha": "f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/HN_patient_meta.py", "max_forks_repo_name": "xmuyzz/IVContrast", "max_forks_repo_head_hexsha": "f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.744966443, "max_line_length": 110, "alphanum_fraction": 0.5082856978, "include": true, "reason": "import numpy", "num_tokens": 4241}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 26 11:08:30 2018
@author: wangyf
"""
'''
Object-Oriented Version of the Lattice Building function
'''
import numpy as np
import math
import json
import networkx as nx
from networkx.algorithms import isomorphism as iso
from itertools import combinations
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#%%
'''
Basic functions
'''
def two_points_D(A, B):
    '''
    Euclidean distance between two points A and B (any dimension),
    rounded to three decimal places.
    '''
    squared_sum = sum((ai - bi) ** 2 for ai, bi in zip(A, B))
    dist = math.sqrt(squared_sum)
    # round off to three decimal digits (same formatting as original)
    return float(format(dist, ' .3f'))
def two_points_D_np(A, B):
    '''
    Euclidean distance between A and B via the numpy 2-norm.
    Unlike two_points_D, the result is not rounded.
    '''
    difference = np.asarray(A) - np.asarray(B)
    return np.linalg.norm(difference)
def drawing(G):
    '''
    Draw graph G in 2D using its stored 'pos' and 'color' node attributes.
    Returns the matplotlib.pyplot module so callers can add titles etc.
    '''
    node_colors = nx.get_node_attributes(G, 'color')
    node_positions = nx.get_node_attributes(G, 'pos')
    plt.figure()
    nx.draw(G, node_positions, with_labels=False,
            node_color=list(node_colors.values()))
    return plt
def drawing3D(G, pos, sitetype, cell = []):
    '''
    Draw the 3D graph and color code the occupied
    based on different site types.

    G        - networkx graph with node attribute 'color'
               ('grey' = empty, 'r' = occupied)
    pos      - (n, 3) array of cartesian node coordinates
    sitetype - per-node site label ('a'/'top', 'c'/'bridge', 'b'/'hollow')
    cell     - optional 3x3 cell matrix; when non-empty, the cell box
               is drawn via plot_3D_box
    Returns (fig, ax).
    '''
    cart_coords_3d = pos.copy()
    neighbor_list = list(G.edges)
    node_colors = list(nx.get_node_attributes(G,'color').values())
    #nodes_layers = list(nx.get_node_attributes(G,'z').values())
    plot_neighbs = True
    y_rotate = -30
    z_rotate = 10
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111, projection='3d')
    #ax.set_aspect('equal')
    if plot_neighbs:
        # draw a thin dashed line between every pair of connected nodes
        for pair in neighbor_list: # neighbors
            p1 = cart_coords_3d[pair[0]]
            p2 = cart_coords_3d[pair[1]]
            ax.plot([p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]], '--k', linewidth=0.05)
    for i, point_i in enumerate(cart_coords_3d):
        if node_colors[i] == 'grey':
            # empty lattice point: small translucent grey marker
            point_color = 'grey'
            ax.scatter(point_i[0], point_i[1], point_i[2], marker= 'o' , color=point_color, s= 30, alpha = 0.5)
        if node_colors[i] == 'r':
            # occupied point: color by its site type
            if sitetype[i] == 'a' or sitetype[i] == 'top': point_color = 'green'
            elif sitetype[i] == 'c' or sitetype[i] =='bridge': point_color = 'darkorange'
            elif sitetype[i] == 'b' or sitetype[i] == 'hollow': point_color = 'blue'
            ax.scatter(point_i[0], point_i[1], point_i[2], marker= 'o' , color=point_color, s= 100, edgecolors = 'k', alpha = 0.9)
    '''
    Set axis in equal scale
    '''
    X_scatter = cart_coords_3d[:,0]
    Y_scatter = cart_coords_3d[:,1]
    Z_scatter = cart_coords_3d[:,2]
    max_range = np.array([X_scatter.max()-X_scatter.min(), Y_scatter.max()-Y_scatter.min(), Z_scatter.max()-Z_scatter.min()]).max()
    ax.set_xlim(X_scatter.min(), X_scatter.min() + max_range)
    ax.set_ylim(Y_scatter.min(), Y_scatter.min() + max_range)
    ax.set_zlim(Z_scatter.min(), Z_scatter.min() + max_range)
    ax.axis('off')
    ax.view_init(z_rotate, y_rotate)
    # Add the cell vector
    if not cell == []:
        ax = plot_3D_box(ax, pos, cell)
    plt.tight_layout()
    return fig, ax
def LeaveOneOut(A, a):
    '''
    Return a new list containing every element of A except the one
    at index a (the original list is untouched).
    '''
    result = []
    for idx, item in enumerate(A):
        if idx != a:
            result.append(item)
    return result
def add_z(v, z):
    '''
    Append a constant z column to a 2D array of xy coordinates,
    returning an (n, 3) float array.
    '''
    z_column = np.full(len(v), float(z))
    return np.column_stack((v, z_column))
def cal_layers(mother, dz, config):
    '''
    Layer index of each atom in config: its z coordinate divided by the
    layer spacing dz, rounded to the nearest integer.
    '''
    z_coords = mother[np.array(config)][:, 2]
    return np.around(z_coords / dz, decimals=0).astype(int)
def get_layers(mother, dz, config):
    '''
    Number of layers spanned by a configuration: the maximum rounded
    z/dz layer index among the atoms listed in config.
    '''
    positions = mother[np.array(config)]
    layer_ids = np.around(positions[:, 2] / dz, decimals=0).astype(int)
    n_layers = np.amax(layer_ids)
    return n_layers
def get_node_layer_dict(mother, dz):
    '''
    Map layer slot -> node indices. Key k (0-based) holds the nodes whose
    rounded z/dz layer index equals k + 1.
    NOTE(review): nodes in layer 0 (z ~ 0), if any exist, are never
    included — confirm all lattices built here start at layer 1.
    '''
    layer_ids = np.around(mother[:, 2] / dz, decimals=0).astype(int)
    top_layer = np.amax(layer_ids)
    node_layer_dict = {}
    for slot in range(top_layer):
        members = np.where(layer_ids == slot + 1)[0]
        node_layer_dict[slot] = list(members)
    return node_layer_dict
def get_NPd_list(config_list):
    '''
    Number of Pd atoms (occupied sites) in each configuration,
    returned as a numpy array.
    '''
    counts = [len(config) for config in config_list]
    return np.array(counts)
#%%
'''
Construct periodic boundary conditions from ase package
Contains one cube and one fcc (ABC) structure
'''
from ase.build import bcc100, fcc111
def plot_3D_box(ax, pos, cell):
    '''
    Draw the simulation cell as a dashed black box on an existing 3D axis.

    ax   - matplotlib 3D axis to draw on
    pos  - (n, 3) lattice coordinates; the box is anchored at the lowest
           z value of the lattice
    cell - 3x3 cell matrix; box corners are the unit-cube corners
           transformed by this matrix
    Returns the axis.
    '''
    # corners of the unit cube, mapped through the cell matrix below
    unit_points = np.array([[0, 0, 0],
                            [1, 0, 0],
                            [1, 1, 0],
                            [0, 1, 0],
                            [0, 0, 1],
                            [1, 0, 1 ],
                            [1, 1, 1],
                            [0, 1, 1]])
    Z = np.zeros((8,3))
    for i in range(len(Z)): Z[i,:] = np.dot(unit_points[i,:], cell)
    Z[:,2] = Z[:,2] + np.min(pos[:,2]) # Shift the cell to the base layer in z direction
    for i in range(0, len(Z)):
        ax.scatter(Z[i,0], Z[i, 1], Z[i, 2], color = 'k')
        #ax.text(Z[i,0], Z[i, 1], Z[i, 2], '%s' % (str(i)) )
    # the 12 edges of the box, as corner-index pairs into Z
    edges = [[0,1],[1,2],[2,3],[0,3],
             [0,4],[1,5],[2,6],[3,7],
             [4,5],[5,6],[6,7],[7,4]]
    for ei in edges:
        ax.plot(Z[ei,0], Z[ei,1], Z[ei,2], 'k--')
    return ax
## Make a plot function for the lattice
def plot_CE_lattice(pos, view_lables = False, cell = [], initial_index = 0, input_color = 'lightgrey'):
    '''
    plot the cluster expansion lattice
    and draw dash line for the cell vector

    pos           - (n, 3) cartesian coordinates of lattice points
    view_lables   - if True, label each point with its index
                    (spelling kept as-is for API compatibility)
    cell          - optional 3x3 cell matrix; when non-empty the cell box
                    is drawn via plot_3D_box
    initial_index - offset added to the printed point labels
    input_color   - marker/label color
    '''
    cart_coords_3d = pos
    fig = plt.figure(figsize=(16, 16))
    ax = fig.add_subplot(111, projection='3d')
    y_rotate = -30
    z_rotate = 10
    point_color = input_color
    label_color = input_color
    for i, point_i in enumerate(cart_coords_3d):
        ax.scatter(point_i[0], point_i[1], point_i[2], marker= 'o' , color=point_color, s= 40, edgecolors = 'k', alpha = 0.9)
        if view_lables:
            # annotate the point with its (offset) index
            ax.text(point_i[0], point_i[1], point_i[2], '%s' % (str(i + initial_index)), size=10, zorder=1, color=label_color)
    '''
    Set axis in equal scale
    '''
    X_scatter = cart_coords_3d[:,0]
    Y_scatter = cart_coords_3d[:,1]
    Z_scatter = cart_coords_3d[:,2]
    max_range = np.array([X_scatter.max()-X_scatter.min(), Y_scatter.max()-Y_scatter.min(), Z_scatter.max()-Z_scatter.min()]).max()
    ax.set_xlim(X_scatter.min(), X_scatter.min() + max_range)
    ax.set_ylim(Y_scatter.min(), Y_scatter.min() + max_range)
    ax.set_zlim(Z_scatter.min(), Z_scatter.min() + max_range)
    ax.axis('off')
    ax.view_init(z_rotate, y_rotate)
    # Add the cell vector
    if not cell == []:
        ax = plot_3D_box(ax, pos, cell)
    plt.tight_layout()
    plt.show()
def drawing3D_super(G, pos, sitetype, cell = []):
    '''
    For super cell, larger graph.
    Draw the dash line for cell vector.
    Draw the 3D graph and color code the occupied
    based on different site types.

    Same contract as drawing3D but with a larger figure, smaller empty-site
    markers, and only the bare site labels 'a'/'c'/'b' (no 'top'/'bridge'/
    'hollow' aliases). Returns (fig, ax).
    '''
    cart_coords_3d = pos.copy()
    neighbor_list = list(G.edges)
    node_colors = list(nx.get_node_attributes(G,'color').values())
    #nodes_layers = list(nx.get_node_attributes(G,'z').values())
    plot_neighbs = True
    y_rotate = -30
    z_rotate = 10
    fig = plt.figure(figsize=(16, 16))
    ax = fig.add_subplot(111, projection='3d')
    #ax.set_aspect('equal')
    if plot_neighbs:
        # dashed line between each pair of connected lattice points
        for pair in neighbor_list: # neighbors
            p1 = cart_coords_3d[pair[0]]
            p2 = cart_coords_3d[pair[1]]
            ax.plot([p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]], '--k', linewidth=0.05)
    for i, point_i in enumerate(cart_coords_3d):
        if node_colors[i] == 'grey':
            # empty lattice point: small translucent grey marker
            point_color = 'grey'
            ax.scatter(point_i[0], point_i[1], point_i[2], marker= 'o' , color=point_color, s= 10, alpha = 0.5)
        if node_colors[i] == 'r':
            # occupied point: color by site type
            if sitetype[i] == 'a': point_color = 'green'
            elif sitetype[i] == 'c': point_color = 'darkorange'
            elif sitetype[i] == 'b': point_color = 'blue'
            ax.scatter(point_i[0], point_i[1], point_i[2], marker= 'o' , color=point_color, s= 100, edgecolors = 'k', alpha = 0.9)
    '''
    Set axis in equal scale
    '''
    X_scatter = cart_coords_3d[:,0]
    Y_scatter = cart_coords_3d[:,1]
    Z_scatter = cart_coords_3d[:,2]
    max_range = np.array([X_scatter.max()-X_scatter.min(), Y_scatter.max()-Y_scatter.min(), Z_scatter.max()-Z_scatter.min()]).max()
    ax.set_xlim(X_scatter.min(), X_scatter.min() + max_range)
    ax.set_ylim(Y_scatter.min(), Y_scatter.min() + max_range)
    ax.set_zlim(Z_scatter.min(), Z_scatter.min() + max_range)
    ax.axis('off')
    ax.view_init(z_rotate, y_rotate)
    # Add the cell vector
    if not cell == []:
        ax = plot_3D_box(ax, pos, cell)
    plt.tight_layout()
    return fig, ax
def plot_CE_layers(pos, view_lables):
    '''
    Plot one layer per graph for a cluster expansion lattice.

    pos         - (n, 3) coordinates; points are grouped by unique z value
    view_lables - forwarded to plot_CE_lattice (point index labels)

    Each layer is plotted with an index offset equal to the cumulative
    number of points in the layers below it, so labels stay globally unique.
    '''
    z_pos = pos[:,2]
    z_values = np.unique(z_pos)
    pos_layer = []
    pos_numbers = [0]
    for zi in z_values:
        # get the 2d position of layer a, b and c
        pos_layer.append(pos[np.where(z_pos == zi)])
        pos_numbers.append(len(pos[np.where(z_pos == zi)]))
    # cumulative point counts -> label offset for each layer
    pos_cum_numbers = np.cumsum(np.array(pos_numbers))
    for pos_layer_i, initial_i in zip(pos_layer, pos_cum_numbers):
        plot_CE_lattice(pos_layer_i, view_lables, initial_index = initial_i)
def build_pbc_cube(size_v = (1,1), dz = 1, lattice_c = 1, view_flag = False, view_labels = False):
    '''
    build 3D periodic cluster expansion lattice, only 2 layers in z direction
    example: build_pbc_cube((2,2)) to create a 2 by 2 unite lattice

    size_v      - (nx, ny) repetitions in x and y
    dz          - z spacing of the second layer
    lattice_c   - lattice constant passed to ase.build.bcc100
    view_flag   - if True, plot the lattice via plot_CE_lattice
    view_labels - forwarded to plot_CE_lattice
    Returns (pos_all, cell_all): (2*nx*ny, 3) coordinates and the 3x3 cell.
    '''
    size_x = size_v[0]
    size_y = size_v[1]
    size_z = 2
    element = 'He' # space-filling element
    # use the base layer of bcc100 plane
    layer1 = bcc100(element, size=[size_x, size_y, 1], a = lattice_c)
    pos1 = layer1.get_positions()
    pos2 = layer1.get_positions()
    # second layer: same xy grid shifted to height dz
    pos2[:,2] = dz
    # get the position of all lattice points
    pos_all = np.concatenate((pos1, pos2) ,axis = 0)
    # get the 3d cell vector
    cell_all = layer1.get_cell() # get the total cell size
    # NOTE(review): cell height is set to size_z (= 2) regardless of dz —
    # looks correct only for dz == 1; confirm intended for other spacings.
    cell_all[2:,2] = size_z
    if view_flag:
        plot_CE_lattice(pos_all, view_labels, cell = cell_all)
    return pos_all, cell_all
def build_pbc_fcc_abc(size_v = (1,1), layer_structure = ['a','bc','abc', 'abc'], lattice_c = 1, view_flag = False, view_labels = False):
    '''
    build 3D periodic cluster expansion lattice
    z direction follows the abc rules
    example: build_pbc_fcc_abc((3,3)) to create a 3 by 3 unit lattice

    size_v          - (nx, ny) repetitions in x and y
    layer_structure - one string per z level listing which of the fcc
                      stacking sublattices ('a', 'b', 'c') exist there
    lattice_c       - target lattice constant (fcc111 input is rescaled,
                      see s_factor below)
    view_flag       - if True, plot the lattice via plot_CE_lattice
    view_labels     - forwarded to plot_CE_lattice
    Returns (pos_all, cell_all, type_all): coordinates, 3x3 cell, and the
    per-point sublattice letter.
    '''
    size_x = size_v[0]
    size_y = size_v[1]
    size_z = len(layer_structure)
    element = 'H' # space-filling element
    # use 3 layer fcc structure
    # somehow a is not equal to , we have to scale it
    s_factor = lattice_c/0.57735027* (6**0.5/3)
    layers_abc = fcc111(element, size=[size_x, size_y, 3], a = s_factor* lattice_c )
    pos_abc = layers_abc.get_positions()
    # get the z values in fcc lattice and get the average dz
    z_pos = pos_abc[:,2]
    z_values = np.unique(z_pos)
    dz = np.mean(z_values[1:] - z_values[:-1]) #6**0.5/3 #
    # get the 2d position of layer a, b and c
    pos_a = pos_abc[np.where(z_pos == z_values[0])][:,0:2]
    pos_b = pos_abc[np.where(z_pos == z_values[2])][:,0:2]
    pos_c = pos_abc[np.where(z_pos == z_values[1])][:,0:2]
    # construct the lattice based on input layer structure and save to pos_layer
    pos_list = []
    type_list = []
    for layer_i, layer_si in enumerate(layer_structure):
        pos_layer = []
        type_layer = []
        # z height of this level (levels start at dz, not 0)
        zi = dz * (layer_i+1)
        for ci in list(layer_si):
            if ci == 'a': pos_layer.append(add_z(pos_a, zi))
            if ci == 'b': pos_layer.append(add_z(pos_b, zi))
            if ci == 'c': pos_layer.append(add_z(pos_c, zi))
            type_layer += list(np.repeat(ci, size_x*size_y))
        pos_list.append(np.concatenate(pos_layer))
        type_list += type_layer
    # concatenate all layers together
    pos_all = np.concatenate(pos_list)
    type_all = type_list.copy()
    # get 3d cell vector
    cell_all = layers_abc.get_cell()
    cell_all[2:,2] = np.max(pos_all[:,2])
    if view_flag:
        plot_CE_lattice(pos_all, view_labels, cell = cell_all)
    return pos_all, cell_all, type_all
#%%
'''
The main object for cluster expansion to create network graphs
generate graphs for both configurations and clusters
'''
class graphs():
    '''
    Builds networkx graphs for cluster expansion: the full "mother"
    lattice graph, occupied "configuration" graphs, and small "cluster"
    graphs. Nodes carry 'pos' (xy), 'z' (layer string) and 'color'
    (occupancy) attributes; edges carry 'z' (layer pair) and 'length'.
    '''
    def __init__(self, occupancy, NN1, draw):
        '''
        takes in the occupancy color vector
        occupancy[0] is the empty color
        occupancy[1] is the filled color
        NN1  - if truthy, only connect edges of length exactly 1.0
        draw - 3 flags: draw [mother, configurations, clusters]
        '''
        self.occupancy = occupancy
        self.empty = self.occupancy[0]
        self.filled = self.occupancy[1]
        self.NN1 = NN1
        self.draw = draw
    def gmothers(self, mother, dz):
        '''
        takes in mother coordinate list
        returns connected lattice graph
        Also caches mother, edge list, edge lengths and edge layer
        strings on self for reuse by gconfigurations.
        '''
        draw_mother = self.draw[0]
        self.mother = mother
        self.nm = len(mother)
        self.dz = dz
        Gm = nx.Graph()
        for i in range(self.nm):
            Gm.add_node(i, pos = mother[i][:2], z = str(int(mother[i][2]/self.dz)), color = self.empty)
        self.edge = []
        self.edge_d = []
        self.edge_z = []
        # Add all edges and calculate the edge distance
        for i in range(self.nm):
            for j in np.arange(i+1,self.nm):
                self.edge.append((i,j))
                self.edge_d.append(two_points_D(mother[i],mother[j]))
                self.edge_z.append(str(int(mother[i][2]/self.dz))+str(int(mother[j][2]/self.dz)))
        self.ne = len(self.edge)
        for i in range(self.ne):
            if self.NN1: # only draw 1st Nearest Neighbors
                if self.edge_d[i] == 1.0:
                    Gm.add_edges_from([self.edge[i]], z = self.edge_z[i], length = self.edge_d[i])
#            if self.NN1 == 2: # draw both 1NN and edges shorter than 1NN
#                if self.edge_d[i] <= 1.0:
#                    Gm.add_edges_from([self.edge[i]], z = self.edge_z[i], length = self.edge_d[i])
            else:
                Gm.add_edges_from([self.edge[i]], z = self.edge_z[i], length = self.edge_d[i])
        if draw_mother:
            drawing(Gm)
            plt.title('%d lattice points' %self.nm)
        return Gm
    def gconfigurations(self, son):
        '''
        takes in mother coordinate list and son's index number and occupancy vector
        returns the shaded son graph
        (a copy of the mother graph with the nodes listed in `son`
        marked as filled). Requires gmothers to have been called first.
        '''
        draw_config = self.draw[1]
        ns = len(son)
        Gs = nx.Graph()
        for i in range(self.nm):
            Gs.add_node(i, pos = self.mother[i][:2], z = str(int(self.mother[i][2]/self.dz)), color = self.empty)
        for i in range(self.ne):
            if self.NN1: # only draw 1st Nearest Neighbors
                if self.edge_d[i] == 1.0:
                    Gs.add_edges_from([self.edge[i]], z = self.edge_z[i], length = self.edge_d[i])
#            if self.NN1 == 2: # draw both 1NN and edges shorter than 1NN
#                if self.edge_d[i] <= 1.0:
#                    Gs.add_edges_from([self.edge[i]], z = self.edge_z[i], length = self.edge_d[i])
            else:
                Gs.add_edges_from([self.edge[i]], z = self.edge_z[i], length = self.edge_d[i])
        for si in range(ns):
            # mark occupied sites
            Gs.nodes[son[si]]['color'] = self.filled
        if draw_config:
            drawing(Gs)
            plt.title('Pd %d' %ns)
        return Gs
    def gclusters(self, cmother, cson, cNN1 = False):
        '''
        takes in clusters
        cmother are coordinates
        cson is the list of node indices forming the cluster
        cNN1 - if truthy, only keep edges of length exactly 1.0
        return cluster graph objective
        '''
        draw_clusters = self.draw[2]
        Gc = nx.Graph()
        cns = len(cson)
        for i in range(cns):
            c = cson[i]
            Gc.add_node(i, pos = cmother[c][:2], z = str(int(cmother[c][2]/self.dz)), color = self.filled)
        cedge = []
        cedge_d = [] # the distance of each edge
        cedge_z = [] # the layer number of each edge
        for i in range(cns):
            for j in np.arange(i+1,cns):
                c = cson[i]
                d = cson[j]
                cedge.append((i,j))
                cedge_d.append(two_points_D(cmother[c],cmother[d]))
                cedge_z.append(str(int(cmother[c][2]/self.dz))+str(int(cmother[d][2]/self.dz)))
        cne = len(cedge)
        for i in range(cne):
            if cNN1: # only include neighboring edges in the graph
                if cedge_d[i] == 1.0:
                    Gc.add_edges_from([cedge[i]], z = cedge_z[i], length = cedge_d[i])
#            if cNN1 == 2: # draw both 1NN and edges shorter than 1NN
#                if self.edge_d[i] <= 1.0:
#                    Gc.add_edges_from([self.edge[i]], z = self.edge_z[i], length = self.edge_d[i])
            else:
                Gc.add_edges_from([cedge[i]], z = cedge_z[i], length = cedge_d[i])
        if draw_clusters:
            drawing(Gc)
            plt.title('Pd %d' %cns)
        return Gc
    def gclusters_kmc(self, Gm_NN1, cmother, cson, cNN1 = False):
        '''
        takes in clusters
        cmother are coordinates
        return cluster graph objective
        Variant of gclusters for KMC: with cNN1 the cutoff is <= 1
        instead of == 1.0.
        NOTE(review): Gm_NN1 is accepted but never used in the body —
        kept for call-site compatibility; confirm it can be dropped.
        '''
        draw_clusters = self.draw[2]
        Gc = nx.Graph()
        cns = len(cson)
        for i in range(cns):
            c = cson[i]
            Gc.add_node(i, pos = cmother[c][:2], z = str(int(cmother[c][2]/self.dz)), color = self.filled)
        cedge = []
        cedge_d = [] # the distance of each edge
        cedge_z = [] # the layer number of each edge
        for i in range(cns):
            for j in np.arange(i+1,cns):
                c = cson[i]
                d = cson[j]
                cedge.append((i,j))
                cedge_d.append(two_points_D(cmother[c],cmother[d]))
                cedge_z.append(str(int(cmother[c][2]/self.dz))+str(int(cmother[d][2]/self.dz)))
        cne = len(cedge)
        for i in range(cne):
            if cNN1: # only include neighboring edges in the graph
                if cedge_d[i] <= 1:
                    Gc.add_edges_from([cedge[i]], z = cedge_z[i], length = cedge_d[i])
            else:
                Gc.add_edges_from([cedge[i]], z = cedge_z[i], length = cedge_d[i])
        if draw_clusters:
            drawing(Gc)
            plt.title('Pd %d' %cns)
        return Gc
    def get_mother(self, mother, dz):
        '''
        takes in mother coordinates list and
        add mother attribute to the class
        '''
        self.Gm = self.gmothers(mother, dz)
    def get_configs(self, config):
        '''
        takes in configuration index list
        get a list of configurations as graph objects
        (stored in self.Gsv)
        '''
        self.Gsv = []
        self.nconfig = len(config)
        for si in range(self.nconfig):
            son_i = config[si]
            Gs = self.gconfigurations(son_i)
            self.Gsv.append(Gs)
    def get_clusters(self, cmother, ccluster, cNN1 = False):
        '''
        takes in cluster coordinates list and cluster index list
        returns a list of clusters as graph objects
        (stored in self.Gcv)
        '''
        self.nc = len(ccluster) # number of clusers
        self.Gcv = [] # list of clusters
        for si in range(self.nc):
            cson = ccluster[si]
            Gc = self.gclusters(cmother,cson, cNN1)
            self.Gcv.append(Gc)
#%%
'''
Cluster related functions
'''
def initialize_graph_object(mother, dz, NN1 = 1):
    '''
    Build a graphs() helper for the given mother lattice and attach the
    mother graph to it.

    NN1 flag (passed straight through to graphs):
        0 - connect all node pairs
        1 - only connect 1st nearest neighbors
        2 - connect 1NN plus edges shorter than the 1NN distance
    Drawing of mother / configurations / clusters is disabled.
    '''
    occupancy_colors = ['grey', 'r']   # [empty, filled]
    draw_flags = [0, 0, 0]             # no plots by default
    graph_obj = graphs(occupancy_colors, NN1, draw_flags)
    graph_obj.get_mother(mother, dz)
    return graph_obj
#%%
class calculations():
    '''
    Perform statistical calculation for cluster expansion:
    correlation (pi) matrices, effective cluster interactions (J)
    obtained by least squares, and the training mean squared error.
    '''
    def __init__(self, occupancy):
        '''
        takes in the occupancy color vector
        occupancy[0] is the empty color
        occupancy[1] is the filled color
        '''
        self.occupancy = occupancy
        self.empty = self.occupancy[0]
        self.filled = self.occupancy[1]
    def get_occupancy(self, G, i):
        '''
        Occupancy of node i in graph G: 1 when the node color equals the
        filled color, 0 when it equals the empty color.
        '''
        if G.nodes[i]['color'] == self.empty: o = 0
        if G.nodes[i]['color'] == self.filled: o = 1
        return o
    def get_delta_G(self, Gl, Gs):
        '''
        takes in larger graph Gl and smaller (cluster) graph Gs,
        finds subgraphs of Gl isomorphic to Gs, and returns
        (delta, niso): the AVERAGE occupancy product over all matches,
        and the number of matches.

        NOTE(review): iso.categorical_edge_match is passed as node_match;
        in networkx both factories compare attribute dicts identically,
        so behavior is unchanged, but categorical_node_match would state
        the intent better — confirm before renaming.
        '''
        if len(Gs) > 1:
            # multi-node cluster: match on node layer 'z' AND edge 'length'
            GMn = iso.GraphMatcher(Gl, Gs, node_match= iso.categorical_edge_match(['z'],[1]),
                                   edge_match= iso.numerical_edge_match(['length'],[1.0]))
        else:
            # single-node cluster: match on node layer only
            GMn = iso.GraphMatcher(Gl, Gs, node_match= iso.categorical_edge_match(['z'],[1]) )
        x = [y for y in GMn.subgraph_isomorphisms_iter()]
        niso = len(x)
        # node tuples of each isomorphic subgraph
        subg = [tuple(match.keys()) for match in x]
        # occupancy product per subgraph, averaged over all matches
        subs = []
        for nodes in subg:
            occs = [self.get_occupancy(Gl, node) for node in nodes]
            subs.append(np.prod(occs))  # np.product was removed in NumPy 2.0
        delta = np.sum(subs)/niso
        return delta, niso
    def get_delta_l(self, Gl, Gs):
        '''
        takes in larger graph Gl and a pre-enumerated cluster list Gs
        (one node tuple per isomorphic placement, no graph matching).
        Returns (delta, niso): the SUM of occupancy products and the
        number of placements.
        '''
        niso = len(Gs)
        subs = []
        for nodes in Gs:
            occs = [self.get_occupancy(Gl, node) for node in nodes]
            subs.append(np.prod(occs))  # np.product was removed in NumPy 2.0
        delta = np.sum(subs)
        return delta, niso
    def get_pi_matrix_G(self, G1v, G2v):
        '''
        The function that gets
        configuration graphs, G1v
        cluster graphs, G2v
        and returns the interaction correlation matrix pi.
        Also stores pi and the match-count matrix niso_m on self.
        '''
        n1 = len(G1v)
        n2 = len(G2v)
        pi = np.zeros((n1,n2))
        niso_m = np.zeros((n1,n2))
        for i in range(n1):
            for j in range(n2):
                pi[i][j], niso_m[i][j] = self.get_delta_G(G1v[i],G2v[j])
        self.pi = pi
        self.niso_m = niso_m
        return pi
    def get_pi_matrix_l(self, G1v, G2v, print_progress = False):
        '''
        The function that gets
        configuration graphs, G1v
        pre-enumerated cluster placement lists, G2v
        and returns the interaction correlation matrix pi.
        Also stores pi and the count matrix niso_m on self.
        '''
        n1 = len(G1v)
        n2 = len(G2v)
        pi = np.zeros((n1,n2))
        niso_m = np.zeros((n1,n2))
        progress = 0
        for i in range(n1):
            for j in range(n2):
                pi[i][j], niso_m[i][j] = self.get_delta_l(G1v[i],G2v[j])
                if print_progress:
                    progress = progress + 1
                    per = progress/n1/n2 *100
                    print('%.2f %% done!' %per)
        self.pi = pi
        self.niso_m = niso_m
        return pi
    def get_J(self, Ev):
        '''
        The function input energy of configurations, Ev
        Returns cluster energy J from linear regression
        (least squares against the stored pi matrix).
        '''
        self.Ev = np.array(Ev)
        # rcond=None opts into the current lstsq default and silences the
        # FutureWarning raised by the legacy (no-argument) behavior.
        J = np.linalg.lstsq(self.pi, self.Ev, rcond=None)[0]
        self.J = J
        return J
    def get_MSE(self):
        '''
        Returns MSE of prediction (pi @ J) and real cluster energy Ev.
        '''
        ns = len(self.Ev)
        MSE = np.sum(np.power((np.dot(self.pi,self.J) - self.Ev),2))/ns
        self.MSE = MSE
        return MSE
#%%
class subgraphs():
    '''
    generate subgraph list with the nodes numbers under the mother graph
    Clusters are deduplicated by a (sorted pair distances, layer tuple)
    fingerprint, so only symmetry-distinct clusters survive.
    '''
    def __init__(self, mother, dz):
        self.index= np.arange(len(mother)) # generate the index of nodes
        self.mother = mother
        self.dz = dz
    @staticmethod
    def layer_tuple(mother, dz, ci):
        '''
        takes in a combo of index and returns tuple of layers they are in
        (layer = z / dz truncated to int, one entry per node)
        '''
        n = len(ci)
        index = []
        for i in range(n):
            index.append(ci[i])
        layers = []
        for i in range(n):
            layers.append(int(mother[index[i]][2]/dz))
        layers= tuple(layers)
        return layers
    @staticmethod
    def distance_tuple(mother, ci):
        '''
        takes in a combo of index and returns sorted distance between nodes
        (all pairwise distances, sorted ascending)
        '''
        n = len(ci)
        index = []
        for i in range(n):
            index.append(ci[i])
        combo = list(combinations(index,2))
        ncombo = len(combo) #0 for 1 node, 2 for 2 nodes, 3 for 3 nodes
        distances = []
        for i in range(ncombo):
            pt1 = mother[combo[i][0]]
            pt2 = mother[combo[i][1]]
            distances.append(two_points_D(pt1, pt2))
        distances = tuple(sorted(distances))
        return distances
    @staticmethod
    def unique_combo(combo, indices_list):
        # Expand each group of fingerprint-equivalent combos back into the
        # actual node tuples: one sublist of placements per unique cluster.
        Gv_list = []
        nclusters = len(indices_list)
        for i in range(nclusters):
            Gv_list.append([])
            niso = len(indices_list[i])
            for j in range(niso):
                Gv_list[i].append(combo[indices_list[i][j]])
        return Gv_list
    def get_s(self, n_atoms):
        '''
        Input number of nodes in a subgraph
        Generate combinations among the nodes
        Returns one representative node list per unique cluster.
        '''
        self.n_atoms = n_atoms
        combo = list(combinations(self.index, self.n_atoms))
        ncombo = len(combo)
        '''
        generate the information list
        store the sorted distance of nodes in tuple 1
        + the layer each node is in in tuple 2
        '''
        info = []
        for i in range(ncombo):
            ci = combo[i]
            distances = self.distance_tuple(self.mother, ci)
            layers = self.layer_tuple(self.mother, self.dz, ci)
            info.append((distances, layers))
        info_set = list(set(info))
        index_list =[]
        for i in info_set:
            index_list.append(info.index(i))
        index_list.sort() # sort the list and take out those indices
        s_np = np.array(combo)[index_list]
        '''
        convert 2D np array to list
        '''
        s_list = []
        for i in range(s_np.shape[0]):
            s_list.append(list(s_np[i]))
        return s_list
    def get_s2(self, n_atoms):
        '''
        Input number of nodes in a subgraph
        Generate combinations among the nodes
        Unlike get_s, returns ALL placements grouped per unique cluster.
        '''
        print(n_atoms)
        self.n_atoms = n_atoms
        combo = list(combinations(self.index, self.n_atoms))
        ncombo = len(combo)
        '''
        generate the information list
        store the sorted distance of nodes in tuple 1
        + the layer each node is in in tuple 2
        '''
        info = []
        for i in range(ncombo):
            ci = combo[i]
            distances = self.distance_tuple(self.mother, ci)
            layers = self.layer_tuple(self.mother, self.dz, ci)
            info.append((distances, layers))
        info_set = list(set(info))
        #print(info_set)
        index_list =[]
        indices_list = []
        for i in info_set:
            index_list.append(info.index(i))
        index_list.sort() # sort the list and take out those indices
        for i in index_list:
            indices_list.append([a for a, x in enumerate(info) if x == info[i]])
        Gcv_list = self.unique_combo(combo, indices_list)
        return Gcv_list
    def get_s3(self, n_atoms, cutoff_distance):
        '''
        Input number of nodes in a subgraph
        Generate combinations among the nodes
        Same as get_s2 but drops any combo whose largest pair distance
        exceeds cutoff_distance before deduplication.
        '''
        self.n_atoms = n_atoms
        combo = list(combinations(self.index, self.n_atoms))
        ncombo = len(combo)
        '''
        generate the information list
        store the sorted distance of nodes in tuple 1
        + the layer each node is in in tuple 2
        '''
        info = []
        combo_NN = []
        for i in range(ncombo):
            ci = combo[i]
            distances = self.distance_tuple(self.mother, ci)
            if np.max(np.array(distances)) > cutoff_distance:
                continue
            else:
                layers = self.layer_tuple(self.mother, self.dz, ci)
                info.append((distances, layers))
                combo_NN.append(ci)
                print('{}-body {} % done!'.format(n_atoms, np.round(i/ncombo*100, decimals = 3)))
        info_set = list(set(info))
        #print(info_set)
        index_list =[]
        indices_list = []
        for i in info_set:
            index_list.append(info.index(i))
        index_list.sort() # sort the list and take out those indices
        for i in index_list:
            indices_list.append([a for a, x in enumerate(info) if x == info[i]])
        Gcv_list = self.unique_combo(combo_NN, indices_list)
        return Gcv_list
    def generate_clusters(self, cutoff_distance, up_to_nbodies = 3, saveas_json = True):
        '''
        Generate clusters up to 3 body interactions
        and save as a json file ('clusters.json' in the working directory).
        NOTE(review): the optional 4-body term uses get_s2 (no distance
        cutoff) while 2/3-body use get_s3 — confirm that is intended.
        '''
        self.Gcv1 = self.get_s2(1) # 1-body interaction
        self.Gcv2 = self.get_s3(2, cutoff_distance) # 2-body interaction
        self.Gcv3 = self.get_s3(3, cutoff_distance) # 3-body interaction
        self.Gcv = self.Gcv1 + self.Gcv2 + self.Gcv3
        self.count = dict()
        self.count['1 body'] = len(self.Gcv1)
        self.count['2 body'] = len(self.Gcv2)
        self.count['3 body'] = len(self.Gcv3)
        if up_to_nbodies == 4:
            self.Gcv = self.Gcv + self.get_s2(4)
        if saveas_json:
            #convert to jsonable format (numpy ints -> plain ints)
            Gcv_jsonable = []
            for Gcv_i in self.Gcv:
                Gcv_i_jsonable = []
                for Gcv_j in Gcv_i:
                    Gcv_j_jsonable = [int(g) for g in Gcv_j]
                    Gcv_i_jsonable.append(Gcv_j_jsonable)
                Gcv_jsonable.append(Gcv_i_jsonable)
            Gcv_dict = {'Gcv': Gcv_jsonable}
            with open('clusters.json', 'w') as outfile:
                json.dump(Gcv_dict, outfile)
#%%
class coordination():
'''
calculate the coordination number (CN1, CN2)
and the general coordination number (GCN)
Use for CO oxidation onto clusters graph, not in use anymore
'''
    def __init__(self,occupancy):
        '''
        takes in the occupancy color vector
        occupancy[0] is the empty color
        occupancy[1] is the filled color
        '''
        self.occupancy = occupancy
        self.empty = self.occupancy[0]
        self.filled = self.occupancy[1]
    def num_1NN(self, G, i):
        '''
        G is a networkx graph
        i is the index of node in G
        returns number of nearest neighbors of node i
        and a list of neighbor index number
        (only occupied neighbors are counted; if node i itself is not
        occupied a warning is printed and (0, []) is returned)
        '''
        n_1NN = 0
        list_1NN = []
        if G.nodes[i]['color'] == self.filled: # check if the node is occupied
            for j in list(G.neighbors(i)): #iterate through 1st NN
                if G.nodes[j]['color'] == self.filled: #check if the node is occupied
                    n_1NN = n_1NN + 1
                    list_1NN.append(j)
        else:
            print('No atoms detected at this position')
        return n_1NN, list_1NN
    def num_2NN(self, G,i):
        '''
        G is a networkx graph
        i is the index of node in G where CO adsorbs
        returns a list of numbers of 2nd nearest neighbors of node i for each 1NN
        and a 2D list of 2NN index numbers
        (one entry per occupied first neighbor of i)
        '''
        n_2NN = []
        list_2NN = []
        if G.nodes[i]['color'] == self.filled: # check if the node is occupied
            for j in G.neighbors(i): # iterate through 1st NN
                if G.nodes[j]['color'] == self.filled: # check if the node is occupied
                    n_2NN.append(self.num_1NN(G,j)[0]) # Add number of 2NN for 1NNs
                    list_2NN.append(self.num_1NN(G,j)[1]) # Add neighbor index number
        else:
            print('No atoms detected at this position')
        return n_2NN, list_2NN
    def cal_CN1(self, G,COsites):
        '''
        G is a networkx graph
        COsites are the index of adsorption site
        returns 1st CN number
        (per-site 1NN counts averaged over the site atoms)
        '''
        CN1 = []
        sitetype = len(COsites)
        for i in range(sitetype):
            CN1.append(self.num_1NN(G, COsites[i])[0])
        '''
        take arithmaric mean for CN1 for bridge and hollow sites
        '''
        CN1 = np.mean(np.array(CN1))
        return CN1
    def cal_CN2(self, G,COsites):
        '''
        G is a networkx graph
        COsites are the index of adsorption site
        returns 2nd CN number
        (2NN counts summed per site atom, then averaged over the site)
        '''
        list_CN2 = []
        CN2 = []
        sitetype = len(COsites)
        for i in range(sitetype):
            list_CN2.append(self.num_2NN(G, COsites[i])[0])
        '''
        sum up 2NN numbers for each 1NN
        '''
        for i in range(sitetype):
            if len(list_CN2[i]) == 0: CN2.append(0)
            else: CN2.append(np.sum(np.array(list_CN2[i])))
        '''
        take arithmaric mean for CN2 for bridge and hollow sites
        '''
        CN2 = np.mean(np.array(CN2))
        return CN2
    def cal_GCN(self, G, COsites):
        '''
        G is a networkx graph
        COsites are the index of adsorption site
        returns general coordination number
        (sum of the 1NN coordination numbers of all distinct first
        neighbors of the site, divided by the site-type weight)
        '''
        GCN = []
        sitetype = len(COsites)
        list_1NN = []
        '''
        find all avaiable 1NN index
        '''
        for i in range(sitetype):
            list_1NN = list_1NN + self.num_1NN(G, COsites[i])[1]
        '''
        Use set to avoid double counting
        '''
        list_1NN = list(set(list_1NN))
        '''
        Get CN for these 1NN nodes
        '''
        for i in list_1NN:
            GCN.append(self.num_1NN(G,i)[0])
        '''
        Set weight based on Pd(111)
        '''
        # weight = max total neighbor count for top/bridge/hollow sites
        if len(COsites) == 1: weight = 12
        if len(COsites) == 2: weight = 18
        if len(COsites) == 3: weight = 22
        GCN = np.sum(np.array(GCN))/weight
        return GCN
    def num_Ce1NN(self, G, i):
        '''
        G is a networkx graph
        i is the index of node in G
        returns the flag of whether the atom is next to a Ce atom
        (1 when the occupied atom sits in the base layer, z == '1')
        '''
        nCe = 0
        if G.nodes[i]['color'] == self.filled: # check if the node is occupied
            if G.nodes[i]['z'] == '1': # check if the node is in base layer
                nCe = 1 # 1 means the atom is in contact with 3 Ce atoms underneath
        return nCe
def num_Ce2NN(self, G, i):
    '''
    G is a networkx graph; i is a node index.
    Returns the number of first neighbors of node i that touch the Ce
    base layer, together with the list of those neighbor indices.
    Prints a warning and returns (0, []) when node i is not occupied.
    '''
    count = 0
    base_contacts = []
    if G.nodes[i]['color'] == self.filled:
        for nb in G.neighbors(i):
            flag = self.num_Ce1NN(G, nb)
            count += flag
            if flag:
                base_contacts.append(nb)
    else:
        print('No atoms detected at this position')
    return count, base_contacts
def cal_CeCN1(self, G, COsites):
    '''
    G is a networkx graph; COsites are the node indices of the adsorption
    site. Returns the Ce coordination number: the base-layer flags of the
    site atoms are averaged and scaled by 3, since each base-layer Pd atom
    is coordinated by 3 Ce atoms.
    '''
    base_flags = [self.num_Ce1NN(G, site) for site in COsites]
    return np.mean(np.array(base_flags)) * 3
def cal_CeCN2(self, G, COsites):
    '''
    G is a networkx graph; COsites are the node indices of the adsorption
    site. Returns the second Ce coordination number: counts of neighbors
    touching the Ce base layer, averaged over site atoms and scaled by 3
    (3 Ce atoms per base-layer Pd contact).
    '''
    contact_counts = [self.num_Ce2NN(G, site)[0] for site in COsites]
    return np.mean(np.array(contact_counts)) * 3
def cal_CeGCN(self, G, COsites):
    '''
    G is a networkx graph; COsites are the node indices of the adsorption
    site. Returns the generalized Ce coordination number: base-layer flags
    of all distinct neighbors adjacent to the Ce layer, summed, divided by
    a site-type weight (3/5/6 for atop/bridge/hollow) and scaled by 3.
    '''
    # Distinct first neighbors that touch the Ce base layer.
    ce_shell = []
    for site in COsites:
        ce_shell = ce_shell + self.num_Ce2NN(G, site)[1]
    ce_shell = list(set(ce_shell))
    # Base-layer flag for each of those neighbors.
    shell_flags = [self.num_Ce1NN(G, node) for node in ce_shell]
    if len(COsites) == 1:
        weight = 3
    elif len(COsites) == 2:
        weight = 5
    elif len(COsites) == 3:
        weight = 6
    return np.sum(np.array(shell_flags)) / weight * 3
def get_CNs(self, G, COsites):
    '''
    Evaluate all six coordination descriptors for configuration G at the
    CO site indices and attach them to the instance as attributes
    CN1, CN2, GCN, CeCN1, CeCN2 and CeGCN.
    '''
    calculators = (('CN1', self.cal_CN1), ('CN2', self.cal_CN2),
                   ('GCN', self.cal_GCN), ('CeCN1', self.cal_CeCN1),
                   ('CeCN2', self.cal_CeCN2), ('CeGCN', self.cal_CeGCN))
    for attr_name, calc in calculators:
        setattr(self, attr_name, calc(G, COsites))
def get_z(self, G, COsites):
    '''
    Attach to self.z the arithmetic mean of the layer numbers ('z' node
    attribute, stored as a string) of the CO site atoms in G.
    '''
    layer_numbers = [int(G.nodes[site]['z']) for site in COsites]
    self.z = np.mean(np.array(layer_numbers))
#%%
class isomorphs():
    '''
    A class object to generate isomorphs (symmetry-equivalent node
    configurations) of given cluster configurations on the mother lattice.
    '''
    def __init__(self, mother, dz):
        '''
        mother: lattice point coordinate array (indexable by node index).
        dz: layer spacing used to assign nodes to layers.
        '''
        self.mother = mother
        self.dz = dz
    def get_iso_config(self, config_list, Ec_list, i_config, saveas_json = False, drawing_flag = False):
        '''
        For the configuration at index i_config in config_list, find all
        subgraphs of the (layer-restricted) mother lattice graph that are
        isomorphic to the configuration graph.
        When saveas_json is True: writes 'iso_config_<i>.json' and returns
        None. Otherwise returns (E_iso_i, iso_indices_pos) where E_iso_i
        repeats Ec_list[i_config] once per isomorph and iso_indices_pos is
        the list of sorted node-index lists.
        '''
        config_i = config_list[i_config]
        n_nodes = len(config_i)
        n_layers = get_layers(self.mother, self.dz, config_i)
        node_layer_dict = get_node_layer_dict(self.mother, self.dz)
        node_index = []
        #print(node_layer_dict)
        # Restrict the search to the layers actually spanned by the
        # configuration, to keep the isomorphism search tractable.
        for i in range(n_layers):
            node_index = node_index + node_layer_dict[i]
        sub_mother = self.mother[np.array(node_index)]
        Clusters = initialize_graph_object(sub_mother, self.dz)
        # Generate the mother graph
        G1 = Clusters.Gm
        # Generate the configuration graph
        Clusters.get_clusters(sub_mother, [config_i]) #one in layer 3 and one in layer 4
        Gcv = Clusters.Gcv
        G2 = Gcv[0]
        if drawing_flag == True:
            plt.figure()
            drawing(G1)
            plt.figure()
            drawing(G2)
        # Detect isomorphic subgraphs;
        # the matching graphs are stored as key-value dictionary pairs
        # NOTE(review): categorical_edge_match is passed as node_match —
        # in networkx the categorical node/edge matchers are functionally
        # identical, but categorical_node_match would be clearer.
        if len(G2) > 1:
            GMn = iso.GraphMatcher(G1, G2, node_match= iso.categorical_edge_match(['z'],[1]),
                                   edge_match= iso.numerical_edge_match(['length'],[1.0]))
            iso_matches = [y for y in GMn.subgraph_isomorphisms_iter()]
            #GMz= iso.GraphMatcher(G1, G2, edge_match= iso.categorical_edge_match(['z'],[1.0]) )
            #GMl = iso.GraphMatcher(G1, G2, edge_match= iso.numerical_edge_match(['length'],[1.0]) )
            #iso_matches = [y for y in GMz.subgraph_isomorphisms_iter() if y in GMl.subgraph_isomorphisms_iter()]
        else:
            # Single-node configuration: no edges to match.
            GMn = iso.GraphMatcher(G1, G2, node_match= iso.categorical_edge_match(['z'],[1]) )
            iso_matches = [y for y in GMn.subgraph_isomorphisms_iter()]
        # Use set and np.unique to eliminate the repeated graphs;
        # return the graphs in indices list
        iso_indices = [list(xi.keys()) for xi in iso_matches]
        iso_indices = [list(yi) for yi in list(set(xi) for xi in iso_indices)]
        iso_indices = [list(xi) for xi in np.unique(iso_indices, axis = 0)]
        # Take all configurations with points that fall on the right panel,
        # as cluster expansion can handle symmetric graphs:
        # keep a match if any x > 0 or all x == 0.
        iso_indices_pos = []
        for iso_i in iso_indices:
            cond1 = np.any(self.mother[np.array(iso_i)][:,0] > 0)
            cond2 = np.all(self.mother[np.array(iso_i)][:,0] == 0)
            if cond1 or cond2:
                iso_indices_pos.append(sorted([int(xi) for xi in iso_i]))
        niso = len(iso_indices_pos)
        # save to a json file
        if saveas_json:
            output_dict = {'configuration': config_i,
                           'index': int(i_config),
                           'n_nodes': int(n_nodes),
                           'n_layers': int(n_layers),
                           'n_iso': int(niso),
                           'iso_graph_list': iso_indices_pos}
            with open('iso_config_' + str(i_config) +'.json', 'w') as outfile:
                json.dump(output_dict, outfile)
        else:
            # Each isomorph inherits the energy of the parent configuration.
            E_iso_i = list(Ec_list[i_config] * np.ones(niso))
            return E_iso_i, iso_indices_pos
    def generate_all_iso(self, config, Ec, file_index = 0):
        '''
        Main part of the function.
        Take in a list of configurations and their corresponding energies,
        generate all isomorphs, attach them to self.E_iso/self.config_iso
        and dump both to 'ES_iso_<file_index>.json'.
        '''
        E_iso = []
        config_iso = []
        for i in range(len(config)):
            E_iso_i, iso_indices_pos = self.get_iso_config(config, Ec, i, saveas_json = False, drawing_flag = False)
            print('{} batch {} % config done!'.format(file_index, i/len(config)*100))
            E_iso = E_iso + E_iso_i
            config_iso = config_iso + iso_indices_pos
        # Attach to self properties
        self.E_iso = E_iso
        self.config_iso = config_iso
        '''
        save to one json file, containing all iso config list and their energies
        '''
        file_name = 'ES_iso_' + str(file_index) + '.json'
        ES_dict = {'E_iso': E_iso, 'config_iso': config_iso}
        with open(file_name, 'w') as outfile:
            json.dump(ES_dict, outfile)
#%%
class graphs_CO():
    '''
    Graph object for CO-CO interactions.
    '''
    def __init__(self, occupancy, NN1, unit_length):
        '''
        occupancy: two-element color vector, occupancy[0] is the empty
        color and occupancy[1] the filled color.
        NN1: when True, only edges within the first nearest-neighbor
        distance are added to the graphs.
        unit_length: distance between Pd atop - Pd atop sites.
        '''
        self.unit_length = unit_length
        self.occupancy = occupancy
        self.empty = occupancy[0]
        self.filled = occupancy[1]
        self.NN1 = NN1
        #self.draw = draw
    def gmothers(self, mother, sitetype_list):
        '''
        Build the full lattice ("mother") graph from the coordinate list
        and the per-node site types; all nodes start with the empty color.
        Also caches all candidate edges, their lengths and type pairs on
        the instance (self.edge / self.edge_d / self.edge_type / self.ne).
        '''
        self.mother = mother
        self.nm = len(mother)
        self.sitetype_list = sitetype_list
        Gm = nx.Graph()
        for idx in range(self.nm):
            Gm.add_node(idx, pos = self.mother[idx], sitetype = self.sitetype_list[idx], color = self.empty)
        # Enumerate each unordered node pair once, recording its distance
        # and the (sitetype, sitetype) pair.
        self.edge = []
        self.edge_d = []
        self.edge_type = []
        for a in range(self.nm):
            for b in np.arange(a + 1, self.nm):
                self.edge.append((a, b))
                self.edge_d.append(two_points_D(mother[a], mother[b]))
                self.edge_type.append((self.sitetype_list[a], self.sitetype_list[b]))
        self.ne = len(self.edge)
        for k in range(self.ne):
            # With NN1 set, keep only edges within one unit length;
            # otherwise connect every node pair.
            if (not self.NN1) or self.edge_d[k] <= self.unit_length:
                Gm.add_edges_from([self.edge[k]], length = self.edge_d[k], edge_type = self.edge_type[k])
        return Gm
    def gconfigurations(self, son):
        '''
        Build the occupied ("son") subgraph for the node indices listed in
        son; nodes carry the filled color and edges follow the same NN1
        rule as the mother graph.
        '''
        ns = len(son)
        Gs = nx.Graph()
        for node in son:
            Gs.add_node(node, pos = self.mother[node], sitetype = self.sitetype_list[node], color = self.filled)
        pair_list = []
        pair_d = []
        pair_type = []
        # Enumerate each unordered pair of occupied nodes once.
        for a in range(ns):
            for b in np.arange(a + 1, ns):
                p = son[a]  # index of the first point
                q = son[b]  # index of the second point
                pair_list.append((p, q))
                pair_d.append(two_points_D(self.mother[p], self.mother[q]))
                pair_type.append((self.sitetype_list[p], self.sitetype_list[q]))
        for k in range(len(pair_list)):
            if (not self.NN1) or pair_d[k] <= self.unit_length:
                Gs.add_edges_from([pair_list[k]], length = pair_d[k], edge_type = pair_type[k])
        return Gs
|
{"hexsha": "7d8fb93fd591dafe012b1df1a3ee641ec49dd48c", "size": 52301, "ext": "py", "lang": "Python", "max_stars_repo_path": "lasso-assisted-CE/lattice_functions.py", "max_stars_repo_name": "VlachosGroup/Pdn-Cluster-Structure-Optimization", "max_stars_repo_head_hexsha": "470fe57bd0dcfdfc131c9b7138b6c6480dfafd4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-10-17T03:14:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T12:06:09.000Z", "max_issues_repo_path": "lasso-assisted-CE/lattice_functions.py", "max_issues_repo_name": "VlachosGroup/Pdn-Cluster-Structure-Optimization", "max_issues_repo_head_hexsha": "470fe57bd0dcfdfc131c9b7138b6c6480dfafd4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lasso-assisted-CE/lattice_functions.py", "max_forks_repo_name": "VlachosGroup/Pdn-Cluster-Structure-Optimization", "max_forks_repo_head_hexsha": "470fe57bd0dcfdfc131c9b7138b6c6480dfafd4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0926352129, "max_line_length": 136, "alphanum_fraction": 0.5220167874, "include": true, "reason": "import numpy,import networkx,from networkx", "num_tokens": 13347}
|
from os.path import dirname, join
import sys
import platform
from ctypes import *
from .xi_wintypes import *
from .xidefs import *
try:
import numpy as np
except ImportError:
pass
#import platform; platform.architecture - not reliable on Mac OSX
# Select the native helper-library directory by CPU family and word size.
_machine = platform.machine()
_is_arm = _machine.startswith('arm') or _machine == "aarch64"
_is_64bit = sys.maxsize > 2**32
if _is_arm:
    LIB_PATH = join(dirname(__file__), 'libs', 'xarm64' if _is_64bit else 'xarm32')
else:
    LIB_PATH = join(dirname(__file__), 'libs', 'x64' if _is_64bit else 'x32')
#library for operations with image data - to increase speed with which
#get_image_data_numpy() returns data
c_arr_ops = CDLL(join(LIB_PATH, 'xiArrOps.so'))
#library for communication with device
_device = CDLL('/usr/lib/libm3api.so.2')
class Xi_error(Exception):
    '''
    Camera error raised for non-zero return codes from the xiApi C library
    (see ERROR_CODES in xidefs.py for the code-to-description mapping).
    '''
    def __init__(self, status):
        '''
        status: integer return code from the C library.
        '''
        # Always record the numeric status. Previously it was only set for
        # known codes, so __str__ raised AttributeError on unknown ones.
        self.status = status
        if status in ERROR_CODES:
            self.descr = ERROR_CODES[status]
        else:
            self.descr = 'Unknown error'
    def __str__(self):
        return 'ERROR %i: %s' %(self.status, self.descr)
class Image(XI_IMG):
    '''
    Camera image class. It inherits from ctypes.Structure XI_IMG (see xidefs.py).
    '''
    def __init__(self):
        '''
        initialization of an image inst for image data and metadata.
        '''
        # The C API version-checks the struct by its size field.
        self.size = sizeof(self)
    def get_image_data_raw(self):
        '''
        Return data (of type bytes) from memory specified by Image.bp.
        NOTE: Call this function before closing the camera. After the camera
        is closed, the memory is deallocated and it is impossible to retrieve
        the data.
        '''
        # total payload = pixel data plus per-row padding bytes
        output_length = self.get_bytes_per_pixel()*self.width*self.height+self.padding_x*self.height
        return string_at(self.bp, output_length)
    def get_image_data_numpy(self, invert_rgb_order=False):
        '''
        Return data as a numpy.Array type with dimension Image.height x
        Image.width (in case imgdataformat is XI_MONO8, XI_MONO16, XI_RAW8 or
        XI_RAW16), Image.height x Image.width x 3 (in case imgdataformat is
        XI_RGB24) or Image.height x Image.width x 4 (in case imgdataformat is
        XI_RGB32).
        invert_rgb_order (bool) determines the order of bytes in case of
        RGB and RGBA settings (if the order is R-G-B or B-G-R).
        NOTE: Call this function before closing the camera. After the camera
        is closed, the memory is deallocated and it is impossible to retrieve
        the data.
        '''
        # Each branch copies the (possibly row-padded) frame buffer into a
        # ctypes array via the native xiArrOps helper, then wraps it in a
        # numpy array without a further copy.
        try:
            if self.get_bytes_per_pixel() == 1:
                c_array = c_ubyte*self.width*self.height
                data = c_array()
                c_arr_ops.arr8bit(
                    c_int(self.height),
                    c_int(self.width),
                    c_int(self.padding_x),
                    c_void_p(self.bp),
                    data
                )
                numpy_data = np.array(data, copy=False, dtype=np.uint8)
                return numpy_data
            elif self.get_bytes_per_pixel() == 2:
                c_array = c_ushort*self.width*self.height
                data = c_array()
                c_arr_ops.arr16bit(
                    c_int(self.height),
                    c_int(self.width),
                    c_int(self.padding_x),
                    c_void_p(self.bp),
                    data
                )
                numpy_data = np.array(data, copy=False, dtype=np.uint16)
                return numpy_data
            elif self.get_bytes_per_pixel() == 3:
                if invert_rgb_order: invRGB = 1
                else: invRGB = 0
                c_array = c_ubyte*3*self.width*self.height
                data = c_array()
                c_arr_ops.arrRGB(
                    c_int(self.height),
                    c_int(self.width),
                    c_int(self.padding_x),
                    c_void_p(self.bp),
                    data,
                    c_int(invRGB)
                )
                numpy_data = np.array(data, copy=False, dtype=np.uint8)
                return numpy_data
            elif self.get_bytes_per_pixel() == 4:
                if invert_rgb_order: invRGB = 1
                else: invRGB = 0
                c_array = c_ubyte*4*self.width*self.height
                data = c_array()
                c_arr_ops.arrRGBA(
                    c_int(self.height),
                    c_int(self.width),
                    c_int(self.padding_x),
                    c_void_p(self.bp),
                    data,
                    c_int(invRGB)
                )
                numpy_data = np.array(data, copy=False, dtype=np.uint8)
                return numpy_data
            else:
                raise Xi_error(108) #"Data format not supported"
        except NameError:
            # np is only bound if the optional numpy import at module top
            # succeeded; referencing it otherwise raises NameError.
            raise ImportError('Numpy module is not installed.')
    def get_bytes_per_pixel(self):
        '''
        Return number (int) of data image bytes per single pixel.
        '''
        if self.frm == XI_IMG_FORMAT["XI_MONO8"].value:
            return 1
        elif self.frm == XI_IMG_FORMAT["XI_RAW8"].value:
            return 1
        elif self.frm == XI_IMG_FORMAT["XI_MONO16"].value:
            return 2
        elif self.frm == XI_IMG_FORMAT["XI_RAW16"].value:
            return 2
        elif self.frm == XI_IMG_FORMAT["XI_RGB24"].value:
            return 3
        elif self.frm == XI_IMG_FORMAT["XI_RGB32"].value:
            return 4
        elif self.frm == XI_IMG_FORMAT["XI_RGB_PLANAR"].value:
            return 3
        else:
            raise Xi_error(108) #"Data format not supported"
def _key_by_value(dictionary, val):
# v.value == val.value because c_float(1) == c_float(1) returns False
for k, v in dictionary.items():
if v.value == val.value:
return k
raise ValueError('Value not found')
class Camera(object):
'''
Camera class. It wrapps xiApi c library and provides its functionality.
'''
def __init__(self, dev_id=0):
    '''
    Device initialization. For opening more connected cameras, create new
    instance with dev_id = 0, 1, 2, ...
    '''
    self.CAM_OPEN = False   # connection state flag
    self.handle = 0         # xiApi device handle, set by open_device*
    self.dev_id = dev_id
    self.device = _device   # shared ctypes binding to libm3api
def open_device(self):
    '''
    Connect the camera specified by dev_id from __init__.
    Raises RuntimeError if this instance already holds an open camera,
    and Xi_error on a non-zero xiApi status.
    '''
    if self.CAM_OPEN:
        raise RuntimeError('Camera already open. Create new instance to open next camera')
    self.handle = HANDLE()
    stat = self.device.xiOpenDevice(self.dev_id, byref(self.handle))
    if stat != 0:
        raise Xi_error(stat)
    self.CAM_OPEN = True
def open_device_by(self, open_type, val):
    '''
    Connect the camera specified by open_type (string), see keys in
    dictionary xidefs.XI_OPEN_BY; val is the matching identifier value.
    '''
    if self.CAM_OPEN:
        raise RuntimeError('Camera already open. Create new instance to open next camera')
    self.get_number_devices()
    self.handle = HANDLE()
    if not open_type in XI_OPEN_BY:
        raise RuntimeError('invalid value')
    buf = create_string_buffer(val) #only python2.x
    stat = self.device.xiOpenDeviceBy(
        XI_OPEN_BY[open_type],
        buf,
        byref(self.handle)
    )
    if stat != 0:
        raise Xi_error(stat)
    self.CAM_OPEN = True
def open_device_by_SN(self, serial_number):
    '''
    Connect the camera specified by its serial number (string).
    '''
    if type(serial_number) is not str:
        raise TypeError('serial_number must be a string')
    self.open_device_by('XI_OPEN_BY_SN', serial_number)
def open_device_by_path(self, path):
    '''
    Connect the camera specified by its path (string).
    '''
    if not type(path) == str:
        # fixed: error message previously said 'serial_number' —
        # a copy-paste from open_device_by_SN — but the parameter is path
        raise TypeError('path must be a string')
    self.open_device_by('XI_OPEN_BY_INST_PATH', path)
def close_device(self):
    '''
    Close connection to the camera.
    Raises Xi_error on a non-zero xiApi status.
    '''
    result = self.device.xiCloseDevice(self.handle)
    if result != 0:
        raise Xi_error(result)
    self.CAM_OPEN = False
def get_number_devices(self):
    '''
    Get number of cameras.
    NOTE: This function must be called before connection is established.
    '''
    count = DWORD()
    result = self.device.xiGetNumberDevices(byref(count))
    if result != 0:
        raise Xi_error(result)
    return count.value
def start_acquisition(self):
    '''
    Start feeding data to the PC memory. Data can be retrieved with
    function get_image(). Raises Xi_error on a non-zero xiApi status.
    '''
    result = self.device.xiStartAcquisition(self.handle)
    if result != 0:
        raise Xi_error(result)
def stop_acquisition(self):
    '''
    Stop data acquisition. Raises Xi_error on a non-zero xiApi status.
    '''
    result = self.device.xiStopAcquisition(self.handle)
    if result != 0:
        raise Xi_error(result)
def get_image(self, image, timeout=50000):
    '''
    Pass data from memory to Image instance image.
    Timeout is specified in microseconds.
    NOTE: Call this function before closing the camera. After the camera
    is closed, the memory is deallocated and it is impossible to retrieve
    the data.
    '''
    result = self.device.xiGetImage(
        self.handle,
        DWORD(timeout),
        byref(image)
    )
    if result != 0:
        raise Xi_error(result)
def get_device_info_string(self, param):
    '''
    Return string with info specified by param (string). It is possible
    to call this function before establishing connection with the camera.
    param can be one of the following strings:
        "device_sn"
        "device_name"
        "device_inst_path"
        "device_loc_path"
        "device_type"
    '''
    prm = create_string_buffer(param) #only python2.x
    buffer_length = 100
    answer = create_string_buffer(buffer_length)
    result = self.device.xiGetDeviceInfoString(
        self.dev_id,
        prm,
        answer,
        DWORD(buffer_length)
    )
    if result != 0:
        raise Xi_error(result)
    return answer.value
def set_param(self, param, val):
    '''
    Set value (data type depends on parameter) to a parameter
    (string, see parameters in xidefs.py).
    NOTE: Consider using function for specific parameter, e.g. if you want
    to set exposure, instead of using set_param('exposure', 10000), use
    set_exposure(10000).
    '''
    prm = create_string_buffer(param) #only python2.x
    if not param in VAL_TYPE:
        raise RuntimeError('invalid parameter')
    # Marshal the Python value into the ctypes representation the C API
    # expects; val_len is the byte size reported to xiSetParam.
    val_type = VAL_TYPE[param]
    if val_type == 'xiTypeString':
        val_len = DWORD(len(val))
        val = create_string_buffer(val) #only python2.x
    elif val_type == 'xiTypeInteger':
        val_len = DWORD(4)
        val = pointer(c_int(val))
    elif val_type == 'xiTypeFloat':
        val_len = DWORD(4)
        val = pointer(FLOAT(val))
    elif val_type == 'xiTypeEnum':
        # enum values are passed by their string name and translated to
        # the numeric constant via ASSOC_ENUM (see xidefs.py)
        val_len = DWORD(4)
        val = pointer(ASSOC_ENUM[param][val])
    elif val_type == 'xiTypeBoolean':
        val_len = DWORD(4)
        val = pointer(c_int(val))
    elif val_type == 'xiTypeCommand':
        val_len = DWORD(4)
        val = pointer(c_int(val))
    stat = self.device.xiSetParam(
        self.handle,
        prm,
        val,
        val_len,
        XI_PRM_TYPE[val_type]
    )
    if not stat == 0:
        raise Xi_error(stat)
def get_param(self, param, buffer_size=256):
    '''
    Get value (data type depends on parameter) of a parameter
    (string, see parameters in xidefs). buffer_size (int) determines the
    maximum size of output.
    NOTE: Consider using function for specific parameter, e.g. if you want
    to get exposure, instead of using get_param('exposure'), use
    get_exposure().
    '''
    prm = create_string_buffer(param) #only python2.x
    # param may carry a ':max'/':min'/':inc' suffix; the base name before
    # ':' selects the value type.
    if not param.split(':')[0] in VAL_TYPE:
        raise RuntimeError('invalid parameter')
    val_type = VAL_TYPE[param.split(':')[0]]
    # Allocate an output buffer of the appropriate ctypes type.
    if val_type == 'xiTypeString':
        val_len = DWORD(buffer_size)
        val = create_string_buffer(val_len.value)
    elif val_type == 'xiTypeInteger' or \
        val_type == 'xiTypeEnum' or \
        val_type == 'xiTypeBoolean'or \
        val_type == 'xiTypeCommand' :
        val_len = DWORD(4)
        val = pointer(c_int())
    elif val_type == 'xiTypeFloat':
        val_len = DWORD(4)
        val = pointer(FLOAT())
    stat = self.device.xiGetParam(
        self.handle,
        prm,
        val,
        byref(val_len),
        byref(XI_PRM_TYPE[val_type])
    )
    if not stat == 0:
        raise Xi_error(stat)
    # Convert the ctypes result back to a native Python value.
    if val_type == 'xiTypeString':
        return val.value[:val_len.value]
    if val_type == 'xiTypeInteger' or val_type == 'xiTypeFloat':
        return val.contents.value
    if val_type == 'xiTypeEnum':
        # map the numeric enum constant back to its string name
        return _key_by_value(ASSOC_ENUM[param], val.contents)
    if val_type == 'xiTypeBoolean':
        return bool(val.contents.value)
#-------------------------------------------------------------------------------------------------------------------
# xiApi parameters
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Basic
#-------------------------------------------------------------------------------------------------------------------
def get_exposure(self):
    '''
    Get exposure time in microseconds. (XI_PRM_EXPOSURE)
    '''
    return self.get_param('exposure')
def get_exposure_maximum(self):
    '''
    Get maximum allowed exposure time in microseconds. (XI_PRM_EXPOSURE)
    '''
    return self.get_param('exposure:max')
def get_exposure_minimum(self):
    '''
    Get minimum allowed exposure time in microseconds. (XI_PRM_EXPOSURE)
    '''
    return self.get_param('exposure:min')
def get_exposure_increment(self):
    '''
    Get step size of exposure time in microseconds. (XI_PRM_EXPOSURE)
    '''
    return self.get_param('exposure:inc')
def set_exposure(self, exposure):
    '''
    Set exposure time in microseconds. (XI_PRM_EXPOSURE)
    '''
    self.set_param('exposure', exposure)
def get_exposure_burst_count(self):
    '''
    Get the number of times of exposure in one frame. (XI_PRM_EXPOSURE_BURST_COUNT)
    '''
    return self.get_param('exposure_burst_count')
def get_exposure_burst_count_maximum(self):
    '''
    Get maximum allowed exposure burst count. (XI_PRM_EXPOSURE_BURST_COUNT)
    '''
    return self.get_param('exposure_burst_count:max')
def get_exposure_burst_count_minimum(self):
    '''
    Get minimum allowed exposure burst count. (XI_PRM_EXPOSURE_BURST_COUNT)
    '''
    return self.get_param('exposure_burst_count:min')
def get_exposure_burst_count_increment(self):
    '''
    Get step size of exposure burst count. (XI_PRM_EXPOSURE_BURST_COUNT)
    '''
    return self.get_param('exposure_burst_count:inc')
def set_exposure_burst_count(self, exposure_burst_count):
    '''
    Set the number of times of exposure in one frame. (XI_PRM_EXPOSURE_BURST_COUNT)
    '''
    self.set_param('exposure_burst_count', exposure_burst_count)
def get_gain_selector(self):
    '''
    Get the gain selector; it allows parameter Gain to address different
    types of gains. (XI_PRM_GAIN_SELECTOR)
    '''
    return self.get_param('gain_selector')
def get_gain_selector_maximum(self):
    '''
    Get maximum allowed value of the gain selector. (XI_PRM_GAIN_SELECTOR)
    '''
    return self.get_param('gain_selector:max')
def get_gain_selector_minimum(self):
    '''
    Get minimum allowed value of the gain selector. (XI_PRM_GAIN_SELECTOR)
    '''
    return self.get_param('gain_selector:min')
def get_gain_selector_increment(self):
    '''
    Get step size of the gain selector. (XI_PRM_GAIN_SELECTOR)
    '''
    return self.get_param('gain_selector:inc')
def set_gain_selector(self, gain_selector):
    '''
    Set the gain selector; it allows parameter Gain to address different
    types of gains. (XI_PRM_GAIN_SELECTOR)
    '''
    self.set_param('gain_selector', gain_selector)
def get_gain(self):
    '''
    Get gain in dB. (XI_PRM_GAIN)
    '''
    return self.get_param('gain')
def get_gain_maximum(self):
    '''
    Get maximum allowed gain in dB. (XI_PRM_GAIN)
    '''
    return self.get_param('gain:max')
def get_gain_minimum(self):
    '''
    Get minimum allowed gain in dB. (XI_PRM_GAIN)
    '''
    return self.get_param('gain:min')
def get_gain_increment(self):
    '''
    Get step size of gain in dB. (XI_PRM_GAIN)
    '''
    return self.get_param('gain:inc')
def set_gain(self, gain):
    '''
    Set gain in dB. (XI_PRM_GAIN)
    '''
    self.set_param('gain', gain)
def get_downsampling(self):
    '''
    Get image resolution reduction mode (binning or skipping). (XI_PRM_DOWNSAMPLING)
    '''
    return self.get_param('downsampling')
def get_downsampling_maximum(self):
    '''
    Get maximum allowed downsampling value. (XI_PRM_DOWNSAMPLING)
    '''
    return self.get_param('downsampling:max')
def get_downsampling_minimum(self):
    '''
    Get minimum allowed downsampling value. (XI_PRM_DOWNSAMPLING)
    '''
    return self.get_param('downsampling:min')
def get_downsampling_increment(self):
    '''
    Get step size of the downsampling value. (XI_PRM_DOWNSAMPLING)
    '''
    return self.get_param('downsampling:inc')
def set_downsampling(self, downsampling):
    '''
    Change image resolution by binning or skipping. (XI_PRM_DOWNSAMPLING)
    '''
    self.set_param('downsampling', downsampling)
def get_downsampling_type(self):
    '''
    Get image downsampling type. (XI_PRM_DOWNSAMPLING_TYPE)
    '''
    return self.get_param('downsampling_type')
def get_downsampling_type_maximum(self):
    '''
    Get maximum allowed downsampling type value. (XI_PRM_DOWNSAMPLING_TYPE)
    '''
    return self.get_param('downsampling_type:max')
def get_downsampling_type_minimum(self):
    '''
    Get minimum allowed downsampling type value. (XI_PRM_DOWNSAMPLING_TYPE)
    '''
    return self.get_param('downsampling_type:min')
def get_downsampling_type_increment(self):
    '''
    Get step size of the downsampling type value. (XI_PRM_DOWNSAMPLING_TYPE)
    '''
    return self.get_param('downsampling_type:inc')
def set_downsampling_type(self, downsampling_type):
    '''
    Change image downsampling type. (XI_PRM_DOWNSAMPLING_TYPE)
    '''
    self.set_param('downsampling_type', downsampling_type)
def get_binning_selector(self):
    '''
    Get the binning engine selector. (XI_PRM_BINNING_SELECTOR)
    '''
    return self.get_param('binning_selector')
def get_binning_selector_maximum(self):
    '''
    Get maximum allowed binning engine selector value. (XI_PRM_BINNING_SELECTOR)
    '''
    return self.get_param('binning_selector:max')
def get_binning_selector_minimum(self):
    '''
    Get minimum allowed binning engine selector value. (XI_PRM_BINNING_SELECTOR)
    '''
    return self.get_param('binning_selector:min')
def get_binning_selector_increment(self):
    '''
    Get step size of the binning engine selector value. (XI_PRM_BINNING_SELECTOR)
    '''
    return self.get_param('binning_selector:inc')
def set_binning_selector(self, binning_selector):
    '''
    Set the binning engine selector. (XI_PRM_BINNING_SELECTOR)
    '''
    self.set_param('binning_selector', binning_selector)
def get_binning_vertical_mode(self):
    '''
    Get the mode used to combine vertical pixels together. (XI_PRM_BINNING_VERTICAL_MODE)
    '''
    return self.get_param('binning_vertical_mode')
def get_binning_vertical_mode_maximum(self):
    '''
    Get maximum allowed vertical binning mode value. (XI_PRM_BINNING_VERTICAL_MODE)
    '''
    return self.get_param('binning_vertical_mode:max')
def get_binning_vertical_mode_minimum(self):
    '''
    Get minimum allowed vertical binning mode value. (XI_PRM_BINNING_VERTICAL_MODE)
    '''
    return self.get_param('binning_vertical_mode:min')
def get_binning_vertical_mode_increment(self):
    '''
    Get step size of the vertical binning mode value. (XI_PRM_BINNING_VERTICAL_MODE)
    '''
    return self.get_param('binning_vertical_mode:inc')
def set_binning_vertical_mode(self, binning_vertical_mode):
    '''
    Set the mode used to combine vertical pixels together. (XI_PRM_BINNING_VERTICAL_MODE)
    '''
    self.set_param('binning_vertical_mode', binning_vertical_mode)
def get_binning_vertical(self):
    '''
    Get vertical binning: number of vertical photo-sensitive cells
    combined together. (XI_PRM_BINNING_VERTICAL)
    '''
    return self.get_param('binning_vertical')
def get_binning_vertical_maximum(self):
    '''
    Get maximum allowed vertical binning value. (XI_PRM_BINNING_VERTICAL)
    '''
    return self.get_param('binning_vertical:max')
def get_binning_vertical_minimum(self):
    '''
    Get minimum allowed vertical binning value. (XI_PRM_BINNING_VERTICAL)
    '''
    return self.get_param('binning_vertical:min')
def get_binning_vertical_increment(self):
    '''
    Get step size of the vertical binning value. (XI_PRM_BINNING_VERTICAL)
    '''
    return self.get_param('binning_vertical:inc')
def set_binning_vertical(self, binning_vertical):
    '''
    Set vertical binning: number of vertical photo-sensitive cells to
    combine together. (XI_PRM_BINNING_VERTICAL)
    '''
    self.set_param('binning_vertical', binning_vertical)
def get_binning_horizontal_mode(self):
    '''
    Get the mode used to combine horizontal pixels together. (XI_PRM_BINNING_HORIZONTAL_MODE)
    '''
    return self.get_param('binning_horizontal_mode')
def get_binning_horizontal_mode_maximum(self):
    '''
    Get maximum allowed horizontal binning mode value. (XI_PRM_BINNING_HORIZONTAL_MODE)
    '''
    return self.get_param('binning_horizontal_mode:max')
def get_binning_horizontal_mode_minimum(self):
    '''
    Get minimum allowed horizontal binning mode value. (XI_PRM_BINNING_HORIZONTAL_MODE)
    '''
    return self.get_param('binning_horizontal_mode:min')
def get_binning_horizontal_mode_increment(self):
    '''
    Get step size of the horizontal binning mode value. (XI_PRM_BINNING_HORIZONTAL_MODE)
    '''
    return self.get_param('binning_horizontal_mode:inc')
def set_binning_horizontal_mode(self, binning_horizontal_mode):
    '''
    Set the mode used to combine horizontal pixels together. (XI_PRM_BINNING_HORIZONTAL_MODE)
    '''
    self.set_param('binning_horizontal_mode', binning_horizontal_mode)
def get_binning_horizontal(self):
    '''
    Get horizontal binning: number of horizontal photo-sensitive cells
    combined together. (XI_PRM_BINNING_HORIZONTAL)
    '''
    return self.get_param('binning_horizontal')
def get_binning_horizontal_maximum(self):
    '''
    Get maximum allowed horizontal binning value. (XI_PRM_BINNING_HORIZONTAL)
    '''
    return self.get_param('binning_horizontal:max')
def get_binning_horizontal_minimum(self):
    '''
    Get minimum allowed horizontal binning value. (XI_PRM_BINNING_HORIZONTAL)
    '''
    return self.get_param('binning_horizontal:min')
def get_binning_horizontal_increment(self):
    '''
    Get step size of the horizontal binning value. (XI_PRM_BINNING_HORIZONTAL)
    '''
    return self.get_param('binning_horizontal:inc')
def set_binning_horizontal(self, binning_horizontal):
    '''
    Set horizontal binning: number of horizontal photo-sensitive cells to
    combine together. (XI_PRM_BINNING_HORIZONTAL)
    '''
    self.set_param('binning_horizontal', binning_horizontal)
def get_binning_horizontal_pattern(self):
    '''
    Get binning horizontal pattern type. (XI_PRM_BINNING_HORIZONTAL_PATTERN)
    '''
    return self.get_param('binning_horizontal_pattern')
def get_binning_horizontal_pattern_maximum(self):
    '''
    Get maximum allowed horizontal binning pattern value. (XI_PRM_BINNING_HORIZONTAL_PATTERN)
    '''
    return self.get_param('binning_horizontal_pattern:max')
def get_binning_horizontal_pattern_minimum(self):
    '''
    Get minimum allowed horizontal binning pattern value. (XI_PRM_BINNING_HORIZONTAL_PATTERN)
    '''
    return self.get_param('binning_horizontal_pattern:min')
def get_binning_horizontal_pattern_increment(self):
    '''
    Get step size of the horizontal binning pattern value. (XI_PRM_BINNING_HORIZONTAL_PATTERN)
    '''
    return self.get_param('binning_horizontal_pattern:inc')
def set_binning_horizontal_pattern(self, binning_horizontal_pattern):
    '''
    Set binning horizontal pattern type. (XI_PRM_BINNING_HORIZONTAL_PATTERN)
    '''
    self.set_param('binning_horizontal_pattern', binning_horizontal_pattern)
def get_binning_vertical_pattern(self):
    '''
    Get binning vertical pattern type. (XI_PRM_BINNING_VERTICAL_PATTERN)
    '''
    return self.get_param('binning_vertical_pattern')
def get_binning_vertical_pattern_maximum(self):
    '''
    Get maximum allowed vertical binning pattern value. (XI_PRM_BINNING_VERTICAL_PATTERN)
    '''
    return self.get_param('binning_vertical_pattern:max')
def get_binning_vertical_pattern_minimum(self):
    '''
    Get minimum allowed vertical binning pattern value. (XI_PRM_BINNING_VERTICAL_PATTERN)
    '''
    return self.get_param('binning_vertical_pattern:min')
def get_binning_vertical_pattern_increment(self):
    '''
    Get step size of the vertical binning pattern value. (XI_PRM_BINNING_VERTICAL_PATTERN)
    '''
    return self.get_param('binning_vertical_pattern:inc')
def set_binning_vertical_pattern(self, binning_vertical_pattern):
    '''
    Set binning vertical pattern type. (XI_PRM_BINNING_VERTICAL_PATTERN)
    '''
    self.set_param('binning_vertical_pattern', binning_vertical_pattern)
def get_decimation_selector(self):
    '''
    Get the decimation engine selector. (XI_PRM_DECIMATION_SELECTOR)
    '''
    return self.get_param('decimation_selector')
def get_decimation_selector_maximum(self):
    '''
    Get maximum allowed decimation engine selector value. (XI_PRM_DECIMATION_SELECTOR)
    '''
    return self.get_param('decimation_selector:max')
def get_decimation_selector_minimum(self):
    '''
    Get minimum allowed decimation engine selector value. (XI_PRM_DECIMATION_SELECTOR)
    '''
    return self.get_param('decimation_selector:min')
def get_decimation_selector_increment(self):
    '''
    Get step size of the decimation engine selector value. (XI_PRM_DECIMATION_SELECTOR)
    '''
    return self.get_param('decimation_selector:inc')
def set_decimation_selector(self, decimation_selector):
    '''
    Set the decimation engine selector. (XI_PRM_DECIMATION_SELECTOR)
    '''
    self.set_param('decimation_selector', decimation_selector)
def get_decimation_vertical(self):
    '''
    Get vertical decimation: vertical sub-sampling that reduces the
    vertical resolution of the image by the specified factor. (XI_PRM_DECIMATION_VERTICAL)
    '''
    return self.get_param('decimation_vertical')
def get_decimation_vertical_maximum(self):
    '''
    Get maximum allowed vertical decimation factor. (XI_PRM_DECIMATION_VERTICAL)
    '''
    return self.get_param('decimation_vertical:max')
def get_decimation_vertical_minimum(self):
    '''
    Get minimum allowed vertical decimation factor. (XI_PRM_DECIMATION_VERTICAL)
    '''
    return self.get_param('decimation_vertical:min')
def get_decimation_vertical_increment(self):
'''
Vertical Decimation - vertical sub-sampling of the image - reduces the vertical resolution of the image by the specified vertical decimation factor.XI_PRM_DECIMATION_VERTICAL
'''
return self.get_param('decimation_vertical:inc')
def set_decimation_vertical(self, decimation_vertical):
'''
Vertical Decimation - vertical sub-sampling of the image - reduces the vertical resolution of the image by the specified vertical decimation factor.XI_PRM_DECIMATION_VERTICAL
'''
self.set_param('decimation_vertical', decimation_vertical)
def get_decimation_horizontal(self):
'''
Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified vertical decimation factor.XI_PRM_DECIMATION_HORIZONTAL
'''
return self.get_param('decimation_horizontal')
def get_decimation_horizontal_maximum(self):
'''
Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified vertical decimation factor.XI_PRM_DECIMATION_HORIZONTAL
'''
return self.get_param('decimation_horizontal:max')
def get_decimation_horizontal_minimum(self):
'''
Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified vertical decimation factor.XI_PRM_DECIMATION_HORIZONTAL
'''
return self.get_param('decimation_horizontal:min')
def get_decimation_horizontal_increment(self):
'''
Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified vertical decimation factor.XI_PRM_DECIMATION_HORIZONTAL
'''
return self.get_param('decimation_horizontal:inc')
def set_decimation_horizontal(self, decimation_horizontal):
'''
Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified vertical decimation factor.XI_PRM_DECIMATION_HORIZONTAL
'''
self.set_param('decimation_horizontal', decimation_horizontal)
def get_decimation_horizontal_pattern(self):
'''
Decimation horizontal pattern type.XI_PRM_DECIMATION_HORIZONTAL_PATTERN
'''
return self.get_param('decimation_horizontal_pattern')
def get_decimation_horizontal_pattern_maximum(self):
'''
Decimation horizontal pattern type.XI_PRM_DECIMATION_HORIZONTAL_PATTERN
'''
return self.get_param('decimation_horizontal_pattern:max')
def get_decimation_horizontal_pattern_minimum(self):
'''
Decimation horizontal pattern type.XI_PRM_DECIMATION_HORIZONTAL_PATTERN
'''
return self.get_param('decimation_horizontal_pattern:min')
def get_decimation_horizontal_pattern_increment(self):
'''
Decimation horizontal pattern type.XI_PRM_DECIMATION_HORIZONTAL_PATTERN
'''
return self.get_param('decimation_horizontal_pattern:inc')
def set_decimation_horizontal_pattern(self, decimation_horizontal_pattern):
'''
Decimation horizontal pattern type.XI_PRM_DECIMATION_HORIZONTAL_PATTERN
'''
self.set_param('decimation_horizontal_pattern', decimation_horizontal_pattern)
def get_decimation_vertical_pattern(self):
'''
Decimation vertical pattern type.XI_PRM_DECIMATION_VERTICAL_PATTERN
'''
return self.get_param('decimation_vertical_pattern')
def get_decimation_vertical_pattern_maximum(self):
'''
Decimation vertical pattern type.XI_PRM_DECIMATION_VERTICAL_PATTERN
'''
return self.get_param('decimation_vertical_pattern:max')
def get_decimation_vertical_pattern_minimum(self):
'''
Decimation vertical pattern type.XI_PRM_DECIMATION_VERTICAL_PATTERN
'''
return self.get_param('decimation_vertical_pattern:min')
def get_decimation_vertical_pattern_increment(self):
'''
Decimation vertical pattern type.XI_PRM_DECIMATION_VERTICAL_PATTERN
'''
return self.get_param('decimation_vertical_pattern:inc')
def set_decimation_vertical_pattern(self, decimation_vertical_pattern):
'''
Decimation vertical pattern type.XI_PRM_DECIMATION_VERTICAL_PATTERN
'''
self.set_param('decimation_vertical_pattern', decimation_vertical_pattern)
def get_test_pattern_generator_selector(self):
'''
Selects which test pattern generator is controlled by the TestPattern feature.XI_PRM_TEST_PATTERN_GENERATOR_SELECTOR
'''
return self.get_param('test_pattern_generator_selector')
def get_test_pattern_generator_selector_maximum(self):
'''
Selects which test pattern generator is controlled by the TestPattern feature.XI_PRM_TEST_PATTERN_GENERATOR_SELECTOR
'''
return self.get_param('test_pattern_generator_selector:max')
def get_test_pattern_generator_selector_minimum(self):
'''
Selects which test pattern generator is controlled by the TestPattern feature.XI_PRM_TEST_PATTERN_GENERATOR_SELECTOR
'''
return self.get_param('test_pattern_generator_selector:min')
def get_test_pattern_generator_selector_increment(self):
'''
Selects which test pattern generator is controlled by the TestPattern feature.XI_PRM_TEST_PATTERN_GENERATOR_SELECTOR
'''
return self.get_param('test_pattern_generator_selector:inc')
def set_test_pattern_generator_selector(self, test_pattern_generator_selector):
'''
Selects which test pattern generator is controlled by the TestPattern feature.XI_PRM_TEST_PATTERN_GENERATOR_SELECTOR
'''
self.set_param('test_pattern_generator_selector', test_pattern_generator_selector)
def get_test_pattern(self):
'''
Selects which test pattern type is generated by the selected generator.XI_PRM_TEST_PATTERN
'''
return self.get_param('test_pattern')
def get_test_pattern_maximum(self):
'''
Selects which test pattern type is generated by the selected generator.XI_PRM_TEST_PATTERN
'''
return self.get_param('test_pattern:max')
def get_test_pattern_minimum(self):
'''
Selects which test pattern type is generated by the selected generator.XI_PRM_TEST_PATTERN
'''
return self.get_param('test_pattern:min')
def get_test_pattern_increment(self):
'''
Selects which test pattern type is generated by the selected generator.XI_PRM_TEST_PATTERN
'''
return self.get_param('test_pattern:inc')
def set_test_pattern(self, test_pattern):
'''
Selects which test pattern type is generated by the selected generator.XI_PRM_TEST_PATTERN
'''
self.set_param('test_pattern', test_pattern)
def get_imgdataformat(self):
'''
Output data format.XI_PRM_IMAGE_DATA_FORMAT
'''
return self.get_param('imgdataformat')
def get_imgdataformat_maximum(self):
'''
Output data format.XI_PRM_IMAGE_DATA_FORMAT
'''
return self.get_param('imgdataformat:max')
def get_imgdataformat_minimum(self):
'''
Output data format.XI_PRM_IMAGE_DATA_FORMAT
'''
return self.get_param('imgdataformat:min')
def get_imgdataformat_increment(self):
'''
Output data format.XI_PRM_IMAGE_DATA_FORMAT
'''
return self.get_param('imgdataformat:inc')
def set_imgdataformat(self, imgdataformat):
'''
Output data format.XI_PRM_IMAGE_DATA_FORMAT
'''
self.set_param('imgdataformat', imgdataformat)
def get_shutter_type(self):
'''
Change sensor shutter type(CMOS sensor).XI_PRM_SHUTTER_TYPE
'''
return self.get_param('shutter_type')
def get_shutter_type_maximum(self):
'''
Change sensor shutter type(CMOS sensor).XI_PRM_SHUTTER_TYPE
'''
return self.get_param('shutter_type:max')
def get_shutter_type_minimum(self):
'''
Change sensor shutter type(CMOS sensor).XI_PRM_SHUTTER_TYPE
'''
return self.get_param('shutter_type:min')
def get_shutter_type_increment(self):
'''
Change sensor shutter type(CMOS sensor).XI_PRM_SHUTTER_TYPE
'''
return self.get_param('shutter_type:inc')
def set_shutter_type(self, shutter_type):
'''
Change sensor shutter type(CMOS sensor).XI_PRM_SHUTTER_TYPE
'''
self.set_param('shutter_type', shutter_type)
def get_sensor_taps(self):
'''
Number of tapsXI_PRM_SENSOR_TAPS
'''
return self.get_param('sensor_taps')
def get_sensor_taps_maximum(self):
'''
Number of tapsXI_PRM_SENSOR_TAPS
'''
return self.get_param('sensor_taps:max')
def get_sensor_taps_minimum(self):
'''
Number of tapsXI_PRM_SENSOR_TAPS
'''
return self.get_param('sensor_taps:min')
def get_sensor_taps_increment(self):
'''
Number of tapsXI_PRM_SENSOR_TAPS
'''
return self.get_param('sensor_taps:inc')
def set_sensor_taps(self, sensor_taps):
'''
Number of tapsXI_PRM_SENSOR_TAPS
'''
self.set_param('sensor_taps', sensor_taps)
    def is_aeag(self):
        '''
        Query whether XI_PRM_AEAG (automatic exposure/gain) is enabled.

        Returns the parameter value as reported by self.get_param.
        '''
        return self.get_param('aeag')
    def enable_aeag(self):
        '''
        Enable XI_PRM_AEAG (automatic exposure/gain) by setting the
        parameter to True.
        '''
        self.set_param('aeag', True)
    def disable_aeag(self):
        '''
        Disable XI_PRM_AEAG (automatic exposure/gain) by setting the
        parameter to False.
        '''
        self.set_param('aeag', False)
def get_aeag_roi_offset_x(self):
'''
Automatic exposure/gain ROI offset XXI_PRM_AEAG_ROI_OFFSET_X
'''
return self.get_param('aeag_roi_offset_x')
def get_aeag_roi_offset_x_maximum(self):
'''
Automatic exposure/gain ROI offset XXI_PRM_AEAG_ROI_OFFSET_X
'''
return self.get_param('aeag_roi_offset_x:max')
def get_aeag_roi_offset_x_minimum(self):
'''
Automatic exposure/gain ROI offset XXI_PRM_AEAG_ROI_OFFSET_X
'''
return self.get_param('aeag_roi_offset_x:min')
def get_aeag_roi_offset_x_increment(self):
'''
Automatic exposure/gain ROI offset XXI_PRM_AEAG_ROI_OFFSET_X
'''
return self.get_param('aeag_roi_offset_x:inc')
def set_aeag_roi_offset_x(self, aeag_roi_offset_x):
'''
Automatic exposure/gain ROI offset XXI_PRM_AEAG_ROI_OFFSET_X
'''
self.set_param('aeag_roi_offset_x', aeag_roi_offset_x)
def get_aeag_roi_offset_y(self):
'''
Automatic exposure/gain ROI offset YXI_PRM_AEAG_ROI_OFFSET_Y
'''
return self.get_param('aeag_roi_offset_y')
def get_aeag_roi_offset_y_maximum(self):
'''
Automatic exposure/gain ROI offset YXI_PRM_AEAG_ROI_OFFSET_Y
'''
return self.get_param('aeag_roi_offset_y:max')
def get_aeag_roi_offset_y_minimum(self):
'''
Automatic exposure/gain ROI offset YXI_PRM_AEAG_ROI_OFFSET_Y
'''
return self.get_param('aeag_roi_offset_y:min')
def get_aeag_roi_offset_y_increment(self):
'''
Automatic exposure/gain ROI offset YXI_PRM_AEAG_ROI_OFFSET_Y
'''
return self.get_param('aeag_roi_offset_y:inc')
def set_aeag_roi_offset_y(self, aeag_roi_offset_y):
'''
Automatic exposure/gain ROI offset YXI_PRM_AEAG_ROI_OFFSET_Y
'''
self.set_param('aeag_roi_offset_y', aeag_roi_offset_y)
def get_aeag_roi_width(self):
'''
Automatic exposure/gain ROI WidthXI_PRM_AEAG_ROI_WIDTH
'''
return self.get_param('aeag_roi_width')
def get_aeag_roi_width_maximum(self):
'''
Automatic exposure/gain ROI WidthXI_PRM_AEAG_ROI_WIDTH
'''
return self.get_param('aeag_roi_width:max')
def get_aeag_roi_width_minimum(self):
'''
Automatic exposure/gain ROI WidthXI_PRM_AEAG_ROI_WIDTH
'''
return self.get_param('aeag_roi_width:min')
def get_aeag_roi_width_increment(self):
'''
Automatic exposure/gain ROI WidthXI_PRM_AEAG_ROI_WIDTH
'''
return self.get_param('aeag_roi_width:inc')
def set_aeag_roi_width(self, aeag_roi_width):
'''
Automatic exposure/gain ROI WidthXI_PRM_AEAG_ROI_WIDTH
'''
self.set_param('aeag_roi_width', aeag_roi_width)
def get_aeag_roi_height(self):
'''
Automatic exposure/gain ROI HeightXI_PRM_AEAG_ROI_HEIGHT
'''
return self.get_param('aeag_roi_height')
def get_aeag_roi_height_maximum(self):
'''
Automatic exposure/gain ROI HeightXI_PRM_AEAG_ROI_HEIGHT
'''
return self.get_param('aeag_roi_height:max')
def get_aeag_roi_height_minimum(self):
'''
Automatic exposure/gain ROI HeightXI_PRM_AEAG_ROI_HEIGHT
'''
return self.get_param('aeag_roi_height:min')
def get_aeag_roi_height_increment(self):
'''
Automatic exposure/gain ROI HeightXI_PRM_AEAG_ROI_HEIGHT
'''
return self.get_param('aeag_roi_height:inc')
def set_aeag_roi_height(self, aeag_roi_height):
'''
Automatic exposure/gain ROI HeightXI_PRM_AEAG_ROI_HEIGHT
'''
self.set_param('aeag_roi_height', aeag_roi_height)
def is_bpc(self):
'''
Correction of sensor defects (pixels, columns, rows) enable/disableXI_PRM_BPC
'''
return self.get_param('bpc')
def enable_bpc(self):
'''
Correction of sensor defects (pixels, columns, rows) enable/disableXI_PRM_BPC
'''
self.set_param('bpc', True)
def disable_bpc(self):
'''
Correction of sensor defects (pixels, columns, rows) enable/disableXI_PRM_BPC
'''
self.set_param('bpc', False)
def is_auto_wb(self):
'''
Automatic white balanceXI_PRM_AUTO_WB
'''
return self.get_param('auto_wb')
def enable_auto_wb(self):
'''
Automatic white balanceXI_PRM_AUTO_WB
'''
self.set_param('auto_wb', True)
def disable_auto_wb(self):
'''
Automatic white balanceXI_PRM_AUTO_WB
'''
self.set_param('auto_wb', False)
def get_manual_wb(self):
'''
Calculates White Balance(xiGetImage function must be called)XI_PRM_MANUAL_WB
'''
return self.get_param('manual_wb')
def get_manual_wb_maximum(self):
'''
Calculates White Balance(xiGetImage function must be called)XI_PRM_MANUAL_WB
'''
return self.get_param('manual_wb:max')
def get_manual_wb_minimum(self):
'''
Calculates White Balance(xiGetImage function must be called)XI_PRM_MANUAL_WB
'''
return self.get_param('manual_wb:min')
def get_manual_wb_increment(self):
'''
Calculates White Balance(xiGetImage function must be called)XI_PRM_MANUAL_WB
'''
return self.get_param('manual_wb:inc')
def set_manual_wb(self, manual_wb):
'''
Calculates White Balance(xiGetImage function must be called)XI_PRM_MANUAL_WB
'''
self.set_param('manual_wb', manual_wb)
def get_wb_kr(self):
'''
White balance red coefficientXI_PRM_WB_KR
'''
return self.get_param('wb_kr')
def get_wb_kr_maximum(self):
'''
White balance red coefficientXI_PRM_WB_KR
'''
return self.get_param('wb_kr:max')
def get_wb_kr_minimum(self):
'''
White balance red coefficientXI_PRM_WB_KR
'''
return self.get_param('wb_kr:min')
def get_wb_kr_increment(self):
'''
White balance red coefficientXI_PRM_WB_KR
'''
return self.get_param('wb_kr:inc')
def set_wb_kr(self, wb_kr):
'''
White balance red coefficientXI_PRM_WB_KR
'''
self.set_param('wb_kr', wb_kr)
def get_wb_kg(self):
'''
White balance green coefficientXI_PRM_WB_KG
'''
return self.get_param('wb_kg')
def get_wb_kg_maximum(self):
'''
White balance green coefficientXI_PRM_WB_KG
'''
return self.get_param('wb_kg:max')
def get_wb_kg_minimum(self):
'''
White balance green coefficientXI_PRM_WB_KG
'''
return self.get_param('wb_kg:min')
def get_wb_kg_increment(self):
'''
White balance green coefficientXI_PRM_WB_KG
'''
return self.get_param('wb_kg:inc')
def set_wb_kg(self, wb_kg):
'''
White balance green coefficientXI_PRM_WB_KG
'''
self.set_param('wb_kg', wb_kg)
def get_wb_kb(self):
'''
White balance blue coefficientXI_PRM_WB_KB
'''
return self.get_param('wb_kb')
def get_wb_kb_maximum(self):
'''
White balance blue coefficientXI_PRM_WB_KB
'''
return self.get_param('wb_kb:max')
def get_wb_kb_minimum(self):
'''
White balance blue coefficientXI_PRM_WB_KB
'''
return self.get_param('wb_kb:min')
def get_wb_kb_increment(self):
'''
White balance blue coefficientXI_PRM_WB_KB
'''
return self.get_param('wb_kb:inc')
def set_wb_kb(self, wb_kb):
'''
White balance blue coefficientXI_PRM_WB_KB
'''
self.set_param('wb_kb', wb_kb)
    def get_width(self):
        '''
        Get current value of XI_PRM_WIDTH: width of the image provided
        by the device (in pixels).
        '''
        return self.get_param('width')
    def get_width_maximum(self):
        '''
        Get maximum supported value of XI_PRM_WIDTH (image width in
        pixels), queried via the ':max' modifier.
        '''
        return self.get_param('width:max')
    def get_width_minimum(self):
        '''
        Get minimum supported value of XI_PRM_WIDTH (image width in
        pixels), queried via the ':min' modifier.
        '''
        return self.get_param('width:min')
    def get_width_increment(self):
        '''
        Get the step (increment) between valid values of XI_PRM_WIDTH
        (image width in pixels), queried via the ':inc' modifier.
        '''
        return self.get_param('width:inc')
def set_width(self, width):
'''
Width of the Image provided by the device (in pixels).XI_PRM_WIDTH
'''
self.set_param('width', width)
def get_height(self):
'''
Height of the Image provided by the device (in pixels).XI_PRM_HEIGHT
'''
return self.get_param('height')
def get_height_maximum(self):
'''
Height of the Image provided by the device (in pixels).XI_PRM_HEIGHT
'''
return self.get_param('height:max')
def get_height_minimum(self):
'''
Height of the Image provided by the device (in pixels).XI_PRM_HEIGHT
'''
return self.get_param('height:min')
def get_height_increment(self):
'''
Height of the Image provided by the device (in pixels).XI_PRM_HEIGHT
'''
return self.get_param('height:inc')
def set_height(self, height):
'''
Height of the Image provided by the device (in pixels).XI_PRM_HEIGHT
'''
self.set_param('height', height)
def get_offsetX(self):
'''
Horizontal offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_X
'''
return self.get_param('offsetX')
def get_offsetX_maximum(self):
'''
Horizontal offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_X
'''
return self.get_param('offsetX:max')
def get_offsetX_minimum(self):
'''
Horizontal offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_X
'''
return self.get_param('offsetX:min')
def get_offsetX_increment(self):
'''
Horizontal offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_X
'''
return self.get_param('offsetX:inc')
def set_offsetX(self, offsetX):
'''
Horizontal offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_X
'''
self.set_param('offsetX', offsetX)
def get_offsetY(self):
'''
Vertical offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_Y
'''
return self.get_param('offsetY')
def get_offsetY_maximum(self):
'''
Vertical offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_Y
'''
return self.get_param('offsetY:max')
def get_offsetY_minimum(self):
'''
Vertical offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_Y
'''
return self.get_param('offsetY:min')
def get_offsetY_increment(self):
'''
Vertical offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_Y
'''
return self.get_param('offsetY:inc')
def set_offsetY(self, offsetY):
'''
Vertical offset from the origin to the area of interest (in pixels).XI_PRM_OFFSET_Y
'''
self.set_param('offsetY', offsetY)
def get_region_selector(self):
'''
Selects Region in Multiple ROI which parameters are set by width, height, ... ,region modeXI_PRM_REGION_SELECTOR
'''
return self.get_param('region_selector')
def get_region_selector_maximum(self):
'''
Selects Region in Multiple ROI which parameters are set by width, height, ... ,region modeXI_PRM_REGION_SELECTOR
'''
return self.get_param('region_selector:max')
def get_region_selector_minimum(self):
'''
Selects Region in Multiple ROI which parameters are set by width, height, ... ,region modeXI_PRM_REGION_SELECTOR
'''
return self.get_param('region_selector:min')
def get_region_selector_increment(self):
'''
Selects Region in Multiple ROI which parameters are set by width, height, ... ,region modeXI_PRM_REGION_SELECTOR
'''
return self.get_param('region_selector:inc')
def set_region_selector(self, region_selector):
'''
Selects Region in Multiple ROI which parameters are set by width, height, ... ,region modeXI_PRM_REGION_SELECTOR
'''
self.set_param('region_selector', region_selector)
def get_region_mode(self):
'''
Activates/deactivates Region selected by Region SelectorXI_PRM_REGION_MODE
'''
return self.get_param('region_mode')
def get_region_mode_maximum(self):
'''
Activates/deactivates Region selected by Region SelectorXI_PRM_REGION_MODE
'''
return self.get_param('region_mode:max')
def get_region_mode_minimum(self):
'''
Activates/deactivates Region selected by Region SelectorXI_PRM_REGION_MODE
'''
return self.get_param('region_mode:min')
def get_region_mode_increment(self):
'''
Activates/deactivates Region selected by Region SelectorXI_PRM_REGION_MODE
'''
return self.get_param('region_mode:inc')
def set_region_mode(self, region_mode):
'''
Activates/deactivates Region selected by Region SelectorXI_PRM_REGION_MODE
'''
self.set_param('region_mode', region_mode)
def is_horizontal_flip(self):
'''
Horizontal flip enableXI_PRM_HORIZONTAL_FLIP
'''
return self.get_param('horizontal_flip')
def enable_horizontal_flip(self):
'''
Horizontal flip enableXI_PRM_HORIZONTAL_FLIP
'''
self.set_param('horizontal_flip', True)
def disable_horizontal_flip(self):
'''
Horizontal flip enableXI_PRM_HORIZONTAL_FLIP
'''
self.set_param('horizontal_flip', False)
def is_vertical_flip(self):
'''
Vertical flip enableXI_PRM_VERTICAL_FLIP
'''
return self.get_param('vertical_flip')
def enable_vertical_flip(self):
'''
Vertical flip enableXI_PRM_VERTICAL_FLIP
'''
self.set_param('vertical_flip', True)
def disable_vertical_flip(self):
'''
Vertical flip enableXI_PRM_VERTICAL_FLIP
'''
self.set_param('vertical_flip', False)
def is_ffc(self):
'''
Image flat field correctionXI_PRM_FFC
'''
return self.get_param('ffc')
def enable_ffc(self):
'''
Image flat field correctionXI_PRM_FFC
'''
self.set_param('ffc', True)
def disable_ffc(self):
'''
Image flat field correctionXI_PRM_FFC
'''
self.set_param('ffc', False)
    def get_ffc_flat_field_file_name(self,buffer_size=256):
        '''
        Get XI_PRM_FFC_FLAT_FIELD_FILE_NAME: name of the file applied
        by the flat-field-correction (FFC) processor.

        buffer_size -- size of the string buffer passed to get_param
                       (string-valued parameter; default 256 bytes).
        '''
        return self.get_param('ffc_flat_field_file_name',buffer_size)
    def set_ffc_flat_field_file_name(self, ffc_flat_field_file_name):
        '''
        Set XI_PRM_FFC_FLAT_FIELD_FILE_NAME: name of the file to be
        applied by the flat-field-correction (FFC) processor.
        '''
        self.set_param('ffc_flat_field_file_name', ffc_flat_field_file_name)
def get_ffc_dark_field_file_name(self,buffer_size=256):
'''
Set name of file to be applied for FFC processor.XI_PRM_FFC_DARK_FIELD_FILE_NAME
'''
return self.get_param('ffc_dark_field_file_name',buffer_size)
def set_ffc_dark_field_file_name(self, ffc_dark_field_file_name):
'''
Set name of file to be applied for FFC processor.XI_PRM_FFC_DARK_FIELD_FILE_NAME
'''
self.set_param('ffc_dark_field_file_name', ffc_dark_field_file_name)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: AE Setup
#-------------------------------------------------------------------------------------------------------------------
    def get_exp_priority(self):
        '''
        Get current value of XI_PRM_EXP_PRIORITY: exposure priority
        (e.g. 0.8 means exposure 80%, gain 20%).
        '''
        return self.get_param('exp_priority')
def get_exp_priority_maximum(self):
'''
Exposure priority (0.8 - exposure 80%, gain 20%).XI_PRM_EXP_PRIORITY
'''
return self.get_param('exp_priority:max')
def get_exp_priority_minimum(self):
'''
Exposure priority (0.8 - exposure 80%, gain 20%).XI_PRM_EXP_PRIORITY
'''
return self.get_param('exp_priority:min')
def get_exp_priority_increment(self):
'''
Exposure priority (0.8 - exposure 80%, gain 20%).XI_PRM_EXP_PRIORITY
'''
return self.get_param('exp_priority:inc')
def set_exp_priority(self, exp_priority):
'''
Exposure priority (0.8 - exposure 80%, gain 20%).XI_PRM_EXP_PRIORITY
'''
self.set_param('exp_priority', exp_priority)
def get_ag_max_limit(self):
'''
Maximum limit of gain in AEAG procedureXI_PRM_AG_MAX_LIMIT
'''
return self.get_param('ag_max_limit')
def get_ag_max_limit_maximum(self):
'''
Maximum limit of gain in AEAG procedureXI_PRM_AG_MAX_LIMIT
'''
return self.get_param('ag_max_limit:max')
def get_ag_max_limit_minimum(self):
'''
Maximum limit of gain in AEAG procedureXI_PRM_AG_MAX_LIMIT
'''
return self.get_param('ag_max_limit:min')
def get_ag_max_limit_increment(self):
'''
Maximum limit of gain in AEAG procedureXI_PRM_AG_MAX_LIMIT
'''
return self.get_param('ag_max_limit:inc')
def set_ag_max_limit(self, ag_max_limit):
'''
Maximum limit of gain in AEAG procedureXI_PRM_AG_MAX_LIMIT
'''
self.set_param('ag_max_limit', ag_max_limit)
def get_ae_max_limit(self):
'''
Maximum time (us) used for exposure in AEAG procedureXI_PRM_AE_MAX_LIMIT
'''
return self.get_param('ae_max_limit')
def get_ae_max_limit_maximum(self):
'''
Maximum time (us) used for exposure in AEAG procedureXI_PRM_AE_MAX_LIMIT
'''
return self.get_param('ae_max_limit:max')
def get_ae_max_limit_minimum(self):
'''
Maximum time (us) used for exposure in AEAG procedureXI_PRM_AE_MAX_LIMIT
'''
return self.get_param('ae_max_limit:min')
def get_ae_max_limit_increment(self):
'''
Maximum time (us) used for exposure in AEAG procedureXI_PRM_AE_MAX_LIMIT
'''
return self.get_param('ae_max_limit:inc')
def set_ae_max_limit(self, ae_max_limit):
'''
Maximum time (us) used for exposure in AEAG procedureXI_PRM_AE_MAX_LIMIT
'''
self.set_param('ae_max_limit', ae_max_limit)
def get_aeag_level(self):
'''
Average intensity of output signal AEAG should achieve(in %)XI_PRM_AEAG_LEVEL
'''
return self.get_param('aeag_level')
def get_aeag_level_maximum(self):
'''
Average intensity of output signal AEAG should achieve(in %)XI_PRM_AEAG_LEVEL
'''
return self.get_param('aeag_level:max')
def get_aeag_level_minimum(self):
'''
Average intensity of output signal AEAG should achieve(in %)XI_PRM_AEAG_LEVEL
'''
return self.get_param('aeag_level:min')
def get_aeag_level_increment(self):
'''
Average intensity of output signal AEAG should achieve(in %)XI_PRM_AEAG_LEVEL
'''
return self.get_param('aeag_level:inc')
def set_aeag_level(self, aeag_level):
'''
Average intensity of output signal AEAG should achieve(in %)XI_PRM_AEAG_LEVEL
'''
self.set_param('aeag_level', aeag_level)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Performance
#-------------------------------------------------------------------------------------------------------------------
def get_limit_bandwidth(self):
'''
Set/get bandwidth(datarate)(in Megabits)XI_PRM_LIMIT_BANDWIDTH
'''
return self.get_param('limit_bandwidth')
def get_limit_bandwidth_maximum(self):
'''
Set/get bandwidth(datarate)(in Megabits)XI_PRM_LIMIT_BANDWIDTH
'''
return self.get_param('limit_bandwidth:max')
def get_limit_bandwidth_minimum(self):
'''
Set/get bandwidth(datarate)(in Megabits)XI_PRM_LIMIT_BANDWIDTH
'''
return self.get_param('limit_bandwidth:min')
def get_limit_bandwidth_increment(self):
'''
Set/get bandwidth(datarate)(in Megabits)XI_PRM_LIMIT_BANDWIDTH
'''
return self.get_param('limit_bandwidth:inc')
def set_limit_bandwidth(self, limit_bandwidth):
'''
Set/get bandwidth(datarate)(in Megabits)XI_PRM_LIMIT_BANDWIDTH
'''
self.set_param('limit_bandwidth', limit_bandwidth)
def get_limit_bandwidth_mode(self):
'''
Bandwidth limit enabledXI_PRM_LIMIT_BANDWIDTH_MODE
'''
return self.get_param('limit_bandwidth_mode')
def get_limit_bandwidth_mode_maximum(self):
'''
Bandwidth limit enabledXI_PRM_LIMIT_BANDWIDTH_MODE
'''
return self.get_param('limit_bandwidth_mode:max')
def get_limit_bandwidth_mode_minimum(self):
'''
Bandwidth limit enabledXI_PRM_LIMIT_BANDWIDTH_MODE
'''
return self.get_param('limit_bandwidth_mode:min')
def get_limit_bandwidth_mode_increment(self):
'''
Bandwidth limit enabledXI_PRM_LIMIT_BANDWIDTH_MODE
'''
return self.get_param('limit_bandwidth_mode:inc')
def set_limit_bandwidth_mode(self, limit_bandwidth_mode):
'''
Bandwidth limit enabledXI_PRM_LIMIT_BANDWIDTH_MODE
'''
self.set_param('limit_bandwidth_mode', limit_bandwidth_mode)
def get_sensor_line_period(self):
'''
Image sensor line period in usXI_PRM_SENSOR_LINE_PERIOD
'''
return self.get_param('sensor_line_period')
def get_sensor_line_period_maximum(self):
'''
Image sensor line period in usXI_PRM_SENSOR_LINE_PERIOD
'''
return self.get_param('sensor_line_period:max')
def get_sensor_line_period_minimum(self):
'''
Image sensor line period in usXI_PRM_SENSOR_LINE_PERIOD
'''
return self.get_param('sensor_line_period:min')
def get_sensor_line_period_increment(self):
'''
Image sensor line period in usXI_PRM_SENSOR_LINE_PERIOD
'''
return self.get_param('sensor_line_period:inc')
def set_sensor_line_period(self, sensor_line_period):
'''
Image sensor line period in usXI_PRM_SENSOR_LINE_PERIOD
'''
self.set_param('sensor_line_period', sensor_line_period)
def get_sensor_bit_depth(self):
'''
Sensor output data bit depth.XI_PRM_SENSOR_DATA_BIT_DEPTH
'''
return self.get_param('sensor_bit_depth')
def get_sensor_bit_depth_maximum(self):
'''
Sensor output data bit depth.XI_PRM_SENSOR_DATA_BIT_DEPTH
'''
return self.get_param('sensor_bit_depth:max')
def get_sensor_bit_depth_minimum(self):
'''
Sensor output data bit depth.XI_PRM_SENSOR_DATA_BIT_DEPTH
'''
return self.get_param('sensor_bit_depth:min')
def get_sensor_bit_depth_increment(self):
'''
Sensor output data bit depth.XI_PRM_SENSOR_DATA_BIT_DEPTH
'''
return self.get_param('sensor_bit_depth:inc')
def set_sensor_bit_depth(self, sensor_bit_depth):
'''
Sensor output data bit depth.XI_PRM_SENSOR_DATA_BIT_DEPTH
'''
self.set_param('sensor_bit_depth', sensor_bit_depth)
def get_output_bit_depth(self):
'''
Device output data bit depth.XI_PRM_OUTPUT_DATA_BIT_DEPTH
'''
return self.get_param('output_bit_depth')
def get_output_bit_depth_maximum(self):
'''
Device output data bit depth.XI_PRM_OUTPUT_DATA_BIT_DEPTH
'''
return self.get_param('output_bit_depth:max')
def get_output_bit_depth_minimum(self):
'''
Device output data bit depth.XI_PRM_OUTPUT_DATA_BIT_DEPTH
'''
return self.get_param('output_bit_depth:min')
def get_output_bit_depth_increment(self):
'''
Device output data bit depth.XI_PRM_OUTPUT_DATA_BIT_DEPTH
'''
return self.get_param('output_bit_depth:inc')
def set_output_bit_depth(self, output_bit_depth):
'''
Device output data bit depth.XI_PRM_OUTPUT_DATA_BIT_DEPTH
'''
self.set_param('output_bit_depth', output_bit_depth)
def get_image_data_bit_depth(self):
'''
bitdepth of data returned by function xiGetImageXI_PRM_IMAGE_DATA_BIT_DEPTH
'''
return self.get_param('image_data_bit_depth')
def get_image_data_bit_depth_maximum(self):
'''
bitdepth of data returned by function xiGetImageXI_PRM_IMAGE_DATA_BIT_DEPTH
'''
return self.get_param('image_data_bit_depth:max')
def get_image_data_bit_depth_minimum(self):
'''
bitdepth of data returned by function xiGetImageXI_PRM_IMAGE_DATA_BIT_DEPTH
'''
return self.get_param('image_data_bit_depth:min')
def get_image_data_bit_depth_increment(self):
'''
bitdepth of data returned by function xiGetImageXI_PRM_IMAGE_DATA_BIT_DEPTH
'''
return self.get_param('image_data_bit_depth:inc')
def set_image_data_bit_depth(self, image_data_bit_depth):
'''
bitdepth of data returned by function xiGetImageXI_PRM_IMAGE_DATA_BIT_DEPTH
'''
self.set_param('image_data_bit_depth', image_data_bit_depth)
    def is_output_bit_packing(self):
        '''
        True if device output data packing (or grouping) is enabled. Packing could be enabled if output_data_bit_depth > 8 and packing capability is available. (XI_PRM_OUTPUT_DATA_PACKING)
        '''
        return self.get_param('output_bit_packing')
    def enable_output_bit_packing(self):
        '''
        Enable device output data packing (or grouping). Packing could be enabled if output_data_bit_depth > 8 and packing capability is available. (XI_PRM_OUTPUT_DATA_PACKING)
        '''
        self.set_param('output_bit_packing', True)
    def disable_output_bit_packing(self):
        '''
        Disable device output data packing (or grouping). (XI_PRM_OUTPUT_DATA_PACKING)
        '''
        self.set_param('output_bit_packing', False)
    def get_output_bit_packing_type(self):
        '''
        Data packing type. Some cameras support only a specific packing type. (XI_PRM_OUTPUT_DATA_PACKING_TYPE)
        '''
        return self.get_param('output_bit_packing_type')
    def get_output_bit_packing_type_maximum(self):
        '''
        Data packing type. Maximum value. (XI_PRM_OUTPUT_DATA_PACKING_TYPE)
        '''
        return self.get_param('output_bit_packing_type:max')
    def get_output_bit_packing_type_minimum(self):
        '''
        Data packing type. Minimum value. (XI_PRM_OUTPUT_DATA_PACKING_TYPE)
        '''
        return self.get_param('output_bit_packing_type:min')
    def get_output_bit_packing_type_increment(self):
        '''
        Data packing type. Increment step. (XI_PRM_OUTPUT_DATA_PACKING_TYPE)
        '''
        return self.get_param('output_bit_packing_type:inc')
    def set_output_bit_packing_type(self, output_bit_packing_type):
        '''
        Set data packing type. Some cameras support only a specific packing type. (XI_PRM_OUTPUT_DATA_PACKING_TYPE)
        '''
        self.set_param('output_bit_packing_type', output_bit_packing_type)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Temperature
#-------------------------------------------------------------------------------------------------------------------
    def is_iscooled(self):
        '''
        Returns 1 for cameras that support cooling. (XI_PRM_IS_COOLED)
        '''
        return self.get_param('iscooled')
    def get_cooling(self):
        '''
        Temperature control mode. (XI_PRM_COOLING)
        '''
        return self.get_param('cooling')
    def get_cooling_maximum(self):
        '''
        Temperature control mode. Maximum value. (XI_PRM_COOLING)
        '''
        return self.get_param('cooling:max')
    def get_cooling_minimum(self):
        '''
        Temperature control mode. Minimum value. (XI_PRM_COOLING)
        '''
        return self.get_param('cooling:min')
    def get_cooling_increment(self):
        '''
        Temperature control mode. Increment step. (XI_PRM_COOLING)
        '''
        return self.get_param('cooling:inc')
    def set_cooling(self, cooling):
        '''
        Set temperature control mode. (XI_PRM_COOLING)
        '''
        self.set_param('cooling', cooling)
    def get_target_temp(self):
        '''
        Sensor target temperature for cooling. (XI_PRM_TARGET_TEMP)
        '''
        return self.get_param('target_temp')
    def get_target_temp_maximum(self):
        '''
        Sensor target temperature for cooling. Maximum value. (XI_PRM_TARGET_TEMP)
        '''
        return self.get_param('target_temp:max')
    def get_target_temp_minimum(self):
        '''
        Sensor target temperature for cooling. Minimum value. (XI_PRM_TARGET_TEMP)
        '''
        return self.get_param('target_temp:min')
    def get_target_temp_increment(self):
        '''
        Sensor target temperature for cooling. Increment step. (XI_PRM_TARGET_TEMP)
        '''
        return self.get_param('target_temp:inc')
    def set_target_temp(self, target_temp):
        '''
        Set sensor target temperature for cooling. (XI_PRM_TARGET_TEMP)
        '''
        self.set_param('target_temp', target_temp)
    def get_temp_selector(self):
        '''
        Selector of mechanical point where thermometer is located. (XI_PRM_TEMP_SELECTOR)
        '''
        return self.get_param('temp_selector')
    def get_temp_selector_maximum(self):
        '''
        Selector of mechanical point where thermometer is located. Maximum value. (XI_PRM_TEMP_SELECTOR)
        '''
        return self.get_param('temp_selector:max')
    def get_temp_selector_minimum(self):
        '''
        Selector of mechanical point where thermometer is located. Minimum value. (XI_PRM_TEMP_SELECTOR)
        '''
        return self.get_param('temp_selector:min')
    def get_temp_selector_increment(self):
        '''
        Selector of mechanical point where thermometer is located. Increment step. (XI_PRM_TEMP_SELECTOR)
        '''
        return self.get_param('temp_selector:inc')
    def set_temp_selector(self, temp_selector):
        '''
        Set selector of mechanical point where thermometer is located. (XI_PRM_TEMP_SELECTOR)
        '''
        self.set_param('temp_selector', temp_selector)
    def get_temp(self):
        '''
        Camera temperature (selected by XI_PRM_TEMP_SELECTOR). (XI_PRM_TEMP)
        '''
        return self.get_param('temp')
    def get_temp_maximum(self):
        '''
        Camera temperature (selected by XI_PRM_TEMP_SELECTOR). Maximum value. (XI_PRM_TEMP)
        '''
        return self.get_param('temp:max')
    def get_temp_minimum(self):
        '''
        Camera temperature (selected by XI_PRM_TEMP_SELECTOR). Minimum value. (XI_PRM_TEMP)
        '''
        return self.get_param('temp:min')
    def get_temp_increment(self):
        '''
        Camera temperature (selected by XI_PRM_TEMP_SELECTOR). Increment step. (XI_PRM_TEMP)
        '''
        return self.get_param('temp:inc')
    def get_device_temperature_ctrl_mode(self):
        '''
        Temperature control mode. (XI_PRM_TEMP_CONTROL_MODE)
        '''
        return self.get_param('device_temperature_ctrl_mode')
    def get_device_temperature_ctrl_mode_maximum(self):
        '''
        Temperature control mode. Maximum value. (XI_PRM_TEMP_CONTROL_MODE)
        '''
        return self.get_param('device_temperature_ctrl_mode:max')
    def get_device_temperature_ctrl_mode_minimum(self):
        '''
        Temperature control mode. Minimum value. (XI_PRM_TEMP_CONTROL_MODE)
        '''
        return self.get_param('device_temperature_ctrl_mode:min')
    def get_device_temperature_ctrl_mode_increment(self):
        '''
        Temperature control mode. Increment step. (XI_PRM_TEMP_CONTROL_MODE)
        '''
        return self.get_param('device_temperature_ctrl_mode:inc')
    def set_device_temperature_ctrl_mode(self, device_temperature_ctrl_mode):
        '''
        Set temperature control mode. (XI_PRM_TEMP_CONTROL_MODE)
        '''
        self.set_param('device_temperature_ctrl_mode', device_temperature_ctrl_mode)
    def get_chip_temp(self):
        '''
        Camera sensor temperature. (XI_PRM_CHIP_TEMP)
        '''
        return self.get_param('chip_temp')
    def get_chip_temp_maximum(self):
        '''
        Camera sensor temperature. Maximum value. (XI_PRM_CHIP_TEMP)
        '''
        return self.get_param('chip_temp:max')
    def get_chip_temp_minimum(self):
        '''
        Camera sensor temperature. Minimum value. (XI_PRM_CHIP_TEMP)
        '''
        return self.get_param('chip_temp:min')
    def get_chip_temp_increment(self):
        '''
        Camera sensor temperature. Increment step. (XI_PRM_CHIP_TEMP)
        '''
        return self.get_param('chip_temp:inc')
    def get_hous_temp(self):
        '''
        Camera housing temperature. (XI_PRM_HOUS_TEMP)
        '''
        return self.get_param('hous_temp')
    def get_hous_temp_maximum(self):
        '''
        Camera housing temperature. Maximum value. (XI_PRM_HOUS_TEMP)
        '''
        return self.get_param('hous_temp:max')
    def get_hous_temp_minimum(self):
        '''
        Camera housing temperature. Minimum value. (XI_PRM_HOUS_TEMP)
        '''
        return self.get_param('hous_temp:min')
    def get_hous_temp_increment(self):
        '''
        Camera housing temperature. Increment step. (XI_PRM_HOUS_TEMP)
        '''
        return self.get_param('hous_temp:inc')
    def get_hous_back_side_temp(self):
        '''
        Camera housing back side temperature. (XI_PRM_HOUS_BACK_SIDE_TEMP)
        '''
        return self.get_param('hous_back_side_temp')
    def get_hous_back_side_temp_maximum(self):
        '''
        Camera housing back side temperature. Maximum value. (XI_PRM_HOUS_BACK_SIDE_TEMP)
        '''
        return self.get_param('hous_back_side_temp:max')
    def get_hous_back_side_temp_minimum(self):
        '''
        Camera housing back side temperature. Minimum value. (XI_PRM_HOUS_BACK_SIDE_TEMP)
        '''
        return self.get_param('hous_back_side_temp:min')
    def get_hous_back_side_temp_increment(self):
        '''
        Camera housing back side temperature. Increment step. (XI_PRM_HOUS_BACK_SIDE_TEMP)
        '''
        return self.get_param('hous_back_side_temp:inc')
    def get_sensor_board_temp(self):
        '''
        Camera sensor board temperature. (XI_PRM_SENSOR_BOARD_TEMP)
        '''
        return self.get_param('sensor_board_temp')
    def get_sensor_board_temp_maximum(self):
        '''
        Camera sensor board temperature. Maximum value. (XI_PRM_SENSOR_BOARD_TEMP)
        '''
        return self.get_param('sensor_board_temp:max')
    def get_sensor_board_temp_minimum(self):
        '''
        Camera sensor board temperature. Minimum value. (XI_PRM_SENSOR_BOARD_TEMP)
        '''
        return self.get_param('sensor_board_temp:min')
    def get_sensor_board_temp_increment(self):
        '''
        Camera sensor board temperature. Increment step. (XI_PRM_SENSOR_BOARD_TEMP)
        '''
        return self.get_param('sensor_board_temp:inc')
    def get_device_temperature_element_sel(self):
        '''
        Temperature element selector (TEC(Peltier), Fan). (XI_PRM_TEMP_ELEMENT_SEL)
        '''
        return self.get_param('device_temperature_element_sel')
    def get_device_temperature_element_sel_maximum(self):
        '''
        Temperature element selector (TEC(Peltier), Fan). Maximum value. (XI_PRM_TEMP_ELEMENT_SEL)
        '''
        return self.get_param('device_temperature_element_sel:max')
    def get_device_temperature_element_sel_minimum(self):
        '''
        Temperature element selector (TEC(Peltier), Fan). Minimum value. (XI_PRM_TEMP_ELEMENT_SEL)
        '''
        return self.get_param('device_temperature_element_sel:min')
    def get_device_temperature_element_sel_increment(self):
        '''
        Temperature element selector (TEC(Peltier), Fan). Increment step. (XI_PRM_TEMP_ELEMENT_SEL)
        '''
        return self.get_param('device_temperature_element_sel:inc')
    def set_device_temperature_element_sel(self, device_temperature_element_sel):
        '''
        Set temperature element selector (TEC(Peltier), Fan). (XI_PRM_TEMP_ELEMENT_SEL)
        '''
        self.set_param('device_temperature_element_sel', device_temperature_element_sel)
    def get_device_temperature_element_val(self):
        '''
        Temperature element value in percents of full control range. (XI_PRM_TEMP_ELEMENT_VALUE)
        '''
        return self.get_param('device_temperature_element_val')
    def get_device_temperature_element_val_maximum(self):
        '''
        Temperature element value in percents of full control range. Maximum value. (XI_PRM_TEMP_ELEMENT_VALUE)
        '''
        return self.get_param('device_temperature_element_val:max')
    def get_device_temperature_element_val_minimum(self):
        '''
        Temperature element value in percents of full control range. Minimum value. (XI_PRM_TEMP_ELEMENT_VALUE)
        '''
        return self.get_param('device_temperature_element_val:min')
    def get_device_temperature_element_val_increment(self):
        '''
        Temperature element value in percents of full control range. Increment step. (XI_PRM_TEMP_ELEMENT_VALUE)
        '''
        return self.get_param('device_temperature_element_val:inc')
    def set_device_temperature_element_val(self, device_temperature_element_val):
        '''
        Set temperature element value in percents of full control range. (XI_PRM_TEMP_ELEMENT_VALUE)
        '''
        self.set_param('device_temperature_element_val', device_temperature_element_val)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Color Correction
#-------------------------------------------------------------------------------------------------------------------
    def get_cms(self):
        '''
        Mode of color management system. (XI_PRM_CMS)
        '''
        return self.get_param('cms')
    def get_cms_maximum(self):
        '''
        Mode of color management system. Maximum value. (XI_PRM_CMS)
        '''
        return self.get_param('cms:max')
    def get_cms_minimum(self):
        '''
        Mode of color management system. Minimum value. (XI_PRM_CMS)
        '''
        return self.get_param('cms:min')
    def get_cms_increment(self):
        '''
        Mode of color management system. Increment step. (XI_PRM_CMS)
        '''
        return self.get_param('cms:inc')
    def set_cms(self, cms):
        '''
        Set mode of color management system. (XI_PRM_CMS)
        '''
        self.set_param('cms', cms)
    def get_cms_intent(self):
        '''
        Intent of color management system. (XI_PRM_CMS_INTENT)
        '''
        return self.get_param('cms_intent')
    def get_cms_intent_maximum(self):
        '''
        Intent of color management system. Maximum value. (XI_PRM_CMS_INTENT)
        '''
        return self.get_param('cms_intent:max')
    def get_cms_intent_minimum(self):
        '''
        Intent of color management system. Minimum value. (XI_PRM_CMS_INTENT)
        '''
        return self.get_param('cms_intent:min')
    def get_cms_intent_increment(self):
        '''
        Intent of color management system. Increment step. (XI_PRM_CMS_INTENT)
        '''
        return self.get_param('cms_intent:inc')
    def set_cms_intent(self, cms_intent):
        '''
        Set intent of color management system. (XI_PRM_CMS_INTENT)
        '''
        self.set_param('cms_intent', cms_intent)
    def is_apply_cms(self):
        '''
        True if applying of CMS profiles to xiGetImage is enabled (see XI_PRM_INPUT_CMS_PROFILE, XI_PRM_OUTPUT_CMS_PROFILE). (XI_PRM_APPLY_CMS)
        '''
        return self.get_param('apply_cms')
    def enable_apply_cms(self):
        '''
        Enable applying of CMS profiles to xiGetImage (see XI_PRM_INPUT_CMS_PROFILE, XI_PRM_OUTPUT_CMS_PROFILE). (XI_PRM_APPLY_CMS)
        '''
        self.set_param('apply_cms', True)
    def disable_apply_cms(self):
        '''
        Disable applying of CMS profiles to xiGetImage. (XI_PRM_APPLY_CMS)
        '''
        self.set_param('apply_cms', False)
    def get_input_cms_profile(self,buffer_size=256):
        '''
        Filename for input cms profile (e.g. input.icc). buffer_size is the size in bytes of the string buffer used to read the value. (XI_PRM_INPUT_CMS_PROFILE)
        '''
        return self.get_param('input_cms_profile',buffer_size)
    def set_input_cms_profile(self, input_cms_profile):
        '''
        Set filename for input cms profile (e.g. input.icc). (XI_PRM_INPUT_CMS_PROFILE)
        '''
        self.set_param('input_cms_profile', input_cms_profile)
    def get_output_cms_profile(self,buffer_size=256):
        '''
        Filename for output cms profile (e.g. output.icc). buffer_size is the size in bytes of the string buffer used to read the value. (XI_PRM_OUTPUT_CMS_PROFILE)
        '''
        return self.get_param('output_cms_profile',buffer_size)
    def set_output_cms_profile(self, output_cms_profile):
        '''
        Set filename for output cms profile (e.g. output.icc). (XI_PRM_OUTPUT_CMS_PROFILE)
        '''
        self.set_param('output_cms_profile', output_cms_profile)
    def is_iscolor(self):
        '''
        Returns 1 for color cameras. (XI_PRM_IMAGE_IS_COLOR)
        '''
        return self.get_param('iscolor')
    def get_cfa(self):
        '''
        Returns color filter array type of RAW data. (XI_PRM_COLOR_FILTER_ARRAY)
        '''
        return self.get_param('cfa')
    def get_cfa_maximum(self):
        '''
        Color filter array type of RAW data. Maximum value. (XI_PRM_COLOR_FILTER_ARRAY)
        '''
        return self.get_param('cfa:max')
    def get_cfa_minimum(self):
        '''
        Color filter array type of RAW data. Minimum value. (XI_PRM_COLOR_FILTER_ARRAY)
        '''
        return self.get_param('cfa:min')
    def get_cfa_increment(self):
        '''
        Color filter array type of RAW data. Increment step. (XI_PRM_COLOR_FILTER_ARRAY)
        '''
        return self.get_param('cfa:inc')
    def get_gammaY(self):
        '''
        Luminosity gamma. (XI_PRM_GAMMAY)
        '''
        return self.get_param('gammaY')
    def get_gammaY_maximum(self):
        '''
        Luminosity gamma. Maximum value. (XI_PRM_GAMMAY)
        '''
        return self.get_param('gammaY:max')
    def get_gammaY_minimum(self):
        '''
        Luminosity gamma. Minimum value. (XI_PRM_GAMMAY)
        '''
        return self.get_param('gammaY:min')
    def get_gammaY_increment(self):
        '''
        Luminosity gamma. Increment step. (XI_PRM_GAMMAY)
        '''
        return self.get_param('gammaY:inc')
    def set_gammaY(self, gammaY):
        '''
        Set luminosity gamma. (XI_PRM_GAMMAY)
        '''
        self.set_param('gammaY', gammaY)
    def get_gammaC(self):
        '''
        Chromaticity gamma. (XI_PRM_GAMMAC)
        '''
        return self.get_param('gammaC')
    def get_gammaC_maximum(self):
        '''
        Chromaticity gamma. Maximum value. (XI_PRM_GAMMAC)
        '''
        return self.get_param('gammaC:max')
    def get_gammaC_minimum(self):
        '''
        Chromaticity gamma. Minimum value. (XI_PRM_GAMMAC)
        '''
        return self.get_param('gammaC:min')
    def get_gammaC_increment(self):
        '''
        Chromaticity gamma. Increment step. (XI_PRM_GAMMAC)
        '''
        return self.get_param('gammaC:inc')
    def set_gammaC(self, gammaC):
        '''
        Set chromaticity gamma. (XI_PRM_GAMMAC)
        '''
        self.set_param('gammaC', gammaC)
    def get_sharpness(self):
        '''
        Sharpness strength. (XI_PRM_SHARPNESS)
        '''
        return self.get_param('sharpness')
    def get_sharpness_maximum(self):
        '''
        Sharpness strength. Maximum value. (XI_PRM_SHARPNESS)
        '''
        return self.get_param('sharpness:max')
    def get_sharpness_minimum(self):
        '''
        Sharpness strength. Minimum value. (XI_PRM_SHARPNESS)
        '''
        return self.get_param('sharpness:min')
    def get_sharpness_increment(self):
        '''
        Sharpness strength. Increment step. (XI_PRM_SHARPNESS)
        '''
        return self.get_param('sharpness:inc')
    def set_sharpness(self, sharpness):
        '''
        Set sharpness strength. (XI_PRM_SHARPNESS)
        '''
        self.set_param('sharpness', sharpness)
    def get_ccMTX00(self):
        '''
        Color Correction Matrix element [0][0]. (XI_PRM_CC_MATRIX_00)
        '''
        return self.get_param('ccMTX00')
    def get_ccMTX00_maximum(self):
        '''
        Color Correction Matrix element [0][0]. Maximum value. (XI_PRM_CC_MATRIX_00)
        '''
        return self.get_param('ccMTX00:max')
    def get_ccMTX00_minimum(self):
        '''
        Color Correction Matrix element [0][0]. Minimum value. (XI_PRM_CC_MATRIX_00)
        '''
        return self.get_param('ccMTX00:min')
    def get_ccMTX00_increment(self):
        '''
        Color Correction Matrix element [0][0]. Increment step. (XI_PRM_CC_MATRIX_00)
        '''
        return self.get_param('ccMTX00:inc')
    def set_ccMTX00(self, ccMTX00):
        '''
        Set Color Correction Matrix element [0][0]. (XI_PRM_CC_MATRIX_00)
        '''
        self.set_param('ccMTX00', ccMTX00)
    def get_ccMTX01(self):
        '''
        Color Correction Matrix element [0][1]. (XI_PRM_CC_MATRIX_01)
        '''
        return self.get_param('ccMTX01')
    def get_ccMTX01_maximum(self):
        '''
        Color Correction Matrix element [0][1]. Maximum value. (XI_PRM_CC_MATRIX_01)
        '''
        return self.get_param('ccMTX01:max')
    def get_ccMTX01_minimum(self):
        '''
        Color Correction Matrix element [0][1]. Minimum value. (XI_PRM_CC_MATRIX_01)
        '''
        return self.get_param('ccMTX01:min')
    def get_ccMTX01_increment(self):
        '''
        Color Correction Matrix element [0][1]. Increment step. (XI_PRM_CC_MATRIX_01)
        '''
        return self.get_param('ccMTX01:inc')
    def set_ccMTX01(self, ccMTX01):
        '''
        Set Color Correction Matrix element [0][1]. (XI_PRM_CC_MATRIX_01)
        '''
        self.set_param('ccMTX01', ccMTX01)
    def get_ccMTX02(self):
        '''
        Color Correction Matrix element [0][2]. (XI_PRM_CC_MATRIX_02)
        '''
        return self.get_param('ccMTX02')
    def get_ccMTX02_maximum(self):
        '''
        Color Correction Matrix element [0][2]. Maximum value. (XI_PRM_CC_MATRIX_02)
        '''
        return self.get_param('ccMTX02:max')
    def get_ccMTX02_minimum(self):
        '''
        Color Correction Matrix element [0][2]. Minimum value. (XI_PRM_CC_MATRIX_02)
        '''
        return self.get_param('ccMTX02:min')
    def get_ccMTX02_increment(self):
        '''
        Color Correction Matrix element [0][2]. Increment step. (XI_PRM_CC_MATRIX_02)
        '''
        return self.get_param('ccMTX02:inc')
    def set_ccMTX02(self, ccMTX02):
        '''
        Set Color Correction Matrix element [0][2]. (XI_PRM_CC_MATRIX_02)
        '''
        self.set_param('ccMTX02', ccMTX02)
    def get_ccMTX03(self):
        '''
        Color Correction Matrix element [0][3]. (XI_PRM_CC_MATRIX_03)
        '''
        return self.get_param('ccMTX03')
    def get_ccMTX03_maximum(self):
        '''
        Color Correction Matrix element [0][3]. Maximum value. (XI_PRM_CC_MATRIX_03)
        '''
        return self.get_param('ccMTX03:max')
    def get_ccMTX03_minimum(self):
        '''
        Color Correction Matrix element [0][3]. Minimum value. (XI_PRM_CC_MATRIX_03)
        '''
        return self.get_param('ccMTX03:min')
    def get_ccMTX03_increment(self):
        '''
        Color Correction Matrix element [0][3]. Increment step. (XI_PRM_CC_MATRIX_03)
        '''
        return self.get_param('ccMTX03:inc')
    def set_ccMTX03(self, ccMTX03):
        '''
        Set Color Correction Matrix element [0][3]. (XI_PRM_CC_MATRIX_03)
        '''
        self.set_param('ccMTX03', ccMTX03)
    def get_ccMTX10(self):
        '''
        Color Correction Matrix element [1][0]. (XI_PRM_CC_MATRIX_10)
        '''
        return self.get_param('ccMTX10')
    def get_ccMTX10_maximum(self):
        '''
        Color Correction Matrix element [1][0]. Maximum value. (XI_PRM_CC_MATRIX_10)
        '''
        return self.get_param('ccMTX10:max')
    def get_ccMTX10_minimum(self):
        '''
        Color Correction Matrix element [1][0]. Minimum value. (XI_PRM_CC_MATRIX_10)
        '''
        return self.get_param('ccMTX10:min')
    def get_ccMTX10_increment(self):
        '''
        Color Correction Matrix element [1][0]. Increment step. (XI_PRM_CC_MATRIX_10)
        '''
        return self.get_param('ccMTX10:inc')
    def set_ccMTX10(self, ccMTX10):
        '''
        Set Color Correction Matrix element [1][0]. (XI_PRM_CC_MATRIX_10)
        '''
        self.set_param('ccMTX10', ccMTX10)
    def get_ccMTX11(self):
        '''
        Color Correction Matrix element [1][1]. (XI_PRM_CC_MATRIX_11)
        '''
        return self.get_param('ccMTX11')
    def get_ccMTX11_maximum(self):
        '''
        Color Correction Matrix element [1][1]. Maximum value. (XI_PRM_CC_MATRIX_11)
        '''
        return self.get_param('ccMTX11:max')
    def get_ccMTX11_minimum(self):
        '''
        Color Correction Matrix element [1][1]. Minimum value. (XI_PRM_CC_MATRIX_11)
        '''
        return self.get_param('ccMTX11:min')
    def get_ccMTX11_increment(self):
        '''
        Color Correction Matrix element [1][1]. Increment step. (XI_PRM_CC_MATRIX_11)
        '''
        return self.get_param('ccMTX11:inc')
    def set_ccMTX11(self, ccMTX11):
        '''
        Set Color Correction Matrix element [1][1]. (XI_PRM_CC_MATRIX_11)
        '''
        self.set_param('ccMTX11', ccMTX11)
    def get_ccMTX12(self):
        '''
        Color Correction Matrix element [1][2]. (XI_PRM_CC_MATRIX_12)
        '''
        return self.get_param('ccMTX12')
    def get_ccMTX12_maximum(self):
        '''
        Color Correction Matrix element [1][2]. Maximum value. (XI_PRM_CC_MATRIX_12)
        '''
        return self.get_param('ccMTX12:max')
    def get_ccMTX12_minimum(self):
        '''
        Color Correction Matrix element [1][2]. Minimum value. (XI_PRM_CC_MATRIX_12)
        '''
        return self.get_param('ccMTX12:min')
    def get_ccMTX12_increment(self):
        '''
        Color Correction Matrix element [1][2]. Increment step. (XI_PRM_CC_MATRIX_12)
        '''
        return self.get_param('ccMTX12:inc')
    def set_ccMTX12(self, ccMTX12):
        '''
        Set Color Correction Matrix element [1][2]. (XI_PRM_CC_MATRIX_12)
        '''
        self.set_param('ccMTX12', ccMTX12)
    def get_ccMTX13(self):
        '''
        Color Correction Matrix element [1][3]. (XI_PRM_CC_MATRIX_13)
        '''
        return self.get_param('ccMTX13')
    def get_ccMTX13_maximum(self):
        '''
        Color Correction Matrix element [1][3]. Maximum value. (XI_PRM_CC_MATRIX_13)
        '''
        return self.get_param('ccMTX13:max')
    def get_ccMTX13_minimum(self):
        '''
        Color Correction Matrix element [1][3]. Minimum value. (XI_PRM_CC_MATRIX_13)
        '''
        return self.get_param('ccMTX13:min')
    def get_ccMTX13_increment(self):
        '''
        Color Correction Matrix element [1][3]. Increment step. (XI_PRM_CC_MATRIX_13)
        '''
        return self.get_param('ccMTX13:inc')
    def set_ccMTX13(self, ccMTX13):
        '''
        Set Color Correction Matrix element [1][3]. (XI_PRM_CC_MATRIX_13)
        '''
        self.set_param('ccMTX13', ccMTX13)
    def get_ccMTX20(self):
        '''
        Color Correction Matrix element [2][0]. (XI_PRM_CC_MATRIX_20)
        '''
        return self.get_param('ccMTX20')
    def get_ccMTX20_maximum(self):
        '''
        Color Correction Matrix element [2][0]. Maximum value. (XI_PRM_CC_MATRIX_20)
        '''
        return self.get_param('ccMTX20:max')
    def get_ccMTX20_minimum(self):
        '''
        Color Correction Matrix element [2][0]. Minimum value. (XI_PRM_CC_MATRIX_20)
        '''
        return self.get_param('ccMTX20:min')
    def get_ccMTX20_increment(self):
        '''
        Color Correction Matrix element [2][0]. Increment step. (XI_PRM_CC_MATRIX_20)
        '''
        return self.get_param('ccMTX20:inc')
    def set_ccMTX20(self, ccMTX20):
        '''
        Set Color Correction Matrix element [2][0]. (XI_PRM_CC_MATRIX_20)
        '''
        self.set_param('ccMTX20', ccMTX20)
    def get_ccMTX21(self):
        '''
        Color Correction Matrix element [2][1]. (XI_PRM_CC_MATRIX_21)
        '''
        return self.get_param('ccMTX21')
    def get_ccMTX21_maximum(self):
        '''
        Color Correction Matrix element [2][1]. Maximum value. (XI_PRM_CC_MATRIX_21)
        '''
        return self.get_param('ccMTX21:max')
    def get_ccMTX21_minimum(self):
        '''
        Color Correction Matrix element [2][1]. Minimum value. (XI_PRM_CC_MATRIX_21)
        '''
        return self.get_param('ccMTX21:min')
    def get_ccMTX21_increment(self):
        '''
        Color Correction Matrix element [2][1]. Increment step. (XI_PRM_CC_MATRIX_21)
        '''
        return self.get_param('ccMTX21:inc')
    def set_ccMTX21(self, ccMTX21):
        '''
        Set Color Correction Matrix element [2][1]. (XI_PRM_CC_MATRIX_21)
        '''
        self.set_param('ccMTX21', ccMTX21)
    def get_ccMTX22(self):
        '''
        Color Correction Matrix element [2][2]. (XI_PRM_CC_MATRIX_22)
        '''
        return self.get_param('ccMTX22')
    def get_ccMTX22_maximum(self):
        '''
        Color Correction Matrix element [2][2]. Maximum value. (XI_PRM_CC_MATRIX_22)
        '''
        return self.get_param('ccMTX22:max')
    def get_ccMTX22_minimum(self):
        '''
        Color Correction Matrix element [2][2]. Minimum value. (XI_PRM_CC_MATRIX_22)
        '''
        return self.get_param('ccMTX22:min')
    def get_ccMTX22_increment(self):
        '''
        Color Correction Matrix element [2][2]. Increment step. (XI_PRM_CC_MATRIX_22)
        '''
        return self.get_param('ccMTX22:inc')
    def set_ccMTX22(self, ccMTX22):
        '''
        Set Color Correction Matrix element [2][2]. (XI_PRM_CC_MATRIX_22)
        '''
        self.set_param('ccMTX22', ccMTX22)
    def get_ccMTX23(self):
        '''
        Color Correction Matrix element [2][3]. (XI_PRM_CC_MATRIX_23)
        '''
        return self.get_param('ccMTX23')
    def get_ccMTX23_maximum(self):
        '''
        Color Correction Matrix element [2][3]. Maximum value. (XI_PRM_CC_MATRIX_23)
        '''
        return self.get_param('ccMTX23:max')
    def get_ccMTX23_minimum(self):
        '''
        Color Correction Matrix element [2][3]. Minimum value. (XI_PRM_CC_MATRIX_23)
        '''
        return self.get_param('ccMTX23:min')
    def get_ccMTX23_increment(self):
        '''
        Color Correction Matrix element [2][3]. Increment step. (XI_PRM_CC_MATRIX_23)
        '''
        return self.get_param('ccMTX23:inc')
    def set_ccMTX23(self, ccMTX23):
        '''
        Set Color Correction Matrix element [2][3]. (XI_PRM_CC_MATRIX_23)
        '''
        self.set_param('ccMTX23', ccMTX23)
    def get_ccMTX30(self):
        '''
        Color Correction Matrix element [3][0]. (XI_PRM_CC_MATRIX_30)
        '''
        return self.get_param('ccMTX30')
    def get_ccMTX30_maximum(self):
        '''
        Color Correction Matrix element [3][0]. Maximum value. (XI_PRM_CC_MATRIX_30)
        '''
        return self.get_param('ccMTX30:max')
    def get_ccMTX30_minimum(self):
        '''
        Color Correction Matrix element [3][0]. Minimum value. (XI_PRM_CC_MATRIX_30)
        '''
        return self.get_param('ccMTX30:min')
    def get_ccMTX30_increment(self):
        '''
        Color Correction Matrix element [3][0]. Increment step. (XI_PRM_CC_MATRIX_30)
        '''
        return self.get_param('ccMTX30:inc')
    def set_ccMTX30(self, ccMTX30):
        '''
        Set Color Correction Matrix element [3][0]. (XI_PRM_CC_MATRIX_30)
        '''
        self.set_param('ccMTX30', ccMTX30)
    def get_ccMTX31(self):
        '''
        Color Correction Matrix element [3][1]. (XI_PRM_CC_MATRIX_31)
        '''
        return self.get_param('ccMTX31')
    def get_ccMTX31_maximum(self):
        '''
        Color Correction Matrix element [3][1]. Maximum value. (XI_PRM_CC_MATRIX_31)
        '''
        return self.get_param('ccMTX31:max')
    def get_ccMTX31_minimum(self):
        '''
        Color Correction Matrix element [3][1]. Minimum value. (XI_PRM_CC_MATRIX_31)
        '''
        return self.get_param('ccMTX31:min')
    def get_ccMTX31_increment(self):
        '''
        Color Correction Matrix element [3][1]. Increment step. (XI_PRM_CC_MATRIX_31)
        '''
        return self.get_param('ccMTX31:inc')
    def set_ccMTX31(self, ccMTX31):
        '''
        Set Color Correction Matrix element [3][1]. (XI_PRM_CC_MATRIX_31)
        '''
        self.set_param('ccMTX31', ccMTX31)
    def get_ccMTX32(self):
        '''
        Color Correction Matrix element [3][2]. (XI_PRM_CC_MATRIX_32)
        '''
        return self.get_param('ccMTX32')
    def get_ccMTX32_maximum(self):
        '''
        Color Correction Matrix element [3][2]. Maximum value. (XI_PRM_CC_MATRIX_32)
        '''
        return self.get_param('ccMTX32:max')
    def get_ccMTX32_minimum(self):
        '''
        Color Correction Matrix element [3][2]. Minimum value. (XI_PRM_CC_MATRIX_32)
        '''
        return self.get_param('ccMTX32:min')
    def get_ccMTX32_increment(self):
        '''
        Color Correction Matrix element [3][2]. Increment step. (XI_PRM_CC_MATRIX_32)
        '''
        return self.get_param('ccMTX32:inc')
    def set_ccMTX32(self, ccMTX32):
        '''
        Set Color Correction Matrix element [3][2]. (XI_PRM_CC_MATRIX_32)
        '''
        self.set_param('ccMTX32', ccMTX32)
    def get_ccMTX33(self):
        '''
        Color Correction Matrix element [3][3]. (XI_PRM_CC_MATRIX_33)
        '''
        return self.get_param('ccMTX33')
    def get_ccMTX33_maximum(self):
        '''
        Color Correction Matrix element [3][3]. Maximum value. (XI_PRM_CC_MATRIX_33)
        '''
        return self.get_param('ccMTX33:max')
    def get_ccMTX33_minimum(self):
        '''
        Color Correction Matrix element [3][3]. Minimum value. (XI_PRM_CC_MATRIX_33)
        '''
        return self.get_param('ccMTX33:min')
    def get_ccMTX33_increment(self):
        '''
        Color Correction Matrix element [3][3]. Increment step. (XI_PRM_CC_MATRIX_33)
        '''
        return self.get_param('ccMTX33:inc')
    def set_ccMTX33(self, ccMTX33):
        '''
        Set Color Correction Matrix element [3][3]. (XI_PRM_CC_MATRIX_33)
        '''
        self.set_param('ccMTX33', ccMTX33)
    def get_defccMTX(self):
        '''
        Set default Color Correction Matrix. (XI_PRM_DEFAULT_CC_MATRIX)
        '''
        return self.get_param('defccMTX')
    def get_defccMTX_maximum(self):
        '''
        Default Color Correction Matrix. Maximum value. (XI_PRM_DEFAULT_CC_MATRIX)
        '''
        return self.get_param('defccMTX:max')
    def get_defccMTX_minimum(self):
        '''
        Default Color Correction Matrix. Minimum value. (XI_PRM_DEFAULT_CC_MATRIX)
        '''
        return self.get_param('defccMTX:min')
    def get_defccMTX_increment(self):
        '''
        Default Color Correction Matrix. Increment step. (XI_PRM_DEFAULT_CC_MATRIX)
        '''
        return self.get_param('defccMTX:inc')
    def set_defccMTX(self, defccMTX):
        '''
        Set default Color Correction Matrix. (XI_PRM_DEFAULT_CC_MATRIX)
        '''
        self.set_param('defccMTX', defccMTX)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Device IO
#-------------------------------------------------------------------------------------------------------------------
    def get_trigger_source(self):
        '''
        Defines source of trigger. (XI_PRM_TRG_SOURCE)
        '''
        return self.get_param('trigger_source')
    def get_trigger_source_maximum(self):
        '''
        Source of trigger. Maximum value. (XI_PRM_TRG_SOURCE)
        '''
        return self.get_param('trigger_source:max')
    def get_trigger_source_minimum(self):
        '''
        Source of trigger. Minimum value. (XI_PRM_TRG_SOURCE)
        '''
        return self.get_param('trigger_source:min')
    def get_trigger_source_increment(self):
        '''
        Source of trigger. Increment step. (XI_PRM_TRG_SOURCE)
        '''
        return self.get_param('trigger_source:inc')
    def set_trigger_source(self, trigger_source):
        '''
        Set source of trigger. (XI_PRM_TRG_SOURCE)
        '''
        self.set_param('trigger_source', trigger_source)
    def get_trigger_software(self):
        '''
        Generates an internal trigger. XI_PRM_TRG_SOURCE must be set to TRG_SOFTWARE. (XI_PRM_TRG_SOFTWARE)
        '''
        return self.get_param('trigger_software')
    def get_trigger_software_maximum(self):
        '''
        Internal (software) trigger. Maximum value. (XI_PRM_TRG_SOFTWARE)
        '''
        return self.get_param('trigger_software:max')
    def get_trigger_software_minimum(self):
        '''
        Internal (software) trigger. Minimum value. (XI_PRM_TRG_SOFTWARE)
        '''
        return self.get_param('trigger_software:min')
    def get_trigger_software_increment(self):
        '''
        Internal (software) trigger. Increment step. (XI_PRM_TRG_SOFTWARE)
        '''
        return self.get_param('trigger_software:inc')
    def set_trigger_software(self, trigger_software):
        '''
        Generates an internal trigger. XI_PRM_TRG_SOURCE must be set to TRG_SOFTWARE. (XI_PRM_TRG_SOFTWARE)
        '''
        self.set_param('trigger_software', trigger_software)
    def get_trigger_selector(self):
        '''
        Selects the type of trigger. (XI_PRM_TRG_SELECTOR)
        '''
        return self.get_param('trigger_selector')
    def get_trigger_selector_maximum(self):
        '''
        Type of trigger. Maximum value. (XI_PRM_TRG_SELECTOR)
        '''
        return self.get_param('trigger_selector:max')
    def get_trigger_selector_minimum(self):
        '''
        Type of trigger. Minimum value. (XI_PRM_TRG_SELECTOR)
        '''
        return self.get_param('trigger_selector:min')
    def get_trigger_selector_increment(self):
        '''
        Type of trigger. Increment step. (XI_PRM_TRG_SELECTOR)
        '''
        return self.get_param('trigger_selector:inc')
    def set_trigger_selector(self, trigger_selector):
        '''
        Selects the type of trigger. (XI_PRM_TRG_SELECTOR)
        '''
        self.set_param('trigger_selector', trigger_selector)
    def get_acq_frame_burst_count(self):
        '''
        Number of frames acquired by burst. This burst is used only if trigger is set to FrameBurstStart. (XI_PRM_ACQ_FRAME_BURST_COUNT)
        '''
        return self.get_param('acq_frame_burst_count')
    def get_acq_frame_burst_count_maximum(self):
        '''
        Number of frames acquired by burst. Maximum value. (XI_PRM_ACQ_FRAME_BURST_COUNT)
        '''
        return self.get_param('acq_frame_burst_count:max')
    def get_acq_frame_burst_count_minimum(self):
        '''
        Number of frames acquired by burst. Minimum value. (XI_PRM_ACQ_FRAME_BURST_COUNT)
        '''
        return self.get_param('acq_frame_burst_count:min')
    def get_acq_frame_burst_count_increment(self):
        '''
        Number of frames acquired by burst. Increment step. (XI_PRM_ACQ_FRAME_BURST_COUNT)
        '''
        return self.get_param('acq_frame_burst_count:inc')
    def set_acq_frame_burst_count(self, acq_frame_burst_count):
        '''
        Sets number of frames acquired by burst. This burst is used only if trigger is set to FrameBurstStart. (XI_PRM_ACQ_FRAME_BURST_COUNT)
        '''
        self.set_param('acq_frame_burst_count', acq_frame_burst_count)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: GPIO Setup
#-------------------------------------------------------------------------------------------------------------------
    def get_gpi_selector(self):
        '''
        Selects GPI. (XI_PRM_GPI_SELECTOR)
        '''
        return self.get_param('gpi_selector')
    def get_gpi_selector_maximum(self):
        '''
        GPI selector. Maximum value. (XI_PRM_GPI_SELECTOR)
        '''
        return self.get_param('gpi_selector:max')
    def get_gpi_selector_minimum(self):
        '''
        GPI selector. Minimum value. (XI_PRM_GPI_SELECTOR)
        '''
        return self.get_param('gpi_selector:min')
    def get_gpi_selector_increment(self):
        '''
        GPI selector. Increment step. (XI_PRM_GPI_SELECTOR)
        '''
        return self.get_param('gpi_selector:inc')
    def set_gpi_selector(self, gpi_selector):
        '''
        Selects GPI. (XI_PRM_GPI_SELECTOR)
        '''
        self.set_param('gpi_selector', gpi_selector)
    def get_gpi_mode(self):
        '''
        Defines GPI functionality. (XI_PRM_GPI_MODE)
        '''
        return self.get_param('gpi_mode')
    def get_gpi_mode_maximum(self):
        '''
        GPI functionality. Maximum value. (XI_PRM_GPI_MODE)
        '''
        return self.get_param('gpi_mode:max')
    def get_gpi_mode_minimum(self):
        '''
        GPI functionality. Minimum value. (XI_PRM_GPI_MODE)
        '''
        return self.get_param('gpi_mode:min')
    def get_gpi_mode_increment(self):
        '''
        GPI functionality. Increment step. (XI_PRM_GPI_MODE)
        '''
        return self.get_param('gpi_mode:inc')
    def set_gpi_mode(self, gpi_mode):
        '''
        Defines GPI functionality. (XI_PRM_GPI_MODE)
        '''
        self.set_param('gpi_mode', gpi_mode)
    def get_gpi_level(self):
        '''
        GPI level. (XI_PRM_GPI_LEVEL)
        '''
        return self.get_param('gpi_level')
    def get_gpi_level_maximum(self):
        '''
        GPI level. Maximum value. (XI_PRM_GPI_LEVEL)
        '''
        return self.get_param('gpi_level:max')
    def get_gpi_level_minimum(self):
        '''
        GPI level. Minimum value. (XI_PRM_GPI_LEVEL)
        '''
        return self.get_param('gpi_level:min')
    def get_gpi_level_increment(self):
        '''
        GPI level. Increment step. (XI_PRM_GPI_LEVEL)
        '''
        return self.get_param('gpi_level:inc')
    def get_gpo_selector(self):
        '''
        Selects GPO. (XI_PRM_GPO_SELECTOR)
        '''
        return self.get_param('gpo_selector')
    def get_gpo_selector_maximum(self):
        '''
        GPO selector. Maximum value. (XI_PRM_GPO_SELECTOR)
        '''
        return self.get_param('gpo_selector:max')
    def get_gpo_selector_minimum(self):
        '''
        GPO selector. Minimum value. (XI_PRM_GPO_SELECTOR)
        '''
        return self.get_param('gpo_selector:min')
    def get_gpo_selector_increment(self):
        '''
        GPO selector. Increment step. (XI_PRM_GPO_SELECTOR)
        '''
        return self.get_param('gpo_selector:inc')
    def set_gpo_selector(self, gpo_selector):
        '''
        Selects GPO. (XI_PRM_GPO_SELECTOR)
        '''
        self.set_param('gpo_selector', gpo_selector)
def get_gpo_mode(self):
'''
Defines GPO functionalityXI_PRM_GPO_MODE
'''
return self.get_param('gpo_mode')
def get_gpo_mode_maximum(self):
'''
Defines GPO functionalityXI_PRM_GPO_MODE
'''
return self.get_param('gpo_mode:max')
def get_gpo_mode_minimum(self):
'''
Defines GPO functionalityXI_PRM_GPO_MODE
'''
return self.get_param('gpo_mode:min')
def get_gpo_mode_increment(self):
'''
Defines GPO functionalityXI_PRM_GPO_MODE
'''
return self.get_param('gpo_mode:inc')
def set_gpo_mode(self, gpo_mode):
'''
Defines GPO functionalityXI_PRM_GPO_MODE
'''
self.set_param('gpo_mode', gpo_mode)
def get_led_selector(self):
'''
Selects LEDXI_PRM_LED_SELECTOR
'''
return self.get_param('led_selector')
def get_led_selector_maximum(self):
'''
Selects LEDXI_PRM_LED_SELECTOR
'''
return self.get_param('led_selector:max')
def get_led_selector_minimum(self):
'''
Selects LEDXI_PRM_LED_SELECTOR
'''
return self.get_param('led_selector:min')
def get_led_selector_increment(self):
'''
Selects LEDXI_PRM_LED_SELECTOR
'''
return self.get_param('led_selector:inc')
def set_led_selector(self, led_selector):
'''
Selects LEDXI_PRM_LED_SELECTOR
'''
self.set_param('led_selector', led_selector)
def get_led_mode(self):
'''
Defines LED functionalityXI_PRM_LED_MODE
'''
return self.get_param('led_mode')
def get_led_mode_maximum(self):
'''
Defines LED functionalityXI_PRM_LED_MODE
'''
return self.get_param('led_mode:max')
def get_led_mode_minimum(self):
'''
Defines LED functionalityXI_PRM_LED_MODE
'''
return self.get_param('led_mode:min')
def get_led_mode_increment(self):
'''
Defines LED functionalityXI_PRM_LED_MODE
'''
return self.get_param('led_mode:inc')
def set_led_mode(self, led_mode):
'''
Defines LED functionalityXI_PRM_LED_MODE
'''
self.set_param('led_mode', led_mode)
def is_dbnc_en(self):
'''
Enable/Disable debounce to selected GPIXI_PRM_DEBOUNCE_EN
'''
return self.get_param('dbnc_en')
def enable_dbnc_en(self):
'''
Enable/Disable debounce to selected GPIXI_PRM_DEBOUNCE_EN
'''
self.set_param('dbnc_en', True)
def disable_dbnc_en(self):
'''
Enable/Disable debounce to selected GPIXI_PRM_DEBOUNCE_EN
'''
self.set_param('dbnc_en', False)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Debounce Setup
#-------------------------------------------------------------------------------------------------------------------
    def get_dbnc_t0(self):
        '''
        Debounce time (x * 10us). Returns the current value. Parameter: XI_PRM_DEBOUNCE_T0.
        '''
        return self.get_param('dbnc_t0')
    def get_dbnc_t0_maximum(self):
        '''
        Debounce time (x * 10us). Returns the maximum supported value. Parameter: XI_PRM_DEBOUNCE_T0.
        '''
        return self.get_param('dbnc_t0:max')
    def get_dbnc_t0_minimum(self):
        '''
        Debounce time (x * 10us). Returns the minimum supported value. Parameter: XI_PRM_DEBOUNCE_T0.
        '''
        return self.get_param('dbnc_t0:min')
    def get_dbnc_t0_increment(self):
        '''
        Debounce time (x * 10us). Returns the smallest valid increment. Parameter: XI_PRM_DEBOUNCE_T0.
        '''
        return self.get_param('dbnc_t0:inc')
    def set_dbnc_t0(self, dbnc_t0):
        '''
        Debounce time (x * 10us). Sets the value. Parameter: XI_PRM_DEBOUNCE_T0.
        '''
        self.set_param('dbnc_t0', dbnc_t0)
    def get_dbnc_t1(self):
        '''
        Debounce time (x * 10us). Returns the current value. Parameter: XI_PRM_DEBOUNCE_T1.
        '''
        return self.get_param('dbnc_t1')
    def get_dbnc_t1_maximum(self):
        '''
        Debounce time (x * 10us). Returns the maximum supported value. Parameter: XI_PRM_DEBOUNCE_T1.
        '''
        return self.get_param('dbnc_t1:max')
    def get_dbnc_t1_minimum(self):
        '''
        Debounce time (x * 10us). Returns the minimum supported value. Parameter: XI_PRM_DEBOUNCE_T1.
        '''
        return self.get_param('dbnc_t1:min')
    def get_dbnc_t1_increment(self):
        '''
        Debounce time (x * 10us). Returns the smallest valid increment. Parameter: XI_PRM_DEBOUNCE_T1.
        '''
        return self.get_param('dbnc_t1:inc')
    def set_dbnc_t1(self, dbnc_t1):
        '''
        Debounce time (x * 10us). Sets the value. Parameter: XI_PRM_DEBOUNCE_T1.
        '''
        self.set_param('dbnc_t1', dbnc_t1)
    def get_dbnc_pol(self):
        '''
        Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge). Returns the current value. Parameter: XI_PRM_DEBOUNCE_POL.
        '''
        return self.get_param('dbnc_pol')
    def get_dbnc_pol_maximum(self):
        '''
        Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge). Returns the maximum supported value. Parameter: XI_PRM_DEBOUNCE_POL.
        '''
        return self.get_param('dbnc_pol:max')
    def get_dbnc_pol_minimum(self):
        '''
        Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge). Returns the minimum supported value. Parameter: XI_PRM_DEBOUNCE_POL.
        '''
        return self.get_param('dbnc_pol:min')
    def get_dbnc_pol_increment(self):
        '''
        Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge). Returns the smallest valid increment. Parameter: XI_PRM_DEBOUNCE_POL.
        '''
        return self.get_param('dbnc_pol:inc')
    def set_dbnc_pol(self, dbnc_pol):
        '''
        Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge). Sets the value. Parameter: XI_PRM_DEBOUNCE_POL.
        '''
        self.set_param('dbnc_pol', dbnc_pol)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Lens Control
#-------------------------------------------------------------------------------------------------------------------
    def is_lens_mode(self):
        '''
        Status of lens control interface. This shall be set to XI_ON before any Lens operations. Returns the current state. Parameter: XI_PRM_LENS_MODE.
        '''
        return self.get_param('lens_mode')
    def enable_lens_mode(self):
        '''
        Status of lens control interface. This shall be set to XI_ON before any Lens operations. Enables the feature. Parameter: XI_PRM_LENS_MODE.
        '''
        self.set_param('lens_mode', True)
    def disable_lens_mode(self):
        '''
        Status of lens control interface. This shall be set to XI_ON before any Lens operations. Disables the feature. Parameter: XI_PRM_LENS_MODE.
        '''
        self.set_param('lens_mode', False)
    def get_lens_aperture_value(self):
        '''
        Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11. Returns the current value. Parameter: XI_PRM_LENS_APERTURE_VALUE.
        '''
        return self.get_param('lens_aperture_value')
    def get_lens_aperture_value_maximum(self):
        '''
        Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11. Returns the maximum supported value. Parameter: XI_PRM_LENS_APERTURE_VALUE.
        '''
        return self.get_param('lens_aperture_value:max')
    def get_lens_aperture_value_minimum(self):
        '''
        Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11. Returns the minimum supported value. Parameter: XI_PRM_LENS_APERTURE_VALUE.
        '''
        return self.get_param('lens_aperture_value:min')
    def get_lens_aperture_value_increment(self):
        '''
        Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11. Returns the smallest valid increment. Parameter: XI_PRM_LENS_APERTURE_VALUE.
        '''
        return self.get_param('lens_aperture_value:inc')
    def set_lens_aperture_value(self, lens_aperture_value):
        '''
        Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11. Sets the value. Parameter: XI_PRM_LENS_APERTURE_VALUE.
        '''
        self.set_param('lens_aperture_value', lens_aperture_value)
    def get_lens_focus_movement_value(self):
        '''
        Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps. Returns the current value. Parameter: XI_PRM_LENS_FOCUS_MOVEMENT_VALUE.
        '''
        return self.get_param('lens_focus_movement_value')
    def get_lens_focus_movement_value_maximum(self):
        '''
        Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps. Returns the maximum supported value. Parameter: XI_PRM_LENS_FOCUS_MOVEMENT_VALUE.
        '''
        return self.get_param('lens_focus_movement_value:max')
    def get_lens_focus_movement_value_minimum(self):
        '''
        Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps. Returns the minimum supported value. Parameter: XI_PRM_LENS_FOCUS_MOVEMENT_VALUE.
        '''
        return self.get_param('lens_focus_movement_value:min')
    def get_lens_focus_movement_value_increment(self):
        '''
        Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps. Returns the smallest valid increment. Parameter: XI_PRM_LENS_FOCUS_MOVEMENT_VALUE.
        '''
        return self.get_param('lens_focus_movement_value:inc')
    def set_lens_focus_movement_value(self, lens_focus_movement_value):
        '''
        Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps. Sets the value. Parameter: XI_PRM_LENS_FOCUS_MOVEMENT_VALUE.
        '''
        self.set_param('lens_focus_movement_value', lens_focus_movement_value)
    def get_lens_focus_move(self):
        '''
        Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE. Returns the current value. Parameter: XI_PRM_LENS_FOCUS_MOVE.
        '''
        return self.get_param('lens_focus_move')
    def get_lens_focus_move_maximum(self):
        '''
        Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE. Returns the maximum supported value. Parameter: XI_PRM_LENS_FOCUS_MOVE.
        '''
        return self.get_param('lens_focus_move:max')
    def get_lens_focus_move_minimum(self):
        '''
        Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE. Returns the minimum supported value. Parameter: XI_PRM_LENS_FOCUS_MOVE.
        '''
        return self.get_param('lens_focus_move:min')
    def get_lens_focus_move_increment(self):
        '''
        Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE. Returns the smallest valid increment. Parameter: XI_PRM_LENS_FOCUS_MOVE.
        '''
        return self.get_param('lens_focus_move:inc')
    def set_lens_focus_move(self, lens_focus_move):
        '''
        Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE. Sets the value (writing triggers the move). Parameter: XI_PRM_LENS_FOCUS_MOVE.
        '''
        self.set_param('lens_focus_move', lens_focus_move)
    def get_lens_focus_distance(self):
        '''
        Lens focus distance in cm. Returns the current value. Parameter: XI_PRM_LENS_FOCUS_DISTANCE.
        '''
        return self.get_param('lens_focus_distance')
    def get_lens_focus_distance_maximum(self):
        '''
        Lens focus distance in cm. Returns the maximum supported value. Parameter: XI_PRM_LENS_FOCUS_DISTANCE.
        '''
        return self.get_param('lens_focus_distance:max')
    def get_lens_focus_distance_minimum(self):
        '''
        Lens focus distance in cm. Returns the minimum supported value. Parameter: XI_PRM_LENS_FOCUS_DISTANCE.
        '''
        return self.get_param('lens_focus_distance:min')
    def get_lens_focus_distance_increment(self):
        '''
        Lens focus distance in cm. Returns the smallest valid increment. Parameter: XI_PRM_LENS_FOCUS_DISTANCE.
        '''
        return self.get_param('lens_focus_distance:inc')
    def get_lens_focal_length(self):
        '''
        Lens focal distance in mm. Returns the current value. Parameter: XI_PRM_LENS_FOCAL_LENGTH.
        '''
        return self.get_param('lens_focal_length')
    def get_lens_focal_length_maximum(self):
        '''
        Lens focal distance in mm. Returns the maximum supported value. Parameter: XI_PRM_LENS_FOCAL_LENGTH.
        '''
        return self.get_param('lens_focal_length:max')
    def get_lens_focal_length_minimum(self):
        '''
        Lens focal distance in mm. Returns the minimum supported value. Parameter: XI_PRM_LENS_FOCAL_LENGTH.
        '''
        return self.get_param('lens_focal_length:min')
    def get_lens_focal_length_increment(self):
        '''
        Lens focal distance in mm. Returns the smallest valid increment. Parameter: XI_PRM_LENS_FOCAL_LENGTH.
        '''
        return self.get_param('lens_focal_length:inc')
    def get_lens_feature_selector(self):
        '''
        Selects the current feature which is accessible by XI_PRM_LENS_FEATURE. Returns the current value. Parameter: XI_PRM_LENS_FEATURE_SELECTOR.
        '''
        return self.get_param('lens_feature_selector')
    def get_lens_feature_selector_maximum(self):
        '''
        Selects the current feature which is accessible by XI_PRM_LENS_FEATURE. Returns the maximum supported value. Parameter: XI_PRM_LENS_FEATURE_SELECTOR.
        '''
        return self.get_param('lens_feature_selector:max')
    def get_lens_feature_selector_minimum(self):
        '''
        Selects the current feature which is accessible by XI_PRM_LENS_FEATURE. Returns the minimum supported value. Parameter: XI_PRM_LENS_FEATURE_SELECTOR.
        '''
        return self.get_param('lens_feature_selector:min')
    def get_lens_feature_selector_increment(self):
        '''
        Selects the current feature which is accessible by XI_PRM_LENS_FEATURE. Returns the smallest valid increment. Parameter: XI_PRM_LENS_FEATURE_SELECTOR.
        '''
        return self.get_param('lens_feature_selector:inc')
    def set_lens_feature_selector(self, lens_feature_selector):
        '''
        Selects the current feature which is accessible by XI_PRM_LENS_FEATURE. Sets the value. Parameter: XI_PRM_LENS_FEATURE_SELECTOR.
        '''
        self.set_param('lens_feature_selector', lens_feature_selector)
    def get_lens_feature(self):
        '''
        Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR. Returns the current value. Parameter: XI_PRM_LENS_FEATURE.
        '''
        return self.get_param('lens_feature')
    def get_lens_feature_maximum(self):
        '''
        Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR. Returns the maximum supported value. Parameter: XI_PRM_LENS_FEATURE.
        '''
        return self.get_param('lens_feature:max')
    def get_lens_feature_minimum(self):
        '''
        Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR. Returns the minimum supported value. Parameter: XI_PRM_LENS_FEATURE.
        '''
        return self.get_param('lens_feature:min')
    def get_lens_feature_increment(self):
        '''
        Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR. Returns the smallest valid increment. Parameter: XI_PRM_LENS_FEATURE.
        '''
        return self.get_param('lens_feature:inc')
    def set_lens_feature(self, lens_feature):
        '''
        Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR. Sets the value. Parameter: XI_PRM_LENS_FEATURE.
        '''
        self.set_param('lens_feature', lens_feature)
    def get_lens_comm_data(self,buffer_size=256):
        '''
        Write/Read data sequences to/from lens. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_LENS_COMM_DATA.
        '''
        return self.get_param('lens_comm_data',buffer_size)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Device info parameters
#-------------------------------------------------------------------------------------------------------------------
    def get_device_name(self,buffer_size=256):
        '''
        Return device name. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_NAME.
        '''
        return self.get_param('device_name',buffer_size)
    def get_device_type(self,buffer_size=256):
        '''
        Return device type. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_TYPE.
        '''
        return self.get_param('device_type',buffer_size)
    def get_device_model_id(self):
        '''
        Return device model id. Returns the current value. Parameter: XI_PRM_DEVICE_MODEL_ID.
        '''
        return self.get_param('device_model_id')
    def get_device_model_id_maximum(self):
        '''
        Return device model id. Returns the maximum supported value. Parameter: XI_PRM_DEVICE_MODEL_ID.
        '''
        return self.get_param('device_model_id:max')
    def get_device_model_id_minimum(self):
        '''
        Return device model id. Returns the minimum supported value. Parameter: XI_PRM_DEVICE_MODEL_ID.
        '''
        return self.get_param('device_model_id:min')
    def get_device_model_id_increment(self):
        '''
        Return device model id. Returns the smallest valid increment. Parameter: XI_PRM_DEVICE_MODEL_ID.
        '''
        return self.get_param('device_model_id:inc')
    def get_sensor_model_id(self):
        '''
        Return device sensor model id. Returns the current value. Parameter: XI_PRM_SENSOR_MODEL_ID.
        '''
        return self.get_param('sensor_model_id')
    def get_sensor_model_id_maximum(self):
        '''
        Return device sensor model id. Returns the maximum supported value. Parameter: XI_PRM_SENSOR_MODEL_ID.
        '''
        return self.get_param('sensor_model_id:max')
    def get_sensor_model_id_minimum(self):
        '''
        Return device sensor model id. Returns the minimum supported value. Parameter: XI_PRM_SENSOR_MODEL_ID.
        '''
        return self.get_param('sensor_model_id:min')
    def get_sensor_model_id_increment(self):
        '''
        Return device sensor model id. Returns the smallest valid increment. Parameter: XI_PRM_SENSOR_MODEL_ID.
        '''
        return self.get_param('sensor_model_id:inc')
    def get_device_sn(self,buffer_size=256):
        '''
        Return device serial number. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_SN.
        '''
        return self.get_param('device_sn',buffer_size)
    def get_device_sens_sn(self,buffer_size=256):
        '''
        Return sensor serial number. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_SENS_SN.
        '''
        return self.get_param('device_sens_sn',buffer_size)
    def get_device_id(self,buffer_size=256):
        '''
        Return unique device ID. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_ID.
        '''
        return self.get_param('device_id',buffer_size)
    def get_device_inst_path(self,buffer_size=256):
        '''
        Return device system instance path. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_INSTANCE_PATH.
        '''
        return self.get_param('device_inst_path',buffer_size)
    def get_device_loc_path(self,buffer_size=256):
        '''
        Represents the location of the device in the device tree. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_LOCATION_PATH.
        '''
        return self.get_param('device_loc_path',buffer_size)
    def get_device_user_id(self,buffer_size=256):
        '''
        Return custom ID of camera. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_USER_ID.
        '''
        return self.get_param('device_user_id',buffer_size)
    def get_device_manifest(self,buffer_size=256):
        '''
        Return device capability description XML. buffer_size is the size in bytes of the buffer used to receive the value. Parameter: XI_PRM_DEVICE_MANIFEST.
        '''
        return self.get_param('device_manifest',buffer_size)
    def get_image_user_data(self):
        '''
        User image data at image header to track parameters synchronization. Returns the current value. Parameter: XI_PRM_IMAGE_USER_DATA.
        '''
        return self.get_param('image_user_data')
    def get_image_user_data_maximum(self):
        '''
        User image data at image header to track parameters synchronization. Returns the maximum supported value. Parameter: XI_PRM_IMAGE_USER_DATA.
        '''
        return self.get_param('image_user_data:max')
    def get_image_user_data_minimum(self):
        '''
        User image data at image header to track parameters synchronization. Returns the minimum supported value. Parameter: XI_PRM_IMAGE_USER_DATA.
        '''
        return self.get_param('image_user_data:min')
    def get_image_user_data_increment(self):
        '''
        User image data at image header to track parameters synchronization. Returns the smallest valid increment. Parameter: XI_PRM_IMAGE_USER_DATA.
        '''
        return self.get_param('image_user_data:inc')
    def set_image_user_data(self, image_user_data):
        '''
        User image data at image header to track parameters synchronization. Sets the value. Parameter: XI_PRM_IMAGE_USER_DATA.
        '''
        self.set_param('image_user_data', image_user_data)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Device acquisition settings
#-------------------------------------------------------------------------------------------------------------------
    def get_imgdataformatrgb32alpha(self):
        '''
        The alpha channel of RGB32 output image format. Returns the current value. Parameter: XI_PRM_IMAGE_DATA_FORMAT_RGB32_ALPHA.
        '''
        return self.get_param('imgdataformatrgb32alpha')
    def get_imgdataformatrgb32alpha_maximum(self):
        '''
        The alpha channel of RGB32 output image format. Returns the maximum supported value. Parameter: XI_PRM_IMAGE_DATA_FORMAT_RGB32_ALPHA.
        '''
        return self.get_param('imgdataformatrgb32alpha:max')
    def get_imgdataformatrgb32alpha_minimum(self):
        '''
        The alpha channel of RGB32 output image format. Returns the minimum supported value. Parameter: XI_PRM_IMAGE_DATA_FORMAT_RGB32_ALPHA.
        '''
        return self.get_param('imgdataformatrgb32alpha:min')
    def get_imgdataformatrgb32alpha_increment(self):
        '''
        The alpha channel of RGB32 output image format. Returns the smallest valid increment. Parameter: XI_PRM_IMAGE_DATA_FORMAT_RGB32_ALPHA.
        '''
        return self.get_param('imgdataformatrgb32alpha:inc')
    def set_imgdataformatrgb32alpha(self, imgdataformatrgb32alpha):
        '''
        The alpha channel of RGB32 output image format. Sets the value. Parameter: XI_PRM_IMAGE_DATA_FORMAT_RGB32_ALPHA.
        '''
        self.set_param('imgdataformatrgb32alpha', imgdataformatrgb32alpha)
    def get_imgpayloadsize(self):
        '''
        Buffer size in bytes sufficient for output image returned by xiGetImage. Returns the current value. Parameter: XI_PRM_IMAGE_PAYLOAD_SIZE.
        '''
        return self.get_param('imgpayloadsize')
    def get_imgpayloadsize_maximum(self):
        '''
        Buffer size in bytes sufficient for output image returned by xiGetImage. Returns the maximum supported value. Parameter: XI_PRM_IMAGE_PAYLOAD_SIZE.
        '''
        return self.get_param('imgpayloadsize:max')
    def get_imgpayloadsize_minimum(self):
        '''
        Buffer size in bytes sufficient for output image returned by xiGetImage. Returns the minimum supported value. Parameter: XI_PRM_IMAGE_PAYLOAD_SIZE.
        '''
        return self.get_param('imgpayloadsize:min')
    def get_imgpayloadsize_increment(self):
        '''
        Buffer size in bytes sufficient for output image returned by xiGetImage. Returns the smallest valid increment. Parameter: XI_PRM_IMAGE_PAYLOAD_SIZE.
        '''
        return self.get_param('imgpayloadsize:inc')
    def get_transport_pixel_format(self):
        '''
        Current format of pixels on transport layer. Returns the current value. Parameter: XI_PRM_TRANSPORT_PIXEL_FORMAT.
        '''
        return self.get_param('transport_pixel_format')
    def get_transport_pixel_format_maximum(self):
        '''
        Current format of pixels on transport layer. Returns the maximum supported value. Parameter: XI_PRM_TRANSPORT_PIXEL_FORMAT.
        '''
        return self.get_param('transport_pixel_format:max')
    def get_transport_pixel_format_minimum(self):
        '''
        Current format of pixels on transport layer. Returns the minimum supported value. Parameter: XI_PRM_TRANSPORT_PIXEL_FORMAT.
        '''
        return self.get_param('transport_pixel_format:min')
    def get_transport_pixel_format_increment(self):
        '''
        Current format of pixels on transport layer. Returns the smallest valid increment. Parameter: XI_PRM_TRANSPORT_PIXEL_FORMAT.
        '''
        return self.get_param('transport_pixel_format:inc')
    def set_transport_pixel_format(self, transport_pixel_format):
        '''
        Current format of pixels on transport layer. Sets the value. Parameter: XI_PRM_TRANSPORT_PIXEL_FORMAT.
        '''
        self.set_param('transport_pixel_format', transport_pixel_format)
    def get_transport_data_target(self):
        '''
        Target selector for data - CPU RAM or GPU RAM. Returns the current value. Parameter: XI_PRM_TRANSPORT_DATA_TARGET.
        '''
        return self.get_param('transport_data_target')
    def get_transport_data_target_maximum(self):
        '''
        Target selector for data - CPU RAM or GPU RAM. Returns the maximum supported value. Parameter: XI_PRM_TRANSPORT_DATA_TARGET.
        '''
        return self.get_param('transport_data_target:max')
    def get_transport_data_target_minimum(self):
        '''
        Target selector for data - CPU RAM or GPU RAM. Returns the minimum supported value. Parameter: XI_PRM_TRANSPORT_DATA_TARGET.
        '''
        return self.get_param('transport_data_target:min')
    def get_transport_data_target_increment(self):
        '''
        Target selector for data - CPU RAM or GPU RAM. Returns the smallest valid increment. Parameter: XI_PRM_TRANSPORT_DATA_TARGET.
        '''
        return self.get_param('transport_data_target:inc')
    def set_transport_data_target(self, transport_data_target):
        '''
        Target selector for data - CPU RAM or GPU RAM. Sets the value. Parameter: XI_PRM_TRANSPORT_DATA_TARGET.
        '''
        self.set_param('transport_data_target', transport_data_target)
    def get_sensor_clock_freq_hz(self):
        '''
        Sensor clock frequency in Hz. Returns the current value. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_HZ.
        '''
        return self.get_param('sensor_clock_freq_hz')
    def get_sensor_clock_freq_hz_maximum(self):
        '''
        Sensor clock frequency in Hz. Returns the maximum supported value. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_HZ.
        '''
        return self.get_param('sensor_clock_freq_hz:max')
    def get_sensor_clock_freq_hz_minimum(self):
        '''
        Sensor clock frequency in Hz. Returns the minimum supported value. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_HZ.
        '''
        return self.get_param('sensor_clock_freq_hz:min')
    def get_sensor_clock_freq_hz_increment(self):
        '''
        Sensor clock frequency in Hz. Returns the smallest valid increment. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_HZ.
        '''
        return self.get_param('sensor_clock_freq_hz:inc')
    def set_sensor_clock_freq_hz(self, sensor_clock_freq_hz):
        '''
        Sensor clock frequency in Hz. Sets the value. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_HZ.
        '''
        self.set_param('sensor_clock_freq_hz', sensor_clock_freq_hz)
    def get_sensor_clock_freq_index(self):
        '''
        Sensor clock frequency index. Sensor with selected frequencies have possibility to set the frequency only by this index. Returns the current value. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_INDEX.
        '''
        return self.get_param('sensor_clock_freq_index')
    def get_sensor_clock_freq_index_maximum(self):
        '''
        Sensor clock frequency index. Sensor with selected frequencies have possibility to set the frequency only by this index. Returns the maximum supported value. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_INDEX.
        '''
        return self.get_param('sensor_clock_freq_index:max')
    def get_sensor_clock_freq_index_minimum(self):
        '''
        Sensor clock frequency index. Sensor with selected frequencies have possibility to set the frequency only by this index. Returns the minimum supported value. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_INDEX.
        '''
        return self.get_param('sensor_clock_freq_index:min')
    def get_sensor_clock_freq_index_increment(self):
        '''
        Sensor clock frequency index. Sensor with selected frequencies have possibility to set the frequency only by this index. Returns the smallest valid increment. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_INDEX.
        '''
        return self.get_param('sensor_clock_freq_index:inc')
    def set_sensor_clock_freq_index(self, sensor_clock_freq_index):
        '''
        Sensor clock frequency index. Sensor with selected frequencies have possibility to set the frequency only by this index. Sets the value. Parameter: XI_PRM_SENSOR_CLOCK_FREQ_INDEX.
        '''
        self.set_param('sensor_clock_freq_index', sensor_clock_freq_index)
    def get_sensor_output_channel_count(self):
        '''
        Number of output channels from sensor used for data transfer. Returns the current value. Parameter: XI_PRM_SENSOR_OUTPUT_CHANNEL_COUNT.
        '''
        return self.get_param('sensor_output_channel_count')
    def get_sensor_output_channel_count_maximum(self):
        '''
        Number of output channels from sensor used for data transfer. Returns the maximum supported value. Parameter: XI_PRM_SENSOR_OUTPUT_CHANNEL_COUNT.
        '''
        return self.get_param('sensor_output_channel_count:max')
    def get_sensor_output_channel_count_minimum(self):
        '''
        Number of output channels from sensor used for data transfer. Returns the minimum supported value. Parameter: XI_PRM_SENSOR_OUTPUT_CHANNEL_COUNT.
        '''
        return self.get_param('sensor_output_channel_count:min')
    def get_sensor_output_channel_count_increment(self):
        '''
        Number of output channels from sensor used for data transfer. Returns the smallest valid increment. Parameter: XI_PRM_SENSOR_OUTPUT_CHANNEL_COUNT.
        '''
        return self.get_param('sensor_output_channel_count:inc')
    def set_sensor_output_channel_count(self, sensor_output_channel_count):
        '''
        Number of output channels from sensor used for data transfer. Sets the value. Parameter: XI_PRM_SENSOR_OUTPUT_CHANNEL_COUNT.
        '''
        self.set_param('sensor_output_channel_count', sensor_output_channel_count)
    def get_framerate(self):
        '''
        Define framerate in Hz. Returns the current value. Parameter: XI_PRM_FRAMERATE.
        '''
        return self.get_param('framerate')
    def get_framerate_maximum(self):
        '''
        Define framerate in Hz. Returns the maximum supported value. Parameter: XI_PRM_FRAMERATE.
        '''
        return self.get_param('framerate:max')
    def get_framerate_minimum(self):
        '''
        Define framerate in Hz. Returns the minimum supported value. Parameter: XI_PRM_FRAMERATE.
        '''
        return self.get_param('framerate:min')
    def get_framerate_increment(self):
        '''
        Define framerate in Hz. Returns the smallest valid increment. Parameter: XI_PRM_FRAMERATE.
        '''
        return self.get_param('framerate:inc')
    def set_framerate(self, framerate):
        '''
        Define framerate in Hz. Sets the value. Parameter: XI_PRM_FRAMERATE.
        '''
        self.set_param('framerate', framerate)
    def get_counter_selector(self):
        '''
        Select counter. Returns the current value. Parameter: XI_PRM_COUNTER_SELECTOR.
        '''
        return self.get_param('counter_selector')
    def get_counter_selector_maximum(self):
        '''
        Select counter. Returns the maximum supported value. Parameter: XI_PRM_COUNTER_SELECTOR.
        '''
        return self.get_param('counter_selector:max')
    def get_counter_selector_minimum(self):
        '''
        Select counter. Returns the minimum supported value. Parameter: XI_PRM_COUNTER_SELECTOR.
        '''
        return self.get_param('counter_selector:min')
    def get_counter_selector_increment(self):
        '''
        Select counter. Returns the smallest valid increment. Parameter: XI_PRM_COUNTER_SELECTOR.
        '''
        return self.get_param('counter_selector:inc')
    def set_counter_selector(self, counter_selector):
        '''
        Select counter. Sets the value. Parameter: XI_PRM_COUNTER_SELECTOR.
        '''
        self.set_param('counter_selector', counter_selector)
    def get_counter_value(self):
        '''
        Counter status. Returns the current value. Parameter: XI_PRM_COUNTER_VALUE.
        '''
        return self.get_param('counter_value')
    def get_counter_value_maximum(self):
        '''
        Counter status. Returns the maximum supported value. Parameter: XI_PRM_COUNTER_VALUE.
        '''
        return self.get_param('counter_value:max')
    def get_counter_value_minimum(self):
        '''
        Counter status. Returns the minimum supported value. Parameter: XI_PRM_COUNTER_VALUE.
        '''
        return self.get_param('counter_value:min')
    def get_counter_value_increment(self):
        '''
        Counter status. Returns the smallest valid increment. Parameter: XI_PRM_COUNTER_VALUE.
        '''
        return self.get_param('counter_value:inc')
    def get_acq_timing_mode(self):
        '''
        Type of sensor frames timing. Returns the current value. Parameter: XI_PRM_ACQ_TIMING_MODE.
        '''
        return self.get_param('acq_timing_mode')
    def get_acq_timing_mode_maximum(self):
        '''
        Type of sensor frames timing. Returns the maximum supported value. Parameter: XI_PRM_ACQ_TIMING_MODE.
        '''
        return self.get_param('acq_timing_mode:max')
    def get_acq_timing_mode_minimum(self):
        '''
        Type of sensor frames timing. Returns the minimum supported value. Parameter: XI_PRM_ACQ_TIMING_MODE.
        '''
        return self.get_param('acq_timing_mode:min')
    def get_acq_timing_mode_increment(self):
        '''
        Type of sensor frames timing. Returns the smallest valid increment. Parameter: XI_PRM_ACQ_TIMING_MODE.
        '''
        return self.get_param('acq_timing_mode:inc')
    def set_acq_timing_mode(self, acq_timing_mode):
        '''
        Type of sensor frames timing. Sets the value. Parameter: XI_PRM_ACQ_TIMING_MODE.
        '''
        self.set_param('acq_timing_mode', acq_timing_mode)
    def get_available_bandwidth(self):
        '''
        Measure and return available interface bandwidth (in Megabits). Returns the current value. Parameter: XI_PRM_AVAILABLE_BANDWIDTH.
        '''
        return self.get_param('available_bandwidth')
    def get_available_bandwidth_maximum(self):
        '''
        Measure and return available interface bandwidth (in Megabits). Returns the maximum supported value. Parameter: XI_PRM_AVAILABLE_BANDWIDTH.
        '''
        return self.get_param('available_bandwidth:max')
    def get_available_bandwidth_minimum(self):
        '''
        Measure and return available interface bandwidth (in Megabits). Returns the minimum supported value. Parameter: XI_PRM_AVAILABLE_BANDWIDTH.
        '''
        return self.get_param('available_bandwidth:min')
    def get_available_bandwidth_increment(self):
        '''
        Measure and return available interface bandwidth (in Megabits). Returns the smallest valid increment. Parameter: XI_PRM_AVAILABLE_BANDWIDTH.
        '''
        return self.get_param('available_bandwidth:inc')
    def get_buffer_policy(self):
        '''
        Data move policy. Returns the current value. Parameter: XI_PRM_BUFFER_POLICY.
        '''
        return self.get_param('buffer_policy')
    def get_buffer_policy_maximum(self):
        '''
        Data move policy. Returns the maximum supported value. Parameter: XI_PRM_BUFFER_POLICY.
        '''
        return self.get_param('buffer_policy:max')
    def get_buffer_policy_minimum(self):
        '''
        Data move policy. Returns the minimum supported value. Parameter: XI_PRM_BUFFER_POLICY.
        '''
        return self.get_param('buffer_policy:min')
    def get_buffer_policy_increment(self):
        '''
        Data move policy. Returns the smallest valid increment. Parameter: XI_PRM_BUFFER_POLICY.
        '''
        return self.get_param('buffer_policy:inc')
    def set_buffer_policy(self, buffer_policy):
        '''
        Data move policy. Sets the value. Parameter: XI_PRM_BUFFER_POLICY.
        '''
        self.set_param('buffer_policy', buffer_policy)
    def is_LUTEnable(self):
        '''
        Activates LUT. Returns the current state. Parameter: XI_PRM_LUT_EN.
        '''
        return self.get_param('LUTEnable')
    def enable_LUTEnable(self):
        '''
        Activates LUT. Enables the feature. Parameter: XI_PRM_LUT_EN.
        '''
        self.set_param('LUTEnable', True)
    def disable_LUTEnable(self):
        '''
        Activates LUT. Disables the feature. Parameter: XI_PRM_LUT_EN.
        '''
        self.set_param('LUTEnable', False)
    def get_LUTIndex(self):
        '''
        Control the index (offset) of the coefficient to access in the LUT. Returns the current value. Parameter: XI_PRM_LUT_INDEX.
        '''
        return self.get_param('LUTIndex')
    def get_LUTIndex_maximum(self):
        '''
        Control the index (offset) of the coefficient to access in the LUT. Returns the maximum supported value. Parameter: XI_PRM_LUT_INDEX.
        '''
        return self.get_param('LUTIndex:max')
    def get_LUTIndex_minimum(self):
        '''
        Control the index (offset) of the coefficient to access in the LUT. Returns the minimum supported value. Parameter: XI_PRM_LUT_INDEX.
        '''
        return self.get_param('LUTIndex:min')
    def get_LUTIndex_increment(self):
        '''
        Control the index (offset) of the coefficient to access in the LUT. Returns the smallest valid increment. Parameter: XI_PRM_LUT_INDEX.
        '''
        return self.get_param('LUTIndex:inc')
    def set_LUTIndex(self, LUTIndex):
        '''
        Control the index (offset) of the coefficient to access in the LUT. Sets the value. Parameter: XI_PRM_LUT_INDEX.
        '''
        self.set_param('LUTIndex', LUTIndex)
    def get_LUTValue(self):
        '''
        Value at entry LUTIndex of the LUT. Returns the current value. Parameter: XI_PRM_LUT_VALUE.
        '''
        return self.get_param('LUTValue')
    def get_LUTValue_maximum(self):
        '''
        Value at entry LUTIndex of the LUT. Returns the maximum supported value. Parameter: XI_PRM_LUT_VALUE.
        '''
        return self.get_param('LUTValue:max')
    def get_LUTValue_minimum(self):
        '''
        Value at entry LUTIndex of the LUT. Returns the minimum supported value. Parameter: XI_PRM_LUT_VALUE.
        '''
        return self.get_param('LUTValue:min')
    def get_LUTValue_increment(self):
        '''
        Value at entry LUTIndex of the LUT. Returns the smallest valid increment. Parameter: XI_PRM_LUT_VALUE.
        '''
        return self.get_param('LUTValue:inc')
    def set_LUTValue(self, LUTValue):
        '''
        Value at entry LUTIndex of the LUT. Sets the value. Parameter: XI_PRM_LUT_VALUE.
        '''
        self.set_param('LUTValue', LUTValue)
    def get_trigger_delay(self):
        '''
        Specifies the delay in microseconds (us) to apply after the trigger reception before activating it. Returns the current value. Parameter: XI_PRM_TRG_DELAY.
        '''
        return self.get_param('trigger_delay')
    def get_trigger_delay_maximum(self):
        '''
        Specifies the delay in microseconds (us) to apply after the trigger reception before activating it. Returns the maximum supported value. Parameter: XI_PRM_TRG_DELAY.
        '''
        return self.get_param('trigger_delay:max')
    def get_trigger_delay_minimum(self):
        '''
        Specifies the delay in microseconds (us) to apply after the trigger reception before activating it. Returns the minimum supported value. Parameter: XI_PRM_TRG_DELAY.
        '''
        return self.get_param('trigger_delay:min')
    def get_trigger_delay_increment(self):
        '''
        Specifies the delay in microseconds (us) to apply after the trigger reception before activating it. Returns the smallest valid increment. Parameter: XI_PRM_TRG_DELAY.
        '''
        return self.get_param('trigger_delay:inc')
    def set_trigger_delay(self, trigger_delay):
        '''
        Specifies the delay in microseconds (us) to apply after the trigger reception before activating it. Sets the value. Parameter: XI_PRM_TRG_DELAY.
        '''
        self.set_param('trigger_delay', trigger_delay)
    def get_ts_rst_mode(self):
        '''
        Defines how time stamp reset engine will be armed. Returns the current value. Parameter: XI_PRM_TS_RST_MODE.
        '''
        return self.get_param('ts_rst_mode')
    def get_ts_rst_mode_maximum(self):
        '''
        Defines how time stamp reset engine will be armed. Returns the maximum supported value. Parameter: XI_PRM_TS_RST_MODE.
        '''
        return self.get_param('ts_rst_mode:max')
    def get_ts_rst_mode_minimum(self):
        '''
        Defines how time stamp reset engine will be armed. Returns the minimum supported value. Parameter: XI_PRM_TS_RST_MODE.
        '''
        return self.get_param('ts_rst_mode:min')
    def get_ts_rst_mode_increment(self):
        '''
        Defines how time stamp reset engine will be armed. Returns the smallest valid increment. Parameter: XI_PRM_TS_RST_MODE.
        '''
        return self.get_param('ts_rst_mode:inc')
    def set_ts_rst_mode(self, ts_rst_mode):
        '''
        Defines how time stamp reset engine will be armed. Sets the value. Parameter: XI_PRM_TS_RST_MODE.
        '''
        self.set_param('ts_rst_mode', ts_rst_mode)
    def get_ts_rst_source(self):
        '''
        Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming). Returns the current value. Parameter: XI_PRM_TS_RST_SOURCE.
        '''
        return self.get_param('ts_rst_source')
    def get_ts_rst_source_maximum(self):
        '''
        Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming). Returns the maximum supported value. Parameter: XI_PRM_TS_RST_SOURCE.
        '''
        return self.get_param('ts_rst_source:max')
    def get_ts_rst_source_minimum(self):
        '''
        Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming). Returns the minimum supported value. Parameter: XI_PRM_TS_RST_SOURCE.
        '''
        return self.get_param('ts_rst_source:min')
    def get_ts_rst_source_increment(self):
        '''
        Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming). Returns the smallest valid increment. Parameter: XI_PRM_TS_RST_SOURCE.
        '''
        return self.get_param('ts_rst_source:inc')
    def set_ts_rst_source(self, ts_rst_source):
        '''
        Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming). Sets the value. Parameter: XI_PRM_TS_RST_SOURCE.
        '''
        self.set_param('ts_rst_source', ts_rst_source)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Extended Device parameters
#-------------------------------------------------------------------------------------------------------------------
    def is_isexist(self):
        '''
        Returns 1 if camera connected and works properly. Returns the current state. Parameter: XI_PRM_IS_DEVICE_EXIST.
        '''
        return self.get_param('isexist')
    def get_acq_buffer_size(self):
        '''
        Acquisition buffer size in buffer_size_unit. Default bytes. Returns the current value. Parameter: XI_PRM_ACQ_BUFFER_SIZE.
        '''
        return self.get_param('acq_buffer_size')
    def get_acq_buffer_size_maximum(self):
        '''
        Acquisition buffer size in buffer_size_unit. Default bytes. Returns the maximum supported value. Parameter: XI_PRM_ACQ_BUFFER_SIZE.
        '''
        return self.get_param('acq_buffer_size:max')
    def get_acq_buffer_size_minimum(self):
        '''
        Acquisition buffer size in buffer_size_unit. Default bytes. Returns the minimum supported value. Parameter: XI_PRM_ACQ_BUFFER_SIZE.
        '''
        return self.get_param('acq_buffer_size:min')
    def get_acq_buffer_size_increment(self):
        '''
        Acquisition buffer size in buffer_size_unit. Default bytes. Returns the smallest valid increment. Parameter: XI_PRM_ACQ_BUFFER_SIZE.
        '''
        return self.get_param('acq_buffer_size:inc')
    def set_acq_buffer_size(self, acq_buffer_size):
        '''
        Acquisition buffer size in buffer_size_unit. Default bytes. Sets the value. Parameter: XI_PRM_ACQ_BUFFER_SIZE.
        '''
        self.set_param('acq_buffer_size', acq_buffer_size)
    def get_acq_buffer_size_unit(self):
        '''
        Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes. Returns the current value. Parameter: XI_PRM_ACQ_BUFFER_SIZE_UNIT.
        '''
        return self.get_param('acq_buffer_size_unit')
    def get_acq_buffer_size_unit_maximum(self):
        '''
        Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes. Returns the maximum supported value. Parameter: XI_PRM_ACQ_BUFFER_SIZE_UNIT.
        '''
        return self.get_param('acq_buffer_size_unit:max')
    def get_acq_buffer_size_unit_minimum(self):
        '''
        Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes. Returns the minimum supported value. Parameter: XI_PRM_ACQ_BUFFER_SIZE_UNIT.
        '''
        return self.get_param('acq_buffer_size_unit:min')
    def get_acq_buffer_size_unit_increment(self):
        '''
        Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes. Returns the smallest valid increment. Parameter: XI_PRM_ACQ_BUFFER_SIZE_UNIT.
        '''
        return self.get_param('acq_buffer_size_unit:inc')
    def set_acq_buffer_size_unit(self, acq_buffer_size_unit):
        '''
        Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes. Sets the value. Parameter: XI_PRM_ACQ_BUFFER_SIZE_UNIT.
        '''
        self.set_param('acq_buffer_size_unit', acq_buffer_size_unit)
    def get_acq_transport_buffer_size(self):
        '''
        Acquisition transport buffer size in bytes. Returns the current value. Parameter: XI_PRM_ACQ_TRANSPORT_BUFFER_SIZE.
        '''
        return self.get_param('acq_transport_buffer_size')
    def get_acq_transport_buffer_size_maximum(self):
        '''
        Acquisition transport buffer size in bytes. Returns the maximum supported value. Parameter: XI_PRM_ACQ_TRANSPORT_BUFFER_SIZE.
        '''
        return self.get_param('acq_transport_buffer_size:max')
    def get_acq_transport_buffer_size_minimum(self):
        '''
        Acquisition transport buffer size in bytes. Returns the minimum supported value. Parameter: XI_PRM_ACQ_TRANSPORT_BUFFER_SIZE.
        '''
        return self.get_param('acq_transport_buffer_size:min')
    def get_acq_transport_buffer_size_increment(self):
        '''
        Acquisition transport buffer size in bytes. Returns the smallest valid increment. Parameter: XI_PRM_ACQ_TRANSPORT_BUFFER_SIZE.
        '''
        return self.get_param('acq_transport_buffer_size:inc')
def set_acq_transport_buffer_size(self, acq_transport_buffer_size):
'''
Acquisition transport buffer size in bytesXI_PRM_ACQ_TRANSPORT_BUFFER_SIZE
'''
self.set_param('acq_transport_buffer_size', acq_transport_buffer_size)
def get_acq_transport_packet_size(self):
'''
Acquisition transport packet size in bytesXI_PRM_ACQ_TRANSPORT_PACKET_SIZE
'''
return self.get_param('acq_transport_packet_size')
def get_acq_transport_packet_size_maximum(self):
'''
Acquisition transport packet size in bytesXI_PRM_ACQ_TRANSPORT_PACKET_SIZE
'''
return self.get_param('acq_transport_packet_size:max')
def get_acq_transport_packet_size_minimum(self):
'''
Acquisition transport packet size in bytesXI_PRM_ACQ_TRANSPORT_PACKET_SIZE
'''
return self.get_param('acq_transport_packet_size:min')
def get_acq_transport_packet_size_increment(self):
'''
Acquisition transport packet size in bytesXI_PRM_ACQ_TRANSPORT_PACKET_SIZE
'''
return self.get_param('acq_transport_packet_size:inc')
def set_acq_transport_packet_size(self, acq_transport_packet_size):
'''
Acquisition transport packet size in bytesXI_PRM_ACQ_TRANSPORT_PACKET_SIZE
'''
self.set_param('acq_transport_packet_size', acq_transport_packet_size)
def get_buffers_queue_size(self):
'''
Queue of field/frame buffersXI_PRM_BUFFERS_QUEUE_SIZE
'''
return self.get_param('buffers_queue_size')
def get_buffers_queue_size_maximum(self):
'''
Queue of field/frame buffersXI_PRM_BUFFERS_QUEUE_SIZE
'''
return self.get_param('buffers_queue_size:max')
def get_buffers_queue_size_minimum(self):
'''
Queue of field/frame buffersXI_PRM_BUFFERS_QUEUE_SIZE
'''
return self.get_param('buffers_queue_size:min')
def get_buffers_queue_size_increment(self):
'''
Queue of field/frame buffersXI_PRM_BUFFERS_QUEUE_SIZE
'''
return self.get_param('buffers_queue_size:inc')
def set_buffers_queue_size(self, buffers_queue_size):
'''
Queue of field/frame buffersXI_PRM_BUFFERS_QUEUE_SIZE
'''
self.set_param('buffers_queue_size', buffers_queue_size)
def get_acq_transport_buffer_commit(self):
'''
Number of buffers to commit to low levelXI_PRM_ACQ_TRANSPORT_BUFFER_COMMIT
'''
return self.get_param('acq_transport_buffer_commit')
def get_acq_transport_buffer_commit_maximum(self):
'''
Number of buffers to commit to low levelXI_PRM_ACQ_TRANSPORT_BUFFER_COMMIT
'''
return self.get_param('acq_transport_buffer_commit:max')
def get_acq_transport_buffer_commit_minimum(self):
'''
Number of buffers to commit to low levelXI_PRM_ACQ_TRANSPORT_BUFFER_COMMIT
'''
return self.get_param('acq_transport_buffer_commit:min')
def get_acq_transport_buffer_commit_increment(self):
'''
Number of buffers to commit to low levelXI_PRM_ACQ_TRANSPORT_BUFFER_COMMIT
'''
return self.get_param('acq_transport_buffer_commit:inc')
def set_acq_transport_buffer_commit(self, acq_transport_buffer_commit):
'''
Number of buffers to commit to low levelXI_PRM_ACQ_TRANSPORT_BUFFER_COMMIT
'''
self.set_param('acq_transport_buffer_commit', acq_transport_buffer_commit)
def is_recent_frame(self):
'''
GetImage returns most recent frameXI_PRM_RECENT_FRAME
'''
return self.get_param('recent_frame')
def enable_recent_frame(self):
'''
GetImage returns most recent frameXI_PRM_RECENT_FRAME
'''
self.set_param('recent_frame', True)
def disable_recent_frame(self):
'''
GetImage returns most recent frameXI_PRM_RECENT_FRAME
'''
self.set_param('recent_frame', False)
def get_device_reset(self):
'''
Resets the camera to default state.XI_PRM_DEVICE_RESET
'''
return self.get_param('device_reset')
def get_device_reset_maximum(self):
'''
Resets the camera to default state.XI_PRM_DEVICE_RESET
'''
return self.get_param('device_reset:max')
def get_device_reset_minimum(self):
'''
Resets the camera to default state.XI_PRM_DEVICE_RESET
'''
return self.get_param('device_reset:min')
def get_device_reset_increment(self):
'''
Resets the camera to default state.XI_PRM_DEVICE_RESET
'''
return self.get_param('device_reset:inc')
def set_device_reset(self, device_reset):
'''
Resets the camera to default state.XI_PRM_DEVICE_RESET
'''
self.set_param('device_reset', device_reset)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Sensor Defects Correction
#-------------------------------------------------------------------------------------------------------------------
    def get_column_fpn_correction(self):
        '''
        Correction of column FPN. (XI_PRM_COLUMN_FPN_CORRECTION)
        '''
        return self.get_param('column_fpn_correction')
    def get_column_fpn_correction_maximum(self):
        '''
        Correction of column FPN. (XI_PRM_COLUMN_FPN_CORRECTION:max)
        '''
        return self.get_param('column_fpn_correction:max')
    def get_column_fpn_correction_minimum(self):
        '''
        Correction of column FPN. (XI_PRM_COLUMN_FPN_CORRECTION:min)
        '''
        return self.get_param('column_fpn_correction:min')
    def get_column_fpn_correction_increment(self):
        '''
        Correction of column FPN. (XI_PRM_COLUMN_FPN_CORRECTION:inc)
        '''
        return self.get_param('column_fpn_correction:inc')
    def set_column_fpn_correction(self, column_fpn_correction):
        '''
        Correction of column FPN. (XI_PRM_COLUMN_FPN_CORRECTION)
        '''
        self.set_param('column_fpn_correction', column_fpn_correction)
    def get_row_fpn_correction(self):
        '''
        Correction of row FPN. (XI_PRM_ROW_FPN_CORRECTION)
        '''
        return self.get_param('row_fpn_correction')
    def get_row_fpn_correction_maximum(self):
        '''
        Correction of row FPN. (XI_PRM_ROW_FPN_CORRECTION:max)
        '''
        return self.get_param('row_fpn_correction:max')
    def get_row_fpn_correction_minimum(self):
        '''
        Correction of row FPN. (XI_PRM_ROW_FPN_CORRECTION:min)
        '''
        return self.get_param('row_fpn_correction:min')
    def get_row_fpn_correction_increment(self):
        '''
        Correction of row FPN. (XI_PRM_ROW_FPN_CORRECTION:inc)
        '''
        return self.get_param('row_fpn_correction:inc')
    def set_row_fpn_correction(self, row_fpn_correction):
        '''
        Correction of row FPN. (XI_PRM_ROW_FPN_CORRECTION)
        '''
        self.set_param('row_fpn_correction', row_fpn_correction)
    def get_image_correction_selector(self):
        '''
        Select image correction function. (XI_PRM_IMAGE_CORRECTION_SELECTOR)
        '''
        return self.get_param('image_correction_selector')
    def get_image_correction_selector_maximum(self):
        '''
        Select image correction function. (XI_PRM_IMAGE_CORRECTION_SELECTOR:max)
        '''
        return self.get_param('image_correction_selector:max')
    def get_image_correction_selector_minimum(self):
        '''
        Select image correction function. (XI_PRM_IMAGE_CORRECTION_SELECTOR:min)
        '''
        return self.get_param('image_correction_selector:min')
    def get_image_correction_selector_increment(self):
        '''
        Select image correction function. (XI_PRM_IMAGE_CORRECTION_SELECTOR:inc)
        '''
        return self.get_param('image_correction_selector:inc')
    def set_image_correction_selector(self, image_correction_selector):
        '''
        Select image correction function. (XI_PRM_IMAGE_CORRECTION_SELECTOR)
        '''
        self.set_param('image_correction_selector', image_correction_selector)
    def get_image_correction_value(self):
        '''
        Select image correction selected function value. (XI_PRM_IMAGE_CORRECTION_VALUE)
        '''
        return self.get_param('image_correction_value')
    def get_image_correction_value_maximum(self):
        '''
        Select image correction selected function value. (XI_PRM_IMAGE_CORRECTION_VALUE:max)
        '''
        return self.get_param('image_correction_value:max')
    def get_image_correction_value_minimum(self):
        '''
        Select image correction selected function value. (XI_PRM_IMAGE_CORRECTION_VALUE:min)
        '''
        return self.get_param('image_correction_value:min')
    def get_image_correction_value_increment(self):
        '''
        Select image correction selected function value. (XI_PRM_IMAGE_CORRECTION_VALUE:inc)
        '''
        return self.get_param('image_correction_value:inc')
    def set_image_correction_value(self, image_correction_value):
        '''
        Select image correction selected function value. (XI_PRM_IMAGE_CORRECTION_VALUE)
        '''
        self.set_param('image_correction_value', image_correction_value)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Sensor features
#-------------------------------------------------------------------------------------------------------------------
    def get_sensor_mode(self):
        '''
        Current sensor mode. Allows to select sensor mode by one integer. Setting of this parameter affects: image dimensions and downsampling. (XI_PRM_SENSOR_MODE)
        '''
        return self.get_param('sensor_mode')
    def get_sensor_mode_maximum(self):
        '''
        Current sensor mode. Allows to select sensor mode by one integer. Setting of this parameter affects: image dimensions and downsampling. (XI_PRM_SENSOR_MODE:max)
        '''
        return self.get_param('sensor_mode:max')
    def get_sensor_mode_minimum(self):
        '''
        Current sensor mode. Allows to select sensor mode by one integer. Setting of this parameter affects: image dimensions and downsampling. (XI_PRM_SENSOR_MODE:min)
        '''
        return self.get_param('sensor_mode:min')
    def get_sensor_mode_increment(self):
        '''
        Current sensor mode. Allows to select sensor mode by one integer. Setting of this parameter affects: image dimensions and downsampling. (XI_PRM_SENSOR_MODE:inc)
        '''
        return self.get_param('sensor_mode:inc')
    def set_sensor_mode(self, sensor_mode):
        '''
        Current sensor mode. Allows to select sensor mode by one integer. Setting of this parameter affects: image dimensions and downsampling. (XI_PRM_SENSOR_MODE)
        '''
        self.set_param('sensor_mode', sensor_mode)
    def is_hdr(self):
        '''
        Enable High Dynamic Range feature. (XI_PRM_HDR)
        '''
        return self.get_param('hdr')
    def enable_hdr(self):
        '''
        Enable High Dynamic Range feature. (XI_PRM_HDR)
        '''
        self.set_param('hdr', True)
    def disable_hdr(self):
        '''
        Enable High Dynamic Range feature. (XI_PRM_HDR)
        '''
        self.set_param('hdr', False)
    def get_hdr_kneepoint_count(self):
        '''
        The number of kneepoints in the PWLR. (XI_PRM_HDR_KNEEPOINT_COUNT)
        '''
        return self.get_param('hdr_kneepoint_count')
    def get_hdr_kneepoint_count_maximum(self):
        '''
        The number of kneepoints in the PWLR. (XI_PRM_HDR_KNEEPOINT_COUNT:max)
        '''
        return self.get_param('hdr_kneepoint_count:max')
    def get_hdr_kneepoint_count_minimum(self):
        '''
        The number of kneepoints in the PWLR. (XI_PRM_HDR_KNEEPOINT_COUNT:min)
        '''
        return self.get_param('hdr_kneepoint_count:min')
    def get_hdr_kneepoint_count_increment(self):
        '''
        The number of kneepoints in the PWLR. (XI_PRM_HDR_KNEEPOINT_COUNT:inc)
        '''
        return self.get_param('hdr_kneepoint_count:inc')
    def set_hdr_kneepoint_count(self, hdr_kneepoint_count):
        '''
        The number of kneepoints in the PWLR. (XI_PRM_HDR_KNEEPOINT_COUNT)
        '''
        self.set_param('hdr_kneepoint_count', hdr_kneepoint_count)
    def get_hdr_t1(self):
        '''
        position of first kneepoint(in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T1)
        '''
        return self.get_param('hdr_t1')
    def get_hdr_t1_maximum(self):
        '''
        position of first kneepoint(in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T1:max)
        '''
        return self.get_param('hdr_t1:max')
    def get_hdr_t1_minimum(self):
        '''
        position of first kneepoint(in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T1:min)
        '''
        return self.get_param('hdr_t1:min')
    def get_hdr_t1_increment(self):
        '''
        position of first kneepoint(in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T1:inc)
        '''
        return self.get_param('hdr_t1:inc')
    def set_hdr_t1(self, hdr_t1):
        '''
        position of first kneepoint(in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T1)
        '''
        self.set_param('hdr_t1', hdr_t1)
    def get_hdr_t2(self):
        '''
        position of second kneepoint (in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T2)
        '''
        return self.get_param('hdr_t2')
    def get_hdr_t2_maximum(self):
        '''
        position of second kneepoint (in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T2:max)
        '''
        return self.get_param('hdr_t2:max')
    def get_hdr_t2_minimum(self):
        '''
        position of second kneepoint (in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T2:min)
        '''
        return self.get_param('hdr_t2:min')
    def get_hdr_t2_increment(self):
        '''
        position of second kneepoint (in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T2:inc)
        '''
        return self.get_param('hdr_t2:inc')
    def set_hdr_t2(self, hdr_t2):
        '''
        position of second kneepoint (in % of XI_PRM_EXPOSURE). (XI_PRM_HDR_T2)
        '''
        self.set_param('hdr_t2', hdr_t2)
    def get_hdr_kneepoint1(self):
        '''
        value of first kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT1)
        '''
        return self.get_param('hdr_kneepoint1')
    def get_hdr_kneepoint1_maximum(self):
        '''
        value of first kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT1:max)
        '''
        return self.get_param('hdr_kneepoint1:max')
    def get_hdr_kneepoint1_minimum(self):
        '''
        value of first kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT1:min)
        '''
        return self.get_param('hdr_kneepoint1:min')
    def get_hdr_kneepoint1_increment(self):
        '''
        value of first kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT1:inc)
        '''
        return self.get_param('hdr_kneepoint1:inc')
    def set_hdr_kneepoint1(self, hdr_kneepoint1):
        '''
        value of first kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT1)
        '''
        self.set_param('hdr_kneepoint1', hdr_kneepoint1)
    def get_hdr_kneepoint2(self):
        '''
        value of second kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT2)
        '''
        return self.get_param('hdr_kneepoint2')
    def get_hdr_kneepoint2_maximum(self):
        '''
        value of second kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT2:max)
        '''
        return self.get_param('hdr_kneepoint2:max')
    def get_hdr_kneepoint2_minimum(self):
        '''
        value of second kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT2:min)
        '''
        return self.get_param('hdr_kneepoint2:min')
    def get_hdr_kneepoint2_increment(self):
        '''
        value of second kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT2:inc)
        '''
        return self.get_param('hdr_kneepoint2:inc')
    def set_hdr_kneepoint2(self, hdr_kneepoint2):
        '''
        value of second kneepoint (% of sensor saturation). (XI_PRM_KNEEPOINT2)
        '''
        self.set_param('hdr_kneepoint2', hdr_kneepoint2)
    def get_image_black_level(self):
        '''
        Last image black level counts. Can be used for Offline processing to recall it. (XI_PRM_IMAGE_BLACK_LEVEL)
        '''
        return self.get_param('image_black_level')
    def get_image_black_level_maximum(self):
        '''
        Last image black level counts. Can be used for Offline processing to recall it. (XI_PRM_IMAGE_BLACK_LEVEL:max)
        '''
        return self.get_param('image_black_level:max')
    def get_image_black_level_minimum(self):
        '''
        Last image black level counts. Can be used for Offline processing to recall it. (XI_PRM_IMAGE_BLACK_LEVEL:min)
        '''
        return self.get_param('image_black_level:min')
    def get_image_black_level_increment(self):
        '''
        Last image black level counts. Can be used for Offline processing to recall it. (XI_PRM_IMAGE_BLACK_LEVEL:inc)
        '''
        return self.get_param('image_black_level:inc')
    def set_image_black_level(self, image_black_level):
        '''
        Last image black level counts. Can be used for Offline processing to recall it. (XI_PRM_IMAGE_BLACK_LEVEL)
        '''
        self.set_param('image_black_level', image_black_level)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Version info
#-------------------------------------------------------------------------------------------------------------------
    def get_api_version(self,buffer_size=256):
        '''
        Returns version of API. (XI_PRM_API_VERSION)
        '''
        return self.get_param('api_version',buffer_size)
    def get_drv_version(self,buffer_size=256):
        '''
        Returns version of current device driver. (XI_PRM_DRV_VERSION)
        '''
        return self.get_param('drv_version',buffer_size)
    def get_version_mcu1(self,buffer_size=256):
        '''
        Returns version of MCU1 firmware. (XI_PRM_MCU1_VERSION)
        '''
        return self.get_param('version_mcu1',buffer_size)
    def get_version_mcu2(self,buffer_size=256):
        '''
        Returns version of MCU2 firmware. (XI_PRM_MCU2_VERSION)
        '''
        return self.get_param('version_mcu2',buffer_size)
    def get_version_fpga1(self,buffer_size=256):
        '''
        Returns version of FPGA1 firmware. (XI_PRM_FPGA1_VERSION)
        '''
        return self.get_param('version_fpga1',buffer_size)
    def get_version_xmlman(self,buffer_size=256):
        '''
        Returns version of XML manifest. (XI_PRM_XMLMAN_VERSION)
        '''
        return self.get_param('version_xmlman',buffer_size)
    def get_hw_revision(self,buffer_size=256):
        '''
        Returns hardware revision number. (XI_PRM_HW_REVISION)
        '''
        return self.get_param('hw_revision',buffer_size)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: API features
#-------------------------------------------------------------------------------------------------------------------
    def get_debug_level(self):
        '''
        Set debug level. (XI_PRM_DEBUG_LEVEL)
        '''
        return self.get_param('debug_level')
    def get_debug_level_maximum(self):
        '''
        Set debug level. (XI_PRM_DEBUG_LEVEL:max)
        '''
        return self.get_param('debug_level:max')
    def get_debug_level_minimum(self):
        '''
        Set debug level. (XI_PRM_DEBUG_LEVEL:min)
        '''
        return self.get_param('debug_level:min')
    def get_debug_level_increment(self):
        '''
        Set debug level. (XI_PRM_DEBUG_LEVEL:inc)
        '''
        return self.get_param('debug_level:inc')
    def set_debug_level(self, debug_level):
        '''
        Set debug level. (XI_PRM_DEBUG_LEVEL)
        '''
        self.set_param('debug_level', debug_level)
    def is_auto_bandwidth_calculation(self):
        '''
        Automatic bandwidth calculation. (XI_PRM_AUTO_BANDWIDTH_CALCULATION)
        '''
        return self.get_param('auto_bandwidth_calculation')
    def enable_auto_bandwidth_calculation(self):
        '''
        Automatic bandwidth calculation. (XI_PRM_AUTO_BANDWIDTH_CALCULATION)
        '''
        self.set_param('auto_bandwidth_calculation', True)
    def disable_auto_bandwidth_calculation(self):
        '''
        Automatic bandwidth calculation. (XI_PRM_AUTO_BANDWIDTH_CALCULATION)
        '''
        self.set_param('auto_bandwidth_calculation', False)
    def is_new_process_chain_enable(self):
        '''
        Enables (2015/FAPI) processing chain for MQ MU cameras. (XI_PRM_NEW_PROCESS_CHAIN_ENABLE)
        '''
        return self.get_param('new_process_chain_enable')
    def enable_new_process_chain_enable(self):
        '''
        Enables (2015/FAPI) processing chain for MQ MU cameras. (XI_PRM_NEW_PROCESS_CHAIN_ENABLE)
        '''
        self.set_param('new_process_chain_enable', True)
    def disable_new_process_chain_enable(self):
        '''
        Enables (2015/FAPI) processing chain for MQ MU cameras. (XI_PRM_NEW_PROCESS_CHAIN_ENABLE)
        '''
        self.set_param('new_process_chain_enable', False)
    def is_cam_enum_golden_enabled(self):
        '''
        Enable enumeration of golden devices. (XI_PRM_CAM_ENUM_GOLDEN_ENABLED)
        '''
        return self.get_param('cam_enum_golden_enabled')
    def enable_cam_enum_golden_enabled(self):
        '''
        Enable enumeration of golden devices. (XI_PRM_CAM_ENUM_GOLDEN_ENABLED)
        '''
        self.set_param('cam_enum_golden_enabled', True)
    def disable_cam_enum_golden_enabled(self):
        '''
        Enable enumeration of golden devices. (XI_PRM_CAM_ENUM_GOLDEN_ENABLED)
        '''
        self.set_param('cam_enum_golden_enabled', False)
    def is_reset_usb_if_bootloader(self):
        '''
        Resets USB device if started as bootloader. (XI_PRM_RESET_USB_IF_BOOTLOADER)
        '''
        return self.get_param('reset_usb_if_bootloader')
    def enable_reset_usb_if_bootloader(self):
        '''
        Resets USB device if started as bootloader. (XI_PRM_RESET_USB_IF_BOOTLOADER)
        '''
        self.set_param('reset_usb_if_bootloader', True)
    def disable_reset_usb_if_bootloader(self):
        '''
        Resets USB device if started as bootloader. (XI_PRM_RESET_USB_IF_BOOTLOADER)
        '''
        self.set_param('reset_usb_if_bootloader', False)
    def get_cam_simulators_count(self):
        '''
        Number of camera simulators to be available. (XI_PRM_CAM_SIMULATORS_COUNT)
        '''
        return self.get_param('cam_simulators_count')
    def get_cam_simulators_count_maximum(self):
        '''
        Number of camera simulators to be available. (XI_PRM_CAM_SIMULATORS_COUNT:max)
        '''
        return self.get_param('cam_simulators_count:max')
    def get_cam_simulators_count_minimum(self):
        '''
        Number of camera simulators to be available. (XI_PRM_CAM_SIMULATORS_COUNT:min)
        '''
        return self.get_param('cam_simulators_count:min')
    def get_cam_simulators_count_increment(self):
        '''
        Number of camera simulators to be available. (XI_PRM_CAM_SIMULATORS_COUNT:inc)
        '''
        return self.get_param('cam_simulators_count:inc')
    def set_cam_simulators_count(self, cam_simulators_count):
        '''
        Number of camera simulators to be available. (XI_PRM_CAM_SIMULATORS_COUNT)
        '''
        self.set_param('cam_simulators_count', cam_simulators_count)
    def is_cam_sensor_init_disabled(self):
        '''
        Camera sensor will not be initialized when 1=XI_ON is set. (XI_PRM_CAM_SENSOR_INIT_DISABLED)
        '''
        return self.get_param('cam_sensor_init_disabled')
    def enable_cam_sensor_init_disabled(self):
        '''
        Camera sensor will not be initialized when 1=XI_ON is set. (XI_PRM_CAM_SENSOR_INIT_DISABLED)
        '''
        self.set_param('cam_sensor_init_disabled', True)
    def disable_cam_sensor_init_disabled(self):
        '''
        Camera sensor will not be initialized when 1=XI_ON is set. (XI_PRM_CAM_SENSOR_INIT_DISABLED)
        '''
        self.set_param('cam_sensor_init_disabled', False)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Camera FFS
#-------------------------------------------------------------------------------------------------------------------
    def get_read_file_ffs(self,buffer_size=256):
        '''
        Read file from camera flash filesystem. (XI_PRM_READ_FILE_FFS)
        '''
        return self.get_param('read_file_ffs',buffer_size)
    def get_write_file_ffs(self,buffer_size=256):
        '''
        Write file to camera flash filesystem. (XI_PRM_WRITE_FILE_FFS)
        '''
        return self.get_param('write_file_ffs',buffer_size)
    def set_write_file_ffs(self, write_file_ffs):
        '''
        Write file to camera flash filesystem. (XI_PRM_WRITE_FILE_FFS)
        '''
        self.set_param('write_file_ffs', write_file_ffs)
    def get_ffs_file_name(self,buffer_size=256):
        '''
        Set name of file to be written/read from camera FFS. (XI_PRM_FFS_FILE_NAME)
        '''
        return self.get_param('ffs_file_name',buffer_size)
    def set_ffs_file_name(self, ffs_file_name):
        '''
        Set name of file to be written/read from camera FFS. (XI_PRM_FFS_FILE_NAME)
        '''
        self.set_param('ffs_file_name', ffs_file_name)
    def get_ffs_file_id(self):
        '''
        File number. (XI_PRM_FFS_FILE_ID)
        '''
        return self.get_param('ffs_file_id')
    def get_ffs_file_id_maximum(self):
        '''
        File number. (XI_PRM_FFS_FILE_ID:max)
        '''
        return self.get_param('ffs_file_id:max')
    def get_ffs_file_id_minimum(self):
        '''
        File number. (XI_PRM_FFS_FILE_ID:min)
        '''
        return self.get_param('ffs_file_id:min')
    def get_ffs_file_id_increment(self):
        '''
        File number. (XI_PRM_FFS_FILE_ID:inc)
        '''
        return self.get_param('ffs_file_id:inc')
    def get_ffs_file_size(self):
        '''
        Size of file. (XI_PRM_FFS_FILE_SIZE)
        '''
        return self.get_param('ffs_file_size')
    def get_ffs_file_size_maximum(self):
        '''
        Size of file. (XI_PRM_FFS_FILE_SIZE:max)
        '''
        return self.get_param('ffs_file_size:max')
    def get_ffs_file_size_minimum(self):
        '''
        Size of file. (XI_PRM_FFS_FILE_SIZE:min)
        '''
        return self.get_param('ffs_file_size:min')
    def get_ffs_file_size_increment(self):
        '''
        Size of file. (XI_PRM_FFS_FILE_SIZE:inc)
        '''
        return self.get_param('ffs_file_size:inc')
    def get_free_ffs_size(self):
        '''
        Size of free camera FFS. (XI_PRM_FREE_FFS_SIZE)
        '''
        return self.get_param('free_ffs_size')
    def get_free_ffs_size_maximum(self):
        '''
        Size of free camera FFS. (XI_PRM_FREE_FFS_SIZE:max)
        '''
        return self.get_param('free_ffs_size:max')
    def get_free_ffs_size_minimum(self):
        '''
        Size of free camera FFS. (XI_PRM_FREE_FFS_SIZE:min)
        '''
        return self.get_param('free_ffs_size:min')
    def get_free_ffs_size_increment(self):
        '''
        Size of free camera FFS. (XI_PRM_FREE_FFS_SIZE:inc)
        '''
        return self.get_param('free_ffs_size:inc')
    def get_used_ffs_size(self):
        '''
        Size of used camera FFS. (XI_PRM_USED_FFS_SIZE)
        '''
        return self.get_param('used_ffs_size')
    def get_used_ffs_size_maximum(self):
        '''
        Size of used camera FFS. (XI_PRM_USED_FFS_SIZE:max)
        '''
        return self.get_param('used_ffs_size:max')
    def get_used_ffs_size_minimum(self):
        '''
        Size of used camera FFS. (XI_PRM_USED_FFS_SIZE:min)
        '''
        return self.get_param('used_ffs_size:min')
    def get_used_ffs_size_increment(self):
        '''
        Size of used camera FFS. (XI_PRM_USED_FFS_SIZE:inc)
        '''
        return self.get_param('used_ffs_size:inc')
    def get_ffs_access_key(self):
        '''
        Setting of key enables file operations on some cameras. (XI_PRM_FFS_ACCESS_KEY)
        '''
        return self.get_param('ffs_access_key')
    def get_ffs_access_key_maximum(self):
        '''
        Setting of key enables file operations on some cameras. (XI_PRM_FFS_ACCESS_KEY:max)
        '''
        return self.get_param('ffs_access_key:max')
    def get_ffs_access_key_minimum(self):
        '''
        Setting of key enables file operations on some cameras. (XI_PRM_FFS_ACCESS_KEY:min)
        '''
        return self.get_param('ffs_access_key:min')
    def get_ffs_access_key_increment(self):
        '''
        Setting of key enables file operations on some cameras. (XI_PRM_FFS_ACCESS_KEY:inc)
        '''
        return self.get_param('ffs_access_key:inc')
    def set_ffs_access_key(self, ffs_access_key):
        '''
        Setting of key enables file operations on some cameras. (XI_PRM_FFS_ACCESS_KEY)
        '''
        self.set_param('ffs_access_key', ffs_access_key)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: APIContextControl
#-------------------------------------------------------------------------------------------------------------------
    def get_xiapi_context_list(self,buffer_size=256):
        '''
        List of current parameters settings context - parameters with values. Used for offline processing. (XI_PRM_API_CONTEXT_LIST)
        '''
        return self.get_param('xiapi_context_list',buffer_size)
    def set_xiapi_context_list(self, xiapi_context_list):
        '''
        List of current parameters settings context - parameters with values. Used for offline processing. (XI_PRM_API_CONTEXT_LIST)
        '''
        self.set_param('xiapi_context_list', xiapi_context_list)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Sensor Control
#-------------------------------------------------------------------------------------------------------------------
    def get_sensor_feature_selector(self):
        '''
        Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE. (XI_PRM_SENSOR_FEATURE_SELECTOR)
        '''
        return self.get_param('sensor_feature_selector')
    def get_sensor_feature_selector_maximum(self):
        '''
        Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE. (XI_PRM_SENSOR_FEATURE_SELECTOR:max)
        '''
        return self.get_param('sensor_feature_selector:max')
    def get_sensor_feature_selector_minimum(self):
        '''
        Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE. (XI_PRM_SENSOR_FEATURE_SELECTOR:min)
        '''
        return self.get_param('sensor_feature_selector:min')
    def get_sensor_feature_selector_increment(self):
        '''
        Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE. (XI_PRM_SENSOR_FEATURE_SELECTOR:inc)
        '''
        return self.get_param('sensor_feature_selector:inc')
    def set_sensor_feature_selector(self, sensor_feature_selector):
        '''
        Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE. (XI_PRM_SENSOR_FEATURE_SELECTOR)
        '''
        self.set_param('sensor_feature_selector', sensor_feature_selector)
    def get_sensor_feature_value(self):
        '''
        Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR. (XI_PRM_SENSOR_FEATURE_VALUE)
        '''
        return self.get_param('sensor_feature_value')
    def get_sensor_feature_value_maximum(self):
        '''
        Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR. (XI_PRM_SENSOR_FEATURE_VALUE:max)
        '''
        return self.get_param('sensor_feature_value:max')
    def get_sensor_feature_value_minimum(self):
        '''
        Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR. (XI_PRM_SENSOR_FEATURE_VALUE:min)
        '''
        return self.get_param('sensor_feature_value:min')
    def get_sensor_feature_value_increment(self):
        '''
        Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR. (XI_PRM_SENSOR_FEATURE_VALUE:inc)
        '''
        return self.get_param('sensor_feature_value:inc')
    def set_sensor_feature_value(self, sensor_feature_value):
        '''
        Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR. (XI_PRM_SENSOR_FEATURE_VALUE)
        '''
        self.set_param('sensor_feature_value', sensor_feature_value)
#-------------------------------------------------------------------------------------------------------------------
# ---- Parameter Group: Extended Features
#-------------------------------------------------------------------------------------------------------------------
    def get_ext_feature_selector(self):
        '''
        Selection of extended feature. (XI_PRM_EXTENDED_FEATURE_SELECTOR)
        '''
        return self.get_param('ext_feature_selector')
    def get_ext_feature_selector_maximum(self):
        '''
        Selection of extended feature. (XI_PRM_EXTENDED_FEATURE_SELECTOR:max)
        '''
        return self.get_param('ext_feature_selector:max')
    def get_ext_feature_selector_minimum(self):
        '''
        Selection of extended feature. (XI_PRM_EXTENDED_FEATURE_SELECTOR:min)
        '''
        return self.get_param('ext_feature_selector:min')
    def get_ext_feature_selector_increment(self):
        '''
        Selection of extended feature. (XI_PRM_EXTENDED_FEATURE_SELECTOR:inc)
        '''
        return self.get_param('ext_feature_selector:inc')
    def set_ext_feature_selector(self, ext_feature_selector):
        '''
        Selection of extended feature. (XI_PRM_EXTENDED_FEATURE_SELECTOR)
        '''
        self.set_param('ext_feature_selector', ext_feature_selector)
    def get_ext_feature(self):
        '''
        Extended feature value. (XI_PRM_EXTENDED_FEATURE)
        '''
        return self.get_param('ext_feature')
    def get_ext_feature_maximum(self):
        '''
        Extended feature value. (XI_PRM_EXTENDED_FEATURE:max)
        '''
        return self.get_param('ext_feature:max')
    def get_ext_feature_minimum(self):
        '''
        Extended feature value. (XI_PRM_EXTENDED_FEATURE:min)
        '''
        return self.get_param('ext_feature:min')
    def get_ext_feature_increment(self):
        '''
        Extended feature value. (XI_PRM_EXTENDED_FEATURE:inc)
        '''
        return self.get_param('ext_feature:inc')
    def set_ext_feature(self, ext_feature):
        '''
        Extended feature value. (XI_PRM_EXTENDED_FEATURE)
        '''
        self.set_param('ext_feature', ext_feature)
    def get_device_unit_selector(self):
        '''
        Selects device unit. (XI_PRM_DEVICE_UNIT_SELECTOR)
        '''
        return self.get_param('device_unit_selector')
    def get_device_unit_selector_maximum(self):
        '''
        Selects device unit. (XI_PRM_DEVICE_UNIT_SELECTOR:max)
        '''
        return self.get_param('device_unit_selector:max')
    def get_device_unit_selector_minimum(self):
        '''
        Selects device unit. (XI_PRM_DEVICE_UNIT_SELECTOR:min)
        '''
        return self.get_param('device_unit_selector:min')
    def get_device_unit_selector_increment(self):
        '''
        Selects device unit. (XI_PRM_DEVICE_UNIT_SELECTOR:inc)
        '''
        return self.get_param('device_unit_selector:inc')
    def set_device_unit_selector(self, device_unit_selector):
        '''
        Selects device unit. (XI_PRM_DEVICE_UNIT_SELECTOR)
        '''
        self.set_param('device_unit_selector', device_unit_selector)
    def get_device_unit_register_selector(self):
        '''
        Selects register of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_SELECTOR)
        '''
        return self.get_param('device_unit_register_selector')
    def get_device_unit_register_selector_maximum(self):
        '''
        Selects register of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_SELECTOR:max)
        '''
        return self.get_param('device_unit_register_selector:max')
    def get_device_unit_register_selector_minimum(self):
        '''
        Selects register of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_SELECTOR:min)
        '''
        return self.get_param('device_unit_register_selector:min')
    def get_device_unit_register_selector_increment(self):
        '''
        Selects register of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_SELECTOR:inc)
        '''
        return self.get_param('device_unit_register_selector:inc')
    def set_device_unit_register_selector(self, device_unit_register_selector):
        '''
        Selects register of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_SELECTOR)
        '''
        self.set_param('device_unit_register_selector', device_unit_register_selector)
    def get_device_unit_register_value(self):
        '''
        Sets/gets register value of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_VALUE)
        '''
        return self.get_param('device_unit_register_value')
    def get_device_unit_register_value_maximum(self):
        '''
        Sets/gets register value of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_VALUE:max)
        '''
        return self.get_param('device_unit_register_value:max')
    def get_device_unit_register_value_minimum(self):
        '''
        Sets/gets register value of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_VALUE:min)
        '''
        return self.get_param('device_unit_register_value:min')
    def get_device_unit_register_value_increment(self):
        '''
        Sets/gets register value of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_VALUE:inc)
        '''
        return self.get_param('device_unit_register_value:inc')
    def set_device_unit_register_value(self, device_unit_register_value):
        '''
        Sets/gets register value of selected device unit(XI_PRM_DEVICE_UNIT_SELECTOR). (XI_PRM_DEVICE_UNIT_REGISTER_VALUE)
        '''
        self.set_param('device_unit_register_value', device_unit_register_value)
def get_api_progress_callback(self,buffer_size=256):
'''
Callback address of pointer that is called upon long tasks (e.g. XI_PRM_WRITE_FILE_FFS).XI_PRM_API_PROGRESS_CALLBACK
'''
return self.get_param('api_progress_callback',buffer_size)
def set_api_progress_callback(self, api_progress_callback):
'''
Callback address of pointer that is called upon long tasks (e.g. XI_PRM_WRITE_FILE_FFS).XI_PRM_API_PROGRESS_CALLBACK
'''
self.set_param('api_progress_callback', api_progress_callback)
def get_acquisition_status_selector(self):
'''
Selects the internal acquisition signal to read using XI_PRM_ACQUISITION_STATUS.XI_PRM_ACQUISITION_STATUS_SELECTOR
'''
return self.get_param('acquisition_status_selector')
def get_acquisition_status_selector_maximum(self):
'''
Selects the internal acquisition signal to read using XI_PRM_ACQUISITION_STATUS.XI_PRM_ACQUISITION_STATUS_SELECTOR
'''
return self.get_param('acquisition_status_selector:max')
def get_acquisition_status_selector_minimum(self):
'''
Selects the internal acquisition signal to read using XI_PRM_ACQUISITION_STATUS.XI_PRM_ACQUISITION_STATUS_SELECTOR
'''
return self.get_param('acquisition_status_selector:min')
def get_acquisition_status_selector_increment(self):
'''
Selects the internal acquisition signal to read using XI_PRM_ACQUISITION_STATUS.XI_PRM_ACQUISITION_STATUS_SELECTOR
'''
return self.get_param('acquisition_status_selector:inc')
def set_acquisition_status_selector(self, acquisition_status_selector):
'''
Selects the internal acquisition signal to read using XI_PRM_ACQUISITION_STATUS.XI_PRM_ACQUISITION_STATUS_SELECTOR
'''
self.set_param('acquisition_status_selector', acquisition_status_selector)
def get_acquisition_status(self):
'''
Acquisition status(True/False)XI_PRM_ACQUISITION_STATUS
'''
return self.get_param('acquisition_status')
def get_acquisition_status_maximum(self):
'''
Acquisition status(True/False)XI_PRM_ACQUISITION_STATUS
'''
return self.get_param('acquisition_status:max')
def get_acquisition_status_minimum(self):
'''
Acquisition status(True/False)XI_PRM_ACQUISITION_STATUS
'''
return self.get_param('acquisition_status:min')
def get_acquisition_status_increment(self):
'''
Acquisition status(True/False)XI_PRM_ACQUISITION_STATUS
'''
return self.get_param('acquisition_status:inc')
|
{"hexsha": "5c4275fd801db0afc31cd779218accf15d3c0d51", "size": 195478, "ext": "py", "lang": "Python", "max_stars_repo_path": "Catkin_PKG_Car/src/ximea_cam/src/ximea/xiapi.py", "max_stars_repo_name": "jessecha/OPCAS", "max_stars_repo_head_hexsha": "2b51543b4ad1ee37dba2e45a0c7d0b872309d418", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-28T05:58:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-28T05:58:50.000Z", "max_issues_repo_path": "Catkin_PKG_Car/src/ximea_cam/src/ximea/xiapi.py", "max_issues_repo_name": "jessecha/OPCAS", "max_issues_repo_head_hexsha": "2b51543b4ad1ee37dba2e45a0c7d0b872309d418", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Catkin_PKG_Car/src/ximea_cam/src/ximea/xiapi.py", "max_forks_repo_name": "jessecha/OPCAS", "max_forks_repo_head_hexsha": "2b51543b4ad1ee37dba2e45a0c7d0b872309d418", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2643295355, "max_line_length": 191, "alphanum_fraction": 0.6178546946, "include": true, "reason": "import numpy", "num_tokens": 41126}
|
#include <boost/filesystem/operations.hpp>
#include <palette_loader.hxx>
#include <global_state.hxx>
/// @brief Load a palette asset by name from the data directory.
///
/// Resolves "<data_path>/palettes/<p_name>.json" via the global
/// path_manager and constructs a palette from that file.
///
/// @param p_name Palette name without extension or directory.
/// @return The loaded palette.
/// @throws ::std::runtime_error if the resolved path does not exist or
///         is not a regular file; the message names the missing path.
auto asset_loader<palette>::load_asset(const ::std::string& p_name) const
	-> palette
{
	// Retrieve data path and build path to requested palette file
	const auto t_dataPath = global_state<path_manager>().data_path();
	const auto t_palPath = (t_dataPath / "palettes" / (p_name + ".json"));

	// Guard clause: fail early with a diagnostic that names the offending
	// file instead of a generic "File not found". (is_regular_file alone
	// would also be false for a missing path; the explicit exists() check
	// keeps the intent obvious.)
	if(!boost::filesystem::exists(t_palPath) ||
	   !boost::filesystem::is_regular_file(t_palPath))
	{
		throw ::std::runtime_error(
			"asset_loader<palette>: File not found: " + t_palPath.string());
	}

	return palette{ t_palPath.string() };
}
|
{"hexsha": "061c3c8aa401125ecf7a43577df9c2cdb755e1f5", "size": 670, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "src/palette_loader.cxx", "max_stars_repo_name": "nshcat/gl_app", "max_stars_repo_head_hexsha": "49b97f07f81dc252695a379c46213ad9c1d94e1a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-02-08T16:59:47.000Z", "max_stars_repo_stars_event_max_datetime": "2018-02-08T16:59:47.000Z", "max_issues_repo_path": "src/palette_loader.cxx", "max_issues_repo_name": "nshcat/roguelike", "max_issues_repo_head_hexsha": "49b97f07f81dc252695a379c46213ad9c1d94e1a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/palette_loader.cxx", "max_forks_repo_name": "nshcat/roguelike", "max_forks_repo_head_hexsha": "49b97f07f81dc252695a379c46213ad9c1d94e1a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9166666667, "max_line_length": 73, "alphanum_fraction": 0.7208955224, "num_tokens": 172}
|
#!/usr/bin/env python
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
import numpy as np
from datetime import datetime as date
import sys
from Relay_QCheckBox import *
class MainWindow(QtGui.QWidget):
    """PyQt4 front-end for the remote relay control box.

    Presents two banks of 8 SPDT and 8 DPDT relay checkboxes, eight ADC
    voltage readouts, network connect/disconnect controls and command
    buttons. All hardware interaction is delegated to a callback object
    installed via setCallback(); its set_relays/get_relays/get_adcs/
    get_status methods return -1 on failure, which this class treats as
    "ignore the result".
    """
    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.resize(785, 275)
        self.setFixedWidth(785)
        self.setFixedHeight(275)
        self.setWindowTitle('Remote Relay Control v1.0')
        self.setContentsMargins(0,0,0,0)
        self.spdt_cb = [] #list to hold spdt relay check boxes
        self.dpdt_cb = [] #list to hold dpdt relay check boxes
        self.spdt_a_value = 0 #SPDT BANK A Value, 0-255
        self.spdt_b_value = 0 #SPDT BANK B Value, 0-255
        self.dpdt_a_value = 0 #DPDT BANK A Value, 0-255
        self.dpdt_b_value = 0 #DPDT BANK B Value, 0-255
        self.relays_cmd = [0,0,0,0] # Relay Register Value Commanded, 0-225, [SPDTA, SPDTB, DPDTA, DPDTB]
        self.relay_callback = None #Callback accessor for remote relay control
        self.set_relay_msg = '' # '$,R,AAA,BBB,CCC,DDD'
        self.connected = False #Connection Status to remote relay control box
        self.adc_interval = 1000 #ADC Auto Update Interval in milliseconds
        self.initUI()
        self.darken()
        self.setFocus()
    def initUI(self):
        """Build every widget group, then wire the Qt signals (must run last)."""
        self.initFrames()
        self.initSPDTCheckBoxes()
        self.initDPDTCheckBoxes()
        self.initADC()
        self.initNet()
        self.initControls()
        self.connectSignals()
    def connectSignals(self):
        """Connect signals: new-style for buttons/checkbox, old-style SIGNAL() for timer and line edits."""
        self.resetButton.clicked.connect(self.resetButtonEvent)
        self.connectButton.clicked.connect(self.connectButtonEvent)
        self.adc_auto_cb.stateChanged.connect(self.catchADCAutoEvent)
        self.readStatusButton.clicked.connect(self.readStatusButtonEvent)
        self.readRelayButton.clicked.connect(self.readRelayButtonEvent)
        self.readVoltButton.clicked.connect(self.readVoltButtonEvent)
        self.updateButton.clicked.connect(self.updateButtonEvent)
        QtCore.QObject.connect(self.ADCtimer, QtCore.SIGNAL('timeout()'), self.readVoltButtonEvent)
        QtCore.QObject.connect(self.adc_interval_le, QtCore.SIGNAL('editingFinished()'), self.updateADCInterval)
        QtCore.QObject.connect(self.ipAddrTextBox, QtCore.SIGNAL('editingFinished()'), self.updateIPAddress)
        QtCore.QObject.connect(self.portTextBox, QtCore.SIGNAL('editingFinished()'), self.updatePort)
    def updateButtonEvent(self):
        """Push the commanded relay registers to the controller; refresh the GUI from its reply."""
        a = self.relay_callback.set_relays(self.relays_cmd)
        if (a != -1): self.updateRelayStatus(a)
    def catchCheckBoxEvent(self, reltype, relay_id, value):
        #Catches Relay_QCheckBox Event
        #print str(reltype) + str(relay_id) + " " + str(value)
        # value is the signed bit weight (+/- 2^n) added into the commanded
        # register for the bank the relay belongs to.
        if (reltype == 'SPDT'):
            if (relay_id <= 8): self.relays_cmd[0] += value #SPDTA
            else: self.relays_cmd[1] += value #SPDTB
        elif (reltype == 'DPDT'):
            if (relay_id <= 8): self.relays_cmd[2] += value #DPDTA
            else: self.relays_cmd[3] += value #DPDTB
        #self.formatSetRelayMsg()
    def formatSetRelayMsg(self):
        """Format self.relays_cmd as the zero-padded '$,R,AAA,BBB,CCC,DDD' wire message."""
        self.set_relay_msg = '$,R,'
        #SPDT A
        if (len(str(self.relays_cmd[0])) == 1): self.set_relay_msg += '00' + str(self.relays_cmd[0])
        elif (len(str(self.relays_cmd[0])) == 2): self.set_relay_msg += '0' + str(self.relays_cmd[0])
        elif (len(str(self.relays_cmd[0])) == 3): self.set_relay_msg += str(self.relays_cmd[0])
        self.set_relay_msg += ','
        #SPDT B
        if (len(str(self.relays_cmd[1])) == 1): self.set_relay_msg += '00' + str(self.relays_cmd[1])
        elif (len(str(self.relays_cmd[1])) == 2): self.set_relay_msg += '0' + str(self.relays_cmd[1])
        elif (len(str(self.relays_cmd[1])) == 3): self.set_relay_msg += str(self.relays_cmd[1])
        self.set_relay_msg += ','
        #DPDT A
        if (len(str(self.relays_cmd[2])) == 1): self.set_relay_msg += '00' + str(self.relays_cmd[2])
        elif (len(str(self.relays_cmd[2])) == 2): self.set_relay_msg += '0' + str(self.relays_cmd[2])
        elif (len(str(self.relays_cmd[2])) == 3): self.set_relay_msg += str(self.relays_cmd[2])
        self.set_relay_msg += ','
        #DPDT B
        if (len(str(self.relays_cmd[3])) == 1): self.set_relay_msg += '00' + str(self.relays_cmd[3])
        elif (len(str(self.relays_cmd[3])) == 2): self.set_relay_msg += '0' + str(self.relays_cmd[3])
        elif (len(str(self.relays_cmd[3])) == 3): self.set_relay_msg += str(self.relays_cmd[3])
        print self.set_relay_msg
    def readVoltButtonEvent(self):
        """Poll ADC voltages (also driven periodically by ADCtimer)."""
        a = self.relay_callback.get_adcs()
        if (a != -1): self.updateADC(a)
    def readRelayButtonEvent(self):
        """Read the relay registers back from the controller and mirror them in the GUI."""
        a = self.relay_callback.get_relays()
        if (a != -1): self.updateRelayStatus(a)
    def readStatusButtonEvent(self):
        """Read relay registers and ADC voltages in one call."""
        #print 'GUI| Read Status Button Clicked'
        a,b = self.relay_callback.get_status()
        #print a,b
        if (a != -1): self.updateRelayStatus(a)
        if (b != -1): self.updateADC(b)
        #else:
        #    print 'GUI| Not Connected to Relay Controller'
        #    print 'GUI| Must Connect to Relay Controller before reading Status'
    def updateRelayStatus(self, rel):
        """Set each checkbox from the four 8-bit registers in rel = [SPDTA, SPDTB, DPDTA, DPDTB]."""
        mask = 0b00000001
        # Bit i of each register maps to checkbox i of the corresponding bank.
        for i in range(8):
            #SPDT A
            if ((rel[0]>>i) & mask): self.spdt_cb[i].setCheckState(QtCore.Qt.Checked)
            else: self.spdt_cb[i].setCheckState(QtCore.Qt.Unchecked)
            #SPDT B
            if ((rel[1]>>i) & mask): self.spdt_cb[i+8].setCheckState(QtCore.Qt.Checked)
            else: self.spdt_cb[i+8].setCheckState(QtCore.Qt.Unchecked)
            #DPDT A
            if ((rel[2]>>i) & mask): self.dpdt_cb[i].setCheckState(QtCore.Qt.Checked)
            else: self.dpdt_cb[i].setCheckState(QtCore.Qt.Unchecked)
            #DPDT B
            if ((rel[3]>>i) & mask): self.dpdt_cb[i+8].setCheckState(QtCore.Qt.Checked)
            else: self.dpdt_cb[i+8].setCheckState(QtCore.Qt.Unchecked)
    def updateADC(self,adcs):
        """Display the measured voltages; adcs is a sequence of numbers, one per ADC channel."""
        for i in range(len(adcs)):
            self.field_value[i] = str(adcs[i]) + 'V'
            self.adc_field_values_qlabels[i].setText(self.field_value[i])
    def catchADCAutoEvent(self, state):
        """Start/stop the periodic ADC poll when the 'Auto' checkbox toggles."""
        CheckState = (state == QtCore.Qt.Checked)
        if CheckState == True:
            self.ADCtimer.start()
            print self.getTimeStampGMT() + "GUI| Started ADC Auto Update, Interval: " + str(self.adc_interval) + " [ms]"
        else:
            self.ADCtimer.stop()
            print self.getTimeStampGMT() + "GUI| Stopped ADC Auto Update"
    def updateADCInterval(self):
        """Apply the user-entered poll interval (entered in seconds, stored in ms)."""
        self.adc_interval = float(self.adc_interval_le.text()) * 1000.0
        self.ADCtimer.setInterval(self.adc_interval)
        print self.getTimeStampGMT() + "GUI| Updated ADC Auto Interval to " + str(self.adc_interval) + " [ms]"
    def connectButtonEvent(self):
        """Toggle the connection; on state change, relabel the button, recolor the status text and lock/unlock the address fields."""
        if (not self.connected): #Not connected, attempt to connect
            self.connected = self.relay_callback.connect()
            if (self.connected):
                self.connectButton.setText('Disconnect')
                self.net_label.setText("Connected")
                self.net_label.setStyleSheet("QLabel { font-weight:bold; color:rgb(0,255,0) ; }")
                self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
                self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(225,225,225); color:rgb(0,0,0);}")
                self.ipAddrTextBox.setEnabled(False)
                self.portTextBox.setEnabled(False)
        else:
            self.connected = self.relay_callback.disconnect()
            if (not self.connected):
                self.connectButton.setText('Connect')
                self.net_label.setText("Disconnected")
                self.net_label.setStyleSheet("QLabel { font-weight:bold; color:rgb(255,0,0) ; }")
                self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
                self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
                self.ipAddrTextBox.setEnabled(True)
                self.portTextBox.setEnabled(True)
    def resetButtonEvent(self):
        """Uncheck every relay checkbox locally; does not send anything to the controller."""
        # NOTE(review): comparing `CheckState` (a Qt method/attribute) to True
        # looks suspect -- the usual PyQt idiom is `checkState() == QtCore.Qt.Checked`.
        # Verify against Relay_QCheckBox before relying on the condition.
        for i in range(16):
            if self.spdt_cb[i].CheckState==True: self.spdt_cb[i].setCheckState(QtCore.Qt.Unchecked)
            if self.dpdt_cb[i].CheckState==True: self.dpdt_cb[i].setCheckState(QtCore.Qt.Unchecked)
        print self.getTimeStampGMT() + "GUI| Cleared Relay Banks, Change Not Applied to RR Controller"
    def setCallback(self, callback):
        """Install the object that performs the actual network/relay operations."""
        self.relay_callback = callback
    def initADC(self):
        """Build the ADC readout panel: eight label pairs, the Auto checkbox, the interval field and the poll timer."""
        field_name = [ 'ADC1:', 'ADC2:', 'ADC3:', 'ADC4:', 'ADC5:', 'ADC6:', 'ADC7:', 'ADC8:']
        self.field_value = [ '0.00V', '0.00V', '0.00V', '0.00V', '0.00V', '0.00V', '0.00V', '0.00V' ]
        self.adc_auto_cb = QtGui.QCheckBox("Auto", self) #Automatically update ADC voltages checkbox option
        self.adc_auto_cb.setStyleSheet("QCheckBox { font-size: 12px; \
                                        background-color:rgb(0,0,0); \
                                        color:rgb(255,255,255); }")
        self.adc_interval_le = QtGui.QLineEdit()
        self.adc_interval_le.setText("1")
        self.adc_validator = QtGui.QDoubleValidator()
        self.adc_interval_le.setValidator(self.adc_validator)
        self.adc_interval_le.setEchoMode(QtGui.QLineEdit.Normal)
        self.adc_interval_le.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.adc_interval_le.setMaxLength(4)
        self.adc_interval_le.setFixedWidth(30)
        label = QtGui.QLabel('Interval[s]')
        label.setAlignment(QtCore.Qt.AlignRight)
        label.setAlignment(QtCore.Qt.AlignVCenter)
        label.setStyleSheet("QLabel { font-size: 12px; background-color: rgb(0,0,0); color:rgb(255,255,255) ; }")
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.adc_interval_le)
        hbox1.addWidget(label)
        self.adc_field_labels_qlabels = [] #List containing Static field Qlabels, do not change
        self.adc_field_values_qlabels = [] #List containing the value of the field, updated per packet
        self.ADCtimer = QtCore.QTimer(self)
        self.ADCtimer.setInterval(self.adc_interval)
        vbox = QtGui.QVBoxLayout()
        for i in range(len(field_name)):
            hbox = QtGui.QHBoxLayout()
            self.adc_field_labels_qlabels.append(QtGui.QLabel(field_name[i]))
            self.adc_field_labels_qlabels[i].setAlignment(QtCore.Qt.AlignLeft)
            self.adc_field_values_qlabels.append(QtGui.QLabel(self.field_value[i]))
            self.adc_field_values_qlabels[i].setAlignment(QtCore.Qt.AlignLeft)
            hbox.addWidget(self.adc_field_labels_qlabels[i])
            hbox.addWidget(self.adc_field_values_qlabels[i])
            vbox.addLayout(hbox)
        vbox.addWidget(self.adc_auto_cb)
        vbox.addLayout(hbox1)
        self.adc_fr.setLayout(vbox)
    def initNet(self):
        """Build the network panel: IP/port fields, connect button and colored status label."""
        self.ipAddrTextBox = QtGui.QLineEdit()
        self.ipAddrTextBox.setText("192.168.42.11")
        self.ipAddrTextBox.setInputMask("000.000.000.000;")
        self.ipAddrTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.ipAddrTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.ipAddrTextBox.setMaxLength(15)
        self.portTextBox = QtGui.QLineEdit()
        self.portTextBox.setText("2000")
        self.port_validator = QtGui.QIntValidator()
        self.port_validator.setRange(0,65535)
        self.portTextBox.setValidator(self.port_validator)
        self.portTextBox.setEchoMode(QtGui.QLineEdit.Normal)
        self.portTextBox.setStyleSheet("QLineEdit {background-color:rgb(255,255,255); color:rgb(0,0,0);}")
        self.portTextBox.setMaxLength(5)
        self.portTextBox.setFixedWidth(50)
        label = QtGui.QLabel('Status:')
        label.setAlignment(QtCore.Qt.AlignRight)
        self.net_label = QtGui.QLabel('Disconnected')
        self.net_label.setAlignment(QtCore.Qt.AlignLeft)
        self.net_label.setFixedWidth(150)
        self.connectButton = QtGui.QPushButton("Connect")
        self.net_label.setStyleSheet("QLabel { font-weight:bold; color:rgb(255,0,0) ; }")
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.ipAddrTextBox)
        hbox1.addWidget(self.portTextBox)
        hbox2 = QtGui.QHBoxLayout()
        hbox2.addWidget(label)
        hbox2.addWidget(self.net_label)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addWidget(self.connectButton)
        vbox.addLayout(hbox2)
        self.net_fr.setLayout(vbox)
    def updateIPAddress(self):
        """Forward the edited IP address to the callback."""
        ip_addr = self.ipAddrTextBox.text()
        self.relay_callback.set_ipaddr(ip_addr)
    def updatePort(self):
        """Forward the edited TCP port to the callback."""
        port = self.portTextBox.text()
        self.relay_callback.set_port(port)
    def initControls(self):
        """Build the command-button panel (read relay/status/ADC, update, reset)."""
        self.updateButton = QtGui.QPushButton("Update")
        self.resetButton = QtGui.QPushButton("Reset")
        self.readRelayButton = QtGui.QPushButton("Read Relay")
        self.readVoltButton = QtGui.QPushButton("Read ADCs")
        self.readStatusButton = QtGui.QPushButton("Read Status")
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.readRelayButton)
        hbox1.addWidget(self.readStatusButton)
        hbox1.addWidget(self.readVoltButton)
        hbox2 = QtGui.QHBoxLayout()
        #hbox.addStretch(1)
        hbox2.addWidget(self.updateButton)
        hbox2.addWidget(self.resetButton)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)
        self.button_fr.setLayout(vbox)
    def initSPDTCheckBoxes(self):
        """Create the 16 SPDT checkboxes in two rows; each carries bit weight 2^i within its bank."""
        hbox1 = QtGui.QHBoxLayout()
        for i in range(8):
            self.spdt_cb.append(Relay_QCheckBox(self, i+1  , 'SPDT'+str(i+1)  , 0, pow(2,i)))
            hbox1.addWidget(self.spdt_cb[i])
        hbox2 = QtGui.QHBoxLayout()
        for i in range(8):
            self.spdt_cb.append(Relay_QCheckBox(self, i+1+8, 'SPDT'+str(i+1+8), 0, pow(2,i)))
            hbox2.addWidget(self.spdt_cb[i+8])
        #for i in range(16):  print str(self.spdt_cb[i].name)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)
        self.spdt_fr.setLayout(vbox)
    def initDPDTCheckBoxes(self):
        """Create the 16 DPDT checkboxes in two rows; each carries bit weight 2^i within its bank."""
        hbox1 = QtGui.QHBoxLayout()
        for i in range(8):
            self.dpdt_cb.append(Relay_QCheckBox(self, i+1, 'DPDT'+str(i+1), 1, pow(2,i)))
            hbox1.addWidget(self.dpdt_cb[i])
        hbox2 = QtGui.QHBoxLayout()
        for i in range(8):
            self.dpdt_cb.append(Relay_QCheckBox(self, i+1+8, 'DPDT'+str(i+1+8), 1, pow(2,i)))
            hbox2.addWidget(self.dpdt_cb[i+8])
        #for i in range(16):  print str(self.dpdt_cb[i].name)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)
        self.dpdt_fr.setLayout(vbox)
    def initFrames(self):
        """Create the five frames and assemble the overall window layout."""
        self.spdt_fr = QtGui.QFrame(self)
        self.spdt_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.spdt_fr.setFixedWidth(650)
        self.dpdt_fr = QtGui.QFrame(self)
        self.dpdt_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.dpdt_fr.setFixedWidth(650)
        self.adc_fr = QtGui.QFrame(self)
        self.adc_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.button_fr = QtGui.QFrame(self)
        self.button_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.button_fr.setFixedWidth(445)
        self.net_fr = QtGui.QFrame(self)
        self.net_fr.setFrameShape(QtGui.QFrame.StyledPanel)
        self.net_fr.setFixedWidth(200)
        vbox = QtGui.QVBoxLayout()
        hbox1 = QtGui.QHBoxLayout()
        hbox2 = QtGui.QHBoxLayout()
        hbox2.addWidget(self.net_fr)
        hbox2.addWidget(self.button_fr)
        vbox.addWidget(self.spdt_fr)
        vbox.addWidget(self.dpdt_fr)
        vbox.addLayout(hbox2)
        hbox1.addLayout(vbox)
        hbox1.addWidget(self.adc_fr)
        self.setLayout(hbox1)
    def darken(self):
        """Apply the black-background / white-text palette."""
        palette = QtGui.QPalette()
        palette.setColor(QtGui.QPalette.Background,QtCore.Qt.black)
        palette.setColor(QtGui.QPalette.WindowText,QtCore.Qt.white)
        palette.setColor(QtGui.QPalette.Text,QtCore.Qt.white)
        self.setPalette(palette)
    def getTimeStampGMT(self):
        """Return a 'YYYY-MM-DD HH:MM:SS.ffffff GMT | ' log prefix (UTC)."""
        return str(date.utcnow()) + " GMT | "
|
{"hexsha": "31bf7d8cf09a918fefed7540bfdcb7504feae807", "size": 16782, "ext": "py", "lang": "Python", "max_stars_repo_path": "old/python/rr_main_gui.py", "max_stars_repo_name": "vt-gs/relay_daemon", "max_stars_repo_head_hexsha": "9d77cd3222a3fe3e588f7c2196a4a06e8a73a471", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "old/python/rr_main_gui.py", "max_issues_repo_name": "vt-gs/relay_daemon", "max_issues_repo_head_hexsha": "9d77cd3222a3fe3e588f7c2196a4a06e8a73a471", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "old/python/rr_main_gui.py", "max_forks_repo_name": "vt-gs/relay_daemon", "max_forks_repo_head_hexsha": "9d77cd3222a3fe3e588f7c2196a4a06e8a73a471", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.4766839378, "max_line_length": 121, "alphanum_fraction": 0.6313907758, "include": true, "reason": "import numpy", "num_tokens": 4255}
|
#redirect wiki:Sacramento:Sapor
|
{"hexsha": "eb4c22f3d7b12dcbfda15640ba03c062af7e8152", "size": 32, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Sapor.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Sapor.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Sapor.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.0, "max_line_length": 31, "alphanum_fraction": 0.84375, "num_tokens": 9}
|
import argparse
import os
import pickle
import re
import glob
import numpy as np
import PIL.Image
from PIL import Image
from cv2 import VideoWriter, VideoWriter_fourcc, imread
import dnnlib
import dnnlib.tflib as tflib
def generate_images(arrs, network_pkl, truncation_psi=1.0, noise_mode='const', outdir='out', save=True, seed=1):
    """Render one image per latent vector in ``arrs`` with a StyleGAN generator.

    Loads the pickled networks from ``network_pkl``, resets the synthesis
    noise variables from ``seed`` before every render, and runs the Gs
    network on each latent. When ``save`` is true each frame is also
    written to ``outdir`` as a zero-padded PNG.

    Note: ``noise_mode`` is accepted for interface compatibility but is
    not used by this implementation.

    Returns a list of HWC uint8 image arrays, one per latent.
    """
    tflib.init_tf()
    print('Loading networks from "%s"...' % network_pkl)
    with dnnlib.util.open_url(network_pkl) as fp:
        _G, _D, G = pickle.load(fp)

    os.makedirs(outdir, exist_ok=True)

    # Generator run options: uint8 NHWC output, fixed (non-random) noise.
    run_kwargs = dict(
        output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True),
        randomize_noise=False,
        truncation_psi=truncation_psi,
    )
    noise_vars = [var for name, var in G.components.synthesis.vars.items() if name.startswith('noise')]
    label = np.zeros([1] + G.input_shapes[1][1:])

    results = []
    for idx, w in enumerate(arrs):
        # Re-seed the noise maps every frame so all frames share identical noise.
        rnd = np.random.RandomState(seed)
        tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})  # [height, width]
        images = G.run(w, label, **run_kwargs)  # [minibatch, height, width, channel]
        if save:
            PIL.Image.fromarray(images[0], 'RGB').save(f'{outdir}/{idx:04d}.png')
            print(f'Generated {idx}/{len(arrs)-1}')
        results.append(images[0])
    return results
|
{"hexsha": "2704adc790c3a791b91340ffdda9f4be19e8f0e7", "size": 1590, "ext": "py", "lang": "Python", "max_stars_repo_path": "StyleganTools/custom_generate_tf.py", "max_stars_repo_name": "LeoLinRui/SSTP", "max_stars_repo_head_hexsha": "2cad02fd14d1fd76f24a58fe609421796dee59f9", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-09T19:05:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-09T19:05:49.000Z", "max_issues_repo_path": "StyleganTools/custom_generate_tf.py", "max_issues_repo_name": "LeoLinRui/SSTP", "max_issues_repo_head_hexsha": "2cad02fd14d1fd76f24a58fe609421796dee59f9", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "StyleganTools/custom_generate_tf.py", "max_forks_repo_name": "LeoLinRui/SSTP", "max_forks_repo_head_hexsha": "2cad02fd14d1fd76f24a58fe609421796dee59f9", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5769230769, "max_line_length": 111, "alphanum_fraction": 0.6553459119, "include": true, "reason": "import numpy", "num_tokens": 430}
|
import os
import numpy as np
import pytest
import torch
from skimage.metrics import peak_signal_noise_ratio as ski_psnr
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import PSNR
from ignite.utils import manual_seed
def test_zero_div():
    """compute() must refuse to run before any update() call."""
    metric = PSNR()
    with pytest.raises(NotComputableError, match="PSNR must have at least one example before it can be computed"):
        metric.compute()
def test_invalid_psnr():
    """update() must reject mismatched dtypes/shapes, out-of-range values and unsupported dtypes."""
    pred = torch.rand(1, 3, 8, 8)
    target = torch.rand(1, 3, 8, 8)
    metric = PSNR()

    with pytest.raises(TypeError, match="Expected y_pred and y to have the same data type."):
        metric.update((pred, target.double()))

    with pytest.raises(ValueError, match="Expected y_pred and y to have the same shape."):
        metric.update((pred, target.squeeze(dim=0)))

    with pytest.raises(ValueError, match="y has intensity values outside the range expected for its data type."):
        metric.update((pred, target))
        # to catch ValueError for this batch
        metric.update((pred, target + 1.0))

    with pytest.raises(ValueError, match="Range for this dtype cannot be automatically estimated."):
        metric.update((pred.long(), target.long()))
def _test_psnr(y_pred, y, data_range, device):
    """Check ignite's batch-averaged PSNR against a per-sample skimage reference."""
    metric = PSNR(data_range=data_range, device=device)
    metric.update((y_pred, y))
    result = metric.compute()

    pred_np = y_pred.cpu().numpy()
    target_np = y.cpu().numpy()
    expected = sum(
        ski_psnr(t, p, data_range=data_range) for p, t in zip(pred_np, target_np)
    )

    assert isinstance(result, torch.Tensor)
    assert result.dtype == torch.float64
    assert result.device == torch.device(device)
    assert np.allclose(result.numpy(), expected / target_np.shape[0])
def test_psnr():
    """PSNR matches skimage for float NCHW, signed ints, uint8 and NHW inputs."""
    device = idist.device()

    # NCHW float tensors
    manual_seed(42)
    y_pred = torch.rand(8, 3, 28, 28, device=device)
    y = y_pred * 0.8
    for data_range in (None, 0.8, 1.0):
        _test_psnr(y_pred, y, data_range, device)

    # test for true_min < 0
    manual_seed(42)
    y_pred = torch.empty(2, 3, 12, 12, device=device).random_(-1, 2)
    y = torch.empty(2, 3, 12, 12, device=device).random_(-1, 2)
    for data_range in (None, 0.5, 1):
        _test_psnr(y_pred, y, data_range, device)

    # uint8 tensors
    manual_seed(42)
    y_pred = torch.randint(0, 256, (4, 3, 16, 16), dtype=torch.uint8, device=device)
    y = (y_pred * 0.8).to(torch.uint8)
    for data_range in (None, 240, 256):
        _test_psnr(y_pred, y, data_range, device)

    # test with NHW shape
    manual_seed(42)
    y_pred = torch.rand(8, 28, 28, device=device)
    y = y_pred * 0.8
    for data_range in (None, 0.3, 1.0):
        _test_psnr(y_pred, y, data_range, device)
def _test_distrib_integration(device, atol=1e-8):
    # Engine-based distributed check: each rank consumes a disjoint slice of
    # the global tensors, and the aggregated metric must match the skimage
    # reference computed over the FULL data.
    from ignite.engine import Engine

    rank = idist.get_rank()
    n_iters = 100
    s = 10                    # batch size per iteration
    offset = n_iters * s      # number of samples owned by each rank

    def _test(y_pred, y, data_range, metric_device):
        # Slice out this rank's batch for iteration i.
        def update(engine, i):
            return (
                y_pred[i * s + offset * rank : (i + 1) * s + offset * rank],
                y[i * s + offset * rank : (i + 1) * s + offset * rank],
            )

        engine = Engine(update)
        PSNR(data_range=data_range, device=metric_device).attach(engine, "psnr")

        data = list(range(n_iters))
        engine.run(data=data, max_epochs=1)
        result = engine.state.metrics["psnr"]
        assert "psnr" in engine.state.metrics

        # Reference: per-sample skimage PSNR over the whole dataset, averaged.
        np_y_pred = y_pred.cpu().numpy()
        np_y = y.cpu().numpy()
        np_psnr = 0
        for np_y_pred_, np_y_ in zip(np_y_pred, np_y):
            np_psnr += ski_psnr(np_y_, np_y_pred_, data_range=data_range)

        assert np.allclose(result, np_psnr / np_y.shape[0], atol=atol)

    # ---- metric accumulated on CPU ----
    manual_seed(42)
    y_pred = torch.rand(offset * idist.get_world_size(), 3, 28, 28, device=device)
    y = y_pred * 0.65
    _test(y_pred, y, None, "cpu")
    _test(y_pred, y, 0.5, "cpu")
    _test(y_pred, y, 1, "cpu")

    # test for true_min < 0
    manual_seed(42)
    y_pred = torch.empty(offset * idist.get_world_size(), 3, 12, 12, device=device).random_(-1, 2)
    y = -1 * y_pred
    _test(y_pred, y, None, "cpu")
    _test(y_pred, y, 0.5, "cpu")
    _test(y_pred, y, 1, "cpu")

    manual_seed(42)
    y_pred = torch.randint(0, 256, (offset * idist.get_world_size(), 3, 16, 16), device=device, dtype=torch.uint8)
    y = (y_pred * 0.65).to(torch.uint8)
    _test(y_pred, y, None, "cpu")
    _test(y_pred, y, 240, "cpu")
    _test(y_pred, y, 256, "cpu")

    # test with NHW shape
    manual_seed(42)
    y_pred = torch.rand(offset * idist.get_world_size(), 28, 28, device=device)
    y = y_pred * 0.8
    _test(y_pred, y, None, "cpu")
    _test(y_pred, y, 0.3, "cpu")
    _test(y_pred, y, 1, "cpu")

    # ---- metric accumulated on the distributed device (not supported on XLA) ----
    if torch.device(device).type != "xla":
        manual_seed(42)
        y_pred = torch.rand(offset * idist.get_world_size(), 3, 28, 28, device=device)
        y = y_pred * 0.65
        _test(y_pred, y, None, idist.device())
        _test(y_pred, y, 0.5, idist.device())
        _test(y_pred, y, 1, idist.device())

        # test for true_min < 0
        manual_seed(42)
        y_pred = torch.empty(offset * idist.get_world_size(), 3, 12, 12, device=device).random_(-1, 2)
        y = -1 * y_pred
        _test(y_pred, y, None, idist.device())
        _test(y_pred, y, 0.5, idist.device())
        _test(y_pred, y, 1, idist.device())

        manual_seed(42)
        y_pred = torch.randint(0, 256, (offset * idist.get_world_size(), 3, 16, 16), device=device, dtype=torch.uint8)
        y = (y_pred * 0.65).to(torch.uint8)
        _test(y_pred, y, None, idist.device())
        _test(y_pred, y, 240, idist.device())
        _test(y_pred, y, 256, idist.device())

        # test with NHW shape
        manual_seed(42)
        y_pred = torch.rand(offset * idist.get_world_size(), 28, 28, device=device)
        y = y_pred * 0.8
        _test(y_pred, y, None, idist.device())
        _test(y_pred, y, 0.3, idist.device())
        _test(y_pred, y, 1, idist.device())
def _test_distrib_accumulator_device(device):
    """PSNR's internal accumulator must live on the requested metric device."""
    devices = [torch.device("cpu")]
    if torch.device(device).type != "xla":
        devices.append(idist.device())

    for metric_device in devices:
        metric = PSNR(data_range=1.0, device=metric_device)

        dev = metric._device
        assert dev == metric_device, f"{dev} vs {metric_device}"

        y_pred = torch.rand(2, 3, 28, 28, dtype=torch.float, device=device)
        y = y_pred * 0.65
        metric.update((y_pred, y))

        dev = metric._sum_of_batchwise_psnr.device
        assert dev == metric_device, f"{dev} vs {metric_device}"
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(distributed_context_single_node_gloo):
    # Single-node gloo backend: run both distributed PSNR checks on CPU.
    for check in (_test_distrib_integration, _test_distrib_accumulator_device):
        check("cpu")
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
    # Single-node nccl backend: run both distributed PSNR checks on this rank's GPU.
    device = f"cuda:{local_rank}"
    for check in (_test_distrib_integration, _test_distrib_accumulator_device):
        check(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
    # Multi-node gloo backend: run both distributed PSNR checks on CPU.
    for check in (_test_distrib_integration, _test_distrib_accumulator_device):
        check("cpu")
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
    # Multi-node nccl backend: run both checks on the local rank's GPU.
    device = f"cuda:{distributed_context_multi_node_nccl['local_rank']}"
    for check in (_test_distrib_integration, _test_distrib_accumulator_device):
        check(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
    # Single XLA device: run both distributed PSNR checks on the idist device.
    device = idist.device()
    for check in (_test_distrib_integration, _test_distrib_accumulator_device):
        check(device)
def _test_distrib_xla_nprocs(index):
    # Per-process body for the multi-process XLA test below; `index` is the
    # worker index supplied by the XLA multiprocessing launcher.
    device = idist.device()
    for check in (_test_distrib_integration, _test_distrib_accumulator_device):
        check(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
    # Spawn NUM_TPU_WORKERS XLA processes, each running _test_distrib_xla_nprocs.
    num_workers = int(os.environ["NUM_TPU_WORKERS"])
    xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=num_workers)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
    # Horovod over gloo: run each check in nproc worker processes — one per GPU
    # when CUDA is available, otherwise 4 CPU processes.
    if torch.cuda.is_available():
        device, nproc = "cuda", torch.cuda.device_count()
    else:
        device, nproc = "cpu", 4
    for fn in (_test_distrib_integration, _test_distrib_accumulator_device):
        gloo_hvd_executor(fn, (device,), np=nproc, do_init=True)
|
{"hexsha": "6374390fdb5f24d0fb5c0c809c058f8c8038b96d", "size": 9774, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/ignite/metrics/test_psnr.py", "max_stars_repo_name": "DecSnowFlake/ignite", "max_stars_repo_head_hexsha": "b48354236a9993b75bcab12c8657fbf2f2318c70", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-30T11:43:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-30T11:43:26.000Z", "max_issues_repo_path": "tests/ignite/metrics/test_psnr.py", "max_issues_repo_name": "DecSnowFlake/ignite", "max_issues_repo_head_hexsha": "b48354236a9993b75bcab12c8657fbf2f2318c70", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/ignite/metrics/test_psnr.py", "max_forks_repo_name": "DecSnowFlake/ignite", "max_forks_repo_head_hexsha": "b48354236a9993b75bcab12c8657fbf2f2318c70", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4701492537, "max_line_length": 118, "alphanum_fraction": 0.6782279517, "include": true, "reason": "import numpy", "num_tokens": 2863}
|
# Smoke-test driver for the Knet examples: run the package test suite, then
# load each example file and call its `main` entry point three times with
# different option strings.
using Knet
Pkg.test("Knet")  # run Knet's own test suite before exercising the examples
# NOTE(review): `load_only` is presumably read by the included example files so
# that `include(f)` only defines the example module without auto-running it -- confirm.
load_only = true
# Each entry: (module symbol, example file, gradient-check opts, fast opts x2).
for (p,f,o1,o2,o3) =
    (
    (:LinReg, "linreg.jl", "--gcheck 2", "--fast", "--fast"),
    (:Housing, "housing.jl", "--gcheck 2 --atype Array{Float64}", "--fast", "--fast"),
    (:MNIST, "mnist.jl", "--gcheck 2", "--fast", "--fast"),
    (:LeNet, "lenet.jl", "--gcheck 2", "--fast", "--fast"),
    (:CharLM, "charlm.jl", "--gcheck 2 --winit 0.01", "--fast", "--fast"),
    )
    gpu() < 0 && p == :LeNet && continue  # skip LeNet when gpu() reports a negative id (no GPU)
    include(f)                            # defines the example module named `p`
    m = eval(:($p.main))                  # look up that module's `main` function
    m(o1); m(o2); m(o3)                   # run it once per option string
    gc(); gpu() >= 0 && Knet.knetgc()     # collect host garbage; also reclaim Knet GPU memory when a GPU is present
end
|
{"hexsha": "9f0ddf07249e68b74cf376e375efffdd51646b2c", "size": 576, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/runexamples.jl", "max_stars_repo_name": "enzotarta/Knet.jl", "max_stars_repo_head_hexsha": "4330f3fec812ccbbfeabbcf6aa63450ca743f9c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/runexamples.jl", "max_issues_repo_name": "enzotarta/Knet.jl", "max_issues_repo_head_hexsha": "4330f3fec812ccbbfeabbcf6aa63450ca743f9c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/runexamples.jl", "max_forks_repo_name": "enzotarta/Knet.jl", "max_forks_repo_head_hexsha": "4330f3fec812ccbbfeabbcf6aa63450ca743f9c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8, "max_line_length": 87, "alphanum_fraction": 0.4791666667, "num_tokens": 217}
|
\documentclass[12pt,oneside,a4]{article}
\usepackage{float}
\usepackage[utf8]{inputenc}
\usepackage[a4paper,width=160mm,top=25mm,bottom=25mm]{geometry}
\usepackage[lining,tabular]{fbb} % so math uses tabular lining figures
\usepackage{graphicx}
\usepackage{enumitem}
\usepackage{listings}
\usepackage[svgnames]{xcolor}
\usepackage{subfig}
\usepackage{booktabs}
\usepackage{multirow}
\usepackage{svg}
\usepackage{tabu}
\usepackage{ltablex}
\usepackage{longtable}
\setlist{leftmargin=*}
\usepackage{listings}
\lstset{basicstyle=\ttfamily,frame=single,xleftmargin=3em,xrightmargin=3em}
\usepackage[os=win]{menukeys}
\renewmenumacro{\keys}[+]{shadowedroundedkeys}
\usepackage{framed}
\usepackage{etoolbox}
\AtBeginEnvironment{leftbar}{\sffamily\small}
\usepackage{array,lipsum}
\newenvironment{fulltable}[1][H]
{\begin{table}[#1]%
\hspace*{-\leftmarginwidth}%
\begin{minipage}{\fullwidth}}
{\end{minipage}\end{table}}
\usepackage{tikz}
\usetikzlibrary{chains,arrows,shapes,positioning}
\usepackage{hyperref}
\graphicspath{{figures/}} %Setting the graphicspath
\renewcommand\abstractname{Introduction}
\title{Marble -- User Guide\\ \small{v1.0 2021}}
\author{}
\begin{document}
\maketitle
\begin{center}
\includegraphics[width=0.8\linewidth]{marble_top.png}
\end{center}
\begin{abstract}
%\includegraphics[width=1.0\linewidth]{PhysLogger.png}\\
Marble is a fully open source dual FMC carrier board designed for the Accelerator Technologies Group of the Engineering Division of Lawrence Berkeley National Laboratory. This document presents the technical documentation of the Marble module divided into individual functional sections.
Design files are made in KiCad and are licensed under the CERN OHL v.~1.2.
\end{abstract}
\clearpage
\tableofcontents
\clearpage
\section{Overview}
\begin{leftbar}
Design files are open source and can be downloaded from GitHub:
https://github.com/BerkeleyLab/Marble
\end{leftbar}
Marble is a dual FMC carrier module based on a Kintex-7 FPGA. The block diagram of the module is shown in figure \ref{block}.
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{block_k3.png}
\caption{Marble block diagram.}\label{block}
\end{center}
\end{figure}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{marble_references.png}
\caption{Marble board components.}\label{components}
\end{center}
\end{figure}
\begin{longtable}[htbp]{@{}ccp{0.3\linewidth}p{0.3\linewidth}c@{}}
\toprule
\textbf{Callout} &
\textbf{\begin{tabular}[c]{@{}c@{}}Reference\\ Designator\end{tabular}} &
\textbf{Component Description} &
\textbf{Notes} &
\textbf{\begin{tabular}[c]{@{}c@{}}Schematic \\ Page\end{tabular}} \\ \midrule
1 & U1 & FPGA Xilinx Kintex-7 & XC7K160T-2FFG676C & \\
2 & SK1 & DDR3 SO-DIMM memory module & VR7PU286458FBAMJT & \\
3 & U4 & 10/100/1000 Ethernet PHY & 88E1512-A0-NNP2I000 & \\
4 & J14, J6 & Microcontroller programming connectors & & \\
5 & U2 & 8x8 Clock Crosspoint Switch & ADN4600ACPZ & \\
6 & U35 & Power Management & XRP7724ILBTR-F & \\
7 & J1, J19 & 12V power input & FC68148(DC-10A), 641119-2 & \\
8 & J11 & FPGA JTAG connector & 87831-1420 & \\
9 & U40 & PoE module & AG5300 & \\
10 & M1, M2 & 3-pin PC fan connectors & SWR25X-NRTC-S03-ST-BA & \\
11 & J8 & QSFP connector & QSFP8-038-01-L-D-RA1 & \\
12 & J17 & QSFP connector & QSFP8-038-01-L-D-RA1 & \\
13 & P2 & FMC HPC connector & ASP-134486-01 & \\
14 & P1 & FMC HPC connector & ASP-134486-01 & \\
15 & J12 & PMOD connected to FPGA & PPTC062LJBN-RC & \\
16 & J16 & PMOD connected to microcontroller & PPTC062LJBN-RC & \\
17 & J13 & PMOD connected to FPGA & PPTC062LJBN-RC & \\
18 & SW3 & User button connected to microcontroller & KSS241GLFS & \\
19 & SW5 & FPGA reset button & KSS241GLFS & \\
20 & LD13, LD14 & User LEDs connected to shared I2C bus & KPH-1608CGCK & \\
21 & LD11-12, LD15 & User LEDs connected to microcontroller & KPH-1608CGCK & \\
22 & LD16, LD17 & User LEDs connected to FPGA & KPH-1608CGCK & \\
23 & SW1 & Memory write protection switch & A6SN-1101 & \\
24 & Y6 & 10-280MHz Clock generator & SI570 & \\
25 & J2, J5 & External clock source input & U.FL & \\
26 & J3, J7 & External clock source input & U.FL & \\
27 & U44-U50 & GTX Transceivers Mux & PI3DBS12212AZBSEX & \\
28 & J9 & Power Management programming header & 0.1 inch male 4-pin header & \\
29 & Y1-Y3, U20 & Internal 125 MHz \& 20MHz clock sources & CDCM61004RHBT & \\
30 & U30 & FPGA SPI flash memory & S25FL128SAGMFIR01 & \\
31 & U5 & I2C multiplexer & TCA9548ARGER & \\
32 & U23 & Double USB - UART bridge, USB-JTAG bridge for FPGA & FT4232H-56Q & \\
33 & U54 & Housekeeping Microcontroller & STM32F207VCTx & \\
34 & J10 & Micro USB connector for U23 & 10103594-0001LF & \\
35 & SW2 & \textcolor{red}{Not populated user button (physically connected to SW3)} & SKHHLQA010 & \\
36 & U60 & Over-voltage and Under-voltage Reset IC & TPS3703A7330DSERQ1 & \\
37 & LD4-10, LD18-19 & Power rails indicator LEDs & KPH-1608CGCK & \\ \bottomrule
\caption{Marble board components and reference designators}
\label{tab:my-table}
\end{longtable}
The board has the following functionalities and features:
\begin{enumerate}
\item Xilinx Kintex-7 FPGA XC7K160T-2FFG676C
\item Supports FPGA golden image
\item Housekeeping microcontroller (Module Management Controller) with UART console
\item DDR3 204-SODIMM memory module connector. The board supports up to 4 GB memory.
\item Two FMC HPC connectors, but not all signals are connected to the FPGA.
\item 1Gb Ethernet with PoE
\item Built-in clock generator that supports White Rabbit synchronization
\item Various input clock configurations
\item Two QSFP cages that support data transfer up to 40 Gb/s each
\item Built-in USB JTAG which works with OpenOCD
\end{enumerate}
\begin{table}[htbp]
\centering
\begin{tabular}{@{}lcl@{}}
\toprule
FPGA Bank & Bank Power Supply & Description \\ \midrule
Bank 12 HR & +2.5V & FMC2 LA 00-16 (plus SPI, Self JTAG) \\
Bank 13 HR & +2.5V & FMC2 HA\\
Bank 14 HR & +2.5V & FMC2 LA 17-33 (plus config, Pmod)\\
Bank 15 HR & +2.5V & FMC1 LA 00-16 (plus I2C, UART, Pmod)\\
Bank 16 HR & +2.5V & FMC1 LA 17-33 (plus RGMII)\\
Bank 32 HP & +1.5V & DDR3 \\
Bank 33 HP & +1.5V & DDR3 (plus Pmod, White Rabbit) \\
Bank 34 HP & +1.5V & DDR3 \\ \bottomrule
\end{tabular}
\caption{FPGA bank power supplies and usage}
\label{tab:banks}
\end{table}
The S25FL128SAGMFIR01 configuration memory is connected to the FPGA chip. By default, the FPGA chip loads the configuration from the flash memory after correct power cycle. The module is equipped with a switch (fig \ref{bootsw}) that blocks the programming of the configuration memory. When the switch is in the ON position, Write Protection is enabled. WP signal status can be read via I2C IO expander.
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.8\linewidth]{bootsw.png}
\caption{Memory write protection switch.}\label{bootsw}
\end{center}
\end{figure}
\subsection{FPGA reset}
During power system startup, the U60 chip keeps the PROGRAM\_B signal low. When a valid supply voltage is detected on the last power-up sequence, U60 changes state on the PROGRAM\_B signal which causes the configuration file to be loaded from flash memory.
A local operator can reset the FPGA manually using the SW5 button. Additionally, the MMC can reset the FPGA.
\subsection{JTAG}
There are 3 sources of JTAG for FPGA:
\begin{enumerate}
\item external JTAG (highest priority)
\item internal USB-JTAG (middle priority)
\item self JTAG (lowest priority)
\end{enumerate}
\subsubsection{External JTAG}
When the external JTAG is connected to J11, GNDDetect signal from the connector switches the multiplexer to pass JTAG signals from the connector to the FPGA. After unplugging cable, the GNDDetect signal is not present and the multiplexer connects internal USB-JTAG to FPGA.
When external JTAG is connected, any other JTAG sources are not available.
\subsubsection{Internal USB-JTAG}
Internal USB-JTAG is done by using the first data channel of FT4232, which can work as a JTAG. When the micro USB cable is connected, +5V
from USB bus switches the multiplexer to pass data from FT4232 to FPGA.
\subsubsection{Self-JTAG}
Internal self-JTAG can be used only when USB cable and external JTAG are not connected. In this configuration FPGA JTAG signals are connected to FPGA Bank 12:
\begin{table}[htbp]
\centering
\begin{tabular}{@{}ccc@{}}
\toprule
Signal name& Self JTAG signal & FPGA pin \\ \midrule
JTAG TDI & Self\_FPGA\_TDI & IO\_L10P \\
JTAG TCK & Self\_FPGA\_TCK & IO\_L10N \\
JTAG TMS & Self\_FPGA\_TMS & IO\_L20P \\
JTAG TDO & Self\_FPGA\_TDO & IO\_L20N \\ \bottomrule
\end{tabular}
\caption{Self-JTAG signal mapping}
\label{tab:selfjtag}
\end{table}
\subsection{LEDs}
Two general purpose LEDs are connected to the FPGA chip:
\begin{enumerate}
\item LD16 - connected to pin IO\_L18P\_33
\item LD17 - connected to pin IO\_25\_33
\end{enumerate}
\subsection{FPGA Programming}
Vivado reference design with a constraint file can be found here:
\begin{leftbar}
https://github.com/BerkeleyLab/bedrock
\end{leftbar}
See its projects/test\_marble\_family directory.
\subsubsection{Internal JTAG}
Download the latest version of the FPGA testing code from GitHub:
\begin{leftbar}
https://github.com/BerkeleyLab/Bedrock
\end{leftbar}
\begin{leftbar}
Before testing the FPGA, it is recommended to set up the current limit to 2A on the lab power supply.
\end{leftbar}
Program the FPGA using the following steps:
\begin{enumerate}
\item Plug micro USB cable
\item Go to the folder \textbf{Bedrock/projects/test\_marble\_family/}
\item Open command terminal and run command:
\begin{lstlisting}[backgroundcolor = \color{Gainsboro}, language=bash, frame=none]
$ mutil usb
\end{lstlisting}
\item After the successful programming, LEDs LD16 and LD17 should blink alternately.
\end{enumerate}
\subsubsection{External JTAG}
Programming FPGA using Vivado and Digilent JTAG HS3 connected to J11:
\begin{enumerate}
\item Run Vivado
\item Go to \menu{Flow>Open Hardware Manager} and then \menu{Tools>Auto Connect}
\item Click \menu{Tools>Program Device>xc7k160t\_0} to open the programming window.
\item Choose the \textit{bitstream file} and click \menu{Program}
\item After the successful programming, LEDs LD16 and LD17 should blink alternately.
\end{enumerate}
\section{SO-DIMM}
The size of the DDR3 memory can be determined by the user by assembling the appropriate SO-DIMM module to the board. The use of a 204 pin SO-DIMM connector allows up to 4 GB of RAM to be connected to the FPGA. An I2C interface is provided to the memory module so that additional information about the module can be read. The default power supply for memory and HP banks is set to 1.5V and can only be changed by changing resistors. ECC is not supported by the module.\\
A reference design with a memory controller and a constraint file can be found here:
\begin{leftbar}
https://github.com/BerkeleyLab/bedrock
\end{leftbar}
See its projects/test\_marble\_family directory.
\section{GTH Routing}
Gigabit transceivers routing can be configured by the microcontroller. Transceivers from Bank 116 are permanently connected to module QSFP1. Transceivers from Bank 115 can be routed in 3 ways:
\begin{enumerate}
\item 4 transceivers connected to QSFP2
\item 4 transceivers connected to FMC P2
\item 2 transceivers connected to FMC P2 and 2 transceivers connected to FMC P1
\end{enumerate}
Transceiver routing configuration can be set by the microcontroller. Three signals control the multiplexers which provide high quality signal switching. Gigabit multiplexer switching can be done from the UART console. By default, the multiplexers are set to route signals to the second QSFP connector. In the table \ref{table}, the MUXx columns correspond to the controlling logical state.
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[htbp]
\begin{tabular}{@{}llllllll@{}}
\toprule
& MUX3 & MUX2 & MUX1 & MGT4 & MGT5 & MGT6 & MGT7 \\ \midrule
 & 0 & 0 & 0 & FMC2-DP0 & FMC2-DP1 & FMC2-DP2 & FMC1-DP1 \\
 & 0 & 0 & 1 & FMC2-DP0 & FMC2-DP1 & FMC1-DP0 & FMC1-DP1 \\
 & 0 & 1 & 0 & FMC2-DP0 & FMC2-DP1 & FMC2-DP2 & FMC2-DP3 \\
 & 0 & 1 & 1 & FMC2-DP0 & FMC2-DP1 & FMC1-DP0 & FMC2-DP3 \\
& 1 & X & X & QSFP2:3/10 & QSFP2:1/12 & QSFP2:2/11 & QSFP2:4/9 \\ \bottomrule
\end{tabular}
\caption{GTH transceivers routing table}\label{table}
\end{table}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{highspeed.png}
\caption{Transceivers routing block diagram.}\label{highspeed}
\end{center}
\end{figure}
Use UART console to change the transceiver routing style.
\section{Clocking}
This section describes how and where clock signals are routed. There are 7 on-board clock sources:
\begin{table}[htbp]
\centering
\begin{tabular}{@{}llp{0.6\linewidth}@{}}
\toprule
Clock Name &
Reference &
Description \\ \midrule
\multirow{2}{*}{System Clock} &
Y1, Y2 &
Y1 and Y2 can be soldered interchangeably and provide a reference clock source for the U20 chip \\[0.1cm]
&
U20 &
U20 is an ultra-low jitter clock generator that provides a 125 MHz clock for the FPGA bank 33 that can operate as a DDR3 reference clock. Additionally, U20 generates 125 MHz clock which is connected to the clock multiplexer.
\\ [0.3cm]
Additional Clock &
Y3 &
VCXO - provides 20 MHz clock for FPGA bank 33 \\[0.2cm]
User Clock &
Y6 &
Si570 - provides variable clock frequencies for 10 MHz to 280 MHz; connected to the clock multiplexer \\[0.1cm]
\begin{tabular}[c]{@{}l@{}}User U.FL Clock 1\\ (differential pair)\end{tabular} &
J2, J5 &
External clock source input \\[0.4cm]
\begin{tabular}[c]{@{}l@{}}User U.FLClock 2\\ (differential pair)\end{tabular} &
J3, J7 &
External clock source input \\[0.4cm]
FMC1 M2C &
P1 & GBTCLK 0 \& 1
\\[0.5cm]
FMC2 M2C &
P2 & GBTCLK 0 \& 1
\\ \bottomrule
\end{tabular}
\caption{On-board and external clock sources}
\label{tab:clk-table}
\end{table}
Marble supports external clock sources from FMC and from U.FL connectors. Thanks to the clock multiplexer, any clock input can be connected to any FPGA MGT clock input. Clock routing and Si570 frequency can be changed over I2C from the house-keeping microcontroller or the FPGA.
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{clocking.png}
\caption{Marble clocking scheme}\label{clocking}
\end{center}
\end{figure}
\subsection{White Rabbit clock generator}
The clock chips available on the board allow implementation of the White Rabbit protocol. White Rabbit provides sub-nanosecond accuracy and picoseconds precision of synchronization for large distributed systems.
\section{Pmod connectors}
Marble has 3 Pmod connectors that support 3.3V logic level:
\begin{enumerate}
\item J12 and J13 are connected to FPGA
\item J16 is connected to the MMC
\end{enumerate}
\begin{table}[htbp]
\centering
\begin{tabular}{@{}cccc@{}}
\toprule
&
\begin{tabular}[c]{@{}c@{}}Pmod 1 (J12)\\ FPGA\end{tabular} &
\begin{tabular}[c]{@{}c@{}}Pmod 2 (J13)\\ FPGA\end{tabular} &
\begin{tabular}[c]{@{}c@{}}Pmod (J16)\\ MMC\end{tabular} \\ \midrule
Pmodx\_C\_0 & IO\_L6N\_14 & IO\_L7P\_33 & PB9 (SEL) \\
Pmodx\_C\_1 & IO\_L7N\_14 & IO\_L2N\_33 & PC3 (MOSI) \\
Pmodx\_C\_2 & IO\_25\_14 & IO\_L4N\_33 & PC2 (MISO) \\
Pmodx\_C\_3 & IO\_L7P\_14 & IO\_L7N\_33 & PB10 (SCK) \\
Pmodx\_C\_4 & IO\_0\_14 & IO\_L2P\_33 & PB14 (EINT1) \\
Pmodx\_C\_5 & IO\_L5N\_15 & IO\_L8P\_33 & PB15 \\
Pmodx\_C\_6 & IO\_L4P\_15 & IO\_L4P\_33 & PD6 (UART4 RX) \\
Pmodx\_C\_7 & IO\_L5P\_15 & IO\_L3N\_33 & PD5 (UART4 TX) \\ \bottomrule
\end{tabular}
\caption{Pmod connectors pins assignment}
\label{tab:pmod}
\end{table}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{pmods.png}
\caption{Pmod pinout}\label{pmods}
\end{center}
\end{figure}
\section{QSFP}
Marble is equipped with 2 QSFP cages (\ref{qsfp}). Each can support 40 Gb/s of data transfer:
\begin{enumerate}
\item QSFP number 1 - high-speed lines are directly connected to the FPGA bank 116
\item QSFP number 2 - high-speed lines are connected to the FPGA bank 115 through the gigabit multiplexer.
\end{enumerate}
QSFPs control signals are connected to the I2C IO expander (U34)(\ref{qsfpio}) which is accessible from FPGA or MMC. The connection of the differential signals to the FPGA is shown in the tables (\ref{tab:qsfp1-table})(\ref{tab:qsfp2-table}). QSFP2 is not directly connected to the FPGA chip. It is connected via a gigabit multiplexer which is controlled from the MMC. By default, the multiplexer is configured to connect the QSFP2 to the FPGA. In order to choose a different configuration or restore the default, select the appropriate option from the MMC console.
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.8\linewidth]{qsfp.png}
\caption{QSFP cages}\label{qsfp}
\end{center}
\end{figure}
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.8\linewidth]{qsfpio.png}
\caption{I2C IO expander which controls QSFPs signals}\label{qsfpio}
\end{center}
\end{figure}
\begin{table}[htbp]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
FPGA Pin & QSFP Signal \\ \midrule
MGTXTXP0\_116 & QSFP1\_TX\_3\_P \\
MGTXTXN0\_116 & QSFP1\_TX\_3\_N \\
MGTXRXP0\_116 & QSFP1\_RX\_3\_P \\
MGTXRXN0\_116 & QSFP1\_RX\_3\_N \\ \midrule
MGTXTXP1\_116 & QSFP1\_TX\_1\_P \\
MGTXTXN1\_116 & QSFP1\_TX\_1\_N \\
MGTXRXP1\_116 & QSFP1\_RX\_1\_P \\
MGTXRXN1\_116 & QSFP1\_RX\_1\_N \\ \midrule
MGTXTXP2\_116 & QSFP1\_TX\_2\_P \\
MGTXTXN2\_116 & QSFP1\_TX\_2\_N \\
MGTXRXP2\_116 & QSFP1\_RX\_2\_P \\
MGTXRXN2\_116 & QSFP1\_RX\_2\_N \\ \midrule
MGTXTXP3\_116 & QSFP1\_TX\_4\_P \\
MGTXTXN3\_116 & QSFP1\_TX\_4\_N \\
MGTXRXP3\_116 & QSFP1\_RX\_4\_P \\
MGTXRXN3\_116 & QSFP1\_RX\_4\_N \\ \bottomrule
\end{tabular}
\caption{QSFP1 pins connection}
\label{tab:qsfp1-table}
\end{table}
\begin{table}[htbp]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
FPGA Pin & QSFP Signal \\ \midrule
MGTXTXP0\_115 & QSFP1\_TX\_3\_P \\
MGTXTXN0\_115 & QSFP1\_TX\_3\_N \\
MGTXRXP0\_115 & QSFP1\_RX\_3\_P \\
MGTXRXN0\_115 & QSFP1\_RX\_3\_N \\ \midrule
MGTXTXP1\_115 & QSFP1\_TX\_1\_P \\
MGTXTXN1\_115 & QSFP1\_TX\_1\_N \\
MGTXRXP1\_115 & QSFP1\_RX\_1\_P \\
MGTXRXN1\_115 & QSFP1\_RX\_1\_N \\ \midrule
MGTXTXP2\_115 & QSFP1\_TX\_2\_P \\
MGTXTXN2\_115 & QSFP1\_TX\_2\_N \\
MGTXRXP2\_115 & QSFP1\_RX\_2\_P \\
MGTXRXN2\_115 & QSFP1\_RX\_2\_N \\ \midrule
MGTXTXP3\_115 & QSFP1\_TX\_4\_P \\
MGTXTXN3\_115 & QSFP1\_TX\_4\_N \\
MGTXRXP3\_115 & QSFP1\_RX\_4\_P \\
MGTXRXN3\_115 & QSFP1\_RX\_4\_N \\ \bottomrule
\end{tabular}
\caption{QSFP2 pins connection}
\label{tab:qsfp2-table}
\end{table}
\section{Ethernet}
Marble is equipped with Ethernet PHY (88E1512) which supports 10/100/1000BASE-T. PHY is connected to the FPGA bank 16 via RGMII interface:
\begin{table}[htbp]
\centering
\begin{tabular}{@{}cc@{}}
\toprule
\textbf{RGMII signal} & \textbf{FPGA pin} \\ \midrule
RGMII\_RXD0 & IO\_L4N\_16 \\
RGMII\_RXD1 & IO\_0\_16 \\
RGMII\_RXD2 & IO\_L1P\_16 \\
RGMII\_RXD3 & IO\_L1N\_16 \\
RGMII\_RX\_DV & IO\_L4P\_16 \\
RGMII\_RX\_CLK & IO\_L14P\_16 \\
RGMII\_TXD0 & IO\_L6N\_16 \\
RGMII\_TXD1 & IO\_L6P\_16 \\
RGMII\_TXD2 & IO\_L8N\_16 \\
RGMII\_TXD3 & IO\_L8P\_16 \\
RGMII\_TX\_EN & IO\_L10P\_16 \\
\multicolumn{1}{l}{RGMII\_TX\_CLK} & \multicolumn{1}{l}{IO\_L11N\_16} \\
PHY\_RSTn & IO\_L10N\_16 \\ \bottomrule
\end{tabular}
\caption{RGMII pins assignment}
\label{tab:rgmii}
\end{table}
The Ethernet PHY can be monitored over MDIO by the MMC. IP address and MAC address are stored in the MMC's internal EEPROM memory (EEPROM functionality is emulated in the internal flash). To read the internal PHY configuration registers, select the appropriate option from the MMC console.
By default, the MDIO address is set to 1 but it can be changed by changing the resistor on the module. To set address to 1, desolder resistor R80 and solder resistor R65 with value 0R.
\section{FMC}
Both FMC sockets are equipped with HPC (High Pin Count) type connector but not all signals were connected to FPGA and MMC.
The connection of both FMC connectors is shown below:
\begin{enumerate}
\item \textbf{FMC P1} signal connection:
\begin{enumerate}
\item LA00...LA33 - connected to FPGA banks 15 and 16.
\item HA00...HA23 - \textit{not connected}.
\item HB00...HB21 - \textit{not connected}.
\item CLK[0..1]\_M2C - connected to FPGA bank 15.
\item GBTCLK[0..1]\_M2C - connected to CLK MUX IN4 and IN5.
\item DP[0]\_M2C - connected to high speed MUX.
\item DP[0]\_C2M - connected to high speed MUX.
\item JTAG - connected to MMC.
\item I2C - connected to I2C bus shared with MMC and FPGA
\end{enumerate}
\item \textbf{FMC P2} signal connection:
\begin{enumerate}
\item LA00...LA33 - connected to FPGA banks 12 and 14.
\item HA00...HA23 - connected to FPGA bank 13.
\item HB00...HB21 - \textit{not connected}.
\item CLK[0..1]\_M2C - connected to FPGA banks 12 and 14.
\item GBTCLK[0..1]\_M2C - connected to CLK MUX IN6 and IN7.
\item DP[0..3]\_M2C - connected to high speed MUX.
\item DP[0..3]\_C2M - connected to high speed MUX.
\item JTAG - connected to MMC.
\item I2C - connected to I2C bus shared with MMC and FPGA
\end{enumerate}
\end{enumerate}
\section{USB-UART}
The USB-UART bridge (FT4232H) has 2 channels of UART:
\subsection{FPGA UART}
FPGA UART occupies FT4232's channel 3. UART signals are connected to bank 15 pins:
\begin{enumerate}
\item TX input IO\_L1P\_15
\item RX output IO\_0\_15
\end{enumerate}
\subsection{MMC UART}
MMC UART occupies FT4232's channel 4 and is used as the MMC serial console. Terminal configuration is 8N1 at 115200 baudrate. The FT4232's channel 2 DTR signal can be used to provide a reset signal for the microcontroller.
\section{Power}
Several hardware options are available to provide the nominal +12V power to the board.
It can come from TE Connector (3-641119-2) - J19 (fig. \ref{j19}) or by standard barrel connector (Type A: 5.5 mm OD, 2.1 mm ID) - J1. Input voltage range: 10V - 18V. Additionally, PoE can be used to power the module.
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.6\linewidth]{j1j19.png}
\caption{J19 connector with indicated Vin and GND}\label{j19}
\end{center}
\end{figure}
When the power is connected, the board starts up automatically. If there is a failure of any power rail, the LED corresponding to it will not light up.
Block diagram of Marble's power tree is shown in figure \ref{pwr}. The USB-UART bridge is powered from the micro USB connector via a converter (U22). The supervising microcontroller has a separate converter (U18) connected directly to the power input. The main voltages supplying the FPGA are produced by U35 - the programmable power management system. U35 turns on the individual power channels in the proper sequence and provides power to the additional converter (U58) and LDOs (U31, U36, U37, U47).
\begin{figure}[H]
\begin{center}
\includegraphics[width=1.1\linewidth]{m_power.png}
\caption{Marble power routing}\label{pwr}
\end{center}
\end{figure}
Power supply features:
\begin{enumerate}
\item Over-temperature protection.
\item Power rails for the FPGA can be switched off and on by the microcontroller.
\item All power rails generated by XRP7724 can be monitored by the microcontroller and they are equipped with over-current protection.
\item Current consumption, voltage and rails status can be read by microcontroller.
\item The presence of the power rails is indicated by LED diodes
\item 12V power supply for both FMCs can be controlled independently by the microcontroller. Additionally, current can be measured.
\end{enumerate}
\subsection{Fan controller}
The fan control and temperature monitoring of the FPGA chip is done with the MAX6639 chip. Selection, mounting, and configuration of the fans is outside the scope of this document. The fans can be automatically controlled by measuring the temperature on the diode inside the FPGA. If the temperature exceeds a preset alarm threshold an ALERT signal will be issued. If the temperature continues to rise and exceeds another threshold, an "OVER-TEMP" signal will be issued and the FPGA will automatically power down. Through the I2C interface it is possible to read and write all MAX6639 configuration registers. Additionally, signals from both fan tachometers are monitored.
Two additional temperature sensors based on the LM75 chip provide temperature measurements around the main power converter and under the FMC P1 card. By default they are set to shut down the main power converter when it exceeds 75 C.
\begin{figure}[H]
\begin{center}
\includegraphics[width=0.6\linewidth]{fans.png}
\caption{FANs connectors}\label{fans}
\end{center}
\end{figure}
\begin{table}[htbp]
\centering
\begin{tabular}{@{}llll@{}}
\toprule
\multicolumn{2}{c}{\textbf{M1}} & \multicolumn{2}{c}{\textbf{M2}} \\ \midrule
1 & GND & 1 & GND \\
2 & 12V & 2 & 12V \\
3 & Tacho & 3 & Tacho \\ \bottomrule
\end{tabular}
\caption{Fan connector pinout}
\label{tab:fan-pinout}
\end{table}
\section{MMC}
Module Management Controller (MMC) is based on STM32F207 microcontroller and provides housekeeping functions such as:
\begin{enumerate}
\item Simple UART console over USB-UART bridge to control all functions
\item Monitoring voltage, current consumption and warning signals on power rails
\item Temperature monitoring at several locations
\item Controlling and monitoring fans
\item Configuring clock multiplexer
\item Configuring MGT switches
\item Resetting FPGA and controlling booting
\item Programming Power Management Controller
\item Controlling FMC power delivery and the presence of the cards
\item Ensuring communication over MDIO with Ethernet PHY
\item Ensuring communication over SPI with FPGA
\end{enumerate}
\subsection{Programing}
MMC programming can be done by using external tools such as STM Nucleo-SWD programmer, SEGGER J-LINK Mini (Fig. \ref{mmcjtag}, Fig. \ref{mmcjtagswd}).
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{mmcjtag.png}
\caption{MMC JTAG and SWD interfaces}\label{mmcjtag}
\end{center}
\end{figure}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{mmcjtagswd.png}
\caption{MMC JTAG and SWD connectors}\label{mmcjtagswd}
\end{center}
\end{figure}
Download the latest version of the microcontroller testing code from GitHub:
\begin{leftbar}
https://gitlab.lbl.gov/spaiagua/marble\_mmc/-/tree/unified\_marble
\end{leftbar}
A recent version of OpenOCD (v0.10.0 or later) is required.
\begin{enumerate}
\item Connect JTAG module to \textbf{J14}
\item Connect the micro USB cable and, using a serial terminal, connect to the last of the newly listed serial ports in the operating system. Use a 115200 baud rate.
\item Power up the board.
\item Program the microcontroller using the following commands:
\begin{enumerate}
\item Go to the main folder of the downloaded repository.
\item Open command terminal and run command:
\begin{lstlisting}[backgroundcolor = \color{Gainsboro}, language=bash, frame=none]
$ make marble_download
\end{lstlisting}
\end{enumerate}
\item After successful programming, a menu should appear in the serial terminal and the LEDs (LD15, LD11, LD12) should blink in a ``snake'' pattern.
\end{enumerate}
\subsection{LEDs}
Three general purpose LEDs are connected to the MMC chip:
\begin{enumerate}
\item LED11 - connected to pin PE1
\item LED12 - connected to pin PE2
\item LED15 - connected to pin PE0
\end{enumerate}
\subsection{I2C Tree}
Marble is equipped with two I2C buses (block diagram is shown in fig. \ref{i2c}):
\begin{enumerate}
\item I2C\_PM - supports devices for power management, temperature measurement and fan control
\item I2C\_FPGA - This bus is shared between the MMC and the FPGA chip. Through an I2C switch, it is connected to:
\begin{enumerate}
\item FMC 1 and FMC 2
\item Clock multiplexer
\item SO-DIMM module
\item QSFP 1 and QSFP 2
\item Current measurement devices, Si570 and IO expanders
\end{enumerate}
\end{enumerate}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{marble2_i2c.png}
\caption{I2C map}\label{i2c}
\end{center}
\end{figure}
\section{Mechanical dimensions}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{mechanical.png}
\caption{Mechanical dimensions}\label{mechanical}
\end{center}
\end{figure}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{holes.png}
\caption{Mounting holes positioning}\label{holes}
\end{center}
\end{figure}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{connectors.png}
\caption{Protruding connectors positioning}\label{connectors}
\end{center}
\end{figure}
\section{Appendix}
\subsection{Power supply test points}
\begin{figure}[H]
\begin{center}
\includegraphics[width=1\linewidth]{testpointsnumb.png}
\caption{Test point map}\label{testpoints}
\end{center}
\end{figure}
\begin{enumerate}[label=(\alph*)]
\item Number \textbf{1} - \textbf{TP13 VTT\_DDR3 (0.75V)}.
\item Number \textbf{2} - \textbf{TP5 MGTAVCC\_DDR3 (1.05V)}.
\item Number \textbf{3} - \textbf{PoE}
\item Number \textbf{4} - \textbf{MGTAVTT (+1.2V)}
\item Number \textbf{5} - \textbf{MGTAVCC (+1.05V)}
\item Number \textbf{6} - \textbf{+1V5}
\item Number \textbf{7} - \textbf{VCCAUXIO2V0 (+2.0V)}
\item Number \textbf{8} - \textbf{VCCAUX (+1.8V)}
\item Number \textbf{9} - \textbf{VCCBRAM (+1.0V)}
\item Number \textbf{10} - \textbf{+2V5}
\item Number \textbf{11} - \textbf{+3V3}
\item Number \textbf{12} - \textbf{GND}
\item Number \textbf{13} - \textbf{VTT\_DDR3 (+0.75V)}
\item Number \textbf{14} - \textbf{+3.3P (always-on)}
\item Number \textbf{15} - \textbf{+3V3\_USB}
\end{enumerate}
Test points 12-15 are not present on Marble v1.0.
\begin{thebibliography}{99}
%\bibitem{web1} \url{http://bit.ly/PhysLab_Link01}
%\bibitem{web2} \url{http://bit.ly/PhysLab_Link02}
\end{thebibliography}
\end{document}
|
{"hexsha": "9e9710859c1fc0ddc4d4ef00fe85e5d9583d4526", "size": 31109, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/marble_user_guide/Marble_User_Guide.tex", "max_stars_repo_name": "BerkeleyLab/Marble", "max_stars_repo_head_hexsha": "df6742000361e6a103fdec6a5667684df13a7a9e", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-02-04T17:01:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:16:29.000Z", "max_issues_repo_path": "docs/marble_user_guide/Marble_User_Guide.tex", "max_issues_repo_name": "BerkeleyLab/Marble", "max_issues_repo_head_hexsha": "df6742000361e6a103fdec6a5667684df13a7a9e", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2019-06-03T12:07:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T01:58:08.000Z", "max_forks_repo_path": "docs/marble_user_guide/Marble_User_Guide.tex", "max_forks_repo_name": "BerkeleyLab/Marble", "max_forks_repo_head_hexsha": "df6742000361e6a103fdec6a5667684df13a7a9e", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-06-13T19:03:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-04T01:36:39.000Z", "avg_line_length": 42.6150684932, "max_line_length": 672, "alphanum_fraction": 0.711337555, "num_tokens": 9740}
|
import numpy as np
import csv
class Grid(object):
    """One-dimensional expanding spatial grid carrying the concentration
    profiles of four species (A, B, Y, Z) for an electrochemical simulation.

    Attributes:
        n     -- number of spatial grid points
        x     -- grid coordinates, shape (n,), starting at x = 1.0
        conc  -- interleaved concentration vector [A0, B0, Y0, Z0, A1, ...]
                 of shape (4*n,); the solver's working vector
        concA, concB, concY, concZ -- per-species profiles, shape (n,)
        g     -- last computed gradient of species B at the first grid point
    """
    def __init__(self, n):
        self.n = n
        self.x = np.zeros(self.n, dtype=np.float64)
        self.conc = np.zeros(self.n * 4, dtype=np.float64)
        self.concA = np.zeros(self.n, dtype=np.float64)
        self.concB = np.zeros(self.n, dtype=np.float64)
        self.concY = np.zeros(self.n, dtype=np.float64)
        self.concZ = np.zeros(self.n, dtype=np.float64)
        self.g = 0.0

    def grid(self, dX, gamma):
        """Build an expanding grid: the first spacing is dX and each
        subsequent spacing grows by a factor of (1 + gamma)."""
        self.x[0] = 1.0
        for i in range(1, self.n):
            self.x[i] = self.x[i - 1] + dX
            dX = dX * (1.0 + gamma)

    # initialize the concentration matrix
    def init_c(self, A: float, B: float, Y: float, Z: float, Theta: float):
        """Set every grid point to the uniform bulk concentrations.

        Theta (the dimensionless potential) is unused by this uniform
        initializer; the parameter is kept so the signature stays
        compatible with the Nernstian initializer variants that previously
        lived in this file (see repository history).
        """
        self.conc[::4] = A
        self.conc[1::4] = B
        self.conc[2::4] = Y
        self.conc[3::4] = Z
        self.concA[:] = A
        self.concB[:] = B
        self.concY[:] = Y
        self.concZ[:] = Z

    def grad(self):
        """Return the negative finite-difference gradient of species B
        between the first two grid points; the value is cached in self.g."""
        # conc[1] and conc[5] are B at grid points 0 and 1 (stride-4 layout)
        self.g = -(self.conc[5] - self.conc[1]) / (self.x[1] - self.x[0])
        return self.g

    def updateAll(self):
        """Refresh the per-species profiles from the interleaved vector.

        Note: numpy basic slicing returns views, so after this call
        concA/concB/concY/concZ alias self.conc rather than copy it.
        """
        self.concA = self.conc[::4]
        self.concB = self.conc[1::4]
        self.concY = self.conc[2::4]
        self.concZ = self.conc[3::4]

    def _save_profile(self, filename, values):
        # Shared CSV writer for the four save* methods; the context manager
        # guarantees the file is closed even if a write fails.
        with open(filename, mode='w', newline='') as f:
            writer = csv.writer(f)
            for xi, ci in zip(self.x, values):
                writer.writerow([xi, ci])

    def saveA(self, filename):
        """Save the (x, concA) profile to a CSV file."""
        self._save_profile(filename, self.concA)

    def saveB(self, filename):
        """Save the (x, concB) profile to a CSV file."""
        self._save_profile(filename, self.concB)

    def saveY(self, filename):
        """Save the (x, concY) profile to a CSV file."""
        self._save_profile(filename, self.concY)

    def saveZ(self, filename):
        """Save the (x, concZ) profile to a CSV file."""
        self._save_profile(filename, self.concZ)

    def massConservation(self, A, B, Y, Z):
        """Return the relative mass-conservation error of the simulation.

        Each concentration profile is integrated over the grid with the
        trapezoid rule and the total is compared against the uniform
        initial state (A, B, Y, Z). Species A is weighted by 2 in both
        totals -- presumably reflecting the reaction stoichiometry; confirm
        against the model equations.
        """
        h = np.diff(self.x)  # per-interval widths of the expanding grid
        massInitial = (2.0 * A + B + Y + Z) * np.sum(h)
        # trapezoid rule: sum of h * (c[i] + c[i+1]) / 2 over all intervals
        massA = 2.0 * np.sum(h * (self.concA[:-1] + self.concA[1:]) / 2.0)
        massB = np.sum(h * (self.concB[:-1] + self.concB[1:]) / 2.0)
        massY = np.sum(h * (self.concY[:-1] + self.concY[1:]) / 2.0)
        massZ = np.sum(h * (self.concZ[:-1] + self.concZ[1:]) / 2.0)
        massFinal = massA + massB + massY + massZ
        return (massFinal - massInitial) / massInitial
|
{"hexsha": "6739c2896cff75fcf02831712353222f1fab5a00", "size": 4016, "ext": "py", "lang": "Python", "max_stars_repo_path": "Simulation Python/grid.py", "max_stars_repo_name": "nmerovingian/dissociativeCE-Simulation-MachineLearning", "max_stars_repo_head_hexsha": "cfbc8b8e6c9e3f2efc994fcf1d207c6266eedf2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Simulation Python/grid.py", "max_issues_repo_name": "nmerovingian/dissociativeCE-Simulation-MachineLearning", "max_issues_repo_head_hexsha": "cfbc8b8e6c9e3f2efc994fcf1d207c6266eedf2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Simulation Python/grid.py", "max_forks_repo_name": "nmerovingian/dissociativeCE-Simulation-MachineLearning", "max_forks_repo_head_hexsha": "cfbc8b8e6c9e3f2efc994fcf1d207c6266eedf2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4822695035, "max_line_length": 69, "alphanum_fraction": 0.5089641434, "include": true, "reason": "import numpy", "num_tokens": 1315}
|
from typing import Callable, Iterable, Sized
from itertools import product
import numpy as np
def convert_tuple_to_array(elements: Iterable, **kw) -> np.ndarray:
    """Convert an iterable of elements to a numpy array.

    The dtype comes from the ``dtype`` keyword when one is supplied
    (even if it is None); otherwise it is inferred from the elements
    themselves via :func:`numpy.result_type`.
    """
    dtype = kw["dtype"] if "dtype" in kw else np.result_type(*elements)
    return np.array(elements, dtype=dtype)
def cartesian_product(*arrays: Sized, aggregator: Callable, **kw) -> np.ndarray:
    """
    Apply ``aggregator`` to every element of the cartesian product of
    ``arrays`` and arrange the results on a grid.

    Parameters
    ----------
    arrays: iterable of Sized
        The arrays to product.
    aggregator:
        Callable invoked with one tuple from the product iterator;
        may return a scalar or a numpy ndarray.

    Returns
    -------
    ret:
        Array whose leading dimensions index the input arrays, with any
        extra trailing dimensions taken from the aggregator's output.
    """
    stacked = np.stack([aggregator(item, **kw) for item in product(*arrays)])
    grid_shape = tuple(len(a) for a in arrays)
    if stacked.ndim > 1:
        grid_shape = grid_shape + stacked.shape[1:]
    return stacked.reshape(grid_shape)
|
{"hexsha": "080d6517f0615fa74a48ca6f5647538f23f77769", "size": 1035, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mckit_meshes/utils/cartesian_product.py", "max_stars_repo_name": "MC-kit/mckit-meshes", "max_stars_repo_head_hexsha": "8aa06ff95ffd1dabac95f399d45047325c265f78", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mckit_meshes/utils/cartesian_product.py", "max_issues_repo_name": "MC-kit/mckit-meshes", "max_issues_repo_head_hexsha": "8aa06ff95ffd1dabac95f399d45047325c265f78", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-17T18:10:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T11:19:16.000Z", "max_forks_repo_path": "src/mckit_meshes/utils/cartesian_product.py", "max_forks_repo_name": "MC-kit/mckit-meshes", "max_forks_repo_head_hexsha": "8aa06ff95ffd1dabac95f399d45047325c265f78", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.243902439, "max_line_length": 80, "alphanum_fraction": 0.6425120773, "include": true, "reason": "import numpy", "num_tokens": 227}
|
function y = tdis_prb(x,n)
% PURPOSE: calculates t-probabilities for elements in x-vector
%---------------------------------------------------
% USAGE: y = tdis_prb(x,n)
% where: x = vector containing computed t-values
% n = degrees of freedom parameter
%---------------------------------------------------
% RETURNS:
% y = a vector of marginal probability levels
% --------------------------------------------------
% SEE ALSO: fdis_prb(), chis_prb
%---------------------------------------------------
% written by:
% James P. LeSage, Dept of Economics
% University of Toledo
% 2801 W. Bancroft St,
% Toledo, OH 43606
% jpl@jpl.econ.utoledo.edu
% input validation: exactly two arguments and a positive dof are required
if nargin ~= 2; error('Wrong # of arguments to tdis_prb'); end;
if n <=0; error('dof is negative or zero in tdis_prb'); end;
% map each t-value to x2 = n/(n + t^2), the argument of the regularized
% incomplete beta function used for the Student-t tail probability
x2 = n./(n+x.^2);
% clamp x2 into the open interval (0,1) so betainc stays well-defined;
% x2 reaches the boundaries when t = 0 (x2 = 1) or |t| is huge (x2 -> 0)
one = find(x2 >= 1);
if length(one) > 0
x2(one,1) = 1-1e-12;
end;
zip = find(x2 <= 0);
if length(zip) > 0
x2(zip,1) = 1e-12;
end;
% tmp is the one-tailed probability; y = 2*(1-tmp) = betainc(x2, n/2, 1/2)
% is the two-tailed marginal probability level for each t-value
tmp = 1.0 - 0.5*betainc(x2,0.5*n,0.5);
y = 2*(1-tmp);
% NOTE(review): the indexing x2(one,1) assumes x is a column vector --
% confirm callers never pass row vectors
|
{"author": "ambropo", "repo": "VAR-Toolbox", "sha": "9fe5d763da307cdded2827851325766b3a7c60e1", "save_path": "github-repos/MATLAB/ambropo-VAR-Toolbox", "path": "github-repos/MATLAB/ambropo-VAR-Toolbox/VAR-Toolbox-9fe5d763da307cdded2827851325766b3a7c60e1/OldVersions/v2dot0/Auxiliary/tdis_prb.m"}
|
[STATEMENT]
theorem f0_asymptotic_space_complexity:
"f0_space_usage \<in> O[at_top \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / of_rat \<epsilon>) *
(ln (real n) + 1 / (of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / of_rat \<delta>))))"
(is "_ \<in> O[?F](?rhs)")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
define n_of :: "nat \<times> rat \<times> rat \<Rightarrow> nat" where "n_of = (\<lambda>(n, \<epsilon>, \<delta>). n)"
[PROOF STATE]
proof (state)
this:
n_of = (\<lambda>(n, \<epsilon>, \<delta>). n)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
define \<epsilon>_of :: "nat \<times> rat \<times> rat \<Rightarrow> rat" where "\<epsilon>_of = (\<lambda>(n, \<epsilon>, \<delta>). \<epsilon>)"
[PROOF STATE]
proof (state)
this:
\<epsilon>_of = (\<lambda>(n, \<epsilon>, \<delta>). \<epsilon>)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
define \<delta>_of :: "nat \<times> rat \<times> rat \<Rightarrow> rat" where "\<delta>_of = (\<lambda>(n, \<epsilon>, \<delta>). \<delta>)"
[PROOF STATE]
proof (state)
this:
\<delta>_of = (\<lambda>(n, \<epsilon>, \<delta>). \<delta>)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
define t_of where "t_of = (\<lambda>x. nat \<lceil>80 / (real_of_rat (\<delta>_of x))\<^sup>2\<rceil>)"
[PROOF STATE]
proof (state)
this:
t_of = (\<lambda>x. nat \<lceil>80 / (real_of_rat (\<delta>_of x))\<^sup>2\<rceil>)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
define s_of where "s_of = (\<lambda>x. nat \<lceil>-(18 * ln (real_of_rat (\<epsilon>_of x)))\<rceil>)"
[PROOF STATE]
proof (state)
this:
s_of = (\<lambda>x. nat \<lceil>- (18 * ln (real_of_rat (\<epsilon>_of x)))\<rceil>)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
define r_of where "r_of = (\<lambda>x. nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))"
[PROOF STATE]
proof (state)
this:
r_of = (\<lambda>x. nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
define g where "g = (\<lambda>x. ln (1 / of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) +
1 / (of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / of_rat (\<delta>_of x)))))"
[PROOF STATE]
proof (state)
this:
g = (\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have evt: "(\<And>x.
0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and>
1/real_of_rat (\<delta>_of x) \<ge> \<delta> \<and> 1/real_of_rat (\<epsilon>_of x) \<ge> \<epsilon> \<and>
real (n_of x) \<ge> n \<Longrightarrow> P x) \<Longrightarrow> eventually P ?F" (is "(\<And>x. ?prem x \<Longrightarrow> _) \<Longrightarrow> _")
for \<delta> \<epsilon> n P
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>x. 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x) \<Longrightarrow> P x) \<Longrightarrow> eventually P (sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0)
[PROOF STEP]
apply (rule eventually_mono[where P="?prem" and Q="P"])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. (\<And>x. 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x) \<Longrightarrow> P x) \<Longrightarrow> \<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x)
2. \<And>x. \<lbrakk>\<And>x. 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x) \<Longrightarrow> P x; 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x)\<rbrakk> \<Longrightarrow> P x
[PROOF STEP]
apply (simp add:\<epsilon>_of_def case_prod_beta' \<delta>_of_def n_of_def)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> \<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x)
2. \<And>x. \<lbrakk>\<And>x. 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x) \<Longrightarrow> P x; 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x)\<rbrakk> \<Longrightarrow> P x
[PROOF STEP]
apply (intro eventually_conj eventually_prod1' eventually_prod2'
sequentially_inf eventually_at_right_less inv_at_right_0_inf)
[PROOF STATE]
proof (prove)
goal (10 subgoals):
1. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> sequentially \<noteq> bot
2. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> at_right 0 \<noteq> bot
3. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> sequentially \<noteq> bot
4. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> at_right 0 \<noteq> bot
5. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> sequentially \<noteq> bot
6. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> at_right 0 \<noteq> bot
7. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> sequentially \<noteq> bot
8. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> at_right 0 \<noteq> bot
9. (\<And>x. 0 < snd (snd x) \<and> 0 < fst (snd x) \<and> \<delta> \<le> 1 / real_of_rat (snd (snd x)) \<and> \<epsilon> \<le> 1 / real_of_rat (fst (snd x)) \<and> n \<le> real (fst x) \<Longrightarrow> P x) \<Longrightarrow> at_right 0 \<times>\<^sub>F at_right 0 \<noteq> bot
10. \<And>x. \<lbrakk>\<And>x. 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x) \<Longrightarrow> P x; 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> \<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> \<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> n \<le> real (n_of x)\<rbrakk> \<Longrightarrow> P x
[PROOF STEP]
by (auto simp add:prod_filter_eq_bot)
[PROOF STATE]
proof (state)
this:
(\<And>x. 0 < real_of_rat (\<delta>_of x) \<and> 0 < real_of_rat (\<epsilon>_of x) \<and> ?\<delta> \<le> 1 / real_of_rat (\<delta>_of x) \<and> ?\<epsilon> \<le> 1 / real_of_rat (\<epsilon>_of x) \<and> ?n \<le> real (n_of x) \<Longrightarrow> ?P x) \<Longrightarrow> eventually ?P (sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have exp_pos: "exp k \<le> real x \<Longrightarrow> x > 0" for k x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. exp k \<le> real x \<Longrightarrow> 0 < x
[PROOF STEP]
using exp_gt_zero gr0I
[PROOF STATE]
proof (prove)
using this:
0 < exp ?x
(?n = 0 \<Longrightarrow> False) \<Longrightarrow> 0 < ?n
goal (1 subgoal):
1. exp k \<le> real x \<Longrightarrow> 0 < x
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have exp_gt_1: "exp 1 \<ge> (1::real)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 \<le> exp 1
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
1 \<le> exp 1
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have 1: "(\<lambda>_. 1) \<in> O[?F](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
[PROOF STEP]
by (auto intro!:landau_o.big_mono evt[where \<epsilon>="exp 1"] iffD2[OF ln_ge_iff] simp add:abs_ge_iff)
[PROOF STATE]
proof (state)
this:
(\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have 2: "(\<lambda>_. 1) \<in> O[?F](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
by (auto intro!:landau_o.big_mono evt[where \<delta>="exp 1"] iffD2[OF ln_ge_iff] simp add:abs_ge_iff)
[PROOF STATE]
proof (state)
this:
(\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have 3: " (\<lambda>x. 1) \<in> O[?F](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
using exp_pos
[PROOF STATE]
proof (prove)
using this:
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. (\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
by (intro landau_sum_2 2 evt[where n="exp 1" and \<delta>="1"] ln_ge_zero iffD2[OF ln_ge_iff], auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have 4: "(\<lambda>_. 1) \<in> O[?F](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
using one_le_power
[PROOF STATE]
proof (prove)
using this:
(1::?'a) \<le> ?a \<Longrightarrow> (1::?'a) \<le> ?a ^ ?n
goal (1 subgoal):
1. (\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
by (intro landau_o.big_mono evt[where \<delta>="1"], auto simp add:power_one_over[symmetric])
[PROOF STATE]
proof (state)
this:
(\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "(\<lambda>x. 80 * (1 / (real_of_rat (\<delta>_of x))\<^sup>2)) \<in> O[?F](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 80 * (1 / (real_of_rat (\<delta>_of x))\<^sup>2)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
by (subst landau_o.big.cmult_in_iff, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 80 * (1 / (real_of_rat (\<delta>_of x))\<^sup>2)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence 5: "(\<lambda>x. real (t_of x)) \<in> O[?F](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. 80 * (1 / (real_of_rat (\<delta>_of x))\<^sup>2)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. (\<lambda>x. real (t_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
unfolding t_of_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. 80 * (1 / (real_of_rat (\<delta>_of x))\<^sup>2)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. (\<lambda>x. real (nat \<lceil>80 / (real_of_rat (\<delta>_of x))\<^sup>2\<rceil>)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
by (intro landau_real_nat landau_ceil 4, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. real (t_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "(\<lambda>x. ln (real_of_rat (\<epsilon>_of x))) \<in> O[?F](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. ln (real_of_rat (\<epsilon>_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
[PROOF STEP]
by (intro landau_o.big_mono evt[where \<epsilon>="1"], auto simp add:ln_div)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (real_of_rat (\<epsilon>_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence 6: "(\<lambda>x. real (s_of x)) \<in> O[?F](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real_of_rat (\<epsilon>_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
goal (1 subgoal):
1. (\<lambda>x. real (s_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
[PROOF STEP]
unfolding s_of_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real_of_rat (\<epsilon>_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
goal (1 subgoal):
1. (\<lambda>x. real (nat \<lceil>- (18 * ln (real_of_rat (\<epsilon>_of x)))\<rceil>)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
[PROOF STEP]
by (intro landau_nat_ceil 1, simp)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. real (s_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have 7: " (\<lambda>x. 1) \<in> O[?F](\<lambda>x. ln (real (n_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
using exp_pos
[PROOF STATE]
proof (prove)
using this:
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. (\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
by (auto intro!: landau_o.big_mono evt[where n="exp 1"] iffD2[OF ln_ge_iff] simp: abs_ge_iff)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have 8:" (\<lambda>_. 1) \<in>
O[?F](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
using order_trans[OF exp_gt_1] exp_pos
[PROOF STATE]
proof (prove)
using this:
exp 1 \<le> ?z \<Longrightarrow> 1 \<le> ?z
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. (\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
by (intro landau_sum_1 7 evt[where n="exp 1" and \<delta>="1"] ln_ge_zero iffD2[OF ln_ge_iff]
mult_nonneg_nonneg add_nonneg_nonneg) auto
[PROOF STATE]
proof (state)
this:
(\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "(\<lambda>x. ln (real (s_of x) + 1)) \<in> O[?F](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. ln (real (s_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
[PROOF STEP]
by (intro landau_ln_3 sum_in_bigo 6 1, simp)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (real (s_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence 9: "(\<lambda>x. log 2 (real (s_of x) + 1)) \<in> O[?F](g)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (s_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (s_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
[PROOF STEP]
unfolding g_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (s_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (s_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))))
[PROOF STEP]
by (intro landau_o.big_mult_1 8, auto simp:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (real (s_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have 10: "(\<lambda>x. 1) \<in> O[?F](g)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
[PROOF STEP]
unfolding g_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))))
[PROOF STEP]
by (intro landau_o.big_mult_1 8 1)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "(\<lambda>x. ln (real (t_of x) + 1)) \<in>
O[?F](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. ln (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
using 5
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. real (t_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. (\<lambda>x. ln (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
by (intro landau_o.big_mult_1 3 landau_ln_3 sum_in_bigo 4, simp_all)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence " (\<lambda>x. log 2 (real (t_of x) + 1)) \<in>
O[?F](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
using order_trans[OF exp_gt_1] exp_pos
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
exp 1 \<le> ?z \<Longrightarrow> 1 \<le> ?z
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
by (intro landau_sum_2 evt[where n="exp 1" and \<delta>="1"] ln_ge_zero iffD2[OF ln_ge_iff]
mult_nonneg_nonneg add_nonneg_nonneg) (auto simp add:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence 11: "(\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[?F](g)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
[PROOF STEP]
unfolding g_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))))
[PROOF STEP]
by (intro landau_o.big_mult_1' 1, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have " (\<lambda>x. 1) \<in> O[?F](\<lambda>x. real (n_of x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. real (n_of x))
[PROOF STEP]
by (intro landau_o.big_mono evt[where n="1"], auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. real (n_of x))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence "(\<lambda>x. ln (real (n_of x) + 21)) \<in> O[?F](\<lambda>x. ln (real (n_of x)))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. real (n_of x))
goal (1 subgoal):
1. (\<lambda>x. ln (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
by (intro landau_ln_2[where a="2"] evt[where n="2"] sum_in_bigo, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence 12: "(\<lambda>x. log 2 (real (n_of x) + 21)) \<in> O[?F](g)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
[PROOF STEP]
unfolding g_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))))
[PROOF STEP]
using exp_pos order_trans[OF exp_gt_1]
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
exp 1 \<le> ?z \<Longrightarrow> 1 \<le> ?z
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))))
[PROOF STEP]
by (intro landau_o.big_mult_1' 1 landau_sum_1 evt[where n="exp 1" and \<delta>="1"]
ln_ge_zero iffD2[OF ln_ge_iff] mult_nonneg_nonneg add_nonneg_nonneg) (auto simp add:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "(\<lambda>x. ln (1 / real_of_rat (\<delta>_of x))) \<in> O[?F](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. ln (1 / real_of_rat (\<delta>_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
by (intro landau_ln_3 evt[where \<delta>="1"] landau_o.big_mono)
(auto simp add:power_one_over[symmetric] self_le_power)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (1 / real_of_rat (\<delta>_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence " (\<lambda>x. real (nat (4*\<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil>+23))) \<in> O[?F](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (1 / real_of_rat (\<delta>_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. (\<lambda>x. real (nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
using 4
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (1 / real_of_rat (\<delta>_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
(\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. (\<lambda>x. real (nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
by (auto intro!: landau_real_nat sum_in_bigo landau_ceil simp:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. real (nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence " (\<lambda>x. ln (real (r_of x) + 1)) \<in> O[?F](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. real (nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. (\<lambda>x. ln (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
unfolding r_of_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. real (nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. (\<lambda>x. ln (real (nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23)) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
[PROOF STEP]
by (intro landau_ln_3 sum_in_bigo 4, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence " (\<lambda>x. log 2 (real (r_of x) + 1)) \<in>
O[?F](\<lambda>x. (1 / (real_of_rat (\<delta>_of x))\<^sup>2) * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
by (intro landau_o.big_mult_1 3, simp add:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence " (\<lambda>x. log 2 (real (r_of x) + 1)) \<in>
O[?F](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
using exp_pos order_trans[OF exp_gt_1]
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
exp 1 \<le> ?z \<Longrightarrow> 1 \<le> ?z
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
by (intro landau_sum_2 evt[where n="exp 1" and \<delta>="1"] ln_ge_zero
iffD2[OF ln_ge_iff] add_nonneg_nonneg mult_nonneg_nonneg) (auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence 13: "(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[?F](g)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
[PROOF STEP]
unfolding g_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))))
[PROOF STEP]
by (intro landau_o.big_mult_1' 1, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have 14: "(\<lambda>x. 1) \<in> O[?F](\<lambda>x. real (n_of x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. real (n_of x))
[PROOF STEP]
by (intro landau_o.big_mono evt[where n="1"], auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. real (n_of x))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "(\<lambda>x. ln (real (n_of x) + 13)) \<in> O[?F](\<lambda>x. ln (real (n_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. ln (real (n_of x) + 13)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
using 14
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. real (n_of x))
goal (1 subgoal):
1. (\<lambda>x. ln (real (n_of x) + 13)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
by (intro landau_ln_2[where a="2"] evt[where n="2"] sum_in_bigo, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (real (n_of x) + 13)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence "(\<lambda>x. ln (log 2 (real (n_of x) + 13))) \<in> O[?F](\<lambda>x. ln (ln (real (n_of x))))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (n_of x) + 13)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. (\<lambda>x. ln (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))))
[PROOF STEP]
using exp_pos
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (real (n_of x) + 13)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. (\<lambda>x. ln (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))))
[PROOF STEP]
by (intro landau_ln_2[where a="2"] iffD2[OF ln_ge_iff] evt[where n="exp 2"])
(auto simp add:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence "(\<lambda>x. log 2 (log 2 (real (n_of x) + 13))) \<in> O[?F](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))))
goal (1 subgoal):
1. (\<lambda>x. log 2 (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
using exp_pos
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))))
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. (\<lambda>x. log 2 (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
by (intro landau_sum_1 evt[where n="exp 1" and \<delta>="1"] ln_ge_zero iffD2[OF ln_ge_iff])
(auto simp add:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(\<lambda>x. log 2 (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "(\<lambda>x. real (r_of x)) \<in> O[?F](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
unfolding r_of_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. real (nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
using 2
[PROOF STATE]
proof (prove)
using this:
(\<lambda>_. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. (\<lambda>x. real (nat (4 * \<lceil>log 2 (1 / real_of_rat (\<delta>_of x))\<rceil> + 23))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
by (auto intro!: landau_real_nat sum_in_bigo landau_ceil simp:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence "(\<lambda>x. real (r_of x)) \<in> O[?F](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. (\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
using exp_pos
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<delta>_of x)))
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. (\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
by (intro landau_sum_2 evt[where n="exp 1" and \<delta>="1"] ln_ge_zero iffD2[OF ln_ge_iff], auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>x. log 2 (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
(\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
have 15:" (\<lambda>x. real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13))))
\<in> O[?F](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
(\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. (\<lambda>x. real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
using 5 3
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (log 2 (real (n_of x) + 13))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
(\<lambda>x. real (r_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
(\<lambda>x. real (t_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2)
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. (\<lambda>x. real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
by (intro landau_o.mult sum_in_bigo, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13))))
\<in> O[?F](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
have "\<forall>\<^sub>F x in ?F. 0 \<le> ln (real (n_of x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> ln (real (n_of x))
[PROOF STEP]
by (intro evt[where n="1"] ln_ge_zero, auto)
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> ln (real (n_of x))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> ln (real (n_of x))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
have "\<forall>\<^sub>F x in ?F. 0 \<le> 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
using exp_pos
[PROOF STATE]
proof (prove)
using this:
exp ?k \<le> real ?x \<Longrightarrow> 0 < ?x
goal (1 subgoal):
1. \<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
[PROOF STEP]
by (intro evt[where n="exp 1" and \<delta>="1"] mult_nonneg_nonneg add_nonneg_nonneg
ln_ge_zero iffD2[OF ln_ge_iff]) auto
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
have " (\<lambda>x. ln (21 + real (n_of x))) \<in> O[?F](\<lambda>x. ln (real (n_of x)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. ln (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
using 14
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. real (n_of x))
goal (1 subgoal):
1. (\<lambda>x. ln (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
by (intro landau_ln_2[where a="2"] sum_in_bigo evt[where n="2"], auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. ln (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
hence "(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x))) \<in> O[?F](\<lambda>x. ln (real (n_of x)))"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
using 7
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. ln (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
by (intro sum_in_bigo, auto simp add:log_def)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> ln (real (n_of x))
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> ln (real (n_of x))
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
using 15
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> ln (real (n_of x))
\<forall>\<^sub>F x in sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0. 0 \<le> 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))
(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)))
(\<lambda>x. real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
[PROOF STEP]
by (rule landau_sum)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
hence 16: "(\<lambda>x. real (s_of x) * (5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) *
(13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13))))) \<in> O[?F](g)"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. real (s_of x) * (5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13))))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
[PROOF STEP]
unfolding g_def
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. 5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13)))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x))))
goal (1 subgoal):
1. (\<lambda>x. real (s_of x) * (5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13))))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>x. ln (1 / real_of_rat (\<epsilon>_of x)) * (ln (real (n_of x)) + 1 / (real_of_rat (\<delta>_of x))\<^sup>2 * (ln (ln (real (n_of x))) + ln (1 / real_of_rat (\<delta>_of x)))))
[PROOF STEP]
by (intro landau_o.mult 6, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. real (s_of x) * (5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13))))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "f0_space_usage = (\<lambda>x. f0_space_usage (n_of x, \<epsilon>_of x, \<delta>_of x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f0_space_usage = (\<lambda>x. f0_space_usage (n_of x, \<epsilon>_of x, \<delta>_of x))
[PROOF STEP]
by (simp add:case_prod_beta' n_of_def \<epsilon>_of_def \<delta>_of_def)
[PROOF STATE]
proof (state)
this:
f0_space_usage = (\<lambda>x. f0_space_usage (n_of x, \<epsilon>_of x, \<delta>_of x))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
f0_space_usage = (\<lambda>x. f0_space_usage (n_of x, \<epsilon>_of x, \<delta>_of x))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "... \<in> O[?F](g)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. f0_space_usage (n_of x, \<epsilon>_of x, \<delta>_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
[PROOF STEP]
using 9 10 11 12 13 16
[PROOF STATE]
proof (prove)
using this:
(\<lambda>x. log 2 (real (s_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
(\<lambda>x. 1) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
(\<lambda>x. log 2 (real (t_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
(\<lambda>x. log 2 (real (n_of x) + 21)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
(\<lambda>x. log 2 (real (r_of x) + 1)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
(\<lambda>x. real (s_of x) * (5 + 2 * log 2 (21 + real (n_of x)) + real (t_of x) * (13 + 4 * real (r_of x) + 2 * log 2 (log 2 (real (n_of x) + 13))))) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. (\<lambda>x. f0_space_usage (n_of x, \<epsilon>_of x, \<delta>_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
[PROOF STEP]
by (simp add:fun_cong[OF s_of_def[symmetric]] fun_cong[OF t_of_def[symmetric]]
fun_cong[OF r_of_def[symmetric]] Let_def) (intro sum_in_bigo, auto)
[PROOF STATE]
proof (state)
this:
(\<lambda>x. f0_space_usage (n_of x, \<epsilon>_of x, \<delta>_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<lambda>x. f0_space_usage (n_of x, \<epsilon>_of x, \<delta>_of x)) \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g)
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
have "... = O[?F](?rhs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g) = O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
by (simp add:case_prod_beta' g_def n_of_def \<epsilon>_of_def \<delta>_of_def)
[PROOF STATE]
proof (state)
this:
O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](g) = O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
goal (1 subgoal):
1. f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
f0_space_usage \<in> O[sequentially \<times>\<^sub>F at_right 0 \<times>\<^sub>F at_right 0](\<lambda>(n, \<epsilon>, \<delta>). ln (1 / real_of_rat \<epsilon>) * (ln (real n) + 1 / (real_of_rat \<delta>)\<^sup>2 * (ln (ln (real n)) + ln (1 / real_of_rat \<delta>))))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 35859, "file": "Frequency_Moments_Frequency_Moment_0", "length": 144}
|
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
\section{Understanding the "right-hand side"\label{RHS}}
%HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH
As can be seen from the above example, you need to know the exact appearance
of the "right-hand side"
in order to correctly write your semantic action.
Your method must be prepared
to handle a varying configuration that depends on the parsed text.
To have this configuration in front of you while writing the code,
it is convenient to describe it by a comment as shown above.
(The author has learned this style from his colleague Bertil Steinholtz,
who used it to write very clear \tx{yacc++} code.)
The objects appearing on the "right-hand side"
correspond to the names and terminals appearing to the right of \tx{"="}
in the rule.
You can find the possible configurations of the "right-hand side"
like this:
\smallskip
\fbox{\quad\parbox{0.935\linewidth}{\upsp
Take the expression to the right of \tx{"="}
and remove from it all predicates, that is, sub-expressions
of the form $\text{\&}e$ and $\text{!}e$.
Replace each \tx{"/"} by \tx{"|"}, \tx{"*+"} by \tx{"*"},
and \tx{"++"} by \tx{"+"}.
The result is a regular expression on an alphabet consisting
of names and terminals treated as single letters.
The possible configurations of the "right-hand side"
are exactly the strings defined by this regular expression.\dnsp}\quad }
\medskip
As an example, the regular expression obtained for \Sum\ is:
\small
\begin{Verbatim}[samepage=true,xleftmargin=15mm,baselinestretch=0.8]
Number ("+" Number)*
\end{Verbatim}
\normalsize
which indeed defines these strings of symbols \Number\ and \tx{"+"}:
\small
\begin{Verbatim}[samepage=true,xleftmargin=15mm,baselinestretch=0.8]
Number "+" Number ... "+" Number
0 1 2 n-2 n-1
\end{Verbatim}
\normalsize
where $n = 2p + 1$ for $p = 0, 1, 2, \ldots\;$.
|
{"hexsha": "b9068821446117fee4b5c832d5ee28648e39fed3", "size": 1993, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Mouse/source/manual/RHS.tex", "max_stars_repo_name": "celer/mouse", "max_stars_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-01-30T11:17:56.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-08T14:06:28.000Z", "max_issues_repo_path": "Mouse/source/manual/RHS.tex", "max_issues_repo_name": "celer/mouse", "max_issues_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-04-07T06:22:47.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-31T11:00:12.000Z", "max_forks_repo_path": "Mouse/source/manual/RHS.tex", "max_forks_repo_name": "celer/mouse", "max_forks_repo_head_hexsha": "021a81f0c02fc079a944569ba382f2c9d7b9b9eb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2363636364, "max_line_length": 77, "alphanum_fraction": 0.7250376317, "num_tokens": 580}
|
# ======================================================
# Copyright (C) 2020 repa1030
# This program and the accompanying materials
# are made available under the terms of the MIT license.
# ======================================================
import numpy as np
import os
import math
class KNearestNeighbor:
    """Simple k-nearest-neighbor classifier over numeric feature rows.

    Each data row is a sequence whose first element is the class label and
    whose remaining elements are numeric features. The original layout used
    exactly 4 features (indices 1..4); the distance computation below works
    for any feature count and is identical for 4-feature rows.
    """

    def __init__(self, train_data, test_data, detection_classes, k_neighbors=3):
        """Store the data sets and classifier parameters.

        :param train_data: iterable of rows [label, f1, f2, ...]
        :param test_data: iterable of rows [label, f1, f2, ...]
        :param detection_classes: iterable of groups; each group is a
            sequence whose first element is an object name and whose
            remaining elements are the class labels belonging to it,
            e.g. [('car', 0, 1), ('person', 2)]
        :param k_neighbors: number of neighbors used for the majority vote
        """
        self.data_train = train_data
        self.data_test = test_data
        self.k = k_neighbors
        self.det_cl = detection_classes

    def predictClass(self):
        """Classify every test row by a majority vote over its k nearest
        training rows (Euclidean distance on the feature columns).

        Prints a per-object report plus a final summary, and returns the
        number of correctly classified test objects (previously this method
        returned None; the return value is purely additive for callers).
        """
        cnt_all = len(self.data_test)
        cnt_det = 0
        # foreach test data
        for test in self.data_test:
            # distance from this test row to every training row
            dsts = []
            for data in self.data_train:
                # Euclidean distance over all feature columns (index 0 is
                # the class label); generalizes the former hard-coded
                # 4-feature sum while giving identical results for it.
                dst = math.sqrt(sum((t - d) ** 2
                                    for t, d in zip(test[1:], data[1:])))
                dsts.append((data, dst))
            # sort ascending by distance and keep the k nearest rows
            dsts.sort(key=lambda tup: tup[1])
            neighbors = [row for row, _ in dsts[:self.k]]
            # majority vote over the neighbor labels
            out = [row[0] for row in neighbors]
            pred = max(set(out), key=out.count)
            pred_ct = out.count(pred)
            percent = int(float(pred_ct) / self.k * 100.0)
            # assemble the per-object report
            dsp = 'Nearest Neighbors: ' + str(out) + '\n'
            dsp = dsp + 'Object class prediction: ' + str(int(pred)) + ' (' + str(percent) + ' %)\n'
            dsp = dsp + 'Object class ground truth: ' + str(int(test[0])) + '\n'
            # Map predicted / ground-truth labels to their object names.
            # Initialized to None so an unmatched label no longer raises a
            # NameError (previous bug: pred_obj/gt_obj could be unbound;
            # the dead flag cl_set has been removed).
            pred_obj = None
            gt_obj = None
            for cl in self.det_cl:
                if pred in cl:
                    pred_obj = cl[0]
                if test[0] in cl:
                    gt_obj = cl[0]
            if pred_obj is not None and gt_obj is not None and gt_obj == pred_obj:
                dsp = dsp + 'This object was correctly classified as ' + gt_obj + '\n'
                cnt_det += 1
            else:
                dsp = dsp + 'This object was falsely classified as ' + str(pred_obj) + '\n'
            dsp = dsp + '####################'
            print(dsp)
        print('\nSummary K Nearest Neighbor: ' + str(cnt_det) + "/" + str(cnt_all) + ' objects are correctly classified.')
        return cnt_det
|
{"hexsha": "4ec68ab564c60ba91bd907d45f2d91704ba72400", "size": 2699, "ext": "py", "lang": "Python", "max_stars_repo_path": "classificator1/k_nearest_neighbors.py", "max_stars_repo_name": "repa1030/simple_object_classification", "max_stars_repo_head_hexsha": "14c75f262c512ae6404174a96d818dfda10ed060", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classificator1/k_nearest_neighbors.py", "max_issues_repo_name": "repa1030/simple_object_classification", "max_issues_repo_head_hexsha": "14c75f262c512ae6404174a96d818dfda10ed060", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classificator1/k_nearest_neighbors.py", "max_forks_repo_name": "repa1030/simple_object_classification", "max_forks_repo_head_hexsha": "14c75f262c512ae6404174a96d818dfda10ed060", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5230769231, "max_line_length": 122, "alphanum_fraction": 0.4772137829, "include": true, "reason": "import numpy", "num_tokens": 609}
|
import pytest
import numpy as np
from FindTheTail.ftt import Ftt
@pytest.fixture
def ftt_with_parameters():
    # Ftt instance with an explicit Monte-Carlo step count.
    return Ftt(np.arange(10), 'test_data', 100)
@pytest.fixture
def ftt_with_data():
    # Ftt instance constructed with data only (default parameters).
    return Ftt(np.arange(10), 'test_data')
@pytest.fixture
def ftt_data_with_dublicates():
    # Ftt instance whose input consists entirely of duplicate values.
    return Ftt(np.ones(10, dtype='float'), 'test_data')
def test_initialisation_with_parameters(ftt_with_parameters):
    # Constructor must store the data name and Monte-Carlo step count.
    assert ftt_with_parameters.data_name == 'test_data'
    # order should be changed to descending
    assert (ftt_with_parameters.data == np.arange(10)[::-1]).all()
    assert ftt_with_parameters.mc_steps == 100
def test_ftt_with_dublicates_in_data(ftt_data_with_dublicates):
    # Consecutive stored data points must differ strictly, i.e. the input
    # duplicates are no longer present after construction.
    # NOTE(review): assumes Ftt perturbs/removes duplicates on construction
    # — confirm against the Ftt implementation.
    assert (np.abs(ftt_data_with_dublicates.data[:-1]-ftt_data_with_dublicates.data[1:]) > 0).all()
@pytest.mark.parametrize('number, expected', [
    (5., 0),
    (5.1, 1),
    (5.12, 2),
    (5.123, 3),
    (5.123456789, 9),
    (5.0001000, 4),
    (0000.0001, 4),
    (0.000, 0)
])
def test_get_significant_digit(number, expected):
    # Number of decimal digits needed for the fractional part; trailing
    # zeros are ignored and integral values yield 0.
    assert Ftt.get_significant_digit(number) == expected
|
{"hexsha": "dc32cfcc078bcc5139ac9aa7ba665f061442d794", "size": 1128, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_ftt.py", "max_stars_repo_name": "fstroth/findthetail", "max_stars_repo_head_hexsha": "f4525a1393ab362886395bfb3a789446c1ac5143", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-07-26T23:08:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-03T20:09:02.000Z", "max_issues_repo_path": "tests/test_ftt.py", "max_issues_repo_name": "fstroth/findthetail", "max_issues_repo_head_hexsha": "f4525a1393ab362886395bfb3a789446c1ac5143", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-29T14:17:24.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-09T12:08:56.000Z", "max_forks_repo_path": "tests/test_ftt.py", "max_forks_repo_name": "fstroth/findthetail", "max_forks_repo_head_hexsha": "f4525a1393ab362886395bfb3a789446c1ac5143", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-04T12:59:21.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-04T12:59:21.000Z", "avg_line_length": 23.0204081633, "max_line_length": 99, "alphanum_fraction": 0.695035461, "include": true, "reason": "import numpy", "num_tokens": 340}
|
#ifndef INCLUDED_STDDEFX
#include "stddefx.h"
#define INCLUDED_STDDEFX
#endif
#ifndef INCLUDED_CALC_OBJECTLINKRUNTIME
#include "calc_objectlinkruntime.h"
#define INCLUDED_CALC_OBJECTLINKRUNTIME
#endif
// Library headers.
#ifndef INCLUDED_BOOST_FORMAT
#include <boost/format.hpp>
#define INCLUDED_BOOST_FORMAT
#endif
#ifndef INCLUDED_STDEXCEPT
#include <stdexcept>
#define INCLUDED_STDEXCEPT
#endif
// PCRaster library headers.
#ifndef INCLUDED_COM_EXCEPTION
#include "com_exception.h"
#define INCLUDED_COM_EXCEPTION
#endif
// Module headers.
#ifndef INCLUDED_CALC_OPERATOR
#include "calc_operator.h"
#define INCLUDED_CALC_OPERATOR
#endif
#ifndef INCLUDED_CALC_RUNTIMEENV
#include "calc_runtimeenv.h"
#define INCLUDED_CALC_RUNTIMEENV
#endif
#ifndef INCLUDED_CALC_OBJECTLINK
#include "calc_objectlink.h"
#define INCLUDED_CALC_OBJECTLINK
#endif
#ifndef INCLUDED_CALC_FIELD
#include "calc_field.h"
#define INCLUDED_CALC_FIELD
#endif
//------------------------------------------------------------------------------
// DEFINITION OF FREE FUNCTIONS
//------------------------------------------------------------------------------
/*!
* \todo
* lijkt eigenlijk heel veel op een method invocation
*/
void calc::createObjectLink(
const Operator& /* op */,
ObjectLinkFactoryPtr olf,
const std::string& /* stringArg */,
RunTimeEnv* rte,
size_t /* nrFieldArgs */)
{
// PRECOND(stringArg.empty()); // not yet implemented
// PRECOND(!nrFieldArgs); // not yet implemented
ObjectLink *o(0);
o = olf("",rte->rasterSpace(),0);
rte->pushDataValue(o);
}
/*!
 * type1 execution: pops the field arguments and the ObjectLink off the
 * run-time stack, invokes the named method, and pushes the results back.
 * \throws
 *     com::Exception if there is no ObjectLink on the rte.stack
 *      or not such methodName
 */
void calc::execObjectLinkMethod(
    const Operator& op,
    RunTimeEnv* rte,
    size_t nrFieldArgs)
{
  try {
    // holds result fields followed by input fields; owned locally until
    // pushed back on the stack or deleted below
    std::vector<Field *> data;
    try {
      // both results and input
      // results
      for (size_t i=0; i < op.nrResults(); ++i)
        data.push_back(rte->createResultField(op.resultType(i)));
      // input
      for (size_t i=0; i < nrFieldArgs; ++i)
        data.push_back(rte->popField());
      // reverse input part (stack pops arrive in reverse argument order)
      std::reverse(data.begin()+op.nrResults(),data.end());
      ObjectLink* o(0);
      if (rte->stackSize())
        o=dynamic_cast<ObjectLink *>(rte->popDataValue());
      if (!o)
        throw com::Exception((
          boost::format("Method '%1%' called while no ObjectLink present")
          % op.implName()).str());
      o->exec1(op.implName(), data);
      // put result on stack
      size_t in=0;
      for (;in < op.nrResults(); ++in) {
        rte->pushField(data[in]);
        data[in]=0;  // ownership transferred to the stack
      }
      // delete inputs
      for (;in < op.nrResults()+nrFieldArgs; ++in)
        deleteFromPcrme(data[in]);
    } catch (...) {
      // release everything still owned locally, then rethrow
      for (size_t i=0;i < data.size(); ++i)
        deleteFromPcrme(data[i]);
      throw;
    }
  } catch (const std::out_of_range& ) {
    // ObjectLinkProxy has checked vector access
    throw com::Exception(
      (boost::format(" '%1%' called with too few arguments")
      % op.name()).str());
  } catch (const ObjectLink::UnknownMethod& ) {
    throw com::Exception("Unknown method/function name");
  }
}
|
{"hexsha": "de7bfad6850806d4a538a7b98a4fae5a2cfe7b52", "size": 3230, "ext": "cc", "lang": "C++", "max_stars_repo_path": "pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_model_engine/calc_objectlinkruntime.cc", "max_stars_repo_name": "quanpands/wflow", "max_stars_repo_head_hexsha": "b454a55e4a63556eaac3fbabd97f8a0b80901e5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_model_engine/calc_objectlinkruntime.cc", "max_issues_repo_name": "quanpands/wflow", "max_issues_repo_head_hexsha": "b454a55e4a63556eaac3fbabd97f8a0b80901e5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pcraster/pcraster-4.2.0/pcraster-4.2.0/source/pcraster_model_engine/calc_objectlinkruntime.cc", "max_forks_repo_name": "quanpands/wflow", "max_forks_repo_head_hexsha": "b454a55e4a63556eaac3fbabd97f8a0b80901e5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0483870968, "max_line_length": 80, "alphanum_fraction": 0.626006192, "num_tokens": 773}
|
from flask import Flask, jsonify
from flask_restplus import Resource, Api
from google_api import google
from pubmed_api import pubmed
from bioarchive_api import bioarchive
from medrxiv_api import medrxiv
import math
import json
from rake_nltk import Rake
import requests
import datetime
import re
from statistics import mean
import numpy as np
# RAKE keyword extractor: uses English stopwords from NLTK and all
# punctuation characters; keyword phrases are limited to 2-6 words.
a = Rake(min_length=2, max_length=6)
app = Flask(__name__)
api = Api(app)
def PP_Index(gdc, pfc, years_passed):
    """Paper-priority index: weighted association counts scaled by age.

    Args:
        gdc: gene-disease association count.
        pfc: protein-function association count.
        years_passed: publication age proxy (must be non-zero).

    Returns:
        (6*gdc + 5*pfc) / (years_passed / 2020).
    """
    weighted_count = 6 * gdc + 5 * pfc
    age_factor = years_passed / 2020
    return weighted_count / age_factor
@api.route('/search/<string:search_param>/<int:numResults>')
class e(Resource):
    """Aggregated literature-search endpoint.

    GET /search/<search_param>/<numResults> queries PubMed, bioRxiv,
    Google Scholar and medRxiv, extracts keyword phrases and candidate
    gene symbols from each abstract, scores gene-disease and
    variant-disease associations via the DisGeNET web API, and attaches a
    normalized 'ppindex' to every result.
    """

    def get(self, search_param, numResults):
        """Handle the search request.

        Args:
            search_param: free-text query forwarded to all sources.
            numResults: requested number of results per source.

        Returns:
            Flask JSON response {'results': [...]}; each entry is
            annotated with 'keywords', 'genes', 'gdc', 'pfc',
            'UTCDatetime' and 'ppindex'.
        """
        ppindex_all = []
        search_params = search_param
        page_num = numResults
        pubmed_result = pubmed(search_params, page_num)
        biorxiv_result = bioarchive(search_params, page_num)
        # Scholar and medRxiv paginate in blocks of 10 results.
        scholar_result = google(search_params, math.ceil(page_num/10))
        medrxiv_result = medrxiv(search_params, math.ceil(page_num/10))
        pubmed_arr = pubmed_result['results']
        biorxiv_arr = biorxiv_result['results']
        scholar_arr = scholar_result['results']
        medrxiv_arr = medrxiv_result['results']
        combined = pubmed_arr + biorxiv_arr + scholar_arr + medrxiv_arr
        for i in range(len(combined)):
            if combined[i]['abstract']:
                text = combined[i]['abstract']
                a.extract_keywords_from_text(text)
                # To get keyword phrases ranked highest to lowest.
                keywords_extracted = a.get_ranked_phrases()[0:4]
                combined[i]['keywords'] = keywords_extracted
                # Candidate gene symbols: short upper-case tokens,
                # excluding DNA/RNA.
                genes = (re.findall(
                    "[A-Z]+[-]?[0-9]?(?![a-z!@#$%^&*(.?\":{}|<>])", text))
                genes = [x for x in genes if len(x) > 2 and len(
                    x) < 7 and x != "DNA" and x != "RNA"]
                counts = genes.count
                genes_unique = sorted(np.unique(genes), key=counts)[::-1]
                combined[i]['genes'] = genes_unique[0:4]
                gene = ",".join(genes_unique)
                if (len(genes_unique) > 4):
                    genes_unique = genes_unique[0:4]
                    combined[i]['genes'] = genes_unique
                # gene-disease count
                gdc = 0
                # protein function count
                pfc = 0
                PARAMS = {
                    'format': 'json',
                    'limit': 100
                }
                r = requests.get(
                    'https://www.disgenet.org/api/gda/gene/' + gene, params=PARAMS)
                s = requests.get(
                    'https://www.disgenet.org/api/vda/variant/' + gene, params=PARAMS)
                try:
                    for disease in r.json():
                        diseaseNames = [
                            x for x in disease['disease_name'].split(" ") if len(x) > 3]
                        for diseaseName in diseaseNames:
                            if (diseaseName in text):
                                gdc += 1
                                continue
                            proteinName = disease['protein_class_name']
                            if proteinName in text:
                                pfc += 1
                except:
                    # No gene-disease assoc. found (best-effort: API or
                    # JSON errors are deliberately ignored)
                    pass
                try:
                    for variantdisease in s.json():
                        variantdiseaseNames = [
                            x for x in variantdisease['disease_name'].split(" ") if len(x) > 3]
                        for diseaseName in variantdiseaseNames:
                            # BUG FIX: this previously tested against the
                            # undefined name `abstract`; the NameError was
                            # swallowed by the bare except below, silently
                            # disabling variant-disease scoring.
                            if (diseaseName in text):
                                gdc += 1
                                continue
                            proteinName = variantdisease['protein_class_name']
                            if proteinName in text:
                                pfc += 1
                except:
                    # No variant-disease assoc. found
                    pass
                combined[i]['gdc'] = gdc
                combined[i]['pfc'] = pfc
            else:
                combined[i]['keywords'] = ["No Abstract Found"]
                combined[i]['genes'] = ["No Genes Found"]
                combined[i]['gdc'] = 0
                combined[i]['pfc'] = 0
            date = combined[i]['pubDate']
            try:
                date_str = "{} {} {}".format(
                    date['month'], date['day'], date['year'])
                utcDatetime = datetime.datetime.strptime(date_str, '%b %d %Y')
                combined[i]['UTCDatetime'] = utcDatetime
            except:
                # Missing or partial publication dates are tolerated.
                combined[i]['UTCDatetime'] = None
            # pp index
            gdc = combined[i]['gdc']
            gfc = combined[i]['pfc']
            try:
                yearsPassed = 2020 - combined[i]['pubDate']['year']
            except:
                # Unknown year: large value effectively zeroes the index.
                yearsPassed = 1000
            ppindex = PP_Index(gdc, gfc, yearsPassed)
            ppindex_all.append(ppindex)
        norm_ppindex = [x/mean(ppindex_all) for x in ppindex_all]
        # return results of search here
        for i in range(len(combined)):
            combined[i]['ppindex'] = norm_ppindex[i]
        return jsonify({'results': combined})
# Development entry point only; use a WSGI server in production.
if __name__ == "__main__":
    app.run(debug=True)
|
{"hexsha": "bdd5a24c05763c349b9ca76fed777d0ad1059765", "size": 5506, "ext": "py", "lang": "Python", "max_stars_repo_path": "sci_search_api.py", "max_stars_repo_name": "forensx/hatch2020", "max_stars_repo_head_hexsha": "982e6821ec8f0c1862c208a12bdabbf7fad15101", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sci_search_api.py", "max_issues_repo_name": "forensx/hatch2020", "max_issues_repo_head_hexsha": "982e6821ec8f0c1862c208a12bdabbf7fad15101", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sci_search_api.py", "max_forks_repo_name": "forensx/hatch2020", "max_forks_repo_head_hexsha": "982e6821ec8f0c1862c208a12bdabbf7fad15101", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7532467532, "max_line_length": 95, "alphanum_fraction": 0.5007264802, "include": true, "reason": "import numpy", "num_tokens": 1205}
|
# coding=utf-8
import numpy as np
import scipy.sparse as sp
from pymg.problem_base import ProblemBase
class Helmholtz1D_Periodic(ProblemBase):
    """Implementation of the 1D Helmholtz problem with periodic BCs.

    Here we define the 1D Helmholtz problem :math:`-\Delta u - \sigma u = 0`
    with periodic boundary conditions (note the corner entries set in
    ``__init__``). This is the homogeneous problem,
    derive from this class if you want to play around with different RHS.

    Attributes:
        dx (float): mesh size
    """

    def __init__(self, ndofs, sigma=1, *args, **kwargs):
        """Initialization routine for the periodic Helmholtz1D problem

        Args:
            ndofs (int): number of degrees of freedom (see
                :attr:`pymg.problem_base.ProblemBase.ndofs`)
            sigma (float, optional): Helmholtz shift (wave number)
            *args: Variable length argument list
            **kwargs: Arbitrary keyword arguments
        """
        self.dx = 1.0 / ndofs
        # compute system matrix A, scale by 1/dx^2
        A = self.__get_system_matrix(ndofs, self.dx, sigma)
        # couple first and last unknowns -> periodic boundary conditions
        A[0, -1] = A[0, 1]
        A[-1, 0] = A[1, 0]
        A = 1.0 / (self.dx ** 2) * A
        rhs = self.__get_rhs(ndofs)
        super(Helmholtz1D_Periodic, self).__init__(ndofs, A, rhs, *args, **kwargs)

    @staticmethod
    def __get_system_matrix(ndofs, dx, sigma):
        """Helper routine to get the system matrix discretizing the Helmholtz operator
        with second order FD

        Args:
            ndofs (int): number of inner grid points (no boundaries!)
            dx (float): mesh size
            sigma (float): Helmholtz shift (wave number)
        Returns:
            scipy.sparse.csc_matrix: sparse system matrix A
                of size :attr:`ndofs` x :attr:`ndofs`
        """
        data = np.array([[2 - sigma * dx ** 2] * ndofs, [-1] * ndofs, [-1] * ndofs])
        diags = np.array([0, -1, 1])
        return sp.spdiags(data, diags, ndofs, ndofs, format='csc')

    @staticmethod
    def __get_rhs(ndofs):
        """Helper routine to set the right-hand side (all zeros: homogeneous problem)

        Args:
            ndofs (int): number of inner grid points (no boundaries!)
        Returns:
            numpy.ndarray: the right-hand side vector of size :attr:`ndofs`
        """
        return np.zeros(ndofs)

    # @property
    # def u_exact(self):
    #     """Routine to compute the exact solution
    #
    #     Returns:
    #         numpy.ndarray: exact solution array of size :attr:`ndofs`
    #     """
    #     return np.zeros(self.ndofs)
|
{"hexsha": "6a9b4e563cce0254d6859a5041bc1db5e637da84", "size": 2511, "ext": "py", "lang": "Python", "max_stars_repo_path": "project/helmholtz1d_periodic.py", "max_stars_repo_name": "amit17133129/pyMG-2016", "max_stars_repo_head_hexsha": "b82a60811bb0a8b91d8793c47177a240221f9176", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-04-04T15:20:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-01T19:28:55.000Z", "max_issues_repo_path": "project/helmholtz1d_periodic.py", "max_issues_repo_name": "amit17133129/pyMG-2016", "max_issues_repo_head_hexsha": "b82a60811bb0a8b91d8793c47177a240221f9176", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-02T05:44:45.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-02T05:44:45.000Z", "max_forks_repo_path": "project/helmholtz1d_periodic.py", "max_forks_repo_name": "amit17133129/pyMG-2016", "max_forks_repo_head_hexsha": "b82a60811bb0a8b91d8793c47177a240221f9176", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2016-03-26T18:37:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-01T19:44:55.000Z", "avg_line_length": 33.48, "max_line_length": 86, "alphanum_fraction": 0.5826363998, "include": true, "reason": "import numpy,import scipy", "num_tokens": 660}
|
import numpy as np
import matplotlib.pyplot as plt
from ML_functions import gen_events
from density_funcs import rho_baryon
from astropy.io import fits
hdu = fits.open('galaxy1.fits')
data = hdu[1].data
def IMF(m):
    """Kroupa initial mass function (unnormalized), dN/dm ~ m**(-alpha).

    The exponent is piecewise: alpha = 0.3 for m < 0.08,
    1.3 for 0.08 <= m < 0.5, and 2.3 otherwise (masses in solar units).

    Args:
        m (float): stellar mass.

    Returns:
        float: m**(-alpha), the unnormalized IMF value.
    """
    # flat if/elif chain instead of the original nested else/if
    if m < 0.08:
        alpha = 0.3
    elif m < 0.5:
        alpha = 1.3
    else:
        alpha = 2.3
    return np.power(m, -1 * alpha)
# Log-spaced stellar-mass grid: 60 points spanning 1e-2 .. 1e3.
M = np.logspace(-2, 3, 60)
# Bin widths; the last entry reuses the previous width because np.roll wraps.
step = np.roll(M, -1) - M
step[-1] = step[-2]
# NOTE(review): `events` is only referenced by the commented-out block below.
events = np.zeros([100,30])
# Normalize the IMF over the mass grid: norm = sum of IMF(m) * dm.
norm = 0
for (m,s) in zip(M,step):
    i = IMF(m)*s
    norm = norm+i
print('finished normalizing: ', norm)
#for (m,s) in zip(M,step):
#    E, T = gen_events(m, 100, 1.0, rho_baryon, data, iso = False, baryons = False)
#    E = E[1]
#
#    E = E*IMF(m)*s/norm
#
#plt.plot(T, np.sum(E, axis=0))
#
#    print('finished for mass: ', m)
#    print(np.sum(E, axis=0))
#
#    events = np.add(events,E)
#print(events)
#plt.xscale('log')
#plt.yscale('log')
# Accumulate IMF-weighted events for the 'core' target.
events_core = np.zeros([1,30])
for (m,s) in zip(M,step):
    E, T = gen_events(m, 1, 1.0, rho_baryon, 'core', iso = False, baryons = False)
    E = E[1]
    #
    # weight this mass bin by its normalized IMF contribution
    E = E*IMF(m)*s/norm
    #
    #plt.plot(T, np.sum(E, axis=0))
    #
    print('finished for mass: ', m)
    print(np.sum(E, axis=0))
    #
    events_core = np.add(events_core,E)
# Same accumulation for the 'LMC' target.
events_LMC = np.zeros([1,30])
for (m,s) in zip(M,step):
    E, T = gen_events(m, 1, 1.0, rho_baryon, 'LMC', iso = False, baryons = False)
    E = E[1]
    #
    E = E*IMF(m)*s/norm
    #
    #plt.plot(T, np.sum(E, axis=0))
    #
    print('finished for mass: ', m)
    print(np.sum(E, axis=0))
    #
    events_LMC = np.add(events_LMC,E)
#events_one, T = gen_events(0.36, 100, 1.0, rho_baryon, data, iso = False, baryons = False)
#events_one = events_one[1]
#events_iso, T = gen_events(0.36, 100, 1.0, rho_baryon, data, iso = True, baryons = False)
#events_iso = events_iso[1]
#plt.xlabel('crossing time (days)')
#plt.ylabel('number of events per bin')
#plt.plot(T, np.sum(events, axis=0))
#plt.savefig('baryon_event_histogram.png')
#plt.show()
#np.savetxt('baryon_events.txt', events)
# Persist both accumulated event histograms to text files.
np.savetxt('baryon_events_core.txt', events_core)
np.savetxt('baryon_events_LMC.txt', events_LMC)
#np.savetxt('baryon_events_one.txt', events_one)
#np.savetxt('baryon_events_iso.txt', events_iso)
|
{"hexsha": "182a885424d5da616fe7c65756c0593a6a942ba3", "size": 2420, "ext": "py", "lang": "Python", "max_stars_repo_path": "genbaryons.py", "max_stars_repo_name": "HarrisonWinch96/DarkDisk_Microlensing", "max_stars_repo_head_hexsha": "e25d59051771318239116a8d2036aca8ce70236d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "genbaryons.py", "max_issues_repo_name": "HarrisonWinch96/DarkDisk_Microlensing", "max_issues_repo_head_hexsha": "e25d59051771318239116a8d2036aca8ce70236d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "genbaryons.py", "max_forks_repo_name": "HarrisonWinch96/DarkDisk_Microlensing", "max_forks_repo_head_hexsha": "e25d59051771318239116a8d2036aca8ce70236d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4074074074, "max_line_length": 91, "alphanum_fraction": 0.5904958678, "include": true, "reason": "import numpy,from astropy", "num_tokens": 828}
|
#include <procedural_graph/reader/graph_reader_grammar.h>
#include <procedural_graph/reader/named_argument.h>
#include <procedural_graph/reader/node_definition_node.h>
#include <procedural_graph/reader/node_link_node.h>
#include <boost/spirit/include/qi.hpp>
#include <gtest/gtest.h>
using namespace pagoda;
using namespace boost::spirit;
// Test fixture: owns one instance of the graph-reader grammar, templated on
// std::string const iterators, so each TEST_F can drive a single rule.
class GraphReaderGrammarTest : public ::testing::Test
{
    protected:
    void SetUp() {}     // no per-test state to prepare
    void TearDown() {}  // nothing to release

    // grammar under test; individual rules are accessed directly
    GraphReaderGrammar<std::string::const_iterator> m_grammar;
};
// The `literal` rule must parse numeric tokens into the float alternative.
TEST_F(GraphReaderGrammarTest, test_literal_floats)
{
    std::pair<std::string, float> literals[] = {{"123.0", 123.0}, {"0.0", 0.0}, {"-123.0", -123.0}};
    for (const auto& p : literals)  // const&: avoid copying the pair each iteration
    {
        std::string in = std::get<0>(p);
        boost::variant<std::string, float> l;
        std::string::const_iterator begin = std::begin(in);
        std::string::const_iterator end = std::end(in);
        bool r = qi::phrase_parse(begin, end, m_grammar.literal, qi::space, l);
        EXPECT_TRUE(r);
        EXPECT_EQ(begin, end);  // the whole input must be consumed
        EXPECT_EQ(boost::get<float>(l), std::get<1>(p));
    }
}
// Quoted tokens must parse into the string alternative of the variant.
TEST_F(GraphReaderGrammarTest, test_literal_strings)
{
    std::pair<std::string, std::string> literals[] = {{"\"A\"", "A"}};
    for (const auto& p : literals)  // const&: avoid copying the pair each iteration
    {
        boost::variant<std::string, float> l;
        std::string::const_iterator begin = std::begin(std::get<0>(p));
        std::string::const_iterator end = std::end(std::get<0>(p));
        bool r = qi::phrase_parse(begin, end, m_grammar.literal, qi::space, l);
        EXPECT_TRUE(r);
        EXPECT_EQ(begin, end);
        EXPECT_EQ(boost::get<std::string>(l), std::get<1>(p));
    }
}
// Inputs that are neither numbers nor quoted strings must be rejected.
TEST_F(GraphReaderGrammarTest, test_literal_failure)
{
    std::string literals[] = {"A", "-a12"};
    for (const auto& p : literals)  // const&: avoid copying each string
    {
        boost::variant<std::string, float> l;
        std::string::const_iterator begin = std::begin(p);
        std::string::const_iterator end = std::end(p);
        bool r = qi::phrase_parse(begin, end, m_grammar.literal, qi::space, l);
        EXPECT_FALSE(r) << "Should not have matched " << p;
    }
}
// Identifiers: letters/underscore first, then letters, digits or underscore.
TEST_F(GraphReaderGrammarTest, test_identifier)
{
    std::string identifiers[] = {"abc", "_abc", "abc_", "ab_c", "_123", "_"};
    for (const auto& p : identifiers)  // const&: avoid copying each string
    {
        std::string i;
        std::string::const_iterator begin = std::begin(p);
        std::string::const_iterator end = std::end(p);
        bool r = qi::phrase_parse(begin, end, m_grammar.identifier, qi::space, i);
        EXPECT_TRUE(r) << "Should have matched " << p;
        EXPECT_EQ(begin, end);
        EXPECT_EQ(i, p);  // attribute is the matched text itself
    }
}
// Identifiers must not start with a digit or be a lone symbol.
TEST_F(GraphReaderGrammarTest, test_identifier_fail)
{
    std::string identifiers[] = {"1", "123", "1abc", "-"};
    for (const auto& p : identifiers)  // const&: avoid copying each string
    {
        std::string i;
        std::string::const_iterator begin = std::begin(p);
        std::string::const_iterator end = std::end(p);
        bool r = qi::phrase_parse(begin, end, m_grammar.identifier, qi::space, i);
        EXPECT_FALSE(r) << "Should not have matched " << p;
    }
}
// Expressions are delimited by $< ... >$; the attribute is the inner text.
TEST_F(GraphReaderGrammarTest, test_expression)
{
    std::pair<std::string, std::string> expressions[] = {{"$<a;>$", "a;"}, {"$<1+2;>$", "1+2;"}};
    for (const auto& e : expressions)  // const&: avoid copying the pair each iteration
    {
        std::string out;
        std::string::const_iterator begin = std::begin(std::get<0>(e));
        std::string::const_iterator end = std::end(std::get<0>(e));
        bool r = qi::phrase_parse(begin, end, m_grammar.expression, qi::space, out);
        EXPECT_TRUE(r) << "Should have matched " << std::get<0>(e);
        EXPECT_EQ(begin, end);
        EXPECT_EQ(out, std::get<1>(e));
    }
}
// A named simple argument is "name : literal" (string or float value).
TEST_F(GraphReaderGrammarTest, test_named_simple_arg)
{
    std::string args[] = {"a : \"abc\"", "b:123.0"};
    for (const auto& a : args)  // const&: avoid copying each string
    {
        NamedArgumentPtr out;
        std::string::const_iterator begin = std::begin(a);
        std::string::const_iterator end = std::end(a);
        bool r = qi::phrase_parse(begin, end, m_grammar.named_simple_arg, qi::space, out);
        EXPECT_TRUE(r) << "Should have matched " << a;
        EXPECT_EQ(begin, end);
    }
}
// Parsing 'a : "abc"' must yield a string-typed NamedArgument named "a".
TEST_F(GraphReaderGrammarTest, test_named_simple_arg_construction_string)
{
    NamedArgumentPtr parsed;
    std::string input = "a : \"abc\"";
    auto cursor = input.cbegin();
    auto stop = input.cend();
    qi::phrase_parse(cursor, stop, m_grammar.named_simple_arg, qi::space, parsed);
    ASSERT_NE(parsed, nullptr);
    EXPECT_EQ(parsed->GetName(), "a");
    EXPECT_EQ(parsed->GetArgumentType(), NamedArgument::ArgumentType::String);
    EXPECT_EQ(parsed->GetArgumentValue(), "abc");
}
// Parsing 'a : 123' must yield a float-typed NamedArgument with value 123.
TEST_F(GraphReaderGrammarTest, test_named_simple_arg_construction_float)
{
    NamedArgumentPtr parsed;
    std::string input = "a : 123";
    auto cursor = input.cbegin();
    auto stop = input.cend();
    qi::phrase_parse(cursor, stop, m_grammar.named_simple_arg, qi::space, parsed);
    ASSERT_NE(parsed, nullptr);
    EXPECT_EQ(parsed->GetName(), "a");
    EXPECT_EQ(parsed->GetArgumentType(), NamedArgument::ArgumentType::Float);
    EXPECT_EQ(std::atof(parsed->GetArgumentValue().c_str()), 123.0f);
}
// Construction argument lists: empty, two, and one argument.
TEST_F(GraphReaderGrammarTest, test_construction_args)
{
    std::string args[] = {"", "a : \"abc\", b:123.0", "b:123.0"};
    std::size_t expectedSizes[] = {0, 2, 1};
    for (auto i = 0u; i < 3; ++i)
    {
        const auto& a = args[i];  // const&: avoid copying the string
        std::vector<NamedArgumentPtr> out;
        std::string::const_iterator begin = std::begin(a);
        std::string::const_iterator end = std::end(a);
        bool r = qi::phrase_parse(begin, end, m_grammar.construction_args, qi::space, out);
        EXPECT_TRUE(r) << "Should have matched " << a;
        EXPECT_EQ(begin, end);
        EXPECT_EQ(out.size(), expectedSizes[i]);
    }
}
// Named expression arguments accept literals as well as $<...>$ expressions.
TEST_F(GraphReaderGrammarTest, test_named_expression_arg)
{
    std::string args[] = {"a : \"abc\"", "b:123.0", "c:$<1+2;>$"};
    for (const auto& a : args)  // const&: avoid copying each string
    {
        NamedArgumentPtr out;
        std::string::const_iterator begin = std::begin(a);
        std::string::const_iterator end = std::end(a);
        bool r = qi::phrase_parse(begin, end, m_grammar.named_expression_arg, qi::space, out);
        EXPECT_TRUE(r) << "Should have matched " << a;
        EXPECT_EQ(begin, end);
    }
}
// String literal through the expression-arg rule keeps the String type.
TEST_F(GraphReaderGrammarTest, test_named_expression_arg_construction_string)
{
    NamedArgumentPtr parsed;
    std::string input = "a : \"abc\"";
    auto cursor = input.cbegin();
    auto stop = input.cend();
    qi::phrase_parse(cursor, stop, m_grammar.named_expression_arg, qi::space, parsed);
    ASSERT_NE(parsed, nullptr);
    EXPECT_EQ(parsed->GetName(), "a");
    EXPECT_EQ(parsed->GetArgumentType(), NamedArgument::ArgumentType::String);
    EXPECT_EQ(parsed->GetArgumentValue(), "abc");
}
// Numeric literal through the expression-arg rule keeps the Float type.
TEST_F(GraphReaderGrammarTest, test_named_expression_arg_construction_float)
{
    NamedArgumentPtr parsed;
    std::string input = "a : 123";
    auto cursor = input.cbegin();
    auto stop = input.cend();
    qi::phrase_parse(cursor, stop, m_grammar.named_expression_arg, qi::space, parsed);
    ASSERT_NE(parsed, nullptr);
    EXPECT_EQ(parsed->GetName(), "a");
    EXPECT_EQ(parsed->GetArgumentType(), NamedArgument::ArgumentType::Float);
    EXPECT_EQ(std::atof(parsed->GetArgumentValue().c_str()), 123.0f);
}
// A $<...>$ value must produce an Expression-typed argument holding the body.
TEST_F(GraphReaderGrammarTest, test_named_expression_arg_construction_expression)
{
    NamedArgumentPtr parsed;
    std::string input = "a : $<1+1>$";
    auto cursor = input.cbegin();
    auto stop = input.cend();
    qi::phrase_parse(cursor, stop, m_grammar.named_expression_arg, qi::space, parsed);
    ASSERT_NE(parsed, nullptr);
    EXPECT_EQ(parsed->GetName(), "a");
    EXPECT_EQ(parsed->GetArgumentType(), NamedArgument::ArgumentType::Expression);
    EXPECT_EQ(parsed->GetArgumentValue(), "1+1");
}
// Execution argument lists: empty, three (including an expression), and one.
TEST_F(GraphReaderGrammarTest, test_execution_args)
{
    std::string args[] = {"", "a : \"abc\", b:123.0, c:$<a*b;>$", "b:123.0"};
    std::size_t expectedSizes[] = {0, 3, 1};
    for (auto i = 0u; i < 3; ++i)
    {
        const auto& a = args[i];  // const&: avoid copying the string
        std::vector<NamedArgumentPtr> out;
        std::string::const_iterator begin = std::begin(a);
        std::string::const_iterator end = std::end(a);
        bool r = qi::phrase_parse(begin, end, m_grammar.execution_args, qi::space, out);
        EXPECT_TRUE(r) << "Should have matched " << a;
        EXPECT_EQ(begin, end);
        EXPECT_EQ(out.size(), expectedSizes[i]);
    }
}
// Node definitions with construction args and optional {execution args}.
TEST_F(GraphReaderGrammarTest, test_node_definition)
{
    std::string def[] = {"n = Operation(a:1,b:2)", "n = Operation(a:1){}", "n = Operation(a:1){a:$<1+1;>$}"};
    for (const auto& a : def)  // const&: avoid copying each string
    {
        NodeDefinitionNodePtr out;
        std::string::const_iterator begin = std::begin(a);
        std::string::const_iterator end = std::end(a);
        bool r = qi::phrase_parse(begin, end, m_grammar.node_definition, qi::space, out);
        EXPECT_TRUE(r);
        EXPECT_EQ(begin, end) << "Should have matched " << a;
    }
}
// A full definition must record the name, type and both argument lists.
TEST_F(GraphReaderGrammarTest, test_node_definition_construction)
{
    std::string input = "n = Operation(a:1,b:2){c:3}";
    NodeDefinitionNodePtr parsed;
    auto cursor = input.cbegin();
    auto stop = input.cend();
    qi::phrase_parse(cursor, stop, m_grammar.node_definition, qi::space, parsed);
    ASSERT_NE(parsed, nullptr);
    EXPECT_EQ(parsed->GetNodeName(), "n");
    EXPECT_EQ(parsed->GetNodeType(), "Operation");
    EXPECT_EQ(parsed->GetConstructionArguments().size(), 2);
    EXPECT_EQ(parsed->GetExecutionArguments().size(), 1);
}
// Link chains "a->b->c;" (arbitrary length) must parse to completion.
TEST_F(GraphReaderGrammarTest, test_node_links)
{
    std::string links[] = {"n1 -> n2;", "n1->n2->n3;"};
    for (const auto& a : links)  // const&: avoid copying each string
    {
        NodeLinkNodePtr out;
        std::string::const_iterator begin = std::begin(a);
        std::string::const_iterator end = std::end(a);
        bool r = qi::phrase_parse(begin, end, m_grammar.node_links, qi::space, out);
        EXPECT_TRUE(r);
        EXPECT_EQ(begin, end) << "Should have matched " << a;
    }
}
// Chained links "n1->n2->n3;" must record the node names in order.
TEST_F(GraphReaderGrammarTest, test_node_links_construction)
{
    std::string input = "n1->n2->n3;";
    NodeLinkNodePtr parsed;
    auto cursor = input.cbegin();
    auto stop = input.cend();
    qi::phrase_parse(cursor, stop, m_grammar.node_links, qi::space, parsed);
    ASSERT_NE(parsed, nullptr);
    ASSERT_EQ(parsed->GetLinkedNodes().size(), 3);
    std::string expectedNodeNames[] = {"n1", "n2", "n3"};
    uint32_t idx = 0;
    for (const auto& node : parsed->GetLinkedNodes())
    {
        EXPECT_EQ(node, expectedNodeNames[idx++]);
    }
}
|
{"hexsha": "67e0bf57baff940bee5f4eb83a28a639c2351f9b", "size": 9787, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/unit_tests/procedural_graph/graph_reader_grammar.cpp", "max_stars_repo_name": "diegoarjz/selector", "max_stars_repo_head_hexsha": "976abd0d9e721639e6314e2599ef7e6f3dafdc4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2019-04-16T17:35:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-12T14:37:27.000Z", "max_issues_repo_path": "tests/unit_tests/procedural_graph/graph_reader_grammar.cpp", "max_issues_repo_name": "diegoarjz/selector", "max_issues_repo_head_hexsha": "976abd0d9e721639e6314e2599ef7e6f3dafdc4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 47.0, "max_issues_repo_issues_event_min_datetime": "2019-05-27T15:24:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-27T17:54:54.000Z", "max_forks_repo_path": "tests/unit_tests/procedural_graph/graph_reader_grammar.cpp", "max_forks_repo_name": "diegoarjz/selector", "max_forks_repo_head_hexsha": "976abd0d9e721639e6314e2599ef7e6f3dafdc4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0214723926, "max_line_length": 106, "alphanum_fraction": 0.6842750588, "num_tokens": 2835}
|
###############################################################
# _ _ _ _ _
# | |__ (_) ___ _ __ __ _ _ __| |_(_) ___| | ___
# | '_ \| |/ _ \| '_ \ / _` | '__| __| |/ __| |/ _ \
# | |_) | | (_) | |_) | (_| | | | |_| | (__| | __/
# |_.__/|_|\___/| .__/ \__,_|_| \__|_|\___|_|\___|
# |_|
#
###############################################################
#
# $ python3 runLeakingPoints.py [CASES.CSV] [TEMPLATE.IN]
#
# Where:
# - [CASES.CSV] path to csv file with the list of
# parameters and the corresponding tags
# - [TEMPLATE.IN] input file template for GMSH and
# the corresponding tags
#
###############################################################
import numpy as np
import matplotlib.pyplot as plt
from pandas import read_csv
from os import system
import sys
## Column tag used to pick the case/file name from the CSV
tagsCaseName = {
    "Title" : "<Name>"
}
## Tags dictionary for variables in input file
## (maps CSV column name -> placeholder string inside the template)
tagsReplaceable = {
    ## GEOMETRY
    "L" : "<AquiferLen>",
    "H" : "<DomainDepth>",
    ## DISCRETIZATION
    "dX" : "<dX>",
    "dZ" : "<dZ>"
}
## Path to GMSH executable (trailing space is deliberate: it is concatenated
## with the file name below)
GMSH_path = "/home/edwin/Apps/gmsh-4.6.0-Linux64/bin/gmsh "
## Table with the set of parameters (first CLI argument)
try:
    parameters_file = str(sys.argv[1])
except IndexError:
    sys.exit("Parameters file not defined :(")
## Template for the GMSH .geo input file (second CLI argument)
try:
    template_file = str(sys.argv[2])
except IndexError:
    sys.exit("Template file not found :(")
# Read CSV file with cases
setParameters = read_csv(parameters_file)
total_rows = setParameters.shape[0]
# Check that tags in CSV are in dictionary
# NOTE(review): this check is described but not implemented -- a missing
# column will only surface as a KeyError inside the loop below.
## Delete previous cases -- destructive: removes everything matching CASE*
system("rm -rf CASE*")
for i in range(total_rows):
    ## Create a folder for the case
    current_folder = "./"
    ## Copy template input file to folder
    fileName = setParameters.loc[i,tagsCaseName["Title"]]
    system("cp " + template_file + " " + fileName + ".geo")
    current_file = current_folder + "/" + fileName +".geo"
    ## Replace tags for values in case
    for current_tag in tagsReplaceable:
        # NOTE(review): the "nX"/"nZ" integer-format branch is currently dead
        # since tagsReplaceable has no such keys -- confirm whether those tags
        # were meant to be added to the dictionary.
        if "nX" in current_tag or "nZ" in current_tag:
            Value2Text = '{:}'.format(setParameters.loc[i,tagsReplaceable[current_tag]])
        else:
            Value2Text = '{:.2E}'.format(setParameters.loc[i,tagsReplaceable[current_tag]])
        # In-place sed substitution of the placeholder with the formatted value
        COMM = "sed -i 's/" + tagsReplaceable[current_tag] + "/"\
            + Value2Text \
            + "/g' " + current_file
        system(COMM)
    ## Run case
    #system(PFLOTRAN_path + "-pflotranin " + current_file + " &")
    system(GMSH_path + " " + current_file)
|
{"hexsha": "ab3999a4fe3f9be668941a919d000ce4634e049d", "size": 2557, "ext": "py", "lang": "Python", "max_stars_repo_path": "doeTests/3DUnstructuredCylinder/createMesh.py", "max_stars_repo_name": "edsaac/bioparticle", "max_stars_repo_head_hexsha": "67e191329ef191fc539b290069524b42fbaf7e21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doeTests/3DUnstructuredCylinder/createMesh.py", "max_issues_repo_name": "edsaac/bioparticle", "max_issues_repo_head_hexsha": "67e191329ef191fc539b290069524b42fbaf7e21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-25T23:31:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-25T23:31:21.000Z", "max_forks_repo_path": "doeTests/3DUnstructuredCylinder/createMesh.py", "max_forks_repo_name": "edsaac/VirusTransport_RxSandbox", "max_forks_repo_head_hexsha": "67e191329ef191fc539b290069524b42fbaf7e21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-30T05:00:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-30T05:00:58.000Z", "avg_line_length": 28.4111111111, "max_line_length": 85, "alphanum_fraction": 0.5635510364, "include": true, "reason": "import numpy", "num_tokens": 705}
|
# https://github.com/JuliaDiffEq/DifferentialEquations.jl/issues/525
using OrdinaryDiffEq, StaticArrays, Test
# State vector that carries an extra discrete quantity `f1` alongside the
# ODE state (DEDataVector interface from DiffEqBase).
mutable struct SimType{T} <: DEDataVector{T}
    x::Array{T,1}   # actual state storage
    f1::T           # scalar toggled by the discrete callbacks below
end
# Out-of-place RHS: exponential decay with the callback-controlled offset f1
# added to the first component.  Returns a fresh SimType carrying f1 along.
function f(u,p,t) # new out-of-place definition
    du1 = -0.5*u[1] + u.f1
    du2 = -0.5*u[2]
    return SimType([du1, du2], u.f1)
end
# In-place RHS: identical dynamics to `f`, writing directly into `du`.
function f!(du,u,p,t) # old in-place definition
    du[1] = u.f1 - 0.5*u[1]
    du[2] = -0.5*u[2]
end
# Times at which the two discrete callbacks fire (must also be tstops so the
# integrator steps exactly onto them).
const tstop1 = [5.]
const tstop2 = [8.]
# Fires exactly when the integrator lands on t = 5.
function condition(u,t,integrator)
    return t in tstop1
end
# Fires exactly when the integrator lands on t = 8.
function condition2(u,t,integrator)
    return t in tstop2
end
# Set f1 = 1.5 on every cached state vector so interpolants see the new value.
function affect!(integrator)
    foreach(c -> (c.f1 = 1.5), full_cache(integrator))
end
# Set f1 = -1.5 on every cached state vector.
function affect2!(integrator)
    foreach(c -> (c.f1 = -1.5), full_cache(integrator))
end
# Out-of-place variant: only the current state needs updating (no cache walk).
function affect!_oop(integrator)
    integrator.u.f1 = 1.5
    return nothing
end
# Out-of-place counterpart of affect2!.  BUG FIX: the original set f1 = 1.5,
# while affect2! (the in-place version it mirrors) sets -1.5 -- a copy-paste
# error from affect!_oop.
function affect2!_oop(integrator)
    integrator.u.f1 = -1.5
    return nothing
end
# Build the callback sets and solve the problem with both RHS forms.
save_positions = (true,true)
cb = DiscreteCallback(condition, affect!, save_positions=save_positions)
save_positions = (false,true)
cb2 = DiscreteCallback(condition2, affect2!, save_positions=save_positions)
cbs = CallbackSet(cb,cb2)
# NOTE(review): cb_oop reuses save_positions=(false,true) left over from cb2
# above, while cb used (true,true) -- confirm the asymmetry is intentional.
cb_oop = DiscreteCallback(condition, affect!_oop, save_positions=save_positions)
save_positions = (false,true)
cb2_oop = DiscreteCallback(condition2, affect2!_oop, save_positions=save_positions)
cbs_oop = CallbackSet(cb_oop,cb2_oop)
u0 = SimType([10.0;10.0], 0.0)
prob_inplace = ODEProblem(f!,u0,(0.0,10.0))
prob = ODEProblem(f,u0,(0.0,10.0))
# tstops forces the integrator to step exactly on the callback times.
const tstop = [5.;8.]
sol = solve(prob_inplace,Tsit5(),callback = cbs, tstops=tstop)
sol = solve(prob,Tsit5(),callback = cbs_oop, tstops=tstop)
# https://github.com/JuliaDiffEq/DifferentialEquations.jl/issues/336
# Static 2x2 system matrix (double-integrator structure).
const A = SMatrix{2,2}([0 1;
                        0 0])
# State wrapper with an auxiliary static input vector `a` updated in the RHS.
mutable struct MyStruct{T} <: DEDataVector{T}
    x::MVector{2,T}   # ODE state
    a::SVector{2,T}   # input written by `dyn` each call
end
# In-place RHS for the MyStruct test.  FIX: argument order follows the
# DifferentialEquations convention f(du,u,p,t); the original declared
# (du,u,t,p).  Neither t nor p is used in the body, so the numerical
# behavior is unchanged -- the solver passes arguments positionally.
function dyn(du,u,p,t)
    u.a = SVector{2}(0.0, 0.1)
    du .= A*u.x + u.a
    return nothing
end
# Regression checks: the DEDataVector array interface (copy/zero/similar)
# must preserve the wrapper type, and the problem must solve end-to-end.
u0 = MyStruct(MVector{2}(0.,0.), SVector{2}(0.,0.))
prob = ODEProblem(dyn, u0, (0.,10.))
@test copy(u0) isa MyStruct
@test zero(u0) isa MyStruct
@test similar(u0) isa MyStruct
@test similar(u0,Float64) isa MyStruct
@test similar(u0,Float64,size(u0)) isa MyStruct
sol = solve(prob, Tsit5())
# https://github.com/JuliaDiffEq/StochasticDiffEq.jl/issues/247
using OrdinaryDiffEq
using StochasticDiffEq
# NOTE(review): byte-identical redefinition of SimType from the first section
# of this file (Julia accepts identical struct redefinitions).
mutable struct SimType{T} <: DEDataVector{T}
    x::Array{T,1}
    f1::T
end
# In-place RHS used by both the ODE and SDE problems in this section
# (adds a 4-argument method to the earlier 3-argument `f`).
function f(du,u,p,t)
    du[1] = u.f1 - 0.5*u[1]
    du[2] = -0.5*u[2]
end
# NOTE(review): tstop1/tstop2 and the four callback functions below redefine
# the identically-named constants/functions from the first section.
const tstop1 = [5.]
const tstop2 = [8.]
# Fire when the integrator lands exactly on a stop time.
function condition(u,t,integrator)
    t in tstop1
end
function condition2(u,t,integrator)
    t in tstop2
end
# Set f1 on every cached state vector so interpolation sees the new value.
function affect!(integrator)
    for c in full_cache(integrator)
        c.f1 = 1.5
    end
end
function affect2!(integrator)
    for c in full_cache(integrator)
        c.f1 = -1.5
    end
end
# Callback set and problem definition shared by the SDE test below.
save_positions = (true,true)
cb = DiscreteCallback(condition, affect!, save_positions=save_positions)
save_positions = (false,true)
cb2 = DiscreteCallback(condition2, affect2!, save_positions=save_positions)
cbs = CallbackSet(cb,cb2)
u0 = SimType([10.0;10.0], 0.0)
prob = ODEProblem(f,u0,(0.0,10.0))
const tstop = [5.;8.]
# Constant (state-independent) diffusion coefficients for the SDE test.
function g(du,u,p,t)
    du[2] = 1.2
    du[1] = 1.0
end
dt = 1/2^4
prob2 = SDEProblem(f,g,u0,(0.0,10.0))
# this creates an error
# (regression for StochasticDiffEq.jl#247: Euler-Maruyama with callbacks,
# tstops, and saveat on a DEDataVector state used to throw)
sol = solve(prob2,callback = cbs,tstops=tstop,EM(),dt=dt,saveat=collect(8:0.1:10))
# https://github.com/JuliaDiffEq/DiffEqBase.jl/issues/327
# Immutable DEDataArray whose extra field `u` has an arbitrary parametric type.
struct SimulationState{T,U} <: DiffEqBase.DEDataArray{T, 1}
    x::Vector{T}   # state storage
    u::U           # stored control input
end
#function Base.convert(::Type{SimulationState{T, U}}, a::AbstractArray{T}) where {T,U}
# SimulationState(a, zero(eltype(a)))
#end
# Out-of-place RHS: first-order decay driven by the stored input; returns a
# fresh SimulationState carrying the same input forward.
function open_loop(state, parameters, time)
    derivative = state.u - 0.1 * state[1]
    return SimulationState([derivative], state.u)
end
# End-to-end regression: a parametric DEDataArray must survive an adaptive
# solve with tight tolerances.
initial_conditions = SimulationState([10.0], 0.0)
time_span = (0.0, 20.0)
ode_prob = ODEProblem(open_loop, initial_conditions, time_span, nothing)
sol = solve(ode_prob, Tsit5(), reltol=1e-8, abstol=1e-8)
|
{"hexsha": "3debbad17a3fcb2a544804ac71b1bcf1b7ab8e9d", "size": 4162, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/downstream/data_array_regression_tests.jl", "max_stars_repo_name": "frankschae/DiffEqBase.jl", "max_stars_repo_head_hexsha": "406640750d8bf6e38009a40d377fa6d95717fe04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/downstream/data_array_regression_tests.jl", "max_issues_repo_name": "frankschae/DiffEqBase.jl", "max_issues_repo_head_hexsha": "406640750d8bf6e38009a40d377fa6d95717fe04", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/downstream/data_array_regression_tests.jl", "max_forks_repo_name": "frankschae/DiffEqBase.jl", "max_forks_repo_head_hexsha": "406640750d8bf6e38009a40d377fa6d95717fe04", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7905759162, "max_line_length": 87, "alphanum_fraction": 0.6590581451, "num_tokens": 1379}
|
#!/usr/bin/env /usr/bin/python3
import numpy as np
################################################################################
#===============================================================================
# geometry.py
#===============================================================================
################################################################################
# Area of a polygon
# Area of a polygon
def get_area(Nv,vertices):
    """Signed area of a polygon via the shoelace formula.

    Parameters
    ----------
    Nv : int
        Number of vertices of the polygon.
    vertices : (Nv, 2) array
        x/y coordinates of the vertices, in boundary order.

    Returns
    -------
    float
        Signed area: positive for counter-clockwise vertex order,
        negative for clockwise.  0.0 for an empty polygon.
    """
    pts = np.asarray(vertices)[:Nv]
    x = pts[:, 0]
    y = pts[:, 1]
    # Vectorized shoelace: sum of x_i*y_{i+1} - x_{i+1}*y_i with wrap-around
    # (np.roll closes the polygon), replacing the original per-vertex loop.
    return 0.5 * np.sum(x * np.roll(y, -1) - np.roll(x, -1) * y)
################################################################################
# Perimeter of a polygon
# Perimeter of a polygon
def get_perimeter(Nv,vertices):
    """Perimeter of a closed polygon.

    Parameters
    ----------
    Nv : int
        Number of vertices of the polygon.
    vertices : (Nv, 2) array
        x/y coordinates of the vertices, in boundary order.

    Returns
    -------
    float
        Sum of the Euclidean edge lengths, including the closing edge
        from the last vertex back to the first.  0.0 for an empty polygon.
    """
    pts = np.asarray(vertices)[:Nv]
    # Edge vectors between consecutive vertices, wrapping last -> first;
    # vectorized replacement for the original per-edge loop.
    edges = np.roll(pts, -1, axis=0) - pts
    return np.sum(np.hypot(edges[:, 0], edges[:, 1]))
################################################################################
if __name__ == '__main__':
    # Module is import-only; no CLI behavior.
    pass
################################################################################
# EOF
|
{"hexsha": "e97a81fc1db11353e8ac5b43c5bff45b44cdaf0f", "size": 1230, "ext": "py", "lang": "Python", "max_stars_repo_path": "flat_source/geometry.py", "max_stars_repo_name": "HopyanLab/ConPT2D", "max_stars_repo_head_hexsha": "4e5cb2de8959c88b79613431fb9fb71675db17ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flat_source/geometry.py", "max_issues_repo_name": "HopyanLab/ConPT2D", "max_issues_repo_head_hexsha": "4e5cb2de8959c88b79613431fb9fb71675db17ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flat_source/geometry.py", "max_forks_repo_name": "HopyanLab/ConPT2D", "max_forks_repo_head_hexsha": "4e5cb2de8959c88b79613431fb9fb71675db17ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7391304348, "max_line_length": 80, "alphanum_fraction": 0.3056910569, "include": true, "reason": "import numpy", "num_tokens": 279}
|
import numpy as np
from simple_convnet.helpers import (
filter2D, batch_filter3D, padarray, atleast, safe_exp, safe_log, choice, imshow
)
from matplotlib import pyplot as plt
from time import time
from skimage.transform import downscale_local_mean
class Layer(object):
    """Abstract base class for a network layer.

    Concrete layers override _forward/_backward (and the param accessors when
    they have learnable parameters); forward/backward add shape checking and
    batch-dimension handling on top.
    """

    def __init__(self, input_shape, rand_state=np.random):
        """
        Layer constructor (abstract).

        Parameters
        ----------
        input_shape : tuple of ints specifying shape of a single input
        rand_state : a RandomState object
        """
        self.input_shape = np.array(input_shape)
        # Default: output shape equals input shape; subclasses overwrite this.
        self.output_shape = self.input_shape

    def forward(self, input_act):
        """
        Forward propagation. This class is mostly wraps around _forward and does some extra
        asserts. Child classes should overwrite _forward rather than this method.

        Parameters
        ----------
        input_act : numpy array, activations from the layer below; shape must either be the same as
            self.input_shape, or (NUMBER_OF_EXAMPLES,) + self.input_shape

        Returns
        -------
        output_act : numpy array, output activations from this layer; shape will be
            self.output_shape or (NUMBER_OF_EXAMPLES,) + self.output_shape, depending on the input
        """
        input_ndim = len(self.input_shape)
        assert input_act.shape[-input_ndim:] == tuple(self.input_shape), 'wrong input shape'
        # Remember whether the caller passed a batch so we can mirror it on return.
        many = (input_act.ndim > input_ndim)
        input_act = atleast(input_act, input_ndim+1)
        act = self._forward(input_act)
        assert act.shape[1:] == tuple(self.output_shape), 'wrong output shape'
        return act if many else act[0,...]

    def backward(self, grad_act, input_act):
        """
        Backward propagation. This class is mostly wraps around _backward and does some extra
        asserts. Child classes should overwrite _backward rather than this method.

        Parameters
        ----------
        grad_act : numpy array, gradient of cost function with respect to the activations from this
            layer (usually calculated in the layer above and passed down during backward
            propagation), shape is self.output_shape or (NUMBER_OF_EXAMPLES,) + self.output_shape
        input_act : numpy array, activations from the layer below; shape must either be the same as
            self.input_shape, or (NUMBER_OF_EXAMPLES,) + self.input_shape

        Returns
        -------
        grad_input_act : numpy array, gradient of cost function with respect to the input
            activations this layer received, which is to be passed down to the layer below; shape
            will be self.input_shape or (NUMBER_OF_EXAMPLES,) + self.input_shape, depending on the
            input
        grad_params : 1D numpy array of length self.num_params() (or None if self.num_params()==0),
            gradient of cost function with respect to the params of this layer
        """
        input_ndim = len(self.input_shape)
        output_ndim = len(self.output_shape)
        assert grad_act.shape[-output_ndim:] == tuple(self.output_shape), 'wrong grad input shape'
        assert input_act.shape[-input_ndim:] == tuple(self.input_shape), 'wrong input shape'
        assert ((grad_act.ndim==output_ndim and input_act.ndim==input_ndim)
                or grad_act.shape[0] == input_act.shape[0]), 'wrong number of samples'
        many = (input_act.ndim > input_ndim)
        input_act = atleast(input_act, input_ndim+1)
        grad_act = atleast(grad_act, output_ndim+1)
        grad_input_act, grad_params = self._backward(grad_act, input_act)
        assert grad_input_act.shape[1:] == tuple(self.input_shape), \
            'wrong input act grad shape'
        if self.num_params() > 0:
            grad_params = grad_params.ravel()
            assert grad_params.size == self.num_params(), 'wrong param grad shape'
        return (grad_input_act if many else grad_input_act[0,...], grad_params)

    ################################################################################################
    ### METHODS TO OVERWRITE IN CHILD CLASSES

    def num_params(self):
        """
        Returns the number of parameters in this layer
        """
        return 0

    def get_params(self):
        """
        Returns a 1D numpy array, length self.num_params(), with the parameters of this layer.
        """
        return None

    def set_params(self, params):
        """
        Sets the parameters of this layer

        Parameters
        ----------
        params : 1D numpy array, length self.num_params(), with the parameters of this layer
        """
        pass

    def _forward(self, input_act):
        """
        Forward propagation.

        Parameters
        ----------
        input_act : numpy array, activations from the layer below; shape is
            (NUMBER_OF_EXAMPLES,) + self.input_shape

        Returns
        -------
        output_act : numpy array, output activations from this layer; shape will be
            (NUMBER_OF_EXAMPLES,) + self.output_shape
        """
        # BUG FIX: was `raise NotImplemented` -- NotImplemented is a sentinel
        # value, not an exception class, so raising it fails with a TypeError.
        raise NotImplementedError

    def _backward(self, grad_act, input_act):
        """
        Backward propagation.

        Parameters
        ----------
        grad_act : numpy array, gradient of cost function with respect to the activations from this
            layer, shape is (NUMBER_OF_EXAMPLES,) + self.output_shape
        input_act : numpy array, activations from the layer below; shape is
            (NUMBER_OF_EXAMPLES,) + self.input_shape

        Returns
        -------
        grad_input_act : numpy array, gradient of cost function with respect to the input
            activations this layer received; shape will be (NUMBER_OF_EXAMPLES,) + self.input_shape
        grad_params : 1D numpy array of length self.num_params() (or None if self.num_params()==0),
            gradient of cost function with respect to the params of this layer
        """
        # BUG FIX: was `raise NotImplemented` (see _forward).
        raise NotImplementedError
class ConvLayer(Layer):
    def __init__(self,
                 input_shape,
                 num_filters=1,
                 filter_shape=(3,3),
                 init_from=None,
                 rand_state=np.random):
        """
        Convolutional layer.

        Parameters
        ----------
        input_shape : tuple of ints specifying shape of a single input; this particular layer
            expects the input shape to be 3D (height x width x channels)
        num_filters : int, number of filters in this layer
        filter_shape : tuple specifying height and width of the filters (current implementation
            only square filters)
        init_from : (experimental feature) a dataset to use in initializing filters
        rand_state : a RandomState object
        """
        super(ConvLayer, self).__init__(input_shape)
        # Odd, square filters only -- guarantees a well-defined center pixel.
        assert filter_shape[0]%2 == 1 and filter_shape[1]%2 ==1
        assert filter_shape[0] == filter_shape[1], 'Only square filters currently supported'
        if init_from is not None:
            # a bit of a hack to try out...
            assert init_from.shape[3] == input_shape[2]
            assert init_from.shape[0] > 5
            self.filters_ = np.zeros(filter_shape + (input_shape[2], num_filters), dtype='float32')
            for i in xrange(num_filters):
                # Average a random sample of 15 examples, then crop a random patch.
                sample = init_from[choice(15, init_from.shape[0]),...].mean(0)
                r_start = rand_state.randint(init_from.shape[1] - filter_shape[0])
                c_start = rand_state.randint(init_from.shape[2] - filter_shape[1])
                self.filters_[...,i] = sample[r_start:r_start+filter_shape[0],
                                              c_start:c_start+filter_shape[1],
                                              ...]/10
        else:
            # Random init scaled by the square root of the fan-in.
            self.filters_ = rand_state.randn(*(filter_shape + (input_shape[2], num_filters)))
            self.filters_ /= np.sqrt(np.prod(self.filters_.shape[:-1]))
            self.filters_ = self.filters_.astype('float32')
        self.filter_shape = filter_shape
        # NOTE(review): integer division intended here (file is Python 2 --
        # see xrange/print elsewhere); under Python 3 `/` would yield floats.
        self.filter_pad = (filter_shape[0]/2, filter_shape[1]/2)
        # 'valid'-convolution output size.
        self.output_shape = np.array([self.input_shape[0] - filter_shape[0] + 1,
                                      self.input_shape[1] - filter_shape[1] + 1,
                                      num_filters])

    def viz(self, num_row=1):
        """
        Displays the filters in this layer (only makes sense for the first layer of a network)
        """
        num_filters = self.filters_.shape[-1]
        fig = plt.figure()
        num_col = int(np.ceil(float(num_filters)/num_row))
        for i in xrange(num_filters):
            # NOTE(review): matplotlib subplot indices are 1-based while `i`
            # starts at 0 -- confirm against the matplotlib version in use.
            ax = fig.add_subplot(num_row, num_col, i)
            imshow(self.filters_[...,i], ax=ax)

    def num_params(self):
        return np.prod(self.filters_.shape)

    def get_params(self):
        return self.filters_.ravel()

    def set_params(self, params):
        self.filters_ = params.reshape(self.filters_.shape)

    def _forward(self, input_act):
        # batch_filter3D produces a 'same'-sized result; cropping the border
        # by the filter padding yields the 'valid' convolution.
        fp = self.filter_pad
        act = batch_filter3D(input_act, self.filters_)
        act = act[:,fp[0]:-fp[0],fp[1]:-fp[1],:]
        return act

    def _backward(self, grad_act, input_act):
        # this is probably the trickiest method in this entire module...

        # input activation gradient -- notice that we have to flip the filters horizontally and
        # vertically
        rev_filters = np.fliplr(np.flipud(self.filters_))
        # note: opencv doesn't like arbitrary slices of numpy arrays, so we need to shuffle the
        # dimensions around a little bit
        # rev_filters will now be NUM_FILTERS x NUM_CHANNELS x ...
        rev_filters = np.rollaxis(np.rollaxis(rev_filters, 2, 0), 3, 0).copy()
        padded_grad_act = padarray(grad_act, self.filter_pad)
        # padded_grad_act will now be NUM_FILTERS x NUM_EXAMPLES x ...
        padded_grad_act = np.rollaxis(padded_grad_act, 3, 0).copy()
        grad_input_act = np.zeros(input_act.shape, dtype='float32')
        # Accumulate per-example, per-channel contributions from every filter.
        for z in xrange(input_act.shape[0]):
            for c in xrange(input_act.shape[-1]):
                for f in xrange(self.filters_.shape[-1]):
                    grad_input_act[z,:,:,c] += filter2D(padded_grad_act[f,z], rev_filters[f,c])
        # grad_input_act = grad_input_act.sum(-1)

        # params gradient
        grad_params = np.zeros((input_act.shape[1:4] + (grad_act.shape[-1],)), dtype='float32')
        # grad_act_ will now be NUM_FILTERS x NUM_EXAMPLES x ...
        grad_act_ = np.rollaxis(grad_act, 3, 0).copy()
        # input_act will now be NUM_CHANNELS x NUM_EXAMPLES x ...
        input_act = np.rollaxis(input_act, 3, 0).copy()
        for n in xrange(input_act.shape[1]):
            for c in xrange(input_act.shape[0]):
                for f in xrange(grad_act.shape[-1]):
                    grad_params[:,:,c,f] += filter2D(input_act[c,n], grad_act_[f,n])
        # Average the parameter gradient over the batch.
        grad_params /= input_act.shape[1]
        # Crop the full-correlation result down to the filter footprint
        # (Python-2 integer division again).
        r_border, c_border = grad_act.shape[1]/2, grad_act.shape[2]/2
        if grad_act.shape[1] %2 == 0:
            grad_params = grad_params[r_border:-r_border+1, c_border:-c_border+1,...]
        else:
            grad_params = grad_params[r_border:-r_border, c_border:-c_border,...]
        assert grad_params.shape == self.filters_.shape, 'wrong param grad shape'
        return grad_input_act, grad_params.ravel()
class MeanPoolingLayer(Layer):
    def __init__(self, input_shape, pool_size=2, rand_state=np.random):
        """
        Mean pooling layer. There are no learnable parameters in this layer type.

        Parameters
        ----------
        input_shape : tuple of ints specifying shape of a single input
        pool_size : int, size of the pooling window (stride will be the same as this size, in other
            words no overlap in the pooling)
        rand_state : a RandomState object
        """
        super(MeanPoolingLayer, self).__init__(input_shape)
        # NOTE(review): integer division of the shape array relies on Python-2
        # `/` semantics; under Python 3 this would produce a float shape.
        self.output_shape = self.input_shape / np.array([pool_size, pool_size, 1])
        self.pool_size = pool_size

    def _forward(self, input_act):
        # Move the batch axis last so skimage's downscale_local_mean pools
        # only the spatial dimensions, then move it back.
        act = downscale_local_mean(np.rollaxis(input_act, 0, 4),
                                   (self.pool_size, self.pool_size, 1, 1))
        return np.rollaxis(act, 3, 0)

    def _backward(self, grad_act, input_act):
        # Each pooled gradient is spread uniformly over its pool_size^2 inputs.
        kron_kernel = np.ones((self.pool_size,self.pool_size))[np.newaxis,...,np.newaxis]
        grad_input_act = np.kron(grad_act, kron_kernel)/self.pool_size/self.pool_size
        return grad_input_act, None
class ReluLayer(Layer):
    """
    Rectified linear unit layer. There are no learnable parameters in this layer type.
    """

    def _forward(self, input_act):
        # Zero out non-positive activations; positives pass through unchanged.
        positive_mask = input_act > 0
        return input_act * positive_mask

    def _backward(self, grad_act, input_act):
        # Gradient flows only where the forward input was positive.
        positive_mask = (input_act > 0).astype('float')
        return positive_mask * grad_act, None
class SigmoidLayer(Layer):
    """
    Sigmoid unit layer. There are no learnable parameters in this layer type.
    """

    @staticmethod
    def _sigmoid(x):
        # Logistic function 1 / (1 + e^-x).
        return 1.0/(1.0+np.exp(-x))

    def _forward(self, input_act):
        return SigmoidLayer._sigmoid(input_act)

    def _backward(self, grad_act, input_act):
        # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
        sig = SigmoidLayer._sigmoid(input_act)
        deriv = sig * (1.0 - sig)
        return deriv * grad_act, None
class DenseLayer(Layer):
    def __init__(self, input_shape, num_nodes=1, rand_state=np.random):
        """
        Dense/fully connected layer.

        Parameters
        ----------
        input_shape : tuple of ints specifying shape of a single input
        num_nodes : int, number of nodes in the layer
        rand_state : a RandomState object
        """
        super(DenseLayer, self).__init__(input_shape)
        self.output_shape = np.array([num_nodes])
        # Random init scaled by sqrt of the weight count; note there is no
        # bias term here -- biases live in the separate BiasLayer.
        self.weights_ = rand_state.randn(np.prod(self.input_shape), num_nodes).astype('float32')
        self.weights_ /= np.sqrt(np.prod(self.weights_.shape))

    def num_params(self):
        return self.weights_.size

    def get_params(self):
        return self.weights_.ravel()

    def set_params(self, params):
        self.weights_ = params.reshape(self.weights_.shape)

    def _forward(self, input_act):
        # Flatten each example to a vector before the matrix product.
        input_act = input_act.reshape((-1,self.weights_.shape[0]))
        return np.dot(input_act, self.weights_)

    def _backward(self, grad_act, input_act):
        input_act = input_act.reshape((-1,self.weights_.shape[0]))
        grad_input_act = np.dot(grad_act, self.weights_.T)
        grad_input_act = grad_input_act.reshape((-1,) + tuple(self.input_shape))
        # Per-example outer products, averaged over the batch.
        grad_params = np.array([np.outer(act, grad) for act, grad in zip(input_act, grad_act)])
        grad_params = grad_params.mean(0)
        return grad_input_act, grad_params
class BiasLayer(Layer):
    def __init__(self, input_shape, init_val=0, rand_state=np.random):
        """
        Bias layer. For an input shape of [...] x N, this layer adds N bias terms. E.g., for a
        convolutional layer with an output of shape WxHxC where C is the number of channels/filters,
        this layer will contain C bias terms, one for each filter.

        Parameters
        ----------
        input_shape : tuple of ints specifying shape of a single input
        init_val : float, value to initialize all weights with
        rand_state : a RandomState object
        """
        super(BiasLayer, self).__init__(input_shape)
        # assert len(input_shape) == 3
        self.output_shape = np.array(input_shape)
        # One bias per entry of the last input dimension.
        self.weights_ = np.ones(input_shape[-1]) * init_val

    def num_params(self):
        return self.weights_.size

    def get_params(self):
        return self.weights_.ravel()

    def set_params(self, params):
        self.weights_ = params.reshape(self.weights_.shape)

    def _forward(self, input_act):
        # Broadcasts the per-channel biases over all leading dimensions.
        return input_act + self.weights_

    def _backward(self, grad_act, input_act):
        # Bias does not alter the incoming gradient.
        grad_input_act = grad_act
        # sum over the width and height dimensions (if any), average over all input examples
        grad_params = grad_act.mean(0)
        while grad_params.ndim > 1:
            grad_params = grad_params.sum(0)
        return grad_input_act, grad_params
class NNet(object):
    def __init__(self, layer_args, input_shape, rand_state=np.random):
        """
        Abstract neural net class.

        Parameters
        ----------
        layer_args : list of (LayerClass, kwargs) tuples where LayerClass is a class that inherits
            from the Layer class, and kwargs are to be passed into the constructor of that class.
            layer_args[0] is the first layer, closest to the input, and layer_args[-1] is the
            top-most layer. The kwargs need not include the input_shape argument -- this will be
            determined automatically starting with the input_shape for the network (see below).
        input_shape : tuple of ints specifying shape of a single input to the network
        rand_state : a RandomState object
        """
        # layer_args is a list of (layer_class, layer_init_args) for first through last layer
        self.layers_ = []
        self.input_shape = input_shape
        for args in layer_args:
            layer_class, args = args
            # NOTE(review): injects rand_state by mutating the caller-supplied
            # kwargs dict in place.
            args['rand_state'] = rand_state
            layer = layer_class(input_shape, **args)
            self.layers_.append(layer)
            # get input shape for the next layer
            input_shape = layer.output_shape
        self._rand_state = rand_state
        self._cache_acts = None
        # this will keep track of how many batches and epochs have been trained
        self.num_batch = 0
        self.num_epoch = 0
def set_params(self, params):
"""
Set parameters to the network (i.e. all the layer parameters).
Parameters
----------
params : numpy array of length self.num_params()
"""
ind = 0
for layer in self.layers_:
num_params = layer.num_params()
if num_params:
layer.set_params(params[ind:ind+num_params])
ind += num_params
    def get_params(self):
        """
        Returns a single numpy array of length self.num_params() with all the parameters (i.e. all
        the layer parameters concatenated into one vector).
        """
        # Parameter-less layers return None from get_params() and are skipped.
        return np.concatenate([layer.get_params()
                               for layer in self.layers_ if layer.get_params() is not None])
    def num_params(self):
        """
        Returns the number of (learnable) parameters in the entire network.
        """
        return np.sum([layer.num_params() for layer in self.layers_])
    def num_nodes(self):
        """
        Returns the number of nodes/neurons in the network.
        """
        # NOTE(review): np.sum over a bare generator is deprecated in newer
        # numpy; a list comprehension would be the safe spelling.
        return (np.sum(np.prod(layer.output_shape) for layer in self.layers_) +
                np.prod(self.input_shape))
    def cost_for_params(self, params, x, y=None):
        """
        Calculates the cost of the network for the specified inputs and the specified network
        params.

        Parameters
        ----------
        params : numpy array of length self.num_params() specified network parameters
        x : input examples
        y : labels of the examples

        Returns
        -------
        cost : float
        """
        # Temporarily swap in the candidate parameters, evaluate, then restore.
        curr_params = self.get_params()
        self.set_params(params)
        cost = self.cost(x, y=y)
        # revert params
        self.set_params(curr_params)
        return cost
    def cost(self, x, y=None, final_acts=None):
        """
        Calculates the cost of the network for the specified inputs. Child classes should
        implement _cost rather than this method.

        Parameters
        ----------
        x : numpy array, training examples; shape should be (NUMBER_OF_EXAMPLES,) + self.input_shape
        y : numpy array, training labels, shape should be (NUMBER_OF_EXAMPLES, shape of labels)
        final_acts : (optional) output of top-most layer in the network for the set of examples

        Returns
        -------
        cost : float
        """
        # Run the forward pass only if the caller did not already supply it.
        if final_acts is None:
            final_acts = self.forward(x)[-1]
        # _cost is supplied by concrete subclasses (not visible in this file).
        return self._cost(final_acts, y)
    def forward(self, x, batch_size=None):
        """
        Forward propagation through the whole network.

        Parameters
        ----------
        x : numpy array, training examples; shape should be (NUMBER_OF_EXAMPLES,) + self.input_shape
        batch_size : accepted for interface symmetry but unused here; see
            forward_final for the batched variant.

        Returns
        -------
        acts : list that contains a numpy array for each layer in the network; the first element in
            the list is the array x itself, and each following array is the output of that layer for
            the given examples x
        """
        acts = [x]
        for layer in self.layers_:
            act = layer.forward(acts[-1])
            acts.append(act)
        return acts
    def forward_final(self, x, batch_size=None):
        """
        Forward propagation through the whole network; returns only output of final layer.

        Parameters
        ----------
        x : numpy array, training examples; shape should be (NUMBER_OF_EXAMPLES,) + self.input_shape
        batch_size : number of samples to process at a time (conserves memory)

        Returns
        -------
        acts : activations of the final layer
        """
        if batch_size is None or batch_size > x.shape[0]:
            batch_size = x.shape[0]
        ind = 0
        res = []
        # Process the input in batch_size chunks, keeping only the top layer's
        # output for each chunk.
        while ind < x.shape[0]:
            acts = x[ind:ind+batch_size,...]
            for layer in self.layers_:
                acts = layer.forward(acts)
            res.append(acts)
            ind += batch_size
        return np.concatenate(res) if len(res)>1 else res[0]
    def param_grad(self, x, y=None, acts=None):
        """
        Calculate the gradient of the cost function with respect to all learnable parameters of this
        network.

        Parameters
        ----------
        x : numpy array, training examples; shape should be (NUMBER_OF_EXAMPLES,) + self.input_shape
        y : numpy array, training labels, shape should be (NUMBER_OF_EXAMPLES, shape of labels)
        acts : (optional) list that contains a numpy array for each layer in the network; the first
            element in the list is the array x itself, and each following array is the output of
            that layer for the given examples x

        Returns
        -------
        param_grad : numpy array of length self.num_params()
        """
        if acts is None:
            acts = self.forward(x)
        # cost_grad (defined by concrete subclasses, not visible here) seeds
        # backprop with the gradient w.r.t. the final activations.
        curr_act_grad = self.cost_grad(final_acts=acts[-1], y=y)
        param_grad = []
        for ind_from_end, layer in enumerate(reversed(self.layers_)):
            curr_act_grad, curr_param_grad = layer.backward(curr_act_grad, acts[-2-ind_from_end])
            if curr_param_grad is not None:
                param_grad.append(curr_param_grad)
        # Gradients were collected top-down; callers expect bottom-up order.
        param_grad.reverse()
        return np.concatenate(param_grad)
    @staticmethod
    def get_batch(x, y=None, batch_size=128, batch_ind=0, inds=None):
        """
        Slice out one mini-batch of examples (and labels, if given).

        Parameters
        ----------
        x : numpy array, training examples; shape should be (NUMBER_OF_EXAMPLES,) + self.input_shape
        y : numpy array, training labels, shape should be (NUMBER_OF_EXAMPLES, shape of labels)
        batch_size : number of examples to use in each batch
        batch_ind : which batch to return
        inds : a permutation of indexes for this dataset (numpy array of length x.shape[0])

        Returns
        -------
        batch_x : subset of at most batch_size examples in x
        batch_y : corresponding labels for this batch (None if y is None)
        """
        # Default to the identity permutation when no shuffle order is given.
        if inds is None:
            inds = np.arange(x.shape[0])
        batch_x = x[inds[batch_ind*batch_size:(batch_ind+1)*batch_size],...]
        batch_y = None
        if y is not None:
            batch_y = y[inds[batch_ind*batch_size:(batch_ind+1)*batch_size],...]
        return batch_x, batch_y
@staticmethod
def get_num_batch(num_examples, batch_size):
"""
Returns the number of batches for a given number of examples and given batch size.
"""
return int(np.ceil(num_examples/float(batch_size)))
def split_per_layer(self, vec):
""" Given a vector with entries for each learnable parameter in the net, this method sums
up the entries for each layer and returns a vector of such sums. E.g., can be used to
calculate absolute mean of weights in each layer."""
split = []
ind = 0
for layer in self.layers_:
split.append(vec[ind:layer.num_params()])
ind += layer.num_params()
return split
def fit(self,
x,
y=None,
val_x=None,
val_y=None,
val_freq=10,
batch_size=128,
num_epoch=10,
momentum=0.9,
learn_rate=0.01,
learn_rate_decay=0.05,
chill_out_iters=10,
weight_decay=.0005,
verbose=False):
"""
Train the neural network via mini-batch gradient descent.
Parameters
----------
x : numpy array, training examples; shape should be (NUMBER_OF_EXAMPLES,) + self.input_shape
y : numpy array, training labels, shape should be (NUMBER_OF_EXAMPLES, shape of labels)
val_x : validation examples, similar shape as x
val_y : validation labels, similar shape as y
val_freq : validation will be performed every val_freq iterations
batch_size : number of examples to use in each batch
num_epoch : number of epochs to train for (maximum, may terminate earlier)
learn_rate : initial learning rate, will decay as learning proceeds
learn_rate_decay : at each iteration i the learning rate will be
learn_rate/(i*learn_rate_decay+1)
chill_out_iters : if there is no improvement in validation error after this many iterations
of validation, the learning rate will be cut in half and the network will go back to
the set of parameters that achieved the lower cost so far
weight_decay : amount of weight decay to apply
verbose : whether to print debug messages during training or not
"""
if verbose:
print '='*80
print 'training net on %d samples' % x.shape[0]
if val_x is not None:
print 'using %d validation samples' % val_x.shape[0]
print '='*80
min_cost = 1e8
velocity = np.zeros(self.num_params(), dtype='float32')
best_params = self.get_params()
stop = False
no_improvement_iters = 0
num_train = x.shape[0]
num_batch = NNet.get_num_batch(num_train, batch_size)
init_learn_rate = learn_rate
start_time = time()
for epoch in xrange(self.num_epoch, self.num_epoch + num_epoch):
inds = self._rand_state.permutation(x.shape[0])
if stop:
break
for batch in xrange(num_batch):
batch_x, batch_y = self.get_batch(
x, y=y, batch_size=batch_size, inds=inds, batch_ind=batch)
assert batch_x.shape[0] > 0
param_grad = self.param_grad(batch_x, y=batch_y)
params = self.get_params()
learn_rate = init_learn_rate/((epoch*num_batch + batch)*learn_rate_decay+1)
velocity = (
momentum*velocity -
learn_rate*param_grad -
learn_rate*weight_decay*params)
self.set_params(params + velocity)
# check validation error every once in a while
if (batch%val_freq == 0 and batch>0) or batch == num_batch-1:
if val_x is None:
val_x, val_y = batch_x, batch_y
val_acts = self.forward_final(val_x, batch_size=batch_size)
cost = self.cost(val_x, val_y, final_acts=val_acts)
# child classes don't necessarily have a concept of "accuracy" and might not
# implement the accuracy method
try:
acc = self.accuracy(val_acts, val_y)
except NotImplemented:
acc = np.nan
cost_diff = cost - min_cost
# if there has been significant regression in cost, chill out
if ((cost_diff > 0 and cost_diff/min_cost > 1) or
no_improvement_iters>chill_out_iters):
self.set_params(best_params)
no_improvement_iters = 0
init_learn_rate /= 2
velocity = np.zeros_like(velocity, dtype='float32')
print 'cost was %.3e, chilling out...' % cost
cost = min_cost
elif cost < min_cost:
best_params = self.get_params()
min_cost = cost
no_improvement_iters = 0
else:
no_improvement_iters += 1
if verbose:
print 'epoch %03d, batch=%04d/%04d' % (epoch, batch+1, num_batch)
print 'cost=%.3e, min_cost=%.3e, acc=%.2f' % (cost, min_cost, acc)
print 'learn_rate=%.3e, velocity L1 norm %f' % (learn_rate,
np.abs(velocity).sum(0))
print '-'*80
self.num_epoch = epoch
end_time = time()
print 'training complete [%.2f min]' % ((end_time-start_time)/60)
def predict(self, x, batch_size=None):
"""
Retruns the output of the final layer of this network.
"""
return self.forward_final(x, batch_size)
################################################################################################
### METHODS TO OVERWRITE IN CHILD CLASSES
def _cost(self, final_acts, y):
"""
Calculates the cost of the network for the specified inputs.
Parameters
----------
final_acts : output of top-most layer in the network for a set of examples
y : labels of the examples
Returns
-------
cost : float
"""
raise NotImplemented
def accuracy(self, final_acts, y):
"""
Child class can optionally implement this in case there is a notion of accuracy that is
separate from cost (e.g. cross entropy cost versus classifcation accuracy).
Parameters
----------
final_acts : output of top-most layer in the network for a set of examples
y : labels of the examples
Returns
-------
accuracy : float
"""
raise NotImplemented
def cost_grad(self, final_acts, y):
"""
Calculates the gradient of the cost function with respect to the top-most layer activations.
Parameters
----------
final_acts : output of top-most layer in the network for a set of examples
y : labels of the examples
Returns
-------
cost_grad : numpy array, same shape as the output_shape of the top-most layer.
"""
raise NotImplemented
class SoftmaxNet(NNet):
    """
    Neural net with a softmax (cross entropy) cost over C classes, where C is
    the number of output nodes of the top-most layer.
    """

    def __init__(self, layer_args, input_shape, rand_state=np.random):
        """
        Softmax (cross entropy) cost neural net.
        """
        super(SoftmaxNet, self).__init__(layer_args, input_shape, rand_state=rand_state)
        # Number of classes = width of the final layer's output.
        self.num_classes = self.layers_[-1].output_shape[0]

    def fit(self, x, y=None, **kwargs):
        """
        Train on integer class labels; labels are one-hot encoded before
        delegating to NNet.fit.
        """
        assert y is not None, 'Labels must be passed in'
        # Labels must be integers in [0, C).  (The original required every
        # class to be present in y; a subset check is sufficient and also
        # accepts training subsets that are missing some class.)
        assert set(np.unique(y)) <= set(range(self.num_classes)), \
            'Labels should range from 0 to C-1 where C is the number of nodes in the last layer'
        binary_y = self.binarize_labels(y)
        # BUG FIX: guard against an explicit val_y=None keyword (the original
        # would call binarize_labels(None) and crash).
        if kwargs.get('val_y') is not None:
            kwargs['val_y'] = self.binarize_labels(kwargs['val_y'])
        super(SoftmaxNet, self).fit(x, binary_y, **kwargs)

    def binarize_labels(self, y):
        """
        Turns discrete labels into one-hot binary vector labels.

        Parameters
        ----------
        y : numpy array of N integers from 0 to C-1

        Returns
        -------
        b : numpy array of shape NxC s.t. b[i,j]=1 if y[i]==j, else 0
        """
        binary_y = np.zeros((len(y), self.num_classes))
        for c in xrange(self.num_classes):
            binary_y[y==c,c] = 1
        return binary_y

    def predict(self, x, batch_size=None):
        """Most likely class index for each example in x."""
        acts = self.forward_final(x, batch_size)
        return np.argmax(acts, axis=1)

    def _cost(self, final_acts, y):
        """Mean cross entropy of softmax(final_acts) against one-hot y."""
        exp_act = safe_exp(final_acts)
        lse_act = safe_log(np.sum(exp_act, axis=1))
        # log softmax = logits - logsumexp(logits)
        return -np.mean(np.sum((y * (final_acts - lse_act[:,np.newaxis])), axis=1))

    def accuracy(self, final_acts, y):
        """Classification accuracy in percent; y may be one-hot or integer."""
        yp = np.argmax(final_acts, axis=1)
        if y.ndim == 2:
            y = np.argmax(y, axis=1)
        return np.mean(yp == y)*100

    def cost_grad(self, final_acts, y):
        """Gradient of the cross-entropy cost w.r.t. final_acts: softmax - y."""
        exp_act = safe_exp(final_acts)
        sum_exp = np.sum(exp_act, axis=1)
        return exp_act/sum_exp[:,np.newaxis] - y
|
{"hexsha": "a178e7757858331e2d0db6e767e6d1fd3f157c95", "size": 34064, "ext": "py", "lang": "Python", "max_stars_repo_path": "simple_convnet/convnet.py", "max_stars_repo_name": "bbabenko/simple_convnet", "max_stars_repo_head_hexsha": "015dd33be80365f704049790a547a2b3e0251f2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2015-01-04T11:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-27T18:51:57.000Z", "max_issues_repo_path": "simple_convnet/convnet.py", "max_issues_repo_name": "bbabenko/simple_convnet", "max_issues_repo_head_hexsha": "015dd33be80365f704049790a547a2b3e0251f2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simple_convnet/convnet.py", "max_forks_repo_name": "bbabenko/simple_convnet", "max_forks_repo_head_hexsha": "015dd33be80365f704049790a547a2b3e0251f2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-04-01T01:07:34.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-30T05:09:49.000Z", "avg_line_length": 38.8415051311, "max_line_length": 100, "alphanum_fraction": 0.5959957727, "include": true, "reason": "import numpy", "num_tokens": 7434}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 18 06:01:03 2021
@author: hakimbmkg
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import pandas as pd
import librosa
import librosa.display
import matplotlib.pyplot as plt
plt.rcParams['agg.path.chunksize'] = 1000000
from tqdm import tqdm
import numpy as np
from obspy import read
from obspy import UTCDateTime
import tensorflow as tf
import shutil
class Transform:
    """
    Preprocessing step two: transform raw waveform data.

    Pipeline:
      1. ``conditioning_waveform`` -- split multi-channel mseed files into
         single-channel files resampled to 100 Hz and log them to a CSV.
      2. ``make_spectogram`` / ``make_spectogram_mags`` -- render each mseed
         file as a spectrogram PNG via librosa.
      3. ``cp_spectogram`` / ``cp_spectogram_mags`` -- sort the PNGs into
         per-label dataset folders.

    NOTE(review): methods are declared without ``self`` and are invoked as
    plain functions through the class (``Transform.method(path)``).
    """

    # All paths below are resolved relative to the process working directory.
    global directory
    directory = os.getcwd()

    # Channel codes handled by conditioning_waveform, in processing order.
    _CHANNELS = ('BHN', 'BHE', 'BHZ', 'SHN', 'SHE', 'SHZ')

    def conditioning_waveform(path):
        """
        Split every waveform listed in the pick CSV at ``path`` into
        single-channel mseed files (BH?/SH?), resampled to 100 Hz for
        standardization, and append one row per produced file to
        ``input/labels_csv/inaseisform.csv``.

        Parameters
        ----------
        path : str
            CSV (relative to cwd) with columns files_name, start_time,
            end_time, network, stations_code, classID, class.
        """
        columns = ['files_name', 'start_time', 'end_time', 'network',
                   'stations_code', 'classID', 'class']
        out_csv = directory + '/input/labels_csv/inaseisform.csv'
        # Start from a fresh log file containing only the header row.
        if os.path.exists(out_csv):
            os.remove(out_csv)
        read_csv = pd.read_csv(directory + '/' + path)
        arr_init_stations = read_csv[columns].to_numpy()
        pd.DataFrame(columns=columns).to_csv(out_csv, mode='a', header=True, index=False)
        for row in tqdm(arr_init_stations, desc='Conditioning/Split Process'):
            st = read(directory + '/input/waveform/' + row[0])
            stats = st[0].stats
            st_fn = stats['network'] + '.' + stats['station'] + '.' + stats['location'] + '.'
            start_t = str(UTCDateTime(row[1]))
            end_t = str(UTCDateTime(row[2]))
            # The original repeated this body once per channel code; a single
            # loop over the channels is behaviorally identical.
            for chan in Transform._CHANNELS:
                sel = st.select(id=st_fn + chan)
                if not sel:
                    continue
                new_st = sel.resample(100.0)
                new_fn = st_fn + chan + '.' + start_t + '__' + end_t
                pd.DataFrame({
                    'files_name': new_fn,
                    'start_time': start_t,
                    'end_time': end_t,
                    'network': [row[3]],
                    'stations_code': [row[4]],
                    'classID': [row[5]],
                    'class': [row[6]],
                }).to_csv(out_csv, mode='a', header=False, index=False)
                try:
                    if not os.path.exists(directory + '/input/waveform/' + new_fn):
                        new_st.write(directory + '/input/waveform/' + new_fn, format='MSEED')
                        print(f'Success write mseed file for station {row[4]}')
                        print(f'===========================================')
                    else:
                        print(f'file mseed for station {row[4]} is exist')
                        print(f'===========================================')
                # BUG FIX: the original's last branch used `except i.Error` /
                # `except StopIteration` on a numpy row (and `n.line_num`),
                # which itself raises; a best-effort catch matches the intent
                # of every other channel branch.
                except Exception:
                    print(f'***!warning!*** >> cant write mseed for station {row[4]}')
        print(f'\n ***finished*** \n')

    def _render_spectrogram(mseed_path, png_path):
        """
        Render the first trace of ``mseed_path`` as an amplitude-dB
        spectrogram (librosa STFT) and save it as ``png_path``.
        """
        st = read(mseed_path)
        data = st[0].data.astype('float32')
        sr = int(st[0].stats.sampling_rate)
        hop_length = 128
        n_fft = 256
        fmin = None   # lower display limit; None means 0 Hz
        fmax = 5.0    # upper display limit in Hz
        D = librosa.amplitude_to_db(
            np.abs(librosa.stft(data, hop_length=hop_length, n_fft=n_fft)), ref=np.max)
        fig, ax = plt.subplots()
        img = librosa.display.specshow(
            D, y_axis='linear', sr=sr, hop_length=hop_length, x_axis='time',
            ax=ax, cmap='jet', bins_per_octave=12, auto_aspect=False)
        fmin0 = fmin if fmin is not None else 0
        fmax0 = fmax if fmax is not None else sr / 2
        # BUG FIX: the original computed fmin0/fmax0 fall-backs and then
        # ignored them, passing the raw (possibly None) fmin/fmax to set_ylim.
        ax.set_ylim([fmin0, fmax0])
        fig.colorbar(img, ax=ax, format="%+2.f dB")
        plt.savefig(png_path, bbox_inches='tight', dpi=300)
        plt.close()

    def make_spectogram(path):
        """
        Render a spectrogram PNG into input/spectogram/ for every waveform
        listed in the conditioning CSV at ``path``.
        """
        if not os.path.exists(directory + '/input/spectogram/'):
            os.makedirs(directory + '/input/spectogram')
            print('== folder /input/spectogram/ created')
        read_csv = pd.read_csv(directory + '/' + path)
        init_stations = read_csv[['files_name', 'start_time', 'end_time', 'network',
                                  'stations_code', 'classID', 'class']]
        for i in init_stations.to_numpy():
            Transform._render_spectrogram(
                directory + '/input/waveform/' + i[0],
                directory + '/input/spectogram/' + i[0] + '.png')

    # NOTE(review): the original decorated this with @tf.function, but the
    # body is pure file I/O plus matplotlib -- TensorFlow graph tracing would
    # break it, so the decorator is removed.
    def make_spectogram_mags(path):
        """
        Render spectrogram PNGs for the EQ dataset traces listed in ``path``
        (a CSV with trace_name and source_magnitude columns); traces whose
        mseed file is missing are skipped.
        """
        if not os.path.exists(directory + '/input/dataset_EQ/spectogram/'):
            os.makedirs(directory + '/input/dataset_EQ/spectogram')
            print('== folder /input/spectogram/dataset_EQ/ created')
        read_csv = pd.read_csv(directory + '/' + path)
        init_stations = read_csv[['trace_name', 'source_magnitude']]
        for i in init_stations.to_numpy():
            mseed_path = directory + '/input/dataset_EQ/event/' + i[0]
            if os.path.exists(mseed_path):
                Transform._render_spectrogram(
                    mseed_path,
                    directory + '/input/dataset_EQ/spectogram/' + i[0] + '.png')

    def _copy_image(src_, des_, extra=''):
        """
        shutil.copy wrapped in the best-effort error reporting shared by
        cp_spectogram and cp_spectogram_mags; ``extra`` is appended to the
        generic failure message.
        """
        try:
            shutil.copy(src_, des_)
        except shutil.SameFileError:
            print('Source and destination represents the same file.')
        except IsADirectoryError:
            print('Destination is a directory.')
        except PermissionError:
            print('Permission denied. check your permision')
        except Exception:
            print('Error occurred while copying file.' + extra)

    def cp_spectogram(path):
        """
        Copy class-labelled spectrogram PNGs into one folder per class under
        input/dataset_spectogram/.
        """
        if not os.path.exists(directory + '/input/dataset/'):
            os.makedirs(directory + '/input/dataset')
            print('== folder /input/dataset/ created')
        data_path = pd.read_csv(path)
        arr_data_path = data_path[['files_name', 'class']].to_numpy()
        labels = data_path['class'].value_counts().index.tolist()
        for b in labels:
            if not os.path.exists(directory + '/input/dataset_spectogram/' + b):
                os.makedirs(directory + '/input/dataset_spectogram/' + b)
                # BUG FIX: message said /input/dataset/ but the folder is
                # created under /input/dataset_spectogram/.
                print(f'== folder /input/dataset_spectogram/{b} created')
            for x in arr_data_path:
                if x[1] == b:
                    src_ = directory + '/input/spectogram/' + x[0] + '.png'
                    des_ = directory + '/input/dataset_spectogram/' + b + '/'
                    if not os.path.exists(directory + '/input/dataset_spectogram/' + b + '/' + x[0]):
                        Transform._copy_image(src_, des_)

    def cp_spectogram_mags(path):
        """
        Copy EQ spectrogram PNGs into one folder per source magnitude under
        input/dataset_EQ/datasEQ_spectogram/.
        """
        if not os.path.exists(directory + '/input/dataset_EQ/datasEQ_spectogram/'):
            os.makedirs(directory + '/input/dataset_EQ/datasEQ_spectogram/')
            print('== folder /input/dataset_EQ/datasEQ_spectogram/ created')
        data_path = pd.read_csv(path)
        arr_data_path = data_path[['trace_name', 'source_magnitude', 'receiver_code']].to_numpy()
        labels = data_path['source_magnitude'].value_counts().index.tolist()
        for b in labels:
            fold_b = str(b)
            if not os.path.exists(directory + '/input/dataset_EQ/datasEQ_spectogram/' + fold_b):
                os.makedirs(directory + '/input/dataset_EQ/datasEQ_spectogram/' + fold_b)
                print(f'== folder /input/dataset_EQ/datasEQ_spectogram/{fold_b} created')
            for x in arr_data_path:
                if x[1] == b:
                    src_ = directory + '/input/dataset_EQ/spectogram/' + x[0] + '.png'
                    des_ = directory + '/input/dataset_EQ/datasEQ_spectogram/' + fold_b + '/'
                    if not os.path.exists(directory + '/input/dataset_EQ/datasEQ_spectogram/' + fold_b + '/' + x[0]):
                        Transform._copy_image(
                            src_, des_,
                            extra=f' {x[0]} - {x[2]} - please check path files')
|
{"hexsha": "8011e3b1d463d765905ca870bdd66ed4e534fb3b", "size": 18234, "ext": "py", "lang": "Python", "max_stars_repo_path": "LinduAI/preprocessing/transform.py", "max_stars_repo_name": "hakimbmkg/linduAI", "max_stars_repo_head_hexsha": "7d413ba03d54498dfe4871136748d95527e6d575", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LinduAI/preprocessing/transform.py", "max_issues_repo_name": "hakimbmkg/linduAI", "max_issues_repo_head_hexsha": "7d413ba03d54498dfe4871136748d95527e6d575", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LinduAI/preprocessing/transform.py", "max_forks_repo_name": "hakimbmkg/linduAI", "max_forks_repo_head_hexsha": "7d413ba03d54498dfe4871136748d95527e6d575", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3582089552, "max_line_length": 135, "alphanum_fraction": 0.4630360864, "include": true, "reason": "import numpy", "num_tokens": 4030}
|
"""
Benchmarks for code in pandas/_libs, excluding pandas/_libs/tslibs,
which has its own directory
"""
import numpy as np
from pandas._libs.lib import (
is_list_like,
is_scalar,
)
from pandas import (
NA,
NaT,
)
# TODO: share with something in pd._testing?
# Scalar fixtures spanning numeric, boolean, string, bytes, datetime-like
# and missing-value kinds.
scalars = [
    0,
    1.0,
    1 + 2j,
    True,
    "foo",
    b"bar",
    None,
    np.datetime64(123, "ns"),
    np.timedelta64(123, "ns"),
    NaT,
    NA,
]
# A zero-dimensional ndarray is array-like but must not count as list-like.
zero_dims = [np.array("123")]
# Containers that should all be classified as list-like.
listlikes = [np.array([1, 2, 3]), {0: 1}, {1, 2, 3}, [1, 2, 3], (1, 2, 3)]


class ScalarListLike:
    """ASV benchmark: time is_list_like / is_scalar over each fixture."""

    params = [*scalars, *zero_dims, *listlikes]

    def time_is_list_like(self, param):
        is_list_like(param)

    def time_is_scalar(self, param):
        is_scalar(param)
|
{"hexsha": "f5c2397945cea52fd66be72ca57beaa9f654cc9d", "size": 746, "ext": "py", "lang": "Python", "max_stars_repo_path": "asv_bench/benchmarks/libs.py", "max_stars_repo_name": "oricou/pandas", "max_stars_repo_head_hexsha": "9405e58d9268041f5416711c051cf5429a19bf49", "max_stars_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-13T17:34:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-13T17:34:58.000Z", "max_issues_repo_path": "asv_bench/benchmarks/libs.py", "max_issues_repo_name": "oricou/pandas", "max_issues_repo_head_hexsha": "9405e58d9268041f5416711c051cf5429a19bf49", "max_issues_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "asv_bench/benchmarks/libs.py", "max_forks_repo_name": "oricou/pandas", "max_forks_repo_head_hexsha": "9405e58d9268041f5416711c051cf5429a19bf49", "max_forks_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-04T09:25:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-04T09:25:40.000Z", "avg_line_length": 17.3488372093, "max_line_length": 74, "alphanum_fraction": 0.5991957105, "include": true, "reason": "import numpy", "num_tokens": 240}
|
import os
import pickle
import numpy as np
from numbers import Number
from typing import Union, Optional, Dict
from .common import get_datetime, mkdir
from .metric import Metric
class Logger:
    """
    Log training statistics to text/pickle files and optionally visualize
    them via Tensorboard.

    Parameters
    ----------
    root : str
        Root directory to save log files
    log_basename : str, optional
        Base name of the log file
    tensorboard : bool, optional, default=False
        Enable tensorboard or not (``tensorboard`` package is required)
    verbose : bool, optional, default=True
        Also print every logged line to stdout
    """

    def __init__(
        self,
        root: str,
        log_basename: Optional[str] = None,
        tensorboard: bool = False,
        verbose: bool = True
    ) -> None:
        self.root = os.path.expanduser(root)
        self.timestamp = get_datetime()
        self.log_basename = log_basename
        self.verbose = verbose
        # Plain-text logs go to <root>/text, pickled data to <root>/pkl.
        self.text_path = os.path.join(self.root, 'text')
        mkdir(self.text_path)
        self.pkl_path = os.path.join(self.root, 'pkl')
        mkdir(self.pkl_path)
        self.writter = None
        if tensorboard:
            self.tensorboard_path = os.path.join(self.root, 'tensorboard', self.log_name)
            mkdir(self.tensorboard_path)
            try:
                from torch.utils.tensorboard import SummaryWriter
                self.writter = SummaryWriter(self.tensorboard_path)
            except ImportError:
                print(
                    "Warning: Tensorboard is configured to use, but currently not "
                    "installed on this machine. Please install Tensorboard with "
                    "'pip install tensorboard' or set ``tensorboard`` to ``False``."
                )

    @property
    def log_name(self) -> str:
        # <basename>_<timestamp>, or just the timestamp if no basename given.
        if self.log_basename and self.log_basename != '':
            return self.log_basename + '_' + self.timestamp
        else:
            return self.timestamp

    def _write_tensorboard(
        self, key: str, x: Union[Number, np.number], y: Union[Number, np.number]
    ) -> None:
        """
        Log one data point into Tensorboard.

        Parameters
        ----------
        key : str
            Namespace which the input data tuple belongs to
        x : Union[Number, np.number]
            Abscissa (global step) of the input data
        y : Union[Number, np.number]
            Ordinate (value) of the input data
        """
        self.writter.add_scalar(key, y, global_step=x)

    def _write_text(self, text: str) -> None:
        """
        Append ``text`` to the plain-text log file.

        Parameters
        ----------
        text : str
            A string to be logged
        """
        log_file_path = os.path.join(self.text_path, self.log_name + '.log')
        with open(log_file_path, "a") as f:
            f.write(text)

    def log(
        self, data: Dict[str, Metric], step: int, addition: Optional[str] = None
    ) -> None:
        """
        Log statistics generated during updating in human readable format.

        Parameters
        ----------
        data : dict
            Metrics to be logged, keyed by name
        step : int
            Step of the data to be logged
        addition : str, optional
            Additional information prepended to the logged line
        """
        text = f"step: {step:8.2e}\t"
        if addition:
            text = f"{addition}\t" + text
        for name, value in data.items():
            # log statistics to Tensorboard
            if self.writter is not None:
                self._write_tensorboard(name, step, value.recent)
            # log statistics to text files
            text += '{name}: {recent:7.2f}\t'.format(name=name, recent=value.recent)
        self._write_text(text + '\n')
        if self.verbose:
            print(text)
        # Remember the last step that was logged (readable by callers).
        self.last_log_step = step

    def log_to_pkl(self, data: Dict[str, np.ndarray]) -> None:
        """Pickle ``data`` to <root>/pkl/<log_name>.pkl (overwrites)."""
        log_file_path = os.path.join(self.pkl_path, self.log_name + '.pkl')
        # BUG FIX: the original wrote pickle.dump(data, open(path, 'wb')),
        # leaking the file handle; use a context manager instead.
        with open(log_file_path, 'wb') as f:
            pickle.dump(data, f)
|
{"hexsha": "e0b0099f96ea27c1a5f2188c749a84f9d1f17dd8", "size": 4006, "ext": "py", "lang": "Python", "max_stars_repo_path": "alkaid/utils/logger.py", "max_stars_repo_name": "Renovamen/alkaid", "max_stars_repo_head_hexsha": "78bb19c3d18856234dec9444235b749c6006655f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-04T10:33:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-04T10:33:44.000Z", "max_issues_repo_path": "alkaid/utils/logger.py", "max_issues_repo_name": "Renovamen/alkaid", "max_issues_repo_head_hexsha": "78bb19c3d18856234dec9444235b749c6006655f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "alkaid/utils/logger.py", "max_forks_repo_name": "Renovamen/alkaid", "max_forks_repo_head_hexsha": "78bb19c3d18856234dec9444235b749c6006655f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8201438849, "max_line_length": 89, "alphanum_fraction": 0.5698951573, "include": true, "reason": "import numpy", "num_tokens": 879}
|
      SUBROUTINE setvars ! set variables we have found are undefined
C--------------------------------
C     Purpose: zero-initialize variables held in shared COMMON blocks that
C     are otherwise read elsewhere before ever being written.
      common /cantenna/ numants,iats(20),anttype(20),antname(20),
     + xfqs(20),xfqe(20),designfreq(20),antfile(20),
     + beammain(20),offazim(20),cond(20),diel(20),
     + array(30,91,22),aeff(30,20)
      character anttype*10,antname*70,antfile*24
      COMMON/GEOG/GYZ(5),RAT(5),GMDIP(5),CLCK(5),ABIY(5),ARTIC(5),SIGPAT
     A(5), EPSPAT(5)
      COMMON /RON /CLAT(5), CLONG(5), GLAT(5), RD(5), FI(3,5), YI(3,5),
     1HI(3,5), HPRIM(30,5), HTRUE(30,5), FVERT(30,5),KM,KFX, AFAC(30,5),
     2HTR(50,3), FNSQ(50,3)
      COMMON /RAYS/ ANG(40), IFOB(40,30,5), NANG
      COMMON/INFORM/INFO,IHSHR,IHLNG
      COMMON / ZON / ABPS(7), CREL(7), EFF(7), FLDST(7), GRLOS(7),
     1 HN(7), HP(7), PROB(7), RELY(7), RGAIN(7), SIGPOW(7), SN(7),
     2 SPRO(7), TGAIN(7), TIMED(7), TLOSS(7), B(7), FSLOS(7), ADV(7),
     3 OBF(7),NMODE(7),TLLOW(7),TLHGH(7)
c------------------------------------------------------------------------
C     Reset the antenna count and the per-circuit ARTIC array.
      zero=0.
      numants=0
      do 10 i=1,5
   10 ARTIC(i)=zero
C     Clear the height/frequency profile arrays in /RON/.
      do 20 j=1,5
      do 20 i=1,30
      HTRUE(i,j)=zero
      FVERT(i,j)=zero
      AFAC(i,j)=zero
   20 HPRIM(i,j)=zero
C     Clear the ray table in /RAYS/ (IFOB is integer; the real literal
C     zero is converted on assignment).
      do 30 k=1,5
      do 30 j=1,30
      do 30 i=1,40
   30 IFOB(i,j,k)=zero
      IHSHR=0
      IHLNG=0
C     Clear every per-frequency output quantity in /ZON/.
      do 40 i=1,7
      HP(i)=zero
      rely(i)=zero
      hn(i)=zero
      nmode(i)=0
      sn(i)=zero
      fldst(i)=zero
      sigpow(i)=zero
      b(i)=zero
      timed(i)=zero
      abps(i)=zero
      prob(i)=zero
      rgain(i)=zero
      tgain(i)=zero
      fslos(i)=zero
      spro(i)=zero
      EFF(i)=zero
      grlos(i)=zero
      adv(i)=zero
      obf(i)=zero
   40 CREL(i)=zero
      return
      end
C--------------------------------
|
{"hexsha": "7de8e50775ec691e905b297fbe28d4dd6257af85", "size": 1892, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "src/voacapw/setvars.for", "max_stars_repo_name": "jawatson/voacapl", "max_stars_repo_head_hexsha": "6861fbad041ff7a56b8875c735c9242c81a94320", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2015-11-19T16:11:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T02:20:21.000Z", "max_issues_repo_path": "src/voacapw/setvars.for", "max_issues_repo_name": "jawatson/voacapl", "max_issues_repo_head_hexsha": "6861fbad041ff7a56b8875c735c9242c81a94320", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2015-08-17T11:46:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-05T20:14:16.000Z", "max_forks_repo_path": "src/voacapw/setvars.for", "max_forks_repo_name": "jawatson/voacapl", "max_forks_repo_head_hexsha": "6861fbad041ff7a56b8875c735c9242c81a94320", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-02-04T10:18:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-05T08:16:58.000Z", "avg_line_length": 31.5333333333, "max_line_length": 74, "alphanum_fraction": 0.4640591966, "num_tokens": 730}
|
"""
Purpose: Sample size
Date created: 2020-11-19
Ref:
https://www.qualtrics.com/experience-management/research/determine-sample-size/
https://github.com/shawnohare/samplesize/blob/master/samplesize.py
Contributor(s):
Mark M.
"""
# secrets.SystemRandom (Python 3.6+) and random.SystemRandom are the same
# class; the fallback covers interpreters without the secrets module.
try:
    from secrets import SystemRandom
except ModuleNotFoundError:
    from random import SystemRandom
from math import exp, inf, pi, sqrt, erf
from statistics import mean, stdev
# OS-entropy-backed random generator used by the Monte-Carlo loop below.
rnd = SystemRandom()
# import numpy as np
# x = np.linspace(-4, 4, num = 100)
# def get_z_scr_attrs(a = 0.05, n_trials = 100, samples_per_trial = 1000):
# --- Calc. Probability Density Function (PDF) for standard normal distribution --- #
def linspace_(minval, maxval, n_steps = None, stepsize = None):
    """
    Return evenly spaced floats from ``minval`` to ``maxval`` (inclusive,
    subject to floating-point accumulation which may drop the endpoint).

    Exactly one of ``n_steps`` (total number of points) or ``stepsize``
    (increment between points) should be supplied.

    Raises
    ------
    ValueError
        If neither ``n_steps`` nor ``stepsize`` is given (the original fell
        through and crashed with a TypeError on ``minval += None``).
    """
    minval = float(minval)
    maxval = float(maxval)
    if n_steps is not None:
        # n_steps points => n_steps - 1 intervals.
        stepsize = (maxval - minval) / float(n_steps - 1)
    elif stepsize is None:
        raise ValueError("provide either n_steps or stepsize")
    output = list()
    while minval <= maxval:
        output.append(minval)
        minval += stepsize
    return output
# Demo grids.  NOTE: the second call immediately overwrites the first result.
# BUG FIX: the original called linspace_(-4, 4, maxval=100), passing maxval
# both positionally and by keyword (a TypeError); 100 was clearly meant to be
# the number of steps.
lsp = linspace_(-4, 4, n_steps=100)
lsp = linspace_(0., 3.5, stepsize=0.1)
def norm_probability_density(x):
    """
    Probability density of the standard normal distribution at ``x``:
    exp(-x^2 / 2) / sqrt(2*pi).

    BUG FIX: the original body iterated ``range(-inf, x)`` (a TypeError) and
    called its inner ``phi`` with no argument, so it always raised; it is
    replaced with the closed-form PDF its name describes.
    """
    return exp(-0.5 * x * x) / sqrt(2.0 * pi)
# Build a z-table lookup grid: rows cover z to one decimal (0.0-3.5),
# columns add the second decimal (0.00-0.10).
idx_range = list(map(lambda x: round(x/10, 2), range(0, 36)))
col_range = list(map(lambda x: round(x/100, 2), range(0, 11)))
# matrix[c][r] holds idx_range[r] + col_range[c], i.e. a z value to 2 d.p.
matrix = [[0.] * len(idx_range) for _ in range(len(col_range))]
for r in range(len(idx_range)):
    for c in range(len(col_range)):
        matrix[c][r] = round(idx_range[r] + col_range[c], 4)
# Normalizing constant of the standard normal PDF, 1/sqrt(2*pi)
constant = 1 / sqrt(2 * pi)
alpha = 0.05
conf_level = 1 - alpha
n_trials = 1000
epochs = 100
# Monte-Carlo check: the fraction of uniform(0,1) draws <= conf_level
# should average out to roughly conf_level (0.95) across all epochs.
results = []
for n in range(epochs):
    # rand_vals = [rnd.uniform(0, 1) for _ in range(n_trials)]
    rand_vals = [rnd.random() for _ in range(n_trials)]
    results.append(sum([1 if i <= conf_level else 0 for i in rand_vals]) / n_trials)
# NOTE(review): the mean is computed but discarded — presumably inspected
# interactively; bind it to a name if the average is needed later.
mean(results)
def cls_prop(name, datatype):
    """Build a type-checked property stored on a name-mangled attribute.

    The returned property reads from/writes to ``__<name>`` on the
    instance and raises ``TypeError`` if a value of the wrong type is
    assigned.
    """
    storage = f"__{name}"

    def _get(self):
        return getattr(self, storage)

    def _set(self, new_value):
        if not isinstance(new_value, datatype):
            raise TypeError(f"Expected data type {datatype}!")
        setattr(self, storage, new_value)

    return property(_get, _set)
class SampleSize:
    """Container for the inputs of a sample-size calculation.

    Attributes are validated through ``cls_prop`` type-checked properties.

    NOTE(review): the previous version called ``cls_prop(...)`` as bare
    expressions in the class body, discarding the returned properties, so
    no validation ever took effect; they must be bound as class
    attributes.  ``population_size`` is a count, so it is validated as
    ``int`` (it was previously, and incorrectly, typed ``str``).
    """
    population_size = cls_prop("population_size", int)
    alpha = cls_prop("alpha", float)
    margin_of_error = cls_prop("margin_of_error", float)

    def __init__(self, population_size, alpha=0.05, margin_of_error=0.05):
        self.population_size = population_size
        self.alpha = alpha
        # confidence level is the complement of the significance level
        self.ci = 1 - alpha
        self.margin_of_error = margin_of_error
# Calculated
import scipy.stats as ss
def _get_conf_lvl(a):
return round(ss.norm.ppf(1 - (a/2)), 4)
def sampleSize(population_size, margin_error = .05, confidence_level = .99, sigma = 1/2):
    """
    Calculate the minimal sample size to use to achieve a certain
    margin of error and confidence level for a sample estimate
    of the population mean.

    Inputs
    -------
    population_size: integer
        Total size of the population that the sample is to be drawn from.
    margin_error: number
        Maximum expected difference between the true population parameter,
        such as the mean, and the sample estimate.
    confidence_level: number in the interval (0, 1)
        If we were to draw a large number of equal-size samples
        from the population, the true population parameter
        should lie within this percentage
        of the intervals (sample_parameter - e, sample_parameter + e)
        where e is the margin_error.
    sigma: number
        The standard deviation of the population. For the case
        of estimating a parameter in the interval [0, 1], sigma=1/2
        should be sufficient.
    """
    alpha = 1 - (confidence_level)
    # Critical z-values for common confidence levels; avoids a scipy
    # dependency for typical inputs.
    zdict = {
        .90: 1.645,
        .91: 1.695,
        .92: 1.751,
        .93: 1.812,
        .94: 1.881,
        .95: 1.96,
        .96: 2.054,
        .97: 2.17,
        .98: 2.326,
        .99: 2.576,
    }
    if confidence_level in zdict:
        z = zdict[confidence_level]
    else:
        # exact two-tailed critical value for uncommon confidence levels
        from scipy.stats import norm
        z = norm.ppf(1 - (alpha/2))
    # NOTE: the previous version unconditionally recomputed z here via
    # _get_conf_lvl(alpha), which made both branches above dead code and
    # always required scipy; that override has been removed.
    N = population_size
    M = margin_error
    # Cochran's formula with finite-population correction N/(N-1)
    numerator = z**2 * sigma**2 * (N / (N-1))
    denom = M**2 + ((z**2 * sigma**2)/(N-1))
    return numerator/denom
# Worked example: minimum sample size for a population of n = 768 with a
# 5% margin of error, 99% confidence (alpha = 0.01) and sigma = 0.5.
n = 768
moe = 0.05
alpha = 0.01
std = 0.5
# two-tailed critical z-value for alpha (uses scipy via _get_conf_lvl)
z = _get_conf_lvl(alpha)
const = z**2 * std**2
# finite-population correction n/(n-1) applied to the numerator
numerator = const * (n / (n-1))
denom = (moe ** 2) + (const / (n-1))
# floor-divide then add one to round the required count up to a whole number
samplesize = (numerator//denom)+1
|
{"hexsha": "e1115fe56b498d3201b93529f7eeb195c50a5133", "size": 4840, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebook-samples/sample_size.py", "max_stars_repo_name": "MarkMoretto/python-examples-main", "max_stars_repo_head_hexsha": "37b8c41d2f175029f4536ca970f037ff19b4e951", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-21T23:24:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-21T23:24:25.000Z", "max_issues_repo_path": "notebook-samples/sample_size.py", "max_issues_repo_name": "MarkMoretto/python-examples-main", "max_issues_repo_head_hexsha": "37b8c41d2f175029f4536ca970f037ff19b4e951", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-06-29T00:38:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-15T00:22:15.000Z", "max_forks_repo_path": "notebook-samples/sample_size.py", "max_forks_repo_name": "MarkMoretto/python-examples-main", "max_forks_repo_head_hexsha": "37b8c41d2f175029f4536ca970f037ff19b4e951", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0777202073, "max_line_length": 89, "alphanum_fraction": 0.6316115702, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1367}
|
import numpy as np
from ..constants import log
def load_assimp(file_obj, file_type=None):
    '''
    Use the assimp library to load a mesh, from a file object and type,
    or filename (if file_obj is a string)

    Assimp supports a huge number of mesh formats.

    Performance notes: in tests on binary STL pyassimp was ~10x
    slower than the native loader included in this package.
    This is probably due to their recursive prettifying of the data structure.

    Also, you need a very recent version of PyAssimp for this function to work
    (the commit was merged into the assimp github master on roughly 9/5/2014)
    '''
    def LPMesh_to_Trimesh(lp):
        # scale float vertex colors from [0, 1] to [0, 255];
        # np.int was deprecated in NumPy 1.20 and removed in 1.24 —
        # the builtin int gives the same platform-default integer dtype
        colors = (np.reshape(lp.colors, (-1, 4))[:, 0:3] * 255).astype(int)
        return {'vertices'       : lp.vertices,
                'vertex_normals' : lp.normals,
                'faces'          : lp.faces,
                'vertex_colors'  : colors}

    opened_here = False
    if not hasattr(file_obj, 'read'):
        # if there is no read attribute, we assume we've been passed a file name
        file_type = (str(file_obj).split('.')[-1]).lower()
        file_obj = open(file_obj, 'rb')
        opened_here = True
    try:
        scene = pyassimp.load(file_obj, file_type=file_type)
    finally:
        # only close handles we opened ourselves (fixes a file-handle leak)
        if opened_here:
            file_obj.close()
    meshes = list(map(LPMesh_to_Trimesh, scene.meshes))
    pyassimp.release(scene)
    if len(meshes) == 1:
        return meshes[0]
    return meshes
# Registry mapping lowercase file extensions to the assimp loader.
_assimp_loaders = {}
try:
    import pyassimp
    # newer pyassimp builds expose the supported formats programmatically
    if hasattr(pyassimp, 'available_formats'):
        _assimp_formats = [i.lower() for i in pyassimp.available_formats()]
    else:
        # fall back to a static list of formats known to older assimp builds
        log.warning('Older version of assimp detected, using hardcoded format list')
        _assimp_formats = ['dae', 'blend', '3ds', 'ase', 'obj',
                           'ifc', 'xgl', 'zgl', 'ply', 'lwo',
                           'lxo', 'x', 'ac', 'ms3d', 'cob', 'scn']
    # every supported extension maps to the same load_assimp entry point
    _assimp_loaders.update(zip(_assimp_formats,
                               [load_assimp]*len(_assimp_formats)))
except ImportError:
    # pyassimp is optional: leave the registry empty when it is absent
    log.warning('pyassimp unavailable, using only native loaders')
|
{"hexsha": "ad57009bb8fb670e41ca2595c2bfd2ec3102bf9b", "size": 2042, "ext": "py", "lang": "Python", "max_stars_repo_path": "basis/trimesh/io/assimp.py", "max_stars_repo_name": "takuya-ki/wrs", "max_stars_repo_head_hexsha": "f6e1009b94332504042fbde9b39323410394ecde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-04-02T09:02:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T05:31:03.000Z", "max_issues_repo_path": "basis/trimesh/io/assimp.py", "max_issues_repo_name": "takuya-ki/wrs", "max_issues_repo_head_hexsha": "f6e1009b94332504042fbde9b39323410394ecde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2021-04-12T09:41:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T13:32:46.000Z", "max_forks_repo_path": "basis/trimesh/io/assimp.py", "max_forks_repo_name": "takuya-ki/wrs", "max_forks_repo_head_hexsha": "f6e1009b94332504042fbde9b39323410394ecde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2021-03-30T11:55:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:10:59.000Z", "avg_line_length": 37.8148148148, "max_line_length": 84, "alphanum_fraction": 0.6180215475, "include": true, "reason": "import numpy", "num_tokens": 526}
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from pymor.core.config import config
if config.HAVE_TORCH:
from numbers import Number
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils as utils
from pymor.algorithms.pod import pod
from pymor.core.base import BasicObject
from pymor.core.exceptions import NeuralNetworkTrainingFailed
from pymor.models.neural_network import FullyConnectedNN, NeuralNetworkModel, NeuralNetworkInstationaryModel
class NeuralNetworkReductor(BasicObject):
"""Reduced Basis reductor relying on artificial neural networks.
This is a reductor that constructs a reduced basis using proper
orthogonal decomposition and trains a neural network that approximates
the mapping from parameter space to coefficients of the full-order
solution in the reduced basis.
The approach is described in :cite:`HU18`.
Parameters
----------
fom
The full-order |Model| to reduce.
training_set
Set of |parameter values| to use for POD and training of the
neural network.
validation_set
Set of |parameter values| to use for validation in the training
of the neural network.
validation_ratio
Fraction of the training set to use for validation in the training
of the neural network (only used if no validation set is provided).
basis_size
Desired size of the reduced basis. If `None`, rtol, atol or l2_err must
be provided.
rtol
Relative tolerance the basis should guarantee on the training set.
atol
Absolute tolerance the basis should guarantee on the training set.
l2_err
L2-approximation error the basis should not exceed on the training
set.
pod_params
Dict of additional parameters for the POD-method.
ann_mse
If `'like_basis'`, the mean squared error of the neural network on
the training set should not exceed the error of projecting onto the basis.
If `None`, the neural network with smallest validation error is
used to build the ROM.
If a tolerance is prescribed, the mean squared error of the neural
network on the training set should not exceed this threshold.
Training is interrupted if a neural network that undercuts the
error tolerance is found.
"""
def __init__(self, fom, training_set, validation_set=None, validation_ratio=0.1,
basis_size=None, rtol=0., atol=0., l2_err=0., pod_params=None,
ann_mse='like_basis'):
assert 0 < validation_ratio < 1 or validation_set
self.__auto_init(locals())
def reduce(self, hidden_layers='[(N+P)*3, (N+P)*3]', activation_function=torch.tanh,
optimizer=optim.LBFGS, epochs=1000, batch_size=20, learning_rate=1.,
restarts=10, seed=0):
"""Reduce by training artificial neural networks.
Parameters
----------
hidden_layers
Number of neurons in the hidden layers. Can either be fixed or
a Python expression string depending on the reduced basis size
`N` and the total dimension of the |Parameters| `P`.
activation_function
Activation function to use between the hidden layers.
optimizer
Algorithm to use as optimizer during training.
epochs
Maximum number of epochs for training.
batch_size
Batch size to use if optimizer allows mini-batching.
learning_rate
Step size to use in each optimization step.
restarts
Number of restarts of the training algorithm. Since the training
results highly depend on the initial starting point, i.e. the
initial weights and biases, it is advisable to train multiple
neural networks by starting with different initial values and
choose that one performing best on the validation set.
seed
Seed to use for various functions in PyTorch. Using a fixed seed,
it is possible to reproduce former results.
Returns
-------
rom
Reduced-order |NeuralNetworkModel|.
"""
assert restarts > 0
assert epochs > 0
assert batch_size > 0
assert learning_rate > 0.
# set a seed for the PyTorch initialization of weights and biases and further PyTorch methods
torch.manual_seed(seed)
# build a reduced basis using POD and compute training data
if not hasattr(self, 'reduced_basis'):
self.reduced_basis, self.mse_basis = self.build_basis()
# determine the numbers of neurons in the hidden layers
if isinstance(hidden_layers, str):
hidden_layers = eval(hidden_layers, {'N': len(self.reduced_basis), 'P': self.fom.parameters.dim})
# input and output size of the neural network are prescribed by the dimension of the parameter space
# and the reduced basis size
assert isinstance(hidden_layers, list)
layers = self._compute_layers_sizes(hidden_layers)
# compute validation data
if not hasattr(self, 'validation_data'):
with self.logger.block('Computing validation snapshots ...'):
if self.validation_set:
self.validation_data = []
for mu in self.validation_set:
sample = self._compute_sample(mu, self.fom.solve(mu), self.reduced_basis)
self.validation_data.extend(sample)
else:
number_validation_snapshots = int(len(self.training_data)*self.validation_ratio)
# randomly shuffle training data before splitting into two sets
np.random.shuffle(self.training_data)
# split training data into validation and training set
self.validation_data = self.training_data[0:number_validation_snapshots]
self.training_data = self.training_data[number_validation_snapshots+1:]
# run the actual training of the neural network
with self.logger.block(f'Performing {restarts} restarts for training ...'):
for run in range(restarts):
neural_network, current_losses = self._train(layers, activation_function, optimizer,
epochs, batch_size, learning_rate)
if not hasattr(self, 'losses') or current_losses['val'] < self.losses['val']:
self.losses = current_losses
self.neural_network = neural_network
# check if neural network is sufficient to guarantee certain error bounds
with self.logger.block('Checking tolerances for error of neural network ...'):
if isinstance(self.ann_mse, Number) and self.losses['full'] <= self.ann_mse:
self.logger.info(f'Aborting training after {run} restarts ...')
return self._build_rom()
elif self.ann_mse == 'like_basis' and self.losses['full'] <= self.mse_basis:
self.logger.info(f'Aborting training after {run} restarts ...')
return self._build_rom()
# check if neural network is sufficient to guarantee certain error bounds
with self.logger.block('Checking tolerances for error of neural network ...'):
if isinstance(self.ann_mse, Number) and self.losses['full'] > self.ann_mse:
raise NeuralNetworkTrainingFailed('Could not train a neural network that '
'guarantees prescribed tolerance!')
elif self.ann_mse == 'like_basis' and self.losses['full'] > self.mse_basis:
raise NeuralNetworkTrainingFailed('Could not train a neural network with an error as small as the '
'reduced basis error! Maybe you can try a different neural '
'network architecture or change the value of `ann_mse`.')
elif self.ann_mse is None:
self.logger.info('Using neural network with smallest validation error ...')
self.logger.info(f'Finished training with a validation loss of {self.losses["val"]} ...')
return self._build_rom()
else:
raise ValueError('Unknown value for mean squared error of neural network')
def _compute_layers_sizes(self, hidden_layers):
"""Compute the number of neurons in the layers of the neural network."""
return [len(self.fom.parameters),] + hidden_layers + [len(self.reduced_basis),]
def _build_rom(self):
"""Construct the reduced order model."""
with self.logger.block('Building ROM ...'):
rom = NeuralNetworkModel(self.neural_network, self.fom.parameters, name=f'{self.fom.name}_reduced')
return rom
def _train(self, layers, activation_function, optimizer, epochs, batch_size, learning_rate):
"""Perform a single training iteration and return the resulting neural network."""
assert hasattr(self, 'training_data')
assert hasattr(self, 'validation_data')
# LBFGS-optimizer does not support mini-batching, so the batch size needs to be adjusted
if optimizer == optim.LBFGS:
batch_size = max(len(self.training_data), len(self.validation_data))
with self.logger.block('Training the neural network ...'):
# initialize the neural network
neural_network = FullyConnectedNN(layers,
activation_function=activation_function).double()
# initialize the optimizer
optimizer = optimizer(neural_network.parameters(),
lr=learning_rate)
loss_function = nn.MSELoss()
early_stopping_scheduler = EarlyStoppingScheduler(len(self.training_data) + len(self.validation_data))
# create the training and validation sets as well as the respective data loaders
training_dataset = CustomDataset(self.training_data)
validation_dataset = CustomDataset(self.validation_data)
phases = ['train', 'val']
training_loader = utils.data.DataLoader(training_dataset,
batch_size=batch_size)
validation_loader = utils.data.DataLoader(validation_dataset,
batch_size=batch_size)
dataloaders = {'train': training_loader, 'val': validation_loader}
self.logger.info('Starting optimization procedure ...')
# perform optimization procedure
for epoch in range(epochs):
losses = {'full': 0.}
# alternate between training and validation phase
for phase in phases:
if phase == 'train':
neural_network.train()
else:
neural_network.eval()
running_loss = 0.0
# iterate over batches
for batch in dataloaders[phase]:
inputs = batch[0]
targets = batch[1]
with torch.set_grad_enabled(phase == 'train'):
def closure():
if torch.is_grad_enabled():
optimizer.zero_grad()
outputs = neural_network(inputs)
loss = loss_function(outputs, targets)
if loss.requires_grad:
loss.backward()
return loss
# perform optimization step
if phase == 'train':
optimizer.step(closure)
# compute loss of current batch
loss = closure()
# update overall absolute loss
running_loss += loss.item() * len(batch[0])
# compute average loss
epoch_loss = running_loss / len(dataloaders[phase].dataset)
losses[phase] = epoch_loss
losses['full'] += running_loss
# check for early stopping
if phase == 'val' and early_stopping_scheduler(losses, neural_network):
if not self.logging_disabled:
self.logger.info(f'Early stopping training process after {epoch + 1} epochs ...')
self.logger.info('Minimum validation loss: '
f'{early_stopping_scheduler.best_losses["val"]}')
return early_stopping_scheduler.best_neural_network, early_stopping_scheduler.best_losses
return early_stopping_scheduler.best_neural_network, early_stopping_scheduler.best_losses
def build_basis(self):
"""Compute a reduced basis using proper orthogonal decomposition."""
with self.logger.block('Building reduced basis ...'):
# compute snapshots for POD and training of neural networks
with self.logger.block('Computing training snapshots ...'):
U = self.fom.solution_space.empty()
for mu in self.training_set:
U.append(self.fom.solve(mu))
# compute reduced basis via POD
reduced_basis, svals = pod(U, modes=self.basis_size, rtol=self.rtol / 2.,
atol=self.atol / 2., l2_err=self.l2_err / 2.,
**(self.pod_params or {}))
self.training_data = []
for mu, u in zip(self.training_set, U):
sample = self._compute_sample(mu, u, reduced_basis)
self.training_data.extend(sample)
# compute mean square loss
mean_square_loss = (sum(U.norm2()) - sum(svals**2)) / len(U)
return reduced_basis, mean_square_loss
def _compute_sample(self, mu, u, reduced_basis):
"""Transform parameter and corresponding solution to tensors."""
# determine the coefficients of the full-order solutions in the reduced basis to obtain the
# training data; convert everything into tensors that are compatible with PyTorch
mu_tensor = torch.DoubleTensor(mu.to_numpy())
u_tensor = torch.DoubleTensor(reduced_basis.inner(u)[:,0])
return [(mu_tensor, u_tensor),]
def reconstruct(self, u):
"""Reconstruct high-dimensional vector from reduced vector `u`."""
assert hasattr(self, 'reduced_basis')
return self.reduced_basis.lincomb(u.to_numpy())
    class NeuralNetworkInstationaryReductor(NeuralNetworkReductor):
        """Reduced Basis reductor for instationary problems relying on
        artificial neural networks.

        This is a reductor that constructs a reduced basis using proper
        orthogonal decomposition and trains a neural network that approximates
        the mapping from parameter and time space to coefficients of the
        full-order solution in the reduced basis.
        The approach is described in :cite:`WHR19`.

        Parameters
        ----------
        fom
            The full-order |Model| to reduce.
        training_set
            Set of |parameter values| to use for POD and training of the
            neural network.
        validation_set
            Set of |parameter values| to use for validation in the training
            of the neural network.
        validation_ratio
            Fraction of the training set to use for validation in the training
            of the neural network (only used if no validation set is provided).
        basis_size
            Desired size of the reduced basis. If `None`, rtol, atol or l2_err must
            be provided.
        rtol
            Relative tolerance the basis should guarantee on the training set.
        atol
            Absolute tolerance the basis should guarantee on the training set.
        l2_err
            L2-approximation error the basis should not exceed on the training
            set.
        pod_params
            Dict of additional parameters for the POD-method.
        ann_mse
            If `'like_basis'`, the mean squared error of the neural network on
            the training set should not exceed the error of projecting onto the basis.
            If `None`, the neural network with smallest validation error is
            used to build the ROM.
            If a tolerance is prescribed, the mean squared error of the neural
            network on the training set should not exceed this threshold.
            Training is interrupted if a neural network that undercuts the
            error tolerance is found.
        """

        def __init__(self, fom, training_set, validation_set=None, validation_ratio=0.1,
                     basis_size=None, rtol=0., atol=0., l2_err=0., pod_params=None,
                     ann_mse='like_basis'):
            assert 0 < validation_ratio < 1 or validation_set
            self.__auto_init(locals())

        def _compute_layers_sizes(self, hidden_layers):
            """Compute the number of neurons in the layers of the neural network
            (make sure to increase the input dimension to account for the time)."""
            # the extra +1 input neuron receives the time instance t
            return [len(self.fom.parameters) + 1,] + hidden_layers + [len(self.reduced_basis),]

        def _build_rom(self):
            """Construct the reduced order model."""
            with self.logger.block('Building ROM ...'):
                # self.nt (number of time steps) is set while building the basis
                rom = NeuralNetworkInstationaryModel(self.fom.T, self.nt, self.neural_network,
                                                     self.fom.parameters, name=f'{self.fom.name}_reduced')
            return rom

        def build_basis(self):
            """Compute a reduced basis using proper orthogonal decomposition."""
            with self.logger.block('Building reduced basis ...'):
                # compute snapshots for POD and training of neural networks
                with self.logger.block('Computing training snapshots ...'):
                    U = self.fom.solution_space.empty()
                    for mu in self.training_set:
                        u = self.fom.solve(mu)
                        # all trajectories must share the same number of time steps;
                        # the first trajectory fixes self.nt
                        if hasattr(self, 'nt'):
                            assert self.nt == len(u)
                        else:
                            self.nt = len(u)
                        U.append(u)
                # compute reduced basis via POD
                reduced_basis, svals = pod(U, modes=self.basis_size, rtol=self.rtol / 2.,
                                           atol=self.atol / 2., l2_err=self.l2_err / 2.,
                                           **(self.pod_params or {}))
                self.training_data = []
                # each parameter contributes self.nt consecutive snapshots in U
                for i, mu in enumerate(self.training_set):
                    sample = self._compute_sample(mu, U[i*self.nt:(i+1)*self.nt], reduced_basis)
                    self.training_data.extend(sample)
            # compute mean square loss
            mean_square_loss = (sum(U.norm2()) - sum(svals**2)) / len(U)
            return reduced_basis, mean_square_loss

        def _compute_sample(self, mu, u, reduced_basis):
            """Transform parameter and corresponding solution to tensors
            (make sure to include the time instances in the inputs)."""
            # one sample per time instance: the parameter extended by t, paired
            # with the reduced coefficients of the solution at that time
            parameters_with_time = [mu.with_(t=t) for t in np.linspace(0, self.fom.T, self.nt)]
            samples = [(torch.DoubleTensor(mu.to_numpy()), torch.DoubleTensor(reduced_basis.inner(u_t)[:,0]))
                       for mu, u_t in zip(parameters_with_time, u)]
            return samples
    class EarlyStoppingScheduler(BasicObject):
        """Class for performing early stopping in training of neural networks.

        If the validation loss does not decrease over a certain amount of epochs, the
        training should be aborted to avoid overfitting the training data.
        This class implements an early stopping scheduler that recommends to stop the
        training process if the validation loss did not decrease by at least `delta`
        over `patience` epochs.

        Parameters
        ----------
        size_training_validation_set
            Size of both, training and validation set together.
        patience
            Number of epochs of non-decreasing validation loss allowed, before early
            stopping the training process.
        delta
            Minimal amount of decrease in the validation loss that is required to reset
            the counter of non-decreasing epochs.
        """

        def __init__(self, size_training_validation_set, patience=10, delta=0.):
            self.__auto_init(locals())
            # best losses/network observed so far and the stagnation counter
            self.best_losses = None
            self.best_neural_network = None
            self.counter = 0

        def __call__(self, losses, neural_network=None):
            """Returns `True` if early stopping of training is suggested.

            Parameters
            ----------
            losses
                Dictionary of losses on the validation and the training set in
                the current epoch.
            neural_network
                Neural network that produces the current validation loss.

            Returns
            -------
            `True` if early stopping is suggested, `False` otherwise.
            """
            if self.best_losses is None:
                # first call: whatever we have seen is the best so far
                self.best_losses = losses
                # normalize the accumulated 'full' loss by the combined set size
                self.best_losses['full'] /= self.size_training_validation_set
                self.best_neural_network = neural_network
            elif self.best_losses['val'] - self.delta <= losses['val']:
                # validation loss did not improve by at least `delta`
                self.counter += 1
                if self.counter >= self.patience:
                    return True
            else:
                # improvement found: store the new best and reset the counter
                self.best_losses = losses
                self.best_losses['full'] /= self.size_training_validation_set
                self.best_neural_network = neural_network
                self.counter = 0
            return False
class CustomDataset(utils.data.Dataset):
"""Class that represents the dataset to use in PyTorch.
Parameters
----------
training_data
Set of training parameters and the respective coefficients of the
solution in the reduced basis.
"""
def __init__(self, training_data):
self.training_data = training_data
def __len__(self):
return len(self.training_data)
def __getitem__(self, idx):
t = self.training_data[idx]
return t
|
{"hexsha": "f41c80b8a97130bbfcfcbd769a79cc83c383f9f8", "size": 24315, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pymor/reductors/neural_network.py", "max_stars_repo_name": "TreeerT/pymor", "max_stars_repo_head_hexsha": "e8b18d2d4c4b5998f0bd84f6728e365e0693b753", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-17T15:55:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-17T15:55:12.000Z", "max_issues_repo_path": "src/pymor/reductors/neural_network.py", "max_issues_repo_name": "TreeerT/pymor", "max_issues_repo_head_hexsha": "e8b18d2d4c4b5998f0bd84f6728e365e0693b753", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pymor/reductors/neural_network.py", "max_forks_repo_name": "TreeerT/pymor", "max_forks_repo_head_hexsha": "e8b18d2d4c4b5998f0bd84f6728e365e0693b753", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.6764705882, "max_line_length": 119, "alphanum_fraction": 0.5726095003, "include": true, "reason": "import numpy", "num_tokens": 4383}
|
module SD_ale_boundary_operator
#include <messenger.h>
    use mod_kinds,          only: rk,ik
    use mod_constants,      only: ZERO,ONE,TWO,HALF
    use type_operator,      only: operator_t
    use type_chidg_worker,  only: chidg_worker_t
    use type_properties,    only: properties_t
    use DNAD_D
    use ieee_arithmetic
    implicit none
    private

    !>  Boundary-average diffusive flux operator for the scalar diffusion
    !!  equation in an ALE (moving-grid) formulation.
    !!
    !!  @author Nathan A. Wukie
    !!
    !--------------------------------------------------------------------------------
    type, extends(operator_t), public :: SD_ale_boundary_operator_t
    contains
        procedure   :: init
        procedure   :: compute
    end type SD_ale_boundary_operator_t
    !********************************************************************************

contains

    !>  Register the operator's name, type, and the primary field ('u')
    !!  it contributes to.
    !!
    !!  @author Nathan A. Wukie (AFRL)
    !!  @date   8/29/2016
    !!
    !--------------------------------------------------------------------------------
    subroutine init(self)
        class(SD_ale_boundary_operator_t),   intent(inout)      :: self

        !
        ! Set operator name
        !
        call self%set_name("Scalar Diffusion ALE Boundary Average Operator")

        !
        ! Set operator type
        !
        call self%set_operator_type("Boundary Diffusive Operator")

        !
        ! Set operator equations
        !
        call self%add_primary_field("u")

    end subroutine init
    !********************************************************************************


    !>  Compute the diffusive boundary flux for scalar linear diffusion:
    !!  average the interior/exterior fluxes -mu*grad(u), map the result to
    !!  the reference configuration, dot with the face normal, and integrate.
    !!
    !!  @author Nathan A. Wukie
    !!
    !!  @param[in]      mesh    Mesh data
    !!  @param[inout]   sdata   Solver data. Solution, RHS, Linearization etc.
    !!  @param[in]      ielem   Element index
    !!  @param[in]      iface   Face index
    !!  @param[in]      iblk    Block index indicating the linearization direction
    !!
    !-----------------------------------------------------------------------------------------
    subroutine compute(self,worker,prop)
        class(SD_ale_boundary_operator_t),  intent(inout)   :: self
        type(chidg_worker_t),               intent(inout)   :: worker
        class(properties_t),                intent(inout)   :: prop

        ! AD-typed flux components, interior/exterior contributions, and
        ! diffusion coefficients at the quadrature nodes
        type(AD_D), allocatable, dimension(:)   ::  &
            flux_1, flux_2, flux_3,                 &
            flux_m, flux_p, integrand,              &
            mu_m, mu_p

        ! face normal components at the quadrature nodes
        real(rk),   allocatable, dimension(:)   ::  &
            norm_1, norm_2, norm_3

        ! solution gradients on the interior (m) / exterior (p) side and the
        ! flux after mapping back to the reference configuration
        type(AD_D), allocatable, dimension(:,:) ::  &
            gradu_m, gradu_p,                       &
            flux_ref

        !
        ! Interpolate solution to quadrature nodes
        !
        gradu_m = worker%get_primary_field_grad_ale_face('u', 'gradient + lift','face interior')
        gradu_p = worker%get_primary_field_grad_ale_face('u', 'gradient + lift','face exterior')

        norm_1 = worker%normal(1)
        norm_2 = worker%normal(2)
        norm_3 = worker%normal(3)

        !
        ! Compute scalar coefficient
        !
        mu_m = worker%get_model_field_face('Scalar Diffusion Coefficient', 'value', 'face interior')
        mu_p = worker%get_model_field_face('Scalar Diffusion Coefficient', 'value', 'face exterior')

        ! average of interior/exterior diffusive fluxes, component 1
        flux_m = -mu_m*gradu_m(:,1)
        flux_p = -mu_p*gradu_p(:,1)
        flux_1 = HALF*(flux_m + flux_p)

        ! component 2
        flux_m = -mu_m*gradu_m(:,2)
        flux_p = -mu_p*gradu_p(:,2)
        flux_2 = HALF*(flux_m + flux_p)

        ! component 3
        flux_m = -mu_m*gradu_m(:,3)
        flux_p = -mu_p*gradu_p(:,3)
        flux_3 = HALF*(flux_m + flux_p)

        ! map the averaged flux back to the reference (ALE) configuration
        flux_ref = worker%post_process_boundary_diffusive_flux_ale(flux_1, flux_2, flux_3, 'face interior')

        !
        ! Compute boundary average flux
        !
        integrand = flux_ref(:,1)*norm_1 + flux_ref(:,2)*norm_2 + flux_ref(:,3)*norm_3

        !
        ! Integrate flux
        !
        call worker%integrate_boundary('u',integrand)

    end subroutine compute
    !**************************************************************************************************

end module SD_ale_boundary_operator
|
{"hexsha": "b29cd5909cd835efd4abf865c73fe40aeeb66294", "size": 4211, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/equations/scalar/scalar_diffusion_ale/SD_ale_boundary_operator.f90", "max_stars_repo_name": "wanglican/ChiDG", "max_stars_repo_head_hexsha": "d3177b87cc2f611e66e26bb51616f9385168f338", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2016-10-05T15:12:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T02:08:23.000Z", "max_issues_repo_path": "src/equations/scalar/scalar_diffusion_ale/SD_ale_boundary_operator.f90", "max_issues_repo_name": "haohb/ChiDG", "max_issues_repo_head_hexsha": "d3177b87cc2f611e66e26bb51616f9385168f338", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2016-05-17T02:21:05.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-10T16:33:07.000Z", "max_forks_repo_path": "src/equations/scalar/scalar_diffusion_ale/SD_ale_boundary_operator.f90", "max_forks_repo_name": "haohb/ChiDG", "max_forks_repo_head_hexsha": "d3177b87cc2f611e66e26bb51616f9385168f338", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2016-07-18T16:20:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-27T19:26:12.000Z", "avg_line_length": 26.821656051, "max_line_length": 107, "alphanum_fraction": 0.4977440038, "num_tokens": 1004}
|
#!/usr/bin/env python
# Author: Tony Zheng
import rospy
import time
import roslib
import sys
import cv2
import scipy.linalg
import numpy as np
from geometry_msgs.msg import Twist
from std_msgs.msg import String, Int32, Float32, Float32MultiArray, Bool, Float64
from sensor_msgs.msg import Image, CompressedImage
from math import sqrt, atan, pi, pow, cos, sin, asin, tan, atan2
from barc.msg import barc_state,ECU, Input, Moving
from cv_bridge import CvBridge
# state estimation node
class image_processing_node():
    def __init__(self):
        """Set up the camera, calibration maps, ROS pub/subs, then run the
        frame-grabbing/edge-detection loop until ROS shuts down.

        NOTE: the processing loop lives inside the constructor, so
        instantiating this class blocks until shutdown.
        """
        # Define camera settings
        # put bright, contrast, saturation, hue into image_processing_param.yaml file
        self.vid = cv2.VideoCapture(rospy.get_param("/videoDevicePath")) # Sets the port /dev/video6 as the video device
        self.vid.set(10, rospy.get_param("/brightness")) # brightness
        self.vid.set(11, rospy.get_param("/contrast")) # contrast
        self.vid.set(12, rospy.get_param("/saturation")) # saturation
        self.vid.set(13, rospy.get_param("/hue")) # hue
        # Declare calibration matrices to rectify the image
        self.mtx = np.array(rospy.get_param("/mtx"))
        self.dist = np.array(rospy.get_param("/dist"))
        # Camera resolution
        self.width = rospy.get_param("/width")
        self.height = rospy.get_param("/height")
        # Reference velocity
        self.v_ref = rospy.get_param("/reference_velocity")
        # Number of points for moving average filter
        self.numMovingAveragePoints = rospy.get_param("/numMovingAveragePoints")
        self.movingAverageValue = np.zeros([2,self.numMovingAveragePoints])
        # Number of sample points at the reference velocity to check along the path
        self.numpoints = rospy.get_param("/numStepsToLookAhead")
        # Set node loop rate (30 hz)
        self.loop_rate = rospy.get_param("/loop_rate")
        self.dt = 1.0 / self.loop_rate
        self.rate = rospy.Rate(self.loop_rate)
        # Polynomial coefficient sets for the pixel <-> inertial-frame mappings
        # (see the calc_* / f1 / f2 / b_eq helpers below)
        self.f1Matrix = rospy.get_param("/f1Matrix")
        self.f2Matrix = rospy.get_param("/f2Matrix")
        self.bMatrix = rospy.get_param("/bMatrix")
        self.yPixel_to_xInertial_Matrix = rospy.get_param("/yPixel_to_xInertial_Matrix")
        self.xInertial_to_yPixel_Matrix = rospy.get_param("/xInertial_to_yPixel_Matrix")
        self.furthest_distance = rospy.get_param("/furthest_distance")
        self.camera_offset_distance = rospy.get_param("/camera_offset_distance")
        self.flipped_camera = rospy.get_param("/flipped_camera")
        # Compute the undistortion and rectification transformation map
        self.newcameramtx, self.roi = cv2.getOptimalNewCameraMatrix(self.mtx,self.dist,(self.width,self.height),0,(self.width,self.height))
        self.mapx,self.mapy = cv2.initUndistortRectifyMap(self.mtx,self.dist,None,self.newcameramtx,(self.width,self.height),5)
        # Messages to be filled
        self.state_constraints = barc_state()
        self.reference_trajectory = barc_state()
        self.bridge = CvBridge()
        # Initialize publishers and subscribers
        self.moving_pub = rospy.Publisher("moving", Moving, queue_size=1)
        self.hold_previous_turn_pub = rospy.Publisher("hold_previous_turn", Bool, queue_size=1)
        self.moving_pub.publish(True)
        self.reference_trajectory_pub = rospy.Publisher("reference_trajectory", barc_state, queue_size = 1)
        self.reference_image_pub = rospy.Publisher("image_raw", Image, queue_size = 1)
        self.uOpt_pub = rospy.Publisher("uOpt", Input, queue_size=1)
        self.optimal_state_sub = rospy.Subscriber("optimal_state_trajectory", barc_state, self.convertDistanceToPixels)
        self.dt_pub = rospy.Publisher("dt", Float64, queue_size=1)
        # The boolean messages passed to these topics are not used, we only want them for the independently threaded callback function.
        self.draw_lines_pub = rospy.Publisher("draw_lines", Bool, queue_size=1)
        self.draw_lines_sub = rospy.Subscriber("draw_lines", Bool, self.draw_lines,queue_size=1)
        self.publish_states_pub = rospy.Publisher("publish_states", Bool, queue_size=1)
        self.publish_states_sub = rospy.Subscriber("publish_states", Bool, self.publish_states,queue_size=1)
        self.show_Image_pub = rospy.Publisher("show_Image", Bool, queue_size=1)
        self.show_Image_sub = rospy.Subscriber("show_Image", Bool, self.show_Image,queue_size=1)
        # Loop bookkeeping / timing state
        self.count = 0
        self.totalTimeCounter = 0
        self.totalTime = 0
        self.averageTime = 0
        self.publish_image = True;
        self.previousTime = time.time()
        self.printme = False
        self.statepoints=''
        self.camera_distance_calibrated = False
        print("Press Up Arrow to start moving. Press Down Arrow to stop moving.")
        while not rospy.is_shutdown():
            try:
                self.count = self.count +1 # updates the count
                self.rel,self.dst = self.vid.read() # gets the current frame from the camera
                # Updates the sample time
                self.dt = time.time() - self.previousTime
                self.previousTime = time.time()
                if self.flipped_camera:
                    self.cv_image = cv2.flip(cv2.remap(self.dst,self.mapx,self.mapy,cv2.INTER_LINEAR),-1) #Undistorts the fisheye image to rectangular
                else:
                    self.cv_image = cv2.remap(self.dst,self.mapx,self.mapy,cv2.INTER_LINEAR) #Undistorts the fisheye image to rectangular
                self.x,self.y,self.width,self.height = self.roi
                # colorFilter = True makes the edge detection search for a red/white track using HSV. False will use grayscale and search for any edge regardless of color
                colorFilter = rospy.get_param("/colorFilter")
                kernel_size = rospy.get_param("/kernel_size")
                if colorFilter:
                    imageToFilter = self.cv_image
                    imageToFilter[0:280,0:self.width] = 0 #blacks out the top portion of the image (not used)
                    #self.hsv = cv2.cvtColor(imageToFilter, cv2.COLOR_BGR2HSV) #.004
                    # define range of color thresholds in (B,G,R)
                    lower_red = np.flipud(np.array(rospy.get_param("/lower_red")))
                    upper_red = np.flipud(np.array(rospy.get_param("/upper_red")))
                    lower_white = np.flipud(np.array(rospy.get_param("/lower_white")))
                    upper_white = np.flipud(np.array(rospy.get_param("/upper_white")))
                    # Threshold the image to only have the red/white track appear
                    self.reds = cv2.inRange(imageToFilter, lower_red, upper_red)
                    self.whites = cv2.inRange(imageToFilter, lower_white, upper_white)
                    self.edges = cv2.bitwise_or(self.reds,self.whites) # combines the red filter and white filter images
                    self.edges = cv2.GaussianBlur(self.edges,(kernel_size,kernel_size),0) # blurs the image
                    retval, self.edges = cv2.threshold(self.edges,127,255,cv2.THRESH_BINARY) # converts the blurred greyscale to binary to filter once more
                else:
                    # Convert Color Image to Grayscale
                    gray_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
                    gray_image[0:270,0:self.width] = 0
                    gray_image = cv2.GaussianBlur(gray_image, (kernel_size, kernel_size), 0)
                    self.edges = cv2.Canny(gray_image,40,80) # openCV edge detection function
                # Parameters to combine image: dst = alpha*img1+beta*img2+gamma
                alpha = 0.6
                beta = 1
                gamma = 0
                # overlayPointsOnColoredImage = True makes the path show up on top of the colored image.
                # Draw lanes over image (color or black/white)
                overlayPointsOnColoredImage = rospy.get_param("/overlayPointsOnColoredImage")
                if overlayPointsOnColoredImage:
                    self.line_img_color = np.zeros(self.cv_image.shape, dtype=np.uint8)
                    self.pathOverlayedImage = cv2.addWeighted(self.cv_image,alpha,self.line_img_color,beta,gamma)
                else:
                    self.edges_color = cv2.cvtColor(self.edges, cv2.COLOR_GRAY2RGB)
                    self.line_img_color = np.zeros(self.edges_color.shape, dtype=np.uint8)
                    self.pathOverlayedImage = cv2.addWeighted(self.edges_color,alpha,self.line_img_color,beta,gamma)
                # Collect 100 images before running image processing
                if self.count>100:
                    self.draw_lines_pub.publish(True)
                # Publish image with lanes
                if self.publish_image:
                    # NOTE(review): `if True:` makes the else branch unreachable --
                    # this looks like a reduced try/except; confirm intent.
                    if True:
                        self.reference_image_pub.publish(self.bridge.cv2_to_imgmsg(cv2.cvtColor(self.edges,cv2.COLOR_GRAY2RGB), "bgr8"))
                    else:
                        print('Could not publish reference image')
                # Check the true loop rate of incoming images from camera (i.e. frames per second should match parameter specified in launch file)
                if (self.count > 100 and self.count%3==0):
                    self.totalTimeCounter +=1
                    self.timenext = time.time()
                    self.timeElapsed = self.timenext - self.previousTime
                    self.totalTime = self.totalTime+self.timeElapsed
                    self.averageTime = self.totalTime/(self.totalTimeCounter)
                    self.dt_pub.publish(self.averageTime)
                    #print('Average Time: ',self.averageTime)
                self.rate.sleep()
            except IOError, (ErrorNumber, ErrorMessage):
                print('HERE')
                print('HERE')
                print(ErrorMessage)
                pass
#################################################################################
    def show_Image(self,data):
        """Callback: display the current path-overlay image in an OpenCV window.

        data: unused Bool message -- the topic exists only to trigger this
        callback on its own thread.
        """
        #cv2.imshow("Advanced Lane Detection ed", self.edges[270:480,:])
        #cv2.imshow("Advanced Lane Detection", self.pathOverlayedImage)
        #cv2.imshow('cv_image',self.cv_image[270:480,:])
        cv2.imshow("Advanced Lane Detection", self.pathOverlayedImage[270:480,:])
        cv2.waitKey(3) # Waitkey is necessary to update image
##############################################################################################
    def draw_lines(self,data):
        """Callback: sample look-ahead points along the track, locate the lane
        edges on each sampled row, build the centerline, and draw everything
        onto self.pathOverlayedImage.

        data: unused Bool message -- the topic exists only to trigger this
        callback on its own thread.
        Side effects: sets self.globalMidpointList / globalLeftTrackPointList /
        globalRightTrackPointList, publishes hold_previous_turn, and triggers
        the show_Image and publish_states callbacks.
        """
        thickness = 3
        color = [0, 0, 255 ]
        img = self.line_img_color
        height, width = self.edges.shape
        index_x = (width)//2
        offset = 0
        previous_x = index_x
        previous_y = 0
        endtrack = False
        self.stopMoving = False
        converge_limit = rospy.get_param("/converge_limit") # if the left and right paths converge within this many pixels, it indicates an obstacle or dead end which stops the vehicle
        i=0
        dt = self.averageTime
        y_base = -1
        for k in xrange(1,self.numpoints+1,1):
            # Starting with one time step ahead, finds the pixel corresponding to that distance
            while self.camera_distance_calibrated == False:
                # One-time calibration: grow camera_offset_distance until the
                # first look-ahead point maps to a pixel row inside the image.
                xIforward = (self.v_ref*dt*k)+self.camera_offset_distance
                y_base = int(self.calc_x_Inertial_to_y_newPixel(xIforward))
                if y_base < 1:
                    self.camera_offset_distance = self.camera_offset_distance+0.005
                else:
                    self.camera_distance_calibrated = True
            xIforward = (self.v_ref*dt*k)+self.camera_offset_distance
            y_base = int(self.calc_x_Inertial_to_y_newPixel(xIforward))
            index_y = height - y_base
            index_x = previous_x
            # finds the lane edges at the x and y value of the pixel
            x_left, x_right,y_left,y_right = self.find_lane_edges(self.edges, index_x, index_y, width)
            if (not(k==1)and (x_right-x_left)<converge_limit):
                # Vehicle stops moving when the lane edges converge. Previous left and right lane edge pixel values are held.
                x_left = leftpts[-1][0]
                x_right = rightpts[-1][0]
                y_left = leftpts[-1][1]
                y_right = rightpts[-1][1]
                self.stopMoving = True
            midpointx = (x_right + x_left)//2
            midpointy = (y_right + y_left)//2
            midpts = np.array([(midpointx,midpointy)],dtype = np.int32)
            leftpts = np.array([(x_left,y_left)],dtype = np.int32)
            rightpts = np.array([(x_right,y_right)],dtype = np.int32)
            # if it is the first longitudinal point, the lists are made of the pixel values of the centerlane and lane edges. Otherwise, the pixel values are appended to the array.
            if (k==1):
                midpointlist = midpts
                leftlist = leftpts
                rightlist = rightpts
            else:
                midpointlist = np.concatenate((midpointlist,midpts))
                leftlist = np.concatenate((leftlist,leftpts))
                rightlist = np.concatenate((rightlist,rightpts))
            # Draws the centerline segment (needs a previous point, so k>1) and
            # circles on the image where the lane edges are.
            if (not(k==1)):
                cv2.line(self.pathOverlayedImage, (midpointx, midpointy),(previous_x, previous_y), (0,255,255),3)
            cv2.circle(self.pathOverlayedImage, (x_right,y_right), 4, (0,255,255), -1)
            cv2.circle(self.pathOverlayedImage, (x_left, y_left), 4, (0,255,255), -1)
            cv2.circle(self.pathOverlayedImage, (midpointx, midpointy), 3, (0,255,255), -1)
            previous_x = midpointx
            previous_y = midpointy
            if self.statepoints:
                # if there is an optimal trajectory found, plots the points
                j = k-1
                if (self.statepoints[1][j]>self.height):
                    # clamp the optimal-state row to stay inside the image
                    self.statepoints[1][j] = self.height-2
                if ((j>0) and self.count > 10):
                    previous_statex = self.statepoints[0][j-1]
                    previous_statey = self.statepoints[1][j-1]
                    """
                    print(self.statepoints[0][j])
                    print(self.statepoints[1][j])
                    print('')"""
                    cv2.line(self.pathOverlayedImage, (self.statepoints[0][j], self.statepoints[1][j]),(previous_statex, previous_statey), (0, 255,0),3)
                    cv2.circle(self.pathOverlayedImage, (self.statepoints[0][j],self.statepoints[1][j]), 4, (0, 255,0), -1)
            #self.statepoints = ''
            if endtrack:
                break
            i+=1
        self.globalMidpointList = midpointlist
        self.globalLeftTrackPointList = leftlist
        self.globalRightTrackPointList = rightlist
        # If either lane-edge x column is constant across all samples, hold the
        # previous turn command instead of computing a new one.
        if np.unique(leftlist[:,0]).shape[0] == 1 or np.unique(rightlist[:,0]).shape[0] == 1:
            self.hold_previous_turn_pub.publish(True)
        else:
            self.hold_previous_turn_pub.publish(False)
        self.show_Image_pub.publish(True)
        self.publish_states_pub.publish(True)
#################################################################################
def find_lane_edges(self,img,x,y,width):
"""
Finds the edge of the track by searching starting from the center of the image and towards the edge along that row of pixels. Also removes some noise with a custom filter
"""
leftempty = True;
rightempty = True;
y_left = y
y_right = y
boxsize = rospy.get_param("/boxsize") # checks a box of +-boxsize points around the pixel to see if there are any breaks
while leftempty:
xleftarray = np.arange(x) # number range from 0 to the center point
x_left_index = np.where(img[y_left,xleftarray]>0) #finds the values along a certain row where the pixel value >0 which indicates the track
try:
i = -1
while leftempty:
# starts from the last value in the row to see if the pixel is noise or part of the track by checking for continuity along the edges of the box
x_left = xleftarray[x_left_index[0][i]]
leftbound = x_left-boxsize
if leftbound <0:
leftbound=0
Top = img[y_left-boxsize,np.arange(leftbound,x_left+boxsize)]
Bottom = img[y_left+boxsize,np.arange(leftbound,x_left+boxsize)]
Right = img[np.arange(y_left-boxsize,y_left+boxsize),x_left+boxsize]
Left = img[np.arange(y_left-boxsize,y_left+boxsize),leftbound]
# if the box around the pixel does not have any bright values along all four edges, this is likely a noisy pixel rather the track
if (all(Top==0) and all(Bottom==0) and all(Right==0) and all(Left==0)):
i-=1
else:
leftempty = False;
except:
x_left = 0
leftempty = False;
while rightempty:
xrightarray = np.arange(x,width) # number range from the center point to the right edge of the image
x_right_index = np.where(img[y_right,xrightarray]>0)
try:
i = 0
while rightempty:
# starts from the first value in the row to see if the pixel is noise or part of the track by checking for continuity along the edges of the box
x_right = xrightarray[x_right_index[0][i]]
rightbound = x_right+boxsize
if rightbound >=self.width:
rightbound=self.width-1
Top = img[y_right-boxsize,np.arange(x_right-boxsize,rightbound)]
Bottom = img[y_right+boxsize,np.arange(x_right-boxsize,rightbound)]
Right = img[np.arange(y_right-boxsize,y_right+boxsize),rightbound]
Left = img[np.arange(y_right-boxsize,y_right+boxsize),x_right-10]
# if the box around the pixel does not have any bright values along all four edges, this is likely a noisy pixel rather the track
if (all(Top==0) and all(Bottom==0) and all(Right==0) and all(Left==0)):
i+=1
else:
rightempty = False;
except:
x_right = self.width
rightempty = False;
return (x_left, x_right,y_left,y_right)
######################################################################################
    def publish_states(self,data):
        """ Converts the centerlane from pixel coordinates to inertial coordinates. Then, publishes the reference trajectory.

        data: unused Bool message -- the topic exists only to trigger this
        callback on its own thread.
        NOTE(review): midpointlist aliases self.globalMidpointList, so the
        pixel -> newpixel shift below mutates the stored array in place --
        confirm the callback cannot run twice on the same array.
        """
        midpointlist = self.globalMidpointList
        leftlist = self.globalLeftTrackPointList
        rightlist = self.globalRightTrackPointList
        midpointlist[:,0] = midpointlist[:,0]-self.width/2 # Convert x_pixel to x_newpixel
        midpointlist[:,1] = self.height-midpointlist[:,1] # Convert y_pixel to y_newpixel
        if ((self.count%1000 == 1) and self.printme):
            print("\nReference Trajectory")
            print(midpointlist)
        midlist_x_Inertial,midlist_y_Inertial = self.convertPixelsToDistance(midpointlist)
        self.reference_trajectory.x = midlist_x_Inertial.tolist()
        self.reference_trajectory.y = midlist_y_Inertial.tolist()
        # Stop the vehicle when the furthest visible centerline point is
        # closer than the configured look-ahead distance.
        if (midlist_x_Inertial[-1] <self.furthest_distance):
            self.stopMoving = True
            self.moving_pub.publish(False)
        else:
            self.moving_pub.publish(True)
            self.stopMoving = False
        if ((self.count%10 == 1) and self.printme):
            print(self.reference_trajectory)
        self.reference_trajectory_pub.publish(self.reference_trajectory)
######################################################################################
def convertPixelsToDistance(self,inputarray):
x_newPixel_list = inputarray[:,0]
y_newPixel_list = inputarray[:,1]
transformed_y_Inertial_list = np.float32(x_newPixel_list)
transformed_x_Inertial_list = np.float32(y_newPixel_list)
for i in np.arange(len(x_newPixel_list)):
x = x_newPixel_list[i]
y = y_newPixel_list[i]
transformed_y_Inertial_list[i] = self.calc_x_newPixel_to_y_Inertial(x,y) #number of xpixels from center divided by xpixels per foot
transformed_x_Inertial_list[i] = self.calc_y_newPixel_to_x_Inertial(y)
return transformed_x_Inertial_list,transformed_y_Inertial_list
    def calc_x_newPixel_to_y_Inertial(self,x_newPixel,y_newPixel):
        # Transforms the xnewpixel into the yinertial frame.
        # First pass: estimate y_Inertial assuming no pixel skew.
        x_Inertial = self.calc_y_newPixel_to_x_Inertial(y_newPixel)
        y_Inertial = (x_newPixel-self.b_eq(x_Inertial))/self.f2(x_Inertial)
        # Estimate the row skew caused by the lateral offset via f1, then
        # redo the lookup at the de-skewed row coordinate.
        y_newPixelskewed = self.f1(y_Inertial)
        x_Inertial = self.calc_y_newPixel_to_x_Inertial(y_newPixel-y_newPixelskewed)
        y_Inertial = (x_newPixel-self.b_eq(x_Inertial))/self.f2(x_Inertial)
        # Sign flip between the pixel x axis and the inertial lateral axis
        # (presumably set by the MATLAB calibration convention -- confirm).
        y_Inertial = -y_Inertial
        return y_Inertial
# define auxiliary functions for mapping from pixel coordinate to inertial frame coordinate
# these mapping are 3-rd order polynomials
# coefficients from polynomials computed in MATLAB ....
################################################################################
def f1(self,y_Inertial):
m1 = np.polyval(self.f1Matrix,y_Inertial)
return m1
def f2(self,x_Inertial):
m2 = np.polyval(self.f2Matrix,x_Inertial)
return m2
def b_eq(self,x_Inertial):
b = np.polyval(self.bMatrix,x_Inertial)
return b
def calc_y_newPixel_to_x_Inertial(self,y_newPixel):
# Transforms the ynewpixel into xinertial frame
x_Inertial = np.polyval(self.yPixel_to_xInertial_Matrix,y_newPixel)
x_Inertial=x_Inertial
return x_Inertial
def calc_x_Inertial_to_y_newPixel(self,x_Inertial):
# Transforms the ynewpixel into xinertial frame
y_newPixel = np.polyval(self.xInertial_to_yPixel_Matrix,x_Inertial)
return y_newPixel
    def convertDistanceToPixels(self,inputarray):
        """Subscriber callback for optimal_state_trajectory: convert the
        optimal-state points from inertial coordinates back into image pixel
        coordinates and cache them in self.statepoints for drawing.

        inputarray: barc_state message with parallel x/y lists (inertial frame).
        """
        if len(inputarray.x)>0:
            xlist = inputarray.x
            ylist = inputarray.y
            xPixelList = list(xlist)
            yPixelList = list(ylist)
            for i in np.arange(len(xlist)):
                if i == 0:
                    # first point is pinned to the bottom-center of the image
                    xPixelList[i] = self.width/2
                    yPixelList[i] = self.height-1
                else:
                    x = xlist[i]
                    y = ylist[i]
                    # inverse of the pixel->inertial mapping, including the
                    # camera offset along the forward axis
                    xPixelList[i] = self.width/2-int(self.f2(x+self.camera_offset_distance)*y+self.b_eq(x+self.camera_offset_distance))
                    yPixelList[i] = self.height-int(self.calc_x_Inertial_to_y_newPixel(x+self.camera_offset_distance))-1
            self.statepoints = (xPixelList, yPixelList)
def shutdown_func():
    # ROS shutdown hook: close any OpenCV display windows.
    cv2.destroyAllWindows()
def main(args):
    """Initialize the image-processing ROS node and spin until shutdown.

    args: command-line argument list (accepted but unused).
    """
    # Intialize the node
    rospy.init_node('image_processing_node', anonymous=True)
    rospy.on_shutdown(shutdown_func)
    # NOTE: the constructor itself runs the processing loop until shutdown.
    image_processor_global = image_processing_node()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Entry point; a ROSInterruptException on shutdown is expected and ignored.
    try:
        main(sys.argv)
    except rospy.ROSInterruptException:
        pass
|
{"hexsha": "9415ed165b5f0ec75a45d328542366bf9e875a4d", "size": 23839, "ext": "py", "lang": "Python", "max_stars_repo_path": "workspace/src/labs/src/lab7/image_processing_lane_keeping.py", "max_stars_repo_name": "treyfortmuller/barc", "max_stars_repo_head_hexsha": "29c1e6e2f5ad530b4020c552eea8e82411c2a0af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 191, "max_stars_repo_stars_event_min_datetime": "2016-09-20T13:42:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T12:38:31.000Z", "max_issues_repo_path": "workspace/src/labs/src/lab7/image_processing_lane_keeping.py", "max_issues_repo_name": "treyfortmuller/barc", "max_issues_repo_head_hexsha": "29c1e6e2f5ad530b4020c552eea8e82411c2a0af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2016-10-27T18:46:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-10T05:11:31.000Z", "max_forks_repo_path": "workspace/src/labs/src/lab7/image_processing_lane_keeping.py", "max_forks_repo_name": "arunj088/barc", "max_forks_repo_head_hexsha": "859b385480e16de16cc4c4adb57f49e98bfd3ade", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 285, "max_forks_repo_forks_event_min_datetime": "2016-10-17T05:38:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T07:11:18.000Z", "avg_line_length": 48.8504098361, "max_line_length": 178, "alphanum_fraction": 0.6027937413, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5335}
|
import numpy as np
def bridge(var, steps, state=None):
    """1D Brownian bridge in the time interval [0,1]

    # Arguments
        var: variance of the underlying Brownian motion at t=1
        steps: number of time steps to simulate (result has steps+1 samples)
        state: state of the numpy global random number generator; if None,
            the generator's current state is captured and re-set (a no-op on
            the stream position), so passing the same state reproduces the
            same bridge

    # Result
        trace of the bridge: array of length steps+1, pinned to 0 at both ends
    """
    # idiom fix: identity comparison with None (`== None` is non-idiomatic
    # and can misbehave on array-like operands)
    if state is None:
        state = np.random.get_state()
    np.random.set_state(state)
    # Gaussian increments scaled so the un-pinned walk has variance `var` at t=1
    incs = np.random.randn(steps)
    incs = np.insert(incs, 0, 0)
    incs = incs * np.sqrt(var / steps)
    brownian = np.cumsum(incs)
    # pin the right endpoint to zero by subtracting the linear interpolant
    return brownian - brownian[-1] * np.linspace(0, 1, num=steps+1, endpoint=True)
|
{"hexsha": "ce283dc38e469488f503a939fd9f39ab37d1d148", "size": 620, "ext": "py", "lang": "Python", "max_stars_repo_path": "jellium/bridge.py", "max_stars_repo_name": "Christian-Hirsch/jellium", "max_stars_repo_head_hexsha": "f3887af722a1793207ecfeeb2fd2d8f81bbfc5c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jellium/bridge.py", "max_issues_repo_name": "Christian-Hirsch/jellium", "max_issues_repo_head_hexsha": "f3887af722a1793207ecfeeb2fd2d8f81bbfc5c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jellium/bridge.py", "max_forks_repo_name": "Christian-Hirsch/jellium", "max_forks_repo_head_hexsha": "f3887af722a1793207ecfeeb2fd2d8f81bbfc5c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5238095238, "max_line_length": 83, "alphanum_fraction": 0.6370967742, "include": true, "reason": "import numpy", "num_tokens": 173}
|
"""
This module provides the EvaluateModel class.
"""
import logging
import os
import warnings
import numpy as np
import torch
import torch.nn as nn
from selene_sdk.sequences import Genome
from selene_sdk.utils import (
PerformanceMetrics,
initialize_logger,
load_model_from_state_dict,
)
from sklearn.metrics import average_precision_score, roc_auc_score
from tqdm import trange
logger = logging.getLogger("selene")
class EvaluateModel(object):
"""
Evaluate model on a test set of sequences with known targets.
Mostly copied from
https://github.com/FunctionLab/selene/blob/master/selene_sdk/evaluate_model.py
Parameters
----------
model : torch.nn.Module
The model architecture.
criterion : torch.nn._Loss
The loss function that was optimized during training.
data_sampler : selene_sdk.samplers.Sampler
Used to retrieve samples from the test set for evaluation.
features : list(str)
List of distinct features the model predicts.
trained_model_path : str
Path to the trained model file, saved using `torch.save`.
batch_size : int, optional
Default is 64. Specify the batch size to process examples.
n_test_samples : int or None, optional
Default is `None`. Use `n_test_samples` if you want to limit the
number of samples on which you evaluate your model. If you are
using a sampler of type `selene_sdk.samplers.OnlineSampler`,
by default it will draw 640000 samples if `n_test_samples` is `None`.
report_gt_feature_n_positives : int, optional
Default is 10. In the final test set, each class/feature must have
more than `report_gt_feature_n_positives` positive samples in order to
be considered in the test performance computation. The output file that
states each class' performance will report 'NA' for classes that do
not have enough positive samples.
use_cuda : bool, optional
Default is `False`. Specify whether a CUDA-enabled GPU is available
for torch to use during training.
data_parallel : bool, optional
Default is `False`. Specify whether multiple GPUs are available
for torch to use during training.
log_cell_type_embeddings_to_tensorboard : bool, optional
Default is `True`. Wether to publish cell type embeddings to tensorboard.
NOTE: If True, a model should have `log_cell_type_embeddings_to_tensorboard`
method.
Attributes
----------
model : torch.nn.Module
The trained model.
criterion : torch.nn._Loss
The model was trained using this loss function.
sampler : selene_sdk.samplers.Sampler
The example generator.
features : list(str)
List of distinct features the model predicts.
use_cuda : bool
If `True`, use a CUDA-enabled GPU. If `False`, use the CPU.
"""
    def __init__(
        self,
        model,
        criterion,
        data_sampler,
        features,
        trained_model_path,
        batch_size=64,
        n_test_samples=None,
        report_gt_feature_n_positives=10,
        use_cuda=False,
        data_parallel=False,
        log_cell_type_embeddings_to_tensorboard=True,
        metrics=dict(roc_auc=roc_auc_score, average_precision=average_precision_score),
    ):
        self.n_test_samples = n_test_samples
        self.batch_size = batch_size
        self.sampler = data_sampler
        self.features = features
        self.use_cuda = use_cuda
        # evaluation artifacts are written next to the trained model file
        self.output_dir = os.path.join(
            os.path.dirname(trained_model_path), "evaluation/"
        )
        os.makedirs(self.output_dir, exist_ok=True)
        initialize_logger(
            os.path.join(self.output_dir, "{0}.log".format(__name__)), verbosity=2
        )
        logger.info("Evaluation results will be saved at\n{}".format(self.output_dir))
        self.criterion = criterion
        # load weights onto CPU regardless of where they were saved;
        # .cuda() below moves the model if requested
        trained_model = torch.load(
            trained_model_path, map_location=lambda storage, location: storage
        )
        # checkpoints may store either a wrapping dict or a raw state dict
        if "state_dict" in trained_model:
            self.model = load_model_from_state_dict(trained_model["state_dict"], model)
        else:
            self.model = load_model_from_state_dict(trained_model, model)
        if log_cell_type_embeddings_to_tensorboard:
            # requires the wrapped model to expose
            # log_cell_type_embeddings_to_tensorboard (see class docstring)
            self.model.model.log_cell_type_embeddings_to_tensorboard(
                self.features, self.output_dir
            )
        self.model.eval()
        if data_parallel:
            self.model = nn.DataParallel(self.model)
            logger.debug("Wrapped model in DataParallel")
        if self.use_cuda:
            self.model.cuda()
        self._metrics = PerformanceMetrics(
            self._get_feature_from_index,
            report_gt_feature_n_positives=report_gt_feature_n_positives,
            metrics=metrics,
        )
    def evaluate(self):
        """
        Passes all samples retrieved from the sampler to the model in batches and
        returns the predictions. Also reports the model's performance on these examples.

        Returns
        -------
        dict
            A dictionary, where keys are the features and the values are each a dict of
            the performance metrics (currently ROC AUC and AUPR) reported for each
            feature the model predicts.

        NOTE(review): n_test_samples must be a multiple of batch_size; passing
        n_test_samples=None (documented as allowed in __init__) raises a
        TypeError on the assert below -- confirm intended usage.
        """
        assert self.n_test_samples % self.batch_size == 0
        batch_losses = []
        all_predictions = []
        all_test_targets = []
        for _ in trange(
            self.n_test_samples // self.batch_size, desc="Evaluating batch..."
        ):
            samples_batch = self.sampler.sample(self.batch_size)
            all_test_targets.append(samples_batch.targets())
            inputs, targets = samples_batch.torch_inputs_and_targets(self.use_cuda)
            with torch.no_grad():
                predictions = self.model.forward(inputs)
                loss = self.criterion(predictions.reshape(targets.shape), targets)
                all_predictions.append(
                    predictions.data.cpu().numpy().reshape(targets.shape)
                )
                batch_losses.append(loss.item())
        all_predictions = np.vstack(all_predictions)
        all_test_targets = np.vstack(all_test_targets)
        # per-feature metrics plus visualizations written to output_dir
        average_scores = self._metrics.update(all_predictions, all_test_targets)
        self._metrics.visualize(all_predictions, all_test_targets, self.output_dir)
        loss = np.average(batch_losses)
        logger.info("test loss: {0}".format(loss))
        for name, score in average_scores.items():
            logger.info("test {0}: {1}".format(name, score))
        test_performance = os.path.join(self.output_dir, "test_performance.txt")
        feature_scores_dict = self._metrics.write_feature_scores_to_file(
            test_performance
        )
        return feature_scores_dict
def _get_feature_from_index(self, index):
"""
Gets the feature at an index in the features list.
Parameters
----------
index : int
Returns
-------
str
The name of the feature/target at the specified index.
"""
return self.features[index]
|
{"hexsha": "8a691c6aef7419221efe5ffbeaeab2da92ab23b6", "size": 7243, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/evaluation/evaluate_model.py", "max_stars_repo_name": "AIRI-Institute/DeepCT", "max_stars_repo_head_hexsha": "8e23fda101bd4a2bce2c98c5a73d97072a3892de", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/evaluation/evaluate_model.py", "max_issues_repo_name": "AIRI-Institute/DeepCT", "max_issues_repo_head_hexsha": "8e23fda101bd4a2bce2c98c5a73d97072a3892de", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/evaluation/evaluate_model.py", "max_forks_repo_name": "AIRI-Institute/DeepCT", "max_forks_repo_head_hexsha": "8e23fda101bd4a2bce2c98c5a73d97072a3892de", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4904761905, "max_line_length": 88, "alphanum_fraction": 0.6545630264, "include": true, "reason": "import numpy", "num_tokens": 1459}
|
import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
import ipdb

# Load per-detection loss tables for bad and good labels and tag them 0/1.
blabels = pd.read_csv('bad_label_losses.csv')
glabels = pd.read_csv('good_label_losses.csv')
blabels['label'] = 0
glabels['label'] = 1

# Keep only confident detections.
glabels = glabels[glabels['confidence'] > 0.9]
blabels = blabels[blabels['confidence'] > 0.9]

train_file = pd.concat((glabels,blabels))
train_file['xy_loss'] = train_file['x_loss'] + train_file['y_loss']
train_file['wh_loss'] = train_file['w_loss'] + train_file['h_loss']
train_file['total_loss'] = train_file['xy_loss'] + train_file['wh_loss']
# BUG FIX: replace/dropna return new frames; the result was previously discarded,
# leaving inf rows in the training data.
train_file = train_file.replace([np.inf], np.nan).dropna(subset=['total_loss'], how='all')

# BUG FIX: X was left as np.empty (uninitialized memory) with the feature
# assignments commented out; fill the single feature column with total_loss.
X = np.empty(shape=(train_file['total_loss'].shape[0],1))
X[:,0] = train_file['total_loss']
# X[:,0] = train_file['xy_loss']
# X[:,1] = train_file['wh_loss']
y = train_file['label']

clf = LogisticRegression()
# clf = SVC(probability=True)
clf.fit(X,y)
probs = clf.predict_proba(X)
preds = probs[:,1]  # probability of the "good label" class

# ROC curve and AUC on the training data, saved to roc.png.
fpr, tpr, threshold = metrics.roc_curve(y, preds)
roc_auc = metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('roc.png')
|
{"hexsha": "46c2f9ebc86355e490e1e7f71a31dc200dc6c76b", "size": 1422, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainLR.py", "max_stars_repo_name": "bhaprayan/keras-yolo3", "max_stars_repo_head_hexsha": "9ad5ff761f3baf9042a17327c0149591ce1307c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trainLR.py", "max_issues_repo_name": "bhaprayan/keras-yolo3", "max_issues_repo_head_hexsha": "9ad5ff761f3baf9042a17327c0149591ce1307c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trainLR.py", "max_forks_repo_name": "bhaprayan/keras-yolo3", "max_forks_repo_head_hexsha": "9ad5ff761f3baf9042a17327c0149591ce1307c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8301886792, "max_line_length": 77, "alphanum_fraction": 0.7109704641, "include": true, "reason": "import numpy", "num_tokens": 418}
|
c
c ----------------------------------------------------------------------
c
      subroutine fsicxx(fic,sc,gc,fmass,fwidth , fsic)
c
c this subroutine computes an off-shell antifermion wavefunction from a
c flowing-in external antifermion and a scalar boson.
c (note: the inputs below are a scalar s and scalar coupling gchf, not a
c vector boson as an earlier copy of this header stated.)
c
c input:
c       complex fic(6)         : flow-in  antifermion              |fic>
c       complex sc(3)          : input    scalar                      s
c       complex gc(2)          : coupling constants                 gchf
c       real    fmass          : mass  of output antifermion fc'
c       real    fwidth         : width of output antifermion fc'
c
c output:
c       complex fsic(6)        : off-shell fermion         |fc',s,fic>
c
      implicit none
      double complex fic(6),sc(3),fsic(6),gc(2),sl1,sl2,sr1,sr2,ds
      double precision pf(0:3),fmass,fwidth,pf2,p0p3,p0m3
c
c momentum of the off-shell fermion: incoming fermion minus the scalar
c (components 5,6 carry the momentum in the HELAS packed convention)
      fsic(5) = fic(5)-sc(2)
      fsic(6) = fic(6)-sc(3)
      pf(0) = dble( fsic(5))
      pf(1) = dble( fsic(6))
      pf(2) = dimag(fsic(6))
      pf(3) = dimag(fsic(5))
      pf2 = pf(0)**2-(pf(1)**2+pf(2)**2+pf(3)**2)
c Breit-Wigner propagator denominator with the output fermion mass/width
      ds = -sc(1)/dcmplx( pf2-fmass**2, fmass*fwidth )
      p0p3 = pf(0)+pf(3)
      p0m3 = pf(0)-pf(3)
      sl1 = gc(1)*(p0p3*fic(1)+dconjg(fsic(6))*fic(2))
      sl2 = gc(1)*(p0m3*fic(2) +fsic(6) *fic(1))
      sr1 = gc(2)*(p0m3*fic(3)-dconjg(fsic(6))*fic(4))
      sr2 = gc(2)*(p0p3*fic(4) -fsic(6) *fic(3))
      fsic(1) = ( gc(1)*fmass*fic(1) + sr1 )*ds
      fsic(2) = ( gc(1)*fmass*fic(2) + sr2 )*ds
      fsic(3) = ( gc(2)*fmass*fic(3) + sl1 )*ds
      fsic(4) = ( gc(2)*fmass*fic(4) + sl2 )*ds
c
      return
      end
|
{"hexsha": "549de7c5ba112c72c43162dfaa7ee4b7d6a45e13", "size": 1910, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "HELAS/fsicxx.f", "max_stars_repo_name": "valassi/mg5amc_test", "max_stars_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869", "max_stars_repo_licenses": ["NCSA"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-10-23T14:37:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-22T20:59:02.000Z", "max_issues_repo_path": "HELAS/fsicxx.f", "max_issues_repo_name": "valassi/mg5amc_test", "max_issues_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869", "max_issues_repo_licenses": ["NCSA"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2018-10-08T15:49:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-15T13:33:36.000Z", "max_forks_repo_path": "HELAS/fsicxx.f", "max_forks_repo_name": "valassi/mg5amc_test", "max_forks_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869", "max_forks_repo_licenses": ["NCSA"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-02-18T11:42:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-11T20:46:08.000Z", "avg_line_length": 40.6382978723, "max_line_length": 72, "alphanum_fraction": 0.4178010471, "num_tokens": 639}
|
""" Class average finetuning functions. Before using any of these finetuning
functions, ensure that the model is set up with nb_classes=2.
"""
from __future__ import print_function
import sys
import uuid
import numpy as np
from os.path import dirname
from time import sleep
from keras.optimizers import Adam
from global_variables import (
FINETUNING_METHODS,
WEIGHTS_DIR)
from finetuning import (
freeze_layers,
sampling_generator,
finetuning_callbacks,
train_by_chain_thaw,
find_f1_threshold)
def relabel(y, current_label_nr, nb_classes):
    """ Makes a binary classification for a specific class in a
        multi-class dataset.

    # Arguments:
        y: Outputs to be relabelled.
        current_label_nr: Current label number.
        nb_classes: Total number of classes.

    # Returns:
        Relabelled outputs of a given multi-class dataset into a binary
        classification dataset.
    """
    # A binary task with 1-D labels is already in the desired form
    if nb_classes == 2 and y.ndim == 1:
        return y

    # One-vs-rest: 1 where the selected class is active, 0 elsewhere
    relabelled = np.zeros(len(y))
    relabelled[np.where(y[:, current_label_nr] == 1)[0]] = 1
    return relabelled
def class_avg_finetune(model, texts, labels, nb_classes, batch_size,
                       method, epoch_size=5000,
                       nb_epochs=1000, error_checking=True,
                       verbose=True):
    """ Compiles and finetunes the given model.

    # Arguments:
        model: Model to be finetuned
        texts: List of three lists, containing tokenized inputs for training,
            validation and testing (in that order).
        labels: List of three lists, containing labels for training,
            validation and testing (in that order).
        nb_classes: Number of classes in the dataset.
        batch_size: Batch size.
        method: Finetuning method to be used. For available methods, see
            FINETUNING_METHODS in global_variables.py. Note that the model
            should be defined accordingly (see docstring for deepmoji_transfer())
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs. Doesn't matter much as early stopping is used.
        error_checking: If set to True, warnings will be printed when the label
            list has the wrong dimensions.
        verbose: Verbosity flag.

    # Returns:
        Model after finetuning,
        score after finetuning using the class average F1 metric.
    """
    if method not in FINETUNING_METHODS:
        raise ValueError('ERROR (class_avg_tune_trainable): '
                         'Invalid method parameter. '
                         'Available options: {}'.format(FINETUNING_METHODS))

    (X_train, y_train) = (texts[0], labels[0])
    (X_val, y_val) = (texts[1], labels[1])
    (X_test, y_test) = (texts[2], labels[2])

    # Unique filenames so concurrent runs do not clobber each other's weights
    checkpoint_path = '{}/deepmoji-checkpoint-{}.hdf5' \
                      .format(WEIGHTS_DIR, str(uuid.uuid4()))
    f1_init_path = '{}/deepmoji-f1-init-{}.hdf5' \
                   .format(WEIGHTS_DIR, str(uuid.uuid4()))

    # Check dimension of labels
    if error_checking:
        # Binary classification has two classes but one value
        expected_shape = 1 if nb_classes == 2 else nb_classes
        for ls in [y_train, y_val, y_test]:
            if len(ls.shape) <= 1 or not ls.shape[1] == expected_shape:
                print('WARNING (class_avg_tune_trainable): '
                      'The dimension of the provided '
                      'labels do not match the expected value. '
                      'Expected: {}, actual: {}'
                      .format(expected_shape, ls.shape[1]))
                break

    # NOTE(review): lr is only assigned for these four methods; if
    # FINETUNING_METHODS ever contains another option, lr would be unbound
    # when compiling below -- verify against global_variables.py.
    if method in ['last', 'new']:
        lr = 0.001
    elif method in ['full', 'chain-thaw']:
        lr = 0.0001

    loss = 'binary_crossentropy'

    # Freeze layers if using last
    if method == 'last':
        model = freeze_layers(model, unfrozen_keyword='softmax')

    # Compile model, for chain-thaw we compile it later (after freezing)
    if method != 'chain-thaw':
        adam = Adam(clipnorm=1, lr=lr)
        model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])

    # Training
    if verbose:
        print('Method: {}'.format(method))
        print('Classes: {}'.format(nb_classes))

    # chain-thaw trains layer groups sequentially; all other methods train
    # the (possibly partially frozen) model in a single pass
    if method == 'chain-thaw':
        result = class_avg_chainthaw(model, nb_classes=nb_classes,
                                     train=(X_train, y_train),
                                     val=(X_val, y_val),
                                     test=(X_test, y_test),
                                     batch_size=batch_size, loss=loss,
                                     epoch_size=epoch_size,
                                     nb_epochs=nb_epochs,
                                     checkpoint_weight_path=checkpoint_path,
                                     f1_init_weight_path=f1_init_path,
                                     verbose=verbose)
    else:
        result = class_avg_tune_trainable(model, nb_classes=nb_classes,
                                          train=(X_train, y_train),
                                          val=(X_val, y_val),
                                          test=(X_test, y_test),
                                          epoch_size=epoch_size,
                                          nb_epochs=nb_epochs,
                                          batch_size=batch_size,
                                          init_weight_path=f1_init_path,
                                          checkpoint_weight_path=checkpoint_path,
                                          verbose=verbose)
    return model, result
def prepare_labels(y_train, y_val, y_test, iter_i, nb_classes):
    """One-vs-rest relabelling of the train/val/test splits for class iter_i."""
    return tuple(relabel(split, iter_i, nb_classes)
                 for split in (y_train, y_val, y_test))
def prepare_generators(X_train, y_train_new, X_val, y_val_new, batch_size, epoch_size):
    """Build the training sample generator and one fixed validation batch.

    A single resampled validation set is drawn up front to avoid
    fluctuations in the validation metric between epochs.
    """
    train_gen = sampling_generator(X_train, y_train_new, batch_size,
                                   upsample=False)
    val_gen = sampling_generator(X_val, y_val_new,
                                 epoch_size, upsample=False)
    # Freeze the first draw as the validation data
    X_val_resamp, y_val_resamp = next(val_gen)
    return train_gen, X_val_resamp, y_val_resamp
def class_avg_tune_trainable(model, nb_classes, train, val, test, epoch_size,
                             nb_epochs, batch_size, init_weight_path,
                             checkpoint_weight_path, patience=5,
                             verbose=True):
    """ Finetunes the given model using the F1 measure.

    # Arguments:
        model: Model to be finetuned.
        nb_classes: Number of classes in the given dataset.
        train: Training data, given as a tuple of (inputs, outputs)
        val: Validation data, given as a tuple of (inputs, outputs)
        test: Testing data, given as a tuple of (inputs, outputs)
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        batch_size: Batch size.
        init_weight_path: Filepath where weights will be initially saved before
            training each class. This file will be rewritten by the function.
        checkpoint_weight_path: Filepath where weights will be checkpointed to
            during training. This file will be rewritten by the function.
        patience: Number of epochs without improvement before early stopping
            (passed to finetuning_callbacks).
        verbose: Verbosity flag.

    # Returns:
        F1 score of the trained model
    """
    total_f1 = 0
    # Multi-class data is handled one-vs-rest, once per class;
    # binary data needs only a single pass.
    nb_iter = nb_classes if nb_classes > 2 else 1

    # Unpack args
    X_train, y_train = train
    X_val, y_val = val
    X_test, y_test = test

    # Save and reload initial weights after running for
    # each class to avoid learning across classes
    model.save_weights(init_weight_path)

    for i in range(nb_iter):
        if verbose:
            print('Iteration number {}/{}'.format(i + 1, nb_iter))

        # Restore the pristine weights before training this class
        model.load_weights(init_weight_path, by_name=False)
        y_train_new, y_val_new, y_test_new = prepare_labels(y_train, y_val,
                                                            y_test, i, nb_classes)
        train_gen, X_val_resamp, y_val_resamp = \
            prepare_generators(X_train, y_train_new, X_val, y_val_new,
                               batch_size, epoch_size)

        if verbose:
            print("Training..")
        callbacks = finetuning_callbacks(checkpoint_weight_path, patience, verbose=2)
        steps = int(epoch_size / batch_size)
        model.fit_generator(train_gen, steps_per_epoch=steps,
                            max_q_size=2, epochs=nb_epochs,
                            validation_data=(X_val_resamp, y_val_resamp),
                            callbacks=callbacks, verbose=0)

        # Reload the best weights found to avoid overfitting
        # Wait a bit to allow proper closing of weights file
        sleep(1)
        model.load_weights(checkpoint_weight_path, by_name=False)

        # Evaluate
        y_pred_val = np.array(model.predict(X_val, batch_size=batch_size))
        y_pred_test = np.array(model.predict(X_test, batch_size=batch_size))

        # Decision threshold is tuned on validation, then scored on test
        f1_test, best_t = find_f1_threshold(y_val_new, y_pred_val,
                                            y_test_new, y_pred_test)
        if verbose:
            print('f1_test: {}'.format(f1_test))
            print('best_t: {}'.format(best_t))
        total_f1 += f1_test

    # Class-average F1 over all one-vs-rest runs
    return total_f1 / nb_iter
def class_avg_chainthaw(model, nb_classes, train, val, test, batch_size,
                        loss, epoch_size, nb_epochs, checkpoint_weight_path,
                        f1_init_weight_path, patience=5,
                        initial_lr=0.001, next_lr=0.0001,
                        seed=None, verbose=True):
    """ Finetunes given model using chain-thaw and evaluates using F1.
        For a dataset with multiple classes, the model is trained once for
        each class, relabeling those classes into a binary classification task.
        The result is an average of all F1 scores for each class.

    # Arguments:
        model: Model to be finetuned.
        nb_classes: Number of classes in the given dataset.
        train: Training data, given as a tuple of (inputs, outputs)
        val: Validation data, given as a tuple of (inputs, outputs)
        test: Testing data, given as a tuple of (inputs, outputs)
        batch_size: Batch size.
        loss: Loss function to be used during training.
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        checkpoint_weight_path: Filepath where weights will be checkpointed to
            during training. This file will be rewritten by the function.
        f1_init_weight_path: Filepath where weights will be saved to and
            reloaded from before training each class. This ensures that
            each class is trained independently. This file will be rewritten.
        patience: Number of epochs without improvement before early stopping
            (passed to finetuning_callbacks).
        initial_lr: Initial learning rate. Will only be used for the first
            training step (i.e. the softmax layer)
        next_lr: Learning rate for every subsequent step.
        seed: Random number generator seed.
            NOTE(review): accepted but not used in this function -- confirm
            whether it should be forwarded to the generators.
        verbose: Verbosity flag.

    # Returns:
        Averaged F1 score.
    """
    # Unpack args
    X_train, y_train = train
    X_val, y_val = val
    X_test, y_test = test

    total_f1 = 0
    # Multi-class data is handled one-vs-rest, once per class;
    # binary data needs only a single pass.
    nb_iter = nb_classes if nb_classes > 2 else 1

    # Snapshot the initial weights so every class starts from the same model
    model.save_weights(f1_init_weight_path)

    for i in range(nb_iter):
        if verbose:
            print('Iteration number {}/{}'.format(i + 1, nb_iter))

        model.load_weights(f1_init_weight_path, by_name=False)
        y_train_new, y_val_new, y_test_new = prepare_labels(y_train, y_val,
                                                            y_test, i, nb_classes)
        train_gen, X_val_resamp, y_val_resamp = \
            prepare_generators(X_train, y_train_new, X_val, y_val_new,
                               batch_size, epoch_size)

        if verbose:
            print("Training..")
        callbacks = finetuning_callbacks(checkpoint_weight_path, patience=patience, verbose=2)

        # Train using chain-thaw
        train_by_chain_thaw(model=model, train_gen=train_gen,
                            val_data=(X_val_resamp, y_val_resamp),
                            loss=loss, callbacks=callbacks,
                            epoch_size=epoch_size, nb_epochs=nb_epochs,
                            checkpoint_weight_path=checkpoint_weight_path,
                            initial_lr=initial_lr, next_lr=next_lr,
                            batch_size=batch_size, verbose=verbose)

        # Evaluate
        y_pred_val = np.array(model.predict(X_val, batch_size=batch_size))
        y_pred_test = np.array(model.predict(X_test, batch_size=batch_size))

        # Decision threshold is tuned on validation, then scored on test
        f1_test, best_t = find_f1_threshold(y_val_new, y_pred_val,
                                            y_test_new, y_pred_test)
        if verbose:
            print('f1_test: {}'.format(f1_test))
            print('best_t: {}'.format(best_t))
        total_f1 += f1_test

    # Class-average F1 over all one-vs-rest runs
    return total_f1 / nb_iter
|
{"hexsha": "13bc1b6b4db0459eef58b85f4a7175fca07ba0e4", "size": 13300, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepmoji/class_avg_finetuning.py", "max_stars_repo_name": "Bodzio-2/DeepMoji", "max_stars_repo_head_hexsha": "4081db22f9bcb0305ee930bf75c1e4cc76aa1c2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1401, "max_stars_repo_stars_event_min_datetime": "2017-08-03T07:03:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T15:26:16.000Z", "max_issues_repo_path": "deepmoji/class_avg_finetuning.py", "max_issues_repo_name": "Bodzio-2/DeepMoji", "max_issues_repo_head_hexsha": "4081db22f9bcb0305ee930bf75c1e4cc76aa1c2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 62, "max_issues_repo_issues_event_min_datetime": "2017-08-08T11:15:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T17:26:57.000Z", "max_forks_repo_path": "deepmoji/class_avg_finetuning.py", "max_forks_repo_name": "Bodzio-2/DeepMoji", "max_forks_repo_head_hexsha": "4081db22f9bcb0305ee930bf75c1e4cc76aa1c2f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 343, "max_forks_repo_forks_event_min_datetime": "2017-08-05T17:39:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T17:38:01.000Z", "avg_line_length": 40.6727828746, "max_line_length": 94, "alphanum_fraction": 0.6007518797, "include": true, "reason": "import numpy", "num_tokens": 2877}
|
'''
xECG Project Repository (https://github.com/jtrpinto/xECG)
File: train_model_uoftdb.py
- Uses data from prepare_data.py and the Model class from models.py to train a model
for biometric identification on the UofTDB database. The training routine can be found
at trainers.py.
"Explaining ECG Biometrics: Is It All In The QRS?"
João Ribeiro Pinto and Jaime S. Cardoso
19th International Conference of the Biometrics Special Interest Group (BIOSIG 2020)
joao.t.pinto@inesctec.pt | https://jtrpinto.github.io
'''
import os
import torch
import numpy as np
import pickle as pk
from torch import nn
from torch import optim
from models import Model
from trainers import train_model
from torchvision import transforms
from datasets import Dataset
from utils import stratified_train_validation_split
# Run on the first GPU when available, otherwise fall back to CPU
DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'

DSET_FILE = '/ctm-hdd-pool01/jtrp/xECG/uoftdb_data.pk'  # Pickle file obtained with prepare_data.py

FS = 200.0  # Data sampling frequency
N_IDS = 5   # Number of identities

SAVE_MODEL = "models/uoftdb_" + str(N_IDS) + "s"  # Where to save the model

N_EPOCHS = 5000  # number of training epochs
BATCH_SIZE = N_IDS * 2  # number of samples to get from the dataset at each iteration
VALID_SPLIT = 0.1  # number of enrollment samples per subject to be used for validation
# NOTE(review): 0.1 reads like a fraction, but the comment above says a
# count -- confirm against stratified_train_validation_split.
PATIENCE = 250  # for early stopping

DROPOUT = 0.5
LEARN_RATE = 1e-3
REG = 1e-3  # L2 penalty, applied via Adam's weight_decay below

# Building datasets
train_set = Dataset(DSET_FILE, FS, dataset='train', n_ids=N_IDS)
valid_set = Dataset(DSET_FILE, FS, dataset='validation', n_ids=N_IDS)

# creating data indices for training and validation splits
train_indices, valid_indices = stratified_train_validation_split(train_set.y, n_valid_per_class=VALID_SPLIT)
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_indices)

# shuffle stays False because the samplers already randomize the order
train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE,
                                           shuffle=False, num_workers=4,
                                           sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=BATCH_SIZE,
                                           shuffle=False, num_workers=4,
                                           sampler=valid_sampler)

# TRAINING THE MODEL ==============================================================================

print('\n ======= TRAINING MODEL ' + SAVE_MODEL + ' ======= \n')

model = Model(N=N_IDS, dropout=DROPOUT).to(DEVICE)
loss_fn = nn.CrossEntropyLoss()
optimiser = optim.Adam(model.parameters(), lr=LEARN_RATE, weight_decay=REG)

out = train_model(model, loss_fn, optimiser, train_loader, N_EPOCHS, DEVICE, patience=PATIENCE, valid_loader=valid_loader, filename=SAVE_MODEL)

# TESTING =========================================================================================

# Reload the checkpoint written during training before evaluating
model.load_state_dict(torch.load(SAVE_MODEL + '.pth', map_location=DEVICE))
model = model.to(DEVICE)

test_set = Dataset(DSET_FILE, FS, dataset='test', n_ids=N_IDS)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=4)

print('\n ======= TEST MODEL ' + SAVE_MODEL + ' ======= \n')

model.eval()
with torch.no_grad():
    test_loss = 0.
    t_corrects = 0
    t_total = 0
    for i, (X, y) in enumerate(test_loader):
        # copy the mini-batch to GPU
        X = X.float().to(DEVICE)
        y = y.to(DEVICE)
        ypred = model(X)  # forward pass
        test_loss += loss_fn(ypred, y)  # accumulate the loss of the mini-batch
        t_corrects += (torch.argmax(ypred, 1) == y).float().sum()
        t_total += y.shape[0]
    # Average the accumulated loss over the number of mini-batches
    # (i holds the last enumerate index after the loop)
    test_loss /= i + 1
    # Identification rate: fraction of correctly classified samples
    t_idr = t_corrects / t_total
    print('....test loss: {:.4f} :: IDR {:.4f}'.format(test_loss.item(), t_idr))
|
{"hexsha": "8a159e5be644219e8fa919a7e35e40985893172c", "size": 4060, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_model_uoftdb.py", "max_stars_repo_name": "jtrpinto/xECG", "max_stars_repo_head_hexsha": "e062cd94defb49ae946c1874267a36713733baba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-09T17:22:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-09T17:22:27.000Z", "max_issues_repo_path": "train_model_uoftdb.py", "max_issues_repo_name": "jtrpinto/xECG", "max_issues_repo_head_hexsha": "e062cd94defb49ae946c1874267a36713733baba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_model_uoftdb.py", "max_forks_repo_name": "jtrpinto/xECG", "max_forks_repo_head_hexsha": "e062cd94defb49ae946c1874267a36713733baba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9439252336, "max_line_length": 144, "alphanum_fraction": 0.6305418719, "include": true, "reason": "import numpy", "num_tokens": 952}
|
import numpy as np
class MeshIOInterface:
    """Read/write triangulated PLY meshes through the meshio package."""

    def read(self, mesh_file):
        """Load a PLY mesh and return (vertices, faces, normals, tcoords).

        normals and tcoords are None when the file carries no such
        per-vertex point data.
        """
        assert mesh_file[-4:] == '.ply', "Only PLY format for input mesh"

        # Import lazily so the backend is only required when used
        import meshio
        mesh = meshio.read(mesh_file)

        # A single cell block of triangles is required
        assert len(mesh.cells) == 1 and mesh.cells[0].type == 'triangle', "Mesh must be triangulated!"

        def stack_point_data(field_names):
            # Stack per-vertex scalar fields into one (n, k) array,
            # or return None when any of the fields is missing.
            if not all(name in mesh.point_data for name in field_names):
                return None
            columns = [mesh.point_data[name][:, None] for name in field_names]
            return np.hstack(columns)

        normals = stack_point_data(('nx', 'ny', 'nz'))
        tcoords = stack_point_data(('s', 't'))

        return mesh.points, mesh.cells[0].data, normals, tcoords

    def write(self, mesh_file, vertices, faces):
        """Write a triangle mesh to mesh_file via meshio."""
        import meshio
        meshio.write_points_cells(mesh_file, vertices, [("triangle", faces)])
class PyMeshInterface:
    """ Mesh reader/writer interface for pymesh """

    def read(self, mesh_file):
        """Load a triangulated PLY mesh.

        Returns a (vertices, faces, normals, tcoords) tuple; normals and
        tcoords are None when the mesh carries no such vertex attributes.
        """
        assert mesh_file[-4:] == '.ply', "Only PLY format for input mesh"

        # Read mesh
        import pymesh
        mesh = pymesh.load_mesh(mesh_file)

        # Check that it is triangulated
        assert mesh.faces.shape[1] == 3, "Mesh must be triangulated!"

        # Extract normals
        if all(k in mesh.get_attribute_names() for k in ('vertex_nx', 'vertex_ny', 'vertex_nz')):
            normals = np.hstack((mesh.get_attribute('vertex_nx')[:, None],
                                 mesh.get_attribute('vertex_ny')[:, None],
                                 mesh.get_attribute('vertex_nz')[:, None]))
        else:
            normals = None

        # Extract texture coordinates
        if all(k in mesh.get_attribute_names() for k in ('vertex_s', 'vertex_t')):
            tcoords = np.hstack((mesh.get_attribute('vertex_s')[:, None],
                                 mesh.get_attribute('vertex_t')[:, None]))
        else:
            tcoords = None

        return mesh.vertices, mesh.faces, normals, tcoords

    def clean(self, vertices, faces, normals=None, tcoords=None, tol=1e-12):
        """Remove vertices duplicated within tol and faces that repeat a
        vertex index, remapping normals/tcoords to the deduplicated array."""
        import pymesh

        # Removing duplicated vertices
        vertices, faces, info = pymesh.remove_duplicated_vertices_raw(vertices, faces, tol)
        # info['index_map'] maps each old vertex index to its new index,
        # letting the per-vertex attributes follow the deduplication.
        if normals is not None:
            cleaned_normals = np.empty((vertices.shape[0], normals.shape[1]), dtype=normals.dtype)
            cleaned_normals[info['index_map'], :] = normals
        else:
            cleaned_normals = None

        if tcoords is not None:
            cleaned_tcoords = np.empty((vertices.shape[0], tcoords.shape[1]), dtype=tcoords.dtype)
            cleaned_tcoords[info['index_map'], :] = tcoords
        else:
            cleaned_tcoords = None

        # Removing degenerated triangles
        # FIXME: Returned vertices are modified (order probably) without a way
        # to get the map so that to update the attributes...
        #vertices, faces, info = pymesh.remove_degenerated_triangles_raw(vertices, faces)

        # Removing degenerated triangles
        # Only the triangles with duplicated vertices
        mask = np.logical_or(np.logical_or(faces[:, 0] == faces[:, 1],
                                           faces[:, 0] == faces[:, 2]),
                             faces[:, 1] == faces[:, 2])
        faces = faces[np.logical_not(mask), :]

        return vertices, faces, cleaned_normals, cleaned_tcoords

    def write(self, mesh_file, vertices, faces):
        """Write a triangle mesh to mesh_file via pymesh."""
        import pymesh
        pymesh.save_mesh_raw(mesh_file, vertices, faces)
class TriMeshInterface:
    """ Mesh reader/writer interface for trimesh """

    def read(self, mesh_file):
        """Load a triangulated PLY mesh.

        Returns a (vertices, faces, normals, tcoords) tuple; normals and
        tcoords are None when the file carries no such per-vertex fields.
        """
        assert mesh_file[-4:] == '.ply', "Only PLY format for input mesh"

        # Read mesh (no cleaning before extracting vertex attributes)
        import trimesh
        mesh = trimesh.load(mesh_file, process=False)

        # Check that it is triangulated
        assert mesh.faces.shape[1] == 3, "Mesh must be triangulated!"

        # Additional fields are not extracted as vertex attributes
        # but still accessible in the metadata
        extra_fields = mesh.metadata['ply_raw']['vertex']['data']

        # Extract normals
        # NOTE(review): mesh._cache is a private trimesh attribute and may
        # change between trimesh versions -- verify on upgrade.
        if 'vertex_normals' in mesh._cache:
            normals = mesh.vertex_normals
        elif all(k in extra_fields.dtype.names for k in ('nx', 'ny', 'nz')):
            normals = np.hstack((extra_fields['nx'][:, None],
                                 extra_fields['ny'][:, None],
                                 extra_fields['nz'][:, None]))
        else:
            normals = None

        # Extract texture coordinates
        if all(k in extra_fields.dtype.names for k in ('s', 't')):
            tcoords = np.hstack((extra_fields['s'][:, None],
                                 extra_fields['t'][:, None]))
        else:
            tcoords = None

        return mesh.vertices, mesh.faces, normals, tcoords

    def clean(self, vertices, faces, normals=None, tcoords=None, tol=1e-12):
        """ Remove duplicated vertices and degenerated triangles """
        import trimesh

        # Creating mesh
        # Not adding vertex normals since they disappear during cleaning pass
        mesh = trimesh.Trimesh(vertices=vertices,
                               faces=faces,
                               process=False)

        # Populating vertex attributes so the cleaning pass can carry them
        # over (they are read back from vertex_attributes below)
        if tcoords is not None:
            mesh.vertex_attributes['tcoords'] = tcoords
        if normals is not None:
            mesh.vertex_attributes['normals'] = normals

        # Cleaning
        # NOTE(review): tol.merge is a global trimesh setting; this mutation
        # affects any other trimesh usage in the process -- confirm intended.
        trimesh.constants.tol.merge = tol
        mesh.merge_vertices()
        mesh.remove_degenerate_faces()

        return (mesh.vertices,
                mesh.faces,
                mesh.vertex_attributes.get('normals', None),
                mesh.vertex_attributes.get('tcoords', None))

    def write(self, mesh_file, vertices, faces):
        """Write a triangle mesh to mesh_file using trimesh's exporter."""
        import trimesh

        # Creating mesh
        mesh = trimesh.Trimesh(vertices=vertices,
                               faces=faces,
                               process=False)
        trimesh.exchange.export.export_mesh(mesh, mesh_file)
|
{"hexsha": "50f2bc0eea6962f97ad19d6da4aa1eaba88d0ba3", "size": 6533, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tools/mesh_interfaces.py", "max_stars_repo_name": "rolanddenis/MeshDD", "max_stars_repo_head_hexsha": "83e2f0faae3f4e90075a7f3135c8c66fd8721b7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tools/mesh_interfaces.py", "max_issues_repo_name": "rolanddenis/MeshDD", "max_issues_repo_head_hexsha": "83e2f0faae3f4e90075a7f3135c8c66fd8721b7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tools/mesh_interfaces.py", "max_forks_repo_name": "rolanddenis/MeshDD", "max_forks_repo_head_hexsha": "83e2f0faae3f4e90075a7f3135c8c66fd8721b7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2944444444, "max_line_length": 102, "alphanum_fraction": 0.569569876, "include": true, "reason": "import numpy", "num_tokens": 1400}
|
# # -*- coding: utf-8 -*-
# """
# Created on Wed Feb 3 12:49:07 2021
# @author: user
# """
import mph
from polygen import random_poly
from polygen import poly_add
from polygen import poly_draw
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import jpype
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Connect to a local COMSOL session (via the mph package), single core
client = mph.Client(cores=1)

# Random-polygon generation settings
N_Vert=5   # vertices per polygon
N_poly=3   # number of polygons to generate
# Bounding box passed to random_poly
# NOTE(review): ordering/meaning of the four values is defined by
# polygen.random_poly -- confirm there.
box1 = [ -150, -165, -25, 150];

# Generate N_poly random polygons inside the box
poly_box=[]
for _ in range(N_poly):
    poly_box.append(random_poly(N_Vert,box1))

# Load the base COMSOL model and inject the generated polygons
model = client.load('Comsol2pics_add_curl_curv.mph')
model=poly_add(model,poly_box)

# Rebuild geometry, remesh and solve the updated model
model.build()
model.mesh()
model.solve()

# Visualize the resulting polygons
poly_draw(model)
|
{"hexsha": "49be98cc777aa17fad93c30f5590b5c7df3b86f0", "size": 673, "ext": "py", "lang": "Python", "max_stars_repo_path": "comsol/comsol_connect.py", "max_stars_repo_name": "ITMO-NSS-team/cell-trap-design", "max_stars_repo_head_hexsha": "161c4c98c0c068516277ab096d407013427cf340", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "comsol/comsol_connect.py", "max_issues_repo_name": "ITMO-NSS-team/cell-trap-design", "max_issues_repo_head_hexsha": "161c4c98c0c068516277ab096d407013427cf340", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "comsol/comsol_connect.py", "max_forks_repo_name": "ITMO-NSS-team/cell-trap-design", "max_forks_repo_head_hexsha": "161c4c98c0c068516277ab096d407013427cf340", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.8548387097, "max_line_length": 52, "alphanum_fraction": 0.6924219911, "include": true, "reason": "import numpy", "num_tokens": 198}
|
Subroutine ptoh
Parameter (maxstr=150001)
Double Precision gxp, gyp, gzp, ftp, pxp, pyp, pzp, pep, pmp
Double Precision gxp0, gyp0, gzp0, ft0fom, drlocl
Double Precision enenew, pxnew, pynew, pznew, beta2, gam
Double Precision ftavg0, gxavg0, gyavg0, gzavg0, bex, bey, bez
Double Precision pxsgs, pysgs, pzsgs, pesgs, pmsgs, gxsgs, gysgs, gzsgs, ftsgs
Double Precision xmdiag, px1, py1, pz1, e1, px2, py2, pz2, e2, px3, py3, pz3, e3, xmpair, etot
Double Precision p1, p2, p3
Common /loclco/gxp(3), gyp(3), gzp(3), ftp(3), pxp(3), pyp(3), pzp(3), pep(3), pmp(3)
Common /hmain1/eatt, jatt, natt, nt, np, n0, n01, n10, n11
Common /hmain2/katt(maxstr, 4), patt(maxstr, 4)
Common /hjjet2/nsg, njsg(maxstr), iasg(maxstr, 3), k1sg(maxstr, 100), k2sg(maxstr, 100), pxsg(maxstr, 100), pysg(maxstr, 100), pzsg(maxstr, 100), pesg(maxstr, 100), pmsg(maxstr, 100)
Common /arprnt/arpar1(100), iapar2(50), arint1(100), iaint2(50)
Common /arprc/itypar(maxstr), gxar(maxstr), gyar(maxstr), gzar(maxstr), ftar(maxstr), pxar(maxstr), pyar(maxstr), pzar(maxstr), pear(maxstr), xmar(maxstr)
Common /soft/pxsgs(maxstr, 3), pysgs(maxstr, 3), pzsgs(maxstr, 3), pesgs(maxstr, 3), pmsgs(maxstr, 3), gxsgs(maxstr, 3), gysgs(maxstr, 3), gzsgs(maxstr, 3), ftsgs(maxstr, 3), k1sgs(maxstr, 3), k2sgs(maxstr, 3), njsgs(maxstr)
Common /rndf77/nseed
Common /anim/nevent, isoft, isflag, izpc
Common /prtn23/gxp0(3), gyp0(3), gzp0(3), ft0fom
Common /nzpc/nattzp
Common /lor/enenew, pxnew, pynew, pznew
Common /ludat1/mstu(200), paru(200), mstj(200), parj(200)
Common /lastt/itimeh, bimp
Common /hjglbr/nelt, ninthj, nelp, ninp
Common /arevt/iaevt, iarun, miss
Common /para7/ioscar, nsmbbbar, nsmmeson
Common /input1/masspr, massta, iseed, iavoid, dt
Dimension xmdiag(maxstr), indx(maxstr), ndiag(maxstr)
Save
Call coales
mstj24 = mstj(24)
mstj(24) = 0
nuudd = 0
npich = 0
nrhoch = 0
ppi0 = 1.
prho0 = 0.
Do isg = 1, nsg
If (njsgs(isg)/=0) Then
natt = natt + 1
k1 = k2sgs(isg, 1)
k1abs = iabs(k1)
px1 = pxsgs(isg, 1)
py1 = pysgs(isg, 1)
pz1 = pzsgs(isg, 1)
k2 = k2sgs(isg, 2)
k2abs = iabs(k2)
px2 = pxsgs(isg, 2)
py2 = pysgs(isg, 2)
pz2 = pzsgs(isg, 2)
e1 = pesgs(isg, 1)
e2 = pesgs(isg, 2)
xmpair = dsqrt((e1+e2)**2-(px1+px2)**2-(py1+py2)**2-(pz1+pz2)**2)
ibs = 2
imspin = 0
If (k1==-k2 .And. iabs(k1)<=2 .And. njsgs(isg)==2) Then
nuudd = nuudd + 1
xmdiag(nuudd) = xmpair
ndiag(nuudd) = natt
End If
k3 = 0
If ((isoft==4 .Or. isoft==5) .And. njsgs(isg)==3) Then
k3 = k2sgs(isg, 3)
k3abs = iabs(k3)
px3 = pxsgs(isg, 3)
py3 = pysgs(isg, 3)
pz3 = pzsgs(isg, 3)
e3 = pesgs(isg, 3)
xmpair = dsqrt((e1+e2+e3)**2-(px1+px2+px3)**2-(py1+py2+py3)**2-(pz1+pz2+pz3)**2)
End If
If (isoft==3 .And. (k1abs>1000 .Or. k2abs>1000)) Then
If (k1abs>1000) Then
kdq = k1abs
kk = k2abs
Else
kdq = k2abs
kk = k1abs
End If
ki = mod(kdq/1000, 10)
kj = mod(kdq/100, 10)
If (mod(kdq,10)==1) Then
idqspn = 0
Else
idqspn = 1
End If
If (kk>ki) Then
ktemp = kk
kk = kj
kj = ki
ki = ktemp
Else If (kk>kj) Then
ktemp = kk
kk = kj
kj = ktemp
End If
If (ki/=kj .And. ki/=kk .And. kj/=kk) Then
If (idqspn==0) Then
kf = 1000*ki + 100*kk + 10*kj + ibs
Else
kf = 1000*ki + 100*kj + 10*kk + ibs
End If
Else If (ki==kj .And. ki==kk) Then
kf = 1000*ki + 100*kj + 10*kk + 4
Else
kf = 1000*ki + 100*kj + 10*kk + ibs
End If
If (kf==2112 .Or. kf==2212) Then
If (abs(sngl(xmpair)-ulmass(kf))>abs(sngl(xmpair)-ulmass(kf+2))) kf = kf + 2
End If
If (k1<0) kf = -kf
Else If ((isoft==4 .Or. isoft==5) .And. njsgs(isg)==3) Then
If (k1abs>k2abs) Then
ki = k1abs
kk = k2abs
Else
ki = k2abs
kk = k1abs
End If
If (k3abs>ki) Then
kj = ki
ki = k3abs
Else If (k3abs<kk) Then
kj = kk
kk = k3abs
Else
kj = k3abs
End If
If (ki==kj .And. ki==kk) Then
ibs = 4
kf = 1000*ki + 100*kj + 10*kk + ibs
Else If (ki/=kj .And. ki/=kk .And. kj/=kk) Then
ibs = 2
kf1 = 1000*ki + 100*kj + 10*kk + ibs
kf2 = 1000*ki + 100*kk + 10*kj + ibs
kf = kf1
If (abs(sngl(xmpair)-ulmass(kf1))>abs(sngl(xmpair)-ulmass(kf2))) kf = kf2
Else
ibs = 2
kf = 1000*ki + 100*kj + 10*kk + ibs
If (kf==2112 .Or. kf==2212) Then
If (abs(sngl(xmpair)-ulmass(kf))>abs(sngl(xmpair)-ulmass(kf+2))) kf = kf + 2
End If
End If
If (k1<0) kf = -kf
Else
If (k1abs==k2abs) Then
If (k1abs<=2) Then
kf = 0
Else If (k1abs<=3) Then
kf = 333
Else
kf = 100*k1abs + 10*k1abs + 2*imspin + 1
End If
Else
If (k1abs>k2abs) Then
kmax = k1abs
kmin = k2abs
Else If (k1abs<k2abs) Then
kmax = k2abs
kmin = k1abs
End If
kf = (100*kmax+10*kmin+2*imspin+1)*isign(1, k1+k2)*(-1)**kmax
If (mod(iabs(kf),10)==1) Then
If (abs(sngl(xmpair)-ulmass(iabs(kf)))>abs(sngl(xmpair)-ulmass(iabs(kf)+2))) kf = (iabs(kf)+2)*isign(1, kf)
End If
End If
End If
itypar(natt) = kf
katt(natt, 1) = kf
If (iabs(kf)==211) Then
npich = npich + 1
Else If (iabs(kf)==213) Then
nrhoch = nrhoch + 1
End If
End If
End Do
If (nuudd/=0) Then
ppi0 = float(npich/2)/float(nuudd)
prho0 = float(nrhoch/2)/float(nuudd)
End If
npi0 = 0
Do isg = 1, nsg
If (k2sgs(isg,1)==-k2sgs(isg,2) .And. iabs(k2sgs(isg,1))<=2 .And. njsgs(isg)==2) Then
If (ranart(nseed)<=ppi0) npi0 = npi0 + 1
End If
End Do
If (nuudd>1) Then
Call index1(maxstr, nuudd, xmdiag, indx)
Else
indx(1) = 1
End If
Do ix = 1, nuudd
iuudd = indx(ix)
inatt = ndiag(iuudd)
If (ix<=npi0) Then
kf = 111
Else If (ranart(nseed)<=(prho0/(1-ppi0+0.00001))) Then
kf = 113
Else
If (ranart(nseed)<=0.5) Then
kf = 221
Else
kf = 223
End If
End If
itypar(inatt) = kf
katt(inatt, 1) = kf
End Do
inatt = 0
If (ioscar==3) Then
Write (85, 395) iaevt, 3*nsmbbbar + 2*nsmmeson, nsmbbbar, nsmmeson, bimp, nelp, ninp, nelt, ninthj, miss
End If
Do isg = 1, nsg
If (njsgs(isg)/=0) Then
inatt = inatt + 1
k1 = k2sgs(isg, 1)
k1abs = iabs(k1)
px1 = pxsgs(isg, 1)
py1 = pysgs(isg, 1)
pz1 = pzsgs(isg, 1)
k2 = k2sgs(isg, 2)
k2abs = iabs(k2)
px2 = pxsgs(isg, 2)
py2 = pysgs(isg, 2)
pz2 = pzsgs(isg, 2)
e1 = pesgs(isg, 1)
e2 = pesgs(isg, 2)
If (njsgs(isg)==2) Then
pxar(inatt) = sngl(px1+px2)
pyar(inatt) = sngl(py1+py2)
pzar(inatt) = sngl(pz1+pz2)
patt(inatt, 1) = pxar(inatt)
patt(inatt, 2) = pyar(inatt)
patt(inatt, 3) = pzar(inatt)
etot = e1 + e2
p1 = px1 + px2
p2 = py1 + py2
p3 = pz1 + pz2
Else If ((isoft==4 .Or. isoft==5) .And. njsgs(isg)==3) Then
px3 = pxsgs(isg, 3)
py3 = pysgs(isg, 3)
pz3 = pzsgs(isg, 3)
e3 = pesgs(isg, 3)
pxar(inatt) = sngl(px1+px2+px3)
pyar(inatt) = sngl(py1+py2+py3)
pzar(inatt) = sngl(pz1+pz2+pz3)
patt(inatt, 1) = pxar(inatt)
patt(inatt, 2) = pyar(inatt)
patt(inatt, 3) = pzar(inatt)
etot = e1 + e2 + e3
p1 = px1 + px2 + px3
p2 = py1 + py2 + py3
p3 = pz1 + pz2 + pz3
End If
xmar(inatt) = ulmass(itypar(inatt))
kf = katt(inatt, 1)
If (kf==113 .Or. abs(kf)==213 .Or. kf==221 .Or. kf==223 .Or. abs(kf)==313 .Or. abs(kf)==323 .Or. kf==333 .Or. abs(kf)==1114 .Or. abs(kf)==2114 .Or. abs(kf)==2214 .Or. abs(kf)==2224) Then
xmar(inatt) = resmass(kf)
End If
pear(inatt) = sqrt(pxar(inatt)**2+pyar(inatt)**2+pzar(inatt)**2+xmar(inatt)**2)
patt(inatt, 4) = pear(inatt)
eatt = eatt + pear(inatt)
ipartn = njsgs(isg)
Do i = 1, ipartn
ftp(i) = ftsgs(isg, i)
gxp(i) = gxsgs(isg, i)
gyp(i) = gysgs(isg, i)
gzp(i) = gzsgs(isg, i)
pxp(i) = pxsgs(isg, i)
pyp(i) = pysgs(isg, i)
pzp(i) = pzsgs(isg, i)
pmp(i) = pmsgs(isg, i)
pep(i) = pesgs(isg, i)
End Do
Call locldr(ipartn, drlocl)
tau0 = arpar1(1)
ftavg0 = ft0fom + dble(tau0)
gxavg0 = 0D0
gyavg0 = 0D0
gzavg0 = 0D0
Do i = 1, ipartn
gxavg0 = gxavg0 + gxp0(i)/ipartn
gyavg0 = gyavg0 + gyp0(i)/ipartn
gzavg0 = gzavg0 + gzp0(i)/ipartn
End Do
bex = p1/etot
bey = p2/etot
bez = p3/etot
beta2 = bex**2 + bey**2 + bez**2
gam = 1.D0/dsqrt(1.D0-beta2)
If (beta2>=0.9999999999999D0) Then
Write (6, *) '2', bex, bey, bez, beta2, gam
End If
Call lorenz(ftavg0, gxavg0, gyavg0, gzavg0, -bex, -bey, -bez)
gxar(inatt) = sngl(pxnew)
gyar(inatt) = sngl(pynew)
gzar(inatt) = sngl(pznew)
ftar(inatt) = sngl(enenew)
If (ioscar==3) Then
Write (85, 313) k2sgs(isg, 1), px1, py1, pz1, pmsgs(isg, 1), inatt, katt(inatt, 1), xmar(inatt)
Write (85, 312) k2sgs(isg, 2), px2, py2, pz2, pmsgs(isg, 2), inatt, katt(inatt, 1)
If (njsgs(isg)==3) Write (85, 312) k2sgs(isg, 3), px3, py3, pz3, pmsgs(isg, 3), inatt, katt(inatt, 1)
End If
End If
End Do
nattzp = natt
mstj(24) = mstj24
Return
395 Format (4I8, F10.4, 5I5)
312 Format (I6, 4(1X,F10.3), 1X, I6, 1X, I6)
313 Format (I6, 4(1X,F10.3), 1X, I6, 1X, I6, 1X, F10.3)
End Subroutine ptoh
|
{"hexsha": "ec13827f7ef6733cccea14bfaa89fd8c68732818", "size": 10915, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/ptoh.f90", "max_stars_repo_name": "xiaohaijin/AMPT", "max_stars_repo_head_hexsha": "90c7a1ab4dc04a092e64af759d53e22f6fea5b02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-24T19:37:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-14T12:58:59.000Z", "max_issues_repo_path": "src/ptoh.f90", "max_issues_repo_name": "xiaohaijin/AMPT", "max_issues_repo_head_hexsha": "90c7a1ab4dc04a092e64af759d53e22f6fea5b02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ptoh.f90", "max_forks_repo_name": "xiaohaijin/AMPT", "max_forks_repo_head_hexsha": "90c7a1ab4dc04a092e64af759d53e22f6fea5b02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8722044728, "max_line_length": 226, "alphanum_fraction": 0.4870361887, "num_tokens": 4438}
|
[STATEMENT]
lemma typing_swp:
assumes "\<Gamma>(a \<mapsto> \<sigma>) \<turnstile> M : \<tau>" "b \<notin> fvs M"
shows "\<Gamma>(b \<mapsto> \<sigma>) \<turnstile> [a \<leftrightarrow> b] \<cdot> M : \<tau>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Gamma>(b \<mapsto> \<sigma>) \<turnstile> [a \<leftrightarrow> b] \<cdot> M : \<tau>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Gamma>(b \<mapsto> \<sigma>) \<turnstile> [a \<leftrightarrow> b] \<cdot> M : \<tau>
[PROOF STEP]
have "y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)" for y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)
[PROOF STEP]
assume "y \<in> fvs M"
[PROOF STATE]
proof (state)
this:
y \<in> fvs M
goal (1 subgoal):
1. y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)
[PROOF STEP]
hence "y \<noteq> b"
[PROOF STATE]
proof (prove)
using this:
y \<in> fvs M
goal (1 subgoal):
1. y \<noteq> b
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
y \<in> fvs M
b \<notin> fvs M
goal (1 subgoal):
1. y \<noteq> b
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<noteq> b
goal (1 subgoal):
1. y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)
[PROOF STEP]
consider "y = a" | "y \<noteq> a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>y = a \<Longrightarrow> thesis; y \<noteq> a \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<lbrakk>y = a \<Longrightarrow> ?thesis; y \<noteq> a \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)
[PROOF STEP]
thus "(\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>y = a \<Longrightarrow> ?thesis; y \<noteq> a \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)
[PROOF STEP]
by(cases, simp add: prm_unit_action, simp add: prm_unit_inaction \<open>y \<noteq> b\<close>)
[PROOF STATE]
proof (state)
this:
(\<Gamma>(a \<mapsto> \<sigma>)) y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ y)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) ?y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ ?y)
goal (1 subgoal):
1. \<Gamma>(b \<mapsto> \<sigma>) \<turnstile> [a \<leftrightarrow> b] \<cdot> M : \<tau>
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
?y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) ?y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ ?y)
goal (1 subgoal):
1. \<Gamma>(b \<mapsto> \<sigma>) \<turnstile> [a \<leftrightarrow> b] \<cdot> M : \<tau>
[PROOF STEP]
using typing_prm assms(1)
[PROOF STATE]
proof (prove)
using this:
?y \<in> fvs M \<Longrightarrow> (\<Gamma>(a \<mapsto> \<sigma>)) ?y = (\<Gamma>(b \<mapsto> \<sigma>)) ([a \<leftrightarrow> b] $ ?y)
\<lbrakk>?\<Gamma> \<turnstile> ?M : ?\<tau>; \<And>y. y \<in> fvs ?M \<Longrightarrow> ?\<Gamma> y = ?\<Delta> (?\<pi> $ y)\<rbrakk> \<Longrightarrow> ?\<Delta> \<turnstile> ?\<pi> \<cdot> ?M : ?\<tau>
\<Gamma>(a \<mapsto> \<sigma>) \<turnstile> M : \<tau>
goal (1 subgoal):
1. \<Gamma>(b \<mapsto> \<sigma>) \<turnstile> [a \<leftrightarrow> b] \<cdot> M : \<tau>
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<Gamma>(b \<mapsto> \<sigma>) \<turnstile> [a \<leftrightarrow> b] \<cdot> M : \<tau>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1736, "file": "Name_Carrying_Type_Inference_SimplyTyped", "length": 16}
|
import numpy as np
import aoc_helper
# Parse the day-25 puzzle input into a 2D character grid: one row per
# input line, one single-character cell per column.
FLOOR = np.array(list(map(list, aoc_helper.day(25).splitlines())))
# Cell markers: '.' = empty, '>' = east-moving, 'v' = south-moving.
EMPTY, EAST, SOUTH = ".>v"
def step():
    """Advance the grid one tick, mutating the module-level FLOOR in place.

    East movers ('>') shift one column right (wrapping at the edge) into
    empty cells first; then south movers ('v') shift one row down the same
    way.  Returns True if at least one cell moved this tick.
    """
    any_moved = False
    for herd, axis in ((EAST, 1), (SOUTH, 0)):
        # Cells of this herd whose wrapped neighbour along `axis` is empty.
        movable = (FLOOR == herd) & np.roll(FLOOR == EMPTY, -1, axis)
        any_moved = any_moved or movable.any()
        FLOOR[movable] = EMPTY                   # vacate the origins...
        FLOOR[np.roll(movable, 1, axis)] = herd  # ...occupy the destinations
    return any_moved
def part_one():
    """Return the number of the first step on which nothing moves.

    Repeatedly advances the grid with step(); the answer is the count of
    steps with movement plus one for the final, motionless step.
    """
    steps = 1
    while step():
        steps += 1
    return steps
# Compute and submit the part-one answer for day 25.
aoc_helper.submit(25, part_one)
|
{"hexsha": "a9409507828f50d5dd86e749c4b051fc5db56d7c", "size": 575, "ext": "py", "lang": "Python", "max_stars_repo_path": "2021/day_25.py", "max_stars_repo_name": "salt-die/Advent-of-Code", "max_stars_repo_head_hexsha": "b0344dafda4576b41023134b8289c08af877a77a", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 105, "max_stars_repo_stars_event_min_datetime": "2019-12-09T07:27:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T16:34:37.000Z", "max_issues_repo_path": "2021/day_25.py", "max_issues_repo_name": "salt-die/Advent-of-Code", "max_issues_repo_head_hexsha": "b0344dafda4576b41023134b8289c08af877a77a", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-11T21:25:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-12T21:21:35.000Z", "max_forks_repo_path": "2021/day_25.py", "max_forks_repo_name": "salt-die/Advent-of-Code", "max_forks_repo_head_hexsha": "b0344dafda4576b41023134b8289c08af877a77a", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-12-06T01:00:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T00:48:43.000Z", "avg_line_length": 23.0, "max_line_length": 68, "alphanum_fraction": 0.6469565217, "include": true, "reason": "import numpy", "num_tokens": 191}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.