content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# coding: utf-8
"""Unit tests for operator validator."""
import unittest
import pynairus.validators.operator_validator as ov
from pynairus.errors.app_error import ValidateError
from pynairus.errors.app_error import BadArgumentError
class ValidatorsTest(unittest.TestCase):
    """Unit tests covering every Validator class of operator_validator."""

    def test_base_validator(self):
        """A BaseValidator subclass that does not override validate() raises."""
        with self.assertRaisesRegex(ValidateError, "ValidatorTest"):
            class ValidatorTest(ov.BaseValidator):
                """Testing class for BaseValidator."""

            subject = ValidatorTest()
            subject.validate(1, 2, 2)

    def test_addition_validate(self):
        """AdditionValidator.validate: result checking and bad input."""
        subject = ov.AdditionValidator()
        self.assertIsInstance(subject, ov.BaseValidator)
        # Correct and incorrect sums.
        self.assertTrue(subject.validate(2, 1, 1))
        self.assertFalse(subject.validate(2, 2, 1))
        # Non-numeric input is rejected.
        with self.assertRaises(ValueError):
            subject.validate("a", 1, 1)

    def test_substraction_validate(self):
        """SubstractionValidator.validate: result checking and bad input."""
        subject = ov.SubstractionValidator()
        self.assertIsInstance(subject, ov.BaseValidator)
        self.assertTrue(subject.validate(2, 3, 1))
        self.assertFalse(subject.validate(2, 2, 1))
        with self.assertRaises(ValueError):
            subject.validate("a", 1, 1)

    def test_multiplication_validate(self):
        """MultiplicationValidator.validate: result checking and bad input."""
        subject = ov.MultiplicationValidator()
        self.assertIsInstance(subject, ov.BaseValidator)
        self.assertTrue(subject.validate(9, 3, 3))
        self.assertFalse(subject.validate(8, 3, 3))
        with self.assertRaises(ValueError):
            subject.validate("a", 2, 2)

    def test_division_validate(self):
        """DivisionValidator.validate: quotient/remainder form and ordering."""
        subject = ov.DivisionValidator()
        self.assertIsInstance(subject, ov.BaseValidator)
        # Result may carry a remainder ("8r1") or be exact ("8").
        self.assertTrue(subject.validate("8r1", 65, 8))
        self.assertTrue(subject.validate("8", 64, 8))
        # The dividend must be greater than the divisor.
        with self.assertRaisesRegex(
                ValidateError,
                r"The first number \(8\) isn't greater than 9"):
            subject.validate(None, 8, 9)

    def test_time_addition_validate(self):
        """TimeAdditionValidator.validate: time sums and parse failures."""
        subject = ov.TimeAdditionValidator()
        self.assertIsInstance(subject, ov.BaseValidator)
        self.assertTrue(subject.validate("1h31m10s", "50m56s", "40m14s"))
        self.assertTrue(subject.validate("51m10s", "30m56s", "20m14s"))
        self.assertFalse(subject.validate("50m10s", "30m56s", "20m14s"))
        # A None result makes the parser fail and wraps a BadArgumentError.
        with self.assertRaisesRegex(
                ValidateError,
                r"An error occured while validating: 50m \+ 40m14s") as caught:
            subject.validate(None, "50m", "40m14s")
        self.assertEqual(2, len(caught.exception.args))
        self.assertIsInstance(caught.exception.args[1], BadArgumentError)

    def test_time_substraction_validate(self):
        """TimeSubstractionValidator.validate: differences and bad operands."""
        subject = ov.TimeSubstractionValidator()
        self.assertIsInstance(subject, ov.BaseValidator)
        self.assertTrue(subject.validate("1h30m10s", "2h10m20s", "40m10s"))
        self.assertTrue(subject.validate("30m10s", "1h10m20s", "40m10s"))
        # Parsing failure is wrapped in a ValidateError carrying the cause.
        with self.assertRaisesRegex(
                ValidateError,
                r"An error occured while validating: 50m \- 40m14s") as caught:
            subject.validate(None, "50m", "40m14s")
        self.assertEqual(2, len(caught.exception.args))
        self.assertIsInstance(caught.exception.args[1], BadArgumentError)
        # Raise when the first operand is not greater than the second.
        with self.assertRaisesRegex(
                ValidateError,
                r"The first time \(50m10s\) isn't greater than 1h10m14s") as caught2:
            subject.validate(None, "50m10s", "1h10m14s")
        self.assertEqual(1, len(caught2.exception.args))
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
26453,
5254,
329,
10088,
4938,
1352,
526,
15931,
198,
198,
11748,
555,
715,
395,
198,
11748,
279,
2047,
958,
385,
13,
12102,
2024,
13,
46616,
62,
12102,
1352,
355,
19643,
198,
6738,
2... | 2.288264 | 1,849 |
#!/usr/bin/env python
#Thanks to https://github.com/kyle-elsalhi/opencv-examples
import numpy as np
#Computer Vision Imports
import cv2
import cv2.aruco as aruco
import os
import pickle
import time
import sys
#ROS imports
import rospy
from std_msgs.msg import String
from rospy_tutorials.msg import Floats
from rospy.numpy_msg import numpy_msg
import roslib; roslib.load_manifest('cubestack')
import tf2_ros
import tf2_geometry_msgs
import geometry_msgs.msg
from geometry_msgs.msg import Pose, PoseStamped, Quaternion
import tf
from tf.transformations import quaternion_from_euler
# ROS publisher: one flat float32 array of [id, x, y, z] quadruples per frame.
pub = rospy.Publisher('floats', numpy_msg(Floats), queue_size=10)

# Initialise the node.
rospy.init_node('talker', anonymous=True)

# Set loop rate (Hz).
rate = rospy.Rate(1)

calibrationPath = '/home/dym30/catkin_ws/src/cubestack/src/visionScripts/calibration.pckl'

# Check for camera calibration data.
if not os.path.exists(calibrationPath):
    print("No calibration found!")
    exit()
else:
    # `with` guarantees the pickle file is closed even if loading fails.
    with open(calibrationPath, 'rb') as f:
        (cameraMatrix, distCoeffs, _, _) = pickle.load(f)
    if cameraMatrix is None or distCoeffs is None:
        print("Calibration issue. Remove ./calibration.pckl and recalibrate your camera with CalibrateCamera.py.")
        exit()

# Constant parameters used in Aruco methods.
ARUCO_PARAMETERS = aruco.DetectorParameters_create()
# For a 5x5 aruco marker.
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_5X5_1000)

# Create grid board object we're using in our stream.
board = aruco.GridBoard_create(
    markersX=2,
    markersY=2,
    markerLength=0.09,
    markerSeparation=0.01,
    dictionary=ARUCO_DICT)

# Create vectors.
rvecs, tvecs = None, None
calibratedDistance = False

# FIX: create the TF buffer/listener ONCE, before the main loop.  Previously
# they were re-created for every marker on every frame, so the listener never
# had time to fill the 12 s buffer and lookups were likely to fail or be stale.
tf_buffer = tf2_ros.Buffer(rospy.Duration(12))
tf_listener = tf2_ros.TransformListener(tf_buffer)

cam = cv2.VideoCapture('/home/dym30/catkin_ws/src/cubestack/src/visionScripts/stillVideo.mp4')

while (cam.isOpened()) and not rospy.is_shutdown():
    # Capturing each frame of our video stream.
    ret, QueryImg = cam.read()
    if ret == True:
        # grayscale image
        gray = cv2.cvtColor(QueryImg, cv2.COLOR_BGR2GRAY)

        # Detect Aruco markers.
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

        # Refine detected markers:
        # eliminates markers not part of our board, adds missing markers to the board.
        corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
            image=gray,
            board=board,
            detectedCorners=corners,
            detectedIds=ids,
            rejectedCorners=rejectedImgPoints,
            cameraMatrix=cameraMatrix,
            distCoeffs=distCoeffs)

        # Outline all of the detected markers in red.
        QueryImg = aruco.drawDetectedMarkers(QueryImg, corners, borderColor=(0, 0, 255))

        # FIX: `> 0` instead of `>= 0` (len() is never negative).
        if ids is not None and len(ids) > 0:
            rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.067, cameraMatrix, distCoeffs)

            # For visualisations.
            for rvec, tvec in zip(rvecs, tvecs):
                QueryImg = aruco.drawAxis(QueryImg, cameraMatrix, distCoeffs, rvec, tvec, 0.067)

            arrID = []
            p = geometry_msgs.msg.Pose()  # Create pose object

            # BUGFIX: zip rvecs alongside ids/tvecs.  The old loop only zipped
            # (ids, tvecs) and reused the stale `rvec` left over from the
            # visualisation loop above, so every marker was published with the
            # LAST marker's orientation.
            for marker_id, tvec, rvec in zip(ids, tvecs, rvecs):
                p.position.x = tvec[0][0]
                p.position.y = tvec[0][1]
                p.position.z = tvec[0][2]
                # NOTE(review): rvec is an axis-angle rotation vector, not a
                # quaternion; storing it in orientation.x/y/z with w=0 keeps
                # the original behaviour but is not a valid quaternion.
                p.orientation.x = rvec[0][0]
                p.orientation.y = rvec[0][1]
                p.orientation.z = rvec[0][2]
                p.orientation.w = 0

                # Transform from camera frame to world frame.
                transform = tf_buffer.lookup_transform("world", "2d_marker_cam", rospy.Time(0), rospy.Duration(0.1))
                ps = geometry_msgs.msg.PoseStamped(pose=p)
                transformedPose = tf2_geometry_msgs.do_transform_pose(ps, transform)

                # Append [id, x, y, z] for this marker to the output array.
                arrID.append(marker_id)
                arrID.append(transformedPose.pose.position.x)
                arrID.append(transformedPose.pose.position.y)
                arrID.append(transformedPose.pose.position.z)

            # Prepare array for publishing.
            a = np.array(arrID, dtype=np.float32)
            print(a)
            pub.publish(a)  # publish!
            rate.sleep()
            # Repeat.

        # Display image.
        cv2.imshow('QueryImage', QueryImg)

    # Press q to exit if the video preview pane is selected.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
9690,
284,
3740,
1378,
12567,
13,
785,
14,
74,
2349,
12,
1424,
282,
5303,
14,
9654,
33967,
12,
1069,
12629,
198,
198,
11748,
299,
32152,
355,
45941,
198,
2,
34556,
19009,
1846,
37... | 2.159672 | 2,192 |
"""
Functions for coordinate transformations.
Contains trasformations from/to the following coordinate systems:
GSE, GSM, SM, GEI, GEO, MAG, J2000
Times are in Unix seconds for consistency.
Notes
-----
These functions are in cotrans_lib.pro of IDL SPEDAS.
For a comparison to IDL, see: http://spedas.org/wiki/index.php?title=Cotrans
"""
import numpy as np
from datetime import datetime
from pyspedas.utilities.igrf import set_igrf_params
from pyspedas.utilities.j2000 import set_j2000_params
def get_time_parts(time_in):
    """
    Split time into year, doy, hours, minutes, seconds.fsec.

    Parameters
    ----------
    time_in: list of float
        Time array (Unix seconds, UTC).

    Returns
    -------
    iyear: array of int
        Year.
    idoy: array of int
        Day of year (1-366).
    ih: array of int
        Hours.
    im: array of int
        Minutes.
    isec: array of float
        Seconds and milliseconds.
    """
    # A plain list comprehension replaces np.vectorize: it behaves the same
    # element-wise, but also handles an empty input (np.vectorize raises on
    # size-0 arrays) and avoids the vectorize call overhead.
    stamps = [datetime.utcfromtimestamp(t) for t in time_in]
    iyear = np.array([s.year for s in stamps])
    idoy = np.array([s.timetuple().tm_yday for s in stamps])
    ih = np.array([s.hour for s in stamps])
    im = np.array([s.minute for s in stamps])
    isec = np.array([s.second + s.microsecond / 1000000.0 for s in stamps])
    return iyear, idoy, ih, im, isec
def csundir_vect(time_in):
    """
    Calculate the direction of the sun.

    Parameters
    ----------
    time_in: list of float
        Time array.

    Returns
    -------
    gst: list of float
        Greenwich mean sideral time (radians).
    slong: list of float
        Longitude along ecliptic (radians).
    sra: list of float
        Right ascension (radians).
    sdec: list of float
        Declination of the sun (radians).
    obliq: list of float
        Inclination of Earth's axis (radians).
    """
    iyear, idoy, ih, im, isec = get_time_parts(time_in)

    # Julian day and greenwich mean sideral time
    pisd = np.pi / 180.0  # degrees -> radians conversion factor
    fday = (ih * 3600.0 + im * 60.0 + isec)/86400.0  # fraction of the day
    # Days since the 1900 reference epoch; np.fix truncates toward zero for
    # the leap-year count, matching the IDL original.
    jj = 365 * (iyear-1900) + np.fix((iyear-1901)/4) + idoy
    dj = jj - 0.5 + fday
    gst = np.mod(279.690983 + 0.9856473354 * dj + 360.0 * fday + 180.0,
                 360.0) * pisd

    # longitude along ecliptic
    vl = np.mod(279.696678 + 0.9856473354 * dj, 360.0)
    t = dj / 36525.0  # Julian centuries since the epoch
    g = np.mod(358.475845 + 0.985600267 * dj, 360.0) * pisd
    slong = (vl + (1.91946 - 0.004789 * t) * np.sin(g) + 0.020094 *
             np.sin(2.0 * g)) * pisd

    # inclination of Earth's axis
    obliq = (23.45229 - 0.0130125 * t) * pisd
    sob = np.sin(obliq)
    cob = np.cos(obliq)

    # Aberration due to Earth's motion around the sun (about 0.0056 deg)
    pre = (0.005686 - 0.025e-4 * t) * pisd

    # declination of the sun
    slp = slong - pre
    sind = sob * np.sin(slp)
    cosd = np.sqrt(1.0 - sind**2)
    sc = sind / cosd
    sdec = np.arctan(sc)

    # right ascension of the sun
    sra = np.pi - np.arctan2((cob/sob) * sc, -np.cos(slp)/cosd)

    return gst, slong, sra, sdec, obliq
def cdipdir(time_in=None, iyear=None, idoy=None):
    """
    Compute dipole direction in GEO coordinates.

    Parameters
    ----------
    time_in: float
        Unix time; used only when iyear/idoy are not given.
    iyear: int
        Year.
    idoy: int
        Day of year.

    Returns
    -------
    list of float
        Components (d1, d2, d3) of the geodipole axis unit vector in GEO.

    Notes
    -----
    Compute geodipole axis direction from International Geomagnetic Reference
    Field (IGRF-13) model for time interval 1970 to 2020.
    For time out of interval, computation is made for nearest boundary.
    Same as SPEDAS cdipdir.
    """
    if (time_in is None) and (iyear is None) and (idoy is None):
        print("Error: No time was provided.")
        return

    if (iyear is None) or (idoy is None):
        iyear, idoy, ih, im, isec = get_time_parts(time_in)

    # IGRF-13 parameters, 1965-2020.
    minyear, maxyear, ga, ha, dg, dh = set_igrf_params()

    # Snap to the 5-year IGRF epoch at or below iyear, clamped to the table.
    y = iyear - (iyear % 5)
    if y < minyear:
        y = minyear
    elif y > maxyear:
        y = maxyear

    year0 = y
    year1 = y + 5
    g0 = ga[year0]
    h0 = ha[year0]
    maxind = max(ga.keys())
    # NOTE(review): g/h alias g0/h0 here, so the in-place updates below mutate
    # the containers returned by set_igrf_params.  Harmless only if that
    # function rebuilds its tables on every call -- confirm.
    g = g0
    h = h0

    # Interpolate for dates.
    f2 = (iyear + (idoy-1)/365.25 - year0)/5.
    f1 = 1.0 - f2
    f3 = iyear + (idoy-1)/365.25 - maxind
    nloop = len(g0)

    if year1 <= maxind:
        # years 1970-2020: linear interpolation between the two epochs
        g1 = ga[year1]
        h1 = ha[year1]
        for i in range(nloop):
            g[i] = g0[i]*f1 + g1[i]*f2
            h[i] = h0[i]*f1 + h1[i]*f2
    else:
        # years 2020-2025: extrapolate using the secular-variation terms
        for i in range(nloop):
            g[i] = g0[i] + dg[i]*f3
            h[i] = h0[i] + dh[i]*f3

    # Coefficient normalisation pass, mirroring the SPEDAS/IDL loop.
    s = 1.0
    for i in range(2, 15):
        mn = int(i*(i-1.0)/2.0 + 1.0)
        s = int(s*(2.0*i-3.0)/(i-1.0))
        g[mn] *= s
        h[mn] *= s
        g[mn-1] *= s
        h[mn-1] *= s
        p = s
        for j in range(2, i):
            aa = 1.0
            if j == 2:
                aa = 2.0
            p = p * np.sqrt(aa*(i-j+1)/(i+j-2))
            mnn = int(mn + j - 1)
            g[mnn] *= p
            h[mnn] *= p
            g[mnn-1] *= p
            h[mnn-1] *= p

    # First-degree coefficients determine the dipole orientation.
    g10 = -g[1]
    g11 = g[2]
    h11 = h[2]

    sq = g11**2 + h11**2
    sqq = np.sqrt(sq)
    sqr = np.sqrt(g10**2 + sq)
    s10 = -h11/sqq
    c10 = -g11/sqq
    st0 = sqq/sqr
    ct0 = g10/sqr

    stc1 = st0*c10
    sts1 = st0*s10

    d1 = stc1
    d2 = sts1
    d3 = ct0

    return d1, d2, d3
def cdipdir_vect(time_in=None, iyear=None, idoy=None):
    """
    Compute dipole direction in GEO coordinates.

    Similar to cdipdir but for arrays.

    Parameters
    ----------
    time_in: list of floats
        Unix times; used only when iyear/idoy are not given.
    iyear: list of int
        Years.
    idoy: list of int
        Days of year.

    Returns
    -------
    list of float
        Three arrays (d1, d2, d3): dipole axis components in GEO.

    Notes
    -----
    Same as SPEDAS cdipdir_vec.
    """
    # Scalar inputs: delegate to the scalar implementation.
    if ((time_in is None or not isinstance(time_in, list))
            and (iyear is None or not isinstance(iyear, list))
            and (idoy is None or not isinstance(idoy, list))):
        return cdipdir(time_in, iyear, idoy)

    if (iyear is None) or (idoy is None):
        iyear, idoy, ih, im, isec = get_time_parts(time_in)

    d1 = []
    d2 = []
    d3 = []

    # Cache keyed on the (year, day-of-year) pair.
    # BUGFIX: the cache used to be keyed on the SUM iyear[i] + idoy[i], which
    # collides across years (e.g. 2019 doy 2 and 2020 doy 1 both give 2021),
    # returning a wrong dipole direction for time arrays spanning a new year.
    cdipdir_cache = {}
    for i in range(len(idoy)):
        key = (iyear[i], idoy[i])
        cached = cdipdir_cache.get(key)
        if cached is None:
            # Not seen yet: compute once and memoise.
            cached = cdipdir(None, iyear[i], idoy[i])
            cdipdir_cache[key] = cached
        d1.append(cached[0])
        d2.append(cached[1])
        d3.append(cached[2])

    return np.array(d1), np.array(d2), np.array(d3)
def tgeigse_vect(time_in, data_in):
    """
    GEI to GSE transformation.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        xgei, ygei, zgei cartesian GEI coordinates.

    Returns
    -------
    xgse: list of float
        Cartesian GSE coordinates.
    ygse: list of float
        Cartesian GSE coordinates.
    zgse: list of float
        Cartesian GSE coordinates.
    """
    coords = np.array(data_in)
    xgei, ygei, zgei = coords[:, 0], coords[:, 1], coords[:, 2]

    gst, slong, sra, sdec, obliq = csundir_vect(time_in)

    # Sun direction unit vector in GEI.
    gs1 = np.cos(sra) * np.cos(sdec)
    gs2 = np.sin(sra) * np.cos(sdec)
    gs3 = np.sin(sdec)

    # Ecliptic pole direction in GEI.
    ge1 = 0.0
    ge2 = -np.sin(obliq)
    ge3 = np.cos(obliq)

    # Cross product ge x gs completes the right-handed triad.
    gegs1 = ge2 * gs3 - ge3 * gs2
    gegs2 = ge3 * gs1 - ge1 * gs3
    gegs3 = ge1 * gs2 - ge2 * gs1

    # Project the GEI vector onto the three GSE basis vectors.
    xgse = gs1 * xgei + gs2 * ygei + gs3 * zgei
    ygse = gegs1 * xgei + gegs2 * ygei + gegs3 * zgei
    zgse = ge1 * xgei + ge2 * ygei + ge3 * zgei

    return xgse, ygse, zgse
def subgei2gse(time_in, data_in):
    """
    Transform data from GEI to GSE.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GEI.

    Returns
    -------
    Array of float
        Coordinates in GSE, one row per time.
    """
    xs, ys, zs = tgeigse_vect(time_in, data_in)
    print("Running transformation: subgei2gse")
    # Stack the three component arrays back into (n, 3) rows.
    return np.column_stack([xs, ys, zs])
def tgsegei_vect(time_in, data_in):
    """
    GSE to GEI transformation (inverse of tgeigse_vect).

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        xgse, ygse, zgse cartesian GSE coordinates.

    Returns
    -------
    xgei: list of float
        Cartesian GEI coordinates.
    ygei: list of float
        Cartesian GEI coordinates.
    zgei: list of float
        Cartesian GEI coordinates.
    """
    coords = np.array(data_in)
    xgse, ygse, zgse = coords[:, 0], coords[:, 1], coords[:, 2]

    gst, slong, sra, sdec, obliq = csundir_vect(time_in)

    # Sun direction unit vector in GEI.
    gs1 = np.cos(sra) * np.cos(sdec)
    gs2 = np.sin(sra) * np.cos(sdec)
    gs3 = np.sin(sdec)

    # Ecliptic pole direction in GEI.
    ge1 = 0.0
    ge2 = -np.sin(obliq)
    ge3 = np.cos(obliq)

    # Cross product ge x gs completes the right-handed triad.
    gegs1 = ge2 * gs3 - ge3 * gs2
    gegs2 = ge3 * gs1 - ge1 * gs3
    gegs3 = ge1 * gs2 - ge2 * gs1

    # Transpose of the GEI->GSE rotation: basis vectors appear column-wise.
    xgei = gs1 * xgse + gegs1 * ygse + ge1 * zgse
    ygei = gs2 * xgse + gegs2 * ygse + ge2 * zgse
    zgei = gs3 * xgse + gegs3 * ygse + ge3 * zgse

    return xgei, ygei, zgei
def subgse2gei(time_in, data_in):
    """
    Transform data from GSE to GEI.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GSE.

    Returns
    -------
    Array of float
        Coordinates in GEI, one row per time.
    """
    xs, ys, zs = tgsegei_vect(time_in, data_in)
    print("Running transformation: subgse2gei")
    # Stack the three component arrays back into (n, 3) rows.
    return np.column_stack([xs, ys, zs])
def tgsegsm_vect(time_in, data_in):
    """
    Transform data from GSE to GSM.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        xgse, ygse, zgse cartesian GSE coordinates.

    Returns
    -------
    xgsm: list of float
        Cartesian GSM coordinates.
    ygsm: list of float
        Cartesian GSM coordinates.
    zgsm: list of float
        Cartesian GSM coordinates.
    """
    xgsm, ygsm, zgsm = 0, 0, 0
    d = np.array(data_in)
    xgse, ygse, zgse = d[:, 0], d[:, 1], d[:, 2]

    # Dipole direction (GEO) and sun direction parameters.
    gd1, gd2, gd3 = cdipdir_vect(time_in)
    gst, slong, sra, sdec, obliq = csundir_vect(time_in)

    # Sun direction unit vector in GEI.
    gs1 = np.cos(sra) * np.cos(sdec)
    gs2 = np.sin(sra) * np.cos(sdec)
    gs3 = np.sin(sdec)

    sgst = np.sin(gst)
    cgst = np.cos(gst)

    # Ecliptic pole direction in GEI.
    ge1 = 0.0
    ge2 = -np.sin(obliq)
    ge3 = np.cos(obliq)

    # Dipole direction rotated from GEO into GEI using GMST.
    gm1 = gd1 * cgst - gd2 * sgst
    gm2 = gd1 * sgst + gd2 * cgst
    gm3 = gd3

    # Cross product of dipole and sun directions (unnormalised).
    gmgs1 = gm2 * gs3 - gm3 * gs2
    gmgs2 = gm3 * gs1 - gm1 * gs3
    gmgs3 = gm1 * gs2 - gm2 * gs1
    rgmgs = np.sqrt(gmgs1**2 + gmgs2**2 + gmgs3**2)

    # Cosine and sine of the rotation angle about the Earth-sun line.
    cdze = (ge1 * gm1 + ge2 * gm2 + ge3 * gm3)/rgmgs
    sdze = (ge1 * gmgs1 + ge2 * gmgs2 + ge3 * gmgs3)/rgmgs

    # GSE -> GSM is a rotation about the shared x (Earth-sun) axis.
    xgsm = xgse
    ygsm = cdze * ygse + sdze * zgse
    zgsm = -sdze * ygse + cdze * zgse

    return xgsm, ygsm, zgsm
def subgse2gsm(time_in, data_in):
    """
    Transform data from GSE to GSM.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GSE.

    Returns
    -------
    Array of float
        Coordinates in GSM, one row per time.
    """
    xs, ys, zs = tgsegsm_vect(time_in, data_in)
    print("Running transformation: subgse2gsm")
    # Stack the three component arrays back into (n, 3) rows.
    return np.column_stack([xs, ys, zs])
def tgsmgse_vect(time_in, data_in):
    """
    Transform data from GSM to GSE.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        xgsm, ygsm, zgsm GSM coordinates.

    Returns
    -------
    xgse: list of float
        Cartesian GSE coordinates.
    ygse: list of float
        Cartesian GSE coordinates.
    zgse: list of float
        Cartesian GSE coordinates.
    """
    xgse, ygse, zgse = 0, 0, 0
    d = np.array(data_in)
    xgsm, ygsm, zgsm = d[:, 0], d[:, 1], d[:, 2]

    # Dipole direction (GEO) and sun direction parameters.
    gd1, gd2, gd3 = cdipdir_vect(time_in)
    gst, slong, sra, sdec, obliq = csundir_vect(time_in)

    # Sun direction unit vector in GEI.
    gs1 = np.cos(sra) * np.cos(sdec)
    gs2 = np.sin(sra) * np.cos(sdec)
    gs3 = np.sin(sdec)

    sgst = np.sin(gst)
    cgst = np.cos(gst)

    # Ecliptic pole direction in GEI.
    ge1 = 0.0
    ge2 = -np.sin(obliq)
    ge3 = np.cos(obliq)

    # Dipole direction in GEI system
    gm1 = gd1 * cgst - gd2 * sgst
    gm2 = gd1 * sgst + gd2 * cgst
    gm3 = gd3

    # Cross product of dipole and sun directions (unnormalised).
    gmgs1 = gm2 * gs3 - gm3 * gs2
    gmgs2 = gm3 * gs1 - gm1 * gs3
    gmgs3 = gm1 * gs2 - gm2 * gs1
    rgmgs = np.sqrt(gmgs1**2 + gmgs2**2 + gmgs3**2)

    # Cosine and sine of the rotation angle about the Earth-sun line.
    cdze = (ge1 * gm1 + ge2 * gm2 + ge3 * gm3)/rgmgs
    sdze = (ge1 * gmgs1 + ge2 * gmgs2 + ge3 * gmgs3)/rgmgs

    # Inverse rotation of tgsegsm_vect about the shared x axis.
    xgse = xgsm
    ygse = cdze * ygsm - sdze * zgsm
    zgse = sdze * ygsm + cdze * zgsm

    return xgse, ygse, zgse
def subgsm2gse(time_in, data_in):
    """
    Transform data from GSM to GSE.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GSM.

    Returns
    -------
    Array of float
        Coordinates in GSE, one row per time.
    """
    xs, ys, zs = tgsmgse_vect(time_in, data_in)
    print("Running transformation: subgsm2gse")
    # Stack the three component arrays back into (n, 3) rows.
    return np.column_stack([xs, ys, zs])
def tgsmsm_vect(time_in, data_in):
    """
    Transform data from GSM to SM.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        xgsm, ygsm, zgsm GSM coordinates.

    Returns
    -------
    xsm: list of float
        Cartesian SM coordinates.
    ysm: list of float
        Cartesian SM coordinates.
    zsm: list of float
        Cartesian SM coordinates.
    """
    xsm, ysm, zsm = 0, 0, 0
    d = np.array(data_in)
    xgsm, ygsm, zgsm = d[:, 0], d[:, 1], d[:, 2]

    # Dipole direction (GEO) and sun direction parameters.
    gd1, gd2, gd3 = cdipdir_vect(time_in)
    gst, slong, sra, sdec, obliq = csundir_vect(time_in)

    # Sun direction unit vector in GEI.
    gs1 = np.cos(sra) * np.cos(sdec)
    gs2 = np.sin(sra) * np.cos(sdec)
    gs3 = np.sin(sdec)

    sgst = np.sin(gst)
    cgst = np.cos(gst)

    # Direction of the sun in GEO system
    ps1 = gs1 * cgst + gs2 * sgst
    ps2 = -gs1 * sgst + gs2 * cgst
    ps3 = gs3

    # Computation of mu angle
    # smu is the dot product of the sun and dipole directions (both in GEO).
    smu = ps1 * gd1 + ps2 * gd2 + ps3 * gd3
    cmu = np.sqrt(1.0 - smu * smu)

    # Rotation about the y axis by the dipole tilt angle mu.
    xsm = cmu * xgsm - smu * zgsm
    ysm = ygsm
    zsm = smu * xgsm + cmu * zgsm

    return xsm, ysm, zsm
def subgsm2sm(time_in, data_in):
    """
    Transform data from GSM to SM.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GSM.

    Returns
    -------
    Array of float
        Coordinates in SM, one row per time.
    """
    xs, ys, zs = tgsmsm_vect(time_in, data_in)
    print("Running transformation: subgsm2sm")
    # Stack the three component arrays back into (n, 3) rows.
    return np.column_stack([xs, ys, zs])
def tsmgsm_vect(time_in, data_in):
    """
    Transform data from SM to GSM.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        xsm, ysm, zsm SM coordinates.

    Returns
    -------
    xsm: list of float
        GSM coordinates.
    ysm: list of float
        GSM coordinates.
    zsm: list of float
        GSM coordinates.
    """
    xgsm, ygsm, zgsm = 0, 0, 0
    d = np.array(data_in)
    xsm, ysm, zsm = d[:, 0], d[:, 1], d[:, 2]

    # Dipole direction (GEO) and sun direction parameters.
    gd1, gd2, gd3 = cdipdir_vect(time_in)
    gst, slong, sra, sdec, obliq = csundir_vect(time_in)

    # Sun direction unit vector in GEI.
    gs1 = np.cos(sra) * np.cos(sdec)
    gs2 = np.sin(sra) * np.cos(sdec)
    gs3 = np.sin(sdec)

    sgst = np.sin(gst)
    cgst = np.cos(gst)

    # Direction of the sun in GEO system
    ps1 = gs1 * cgst + gs2 * sgst
    ps2 = -gs1 * sgst + gs2 * cgst
    ps3 = gs3

    # Computation of mu angle
    # smu is the dot product of the sun and dipole directions (both in GEO).
    smu = ps1 * gd1 + ps2 * gd2 + ps3 * gd3
    cmu = np.sqrt(1.0 - smu * smu)

    # Inverse rotation of tgsmsm_vect about the y axis.
    xgsm = cmu * xsm + smu * zsm
    ygsm = ysm
    zgsm = -smu * xsm + cmu * zsm

    return xgsm, ygsm, zgsm
def subsm2gsm(time_in, data_in):
    """
    Transform data from SM to GSM.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in SM.

    Returns
    -------
    Array of float
        Coordinates in GSM, one row per time.
    """
    xs, ys, zs = tsmgsm_vect(time_in, data_in)
    print("Running transformation: subsm2gsm")
    # Stack the three component arrays back into (n, 3) rows.
    return np.column_stack([xs, ys, zs])
def subgei2geo(time_in, data_in):
    """
    Transform data from GEI to GEO.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GEI.

    Returns
    -------
    Array of float
        Coordinates in GEO, one row per time.
    """
    coords = np.array(data_in)
    # Only the Greenwich mean sidereal time is needed for this rotation.
    gst = csundir_vect(time_in)[0]
    sin_gst = np.sin(gst)
    cos_gst = np.cos(gst)

    # Rotate about the z axis by GMST.
    xgeo = cos_gst * coords[:, 0] + sin_gst * coords[:, 1]
    ygeo = -sin_gst * coords[:, 0] + cos_gst * coords[:, 1]
    zgeo = coords[:, 2]

    print("Running transformation: subgei2geo")
    return np.column_stack([xgeo, ygeo, zgeo])
def subgeo2gei(time_in, data_in):
    """
    Transform data from GEO to GEI.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GEO.

    Returns
    -------
    Array of float
        Coordinates in GEI, one row per time.
    """
    coords = np.array(data_in)
    # Only the Greenwich mean sidereal time is needed for this rotation.
    gst = csundir_vect(time_in)[0]
    sin_gst = np.sin(gst)
    cos_gst = np.cos(gst)

    # Rotate about the z axis by -GMST (inverse of subgei2geo).
    xgei = cos_gst * coords[:, 0] - sin_gst * coords[:, 1]
    ygei = sin_gst * coords[:, 0] + cos_gst * coords[:, 1]
    zgei = coords[:, 2]

    print("Running transformation: subgeo2gei")
    return np.column_stack([xgei, ygei, zgei])
def subgeo2mag(time_in, data_in):
    """
    Transform data from GEO to MAG.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GEO.

    Returns
    -------
    Array of float
        Coordinates in MAG.

    Notes
    -----
    Adapted from spedas IDL file geo2mag.pro.
    """
    d = np.array(data_in)

    # Step 1. Transform SM to GEO: SM -> GSM -> GSE -> GEI -> GEO
    # The unit z vector in SM is the dipole axis; expressing it in GEO gives
    # the rotation angles used below.
    n = len(time_in)
    sm = np.zeros((n, 3), float)
    sm[:, 2] = 1.0
    gsm = subsm2gsm(time_in, sm)
    gse = subgsm2gse(time_in, gsm)
    gei = subgse2gei(time_in, gse)
    geo = subgei2geo(time_in, gei)
    # NOTE(review): mag aliases geo; its rows are overwritten in the loop
    # below.  Safe here because theta/phi are extracted first.
    mag = geo  # the output

    # Step 2. Transform cartesian to spherical.
    x2y2 = geo[:, 0]**2 + geo[:, 1]**2
    # r = np.sqrt(x2y2 + geo[:, 2]**2)
    theta = np.arctan2(geo[:, 2], np.sqrt(x2y2))  # lat
    phi = np.arctan2(geo[:, 1], geo[:, 0])  # long

    for i in range(n):
        # Step 3. Apply rotations.
        # Rotate about z by the dipole longitude.
        mlong = np.zeros((3, 3), float)
        mlong[0, 0] = np.cos(phi[i])
        mlong[0, 1] = np.sin(phi[i])
        mlong[1, 0] = -np.sin(phi[i])
        mlong[1, 1] = np.cos(phi[i])
        mlong[2, 2] = 1.0
        out = mlong @ d[i]

        # Rotate about y by the dipole colatitude.
        mlat = np.zeros((3, 3), float)
        mlat[0, 0] = np.cos(np.pi/2.0 - theta[i])
        mlat[0, 2] = -np.sin(np.pi/2.0 - theta[i])
        mlat[2, 0] = np.sin(np.pi/2.0 - theta[i])
        mlat[2, 2] = np.cos(np.pi/2.0 - theta[i])
        mlat[1, 1] = 1.0
        mag[i] = mlat @ out

    print("Running transformation: subgeo2mag")
    return mag
def submag2geo(time_in, data_in):
    """
    Transform data from MAG to GEO.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in MAG.

    Returns
    -------
    Array of float
        Coordinates in GEO.

    Notes
    -----
    Adapted from spedas IDL file mag2geo.pro.
    """
    d = np.array(data_in)

    # Step 1. Transform SM to GEO: SM -> GSM -> GSE -> GEI -> GEO
    # The unit z vector in SM is the dipole axis; expressing it in GEO gives
    # the rotation angles used below.
    n = len(time_in)
    sm = np.zeros((n, 3), float)
    sm[:, 2] = 1.0
    gsm = subsm2gsm(time_in, sm)
    gse = subgsm2gse(time_in, gsm)
    gei = subgse2gei(time_in, gse)
    geo = subgei2geo(time_in, gei)

    # Step 2. Transform cartesian to spherical.
    x2y2 = geo[:, 0]**2 + geo[:, 1]**2
    # r = np.sqrt(x2y2 + geo[:, 2]**2)
    theta = np.arctan2(geo[:, 2], np.sqrt(x2y2))  # lat
    phi = np.arctan2(geo[:, 1], geo[:, 0])  # long

    # NOTE(review): geo's rows are reused as the output buffer below; safe
    # because theta/phi were extracted first.
    for i in range(n):
        # Step 3. Apply rotations (transposes of the ones in subgeo2mag).
        glat = np.zeros((3, 3), float)
        glat[0, 0] = np.cos(np.pi/2.0 - theta[i])
        glat[0, 2] = np.sin(np.pi/2.0 - theta[i])
        glat[2, 0] = -np.sin(np.pi/2.0 - theta[i])
        glat[2, 2] = np.cos(np.pi/2.0 - theta[i])
        glat[1, 1] = 1.0
        out = glat @ d[i]

        glong = np.zeros((3, 3), float)
        glong[0, 0] = np.cos(phi[i])
        glong[0, 1] = -np.sin(phi[i])
        glong[1, 0] = np.sin(phi[i])
        glong[1, 1] = np.cos(phi[i])
        glong[2, 2] = 1.0
        geo[i] = glong @ out

    print("Running transformation: submag2geo")
    return geo
def ctv_mm_mult(m1, m2):
    """
    Vectorized multiplication of two lists of 3x3 matrices.

    Parameters
    ----------
    m1: array of float
        Array (3, 3, n). List of n 3x3 matrices.
    m2: array of float
        Array (3, 3, n). List of n 3x3 matrices.

    Returns
    -------
    Array of float
        Array (3, 3, n). List of n 3x3 matrices.

    Notes
    -----
    Adapted from spedas IDL file matrix_array_lib.pro.
    """
    n = m1.shape[2]
    out = np.zeros((3, 3, n), float)
    # Per time slice t: out[i, j, t] = sum_k m1[i, k, t] * m2[k, j, t],
    # i.e. an ordinary matrix product computed for all n slices at once.
    for i in range(3):
        for j in range(3):
            out[i, j, :] = np.sum(m1[i, :, :] * m2[:, j, :], 0)
    return out
def j2000_matrix_vec(time_in):
    """
    Get the conversion matrix for J2000 coordinates.

    Gives a matrix that transforms from mean earth equator and equinox
    of J2000 into the true earth equator and equinox for the dates and times.

    Parameters
    ----------
    time_in: list of float
        Time array.

    Returns
    -------
    Matrix of float
        Transformation matrix, shape (3, 3, n): precession @ nutation.

    Notes
    -----
    Adapted from spedas IDL file spd_make_j2000_matrix_vec.pro.
    """
    iyear, idoy, ih, im, isec = get_time_parts(time_in)

    n = len(time_in)
    cmatrix = np.zeros((3, 3, n), float)
    nutmat = np.zeros((3, 3, n), float)
    premat = np.zeros((3, 3, n), float)

    # Julian time 2440587.5 = Unix time 0
    # Julian time = Unix time/(60*60*24.0) + 2440587.5
    # J2000 is January 1, 2000 12:00:00
    # = 2451545.0 Julian days
    # One Julian year = 365.25 days
    # One Julian century is 36525 days
    # J2000 time array in Julian centuries:
    time = (np.array(time_in)/(60.0*60.0*24) + 2440587.5 - 2451545.0)/36525.0
    t2 = time**2
    t3 = time**3

    # Precession angles (radians) as cubic polynomials in Julian centuries.
    zeta = (0.11180860865024398e-01 * time
            + 0.14635555405334670e-05 * t2
            + 0.87256766326094274e-07 * t3)
    theta = (0.97171734551696701e-02 * time
             - 0.20684575704538352e-05 * t2
             - 0.20281210721855218e-06 * t3)
    zee = (0.11180860865024398e-01 * time
           + 0.53071584043698687e-05 * t2
           + 0.88250634372368822e-07 * t3)

    sinzet = np.sin(zeta)
    coszet = np.cos(zeta)
    sinzee = np.sin(zee)
    coszee = np.cos(zee)
    sinthe = np.sin(theta)
    costhe = np.cos(theta)

    # Precession matrix.
    premat[0, 0, :] = -sinzet * sinzee + coszet * coszee * costhe
    premat[1, 0, :] = coszee * sinzet + sinzee * costhe * coszet
    premat[2, 0, :] = sinthe * coszet
    premat[0, 1, :] = -sinzee * coszet - coszee * costhe * sinzet
    premat[1, 1, :] = coszee * coszet - sinzee * costhe * sinzet
    premat[2, 1, :] = -sinthe * sinzet
    premat[0, 2, :] = -coszee * sinthe
    premat[1, 2, :] = -sinzee * sinthe
    premat[2, 2, :] = costhe

    r = 1296000.0  # arcseconds per full circle
    dtr = np.pi/(180.0)  # degrees -> radians
    st = dtr/(3600.0)  # arcseconds -> radians
    # Mean obliquity of the ecliptic.
    epso = st*(1.813e-3*t3-5.9e-4*t2-4.6815e+1*time + 8.4381448e+4)

    # Start: Calculations inside spd_get_nut_angles_vec.pro
    funar, sinco, cosco = set_j2000_params()
    # Fundamental arguments of the nutation series (radians).
    fund = [0, 0, 0, 0, 0]
    fund[0] = st*(335778.877+(1342.0*r+295263.137)*time-13.257*t2+1.1e-2*t3)
    fund[1] = st*(450160.28-(5.0*r+482890.539)*time+7.455*t2+8.0e-3*t3)
    fund[2] = st*(1287099.804+(99.0*r+1292581.224)*time-5.77e-1*t2-1.2e-2*t3)
    fund[3] = st*(485866.733+(1325.0*r+715922.633)*time+31.31*t2+6.4e-2*t3)
    fund[4] = st*(1072261.307+(1236.0*r+1105601.328)*time-6.891*t2+1.9e-2*t3)

    arg = funar @ fund
    t = [np.ones(n), time]
    # Nutation in longitude (delpsi) and obliquity (deleps).
    sumpsi = np.sum(0.0001 * (sinco @ t) * np.sin(arg), 0)
    sumeps = np.sum(0.0001 * (cosco @ t) * np.cos(arg), 0)
    delpsi = st*sumpsi
    deleps = st*sumeps
    eps = epso + deleps
    # End: Calculations inside spd_get_nut_angles_vec.pro

    cosep = np.cos(eps)
    cosepO = np.cos(epso)
    cospsi = np.cos(delpsi)
    sinep = np.sin(eps)
    sinepO = np.sin(epso)
    sinpsi = np.sin(delpsi)

    # Nutation matrix.
    nutmat[0, 0, :] = cospsi
    nutmat[0, 1, :] = -sinpsi*cosepO
    nutmat[0, 2, :] = -sinpsi*sinepO
    nutmat[1, 0, :] = sinpsi*cosep
    nutmat[1, 1, :] = cospsi*cosep*cosepO + sinep*sinepO
    nutmat[1, 2, :] = cospsi*cosep*sinepO - sinep*cosepO
    nutmat[2, 0, :] = sinpsi*sinep
    nutmat[2, 1, :] = cospsi*sinep*cosepO - cosep*sinepO
    nutmat[2, 2, :] = cospsi*sinep*sinepO + cosep*cosepO

    # ctv_mm_mult
    cmatrix = ctv_mm_mult(premat, nutmat)

    return cmatrix
def ctv_mx_vec_rot(m, v):
    """
    Vectorized multiplication of n matrices by n vectors.

    Parameters
    ----------
    m: array of float
        Array (k, k, n). List of n kxk matrices.
        Usually, it is 3x3 matrices, ie. k=3.
    v: array of float
        Array (n, k). List of n vectors.

    Returns
    -------
    Array of float
        Array (k, n): per slice t, out[j, t] = sum_i m[i, j, t] * v[t, i].
        Callers transpose to get back to (n, k).

    Notes
    -----
    Adapted from spedas IDL file matrix_array_lib.pro.
    """
    k = m.shape[1]  # This should be 3 for 3x3 matrices.
    n = m.shape[2]
    # Build a zero-copy view a with a[i, j, t] = v[t, i], so an elementwise
    # product with m followed by a sum over axis 0 yields the rotation.
    replicated = np.broadcast_to(v.T[:, None, :], (k, k, n))
    return np.sum(m * replicated, 0)
def subgei2j2000(time_in, data_in):
    """
    Transform data from GEI to J2000.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in GEI.

    Returns
    -------
    Array of float
        Coordinates in J2000, one row per time.
    """
    coords = np.array(data_in)
    cmatrix = j2000_matrix_vec(time_in)
    # Apply the per-time rotation matrices; result comes back as (3, n).
    rotated = ctv_mx_vec_rot(cmatrix, coords)
    print("Running transformation: subgei2j2000")
    return np.transpose(rotated)
def subj20002gei(time_in, data_in):
    """
    Transform data from J2000 to GEI.

    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in J2000.

    Returns
    -------
    Array of float
        Coordinates in GEI, one row per time.
    """
    coords = np.array(data_in)
    cmatrix = j2000_matrix_vec(time_in)
    # Rotation matrices are orthogonal, so the inverse is the transpose of
    # each 3x3 slice.
    icmatrix = np.transpose(cmatrix, (1, 0, 2))
    rotated = ctv_mx_vec_rot(icmatrix, coords)
    print("Running transformation: subj20002gei")
    return np.transpose(rotated)
def get_all_paths_t1_t2():
    """
    Give a dictionary of existing sub functions in this file.

    Returns
    -------
    Dictionary of strings.
        Maps source system -> {destination system -> function name}.
    """
    # One (source, destination, function) triple per direct transformation.
    transformations = [
        ('gei', 'gse', 'subgei2gse'),
        ('gei', 'geo', 'subgei2geo'),
        ('gei', 'j2000', 'subgei2j2000'),
        ('gse', 'gei', 'subgse2gei'),
        ('gse', 'gsm', 'subgse2gsm'),
        ('gsm', 'gse', 'subgsm2gse'),
        ('gsm', 'sm', 'subgsm2sm'),
        ('geo', 'gei', 'subgeo2gei'),
        ('geo', 'mag', 'subgeo2mag'),
        ('sm', 'gsm', 'subsm2gsm'),
        ('mag', 'geo', 'submag2geo'),
        ('j2000', 'gei', 'subj20002gei'),
    ]
    p = {}
    for c_from, c_to, fname in transformations:
        p.setdefault(c_from, {})[c_to] = fname
    return p
def find_path_t1_t2(c1, c2, cpath=None):
    """
    Find path from c1 to c2.

    Parameters
    ----------
    c1: string
        Coordinate system.
    c2: string
        Coordinate system.
    cpath: list of string, optional
        Accumulator shared across the recursion; external callers omit it.

    Returns
    -------
    List of strings.
        Path from c1 to c2.

    Notes
    -----
    Depth-first search over the graph from get_all_paths_t1_t2().  The shared
    `cpath` list is mutated by recursive calls, so the result may include
    detour systems; callers prune it with shorten_path_t1_t2().
    """
    if cpath is None:
        # Top-level call: start the accumulator at the origin system.
        cpath = [c1]
    elif c1 in cpath:
        # Already visited on this search: abandon the branch.
        return
    elif c2 in cpath:
        # Destination was already reached by an earlier branch.
        return
    else:
        cpath.append(c1)

    # Existing transformations.
    c_tr = get_all_paths_t1_t2()
    cn = c_tr[c1].keys()
    if len(cn) == 0:
        return
    if c2 in cn:
        # Direct hop available: finish the path.
        cpath.append(c2)
        return cpath
    else:
        # Recurse into every neighbour; the shared list collects visits.
        for c in cn:
            find_path_t1_t2(c, c2, cpath)
        return cpath
def shorten_path_t1_t2(cpath):
    """
    Find a shorter version of a transformation path.
    Scans the path front to back; as soon as a later system is directly
    reachable from the current one, the systems in between are skipped.
    Parameters
    ----------
    cpath: list of string
        Coordinate system.
    Returns
    -------
    List of strings.
        Path from c1 to c2.
    """
    graph = get_all_paths_t1_t2()
    remaining = cpath.copy()
    shortened = []
    for system in cpath:
        shortened.append(system)
        remaining.remove(system)
        # Look for the furthest-ahead system (excluding index 0) that is
        # one hop away from `system`; jump straight to it if found.
        for idx in range(len(remaining) - 1, 0, -1):
            if remaining[idx] in graph[system]:
                return shortened + remaining[idx:]
    return shortened
def subcotrans(time_in, data_in, coord_in, coord_out):
    """
    Transform data from coord_in to coord_out.
    Calls the other sub functions in this file.
    Parameters
    ----------
    time_in: list of float
        Time array.
    data_in: list of float
        Coordinates in coord_in.
    coord_in: string
        One of GSE, GSM, SM, GEI, GEO, MAG, J2000.
    coord_out: string
        One of GSE, GSM, SM, GEI, GEO, MAG, J2000.
    Returns
    -------
    Array of float
        Coordinates in coord_out.
    """
    data_out = data_in
    coord_systems = ['GSE', 'GSM', 'SM', 'GEI', 'GEO', 'MAG', 'J2000']
    coord_all = [a.lower() for a in coord_systems]
    # Coordinate names are matched case-insensitively.
    coord_in = coord_in.lower()
    coord_out = coord_out.lower()
    if (coord_in not in coord_all) or (coord_out not in coord_all):
        print("Error: coordinate system cannot be found.")
        return None
    if coord_in == coord_out:
        print("Warning: coord_in equal to coord_out.")
        return data_out
    # Construct a list of transformations.
    p = find_path_t1_t2(coord_in, coord_out)
    # Shortening is applied twice — apparently because one pass may only
    # remove a single detour; confirm whether two passes always suffice.
    p = shorten_path_t1_t2(p)
    p = shorten_path_t1_t2(p)
    # NOTE(review): debug print of the chosen path; consider logging instead.
    print(p)
    # Daisy chain the list of transformations.
    for i in range(len(p)-1):
        c1 = p[i]
        c2 = p[i+1]
        # Each hop is a module-level function named "sub<from>2<to>",
        # looked up dynamically in globals().
        subname = "sub" + c1 + "2" + c2
        data_out = globals()[subname](time_in, data_out)
    return data_out
| [
37811,
198,
24629,
2733,
329,
20435,
38226,
13,
198,
198,
4264,
1299,
491,
292,
687,
602,
422,
14,
1462,
262,
1708,
20435,
3341,
25,
198,
38,
5188,
11,
402,
12310,
11,
9447,
11,
22319,
40,
11,
402,
4720,
11,
28263,
11,
449,
11024,
... | 1.946845 | 15,878 |
from typing import Union, List, Optional, Any, Callable, Iterable, Dict
from pathlib import Path
from pydantic import BaseModel, validator
from torch.utils.data.distributed import DistributedSampler
import abc
class TrainerParams(BaseModel):
"""A config for parameters :class:`Trainer`
Args:
gpus: A list or comma separated string of gpus
cuda: Whether to use cuda (gpus) or not
seed: Seed with which torch will be initialized
resume_best: Resume from the previously best state
resume_checkpoint: Resume from given weights
resume: Whether to resume or not.
metrics: A list of names of which values to track
val_frequency: How often to validate terms of `epoch`
log_frequency: How often to log terms of `epoch`
test_frequency: How often in terms of `epoch` to run the test loop
dump_frequency: How often in terms of `epoch` to dump the trainer state
max_epochs: Maximum epochs to train
The behaviour of resuming from a previous state depends on both :code:`resume` and
the params given. If however, :code:`resume` is :code:`True` then :code:`resume_best` is
checked first and :code:`trainer._resume_path` is set to that. Otherwise if
:code:`resume_checkpoint` is given, then the state (including model weights) is
resumed from there.
Otherwise we resume from the last checkpoint.
"""
gpus: Union[List[int], int, str, None]
cuda: bool
seed: int
resume_checkpoint: Optional[Path]
resume_best: Optional[bool]
resume: bool
metrics: List[str]
val_frequency: Optional[int] = 1
log_frequency: Optional[int] = 5
test_frequency: Optional[int] = 5
dump_frequency: Optional[int] = 5
max_epochs: int
save_best_on: Optional[str]
save_best_by: Optional[str]
_validate_gpus = validator("gpus", allow_reuse=True)(gpus_must_evaluate_to_list_of_int)
@validator("resume_checkpoint")
@validator("resume_best")
| [
6738,
19720,
1330,
4479,
11,
7343,
11,
32233,
11,
4377,
11,
4889,
540,
11,
40806,
540,
11,
360,
713,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
4938,
1352,
198,
6738,
28034,
13,
26791,
13,
... | 2.811798 | 712 |
import requests
| [
11748,
7007,
628
] | 5.666667 | 3 |
import getopt
import json
import sys
import opsgenie_sdk
from opsgenie_sdk.metrics.observer import Observer
from opsgenie_sdk.rest import ApiException
if __name__ == '__main__':
    # Forward the command-line arguments (minus the program name) to main().
    # NOTE(review): main is not defined in the visible portion of this file —
    # confirm it exists at module level.
    main(sys.argv[1:])
| [
11748,
651,
8738,
198,
11748,
33918,
198,
11748,
25064,
198,
198,
11748,
39628,
5235,
494,
62,
21282,
74,
198,
6738,
39628,
5235,
494,
62,
21282,
74,
13,
4164,
10466,
13,
672,
15388,
1330,
27058,
198,
6738,
39628,
5235,
494,
62,
21282,
... | 2.688312 | 77 |
'''
This file implements the Heap data structure as a subclass
of the BinaryTree.
The book implements Heaps using an *implicit* tree with an
*explicit* vector implementation,
so the code in the book is likely to be less helpful than the
code for the other data structures.
The book's implementation is the traditional implementation
because it has a faster constant factor
(but the same asymptotics).
This homework is using an explicit tree implementation to help
you get more practice with OOP-style programming and classes.
'''
from containers.BinaryTree import BinaryTree, Node
class Heap(BinaryTree):
    '''
    A min-heap implemented as an explicit binary tree.

    Heap property: every node's value is <= the values of both of its
    children, so the minimum is always at the root.  The tree is kept
    complete by tracking the number of stored values and using the binary
    representation of that size to locate the next free / last occupied
    slot in O(log n).

    Fixes relative to the previous version: @staticmethod decorators were
    misapplied to the instance methods insert_list and remove_min, several
    decorators dangled with no function body, and the private helpers the
    public methods referenced were missing entirely.
    '''

    def __init__(self, xs=None):
        '''
        Create an empty heap; if xs is an iterable, insert each of its
        elements into the heap.
        '''
        super().__init__()
        # Number of values currently stored; required to compute the
        # insertion/removal path in O(log n) instead of O(n).
        self.num_nodes = 0
        if xs is not None:
            self.insert_list(xs)

    def __repr__(self):
        '''
        Return a string that recreates an equivalent instance, e.g.
        "Heap([1, 2, 3])".  type(self).__name__ is used so that
        subclasses inherit a correct __repr__ without reimplementing it.
        '''
        return type(self).__name__ + '(' + str(self.to_list('inorder')) + ')'

    def is_heap_satisfied(self):
        '''
        Return True if every node's value is <= its children's values.
        An empty heap trivially satisfies the property.
        '''
        if self.root:
            return Heap._is_heap_satisfied(self.root)
        return True

    @staticmethod
    def _is_heap_satisfied(node):
        '''
        Recursively verify the min-heap property for the subtree at node.
        '''
        if node is None:
            return True
        if node.left is not None and node.value > node.left.value:
            return False
        if node.right is not None and node.value > node.right.value:
            return False
        return (Heap._is_heap_satisfied(node.left)
                and Heap._is_heap_satisfied(node.right))

    def insert(self, value):
        '''
        Insert value into the heap in O(log n).

        The binary representation of the new size gives the path to the
        next free slot: after the leading '1', each '0' bit means "go
        left" and each '1' bit means "go right".
        '''
        self.num_nodes += 1
        if self.root is None:
            self.root = Node(value)
        else:
            path = bin(self.num_nodes)[3:]
            Heap._insert(self.root, value, path)

    @staticmethod
    def _insert(node, value, path):
        '''
        Walk down `path` from node, create the new leaf, and restore the
        heap property by swapping values upward while unwinding.
        '''
        if path[0] == '0':
            if len(path) == 1:
                node.left = Node(value)
            else:
                Heap._insert(node.left, value, path[1:])
            child = node.left
        else:
            if len(path) == 1:
                node.right = Node(value)
            else:
                Heap._insert(node.right, value, path[1:])
            child = node.right
        # Trickle up: a smaller child value bubbles toward the root.
        if child.value < node.value:
            node.value, child.value = child.value, node.value

    def insert_list(self, xs):
        '''
        Given a list xs, insert each element of xs into self.
        '''
        for x in xs:
            self.insert(x)

    def find_smallest(self):
        '''
        Return the smallest value in the heap, or None if it is empty.
        For a min-heap this is always the root's value.
        '''
        if self.root:
            return self.root.value
        return None

    def remove_min(self):
        '''
        Remove the minimum value from the heap in O(log n).
        If the heap is empty, do nothing.
        '''
        if self.root is None:
            return
        if self.num_nodes == 1:
            self.root = None
            self.num_nodes = 0
            return
        # 1. Detach the bottom-right-most node and remember its value.
        path = bin(self.num_nodes)[3:]
        last_value = Heap._remove_bottom(self.root, path)
        self.num_nodes -= 1
        # 2. Replace the root's value with it.
        self.root.value = last_value
        # 3. Trickle that value down until the heap property holds again.
        Heap._trickle_down(self.root)

    @staticmethod
    def _remove_bottom(node, path):
        '''
        Follow `path` to the last occupied slot, detach that leaf, and
        return its value.
        '''
        if len(path) == 1:
            if path[0] == '0':
                value = node.left.value
                node.left = None
            else:
                value = node.right.value
                node.right = None
            return value
        child = node.left if path[0] == '0' else node.right
        return Heap._remove_bottom(child, path[1:])

    @staticmethod
    def _trickle_down(node):
        '''
        Repeatedly swap node's value with its smallest child until the
        heap property is restored along the descent path.
        '''
        while node is not None:
            smallest = node
            if node.left is not None and node.left.value < smallest.value:
                smallest = node.left
            if node.right is not None and node.right.value < smallest.value:
                smallest = node.right
            if smallest is node:
                return
            node.value, smallest.value = smallest.value, node.value
            node = smallest
| [
7061,
6,
198,
1212,
2393,
23986,
262,
679,
499,
1366,
4645,
355,
257,
47611,
198,
1659,
262,
45755,
27660,
13,
198,
464,
1492,
23986,
679,
1686,
1262,
281,
1635,
23928,
3628,
9,
5509,
351,
281,
198,
9,
20676,
3628,
9,
15879,
7822,
1... | 2.35122 | 2,460 |
#! /usr/bin/env python3
try:
import cffi
except ImportError:
raise SystemExit("Module cffi is missing, install it with `pip install cffi`")
import os
import argparse
BASEDIR = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate CFFI binding .cpp file.")
parser.add_argument("--output", "-o", default=BASEDIR + "/cffi_bindings.gen.c")
parser.add_argument("--cdef", "-c", default=BASEDIR + "/cdef.gen.h")
args = parser.parse_args()
generate_cffi_bindings(args.output, args.cdef)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
269,
487,
72,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
5298,
4482,
30337,
7203,
26796,
269,
487,
72,
318,
4814,
11,
2721,
3... | 2.604444 | 225 |
#!/usr/bin/python3
'''
[ RN2483 Library ]
Python library for RN2483 LoRaWAN transceiver
This implementation is meant to be used on Raspberry Pi.
[-------------------------------------------------------]
Version:
    [0.1]:
        + Changes:
            - Completed command set dictionary
            - Added compatibility for Raspberry Pi 3B+
            - Added LoRaWAN configuration method
            - Added debug mode for terminal response
            - Used Pylint to improve quality of code
            - Added DOC strings to methods and global
        + TODO:
            - Add error handling
            - shrink for microcontrollers
            - Add ESP32 compatibility
'''
import os
import serial
from yaml import load, Loader
from src.exceptions import HostError
LICENSE = "Apache License 2.0 - Copyright (c) 2019 Alexandros Antoniades"
AUTHOR = "Alexandros Antoniades"
DESCRIPTION = "A python module for interfacing with the\r\n \
RN2483 and RN2903 LoRaWAN transceiver"
COMPATIBLE = "- Raspberry Pi 3B+\r\n \
- Raspberry Pi 2\r\n \
- ESP32 (Coming soon) [micropython]\r\n \
- ESP8266 (Coming soon) [micropython]\r\n \
- PyBoard (Coming soon) [micropython]\r\n"
VERSION = "0.1"
DOC = {
"UserGuide": "https://ww1.microchip.com/downloads/en/DeviceDoc/40001784B.pdf",
"Datasheet": "http://ww1.microchip.com/downloads/en/devicedoc/50002346c.pdf"
}
GIT = "https://github.com/alexantoniades/python-RN2483"
INTRO = """
Welcome to RN2483 Command-Line Interface
[----------------------------------------------------------]
{description}
Version {version}
Github: {git}
Compatible with:
{compatible}
Author: {author}
Github: {github}
License: {license}
Documentation can be found at:
Datasheet - {datasheet}
User Guide - {user_guide}
[----------------------------------------------------------]
""".format(
description=DESCRIPTION,
version=VERSION,
git=GIT,
compatible=COMPATIBLE,
author=AUTHOR,
github="https://github.com/alexantoniades",
license=LICENSE,
datasheet=DOC["Datasheet"],
user_guide=DOC["UserGuide"]
)
PORT = "/dev/tty"
BAUDRATE = 57600
class Lora:
    """Driver for the RN2483/RN2903 LoRaWAN transceiver.

    Every public method wraps one entry of the command set documented in
    Microchip's product user guide (see the module-level DOC dictionary).
    """

    def __init__(self, host=None, connection=None):
        """Store the serial connection and load the YAML command table."""
        self.connection = connection
        self.host = host
        commands_path = os.path.join(os.path.dirname(__file__), 'src/commands.yml')
        with open(commands_path) as command_file:
            self.commands = load(command_file, Loader=Loader)
        if self.connection is None:
            raise HostError

    def serial_connection(self):
        """Return the underlying serial connection object."""
        return self.connection

    def close_connection(self):
        """Close the underlying serial connection."""
        return self.connection.close()

    def execute(self, command):
        """Send a command to the device and return one response line."""
        payload = bytes(str(command) + "\r\n", "utf-8")
        self.connection.write(payload)
        return (self.connection.readline()).decode("utf-8")

    def version(self):
        """Return the firmware version string."""
        return self.execute("sys get ver")

    def voltage(self):
        """Return the supply voltage reading."""
        return self.execute("sys get vdd")

    def get_heui(self):
        """Return the hardware EUI."""
        return self.execute("sys get hweui")

    def reset(self):
        """Soft-reset the transceiver."""
        return self.execute("sys reset")

    def factory_reset(self):
        """Restore the transceiver to factory settings."""
        return self.execute("sys factoryRESET")

    def get_value_at_address(self, address):
        """Read the value stored at a memory address (hexadecimal)."""
        return self.execute("sys get nvm {0}".format(str(address)))

    def set_value_at_address(self, address, value):
        """Write a value at a memory address (both hexadecimal)."""
        return self.execute("sys set nvm {0} {1}".format(address, value))

    def sleep(self, duration):
        """Put the device to sleep for `duration` milliseconds."""
        return self.execute("sys sleep {0}".format(duration))

    def set_pin(self, pin, state):
        """Drive a GPIO pin high or low; pin is given as GPIO[0-14].

        Unrecognised state strings are ignored (returns None).
        """
        high_states = ("high", "HIGH", "up", "UP", "true", "TRUE", "1")
        low_states = ("low", "LOW", "down", "DOWN", "false", "FALSE", "0")
        state_text = str(state)
        if state_text in high_states:
            return self.execute("sys set pindig {0} {1}".format(str(pin), "1"))
        if state_text in low_states:
            return self.execute("sys set pindig {0} {1}".format(str(pin), "0"))

    def adaptive_datarate(self, state):
        """Switch the adaptive data rate on or off."""
        return self.execute("mac set adr {0}".format(str(state)))

    def snr(self):
        """Return the transceiver signal-to-noise ratio."""
        return self.execute("radio get snr")

    def send(self, data):
        """Transmit `data` over the radio (MAC layer paused first)."""
        self.execute("mac pause")
        payload = (data.encode('utf-8')).hex()
        return self.execute("radio tx {0}".format(str(payload)))

    def receive(self):
        """Wait for one incoming packet and return it as text."""
        self.execute("mac pause")
        self.execute("radio rx 0")
        return str((self.connection.readline()).decode("utf-8"))

    def config_otaa(self, appkey=None, appeui=None):
        """Configure Over-The-Air Activation and persist the MAC state."""
        return {
            "hweui": self.execute("sys get hweui"),
            "appkey": self.execute("mac set appkey {0}".format(appkey)),
            "appeui": self.execute("mac set appeui {0}".format(appeui)),
            "status": self.execute("mac save"),
        }

    def config_abp(self, nwskey=None, appskey=None, devaddr=None):
        """Configure Activation By Personalization and persist the MAC state."""
        return {
            "hweui": self.execute("sys get hweui"),
            "nwkskey": self.execute("mac set nwkskey {0}".format(nwskey)),
            "appskey": self.execute("mac set appskey {0}".format(appskey)),
            "devaddr": self.execute("mac set devaddr {0}".format(devaddr)),
            "status": self.execute("mac save"),
        }
return(response)
def main():
    """Open the serial port, build a Lora driver, and print the RESET
    entry of the loaded command table."""
    connection = serial.Serial(PORT, BAUDRATE)
    device = Lora(connection=connection)
    print(device.commands["SYSTEM"]["RESET"])


if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
7061,
6,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
685,
43112,
1731,
5999,
10074,
2361,
198,
220,
220,
220,
11361,
... | 2.493161 | 2,705 |
#! /usr/bin/env python
from __future__ import division, print_function
import sys
import unittest
import os
from datetime import datetime, timedelta
from pandas import notnull
from numpy import log
sys.path.append("../../")
from src.preprocessing import read_data, extract_hr_acc
if __name__ == '__main__':
unittest.main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
25064,
198,
11748,
555,
715,
395,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
125... | 3.092593 | 108 |
import torch
class SkewedSyncInt(torch.nn.Module):
    """
    Synchronize two input bit streams using integer stochastic computing.

    Reference:
    "VLSI Implementation of Deep Neural Network Using Integral Stochastic
    Computing"
    """
| [
11748,
28034,
198,
198,
4871,
19460,
19103,
28985,
5317,
7,
13165,
354,
13,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
18305,
1096,
734,
5128,
1643,
15190,
287,
1262,
18253,
3995,
354,
3477,
14492,
198,
220,
... | 3.328571 | 70 |
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2019 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from skimpy.utils.tabdict import TabDict
from sympy import Symbol
from pandas import Series
import h5py
import numpy as np
import pandas as pd
class ParameterValues(object):
"""
Parameters set for kinetic models wich can be indexed with symbols or
"""
    def __init__(self,parameter_values, kmodel=None):
        """
        :param kmodel: KineticModel class
        :param parameter_values: a dict containing parameter names and values
        """
        # Accept a pandas Series as well as a plain dict.
        if parameter_values.__class__ is Series:
            parameter_values = parameter_values.to_dict()
        # Store the values keyed by parameter name (as strings).
        self._parameter_values = TabDict([(str(p),v) for p,v in parameter_values.items()])
        # Check if this is a good solution
        # When a kinetic model is supplied, reuse its parameter symbols;
        # otherwise fall back to fresh sympy Symbols with the same names.
        if kmodel is not None:
            model_params = kmodel.parameters
            self._str_to_sym = { p:model_params[p].symbol for p in model_params}
        else:
            self._str_to_sym = { p:Symbol(p) for p in parameter_values}
        # Reverse lookup: sympy symbol -> parameter name.
        self._sym_to_str = {self._str_to_sym[p]:p for p in self._parameter_values}
        #self._str_to_param = {p:model_params[p] for p in self._parameter_values}
## TODO Lets see this should maybe
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
492,
8265,
3712,
39080,
9078,
198,
220,
220,
1058,
24254,
25,
33501,
11,
3964,
198,
220,
220,
1058,
28869,
24608,
25,
17427,
16645,
5139,
32329,
287,
11361,
... | 2.885174 | 688 |
import re
def password_is_valid(password):
    """Return True if `password` satisfies the policy.

    Rules:
      * length strictly between 6 and 16 characters
      * at least one lowercase letter, one uppercase letter, one digit
      * at least one special character among $, #, @

    BUG FIX: the original pattern '[$,#,@]' also accepted ',' as a
    special character; '[$#@]' matches only the intended set.  The
    original while/break construct (a loop that never looped) is
    replaced by a plain function.
    """
    if not 6 < len(password) < 16:
        return False
    required_patterns = ('[a-z]', '[A-Z]', '[0-9]', '[$#@]')
    return all(re.search(pattern, password) for pattern in required_patterns)


if __name__ == '__main__':
    password = input('Enter your password: ')
    if password_is_valid(password):
        print('Password correct')
    else:
        print('Password is incorrect')
11748,
302,
198,
28712,
796,
5128,
10786,
17469,
534,
9206,
25,
705,
8,
198,
87,
796,
6407,
198,
4514,
2124,
25,
198,
220,
220,
220,
611,
718,
18189,
18896,
7,
28712,
8,
393,
18896,
7,
28712,
8,
18189,
1467,
25,
198,
220,
220,
220... | 2.232558 | 215 |
from public import public
from .numeric import NumericColumn, NumericScalar, NumericValue
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
@public
| [
6738,
1171,
1330,
1171,
198,
198,
6738,
764,
77,
39223,
1330,
399,
39223,
39470,
11,
399,
39223,
3351,
282,
283,
11,
399,
39223,
11395,
628,
198,
31,
11377,
628,
198,
31,
11377,
628,
198,
31,
11377,
628,
198,
31,
11377,
628,
198,
31... | 2.761468 | 109 |
import discord
from discord.errors import NotFound
from discord.ext.commands.errors import BadArgument
from utils.permissions.checks import PermissionsFailure
from utils.context import BlooContext
| [
11748,
36446,
198,
6738,
36446,
13,
48277,
1330,
1892,
21077,
198,
6738,
36446,
13,
2302,
13,
9503,
1746,
13,
48277,
1330,
7772,
28100,
1713,
198,
198,
6738,
3384,
4487,
13,
525,
8481,
13,
42116,
1330,
2448,
8481,
50015,
198,
6738,
3384... | 3.960784 | 51 |
#
# @lc app=leetcode id=706 lang=python3
#
# [706] Design HashMap
#
# @lc code=start
# Your MyHashMap object will be instantiated and called as such:
# obj = MyHashMap()
# obj.put(key,value)
# param_2 = obj.get(key)
# obj.remove(key)
# @lc code=end
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
35402,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
35402,
60,
8495,
21059,
13912,
198,
2,
198,
198,
2,
2488,
44601,
2438,
28,
9688,
628,
198,
2,
3406,
2011,
26257,
13912,
... | 2.53 | 100 |
#!/usr/bin/env python
"""
Setup Info
==========
Returns information about properties in setup.py
"""
import json
import sys
import mock
import setuptools
from pathlib import Path
# Property name requested on the command line; empty string means "all".
if len(sys.argv) > 1:
    prop = sys.argv[1]
else:
    prop = ""

# Setting the path to the base level of the repository.
base_dir = Path(__file__).absolute().parent.parent
sys.path.insert(0, str(base_dir))

# Import setup.py with setuptools.setup() mocked out, so the call is
# recorded instead of executed.
with mock.patch.object(setuptools, "setup") as setup_:
    import setup

# info holds the keyword arguments passed to setup() in setup.py.
args, info = setup_.call_args

# BUG FIX: `prop` is never None (it defaults to ""), so the original
# `if prop is not None:` always took the first branch and raised
# KeyError("") when no argument was given.  Test truthiness instead so
# the whole dict is printed when no property is requested.
if prop:
    print(json.dumps(info[prop]))
else:
    print(json.dumps(info))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
40786,
14151,
198,
2559,
855,
198,
198,
35561,
1321,
546,
6608,
287,
9058,
13,
9078,
198,
198,
37811,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
15290,
198,
11748... | 2.965517 | 232 |
import base64
import os
import random
import unittest
import six
import splunksecrets
# Test fixture: a fixed splunk.secret value (the file normally found at
# $SPLUNK_HOME/etc/auth/splunk.secret) so the encryption tests below are
# deterministic.  Note the trailing newline is part of the secret.
splunk_secret = six.b(
    "JX7cQAnH6Nznmild8MvfN8/BLQnGr8C3UYg3mqvc3ArFkaxj4gUt1RUCaRBD/r0CNn8xOA2oKX8"
    "/0uyyChyGRiFKhp6h2FA+ydNIRnN46N8rZov8QGkchmebZa5GAM5U50GbCCgzJFObPyWi5yT8Cr"
    "SCYmv9cpRtpKyiX+wkhJwltoJzAxWbBERiLp+oXZnN3lsRn6YkljmYBqN9tZLTVVpsLvqvkezPg"
    "pv727Fd//5dRoWsWBv2zRp0mwDv3t\n"
)
| [
11748,
2779,
2414,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
555,
715,
395,
198,
198,
11748,
2237,
198,
198,
11748,
4328,
2954,
2363,
8004,
628,
198,
22018,
2954,
62,
21078,
796,
2237,
13,
65,
7,
198,
220,
220,
220,
220,
220,
... | 1.420118 | 338 |
"""This module contains functions for parsing shell commands from docker history."""
def get_shell() -> str:
    """Return the marker prefix docker history uses for shell commands."""
    shell_marker = "/bin/sh -c"
    return shell_marker
def is_shell(command: str) -> bool:
    """Tell whether `command` begins with the shell marker."""
    marker = get_shell()
    return command.startswith(marker)
def strip_shell(command: str) -> str:
    """Return `command` without the leading shell marker (if present)."""
    marker = get_shell()
    return command.removeprefix(marker)
| [
37811,
1212,
8265,
4909,
5499,
329,
32096,
7582,
9729,
422,
36253,
2106,
526,
15931,
628,
198,
4299,
651,
62,
29149,
3419,
4613,
965,
25,
198,
220,
220,
220,
37227,
13615,
18364,
329,
7582,
11,
543,
460,
307,
257,
399,
3185,
36253,
31... | 3.359155 | 142 |
from threading import Thread, Event
from bobocep.setup.task.bobo_task import BoboTask
class BoboTaskThread(Thread):
"""
A thread to run BoboTask instances. It runs the setup task once, and
then runs the loop task continually until the thread is cancelled.
There is a delay between each loop.
:param task: The task to perform.
:type task: BoboTask
:param delay: The delay between loops.
:type delay: float
"""
    def run(self):
        """Runs the thread.

        Performs the task's one-off setup (unless already cancelled),
        then calls its loop() once per delay interval until either the
        thread or the task is cancelled.  Any exception is deliberately
        swallowed; the finally block always cancels the task and sets
        the cancel event so waiters are released.
        """
        try:
            if (not self._cancel_event.is_set()) and \
                    (not self._task.is_cancelled()):
                self._task.setup()
            # thread has been cancelled if True
            # (Event.wait doubles as the inter-loop delay.)
            while not self._cancel_event.wait(self._delay):
                # task has been cancelled if True
                if not self._task.is_cancelled():
                    self._task.loop()
                else:
                    break
        except Exception:
            pass
        finally:
            # Best-effort task cancellation; errors here are ignored too.
            try:
                self._task.cancel()
            except Exception:
                pass
            self._cancel_event.set()
def is_cancelled(self) -> bool:
"""
:return: True if thread is cancelled, False otherwise.
"""
return self._cancel_event.is_set()
def cancel(self):
"""Cancels the thread."""
self._cancel_event.set()
| [
6738,
4704,
278,
1330,
14122,
11,
8558,
198,
198,
6738,
275,
20391,
344,
79,
13,
40406,
13,
35943,
13,
65,
20391,
62,
35943,
1330,
5811,
78,
25714,
628,
198,
4871,
5811,
78,
25714,
16818,
7,
16818,
2599,
198,
220,
220,
220,
37227,
1... | 2.088406 | 690 |
# Python 2 script (print statements, raw_input): ranks a week of NFL
# games by a hand-rolled "watchability" rating.
import nflgame
from math import fabs
from operator import itemgetter, attrgetter, methodcaller
# Prompt for the season, week number, and season phase to analyse.
year = raw_input("Year: ")
week = raw_input("Week: ")
kind = raw_input("Kind (PRE/REG/POST): ")
games = nflgame.games(int(year), week=int(week), kind=kind)
#filtered_games = filter(lambda x: x.away == "CAR", games)
filtered_games = games
# Attach derived per-game statistics to each game object.
for g in filtered_games:
    g.totalScore = (g.score_home + g.score_away)
    g.scoreDiff = fabs(g.score_home - g.score_away)
    g.totalYards = g.stats_home.total_yds + g.stats_away.total_yds
    g.totalTurnovers = g.stats_home.turnovers + g.stats_away.turnovers
    g.totalFirstDowns = g.stats_home.first_downs + g.stats_away.first_downs
    g.totalScoringAttempts = 0
    g.totalPosessionLosts = 0
    g.drivesCount = 0
    # Count drives, scoring attempts, and possessions lost to turnovers.
    for d in g.drives:
        g.drivesCount += 1
        if d.result == "Touchdown" or d.result == "Field Goal" or d.result == "Missed FG":
            g.totalScoringAttempts += 1
        elif d.result == "Interception" or d.result == "Fumble":
            g.totalPosessionLosts += 1
    # Composite rating: points per scoring attempt + yards per drive +
    # attempt/turnover counts, rewarding close games (small score diff).
    # NOTE(review): divides by totalScoringAttempts and drivesCount; a game
    # with zero of either would raise ZeroDivisionError.
    g.overallRating = (g.totalScore /g.totalScoringAttempts) + (g.totalYards/g.drivesCount) + g.totalScoringAttempts + g.totalPosessionLosts + (8 - g.scoreDiff)
# Print games sorted by rating (ascending), tab separated.
print "AWAY\tHOME\tOVR"
for g in sorted(filtered_games,key=attrgetter('overallRating')):
    print g.away + "\t" + g.home + "\t" + str(g.overallRating)
11748,
299,
2704,
6057,
198,
6738,
10688,
1330,
7843,
82,
198,
6738,
10088,
1330,
2378,
1136,
353,
11,
708,
81,
1136,
353,
11,
2446,
13345,
263,
198,
198,
1941,
796,
8246,
62,
15414,
7203,
17688,
25,
366,
8,
198,
10464,
796,
8246,
6... | 2.449458 | 554 |
#-*-coding utf-8-*-
"""Fetch company names from the location API and write out only the
keywords not already present in oldkeywords.txt.

Fixes: `json` was imported twice, the loop variable `new` was shadowed
by the output file handle, and file objects were close()d again after
their `with` blocks had already closed them.
"""
import json
import io
import sys
import urllib.request

URL = "http://3.94.119.184/location"


def fetch_location_data(url=URL):
    """Download and parse the JSON document served at `url`."""
    response = urllib.request.urlopen(url)
    return json.loads(response.read())


def extract_company_names(data):
    """Return the set of distinct company_name values in the payload."""
    return {entry["company_name"] for entry in data["data"]}


def load_old_keywords(path='oldkeywords.txt'):
    """Return the set of previously seen keywords, one per line."""
    with io.open(path, 'r', encoding='utf-8') as old_file:
        return {line.strip() for line in old_file}


def main():
    # Re-wrap stdout/stderr as UTF-8 so Korean text prints correctly on
    # consoles whose default encoding cannot represent it (Python 3+).
    sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')

    new_keywords = extract_company_names(fetch_location_data())
    old_keywords = load_old_keywords()

    # Keep only keywords we have not seen before.
    only_new = new_keywords - old_keywords
    output = ''
    for keyword in only_new:
        print(keyword)
        output += keyword + '\n'

    with io.open('keywords.txt', 'w', encoding='utf-8') as out_file:
        out_file.write(output)


if __name__ == '__main__':
    main()
2,
12,
9,
12,
66,
7656,
3384,
69,
12,
23,
12,
9,
12,
198,
11748,
33918,
198,
11748,
33245,
198,
11748,
25064,
198,
11748,
2956,
297,
571,
13,
25927,
11,
33918,
198,
198,
6371,
796,
366,
4023,
1378,
18,
13,
5824,
13,
16315,
13,
2... | 2.040717 | 614 |
import urllib3 as url
from bs4 import BeautifulSoup
| [
11748,
2956,
297,
571,
18,
355,
19016,
201,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
201,
198
] | 2.842105 | 19 |
# Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume copy throttling helpers."""
import contextlib
from oslo_concurrency import processutils
from oslo_log import log as logging
from cinder import exception
import cinder.privsep.cgroup
from cinder import utils
LOG = logging.getLogger(__name__)
class Throttle(object):
"""Base class for throttling disk I/O bandwidth"""
DEFAULT = None
@staticmethod
@staticmethod
    @contextlib.contextmanager
    def subcommand(self, srcpath, dstpath):
        """Sub-command that reads from srcpath and writes to dstpath.

        Throttle disk I/O bandwidth used by a sub-command, such as 'dd',
        that reads from srcpath and writes to dstpath. The sub-command
        must be executed with the generated prefix command.
        """
        # NOTE(review): assumes self.prefix is set elsewhere (not visible
        # in this portion of the class) — confirm before relying on it.
        yield {'prefix': self.prefix}
class BlkioCgroup(Throttle):
"""Throttle disk I/O bandwidth using blkio cgroups."""
@utils.synchronized('BlkioCgroup')
@utils.synchronized('BlkioCgroup')
@contextlib.contextmanager
| [
2,
15069,
357,
66,
8,
1853,
7286,
14299,
6060,
11998,
11,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
... | 3.097928 | 531 |
"""Tests to make some huge Gmaps. See README.md for more info."""
from hugegmaps import create_map
def test_calibration():
    """Quick single screenshot of Philly Art Museum for calibration.
    Takes about 10 seconds to run.
    """
    create_map(
        lat_start=39.9644273,
        long_start=-75.1801129,
        number_rows=1,
        number_cols=1,
        scale=0.5,
        sleep_time=0,
        # Crop offsets are zeroed so the raw screenshot can be inspected
        # to determine the values for real runs (see "My value" notes).
        offset_left=0,  # My value: 0.05
        offset_top=0,  # My value: 0.17
        offset_right=0,  # My value: 0.03
        offset_bottom=0,  # My value: 0.09
        outfile='huge_gmap_calibration.png',
    )
def test_small_area():
    """Small 3x3 grid of images to test combining images.
    Takes about 60 seconds to run.
    """
    create_map(
        lat_start=39.9644273,
        long_start=-75.1801129,
        number_rows=3,
        number_cols=3,
        scale=0.2,
        # Wait between screenshots so map tiles finish loading.
        sleep_time=3,
        offset_left=0.05,
        offset_top=0.17,
        offset_right=0.03,
        offset_bottom=0.09,
        outfile='huge_gmap_small_area.png',
    )
def test_philly_high_res():
    """High-res map of Philly. Creates the final version I hung on my wall.
    Takes about 20 minutes to run.

    NOTE(review): despite the name and docstring, the active coordinates
    and output filename target Pyeongtaek, South Korea (see the candidate
    coordinates commented below) — rename or update the docstring.
    """
    # 37.027491,126.8389479 // pyong taek
    # 37.0410402,126.9306334
    # 37.0770916,126.8154896
    create_map(
        lat_start=37.0770916,
        long_start=126.8154896,
        number_rows=5,
        number_cols=5,
        scale=1,
        sleep_time=3,
        offset_left=0.3,
        offset_top=0.17,
        offset_right=0.1,
        offset_bottom=0.09,
        outfile='huge_gmap_high_res_pyongtaek.png',
    )
# NOTE(review): main() is never defined in this module, so running the file
# directly raises NameError — presumably one of the test_* functions (or
# pytest) should be invoked instead.
if __name__ == '__main__': main()
37811,
51,
3558,
284,
787,
617,
3236,
402,
31803,
13,
4091,
20832,
11682,
13,
9132,
329,
517,
7508,
526,
15931,
198,
198,
6738,
3236,
70,
31803,
1330,
2251,
62,
8899,
628,
198,
4299,
1332,
62,
9948,
571,
1358,
33529,
198,
220,
220,
... | 2.01199 | 834 |
# Generated by Django 2.0.1 on 2018-02-03 17:04
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
16,
319,
2864,
12,
2999,
12,
3070,
1596,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# -*- coding: utf-8 -*-
""" Functional tests universitas.no """
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.contrib.staticfiles.testing import StaticLiveServerCase
from django.conf import settings
from os import path
PHANTOMJS_EXECUTABLE_PATH = '/home/haakenlid/node_modules/phantomjs/lib/phantom/bin/phantomjs'
PHANTOMJS_LOG_PATH = path.join(settings.LOG_FOLDER, 'phantom.log')
# PHANTOMJS_LOG_PATH = settings.LOG_FOLDER
WEBDRIVER = 'PhantomJS'
# WEBDRIVER = 'Firefox'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
44224,
5254,
5820,
21416,
13,
3919,
37227,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
13083,
1330... | 2.816216 | 185 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VoiceConnectorGroupArgs', 'VoiceConnectorGroup']
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.518248 | 137 |
from setuptools import setup, find_packages
setup(
    name="blinking",
    version="0.0.1",
    author="wemakesoftware.com",
    author_email="a.bresser@greyrook.com",
    description="arduino control software using kivy and firmata",
    install_requires=['pyfirmata', 'pybluez', 'kivy'],
    # FIX: the setuptools keyword is ``zip_safe`` -- the original
    # ``zip_save`` was a typo that setuptools silently ignores (unknown
    # keywords only produce a warning), so the package was never marked
    # as unsafe to install zipped.
    zip_safe=False,
    packages=find_packages(),
    # NOTE(review): dependency_links is deprecated in modern pip/setuptools;
    # kept as-is since removing it would change install behavior.
    dependency_links=[
        "https://bitbucket.org/tino/pyfirmata/get/0.9.4.tar.gz#egg=pyfirmata"
    ]
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
2436,
8040,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
15,
13,
16,
1600,
198,
220,
220,
220,
1772,
2625,
86,
368,
112... | 2.336788 | 193 |
from src.server.instance import server
from flask_restplus import fields
# Flask-RESTPlus request model for a credential-change endpoint: the caller
# supplies the current email/password plus the replacement pair. All four
# fields are required strings; used for request validation/documentation.
new_login = server.api.model('newLogin', {
    'oldEmail': fields.String(required=True, description='Email'),
    'oldPassword': fields.String(required=True, description='Password'),
    'newEmail': fields.String(required=True, description='Email'),
    'newPassword': fields.String(required=True, description='Password')
})
6738,
12351,
13,
15388,
13,
39098,
1330,
4382,
198,
6738,
42903,
62,
2118,
9541,
1330,
7032,
628,
198,
3605,
62,
38235,
796,
4382,
13,
15042,
13,
19849,
10786,
3605,
47790,
3256,
1391,
198,
220,
220,
220,
705,
727,
15333,
10354,
7032,
... | 3.5 | 114 |
import os
import requests
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
| [
11748,
28686,
198,
11748,
7007,
628,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
628,
198,
2,
13610,
534,
5009,
994,
13,
628
] | 3.823529 | 34 |
from flask import Blueprint, render_template, redirect, request, url_for, flash
from flask_login import current_user
from sqlalchemy import or_
from app.admin.forms import UserForm, TicketForm, TicketUpdateForm, CategoryForm, PriorityForm, StatusForm, EmailUpdateForm, PasswordChangeForm
from app.models import User, Ticket, Category, Priority, Status, Comment
from app.decorators import login_required
from app import db, bcrypt
import uuid
admin_blueprint = Blueprint('admin', __name__)
@admin_blueprint.route('/dashboard')
@login_required(role='admin')
@admin_blueprint.route('/tickets', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/ticket/update/<int:id>/<public_id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/ticket/delete/<int:id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/ticket/comments/<int:id>/<public_id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/ticket/open/<int:id>/<public_id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/ticket/close/<int:id>/<public_id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/categories', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/category/update', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/category/delete/<int:id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/priorities', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/priority/update', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/priority/delete/<int:id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/statuses', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/status/update', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/status/delete/<int:id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/user_management', methods=['GET', 'POST'])
@admin_blueprint.route('/user/delete/<int:id>', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/account_settings', methods=['GET', 'POST'])
@login_required(role='admin')
@admin_blueprint.route('/account/delete/<int:id>', methods=['GET', 'POST'])
@login_required(role='admin') | [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
18941,
11,
2581,
11,
19016,
62,
1640,
11,
7644,
198,
6738,
42903,
62,
38235,
1330,
1459,
62,
7220,
198,
6738,
44161,
282,
26599,
1330,
393,
62,
198,
6738,
598,
13,
28482,
13,
23914,
... | 2.892486 | 865 |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# Imports
# =======
# Library imports
# ---------------
import copy
import glob
import os
import pytest
import re
import subprocess
import sys
import inspect
import textwrap
import io
import shutil
from contextlib import suppress
# Third-party imports
# -------------------
import py
import psutil # Manages subprocess timeout.
# Set a handler for the root-logger to inhibit 'basicConfig()' (called in
# PyInstaller.log) is setting up a stream handler writing to stderr. This
# avoids log messages to be written (and captured) twice: once on stderr and
# once by pytests's caplog.
import logging
logging.getLogger().addHandler(logging.NullHandler())
# Local imports
# -------------
# Expand sys.path with PyInstaller source.
_ROOT_DIR = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
sys.path.append(_ROOT_DIR)
from PyInstaller import configure, config
from PyInstaller import __main__ as pyi_main
from PyInstaller.utils.tests import gen_sourcefile
from PyInstaller.utils.cliutils import archive_viewer
from PyInstaller.compat import is_darwin, is_win, safe_repr, \
architecture, is_linux, text_read_mode
from PyInstaller.depend.analysis import initialize_modgraph
from PyInstaller.utils.win32 import winutils
from PyInstaller.utils.hooks.qt import pyqt5_library_info, pyside2_library_info
# Globals
# =======
# Timeout for running the executable. If executable does not exit in this time
# then it is interpreted as test failure.
_EXE_TIMEOUT = 30 # In sec.
# Number of retries we should attempt if the executable times out.
_MAX_RETRIES = 2
# All currently supported platforms
SUPPORTED_OSES = {"darwin", "linux", "win32"}
# Code
# ====
# Fixtures
# --------
@pytest.fixture
def SPEC_DIR(request):
    """Directory containing the test .spec files, as a py.path.local."""
    spec_directory = _get_spec_dir(request)
    return py.path.local(spec_directory)
@pytest.fixture
def SCRIPT_DIR(request):
    """Directory containing the test scripts, as a py.path.local."""
    script_directory = _get_script_dir(request)
    return py.path.local(script_directory)
def pytest_runtest_setup(item):
    """Skip platform-specific tests when running on a different platform.

    https://pytest.org/en/stable/example/markers.html#marking-platform-specific-tests-with-pytest

    Available markers: see setup.cfg [tool:pytest] markers
        - @pytest.mark.darwin (macOS)
        - @pytest.mark.linux (GNU/Linux)
        - @pytest.mark.win32 (Windows)
    """
    requested_markers = {marker.name for marker in item.iter_markers()}
    platform_markers = SUPPORTED_OSES & requested_markers
    # No platform marker at all means the test runs everywhere.
    if platform_markers and sys.platform not in platform_markers:
        pytest.skip("only runs on %s" % sys.platform)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
# Return the base directory which contains the current test module.
# Directory with Python scripts for functional tests. E.g. main scripts, etc.
# Directory with testing modules used in some tests.
# Directory with .toc log files.
# Return the directory where data for tests is located.
# Directory with .spec files used in some tests.
@pytest.fixture
# A helper function to copy from data/dir to tmpdir/data.
# Define a fixure for the DataDir object.
@pytest.fixture
# Scope 'session' should keep the object unchanged for whole tests.
# This fixture caches basic module graph dependencies that are same
# for every executable.
@pytest.fixture(scope='session')
# Run by default test as onedir and onefile.
@pytest.fixture(params=['onedir', 'onefile'])
# Fixture for .spec based tests.
# With .spec it does not make sense to differentiate onefile/onedir mode.
@pytest.fixture
# Define a fixture which compiles the data/load_dll_using_ctypes/ctypes_dylib.c
# program in the tmpdir, returning the tmpdir object.
@pytest.fixture()
| [
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
5075,
12,
42334,
11,
9485,
15798,
263,
7712,
4816,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
13789,
357,
9641,
362,
198,
2,
393,
1568,
8,
351,
6631,
... | 3.317225 | 1,283 |
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy.io as scio
def vis_gt(im, bboxes, plt_name='output', ext='.png', visualization_folder=None):
    """
    Visualize ground-truth boxes on an image, saving or showing the figure.

    :param im: The image (array accepted by ``ax.imshow``)
    :param bboxes: ground truth boxes, one per row; columns 0-3 are
        (x1, y1, x2, y2). Column 4 is used as the green channel of the
        edge color -- presumably a score/flag in [0, 1]; TODO confirm.
    :param plt_name: The name of the plot (also the output file stem)
    :param ext: The save extension (if visualization_folder is not None)
    :param visualization_folder: The folder to save the results; when None
        the figure is shown interactively instead of saved.
    """
    # fig is unused but returned by subplots; kept for clarity.
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    if bboxes.shape[0] != 0:
        for i in range(bboxes.shape[0]):
            bbox = bboxes[i, :]
            # Rectangle takes (x, y), width, height -- hence the subtractions.
            ax.add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor=(0, bbox[4], 0), linewidth=3)
            )
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
    if visualization_folder is not None:
        if not os.path.exists(visualization_folder):
            os.makedirs(visualization_folder)
        plt_name += ext
        plt.savefig(os.path.join(visualization_folder, plt_name), bbox_inches='tight')
        print('Saved {}'.format(os.path.join(visualization_folder, plt_name)))
    else:
        print('Visualizing {}!'.format(plt_name))
        plt.show()
    # Clear figure and axes so repeated calls do not accumulate patches.
    plt.clf()
    plt.cla()
plt.cla()
if __name__ == '__main__':
    # Sanity check: load the WIDER FACE validation annotations.
    # scipy.io.loadmat returns a dict mapping variable names to arrays.
    imdb = scio.loadmat("../data/datasets/wider/wider_face_split/wider_face_val.mat")
    # FIX: the original ``print(imdb.items)`` printed the bound dict method
    # object itself (e.g. "<built-in method items ...>"); calling it shows
    # the actual loaded contents.
    print(imdb.items())
    print("well done!")
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
629,
541,
88,
13,
952,
355,
629,
952,
198,
198,
4299,
1490,
62,
13655,
7,
320,
11,
275,
29305,
11,
458,
83,
... | 2.140827 | 774 |
"""
FastAPI provides Dependency Injection system, so we implement a few.
More info about dependencies: https://fastapi.tiangolo.com/tutorial/dependencies/
It is better to create a new dependency if you want to use
something in your endpoint handler (telegram bot, security check, external library).
"""
from aiogram import (
Dispatcher,
Bot,
)
from ppm_telegram_bot.telegram.dispatcher import dispatcher
def bot_dispatcher() -> Dispatcher:
    """
    FastAPI dependency returning the aiogram Dispatcher.

    Sets the aiogram "current" context for both Bot and Dispatcher manually
    so that webhook updates are processed correctly outside of aiogram's own
    polling loop. Source: https://t.me/aiogram_ru/167051

    :return: the module-level dispatcher with its context configured
    """
    # Bind the context before handing the dispatcher to the request handler.
    Bot.set_current(dispatcher.bot)
    Dispatcher.set_current(dispatcher)
    return dispatcher
| [
37811,
198,
22968,
17614,
3769,
37947,
1387,
554,
29192,
1080,
11,
523,
356,
3494,
257,
1178,
13,
198,
198,
5167,
7508,
546,
20086,
25,
3740,
1378,
7217,
15042,
13,
83,
15483,
14057,
13,
785,
14,
83,
44917,
14,
45841,
3976,
14,
198,
... | 3.178404 | 213 |
from conans import ConanFile, CMake, tools
import os
| [
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
11,
4899,
198,
11748,
28686,
198
] | 3.533333 | 15 |
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
10662,
17721,
21282,
74,
7295,
13,
25927,
1330,
19390,
198
] | 2.576923 | 26 |
# Read a Celsius temperature from the user; raises ValueError on non-numeric
# input since there is no validation around float().
cel = float(input("Enter The Temperature in Celsius :"))
# ``ctof`` is defined elsewhere in this file -- presumably converts Celsius
# to Fahrenheit and prints the result; confirm against its definition.
ctof(cel)
| [
198,
198,
5276,
796,
12178,
7,
15414,
7203,
17469,
383,
34467,
287,
34186,
1058,
48774,
198,
310,
1659,
7,
5276,
8,
198
] | 3.136364 | 22 |
from django.conf.urls import url
from .views import TagSearch, SaveTags, TagList
from .ajax import GetTags
# URL routes for the tagging app. NOTE(review): urlpatterns is a tuple here;
# modern Django expects a list -- confirm against the Django version in use.
urlpatterns = (
    # Attach tags to an arbitrary model instance identified by app/model/pk.
    url(r'^save-tags/(?P<app>\w+)/(?P<model>\w+)/(?P<pk>\d+)/$',
        SaveTags.as_view(), name='save-tags'),
    # Search objects by tag; the tag pattern allows spaces and punctuation.
    url(r'^tag-search/(?P<tag>[\w !:\.,;_+-]+)/$',
        TagSearch.as_view(), name='tag-search'),
    url(r'^tag-list/$',
        TagList.as_view(), name='tag-list'),
    # ajax autocomplete endpoint
    url(r'^get-tags/$', GetTags.as_view(), name="get-tags"),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
33571,
1330,
17467,
18243,
11,
12793,
36142,
11,
17467,
8053,
198,
6738,
764,
1228,
897,
1330,
3497,
36142,
198,
198,
6371,
33279,
82,
796,
357,
198,
220,
... | 2.085837 | 233 |
import numpy as np
from sklearn.decomposition import TruncatedSVD
def get_word_weight(weight_filename, alpha=1e-3):
    """Compute SIF word weights ``alpha / (alpha + tf / N)`` from a corpus file.

    The file contains one ``word term-frequency`` pair per line; blank lines
    are skipped and malformed lines are reported. ``N`` is the total term
    frequency over the whole file. A non-positive ``alpha`` is reset to 1.0
    before weighting.
    """
    if alpha <= 0:
        alpha = 1.0

    words_weight = {}
    total_tf = 0
    with open(weight_filename) as handle:
        for record in handle:
            record = record.strip()
            if not record:
                continue
            parts = record.split()
            if len(parts) == 2:
                token, count = parts
                words_weight[token] = float(count)
                total_tf += float(count)
            else:
                print("{} is not a valid (word, termfrequency) record".format(parts))

    # Turn raw term frequencies into smoothed inverse-frequency weights.
    for token, count in words_weight.items():
        words_weight[token] = alpha / (alpha + count / total_tf)
    return words_weight
def get_pretrained_index_weight(word_pretrained_index, words_weight):
    """Map pretrained-embedding word indices to their SIF weights.

    Words without a computed weight default to 1.0 (i.e. unweighted).
    """
    return {
        embedding_idx: words_weight.get(token, 1.0)
        for token, embedding_idx in word_pretrained_index.items()
    }
def get_sentence_pretrained_index(sentences, words_pretrained_index):
    """Convert sentences into padded arrays of pretrained-embedding indices.

    Each sentence is mapped to its word-index sequence and zero-padded to
    the length of the longest sentence.

    Parameters
    ----------
    sentences : list of sentences to convert
    words_pretrained_index : mapping from word to pretrained-embedding index

    Returns
    -------
    (word_index_sentence, mask) -- row ``i`` of ``word_index_sentence``
    holds the word indices of sentence ``i``; ``mask[i, :]`` is 1 where a
    word is present and 0 in the padded tail.
    """
    index_sequences = [
        get_sequence(sentence, words_pretrained_index)
        for sentence in sentences
    ]
    return pad_sequences(index_sequences)
def pad_sequences(sequences):
    """Right-pad integer sequences with 0 to the length of the longest one.

    :param sequences: list of integer sequences (possibly of varying length)
    :return: ``(x, x_mask)`` where ``x`` is an int32 array of shape
        (n_samples, maxlen) holding the padded sequences, and ``x_mask`` is a
        float32 array of the same shape with 1.0 where a real token is
        present and 0.0 in the padding.
    """
    n_samples = len(sequences)
    # FIX: the original crashed on an empty input list (np.max over an empty
    # sequence raises ValueError); return empty arrays instead.
    if n_samples == 0:
        return (np.zeros((0, 0), dtype="int32"),
                np.zeros((0, 0), dtype="float32"))

    lengths = [len(s) for s in sequences]
    maxlen = max(lengths)
    x = np.zeros((n_samples, maxlen), dtype="int32")
    x_mask = np.zeros((n_samples, maxlen), dtype="float32")
    for idx, sentence in enumerate(sequences):
        x[idx, :lengths[idx]] = sentence
        x_mask[idx, :lengths[idx]] = 1.0
    return x, x_mask
def get_word_weights_sequence(sequences, mask, index_weights):
    """Look up the SIF weight for every word position in a padded batch.

    Positions that are masked out, or whose word index is negative, keep a
    weight of 0.
    """
    weights = np.zeros(sequences.shape, dtype="float32")
    n_rows, n_cols = sequences.shape
    for row in range(n_rows):
        for col in range(n_cols):
            token_idx = sequences[row, col]
            if mask[row, col] > 0 and token_idx >= 0:
                weights[row, col] = index_weights[token_idx]
    return weights
def get_weighted_average(embedding_matrix, word_index_sentence, weights):
    """Compute per-sentence weighted averages of word embeddings.

    Each sentence's embedding is the weight-dot-product of its word vectors
    divided by the number of non-zero weights. A sentence with no non-zero
    weights divides by zero and yields NaN/inf (handled by the caller).
    """
    n_sentences = word_index_sentence.shape[0]
    emb_dim = embedding_matrix.shape[1]
    averages = np.zeros((n_sentences, emb_dim))
    for row in range(n_sentences):
        row_weights = weights[row, :]
        nonzero_count = np.count_nonzero(row_weights)
        weighted_sum = row_weights.dot(embedding_matrix[word_index_sentence[row, :], :])
        averages[row, :] = weighted_sum / nonzero_count
    return averages
def remove_pc(X, n_components_rm=1, **kwargs):
    """Remove the projection of ``X`` onto its top principal component(s).

    Keyword arguments ``n_components`` (default 1) and ``random_state``
    (default 2020) are forwarded to TruncatedSVD.
    """
    svd = TruncatedSVD(
        n_components=kwargs.get("n_components", 1),
        random_state=kwargs.get("random_state", 2020),
    )
    svd.fit(X)
    pc = svd.components_
    # Single-component removal uses the cheaper elementwise form.
    if n_components_rm == 1:
        return X - X.dot(pc.transpose()) * pc
    return X - X.dot(pc.transpose()).dot(pc)
def sif_embeddings(embeddings_matrix, word_index_sentence, weights):
    """ Get SIF embeddings: weighted average minus the principal component """
    embeddings_matrix_avg = get_weighted_average(embeddings_matrix, word_index_sentence, weights)
    # Sentences with no non-zero weights produce NaN rows; zero them out.
    embeddings_matrix_avg[np.isnan(embeddings_matrix_avg)] = 0.0
    # Remove the common-direction component (default: first PC).
    embeddings_matrix_avg_rm_pc = remove_pc(embeddings_matrix_avg)
    return embeddings_matrix_avg_rm_pc
| [
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
833,
19524,
515,
50,
8898,
628,
198,
4299,
651,
62,
4775,
62,
6551,
7,
6551,
62,
34345,
11,
17130,
28,
16,
68,
12,
18,
2599,
198,
220,
220,
22... | 2.458287 | 1,798 |
# Copyright (c) 2017-2021, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Tests scrypt implementations using hypothesis"""
import sys
import unittest
from hypothesis import given, settings
from hypothesis.strategies import (
binary, integers, none, one_of, sampled_from, text)
from .common import (
SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY)
# Strategies for producing parameters
class ScryptTests(unittest.TestCase):
"""Tests an scrypt implementation from module"""
set_up_lambda = lambda self:None
tear_down_lambda = lambda self:None
module = None
ref = None
@given(valid_pass(), valid_salt(), valid_olen())
@settings(deadline=500)
@given(valid_mcf_pass(), valid_mcf_salt(), mcf_prefix())
@settings(deadline=500)
if __name__ == "__main__":
    # Build one suite covering every available scrypt implementation.
    # ``ref`` holds the first successfully-imported implementation and is
    # passed to later suites so they can cross-check results against it.
    suite = unittest.TestSuite()
    ref = None
    try:
        from . import hashlibscrypt
        suite.addTest(load_scrypt_suite('hashlibscryptTests', hashlibscrypt, ref))
        ref = hashlibscrypt
    except ImportError:
        # Register a placeholder suite so the missing backend is reported.
        suite.addTest(load_scrypt_suite('hashlibscryptTests', None, ref))
    try:
        from . import pylibscrypt
        suite.addTest(load_scrypt_suite('pylibscryptTests', pylibscrypt, ref))
        ref = ref or pylibscrypt
    except ImportError:
        suite.addTest(load_scrypt_suite('pylibscryptTests', None, ref))
    try:
        from . import pyscrypt
        suite.addTest(load_scrypt_suite('pyscryptTests', pyscrypt, ref))
        ref = ref or pyscrypt
    except ImportError:
        suite.addTest(load_scrypt_suite('pyscryptTests', None, ref))
    try:
        from . import pylibsodium
        suite.addTest(load_scrypt_suite('pylibsodiumTests',
                                        pylibsodium, ref))
        # Also test pylibsodium's fallback path with a dynamically-built
        # TestCase subclass that patches in the fallback setup/teardown.
        from . import pylibscrypt
        loader = unittest.defaultTestLoader
        tmp = type(
            'pylibsodiumFallbackTests', (ScryptTests,),
            {
                'module': pylibsodium,
                'fast': False, # supports only large parameters
                'set_up_lambda': set_up_ll,
                'tear_down_lambda': tear_down_ll,
            }
        )
        suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(tmp))
    except ImportError:
        suite.addTest(load_scrypt_suite('pylibsodiumTests', None, ref))
    try:
        from . import pypyscrypt_inline as pypyscrypt
        suite.addTest(load_scrypt_suite('pypyscryptTests', pypyscrypt, ref))
    except ImportError:
        suite.addTest(load_scrypt_suite('pypyscryptTests', None, ref))
    # Exit non-zero if any test failed so CI picks up the failure.
    result = unittest.TextTestRunner().run(suite)
    sys.exit(not result.wasSuccessful())
| [
2,
15069,
357,
66,
8,
2177,
12,
1238,
2481,
11,
2365,
12372,
8873,
198,
2,
198,
2,
2448,
3411,
284,
779,
11,
4866,
11,
13096,
11,
290,
14,
273,
14983,
428,
3788,
329,
597,
198,
2,
4007,
351,
393,
1231,
6838,
318,
29376,
7520,
11... | 2.441558 | 1,386 |
# -*- coding: utf-8 -*-
"""
Miscellaneous utility functions for Excel files
This module implements utility functions for Excel functionality that is common
to different kinds of spreadsheets used in Atomica (e.g. Databooks and Program Books).
For example, Excel formatting, and time-varying data entry tables, are implemented here.
"""
from xlsxwriter.utility import xl_rowcol_to_cell as xlrc
import sciris as sc
import io
import numpy as np
from .system import FrameworkSettings as FS
import pandas as pd
from .utils import format_duration, datetime_to_year
import xlsxwriter
# Suppress known warning in Openpyxl
# Warnings are:
# - C:\ProgramData\Miniconda3\envs\atomica37\lib\site-packages\openpyxl\worksheet\_reader.py:300: UserWarning: Conditional Formatting extension is not supported and will be removed
# warn(msg)
# - C:\ProgramData\Miniconda3\envs\atomica37\lib\site-packages\openpyxl\worksheet\_reader.py:300: UserWarning: Data Validation extension is not supported and will be removed
# warn(msg)
# This means that conditional formatting and data valuation rules aren't being loaded, but since `data_only=True` these don't matter and can be safely ignored
import warnings
warnings.filterwarnings(action="ignore", category=UserWarning, module="openpyxl.worksheet", lineno=300)
def standard_formats(workbook):
    # Add standard formatting to a workbook and return the set of format
    # objects for use when writing within the workbook.
    """Create and return the named xlsxwriter formats used in the spreadsheet."""
    # darkgray = '#413839'
    # optima_blue = '#18C1FF'
    atomica_blue = "#98E0FA"
    optional_orange = "#FFA500"
    BG_COLOR = atomica_blue
    OPT_COLOR = optional_orange
    BORDER_COLOR = "white"

    formats = {}

    # Locked formats -- titles and headers the user should not edit.
    formats["bold"] = workbook.add_format({"bold": 1})
    formats["center"] = workbook.add_format({"align": "center"})
    formats["center_bold"] = workbook.add_format({"bold": 1, "align": "center"})

    # Row/column title formats keyed by alignment then wrap flag ("T"/"F").
    formats["rc_title"] = {}
    formats["rc_title"]["right"] = {}
    formats["rc_title"]["right"]["T"] = workbook.add_format({"bold": 1, "align": "right", "text_wrap": True})
    formats["rc_title"]["right"]["F"] = workbook.add_format({"bold": 1, "align": "right", "text_wrap": False})
    formats["rc_title"]["left"] = {}
    formats["rc_title"]["left"]["T"] = workbook.add_format({"bold": 1, "align": "left", "text_wrap": True})
    formats["rc_title"]["left"]["F"] = workbook.add_format({"bold": 1, "align": "left", "text_wrap": False})

    # Unlocked formats -- editable data-entry cells (blue background).
    formats["unlocked"] = workbook.add_format({"locked": 0, "bg_color": BG_COLOR, "border": 1, "border_color": BORDER_COLOR})
    formats["center_unlocked"] = workbook.add_format({"align": "center", "locked": 0, "bg_color": BG_COLOR, "border": 1, "border_color": BORDER_COLOR})
    formats["general"] = workbook.add_format({"locked": 0, "num_format": 0x00, "bg_color": BG_COLOR, "border": 1, "border_color": BORDER_COLOR})

    # Conditional formats -- applied by conditional-formatting rules.
    formats["unlocked_boolean_true"] = workbook.add_format({"bg_color": OPT_COLOR})
    formats["unlocked_boolean_false"] = workbook.add_format({"bg_color": BG_COLOR})
    formats["not_required"] = workbook.add_format({"bg_color": "#EEEEEE", "border": 1, "border_color": "#CCCCCC"})
    formats["white_bg"] = workbook.add_format({"bg_color": "#FFFFFF", "border": 1, "border_color": "#CCCCCC"})
    formats["ignored"] = workbook.add_format({"pattern": 14})  # Hatched with diagonal lines - this represents a cell whose value will not be used in the model run (e.g., an assumption that also has time-specific points)
    formats["warning"] = workbook.add_format({"bg_color": "#FF0000"})
    formats["ignored_warning"] = workbook.add_format({"pattern": 14, "bg_color": "#FF0000"})  # hatched, with red background
    formats["ignored_not_required"] = workbook.add_format({"pattern": 14, "bg_color": "#EEEEEE", "border": 1, "border_color": "#CCCCCC"})  # hatched, with grey background

    return formats
def update_widths(width_dict: dict, column_index: int, contents: str) -> None:
    """Track the widest content written to each column.

    ``width_dict`` maps a column index (0, 1, 2, ...) to the length of the
    longest string seen so far for that column; it is updated in place.

    :param width_dict: Storage dictionary (``None`` disables tracking)
    :param column_index: Index of the column the value was written to
    :param contents: Cell content; only non-empty strings are considered
    """
    if width_dict is None or contents is None or not sc.isstring(contents):
        return
    if not contents:
        return
    width_dict[column_index] = max(width_dict.get(column_index, 0), len(contents))
def transfer_comments(target: sc.Spreadsheet, comment_source: sc.Spreadsheet) -> None:
    """
    Copy comments between spreadsheets

    This function copies comments from one spreadsheet to another. Under the hood,
    a new spreadsheet is created with values from the ``target`` Spreadsheet
    and cell-wise formatting from the ``comment_source`` Spreadsheet. If a cell exists in
    this spreadsheet and not in the source, it will be retained as-is. If more cells exist in
    the ``comment_source`` than in this spreadsheet, those cells will be dropped. If a sheet exists in
    the ``comment_source`` and not in the current workbook, it will be added

    Note that comment authorship is not preserved -- copied comments get an
    empty author string. ``target`` is modified in place.

    :param target: The target spreadsheet to write comments into
    :param comment_source: The source spreadsheet containing comments
    """
    import openpyxl
    from openpyxl.comments import Comment
    assert isinstance(target, sc.Spreadsheet)
    assert isinstance(comment_source, sc.Spreadsheet)
    this_workbook = openpyxl.load_workbook(target.tofile(), data_only=False)  # This is the value source workbook
    old_workbook = openpyxl.load_workbook(comment_source.tofile(), data_only=False)  # A openpyxl workbook for the old content
    for sheet in this_workbook.worksheets:
        # If this sheet isn't in the old workbook, do nothing
        if sheet.title not in old_workbook.sheetnames:
            continue
        # Transfer comments (author is reset to the empty string)
        for row in old_workbook[sheet.title].rows:
            for cell in row:
                if cell.comment:
                    sheet[cell.coordinate].comment = Comment(cell.comment.text, "")
    # Save the modified spreadsheet to a new buffer and reload it into target
    f = io.BytesIO()
    this_workbook.save(f)
    f.flush()
    f.seek(0)
    target.load(f)
def copy_sheet(source: str, sheet_name: str, workbook: xlsxwriter.Workbook) -> None:
    """
    Copy the cell values of one sheet from ``source`` into ``workbook``

    Intended for carrying simple, value-only sheets (e.g. custom databook
    sheets that ``ProjectData`` ignores) across a rewrite, where manually
    reconstructing them via ``to_workbook()`` would be overkill.

    Warning - only values survive the copy: Excel functions, formatting,
    and comments are all lost, so use this only for plain tables of values.

    :param source: File name of the spreadsheet to read the source sheet from or an ``sc.Spreadsheet`` instance
    :param sheet_name: Name of the sheet to copy (the same name is used in the output)
    :param workbook: A Workbook instance to add the sheet to
    :return: None - the sheet will be added to the Workbook in-place
    """
    import openpyxl

    if sc.isstring(source):
        source = sc.Spreadsheet(source)
    # read_only gives the fastest load, and data_only collapses formulas to
    # their cached values since we cannot preserve formulas anyway.
    reader = openpyxl.load_workbook(source.tofile(), read_only=True, data_only=True)
    sheet_in = reader[sheet_name]
    sheet_out = workbook.add_worksheet(sheet_name)
    for row_idx, row in enumerate(sheet_in.rows):
        for col_idx, cell in enumerate(row):
            sheet_out.write(row_idx, col_idx, cell.value)
    reader.close()
def read_tables(worksheet) -> tuple:
    """
    Read tables from sheet

    A table is a contiguous block of non-empty rows; tables are separated by
    entirely empty rows. Rows whose first cell is a string starting with
    ``#ignore`` are skipped entirely (they neither join nor split a table).

    :param worksheet: An openpyxl worksheet
    :return: A tuple containing - A list of tables (each a list of openpyxl
        rows), and a list of 1-based start row indices for each table
    """
    current_rows = []
    tables = []
    start_rows = []
    first_row_number = None
    for row_idx, row in enumerate(worksheet.rows):
        # Skip any rows starting with '#ignore'
        if len(row) > 0 and row[0].data_type == "s" and row[0].value.startswith("#ignore"):
            continue

        if any(cell.value for cell in row):
            # Non-empty row: open a new table if necessary, then accumulate.
            if not current_rows:
                first_row_number = row_idx + 1  # Excel rows are 1-indexed
            current_rows.append(row)
        elif current_rows:
            # Empty row terminates the table currently being accumulated.
            tables.append(current_rows)
            start_rows.append(first_row_number)
            current_rows = []

    # Flush a trailing table that was not followed by an empty row.
    if current_rows:
        tables.append(current_rows)
        start_rows.append(first_row_number)

    return tables, start_rows
def read_dataframes(worksheet, merge=False) -> list:
    """
    Read dataframes from sheet

    This function operates similarly to ``read_tables`` except it returns DataFrames instead of
    cells. This enables the dataframes to be constructed more quickly, at the expense of being
    able to track the cell references and row numbers. These are shown for databooks (via ``read_tables``)
    but not for frameworks (which go via ``read_dataframes``)

    :param worksheet: An openpyxl worksheet
    :param merge: If False (default) then blank rows will be used to split the dataframes. If True, only one
                  DataFrame will be returned
    :return: A list of DataFrames. The first row of each block becomes the column header
    """
    # Read the entire sheet into an object array up-front, tracking which rows are
    # '#ignore' rows and which rows are entirely empty (table separators)
    content = np.empty((worksheet.max_row, worksheet.max_column), dtype="object")
    ignore = np.zeros(worksheet.max_row, dtype=bool)  # True for '#ignore' rows
    empty = np.zeros(worksheet.max_row, dtype=bool)  # True for rows with no values

    for i, row in enumerate(worksheet.rows):
        if len(row) > 0 and (row[0].data_type == "s" and row[0].value.startswith("#ignore")):
            ignore[i] = True
            continue
        any_values = False
        for j, cell in enumerate(row):
            v = cell.value
            try:
                v = v.strip()
                has_value = bool(v)  # If it's a string type, call strip() before checking truthiness
            except AttributeError:
                has_value = v is not None  # If it's not a string type, then only consider it empty if its type is None (otherwise, a numerical value of 0 would be treated as empty)
            if has_value:
                any_values = True
                content[i, j] = v
        if not any_values:
            empty[i] = True

    tables = []
    if merge:
        # Treat empty rows like ignored rows so everything collapses into a single table
        ignore[empty] = True
        if all(ignore):
            return []
        tables.append(content[~ignore, :])
    else:
        content = content[~ignore, :]
        empty = empty[~ignore]
        # If there is no content at all, return immediately
        if all(empty):
            return []

        # Compute [start, stop) index pairs for each contiguous block of non-empty rows.
        # Scanning with the previous row's emptiness handles all boundary cases uniformly,
        # including a sheet consisting of a single row of content (which previously
        # produced an odd-length index list and tripped the assertion below)
        idx = []
        previous_empty = True
        for i, row_is_empty in enumerate(empty):
            if previous_empty and not row_is_empty:
                idx.append(i)  # A table starts at row i
            elif not previous_empty and row_is_empty:
                idx.append(i)  # The current table ends just before row i
            previous_empty = bool(row_is_empty)
        if not previous_empty:
            idx.append(empty.size)  # The last table runs to the end of the sheet
        assert not len(idx) % 2, "Error in table parsing routine, did not correctly identify table breaks"

        for i in range(0, len(idx) - 1, 2):
            tables.append(content[idx[i] : idx[i + 1]].copy())

    dfs = []
    for table in tables:
        df = pd.DataFrame(table)
        df.dropna(axis=1, how="all", inplace=True)  # Drop columns that contain no values at all
        df.columns = df.iloc[0]  # Promote the first row of the block to the column header
        df = df[1:]
        dfs.append(df)
    return dfs
class TimeDependentConnections:
"""
Structure for reading/writing interactions
A :class:`TimeDependentConnections` object is suitable when there are time dependent interactions between two quantities
This class is used for both transfers and interactions. The content that it writes consists of
- A connection matrix table that has Y/N selection of which interactions are present between two things
- A set of pairwise connections specifying to, from, units, assumption, and time
Interactions can have a diagonal, whereas transfers cannot (e.g. a population can infect itself but cannot transfer to itself).
In Excel, a :class:`TimeDependentConnections` maps to three tables
1. A table to enter the code name and full name
2. An interactions matrix with Y/N indicating whether an interaction between two populations exists
3. A set of rows for entering time varying data for each pair of populations
:param code_name: the code name of this quantity e.g. 'aging'
:param full_name: the full name of this quantity e.g. 'Aging'
:param tvec: time values for the time-dependent rows
:param pops: list of strings to use as the rows and columns - these are typically lists of population code names
:param type: 'transfer' or 'interaction'. A transfer cannot have diagonal entries, and can have Number or Probability formats. An Interaction can have
diagonal entries and only has N.A. formats
:param ts: Optionally specify a dict containing all of the non-empty TimeSeries objects used. The format is ``{(from_pop, to_pop):TimeSeries}``.
An interaction can only be Y/N for clarity; if it is Y then a row is displayed for the TimeSeries. Actually, the Y/N can be
decided in the first instance based on the provided TimeSeries i.e. if a TimeSeries is provided for an interaction, then the
interaction must have been marked with Y
:param pop_type: Specify pop_type, which is used by :meth:`ProjectData.add_pop` to determine which TDCs to add new populations to
"""
    @staticmethod
    def from_tables(tables: list, interaction_type):
        """
        Instantiate based on list of tables

        This method instantiates and initializes a new :class:`TimeDependentConnections` object from
        tables that have been read in using :func:`read_tables`. Note that the parent object
        such as :class:`ProjectData` is responsible for extracting the tables and passing them
        to this function. For instance, the transfers sheet might contain more than one set of
        tables, so it is the calling function's responsibility to split those tables up into
        the groups of three expected by this method.

        :param tables: A list of tables. A table here is a list of rows, and a row is a list of cells.
        :param interaction_type: A string identifying the interaction type - either 'transfer' or 'interaction'
        :return: A new :class:`TimeDependentConnections` instance
        """
        from .utils import TimeSeries  # Import here to avoid circular reference

        assert interaction_type in {"transfer", "interaction"}, "Unknown interaction type"

        # Read the TDC definition table (including attributes)
        # tables[0] is a two-row table: row 0 holds the headings, row 1 the values.
        # Recognized headings are handled explicitly; anything else is kept as an attribute
        code_name = None
        full_name = None
        from_pop_type = None
        to_pop_type = None
        attributes = {}
        for header_cell, value_cell in zip(tables[0][0], tables[0][1]):
            if header_cell.value is None:
                continue
            header = cell_get_string(header_cell)
            lowered_header = header.lower()
            if lowered_header == "abbreviation":
                code_name = cell_get_string(value_cell)
            elif lowered_header == "full name":
                full_name = cell_get_string(value_cell)
            elif lowered_header == "from population type":
                from_pop_type = cell_get_string(value_cell, True)
            elif lowered_header == "to population type":
                to_pop_type = cell_get_string(value_cell, True)
            else:
                attributes[header] = value_cell.value

        if interaction_type == "transfer":
            assert from_pop_type == to_pop_type, "Transfers can only occur between populations of the same type"
        if code_name is None:
            raise Exception("Code name/abbreviation missing")
        if full_name is None:
            raise Exception("Full name missing")

        # Read the pops from the Y/N table. The Y/N content of the table depends on the timeseries objects that
        # are present. That is, if the Y/N matrix contains a Y then a TimeSeries must be read in, and vice versa.
        # Therefore, we don't actually parse the matrix, and instead just read in all the TimeSeries instances
        # that are defined and infer the matrix that way.
        to_pops = [x.value for x in tables[1][0][1:] if x.value]  # Matrix column headers (skip the corner cell)
        from_pops = []
        for row in tables[1][1:]:  # Matrix row headers (first cell of each subsequent row)
            from_pops.append(row[0].value)

        # Instantiate it
        tdc = TimeDependentConnections(code_name, full_name, None, from_pops=from_pops, to_pops=to_pops, interpop_type=interaction_type, from_pop_type=from_pop_type, to_pop_type=to_pop_type)
        tdc.attributes = attributes

        # Read the time series table
        # The heading row of tables[2] mixes string headings (units/uncertainty/etc. plus
        # arbitrary attribute columns) with numeric/date year columns
        headings = {}  # Maps lowercase known heading (or verbatim attribute name) -> column index
        times = {}  # Maps year (float) -> column index
        known_headings = {"from population", "to population", "units", "uncertainty", "constant", "assumption"}
        for i, cell in enumerate(tables[2][0]):
            v = cell.value
            if i == 0 or v is None:
                continue
            elif cell.data_type in {"s", "str"}:
                v = v.strip()
                if v.lower() in known_headings:
                    headings[v.lower()] = i
                else:
                    headings[v] = i
            elif cell.data_type == "n":
                if cell.is_date:
                    times[datetime_to_year(v)] = i
                else:
                    times[v] = i
            else:
                raise Exception("Unknown data type in cell %s of the spreadsheet - quantity must be a string or a number" % cell.coordinate)
        tdc.tvec = np.array(sorted(times), dtype=float)

        # Validate and process headings
        if not times and "constant" not in headings:
            raise Exception("Could not find an assumption or time-specific value - all tables must contain at least one of these values")

        # True if the column is present, None (not False) if absent so the write-side
        # logic can fall back to inferring these flags from the TimeSeries content
        tdc.write_units = True if "units" in headings else None
        tdc.write_uncertainty = True if "uncertainty" in headings else None
        tdc.write_assumption = True if "constant" in headings else None
        if "assumption" in headings:
            # Legacy spreadsheets use 'Assumption' instead of 'Constant' - preserve that on write
            tdc.write_assumption = True
            tdc.assumption_heading = "Assumption"

        for heading in headings:
            if heading not in known_headings:
                # If it's not a known heading and it's a string, then it must be an attribute
                # Note that the way `headings` is populated by skipping i=0 ensures that the table name
                # is not interpreted as a heading
                tdc.ts_attributes[heading] = {}

        tdc.ts = sc.odict()
        for row in tables[2][1:]:
            if row[0].value != "...":  # '...' marks a placeholder row with no data entered; skip it
                assert row[0].value in from_pops, 'Population "%s" not found - should be contained in %s' % (row[0].value, from_pops)
                assert row[2].value in to_pops, 'Population "%s" not found - should be contained in %s' % (row[2].value, to_pops)
                vals = [x.value for x in row]
                from_pop = vals[0]
                to_pop = vals[2]
                if "units" in headings:
                    units = cell_get_string(row[headings["units"]], allow_empty=True)
                    if units.lower().strip() in FS.STANDARD_UNITS:
                        units = units.lower().strip()  # Only lower and strip units if they are standard units
                else:
                    units = None
                ts = TimeSeries(units=units)
                if "uncertainty" in headings:
                    ts.sigma = cell_get_number(row[headings["uncertainty"]])
                else:
                    ts.sigma = None
                if "constant" in headings:
                    ts.assumption = cell_get_number(row[headings["constant"]])
                elif "assumption" in headings:
                    ts.assumption = cell_get_number(row[headings["assumption"]])
                else:
                    ts.assumption = None
                for attribute in tdc.ts_attributes:
                    if attribute in headings:
                        tdc.ts_attributes[attribute][(from_pop, to_pop)] = row[headings[attribute]].value
                for t, idx in times.items():
                    ts.insert(t, cell_get_number(row[idx]))  # If cell_get_number returns None, this gets handled accordingly by ts.insert()
                tdc.ts[(from_pop, to_pop)] = ts
        return tdc
    def write(self, worksheet, start_row, formats, references: dict = None, widths: dict = None) -> int:
        """
        Write to cells in a worksheet

        Writes the three tables for this connection set: the name entry table,
        the Y/N interaction matrix, and the time-dependent data rows.

        :param worksheet: An xlsxwriter worksheet instance
        :param start_row: The first row in which to write values
        :param formats: Format dict for the opened workbook - typically the return value of :func:`standard_formats` when the workbook was opened
        :param references: References dict containing cell references for strings in the current workbook
        :param widths: ``dict`` storing column widths
        :return: The row index for the next available row for writing in the spreadsheet
        """

        assert self.assumption_heading in {"Constant", "Assumption"}, "Unsupported assumption heading"

        # Unless explicitly set on the instance, infer whether to emit each optional column
        # from the content of the stored TimeSeries objects
        write_units = self.write_units if self.write_units is not None else any((ts.units is not None for ts in self.ts.values()))
        write_uncertainty = self.write_uncertainty if self.write_uncertainty is not None else any((ts.sigma is not None for ts in self.ts.values()))
        write_assumption = self.write_assumption if self.write_assumption is not None else any((ts.assumption is not None for ts in self.ts.values()))

        if not references:
            references = {x: x for x in self.from_pops + self.to_pops}  # Default null mapping for populations

        # First, write the name entry table
        current_row = start_row
        column = 0
        worksheet.write(current_row, column, "Abbreviation", formats["center_bold"])
        update_widths(widths, column, "Abbreviation")
        worksheet.write(current_row + 1, column, self.code_name)
        update_widths(widths, column, self.code_name)
        column += 1
        worksheet.write(current_row, column, "Full Name", formats["center_bold"])
        update_widths(widths, column, "Full Name")
        worksheet.write(current_row + 1, column, self.full_name)
        update_widths(widths, column, self.full_name)
        column += 1
        worksheet.write(current_row, column, "From population type", formats["center_bold"])
        update_widths(widths, column, "From population type")
        worksheet.write(current_row + 1, column, self.from_pop_type)
        update_widths(widths, column, self.from_pop_type)
        column += 1
        worksheet.write(current_row, column, "To population type", formats["center_bold"])
        update_widths(widths, column, "To population type")
        worksheet.write(current_row + 1, column, self.to_pop_type)
        update_widths(widths, column, self.to_pop_type)

        # Any extra attributes are appended as additional heading/value column pairs
        for attribute, value in self.attributes.items():
            column += 1
            worksheet.write(current_row, column, attribute, formats["center_bold"])
            update_widths(widths, column, attribute)
            worksheet.write(current_row + 1, column, value)
            update_widths(widths, column, value)

        # NOTE(review): these references point at the heading row (current_row) rather than the
        # value row (current_row + 1) where the code name / full name are actually written - confirm intended
        references[self.code_name] = "='%s'!%s" % (worksheet.name, xlrc(current_row, 0, True, True))
        references[self.full_name] = "='%s'!%s" % (worksheet.name, xlrc(current_row, 1, True, True))  # Reference to the full name

        # Then, write the Y/N matrix
        current_row += 3  # Leave a blank row below the matrix
        # Note - table_references are local to this TimeDependentConnections instance
        # For example, there could be two transfers, and each of them could potentially transfer between 0-4 and 5-14
        # so the worksheet might contain two references from 0-4 to 5-14 but they would be for different transfers and thus
        # the time-dependent rows would depend on different boolean table cells
        current_row, table_references, values_written = self._write_pop_matrix(worksheet, current_row, formats, references, boolean_choice=True, widths=widths)

        # Finally, write the time dependent part
        headings = []
        headings.append("From population")
        headings.append("")  # --->
        headings.append("To population")
        offset = len(headings)

        # Assign a column index to each TimeSeries attribute
        attribute_index = {}
        for attribute in self.ts_attributes:
            attribute_index[attribute] = offset
            headings.append(attribute)
            offset += 1

        if write_units:
            headings.append("Units")
            units_index = offset  # Column to write the units in
            offset += 1
        if write_uncertainty:
            headings.append("Uncertainty")
            uncertainty_index = offset  # Column to write the uncertainty in
            offset += 1
        if write_assumption:
            headings.append(self.assumption_heading)
            headings.append("")
            constant_index = offset
            offset += 1
            if len(self.tvec):
                offset += 1  # Additional offset for the 'OR' column
        headings += [float(x) for x in self.tvec]

        for i, entry in enumerate(headings):
            if entry in references:
                # NOTE(review): this branch writes into column 0 rather than column i - looks
                # like it should be `i` to match the else branch; confirm before changing
                worksheet.write_formula(current_row, 0, references[entry], formats["center_bold"], value=entry)
            else:
                worksheet.write(current_row, i, entry, formats["center_bold"])
            update_widths(widths, i, entry)

        # Now, we will write a wrapper that gates the content
        # If the gating cell is 'Y', then the content will be displayed, otherwise not
        for from_idx in range(0, len(self.from_pops)):
            for to_idx in range(0, len(self.to_pops)):
                current_row += 1
                from_pop = self.from_pops[from_idx]
                to_pop = self.to_pops[to_idx]
                entry_tuple = (from_pop, to_pop)
                entry_cell = table_references[entry_tuple]

                # Write hyperlink between the matrix cell and this data-entry row (skipped for N.A. diagonal cells)
                if values_written[entry_cell] != FS.DEFAULT_SYMBOL_INAPPLICABLE:
                    worksheet.write_url(entry_cell, "internal:%s!%s" % (worksheet.name, xlrc(current_row, 1)), cell_format=formats["center_unlocked"], string=values_written[entry_cell])
                    worksheet.write_url(xlrc(current_row, 1), "internal:%s!%s" % (worksheet.name, entry_cell), cell_format=formats["center_unlocked"])

                if entry_tuple in self.ts:
                    ts = self.ts[entry_tuple]
                    format = formats["not_required"]
                else:
                    ts = None
                    format = formats["unlocked"]

                if ts:
                    # A TimeSeries exists for this pair - write its content, gated on the matrix cell
                    worksheet.write_formula(current_row, 0, gate_content(references[from_pop], entry_cell), formats["center_bold"], value=from_pop)
                    update_widths(widths, 0, from_pop)
                    worksheet.write_formula(current_row, 1, gate_content("--->", entry_cell), formats["center"], value="--->")
                    worksheet.write_formula(current_row, 2, gate_content(references[to_pop], entry_cell), formats["center_bold"], value=to_pop)
                    update_widths(widths, 2, to_pop)

                    # Write the attributes
                    for attribute in self.ts_attributes:
                        if isinstance(self.ts_attributes[attribute], dict):
                            if entry_tuple in self.ts_attributes[attribute]:
                                val = self.ts_attributes[attribute][entry_tuple]
                            else:
                                val = None
                        else:
                            val = self.ts_attributes[attribute]
                        if val is not None:
                            worksheet.write(current_row, attribute_index[attribute], val)
                            update_widths(widths, attribute_index[attribute], val)

                    if self.write_units:
                        worksheet.write(current_row, units_index, ts.units, format)
                        update_widths(widths, units_index, ts.units)
                        if self.allowed_units:
                            worksheet.data_validation(xlrc(current_row, units_index), {"validate": "list", "source": [x for x in self.allowed_units]})
                    if self.write_uncertainty:
                        worksheet.write(current_row, uncertainty_index, ts.sigma, formats["not_required"])
                    if self.write_assumption:
                        worksheet.write(current_row, constant_index, ts.assumption, format)
                        if len(self.tvec):
                            worksheet.write_formula(current_row, constant_index + 1, gate_content("OR", entry_cell), formats["center"], value="OR")
                            update_widths(widths, constant_index + 1, "OR")
                else:
                    # No TimeSeries for this pair - write '...' placeholders and blank entry cells
                    worksheet.write_formula(current_row, 0, gate_content(references[from_pop], entry_cell), formats["center_bold"], value="...")
                    worksheet.write_formula(current_row, 1, gate_content("--->", entry_cell), formats["center"], value="...")
                    worksheet.write_formula(current_row, 2, gate_content(references[to_pop], entry_cell), formats["center_bold"], value="...")
                    if self.write_units:
                        worksheet.write_blank(current_row, units_index, "", format)
                        if self.allowed_units:
                            worksheet.data_validation(xlrc(current_row, units_index), {"validate": "list", "source": [x for x in self.allowed_units]})
                    if self.write_uncertainty:
                        worksheet.write_blank(current_row, uncertainty_index, "", formats["not_required"])
                    if self.write_assumption:
                        worksheet.write_blank(current_row, constant_index, "", format)
                        if len(self.tvec):
                            worksheet.write_formula(current_row, constant_index + 1, gate_content("OR", entry_cell), formats["center"], value="...")
                            update_widths(widths, constant_index + 1, "...")

                # Write the time values (or blanks) for this row, aligned against self.tvec
                content = [None] * len(self.tvec)
                if ts:
                    for t, v in zip(ts.t, ts.vals):
                        idx = np.where(self.tvec == t)[0][0]
                        content[idx] = v
                for idx, v in enumerate(content):
                    if v is None:
                        worksheet.write_blank(current_row, offset + idx, v, format)
                    else:
                        worksheet.write(current_row, offset + idx, v, format)
                    widths[offset + idx] = max(widths[offset + idx], 7) if offset + idx in widths else 7
                if not content:
                    idx = 0  # Ensure `idx` is defined for the conditional formats below when there are no time columns

                if self.write_assumption and len(self.tvec):
                    # Conditional formatting for the assumption, depending on whether time-values were entered
                    fcn_empty_times = 'COUNTIF(%s:%s,"<>" & "")>0' % (xlrc(current_row, offset), xlrc(current_row, offset + idx))
                    worksheet.conditional_format(xlrc(current_row, constant_index), {"type": "formula", "criteria": "=" + fcn_empty_times, "format": formats["ignored"]})
                    worksheet.conditional_format(xlrc(current_row, constant_index), {"type": "formula", "criteria": "=AND(%s,NOT(ISBLANK(%s)))" % (fcn_empty_times, xlrc(current_row, constant_index)), "format": formats["ignored_warning"]})

                # Conditional formatting for the row - it has a white background if the gating cell is 'N'
                worksheet.conditional_format("%s:%s" % (xlrc(current_row, 3), xlrc(current_row, offset + idx)), {"type": "formula", "criteria": '=%s<>"Y"' % (entry_cell), "format": formats["white_bg"]})

        current_row += 2  # Leave a gap before the next set of tables
        return current_row
    def _write_pop_matrix(self, worksheet, start_row, formats, references: dict = None, boolean_choice=False, widths: dict = None):
        """
        Write a square matrix to Excel

        This function writes the Y/N matrix

        - Transfer matrix
        - Interactions matrix

        If ``self.enable_diagonal`` is ``False`` then the diagonal will be forced to be ``'N.A.'``. If an entry
        is specified for an entry on the diagonal and ``enable_diagonal=False``, an error will be thrown

        :param worksheet: An xlsxwriter worksheet instance
        :param start_row: The first row in which to write values
        :param formats: Format dict for the opened workbook - typically the return value of :func:`standard_formats` when the workbook was opened
        :param references: Optionally supply dict with references, used to link population names in Excel
        :param boolean_choice: If True, values will be coerced to Y/N and an Excel validation will be added
        :param widths: ``dict`` storing column widths
        :return: Tuple with ``(next_row, table_references, values_written)``. The references are used for hyperlinking to the Excel matrix
        """

        entries = self.ts
        if not references:
            references = {x: x for x in self.from_pops + self.to_pops}  # This is a null-mapping that takes say 'adults'->'adults' thus simplifying the workflow. Otherwise, it's assumed a reference exists for every node

        table_references = {}  # Maps (from_pop, to_pop) -> absolute cell reference of the matrix cell
        values_written = {}  # Maps cell reference -> the value written there ('Y'/'N'/'N.A.'/...)

        # Write the headers
        for i, node in enumerate(self.to_pops):
            # Column headers along the top of the matrix
            worksheet.write_formula(start_row, i + 1, references[node], formats["center_bold"], value=node)
            update_widths(widths, i + 1, node)
        for i, node in enumerate(self.from_pops):
            # Row headers down the left side of the matrix
            worksheet.write_formula(start_row + i + 1, 0, references[node], formats["center_bold"], value=node)
            update_widths(widths, 0, node)

        # Prepare the content - first replace the dict with one keyed by index. This is because we cannot apply formatting
        # after writing content, so have to do the writing in a single pass over the entire matrix
        if boolean_choice:
            content = np.full((len(self.from_pops), len(self.to_pops)), "N", dtype=object)  # This will also coerce the value to string in preparation for writing
        else:
            content = np.full((len(self.from_pops), len(self.to_pops)), "", dtype=object)  # This will also coerce the value to string in preparation for writing

        for interaction, value in entries.items():
            from_pop, to_pop = interaction
            if not self.enable_diagonal and from_pop == to_pop:
                raise Exception("Trying to write a diagonal entry to a table that is not allowed to contain diagonal terms")  # This is because data loss will occur if the user adds entries on the diagonal, then writes the table, and then reads it back in
            from_idx = self.from_pops.index(from_pop)
            to_idx = self.to_pops.index(to_pop)
            if boolean_choice:
                value = "Y" if value else "N"  # Any truthy TimeSeries entry maps to 'Y'
            content[from_idx, to_idx] = value

        # Write the content
        for from_idx in range(0, len(self.from_pops)):
            for to_idx in range(0, len(self.to_pops)):
                row = start_row + 1 + from_idx
                col = to_idx + 1
                if not self.enable_diagonal and self.to_pops[to_idx] == self.from_pops[from_idx]:  # Disable the diagonal if it's linking the same two quantities and that's desired
                    val = FS.DEFAULT_SYMBOL_INAPPLICABLE
                    worksheet.write(row, col, val, formats["center"])
                    worksheet.data_validation(xlrc(row, col), {"validate": "list", "source": ["N.A."]})
                else:
                    val = content[from_idx, to_idx]
                    worksheet.write(row, col, content[from_idx, to_idx], formats["center_unlocked"])
                    if boolean_choice:
                        # Restrict entry to Y/N and colour the cell according to its value
                        worksheet.data_validation(xlrc(row, col), {"validate": "list", "source": ["Y", "N"]})
                        worksheet.conditional_format(xlrc(row, col), {"type": "cell", "criteria": "equal to", "value": '"Y"', "format": formats["unlocked_boolean_true"]})
                        worksheet.conditional_format(xlrc(row, col), {"type": "cell", "criteria": "equal to", "value": '"N"', "format": formats["unlocked_boolean_false"]})
                table_references[(self.from_pops[from_idx], self.to_pops[to_idx])] = xlrc(row, col, True, True)  # Store reference to this interaction
                values_written[table_references[(self.from_pops[from_idx], self.to_pops[to_idx])]] = val

        next_row = start_row + 1 + len(self.from_pops) + 1  # Matrix rows plus the header row plus one blank row
        return next_row, table_references, values_written
class TimeDependentValuesEntry:
"""Table for time-dependent data entry
This class is used in Databooks and Program books to enter potentially time-varying data.
Conceptually, it maps a set of TimeSeries object to a single name and table in the
spreadsheet. For example, a Characteristic might contain a TimeSeries for each population,
and the resulting TimeDependentValuesEntry (TDVE) table would have a `name` matching the
population, and TimeSeries for each population.
The TDVE class optionally allows the specification of units, assumptions, and uncertainty,
which each map to properties on the underlying TimeSeries objects. It also contains a
time vector corresponding to the time values that appear or will appear in the spreadsheet.
Note that the units are stored within the TimeSeries objects, which means that they
are able to differ across rows.
:param name: The name/title for this table
:param tvec: Specify the time values for this table. All TimeSeries in the ts dict should have corresponding time values
:param ts: Optionally specify an odict() of TimeSeries objects populating the rows. Could be populated after
:param allowed_units: Optionally specify a list of allowed units that will appear as a dropdown
:param comment: Optionally specify descriptive text that will be added as a comment to the name cell
"""
@property
def has_data(self) -> bool:
"""
Check whether all time series have data entered
:return: True if all of the TimeSeries objects stored in the TDVE have data
"""
return all([x.has_data for x in self.ts.values()])
    @staticmethod
    def from_rows(rows: list):
        """
        Create new instance from Excel rows

        Given a set of openpyxl rows, instantiate a :class:`TimeDependentValuesEntry` object
        That is, the parent object e.g. :class:`ProjectData` is responsible for finding where the TDVE table is,
        and reading all of the rows associated with it (skipping ``#ignored`` rows) and then passing those rows,
        unparsed, to this function

        Headings for 'units', 'uncertainty', and 'assumption'/'constant' are optional and will be read in
        if they are present in the spreadsheet.

        :param rows: A list of rows
        :return: A new :class:`TimeDependentValuesEntry` instance
        """
        from .utils import TimeSeries  # Import here to avoid circular reference

        # Retrieve the name
        name = rows[0][0].value
        if name is None:
            raise Exception('The name of the table is missing. This can also happen if extra rows have been added without a "#ignore" entry in the first column')
        elif not sc.isstring(name):
            raise Exception("In cell %s of the spreadsheet, the name of the quantity assigned to this table needs to be a string" % rows[0][0].coordinate)
        name = name.strip()  # The name needs to be written back in a case sensitive form
        tdve = TimeDependentValuesEntry(name)

        # Read the headings
        # The heading row mixes string headings (units/uncertainty/etc. plus arbitrary
        # attribute columns) with numeric/date year columns
        headings = {}  # Maps lowercase known heading (or verbatim attribute name) -> column index
        times = {}  # Maps year (float) -> column index
        known_headings = {"units", "uncertainty", "constant", "assumption"}
        for i, cell in enumerate(rows[0]):
            v = cell.value
            if i == 0 or v is None:
                continue
            elif cell.data_type in {"s", "str"}:
                v = v.strip()
                if v.lower() in known_headings:
                    headings[v.lower()] = i
                else:
                    headings[v] = i
            elif cell.is_date:
                times[datetime_to_year(v)] = i
            elif cell.data_type == "n":
                times[v] = i
            else:
                raise Exception("Unknown data type in cell %s of the spreadsheet - quantity must be a string or a number" % cell.coordinate)
        tdve.tvec = np.array(sorted(times), dtype=float)

        # Validate and process headings
        if not times and "constant" not in headings:
            raise Exception("Could not find an assumption or time-specific value - all tables must contain at least one of these values")

        # True if the column is present, None (not False) if absent so the write-side
        # logic can fall back to inferring these flags from the TimeSeries content
        tdve.write_units = True if "units" in headings else None
        tdve.write_uncertainty = True if "uncertainty" in headings else None
        tdve.write_assumption = True if "constant" in headings else None
        if "assumption" in headings:
            # Legacy spreadsheets use 'Assumption' instead of 'Constant' - preserve that on write
            tdve.write_assumption = True
            tdve.assumption_heading = "Assumption"

        for heading in headings:
            if heading not in known_headings:
                # If it's not a known heading and it's a string, then it must be an attribute
                # Note that the way `headings` is populated by skipping i=0 ensures that the table name
                # is not interpreted as a heading
                tdve.ts_attributes[heading] = {}

        ts_entries = sc.odict()
        for row in rows[1:]:
            if not row[0].data_type in {"s", "str"}:
                raise Exception("In cell %s of the spreadsheet, the name of the entry was expected to be a string, but it was not. The left-most column is expected to be a name. If you are certain the value is correct, add an single quote character at the start of the cell to ensure it remains as text" % row[0].coordinate)
            series_name = row[0].value.strip()
            if "units" in headings:
                units = cell_get_string(row[headings["units"]], allow_empty=True)
                if units.lower().strip() in FS.STANDARD_UNITS:
                    units = units.lower().strip()  # Only lower and strip units if they are standard units
            else:
                units = None
            ts = TimeSeries(units=units)
            if "uncertainty" in headings:
                ts.sigma = cell_get_number(row[headings["uncertainty"]])
            else:
                ts.sigma = None
            if "constant" in headings:
                ts.assumption = cell_get_number(row[headings["constant"]])
            elif "assumption" in headings:
                ts.assumption = cell_get_number(row[headings["assumption"]])
            else:
                ts.assumption = None
            for attribute in tdve.ts_attributes:
                if attribute in headings:
                    # If it's a default attribute e.g. provenance, and it is missing from the databook, then don't populate it
                    tdve.ts_attributes[attribute][series_name] = row[headings[attribute]].value
            for t, idx in times.items():
                ts.insert(t, cell_get_number(row[idx]))  # If cell_get_number returns None, this gets handled accordingly by ts.insert()
            ts_entries[series_name] = ts

        tdve.ts = ts_entries
        return tdve
def write(self, worksheet, start_row, formats, references: dict = None, widths: dict = None) -> int:
    """
    Write to cells in a worksheet
    Note that the year columns are drawn from the ``tvec`` attribute. To suppress the year columns (e.g. for the user to enter only an assumption)
    then set ``tvec`` to an empty array/list.
    :param worksheet: An xlsxwriter worksheet instance
    :param start_row: The first row in which to write values
    :param formats: Format dict for the opened workbook - typically the return value of :func:`standard_formats` when the workbook was opened
    :param references: References dict containing cell references for strings in the current workbook
    :param widths: ``dict`` storing column widths
    :return: The row index for the next available row for writing in the spreadsheet
    """
    assert self.assumption_heading in {"Constant", "Assumption"}, "Unsupported assumption heading"
    # If the write_* flags were not set explicitly, infer each one from whether any
    # TimeSeries in this table actually carries the corresponding value
    write_units = self.write_units if self.write_units is not None else any((ts.units is not None for ts in self.ts.values()))
    write_uncertainty = self.write_uncertainty if self.write_uncertainty is not None else any((ts.sigma is not None for ts in self.ts.values()))
    write_assumption = self.write_assumption if self.write_assumption is not None else any((ts.assumption is not None for ts in self.ts.values()))
    if not references:
        references = dict()
    current_row = start_row
    # First, assemble and write the headings
    headings = []
    headings.append(self.name)
    offset = 1  # This is the column where the time values start (after the 'or')
    # Next allocate attributes
    attribute_index = {}  # Maps attribute name -> column index it is written in
    for attribute in self.ts_attributes:
        attribute_index[attribute] = offset
        headings.append(attribute)
        offset += 1
    if write_units:
        headings.append("Units")
        units_index = offset  # Column to write the units in
        offset += 1
    if write_uncertainty:
        headings.append("Uncertainty")
        uncertainty_index = offset  # Column to write the units in
        offset += 1
    if write_assumption:
        headings.append(self.assumption_heading)
        headings.append("")  # Blank heading for the column that holds the 'OR' separator
        constant_index = offset
        offset += 2
    headings += [float(x) for x in self.tvec]
    for i, entry in enumerate(headings):
        if entry in references:
            # NOTE(review): this branch writes to column 0 regardless of `i`, unlike the
            # else branch which writes column `i`. If any heading other than the first can
            # appear in `references`, this looks like a copy-paste bug - confirm.
            worksheet.write_formula(current_row, 0, references[entry], formats["center_bold"], value=entry)
        else:
            worksheet.write(current_row, i, entry, formats["center_bold"])
        update_widths(widths, i, entry)
    if self.comment:
        worksheet.write_comment(xlrc(current_row, 0), self.comment)
    # Now, write the TimeSeries objects - self.ts is an odict and whatever pops are present will be written in whatever order they are in
    for row_name, row_ts in self.ts.items():
        current_row += 1
        # Write the name
        if row_name in references:
            worksheet.write_formula(current_row, 0, references[row_name], formats["center_bold"], value=row_name)
            update_widths(widths, 0, row_name)
        else:
            worksheet.write_string(current_row, 0, row_name, formats["center_bold"])
            update_widths(widths, 0, row_name)
        # Write the attributes
        for attribute in self.ts_attributes:
            if isinstance(self.ts_attributes[attribute], dict):
                # Per-row attribute values, keyed by series name
                if row_name in self.ts_attributes[attribute]:
                    val = self.ts_attributes[attribute][row_name]
                else:
                    val = None
            else:
                # A scalar attribute shared by every row
                val = self.ts_attributes[attribute]
            if val is not None:
                worksheet.write(current_row, attribute_index[attribute], val)
                update_widths(widths, attribute_index[attribute], val)
        # Write the units
        if write_units:
            if row_ts.units:
                if row_ts.units.lower().strip() in FS.STANDARD_UNITS:  # Preserve case if nonstandard unit
                    unit = row_ts.units.title().strip()
                else:
                    unit = row_ts.units.strip()
                worksheet.write(current_row, units_index, unit)
                update_widths(widths, units_index, unit)
            else:
                worksheet.write(current_row, units_index, FS.DEFAULT_SYMBOL_INAPPLICABLE)
            if self.allowed_units and isinstance(self.allowed_units, dict) and row_name in self.allowed_units:  # Add dropdown selection if there is more than one valid choice for the units
                allowed = self.allowed_units[row_name]
            elif self.allowed_units and not isinstance(self.allowed_units, dict):
                allowed = self.allowed_units
            else:
                allowed = None
            if allowed:
                worksheet.data_validation(xlrc(current_row, units_index), {"validate": "list", "source": allowed})
        if write_uncertainty:
            # NOTE(review): both branches below are identical, so the if/else on sigma is
            # redundant as written - confirm whether a different format was intended here
            if row_ts.sigma is None:
                worksheet.write(current_row, uncertainty_index, row_ts.sigma, formats["not_required"])  # NB. For now, uncertainty is always optional
            else:
                worksheet.write(current_row, uncertainty_index, row_ts.sigma, formats["not_required"])
        # Cells remain editable ("unlocked") only while the row has no time data yet
        if row_ts.has_data:
            format = formats["not_required"]
        else:
            format = formats["unlocked"]
        if write_assumption:
            worksheet.write(current_row, constant_index, row_ts.assumption, format)
            if len(self.tvec):
                worksheet.write(current_row, constant_index + 1, "OR", formats["center"])
                update_widths(widths, constant_index + 1, "OR")
        # Write the time values if they are present
        if len(self.tvec):
            content = [None] * len(self.tvec)  # Initialize an empty entry for every time in the TDVE's tvec
            for t, v in zip(row_ts.t, row_ts.vals):
                # If the TimeSeries contains data for that time point, then insert it now
                idx = np.where(self.tvec == t)[0]
                if len(idx):
                    content[idx[0]] = v
            for idx, v in enumerate(content):
                if v is None:
                    worksheet.write_blank(current_row, offset + idx, v, format)
                else:
                    worksheet.write(current_row, offset + idx, v, format)
                # assumes `widths` is a dict whenever time columns are written - TODO confirm
                # callers never pass widths=None together with a nonempty tvec
                widths[offset + idx] = max(widths[offset + idx], 7) if offset + idx in widths else 7
            if write_assumption:
                # Conditional formatting for the assumption
                # Do this here, because after the loop above, we have easy and clear access to the range of cells to include in the formula
                fcn_empty_times = 'COUNTIF(%s:%s,"<>" & "")>0' % (xlrc(current_row, offset), xlrc(current_row, offset + len(content) - 1))
                # Hatched out if the cell will be ignored
                worksheet.conditional_format(xlrc(current_row, constant_index), {"type": "formula", "criteria": "=" + fcn_empty_times, "format": formats["ignored"]})
                worksheet.conditional_format(xlrc(current_row, constant_index), {"type": "formula", "criteria": "=AND(%s,NOT(ISBLANK(%s)))" % (fcn_empty_times, xlrc(current_row, constant_index)), "format": formats["ignored_warning"]})
    return current_row + 2  # Add two so there is a blank line after this table
def cell_get_string(cell, allow_empty=False) -> str:
    """
    Return the stripped string content of an openpyxl cell.

    Raises an informative error if the cell holds anything other than a string
    (a number, a date, ...).

    :param cell: An openpyxl cell
    :param allow_empty: If True, an empty cell returns ``None`` instead of raising
    :return: The cell contents with surrounding whitespace removed, or ``None``
    """
    value = cell.value
    if value is None and allow_empty:
        return None
    if sc.isstring(value):
        return value.strip()
    raise Exception("Cell %s needs to contain a string (i.e. not a number, date, or other cell type)" % cell.coordinate)
def cell_get_number(cell, dtype=float):
    """
    Return the numeric value of an openpyxl cell, guarding against stray strings.

    Empty cells, the 'not applicable' symbol, and strings consisting only of dashes
    are treated as deliberately blank and yield ``None``. Numeric cells are cast to
    ``dtype`` so callers are guaranteed either that type or ``None``.

    :param cell: An openpyxl cell
    :param dtype: Type to cast numeric values to (default ``float``; could be ``int``)
    :return: A scalar of type ``dtype``, or ``None`` for an empty/ignored cell
    :raises: :class:`Exception` if the cell contains anything else (e.g. text)
    """
    value = cell.value
    if value is None:
        return None
    if cell.data_type == "n":  # Numeric type
        return dtype(value)
    if cell.data_type == "s":  # Only do the string processing when it really is a string
        text = value.lower().strip()
        if text == FS.DEFAULT_SYMBOL_INAPPLICABLE or not text.replace("-", ""):
            return None
    raise Exception("Cell %s needs to contain a number" % cell.coordinate)
def validate_category(workbook, expected_category) -> None:
    """
    Check Atomica workbook type

    This function makes sure that a workbook has a particular category property
    stored within it, and displays an appropriate error message if not. If the
    category isn't present or doesn't start with 'atomica', just ignore it for
    robustness (instead, a parsing error will likely be raised)

    :param workbook: An openpyxl workbook
    :param expected_category: The expected category string (e.g. ``'atomica:framework'``)
    :raises: :class:`Exception` if the workbook declares an atomica category that does not match
    """
    category = workbook.properties.category
    # Only enforce the check when the file explicitly declares an atomica category;
    # anything else (missing, non-string, foreign prefix) is deliberately ignored
    if category and sc.isstring(category) and category.startswith("atomica:"):
        if category.strip() != expected_category.strip():
            # Report the human-readable type names, e.g. 'atomica:framework' -> 'Framework'
            expected_type = expected_category.split(":")[1].title()
            actual_type = category.split(":")[1].title()
            message = "Error loading %s - the provided file was a %s file" % (expected_type, actual_type)
            raise Exception(message)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31281,
25673,
10361,
5499,
329,
24134,
3696,
198,
198,
1212,
8265,
23986,
10361,
5499,
329,
24134,
11244,
326,
318,
2219,
198,
1462,
1180,
6982,
286,
4104,
420... | 2.466397 | 23,480 |
# -*- coding: utf-8 -*-
"""
Created on Thu May 25 09:57:51 2017
@author: azkei
The Index objects are responsible for the labels on the axes and other metadata,
such as the name of the axes.
We have already seen how an array containing labels is converted into an Index object.
We need to specify the index option within the constructor.
"""
# 1. Index Objects
# NOTE(review): `pd` is presumably imported in an earlier script of this tutorial series - confirm
ser = pd.Series([5,0,3,8,4], index=['red','blue','yellow','white','green'])
ser.index
# 2. Methods on Index
# Return the index with the lowest value
ser.idxmin()
# Return the index with the largest value
ser.idxmax()
# 3. Index with Duplicate Labels
serd = pd.Series(range(6), index = ['white','white','blue','green','green','yellow'])
serd
# Return back values with index 'white'
serd['white']
# 4. Identifying unique/duplicate indexes
# Valuable for Big Data
# Series
serd.index.is_unique
# DataFrame
# NOTE(review): `frame` is not defined in this snippet - presumably created in an earlier tutorial script; confirm
frame.index.is_unique
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
1737,
1679,
7769,
25,
3553,
25,
4349,
2177,
198,
198,
31,
9800,
25,
35560,
365,
72,
198,
464,
6376,
5563,
389,
4497,
329,
262,
14722,
31... | 3.189781 | 274 |
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the "hack" directory of this source tree.
import unittest
from hphp.hack.src.hh_codesynthesis.hackGenerator import (
_HackInterfaceGenerator,
_HackClassGenerator,
HackCodeGenerator,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
12972,
260,
12,
301,
2012,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
28... | 3.099174 | 121 |
"""Propagation algorithms
"""
import numpy as np
from scipy.integrate import ode
from astropy import units as u
from poliastro.twobody.rv import rv2coe
from poliastro.twobody.classical import coe2rv
from poliastro.twobody.angles import nu_to_M, M_to_nu
from poliastro.jit import jit
from poliastro.stumpff import c2, c3
def func_twobody(t0, u_, k, ad):
    """Right-hand side of the two body initial value problem (Cowell's formulation).

    Parameters
    ----------
    t0 : float
        Time.
    u_ : ~numpy.ndarray
        Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
    k : float
        Standard gravitational parameter.
    ad : function(t0, u, k)
        Non Keplerian acceleration (km/s2).
    """
    x, y, z, vx, vy, vz = u_
    ax, ay, az = ad(t0, u_, k)
    # Cube of the distance to the attractor, |r|^3
    r_cubed = (x ** 2 + y ** 2 + z ** 2) ** 1.5
    # State derivative: velocity followed by Keplerian + perturbing acceleration
    return np.array([
        vx,
        vy,
        vz,
        ax + -k * x / r_cubed,
        ay + -k * y / r_cubed,
        az + -k * z / r_cubed,
    ])
def cowell(k, r0, v0, tof, rtol=1e-10, *, ad=None, callback=None, nsteps=1000):
    """Propagate an orbit by numerically integrating Cowell's equation.

    Parameters
    ----------
    k : float
        Gravitational constant of main attractor (km^3 / s^2).
    r0 : array
        Initial position (km).
    v0 : array
        Initial velocity (km).
    tof : float
        Time of flight (s).
    rtol : float, optional
        Maximum relative error permitted, default to 1e-10.
    ad : function(t0, u, k), optional
        Non Keplerian acceleration (km/s2), default to None.
    callback : callable, optional
        Function called at each internal integrator step.
    nsteps : int, optional
        Maximum number of internal steps, default to 1000.

    Raises
    ------
    RuntimeError
        If the algorithm didn't converge.

    Note
    -----
    Uses the Dormand & Prince 8(5,3) method from :py:class:`scipy.integrate.ode`.
    """
    # Assemble the six component state vector [x, y, z, vx, vy, vz]
    x, y, z = r0
    vx, vy, vz = v0
    state0 = np.array([x, y, z, vx, vy, vz])

    # Default to a zero perturbing acceleration
    if ad is None:
        def ad(t0, u_, k_):
            return (0, 0, 0)

    # Configure the dop853 integrator, starting at time 0.0
    solver = ode(func_twobody)
    solver.set_integrator('dop853', rtol=rtol, nsteps=nsteps)
    solver.set_initial_value(state0)
    solver.set_f_params(k, ad)
    if callback:
        solver.set_solout(callback)

    solver.integrate(tof)
    if not solver.successful():
        raise RuntimeError("Integration failed")
    # Split the final state back into position and velocity
    return solver.y[:3], solver.y[3:]
def mean_motion(k, r0, v0, tof, **kwargs):
    r"""Propagates orbit using mean motion

    Parameters
    ----------
    k : float
        Gravitational constant of main attractor (km^3 / s^2).
    r0 : array
        Initial position (km).
    v0 : array
        Initial velocity (km).
    ad : function(t0, u, k), optional
        Non Keplerian acceleration (km/s2), default to None.
    tof : float
        Time of flight (s).

    Notes
    -----
    This method takes initial :math:`\vec{r}, \vec{v}`, calculates classical orbit parameters,
    increases mean anomaly and performs inverse transformation to get final :math:`\vec{r}, \vec{v}`
    The logic is based on formulae (4), (6) and (7) from http://dx.doi.org/10.1007/s10569-013-9476-9
    """
    # get the initial true anomaly and orbit parameters that are constant over time
    p, ecc, inc, raan, argp, nu0 = rv2coe(k, r0, v0)
    # get the initial mean anomaly
    M0 = nu_to_M(nu0, ecc)
    # elliptic or hyperbolic orbits
    if not np.isclose(ecc, 1.0, rtol=1e-06):
        # semi-major axis; negative for hyperbolic orbits, hence np.abs below
        a = p / (1.0 - ecc ** 2)
        # given the initial mean anomaly, calculate mean anomaly
        # at the end, mean motion (n) equals sqrt(mu / |a^3|)
        # (the astropy equivalency context lets radians mix with dimensionless values)
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            M = M0 + tof * np.sqrt(k / np.abs(a ** 3)) * u.rad
            nu = M_to_nu(M, ecc)
    # parabolic orbit
    else:
        q = p / 2.0
        # mean motion n = sqrt(mu / 2 q^3) for parabolic orbit
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            M = M0 + tof * np.sqrt(k / (2.0 * q ** 3))
        # using Barker's equation, which is solved analytically
        # for parabolic orbit, get true anomaly
        B = 3.0 * M / 2.0
        A = (B + np.sqrt(1.0 + B ** 2)) ** (2.0 / 3.0)
        D = 2.0 * A * B / (1.0 + A + A ** 2)
        nu = 2.0 * np.arctan(D)
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        # transform the classical elements (with the propagated anomaly) back to r, v
        return coe2rv(k, p, ecc, inc, raan, argp, nu)
def kepler(k, r0, v0, tof, rtol=1e-10, *, numiter=35):
    """Propagates Keplerian orbit.

    Parameters
    ----------
    k : float
        Gravitational constant of main attractor (km^3 / s^2).
    r0 : array
        Initial position (km).
    v0 : array
        Initial velocity (km).
    tof : float
        Time of flight (s).
    rtol : float, optional
        Maximum relative error permitted, default to 1e-10.
    numiter : int, optional
        Maximum number of iterations, default to 35.

    Raises
    ------
    RuntimeError
        If the algorithm didn't converge.

    Note
    -----
    This algorithm is based on Vallado implementation, and does basic Newton
    iteration on the Kepler equation written using universal variables. Battin
    claims his algorithm uses the same amount of memory but is between 40 %
    and 85 % faster.
    """
    # Compute Lagrange coefficients
    f, g, fdot, gdot = _kepler(k, r0, v0, tof, numiter, rtol)

    # Consistency check: f * gdot - fdot * g must equal 1. Raise RuntimeError instead of
    # using `assert` so the check (a) survives `python -O` and (b) matches the RuntimeError
    # documented above. `not (< tol)` preserves the original failure on NaN as well.
    if not np.abs(f * gdot - fdot * g - 1) < 1e-5:  # Fixed tolerance
        raise RuntimeError("Lagrange coefficients are inconsistent, the solution did not converge")

    # Return position and velocity vectors
    r = f * r0 + g * v0
    v = fdot * r0 + gdot * v0

    return r, v
def propagate(orbit, time_of_flight, *, method=mean_motion, rtol=1e-10, **kwargs):
    """Propagate an orbit some time and return the result.

    Parameters
    ----------
    orbit : Orbit
        Orbit to propagate; supplies the attractor, initial state and epoch.
    time_of_flight : ~astropy.units.Quantity
        Time of flight (converted internally to seconds).
    method : callable, optional
        Propagator with signature ``method(k, r0, v0, tof, ...)``, default ``mean_motion``.
    rtol : float, optional
        Relative tolerance forwarded to the propagator.
    """
    # Strip astropy units so the numerical propagators work on plain floats
    k = orbit.attractor.k.to(u.km ** 3 / u.s ** 2).value
    r0 = orbit.r.to(u.km).value
    v0 = orbit.v.to(u.km / u.s).value
    tof = time_of_flight.to(u.s).value

    r, v = method(k, r0, v0, tof, rtol=rtol, **kwargs)

    # Reattach units and advance the epoch by the propagated interval
    return orbit.from_vectors(orbit.attractor, r * u.km, v * u.km / u.s, orbit.epoch + time_of_flight)
@jit
| [
37811,
24331,
363,
341,
16113,
198,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
629,
541,
88,
13,
18908,
4873,
1330,
267,
2934,
198,
198,
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
6738,
755,
72,
459,
305,
13,
... | 2.278261 | 2,760 |
import logging
import requests
import distutils
import hashlib
import urllib
import json
from pprint import pformat
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from me4storage.common.exceptions import LoginError, ApiStatusError
from me4storage.models.status import Status
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
7007,
198,
11748,
1233,
26791,
198,
11748,
12234,
8019,
198,
11748,
2956,
297,
571,
198,
11748,
33918,
198,
198,
6738,
279,
4798,
1330,
279,
18982,
198,
6738,
7007,
13,
43789,
13,
333,
297,
571,
18,
13,
1069,
... | 3.641667 | 120 |
__version__ = "1.4"
import csv
import os # for linesep
import pandas as pd
import numpy as np
from numpy import array as arr
import h5py as h5
from inspect import signature
import uncertainties as unc
import uncertainties.unumpy as unp
from warnings import warn
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit as fit
import scipy.optimize as opt
import scipy.special as special
import scipy.interpolate as interp
import warnings
import MatplotlibPlotters as mp
import PhysicsConstants as mc
import Miscellaneous as misc
from Miscellaneous import what
from copy import copy, deepcopy
from fitters import ( #cython_poissonian as poissonian,
poissonian as poissonian,
FullBalisticMotExpansion, LargeBeamMotExpansion, exponential_saturation )
from fitters.Gaussian import double as double_gaussian, gaussian_2d, arb_2d_sum, bump
import MainAnalysis as ma
import AtomThreshold
import ThresholdOptions
import ExpFile as exp
from ExpFile import ExpFile, dataAddress
# from .TimeTracker import TimeTracker
import PictureWindow as pw
import TransferAnalysisOptions as tao
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
from statsmodels.stats.proportion import proportion_confint as confidenceInterval
import imageio
import matplotlib as mpl
import matplotlib.cm
from IPython.display import Image, HTML, display
def fitManyGaussianImage(im, numGauss, neighborhood_size=20, threshold=1, direct=True, widthGuess=1):
    """
    Fit a sum of 2D Gaussians to an image, seeded from its detected local maxima.

    Maxima finding is based on the answer to this question:
    https://stackoverflow.com/questions/9111711/get-coordinates-of-local-maxima-in-2d-array-above-certain-value
    """
    peaks = findImageMaxima(im, neighborhood_size=neighborhood_size, threshold=threshold)
    if len(peaks) != numGauss:
        raise ValueError("ERROR: didn't find the right number of maxima!")
    # Initial parameters: a global offset followed by (amp, x, y, wx, wy) per peak
    params0 = [min(im.flatten())]
    for peak in peaks:
        params0.extend([im[peak[1], peak[0]], peak[0], peak[1], widthGuess, widthGuess])
    X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))
    model0 = arb_2d_sum.f((X, Y), *params0).reshape(X.shape)
    # Show the data, the seeded model and its residual before fitting
    fig, axes = plt.subplots(1, 5, figsize=(20, 10))
    axes[0].imshow(im)
    axes[0].set_title('Orig')
    axes[1].imshow(model0)
    axes[1].set_title('Guess')
    axes[2].imshow(im - model0)
    axes[2].set_title('Guess-Diff')
    fitParams, fitCov = opt.curve_fit(arb_2d_sum.f, (X, Y), im.flatten(), p0=params0)
    fittedModel = arb_2d_sum.f((X, Y), *fitParams).reshape(X.shape)
    axes[3].imshow(fittedModel)
    axes[3].set_title('Fit')
    axes[4].imshow(im - fittedModel)
    axes[4].set_title('Fit-Diff')
    return fitParams
def handleKeyModifications(hdf5Key, numVariations, keyInput=None, keyOffset=0, groupData=False, keyConversion=None, keySlice=None ):
    """
    Normalize the experiment key (x-axis values) used for an analysis run.

    keySlice: mostly for handling the case of two concurrent variables that are varying the same, so it's not quite a multidimensional
    slice but I need to specify which value to use for the x-axis etc.

    :param hdf5Key: key read from the data file; used when keyInput is None
    :param numVariations: expected number of key entries; a mismatch raises ValueError
    :param keyInput: explicit override for the key; takes precedence over hdf5Key
    :param keyOffset: value subtracted from a 1D key before any conversion
    :param groupData: if True, collapse the key to a single dummy entry
    :param keyConversion: object whose ``f`` method is applied to each key value
    """
    key = None  # NOTE(review): dead assignment - immediately overwritten on the next line
    key = hdf5Key if keyInput is None else keyInput
    if key is None:
        key = arr([0]) if numVariations == 1 else arr([])
    if groupData:
        # NOTE(review): this is a plain list, but `key.shape` is accessed below, which would
        # raise AttributeError when groupData=True - confirm whether this path is exercised
        key = [0]
    if len(key.shape) == 1:
        # NOTE(review): in-place subtraction mutates the caller's array when the key is an ndarray
        key -= keyOffset
    if keyConversion is not None:
        key = [keyConversion.f(k) for k in key]
        #keyName += "; " + keyConversion.units()
    if len(key) != numVariations:
        raise ValueError("ERROR: The Length of the key doesn't match the data found. "
                         "Did you want to use a transfer-based function instead of a population-based function? Key:",
                         len(key), "vars:", numVariations)
    if keySlice is not None:
        # Pick one column of a 2D key (two concurrently varying variables) for the x-axis
        key = key[:,keySlice]
    return key
def genAvgDiscrepancyImage(data, shape, locs):
    """
    Build an image of the given shape whose background is the mean of ``data``, with each
    data value placed at its (row, col) location from ``locs``. Also returns color limits
    (vmin, vmax) positioned symmetrically about the mean so that the mean maps to the
    center (white) of a standard diverging colormap.
    """
    mean_val = np.mean(data)
    pic = np.ones(shape) * mean_val
    for i, loc in enumerate(locs):
        pic[loc[0], loc[1]] = data[i]
    lo, hi = min(pic.flatten()), max(pic.flatten())
    # Widen whichever side is narrower so the limits are symmetric about the mean
    if mean_val - lo > hi - mean_val:
        return pic, lo, 2 * mean_val - lo
    return pic, 2 * mean_val - hi, hi
def extrapolateEveningBiases(hBiasIn, vBiasIn, depthIn, sign=1):
    """
    Search for bias settings that should even out the tweezer depths.

    depth in is some measure of the trap depth which is assumed to be roughly linear with the trap depth. It need not be in the right units.

    :param hBiasIn: horizontal bias weights. NOTE(review): normalized in place below, so the caller's array is mutated - confirm intended
    :param vBiasIn: vertical bias weights (also normalized in place)
    :param depthIn: measured depth for each tweezer (2D: rows x columns)
    :param sign: direction of the bias-to-depth response, forwarded to the model
    :return: (scipy minimize result, extrapolated depths at the optimized biases)
    """
    # normalize biases
    hBiasIn /= np.sum(hBiasIn)
    vBiasIn /= np.sum(vBiasIn)
    guess = np.concatenate((hBiasIn, vBiasIn))
    # modFitFunc is defined elsewhere in this module; it scores a candidate set of biases
    f = lambda g: modFitFunc(sign, hBiasIn, vBiasIn, depthIn, *g, )
    result = opt.minimize(f, guess)
    return result, extrapolateModDepth(sign, hBiasIn, vBiasIn, depthIn, result['x'])
def extrapolateModDepth(sign, hBiasIn, vBiasIn, depthIn, testBiases):
    """
    assumes that hBiasIn and vBiasIn are normalized.
    This function extrapolates what the depth of each tweezer should be based on the
    current depths and current biases. Basically, it assumes that if you change the bias by x%,
    then the depth for every atom in that row/column will change by x%.

    :param testBiases: candidate horizontal then vertical biases, concatenated
    :return: the extrapolated depth array, or None if any candidate bias falls outside (0, 1]
    """
    hBiasTest = testBiases[:len(hBiasIn)]
    if len(hBiasTest) > 1:
        # Reject candidates outside the physically sensible range (0, 1]
        for b in hBiasTest:
            if b <= 0 or b > 1:
                return None
    vBiasTest = testBiases[len(hBiasIn):len(hBiasIn) + len(vBiasIn)]
    if len(vBiasTest) > 1:
        for b in vBiasTest:
            if b <= 0 or b > 1:
                return None
    # normalize tests
    # NOTE(review): when testBiases is an ndarray, hBiasTest/vBiasTest are views, so these
    # in-place divisions also modify the caller's testBiases array - confirm this is intended
    hBiasTest /= np.sum(hBiasTest)
    vBiasTest /= np.sum(vBiasTest)
    modDepth = deepcopy(depthIn)
    for rowInc, _ in enumerate(depthIn):
        # fractional change of the vertical bias for this row
        dif = (vBiasTest[rowInc] - vBiasIn[rowInc])/vBiasIn[rowInc]
        modDepth[rowInc] = modDepth[rowInc] * (1- sign * dif)
    for colInc, _ in enumerate(misc.transpose(depthIn)):
        # fractional change of the horizontal bias for this column
        dif = (hBiasTest[colInc] - hBiasIn[colInc])/hBiasIn[colInc]
        modDepth[:, colInc] = modDepth[:, colInc] * (1-sign * dif)
    return modDepth
def combineData(data, key):
    """
    combines similar key value data entries. data will be in order that unique key items appear in key.
    For example, if key = [1,3,5,3,7,1], returned key and corresponding data will be newKey = [1, 3, 5, 7]
    :param data: 3D array of per-variation data, first axis aligned with key
    :param key: sequence of key values, possibly with repeats
    :return: (averaged data, unique keys) as numpy arrays
    """
    seen = set()
    newKey = []
    newData = []
    for elem in key:
        label = str(elem)
        if label in seen:
            continue  # already averaged this key value
        seen.add(label)
        newKey.append(elem)
        # average together all entries sharing this key value
        matching = [i for i, k in enumerate(key) if k == elem]
        avg = np.zeros((data.shape[1], data.shape[2]))
        for i in matching:
            avg += data[i]
        newData.append(avg / len(matching))
    return arr(newData), arr(newKey)
def fitPic(picture, showFit=True, guessSigma_x=1, guessSigma_y=1, guess_x=None, guess_y=None, fitF=gaussian_2d.f_notheta, guessOffset=None, extraGuess=None):
    """
    Fit an individual picture with a 2d gaussian, and fit the horizontal and vertical averages with 1d gaussians

    :param picture: 2D array of pixel values
    :param showFit: if True, plot the raw data, the fit, and the residuals
    :param guessSigma_x: initial guess for the Gaussian width along x
    :param guessSigma_y: initial guess for the Gaussian width along y
    :param guess_x: optional initial guess for the center x position (defaults to the brightest pixel)
    :param guess_y: optional initial guess for the center y position (defaults to the brightest pixel)
    :param fitF: 2D fit function with signature ``fitF((X, Y), *params)``
    :param guessOffset: optional initial guess for the constant offset (defaults to the image minimum)
    :param extraGuess: additional initial-guess parameters for fit functions with more arguments
    :return: (initial_guess, popt, perr, popt_v, perr_v, popt_h, perr_h) - the 2D guess, fit and errors,
        then the vertical- and horizontal-average 1D fits with their errors. On a failed fit the
        corresponding parameters/errors are all zeros.
    """
    # Default the center guess to the location of the brightest pixel
    pos = arr(np.unravel_index(np.argmax(picture), picture.shape))
    pos[1] = guess_x if guess_x is not None else pos[1]
    # BUG FIX: this previously tested `guess_x is not None`, so passing guess_x without
    # guess_y assigned None into pos, and guess_y alone was silently ignored
    pos[0] = guess_y if guess_y is not None else pos[0]
    pic = picture.flatten()
    x = np.linspace(0, picture.shape[1], picture.shape[1])
    y = np.linspace(0, picture.shape[0], picture.shape[0])
    X, Y = np.meshgrid(x, y)
    ### 2D Fit
    initial_guess = [(np.max(pic) - np.min(pic)), pos[1], pos[0], guessSigma_x, guessSigma_y, np.min(pic) if guessOffset is None else guessOffset]
    # for fitting functions with more arguments
    if extraGuess is not None:
        initial_guess += extraGuess
    try:
        print('fitting...')
        popt, pcov = opt.curve_fit(fitF, (X, Y), pic, p0=initial_guess)  # , epsfcn=0.01, ftol=0)
    except RuntimeError:
        popt = np.zeros(len(initial_guess))
        pcov = np.zeros((len(initial_guess), len(initial_guess)))
        warn('2D Gaussian Picture Fitting Failed!')
    ### Vertical (i.e. collapse in the vertical direction) Average Fit
    vAvg = np.zeros(len(picture[0]))
    for r in picture:
        vAvg += r
    vAvg /= len(picture)
    vGuess = [np.max(vAvg) - np.min(vAvg), x[np.argmax(vAvg)], guessSigma_x, np.min(vAvg)]
    try:
        popt_v, pcov_v = opt.curve_fit(bump.f, x, vAvg, vGuess)
    except RuntimeError:
        popt_v = np.zeros(len(vGuess))
        pcov_v = np.zeros((len(vGuess), len(vGuess)))
        warn('Vertical Average Picture Fitting Failed!')
    ### Horizontal Average Fit
    hAvg = np.zeros(len(picture))
    for c in misc.transpose(picture):
        hAvg += c
    hAvg /= len(picture[0])
    hGuess = [np.max(hAvg) - np.min(hAvg), y[np.argmax(hAvg)], guessSigma_y, np.min(hAvg)]
    try:
        popt_h, pcov_h = opt.curve_fit(bump.f, y, hAvg, hGuess)
    except RuntimeError:
        popt_h = np.zeros(len(hGuess))
        pcov_h = np.zeros((len(hGuess), len(hGuess)))
        warn('Horizontal Average Picture Fitting Failed!')
    if showFit:
        print(fitF)
        data_fitted = fitF((X, Y), *popt)
        fig, axs = plt.subplots(1, 3)
        plt.grid(False)
        im = axs[0].imshow(picture, origin='lower')  # , extent=(x.min(), x.max(), y.min(), y.max())
        data_fitted = data_fitted.reshape(picture.shape[0], picture.shape[1])
        axs[0].contour(x, y, data_fitted, 4, colors='w', alpha=0.2)
        mp.addAxColorbar(fig, axs[0], im)
        axs[0].set_title('Raw Data')
        im = axs[1].imshow(data_fitted, origin='lower')
        mp.addAxColorbar(fig, axs[1], im)
        axs[1].set_title('Fit')
        im = axs[2].imshow(picture - data_fitted, origin='lower')
        mp.addAxColorbar(fig, axs[2], im)
        axs[2].contour(x, y, data_fitted, 4, colors='w', alpha=0.2)
        axs[2].set_title('Residuals')
    return initial_guess, popt, np.sqrt(np.diag(pcov)), popt_v, np.sqrt(np.diag(pcov_v)), popt_h, np.sqrt(np.diag(pcov_h))
def fitPictures(pictures, dataRange, guessSigma_x=1, guessSigma_y=1, quiet=False, firstIsGuide=True):
    """
    fit an array of pictures with gaussians
    if firstIsGuide is true then use the fit from the first pic as the guide for the next pictures.
    :param pictures: iterable of 2D pictures to fit
    :param dataRange: collection of picture indices to fit; indices outside it get zero-filled results
    :param guessSigma_x: initial guess for the Gaussian width along x
    :param guessSigma_y: initial guess for the Gaussian width along y
    :param quiet: if True, suppress the progress printout
    :param firstIsGuide: seed later fits from the first picture's fit parameters
    :return: (fitParameters, fitErrors, vParams, vErrs, hParams, hErrs) as numpy arrays, one row per picture
    """
    fitParameters, fitErrors, vParams, vErrs, hParams, hErrs = [[] for _ in range(6)]
    count = 0
    warningHasBeenThrown = False
    if not quiet:
        print('fitting picture Number...')
    for picInc, picture in enumerate(pictures):
        if not quiet:
            print(picInc, ',', end='')
        if count not in dataRange:
            # Outside the requested range: fill with zeros to keep all outputs the same length
            parameters, errors = [np.zeros(7) for _ in range(2)]
            v_param, v_err, h_param, h_err = [np.zeros(4) for _ in range(4)]
        else:
            try:
                if firstIsGuide and picInc != 0:
                    # amplitude, xo, yo, sigma_x, sigma_y, theta, offset
                    _, parameters, errors, v_param, v_err, h_param, h_err = fitPic(picture, showFit=False,
                                                                                  guess_x=fitParameters[0][1], guess_y=fitParameters[0][2],
                                                                                  guessSigma_x=fitParameters[0][3], guessSigma_y=fitParameters[0][4])
                else:
                    _, parameters, errors, v_param, v_err, h_param, h_err = fitPic(picture, showFit=False,
                                                                                  guessSigma_x=guessSigma_x, guessSigma_y=guessSigma_y)
            except RuntimeError:
                if not warningHasBeenThrown:
                    print("Warning! Not all picture fits were able to fit the picture signal to a 2D Gaussian.\n"
                          "When the fit fails, the fit parameters are all set to zero.")
                    warningHasBeenThrown = True
                parameters, errors = [np.zeros(7) for _ in range(2)]
                # BUG FIX: was `range(2)`, which raised "not enough values to unpack" (2 into 4)
                # whenever this failure path was actually hit
                v_param, v_err, h_param, h_err = [np.zeros(4) for _ in range(4)]
        # append things regardless of whether the fit succeeds or not in order to keep things the right length.
        fitParameters.append(parameters)
        fitErrors.append(errors)
        vParams.append(v_param)
        vErrs.append(v_err)
        hParams.append(h_param)
        hErrs.append(h_err)
        count += 1
    return np.array(fitParameters), np.array(fitErrors), np.array(vParams), np.array(vErrs), np.array(hParams), np.array(hErrs)
# #############################
# ### Analyzing machine outputs
def load_SRS_SR780(fileAddress):
    """
    Load a comma-delimited TXT export from the SRS SR780 signal analyzer.

    :param fileAddress: path to the export file (two columns, no header)
    :return: (frequencies, powers) - the first and second columns as pandas Series
    """
    table = pd.read_csv(fileAddress, delimiter=',', header=None)
    frequencies = table[0]
    powers = table[1]
    return frequencies, powers
def load_HP_4395A(fileAddress):
    """
    Load a data export from the HP 4395A Spectrum & Network Analyzer.

    The first 11 lines are instrument metadata; line 12 holds the tab-separated column headers.

    :param fileAddress: path to the tab-delimited export file
    :return: (frequency column, data-trace column) as pandas Series
    """
    table = pd.read_csv(fileAddress, delimiter='\t', header=11)
    return table["Frequency"], table["Data Trace"]
def load_RSA_6114A(fileLocation):
    """
    Parse a Tektronix RSA 6114A spectrum-analyzer export (XML-style tags, one per line).

    Fixed line positions carry the metadata: line 11 = y units, line 12 = x units,
    line 16 = number of points, line 17 = start of the x range, line 18 = end of the
    x range. Every later line containing a ``>value<`` span is read as a data point.

    :param fileLocation: path to the exported file
    :return: xData, yData, yUnits, xUnits
    """
    lines = []
    count = 0
    yUnits = ""
    xUnits = ""
    xPointNum, xStart, xEnd = [0, 0, 0]
    with open(fileLocation) as file:
        for line in iter(file.readline, ''):
            count += 1
            # 18 lines to skip.
            if count == 11:
                yUnits = str(line[line[:].index('>') + 1:line[1:].index('<') + 1])
                continue
            elif count == 12:
                xUnits = str(line[line[:].index('>') + 1:line[1:].index('<') + 1])
                continue
            elif count == 16:
                # BUG FIX: parse the point count as an integer - np.linspace requires an
                # integral `num`, and a float here raises TypeError on modern numpy
                xPointNum = int(float(line[line[:].index('>') + 1:line[1:].index('<') + 1]))
                continue
            elif count == 17:
                xStart = float(line[line[:].index('>') + 1:line[1:].index('<') + 1])
                continue
            elif count == 18:
                xEnd = float(line[line[:].index('>') + 1:line[1:].index('<') + 1])
                continue
            elif count <= 18:
                continue
            try:
                lines.append(line[line[:].index('>') + 1:line[1:].index('<') + 1])
            except ValueError:
                # Line without a >...< span (e.g. closing tags) - skip it
                pass
    yData = np.float64(arr(lines))
    xData = np.linspace(xStart, xEnd, xPointNum)
    return xData, yData, yUnits, xUnits
# ##########################
# ### Some AOM Optimizations
def maximizeAomPerformance(horCenterFreq, vertCenterFreq, spacing, numTweezersHor, numTweezersVert, iterations=10, paperGuess=True, metric='max',
                           vertAmps=None, horAmps=None, carrierFre=255):
    """
    computes the amplitudes and phases to maximize the AOM performance.

    :param horCenterFreq: center frequency of the horizontal tone comb
    :param vertCenterFreq: center frequency of the vertical tone comb
    :param spacing: frequency spacing between adjacent tones
    :param numTweezersHor: number of horizontal tweezers (tones)
    :param numTweezersVert: number of vertical tweezers (tones)
    :param iterations: number of basin-hopping iterations
    :param paperGuess: if True, seed the phases with the quadratic-phase ansatz
    :param metric: currently unused - TODO confirm before removing
    :param vertAmps: per-tone vertical amplitudes (defaults to all ones)
    :param horAmps: per-tone horizontal amplitudes (defaults to all ones)
    :param carrierFre: carrier frequency used to convert tone frequencies to actual drive
        frequencies (generalizes the previously hard-coded 255; default unchanged)
    :return: (xpts, ypts_x, ypts_y) - time axis and the optimized x and y waveforms
    """
    horFreqs = [horCenterFreq - spacing * (numTweezersHor - 1) / 2.0 + i * spacing for i in range(numTweezersHor)]
    # BUG FIX: this previously used horCenterFreq, so the vertCenterFreq argument was silently ignored
    vertFreqs = [vertCenterFreq - spacing * (numTweezersVert - 1) / 2.0 + i * spacing for i in range(numTweezersVert)]
    actualHFreqs = carrierFre - arr(horFreqs)
    actualVFreqs = carrierFre - arr(vertFreqs)
    if vertAmps is None:
        vertAmps = np.ones(numTweezersVert)
    if horAmps is None:
        horAmps = np.ones(numTweezersHor)
    xBounds = [(0, 2 * mc.pi) for _ in range(numTweezersHor - 1)]
    # Seed phases (last tone's phase is fixed at 0, hence N-1 free parameters)
    if paperGuess:
        xGuess = arr([np.pi * i ** 2 / numTweezersHor for i in range(numTweezersHor - 1)])
    else:
        xGuess = arr([0 for _ in range(numTweezersHor - 1)])
    minimizer_kwargs = dict(method="L-BFGS-B", bounds=xBounds)
    xPhases = opt.basinhopping(getXMetric, xGuess, minimizer_kwargs=minimizer_kwargs, niter=iterations, stepsize=0.2)
    xPhases = list(xPhases.x) + [0]
    print('horFreqs', horFreqs)
    print('horAmps', horAmps)
    print('Hor-Phases:', [misc.round_sig_str(x, 10) for x in xPhases])
    if paperGuess:
        yGuess = arr([np.pi * i ** 2 / numTweezersVert for i in range(numTweezersVert - 1)])
    else:
        yGuess = arr([0 for _ in range(numTweezersVert - 1)])
    yBounds = [(0, 2 * mc.pi) for _ in range(numTweezersVert - 1)]
    minimizer_kwargs = dict(method="L-BFGS-B", bounds=yBounds)
    yPhases = opt.basinhopping(getYMetric, yGuess, minimizer_kwargs=minimizer_kwargs, niter=iterations, stepsize=0.2)
    yPhases = list(yPhases.x) + [0]
    for i, xp in enumerate(yPhases):
        yPhases[i] = misc.round_sig(xp, 10)
    print('vertFreqs', vertFreqs)
    print('vertAmps', vertAmps)
    print('Vert-Phases:', [misc.round_sig_str(y, 10) for y in yPhases])
    # Evaluate the optimized waveforms against the all-zero-phase worst case
    xpts = np.linspace(0, 1e-6, 10000)
    ypts_x = calcWave(xpts, xPhases, actualHFreqs, horAmps)
    yptsOrig = calcWaveCos(xpts, arr([0 for _ in range(numTweezersHor)]), actualHFreqs, horAmps)
    plt.title('X-Axis')
    plt.plot(xpts, ypts_x, ':', label='X-Optimization')
    plt.plot(xpts, yptsOrig, ':', label='X-Worst-Case')
    plt.legend()
    plt.figure()
    yptsOrig = calcWave(xpts, arr([0 for _ in range(numTweezersVert)]), actualVFreqs, vertAmps)
    ypts_y = calcWaveCos(xpts, yPhases, actualVFreqs, vertAmps)
    plt.title('Y-Axis')
    plt.plot(xpts, ypts_y, ':', label='Y-Optimization')
    plt.plot(xpts, yptsOrig, ':', label='Y-Worst-Case')
    plt.legend()
    return xpts, ypts_x, ypts_y,
def integrateData(pictures):
    """
    Sum all pixel values of each picture.

    :param pictures: numpy array of pictures, either 3D (numPics, height, width)
        or 2D (height, width).
    :return: for 3D input, a float array with one integrated total per picture;
        for 2D input, a single scalar total.
    """
    # Vectorized replacement for the original per-pixel python loops (the old
    # triple loop was O(pixels) in interpreted python; np.sum runs in C).
    if len(pictures.shape) == 3:
        # .astype(float) matches the original behavior of accumulating into an
        # np.zeros (float64) array even for integer input.
        return np.sum(pictures, axis=(1, 2)).astype(float)
    return np.sum(pictures)
def beamIntensity(power, waist, radiusOfInterest=0):
    """
    Average beam intensity, in mW/cm^2, of a beam over some radius of interest.

    :param power: power of the laser beam, in mW.
    :param waist: waist of the laser beam, in cm.
    :param radiusOfInterest: the radius of interest, in cm. For the case where
        this is << waist the general expression reduces to the common peak
        formula 2P/(pi*w^2); the literal math gives 0/0 at exactly 0, so the
        reduced form is used there.
    """
    if radiusOfInterest == 0:
        return 2 * power / (mc.pi * waist ** 2)
    enclosedFraction = 1 - np.exp(-2 * radiusOfInterest ** 2 / waist ** 2)
    return power * enclosedFraction / (mc.pi * radiusOfInterest ** 2)
def computeBaslerGainDB(rawGain):
    """
    Convert a raw Basler camera gain setting to gain in dB relative to the
    unity-gain setting.

    (NOT currently used in the fluorescence calc...)
    :param rawGain: the raw gain setting; valid from 110 to 1023.
    :return: gain in dB, or None (with a warning) if rawGain is out of range.
    """
    # Reference (unity-gain) offset. Raw gain 260 is unity gain for the Basler
    # (see computeMotNumber docstring). BUGFIX: the original computed this
    # reference at rawGain itself, so the 110-511 branch always returned 0.
    # TODO(review): confirm 260 is the intended unity-gain reference setting.
    G_c = 20 * np.log10((658 + 260) / (658 - 260))
    if 110 <= rawGain <= 511:
        gainDB = 20 * np.log10((658 + rawGain) / (658 - rawGain)) - G_c
    elif 511 < rawGain <= 1023:
        # linear regime of the gain curve (roughly continuous with the branch above).
        gainDB = 0.0354 * rawGain - G_c
    else:
        gainDB = None
        warn('raw gain out of range! gainDB set to None')
    return gainDB
def computeScatterRate(totalIntensity, D2Line_Detuning):
    """
    Photon scattering rate of a single atom. From Steck, equation 48.

    Uses the 2-level approximation, which is good for near-resonant light since
    the near-resonant transition will be dominant. Assumes the D2 2 -> 3'
    transition.
    :param totalIntensity: the total intensity (from all beams) shining on the atoms.
    :param D2Line_Detuning: the detuning, in Hz, of the light shining on the
        atoms from the D2 transition.
    """
    saturation = totalIntensity / mc.Rb87_I_Sat_ResonantIsotropic_2_to_3
    detuningTerm = 4 * (D2Line_Detuning / mc.Rb87_D2Gamma) ** 2
    return (mc.Rb87_D2Gamma / 2) * saturation / (1 + detuningTerm + saturation)
def computeFlorescence(greyscaleReading, imagingLoss, imagingLensDiameter, imagingLensFocalLength, exposure):
    """
    Estimate the fluorescence (photon scattering) picked up by the camera.

    TODO: incorporate gain into the calculation; currently assumes gain = X1.
    Need to check the proper conversion from the basler software. I'd expect a
    power conversion, so a factor of 20.
    :param greyscaleReading: integrated greyscale counts from the camera.
    :param imagingLoss: fractional light loss in the imaging line.
    :param imagingLensDiameter: diameter of the imaging lens.
    :param imagingLensFocalLength: focal length of the imaging lens.
    :param exposure: camera exposure time.
    :return: the inferred fluorescence rate.
    """
    photonEnergy = mc.h * mc.c / mc.Rb87_D2LineWavelength
    collectedPhotons = greyscaleReading * mc.cameraConversion / photonEnergy
    # solid-angle collection factor of the imaging lens.
    lensFactor = imagingLensDiameter ** 2 / (16 * imagingLensFocalLength ** 2)
    collectionEfficiency = 1 * imagingLoss * lensFactor * exposure
    return collectedPhotons / collectionEfficiency
# mot radius is in cm
def computeMotNumber(sidemotPower, diagonalPower, motRadius, exposure, imagingLoss, greyscaleReading, detuning=10e6):
    """
    Estimate the atom number in the MOT from a fluorescence measurement.

    The mot number is determined via the following formula:
    MOT # = (Scattered Light Collected) / (Scattered light predicted per atom)

    :param sidemotPower: power in the sidemot beam, in mW. Code assumes a 3.3mm sidemot waist.
    :param diagonalPower: power in an individual diagonal mot beam, in mW.
    :param motRadius: the approximate radius of the MOT, in cm (e.g. the sigma of the MOT size;
        should not matter much if it's small enough). Used as a higher-order part of the
        calculation accounting for the spread of the beam intensity over the finite MOT size;
        less important for big MOT beams.
    :param exposure: exposure time of the camera, in seconds.
    :param imagingLoss: approximate light loss in the imaging line (mirror efficiency, line
        filter, etc.); typically 0.8 accounting for the line filter.
    :param greyscaleReading: the integrated greyscale count reading from the camera (4x4
        binning on the Basler assumed, gain at 260 which is unity gain).
    :param detuning: detuning of the MOT light, in Hz.
    :return: (motNumber, fluorescence)
    """
    sidemotWaist = .33 / (2 * np.sqrt(2))  # in cm, from the 3.3 mm spec
    diagonalWaist = 2.54 / 2               # in cm, rough one-inch main MOT beam
    # total intensity at the atoms: the sidemot beam plus two diagonal beams.
    sidemotIntensity = beamIntensity(sidemotPower, sidemotWaist, motRadius)
    diagonalIntensity = beamIntensity(diagonalPower, diagonalWaist, motRadius)
    totalIntensity = sidemotIntensity + 2 * diagonalIntensity
    scatterRate = computeScatterRate(totalIntensity, detuning)
    # fixed imaging geometry: one-inch lens at 10 cm focal length.
    imagingLensDiameter = 2.54
    imagingLensFocalLength = 10
    fluorescence = computeFlorescence(greyscaleReading, imagingLoss, imagingLensDiameter,
                                      imagingLensFocalLength, exposure)
    return fluorescence / scatterRate, fluorescence
def calcBallisticTemperature(times, sizeSigmas, guess = LargeBeamMotExpansion.guess(), sizeErrors=None):
    """ Small wrapper around a fit
    expects time in s, sigma in m
    return temp, vals, cov

    Fits the ballistic-expansion model LargeBeamMotExpansion.f to the measured
    cloud sizes vs. expansion times; the temperature is taken from the third
    fit parameter. If the full fit fails with an OptimizeWarning, a restricted
    fit with the middle model parameter pinned to 0 is attempted; on further
    failure (or a RuntimeError from the full fit) zeros are returned.

    NOTE(review): the default `guess` is evaluated once at definition time, so
    every call shares the same default object -- confirm that
    LargeBeamMotExpansion.guess() returns a fresh/immutable value.
    """
    # escalate OptimizeWarning to an exception so poor fits land in the
    # except-branches below.
    warnings.simplefilter("error", opt.OptimizeWarning)
    try:
        fitVals, fitCovariances = opt.curve_fit(LargeBeamMotExpansion.f, times, sizeSigmas, p0=guess, sigma = sizeErrors)
        temperature = fitVals[2]
    except opt.OptimizeWarning as error:
        warn('Mot Temperature Expansion Fit Failed!' + str(error))
        try:
            # restricted fit: hold the model's middle parameter at 0 and fit
            # only the first and last parameters.
            fitValsTemp, fitCovTemp = opt.curve_fit(lambda t,x,y: LargeBeamMotExpansion.f(t, x, 0, y), times, sizeSigmas, p0=[guess[0], guess[2]], sigma = sizeErrors)
            temperature = fitValsTemp[1]
            # re-inflate the restricted results back to full-size vals / covariance.
            fitVals = [fitValsTemp[0], 0, fitValsTemp[1]]
            fitCovariances = np.zeros((len(guess),len(guess)))
            fitCovariances[0,0] = fitCovTemp[0,0]
            fitCovariances[2,0] = fitCovTemp[1,0]
            fitCovariances[0,2] = fitCovTemp[0,1]
            fitCovariances[2,2] = fitCovTemp[1,1]
        except opt.OptimizeWarning:
            # restricted fit failed too; report zeros.
            fitVals = np.zeros(len(guess))
            fitCovariances = np.zeros((len(guess), len(guess)))
            temperature = 0
            warn('Restricted Mot Temperature Expansion Fit Failed Too with optimize error!')
        except RuntimeError:
            fitVals = np.zeros(len(guess))
            fitCovariances = np.zeros((len(guess), len(guess)))
            temperature = 0
            warn('Mot Temperature Expansion Fit Failed with Runtime error!')
    except RuntimeError:
        # curve_fit raises RuntimeError when it cannot converge at all.
        fitVals = np.zeros(len(guess))
        fitCovariances = np.zeros((len(guess), len(guess)))
        temperature = 0
        warn('Mot Temperature Expansion Fit Failed!')
    # restore normal warning behavior before returning.
    warnings.simplefilter("default", opt.OptimizeWarning)
    return temperature, fitVals, fitCovariances
def orderData(data, key, keyDim=None, otherDimValues=None):
    """
    Sort data entries by their key values (ascending).

    :param data: the data entries, one per key value.
    :param key: scalar key values, or 2-component keys when keyDim is used.
    :param keyDim: for 2-component keys, which component to sort on first; the
        other component is then sorted within each primary-key group.
    :param otherDimValues: optional extra per-entry values reordered alongside.
    return arr(data), arr(key), arr(otherDimValues)

    NOTE(review): when keyDim is not None the 3-way unpack below requires
    otherDimValues to be provided, and the tmpVals.resize call assumes each
    data entry is 3D (arr(data) is 4D) -- confirm with callers.
    """
    zipObj = (zip(key, data, otherDimValues) if otherDimValues is not None else zip(key, data))
    if keyDim is not None:
        # primary sort on the chosen key component.
        key, data, otherDimValues = list(zip(*sorted(zipObj, key=lambda x: x[0][keyDim])))
        # assuming 2D
        # count how many entries share the first primary-key value; this is the
        # size of each secondary group.
        count = 0
        for val in key:
            if val[keyDim] == key[0][keyDim]:
                count += 1
        majorKeySize = int(len(key) / count)
        tmpKey = arr(key[:])
        tmpVals = arr(data[:])
        # reshape into [primary group][secondary entry] so each group can be
        # sorted on the other key component.
        tmpKey.resize([majorKeySize, count, 2])
        tmpVals.resize([majorKeySize, count, arr(data).shape[1], arr(data).shape[2], arr(data).shape[3]])
        finKey = []
        finData = []
        for k, d in zip(tmpKey, tmpVals):
            # secondary sort within each primary-key group.
            k1, d1 = list(zip(*sorted(zip(k, d), key=lambda x: x[0][int(not keyDim)])))
            for k2, d2 in zip(k1, d1):
                finKey.append(arr(k2))
                finData.append(arr(d2))
        return arr(finData), arr(finKey), arr(otherDimValues)
    else:
        # simple scalar-key sort.
        if otherDimValues is None:
            key, data = list(zip(*sorted(zipObj, key=lambda x: x[0])))
        else:
            key, data, otherDimValues = list(zip(*sorted(zipObj, key=lambda x: x[0])))
        return arr(data), arr(key), arr(otherDimValues)
def groupMultidimensionalData(key, varyingDim, atomLocations, survivalData, survivalErrs, loadingRate):
    """
    Normally my code takes all the variations and looks at different locations for all those variations.
    In the multi-dim case, there are multiple variations for the same primary key value. I need to
    split up those multiple variations.

    :param key: 1D key (no grouping needed, passed through) or 2D key with one
        column per scan dimension.
    :param varyingDim: index of the primary (varying) key dimension.
    :param atomLocations: the analyzed locations.
    :param survivalData, survivalErrs, loadingRate: per-location data, regrouped
        so each output entry corresponds to one (location, secondary-value) pair.
    :return: (newKey, locationsList, errorData, transferData, loadingRate, otherDimsList)
    """
    if len(key.shape) == 1:
        # no grouping needed
        return (key, atomLocations, survivalErrs, survivalData, loadingRate,
                [None for _ in range(len(key)*len(atomLocations))])
    # make list of unique indexes for each dimension
    uniqueSecondaryAxisValues = []
    newKey = []
    for keyValNum, secondaryValues in enumerate(misc.transpose(key)):
        if keyValNum == varyingDim:
            # the varying dimension becomes the new (1D) key.
            for val in secondaryValues:
                if val not in newKey:
                    newKey.append(val)
            continue
        # every other dimension contributes its unique values.
        uniqueSecondaryAxisValues.append([])
        for val in secondaryValues:
            if val not in uniqueSecondaryAxisValues[-1]:
                uniqueSecondaryAxisValues[-1].append(val)
    # total number of secondary-dimension combinations per location.
    extraDimValues = 1
    for i, dim in enumerate(uniqueSecondaryAxisValues):
        extraDimValues *= len(dim)
    newLoadingRate, newTransferData, newErrorData, locationsList, otherDimsList = [[] for _ in range(5)]
    # NOTE(review): with several secondary dimensions of unequal lengths this
    # flatten produces a ragged/object array -- confirm usage is single-secondary.
    allSecondaryDimVals = arr(uniqueSecondaryAxisValues).flatten()
    # iterate through all locations
    for loc, locData, locErrs, locLoad in zip(atomLocations, survivalData, survivalErrs, loadingRate):
        # reshape each location's flat data into [secondary value][primary key]
        # via resize + transpose.
        newData = locData[:]
        newErr = locErrs[:]
        newLoad = locLoad[:]
        newData.resize(int(len(locData)/extraDimValues), extraDimValues)
        newData = misc.transpose(newData)
        newErr.resize(int(len(locData)/extraDimValues), extraDimValues)
        newErr = misc.transpose(newErr)
        newLoad.resize(int(len(locData)/extraDimValues), extraDimValues)
        newLoad = misc.transpose(newLoad)
        # iterate through all extra dimensions in the locations
        secondIndex = 0
        for val, err, load in zip(newData, newErr, newLoad):
            newTransferData.append(val)
            newErrorData.append(err)
            newLoadingRate.append(load)
            locationsList.append(loc)
            otherDimsList.append(allSecondaryDimVals[secondIndex])
            secondIndex += 1
    return (arr(newKey), arr(locationsList), arr(newErrorData), arr(newTransferData), arr(newLoadingRate),
            arr(otherDimsList))
def getNormalizedRmsDeviationOfResiduals(xdata, ydata, function, fitVals):
    """
    RMS of the fit residuals, normalized by the mean of the measured data.

    :param xdata, ydata: the measured data.
    :param function: the fitted model, called as function(xdata, *fitVals).
    :param fitVals: the fitted parameter values.
    """
    residuals = ydata - function(xdata, *fitVals)
    normalization = np.mean(ydata)
    return np.sqrt(sum(residuals ** 2) / len(residuals)) / normalization
def getSurvivalBoolData(ICounts, TCounts, IThreshold, TThreshold):
    """
    Threshold count data into boolean atom-presence data.

    I stands for init, as in initialCounts; T stands for Transfered, as in
    transfered counts.
    :return: (IAtoms, TAtoms) lists of booleans, True where the count exceeds
        the corresponding threshold.
    """
    IAtoms = []
    TAtoms = []
    for initCount, transferCount in zip(ICounts, TCounts):
        IAtoms.append(initCount > IThreshold)
        TAtoms.append(transferCount > TThreshold)
    return IAtoms, TAtoms
def calculateAtomThreshold(fitVals):
    """
    Compute an atom / no-atom count threshold from a double-gaussian fit.

    :param fitVals: [Amplitude1, center1, sigma1, amp2, center2, sigma2]
    :return: (threshold, fidelity)
    """
    if fitVals[5] + fitVals[2] == 0:
        # degenerate fit: fall back to a hard-coded threshold with zero fidelity.
        return 200, 0
    # place the threshold between the two centers, weighted by the widths.
    separation = (fitVals[4] - fitVals[1]) / (np.abs(fitVals[5]) + np.abs(fitVals[2]))
    threshold = abs(fitVals[1] + separation * fitVals[2])
    if np.isnan(threshold):
        threshold = 200
    return threshold, getFidelity(threshold, fitVals)
def getConditionHits(atomPresenceData, hitCondition, verbose=False):
    """
    Evaluate a hit-condition object against the atom presence data.

    Args:
        atomPresenceData: indexed as [whichPic][whichAtom][whichPicture], per
            the lookups below.
        hitCondition: an object with whichAtoms, whichPic, conditions (True /
            False / None per atom) and numRequired (an int, -1 for "all", or a
            list of such ints interpreted as an inclusive "or").
        verbose: currently unused.
    Returns:
        ensembleHits (1D array of bool or int): ensembleHits[whichPicture] one to one with each picture in the
            atomList. This list is the answer to whether a given picture matched the hit condition or not.
            If partial credit, it is instead of a bool an int which records the number of atoms in the picture.
    """
    #assert(type(hitCondition) == type(tao.condition()))
    ensembleHits = []
    for picInc, _ in enumerate(atomPresenceData[0][0]):
        # count how many per-atom requirements are satisfied in this picture.
        numMatch = 0
        for atomInc, whichAtom in enumerate(hitCondition.whichAtoms):
            #atoms = misc.transpose(atomPresenceData[hitCondition.whichPic[atomInc]])[picInc]
            needAtom = hitCondition.conditions[atomInc]
            if needAtom == None:
                continue # no requirement
            # a match means presence agrees with the required condition.
            if ((atomPresenceData[hitCondition.whichPic[atomInc]][whichAtom][picInc] and needAtom)
                or (not atomPresenceData[hitCondition.whichPic[atomInc]][whichAtom][picInc] and not needAtom)):
                numMatch += 1
            #if (atoms[whichAtom] and needAtom) or (not atoms[whichAtom] and not needAtom):
            #    numMatch += 1
        hit = False
        if type(hitCondition.numRequired) == list:
            # interpret a list of numbers as an inclusive "or" condition.
            for num in hitCondition.numRequired:
                if (num == -1 and numMatch == len(hitCondition.whichAtoms)) or numMatch == num:
                    hit = True
        # -1 means "all listed atoms must match".
        elif (hitCondition.numRequired == -1 and numMatch == len(hitCondition.whichAtoms)) or numMatch == hitCondition.numRequired:
            hit = True
        ensembleHits.append(hit)
    return ensembleHits
def getEnsembleHits(atomPresenceData, hitCondition=None, requireConsecutive=False, partialCredit=False):
    """
    This function determines whether an ensemble of atoms was hit in a given picture. Give it whichever
    picture data you need.
    NEW: this function is now involved in post-selection work
    Args:
        atomPresenceData (2D Array of bool): atomPresenceData[whichAtom][whichPicture]
            First dimension is atom index, second dimension is the picture, and the value is whether the
            given atom was present in the given picture.
        hitCondition (1D array of bool or int):
            if 1D array of bool:
                The picture of the expected configuration which counts as a hit. Entries of None impose
                no condition on that atom. Defaults to "all atoms present".
            if int:
                In this case, the number of atoms in the atomPresenceData which must be present to count as a hit.
        requireConsecutive (bool):
            (only relevant for int hitCondition.) An option which specifies if the number of atoms
            specified by the hit condition must be consecutive in the list in order to count as a hit.
        partialCredit (bool):
            (only relevant for array hitCondition). An option which specifies if the user wants a relative
            measurement of how many atoms made it to the given configuration. Note: currently doesn't
            actually use the hit condition for some reason?
    Returns:
        ensembleHits (1D array of bool or int): ensembleHits[whichPicture] one to one with each picture in the
            atomList. This list is the answer to whether a given picture matched the hit condition or not.
            If partial credit, it is instead of a bool an int which records the number of atoms in the picture.
    """
    if hitCondition is None:
        hitCondition = np.ones(atomPresenceData.shape[0])
    ensembleHits = []
    if type(hitCondition) is int:
        # condition is, e.g, 5 out of 6 of the ref pic, and if consecutive, all atoms should be connected somehow.
        for atoms in misc.transpose(atomPresenceData):
            matches = 0
            consecutive = True
            for atom in atoms:
                if atom:
                    matches += 1
                # else there's no atom. 3 possibilities: before string of atoms, after string, or in middle.
                # if in middle, consecutive requirement is not met.
                elif 0 < matches < hitCondition:
                    consecutive = False
            if requireConsecutive:
                ensembleHits.append((matches == hitCondition) and consecutive)
            else:
                ensembleHits.append(matches == hitCondition)
    else:
        if partialCredit:
            # record the raw number of atoms present in each picture.
            for inc, atoms in enumerate(misc.transpose(atomPresenceData)):
                ensembleHits.append(sum(atoms)) # / len(atoms))
        else:
            # a hit requires presence to agree with every non-None condition entry.
            # NOTE(review): comparisons below use `== None`; `is None` is the
            # conventional form.
            for inc, atoms in enumerate(misc.transpose(atomPresenceData)):
                ensembleHits.append(True)
                for atom, needAtom in zip(atoms, hitCondition):
                    if needAtom == None:
                        continue # no condition
                    if not atom and needAtom:
                        ensembleHits[inc] = False
                    if atom and not needAtom:
                        ensembleHits[inc] = False
    return ensembleHits
def getAvgBorderCount(data, p, ppe):
    """
    Average count per border pixel for each instance of picture p.

    :param data: the array of pictures; 4D (variation, rep, h, w) or 3D (pics, h, w).
    :param p: which picture within each experiment to use.
    :param ppe: pictures Per Experiment.
    :return: 1D float array of the average border-pixel count per selected picture.
    """
    if len(data.shape) == 4:
        rawData = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))
    else:
        rawData = data
    # BUGFIX: the original normalization used rawData[0][:][0], which is a ROW
    # (identical to rawData[0][0]), double-counting the width and giving the
    # wrong pixel count for non-square pictures.
    height, width = rawData.shape[-2], rawData.shape[-1]
    # summing first/last rows and first/last columns counts each corner twice,
    # so subtract the corners once and divide by the true border pixel count.
    avgBorderCount = (np.sum(rawData[p::ppe, 0, :], axis=1) + np.sum(rawData[p::ppe, -1, :], axis=1)
                      + np.sum(rawData[p::ppe, :, 0], axis=1) + np.sum(rawData[p::ppe, :, -1], axis=1)).astype(float)
    corners = (rawData[p::ppe, 0, 0] + rawData[p::ppe, 0, -1]
               + rawData[p::ppe, -1, 0] + rawData[p::ppe, -1, -1])
    avgBorderCount -= corners
    avgBorderCount /= 2 * width + 2 * height - 4
    return avgBorderCount
def normalizeData(data, atomLocation, picture, picturesPerExperiment, borders):
    """
    Extract the counts at one location for one picture index of every
    experiment, with the per-picture border (background) value subtracted.

    :param data: the array of pictures, possibly still split by variation.
    :param atomLocation: the (row, column) location to analyze.
    :param picture: which picture within each experiment to select.
    :param picturesPerExperiment: number of pictures per experiment.
    :param borders: background value to subtract for each selected picture.
    :return: 1D array of background-subtracted counts at atomLocation.
    """
    # if given data separated into different variations, flatten the variation separation.
    if len(data.shape) == 4:
        rawData = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))
    else:
        rawData = data
    allData = np.array([])
    count = 0
    for imageInc in range(rawData.shape[0]):
        # only keep the requested picture of each experiment.
        if (imageInc + picturesPerExperiment - picture) % picturesPerExperiment != 0:
            continue
        if len(atomLocation) != 2:
            raise TypeError('AtomLocation, which has value ' + str(atomLocation) + ', should be 2 elements.')
        if len(borders) <= count:
            raise IndexError('borders, of len ' + str(len(borders)), 'is not long enough!')
        allData = np.append(allData, rawData[imageInc][atomLocation[0]][atomLocation[1]] - borders[count])
        count += 1
    return allData
def guessGaussianPeaks(binCenters, binnedData):
    """
    Guess where the two gaussian peaks of binned count data are.

    One peak is assumed to sit at the maximum of the binned data. An
    over-weighted (i.e. extra tall) poissonian distribution e^-k k^n/n!
    centered there is subtracted from the binned data to squelch that peak,
    and the second peak is assumed to sit at the maximum of the remainder.
    :param binCenters: the pixel-numbers corresponding to the binned data points.
    :param binnedData: the binned data data points.
    :return: the two guesses.
    """
    if len(binCenters) == 0 or len(binnedData) == 0:
        raise ValueError("inputted data was empty?!?" + str(binCenters) + str(binnedData))
    # Shift to positive x while working with the poissonian (negative x would
    # misbehave). Only used to get an appropriate width for the no-atoms peak;
    # arguably a gaussian would make more sense. The shift is undone below.
    shift = 800
    overweight = 2
    binCenters += shift
    # first guess: location of the global maximum.
    firstPeakIndex = np.argmax(binnedData)
    firstPeakLocation = binCenters[firstPeakIndex]
    # amplitude that makes the subtracted poissonian `overweight` times taller
    # than the data's maximum.
    amplitude = overweight * max(binnedData) / poissonian.f(firstPeakLocation, firstPeakLocation, 1)
    squelched = [binnedData[i] - poissonian.f(binCenters[i], firstPeakLocation, amplitude)
                 for i in range(len(binCenters))]
    # second guess: maximum after the first peak has been squelched.
    secondPeakLocation = binCenters[np.argmax(squelched)]
    binCenters -= shift
    return firstPeakLocation - shift, secondPeakLocation - shift
def getGenerationEvents(loadAtoms, finAtomsAtoms):
    """
    More or less the opposite of "GetSurvivalEvents": count an event as +1 when
    an experiment starts with no atom and ends with one. Useful to characterize
    all sorts of things, e.g. hopping, background catches, rearranging, etc.

    Loss is not taken into account, since these experiments feed back on loss.
    :param loadAtoms: atom presence in the first (loading) picture.
    :param finAtomsAtoms: atom presence in the final picture.
    :return: float array with one entry per experiment: -1 if an atom was
        loaded (not interesting for generation), 1 if an atom was generated,
        0 if there was never an atom.
    """
    # Build a list and convert once: the original repeatedly called np.append
    # (O(n^2)) and also discarded the result of a no-op genData.astype(int).
    events = []
    for loaded, final in zip(loadAtoms, finAtomsAtoms):
        if loaded:
            # not interesting for generation.
            events.append(-1)
        elif final:
            # atom was generated.
            events.append(1)
        else:
            # never an atom.
            events.append(0)
    # float dtype matches the original np.append-based accumulation.
    return np.array(events, dtype=float)
def processSingleImage(rawData, bg, window, xMin, xMax, yMin, yMax, accumulations, zeroCorners, smartWindow,
                       manuallyAccumulate=True):
    """
    Process the original data, giving back data that has been ordered and windowed as well as a version
    that has the background subtracted out.

    This is a helper function that is expected to be embedded in a package. As such, many parameters are
    simply passed through some other function in order to reach this function, and all parameters are
    required.
    :param rawData: picture data; 3D (pics, h, w) when manuallyAccumulate, else a single 2D picture.
    :param bg: the background; a 2D array, a scalar-like array, or an int file number to load from disk.
    :param window: (xMin, xMax, yMin, yMax); (0, 0, 0, 0) means use the individual bound arguments.
    :param accumulations: number of accumulations to normalize the counts by.
    :param zeroCorners: if True, zero each output image using its own corner average.
    :param smartWindow: if True, window automatically around the brightest spot.
    :param manuallyAccumulate: if True, sum the pictures in rawData together first.
    :return: normData, dataMinusBg, xPts, yPts
    """
    # handle manual accumulations, where the code just sums pictures together.
    if manuallyAccumulate and not len(rawData.shape) == 3:
        print('ERROR: Requested manual accumulation but raw data doesn"t have the correct shape for that.')
    if manuallyAccumulate:
        summedPic = np.zeros((rawData.shape[1], rawData.shape[2]))
        for pic in rawData:
            summedPic += pic
        rawData = summedPic
    # handle windowing defaults
    allXPts = np.arange(1, rawData.shape[1])
    allYPts = np.arange(1, rawData.shape[0])
    if smartWindow:
        maxLocs = coordMax(rawData)
        # int() casts: the /5 divisions give floats, which are not valid numpy
        # slice indices (the original code would raise a TypeError here).
        xMin = int(maxLocs[1] - rawData.shape[1] / 5)
        xMax = int(maxLocs[1] + rawData.shape[1] / 5)
        yMin = int(maxLocs[0] - rawData.shape[0] / 5)
        yMax = int(maxLocs[0] + rawData.shape[0] / 5)
    elif window != (0, 0, 0, 0):
        xMin, xMax, yMin, yMax = window
    else:
        # 0 means "to the edge"; negative bounds are clamped to 0.
        if xMax == 0:
            xMax = len(rawData[0])
        if yMax == 0:
            yMax = len(rawData)
        if xMax < 0:
            xMax = 0
        if yMax < 0:
            yMax = 0
    xPts = allXPts[xMin:xMax]
    yPts = allYPts[yMin:yMax]
    # window images.
    rawData = np.copy(np.array(rawData[yMin:yMax, xMin:xMax]))
    # final normalized data
    normData = rawData / accumulations
    # ### -Background Analysis
    # if user just entered a number, assume that it's a file number.
    if type(bg) == int and not bg == 0:
        print('loading background file ', bg)
        bg, _, _, _ = loadHDF5(bg)
        if manuallyAccumulate:
            summedPic = np.zeros((bg.shape[1], bg.shape[2]))
            for pic in bg:
                summedPic += pic
            bg = summedPic
        else:
            bg = bg[0]
    bg /= accumulations
    # window the background
    if not bg.size == 1:
        bg = np.copy(np.array(bg[yMin:yMax, xMin:xMax]))
    dataMinusBg = np.copy(normData)
    dataMinusBg -= bg
    # it's important and consequential that the zeroing here is done after the background / corner is subtracted.
    if zeroCorners:
        # BUGFIX: this previously read the leaked loop variable `pic` (the last
        # accumulated picture, or a NameError when not accumulating) instead of
        # the images actually being zeroed; cf. the same step in processImageData.
        cornerAvg = (dataMinusBg[0, 0] + dataMinusBg[0, -1] + dataMinusBg[-1, 0] + dataMinusBg[-1, -1]) / 4
        dataMinusBg -= cornerAvg
        cornerAvg = (normData[0, 0] + normData[0, -1] + normData[-1, 0] + normData[-1, -1]) / 4
        normData -= cornerAvg
    return normData, dataMinusBg, xPts, yPts
def processImageData(key, rawData, bg, window, accumulations, dataRange, zeroCorners,
                     smartWindow, manuallyAccumulate=False):
    """
    Process the original data, giving back data that has been ordered and windowed as well as two other
    versions that have either the background or the average of the pictures subtracted out.

    This is a helper function that is expected to be embedded in a package. As such, many parameters are
    simply passed through some other function in order to reach this function, and all parameters are
    required.
    :param key: one key value per (accumulated) picture, used for ordering.
    :param rawData: 3D picture data (pics, height, width).
    :param bg: the background; a picture, a scalar-like array, or an int file number (loaded via exp.ExpFile).
    :param window: a pw.PictureWindow used to crop each picture (rebuilt when smartWindow is set).
    :param accumulations: group size for manual accumulation.
    :param dataRange: optional slice bounds selecting which ordered pictures to analyze.
    :param zeroCorners: if True, zero each output picture using its own corner average.
    :param smartWindow: if True, build the window automatically around the pictures' brightest spots.
    :param manuallyAccumulate: if True, sum groups of `accumulations` pictures in software.
    :return: key, normData, dataMinusBg, dataMinusAvg, avgPic
    """
    # handle windowing defaults
    if smartWindow:
        # window around the bounding box of the per-picture maxima, padded by 20%.
        maxLocs = []
        for dat in rawData:
            maxLocs.append(coordMax(dat))
        maxLocs = arr(maxLocs)
        xMin = min(maxLocs[:, 0])
        xMax = max(maxLocs[:, 0])
        yMin = min(maxLocs[:, 1])
        yMax = max(maxLocs[:, 1])
        xRange = rawData.shape[2] / 2
        yRange = rawData.shape[1] / 2
        if xRange < xMax - xMin:
            xRange = xMax - xMin
        if yRange < yMax - yMin:
            yRange = yMax - yMin
        xMin -= 0.2 * xRange
        xMax += 0.2 * xRange
        yMin -= 0.2 * yRange
        yMax += 0.2 * yRange
        window = pw.PictureWindow( xMin, xMax, yMin, yMax )
    if manuallyAccumulate:
        # TODO: either remove this or keep this but change the average order since we scan all variation and then
        # repeat the rep. -ZZP
        # ignore shape[1], which is the number of pics in each variation. These are what are getting averaged.
        avgPics = np.zeros((int(rawData.shape[0] / accumulations), rawData.shape[1], rawData.shape[2]))
        varCount = 0
        for var in avgPics:
            for picNum in range(accumulations):
                var += rawData[varCount * accumulations + picNum]
            varCount += 1
        rawData = avgPics
    if rawData.shape[0] != len(key):
        raise ValueError("ERROR: number of pictures (after manual accumulations) " + str(rawData.shape[0]) +
                         " data doesn't match length of key " + str(len(key)) + "!")
    # combine and order data.
    rawData, key = combineData(rawData, key)
    rawData, key, _ = orderData(rawData, key)
    # window images.
    rawData = np.array([window.window(pic) for pic in rawData])
    # pull out the images to be used for analysis.
    if dataRange is not None:
        rawData = rawData[dataRange[0]:dataRange[-1]]
        key = key[dataRange[0]:dataRange[-1]]
    # final normalized data
    normData = rawData #/ accumulations, I don't see why the img is further divied by variation number. -ZZP
    # ### -Background Analysis
    # if user just entered a number, assume that it's a file number.
    if type(bg) == int and not bg == 0:
        with exp.ExpFile() as fid:
            fid.open_hdf5(bg)
            bg = np.mean(fid.get_mako_pics(),0)
    # window the background
    if not bg.size == 1:
        bg = np.array(window.window(bg))
    dataMinusBg = np.copy(normData)
    for pic in dataMinusBg:
        pic -= bg
    # ### -Average Analysis
    # make a picture which is an average of all pictures over the run.
    avgPic = 0
    for pic in normData:
        avgPic += pic
    avgPic /= len(normData)
    dataMinusAvg = np.copy(normData)
    for pic in dataMinusAvg:
        pic -= avgPic
    # it's important and consequential that the zeroing here is done after the background / corner is subtracted.
    if zeroCorners:
        # zero each picture by its own four-corner average.
        for pic in dataMinusBg:
            cornerAvg = (pic[0, 0] + pic[0, -1] + pic[-1, 0] + pic[-1, -1]) / 4
            pic -= cornerAvg
        for pic in dataMinusAvg:
            cornerAvg = (pic[0, 0] + pic[0, -1] + pic[-1, 0] + pic[-1, -1]) / 4
            pic -= cornerAvg
        for pic in normData:
            cornerAvg = (pic[0, 0] + pic[0, -1] + pic[-1, 0] + pic[-1, -1]) / 4
            pic -= cornerAvg
    return key, normData, dataMinusBg, dataMinusAvg, avgPic
def unpackAtomLocations(locs, avgPic=None):
    """
    Expand a compact atom-location specification into a list of coordinates.

    :param locs: either an already-unpacked list of coordinates, an integer
        number of locations (used with avgPic), or a grid specification
        [bottomLeftRow, bottomLeftColumn, spacing, width, height].
    :param avgPic: an average picture; when locs is an integer, the locs
        brightest pixels of avgPic are taken as the atom locations.
    :return: a list of the coordinates of each tweezer in the image (or the
        bare integer when locs is an int and no avgPic is given).
    """
    if isinstance(locs, int):
        # look for the `locs` brightest spots in the average picture and assume
        # that's where the atoms are. Note that this can mess up the ordering of
        # different locations. BUGFIX: this branch was previously dead code --
        # the function returned the bare integer before ever reaching it.
        if avgPic is not None:
            res = np.unravel_index(avgPic.flatten().argsort()[-locs:][::-1], avgPic.shape)
            return [x for x in zip(res[0], res[1])]
        return locs
    if not (type(locs[0]) == int):  # already unpacked
        return locs
    # assume atom grid format.
    bottomLeftRow, bottomLeftColumn, spacing, width, height = locs
    locArray = []
    for widthInc in range(width):
        for heightInc in range(height):
            locArray.append([bottomLeftRow + spacing * heightInc, bottomLeftColumn + spacing * widthInc])
    return locArray
def sliceMultidimensionalData(dimSlice, origKey, rawData, varyingDim=None):
    """
    Slice a multi-dimensional scan down to its single varying dimension.

    :param dimSlice: per-dimension values to slice at, with None marking the
        free (varying) dimension, e.g. [80, None]. None skips slicing entirely.
    :param origKey: the original (possibly multi-dimensional) key.
    :param rawData: the data, one entry per key value.
    :param varyingDim: index of the varying dimension, if already known.
    :return: (key, rawData, otherDimValues, varyingDim) with a 1D key, the data
        filtered to the matching slices, and otherDimValues holding a label
        string of the fixed-dimension values for each remaining key entry.
    """
    key = origKey[:]
    if dimSlice is not None:
        # filter the entries down to those matching every sliced dimension value.
        runningKey = key[:]
        runningData = rawData[:]
        for dimnum, dimSpec in enumerate(dimSlice):
            if dimSpec is None:
                varyingDim = dimnum
                continue
            tempKey = []
            tempData = []
            for elemnum, elem in enumerate(misc.transpose(runningKey)[dimnum]):
                # float-tolerant equality against the requested slice value.
                if abs(elem - dimSpec) < 1e-6:
                    tempKey.append(runningKey[elemnum])
                    tempData.append(runningData[elemnum])
            runningKey = tempKey[:]
            runningData = tempData[:]
        key = runningKey[:]
        rawData = runningData[:]
    otherDimValues = None
    if varyingDim is not None:
        # build a "fixed values" label for each remaining key entry.
        # (BUGFIX: removed a leftover debug print of the key here.)
        otherDimValues = []
        for keyVal in key:
            otherDimValues.append('')
            for valNum, dimVal in enumerate(keyVal):
                if not valNum == varyingDim:
                    otherDimValues[-1] += str(dimVal) + ","
    if dimSlice is not None:
        key = arr(misc.transpose(key)[varyingDim])
    if varyingDim is None and len(arr(key).shape) > 1:
        key = arr(misc.transpose(key)[0])
    return arr(key), arr(rawData), otherDimValues, varyingDim
def getNetLoss(pic1Atoms, pic2Atoms):
    """
    Calculates the net loss fraction for every experiment. Assumes 2 pics per
    experiment. Useful for experiments where atoms move around, e.g. rearranging.

    :param pic1Atoms: per-location atom presence in the first picture.
    :param pic2Atoms: per-location atom presence in the second picture.
    :return: list with one net-loss fraction per experiment (0 when nothing
        was loaded).
    """
    netLoss = []
    # (the original enumerate index was unused; iterate the pairs directly.)
    for atoms1, atoms2 in zip(misc.transpose(pic1Atoms), misc.transpose(pic2Atoms)):
        loadNum = float(sum(bool(atom) for atom in atoms1))
        finNum = float(sum(bool(atom) for atom in atoms2))
        if loadNum == 0:
            netLoss.append(0)
        else:
            netLoss.append(1 - finNum / loadNum)
    return netLoss
def getAtomInPictureStatistics(atomsInPicData, reps):
    """
    Per-variation presence statistics for each atom location.

    Assumes atomsInPicData is 2D: atomsInPicData[0, :] holds all of the atom
    events for the first location, atomsInPicData[1, :] for the second, etc.
    :param reps: number of repetitions per variation.
    :return: one {'avg': [...], 'err': [...]} dict per location, with an
        average and a standard-error entry per variation.
    """
    stats = []
    for locationEvents in atomsInPicData:
        perVariation = arr(locationEvents).reshape([int(len(locationEvents) / reps), reps])
        stats.append({'avg': [np.average(varData) for varData in perVariation],
                      'err': [np.std(varData) / np.sqrt(len(varData)) for varData in perVariation]})
    return stats
def getEnhancement(loadAtoms, assemblyAtoms, normalized=False):
    """
    Number of atoms added to the assembly in each experiment; another measure
    of how well the rearranging is working.

    :param normalized: if True, divide each entry by the number of locations.
    """
    enhancement = []
    for loaded, assembled in zip(misc.transpose(loadAtoms), misc.transpose(assemblyAtoms)):
        gain = sum(assembled) - sum(loaded)
        if normalized:
            gain /= len(assembled)
        enhancement.append(gain)
    return enhancement
def getEnsembleStatistics(ensembleData, reps):
    """
    Statistics on the designated ensemble's per-picture "hit" record.

    ensembleData is a list of hits of the designated ensemble of atoms in each
    picture, grouped by variation (reps pictures per variation). Entries of -1
    are treated as invalid and excluded.
    :return: {'avg': ..., 'err': ...} with one entry per variation.
    """
    averages = np.array([])
    errors = np.array([])
    for varInc in range(int(len(ensembleData) / reps)):
        # collect only this variation's valid (non -1) entries.
        varHits = np.array([hit for hit in ensembleData[varInc * reps:(varInc + 1) * reps] if hit != -1])
        if varHits.size == 0:
            # no relevant data at all, typically if the laser became unlocked.
            errors = np.append(errors, [0])
            averages = np.append(averages, [0])
        else:
            errors = np.append(errors, np.std(varHits) / np.sqrt(varHits.size))
            averages = np.append(averages, np.average(varHits))
    return {'avg': averages, 'err': errors}
| [
834,
9641,
834,
796,
366,
16,
13,
19,
1,
198,
198,
11748,
269,
21370,
198,
11748,
28686,
1303,
329,
3951,
538,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
7177,
355,
5240,
1... | 2.353915 | 22,463 |
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.Results.BeamParameters
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
MCXRay beam parameters from results file.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion information for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules
# Globals and constants variables.
KEY_BEAM_PARAMETERS = "Beam Parameters"
KEY_INCIDENT_ENERGY_keV = "Electron incident energy"
KEY_CURRENT_A = "Beam Current"
KEY_ACQUISITION_TIME_s = "Acquisition Time"
KEY_DIAMETER_90_A = "Diameter with 90% of the electrons"
KEY_TILT_ANGLE_deg = "Tilt angle"
KEY_GAUSSIAN_MEAN = "Gaussian Mean"
KEY_GAUSSIAN_SIGMA = "Gaussian Sigma"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
37811,
201,
198,
492,
12972,
25,
14421,
21412,
3712,
9220,
26227,
13,
25468,
13,
3856,
321,
48944,
201,
198,
492,
8265,
9800,
3712,
14666,
8609,
1897,
364,
1279,
15631,
8609,
13,
... | 2.574359 | 390 |
"""
The purpose of this code is to create the raw directory folder and include the following files
starting protein receptor
starting ligand
target ligand
glide pose viewer file
It can be run on sherlock using
/home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python add_basic_files.py create /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data
/home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python add_basic_files.py check /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data --type prot
/home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python add_basic_files.py check /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data --type lig
/home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python add_basic_files.py check /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /oak/stanford/groups/rondror/projects/ligand-docking/pdbbind_2019/data --type pv
/home/groups/rondror/software/sidhikab/miniconda/envs/test_env/bin/python add_basic_files.py MAPK14 /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/bpp_data/MAPK14/structures
"""
import argparse
import os
N = 25 #number of files in each group
def get_prots(docked_prot_file):
    """
    gets list of all protein, target ligands, and starting ligands in the index file
    :param docked_prot_file: (string) file listing proteins to process
    :return: process (list) list of (protein, target, start) tuples to process
    """
    process = []
    with open(docked_prot_file) as fp:
        for line in fp:
            # Skip comment lines. The original indexed line[0], which raises
            # IndexError on an empty string; startswith is safe and idiomatic.
            if line.startswith('#'):
                continue
            fields = line.split()
            # Skip blank/whitespace-only lines, which previously crashed the
            # 3-way unpacking below with a ValueError.
            if not fields:
                continue
            protein, target, start = fields
            process.append((protein, target, start))
    return process
def group_files(n, process):
    """
    groups pairs into sublists of size n
    :param n: (int) sublist size
    :param process: (list) list of pairs to process
    :return: grouped_files (list) list of sublists of pairs; the final sublist
             may be shorter than n when len(process) is not a multiple of n
    """
    return [process[offset:offset + n] for offset in range(0, len(process), n)]
# NOTE(review): main() is not defined anywhere in this chunk — presumably
# stripped during extraction; verify against the full source file.
if __name__=="__main__":
    main()
| [
37811,
198,
464,
4007,
286,
428,
2438,
318,
284,
2251,
262,
8246,
8619,
9483,
290,
2291,
262,
1708,
3696,
198,
38690,
7532,
17408,
198,
38690,
26106,
392,
198,
16793,
26106,
392,
198,
4743,
485,
12705,
19091,
2393,
198,
198,
1026,
460,
... | 2.706608 | 1,135 |
'''OpenGL extension EXT.separate_specular_color
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_separate_specular_color'
_DEPRECATED = False
GL_LIGHT_MODEL_COLOR_CONTROL_EXT = constant.Constant( 'GL_LIGHT_MODEL_COLOR_CONTROL_EXT', 0x81F8 )
glget.addGLGetConstant( GL_LIGHT_MODEL_COLOR_CONTROL_EXT, (1,) )
GL_SINGLE_COLOR_EXT = constant.Constant( 'GL_SINGLE_COLOR_EXT', 0x81F9 )
GL_SEPARATE_SPECULAR_COLOR_EXT = constant.Constant( 'GL_SEPARATE_SPECULAR_COLOR_EXT', 0x81FA )
def glInitSeparateSpecularColorEXT():
    '''Tell whether the GL_EXT_separate_specular_color extension is available.

    Returns a boolean from the runtime extension registry.
    '''
    available = extensions.hasGLExtension( EXTENSION_NAME )
    return available
| [
7061,
6,
11505,
8763,
7552,
27489,
13,
25512,
378,
62,
4125,
10440,
62,
8043,
198,
198,
38062,
4142,
7560,
416,
262,
651,
62,
4743,
62,
2302,
5736,
4226,
11,
466,
407,
4370,
0,
198,
7061,
6,
198,
6738,
30672,
1330,
3859,
11,
38491,
... | 2.880702 | 285 |
#!/usr/bin/env python
@dxpy.entry_point('main')
dxpy.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
31,
34350,
9078,
13,
13000,
62,
4122,
10786,
12417,
11537,
198,
198,
34350,
9078,
13,
5143,
3419,
198
] | 2.178571 | 28 |
"""
Data Visualization Project
Parse data from an ugly CSV or Excel file, and render it in
JSON, save to a database, and visualize in graph form.
Part I: Taking data from a CSV/Excel file, and return it into a format
that is easier for Python to play with.
Copyright (c) 2013 E. Lynn Root
Distributed under the zlib png license. See LICENSE for details.
"""
import csv
import json
MY_FILE = "../data/sample_sfpd_incident_all.csv"
def parse(raw_file, delimiter):
    """Parse a raw CSV file into a list of row dicts keyed by the header row.

    :param raw_file: path to the CSV file; the first row must be the header
    :param delimiter: single-character field delimiter
    :return: list of dicts mapping header field name -> cell value,
             one dict per data row
    """
    # Open and Read CSV file
    with open(raw_file) as opened_file:
        csv_data = csv.reader(opened_file, delimiter=delimiter)
        # next() is the idiomatic way to advance an iterator; the original
        # called the __next__ dunder directly.
        fields = next(csv_data)
        parsed_data = [dict(zip(fields, row)) for row in csv_data]
    return parsed_data
def save_to_json(data, filename, indent=4):
    """Write *data* to *filename* as JSON, pretty-printed with *indent* spaces."""
    serialized = json.dumps(data, indent=indent)
    with open(filename, 'w') as json_file:
        json_file.write(serialized)
if __name__ == "__main__":
main()
| [
37811,
198,
6601,
15612,
1634,
4935,
198,
198,
10044,
325,
1366,
422,
281,
13400,
44189,
393,
24134,
2393,
11,
290,
8543,
340,
287,
198,
40386,
11,
3613,
284,
257,
6831,
11,
290,
38350,
287,
4823,
1296,
13,
198,
198,
7841,
314,
25,
... | 2.792818 | 362 |
# Build and print a short self-introduction from the person's details.
name = 'Melek'
surname = 'duran'
age = '21'
greeting = f"My name is {name} {surname} and \nI am {age} years old."
print(greeting)
| [
3672,
796,
705,
5308,
293,
74,
6,
198,
82,
700,
480,
796,
705,
67,
42211,
6,
198,
496,
796,
705,
2481,
6,
198,
198,
70,
2871,
278,
796,
705,
3666,
1438,
318,
705,
10,
1438,
1343,
705,
705,
10,
40358,
1343,
705,
290,
3467,
77,
... | 2.256757 | 74 |
from fastapi import APIRouter, File, Security, UploadFile
from fastapi import HTTPException, status
from fastapi import Depends
from fastapi.security import APIKeyHeader
import time
from datetime import datetime
from main_processes.frontend.routers.api.v2.pipelines.root import _register_pipeline
from main_processes.frontend.utils import (
async_download_to_file,
download_from_direct_link,
get_db,
get_storage,
)
from nxs_libs.storage.nxs_blobstore import NxsAzureBlobStorage
from nxs_libs.storage.nxs_blobstore_async import NxsAsyncAzureBlobStorage
from nxs_utils.nxs_helper import *
from nxs_utils.common import *
from nxs_types.model import *
from configs import *
from main_processes.frontend.args import parse_args
# Frontend process configuration parsed from the command line.
args = parse_args()
# All model-management endpoints in this module are mounted under /models.
router = APIRouter(prefix="/models")
# Clients authenticate by sending their key in the X-API-Key request header;
# auto_error=True makes FastAPI reject requests missing the header.
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=True)
# FIXME: find a better way to do x-api-key check
@router.get("/public", response_model=List[NxsPipelineDescription])
@router.post("/register", response_model=NxsModelRegistrationResponse)
"""
@router.post("/register-from-files")
async def infer_from_file(
registering_model: NxsBaseModel,
model: UploadFile = File(...),
preprocessing: UploadFile = File(...),
postprocessing: UploadFile = File(...),
transforming: Optional[UploadFile] = File(...),
authenticated: bool = Depends(check_api_key),
):
model_uuid = generate_uuid()
model_tmp_dir = os.path.join(args.tmp_dir, model_uuid)
delete_and_create_dir(model_tmp_dir)
if not registering_model.profile:
raise HTTPException(status.HTTP_400_BAD_REQUEST, "Missing profile.")
has_bs1 = False
for profile_unit in registering_model.profile:
if profile_unit.batch_size == 1:
has_bs1 = True
break
if not has_bs1:
raise HTTPException(
status.HTTP_400_BAD_REQUEST, "Missing profile for batch size of 1."
)
preproc_dir = os.path.join(model_tmp_dir, "preproc")
create_dir_if_needed(preproc_dir)
preproc_path = os.path.join(preproc_dir, f"{model_uuid}.py")
with open(preproc_path, "wb") as f:
data = await preprocessing.read()
f.write(data)
postproc_dir = os.path.join(model_tmp_dir, "postproc")
create_dir_if_needed(postproc_dir)
postproc_path = os.path.join(postproc_dir, f"{model_uuid}.py")
with open(postproc_path, "wb") as f:
data = await postprocessing.read()
f.write(data)
transform_path = ""
if transforming:
transform_dir = os.path.join(model_tmp_dir, "transform")
create_dir_if_needed(transform_dir)
transform_path = os.path.join(transform_dir, f"{model_uuid}.py")
with open(transform_path, "wb") as f:
data = await transforming.read()
f.write(data)
model_path = os.path.join(model_tmp_dir, f"{model_uuid}")
with open(model_path, "wb") as f:
data = await model.read()
f.write(data)
await _register_model(
model_uuid,
model_path,
preproc_path,
postproc_path,
transform_path,
registering_model,
)
return NxsModelRegistrationResponse(model_uuid=model_uuid)
"""
@router.post("/register-w4-model", response_model=NxsPipelineRegistrationResponse)
| [
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
9220,
11,
4765,
11,
36803,
8979,
198,
6738,
3049,
15042,
1330,
14626,
16922,
11,
3722,
198,
6738,
3049,
15042,
1330,
2129,
2412,
198,
6738,
3049,
15042,
13,
12961,
1330,
7824,
9218,
39681,
... | 2.501893 | 1,321 |
from ngenix_test.processor.dump import DumpProcessor
from ngenix_test.processor.report import ReportProcessor
# NOTE(review): `Factory` is neither imported nor defined in this chunk — the
# class definition was presumably stripped during extraction; verify against
# the full source, otherwise the line below raises NameError at import time.
__all__ = ['Factory']

factory = Factory()
| [
6738,
299,
5235,
844,
62,
9288,
13,
41341,
13,
39455,
1330,
360,
931,
18709,
273,
198,
6738,
299,
5235,
844,
62,
9288,
13,
41341,
13,
13116,
1330,
6358,
18709,
273,
198,
198,
834,
439,
834,
796,
37250,
22810,
20520,
628,
198,
69,
95... | 3.297872 | 47 |
import json
from rest_framework import serializers
from . import models
MODIFIABLE_READ_ONLY_FIELDS = ['modified', 'created']
TASK_EVENT_READ_ONLY_FIELDS = ['status', 'failure_reason']
SPATIAL_ENTRY_EXCLUDE = ['footprint', 'outline']
class RelatedField(serializers.PrimaryKeyRelatedField):
    """Handle GET/POST in a single field.

    Reference: https://stackoverflow.com/a/52246232
    """

    # NOTE(review): no overridden methods are visible here — the class body
    # appears to have been stripped during extraction; verify against the
    # full source.
class ChecksumFileSerializer(serializers.ModelSerializer):
    """Serializer for ChecksumFiles.

    On POST, this can only handle URL files.
    """

    # NOTE(review): no Meta class or fields are visible here — the class body
    # appears to have been stripped during extraction; verify against the
    # full source.
| [
11748,
33918,
198,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
764,
1330,
4981,
198,
198,
33365,
5064,
3539,
19146,
62,
15675,
62,
1340,
11319,
62,
11674,
3698,
5258,
796,
37250,
41771,
3256,
705,
25598,
20520,
198,
... | 2.952128 | 188 |
# Libraries and classes
from gpiozero import LED, Button
from gpiozero.pins.pigpio import PiGPIOFactory
from time import sleep

# GPIO pin of the status LED
pin_status = 17
# GPIO pin of the push button
pin_button = 25

status = True
led_blink = False

my_factory = PiGPIOFactory()
# Initialise the LED on GPIO pin 17
led = LED(pin_status, pin_factory = my_factory)
# Initialise the button on GPIO pin 25; holding it for 2 s fires when_held
schalter = Button(pin_button, pull_up = True, hold_time = 2, pin_factory = my_factory)

# NOTE(review): led_function and stop_function are not defined in this chunk —
# presumably stripped during extraction; verify against the full source.
schalter.when_pressed = led_function
schalter.when_held = stop_function

# Try/catch block
try:
    # Main loop
    while status:
        if led_blink:
            led.toggle()
            sleep(1)
        else:
            # BUG FIX: the original read the attribute `led.on` without
            # calling it, which is a no-op — the LED was never switched on.
            led.on()

# Catch Ctrl-C so the program exits with a message instead of a traceback
except KeyboardInterrupt:
    print("Programm abgebrochen.")
| [
2,
347,
29142,
20388,
3464,
3318,
14770,
562,
268,
201,
198,
6738,
27809,
952,
22570,
1330,
12365,
11,
20969,
201,
198,
6738,
27809,
952,
22570,
13,
49556,
13,
79,
328,
79,
952,
1330,
13993,
16960,
9399,
22810,
201,
198,
6738,
640,
13... | 2.377841 | 352 |
import os
import sys
import time
import shutil
import argparse
import contextlib
import subprocess
import collections
dirname = os.path.dirname(__file__)
repodir = os.path.dirname(dirname)
# Target install location for released rez packages; must be set in the
# environment or this raises KeyError at import time (fail fast by design).
packages_path = os.environ["REZ_RELEASE_PACKAGES_PATH"]

# Some packages depend on other packages
# having been built first.
order = [
    "rezutil",
    "welcome",
    "ftrack",
    "gitlab",
    "base",
    "core_pipeline",
    "maya",
    "maya_base",
]

# --verbose: extra build output; --clean: wipe existing packages first.
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--clean", action="store_true")
opts = parser.parse_args()
@contextlib.contextmanager
print("-" * 30)
print("")
print("Auto-building to %s.." % packages_path)
print("Using python-%s.%s" % sys.version_info[:2])
print("")
print("-" * 30)
path = os.path.join(repodir, "packages")
_, existing, _ = next(os.walk(path), ([], [], [])) # just directories
if opts.clean and existing:
with stage("Cleaning %s.. " % "packages"):
for attempt in range(3):
try:
for package in existing:
shutil.rmtree(os.path.join(path, package))
except OSError:
sys.stderr.write(" retrying..")
time.sleep(1)
continue
else:
break
count = 0
with stage("Scanning.. "):
root = os.path.join(repodir, "dev")
packages = collections.defaultdict(list)
for base, dirs, files in os.walk(root):
for fname in files:
if fname != "package.py":
continue
dirs[:] = [] # Stop traversing
abspath = os.path.join(base, fname)
with open(abspath) as f:
for line in f:
if line.startswith("name"):
name = line.split(" = ")[-1]
name = name.rstrip() # newline
name = name.replace("\"", "") # quotes
if line.startswith("version"):
version = line.split(" = ")[-1]
version = version.rstrip() # newline
version = version.replace("\"", "") # quotes
packages[name] += [{
"name": name,
"base": base,
"version": version,
"abspath": abspath,
}]
# Order relevant packages by above criteria
with stage("Sorting.. "):
sorted_packages = []
for name in order:
sorted_packages += packages.pop(name)
# Add remainder
for _, package in packages.items():
sorted_packages += package
with stage("Establishing layout.. "):
for subdir in ("proj",
"app",
"td",
"int",
"ext",
):
try:
os.makedirs(os.path.join(packages_path, subdir))
except OSError:
pass
with stage("Building.. "):
for package in sorted_packages:
if exists(package["name"]):
continue
print(" - {name}-{version}".format(**package))
call("rez build --clean --install --release", cwd=package["base"])
count += 1
print("-" * 30)
if not count:
print("Already up-to-date, use --clean to start fresh")
else:
print("Auto-built %d packages for you" % count)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
4423,
346,
198,
11748,
1822,
29572,
198,
11748,
4732,
8019,
198,
11748,
850,
14681,
198,
11748,
17268,
198,
198,
15908,
3672,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7... | 2.069453 | 1,627 |
# YouTube restructure
import os, time
import xlsxwriter
from tqdm import tqdm
# Read the AudioSet segment list; each data line looks like
#   <id>, <start>, <end>, "class1,class2,..."
# The original leaked the file handle; use a context manager instead.
with open('unbalanced_train_segments.csv') as segments_file:
    h = segments_file.read().split('\n')

ids = list()
starts = list()
ends = list()
classes = list()
for i in tqdm(range(len(h)), desc='reading data'):
    try:
        temp = h[i].split(', ')
        id_ = temp[0]
        start = temp[1]
        end = temp[2]
        class_ = temp[3].split(',')
        # Emit one row per (segment, class) pair.
        for j in range(len(class_)):
            ids.append(id_)
            starts.append(start)
            ends.append(end)
            classes.append(class_[j].replace('"', ''))
    except IndexError:
        # Header/comment/blank lines do not have four ", "-separated fields;
        # skip them. (The original used a bare `except`, which also silently
        # hid real programming errors.)
        pass

# now write everything out as a spreadsheet
workbook = xlsxwriter.Workbook('unbalanced_train_segments.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Ids')
worksheet.write('B1', 'Start')
worksheet.write('C1', 'End')
worksheet.write('D1', 'Class')
for i in tqdm(range(len(ids)), desc='writing excelsheet'):
    # Row 1 holds the header, so data rows start at 2.
    row = str(i + 2)
    worksheet.write('A%s' % row, ids[i])
    worksheet.write('B%s' % row, starts[i])
    worksheet.write('C%s' % row, ends[i])
    worksheet.write('D%s' % row, classes[i])
workbook.close()
198,
2,
7444,
1334,
5620,
220,
198,
11748,
28686,
11,
640,
198,
11748,
2124,
7278,
87,
16002,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
70,
28,
9654,
10786,
403,
27753,
62,
27432,
62,
325,
11726,
13,
40664,
27691,
9... | 2.231827 | 509 |
# Generated by Django 3.0.6 on 2020-06-01 20:52
import django.core.files.storage
from django.db import migrations, models
import src.contrib.validators
import src.apps.trainings.models.network
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
21,
319,
12131,
12,
3312,
12,
486,
1160,
25,
4309,
198,
198,
11748,
42625,
14208,
13,
7295,
13,
16624,
13,
35350,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
... | 3.095238 | 63 |
"""Command-line interface to correctionlib
"""
import argparse
import sys
from rich.console import Console
import correctionlib.version
from correctionlib.highlevel import model_auto, open_auto
def validate(console: Console, args: argparse.Namespace) -> int:
    """Return 0 when every file parses as a valid correction set, 1 otherwise.

    Honors args.quiet (suppress output), args.version (required schema
    version) and args.failfast (stop at the first invalid file).
    """
    exit_code = 0
    for path in args.files:
        try:
            if not args.quiet:
                console.rule(f"[blue]Validating file {path}")
            cset = model_auto(open_auto(path))
            if args.version and cset.schema_version != args.version:
                raise ValueError(
                    f"Schema version {cset.schema_version} does not match the required version {args.version}"
                )
        except Exception as err:
            exit_code = 1
            if not args.quiet:
                console.print(str(err))
            if args.failfast:
                break
        else:
            # Reached only when the file parsed and the version matched.
            if not args.quiet:
                console.print("[green]All OK :heavy_check_mark:")
    return exit_code
# NOTE(review): main() is not defined in this chunk — presumably stripped
# during extraction; verify against the full source file.
if __name__ == "__main__":
    exit(main())
| [
37811,
21575,
12,
1370,
7071,
284,
17137,
8019,
198,
198,
37811,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
198,
6738,
5527,
13,
41947,
1330,
24371,
198,
198,
11748,
17137,
8019,
13,
9641,
198,
6738,
17137,
8019,
13,
8929,
5715,
1... | 2.219067 | 493 |
#!/usr/bin/env python
# Copyright (C) 2013 Jive Software. All rights reserved.
import logging
import mock
import os
import StringIO
import sys
import unittest
from platform_cli import config, protected_file_path
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
34,
8,
2211,
449,
425,
10442,
13,
1439,
2489,
10395,
13,
198,
198,
11748,
18931,
198,
11748,
15290,
198,
11748,
28686,
198,
11748,
10903,
9399,
198,
11748,
25064,
198,
1... | 3.214286 | 70 |
import os
import re
from pyspark import SparkContext
from pyspark.rdd import PipelinedRDD
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, IntegerType
if __name__ == "__main__":
    # Build an inverted index over the 20-newsgroups corpus:
    # word -> (total occurrence count, list of documents containing it).
    spark = SparkSession.builder.appName("Inverted index").getOrCreate()
    sc: SparkContext = spark.sparkContext
    # Matches RFC-822 style message headers such as "Subject: ..."
    header_re = re.compile(r"[\w-]+:.+")
    # A word is a run of two or more letters/apostrophes.
    word_re = re.compile(r"([a-zA-Z']{2,})")
    # NOTE(review): remove_headers is not defined in this chunk — presumably
    # stripped during extraction; verify against the full source.
    index: PipelinedRDD = sc.wholeTextFiles("./data/20_newsgroup/*/*").cache() \
        .map(lambda x: (os.path.split(x[0])[1], x[1])) \
        .flatMap(
        lambda x: [(word, (1, [x[0]])) for word in remove_headers(x[1])]) \
        .reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))
    # One CSV-ish line per word: word,count,doc1 doc2 ...
    index.map(lambda x: f"{x[0]},{x[1][0]},{' '.join(x[1][1])}").saveAsTextFile("./out/output.txt")
| [
11748,
28686,
198,
11748,
302,
198,
198,
6738,
279,
893,
20928,
1330,
17732,
21947,
198,
6738,
279,
893,
20928,
13,
81,
1860,
1330,
25149,
417,
1389,
49,
16458,
198,
6738,
279,
893,
20928,
13,
25410,
1330,
17732,
36044,
11,
6060,
19778,... | 2.294906 | 373 |
# Scan data.txt for runs shaped xXXXxXXXx (lower, 3 upper, lower, 3 upper,
# lower) and collect the middle lowercase character of each run.
# Fixes vs the original: the file is opened read-only via a context manager
# (it was opened "r+" and never closed), the text is read in one call instead
# of quadratic string concatenation, and the loop stops 7 characters before
# the end so s[i+7] can never raise IndexError.
with open("data.txt") as fp:
    s = fp.read()

fin = ""
ans = ""
for i in range(1, len(s) - 7):
    if (s[i-1].islower() and s[i].isupper() and s[i+1].isupper()
            and s[i+2].isupper() and s[i+3].islower() and s[i+4].isupper()
            and s[i+5].isupper() and s[i+6].isupper() and s[i+7].islower()):
        ans = s[i:i+7]
        fin += s[i+3]
print(fin)
# fp = open("data.txt","r+")
# s = ""
# for i in fp:
# s += i
# uni = set()
# from collections import Counter
# dd = Counter(s)
# ans = ""
# for i in s:
# if dd[i]==1:
# ans += i
# uni.add(i)
# print(uni)
#
# print(dd)
# print(ans)
# # eaitluqy
# # tequyila | [
46428,
796,
1280,
7203,
7890,
13,
14116,
2430,
81,
10,
4943,
198,
82,
796,
13538,
198,
1640,
1312,
287,
277,
79,
25,
198,
220,
220,
220,
264,
15853,
1312,
198,
15643,
796,
13538,
198,
504,
796,
13538,
198,
198,
1640,
1312,
287,
2837... | 1.923547 | 327 |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Salt Libs
from __future__ import absolute_import
import os
import traceback
# Import Salt Testing Libs
from salttesting.helpers import ensure_in_syspath
from salttesting.mixins import RUNTIME_VARS
ensure_in_syspath('../../')
# Import Salt libs
import integration
import salt.config
from salt.output import display_output
import salt.config
class OutputReturnTest(integration.ShellCase):
    '''
    Integration tests to ensure outputters return their expected format.
    Tests against situations where the loader might not be returning the
    right outputter even though it was explicitly requested.

    Each test runs ``test.ping`` through ``salt-call`` with an explicit
    ``--out`` option and compares the captured shell output line by line.
    '''
    def test_output_json(self):
        '''
        Tests the return of json-formatted data
        '''
        expected = ['{', '    "local": true', '}']
        ret = self.run_call('test.ping --out=json')
        self.assertEqual(ret, expected)

    def test_output_nested(self):
        '''
        Tests the return of nested-formatted data
        '''
        expected = ['local:', '    True']
        ret = self.run_call('test.ping --out=nested')
        self.assertEqual(ret, expected)

    def test_output_quiet(self):
        '''
        Tests the return of an out=quiet query
        '''
        # The quiet outputter must produce no output at all.
        expected = []
        ret = self.run_call('test.ping --out=quiet')
        self.assertEqual(ret, expected)

    def test_output_pprint(self):
        '''
        Tests the return of pprint-formatted data
        '''
        expected = ["{'local': True}"]
        ret = self.run_call('test.ping --out=pprint')
        self.assertEqual(ret, expected)

    def test_output_raw(self):
        '''
        Tests the return of raw-formatted data
        '''
        expected = ["{'local': True}"]
        ret = self.run_call('test.ping --out=raw')
        self.assertEqual(ret, expected)

    def test_output_txt(self):
        '''
        Tests the return of txt-formatted data
        '''
        expected = ['local: True']
        ret = self.run_call('test.ping --out=txt')
        self.assertEqual(ret, expected)

    def test_output_yaml(self):
        '''
        Tests the return of yaml-formatted data
        '''
        expected = ['local: true']
        ret = self.run_call('test.ping --out=yaml')
        self.assertEqual(ret, expected)

    def test_output_unicodebad(self):
        '''
        Tests outputter reliability with utf8
        '''
        # Build minion opts that redirect display_output() into a file under
        # the test tmpdir, then feed it data containing non-ASCII characters.
        opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
        opts['output_file'] = os.path.join(
            integration.SYS_TMP_DIR,
            'salt-tests-tmpdir',
            'outputtest'
        )
        data = {'foo': {'result': False,
                        'aaa': 'azerzaeréééé',
                        'comment': u'ééééàààà'}}
        try:
            # this should not raise UnicodeEncodeError
            display_output(data, opts=opts)
            self.assertTrue(True)
        except Exception:
            # display trace in error message for debugging on jenkins
            trace = traceback.format_exc()
            self.assertEqual(trace, '')
if __name__ == '__main__':
from integration import run_tests
run_tests(OutputReturnTest)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
220,
220,
220,
1058,
8189,
9800,
25,
1058,
12888,
25,
63,
30403,
2305,
5658,
1279,
6988,
2305,
31,
82,
2501,
25558,
13,
785,
29,
63,
198,
7061,
6,
198,... | 2.24726 | 1,460 |
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.dotdiff import dotdiff
from ansible.module_utils.kong.consumer import KongConsumer
from ansible.module_utils.kong.helpers import *
DOCUMENTATION = '''
---
module: kong_consumer
short_description: Configure a Kong Consumer object.
'''
EXAMPLES = '''
Setting custom_id's on Consumers is currently not supported;
their usefulness is limited, and they require more lookups (round-trips)
for actions that require either a username or the consumer's UUID.
- name: Configure a Consumer
kong_consumer:
kong_admin_uri: http://localhost:8001
username: apiconsumer
state: present
- name: Configure a list of Consumers
kong_consumer:
kong_admin_uri: http://localhost:8001
username:
- one
- two
- three
- apiconsumers
state: present
- name: Delete a Consumer
kong_consumer:
kong_admin_uri: http://localhost:8001
username: apiconsumer
state: absent
'''
# NOTE(review): main() is not defined in this chunk — presumably stripped
# during extraction; verify against the full source file.
if __name__ == '__main__':
    main()
6738,
9093,
856,
13,
21412,
62,
26791,
13,
35487,
1330,
28038,
856,
26796,
198,
6738,
9093,
856,
13,
21412,
62,
26791,
13,
26518,
26069,
1330,
16605,
26069,
198,
6738,
9093,
856,
13,
21412,
62,
26791,
13,
74,
506,
13,
49827,
1330,
907... | 2.95977 | 348 |
import numpy as np
import re
import astunparse
import ast
import sys
import utils as u
import pdb
# Patterns matching names tagged with 1–3 numeric indices followed by a
# "<...>" suffix, e.g. "foo_3_<...>", "foo_3_4_<...>", "foo_3_4_5_<...>".
# Raw strings are used so "\d"/"\w" are regex escapes, not (invalid) string
# escapes — the previous non-raw literals trigger DeprecationWarnings on
# modern Python.
one_index_re = re.compile(r"[a-zA-Z0-9]+_(\d+)_<.+")
two_index_re = re.compile(r"[a-zA-Z0-9]+_(\d+)_(\d+)_<.+")
three_index_re = re.compile(r"[a-zA-Z0-9]+_(\d+)_(\d+)_(\d+)_<.+")

# Same index tags without the "<...>" suffix, anchored at end of string;
# the trailing underscore is optional.
one_index_re2 = re.compile(r"[a-zA-Z0-9]+_(\d+)_?$")
two_index_re2 = re.compile(r"[a-zA-Z0-9]+_(\d+)_(\d+)_?$")
three_index_re2 = re.compile(r"[a-zA-Z0-9]+_(\d+)_(\d+)_(\d+)_?$")

# Names carrying an "SH" (shared) suffix.
shared_index_re2 = re.compile(r"\w+SH")
| [
11748,
299,
32152,
355,
45941,
198,
11748,
302,
198,
11748,
6468,
403,
29572,
198,
11748,
6468,
198,
11748,
25064,
198,
11748,
3384,
4487,
355,
334,
198,
11748,
279,
9945,
198,
198,
505,
62,
9630,
62,
260,
796,
302,
13,
5589,
576,
720... | 1.851852 | 270 |
"""
Scrapybox crawler settings
"""
BOT_NAME = 'Scrapybox'
SPIDER_MODULES = ['scrapybox.crawler.spiders']
NEWSPIDER_MODULE = 'scrapybox.crawler.spiders'
USER_AGENT = 'Mozilla/4.0 Scrapybox (Linux x86_64) Scrapy/1.2.0dev2 Python/3.5.0+'
ROBOTSTXT_OBEY = True
CONCURRENT_REQUESTS = 1
HTTPCACHE_ENABLED = True
LOG_STDOUT = True
# DOWNLOAD_DELAY = 3
# COOKIES_ENABLED = False
| [
37811,
198,
3351,
2416,
88,
3524,
27784,
1754,
6460,
198,
37811,
198,
33,
2394,
62,
20608,
796,
705,
3351,
2416,
88,
3524,
6,
198,
198,
4303,
41237,
62,
33365,
6239,
1546,
796,
37250,
1416,
2416,
88,
3524,
13,
66,
39464,
13,
2777,
4... | 2.135593 | 177 |
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
tool to store its hinting source data.
TSI0 is the index table containing the lengths and offsets for the glyph
programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained
in the TSI1 table.
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from . import DefaultTable
import struct
# struct format: big-endian ('>'), two unsigned 16-bit values and one
# unsigned 32-bit value per TSI0 record — presumably glyph index, length and
# offset of the corresponding TSI1 entry (see module docstring); TODO confirm
# against the VTT TSI0 specification.
tsi0Format = '>HHL'
| [
37811,
309,
11584,
90,
15,
11,
16,
11,
17,
11,
18,
11,
20,
92,
389,
2839,
8893,
973,
416,
5413,
15612,
6407,
6030,
357,
53,
15751,
8,
198,
25981,
284,
3650,
663,
9254,
278,
2723,
1366,
13,
198,
198,
4694,
40,
15,
318,
262,
6376,... | 3.330882 | 136 |
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
710,
394,
32289,
1330,
3169,
12423,
46445,
32289,
628
] | 3.346154 | 26 |
##usiing classes
#to use a class a need to create a object
# NOTE(review): the `test` class is not defined in this chunk — presumably
# stripped during extraction — so these calls cannot run as shown; verify
# against the full source.
primo = test()
secondo = test()
terzo = test()

primo.name_call('luca')
secondo.name_call('anakin')
terzo.name_call('buddy')

primo.introd()
secondo.introd()
terzo.introd()

# Python 2 print statement — this fragment predates Python 3.
print primo.display()
| [
198,
198,
2235,
385,
72,
278,
6097,
198,
2,
1462,
220,
779,
257,
1398,
257,
761,
284,
2251,
257,
2134,
628,
198,
19795,
78,
796,
1332,
3419,
198,
12227,
78,
796,
1332,
3419,
198,
353,
10872,
796,
1332,
3419,
198,
198,
19795,
78,
1... | 2.514563 | 103 |
import gym
import os
import random
import time
import pickle
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
microtime = lambda: int(round(time.time() * 1000))
start_t = microtime()
# Store experiences
# Retrtieve a random sample of experiences
# Setup
train = True
batch_train = True
test = True
pre_train_steps = 5000
train_freq = 1
num_episodes = 1000
num_episodes_test = 100
num_steps = 100
e_start = 0.1
e_end = 0.001
#QN1 = QNetwork(16, 4, save_file="FrozenLake-v0.p", gamma=0.99, lr=0.1)
QN1 = QNetwork(16, 4, gamma=0.99, lr=0.1)
# Variables
# Register a deterministic (non-slippery) FrozenLake variant so actions
# always have their intended effect, then build and monitor the environment.
from gym.envs.registration import register
register(
    id='FrozenLakeNotSlippery-v0',
    entry_point='gym.envs.toy_text:FrozenLakeEnv',
    kwargs={'map_name' : '4x4', 'is_slippery': False},
    max_episode_steps=100,
    reward_threshold=0.78, # optimum = .8196
)
env = gym.make('FrozenLakeNotSlippery-v0')
#env = gym.make("FrozenLake-v0")
# Record episode statistics/videos under tmp/; force=True overwrites old runs.
env = gym.wrappers.Monitor(env, "tmp/FrozenLake-0.1", force=True)
exp_buf = ExperienceBuffer(1000)
e_factor = ((e_start - e_end) / (num_episodes * (num_steps / 5)))
e = e_start
bench = [[], [], [], [], []]
# Add an operation to initialize global variables.
init_op = tf.global_variables_initializer()
# Training
with tf.Session() as sess:
sess.run(init_op)
if train == True:
print("Training started\n")
batch_training_started = False
total_batch_trained = 0
all_rewards = []
all_steps = []
total_steps = 0
for episode in range(num_episodes):
if episode % 100 == 0 and episode != 0:
t = microtime()
W_val = sess.run(QN1.W)
QN1.save(W_val)
print("Episodes %04d - %04d: %i succeeded, %.2f avg steps/episode, e=%.4f" % (
episode - 100,
episode,
sum(all_rewards[-100:]),
np.mean(all_steps[-100:]),
e
)
)
bench[0].append((microtime() - t))
# Reset episode-specific parameters
state = env.reset()
steps = 0
episode_reward = 0
done = False
# Do steps in the game
while steps <= num_steps:
if done == True:
#if reward==1:
#print("***Goal Reached***")
#time.sleep(3)
break
t = microtime()
# An e chance of randomly selection an action
if (np.random.rand(1) < e) or (total_steps < pre_train_steps):
act = env.action_space.sample()
else:
# Obtain the best action and current Q_values for this state
act = sess.run(QN1.best_action, feed_dict={
QN1.input_state: [state]
})
act = act[0]
bench[1].append((microtime() - t))
# Advance a state
t = microtime()
new_state, reward, done, _ = env.step(act)
bench[2].append((microtime() - t))
#My addition - Render the environment
#env.render()
# Decrease the random % for every action
e -= e_factor
if e < e_end:
e = e_end
# Store this experience
exp_buf.add((state, act, reward, new_state, done))
# Train from memory
if (batch_train == True) and (total_steps > pre_train_steps) and ((total_steps % train_freq) == 0):
if batch_training_started == False:
batch_training_started = True
print("Batch training started")
training_batch = exp_buf.sample(16)
t = microtime()
batch_new_Qs = sess.run(QN1.Q, feed_dict={
QN1.input_state: training_batch[:,3]
}) # Q(s', a')
batch_curr_Qs = sess.run(QN1.Q, feed_dict={
QN1.input_state: training_batch[:,0]
}) # Q(s, a)
bench[3].append((microtime() - t))
# Best possible outcome of the new states (per state)
new_Qs_max = np.max(batch_new_Qs, 1) # max a' for Q(s', a')
target_Qs = batch_curr_Qs.copy()
for i, experience in enumerate(training_batch):
s, a, r, ss, d = experience # s a r s' d
target_Qs[i][int(a)] = r + QN1.gamma * new_Qs_max[i]
# target for a = r + y*maxa'Q(s', a')
# Train with the given state(s) and target_Qs
t = microtime()
sess.run(QN1.train_op, feed_dict={
QN1.input_state: training_batch[:,0],
QN1.Q_target: target_Qs
}) # train with target and s
bench[4].append((microtime() - t))
total_batch_trained += len(training_batch)
steps += 1
total_steps += 1
episode_reward += reward
state = new_state
all_rewards.append(episode_reward)
all_steps.append(steps)
W_val = sess.run(QN1.W)
QN1.save(W_val)
print("\nCompleted %i organic steps" % sum(all_steps))
print("Completed %i batch-trained steps" % total_batch_trained)
if test == True:
# Testing
print("\nTesting...")
all_rewards = []
all_steps = []
for episode in range(num_episodes_test):
# Reset episode-specific parameters
state = env.reset()
steps = 0
episode_reward = 0
done = False
# Do steps in the game
while steps <= num_steps:
if done == True:
break
act = sess.run(QN1.best_action, feed_dict={
QN1.input_state: [state]
})
act = act[0]
new_state, reward, done, _ = env.step(act)
steps += 1
episode_reward += reward
state = new_state
all_rewards.append(episode_reward)
all_steps.append(steps)
print("Finished. %i/%i succeeded, avg. steps %.2f" % (
sum(all_rewards),
num_episodes_test,
np.mean(all_steps)
))
print("\nQ network:")
W_val = sess.run(QN1.W)
print("\n left down right up")
print(W_val)
print("\nTimes:\nsave, get_act, step, get_new_Qs, train:")
print(", ".join([str(sum(t)) for t in bench]))
print("\nTotal took %i ms" % (microtime() - start_t))
env.close()
| [
11748,
11550,
201,
198,
11748,
28686,
201,
198,
11748,
4738,
201,
198,
11748,
640,
201,
198,
11748,
2298,
293,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
... | 1.747871 | 4,228 |
import ServerUsers
import BasePlayer
# Default MySQL connection settings; "CHANGEME" must be overridden before
# deployment.
default_cfg = {
    "host": "localhost",
    "port": 3306,
    "database": "bansync",
    "user": "root",
    "password": "CHANGEME"
}


def banlist():
    """Return a dict mapping steamid -> ban entry for every banned user.

    Defined as a function (PEP 8: never assign a lambda to a name) and
    evaluated on each call so it always reflects the live ban list.
    """
    return {ban.steamid: ban for ban in ServerUsers.GetAll(ServerUsers.UserGroup.Banned)}
11748,
9652,
14490,
198,
11748,
7308,
14140,
198,
198,
12286,
62,
37581,
796,
1391,
198,
197,
1,
4774,
1298,
366,
36750,
1600,
198,
197,
1,
634,
1298,
513,
20548,
11,
198,
197,
1,
48806,
1298,
366,
65,
504,
13361,
1600,
198,
197,
1,... | 2.731183 | 93 |
import torch
import csv
import string
import random
from src.solver import BaseSolver
from src.asr import ASR
from src.optim import Optimizer
from src.data import load_dataset
from src.util import human_format, cal_er, feat_to_fig
class Solver(BaseSolver):
''' Solver for training'''
def fetch_data(self, data):
''' Move data to device and compute text seq. length'''
_, feat, feat_len, txt = data
feat = feat.to(self.device)
feat_len = feat_len.to(self.device)
txt = txt.to(self.device)
txt_len = torch.sum(txt != 0, dim=-1)
return feat, feat_len, txt, txt_len
def load_data(self):
''' Load data for training/validation, store tokenizer and input/output shape'''
self.tr_set, self.dv_set, self.feat_dim, self.vocab_size, self.tokenizer, msg = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
self.curriculum > 0, **self.config['data'])
self.verbose(msg)
    def set_model(self):
        ''' Setup ASR model and optimizer.

        Builds the ASR network, the CE/CTC losses, the optional embedding
        regularizer plug-in (which may swap the sequence loss for NLL when
        fusion is enabled), the optimizer over all trainable parameter
        groups, AMP support, and finally restores a checkpoint if requested.
        '''
        # Model
        init_adadelta = self.config['hparas']['optimizer'] == 'Adadelta'
        self.model = ASR(self.feat_dim, self.vocab_size, init_adadelta, **
                         self.config['model']).to(self.device)
        self.verbose(self.model.create_msg())
        model_paras = [{'params': self.model.parameters()}]
        # Random tag used to name this run's CSV log.
        self.csv_log_name = self.rand_str(10)
        self.verbose('Initialized model {}'.format(self.csv_log_name))

        # Losses — index 0 is ignored, presumably the padding token
        # (fetch_data counts non-zero ids as text length); confirm against
        # the tokenizer.
        self.seq_loss = torch.nn.CrossEntropyLoss(ignore_index=0)
        # Note: zero_infinity=False is unstable?
        self.ctc_loss = torch.nn.CTCLoss(blank=0, zero_infinity=False)

        # Plug-ins: the embedding regularizer is optional and gated by config.
        self.emb_fuse = False
        self.emb_reg = ('emb' in self.config) and (
            self.config['emb']['enable'])
        if self.emb_reg:
            # Imported lazily so the dependency is only needed when enabled.
            from src.plugin import EmbeddingRegularizer
            self.emb_decoder = EmbeddingRegularizer(
                self.tokenizer, self.model.dec_dim, **self.config['emb']).to(self.device)
            model_paras.append({'params': self.emb_decoder.parameters()})
            self.emb_fuse = self.emb_decoder.apply_fuse
            # Fusion outputs log-probabilities, so switch CE -> NLL.
            if self.emb_fuse:
                self.seq_loss = torch.nn.NLLLoss(ignore_index=0)
            self.verbose(self.emb_decoder.create_msg())

        # Optimizer over the model (and plug-in, when present) parameters.
        self.optimizer = Optimizer(model_paras, **self.config['hparas'])
        self.verbose(self.optimizer.create_msg())

        # Enable AMP if needed
        self.enable_apex()

        # Automatically load pre-trained model if self.paras.load is given
        self.load_ckpt()
# ToDo: other training methods
def exec(self):
''' Training End-to-end ASR system '''
self.verbose('Total training steps {}.'.format(
human_format(self.max_step)))
ctc_loss, att_loss, emb_loss = None, None, None
n_epochs = 0
self.timer.set()
while self.step < self.max_step:
# Renew dataloader to enable random sampling
if self.curriculum > 0 and n_epochs == self.curriculum:
self.verbose(
'Curriculum learning ends after {} epochs, starting random sampling.'.format(n_epochs))
self.tr_set, _, _, _, _, _ = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
False, **self.config['data'])
for data in self.tr_set:
# Pre-step : update tf_rate/lr_rate and do zero_grad
tf_rate = self.optimizer.pre_step(self.step)
total_loss = 0
# Fetch data
feat, feat_len, txt, txt_len = self.fetch_data(data)
self.timer.cnt('rd')
# Forward model
# Note: txt should NOT start w/ <sos>
ctc_output, encode_len, att_output, att_align, dec_state, esn_state = \
self.model(feat, feat_len, max(txt_len), tf_rate=tf_rate,
teacher=txt, get_dec_state=self.emb_reg)
#self.verbose('CTC Output: {}'.format(ctc_output))
#self.verbose('CTC Output Shape: {}'.format(ctc_output.shape))
#self.verbose('Encoder lengths of {} should be <= CTC Prob Input Length of {}'.format(encode_len,ctc_output.transpose(0,1).shape))
# Plugins
if self.emb_reg:
emb_loss, fuse_output = self.emb_decoder(
dec_state, att_output, label=txt)
total_loss += self.emb_decoder.weight*emb_loss
# Compute all objectives
if ctc_output is not None:
if self.paras.cudnn_ctc:
ctc_loss = self.ctc_loss(ctc_output.transpose(0, 1),
txt.to_sparse().values().to(device='cpu', dtype=torch.int32),
[ctc_output.shape[1]] *
len(ctc_output),
txt_len.cpu().tolist())
else:
ctc_loss = self.ctc_loss(ctc_output.transpose(
0, 1), txt, encode_len, txt_len)
total_loss += ctc_loss*self.model.ctc_weight
if att_output is not None:
b, t, _ = att_output.shape
att_output = fuse_output if self.emb_fuse else att_output
att_loss = self.seq_loss(
att_output.view(b*t, -1), txt.view(-1))
total_loss += att_loss*(1-self.model.ctc_weight)
self.timer.cnt('fw')
# Backprop
grad_norm = self.backward(total_loss)
self.step += 1
# Logger
if (self.step == 1) or (self.step % self.PROGRESS_STEP == 0):
self.progress('Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'
.format(total_loss.cpu().item(), grad_norm, self.timer.show()))
self.write_log(
'loss', {'tr_ctc': ctc_loss, 'tr_att': att_loss})
self.write_log('emb_loss', {'tr': emb_loss})
self.write_log('wer', {'tr_att': cal_er(self.tokenizer, att_output, txt),
'tr_ctc': cal_er(self.tokenizer, ctc_output, txt, ctc=True)})
if self.emb_fuse:
if self.emb_decoder.fuse_learnable:
self.write_log('fuse_lambda', {
'emb': self.emb_decoder.get_weight()})
self.write_log(
'fuse_temp', {'temp': self.emb_decoder.get_temp()})
# Validation
if (self.step == 1) or (self.step % self.valid_step == 0):
self.validate()
# End of step
# https://github.com/pytorch/pytorch/issues/13246#issuecomment-529185354
torch.cuda.empty_cache()
self.timer.set()
if self.step > self.max_step:
break
n_epochs += 1
self.log.close()
| [
11748,
28034,
198,
11748,
269,
21370,
198,
11748,
4731,
198,
11748,
4738,
198,
6738,
12351,
13,
82,
14375,
1330,
7308,
50,
14375,
198,
198,
6738,
12351,
13,
292,
81,
1330,
7054,
49,
198,
6738,
12351,
13,
40085,
1330,
30011,
7509,
198,
... | 1.840426 | 4,042 |
import os
from tests.terraform.graph.checks_infra.test_base import TestBaseSolver
# Absolute path of the directory containing this test module (symlinks resolved).
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
| [
11748,
28686,
198,
6738,
5254,
13,
353,
430,
687,
13,
34960,
13,
42116,
62,
10745,
430,
13,
9288,
62,
8692,
1330,
6208,
14881,
50,
14375,
198,
198,
51,
6465,
62,
34720,
20608,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,... | 2.75 | 52 |
#Version 0.0.0
# Spark pipeline joining civil-registry, address, person and labour-relation
# datasets into one aggregated record per person (id_pessoa_fisica).
# NOTE(review): this script assumes a live SparkContext `sc` and the helpers
# filterByEvent, filterDates, filterDivorceDates, finalFiltering, getNewstEnd,
# finalFormat, calculateRelacoes, finalAggFormat and filtering are
# defined/injected elsewhere -- confirm before running standalone.
fEnd = sc.textFile("/u01/data/base/ENDERECOS")
fPF = sc.textFile("/u01/data/base/PESSOAS_FISICAS")
fCert = sc.textFile("/u01/data/base/CERTIDOES_CIVIS")
fRes = sc.textFile("/u01/data/base/RESUMO_RELACOES_TRABALHISTAS")
'''
CERTIDOES_CIVIS_PART
'''
# Row layout: [[id_certidao_civil, id_pessoa_fisica, dt_evento, dt_emissao, dt_separacao, dt_divorcio]]
rddCertidoes = fCert.map(lambda line: line.split("|"))
# Yields [('1027040844', 'Inc.'), ('1665214649', '1976-12-08;155259490')] -> (PF_ID, 'most recent of dt_evento/dt_emissao; id_certidao_civil')
rddDates = rddCertidoes.map(filterByEvent).reduceByKey(lambda a, b: a + ("|" + b))
# Yields [('113927770', ('1229477534', '2006-07-15', 0))] -> (CERT_ID, (PF_ID, Most_Recent_Date, Inconsistency_Flag (0-OK, 1-Inc)))
rddInfo = rddDates.map(filterDates)
# Yields [('13047457', ('', ''))] -> (CERT_ID, (dt_separacao, dt_divorcio))
rddCertIds = rddCertidoes.map(lambda x: (str(x[0]).strip(), (str(x[4]).strip(), str(x[5]).strip())))
# Yields [('14733170', 'NC')] -> (CERT_ID, oldest of the separation/divorce dates ('NC' when no date is present))
rddDivorcio = rddInfo.map(lambda x: (x[0], 1)).join(rddCertIds).map(filterDivorceDates)
# Yields [('1116310950', 0, 'NC', 1)] -> (PF_ID, newest update date, oldest divorce/separation date, inconsistency flag)
fullJoined = rddDivorcio.join(rddInfo).map(finalFiltering)
'''
ENDERECOS PART
'''
rddEnderecos = fEnd.map(lambda line: line.split('|'))
# e.g. ('2090398103', '310267648;2015-05-29|310267648;2015-05-29')
rddEndByPF = rddEnderecos.map(lambda x: (str(x[1]).strip(), (str(x[0]).strip() + ";" + str(x[4]).strip() + ";" + str(x[2]).strip() + ";" + str(x[3]).strip()))).reduceByKey(lambda a, b: a + ("|" + b))
# (ID_PF, (ID_MUNICIPIO, CEP, CEP[:4], 0))
rddPFbyRecentEnd = rddEndByPF.map(getNewstEnd)
partialResult = rddPFbyRecentEnd.fullOuterJoin(fullJoined)
'''
PESSOAS FISICAS
'''
rddPF = fPF.map(lambda line: line.split('|')).map(lambda x: (str(x[0]).strip(), (str(x[1]).strip(), str(x[2]).strip(), str(x[3]).strip(), str(x[4]).strip(), str(x[5]).strip(), str(x[6]).strip(), str(x[7]).strip(), str(x[8]).strip())))
rddResult = rddPF.fullOuterJoin(partialResult).map(finalFormat).filter(lambda line: line != None)
'''
RELACOES TRABALHISTAS
'''
rddRelacoes = fRes.map(lambda line: line.split("|")).map(lambda x: (str(x[1]).strip(), ((str(x[3]).strip(), str(x[4]).strip(), str(x[5]).strip(), str(x[6]).strip(), str(x[7]).strip()),)))
rddAgg = rddRelacoes.reduceByKey(lambda a, b: a + b).map(calculateRelacoes)
# e.g. ('1238510985', ((58, 0, '6762958.48', '772990.68', '0.00', 0), (None, None, ('1999-06-14', 'NC', 0))))
finalResult = rddAgg.fullOuterJoin(rddResult).map(finalAggFormat).filter(lambda line: line != None)
# Input schema: id_pessoa_fisica|id_pessoa_fisica_dv|dt_nascimento|dt_obito|cs_etnia|cs_estado_civil|cs_grau_instrucao|cs_nacionalidade|cs_sexo
# Output: (id_pessoa_fisica,((id_pessoa_fisica_dv,dt_nascimento,dt_obito,cs_etnia,cs_estado_civil,cs_grau_instrucao,cs_nacionalidade,cs_sexo),(newest update date, oldest divorce/separation date, inconsistency flag),(ID_MUNICIPIO, CEP, distrito, id_inconcistencia)))
finalResult.sortBy(filtering).saveAsTextFile("/u01/data/Resultados/")
(id_pessoa_fisica: '1283983077',
arr_objetos:
(
vida_trabalhista:
(15, 0, '128.51', '0.00', '0.00', 0),
dados_PF:
('0', '2067-06-13', '', '', '', '7', '10', '3'),
dados_endereco:
('354850', '11020150', '1102', 0),
dados_relacao_civil
(0, 'NC', 1)
)
)
('1281289112', ((486, 0, '3133.34', '260.00', '0.00', 0), ('4', '1980-08-21', '', '', '', '', '10', '3'), ('Jesus', '11020130', '1102', 0), ('1996-04-29', 'NC', 0)))
a = {
'ID_PF':x[0],
'OBJETOS':{
'VIDA_TRABALHISTA':{
'DIAS_CONTR':x[1][0][0],
'DATA_FIRST_TRAB':x[1][0][1],
}
}
}
('1214281582', ((647, 0, '463519.16', '8963.99', '0.00', 0), ('2', '2067-06-28', '', '', '4', '9', '10', '3'), ('354850', '11020080', '1102', 0), ('1991-09-06', '2005-12-21', 0)))
| [
2,
14815,
657,
13,
15,
13,
15,
628,
198,
198,
69,
12915,
796,
629,
13,
5239,
8979,
7203,
14,
84,
486,
14,
7890,
14,
8692,
14,
10619,
1137,
2943,
2640,
4943,
198,
69,
42668,
796,
629,
13,
5239,
8979,
7203,
14,
84,
486,
14,
7890,
... | 2.104657 | 1,911 |
from math import sin,cos,atan2,sqrt
from . import PondingLoadCell2d, PondingLoadCell3d
from . import opensees as ops
# Define vertices in counterclockwise (CCW) direction | [
6738,
10688,
1330,
7813,
11,
6966,
11,
39036,
17,
11,
31166,
17034,
198,
6738,
764,
1330,
41598,
278,
8912,
28780,
17,
67,
11,
41598,
278,
8912,
28780,
18,
67,
198,
6738,
764,
1330,
1034,
1072,
274,
355,
39628,
628,
220,
220,
220,
2... | 2.724638 | 69 |
from setuptools import setup, find_packages
from os import path
from io import open
# Get current directory and long description from README
current_dir = path.abspath(path.dirname(__file__))
# Read README with an explicit encoding so builds don't depend on the locale.
with open(path.join(current_dir, "README.md"), encoding="utf-8") as f:
    longDescription = f.read()

# the setup
setup(
    name="FinanceML",
    version="0.1.0",
    description="Machine Learning for Finance",
    long_description=longDescription,
    # README.md is markdown; tell PyPI how to render it.
    long_description_content_type="text/markdown",
    # Bug fix: setuptools expects `author=`, not `authors=` (the original
    # keyword was silently ignored and no author metadata was recorded).
    author="Amogh Reddy, Matt Buckley",
    packages=find_packages(exclude=['contrib', 'docs', 'tests'])
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28686,
1330,
3108,
198,
6738,
33245,
1330,
1280,
198,
198,
2,
3497,
1459,
8619,
290,
890,
6764,
422,
20832,
11682,
198,
14421,
62,
15908,
796,
3108,
13,
397,
2777,
7... | 3.005618 | 178 |
# coding: utf-8
| [
2,
19617,
25,
3384,
69,
12,
23,
198
] | 2 | 8 |
from habitat.tests import Test
| [
6738,
20018,
13,
41989,
1330,
6208,
628
] | 4.571429 | 7 |
# -*- coding: UTF-8 -*-
'''
Initialization Routine for library
Routine is call on any import of a module in the library
'''
# Module metadata. The '@version: $Rev: 383 $' style values look like SVN
# keyword-expansion fields -- presumably updated by the VCS, not by hand
# (NOTE(review): confirm before editing).
__pgmname__ = '__init__'
__version__ = '@version: $Rev: 383 $'
__author__ = "@author: AJ Reynolds"
__copyright__ = "@copyright: Copyright 2014, AJ Reynolds"
__license__ = "@license: GPL"
__maintainer__ = "@organization: AJ Reynolds"
__status__ = "@status: Development"
__credits__ = []
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
220,
220,
220,
20768,
1634,
371,
28399,
329,
5888,
628,
220,
220,
220,
371,
28399,
318,
869,
319,
597,
1330,
286,
257,
8265,
287,
262,
5888,
198,
198,
7061,... | 2.609195 | 174 |
import torch
from self_attention_cv import ResNet50ViT
from self_attention_cv import ViT
| [
11748,
28034,
198,
198,
6738,
2116,
62,
1078,
1463,
62,
33967,
1330,
1874,
7934,
1120,
38432,
51,
198,
6738,
2116,
62,
1078,
1463,
62,
33967,
1330,
16049,
51,
628
] | 3.137931 | 29 |
#!/usr/bin/env python2
import argparse
from subprocess import call, check_output, CalledProcessError, Popen
import re
import sys
import logging
from functools import total_ordering
import signal
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Bug fix: a decorator cannot be applied to an `if` statement -- the stray
# `@total_ordering` line here was a SyntaxError that prevented the whole
# script from running. The class it presumably decorated is gone; drop it.
# NOTE(review): `main` is not defined in this file chunk -- confirm it is
# defined above/imported before relying on this entry point.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='build and start the Jekyll site')
    parser.add_argument('-r', '--refetch', help='refetch dependencies before running', action='store_true')
    sys.exit(main(parser.parse_args()))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
11748,
1822,
29572,
198,
6738,
850,
14681,
1330,
869,
11,
2198,
62,
22915,
11,
34099,
18709,
12331,
11,
8099,
268,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
18931,
198,
... | 3.173184 | 179 |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def combineGates(df: pd.DataFrame, gate1: str, gate2: str):
    '''
    Combine two boolean 'gate' columns on an annotated dataframe.

    Parameters:
        df: pd.DataFrame
            FACS data.
        gate1: str
            Name of the first gate column. Assumes a wavelength followed by
            a '+', e.g. 405nm+
        gate2: str
            Name of the second gate column, e.g. 488nm+

    Returns:
        df: pd.DataFrame
            FACS data with four new boolean columns for the combined gates,
            e.g. '405nm_neg_488nm_neg'.
    '''
    assert gate1 in df.columns
    assert gate2 in df.columns
    # Normalise: tolerate gate names given without their trailing '+'.
    if gate1[-1] != '+':
        gate1 = gate1 + '+'
    if gate2[-1] != '+':
        gate2 = gate2 + '+'
    in1 = df[gate1]
    in2 = df[gate2]
    # Channel names are the gate names with the first '+' removed.
    channel1 = gate1.replace('+', '', 1)
    channel2 = gate2.replace('+', '', 1)
    # All four quadrant combinations, keyed by their output column name.
    quadrants = {
        channel1 + "_neg_" + channel2 + "_neg": ~in1 & ~in2,
        channel1 + "_pos_" + channel2 + "_neg": in1 & ~in2,
        channel1 + "_neg_" + channel2 + "_pos": ~in1 & in2,
        channel1 + "_pos_" + channel2 + "_pos": in1 & in2,
    }
    for column, mask in quadrants.items():
        df.loc[:, column] = mask
    return df
def hitRate(df, double_neg_col, c1_pos_col, c2_pos_col, double_pos_col):
    '''
    Calculate the hit rate (%); double_pos / (double_pos + c1_pos) * 100,
    and the false detection rate; c2_pos / (double_neg + c2_pos) * 100.

    Parameters:
        df: pd.DataFrame
            FACS data.
        double_neg_col, c1_pos_col, c2_pos_col, double_pos_col: str
            Names of the columns in df holding the combined gating counts.
            c1 is channel1, c2 is channel2.

    Returns:
        HR:
            Hit Rate.
        FDR:
            False Detection Rate.
    '''
    double_neg = df[double_neg_col]
    c1_pos = df[c1_pos_col]
    c2_pos = df[c2_pos_col]
    double_pos = df[double_pos_col]
    # Hit rate: fraction of channel-1 positives that are also channel-2 positive.
    HR = (double_pos / (double_pos + c1_pos)) * 100
    # False detection rate: channel-2 positives among channel-1 negatives.
    FDR = (c2_pos / (double_neg + c2_pos)) * 100
    # False negative rate would just be 100 - HR, so it is not computed here.
    return HR, FDR
def countPlot(counts: pd.DataFrame, stat='HR', **kwargs) -> plt.Figure:
    '''
    Plot (and optionally save) a grouped bar graph comparing experiment to
    control conditions for one statistic.

    Parameters:
        counts: pd.DataFrame
            Per-replicate stats in columns (e.g. 'HR', 'FDR') together with
            'landing_pad' and 'condition' grouping columns; 'condition' is
            expected to contain 'control' and 'experiment'.
        stat: str
            Name of the stat column to plot (default 'HR').

    Optional keyword parameters:
        plot: bool -- show the figure interactively (default True).
        title: str -- filename used when saving (default 'countPlot_figure').
        width: float -- bar width (default 0.35).
        con_color: str -- control bar colour (default 'lightgrey').
        exp_color: str -- experiment bar colour (default 'green').
        legend: bool -- draw a legend (default True).
        save: bool -- save the figure to `title` (default False).

    Returns:
        The matplotlib Figure. (Bug fix: the signature always promised a
        Figure but the function previously returned None.)
    '''
    # Read **kwargs
    plot = kwargs.get('plot', True)
    title = kwargs.get('title', 'countPlot_figure')
    width = kwargs.get('width', 0.35)  # the width of the bars
    con_color = kwargs.get('con_color', 'lightgrey')
    exp_color = kwargs.get('exp_color', 'green')
    legend = kwargs.get('legend', True)
    save = kwargs.get('save', False)
    # Mean and std per (landing_pad, condition) group.
    mean_counts = counts.groupby(by=['landing_pad', 'condition'])[stat].mean().reset_index()
    std_counts = counts.groupby(by=['landing_pad', 'condition'])[stat].std().reset_index()
    # One bar group per landing pad.
    labels = mean_counts.landing_pad.unique()
    # Bug fix: filter each frame with its *own* condition column. The original
    # masked std_counts with mean_counts' mask, which only worked by accident
    # because both frames share the same groupby ordering.
    control_mean_HR = mean_counts[mean_counts.condition.eq('control')][stat]
    experiment_mean_HR = mean_counts[mean_counts.condition.eq('experiment')][stat]
    control_std_HR = std_counts[std_counts.condition.eq('control')][stat]
    experiment_std_HR = std_counts[std_counts.condition.eq('experiment')][stat]
    # Generate two colour bar plot
    x = np.arange(len(labels))  # the label locations
    # Turn interactive plotting off
    plt.ioff()
    fig, ax = plt.subplots()
    rects1 = ax.bar(x - width/2, control_mean_HR, width, label='control', yerr=control_std_HR, color=con_color)
    rects2 = ax.bar(x + width/2, experiment_mean_HR, width, label='experiment', yerr=experiment_std_HR, color=exp_color)
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel(stat + ' (%)')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    if legend:
        ax.legend()
    fig.tight_layout()
    if save:
        fig.savefig(title)
    if plot:
        # Bug fix: pyplot.show() takes no positional figure argument --
        # plt.show(fig) passed the figure as `block`, which raises a
        # TypeError on recent matplotlib.
        plt.show()
    else:
        plt.close(fig)
    return fig
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
4299,
12082,
38,
689,
7,
7568,
25,
279,
67,
13,
6601,
19778,
11,
8946,
1... | 2.185373 | 2,174 |
from loguru import logger
class NotetonState:
    """
    Enumerates the conversation states a bot user can be in. Each constant's
    value equals its name, so states serialize/compare as plain strings.
    """
    MAIN_MENU = 'MAIN_MENU'
    MAIN_LIST = 'MAIN_LIST'
    SUBLIST = 'SUBLIST'
    INFO = 'INFO'
    CREATE_LIST_NAME = 'CREATE_LIST_NAME'
    CREATE_LIST_TYPE = 'CREATE_LIST_TYPE'
    EDIT_LIST = 'EDIT_LIST'
    DELETE_LIST = 'DELETE_LIST'
    FEEDBACK = 'FEEDBACK'
    ADD_FILE = 'ADD_FILE'
    ADD_ARTICLE = 'ADD_ARTICLE'
    NO_ANSWER = 'NO_ANSWER'
    DELETE_ITEM = 'DELETE_ITEM'
    # Class-level registry of states; starts empty here.
    # NOTE(review): confirm which code is responsible for populating this.
    states = None
| [
6738,
2604,
14717,
1330,
49706,
628,
198,
4871,
1892,
18483,
9012,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5016,
6870,
2836,
1181,
329,
10214,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
8779,
1268,
62,
49275,
52,
796,
... | 2.147679 | 237 |
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to handle /pkgs"""
import httplib
import logging
import urllib
from google.appengine.api import memcache
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from simian.mac import models
from simian.mac.common import auth
from simian.mac.munki import common
from simian.mac.munki import handlers
def PackageExists(filename):
  """Check whether a package exists.

  Args:
    filename: str, package filename like 'foo.dmg'
  Returns:
    True or False
  """
  # A package exists iff a PackageInfo entity is keyed by its filename.
  pkg_info = models.PackageInfo.get_by_key_name(filename)
  return pkg_info is not None
class Packages(
    handlers.AuthenticationHandler,
    blobstore_handlers.BlobstoreDownloadHandler):
  """Handler for /pkgs/: serves package blobs with ETag/Last-Modified caching."""

  def get(self, filename):
    """GET

    Args:
      filename: str, package filename like 'foo.dmg'
    Returns:
      None if a blob is being returned,
      or a response object
    """
    # Admin or support access only; service-account style auth has no email.
    auth_return = auth.DoAnyAuth()
    if hasattr(auth_return, 'email'):
      email = auth_return.email()
      if not any((auth.IsAdminUser(email),
                  auth.IsSupportUser(email),
                  )):
        raise auth.IsAdminMismatch
    filename = urllib.unquote(filename)
    pkg = models.PackageInfo.MemcacheWrappedGet(filename)
    if pkg is None or not pkg.blobstore_key:
      self.error(httplib.NOT_FOUND)
      return
    # Panic mode deliberately makes every package unavailable.
    if common.IsPanicModeNoPackages():
      self.error(httplib.SERVICE_UNAVAILABLE)
      return
    # Get the Blobstore BlobInfo for this package; memcache wrapped.
    memcache_key = 'blobinfo_%s' % filename
    blob_info = memcache.get(memcache_key)
    if not blob_info:
      blob_info = blobstore.BlobInfo.get(pkg.blobstore_key)
      if blob_info:
        memcache.set(memcache_key, blob_info, 300)  # cache for 5 minutes.
      else:
        logging.error(
            'Failure fetching BlobInfo for %s. Verify the blob exists: %s',
            pkg.filename, pkg.blobstore_key)
        self.error(httplib.NOT_FOUND)
        return
    # Conditional-request headers; 0 (falsy) marks an absent etag header.
    header_date_str = self.request.headers.get('If-Modified-Since', '')
    etag_nomatch_str = self.request.headers.get('If-None-Match', 0)
    etag_match_str = self.request.headers.get('If-Match', 0)
    pkg_date = blob_info.creation
    pkg_size_bytes = blob_info.size
    # TODO(user): The below can be simplified once all of our clients
    # have ETag values set on the filesystem for these files. The
    # parsing of If-Modified-Since could be removed. Removing it prematurely
    # will cause a re-download of all packages on all clients for 1 iteration
    # until they all have ETag values.
    # Reduce complexity of elif conditional below.
    # If an If-None-Match: ETag is supplied, don't worry about a
    # missing file modification date -- the ETag supplies everything needed.
    if etag_nomatch_str and not header_date_str:
      resource_expired = False
    else:
      resource_expired = handlers.IsClientResourceExpired(
          pkg_date, header_date_str)
    # Client supplied If-Match: etag, but that etag does not match current
    # etag. return 412.
    if (etag_match_str and pkg.pkgdata_sha256 and
        etag_match_str != pkg.pkgdata_sha256):
      self.response.set_status(412)
    # Client supplied no etag or If-No-Match: etag, and the etag did not
    # match, or the client's file is older than the mod time of this package.
    elif ((etag_nomatch_str and pkg.pkgdata_sha256 and
           etag_nomatch_str != pkg.pkgdata_sha256) or resource_expired):
      self.response.headers['Content-Disposition'] = str(
          'attachment; filename=%s' % filename)
      # header date empty or package has changed, send blob with last-mod date.
      if pkg.pkgdata_sha256:
        self.response.headers['ETag'] = str(pkg.pkgdata_sha256)
      self.response.headers['Last-Modified'] = pkg_date.strftime(
          handlers.HEADER_DATE_FORMAT)
      self.response.headers['X-Download-Size'] = str(pkg_size_bytes)
      self.send_blob(pkg.blobstore_key)
    else:
      # Client doesn't need to do anything, current version is OK based on
      # ETag and/or last modified date.
      if pkg.pkgdata_sha256:
        self.response.headers['ETag'] = str(pkg.pkgdata_sha256)
      self.response.set_status(httplib.NOT_MODIFIED)
class ClientRepair(Packages):
  """Handler for /repair/: serves the munkitools package for a client's track."""

  def get(self, client_id_str=''):
    """GET

    Returns:
      None if a blob is being returned,
      or a response object
    """
    session = auth.DoAnyAuth()
    client_id = handlers.GetClientIdForRequest(
        self.request, session=session, client_id_str=client_id_str)
    logging.info('Repair client ID: %s', client_id)
    filename = None
    # Pick the first munkitools package whose catalogs include this
    # client's track.
    for pkg in models.PackageInfo.all().filter('name =', 'munkitools'):
      if client_id.get('track', '') in pkg.catalogs:
        filename = pkg.filename
        break
    if filename:
      logging.info('Sending client: %s', filename)
      # Delegate the actual blob download (auth, caching, headers) to Packages.
      super(ClientRepair, self).get(filename)
    else:
      logging.warning('No repair client found.')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2864,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743... | 2.67615 | 2,109 |
import sys
import subprocess
import argparse
parser=argparse.ArgumentParser(
description='''This is a controller for Cheeky Kitten, to run preprogramed operations :-). ''',
epilog="""Beta Version 0.3 Josjuar Lister 2021-2022""")
parser.add_argument('file', metavar='file', nargs='+')
parser.add_argument('-d', '--double', action="store_true", help='double shift')
parser.add_argument('-e', '--scramble', action="store_true", help='double encrypt and rotate once')
parser.add_argument('-k', '--key', help='key used for encryption')
parser.add_argument('-x', '--hex-output', action="store_true", help='output readable hex')
args=parser.parse_args()
c = "cheeky"
b = "-b"
r = "-r"
k = "-k"
if args is None:
print('run btphone -h')
exit()
#if sys.argv[1] is not None:
# o = sys.argv[1]
#if sys.argv[0] is not None:
# f = sys.argv[0]
#else:
# print("no input file supplied...\n")
# i = input()#read from stdin
if args.file:
    f = args.file

if args.double:
    if args.key is None:
        # Double shift: run `cheeky -b` over the input files, then feed the
        # result through `cheeky -b` a second time. (Bug fix: the original
        # nested the file list inside the argv list and passed a list to
        # communicate(), both of which raise TypeError.)
        s1 = subprocess.Popen([c, b] + f, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        first_out, _ = s1.communicate()
        s2 = subprocess.Popen([c, b], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        second_out, _ = s2.communicate(first_out)
        print(second_out)
    else:
        # Bug fix: `file` was an undefined name and the %s placeholder was
        # never applied (print() does not format).
        print("I will produce a cheeky %s" % f)
        # NOTE(review): `yesno` is not defined in this file chunk -- confirm
        # it is imported/defined elsewhere before relying on this branch.
        if yesno('continue?\n'):
            p = args.key
            # Bug fixes: literal "|" argv tokens only work through a shell,
            # `PIPE` was an undefined bare name, and communicate() is a
            # method of the Popen object, not of the subprocess module.
            # Chain the three stages explicitly instead.
            stage1 = subprocess.Popen([c, b] + f + [k, p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out1, _ = stage1.communicate()
            stage2 = subprocess.Popen([c, b], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out2, _ = stage2.communicate(out1)
            stage3 = subprocess.Popen([c, b], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = stage3.communicate(out2)
            print(stdout)
11748,
25064,
198,
11748,
850,
14681,
198,
11748,
1822,
29572,
198,
198,
48610,
28,
853,
29572,
13,
28100,
1713,
46677,
7,
198,
220,
220,
220,
6764,
28,
7061,
6,
1212,
318,
257,
10444,
329,
2580,
988,
88,
509,
2621,
11,
284,
1057,
6... | 2.467023 | 561 |
"""
@author - Mr Dk.
@version - 2018.12.31
@function -
Global variables.
"""
import os
from config import Config
from util.ringbuffer import RingBuffer
# NOTE(review): abspath('.') resolves the *current working directory*, so
# root_dir is the parent of wherever the process was launched from -- not
# necessarily the directory of this source file. Confirm this is intended.
root_dir = os.path.dirname(os.path.abspath('.'))
# Module-level singletons shared across the app: parsed config and ring buffer.
conf = Config(root_dir + "/conf/localDebugConfig.ini")
buff = RingBuffer(conf.get_buffer_size())
| [
37811,
198,
220,
220,
220,
2488,
9800,
532,
1770,
360,
74,
13,
198,
220,
220,
220,
2488,
9641,
532,
2864,
13,
1065,
13,
3132,
198,
220,
220,
220,
2488,
8818,
532,
220,
198,
220,
220,
220,
220,
220,
220,
220,
8060,
9633,
13,
198,
... | 2.697479 | 119 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebSharedTableauRestUser(Model):
    """AdiEmsWebSharedTableauRestUser.

    :param id:
    :type id: str
    :param name:
    :type name: str
    :param site_role:
    :type site_role: str
    :param last_login:
    :type last_login: datetime
    :param external_auth_user_id:
    :type external_auth_user_id: str
    """

    # Maps Python attribute names to serialized payload keys and msrest types.
    # This class is auto-generated (see file header) -- hand edits are lost
    # on regeneration.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'site_role': {'key': 'siteRole', 'type': 'str'},
        'last_login': {'key': 'lastLogin', 'type': 'iso-8601'},
        'external_auth_user_id': {'key': 'externalAuthUserId', 'type': 'str'},
    }
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
6127,
7560,
416,
5413,
357,
49,
8,
11160,
19452,
6127,
35986,
13,
198,
2,
19179,
743,
2728,
11491,
4069,
290,
481,
307,
2626,
611,
262,
2438,
318,
198,
2,
16935,
515,
13,
... | 2.798343 | 362 |
from django.conf import settings
import requests
from itassets.utils import human_time_duration
# Auth headers sent with every Meraki Dashboard API request.
MERAKI_AUTH_HEADERS = {
    'X-Cisco-Meraki-API-Key': settings.MERAKI_API_KEY,
    'Content-Type': 'application/json',
}
def get_meraki_clients(network_id, timespan=1209600):
    """Return all clients seen on a Meraki network within `timespan`.

    `timespan` is in seconds; the default value is 14 days.

    Follows the API's `Link: rel="next"` pagination headers, accumulating
    every page. Raises requests.HTTPError on a non-2xx response.
    """
    url = f'https://api.meraki.com/api/v1/networks/{network_id}/clients?timespan={timespan}'
    resp = requests.get(url, headers=MERAKI_AUTH_HEADERS)
    resp.raise_for_status()
    clients = resp.json()
    while 'next' in resp.links:
        url = resp.links['next']['url']
        resp = requests.get(url, headers=MERAKI_AUTH_HEADERS)
        resp.raise_for_status()
        clients = clients + resp.json()
    # Bug fix: return the accumulated list. The original returned
    # resp.json(), i.e. only the final page, silently discarding every
    # page collected in the loop above.
    return clients
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
7007,
198,
6738,
340,
19668,
13,
26791,
1330,
1692,
62,
2435,
62,
32257,
628,
198,
29296,
10206,
40,
62,
32,
24318,
62,
37682,
4877,
796,
1391,
198,
220,
220,
220,
705,
55,
12,
... | 2.488818 | 313 |
def merge(list1, list2):
    """
    Merge two sorted lists into one sorted list, filling the result from the
    back: the larger of the two current tail elements is placed last.

    :param list1: first sorted list
    :param list2: second sorted list
    :return: new merged sorted list
    """
    i = len(list1) - 1                       # last unmerged index of list1
    j = len(list2) - 1                       # last unmerged index of list2
    k = len(list1) + len(list2) - 1          # next write position in the output
    merged = [0] * (len(list1) + len(list2))
    # While both inputs still have elements, take the larger tail element.
    while i >= 0 and j >= 0:
        if list1[i] > list2[j]:
            merged[k] = list1[i]
            i -= 1
        else:
            merged[k] = list2[j]
            j -= 1
        k -= 1
    # Copy whatever remains of list2 (only one of these loops can run).
    while j >= 0:
        merged[k] = list2[j]
        k -= 1
        j -= 1
    # Copy whatever remains of list1.
    while i >= 0:
        merged[k] = list1[i]
        k -= 1
        i -= 1
    return merged
if __name__ == "__main__":
    # Quick manual check of merge() with two pre-sorted lists.
    list2 = [2, 4, 7, 9, 12]
    list1 = [1, 3, 5, 6, 8, 10, 11]
    list3 = merge(list1=list1, list2=list2)
    print(list3)
    # Unrelated demo: bytes slices compare lexicographically by byte value,
    # so b'01' < b'0a' prints True (ord('1') < ord('a')).
    x = b'0001'
    y = b'000a'
    print(x[2:] < y[2:])
| [
198,
198,
4299,
20121,
7,
4868,
16,
11,
1351,
17,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
10263,
238,
230,
33176,
114,
10310,
97,
10310,
103,
17312,
231,
41753,
237,
21410,
26344,
245,
26193,
101,
22755,
238,
10310,
118,
... | 1.018211 | 1,263 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#!/usr/bin/env python3.7
# -*- coding: utf8 -*-
# Sweep shaping-filter parameters over photo-electron statistics and plot
# time-over-threshold (ToT) results: one PNG per (Cpar, Rpar) pair and
# quantisation step.
import matplotlib as mat
mat.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
import argparse
import os
parser=argparse.ArgumentParser()
parser.add_argument('amp', help='select amp',type=int)
parser.add_argument('degree', help='filter order',type=int)
parser.add_argument('crange', help='capacitance range',type=int)
args=parser.parse_args()
amp=args.amp
degree=args.degree
crange=args.crange
home=os.environ['HOME']
# NOTE(review): `dir` shadows the builtin of the same name.
dir='nphe_shaping'
nin='pe_stats/pe_stats-a{0}_d{1}c{2}'.format(amp,degree,crange)
name='{0}/{1}/{2}.dat'.format(home,dir,nin)
# Simulation time axis: steps of 0.2 up to 1000 time units.
dt=0.2
tend=1000.0
t=np.arange(0,tend,dt)
tpeaks=[75,100,125,150,175]
# Normalised pole locations for the two filter variants; complex-conjugate
# partners are added below via np.conj when building the LTI system.
a0_5d=np.array([-1.4766878,-1.4166647+0.5978596j,-1.2036832+1.2994843j])
a0_3d=np.array([-1.2633573,-1.1490948+0.7864188j])
sigma0=np.exp(1)/np.sqrt(2.0*np.pi)
data=np.loadtxt(name,comments='#')
# Discriminator thresholds to test.
vths=np.array([10,20,30,50])
K=np.size(vths)
pebins=np.arange(0,200)
for scale in [0.5,1.0,1.25]:
    # ToT quantisation step and full-scale range (255 steps of Tstep).
    Tstep=scale
    Tmax=255*Tstep
    t0bins=np.arange(0,Tmax,Tstep)
    dout='{0}/{1}/fitting/amp_stats{2}/tstep-{3}'.format(home,dir,amp,scale)
    for Cpar in [0,1,2]:
        for Rpar in [0,1,2]:
            fig,ax=plt.subplots(nrows=2,ncols=5)
            fig.set_size_inches(12,7)
            for Tpar in [0,1,2,3,4]:
                # Column 0 holds photo-electron counts; the amplitude column
                # is selected by the (Tpar, Cpar, Rpar) parameter triple.
                pe=data[:,0]
                v0=data[:,9*Tpar+3*Cpar+Rpar+1]
                # Drop rows with zero amplitude.
                tesT=v0!=0
                pe=pe[tesT]
                v0=v0[tesT]
                M=np.size(v0,0)
                tot=np.zeros([K,M])
                totQ=np.zeros([K,M])
                fwtm=2.0*tpeaks[Tpar]
                if degree==3:
                    tau0=fwtm/(2.0*sigma0*np.sqrt(2.0*np.log(2.9)))
                    sigma=tau0*sigma0
                    a=(1.0/sigma)*a0_3d
                    k0=np.real((a[1]*np.conj(a[1])))
                    semi_g0=signal.lti([],[a[1],np.conj(a[1])],k0)
                else:
                    tau0=fwtm/(2.0*sigma0*np.sqrt(2.0*np.log(15.55)))
                    sigma=tau0*sigma0
                    a=(1.0/sigma)*a0_5d
                    k0=-1.0*np.real((a[1]*np.conj(a[1]))*(a[2]*np.conj(a[2])))
                    semi_g0=signal.lti([],[a[1],np.conj(a[1]),a[2],np.conj(a[2])],k0)
                # Impulse response of the shaping filter, peak-normalised.
                t,g0=signal.impulse(semi_g0,T=t)
                g0norm=(1.0/np.amax(g0))*g0
                # One scaled pulse per event: rows are events, columns time.
                pulse=np.transpose(v0[np.newaxis])*g0norm
                m=0
                ax[1,Tpar].hist(pe,bins=pebins,log=True)
                for vth in vths:
                    # NOTE(review): `timeovert` is not defined in this chunk --
                    # confirm it is defined/imported elsewhere in the file.
                    tot[m,:]=timeovert(pulse,vth,M)
                    totQ[m,:]=Tstep*np.digitize(tot[m,:],t0bins)
                    tbins,tsat=np.unique(totQ[m,:],return_counts=True)
                    ax[0,Tpar].scatter(pe,totQ[m,:],s=1.0)
                    ax[0,Tpar].axis([0.9,200,0,Tmax])
                    ax[0,Tpar].set_xscale('log')
                    ax[1,Tpar].hist(totQ[m,:],bins=t0bins,log=True,alpha=0.5)
                    m+=1
            plt.savefig('{0}/tot_d{1}c{2}-{3}{4}par.png'.format(dout,degree,crange,Cpar,Rpar),dpi=100)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
11748,
2603,
29487,
8019,
355,
2603,
198,
6759,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
29487,
80... | 1.677956 | 1,624 |
# Make a copy of this as IBM_API_KEYS.py and fill in your API keys
# SECURITY NOTE(review): the values below look like real credentials. Keys
# committed to source control should be rotated immediately and loaded from
# environment variables or a secrets manager instead of being hard-coded.
#### IBM ##################
## STT (Speech to Text)
STT_API_ENDPOINT = "https://api.us-south.speech-to-text.watson.cloud.ibm.com/instances/d01dba71-c104-4035-beb2-979315bfc91f"
STT_API_KEY = "luSLdQ7tvPb0K9EdAfwA37OYRW8L-KvvRaZ2l4r2XA2G"
# TA (Tone Analyzer)
TA_API_ENDPOINT = "https://api.us-south.tone-analyzer.watson.cloud.ibm.com/instances/ac0ebb12-0c6a-49bf-9726-0ece18c5fb13"
TA_API_KEY = "UcpzpuBCWtH8Pvte0VRM5D4QuT5Xek0Fw-QqWVLXJyl-"
#### Sendgrid ##############
SENDGRID_API_KEY='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'
# Example curl invocation of the Tone Analyzer endpoint (kept for reference).
'''
curl -X GET -u "apikey:UcpzpuBCWtH8Pvte0VRM5D4QuT5Xek0Fw-QqWVLXJyl-" \
"https://api.us-south.tone-analyzer.watson.cloud.ibm.com/instances/ac0ebb12-0c6a-49bf-9726-0ece18c5fb13/v3/tone?version=2017-09-21
&text=Team%2C%20I%20know%20that%20times%20are%20tough%21%20Product%20sales%20have
%20been%20disappointing%20for%20the%20past%20three%20quarters.%20We%20have%20a%20
competitive%20product%2C%20but%20we%20need%20to%20do%20a%20better%20job%20of%20
selling%20it%21"
'''
| [
2,
6889,
257,
4866,
286,
428,
355,
19764,
62,
17614,
62,
7336,
16309,
13,
9078,
290,
6070,
287,
534,
7824,
8251,
198,
198,
4242,
19764,
1303,
14468,
2,
198,
2235,
3563,
51,
198,
2257,
51,
62,
17614,
62,
1677,
6322,
46,
12394,
796,
... | 1.883803 | 568 |
import numpy as np
import cv2
import torch
import torchvision.transforms as transforms
from torchvision.models.resnet import resnet18
import argparse
import sys
# Command-line entry point: parse the options and run detection on a video.
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", help="path to a video")
parser.add_argument("-w", "--weights", help="weight file")
parser.add_argument("-v", "--visual", help="enables visual output, 1 or 0 (default is 1)")
parser.add_argument("-sm", "--show_misc", help="show misc detections in visual output, 1 or 0 (default is 0)")
parser.add_argument("-t", "--threshold", help="detection threshold, 0.33 to 1 (default is 0.5)")
args = vars(parser.parse_args())

# The video path and the weight file are mandatory; bail out otherwise.
if not args.get("path", False):
    print("No path provided to video")
    sys.exit()
if not args.get("weights", False):
    print("No weight file provided")
    sys.exit()

# Optional flags fall back to their documented defaults when not supplied.
raw_visual = args.get("visual", False)
visual = bool(int(raw_visual)) if raw_visual else True
raw_show_misc = args.get("show_misc", False)
show_misc = bool(int(raw_show_misc)) if raw_show_misc else False
raw_threshold = args.get("threshold", False)
threshold = float(raw_threshold) if raw_threshold else 0.5

detection(args.get("path"), args.get("weights"), visual, show_misc, threshold)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
28034,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
6738,
28034,
10178,
13,
27530,
13,
411,
3262,
1330,
581,
3262,
1507,
198,
11748,
1822,
29572,
198,
11... | 2.806005 | 433 |
#!/usr/bin/env python
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
if __name__ == '__main__':
    # Generate one word cloud per (Greek name variant, week) from the
    # H-TFIDF ranking CSV, font-sizing terms by their rank.
    # Disabled by default; flip to True to regenerate the images.
    build_wordcloud = False
    if build_wordcloud:
        # Renamed from `dir`/`dir_out`: `dir` shadowed the builtin.
        results_dir = "elasticsearch/analyse/nldb21/results/feb_idf_square_week/country"
        wordcloud_dir = "elasticsearch/analyse/nldb21/results/feb_idf_square_week/country/wordcloud"
        # A first assignment that also listed "Ελλάς" was immediately
        # overwritten; only the effective value is kept.
        list_of_greece_name = ["Greece", "Ἑλλάς"]
        greece_df = pd.read_csv(results_dir + "/h-tfidf-Biggest-score-flooding.csv")
        # Keep only rows not flagged as user flooding.
        greece_df = greece_df[greece_df["user_flooding"] == "0"]
        for name in list_of_greece_name:
            greece_name_df = greece_df[greece_df["country"] == name]
            for week in greece_name_df["date"].unique():
                greece_name_w_df = greece_name_df[greece_name_df["date"] == week]
                # Rank-based pseudo-frequencies: higher H-TFIDF rank (lower
                # index) gets a larger weight, hence a larger font.
                terms_ranks = {
                    term: 500 - rank
                    for rank, term in enumerate(greece_name_w_df["terms"])
                }
                try:
                    wordcloud = WordCloud(background_color="white", width=1600, height=800)
                    wordcloud.generate_from_frequencies(frequencies=terms_ranks)
                    plt.figure(figsize=(20, 10))
                    plt.imshow(wordcloud, interpolation="bilinear")
                    # don't display axis
                    plt.axis("off")
                    # remove margin
                    plt.tight_layout(pad=0)
                    plt.savefig(wordcloud_dir + "/Greece_" + week)
                except Exception as err:
                    # Was a bare `except: pass` — report the failure instead
                    # of silently swallowing it (still best-effort per week).
                    print("wordcloud failed for {0} / {1}: {2}".format(name, week, err))
                finally:
                    # Close the figure so repeated iterations don't leak
                    # matplotlib figure objects.
                    plt.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1573,
17721,
1330,
9678,
18839,
198,
198,
361,
11593,
3672,
834,
6624,
70... | 1.876877 | 999 |
# -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
urlpatterns = []
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
4112,
62,
11748,
628,
198,
6371,
33279,
82,
796,
17635,
198
] | 2.909091 | 33 |
import contextlib
try:
from sentry_sdk import Hub, configure_scope
@contextlib.contextmanager
except ImportError:
@contextlib.contextmanager
@contextlib.contextmanager
| [
11748,
4732,
8019,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
1908,
563,
62,
21282,
74,
1330,
14699,
11,
17425,
62,
29982,
628,
220,
220,
220,
2488,
22866,
8019,
13,
22866,
37153,
628,
198,
16341,
17267,
12331,
25,
628,
220,
220,
... | 3.183333 | 60 |
from TaskModule.TaskGraph import *
from TaskModule.Task import *
from TaskModule.DataInstance import *
from ResourceModule import ResourcesManager as RM
from queue import Queue
import random
from TaskModule.Task import TaskStatus
# Module-level scheduler instance shared by the code below.
# NOTE(review): Scheduler is not imported by name here — presumably it comes
# from one of the star imports above (TaskModule.TaskGraph / TaskModule.Task);
# confirm.
scheduler = Scheduler()
# Module-level counter, initialized to zero.
cnt = 0
| [
6738,
15941,
26796,
13,
25714,
37065,
1330,
1635,
220,
198,
6738,
15941,
26796,
13,
25714,
1330,
1635,
198,
6738,
15941,
26796,
13,
6601,
33384,
1330,
1635,
198,
6738,
20857,
26796,
1330,
13864,
13511,
355,
29820,
198,
6738,
16834,
1330,
... | 3.469136 | 81 |
from dbapi import generic_connect, translate_key, parse_dsn
# .NET provider metadata: the Npgsql assembly supplies the ADO.NET
# connection type used for PostgreSQL.
assembly = 'Npgsql'
typename = 'Npgsql.NpgsqlConnection'

# Translate generic DSN option names into Npgsql connection-string keywords.
keymap = dict(
    host='Server',
    port='Port',
    dbname='Database',
    user='User ID',
    password='Password',
)
| [
6738,
20613,
15042,
1330,
14276,
62,
8443,
11,
15772,
62,
2539,
11,
21136,
62,
9310,
77,
198,
198,
41873,
796,
705,
45,
6024,
25410,
6,
198,
774,
3617,
480,
796,
705,
45,
6024,
25410,
13,
45,
6024,
25410,
32048,
6,
198,
2539,
8899,
... | 2.5 | 100 |