code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import argparse
import cv2
import gdal
import logging
import numpy
import ogr
import osr
import os
import subprocess
import shutil
from danesfield import gdal_utils
from danesfield import rasterize
# Kinds of vector input handled: index 0 is buildings, index 1 is roads.
# These strings are also used as suffixes in the generated file names.
VECTOR_TYPES = ["buildings", "roads"]
def shift_vector(inputFeatures, outputVectorFile, outputLayerName, outProjection, offsetGeo):
    """Write a translated copy of the given polygon features to a shapefile.

    A new ESRI Shapefile is created at ``outputVectorFile`` containing one
    polygon per input feature, with every ring point shifted by
    ``(offsetGeo[0], offsetGeo[1])`` in geographic coordinates.
    """
    driver = ogr.GetDriverByName("ESRI Shapefile")
    print("Shifting vector -> {}".format(os.path.basename(outputVectorFile)))
    dataSource = driver.CreateDataSource(outputVectorFile)
    spatialRef = osr.SpatialReference(outProjection)
    # create the output layer
    layer = dataSource.CreateLayer(os.path.basename(outputLayerName),
                                   srs=spatialRef, geom_type=ogr.wkbPolygon)
    featureDef = layer.GetLayerDefn()
    # copy each polygon, translating every point of every ring
    for srcFeature in inputFeatures:
        shiftedPoly = ogr.Geometry(ogr.wkbPolygon)
        srcPoly = srcFeature.GetGeometryRef()
        for ringIndex in range(srcPoly.GetGeometryCount()):
            srcRing = srcPoly.GetGeometryRef(ringIndex)
            shiftedRing = ogr.Geometry(ogr.wkbLinearRing)
            for pointIndex in range(srcRing.GetPointCount()):
                point = srcRing.GetPoint(pointIndex)
                shiftedRing.AddPoint(point[0] + offsetGeo[0],
                                     point[1] + offsetGeo[1])
            shiftedPoly.AddGeometry(shiftedRing)
        newFeature = ogr.Feature(featureDef)
        newFeature.SetGeometry(shiftedPoly)
        layer.CreateFeature(newFeature)
def copy_shapefile(input, output):
    """Copy the .dbf/.prj/.shp/.shx files that make up an ESRI Shapefile.

    Both arguments may carry any extension; only their base names are used.
    """
    sourceBase = os.path.splitext(input)[0]
    targetBase = os.path.splitext(output)[0]
    for extension in ('.dbf', '.prj', '.shp', '.shx'):
        shutil.copyfile(sourceBase + extension, targetBase + extension)
def remove_shapefile(input):
    """Delete the .dbf/.prj/.shp/.shx companion files of an ESRI Shapefile."""
    base = os.path.splitext(input)[0]
    for extension in ('.dbf', '.prj', '.shp', '.shx'):
        os.remove(base + extension)
# project a vector point to image
def ProjectPoint(model, pt):
    """Map a geographic point to integer pixel coordinates in the scaled image.

    ``model`` is a dict with 'corners' ([left, top, right, bottom]),
    'project_model' (the GDAL geo transform, whose entries 1 and 5 are the
    pixel sizes), and 'scale' (the image resize factor).
    """
    # simplest projection model: offset from the origin divided by pixel size
    originX = model['corners'][0]
    originY = model['corners'][1]
    pixelWidth = model['project_model'][1]
    pixelHeight = model['project_model'][5]
    scaling = model['scale']
    column = int((pt[0] - originX) / pixelWidth * scaling)
    row = int((pt[1] - originY) / pixelHeight * scaling)
    return [column, row]
def computeMatchingPoints(check_point_list, edge_img, dx, dy):
    """Count sample points that land on a strong edge after shifting.

    Each point ``[x, y]`` in ``check_point_list`` is shifted by ``(dx, dy)``;
    a point counts when it stays inside ``edge_img`` and the pixel value
    there exceeds 200. Points shifted outside the image are ignored.
    """
    height = edge_img.shape[0]
    width = edge_img.shape[1]
    matches = 0
    for point in check_point_list:
        x = point[0] + dx
        y = point[1] + dy
        if 0 <= x < width and 0 <= y < height and edge_img[y, x] > 200:
            matches += 1
    return matches
def spat_vectors(inputVectorFileNames, inputImageCorners, inputImageSrs,
                 outputMaskFileName, debug=False):
    """
    Returns building features and optionally road features.

    Each input vector file is clipped to the image extent with ogr2ogr and,
    when its SRS differs from the image's, reprojected to the image SRS.
    The resulting features are loaded into memory.

    Parameters:
        inputVectorFileNames: building (and optionally road) vector files.
        inputImageCorners: [left, right, bottom, top] extent of the image.
        inputImageSrs: osr.SpatialReference of the image.
        outputMaskFileName: base name used to derive intermediate file names.
        debug: when True, print the ogr2ogr command lines and their output
            and keep intermediate shapefiles.

    Returns:
        A list with one list of OGR features per input file (buildings
        first, then roads).
    """
    # Buildings come from a polygon layer, roads from a line-string layer.
    geometryTypes = [ogr.wkbPolygon, ogr.wkbLineString]
    resultList = []
    for typeIndex in range(len(inputVectorFileNames)):
        inputVectorFileName = inputVectorFileNames[typeIndex]
        inputVector = gdal_utils.ogr_open(inputVectorFileName)
        inputLayer = gdal_utils.ogr_get_layer(inputVector, geometryTypes[typeIndex])
        inputVectorSrs = inputLayer.GetSpatialRef()
        # True when the vector SRS differs from the image SRS
        # (was the unidiomatic "False if ... else True").
        imageVectorDifferentSrs = not inputVectorSrs.IsSame(inputImageSrs)
        layerDefinition = inputLayer.GetLayerDefn()
        hasBuildingField = False
        for i in range(layerDefinition.GetFieldCount()):
            if layerDefinition.GetFieldDefn(i).GetName() == "building":
                hasBuildingField = True
                break
        # clip the shape file first
        outputNoExt = os.path.splitext(outputMaskFileName)[0]
        if imageVectorDifferentSrs:
            outputVectorFile = outputNoExt + "_" + VECTOR_TYPES[typeIndex] + "_original.shp"
        else:
            outputVectorFile = outputNoExt + "_" + VECTOR_TYPES[typeIndex] + "_spat_not_aligned.shp"
        # -spat takes xmin ymin xmax ymax; corners are [left, right, bottom, top]
        ogr2ogr_args = ["ogr2ogr", "-spat",
                        str(inputImageCorners[0]), str(inputImageCorners[2]),
                        str(inputImageCorners[1]), str(inputImageCorners[3])]
        if imageVectorDifferentSrs:
            ogr2ogr_args.extend(["-spat_srs", str(inputImageSrs)])
        if hasBuildingField:
            # OSM-style data: keep only features tagged as buildings
            ogr2ogr_args.extend(["-where", "building is not null"])
        ogr2ogr_args.extend([outputVectorFile, inputVectorFileName])
        ogr2ogr_args.append(inputLayer.GetName())
        print("Spatial query (clip): {} -> {}".format(
            os.path.basename(inputVectorFileName), os.path.basename(outputVectorFile)))
        response = subprocess.run(ogr2ogr_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if debug:
            print(*ogr2ogr_args)
            print("{}\n{}".format(response.stdout, response.stderr))
        if imageVectorDifferentSrs:
            # convert to the same SRS as the image file
            inputVectorFileName = outputNoExt + "_" + VECTOR_TYPES[typeIndex] + "_original.shp"
            outputVectorFile = outputNoExt + "_" + VECTOR_TYPES[typeIndex] + "_spat_not_aligned.shp"
            ogr2ogr_args = ["ogr2ogr", "-t_srs", str(inputImageSrs),
                            outputVectorFile, inputVectorFileName]
            print("Convert SRS: {} -> {}".format(
                os.path.basename(inputVectorFileName), os.path.basename(outputVectorFile)))
            response = subprocess.run(ogr2ogr_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if debug:
                print(*ogr2ogr_args)
                print("{}\n{}".format(response.stdout, response.stderr))
            else:
                # drop the pre-reprojection intermediate unless debugging
                remove_shapefile(inputVectorFileName)
        # read the clipped (and possibly reprojected) features into memory
        inputVectorFileName = outputVectorFile
        inputLayerName = os.path.splitext(os.path.basename(inputVectorFileName))[0]
        inputVector = gdal_utils.ogr_open(inputVectorFileName)
        inputLayer = inputVector.GetLayer(inputLayerName)
        inputList = list(inputLayer)
        resultList.append(inputList)
    return resultList
def main(args):
    """Generate building (and optionally road) masks aligned with an image.

    Input vectors are clipped to the image extent, shifted so their edges
    match Canny edges detected in the downscaled image (or shifted by the
    user-supplied --offset), then rasterized to <output_mask>_buildings.tif
    plus road/bridge products when a road vector file is given.
    """
    global VECTOR_TYPES
    parser = argparse.ArgumentParser(
        description="Generate building mask aligned with image. To do that we shift input "
        "vector to match edges generated from image.")
    parser.add_argument('output_mask',
                        help="Output image mask base name. <output_mask>_buildings.shp, "
                             "<output_mask>_buildings.tif are generated. Optionally "
                             "<output_mask>_roads.tif and <output_mask>_roads.shp are "
                             "also generated. See --input_vectors parameter.")
    parser.add_argument('input_image', help='Orthorectified 8-bit image file')
    parser.add_argument('input_vectors', nargs='+',
                        help='Buildings and optionally road vector files with OSM or '
                             'US Cities data. A polygon layer is chosen for buildings and a '
                             'line string layer is chosen for roads. '
                             'If both building and road layers are in the same vector file just '
                             'pass the file twice. Only elevated bridges are rendered '
                             'by default. If all roads need to be rendered pass --render_roads')
    parser.add_argument('--render_cls', action="store_true",
                        help='Output a CLS image')
    parser.add_argument('--render_roads', action="store_true",
                        help='Render all roads, not only elevated bridges')
    parser.add_argument('--scale', type=float, default=0.2,
                        help='Scale factor. '
                             'We cannot deal with the images with original resolution')
    parser.add_argument('--move_thres', type=float, default=5,
                        help='Distance for edge matching')
    parser.add_argument("--offset", type=float, nargs=2,
                        help="Shift the mask using the offset specified "
                             "(using the SRS of the input_image) instead of the computed offset.")
    parser.add_argument("--debug", action="store_true",
                        help="Print debugging information")
    args = parser.parse_args(args)
    scale = args.scale

    inputImage = gdal_utils.gdal_open(args.input_image, gdal.GA_ReadOnly)
    band = inputImage.GetRasterBand(1)
    if band.DataType != gdal.GDT_Byte:
        raise RuntimeError(
            "Input image {} does not have Byte type. Use msi-to-rgb.py to-8bit.py "
            "to convert it.".format(args.input_image))
    projection = inputImage.GetProjection()
    inputImageSrs = osr.SpatialReference(projection)
    gt = inputImage.GetGeoTransform()  # captures origin and pixel size
    left, top = gdal.ApplyGeoTransform(gt, 0, 0)
    right, bottom = gdal.ApplyGeoTransform(gt, inputImage.RasterXSize, inputImage.RasterYSize)
    band = None

    # Downscale the image and detect the edges used by the alignment search.
    print("Resize and edge detection: {}".format(os.path.basename(args.input_image)))
    color_image = cv2.imread(args.input_image)
    small_color_image = numpy.zeros(
        (int(color_image.shape[0] * scale),
         int(color_image.shape[1] * scale), 3), dtype=numpy.uint8)
    if scale != 1.0:
        small_color_image = cv2.resize(color_image, None, fx=scale, fy=scale)
        color_image = small_color_image
    grayimg = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
    edge_img = cv2.Canny(grayimg, 100, 200)
    if args.debug:
        cv2.imwrite(os.path.splitext(args.output_mask)[0] + '_edge.tif', edge_img)

    # Simple projection model consumed by ProjectPoint().
    model = {}
    model['corners'] = [left, top, right, bottom]
    model['project_model'] = gt
    model['scale'] = scale
    inputImageCorners = [left, right, bottom, top]

    features = spat_vectors(
        args.input_vectors, inputImageCorners, inputImageSrs,
        args.output_mask)

    # Render building outlines at image scale and collect edge sample points.
    print("Aligning {} buildings ...".format(len(features[0])))
    tmp_img = numpy.zeros([int(color_image.shape[0]), int(color_image.shape[1])],
                          dtype=numpy.uint8)
    for feature in features[0]:
        poly = feature.GetGeometryRef()
        for ring_idx in range(poly.GetGeometryCount()):
            ring = poly.GetGeometryRef(ring_idx)
            rp = []
            for i in range(0, ring.GetPointCount()):
                pt = ring.GetPoint(i)
                rp.append(ProjectPoint(model, pt))
            ring_points = numpy.array(rp)
            ring_points = ring_points.reshape((-1, 1, 2))
            # edge mask of the building cluster
            cv2.polylines(tmp_img, [ring_points], True, (255), thickness=2)
    check_point_list = []
    # build a sparse set to fast process
    for y in range(0, tmp_img.shape[0]):
        for x in range(0, tmp_img.shape[1]):
            if tmp_img[y, x] > 200:
                check_point_list.append([x, y])
    print("Checking {} points ...".format(len(check_point_list)))

    max_value = 0
    index_max_value = 0
    offsetGeo = [0.0, 0.0]
    current = [0, 0]
    if not args.offset:
        offset = [0, 0]
        # shift moves possible from [0, 0]
        moves = [
            [1, 0],    # 0
            [1, 1],    # 1
            [0, 1],    # 2
            [-1, 1],   # 3
            [-1, 0],   # 4
            [-1, -1],  # 5
            [0, -1],   # 6
            [1, -1]]   # 7
        initial_cases = range(8)
        # cases[i] shows shift moves possible after the previous move was cases[i][0]
        # we change direction with at most 45 degrees.
        next_cases = [
            [0, 7, 1],
            [1, 0, 2],
            [2, 1, 3],
            [3, 2, 4],
            [4, 3, 5],
            [5, 4, 6],
            [6, 5, 7],
            [7, 6, 0]
        ]
        # Hill-climb: repeatedly move one pixel in the direction that puts
        # the most check points on image edges, up to move_thres moves.
        cases = initial_cases
        old_max_value = 0
        total_value = computeMatchingPoints(check_point_list, edge_img, 0, 0)
        max_value = total_value
        if args.debug:
            print("Total value for ({}, {}) is: {} (max value: {})".format(
                0, 0, total_value, max_value))
        # BUG FIX: move_thres is parsed with type=float, but range() requires
        # an int; range(args.move_thres) raised TypeError on Python 3.
        # Also renamed the loop variables: the original reused "i" for both
        # this loop and the inner candidate loop.
        for step in range(int(args.move_thres)):
            if args.debug:
                print("===== {} =====".format(step))
            while (max_value > old_max_value):
                old_max_value = max_value
                for case in cases:
                    [dx, dy] = moves[case]
                    total_value = computeMatchingPoints(check_point_list, edge_img,
                                                        current[0] + dx, current[1] + dy)
                    if args.debug:
                        print("Total value for ({}, {}) is: {} (max value: {})".format(
                            dx, dy, total_value, max_value))
                    if total_value > max_value:
                        max_value = total_value
                        index_max_value = case
                if (max_value > old_max_value):
                    # take the best move and restrict the next candidates to
                    # directions within 45 degrees of it
                    [dx, dy] = moves[index_max_value]
                    current = [current[0] + dx, current[1] + dy]
                    if args.debug:
                        print("Current: {}".format(current))
                    offset = current
                    cases = next_cases[index_max_value]
                    break
        offsetGeo = gdal.ApplyGeoTransform(gt, offset[0] / scale, offset[1] / scale)
        offsetGeo[0] = offsetGeo[0] - left
        offsetGeo[1] = top - offsetGeo[1]
        print("Using offset: {} ({})".format(offsetGeo, offset))
        # Guard against an empty check point list (no buildings rendered),
        # which previously raised ZeroDivisionError.
        if not check_point_list or max_value / float(len(check_point_list)) < 0.05:
            print("Fewer than 5% of points match {} / {}. This may happen because of "
                  "missing areas in the orthorectified image. "
                  "Increasing scale may increase the number of points that match.".format(
                      max_value, len(check_point_list)))
    else:
        # BUG FIX: assign before printing so the reported offset is the one
        # actually used (the original printed the [0.0, 0.0] placeholder).
        offsetGeo = args.offset
        print("Using offset: {}".format(offsetGeo))

    for i in range(len(features)):
        outputNoExt = os.path.splitext(args.output_mask)[0]
        outputVectorFile = outputNoExt + "_" + VECTOR_TYPES[i] + "_spat.shp"
        if not (offsetGeo[0] == 0.0 and offsetGeo[1] == 0.0):
            shift_vector(features[i], outputVectorFile, outputNoExt, projection, offsetGeo)
        else:
            # zero offset: just copy the clipped vectors
            inputVectorFileName = outputNoExt + "_" + VECTOR_TYPES[i] + "_spat_not_aligned.shp"
            print("Copy vector -> {}".format(os.path.basename(outputVectorFile)))
            copy_shapefile(inputVectorFileName, outputVectorFile)
        if not args.debug:
            remove_shapefile(outputNoExt + "_" + VECTOR_TYPES[i] + "_spat_not_aligned.shp")
        # Clip the (possibly shifted) vector to the exact image extent.
        ogr2ogr_args = ["ogr2ogr", "-clipsrc",
                        str(inputImageCorners[0]), str(inputImageCorners[2]),
                        str(inputImageCorners[1]), str(inputImageCorners[3])]
        outputNoExt = os.path.splitext(args.output_mask)[0]
        ogr2ogr_args.extend([outputNoExt + "_" + VECTOR_TYPES[i] + ".shp",
                             outputNoExt + "_" + VECTOR_TYPES[i] + "_spat.shp"])
        print("Clipping vector file {} -> {}".format(
            os.path.basename(outputNoExt + "_" + VECTOR_TYPES[i] + "_spat.shp"),
            os.path.basename(outputNoExt + "_" + VECTOR_TYPES[i] + ".shp")))
        response = subprocess.run(ogr2ogr_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if args.debug:
            print(*ogr2ogr_args)
            print("{}\n{}".format(response.stdout, response.stderr))
        remove_shapefile(outputNoExt + "_" + VECTOR_TYPES[i] + "_spat.shp")
        if i == 0:
            print("Rasterizing buildings ...")
            if args.render_cls:
                # CLS semantics: background 2, buildings 6
                rasterize_args = ["gdal_rasterize", "-ot", "Byte", "-init", "2",
                                  "-burn", "6",
                                  "-ts", str(inputImage.RasterXSize),
                                  str(inputImage.RasterYSize),
                                  "-te", str(inputImageCorners[0]), str(inputImageCorners[2]),
                                  str(inputImageCorners[1]), str(inputImageCorners[3])]
            else:
                # make buildings red
                rasterize_args = ["gdal_rasterize", "-ot", "Byte",
                                  "-burn", "255", "-burn", "0", "-burn", "0", "-burn", "255",
                                  "-ts", str(inputImage.RasterXSize),
                                  str(inputImage.RasterYSize),
                                  "-te", str(inputImageCorners[0]), str(inputImageCorners[2]),
                                  str(inputImageCorners[1]), str(inputImageCorners[3])]
            outputNoExt = os.path.splitext(args.output_mask)[0]
            rasterize_args.extend([outputNoExt + "_" + VECTOR_TYPES[i] + ".shp",
                                   outputNoExt + "_" + VECTOR_TYPES[i] + ".tif"])
            print("Rasterizing {} -> {}".format(
                os.path.basename(outputNoExt + "_" + VECTOR_TYPES[i] + ".shp"),
                os.path.basename(outputNoExt + "_" + VECTOR_TYPES[i] + ".tif")))
            response = subprocess.run(
                rasterize_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if args.debug:
                print(*rasterize_args)
                print("{}\n{}".format(response.stdout, response.stderr))
        else:
            print("Rasterizing bridges ...")
            outputNoExt = os.path.splitext(args.output_mask)[0]
            # renamed from input/output: avoid shadowing the builtins
            road_shp = os.path.basename(outputNoExt + "_" + VECTOR_TYPES[i] + ".shp")
            bridge_tif = os.path.basename(outputNoExt + "_" + VECTOR_TYPES[i] + "_bridges.tif")
            bridges = rasterize.rasterize_file_dilated_line(
                road_shp, inputImage, bridge_tif,
                numpy.ones((3, 3)), dilation_iterations=20,
                query=rasterize.ELEVATED_ROADS_QUERY,
            )
            if not args.debug:
                os.remove(bridge_tif)
            if args.render_roads:
                roads_tif = os.path.basename(outputNoExt + "_" + VECTOR_TYPES[i] + "_roads.tif")
                roads = rasterize.rasterize_file_dilated_line(
                    road_shp, inputImage, roads_tif,
                    numpy.ones((3, 3)), dilation_iterations=20, query=rasterize.ROADS_QUERY)
                if not args.debug:
                    os.remove(roads_tif)
            # Merge the road/bridge masks into the buildings raster.
            buildingsData = gdal_utils.gdal_open(
                os.path.basename(outputNoExt + "_" + VECTOR_TYPES[0] + ".tif"),
                gdal.GA_ReadOnly)
            if args.render_cls:
                cls = buildingsData.GetRasterBand(1).ReadAsArray()
                if args.render_roads:
                    cls[roads] = 11
                cls[bridges] = 17
                gdal_utils.gdal_save(cls, inputImage,
                                     os.path.basename(outputNoExt + ".tif"),
                                     gdal.GDT_Byte, options=['COMPRESS=DEFLATE'])
            else:
                red = buildingsData.GetRasterBand(1).ReadAsArray()
                green = buildingsData.GetRasterBand(2).ReadAsArray()
                blue = buildingsData.GetRasterBand(3).ReadAsArray()
                opacity = buildingsData.GetRasterBand(4).ReadAsArray()
                if args.render_roads:
                    # roads in green
                    red[roads] = 0
                    green[roads] = 255
                    blue[roads] = 0
                    opacity[roads] = 255
                # bridges in blue
                red[bridges] = 0
                green[bridges] = 0
                blue[bridges] = 255
                opacity[bridges] = 255
                gdal_utils.gdal_save([red, green, blue, opacity], inputImage,
                                     os.path.basename(outputNoExt + ".tif"),
                                     gdal.GDT_Byte, options=['COMPRESS=DEFLATE'])
            if not args.debug:
                os.remove(os.path.basename(outputNoExt + "_" + VECTOR_TYPES[0] + ".tif"))
if __name__ == '__main__':
    import sys

    try:
        main(sys.argv[1:])
    except Exception as exc:
        # Record the full traceback before signalling failure to the shell.
        logging.exception(exc)
        sys.exit(1)
| [
"os.remove",
"argparse.ArgumentParser",
"numpy.ones",
"danesfield.gdal_utils.ogr_open",
"ogr.Feature",
"danesfield.gdal_utils.gdal_open",
"cv2.cvtColor",
"danesfield.gdal_utils.ogr_get_layer",
"shutil.copyfile",
"cv2.resize",
"cv2.Canny",
"os.path.basename",
"ogr.GetDriverByName",
"gdal.Ap... | [((717, 754), 'ogr.GetDriverByName', 'ogr.GetDriverByName', (['"""ESRI Shapefile"""'], {}), "('ESRI Shapefile')\n", (736, 754), False, 'import ogr\n'), ((907, 942), 'osr.SpatialReference', 'osr.SpatialReference', (['outProjection'], {}), '(outProjection)\n', (927, 942), False, 'import osr\n'), ((6512, 6673), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate building mask aligned with image. To do that we shift input vector to match edges generated from image."""'}), "(description=\n 'Generate building mask aligned with image. To do that we shift input vector to match edges generated from image.'\n )\n", (6535, 6673), False, 'import argparse\n'), ((8734, 8790), 'danesfield.gdal_utils.gdal_open', 'gdal_utils.gdal_open', (['args.input_image', 'gdal.GA_ReadOnly'], {}), '(args.input_image, gdal.GA_ReadOnly)\n', (8754, 8790), False, 'from danesfield import gdal_utils\n'), ((9107, 9139), 'osr.SpatialReference', 'osr.SpatialReference', (['projection'], {}), '(projection)\n', (9127, 9139), False, 'import osr\n'), ((9229, 9261), 'gdal.ApplyGeoTransform', 'gdal.ApplyGeoTransform', (['gt', '(0)', '(0)'], {}), '(gt, 0, 0)\n', (9251, 9261), False, 'import gdal\n'), ((9282, 9356), 'gdal.ApplyGeoTransform', 'gdal.ApplyGeoTransform', (['gt', 'inputImage.RasterXSize', 'inputImage.RasterYSize'], {}), '(gt, inputImage.RasterXSize, inputImage.RasterYSize)\n', (9304, 9356), False, 'import gdal\n'), ((9478, 9506), 'cv2.imread', 'cv2.imread', (['args.input_image'], {}), '(args.input_image)\n', (9488, 9506), False, 'import cv2\n'), ((9804, 9849), 'cv2.cvtColor', 'cv2.cvtColor', (['color_image', 'cv2.COLOR_BGR2GRAY'], {}), '(color_image, cv2.COLOR_BGR2GRAY)\n', (9816, 9849), False, 'import cv2\n'), ((9865, 9893), 'cv2.Canny', 'cv2.Canny', (['grayimg', '(100)', '(200)'], {}), '(grayimg, 100, 200)\n', (9874, 9893), False, 'import cv2\n'), ((999, 1032), 'os.path.basename', 'os.path.basename', (['outputLayerName'], {}), '(outputLayerName)\n', 
(1015, 1032), False, 'import os\n'), ((1286, 1314), 'ogr.Geometry', 'ogr.Geometry', (['ogr.wkbPolygon'], {}), '(ogr.wkbPolygon)\n', (1298, 1314), False, 'import ogr\n'), ((1799, 1825), 'ogr.Feature', 'ogr.Feature', (['outFeatureDef'], {}), '(outFeatureDef)\n', (1810, 1825), False, 'import ogr\n'), ((1963, 1986), 'os.path.splitext', 'os.path.splitext', (['input'], {}), '(input)\n', (1979, 1986), False, 'import os\n'), ((2008, 2032), 'os.path.splitext', 'os.path.splitext', (['output'], {}), '(output)\n', (2024, 2032), False, 'import os\n'), ((2093, 2145), 'shutil.copyfile', 'shutil.copyfile', (['(inputNoExt + ext)', '(outputNoExt + ext)'], {}), '(inputNoExt + ext, outputNoExt + ext)\n', (2108, 2145), False, 'import shutil\n'), ((2194, 2217), 'os.path.splitext', 'os.path.splitext', (['input'], {}), '(input)\n', (2210, 2217), False, 'import os\n'), ((2278, 2305), 'os.remove', 'os.remove', (['(inputNoExt + ext)'], {}), '(inputNoExt + ext)\n', (2287, 2305), False, 'import os\n'), ((3470, 3510), 'danesfield.gdal_utils.ogr_open', 'gdal_utils.ogr_open', (['inputVectorFileName'], {}), '(inputVectorFileName)\n', (3489, 3510), False, 'from danesfield import gdal_utils\n'), ((3532, 3595), 'danesfield.gdal_utils.ogr_get_layer', 'gdal_utils.ogr_get_layer', (['inputVector', 'geometryTypes[typeIndex]'], {}), '(inputVector, geometryTypes[typeIndex])\n', (3556, 3595), False, 'from danesfield import gdal_utils\n'), ((5039, 5115), 'subprocess.run', 'subprocess.run', (['ogr2ogr_args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(ogr2ogr_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (5053, 5115), False, 'import subprocess\n'), ((6262, 6302), 'danesfield.gdal_utils.ogr_open', 'gdal_utils.ogr_open', (['inputVectorFileName'], {}), '(inputVectorFileName)\n', (6281, 6302), False, 'from danesfield import gdal_utils\n'), ((9700, 9749), 'cv2.resize', 'cv2.resize', (['color_image', 'None'], {'fx': 'scale', 'fy': 'scale'}), '(color_image, None, fx=scale, 
fy=scale)\n', (9710, 9749), False, 'import cv2\n'), ((13684, 13748), 'gdal.ApplyGeoTransform', 'gdal.ApplyGeoTransform', (['gt', '(offset[0] / scale)', '(offset[1] / scale)'], {}), '(gt, offset[0] / scale, offset[1] / scale)\n', (13706, 13748), False, 'import gdal\n'), ((15702, 15778), 'subprocess.run', 'subprocess.run', (['ogr2ogr_args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(ogr2ogr_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (15716, 15778), False, 'import subprocess\n'), ((796, 830), 'os.path.basename', 'os.path.basename', (['outputVectorFile'], {}), '(outputVectorFile)\n', (812, 830), False, 'import os\n'), ((1512, 1543), 'ogr.Geometry', 'ogr.Geometry', (['ogr.wkbLinearRing'], {}), '(ogr.wkbLinearRing)\n', (1524, 1543), False, 'import ogr\n'), ((4074, 4110), 'os.path.splitext', 'os.path.splitext', (['outputMaskFileName'], {}), '(outputMaskFileName)\n', (4090, 4110), False, 'import os\n'), ((5826, 5902), 'subprocess.run', 'subprocess.run', (['ogr2ogr_args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(ogr2ogr_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (5840, 5902), False, 'import subprocess\n'), ((9423, 9457), 'os.path.basename', 'os.path.basename', (['args.input_image'], {}), '(args.input_image)\n', (9439, 9457), False, 'import os\n'), ((10846, 10861), 'numpy.array', 'numpy.array', (['rp'], {}), '(rp)\n', (10857, 10861), False, 'import numpy\n'), ((10981, 11042), 'cv2.polylines', 'cv2.polylines', (['tmp_img', '[ring_points]', '(True)', '(255)'], {'thickness': '(2)'}), '(tmp_img, [ring_points], True, 255, thickness=2)\n', (10994, 11042), False, 'import cv2\n'), ((14406, 14440), 'os.path.splitext', 'os.path.splitext', (['args.output_mask'], {}), '(args.output_mask)\n', (14422, 14440), False, 'import os\n'), ((15278, 15312), 'os.path.splitext', 'os.path.splitext', (['args.output_mask'], {}), '(args.output_mask)\n', (15294, 15312), False, 'import os\n'), ((17516, 17594), 'subprocess.run', 
'subprocess.run', (['rasterize_args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(rasterize_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (17530, 17594), False, 'import subprocess\n'), ((17906, 17968), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[i] + '.shp')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[i] + '.shp')\n", (17922, 17968), False, 'import os\n'), ((17994, 18064), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[i] + '_bridges.tif')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[i] + '_bridges.tif')\n", (18010, 18064), False, 'import os\n'), ((20624, 20644), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (20641, 20644), False, 'import logging\n'), ((20653, 20664), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (20661, 20664), False, 'import sys\n'), ((4944, 4981), 'os.path.basename', 'os.path.basename', (['inputVectorFileName'], {}), '(inputVectorFileName)\n', (4960, 4981), False, 'import os\n'), ((4983, 5017), 'os.path.basename', 'os.path.basename', (['outputVectorFile'], {}), '(outputVectorFile)\n', (4999, 5017), False, 'import os\n'), ((6197, 6234), 'os.path.basename', 'os.path.basename', (['inputVectorFileName'], {}), '(inputVectorFileName)\n', (6213, 6234), False, 'import os\n'), ((15537, 15604), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[i] + '_spat.shp')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[i] + '_spat.shp')\n", (15553, 15604), False, 'import os\n'), ((15618, 15680), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[i] + '.shp')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[i] + '.shp')\n", (15634, 15680), False, 'import os\n'), ((17083, 17117), 'os.path.splitext', 'os.path.splitext', (['args.output_mask'], {}), '(args.output_mask)\n', (17099, 17117), False, 'import os\n'), ((17844, 17878), 'os.path.splitext', 'os.path.splitext', (['args.output_mask'], {}), '(args.output_mask)\n', 
(17860, 17878), False, 'import os\n'), ((18197, 18215), 'numpy.ones', 'numpy.ones', (['(3, 3)'], {}), '((3, 3))\n', (18207, 18215), False, 'import numpy\n'), ((18372, 18389), 'os.remove', 'os.remove', (['output'], {}), '(output)\n', (18381, 18389), False, 'import os\n'), ((18457, 18525), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[i] + '_roads.tif')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[i] + '_roads.tif')\n", (18473, 18525), False, 'import os\n'), ((18896, 18958), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[0] + '.tif')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[0] + '.tif')\n", (18912, 18958), False, 'import os\n'), ((5727, 5764), 'os.path.basename', 'os.path.basename', (['inputVectorFileName'], {}), '(inputVectorFileName)\n', (5743, 5764), False, 'import os\n'), ((5766, 5800), 'os.path.basename', 'os.path.basename', (['outputVectorFile'], {}), '(outputVectorFile)\n', (5782, 5800), False, 'import os\n'), ((9933, 9967), 'os.path.splitext', 'os.path.splitext', (['args.output_mask'], {}), '(args.output_mask)\n', (9949, 9967), False, 'import os\n'), ((14830, 14864), 'os.path.basename', 'os.path.basename', (['outputVectorFile'], {}), '(outputVectorFile)\n', (14846, 14864), False, 'import os\n'), ((17348, 17410), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[i] + '.shp')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[i] + '.shp')\n", (17364, 17410), False, 'import os\n'), ((17428, 17490), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[i] + '.tif')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[i] + '.tif')\n", (17444, 17490), False, 'import os\n'), ((18668, 18686), 'numpy.ones', 'numpy.ones', (['(3, 3)'], {}), '((3, 3))\n', (18678, 18686), False, 'import numpy\n'), ((18804, 18821), 'os.remove', 'os.remove', (['output'], {}), '(output)\n', (18813, 18821), False, 'import os\n'), ((19324, 19362), 'os.path.basename', 'os.path.basename', (["(outputNoExt 
+ '.tif')"], {}), "(outputNoExt + '.tif')\n", (19340, 19362), False, 'import os\n'), ((20254, 20292), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '.tif')"], {}), "(outputNoExt + '.tif')\n", (20270, 20292), False, 'import os\n'), ((20445, 20507), 'os.path.basename', 'os.path.basename', (["(outputNoExt + '_' + VECTOR_TYPES[0] + '.tif')"], {}), "(outputNoExt + '_' + VECTOR_TYPES[0] + '.tif')\n", (20461, 20507), False, 'import os\n')] |
import numpy as np
from scipy.stats import rankdata
from sklearn.datasets import load_iris
from utilities.rank_data import rank_data
def test_rank_data():
    """Rank the whole iris data set at once, then verify each column
    against scipy's reference implementation."""
    data = load_iris().data
    output = rank_data(data)
    for col in range(data.shape[1]):
        expected = rankdata(data[:, col])
        assert np.allclose(expected, output[:, col])
if __name__ == '__main__':
    import pytest
    # Run the tests in this module through pytest when executed directly.
    pytest.main()
| [
"sklearn.datasets.load_iris",
"numpy.allclose",
"scipy.stats.rankdata",
"pytest.main",
"utilities.rank_data.rank_data"
] | [((232, 247), 'utilities.rank_data.rank_data', 'rank_data', (['data'], {}), '(data)\n', (241, 247), False, 'from utilities.rank_data import rank_data\n'), ((500, 513), 'pytest.main', 'pytest.main', ([], {}), '()\n', (511, 513), False, 'import pytest\n'), ((169, 180), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (178, 180), False, 'from sklearn.datasets import load_iris\n'), ((380, 397), 'scipy.stats.rankdata', 'rankdata', (['feature'], {}), '(feature)\n', (388, 397), False, 'from scipy.stats import rankdata\n'), ((413, 448), 'numpy.allclose', 'np.allclose', (['expected', 'output[:, i]'], {}), '(expected, output[:, i])\n', (424, 448), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''Example 01
Shows how to create simple geometry from splines and ellipse arcs, and how to mesh a quad mesh in GmshMesher.
Also demonstrates drawGeometry(), drawMesh, and drawing texts and labels in a figure.
'''

import numpy as np
import calfem.geometry as cfg
import calfem.mesh as cfm
import calfem.vis_mpl as cfv
import calfem.core as cfc
import calfem.utils as cfu

cfu.enableLogging()

# ---- Problem constants ----------------------------------------------------

# Thermal conductivities in x and y (homogeneous material).
kx1 = 100
ky1 = 100
# Element thickness.
t = 1.0

# Gauss points or integration points
n = 2
ep = [t, n]

# Constitutive (conductivity) matrix.
D = np.matrix([
    [kx1, 0.],
    [0., ky1]
])

# ---- Define geometry ------------------------------------------------------

g = cfg.Geometry()  # Create a GeoData object that holds the geometry.

# Outer rectangle corners (points 0-3).
g.point([0, 0])
g.point([2, 0])
g.point([2, 1])
g.point([0, 1])
# Triangular hole (points 4-6) and elliptic hole (points 7-10).
g.point([0.5, 0.3])
g.point([0.3, 0.7])
g.point([0.7, 0.7])
g.point([0.8, 0.5])
g.point([1.7, 0.5])
g.point([1.5, 0.5])
g.point([1.7, 0.7])

# Boundary markers used later to apply boundary conditions.
id_hole1 = 50
id_hole2 = 60
id_outer = 80

# Curves: one ellipse arc plus splines; each curve is tagged with a marker.
g.ellipse([7, 8, 9, 10], marker=id_hole1)
g.spline([0, 1], marker=id_outer)
g.spline([2, 1], marker=id_outer)
g.spline([3, 2], marker=id_outer)
g.spline([0, 3], marker=id_outer)
g.spline([7, 9], marker=id_hole1)
g.spline([10, 9], marker=id_hole1)
g.spline([4, 5, 6, 4], marker=id_hole2)

# Surface bounded by the outer curves, with the two holes cut out.
g.surface([4, 3, 2, 1], [[7], [5, 6, 0]])

# ---- Generate mesh --------------------------------------------------------

mesh = cfm.GmshMesh(g)

mesh.el_type = 16  # 8-node quadrilateral elements
mesh.dofs_per_node = 1  # Degrees of freedom per node.
mesh.el_size_factor = 0.05  # Factor that changes element sizes.

coords, edof, dofs, bdofs, element_markers = mesh.create()
print(edof)

# ---- Solve problem --------------------------------------------------------

print("Assembling system matrix...")

n_dofs = np.size(dofs)
ex, ey = cfc.coordxtr(edof, coords, dofs)

K = np.zeros([n_dofs, n_dofs])

for el_topo, elx, ely, marker in zip(edof, ex, ey, element_markers):
    # Calc element stiffness matrix: Conductivity matrix D is taken
    # from Ddict and depends on which region (which marker) the element is in.
    if mesh.el_type == 2:
        Ke = cfc.flw2te(elx, ely, ep, D)
    elif mesh.el_type == 3:
        Ke = cfc.flw2i4e(elx, ely, ep, D)
    elif mesh.el_type == 16:
        Ke = cfc.flw2i8e(elx, ely, ep, D)
    else:
        print("Element type not supported")
    # Assemble the element contribution into the global stiffness matrix.
    cfc.assem(el_topo, K, Ke)

print("Solving equation system...")

# No distributed load; only prescribed boundary temperatures drive the flow.
f = np.zeros([n_dofs, 1])

bc = np.array([], 'i')
bc_val = np.array([], 'f')

# Prescribed temperatures on the outer boundary and on the two holes.
bc, bc_val = cfu.applybc(bdofs, bc, bc_val, id_outer, 30.0)
bc, bc_val = cfu.applybc(bdofs, bc, bc_val, id_hole1, 300.0)
bc, bc_val = cfu.applybc(bdofs, bc, bc_val, id_hole2, 400.0)

a, r = cfc.solveq(K, f, bc, bc_val)

# ---- Compute element forces -----------------------------------------------

print("Computing element forces...")

ed = cfc.extract_eldisp(edof, a)

for i in range(np.shape(ex)[0]):
    # Element flows/gradients; the flw2*s call must match the element type
    # used for the stiffness computation above.
    if mesh.el_type == 2:
        es, et = cfc.flw2ts(ex[i, :], ey[i, :], D, ed[i, :])
    elif mesh.el_type == 3:
        es, et, eci = cfc.flw2i4s(ex[i, :], ey[i, :], ep, D, ed[i, :])
    elif mesh.el_type == 16:
        es, et, eci = cfc.flw2i8s(ex[i, :], ey[i, :], ep, D, ed[i, :])
    else:
        print("Element type not supported.")

    # Do something with es, et, eci here.

# ---- Visualise mesh -------------------------------------------------------

# Hold left mouse button to pan.
# Hold right mouse button to zoom.

# Draw the geometry. Note that surfaces and volumes are not drawn at all by
# this function.
cfv.draw_geometry(g)

# New figure window
cfv.figure()

# Draw the mesh.
cfv.draw_mesh(
    coords=coords,
    edof=edof,
    dofs_per_node=mesh.dofs_per_node,
    el_type=mesh.el_type,
    filled=True,
    title="Example 01"
)

# Nodal temperatures drawn three different ways: shaded, filled contours
# (over the element mesh), and contour lines.
cfv.figure()
cfv.draw_nodal_values_shaded(a, coords, edof, title="Temperature")
cfv.colorbar()

cfv.figure()
cfv.draw_nodal_values_contourf(a, coords, edof, title="Temperature", dofs_per_node=mesh.dofs_per_node, el_type=mesh.el_type, draw_elements=True)
cfv.colorbar()

cfv.figure()
cfv.draw_nodal_values_contour(a, coords, edof)
cfv.colorbar()

# cfv.addText("This is a Text", pos=(1, -0.3), angle=45)  #Adds a text in world space

# ourLabel = cfv.label("This is a Label", pos=(100,200), angle=-45)  #Adds a label in the screen space
# ourLabel.text = "Label, changed."  #We can change the attributes of labels and texts, such as color, text, and position.
# ourLabel.textColor = 'r'  #Make it red. (1,0,0) would also have worked.
# ourLabel.position = (20,30)

# Enter main loop:
cfv.showAndWait()
| [
"calfem.vis_mpl.figure",
"calfem.vis_mpl.draw_nodal_values_contourf",
"calfem.vis_mpl.draw_nodal_values_contour",
"calfem.vis_mpl.draw_mesh",
"calfem.core.coordxtr",
"numpy.shape",
"calfem.core.flw2i8s",
"calfem.core.solveq",
"calfem.geometry.Geometry",
"calfem.vis_mpl.draw_geometry",
"calfem.me... | [((400, 419), 'calfem.utils.enableLogging', 'cfu.enableLogging', ([], {}), '()\n', (417, 419), True, 'import calfem.utils as cfu\n'), ((590, 625), 'numpy.matrix', 'np.matrix', (['[[kx1, 0.0], [0.0, ky1]]'], {}), '([[kx1, 0.0], [0.0, ky1]])\n', (599, 625), True, 'import numpy as np\n'), ((718, 732), 'calfem.geometry.Geometry', 'cfg.Geometry', ([], {}), '()\n', (730, 732), True, 'import calfem.geometry as cfg\n'), ((1451, 1466), 'calfem.mesh.GmshMesh', 'cfm.GmshMesh', (['g'], {}), '(g)\n', (1463, 1466), True, 'import calfem.mesh as cfm\n'), ((1806, 1819), 'numpy.size', 'np.size', (['dofs'], {}), '(dofs)\n', (1813, 1819), True, 'import numpy as np\n'), ((1829, 1861), 'calfem.core.coordxtr', 'cfc.coordxtr', (['edof', 'coords', 'dofs'], {}), '(edof, coords, dofs)\n', (1841, 1861), True, 'import calfem.core as cfc\n'), ((1867, 1893), 'numpy.zeros', 'np.zeros', (['[n_dofs, n_dofs]'], {}), '([n_dofs, n_dofs])\n', (1875, 1893), True, 'import numpy as np\n'), ((2448, 2469), 'numpy.zeros', 'np.zeros', (['[n_dofs, 1]'], {}), '([n_dofs, 1])\n', (2456, 2469), True, 'import numpy as np\n'), ((2476, 2493), 'numpy.array', 'np.array', (['[]', '"""i"""'], {}), "([], 'i')\n", (2484, 2493), True, 'import numpy as np\n'), ((2503, 2520), 'numpy.array', 'np.array', (['[]', '"""f"""'], {}), "([], 'f')\n", (2511, 2520), True, 'import numpy as np\n'), ((2535, 2581), 'calfem.utils.applybc', 'cfu.applybc', (['bdofs', 'bc', 'bc_val', 'id_outer', '(30.0)'], {}), '(bdofs, bc, bc_val, id_outer, 30.0)\n', (2546, 2581), True, 'import calfem.utils as cfu\n'), ((2595, 2642), 'calfem.utils.applybc', 'cfu.applybc', (['bdofs', 'bc', 'bc_val', 'id_hole1', '(300.0)'], {}), '(bdofs, bc, bc_val, id_hole1, 300.0)\n', (2606, 2642), True, 'import calfem.utils as cfu\n'), ((2656, 2703), 'calfem.utils.applybc', 'cfu.applybc', (['bdofs', 'bc', 'bc_val', 'id_hole2', '(400.0)'], {}), '(bdofs, bc, bc_val, id_hole2, 400.0)\n', (2667, 2703), True, 'import calfem.utils as cfu\n'), ((2712, 2740), 
'calfem.core.solveq', 'cfc.solveq', (['K', 'f', 'bc', 'bc_val'], {}), '(K, f, bc, bc_val)\n', (2722, 2740), True, 'import calfem.core as cfc\n'), ((2864, 2891), 'calfem.core.extract_eldisp', 'cfc.extract_eldisp', (['edof', 'a'], {}), '(edof, a)\n', (2882, 2891), True, 'import calfem.core as cfc\n'), ((3553, 3573), 'calfem.vis_mpl.draw_geometry', 'cfv.draw_geometry', (['g'], {}), '(g)\n', (3570, 3573), True, 'import calfem.vis_mpl as cfv\n'), ((3596, 3608), 'calfem.vis_mpl.figure', 'cfv.figure', ([], {}), '()\n', (3606, 3608), True, 'import calfem.vis_mpl as cfv\n'), ((3628, 3760), 'calfem.vis_mpl.draw_mesh', 'cfv.draw_mesh', ([], {'coords': 'coords', 'edof': 'edof', 'dofs_per_node': 'mesh.dofs_per_node', 'el_type': 'mesh.el_type', 'filled': '(True)', 'title': '"""Example 01"""'}), "(coords=coords, edof=edof, dofs_per_node=mesh.dofs_per_node,\n el_type=mesh.el_type, filled=True, title='Example 01')\n", (3641, 3760), True, 'import calfem.vis_mpl as cfv\n'), ((3784, 3796), 'calfem.vis_mpl.figure', 'cfv.figure', ([], {}), '()\n', (3794, 3796), True, 'import calfem.vis_mpl as cfv\n'), ((3797, 3863), 'calfem.vis_mpl.draw_nodal_values_shaded', 'cfv.draw_nodal_values_shaded', (['a', 'coords', 'edof'], {'title': '"""Temperature"""'}), "(a, coords, edof, title='Temperature')\n", (3825, 3863), True, 'import calfem.vis_mpl as cfv\n'), ((3864, 3878), 'calfem.vis_mpl.colorbar', 'cfv.colorbar', ([], {}), '()\n', (3876, 3878), True, 'import calfem.vis_mpl as cfv\n'), ((3880, 3892), 'calfem.vis_mpl.figure', 'cfv.figure', ([], {}), '()\n', (3890, 3892), True, 'import calfem.vis_mpl as cfv\n'), ((3893, 4041), 'calfem.vis_mpl.draw_nodal_values_contourf', 'cfv.draw_nodal_values_contourf', (['a', 'coords', 'edof'], {'title': '"""Temperature"""', 'dofs_per_node': 'mesh.dofs_per_node', 'el_type': 'mesh.el_type', 'draw_elements': '(True)'}), "(a, coords, edof, title='Temperature',\n dofs_per_node=mesh.dofs_per_node, el_type=mesh.el_type, draw_elements=True)\n", (3923, 4041), True, 'import 
calfem.vis_mpl as cfv\n'), ((4038, 4052), 'calfem.vis_mpl.colorbar', 'cfv.colorbar', ([], {}), '()\n', (4050, 4052), True, 'import calfem.vis_mpl as cfv\n'), ((4054, 4066), 'calfem.vis_mpl.figure', 'cfv.figure', ([], {}), '()\n', (4064, 4066), True, 'import calfem.vis_mpl as cfv\n'), ((4067, 4113), 'calfem.vis_mpl.draw_nodal_values_contour', 'cfv.draw_nodal_values_contour', (['a', 'coords', 'edof'], {}), '(a, coords, edof)\n', (4096, 4113), True, 'import calfem.vis_mpl as cfv\n'), ((4114, 4128), 'calfem.vis_mpl.colorbar', 'cfv.colorbar', ([], {}), '()\n', (4126, 4128), True, 'import calfem.vis_mpl as cfv\n'), ((4565, 4582), 'calfem.vis_mpl.showAndWait', 'cfv.showAndWait', ([], {}), '()\n', (4580, 4582), True, 'import calfem.vis_mpl as cfv\n'), ((2380, 2405), 'calfem.core.assem', 'cfc.assem', (['el_topo', 'K', 'Ke'], {}), '(el_topo, K, Ke)\n', (2389, 2405), True, 'import calfem.core as cfc\n'), ((2152, 2179), 'calfem.core.flw2te', 'cfc.flw2te', (['elx', 'ely', 'ep', 'D'], {}), '(elx, ely, ep, D)\n', (2162, 2179), True, 'import calfem.core as cfc\n'), ((2908, 2920), 'numpy.shape', 'np.shape', (['ex'], {}), '(ex)\n', (2916, 2920), True, 'import numpy as np\n'), ((2969, 3012), 'calfem.core.flw2ts', 'cfc.flw2ts', (['ex[i, :]', 'ey[i, :]', 'D', 'ed[i, :]'], {}), '(ex[i, :], ey[i, :], D, ed[i, :])\n', (2979, 3012), True, 'import calfem.core as cfc\n'), ((2221, 2249), 'calfem.core.flw2i4e', 'cfc.flw2i4e', (['elx', 'ely', 'ep', 'D'], {}), '(elx, ely, ep, D)\n', (2232, 2249), True, 'import calfem.core as cfc\n'), ((3063, 3111), 'calfem.core.flw2i4s', 'cfc.flw2i4s', (['ex[i, :]', 'ey[i, :]', 'ep', 'D', 'ed[i, :]'], {}), '(ex[i, :], ey[i, :], ep, D, ed[i, :])\n', (3074, 3111), True, 'import calfem.core as cfc\n'), ((2292, 2320), 'calfem.core.flw2i8e', 'cfc.flw2i8e', (['elx', 'ely', 'ep', 'D'], {}), '(elx, ely, ep, D)\n', (2303, 2320), True, 'import calfem.core as cfc\n'), ((3163, 3211), 'calfem.core.flw2i8s', 'cfc.flw2i8s', (['ex[i, :]', 'ey[i, :]', 'ep', 'D', 'ed[i, :]'], 
{}), '(ex[i, :], ey[i, :], ep, D, ed[i, :])\n', (3174, 3211), True, 'import calfem.core as cfc\n')] |
#%%
# Character-level RNN sampling script. Loads a pickled CharRNN model and its
# vocabulary, optionally primes the hidden state with --primetext, then
# streams generated characters to stdout.
# NOTE(review): this is Python 2 code (cPickle, unicode(), xrange, bare
# ``print``) — it will not run unmodified under Python 3.
import time
import math
import sys
import argparse
import cPickle as pickle
import codecs
import numpy as np
from chainer import cuda, Variable, FunctionSet
import chainer.functions as F
from CharRNN import CharRNN, make_initial_state
# Wrap stdout so unicode characters are encoded as UTF-8 when written.
sys.stdout = codecs.getwriter('utf_8')(sys.stdout)
#%% arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True)
parser.add_argument('--vocabulary', type=str, required=True)
parser.add_argument('--seed', type=int, default=123)
parser.add_argument('--sample', type=int, default=1)
parser.add_argument('--primetext', type=str, default='')
parser.add_argument('--length', type=int, default=2000)
parser.add_argument('--gpu', type=int, default=-1)
args = parser.parse_args()
np.random.seed(args.seed)
# load vocabulary
vocab = pickle.load(open(args.vocabulary, 'rb'))
# Invert char -> id into id -> char for decoding sampled indices.
ivocab = {}
for c, i in vocab.items():
    ivocab[i] = c
# load model
model = pickle.load(open(args.model, 'rb'))
n_units = model.embed.W.data.shape[1]  # hidden size read off the embedding
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()
# initialize generator
state = make_initial_state(n_units, batchsize=1, train=False)
if args.gpu >= 0:
    for key, value in state.items():
        value.data = cuda.to_gpu(value.data)
# Seed input: character id 0 unless a primetext overrides it below.
prev_char = np.array([0], dtype=np.int32)
if args.gpu >= 0:
    prev_char = cuda.to_gpu(prev_char)
if len(args.primetext) > 0:
    # Feed the prime text through the network one character at a time,
    # echoing it to stdout while warming up the hidden state.
    for i in unicode(args.primetext, 'utf-8'):
        sys.stdout.write(i)
        prev_char = np.ones((1,), dtype=np.int32) * vocab[i]
        if args.gpu >= 0:
            prev_char = cuda.to_gpu(prev_char)
        state, prob = model.forward_one_step(prev_char, prev_char, state, train=False)
for i in xrange(args.length):
    state, prob = model.forward_one_step(prev_char, prev_char, state, train=False)
    if args.sample > 0:
        # Stochastic sampling from the renormalized output distribution.
        probability = cuda.to_cpu(prob.data)[0].astype(np.float64)
        probability /= np.sum(probability)
        index = np.random.choice(range(len(probability)), p=probability)
    else:
        # Greedy decoding: always take the most probable character.
        index = np.argmax(cuda.to_cpu(prob.data))
    sys.stdout.write(ivocab[index])
    prev_char = np.array([index], dtype=np.int32)
    if args.gpu >= 0:
        prev_char = cuda.to_gpu(prev_char)
# Python 2 print statement: emits a trailing newline.
print
| [
"sys.stdout.write",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"chainer.cuda.get_device",
"codecs.getwriter",
"numpy.ones",
"chainer.cuda.to_cpu",
"numpy.array",
"chainer.cuda.to_gpu",
"CharRNN.make_initial_state"
] | [((316, 341), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (339, 341), False, 'import argparse\n'), ((801, 826), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (815, 826), True, 'import numpy as np\n'), ((1154, 1207), 'CharRNN.make_initial_state', 'make_initial_state', (['n_units'], {'batchsize': '(1)', 'train': '(False)'}), '(n_units, batchsize=1, train=False)\n', (1172, 1207), False, 'from CharRNN import CharRNN, make_initial_state\n'), ((1321, 1350), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), '([0], dtype=np.int32)\n', (1329, 1350), True, 'import numpy as np\n'), ((254, 279), 'codecs.getwriter', 'codecs.getwriter', (['"""utf_8"""'], {}), "('utf_8')\n", (270, 279), False, 'import codecs\n'), ((1385, 1407), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['prev_char'], {}), '(prev_char)\n', (1396, 1407), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((2120, 2151), 'sys.stdout.write', 'sys.stdout.write', (['ivocab[index]'], {}), '(ivocab[index])\n', (2136, 2151), False, 'import sys\n'), ((2169, 2202), 'numpy.array', 'np.array', (['[index]'], {'dtype': 'np.int32'}), '([index], dtype=np.int32)\n', (2177, 2202), True, 'import numpy as np\n'), ((1284, 1307), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['value.data'], {}), '(value.data)\n', (1295, 1307), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((1492, 1511), 'sys.stdout.write', 'sys.stdout.write', (['i'], {}), '(i)\n', (1508, 1511), False, 'import sys\n'), ((1963, 1982), 'numpy.sum', 'np.sum', (['probability'], {}), '(probability)\n', (1969, 1982), True, 'import numpy as np\n'), ((2245, 2267), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['prev_char'], {}), '(prev_char)\n', (2256, 2267), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((1071, 1096), 'chainer.cuda.get_device', 'cuda.get_device', (['args.gpu'], {}), '(args.gpu)\n', (1086, 1096), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((1532, 
1561), 'numpy.ones', 'np.ones', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (1539, 1561), True, 'import numpy as np\n'), ((1623, 1645), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['prev_char'], {}), '(prev_char)\n', (1634, 1645), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((2092, 2114), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['prob.data'], {}), '(prob.data)\n', (2103, 2114), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((1895, 1917), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['prob.data'], {}), '(prob.data)\n', (1906, 1917), False, 'from chainer import cuda, Variable, FunctionSet\n')] |
import numpy as np
def dichoto(function, p0, max_depth=10, eps=1e-10):
    """Find a root of ``function`` on the bracket ``p0`` by bisection.

    Assumes ``function`` is negative on the low side of the bracket and
    positive on the high side (or touches zero inside it).

    Parameters
    ----------
    function : callable
        Scalar function whose root is sought.
    p0 : tuple of float
        Initial bracket ``(a, b)``.
    max_depth : int, optional
        Maximum number of bisection steps (default 10).
    eps : float, optional
        Early-exit tolerance on ``|function(c)|`` (default 1e-10).

    Returns
    -------
    float
        The last midpoint computed; within ``(b - a) / 2**max_depth`` of the
        root when the sign assumption holds.
    """
    a, b = p0
    # Initialize c up front: the original left it unbound (NameError) when
    # max_depth <= 0.
    c = 0.5 * (a + b)
    for _ in range(max_depth):
        c = 0.5 * (a + b)
        fc = function(c)  # evaluate once per step (was evaluated 3x per step)
        if abs(fc) <= eps:
            return c
        if fc < 0:
            a = c
        elif fc > 0:
            b = c
    return c
# Locate the golden ratio as the positive root of x**2 - 1 - x on (1, 2).
def golden_eq(x):
    return x ** 2 - 1 - x

root = dichoto(golden_eq, (1, 2))
print("The golden ratio is : {}".format(root))

# Solve tan(x) = 1 on a bracket just below pi/4.
def tan_eq(x):
    return np.tan(x) - 1

root = dichoto(tan_eq, (0.5, 3.15 / 4))
print("The solution to tan(x)=1 is : {}".format(root))

# Solve (x - 2)**2 = 0; the bracket midpoint lands on the root immediately.
def square_eq(x):
    return (x - 2) ** 2

root = dichoto(square_eq, (1, 3))
print("The solution to (x-2)^2=0 is : {}".format(root))
"numpy.tan"
] | [((472, 481), 'numpy.tan', 'np.tan', (['x'], {}), '(x)\n', (478, 481), True, 'import numpy as np\n')] |
import unittest
import pandas as pd
import numpy as np
from dask import dataframe as dd
from .normalize_functions import (
encode_objects_general,
normalize_general,
normalize_chex,
)
class NormalizeTests(unittest.TestCase):
    """Tests for the general encode/normalize helpers, exercised against both
    pandas and dask backends."""

    def test_encode_objects_general(self):
        """Object columns should be label-encoded to integer codes 0..n-1."""
        forward = ["alpha", "bravo", "charlie", "delta", "echo"]
        backward = list(reversed(forward))
        numbers = list(range(len(forward)))
        columns = ["A", "B", "C"]
        raw = dict(zip(columns, (forward, backward, numbers)))

        codes = np.arange(0, 5)
        expected = np.column_stack((codes.T, codes[::-1].T, codes.T))

        with self.subTest("Pandas test"):
            frame = pd.DataFrame.from_dict(data=raw)
            encoded = encode_objects_general(frame, ["A", "B"])
            truth = pd.DataFrame(expected, columns=columns)
            self.assertTrue(encoded.eq(truth).all(axis=None))

        with self.subTest("Dask test"):
            frame = dd.from_pandas(pd.DataFrame.from_dict(data=raw), npartitions=1)
            encoded = encode_objects_general(frame, ["A", "B"])
            truth = dd.from_array(expected, columns=columns)
            self.assertTrue(encoded.eq(truth).compute().all(axis=None))

    def test_normalize_general(self):
        """Normalization should map the 0..4 ramp onto the range [-1, 1]."""
        codes = np.arange(0, 5)
        raw = np.column_stack((codes.T, codes[::-1].T, codes.T))
        columns = ["A", "B", "C"]
        scaled = np.arange(-1, 1.5, 0.5)
        expected = np.column_stack((scaled.T, scaled[::-1].T, scaled.T))

        with self.subTest("Pandas test"):
            frame = pd.DataFrame(raw, columns=columns)
            result = normalize_general(frame, columns)
            truth = pd.DataFrame(expected, columns=columns)
            self.assertTrue(result.eq(truth).all(axis=None))

        with self.subTest("Dask test"):
            frame = dd.from_array(raw, columns=columns)
            result = normalize_general(frame, columns)
            truth = dd.from_array(expected, columns=columns)
            self.assertTrue(result.eq(truth).compute().all(axis=None))
# Allow running this test module directly with ``python <module>``.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"pandas.DataFrame",
"pandas.DataFrame.from_dict",
"numpy.arange",
"numpy.column_stack",
"dask.dataframe.from_array"
] | [((2737, 2752), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2750, 2752), False, 'import unittest\n'), ((690, 705), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (699, 705), True, 'import numpy as np\n'), ((727, 786), 'numpy.column_stack', 'np.column_stack', (['(sequence.T, sequence[::-1].T, sequence.T)'], {}), '((sequence.T, sequence[::-1].T, sequence.T))\n', (742, 786), True, 'import numpy as np\n'), ((1679, 1694), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (1688, 1694), True, 'import numpy as np\n'), ((1716, 1775), 'numpy.column_stack', 'np.column_stack', (['(sequence.T, sequence[::-1].T, sequence.T)'], {}), '((sequence.T, sequence[::-1].T, sequence.T))\n', (1731, 1775), True, 'import numpy as np\n'), ((1836, 1859), 'numpy.arange', 'np.arange', (['(-1)', '(1.5)', '(0.5)'], {}), '(-1, 1.5, 0.5)\n', (1845, 1859), True, 'import numpy as np\n'), ((1879, 1947), 'numpy.column_stack', 'np.column_stack', (['(gt_sequence.T, gt_sequence[::-1].T, gt_sequence.T)'], {}), '((gt_sequence.T, gt_sequence[::-1].T, gt_sequence.T))\n', (1894, 1947), True, 'import numpy as np\n'), ((852, 890), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ([], {'data': 'data_dict'}), '(data=data_dict)\n', (874, 890), True, 'import pandas as pd\n'), ((1000, 1042), 'pandas.DataFrame', 'pd.DataFrame', (['test_array'], {'columns': 'colnames'}), '(test_array, columns=colnames)\n', (1012, 1042), True, 'import pandas as pd\n'), ((1440, 1483), 'dask.dataframe.from_array', 'dd.from_array', (['test_array'], {'columns': 'colnames'}), '(test_array, columns=colnames)\n', (1453, 1483), True, 'from dask import dataframe as dd\n'), ((2007, 2049), 'pandas.DataFrame', 'pd.DataFrame', (['test_array'], {'columns': 'colnames'}), '(test_array, columns=colnames)\n', (2019, 2049), True, 'import pandas as pd\n'), ((2171, 2211), 'pandas.DataFrame', 'pd.DataFrame', (['gt_array'], {'columns': 'colnames'}), '(gt_array, columns=colnames)\n', (2183, 2211), True, 'import 
pandas as pd\n'), ((2378, 2421), 'dask.dataframe.from_array', 'dd.from_array', (['test_array'], {'columns': 'colnames'}), '(test_array, columns=colnames)\n', (2391, 2421), True, 'from dask import dataframe as dd\n'), ((2543, 2584), 'dask.dataframe.from_array', 'dd.from_array', (['gt_array'], {'columns': 'colnames'}), '(gt_array, columns=colnames)\n', (2556, 2584), True, 'from dask import dataframe as dd\n'), ((1263, 1301), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ([], {'data': 'data_dict'}), '(data=data_dict)\n', (1285, 1301), True, 'import pandas as pd\n')] |
import os
import numpy as np
import h5py as h5
import tensorflow as tf
def _int64_feature(value):
    """Wrap a single integer in a ``tf.train.Feature`` holding an int64 list."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrap a single bytes object in a ``tf.train.Feature`` holding a bytes list."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _floats_feature(value):
    """Wrap a sequence of floats in a ``tf.train.Feature`` holding a float list."""
    float_list = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=float_list)
# Input data: convert each per-stage HDF5 file (images + parameter vectors)
# into a TFRecord file alongside it.
folder_in = "/home/flo/PycharmProjects/21cm/Data/high_res/Numpy/Downscaled"
stages = range(1, 8)
for stage in stages:
    # Load the (already shuffled) HDF5 stage file fully into memory.
    this_file = os.path.join(folder_in, "fl" + str(stage) + "_shuffled.h5")
    with h5.File(this_file, 'r') as hf:
        Y = np.asarray(hf["data"])      # image array
        X = np.asarray(hf["params"])    # parameter vectors
    print("File '" + this_file + "' loaded. Size of image array in memory: " + str(Y.nbytes // 1e6) + " MB.")
    name = "train.tfrecords_" + str(stage)
    filename = os.path.join(folder_in, name)
    n_samples = X.shape[0]
    # Use the writer as a context manager so the record file is flushed and
    # closed even if serialization fails part-way (the original writer was
    # never closed).
    with tf.python_io.TFRecordWriter(filename) as tfrecord_writer:
        for index in range(n_samples):
            # 1. Convert data into tf.train.Feature
            Y_raw = Y[index].flatten()
            X_raw = X[index].flatten()
            feature = {
                'params_raw': _floats_feature(X_raw),
                'image_raw': _floats_feature(Y_raw)
            }
            # 2. Create a tf.train.Features
            features = tf.train.Features(feature=feature)
            # 3. Create an example protocol buffer
            example = tf.train.Example(features=features)
            # 4. Serialize the Example to string
            example_to_string = example.SerializeToString()
            # 5. Write to TFRecord
            tfrecord_writer.write(example_to_string)
# Test
# filename = '/home/flo/PycharmProjects/21cm/Data/high_res/Numpy/Downscaled/train.tfrecords_1'
# def decode(serialized_example):
# # 1. define a parser
# features = tf.parse_single_example(
# serialized_example,
# # Defaults are not specified since both keys are required.
# features={
# 'params_raw': tf.VarLenFeature(tf.float32),
# 'image_raw': tf.VarLenFeature(tf.float32),
# })
#
# # 2. Convert the data
# image = tf.sparse_tensor_to_dense(features['image_raw'], default_value=0)
# params = tf.sparse_tensor_to_dense(features['params_raw'], default_value=0)
#
# # 3. Reshape
# image.set_shape((8))
# image = tf.reshape(image, [1, 8])
# params.set_shape(3)
# return image, params
#
# dataset = tf.data.TFRecordDataset(filename)
# dataset = dataset.map(decode)
| [
"tensorflow.train.BytesList",
"h5py.File",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.Int64List",
"tensorflow.train.Example",
"numpy.asarray",
"tensorflow.train.Features",
"tensorflow.train.FloatList",
"os.path.join"
] | [((590, 613), 'h5py.File', 'h5.File', (['this_file', '"""r"""'], {}), "(this_file, 'r')\n", (597, 613), True, 'import h5py as h5\n'), ((633, 655), 'numpy.asarray', 'np.asarray', (["hf['data']"], {}), "(hf['data'])\n", (643, 655), True, 'import numpy as np\n'), ((668, 692), 'numpy.asarray', 'np.asarray', (["hf['params']"], {}), "(hf['params'])\n", (678, 692), True, 'import numpy as np\n'), ((874, 903), 'os.path.join', 'os.path.join', (['folder_in', 'name'], {}), '(folder_in, name)\n', (886, 903), False, 'import os\n'), ((930, 967), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['filename'], {}), '(filename)\n', (957, 967), True, 'import tensorflow as tf\n'), ((136, 169), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (154, 169), True, 'import tensorflow as tf\n'), ((236, 269), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (254, 269), True, 'import tensorflow as tf\n'), ((339, 370), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (357, 370), True, 'import tensorflow as tf\n'), ((1463, 1497), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (1480, 1497), True, 'import tensorflow as tf\n'), ((1563, 1598), 'tensorflow.train.Example', 'tf.train.Example', ([], {'features': 'features'}), '(features=features)\n', (1579, 1598), True, 'import tensorflow as tf\n')] |
# This implementation is based on codes of <NAME> & <NAME>
import time
import random
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm
import pickle
from base.rl import ActorCriticRLAlgorithm
from base.replay_buffer import ReplayBuffer
@tf.function
def clip_with_gradient(x, low=-1, high=1):
    """Clip ``x`` to [low, high] in the forward pass while the backward pass
    sees the identity: the clipping correction is wrapped in stop_gradient,
    so gradients flow through ``x`` unchanged."""
    above = tf.cast(x > high, tf.float32)
    below = tf.cast(x < low, tf.float32)
    correction = (high - x) * above + (low - x) * below
    return x + tf.stop_gradient(correction)
@tf.function
def apply_squashing_func(sample, logp):
    """Apply a tanh squash to a Gaussian sample and correct its log-density.

    :param sample: (tf.Tensor) action sampled from the Gaussian distribution
    :param logp: (tf.Tensor) log probability of ``sample`` before squashing
    :return: tuple of (squashed action, corrected log probability)
    """
    action = tf.tanh(sample)
    # Change-of-variables term: log|d tanh(x)/dx| = log(1 - tanh(x)^2).
    # The clip plus 1e-6 keeps the log argument strictly positive.
    jacobian = tf.math.log(clip_with_gradient(1 - action ** 2, low=0, high=1) + 1e-6)
    corrected_logp = logp - tf.reduce_sum(jacobian, axis=1)
    return action, corrected_logp
class SquashedGaussianActor(tf.keras.layers.Layer):
    """Tanh-squashed diagonal-Gaussian policy: a 64-64 MLP producing the mean
    and log-std of the action distribution, with actions rescaled by the
    environment's action bound."""

    def __init__(self, env):
        """Build the policy layers from the env's observation/action spaces."""
        super(SquashedGaussianActor, self).__init__()
        # obs_shape, action_dim,
        self.obs_shape = env.observation_space.shape
        self.action_dim = env.action_space.shape[0]
        # Scalar bound used to rescale the squashed (-1, 1) action in step().
        self.max_action = env.action_space.high[0]
        # Actor parameters
        self.l1 = tf.keras.layers.Dense(64, activation='relu', name='f0', input_shape=(None,) + self.obs_shape)
        self.l2 = tf.keras.layers.Dense(64, activation='relu', name='f1')
        self.l3_mu = tf.keras.layers.Dense(self.action_dim, name='f2_mu')
        self.l3_log_std = tf.keras.layers.Dense(self.action_dim, name='f2_log_std')

    @tf.function
    def call(self, inputs, **kwargs):
        """Sample a squashed action and its log-probability for ``inputs``.

        Returns (action in (-1, 1), logp reshaped to (batch, 1)).
        """
        h = self.l1(inputs)
        h = self.l2(h)
        mean = self.l3_mu(h)
        log_std = self.l3_log_std(h)
        std = tf.exp(log_std)
        dist = tfp.distributions.MultivariateNormalDiag(mean, std)
        sampled_action = dist.sample()
        sampled_action_logp = dist.log_prob(sampled_action)
        # Tanh-squash the sample and apply the change-of-variables correction.
        squahsed_action, squahsed_action_logp = apply_squashing_func(sampled_action, sampled_action_logp)
        return squahsed_action, tf.reshape(squahsed_action_logp, (-1,1))

    def dist(self, inputs):
        """Return the (unsquashed) Gaussian action distribution for ``inputs``."""
        h = self.l1(inputs)
        h = self.l2(h)
        mean = self.l3_mu(h)
        log_std = self.l3_log_std(h)
        std = tf.exp(log_std)
        dist = tfp.distributions.MultivariateNormalDiag(mean, std)
        return dist

    def step(self, obs, deterministic=False):
        """Produce an environment-scale action for ``obs``.

        deterministic=True uses tanh of the distribution mean; otherwise a
        stochastic sample from call(). NaNs are zeroed via nan_to_num —
        NOTE(review): this silently masks numerical blow-ups; confirm intended.
        """
        if deterministic:
            dist = self.dist(obs)
            mean_action = dist.mean().numpy()
            mean_action = np.nan_to_num(mean_action)
            squashed_action = np.tanh(mean_action)
        else:
            squashed_action, _ = self.call(obs)
            squashed_action = np.nan_to_num(squashed_action)
        # squashed_action = squashed_action.numpy()
        return squashed_action * self.max_action
class VNetwork(tf.keras.layers.Layer):
    """State-value network V(s): two ReLU hidden layers of 64 units followed
    by a linear output head."""

    def __init__(self, obs_shape, output_dim=1):
        super(VNetwork, self).__init__()
        self.v_l0 = tf.keras.layers.Dense(
            64, activation='relu', name='v/f0', input_shape=(None,) + obs_shape)
        self.v_l1 = tf.keras.layers.Dense(64, activation='relu', name='v/f1')
        self.v_l2 = tf.keras.layers.Dense(output_dim, name='v/f2')

    @tf.function
    def call(self, inputs, **kwargs):
        """Return V(inputs) by chaining the three dense layers."""
        hidden = self.v_l1(self.v_l0(inputs))
        return self.v_l2(hidden)
class QNetwork(tf.keras.layers.Layer):
    """Ensemble of ``num_critics`` action-value networks; each Q_i(s, a) is a
    64-64-1 MLP over the concatenated observation/action vector."""

    def __init__(self, obs_shape, num_critics=2):
        super(QNetwork, self).__init__()
        self.num_critics = num_critics
        self.qs_l0 = []
        self.qs_l1 = []
        self.qs_l2 = []
        for idx in range(self.num_critics):
            first = tf.keras.layers.Dense(
                64, activation='relu', name='q%d/f0' % idx,
                input_shape=(None,) + obs_shape)
            second = tf.keras.layers.Dense(64, activation='relu', name='q%d/f1' % idx)
            head = tf.keras.layers.Dense(1, name='q%d/f2' % idx)
            self.qs_l0.append(first)
            self.qs_l1.append(second)
            self.qs_l2.append(head)

    @tf.function
    def call(self, inputs, **kwargs):
        """Return the list [Q_0(s, a), ..., Q_{k-1}(s, a)] for inputs=(obs, action)."""
        obs, action = inputs
        joined = tf.concat([obs, action], axis=1)
        return [
            self.qs_l2[i](self.qs_l1[i](self.qs_l0[i](joined)))
            for i in range(self.num_critics)
        ]
class SAC(ActorCriticRLAlgorithm):
    """Soft Actor-Critic with a squashed-Gaussian policy, twin Q critics, a
    separate V network with a Polyak-averaged target, and optional automatic
    entropy-coefficient tuning (``ent_coef='auto'``)."""

    def __init__(self, env, test_env, policy_class=SquashedGaussianActor,
                 ent_coef='auto', reward_scale=1, seed=0):
        """Set seeds, hyperparameters, replay buffer, networks and optimizers.

        :param env: training environment (gym-style API)
        :param test_env: environment used for evaluation rollouts
        :param policy_class: actor class, constructed as policy_class(env)
        :param ent_coef: 'auto' to learn the entropy coefficient, or a fixed float
        :param reward_scale: multiplier applied to rewards in train()
        :param seed: seed for tf / numpy / random
        """
        super(SAC, self).__init__(policy_class=policy_class, env=env, test_env=test_env)
        self.seed = seed
        tf.random.set_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        self.env = env
        self.test_env = test_env
        # Scalar action bound; train() divides stored actions by it to get
        # the normalized (-1, 1) range the actor produces.
        self.max_action = self.env.action_space.high[0]
        self.reward_scale = reward_scale
        self.obs_shape = self.env.observation_space.shape
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.shape[0]
        self.replay_buffer = ReplayBuffer(size=64000)
        self.num_critics = 2
        self.gamma = 0.99        # discount factor
        self.tau = 0.05          # Polyak coefficient for the V target update
        self.learning_rate = 3e-4
        self.batch_size = 256
        # Standard SAC heuristic: target entropy = -|A|
        self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)
        self.ent_coef = ent_coef
        # self.optimizer_variables = []
        # Labels matching (in order) the tuple returned by train().
        self.info_labels = ['actor_loss', 'v_loss', 'q_loss', 'mean(v)',
                            'mean(qs)', 'ent_coef', 'entropy', 'logp_pi']
        # Entropy coefficient (auto or fixed)
        if isinstance(self.ent_coef, str) and self.ent_coef == 'auto':
            # Default initial value of ent_coef when learned
            init_value = 1.0
            # Optimize in log space so the coefficient stays positive.
            self.log_ent_coef = tf.keras.backend.variable(init_value, dtype=tf.float32, name='log_ent_coef')
            self.ent_coefficient = tf.exp(self.log_ent_coef)
            self.entropy_variables = [self.log_ent_coef]
        else:
            self.log_ent_coef = tf.math.log(self.ent_coef)
            self.ent_coefficient = tf.constant(self.ent_coef)
        # Actor, Critic Networks
        self.actor = policy_class(self.env)
        self.v = VNetwork(self.obs_shape)
        self.q = QNetwork(self.obs_shape, num_critics=self.num_critics)
        self.v_target = VNetwork(self.obs_shape)
        self.actor_variables = self.actor.trainable_variables
        self.critic_variables = self.v.trainable_variables + self.q.trainable_variables
        self.actor_optimizer = tf.keras.optimizers.Adam(self.learning_rate)
        self.critic_optimizer = tf.keras.optimizers.Adam(self.learning_rate)
        if isinstance(ent_coef, str) and ent_coef == 'auto':
            self.entropy_optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
        self.optimizer_variables = self.actor.trainable_variables + self.v.trainable_variables + \
                                   self.q.trainable_variables + self.v_target.trainable_variables

    # @tf.function
    def update_target(self, target_params, source_params):
        """Polyak-average source parameters into the target network:
        target <- (1 - tau) * target + tau * source."""
        for target, source in zip(target_params, source_params):
            tf.keras.backend.set_value(target, (1 - self.tau) * target + self.tau * source)

    @tf.function
    def initialize_variables(self):
        """Run each network once on zero inputs so all layer variables are built."""
        zero_like_state = tf.zeros((1,) + self.obs_shape)
        zero_like_action = tf.zeros((1,self.action_dim))
        self.actor(zero_like_state)
        self.v(zero_like_state)
        self.v_target(zero_like_state)
        self.q(inputs=(zero_like_state, zero_like_action))

    @tf.function
    def train(self, obs, action, reward, next_obs, done):
        """One SAC gradient step on a sampled batch.

        Actions arrive in environment scale and are normalized by max_action;
        rewards are multiplied by reward_scale. Returns the diagnostics tuple
        described by ``self.info_labels``.
        """
        # Casting from float64 to float32
        obs = tf.cast(obs, tf.float32)
        action = tf.cast(action, tf.float32) / self.max_action
        reward = tf.cast(reward, tf.float32)[:, None] * self.reward_scale
        next_obs = tf.cast(next_obs, tf.float32)
        done = tf.cast(done, tf.float32)[:, None]
        dist = self.actor.dist(obs)
        with tf.GradientTape() as tape_actor:
            # Actor training (pi)
            action_pi, logp_pi = self.actor.call(obs)
            qs_pi = self.q.call(inputs=(obs, action_pi))
            # min_q_target = tf.reduce_min(qs_pi, axis=0)
            # NOTE(review): the actor loss uses only the first critic
            # (qs_pi[0]); standard SAC uses the minimum over critics — the
            # commented line above suggests that was considered. Confirm
            # whether this is intentional.
            actor_loss = tf.reduce_mean(tf.math.exp(self.log_ent_coef) * logp_pi - qs_pi[0])
        actor_variables = self.actor.trainable_variables
        grads_actor = tape_actor.gradient(actor_loss, actor_variables)
        actor_op = self.actor_optimizer.apply_gradients(zip(grads_actor, actor_variables))
        # Critic targets are computed after the actor update is enqueued.
        with tf.control_dependencies([actor_op]):
            v_target = self.v_target(next_obs)
            min_q_pi = tf.reduce_min(qs_pi, axis=0)  # (batch, 1)
            # Soft value target: min Q minus the entropy bonus term.
            v_backup = tf.stop_gradient(min_q_pi - tf.math.exp(self.log_ent_coef) * logp_pi)  # (batch, 1)
            q_backup = tf.stop_gradient(reward + (1 - done) * self.gamma * v_target)  # (batch, 1)
            with tf.GradientTape() as tape_critic:
                # Critic training (V, Q)
                v = self.v(obs)
                v_loss = 0.5 * tf.reduce_mean((v_backup - v) ** 2)  # MSE, scalar
                qs = self.q(inputs=(obs, action))
                q_losses = [0.5 * tf.reduce_mean((q_backup - qs[k]) ** 2) for k in range(self.num_critics)]  # (2, batch)
                q_loss = tf.reduce_sum(q_losses, axis=0)  # scalar
                value_loss = v_loss + q_loss
            critic_variables = self.v.trainable_variables + self.q.trainable_variables
            grads_critic = tape_critic.gradient(value_loss, critic_variables)
            self.critic_optimizer.apply_gradients(zip(grads_critic, critic_variables))
        if isinstance(self.ent_coef, str) and self.ent_coef == 'auto':
            # Entropy-coefficient update: push the policy entropy toward
            # target_entropy by adjusting log_ent_coef.
            with tf.GradientTape() as tape_ent:
                ent_coef_loss = -tf.reduce_mean(self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy))
            entropy_variables = [self.log_ent_coef]
            grads_ent = tape_ent.gradient(ent_coef_loss, entropy_variables)
            self.entropy_optimizer.apply_gradients(zip(grads_ent, entropy_variables))
        return actor_loss, tf.reduce_mean(v_loss), tf.reduce_mean(q_loss), tf.reduce_mean(v), tf.reduce_mean(qs), \
               tf.math.exp(self.log_ent_coef), tf.reduce_mean(dist.entropy()), tf.reduce_mean(logp_pi)

    def learn(self, total_timesteps, log_interval=640, callback=None, verbose=1,
              eval_interval=5000, eval_rollout=True, save_path=None, save_interval=500000):
        """Main training loop: collect transitions, train once per env step
        (when the buffer is large enough), periodically log/evaluate/save.

        Returns the list of evaluation returns collected every eval_interval.
        """
        self.initialize_variables()
        # Hard-copy V weights into the target network before training starts.
        for target, source in zip(self.v_target.trainable_variables, self.v.trainable_variables):
            tf.keras.backend.set_value(target, source.numpy())
        start_time = time.time()
        episode_rewards = []
        eval_rewards = []
        obs = self.env.reset()
        current_episode_reward = 0
        for step in tqdm(range(total_timesteps), desc='SAC', ncols=70):
            # A callback returning False aborts training early.
            if callback is not None:
                if callback(locals(), globals()) is False:
                    break
            # Take an action
            action = np.reshape(self.predict(np.array([obs]), deterministic=False)[0], -1)
            next_obs, reward, done, _ = self.env.step(action)
            # Store transition in the replay buffer.
            self.replay_buffer.add(obs, action, reward, next_obs, float(done))
            obs = next_obs
            current_episode_reward += reward
            if done:
                obs = self.env.reset()
                episode_rewards.append(current_episode_reward)
                current_episode_reward = 0.0
            if self.replay_buffer.can_sample(self.batch_size):
                obss, actions, rewards, next_obss, dones = self.replay_buffer.sample(self.batch_size)  # action is normalize
                step_info = self.train(obss, actions, rewards, next_obss, dones)
                if verbose >= 1 and step % log_interval == 0:
                    print('\n============================')
                    print('%15s: %10.6f' % ('10ep_rewmean', np.mean(episode_rewards[-10:])))
                    for i, label in enumerate(self.info_labels):
                        print('%15s: %10.6f' %(label, step_info[i].numpy()))
                    print('============================\n')
                # Polyak-update the V target after every gradient step.
                self.update_target(self.v_target.trainable_variables, self.v.trainable_variables)
            if step % eval_interval == 0:
                if eval_rollout:
                    eval_rewards.append(self.evaluate(1))
                else:
                    eval_rewards.append(episode_rewards[-1])
            if step % save_interval == 0 and save_path is not None:
                print('** Saving models and evaluation returns..')
                np.save(save_path + "/%s_rews_seed%d_iter%d.npy"%(self.env.spec.id, self.seed, step),
                        np.array(eval_rewards))
                self.save(save_path + "/%s_model_seed%d.zip" % (self.env.spec.id, self.seed) )
        return eval_rewards

    def predict(self, obs, deterministic=False):
        """Return (action, None) for a single observation or a batch.

        A rank-1 obs is wrapped into a batch of one and the single action is
        unwrapped on return; the second tuple element exists for API
        compatibility with recurrent policies.
        """
        obs_rank = len(obs.shape)
        if len(obs.shape) == 1:
            obs = np.array([obs])
        assert len(obs.shape) == 2
        action = self.actor.step(obs, deterministic=deterministic)
        # action = np.clip(action, self.action_space.low, self.action_space.high)
        if obs_rank == 1:
            return action[0], None
        else:
            return action, None

    def load(self, filepath):
        """Build network variables, then restore pickled parameters from filepath."""
        self.initialize_variables()
        with open(filepath, 'rb') as f:
            parameters = pickle.load(f)
        self.load_parameters(parameters)
| [
"tensorflow.random.set_seed",
"tensorflow.keras.backend.set_value",
"tensorflow.reduce_sum",
"numpy.random.seed",
"numpy.nan_to_num",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"pickle.load",
"numpy.mean",
"base.replay_buffer.ReplayBuffer",
"numpy.prod",
"tensorflow.math.log",
"t... | [((361, 390), 'tensorflow.cast', 'tf.cast', (['(x > high)', 'tf.float32'], {}), '(x > high, tf.float32)\n', (368, 390), True, 'import tensorflow as tf\n'), ((406, 434), 'tensorflow.cast', 'tf.cast', (['(x < low)', 'tf.float32'], {}), '(x < low, tf.float32)\n', (413, 434), True, 'import tensorflow as tf\n'), ((860, 875), 'tensorflow.tanh', 'tf.tanh', (['sample'], {}), '(sample)\n', (867, 875), True, 'import tensorflow as tf\n'), ((450, 513), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['((high - x) * clip_high + (low - x) * clip_low)'], {}), '((high - x) * clip_high + (low - x) * clip_low)\n', (466, 513), True, 'import tensorflow as tf\n'), ((1498, 1595), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""f0"""', 'input_shape': '((None,) + self.obs_shape)'}), "(64, activation='relu', name='f0', input_shape=(None,) +\n self.obs_shape)\n", (1519, 1595), True, 'import tensorflow as tf\n'), ((1610, 1665), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""f1"""'}), "(64, activation='relu', name='f1')\n", (1631, 1665), True, 'import tensorflow as tf\n'), ((1687, 1739), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.action_dim'], {'name': '"""f2_mu"""'}), "(self.action_dim, name='f2_mu')\n", (1708, 1739), True, 'import tensorflow as tf\n'), ((1766, 1823), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.action_dim'], {'name': '"""f2_log_std"""'}), "(self.action_dim, name='f2_log_std')\n", (1787, 1823), True, 'import tensorflow as tf\n'), ((2019, 2034), 'tensorflow.exp', 'tf.exp', (['log_std'], {}), '(log_std)\n', (2025, 2034), True, 'import tensorflow as tf\n'), ((2060, 2111), 'tensorflow_probability.distributions.MultivariateNormalDiag', 'tfp.distributions.MultivariateNormalDiag', (['mean', 'std'], {}), '(mean, std)\n', (2100, 2111), True, 'import tensorflow_probability as tfp\n'), ((2559, 2574), 
'tensorflow.exp', 'tf.exp', (['log_std'], {}), '(log_std)\n', (2565, 2574), True, 'import tensorflow as tf\n'), ((2590, 2641), 'tensorflow_probability.distributions.MultivariateNormalDiag', 'tfp.distributions.MultivariateNormalDiag', (['mean', 'std'], {}), '(mean, std)\n', (2630, 2641), True, 'import tensorflow_probability as tfp\n'), ((3315, 3410), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""v/f0"""', 'input_shape': '((None,) + obs_shape)'}), "(64, activation='relu', name='v/f0', input_shape=(None\n ,) + obs_shape)\n", (3336, 3410), True, 'import tensorflow as tf\n'), ((3426, 3483), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""v/f1"""'}), "(64, activation='relu', name='v/f1')\n", (3447, 3483), True, 'import tensorflow as tf\n'), ((3504, 3550), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dim'], {'name': '"""v/f2"""'}), "(output_dim, name='v/f2')\n", (3525, 3550), True, 'import tensorflow as tf\n'), ((4391, 4423), 'tensorflow.concat', 'tf.concat', (['[obs, action]'], {'axis': '(1)'}), '([obs, action], axis=1)\n', (4400, 4423), True, 'import tensorflow as tf\n'), ((4937, 4961), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (4955, 4961), True, 'import tensorflow as tf\n'), ((4978, 4998), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4992, 4998), True, 'import numpy as np\n'), ((5007, 5024), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5018, 5024), False, 'import random\n'), ((5392, 5416), 'base.replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {'size': '(64000)'}), '(size=64000)\n', (5404, 5416), False, 'from base.replay_buffer import ReplayBuffer\n'), ((6873, 6917), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['self.learning_rate'], {}), '(self.learning_rate)\n', (6897, 6917), True, 'import tensorflow as tf\n'), ((6950, 
6994), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['self.learning_rate'], {}), '(self.learning_rate)\n', (6974, 6994), True, 'import tensorflow as tf\n'), ((7692, 7723), 'tensorflow.zeros', 'tf.zeros', (['((1,) + self.obs_shape)'], {}), '((1,) + self.obs_shape)\n', (7700, 7723), True, 'import tensorflow as tf\n'), ((7751, 7781), 'tensorflow.zeros', 'tf.zeros', (['(1, self.action_dim)'], {}), '((1, self.action_dim))\n', (7759, 7781), True, 'import tensorflow as tf\n'), ((8110, 8134), 'tensorflow.cast', 'tf.cast', (['obs', 'tf.float32'], {}), '(obs, tf.float32)\n', (8117, 8134), True, 'import tensorflow as tf\n'), ((8292, 8321), 'tensorflow.cast', 'tf.cast', (['next_obs', 'tf.float32'], {}), '(next_obs, tf.float32)\n', (8299, 8321), True, 'import tensorflow as tf\n'), ((11309, 11320), 'time.time', 'time.time', ([], {}), '()\n', (11318, 11320), False, 'import time\n'), ((2358, 2399), 'tensorflow.reshape', 'tf.reshape', (['squahsed_action_logp', '(-1, 1)'], {}), '(squahsed_action_logp, (-1, 1))\n', (2368, 2399), True, 'import tensorflow as tf\n'), ((2854, 2880), 'numpy.nan_to_num', 'np.nan_to_num', (['mean_action'], {}), '(mean_action)\n', (2867, 2880), True, 'import numpy as np\n'), ((2911, 2931), 'numpy.tanh', 'np.tanh', (['mean_action'], {}), '(mean_action)\n', (2918, 2931), True, 'import numpy as np\n'), ((3025, 3055), 'numpy.nan_to_num', 'np.nan_to_num', (['squashed_action'], {}), '(squashed_action)\n', (3038, 3055), True, 'import numpy as np\n'), ((6110, 6186), 'tensorflow.keras.backend.variable', 'tf.keras.backend.variable', (['init_value'], {'dtype': 'tf.float32', 'name': '"""log_ent_coef"""'}), "(init_value, dtype=tf.float32, name='log_ent_coef')\n", (6135, 6186), True, 'import tensorflow as tf\n'), ((6222, 6247), 'tensorflow.exp', 'tf.exp', (['self.log_ent_coef'], {}), '(self.log_ent_coef)\n', (6228, 6247), True, 'import tensorflow as tf\n'), ((6352, 6378), 'tensorflow.math.log', 'tf.math.log', (['self.ent_coef'], {}), 
'(self.ent_coef)\n', (6363, 6378), True, 'import tensorflow as tf\n'), ((6414, 6440), 'tensorflow.constant', 'tf.constant', (['self.ent_coef'], {}), '(self.ent_coef)\n', (6425, 6440), True, 'import tensorflow as tf\n'), ((7102, 7160), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7126, 7160), True, 'import tensorflow as tf\n'), ((7524, 7603), 'tensorflow.keras.backend.set_value', 'tf.keras.backend.set_value', (['target', '((1 - self.tau) * target + self.tau * source)'], {}), '(target, (1 - self.tau) * target + self.tau * source)\n', (7550, 7603), True, 'import tensorflow as tf\n'), ((8152, 8179), 'tensorflow.cast', 'tf.cast', (['action', 'tf.float32'], {}), '(action, tf.float32)\n', (8159, 8179), True, 'import tensorflow as tf\n'), ((8337, 8362), 'tensorflow.cast', 'tf.cast', (['done', 'tf.float32'], {}), '(done, tf.float32)\n', (8344, 8362), True, 'import tensorflow as tf\n'), ((8423, 8440), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8438, 8440), True, 'import tensorflow as tf\n'), ((9009, 9044), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[actor_op]'], {}), '([actor_op])\n', (9032, 9044), True, 'import tensorflow as tf\n'), ((9128, 9156), 'tensorflow.reduce_min', 'tf.reduce_min', (['qs_pi'], {'axis': '(0)'}), '(qs_pi, axis=0)\n', (9141, 9156), True, 'import tensorflow as tf\n'), ((9299, 9360), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(reward + (1 - done) * self.gamma * v_target)'], {}), '(reward + (1 - done) * self.gamma * v_target)\n', (9315, 9360), True, 'import tensorflow as tf\n'), ((10713, 10735), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['v_loss'], {}), '(v_loss)\n', (10727, 10735), True, 'import tensorflow as tf\n'), ((10737, 10759), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q_loss'], {}), '(q_loss)\n', (10751, 10759), True, 'import tensorflow as tf\n'), ((10761, 10778), 
'tensorflow.reduce_mean', 'tf.reduce_mean', (['v'], {}), '(v)\n', (10775, 10778), True, 'import tensorflow as tf\n'), ((10780, 10798), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['qs'], {}), '(qs)\n', (10794, 10798), True, 'import tensorflow as tf\n'), ((10817, 10847), 'tensorflow.math.exp', 'tf.math.exp', (['self.log_ent_coef'], {}), '(self.log_ent_coef)\n', (10828, 10847), True, 'import tensorflow as tf\n'), ((10881, 10904), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['logp_pi'], {}), '(logp_pi)\n', (10895, 10904), True, 'import tensorflow as tf\n'), ((13937, 13952), 'numpy.array', 'np.array', (['[obs]'], {}), '([obs])\n', (13945, 13952), True, 'import numpy as np\n'), ((14407, 14421), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14418, 14421), False, 'import pickle\n'), ((4013, 4114), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': "('q%d/f0' % i)", 'input_shape': '((None,) + obs_shape)'}), "(64, activation='relu', name='q%d/f0' % i, input_shape\n =(None,) + obs_shape)\n", (4034, 4114), True, 'import tensorflow as tf\n'), ((4141, 4204), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': "('q%d/f1' % i)"}), "(64, activation='relu', name='q%d/f1' % i)\n", (4162, 4204), True, 'import tensorflow as tf\n'), ((4236, 4279), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'name': "('q%d/f2' % i)"}), "(1, name='q%d/f2' % i)\n", (4257, 4279), True, 'import tensorflow as tf\n'), ((8216, 8243), 'tensorflow.cast', 'tf.cast', (['reward', 'tf.float32'], {}), '(reward, tf.float32)\n', (8223, 8243), True, 'import tensorflow as tf\n'), ((9393, 9410), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9408, 9410), True, 'import tensorflow as tf\n'), ((9843, 9874), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['q_losses'], {'axis': '(0)'}), '(q_losses, axis=0)\n', (9856, 9874), True, 'import tensorflow as tf\n'), ((5592, 
5628), 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (5599, 5628), True, 'import numpy as np\n'), ((9547, 9582), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((v_backup - v) ** 2)'], {}), '((v_backup - v) ** 2)\n', (9561, 9582), True, 'import tensorflow as tf\n'), ((10298, 10315), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10313, 10315), True, 'import tensorflow as tf\n'), ((8714, 8744), 'tensorflow.math.exp', 'tf.math.exp', (['self.log_ent_coef'], {}), '(self.log_ent_coef)\n', (8725, 8744), True, 'import tensorflow as tf\n'), ((9221, 9251), 'tensorflow.math.exp', 'tf.math.exp', (['self.log_ent_coef'], {}), '(self.log_ent_coef)\n', (9232, 9251), True, 'import tensorflow as tf\n'), ((9731, 9770), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((q_backup - qs[k]) ** 2)'], {}), '((q_backup - qs[k]) ** 2)\n', (9745, 9770), True, 'import tensorflow as tf\n'), ((11713, 11728), 'numpy.array', 'np.array', (['[obs]'], {}), '([obs])\n', (11721, 11728), True, 'import numpy as np\n'), ((13651, 13673), 'numpy.array', 'np.array', (['eval_rewards'], {}), '(eval_rewards)\n', (13659, 13673), True, 'import numpy as np\n'), ((10401, 10448), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(logp_pi + self.target_entropy)'], {}), '(logp_pi + self.target_entropy)\n', (10417, 10448), True, 'import tensorflow as tf\n'), ((12733, 12763), 'numpy.mean', 'np.mean', (['episode_rewards[-10:]'], {}), '(episode_rewards[-10:])\n', (12740, 12763), True, 'import numpy as np\n')] |
import time
import picamera
import numpy as np
import cv2
with picamera.PiCamera() as camera:
    # Capture one 320x240 frame from the Pi camera as an OpenCV-style
    # BGR numpy array.
    camera.resolution = (320, 240)
    camera.framerate = 24
    # Give the sensor time to settle its automatic exposure/white balance.
    time.sleep(2)
    # Flat byte buffer sized for 240 rows x 320 cols x 3 channels.
    image = np.empty((240 * 320 * 3,), dtype=np.uint8)
    camera.capture(image, 'bgr')
    # Reshape to (height, width, channels) for downstream cv2 use.
    image = image.reshape((240, 320, 3))
| [
"numpy.empty",
"time.sleep",
"picamera.PiCamera"
] | [((64, 83), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (81, 83), False, 'import picamera\n'), ((160, 173), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (170, 173), False, 'import time\n'), ((186, 228), 'numpy.empty', 'np.empty', (['(240 * 320 * 3,)'], {'dtype': 'np.uint8'}), '((240 * 320 * 3,), dtype=np.uint8)\n', (194, 228), True, 'import numpy as np\n')] |
# -*- coding:UTF-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys,os
import time
import numpy as np
import tensorflow as tf
from glob import glob
from os import path
import logging
import shutil
sys.path.append(".")
#sys.path.append("/home/lhj/PHICOMM/Project/label_model/modelTest/label_model")
def load_graph(model_file):
    """Deserialize a frozen GraphDef from *model_file* into a new tf.Graph."""
    graph_def = tf.GraphDef()
    with open(model_file, "rb") as fh:
        graph_def.ParseFromString(fh.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
def read_tensor_from_image_file(input_height=299, input_width=299,
                                input_mean=0, input_std=255):
    """Build a TF subgraph that reads a JPEG named by the "fname" placeholder.

    Pipeline: decode JPEG -> cast to float -> add batch dim -> bilinear
    resize to (input_height, input_width) -> normalize with
    (x - input_mean) / input_std.

    Feed the file path at run time via ``feed_dict={"fname:0": path}`` and
    run the returned tensor. Only JPEG input is supported (``decode_jpeg``
    is hard-wired).

    :param input_height: target image height after resize
    :param input_width: target image width after resize
    :param input_mean: value subtracted from every channel
    :param input_std: value every channel is divided by
    :return: the normalized image tensor
    """
    input_name = "file_reader"
    # The file name is a placeholder so a single graph serves many images.
    file_name_placeholder = tf.placeholder("string", name="fname")
    file_reader = tf.read_file(file_name_placeholder, input_name)
    image_reader = tf.image.decode_jpeg(file_reader, channels=3,
                                        name='jpeg_reader')
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    return normalized
def sort_dict(dict_words, index):
    """Return the dict's (key, value) pairs sorted in descending order.

    :param dict_words: mapping to sort
    :param index: ``"value"`` to sort by value; anything else sorts by key
    :return: list of (key, value) tuples in descending order
    """
    # dict.items() pairs keys with values directly, instead of zipping
    # keys() and values() and relying on matching iteration order.
    pairs = list(dict_words.items())
    if index == "value":
        return sorted(pairs, key=lambda kv: kv[1], reverse=True)
    return sorted(pairs, key=lambda kv: kv[0], reverse=True)
def mymovefile(srcfile, dstfile):
    """Move *srcfile* to *dstfile*, creating the destination directory first.

    Prints a diagnostic instead of raising when *srcfile* does not exist,
    preserving the original best-effort behaviour.

    :param srcfile: path of the file to move
    :param dstfile: destination path (may include not-yet-existing dirs)
    """
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
        return
    # Split the destination into directory + file name.
    fpath, fname = os.path.split(dstfile)
    if fpath:
        # exist_ok avoids the exists()/makedirs() race; the fpath guard
        # avoids makedirs("") when dstfile has no directory component.
        os.makedirs(fpath, exist_ok=True)
    shutil.move(srcfile, dstfile)
    print("move %s -> %s" % (srcfile, dstfile))
def renameAndMovefile(srcfile, dstfile, prob):
    """Embed *prob* in the file name of *srcfile*, then move it to *dstfile*.

    For example ``img.jpg`` with prob 0.5 becomes ``img__0.50.jpg`` before
    the move. Prints a diagnostic instead of raising when *srcfile* does
    not exist, preserving the original best-effort behaviour.

    :param srcfile: path of the file to rename and move
    :param dstfile: destination path or directory
    :param prob: probability rendered with two decimals into the new name
    """
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
        return
    fpath, fname = os.path.split(dstfile)
    if fpath:
        # exist_ok avoids the exists()/makedirs() race.
        os.makedirs(fpath, exist_ok=True)
    # New name keeps the extension and appends "__<prob>" to the stem.
    root, ext = os.path.splitext(srcfile)
    newname = "%s__%.2f%s" % (root, prob, ext)
    os.rename(srcfile, newname)
    shutil.move(newname, dstfile)
    print("move %s -> %s" % (newname, dstfile))
def mycopyfile(srcfile, dstfile):
    """Copy *srcfile* to *dstfile*, creating the destination directory first.

    Prints a diagnostic instead of raising when *srcfile* does not exist,
    preserving the original best-effort behaviour.

    :param srcfile: path of the file to copy
    :param dstfile: destination path (may include not-yet-existing dirs)
    """
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
        return
    fpath, fname = os.path.split(dstfile)
    if fpath:
        # exist_ok avoids the exists()/makedirs() race; the fpath guard
        # avoids makedirs("") when dstfile has no directory component.
        os.makedirs(fpath, exist_ok=True)
    shutil.copyfile(srcfile, dstfile)
    print("copy %s -> %s" % (srcfile, dstfile))
def load_labels(label_file):
    """Return the list of labels in *label_file*, one label per line
    (trailing whitespace stripped)."""
    lines = tf.gfile.GFile(label_file).readlines()
    return [line.rstrip() for line in lines]
if __name__ == "__main__":
    # Classify every image in a folder with a retrained TF graph and sort
    # the files into pass/fail/check directories by the probability of one
    # expected label. Defaults below can all be overridden via CLI flags.
    file_name = "tf_files/flower_photos/daisy/3475870145_685a19116d.jpg"
    model_file = "tf_files/retrained_graph.pb"
    label_file = "tf_files/retrained_labels.txt"
    input_height = 224
    input_width = 224
    input_mean = 128
    input_std = 128
    input_layer = "input"
    output_layer = "final_result"
    label_test = 0
    label_test_ref = 0.99
    label_test_ref_bottom = 0.05
    image_path = "./dataset"
    #path_pattern = "*.[jJ][Pp]*"
    path_pattern = "*.jpg"
    #lable_test_fail_dir = "./dataset/potato_total/label_test_fail_img/"
    #lable_test_check_dir = "./dataset/potato_total/label_test_check_img/"
    #lable_test_pass_dir = "./dataset/potato_total/label_test_pass_img"
    process_mode = "move"
    parser = argparse.ArgumentParser()
    parser.add_argument("--image", help="image to be processed")
    parser.add_argument("--graph", help="graph/model to be executed")
    parser.add_argument("--labels", help="name of file containing labels")
    parser.add_argument("--input_height", type=int, help="input height")
    parser.add_argument("--input_width", type=int, help="input width")
    parser.add_argument("--input_mean", type=int, help="input mean")
    parser.add_argument("--input_std", type=int, help="input std")
    parser.add_argument("--input_layer", help="name of input layer")
    parser.add_argument("--output_layer", help="name of output layer")
    parser.add_argument("--label_test", help="name of label test")
    parser.add_argument("--label_test_ref", help="name of label test reference")
    parser.add_argument("--label_test_ref_bottom", help="name of label test reference")
    parser.add_argument("--image_path", help="image_path to be processed")
    parser.add_argument("--pattern", help="ile search pattern for glob")
    parser.add_argument("--lable_test_fail_dir", help="lable_test_fail_dir")
    parser.add_argument("--lable_test_check_dir", help="lable_test_check_dir")
    parser.add_argument("--lable_test_pass_dir", help="lable_test_pass_dir")
    parser.add_argument("--process_mode", help="copy or move")
    args = parser.parse_args()
    # Override the defaults with any CLI values that were supplied.
    if args.graph:
        model_file = args.graph
    # if args.image:
    #   file_name = args.image
    if args.labels:
        label_file = args.labels
    if args.input_height:
        input_height = args.input_height
    if args.input_width:
        input_width = args.input_width
    if args.input_mean:
        input_mean = args.input_mean
    if args.input_std:
        input_std = args.input_std
    if args.input_layer:
        input_layer = args.input_layer
    if args.output_layer:
        output_layer = args.output_layer
    if args.label_test:
        label_test = args.label_test
    # NOTE(review): the condition below repeats args.label_test; it looks
    # like it was meant to be args.label_test_ref -- confirm before fixing.
    if args.label_test:
        label_test_ref = args.label_test_ref
    if args.label_test_ref_bottom:
        label_test_ref_bottom = args.label_test_ref_bottom
    if args.image_path:
        image_path = args.image_path
    if args.pattern:
        path_pattern = args.pattern
    if args.lable_test_fail_dir:
        lable_test_fail_dir = args.lable_test_fail_dir
    if args.lable_test_check_dir:
        lable_test_check_dir = args.lable_test_check_dir
    if args.lable_test_pass_dir:
        lable_test_pass_dir = args.lable_test_pass_dir
        # NOTE(review): the three trailing-slash normalizations below only
        # run when --lable_test_pass_dir is given; confirm the check/fail
        # dirs are intended to be normalized only in that case.
        if lable_test_pass_dir[len(lable_test_pass_dir)-1] != '/':
            lable_test_pass_dir = lable_test_pass_dir + '/'
        if lable_test_check_dir[len(lable_test_check_dir)-1] != '/':
            lable_test_check_dir = lable_test_check_dir + '/'
        if lable_test_fail_dir[len(lable_test_fail_dir)-1] != '/':
            lable_test_fail_dir = lable_test_fail_dir + '/'
    if args.process_mode:
        process_mode = args.process_mode
    # Get a logger instance (an empty name would return the root logger).
    logger = logging.getLogger("labelimage")
    # Output format shared by both handlers below.
    formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
    # File log.
    file_handler = logging.FileHandler("test.log")
    file_handler.setFormatter(formatter)  # apply the shared format
    # Console log.
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.formatter = formatter  # direct attribute assignment also works
    # Attach both handlers to the logger.
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    # Minimum level to emit (the default would be WARN).
    logger.setLevel(logging.DEBUG)
    logger.debug('This is debug message')
    all_files = glob(path.join(image_path, path_pattern))
    #all_files.sort()
    print('Found {} files in {} folder'.format(len(all_files), image_path))
    #print(all_files)
    graph = load_graph(model_file)
    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name);
    output_operation = graph.get_operation_by_name(output_name);
    file_probability = {}
    file_probability_aftersort = {}
    start = time.time()
    with tf.Session(graph=graph) as sess:
        # Build the image-reading subgraph once; each file is fed through
        # the "fname" placeholder instead of rebuilding the graph per image.
        read_tensor_from_image_file_op = read_tensor_from_image_file(
            input_height=input_height,
            input_width=input_width,
            input_mean=input_mean,
            input_std=input_std)
        for file_name in all_files:
            #start = time.time()
            t = sess.run(read_tensor_from_image_file_op,feed_dict={"fname:0": file_name})
            #t = sess.run(read_tensor_from_image_file_op,feed_dict={file_name_placeholder: file_name})
            results = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: t})
            #end=time.time()
            results = np.squeeze(results)
            # Indices of the top-5 scores, highest first.
            top_k = results.argsort()[-5:][::-1]
            # NOTE(review): load_labels re-reads the label file on every
            # iteration; it could be hoisted out of the loop.
            labels = load_labels(label_file)
            #logger.debug('\nEvaluation image:'+str(file_name))
            #print('\nEvaluation image:'+str(file_name))
            #logger.debug('Evaluation time (1-image): {:.3f}s\n'.format(end-start))
            # Find the rank position whose label matches the expected one.
            label_index = 0
            for i in top_k:
                #logger.debug("label: %s , %.2f ",labels[top_k[i]], results[top_k[i]])
                #print("label: %s , %.2f "%(labels[top_k[i]], results[top_k[i]]))
                if str(labels[top_k[i]]) == label_test:
                    label_index = i
            #logger.debug("evaluating label: %s , %d " % (str(labels[top_k[label_index]]), label_index))
            #print("evaluating label: %s , %d " % (str(labels[top_k[label_index]]), label_index))
            #logger.debug(" label_eval:%s probability:%.2f ExpectLabel:%s Hthresh:%s Lthresh:%s" % (labels[top_k[label_index]], results[top_k[label_index]], label_test, label_test_ref,label_test_ref_bottom))
            #print(" label_eval:%s probability:%.2f ExpectLabel:%s Hthresh:%s Lthresh:%s" % (labels[top_k[label_index]], results[top_k[label_index]], label_test, label_test_ref,label_test_ref_bottom))
            file_probability[file_name] = results[top_k[label_index]]
            # Route the image by probability: >= high threshold -> pass dir,
            # <= low threshold -> fail dir, otherwise -> check dir.
            if (float(results[top_k[label_index]]) >= float(label_test_ref)):
                if process_mode == "move":
                    mymovefile(file_name, lable_test_pass_dir)
                else:
                    image_name = file_name.split("/")[-1]
                    mycopyfile(file_name, lable_test_pass_dir + "/" + image_name)
                #renameAndMovefile(file_name, lable_test_pass_dir,file_probability[file_name])
                pass
            elif (float(results[top_k[label_index]]) <= float(label_test_ref_bottom)):
                if process_mode == "move":
                    mymovefile(file_name, lable_test_fail_dir)
                else:
                    image_name = file_name.split("/")[-1]
                    mycopyfile(file_name, lable_test_fail_dir + "/" + image_name)
                #renameAndMovefile(file_name, lable_test_fail_dir,file_probability[file_name])
                pass
            else:
                if process_mode == "move":
                    mymovefile(file_name, lable_test_check_dir)
                else:
                    image_name = file_name.split("/")[-1]
                    mycopyfile(file_name, lable_test_check_dir + "/" + image_name)
                #renameAndMovefile(file_name, lable_test_check_dir,file_probability[file_name])
                pass
    #print(file_probability)
    #file_probability_aftersort = sorted(file_probability.items(),key = lambda x:x[0],reverse = True)
    file_probability_aftersort = sort_dict(file_probability,"key")
    #print(file_probability_aftersort)
    for key,value in file_probability_aftersort:
        logger.debug("file name:%s,probability:%s" %(key,value))
    end=time.time()
    logger.debug('Evaluation time (1-image): {:.3f}s\n'.format(end-start))
    # Remove the log handlers added above.
    logger.removeHandler(file_handler)
| [
"argparse.ArgumentParser",
"logging.Formatter",
"os.path.isfile",
"os.path.join",
"sys.path.append",
"logging.FileHandler",
"tensorflow.subtract",
"os.path.exists",
"tensorflow.placeholder",
"tensorflow.cast",
"shutil.copyfile",
"tensorflow.GraphDef",
"os.rename",
"logging.StreamHandler",
... | [((980, 1000), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (995, 1000), False, 'import sys, os\n'), ((1120, 1130), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1128, 1130), True, 'import tensorflow as tf\n'), ((1145, 1158), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1156, 1158), True, 'import tensorflow as tf\n'), ((1548, 1586), 'tensorflow.placeholder', 'tf.placeholder', (['"""string"""'], {'name': '"""fname"""'}), "('string', name='fname')\n", (1562, 1586), True, 'import tensorflow as tf\n'), ((1604, 1651), 'tensorflow.read_file', 'tf.read_file', (['file_name_placeholder', 'input_name'], {}), '(file_name_placeholder, input_name)\n', (1616, 1651), True, 'import tensorflow as tf\n'), ((2246, 2311), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['file_reader'], {'channels': '(3)', 'name': '"""jpeg_reader"""'}), "(file_reader, channels=3, name='jpeg_reader')\n", (2266, 2311), True, 'import tensorflow as tf\n'), ((2372, 2405), 'tensorflow.cast', 'tf.cast', (['image_reader', 'tf.float32'], {}), '(image_reader, tf.float32)\n', (2379, 2405), True, 'import tensorflow as tf\n'), ((2424, 2455), 'tensorflow.expand_dims', 'tf.expand_dims', (['float_caster', '(0)'], {}), '(float_caster, 0)\n', (2438, 2455), True, 'import tensorflow as tf\n'), ((2469, 2537), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['dims_expander', '[input_height, input_width]'], {}), '(dims_expander, [input_height, input_width])\n', (2493, 2537), True, 'import tensorflow as tf\n'), ((5409, 5434), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5432, 5434), False, 'import argparse\n'), ((8243, 8274), 'logging.getLogger', 'logging.getLogger', (['"""labelimage"""'], {}), "('labelimage')\n", (8260, 8274), False, 'import logging\n'), ((8307, 8368), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)-8s: %(message)s"""'], {}), "('%(asctime)s %(levelname)-8s: %(message)s')\n", (8324, 
8368), False, 'import logging\n'), ((8396, 8427), 'logging.FileHandler', 'logging.FileHandler', (['"""test.log"""'], {}), "('test.log')\n", (8415, 8427), False, 'import logging\n'), ((8524, 8557), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (8545, 8557), False, 'import logging\n'), ((9290, 9301), 'time.time', 'time.time', ([], {}), '()\n', (9299, 9301), False, 'import time\n'), ((12702, 12713), 'time.time', 'time.time', ([], {}), '()\n', (12711, 12713), False, 'import time\n'), ((1267, 1297), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {}), '(graph_def)\n', (1286, 1297), True, 'import tensorflow as tf\n'), ((2563, 2597), 'tensorflow.subtract', 'tf.subtract', (['resized', '[input_mean]'], {}), '(resized, [input_mean])\n', (2574, 2597), True, 'import tensorflow as tf\n'), ((3170, 3193), 'os.path.isfile', 'os.path.isfile', (['srcfile'], {}), '(srcfile)\n', (3184, 3193), False, 'import sys, os\n'), ((3267, 3289), 'os.path.split', 'os.path.split', (['dstfile'], {}), '(dstfile)\n', (3280, 3289), False, 'import sys, os\n'), ((3401, 3430), 'shutil.move', 'shutil.move', (['srcfile', 'dstfile'], {}), '(srcfile, dstfile)\n', (3412, 3430), False, 'import shutil\n'), ((3553, 3576), 'os.path.isfile', 'os.path.isfile', (['srcfile'], {}), '(srcfile)\n', (3567, 3576), False, 'import sys, os\n'), ((3650, 3672), 'os.path.split', 'os.path.split', (['dstfile'], {}), '(dstfile)\n', (3663, 3672), False, 'import sys, os\n'), ((3993, 4020), 'os.rename', 'os.rename', (['srcfile', 'newname'], {}), '(srcfile, newname)\n', (4002, 4020), False, 'import sys, os\n'), ((4028, 4057), 'shutil.move', 'shutil.move', (['newname', 'dstfile'], {}), '(newname, dstfile)\n', (4039, 4057), False, 'import shutil\n'), ((4168, 4191), 'os.path.isfile', 'os.path.isfile', (['srcfile'], {}), '(srcfile)\n', (4182, 4191), False, 'import sys, os\n'), ((4265, 4287), 'os.path.split', 'os.path.split', (['dstfile'], {}), '(dstfile)\n', (4278, 4287), 
False, 'import sys, os\n'), ((4399, 4432), 'shutil.copyfile', 'shutil.copyfile', (['srcfile', 'dstfile'], {}), '(srcfile, dstfile)\n', (4414, 4432), False, 'import shutil\n'), ((8832, 8867), 'os.path.join', 'path.join', (['image_path', 'path_pattern'], {}), '(image_path, path_pattern)\n', (8841, 8867), False, 'from os import path\n'), ((9309, 9332), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (9319, 9332), True, 'import tensorflow as tf\n'), ((3318, 3339), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (3332, 3339), False, 'import sys, os\n'), ((3353, 3371), 'os.makedirs', 'os.makedirs', (['fpath'], {}), '(fpath)\n', (3364, 3371), False, 'import sys, os\n'), ((3701, 3722), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (3715, 3722), False, 'import sys, os\n'), ((3736, 3754), 'os.makedirs', 'os.makedirs', (['fpath'], {}), '(fpath)\n', (3747, 3754), False, 'import sys, os\n'), ((4316, 4337), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (4330, 4337), False, 'import sys, os\n'), ((4351, 4369), 'os.makedirs', 'os.makedirs', (['fpath'], {}), '(fpath)\n', (4362, 4369), False, 'import sys, os\n'), ((4562, 4588), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['label_file'], {}), '(label_file)\n', (4576, 4588), True, 'import tensorflow as tf\n'), ((10003, 10022), 'numpy.squeeze', 'np.squeeze', (['results'], {}), '(results)\n', (10013, 10022), True, 'import numpy as np\n'), ((3790, 3815), 'os.path.splitext', 'os.path.splitext', (['srcfile'], {}), '(srcfile)\n', (3806, 3815), False, 'import sys, os\n'), ((3819, 3844), 'os.path.splitext', 'os.path.splitext', (['srcfile'], {}), '(srcfile)\n', (3835, 3844), False, 'import sys, os\n'), ((3948, 3973), 'os.path.splitext', 'os.path.splitext', (['srcfile'], {}), '(srcfile)\n', (3964, 3973), False, 'import sys, os\n'), ((3897, 3922), 'os.path.splitext', 'os.path.splitext', (['srcfile'], {}), '(srcfile)\n', (3913, 3922), False, 'import sys, 
os\n')] |
import multiprocessing
import numpy
from smqtk.representation import DescriptorElement
from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper
# Try to import required modules
try:
import psycopg2
except ImportError:
psycopg2 = None
PSQL_TABLE_CREATE_RLOCK = multiprocessing.RLock()
# noinspection SqlNoDataSourceInspection
class PostgresDescriptorElement (DescriptorElement):
"""
Descriptor element whose vector is stored in a Postgres database.
We assume we will work with a Postgres version of at least 9.4 (due to
versions tested).
Efficient connection pooling may be achieved via external utilities like
PGBounder.
"""
ARRAY_DTYPE = numpy.float64
UPSERT_TABLE_TMPL = norm_psql_cmd_string("""
CREATE TABLE IF NOT EXISTS {table_name:s} (
{type_col:s} TEXT NOT NULL,
{uuid_col:s} TEXT NOT NULL,
{binary_col:s} BYTEA NOT NULL,
PRIMARY KEY ({type_col:s}, {uuid_col:s})
);
""")
SELECT_TMPL = norm_psql_cmd_string("""
SELECT {binary_col:s}
FROM {table_name:s}
WHERE {type_col:s} = %(type_val)s
AND {uuid_col:s} = %(uuid_val)s
;
""")
UPSERT_TMPL = norm_psql_cmd_string("""
WITH upsert AS (
UPDATE {table_name:s}
SET {binary_col:s} = %(binary_val)s
WHERE {type_col:s} = %(type_val)s
AND {uuid_col:s} = %(uuid_val)s
RETURNING *
)
INSERT INTO {table_name:s} ({type_col:s}, {uuid_col:s}, {binary_col:s})
SELECT %(type_val)s, %(uuid_val)s, %(binary_val)s
WHERE NOT EXISTS (SELECT * FROM upsert);
""")
@classmethod
def is_usable(cls):
if psycopg2 is None:
cls.get_logger().warning("Not usable. Requires psycopg2 module")
return False
return True
def __init__(self, type_str, uuid,
table_name='descriptors',
uuid_col='uid', type_col='type_str', binary_col='vector',
db_name='postgres', db_host=None, db_port=None, db_user=None,
db_pass=None, create_table=True):
"""
Initialize new PostgresDescriptorElement attached to some database
credentials.
We require that storage tables treat uuid AND type string columns as
primary keys. The type and uuid columns should be of the 'text' type.
The binary column should be of the 'bytea' type.
Default argument values assume a local PostgreSQL database with a table
created via the
``etc/smqtk/postgres/descriptor_element/example_table_init.sql``
file (relative to the SMQTK source tree or install root).
NOTES:
- Not all uuid types used here are necessarily of the ``uuid.UUID``
type, thus the recommendation to use a ``text`` type for the
column. For certain specific use cases they may be proper
``uuid.UUID`` instances or strings, but this cannot be generally
assumed.
:param type_str: Type of descriptor. This is usually the name of the
content descriptor that generated this vector.
:type type_str: str
:param uuid: Unique ID reference of the descriptor.
:type uuid: collections.Hashable
:param table_name: String label of the database table to use.
:type table_name: str
:param uuid_col: The column label for descriptor UUID storage
:type uuid_col: str
:param type_col: The column label for descriptor type string storage.
:type type_col: str
:param binary_col: The column label for descriptor vector binary
storage.
:type binary_col: str
:param db_host: Host address of the Postgres server. If None, we
assume the server is on the local machine and use the UNIX socket.
This might be a required field on Windows machines (not tested yet).
:type db_host: str | None
:param db_port: Port the Postgres server is exposed on. If None, we
assume the default port (5423).
:type db_port: int | None
:param db_name: The name of the database to connect to.
:type db_name: str
:param db_user: Postgres user to connect as. If None, postgres
defaults to using the current accessing user account name on the
operating system.
:type db_user: str | None
:param db_pass: Password for the user we're connecting as. This may be
None if no password is to be used.
:type db_pass: str | None
:param create_table: If this instance should try to create the storing
table before actions are performed against it. If the configured
user does not have sufficient permissions to create the table and it
does not currently exist, an exception will be raised.
:type create_table: bool
"""
super(PostgresDescriptorElement, self).__init__(type_str, uuid)
self.table_name = table_name
self.uuid_col = uuid_col
self.type_col = type_col
self.binary_col = binary_col
self.create_table = create_table
self.db_name = db_name
self.db_host = db_host
self.db_port = db_port
self.db_user = db_user
self.db_pass = db_pass
self._psql_helper = None
def __getstate__(self):
"""
Construct serialization state.
Due to the psql_helper containing a lock, it cannot be serialized. This
is OK due to our creation of the helper on demand. The cost incurred by
discarding the instance upon serialization is that once deserialized
elsewhere the helper instance will have to be created. Since this
creation post-deserialization only happens once, this is acceptable.
"""
state = super(PostgresDescriptorElement, self).__getstate__()
state.update({
"table_name": self.table_name,
"uuid_col": self.uuid_col,
"type_col": self.type_col,
"binary_col": self.binary_col,
"create_table": self.create_table,
"db_name": self.db_name,
"db_host": self.db_host,
"db_port": self.db_port,
"db_user": self.db_user,
"db_pass": self.db_pass,
})
return state
def __setstate__(self, state):
# Base DescriptorElement parts
super(PostgresDescriptorElement, self).__setstate__(state)
# Our parts
self.table_name = state['table_name']
self.uuid_col = state['uuid_col']
self.type_col = state['type_col']
self.binary_col = state['binary_col']
self.create_table = state['create_table']
self.db_name = state['db_name']
self.db_host = state['db_host']
self.db_port = state['db_port']
self.db_user = state['db_user']
self.db_pass = state['db_pass']
self._psql_helper = None
def _get_psql_helper(self):
"""
Internal method to create on demand the PSQL connection helper class.
:return: PsqlConnectionHelper utility.
:rtype: PsqlConnectionHelper
"""
# `hasattr` check used for backwards compatibility when interacting with
# databases containing elements serialized before the inclusion of this
# helper class.
if self._psql_helper is None:
# Only using a transport iteration size of 1 since this element is
# only meant to refer to a single entry in the associated table.
self._psql_helper = PsqlConnectionHelper(
self.db_name, self.db_host, self.db_port, self.db_user,
self.db_pass, itersize=1,
table_upsert_lock=PSQL_TABLE_CREATE_RLOCK
)
# Register table upsert command
if self.create_table:
self._psql_helper.set_table_upsert_sql(
self.UPSERT_TABLE_TMPL.format(
table_name=self.table_name,
type_col=self.type_col,
uuid_col=self.uuid_col,
binary_col=self.binary_col,
)
)
return self._psql_helper
def get_config(self):
return {
"table_name": self.table_name,
"uuid_col": self.uuid_col,
"type_col": self.type_col,
"binary_col": self.binary_col,
"create_table": self.create_table,
"db_name": self.db_name,
"db_host": self.db_host,
"db_port": self.db_port,
"db_user": self.db_user,
"db_pass": self.db_pass,
}
def has_vector(self):
"""
Check if the target database has a vector for our keys.
This also returns True if we have a cached vector since there must have
been a source vector to draw from if there is a cache of it.
If a vector is cached, this resets the cache expiry timeout.
:return: Whether or not this container current has a descriptor vector
stored.
:rtype: bool
"""
# Very similar to vector query, but replacing vector binary return with
# a true/null return. Save a little bit of time compared to testing
# vector return.
# OLD: return self.vector() is not None
# Using static value 'true' for binary "column" to reduce data return
# volume.
q_select = self.SELECT_TMPL.format(**{
'binary_col': 'true',
'table_name': self.table_name,
'type_col': self.type_col,
'uuid_col': self.uuid_col,
})
q_select_values = {
"type_val": self.type(),
"uuid_val": str(self.uuid())
}
def cb(cursor):
cursor.execute(q_select, q_select_values)
# Should either yield one or zero rows.
psql_helper = self._get_psql_helper()
return bool(list(psql_helper.single_execute(
cb, yield_result_rows=True
)))
def vector(self):
"""
Return this element's vector, or None if we don't have one.
:return: Get the stored descriptor vector as a numpy array. This returns
None of there is no vector stored in this container.
:rtype: numpy.ndarray or None
"""
q_select = self.SELECT_TMPL.format(**{
"binary_col": self.binary_col,
"table_name": self.table_name,
"type_col": self.type_col,
"uuid_col": self.uuid_col,
})
q_select_values = {
"type_val": self.type(),
"uuid_val": str(self.uuid())
}
# query execution callback
# noinspection PyProtectedMember
def cb(cursor):
# type: (psycopg2._psycopg.cursor) -> None
cursor.execute(q_select, q_select_values)
# This should only fetch a single row. Cannot yield more than one due
# use of primary keys.
psql_helper = self._get_psql_helper()
r = list(psql_helper.single_execute(cb, yield_result_rows=True))
if not r:
return None
else:
b = r[0][0]
v = numpy.frombuffer(b, self.ARRAY_DTYPE)
return v
def set_vector(self, new_vec):
"""
Set the contained vector.
If this container already stores a descriptor vector, this will
overwrite it.
If we are configured to use caching, and one has not been cached yet,
then we cache the vector and start a thread to monitor access times and
to remove the cache if the access timeout has expired.
If a vector was already cached, this new vector replaces the old one,
the vector database-side is replaced, and the cache expiry timeout is
reset.
:raises ValueError: ``new_vec`` was not a numpy ndarray.
:param new_vec: New vector to contain. This must be a numpy array.
:type new_vec: numpy.ndarray
:returns: Self.
:rtype: PostgresDescriptorElement
"""
if not isinstance(new_vec, numpy.ndarray):
new_vec = numpy.copy(new_vec)
if new_vec.dtype != self.ARRAY_DTYPE:
try:
new_vec = new_vec.astype(self.ARRAY_DTYPE)
except TypeError:
raise ValueError("Could not convert input to a vector of type "
"%s." % self.ARRAY_DTYPE)
q_upsert = self.UPSERT_TMPL.strip().format(**{
"table_name": self.table_name,
"binary_col": self.binary_col,
"type_col": self.type_col,
"uuid_col": self.uuid_col,
})
q_upsert_values = {
"binary_val": psycopg2.Binary(new_vec),
"type_val": self.type(),
"uuid_val": str(self.uuid()),
}
# query execution callback
# noinspection PyProtectedMember
def cb(cursor):
# type: (psycopg2._psycopg.cursor) -> None
cursor.execute(q_upsert, q_upsert_values)
# No return but need to force iteration.
psql_helper = self._get_psql_helper()
list(psql_helper.single_execute(cb, yield_result_rows=False))
return self
| [
"smqtk.utils.postgres.PsqlConnectionHelper",
"numpy.copy",
"numpy.frombuffer",
"smqtk.utils.postgres.norm_psql_cmd_string",
"psycopg2.Binary",
"multiprocessing.RLock"
] | [((292, 315), 'multiprocessing.RLock', 'multiprocessing.RLock', ([], {}), '()\n', (313, 315), False, 'import multiprocessing\n'), ((748, 1022), 'smqtk.utils.postgres.norm_psql_cmd_string', 'norm_psql_cmd_string', (['"""\n CREATE TABLE IF NOT EXISTS {table_name:s} (\n {type_col:s} TEXT NOT NULL,\n {uuid_col:s} TEXT NOT NULL,\n {binary_col:s} BYTEA NOT NULL,\n PRIMARY KEY ({type_col:s}, {uuid_col:s})\n );\n """'], {}), '(\n """\n CREATE TABLE IF NOT EXISTS {table_name:s} (\n {type_col:s} TEXT NOT NULL,\n {uuid_col:s} TEXT NOT NULL,\n {binary_col:s} BYTEA NOT NULL,\n PRIMARY KEY ({type_col:s}, {uuid_col:s})\n );\n """\n )\n', (768, 1022), False, 'from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper\n'), ((1032, 1233), 'smqtk.utils.postgres.norm_psql_cmd_string', 'norm_psql_cmd_string', (['"""\n SELECT {binary_col:s}\n FROM {table_name:s}\n WHERE {type_col:s} = %(type_val)s\n AND {uuid_col:s} = %(uuid_val)s\n ;\n """'], {}), '(\n """\n SELECT {binary_col:s}\n FROM {table_name:s}\n WHERE {type_col:s} = %(type_val)s\n AND {uuid_col:s} = %(uuid_val)s\n ;\n """\n )\n', (1052, 1233), False, 'from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper\n'), ((1243, 1712), 'smqtk.utils.postgres.norm_psql_cmd_string', 'norm_psql_cmd_string', (['"""\n WITH upsert AS (\n UPDATE {table_name:s}\n SET {binary_col:s} = %(binary_val)s\n WHERE {type_col:s} = %(type_val)s\n AND {uuid_col:s} = %(uuid_val)s\n RETURNING *\n )\n INSERT INTO {table_name:s} ({type_col:s}, {uuid_col:s}, {binary_col:s})\n SELECT %(type_val)s, %(uuid_val)s, %(binary_val)s\n WHERE NOT EXISTS (SELECT * FROM upsert);\n """'], {}), '(\n """\n WITH upsert AS (\n UPDATE {table_name:s}\n SET {binary_col:s} = %(binary_val)s\n WHERE {type_col:s} = %(type_val)s\n AND {uuid_col:s} = %(uuid_val)s\n RETURNING *\n )\n INSERT INTO {table_name:s} ({type_col:s}, {uuid_col:s}, {binary_col:s})\n SELECT %(type_val)s, %(uuid_val)s, %(binary_val)s\n WHERE NOT EXISTS (SELECT * FROM upsert);\n 
"""\n )\n', (1263, 1712), False, 'from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper\n'), ((7734, 7883), 'smqtk.utils.postgres.PsqlConnectionHelper', 'PsqlConnectionHelper', (['self.db_name', 'self.db_host', 'self.db_port', 'self.db_user', 'self.db_pass'], {'itersize': '(1)', 'table_upsert_lock': 'PSQL_TABLE_CREATE_RLOCK'}), '(self.db_name, self.db_host, self.db_port, self.db_user,\n self.db_pass, itersize=1, table_upsert_lock=PSQL_TABLE_CREATE_RLOCK)\n', (7754, 7883), False, 'from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper\n'), ((11416, 11453), 'numpy.frombuffer', 'numpy.frombuffer', (['b', 'self.ARRAY_DTYPE'], {}), '(b, self.ARRAY_DTYPE)\n', (11432, 11453), False, 'import numpy\n'), ((12378, 12397), 'numpy.copy', 'numpy.copy', (['new_vec'], {}), '(new_vec)\n', (12388, 12397), False, 'import numpy\n'), ((12975, 12999), 'psycopg2.Binary', 'psycopg2.Binary', (['new_vec'], {}), '(new_vec)\n', (12990, 12999), False, 'import psycopg2\n')] |
#!/usr/bin/env python2
# This file is part of the OpenMV project.
#
# Copyright (c) 2013-2021 <NAME> <<EMAIL>>
# Copyright (c) 2013-2021 <NAME> <<EMAIL>>
#
# This work is licensed under the MIT license, see the file LICENSE for details.
#
# This script creates smaller patches from images.
import os, sys
import argparse
import random
import numpy as np
from skimage import io
from skimage import exposure
from sklearn.feature_extraction import image
def main():
# CMD args parser
parser = argparse.ArgumentParser(description='Generate smaller patches from images')
parser.add_argument("--input", action = "store", help = "Input images dir")
parser.add_argument("--output", action = "store", help = "Output images dir")
parser.add_argument("--width", action = "store", help = "Patch width", type=int, default = 32)
parser.add_argument("--height", action = "store", help = "Patch height", type=int, default = 32)
parser.add_argument("--patches", action = "store", help = "Number of patches", type=int, default = 10)
# Parse CMD args
args = parser.parse_args()
if (args.input == None or args.output == None):
parser.print_help()
sys.exit(1)
count = 0
images = os.listdir(args.input)
while (count < args.patches):
random.shuffle(images)
for i in xrange(len(images)):
img = io.imread(args.input+'/'+images[i])
patches = image.extract_patches_2d(img,
patch_size=(args.width, args.height),
max_patches=100, random_state=np.random.RandomState(0))
random.shuffle(patches)
for p in patches:
# Save low contrast patches only
if (exposure.is_low_contrast(p) == False):
io.imsave(args.output+'/patch_%.4d.ppm'%(count), p)
count += 1
break
if (count == args.patches):
break
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"random.shuffle",
"numpy.random.RandomState",
"sys.exit",
"skimage.exposure.is_low_contrast",
"skimage.io.imsave",
"os.listdir",
"skimage.io.imread"
] | [((500, 575), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate smaller patches from images"""'}), "(description='Generate smaller patches from images')\n", (523, 575), False, 'import argparse\n'), ((1243, 1265), 'os.listdir', 'os.listdir', (['args.input'], {}), '(args.input)\n', (1253, 1265), False, 'import os, sys\n'), ((1203, 1214), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1211, 1214), False, 'import os, sys\n'), ((1308, 1330), 'random.shuffle', 'random.shuffle', (['images'], {}), '(images)\n', (1322, 1330), False, 'import random\n'), ((1387, 1426), 'skimage.io.imread', 'io.imread', (["(args.input + '/' + images[i])"], {}), "(args.input + '/' + images[i])\n", (1396, 1426), False, 'from skimage import io\n'), ((1637, 1660), 'random.shuffle', 'random.shuffle', (['patches'], {}), '(patches)\n', (1651, 1660), False, 'import random\n'), ((1599, 1623), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1620, 1623), True, 'import numpy as np\n'), ((1760, 1787), 'skimage.exposure.is_low_contrast', 'exposure.is_low_contrast', (['p'], {}), '(p)\n', (1784, 1787), False, 'from skimage import exposure\n'), ((1819, 1872), 'skimage.io.imsave', 'io.imsave', (["(args.output + '/patch_%.4d.ppm' % count)", 'p'], {}), "(args.output + '/patch_%.4d.ppm' % count, p)\n", (1828, 1872), False, 'from skimage import io\n')] |
""" Unit tests for pointing
"""
import logging
import unittest
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from rascil.data_models.memory_data_models import Skycomponent
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components.calibration.pointing import create_pointingtable_from_blockvisibility
from rascil.processing_components.imaging.primary_beams import create_vp
from rascil.processing_components.simulation import create_named_configuration
from rascil.processing_components.simulation.pointing import simulate_gaintable_from_pointingtable
from rascil.processing_components.simulation.pointing import simulate_pointingtable_from_timeseries
from rascil.processing_components.visibility.base import create_blockvisibility
from rascil.processing_components import create_image
log = logging.getLogger('logger')
log.setLevel(logging.WARNING)
class TestPointing(unittest.TestCase):
def setUp(self):
from rascil.data_models.parameters import rascil_path, rascil_data_path
self.doplot = False
self.midcore = create_named_configuration('MID', rmax=100.0)
self.nants = len(self.midcore.names)
self.dir = rascil_path('test_results')
self.ntimes = 100
interval = 10.0
self.times = numpy.arange(0.0, float(self.ntimes)) * interval
self.times *= numpy.pi / 43200.0
self.frequency = numpy.array([1.4e9])
self.channel_bandwidth = numpy.array([1e7])
self.phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
self.vis = create_blockvisibility(self.midcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame('stokesI'))
self.vis.data['vis'] *= 0.0
# Create model
self.model = create_image(npixel=512, cellsize=0.001, polarisation_frame=PolarisationFrame("stokesI"),
frequency=self.frequency, channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre)
def test_simulate_gaintable_from_time_series(self):
numpy.random.seed(18051955)
offset_phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-44.58 * u.deg, frame='icrs', equinox='J2000')
component = [Skycomponent(frequency=self.frequency, direction=offset_phasecentre,
polarisation_frame=PolarisationFrame("stokesI"), flux=[[1.0]])]
for type in ['wind']:
pt = create_pointingtable_from_blockvisibility(self.vis)
import matplotlib.pyplot as plt
ant = 15
plt.clf()
plt.plot(pt.time, pt.nominal[:, ant, 0, 0, 0], '.')
plt.plot(pt.time, pt.nominal[:, ant, 0, 0, 1], '.')
plt.xlabel('Time (s)')
plt.ylabel('Nominal (rad)')
plt.title("Nominal pointing for %s" % (type))
plt.show(block=False)
for reference_pointing in [False, True]:
pt = simulate_pointingtable_from_timeseries(pt, type=type, reference_pointing=reference_pointing)
import matplotlib.pyplot as plt
ant = 15
plt.clf()
r2a = 180.0 * 3600.0 / numpy.pi
plt.plot(pt.time, r2a * pt.pointing[:, ant, 0, 0, 0], '.')
plt.plot(pt.time, r2a * pt.pointing[:, ant, 0, 0, 1], '.')
plt.xlabel('Time (s)')
plt.ylabel('Pointing (arcsec)')
plt.title("Pointing for %s, reference pointing %s" % (type, reference_pointing))
plt.show(block=False)
vp = create_vp(self.model, 'MID')
gt = simulate_gaintable_from_pointingtable(self.vis, component, pt, vp)
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
plt.clf()
plt.plot(gt[0].time, 1.0 / numpy.real(gt[0].gain[:, ant, 0, 0, 0]), '.')
plt.xlabel('Time (s)')
plt.ylabel('Gain')
plt.title("Gain for %s, reference pointing %s" % (type, reference_pointing))
plt.show(block=False)
if __name__ == '__main__':
unittest.main()
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.clf",
"rascil.data_models.parameters.rascil_path",
"unittest.main",
"rascil.processing_components.simulation.create_named_configuration",
"numpy.real",
"rascil.data_models.polarisation.PolarisationFrame",
"matplotlib.pyplot.show",
... | [((868, 895), 'logging.getLogger', 'logging.getLogger', (['"""logger"""'], {}), "('logger')\n", (885, 895), False, 'import logging\n'), ((4527, 4542), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4540, 4542), False, 'import unittest\n'), ((1128, 1173), 'rascil.processing_components.simulation.create_named_configuration', 'create_named_configuration', (['"""MID"""'], {'rmax': '(100.0)'}), "('MID', rmax=100.0)\n", (1154, 1173), False, 'from rascil.processing_components.simulation import create_named_configuration\n'), ((1238, 1265), 'rascil.data_models.parameters.rascil_path', 'rascil_path', (['"""test_results"""'], {}), "('test_results')\n", (1249, 1265), False, 'from rascil.data_models.parameters import rascil_path, rascil_data_path\n'), ((1461, 1488), 'numpy.array', 'numpy.array', (['[1400000000.0]'], {}), '([1400000000.0])\n', (1472, 1488), False, 'import numpy\n'), ((1515, 1540), 'numpy.array', 'numpy.array', (['[10000000.0]'], {}), '([10000000.0])\n', (1526, 1540), False, 'import numpy\n'), ((1561, 1637), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(+15.0 * u.deg)', 'dec': '(-45.0 * u.deg)', 'frame': '"""icrs"""', 'equinox': '"""J2000"""'}), "(ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')\n", (1569, 1637), False, 'from astropy.coordinates import SkyCoord\n'), ((2395, 2422), 'numpy.random.seed', 'numpy.random.seed', (['(18051955)'], {}), '(18051955)\n', (2412, 2422), False, 'import numpy\n'), ((2452, 2529), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(+15.0 * u.deg)', 'dec': '(-44.58 * u.deg)', 'frame': '"""icrs"""', 'equinox': '"""J2000"""'}), "(ra=+15.0 * u.deg, dec=-44.58 * u.deg, frame='icrs', equinox='J2000')\n", (2460, 2529), False, 'from astropy.coordinates import SkyCoord\n'), ((2779, 2830), 'rascil.processing_components.calibration.pointing.create_pointingtable_from_blockvisibility', 'create_pointingtable_from_blockvisibility', (['self.vis'], {}), '(self.vis)\n', (2820, 2830), False, 'from 
rascil.processing_components.calibration.pointing import create_pointingtable_from_blockvisibility\n'), ((2909, 2918), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2916, 2918), True, 'import matplotlib.pyplot as plt\n'), ((2931, 2982), 'matplotlib.pyplot.plot', 'plt.plot', (['pt.time', 'pt.nominal[:, ant, 0, 0, 0]', '"""."""'], {}), "(pt.time, pt.nominal[:, ant, 0, 0, 0], '.')\n", (2939, 2982), True, 'import matplotlib.pyplot as plt\n'), ((2995, 3046), 'matplotlib.pyplot.plot', 'plt.plot', (['pt.time', 'pt.nominal[:, ant, 0, 0, 1]', '"""."""'], {}), "(pt.time, pt.nominal[:, ant, 0, 0, 1], '.')\n", (3003, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3059, 3081), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (3069, 3081), True, 'import matplotlib.pyplot as plt\n'), ((3094, 3121), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Nominal (rad)"""'], {}), "('Nominal (rad)')\n", (3104, 3121), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3177), 'matplotlib.pyplot.title', 'plt.title', (["('Nominal pointing for %s' % type)"], {}), "('Nominal pointing for %s' % type)\n", (3143, 3177), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3213), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3200, 3213), True, 'import matplotlib.pyplot as plt\n'), ((1951, 1979), 'rascil.data_models.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (1968, 1979), False, 'from rascil.data_models.polarisation import PolarisationFrame\n'), ((2130, 2158), 'rascil.data_models.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (2147, 2158), False, 'from rascil.data_models.polarisation import PolarisationFrame\n'), ((3293, 3390), 'rascil.processing_components.simulation.pointing.simulate_pointingtable_from_timeseries', 'simulate_pointingtable_from_timeseries', (['pt'], {'type': 'type', 'reference_pointing': 
'reference_pointing'}), '(pt, type=type, reference_pointing=\n reference_pointing)\n', (3331, 3390), False, 'from rascil.processing_components.simulation.pointing import simulate_pointingtable_from_timeseries\n'), ((3476, 3485), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3483, 3485), True, 'import matplotlib.pyplot as plt\n'), ((3550, 3608), 'matplotlib.pyplot.plot', 'plt.plot', (['pt.time', '(r2a * pt.pointing[:, ant, 0, 0, 0])', '"""."""'], {}), "(pt.time, r2a * pt.pointing[:, ant, 0, 0, 0], '.')\n", (3558, 3608), True, 'import matplotlib.pyplot as plt\n'), ((3625, 3683), 'matplotlib.pyplot.plot', 'plt.plot', (['pt.time', '(r2a * pt.pointing[:, ant, 0, 0, 1])', '"""."""'], {}), "(pt.time, r2a * pt.pointing[:, ant, 0, 0, 1], '.')\n", (3633, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3700, 3722), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (3710, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3770), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pointing (arcsec)"""'], {}), "('Pointing (arcsec)')\n", (3749, 3770), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3872), 'matplotlib.pyplot.title', 'plt.title', (["('Pointing for %s, reference pointing %s' % (type, reference_pointing))"], {}), "('Pointing for %s, reference pointing %s' % (type, reference_pointing)\n )\n", (3796, 3872), True, 'import matplotlib.pyplot as plt\n'), ((3884, 3905), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3892, 3905), True, 'import matplotlib.pyplot as plt\n'), ((3944, 3972), 'rascil.processing_components.imaging.primary_beams.create_vp', 'create_vp', (['self.model', '"""MID"""'], {}), "(self.model, 'MID')\n", (3953, 3972), False, 'from rascil.processing_components.imaging.primary_beams import create_vp\n'), ((3994, 4060), 'rascil.processing_components.simulation.pointing.simulate_gaintable_from_pointingtable', 'simulate_gaintable_from_pointingtable', (['self.vis', 
'component', 'pt', 'vp'], {}), '(self.vis, component, pt, vp)\n', (4031, 4060), False, 'from rascil.processing_components.simulation.pointing import simulate_gaintable_from_pointingtable\n'), ((4190, 4199), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4197, 4199), True, 'import matplotlib.pyplot as plt\n'), ((4305, 4327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (4315, 4327), True, 'import matplotlib.pyplot as plt\n'), ((4344, 4362), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gain"""'], {}), "('Gain')\n", (4354, 4362), True, 'import matplotlib.pyplot as plt\n'), ((4379, 4455), 'matplotlib.pyplot.title', 'plt.title', (["('Gain for %s, reference pointing %s' % (type, reference_pointing))"], {}), "('Gain for %s, reference pointing %s' % (type, reference_pointing))\n", (4388, 4455), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4493), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (4480, 4493), True, 'import matplotlib.pyplot as plt\n'), ((2673, 2701), 'rascil.data_models.polarisation.PolarisationFrame', 'PolarisationFrame', (['"""stokesI"""'], {}), "('stokesI')\n", (2690, 2701), False, 'from rascil.data_models.polarisation import PolarisationFrame\n'), ((4243, 4282), 'numpy.real', 'numpy.real', (['gt[0].gain[:, ant, 0, 0, 0]'], {}), '(gt[0].gain[:, ant, 0, 0, 0])\n', (4253, 4282), False, 'import numpy\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test lvmutil.funcfits.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# The line above will help with 2to3 support.
import unittest
import numpy as np
from warnings import catch_warnings, simplefilter
from ..funcfits import func_fit, func_val, iter_fit, mk_fit_dict
class TestFuncFits(unittest.TestCase):
"""Test lvmutil.funcfits
"""
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def test_mk_fit_dict(self):
"""Test fit dict
"""
fdict = mk_fit_dict(np.arange(10), 5, 'legendre', xmin=0., xmax=5000.)
assert isinstance(fdict, dict)
def test_poly_fit(self):
"""Test polynomial fit.
"""
x = np.linspace(0, np.pi, 50)
y = np.sin(x)
# Fit
dfit = func_fit(x, y, 'polynomial', 3)
x2 = np.linspace(0, np.pi, 100)
y2 = func_val(x2, dfit)
np.testing.assert_allclose(y2[50], 0.97854984428713754)
def test_legendre_fit(self):
"""Test Legendre fit.
"""
# Generate data
x = np.linspace(0, np.pi, 50)
y = np.sin(x)
# Fit
dfit = func_fit(x, y, 'legendre', 4)
x2 = np.linspace(0, np.pi, 100)
y2 = func_val(x2, dfit)
np.testing.assert_allclose(y2[50], 0.99940823486206976)
def test_cheby_fit(self):
"""Test Chebyshev fit.
"""
# Generate data
x = np.linspace(0, np.pi, 50)
y = np.sin(x)
# Fit
dfit = func_fit(x, y, 'chebyshev', 4)
x2 = np.linspace(0, np.pi, 100)
y2 = func_val(x2, dfit)
np.testing.assert_allclose(y2[50], 0.99940823486206942)
def test_fit_with_sigma(self):
"""Test fit with sigma.
"""
# Generate data
x = np.linspace(0, np.pi, 50)
y = np.sin(x)
sigy = np.ones_like(y)*0.1
sigy[::2] = 0.15
# Fit
dfit = func_fit(x, y, 'legendre', 4, w=1./sigy)
x2 = np.linspace(0, np.pi, 100)
y2 = func_val(x2, dfit)
np.testing.assert_allclose(y2[50], 0.99941056289796115)
def test_func_fit_other(self):
"""Test corner cases in fitting.
"""
# Generate data
x = np.linspace(0, np.pi, 50)
y = np.sin(x)
# Fit
with self.assertRaises(ValueError):
dfit = func_fit(x, y, 'fourier', 4)
dfit = func_fit(x, y, 'polynomial', 3)
dfit['func'] = 'fourier'
x2 = np.linspace(0, np.pi, 100)
with self.assertRaises(ValueError):
y2 = func_val(x2, dfit)
x = np.array([1.0])
y = np.array([2.0])
with catch_warnings(record=True) as w:
# simplefilter("always")
dfit = func_fit(x, y, 'polynomial', 1)
self.assertEqual(len(w), 1)
self.assertIn('conditioned', str(w[-1].message))
self.assertEqual(dfit['xmin'], -1.0)
self.assertEqual(dfit['xmax'], 1.0)
def test_iterfit(self):
"""Test iter fit with Legendre.
"""
# Generate data
x = np.linspace(0, np.pi, 100)
y = np.sin(x)
#
y[50] = 3.
# Fit
dfit, mask = iter_fit(x, y, 'legendre', 4)
self.assertEqual(mask.sum(), 1)
x2 = np.linspace(0, np.pi, 100)
y2 = func_val(x2, dfit)
np.testing.assert_allclose(y2[50], 0.99941444872371643)
def test_iterfit2(self):
"""Test iter fit with some special cases.
"""
# Generate data
x = np.linspace(0, np.pi, 100)
y = np.sin(x)
#
y[50] = 3.
# Fit
with catch_warnings(record=True) as w:
# simplefilter("always")
dfit, mask = iter_fit(x, y, 'legendre', 4, forceimask=True)
self.assertEqual(len(w), 1)
self.assertEqual(str(w[-1].message),
"Initial mask cannot be enforced -- " +
"no initital mask supplied")
x2 = np.linspace(0, np.pi, 100)
y2 = func_val(x2, dfit)
np.testing.assert_allclose(y2[50], 0.99941444872371643)
def test_suite():
"""Allows testing of only this module with the command::
python setup.py test -m <modulename>
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| [
"numpy.ones_like",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"warnings.catch_warnings",
"numpy.testing.assert_allclose",
"unittest.defaultTestLoader.loadTestsFromName"
] | [((4444, 4498), 'unittest.defaultTestLoader.loadTestsFromName', 'unittest.defaultTestLoader.loadTestsFromName', (['__name__'], {}), '(__name__)\n', (4488, 4498), False, 'import unittest\n'), ((890, 915), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(50)'], {}), '(0, np.pi, 50)\n', (901, 915), True, 'import numpy as np\n'), ((928, 937), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (934, 937), True, 'import numpy as np\n'), ((1012, 1038), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (1023, 1038), True, 'import numpy as np\n'), ((1079, 1133), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y2[50]', '(0.9785498442871375)'], {}), '(y2[50], 0.9785498442871375)\n', (1105, 1133), True, 'import numpy as np\n'), ((1247, 1272), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(50)'], {}), '(0, np.pi, 50)\n', (1258, 1272), True, 'import numpy as np\n'), ((1285, 1294), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1291, 1294), True, 'import numpy as np\n'), ((1367, 1393), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (1378, 1393), True, 'import numpy as np\n'), ((1434, 1488), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y2[50]', '(0.9994082348620698)'], {}), '(y2[50], 0.9994082348620698)\n', (1460, 1488), True, 'import numpy as np\n'), ((1600, 1625), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(50)'], {}), '(0, np.pi, 50)\n', (1611, 1625), True, 'import numpy as np\n'), ((1638, 1647), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1644, 1647), True, 'import numpy as np\n'), ((1721, 1747), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (1732, 1747), True, 'import numpy as np\n'), ((1788, 1842), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y2[50]', '(0.9994082348620694)'], {}), '(y2[50], 0.9994082348620694)\n', (1814, 1842), True, 'import numpy as np\n'), ((1960, 1985), 
'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(50)'], {}), '(0, np.pi, 50)\n', (1971, 1985), True, 'import numpy as np\n'), ((1998, 2007), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2004, 2007), True, 'import numpy as np\n'), ((2151, 2177), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (2162, 2177), True, 'import numpy as np\n'), ((2218, 2272), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y2[50]', '(0.9994105628979612)'], {}), '(y2[50], 0.9994105628979612)\n', (2244, 2272), True, 'import numpy as np\n'), ((2399, 2424), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(50)'], {}), '(0, np.pi, 50)\n', (2410, 2424), True, 'import numpy as np\n'), ((2437, 2446), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2443, 2446), True, 'import numpy as np\n'), ((2646, 2672), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (2657, 2672), True, 'import numpy as np\n'), ((2765, 2780), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2773, 2780), True, 'import numpy as np\n'), ((2793, 2808), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (2801, 2808), True, 'import numpy as np\n'), ((3251, 3277), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (3262, 3277), True, 'import numpy as np\n'), ((3290, 3299), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3296, 3299), True, 'import numpy as np\n'), ((3447, 3473), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (3458, 3473), True, 'import numpy as np\n'), ((3514, 3568), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y2[50]', '(0.9994144487237164)'], {}), '(y2[50], 0.9994144487237164)\n', (3540, 3568), True, 'import numpy as np\n'), ((3698, 3724), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (3709, 3724), True, 'import numpy as np\n'), ((3737, 3746), 'numpy.sin', 'np.sin', 
(['x'], {}), '(x)\n', (3743, 3746), True, 'import numpy as np\n'), ((4175, 4201), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(100)'], {}), '(0, np.pi, 100)\n', (4186, 4201), True, 'import numpy as np\n'), ((4242, 4296), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y2[50]', '(0.9994144487237164)'], {}), '(y2[50], 0.9994144487237164)\n', (4268, 4296), True, 'import numpy as np\n'), ((714, 727), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (723, 727), True, 'import numpy as np\n'), ((2023, 2038), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (2035, 2038), True, 'import numpy as np\n'), ((2822, 2849), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (2836, 2849), False, 'from warnings import catch_warnings, simplefilter\n'), ((3803, 3830), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3817, 3830), False, 'from warnings import catch_warnings, simplefilter\n')] |
import numpy as np
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from ManipulatorCore import Joint, ManipulatorCore
def test_arm_1():
ph = np.pi / 2
bot = ManipulatorCore([
Joint('prismatic', -ph, 10, 0, ph),
Joint('prismatic', -ph, 20, 0, ph),
Joint('prismatic', np.pi, 30, 0, 0)
])
assert_array_almost_equal(bot.arm_matrix,
np.array([[0, 1, 0, -20],
[0, 0, 1, 30],
[1, 0, 0, 10],
[0, 0, 0, 1]]))
def test_arm_2():
ph = np.pi / 2
bot = ManipulatorCore([
Joint('revolute', ph, 450, 0, ph),
Joint('prismatic', 0, 20, 0, ph),
Joint('revolute', 0, 250, 0, 0)
])
assert_array_almost_equal(bot.arm_matrix,
np.array([[0, 1, 0, 20],
[1, 0, 0, 0],
[0, 0, -1, 200],
[0, 0, 0, 1]]))
def test_arm_3():
ph = np.pi / 2
bot = ManipulatorCore([
Joint('revolute', 0, 0, 0, 0),
Joint('prismatic', 0, 20, 0, ph),
Joint('prismatic', 0, 30, 0, ph),
Joint('revolute', ph, 40, 0, 0)
])
assert_array_almost_equal(bot.arm_matrix,
np.array([[0, -1, 0, 0],
[-1, 0, 0, -30],
[0, 0, -1, -20],
[0, 0, 0, 1]])) | [
"ManipulatorCore.Joint",
"numpy.array"
] | [((441, 511), 'numpy.array', 'np.array', (['[[0, 1, 0, -20], [0, 0, 1, 30], [1, 0, 0, 10], [0, 0, 0, 1]]'], {}), '([[0, 1, 0, -20], [0, 0, 1, 30], [1, 0, 0, 10], [0, 0, 0, 1]])\n', (449, 511), True, 'import numpy as np\n'), ((911, 981), 'numpy.array', 'np.array', (['[[0, 1, 0, 20], [1, 0, 0, 0], [0, 0, -1, 200], [0, 0, 0, 1]]'], {}), '([[0, 1, 0, 20], [1, 0, 0, 0], [0, 0, -1, 200], [0, 0, 0, 1]])\n', (919, 981), True, 'import numpy as np\n'), ((1419, 1492), 'numpy.array', 'np.array', (['[[0, -1, 0, 0], [-1, 0, 0, -30], [0, 0, -1, -20], [0, 0, 0, 1]]'], {}), '([[0, -1, 0, 0], [-1, 0, 0, -30], [0, 0, -1, -20], [0, 0, 0, 1]])\n', (1427, 1492), True, 'import numpy as np\n'), ((234, 268), 'ManipulatorCore.Joint', 'Joint', (['"""prismatic"""', '(-ph)', '(10)', '(0)', 'ph'], {}), "('prismatic', -ph, 10, 0, ph)\n", (239, 268), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((278, 312), 'ManipulatorCore.Joint', 'Joint', (['"""prismatic"""', '(-ph)', '(20)', '(0)', 'ph'], {}), "('prismatic', -ph, 20, 0, ph)\n", (283, 312), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((322, 357), 'ManipulatorCore.Joint', 'Joint', (['"""prismatic"""', 'np.pi', '(30)', '(0)', '(0)'], {}), "('prismatic', np.pi, 30, 0, 0)\n", (327, 357), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((710, 743), 'ManipulatorCore.Joint', 'Joint', (['"""revolute"""', 'ph', '(450)', '(0)', 'ph'], {}), "('revolute', ph, 450, 0, ph)\n", (715, 743), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((753, 785), 'ManipulatorCore.Joint', 'Joint', (['"""prismatic"""', '(0)', '(20)', '(0)', 'ph'], {}), "('prismatic', 0, 20, 0, ph)\n", (758, 785), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((795, 826), 'ManipulatorCore.Joint', 'Joint', (['"""revolute"""', '(0)', '(250)', '(0)', '(0)'], {}), "('revolute', 0, 250, 0, 0)\n", (800, 826), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((1179, 1208), 
'ManipulatorCore.Joint', 'Joint', (['"""revolute"""', '(0)', '(0)', '(0)', '(0)'], {}), "('revolute', 0, 0, 0, 0)\n", (1184, 1208), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((1218, 1250), 'ManipulatorCore.Joint', 'Joint', (['"""prismatic"""', '(0)', '(20)', '(0)', 'ph'], {}), "('prismatic', 0, 20, 0, ph)\n", (1223, 1250), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((1260, 1292), 'ManipulatorCore.Joint', 'Joint', (['"""prismatic"""', '(0)', '(30)', '(0)', 'ph'], {}), "('prismatic', 0, 30, 0, ph)\n", (1265, 1292), False, 'from ManipulatorCore import Joint, ManipulatorCore\n'), ((1302, 1333), 'ManipulatorCore.Joint', 'Joint', (['"""revolute"""', 'ph', '(40)', '(0)', '(0)'], {}), "('revolute', ph, 40, 0, 0)\n", (1307, 1333), False, 'from ManipulatorCore import Joint, ManipulatorCore\n')] |
import re
import pandas as pd
import numpy as np
import argparse
def tlgHistoryRecall(table, pointID):
init = table[table.pointID == pointID]
track = ":"+str(init.frameNumber.values[0])+"-"+str(init.pointID.values[0])
parental = init.parentID.values[0]
while (parental>0) :
init = table[table.pointID == parental]
track = ","+str(init.frameNumber.values[0])+"-"+str(init.pointID.values[0]) + track
parental = init.parentID.values[0]
return(track)
def getAncestory(table,pointID):
init = table[table.pointID == pointID]
parental = init.parentID.values[0]
div=init.Div.values[0]
ttLast = 0
if (div==1):
div=0;
while (parental>0)&(div ==0) :
init = table[table.pointID == parental]
parental = init.parentID.values[0]
div=init.Div.values[0]
ttLast = ttLast+1
lcAncestor = init.pointID.values[0]
outFrame = pd.DataFrame({"pointID": [pointID], "timefromDiv": [ttLast], "lcAncest": [lcAncestor]})
return(outFrame)
def detailTLGoutput(table):
parents = table.parentID.unique()[table.parentID.unique()!=0]
addon1 = pd.DataFrame()
for i in parents:
checklen = (len(table[table.parentID == i]))
if(checklen==1):
div = 0
else:
div = 1
addon1 = addon1.append(pd.DataFrame({"pointID": [i],"Div": [div]}))
points = table.pointID.unique()
res = [i for i in points if i not in parents]
addon2 = pd.DataFrame({"pointID": res, "end": 1})
tst2 = pd.merge(pd.merge(table,addon1,how="outer"),addon2,how="outer")
tst2 = tst2.fillna(0)
tst2 = tst2.astype({'Div': 'int', 'end': 'int'})
nde = pd.DataFrame()
for j in tst2.pointID.values:
tmp_ancestor = getAncestory(tst2,j)
nde = nde.append(tmp_ancestor)
final = pd.merge(tst2,nde,how="outer")
return(final)
def ancestorStr(refTable,ancestor):
tmp=refTable[refTable.lcAncest==ancestor];
if len(tmp.pointID.values)>1:
tmp = tmp[tmp.lcAncest != tmp.pointID]
if len(tmp.pointID.values)>1:
str0 = str(tmp.pointID.values[0])+":"+str(tmp.timefromDiv.values[0])
str1 = str(tmp.pointID.values[1])+":"+str(tmp.timefromDiv.values[1])
return("("+str0+","+str1+")"+str(ancestor))
else:
str0 = str(tmp.pointID.values[0])+":"+str(tmp.timefromDiv.values[0])
return("("+str0+")"+str(ancestor))
else:
str0 = str(tmp.pointID.values[0])+":"+str(tmp.timefromDiv.values[0])
return("("+str0+")"+str(ancestor))
def generateNewick(TLGfile):
# Load output from TimelapseGUI
tstng = pd.read_csv(TLGfile) ;
# Use function to generate extra data
tlg = detailTLGoutput(tstng)
# Determine the ancestor and end nodes
ancestorNodes = tlg.lcAncest.unique();
endNodes = tlg[tlg.end==1].pointID.unique()
# Refine the search to only include ancestors and ends
refined = tlg[tlg['pointID'].isin(np.concatenate([ancestorNodes,endNodes]))]
refined = refined.fillna(0)
refined = refined.astype({'lcAncest': 'int', 'pointID': 'int','parentID': 'int','frameNumber': 'int'})
refined.frameNumber = refined.frameNumber+1
#refined.loc[refined.parentID == 0,'Div'] = 1
#refined.loc[refined.parentID == 0,'lcAncest'] = 0
#refined = refined[:-1]
refined['coupling'] = refined['lcAncest'].apply(lambda x: ancestorStr(refined,x))
toplvlIDs=refined[(refined.lcAncest == refined.pointID)&(refined.parentID == 0)][:-1].pointID.values
replaceForTop = "("+":1,".join(str(x) for x in toplvlIDs)+":1)0;"
refined.loc[refined.lcAncest==refined.pointID,'coupling'] = replaceForTop
refined = refined.loc[~refined['pointID'].isin(toplvlIDs)]
LCAdf = pd.DataFrame() ;
for i in refined.lcAncest.unique():
tmpLCA = refined[refined.lcAncest == i]
tmpLCA = tmpLCA.loc[:,['lcAncest','frameNumber','coupling']].drop_duplicates()
LCAdf = LCAdf.append(tmpLCA[tmpLCA.frameNumber == tmpLCA.frameNumber.min()])
LCAsort = LCAdf.sort_values(by=['frameNumber'])
initial_line = LCAsort.iloc[0].coupling
for i in range(1,len(LCAsort)):
pattern = "\("+str(LCAsort.iloc[i].lcAncest)+":"
replace = "("+LCAsort.iloc[i].coupling+":"
newline = re.sub(pattern, replace,initial_line)
if(newline == initial_line):
pattern = "\,"+str(LCAsort.iloc[i].lcAncest)+":"
replace = ","+LCAsort.iloc[i].coupling+":"
newline = re.sub(pattern, replace,initial_line)
initial_line = newline
return(initial_line)
def main():
parser = argparse.ArgumentParser('Generate Newick-formatter tree.')
parser.add_argument('--in', type = str, dest='file', help = 'Path and file in timelapse GUI output form (CSV).')
args = parser.parse_args()
newick = generateNewick(args.file)
print(newick)
return
if __name__ == '__main__':
main()
| [
"pandas.DataFrame",
"argparse.ArgumentParser",
"pandas.read_csv",
"pandas.merge",
"re.sub",
"numpy.concatenate"
] | [((922, 1014), 'pandas.DataFrame', 'pd.DataFrame', (["{'pointID': [pointID], 'timefromDiv': [ttLast], 'lcAncest': [lcAncestor]}"], {}), "({'pointID': [pointID], 'timefromDiv': [ttLast], 'lcAncest': [\n lcAncestor]})\n", (934, 1014), True, 'import pandas as pd\n'), ((1140, 1154), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1152, 1154), True, 'import pandas as pd\n'), ((1485, 1525), 'pandas.DataFrame', 'pd.DataFrame', (["{'pointID': res, 'end': 1}"], {}), "({'pointID': res, 'end': 1})\n", (1497, 1525), True, 'import pandas as pd\n'), ((1692, 1706), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1704, 1706), True, 'import pandas as pd\n'), ((1837, 1869), 'pandas.merge', 'pd.merge', (['tst2', 'nde'], {'how': '"""outer"""'}), "(tst2, nde, how='outer')\n", (1845, 1869), True, 'import pandas as pd\n'), ((2658, 2678), 'pandas.read_csv', 'pd.read_csv', (['TLGfile'], {}), '(TLGfile)\n', (2669, 2678), True, 'import pandas as pd\n'), ((3772, 3786), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3784, 3786), True, 'import pandas as pd\n'), ((4646, 4704), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Generate Newick-formatter tree."""'], {}), "('Generate Newick-formatter tree.')\n", (4669, 4704), False, 'import argparse\n'), ((1547, 1583), 'pandas.merge', 'pd.merge', (['table', 'addon1'], {'how': '"""outer"""'}), "(table, addon1, how='outer')\n", (1555, 1583), True, 'import pandas as pd\n'), ((4310, 4348), 're.sub', 're.sub', (['pattern', 'replace', 'initial_line'], {}), '(pattern, replace, initial_line)\n', (4316, 4348), False, 'import re\n'), ((1340, 1384), 'pandas.DataFrame', 'pd.DataFrame', (["{'pointID': [i], 'Div': [div]}"], {}), "({'pointID': [i], 'Div': [div]})\n", (1352, 1384), True, 'import pandas as pd\n'), ((2990, 3031), 'numpy.concatenate', 'np.concatenate', (['[ancestorNodes, endNodes]'], {}), '([ancestorNodes, endNodes])\n', (3004, 3031), True, 'import numpy as np\n'), ((4523, 4561), 're.sub', 're.sub', (['pattern', 
'replace', 'initial_line'], {}), '(pattern, replace, initial_line)\n', (4529, 4561), False, 'import re\n')] |
#!/usr/bin/env python
import argparse
import codecs
import os
import re
import shutil
import warnings
from collections import defaultdict
from pathlib import Path
from typing import Dict, Generator, List, Optional, Tuple
import keyboardlayout as kl
import keyboardlayout.pygame as klp
import librosa
import numpy
import pygame
import soundfile
ANCHOR_INDICATOR = " anchor"
ANCHOR_NOTE_REGEX = re.compile(r"\s[abcdefg]$")
DESCRIPTION = 'Use your computer keyboard as a "piano"'
DESCRIPTOR_32BIT = "FLOAT"
BITS_32BIT = 32
AUDIO_ALLOWED_CHANGES_HARDWARE_DETERMINED = 0
SOUND_FADE_MILLISECONDS = 50
CYAN = (0, 255, 255, 255)
BLACK = (0, 0, 0, 255)
WHITE = (255, 255, 255, 255)
AUDIO_ASSET_PREFIX = "audio_files/"
KEYBOARD_ASSET_PREFIX = "keyboards/"
CURRENT_WORKING_DIR = Path(__file__).parent.absolute()
ALLOWED_EVENTS = {pygame.KEYDOWN, pygame.KEYUP, pygame.QUIT}
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description=DESCRIPTION)
default_wav_file = "audio_files/piano_c4.wav"
parser.add_argument(
"--wav",
"-w",
metavar="FILE",
type=str,
default=default_wav_file,
help="WAV file (default: {})".format(default_wav_file),
)
default_keyboard_file = "keyboards/qwerty_piano.txt"
parser.add_argument(
"--keyboard",
"-k",
metavar="FILE",
type=str,
default=default_keyboard_file,
help="keyboard file (default: {})".format(default_keyboard_file),
)
parser.add_argument(
"--clear-cache",
"-c",
default=False,
action="store_true",
help="deletes stored transposed audio files and recalculates them",
)
parser.add_argument("--verbose", "-v", action="store_true", help="verbose mode")
return parser
def get_or_create_key_sounds(
wav_path: str,
sample_rate_hz: int,
channels: int,
tones: List[int],
clear_cache: bool,
keys: List[str],
) -> Generator[pygame.mixer.Sound, None, None]:
sounds = []
y, sr = librosa.load(wav_path, sr=sample_rate_hz, mono=channels == 1)
file_name = os.path.splitext(os.path.basename(wav_path))[0]
folder_containing_wav = Path(wav_path).parent.absolute()
cache_folder_path = Path(folder_containing_wav, file_name)
if clear_cache and cache_folder_path.exists():
shutil.rmtree(cache_folder_path)
if not cache_folder_path.exists():
print("Generating samples for each key")
os.mkdir(cache_folder_path)
for i, tone in enumerate(tones):
cached_path = Path(cache_folder_path, "{}.wav".format(tone))
if Path(cached_path).exists():
print("Loading note {} out of {} for {}".format(i + 1, len(tones), keys[i]))
sound, sr = librosa.load(cached_path, sr=sample_rate_hz, mono=channels == 1)
if channels > 1:
# the shape must be [length, 2]
sound = numpy.transpose(sound)
else:
print(
"Transposing note {} out of {} for {}".format(
i + 1, len(tones), keys[i]
)
)
if channels == 1:
sound = librosa.effects.pitch_shift(y, sr, n_steps=tone)
else:
new_channels = [
librosa.effects.pitch_shift(y[i], sr, n_steps=tone)
for i in range(channels)
]
sound = numpy.ascontiguousarray(numpy.vstack(new_channels).T)
soundfile.write(cached_path, sound, sample_rate_hz, DESCRIPTOR_32BIT)
sounds.append(sound)
sounds = map(pygame.sndarray.make_sound, sounds)
return sounds
BLACK_INDICES_C_SCALE = [1, 3, 6, 8, 10]
LETTER_KEYS_TO_INDEX = {"c": 0, "d": 2, "e": 4, "f": 5, "g": 7, "a": 9, "b": 11}
def __get_black_key_indices(key_name: str) -> set:
letter_key_index = LETTER_KEYS_TO_INDEX[key_name]
black_key_indices = set()
for ind in BLACK_INDICES_C_SCALE:
new_index = ind - letter_key_index
if new_index < 0:
new_index += 12
black_key_indices.add(new_index)
return black_key_indices
def get_keyboard_info(keyboard_file: str):
with codecs.open(keyboard_file, encoding="utf-8") as k_file:
lines = k_file.readlines()
keys = []
anchor_index = -1
black_key_indices = set()
for i, line in enumerate(lines):
line = line.strip()
if not line:
continue
match = ANCHOR_NOTE_REGEX.search(line)
if match:
anchor_index = i
black_key_indices = __get_black_key_indices(line[-1])
key = kl.Key(line[: match.start(0)])
elif line.endswith(ANCHOR_INDICATOR):
anchor_index = i
key = kl.Key(line[: -len(ANCHOR_INDICATOR)])
else:
key = kl.Key(line)
keys.append(key)
if anchor_index == -1:
raise ValueError(
"Invalid keyboard file, one key must have an anchor note or the "
"word anchor written next to it.\n"
"For example 'm c OR m anchor'.\n"
"That tells the program that the wav file will be used for key m, "
"and all other keys will be pitch shifted higher or lower from "
"that anchor. If an anchor note is used then keys are colored black "
"and white like a piano. If the word anchor is used, then the "
"highest key is white, and keys get darker as they descend in pitch."
)
tones = [i - anchor_index for i in range(len(keys))]
color_to_key = defaultdict(list)
if black_key_indices:
key_color = (120, 120, 120, 255)
key_txt_color = (50, 50, 50, 255)
else:
key_color = (65, 65, 65, 255)
key_txt_color = (0, 0, 0, 255)
for index, key in enumerate(keys):
if index == anchor_index:
color_to_key[CYAN].append(key)
continue
if black_key_indices:
used_index = (index - anchor_index) % 12
if used_index in black_key_indices:
color_to_key[BLACK].append(key)
continue
color_to_key[WHITE].append(key)
continue
# anchor mode, keys go up in half steps and we do not color black keys
# instead we color from grey low to white high
rgb_val = 255 - (len(keys) - 1 - index) * 3
color = (rgb_val, rgb_val, rgb_val, 255)
color_to_key[color].append(key)
return keys, tones, color_to_key, key_color, key_txt_color
def configure_pygame_audio_and_set_ui(
framerate_hz: int,
channels: int,
keyboard_arg: str,
color_to_key: Dict[str, List[kl.Key]],
key_color: Tuple[int, int, int, int],
key_txt_color: Tuple[int, int, int, int],
) -> Tuple[pygame.Surface, klp.KeyboardLayout]:
# ui
pygame.display.init()
pygame.display.set_caption("pianoputer")
# block events that we don't want, this must be after display.init
pygame.event.set_blocked(None)
pygame.event.set_allowed(list(ALLOWED_EVENTS))
# fonts
pygame.font.init()
# audio
pygame.mixer.init(
framerate_hz,
BITS_32BIT,
channels,
allowedchanges=AUDIO_ALLOWED_CHANGES_HARDWARE_DETERMINED,
)
screen_width = 50
screen_height = 50
if "qwerty" in keyboard_arg:
layout_name = kl.LayoutName.QWERTY
elif "azerty" in keyboard_arg:
layout_name = kl.LayoutName.AZERTY_LAPTOP
else:
ValueError("keyboard must have qwerty or azerty in its name")
margin = 4
key_size = 60
overrides = {}
for color_value, keys in color_to_key.items():
override_color = color = pygame.Color(color_value)
inverted_color = list(~override_color)
other_val = 255
if (
abs(color_value[0] - inverted_color[0]) > abs(color_value[0] - other_val)
) or color_value == CYAN:
override_txt_color = pygame.Color(inverted_color)
else:
# biases grey override keys to use white as txt_color
override_txt_color = pygame.Color([other_val] * 3 + [255])
override_key_info = kl.KeyInfo(
margin=margin,
color=override_color,
txt_color=override_txt_color,
txt_font=pygame.font.SysFont("Arial", key_size // 4),
txt_padding=(key_size // 10, key_size // 10),
)
for key in keys:
overrides[key.value] = override_key_info
key_txt_color = pygame.Color(key_txt_color)
keyboard_info = kl.KeyboardInfo(position=(0, 0), padding=2, color=key_txt_color)
key_info = kl.KeyInfo(
margin=margin,
color=pygame.Color(key_color),
txt_color=pygame.Color(key_txt_color),
txt_font=pygame.font.SysFont("Arial", key_size // 4),
txt_padding=(key_size // 6, key_size // 10),
)
letter_key_size = (key_size, key_size) # width, height
keyboard = klp.KeyboardLayout(
layout_name, keyboard_info, letter_key_size, key_info, overrides
)
screen_width = keyboard.rect.width
screen_height = keyboard.rect.height
screen = pygame.display.set_mode((screen_width, screen_height))
screen.fill(pygame.Color("black"))
if keyboard:
keyboard.draw(screen)
pygame.display.update()
return screen, keyboard
def play_until_user_exits(
keys: List[kl.Key],
key_sounds: List[pygame.mixer.Sound],
keyboard: klp.KeyboardLayout,
):
sound_by_key = dict(zip(keys, key_sounds))
playing = True
while playing:
for event in pygame.event.get():
if event.type == pygame.QUIT:
playing = False
break
elif event.type == pygame.KEYDOWN:
if event.type == pygame.K_ESCAPE:
playing = False
break
key = keyboard.get_key(event)
if key is None:
continue
try:
sound = sound_by_key[key]
except KeyError:
continue
if event.type == pygame.KEYDOWN:
sound.stop()
sound.play(fade_ms=SOUND_FADE_MILLISECONDS)
elif event.type == pygame.KEYUP:
sound.fadeout(SOUND_FADE_MILLISECONDS)
pygame.quit()
print("Goodbye")
def get_audio_data(wav_path: str) -> Tuple:
audio_data, framerate_hz = soundfile.read(wav_path)
array_shape = audio_data.shape
if len(array_shape) == 1:
channels = 1
else:
channels = array_shape[1]
return audio_data, framerate_hz, channels
def process_args(parser: argparse.ArgumentParser, args: Optional[List]) -> Tuple:
if args:
args = parser.parse_args(args)
else:
args = parser.parse_args()
# Enable warnings from scipy if requested
if not args.verbose:
warnings.simplefilter("ignore")
wav_path = args.wav
if wav_path.startswith(AUDIO_ASSET_PREFIX):
wav_path = os.path.join(CURRENT_WORKING_DIR, wav_path)
keyboard_path = args.keyboard
if keyboard_path.startswith(KEYBOARD_ASSET_PREFIX):
keyboard_path = os.path.join(CURRENT_WORKING_DIR, keyboard_path)
return wav_path, keyboard_path, args.clear_cache
def play_pianoputer(args: Optional[List[str]] = None):
parser = get_parser()
wav_path, keyboard_path, clear_cache = process_args(parser, args)
audio_data, framerate_hz, channels = get_audio_data(wav_path)
results = get_keyboard_info(keyboard_path)
keys, tones, color_to_key, key_color, key_txt_color = results
key_sounds = get_or_create_key_sounds(
wav_path, framerate_hz, channels, tones, clear_cache, keys
)
_screen, keyboard = configure_pygame_audio_and_set_ui(
framerate_hz, channels, keyboard_path, color_to_key, key_color, key_txt_color
)
print(
"Ready for you to play!\n"
"Press the keys on your keyboard. "
"To exit presss ESC or close the pygame window"
)
play_until_user_exits(keys, key_sounds, keyboard)
if __name__ == "__main__":
play_pianoputer()
| [
"os.mkdir",
"argparse.ArgumentParser",
"keyboardlayout.KeyboardInfo",
"keyboardlayout.pygame.KeyboardLayout",
"pygame.event.get",
"pygame.mixer.init",
"collections.defaultdict",
"pathlib.Path",
"pygame.font.init",
"pygame.display.update",
"shutil.rmtree",
"os.path.join",
"codecs.open",
"wa... | [((396, 423), 're.compile', 're.compile', (['"""\\\\s[abcdefg]$"""'], {}), "('\\\\s[abcdefg]$')\n", (406, 423), False, 'import re\n'), ((926, 974), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESCRIPTION'}), '(description=DESCRIPTION)\n', (949, 974), False, 'import argparse\n'), ((2045, 2106), 'librosa.load', 'librosa.load', (['wav_path'], {'sr': 'sample_rate_hz', 'mono': '(channels == 1)'}), '(wav_path, sr=sample_rate_hz, mono=channels == 1)\n', (2057, 2106), False, 'import librosa\n'), ((2256, 2294), 'pathlib.Path', 'Path', (['folder_containing_wav', 'file_name'], {}), '(folder_containing_wav, file_name)\n', (2260, 2294), False, 'from pathlib import Path\n'), ((5586, 5603), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5597, 5603), False, 'from collections import defaultdict\n'), ((6843, 6864), 'pygame.display.init', 'pygame.display.init', ([], {}), '()\n', (6862, 6864), False, 'import pygame\n'), ((6869, 6909), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""pianoputer"""'], {}), "('pianoputer')\n", (6895, 6909), False, 'import pygame\n'), ((6986, 7016), 'pygame.event.set_blocked', 'pygame.event.set_blocked', (['None'], {}), '(None)\n', (7010, 7016), False, 'import pygame\n'), ((7085, 7103), 'pygame.font.init', 'pygame.font.init', ([], {}), '()\n', (7101, 7103), False, 'import pygame\n'), ((7121, 7237), 'pygame.mixer.init', 'pygame.mixer.init', (['framerate_hz', 'BITS_32BIT', 'channels'], {'allowedchanges': 'AUDIO_ALLOWED_CHANGES_HARDWARE_DETERMINED'}), '(framerate_hz, BITS_32BIT, channels, allowedchanges=\n AUDIO_ALLOWED_CHANGES_HARDWARE_DETERMINED)\n', (7138, 7237), False, 'import pygame\n'), ((8514, 8541), 'pygame.Color', 'pygame.Color', (['key_txt_color'], {}), '(key_txt_color)\n', (8526, 8541), False, 'import pygame\n'), ((8562, 8626), 'keyboardlayout.KeyboardInfo', 'kl.KeyboardInfo', ([], {'position': '(0, 0)', 'padding': '(2)', 'color': 'key_txt_color'}), '(position=(0, 
0), padding=2, color=key_txt_color)\n', (8577, 8626), True, 'import keyboardlayout as kl\n'), ((8959, 9047), 'keyboardlayout.pygame.KeyboardLayout', 'klp.KeyboardLayout', (['layout_name', 'keyboard_info', 'letter_key_size', 'key_info', 'overrides'], {}), '(layout_name, keyboard_info, letter_key_size, key_info,\n overrides)\n', (8977, 9047), True, 'import keyboardlayout.pygame as klp\n'), ((9152, 9206), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(screen_width, screen_height)'], {}), '((screen_width, screen_height))\n', (9175, 9206), False, 'import pygame\n'), ((9297, 9320), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (9318, 9320), False, 'import pygame\n'), ((10313, 10326), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (10324, 10326), False, 'import pygame\n'), ((10425, 10449), 'soundfile.read', 'soundfile.read', (['wav_path'], {}), '(wav_path)\n', (10439, 10449), False, 'import soundfile\n'), ((2354, 2386), 'shutil.rmtree', 'shutil.rmtree', (['cache_folder_path'], {}), '(cache_folder_path)\n', (2367, 2386), False, 'import shutil\n'), ((2483, 2510), 'os.mkdir', 'os.mkdir', (['cache_folder_path'], {}), '(cache_folder_path)\n', (2491, 2510), False, 'import os\n'), ((4202, 4246), 'codecs.open', 'codecs.open', (['keyboard_file'], {'encoding': '"""utf-8"""'}), "(keyboard_file, encoding='utf-8')\n", (4213, 4246), False, 'import codecs\n'), ((7695, 7720), 'pygame.Color', 'pygame.Color', (['color_value'], {}), '(color_value)\n', (7707, 7720), False, 'import pygame\n'), ((9223, 9244), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (9235, 9244), False, 'import pygame\n'), ((9588, 9606), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (9604, 9606), False, 'import pygame\n'), ((10887, 10918), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (10908, 10918), False, 'import warnings\n'), ((11011, 11054), 'os.path.join', 'os.path.join', (['CURRENT_WORKING_DIR', 
'wav_path'], {}), '(CURRENT_WORKING_DIR, wav_path)\n', (11023, 11054), False, 'import os\n'), ((11170, 11218), 'os.path.join', 'os.path.join', (['CURRENT_WORKING_DIR', 'keyboard_path'], {}), '(CURRENT_WORKING_DIR, keyboard_path)\n', (11182, 11218), False, 'import os\n'), ((772, 786), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (776, 786), False, 'from pathlib import Path\n'), ((2140, 2166), 'os.path.basename', 'os.path.basename', (['wav_path'], {}), '(wav_path)\n', (2156, 2166), False, 'import os\n'), ((2769, 2833), 'librosa.load', 'librosa.load', (['cached_path'], {'sr': 'sample_rate_hz', 'mono': '(channels == 1)'}), '(cached_path, sr=sample_rate_hz, mono=channels == 1)\n', (2781, 2833), False, 'import librosa\n'), ((3512, 3581), 'soundfile.write', 'soundfile.write', (['cached_path', 'sound', 'sample_rate_hz', 'DESCRIPTOR_32BIT'], {}), '(cached_path, sound, sample_rate_hz, DESCRIPTOR_32BIT)\n', (3527, 3581), False, 'import soundfile\n'), ((7958, 7986), 'pygame.Color', 'pygame.Color', (['inverted_color'], {}), '(inverted_color)\n', (7970, 7986), False, 'import pygame\n'), ((8100, 8137), 'pygame.Color', 'pygame.Color', (['([other_val] * 3 + [255])'], {}), '([other_val] * 3 + [255])\n', (8112, 8137), False, 'import pygame\n'), ((8691, 8714), 'pygame.Color', 'pygame.Color', (['key_color'], {}), '(key_color)\n', (8703, 8714), False, 'import pygame\n'), ((8734, 8761), 'pygame.Color', 'pygame.Color', (['key_txt_color'], {}), '(key_txt_color)\n', (8746, 8761), False, 'import pygame\n'), ((8780, 8823), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial"""', '(key_size // 4)'], {}), "('Arial', key_size // 4)\n", (8799, 8823), False, 'import pygame\n'), ((2199, 2213), 'pathlib.Path', 'Path', (['wav_path'], {}), '(wav_path)\n', (2203, 2213), False, 'from pathlib import Path\n'), ((2628, 2645), 'pathlib.Path', 'Path', (['cached_path'], {}), '(cached_path)\n', (2632, 2645), False, 'from pathlib import Path\n'), ((2935, 2957), 'numpy.transpose', 
'numpy.transpose', (['sound'], {}), '(sound)\n', (2950, 2957), False, 'import numpy\n'), ((3187, 3235), 'librosa.effects.pitch_shift', 'librosa.effects.pitch_shift', (['y', 'sr'], {'n_steps': 'tone'}), '(y, sr, n_steps=tone)\n', (3214, 3235), False, 'import librosa\n'), ((4839, 4851), 'keyboardlayout.Key', 'kl.Key', (['line'], {}), '(line)\n', (4845, 4851), True, 'import keyboardlayout as kl\n'), ((8302, 8345), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial"""', '(key_size // 4)'], {}), "('Arial', key_size // 4)\n", (8321, 8345), False, 'import pygame\n'), ((3307, 3358), 'librosa.effects.pitch_shift', 'librosa.effects.pitch_shift', (['y[i]', 'sr'], {'n_steps': 'tone'}), '(y[i], sr, n_steps=tone)\n', (3334, 3358), False, 'import librosa\n'), ((3470, 3496), 'numpy.vstack', 'numpy.vstack', (['new_channels'], {}), '(new_channels)\n', (3482, 3496), False, 'import numpy\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import pandas as pd
import random
import json
IMAGE_SIZE = 24
NUM_CLASSES = 5
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 2046
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 512
NUM_PREPROCESS_THREADS = 8
IMAGE_HEIGHT = 480
IMAGE_WIDTH = 640
DOWNSIZE_FACTOR = 1.0
random.seed(1337)
def readCSV(fn):
csv = pd.read_csv(fn, keep_default_na=False, na_values=['NaN'])
df_test = csv
imagePaths_test = list(df_test['image_dir'])
pathsNumpy_test = np.array(imagePaths_test)
labelFnsNumpy_test = pathsNumpy_test
return pathsNumpy_test, labelFnsNumpy_test
def segmentation_readImages(input_queue, mode):
file_contents = tf.read_file(input_queue[0])
label_file_contents = tf.read_file(input_queue[1])
example = tf.cast(tf.image.decode_image(file_contents, channels=3), tf.float32)
labelMask = tf.cast(tf.image.decode_image(label_file_contents, channels=3), tf.float32)
if mode == "train":
chance_lr = tf.random_normal([1])
chance_ud = tf.random_normal([1])
if tf.reduce_all(tf.greater(chance_lr, tf.Variable(1.0))) is True:
example = tf.image.flip_left_right(example)
labelMask = tf.image.flip_left_right(labelMask)
if tf.reduce_all(tf.greater(chance_ud, tf.Variable(1.0))) is True:
example = tf.image.flip_up_down(example)
labelMask = tf.image.flip_up_down(labelMask)
example = tf.image.random_brightness(example, 10.0)
example.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, 3])
labelMask.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, 3])
return input_queue[0], example, labelMask
def _segmentation_generate_image_and_label_batch(_name, image, label, min_queue_examples, batch_size, shuffle):
num_preprocess_threads = NUM_PREPROCESS_THREADS
if shuffle:
n, images, label_batch = tf.train.shuffle_batch([_name, image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * batch_size, min_after_dequeue=min_queue_examples)
return n, images, label_batch
else:
n, images, label_batch = tf.train.batch([_name, image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * batch_size)
return n, images, label_batch
def segmentation_distorted_inputs(imageFns, labelMasks, batch_size, num_examples_per_epoch, shuffle, _condition, mode):
imageFnsTensor = tf.convert_to_tensor(imageFns, dtype=tf.string)
labelMasksTensor = tf.convert_to_tensor(labelMasks, dtype=tf.string)
inputQueueTrain = tf.train.slice_input_producer([imageFnsTensor, labelMasksTensor], shuffle=False)
name, raw_image, labelMask = segmentation_readImages(inputQueueTrain, mode)
float_image = tf.image.per_image_standardization(raw_image)
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)
print ('Filling queue with %d images before starting to train. This will take a few minutes.' % min_queue_examples)
_name, _image, _mask = _segmentation_generate_image_and_label_batch(name, float_image, labelMask, min_queue_examples, batch_size, shuffle=shuffle)
images = tf.image.resize_bicubic(_image, tf.convert_to_tensor([int(480 / DOWNSIZE_FACTOR), int(640 / DOWNSIZE_FACTOR)], dtype=tf.int32))
labelsRGB = tf.image.resize_bicubic(_mask, tf.convert_to_tensor([int(480 / DOWNSIZE_FACTOR), int(640 / DOWNSIZE_FACTOR)], dtype=tf.int32))
labelsFullRange = tf.image.rgb_to_grayscale(labelsRGB)
_max = tf.clip_by_value(tf.reduce_max(labelsFullRange), 1, 255)
labels = tf.divide(labelsFullRange, _max)
return _name, images, labels
| [
"tensorflow.image.flip_left_right",
"tensorflow.image.rgb_to_grayscale",
"tensorflow.image.flip_up_down",
"tensorflow.train.shuffle_batch",
"pandas.read_csv",
"tensorflow.convert_to_tensor",
"tensorflow.reduce_max",
"tensorflow.divide",
"random.seed",
"numpy.array",
"tensorflow.image.per_image_s... | [((401, 418), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (412, 418), False, 'import random\n'), ((445, 502), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'keep_default_na': '(False)', 'na_values': "['NaN']"}), "(fn, keep_default_na=False, na_values=['NaN'])\n", (456, 502), True, 'import pandas as pd\n'), ((586, 611), 'numpy.array', 'np.array', (['imagePaths_test'], {}), '(imagePaths_test)\n', (594, 611), True, 'import numpy as np\n'), ((762, 790), 'tensorflow.read_file', 'tf.read_file', (['input_queue[0]'], {}), '(input_queue[0])\n', (774, 790), True, 'import tensorflow as tf\n'), ((814, 842), 'tensorflow.read_file', 'tf.read_file', (['input_queue[1]'], {}), '(input_queue[1])\n', (826, 842), True, 'import tensorflow as tf\n'), ((2416, 2463), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['imageFns'], {'dtype': 'tf.string'}), '(imageFns, dtype=tf.string)\n', (2436, 2463), True, 'import tensorflow as tf\n'), ((2484, 2533), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['labelMasks'], {'dtype': 'tf.string'}), '(labelMasks, dtype=tf.string)\n', (2504, 2533), True, 'import tensorflow as tf\n'), ((2553, 2638), 'tensorflow.train.slice_input_producer', 'tf.train.slice_input_producer', (['[imageFnsTensor, labelMasksTensor]'], {'shuffle': '(False)'}), '([imageFnsTensor, labelMasksTensor], shuffle=False\n )\n', (2582, 2638), True, 'import tensorflow as tf\n'), ((2726, 2771), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['raw_image'], {}), '(raw_image)\n', (2760, 2771), True, 'import tensorflow as tf\n'), ((3466, 3502), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['labelsRGB'], {}), '(labelsRGB)\n', (3491, 3502), True, 'import tensorflow as tf\n'), ((3578, 3610), 'tensorflow.divide', 'tf.divide', (['labelsFullRange', '_max'], {}), '(labelsFullRange, _max)\n', (3587, 3610), True, 'import tensorflow as tf\n'), ((862, 910), 
'tensorflow.image.decode_image', 'tf.image.decode_image', (['file_contents'], {'channels': '(3)'}), '(file_contents, channels=3)\n', (883, 910), True, 'import tensorflow as tf\n'), ((945, 999), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['label_file_contents'], {'channels': '(3)'}), '(label_file_contents, channels=3)\n', (966, 999), True, 'import tensorflow as tf\n'), ((1049, 1070), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {}), '([1])\n', (1065, 1070), True, 'import tensorflow as tf\n'), ((1087, 1108), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {}), '([1])\n', (1103, 1108), True, 'import tensorflow as tf\n'), ((1453, 1494), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['example', '(10.0)'], {}), '(example, 10.0)\n', (1479, 1494), True, 'import tensorflow as tf\n'), ((1846, 2042), 'tensorflow.train.shuffle_batch', 'tf.train.shuffle_batch', (['[_name, image, label]'], {'batch_size': 'batch_size', 'num_threads': 'num_preprocess_threads', 'capacity': '(min_queue_examples + 3 * batch_size)', 'min_after_dequeue': 'min_queue_examples'}), '([_name, image, label], batch_size=batch_size,\n num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 *\n batch_size, min_after_dequeue=min_queue_examples)\n', (1868, 2042), True, 'import tensorflow as tf\n'), ((2101, 2248), 'tensorflow.train.batch', 'tf.train.batch', (['[_name, image, label]'], {'batch_size': 'batch_size', 'num_threads': 'num_preprocess_threads', 'capacity': '(min_queue_examples + 3 * batch_size)'}), '([_name, image, label], batch_size=batch_size, num_threads=\n num_preprocess_threads, capacity=min_queue_examples + 3 * batch_size)\n', (2115, 2248), True, 'import tensorflow as tf\n'), ((3528, 3558), 'tensorflow.reduce_max', 'tf.reduce_max', (['labelsFullRange'], {}), '(labelsFullRange)\n', (3541, 3558), True, 'import tensorflow as tf\n'), ((1193, 1226), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['example'], {}), 
'(example)\n', (1217, 1226), True, 'import tensorflow as tf\n'), ((1242, 1277), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['labelMask'], {}), '(labelMask)\n', (1266, 1277), True, 'import tensorflow as tf\n'), ((1361, 1391), 'tensorflow.image.flip_up_down', 'tf.image.flip_up_down', (['example'], {}), '(example)\n', (1382, 1391), True, 'import tensorflow as tf\n'), ((1407, 1439), 'tensorflow.image.flip_up_down', 'tf.image.flip_up_down', (['labelMask'], {}), '(labelMask)\n', (1428, 1439), True, 'import tensorflow as tf\n'), ((1152, 1168), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (1163, 1168), True, 'import tensorflow as tf\n'), ((1320, 1336), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), '(1.0)\n', (1331, 1336), True, 'import tensorflow as tf\n')] |
"""
Allocate apertures to targets.
Contains code originally written by 2021 Akamai intern, <NAME>.
.. include:: ../include/links.rst
"""
from pathlib import Path
from IPython import embed
import numpy
def random_targets(r, n=None, density=5., rng=None):
    r"""
    Draw uniformly distributed random Cartesian coordinates within a circle.

    Args:
        r (:obj:`float`):
            Radius of the circle.
        n (:obj:`int`, optional):
            Total number of points to draw.  If None, the count is derived
            from ``density``.
        density (:obj:`float`, optional):
            Mean target density inside the circle, used to compute
            ``n = int(numpy.ceil(density*numpy.pi*r**2))`` when ``n`` is
            None.  Units must match the radius units.
        rng (`numpy.random.Generator`_, optional):
            Random number generator.  If None, a fresh generator from
            `numpy.random.default_rng`_ is used.

    Returns:
        :obj:`tuple`: Two vectors of length :math:`N_{\rm targ}`: the
        Cartesian x coordinates followed by the y coordinates.
    """
    # Derive the point count from the requested density when not given.
    if n is None:
        n = int(numpy.ceil(density * numpy.pi * r**2))
    if rng is None:
        rng = numpy.random.default_rng()
    # Rejection sampling: draw within the unit square, keep points inside
    # the unit circle, and retry with a larger overdraw factor until we
    # have exactly n points.
    factor = 1.5
    xy = numpy.empty((0, 2), dtype=float)
    while xy.shape[0] != n:
        draw = rng.uniform(low=-1, high=1, size=(int(n * factor), 2))
        inside = draw[:, 0]**2 + draw[:, 1]**2 < 1
        xy = draw[inside][:n]
        factor *= 1.1
    # Scale the unit-circle coordinates up to the requested radius.
    return r * xy[:, 0], r * xy[:, 1]
def parse_targets(ifile, ra_c=1, dec_c=2, ap_c=None, default_ap=0):
    """
    Parse target coordinates and aperture types from an input file.

    Args:
        ifile (:obj:`str`):
            Columnated ascii file with the target coordinates.
        ra_c (:obj:`int`, optional):
            1-indexed column with the RA coordinates.  Assumed to be in
            decimal degrees.
        dec_c (:obj:`int`, optional):
            1-indexed column with the declination coordinates.  Assumed to
            be in decimal degrees.
        ap_c (:obj:`int`, optional):
            1-indexed column with the aperture type to assign to each
            target.  If None, the type is not available in the input file
            and ``default_ap`` is used for all targets.  Apertures must be
            0 for a single fiber or 1 for an IFU.
        default_ap (:obj:`int`, optional):
            If the aperture types are not provided in the file, this sets
            the type to assign to *all* apertures.  Apertures must be 0 for
            a single fiber or 1 for an IFU.

    Returns:
        :obj:`tuple`: Three numpy vectors with the coordinates and aperture
        type for each target.

    Raises:
        ValueError:
            If ``default_ap`` is not 0 or 1, or if a requested column index
            exceeds the number of columns in the file.
        FileNotFoundError:
            If ``ifile`` does not exist.
    """
    # Check the input
    if default_ap not in [0, 1]:
        raise ValueError('Default aperture type must be 0 (single-fiber) or 1 (IFU).')
    # Instantiate the file path
    p = Path(ifile).resolve()
    # Check it exists
    if not p.exists():
        raise FileNotFoundError(f'{str(p)}')
    # Read the file.  Force a 2D array so that a file with a single target
    # (one row) still yields length-1 vectors after the transpose, instead
    # of 0-d scalars; for multi-row files atleast_2d is a no-op.
    db = numpy.atleast_2d(numpy.genfromtxt(str(p), dtype=str)).T
    # Check the requested columns exist
    if numpy.any(numpy.array([ra_c, dec_c, 1 if ap_c is None else ap_c]) > db.shape[0]):
        raise ValueError(f'{p.name} only contains {db.shape[0]} columns. Check column requests.')
    # Collect the data and convert to the correct type
    ra = db[ra_c-1].astype(float)
    dec = db[dec_c-1].astype(float)
    ap = numpy.full(ra.size, default_ap, dtype=int) if ap_c is None else db[ap_c-1].astype(int)
    return ra, dec, ap
| [
"numpy.full",
"numpy.ceil",
"numpy.empty",
"numpy.random.default_rng",
"pathlib.Path",
"numpy.array"
] | [((1457, 1489), 'numpy.empty', 'numpy.empty', (['(0, 2)'], {'dtype': 'float'}), '((0, 2), dtype=float)\n', (1468, 1489), False, 'import numpy\n'), ((1421, 1447), 'numpy.random.default_rng', 'numpy.random.default_rng', ([], {}), '()\n', (1445, 1447), False, 'import numpy\n'), ((3919, 3961), 'numpy.full', 'numpy.full', (['ra.size', 'default_ap'], {'dtype': 'int'}), '(ra.size, default_ap, dtype=int)\n', (3929, 3961), False, 'import numpy\n'), ((1352, 1391), 'numpy.ceil', 'numpy.ceil', (['(density * numpy.pi * r ** 2)'], {}), '(density * numpy.pi * r ** 2)\n', (1362, 1391), False, 'import numpy\n'), ((3377, 3388), 'pathlib.Path', 'Path', (['ifile'], {}), '(ifile)\n', (3381, 3388), False, 'from pathlib import Path\n'), ((3614, 3669), 'numpy.array', 'numpy.array', (['[ra_c, dec_c, 1 if ap_c is None else ap_c]'], {}), '([ra_c, dec_c, 1 if ap_c is None else ap_c])\n', (3625, 3669), False, 'import numpy\n')] |
import tensorflow as tf
import numpy as np
from tensorflow.keras import backend as K
from sklearn.linear_model import LassoLars
from timeit import default_timer as timer
from sklearn.linear_model import LinearRegression
from compression_tools.pruning.helper_functions import rel_error, get_layer_index, load_model_param
from compression_tools.pruning.delete_filters import delete_filter_before
from tools.progress.bar import Bar
def extract_inputs_and_outputs(
        model,
        layer,
        layer_index_dic,
        get_dataset="food20",
        batches_n=80,
        activation=False):
    """Sample (input, output) pairs for a conv ``layer`` of a Keras model.

    Runs ``batches_n`` batches of training data through the model and
    collects, for the given layer, either whole activation maps
    (``activation=True``) or kernel-sized input patches at random spatial
    positions together with the matching output pixels
    (``activation=False``).  Returns ``[stacked inputs, stacked outputs]``.

    NOTE(review): ``get_dataset`` defaults to the string "food20" but is
    invoked as a zero-argument callable below, and callers in this file
    pass a ``dataset=`` keyword that this signature does not accept —
    confirm the intended parameter name and type.
    """
    # Position of the target layer in the model's layer list.
    index = get_layer_index(layer_index_dic, layer)
    if layer.use_bias:
        bias = layer.get_weights()[1]
    [_, H, W, C] = layer.output_shape
    [h, w] = layer.kernel_size
    # The feeding layer may come back as a single layer or a list of layers.
    fore_layer = layer.inbound_nodes[0].inbound_layers
    try:
        fore_layer_index = get_layer_index(layer_index_dic, fore_layer)
    except Exception:
        fore_layer_index = get_layer_index(layer_index_dic, fore_layer[0])
    train_data, _ = get_dataset()
    inputs = []
    outputs = []
    if activation:
        # When activation=True the output is taken from the following layer
        # (index+1) instead of the conv layer itself.
        get_layer_input = K.function([model.layers[0].input],
                                    [model.layers[fore_layer_index].output])
        get_layer_output = K.function([model.layers[0].input],
                                     [model.layers[index+1].output])
    else:
        get_layer_input = K.function([model.layers[0].input],
                                    [model.layers[fore_layer_index].output])
        get_layer_output = K.function([model.layers[0].input],
                                     [model.layers[index].output])
    for batch in range(batches_n):
        # NOTE(review): ``it`` is never used, and the loop variable
        # ``batch`` is immediately overwritten by the next data batch.
        it = iter(train_data)
        batch = next(train_data)
        layer_input = get_layer_input([batch[0]])[0]
        layer_output = get_layer_output([batch[0]])[0]
        if activation:
            X = []
            # Flatten the spatial dimensions, keeping the channel axis.
            Y = layer_output.reshape((-1, layer_output.shape[3]))
            outputs.append(np.vstack(Y))
            inputs = outputs
        else:
            # Half kernel extents, used to centre each sampled input patch.
            hh = (h-1)/2
            hw = (w-1)/2
            # Ten random spatial sampling positions per batch.
            x_samples = np.random.randint(0, H - h, 10)
            y_samples = np.random.randint(0, W - w, 10)
            if layer.use_bias:
                # Subtract the bias in-place so the collected outputs are
                # the pure W*x responses.
                for b in layer_output:
                    for l1 in range(b.shape[2]):
                        b[:, :, l1] = b[:, :, l1]-bias[l1]
            Xs = []
            Ys = []
            for n, x in enumerate(x_samples):
                Y = layer_output[:, x, y_samples[n], :]
                # Map output coordinates back to input coordinates via the
                # layer strides.
                x = x*layer.strides[0]
                y_samples[n] = y_samples[n]*layer.strides[1]
                X = layer_input[
                    :, int(x-hh):int(x+hh+1), int(y_samples[n]-hw):int(y_samples[n]+hw+1), :]
                Xs.append(X)
                Ys.append(Y)
            inputs.append(np.stack(Xs))
            outputs.append(np.vstack(Ys))
    return [np.vstack(np.vstack(inputs)), np.vstack(outputs)]
def featuremap_reconstruction(x, y, copy_x=True, fit_intercept=False):
    """Least-squares refit of a layer's feature map from pruned inputs.

    Given the (pruned) input ``x``, fit a linear regression that
    reconstructs the original feature map ``y`` of the convolution layer.

    Args:
        x: The pruned input.
        y: The original feature map of the convolution layer.
        copy_x: Forwarded to ``LinearRegression(copy_X=...)``.
        fit_intercept: Whether the regression fits an intercept term.

    Return:
        Coefficients and intercept of the fitted regression, which
        reconstruct the feature map from ``x`` with small loss.
    """
    regressor = LinearRegression(
        n_jobs=-1, copy_X=copy_x, fit_intercept=fit_intercept)
    regressor.fit(x, y)
    return regressor.coef_, regressor.intercept_
def compute_pruned_kernel(
        X,
        W2,
        Y,
        alpha=1e-4,
        c_new=None,
        idx=None,
        tolerance=0.02):
    """Select input channels to keep via LASSO, then refit the kernel.

    Binary-searches the LASSO regularisation strength ``alpha`` until the
    number of surviving channels falls within ``tolerance * c_in / 2`` of
    the requested ``c_new``, then least-squares refits the kernel on the
    kept channels only.

    Args:
        X: sampled input patches; last axis is the input-channel axis.
        W2: original kernel; last axis is the output-channel axis.
        Y: sampled layer outputs to reconstruct.
        alpha: initial LASSO regularisation strength.
        c_new: desired number of surviving input channels.
        idx: unused.  # NOTE(review): dead parameter
        tolerance: relative slack around ``c_new`` for the search.

    Returns:
        tuple: (boolean mask of kept input channels, refitted weights).

    NOTE(review): in the ``c_new == c_in`` branch ``tmp`` is never
    assigned, so the later ``c_new = tmp`` would raise NameError — confirm
    whether that branch is ever exercised.
    """
    nb_samples = X.shape[0]
    c_in = X.shape[-1]
    c_out = W2.shape[-1]
    # Random subsample of the collected examples to keep the solve cheap.
    samples = np.random.randint(0, nb_samples, min(400, nb_samples // 20))
    # Rearrange inputs/weights so each input channel's contribution to the
    # outputs becomes one column of the lasso design matrix.
    reshape_X = np.rollaxis(
        np.transpose(X, (0, 3, 1, 2)).reshape((nb_samples, c_in, -1))[samples], 1, 0)
    reshape_W2 = np.transpose(np.transpose(W2, (3, 2, 0, 1)).reshape((c_out, c_in, -1)), [1, 2, 0])
    product = np.matmul(reshape_X, reshape_W2).reshape((c_in, -1)).T
    reshape_Y = Y[samples].reshape(-1)
    solver = LassoLars(alpha=alpha, fit_intercept=False, max_iter=3000)
    def solve(alpha):
        """ Solve the Lasso"""
        solver.alpha = alpha
        solver.fit(product, reshape_Y)
        idxs = solver.coef_ != 0.
        tmp = sum(idxs)
        return idxs, tmp, solver.coef_
    # print("pruned channel selecting")
    # NOTE(review): ``start`` is never read afterwards.
    start = timer()
    if c_new == c_in:
        # Nothing to prune: keep every channel.
        idxs = np.array([True] * c_new)
        # newW2 = W2.reshape(W2.shape[-1],)
    else:
        # Bisection bounds on alpha and the accepted channel-count window.
        left = 0
        right = alpha
        lbound = c_new - tolerance * c_in / 2
        rbound = c_new + tolerance * c_in / 2
        # First grow ``right`` until it prunes below the target count.
        while True:
            _, tmp, coef = solve(right)
            if tmp < c_new:
                break
            else:
                right *= 2
                # print("relax right to {}".format(right))
        # Then bisect alpha until the survivor count enters the window.
        while True:
            if lbound < 0:
                lbound = 1
            idxs, tmp, coef = solve(alpha)
            # print loss
            loss = 1 / (2 * float(product.shape[0])) * np.sqrt(
                np.sum((reshape_Y - np.matmul(
                    product, coef)) ** 2, axis=0)) + alpha * np.sum(np.fabs(coef))
            if lbound <= tmp and tmp <= rbound:
                # NOTE(review): the ``if False`` block is intentionally
                # disabled; the success path simply breaks.
                if False:
                    if tmp % 4 == 0:
                        break
                    elif tmp % 4 <= 2:
                        rbound = tmp - 1
                        lbound = lbound - 2
                    else:
                        lbound = tmp + 1
                        rbound = rbound + 2
                else:
                    break
            elif abs(left - right) <= right * 0.1:
                # Bisection interval collapsed: widen the accepted window
                # and the alpha interval, then keep searching.
                if lbound > 1:
                    lbound = lbound - 1
                if rbound < c_in:
                    rbound = rbound + 1
                left = left / 1.2
                right = right * 1.2
            elif tmp > rbound:
                # Too many survivors: increase regularisation.
                left = left + (alpha - left) / 2
            else:
                # Too few survivors: decrease regularisation.
                right = right - (right - alpha) / 2
            if alpha < 1e-10:
                break
            alpha = (left + right) / 2
    c_new = tmp
    # Least-squares refit of the kernel on the kept channels only.
    newW2, _ = featuremap_reconstruction(
        X[:, :, :, idxs].reshape((nb_samples, -1)), Y, fit_intercept=False)
    return idxs, newW2
def prune_kernel_lasso(
        model,
        index,
        layer_params,
        prune_ratio,
        layer_types,
        layer_bias,
        layer_output_shape,
        filters,
        layer_index_dic,
        cp_lasso=True,
        dataset="food20"):
    """Prune the input channels of the conv layer at ``index``.

    Walks the layer graph to decide whether the layer sits at a position
    that is safe to prune (not directly after a branch/Add in a way that
    would break residual connections).  For prunable layers, channels are
    selected either by LASSO (``cp_lasso=True``) or by weight magnitude,
    the kernel is refitted on the surviving channels, and the pruning is
    propagated backwards with ``delete_filter_before``.

    Args:
        model: Keras model being pruned.
        index: index of the conv layer to prune.
        layer_params: per-layer weight lists; mutated in place.
        prune_ratio: fraction of input channels to remove; >= 1 skips.
        layer_types / layer_bias / layer_output_shape: bookkeeping arrays
            forwarded to ``delete_filter_before``.
        filters: dict mapping layer index to the pruned channel indices;
            updated in place.
        layer_index_dic: mapping used to resolve layer indices.
        cp_lasso: use LASSO channel selection when True, else magnitude.
        dataset: dataset selector forwarded to the sampling step.

    Returns:
        tuple: (updated params, new filter count, output shapes, filters).

    NOTE(review): ``extract_inputs_and_outputs`` is called below with a
    ``dataset=`` keyword that its signature (``get_dataset=...``) does not
    declare — confirm the intended parameter name.
    """
    if prune_ratio < 1:
        left_edge_flag = False
        after_add = False
        layer_index = index
        current_layer = layer_index_dic[layer_index]
        # Walk backwards to the nearest Conv2D/Add (skipping depthwise and
        # pass-through layers) or to a layer that fans out to two branches.
        fore_layer = current_layer.inbound_nodes[0].inbound_layers
        while((not fore_layer == []
                and not isinstance(fore_layer, tf.keras.layers.Conv2D)
                and not isinstance(fore_layer, tf.keras.layers.Add)
                or isinstance(fore_layer, tf.keras.layers.DepthwiseConv2D))
                and not len(fore_layer.outbound_nodes) == 2):
            # TODO:: Batch normalization
            fore_layer = fore_layer.inbound_nodes[0].inbound_layers
        if fore_layer == []:
            # Reached the model input: nothing feeds this layer.
            new_model_param = layer_params
            num_new_filter = layer_params[index][0].shape[-1]
            # print("No pruning implemented for start conv layers")
            return new_model_param, num_new_filter, layer_output_shape, filters
        if isinstance(fore_layer, tf.keras.layers.Add):
            after_add = True
        if len(fore_layer.outbound_nodes) == 2:
            # print("This conv2D is at the beginning edge")
            # The feeding layer branches; check whether the forward path
            # reaches a Conv2D first (a "left edge" of a residual block).
            next_layer = current_layer.outbound_nodes[0].layer
            while(not isinstance(next_layer, tf.compat.v1.keras.layers.Conv2D)
                    and not isinstance(next_layer, tf.keras.layers.Add)):
                next_layer = next_layer.outbound_nodes[0].layer
            if isinstance(next_layer, tf.compat.v1.keras.layers.Conv2D):
                # print("left edge")
                left_edge_flag = True
        ############################################
        if not left_edge_flag and not after_add:
            layer = model.layers[index]
            W = layer_params[index][0]
            # Sample (input patch, output) pairs for the regression.
            [inputs, outputs] = extract_inputs_and_outputs(
                model, layer, layer_index_dic, dataset=dataset)
            # Reconstruction error with the full (unpruned) kernel.
            error1 = rel_error(
                inputs.reshape(inputs.shape[0], -1).dot(W.reshape(-1, W.shape[-1])), outputs)
            # print('feature map rmse: {}'.format(error1))
            error2 = 1
            # while(error2 > 0.05 and prune_ratio < 1):
            nb_channel_new = int((1-prune_ratio)*(layer.input_shape[3]))
            if cp_lasso is True:
                idxs, newW2 = compute_pruned_kernel(inputs, W, outputs, c_new=nb_channel_new)
            else:
                # Magnitude-based fallback: keep the channels with the
                # largest absolute weight mass, then refit.
                idxs = np.argsort(-np.abs(W).sum((0, 1, 3)))
                mask = np.zeros(len(idxs), bool)
                idxs = idxs[:nb_channel_new]
                mask[idxs] = True
                # NOTE(review): ``idxsz`` is assigned but never used.
                idxsz = mask
                reg = LinearRegression(fit_intercept=False)
                reg.fit(inputs[:, :, :, idxs].reshape(inputs.shape[0], -1), outputs)
                newW2 = reg.coef_
            # Reconstruction error after pruning and refitting.
            error2 = rel_error(
                inputs[:, :, :, idxs].reshape(inputs.shape[0], -1).dot(newW2.T), outputs)
            # print('feature map rmse: {}'.format(error2))
            # print('prune_ratio is: {}'.format(prune_ratio))
            # prune_ratio += 0.1
            '''
            if error2 > 0.1 or prune_ratio > 0.9:
                print("BIG ERROR")
                print('Prune {} c_in from {} to {}'.format(layer.name, inputs.shape[-1], sum(idxs)))
                new_model_param = layer_params
                num_new_filter = layer_params[index][0].shape[-1]
                print("No pruning implemented for left edge conv layers")
            else:
            '''
            # print("PRUN IT")
            # print('Prune {} c_in from {} to {}'.format(layer.name, inputs.shape[-1], sum(idxs)))
            # Record the indices of the channels being removed.
            prun_filter = []
            for i, idx in enumerate(idxs):
                if not idx:
                    prun_filter.append(i)
            filters[index] = prun_filter
            num_new_filter = W.shape[-1]-len(prun_filter)
            h, w = layer.kernel_size
            # Reshape the refit coefficients back into kernel layout
            # (h, w, c_in_kept, c_out).
            newW2 = newW2.reshape(-1, h, w, np.sum(idxs))
            newW2 = np.transpose(newW2, [1, 2, 3, 0])
            layer_params[index][0] = newW2
            # Propagate the channel removal to the producing layer(s).
            prun_filter = [prun_filter]
            for i in range(len(prun_filter)-1, -1, -1):
                new_model_param, layer_output_shape = delete_filter_before(
                    layer_params, layer_types, layer_output_shape, layer_bias,
                    index, prun_filter[i], layer_index_dic)
        else:
            new_model_param = layer_params
            num_new_filter = layer_params[index][0].shape[-1]
            # print("No pruning implemented for left edge conv layers")
    else:
        new_model_param = layer_params
        num_new_filter = layer_params[index][0].shape[-1]
        # print("No pruning implemented for conv layers")
    return new_model_param, num_new_filter, layer_output_shape, filters
def channel_prune_model_lasso(
        my_model,
        prune_ratio,
        min_index=3,
        max_index=None,
        dataset="food20"):
    """Channel-prune every eligible Conv2D layer of ``my_model`` via LASSO.

    Args:
        my_model: Keras model whose layers are pruned.
        prune_ratio: per-layer pruning ratios, indexed by layer index.
        min_index: first layer index eligible for pruning.
        max_index: last eligible layer index; defaults to the layer count.
        dataset: dataset selector forwarded to ``prune_kernel_lasso``.

    Returns:
        tuple: updated ``(layer_params, layer_types)``.
    """
    layer_types, layer_params, layer_output_shape, layer_bias, layer_index_dic = load_model_param(
        my_model)
    max_index = len(my_model.layers) if max_index is None else max_index
    counter = 0
    filters = {}
    with Bar(f'Lasso channel pruning...') as bar:
        for index, layer in enumerate(my_model.layers):
            # Only plain Conv2D layers (not depthwise) inside the
            # [min_index, max_index] window are considered.
            if isinstance(layer, tf.keras.layers.Conv2D) and\
                    not isinstance(layer, tf.keras.layers.DepthwiseConv2D) and\
                    layer.kernel_size[0] >= 1 and\
                    index >= min_index and index <= max_index:
                if index >= min_index:
                    layer_params, _, layer_output_shape, filters = prune_kernel_lasso(
                        my_model,
                        index,
                        layer_params,
                        prune_ratio[index],
                        layer_types,
                        layer_bias,
                        layer_output_shape,
                        filters=filters,
                        layer_index_dic=layer_index_dic,
                        dataset=dataset)
                    counter += 1
                else:
                    # NOTE(review): unreachable — the enclosing condition
                    # already guarantees index >= min_index, so this
                    # prune_ratio=1.0 branch is dead code.
                    layer_params, _, layer_output_shape, filters = prune_kernel_lasso(
                        my_model,
                        index,
                        layer_params,
                        1.0,
                        layer_types,
                        layer_bias,
                        layer_output_shape,
                        filters=filters,
                        layer_index_dic=layer_index_dic,
                        dataset=dataset)
            bar.next((100/len(my_model.layers)))
    return layer_params, layer_types
| [
"numpy.stack",
"numpy.sum",
"numpy.abs",
"timeit.default_timer",
"sklearn.linear_model.LassoLars",
"compression_tools.pruning.helper_functions.get_layer_index",
"numpy.transpose",
"compression_tools.pruning.delete_filters.delete_filter_before",
"sklearn.linear_model.LinearRegression",
"tensorflow.... | [((657, 696), 'compression_tools.pruning.helper_functions.get_layer_index', 'get_layer_index', (['layer_index_dic', 'layer'], {}), '(layer_index_dic, layer)\n', (672, 696), False, 'from compression_tools.pruning.helper_functions import rel_error, get_layer_index, load_model_param\n'), ((3399, 3470), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'n_jobs': '(-1)', 'copy_X': 'copy_x', 'fit_intercept': 'fit_intercept'}), '(n_jobs=-1, copy_X=copy_x, fit_intercept=fit_intercept)\n', (3415, 3470), False, 'from sklearn.linear_model import LinearRegression\n'), ((4216, 4274), 'sklearn.linear_model.LassoLars', 'LassoLars', ([], {'alpha': 'alpha', 'fit_intercept': '(False)', 'max_iter': '(3000)'}), '(alpha=alpha, fit_intercept=False, max_iter=3000)\n', (4225, 4274), False, 'from sklearn.linear_model import LassoLars\n'), ((4547, 4554), 'timeit.default_timer', 'timer', ([], {}), '()\n', (4552, 4554), True, 'from timeit import default_timer as timer\n'), ((11784, 11810), 'compression_tools.pruning.helper_functions.load_model_param', 'load_model_param', (['my_model'], {}), '(my_model)\n', (11800, 11810), False, 'from compression_tools.pruning.helper_functions import rel_error, get_layer_index, load_model_param\n'), ((919, 963), 'compression_tools.pruning.helper_functions.get_layer_index', 'get_layer_index', (['layer_index_dic', 'fore_layer'], {}), '(layer_index_dic, fore_layer)\n', (934, 963), False, 'from compression_tools.pruning.helper_functions import rel_error, get_layer_index, load_model_param\n'), ((1174, 1250), 'tensorflow.keras.backend.function', 'K.function', (['[model.layers[0].input]', '[model.layers[fore_layer_index].output]'], {}), '([model.layers[0].input], [model.layers[fore_layer_index].output])\n', (1184, 1250), True, 'from tensorflow.keras import backend as K\n'), ((1315, 1384), 'tensorflow.keras.backend.function', 'K.function', (['[model.layers[0].input]', '[model.layers[index + 1].output]'], {}), 
'([model.layers[0].input], [model.layers[index + 1].output])\n', (1325, 1384), True, 'from tensorflow.keras import backend as K\n'), ((1458, 1534), 'tensorflow.keras.backend.function', 'K.function', (['[model.layers[0].input]', '[model.layers[fore_layer_index].output]'], {}), '([model.layers[0].input], [model.layers[fore_layer_index].output])\n', (1468, 1534), True, 'from tensorflow.keras import backend as K\n'), ((1599, 1664), 'tensorflow.keras.backend.function', 'K.function', (['[model.layers[0].input]', '[model.layers[index].output]'], {}), '([model.layers[0].input], [model.layers[index].output])\n', (1609, 1664), True, 'from tensorflow.keras import backend as K\n'), ((2997, 3015), 'numpy.vstack', 'np.vstack', (['outputs'], {}), '(outputs)\n', (3006, 3015), True, 'import numpy as np\n'), ((4593, 4617), 'numpy.array', 'np.array', (['([True] * c_new)'], {}), '([True] * c_new)\n', (4601, 4617), True, 'import numpy as np\n'), ((11935, 11967), 'tools.progress.bar.Bar', 'Bar', (['f"""Lasso channel pruning..."""'], {}), "(f'Lasso channel pruning...')\n", (11938, 11967), False, 'from tools.progress.bar import Bar\n'), ((1013, 1060), 'compression_tools.pruning.helper_functions.get_layer_index', 'get_layer_index', (['layer_index_dic', 'fore_layer[0]'], {}), '(layer_index_dic, fore_layer[0])\n', (1028, 1060), False, 'from compression_tools.pruning.helper_functions import rel_error, get_layer_index, load_model_param\n'), ((2178, 2209), 'numpy.random.randint', 'np.random.randint', (['(0)', '(H - h)', '(10)'], {}), '(0, H - h, 10)\n', (2195, 2209), True, 'import numpy as np\n'), ((2234, 2265), 'numpy.random.randint', 'np.random.randint', (['(0)', '(W - w)', '(10)'], {}), '(0, W - w, 10)\n', (2251, 2265), True, 'import numpy as np\n'), ((2977, 2994), 'numpy.vstack', 'np.vstack', (['inputs'], {}), '(inputs)\n', (2986, 2994), True, 'import numpy as np\n'), ((10720, 10753), 'numpy.transpose', 'np.transpose', (['newW2', '[1, 2, 3, 0]'], {}), '(newW2, [1, 2, 3, 0])\n', (10732, 
10753), True, 'import numpy as np\n'), ((2046, 2058), 'numpy.vstack', 'np.vstack', (['Y'], {}), '(Y)\n', (2055, 2058), True, 'import numpy as np\n'), ((2898, 2910), 'numpy.stack', 'np.stack', (['Xs'], {}), '(Xs)\n', (2906, 2910), True, 'import numpy as np\n'), ((2939, 2952), 'numpy.vstack', 'np.vstack', (['Ys'], {}), '(Ys)\n', (2948, 2952), True, 'import numpy as np\n'), ((4024, 4054), 'numpy.transpose', 'np.transpose', (['W2', '(3, 2, 0, 1)'], {}), '(W2, (3, 2, 0, 1))\n', (4036, 4054), True, 'import numpy as np\n'), ((4108, 4140), 'numpy.matmul', 'np.matmul', (['reshape_X', 'reshape_W2'], {}), '(reshape_X, reshape_W2)\n', (4117, 4140), True, 'import numpy as np\n'), ((9371, 9408), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (9387, 9408), False, 'from sklearn.linear_model import LinearRegression\n'), ((10686, 10698), 'numpy.sum', 'np.sum', (['idxs'], {}), '(idxs)\n', (10692, 10698), True, 'import numpy as np\n'), ((10947, 11070), 'compression_tools.pruning.delete_filters.delete_filter_before', 'delete_filter_before', (['layer_params', 'layer_types', 'layer_output_shape', 'layer_bias', 'index', 'prun_filter[i]', 'layer_index_dic'], {}), '(layer_params, layer_types, layer_output_shape,\n layer_bias, index, prun_filter[i], layer_index_dic)\n', (10967, 11070), False, 'from compression_tools.pruning.delete_filters import delete_filter_before\n'), ((3916, 3945), 'numpy.transpose', 'np.transpose', (['X', '(0, 3, 1, 2)'], {}), '(X, (0, 3, 1, 2))\n', (3928, 3945), True, 'import numpy as np\n'), ((5335, 5348), 'numpy.fabs', 'np.fabs', (['coef'], {}), '(coef)\n', (5342, 5348), True, 'import numpy as np\n'), ((9166, 9175), 'numpy.abs', 'np.abs', (['W'], {}), '(W)\n', (9172, 9175), True, 'import numpy as np\n'), ((5256, 5280), 'numpy.matmul', 'np.matmul', (['product', 'coef'], {}), '(product, coef)\n', (5265, 5280), True, 'import numpy as np\n')] |
from typing import Optional, Tuple
import numpy as np
from scipy import integrate
from scipy.optimize import minimize
from paramak import ExtrudeMixedShape
from paramak.utils import add_thickness
class ToroidalFieldCoilPrincetonD(ExtrudeMixedShape):
    """Toroidal field coil based on Princeton-D curve
    Args:
        R1: smallest radius (cm)
        R2: largest radius (cm)
        thickness: magnet thickness (cm)
        distance: extrusion distance (cm)
        number_of_coils: the number of tf coils. This changes by the
            azimuth_placement_angle dividing up 360 degrees by the number of
            coils.
        vertical_displacement: vertical displacement (cm). Defaults to 0.0.
        with_inner_leg: Include the inner tf leg. Defaults to True.
    """
    def __init__(
        self,
        R1: float,
        R2: float,
        thickness: float,
        distance: float,
        number_of_coils: int,
        vertical_displacement: float = 0.0,
        with_inner_leg: bool = True,
        color: Tuple[float, float, float, Optional[float]] = (0.0, 0.0, 1.0),
        **kwargs
    ) -> None:
        super().__init__(distance=distance, color=color, **kwargs)
        self.R1 = R1
        self.R2 = R2
        self.thickness = thickness
        self.distance = distance
        self.number_of_coils = number_of_coils
        self.vertical_displacement = vertical_displacement
        self.with_inner_leg = with_inner_leg
    @property
    def inner_points(self):
        """Inner-curve (R, Z) points; computed lazily via ``points``."""
        # Accessing self.points runs find_points(), which assigns
        # self.inner_points as a side effect.
        self.points
        return self._inner_points
    @inner_points.setter
    def inner_points(self, value):
        self._inner_points = value
    @property
    def outer_points(self):
        """Outer-curve (R, Z) points; computed lazily via ``points``."""
        # Same lazy-compute trick as inner_points.
        self.points
        return self._outer_points
    @outer_points.setter
    def outer_points(self, value):
        self._outer_points = value
    @property
    def azimuth_placement_angle(self):
        """Azimuthal angles (degrees) at which the coils are placed."""
        # Recomputed on every access from number_of_coils.
        self.find_azimuth_placement_angle()
        return self._azimuth_placement_angle
    @azimuth_placement_angle.setter
    def azimuth_placement_angle(self, value):
        self._azimuth_placement_angle = value
    def _compute_inner_points(self, R1, R2):
        """Computes the inner curve points
        Args:
            R1 (float): smallest radius (cm)
            R2 (float): largest radius (cm)
        Returns:
            (list, list, list): R, Z and derivative lists for outer curve
                points
        """
        def error(z_0, R0, R2):
            # Objective for the z_0 search: the outer segment must end on
            # the midplane (Z = 0).
            segment = get_segment(R0, R2, z_0)
            return abs(segment[1][-1])
        def get_segment(a, b, z_0):
            # Integrate the Princeton-D curvature ODE from R=a to R=b,
            # starting at height z_0 with zero slope.
            a_R = np.linspace(a, b, num=70, endpoint=True)
            asol = integrate.odeint(solvr, [z_0, 0], a_R)
            return a_R, asol[:, 0], asol[:, 1]
        def solvr(Y, R):
            # Second-order ODE for the constant-tension D-shape, written as
            # a first-order system in (Z, dZ/dR).
            return [Y[1], -1 / (k * R) * (1 + Y[1] ** 2) ** (3 / 2)]
        # Characteristic radius and curvature constant of the D-curve.
        R0 = (R1 * R2) ** 0.5
        k = 0.5 * np.log(R2 / R1)
        # computing of z_0
        # z_0 is computed by ensuring outer segment end is zero
        z_0 = 10 # initial guess for z_0
        res = minimize(error, z_0, args=(R0, R2))
        z_0 = res.x
        # compute inner and outer segments
        segment1 = get_segment(R0, R1, z_0)
        segment2 = get_segment(R0, R2, z_0)
        # Assemble the closed curve: upper half (inner + outer segments)
        # followed by its mirror image below the midplane.
        r_values = np.concatenate(
            [
                np.flip(segment1[0]),
                segment2[0][1:],
                np.flip(segment2[0])[1:],
                segment1[0][1:],
            ]
        )
        z_values = np.concatenate(
            [
                np.flip(segment1[1]),
                segment2[1][1:],
                -np.flip(segment2[1])[1:],
                -segment1[1][1:],
            ]
        )
        return r_values, z_values
    def find_points(self):
        """Finds the XZ points joined by connections that describe the 2D
        profile of the toroidal field coil shape."""
        # compute inner points
        r_inner, z_inner = self._compute_inner_points(self.R1 + self.thickness, self.R2)
        # compute outer points
        # Slope of the inner curve, used to offset it outwards by the coil
        # thickness; the endpoints get vertical tangents (+/- infinity).
        dz_dr = np.diff(z_inner) / np.diff(r_inner)
        dz_dr[0] = float("-inf")
        dz_dr = np.append(dz_dr, float("inf"))
        r_outer, z_outer = add_thickness(r_inner, z_inner, self.thickness, dy_dx=dz_dr)
        r_outer, z_outer = np.flip(r_outer), np.flip(z_outer)
        # add vertical displacement
        z_outer += self.vertical_displacement
        z_inner += self.vertical_displacement
        # extract helping points for inner leg
        inner_leg_connection_points = [
            (r_inner[0], z_inner[0]),
            (r_inner[-1], z_inner[-1]),
            (r_outer[0], z_outer[0]),
            (r_outer[-1], z_outer[-1]),
        ]
        self.inner_leg_connection_points = inner_leg_connection_points
        # add the leg to the points
        if self.with_inner_leg:
            # Close each curve back onto its starting point so the inner
            # leg can be formed with straight connections below.
            r_inner = np.append(r_inner, r_inner[0])
            z_inner = np.append(z_inner, z_inner[0])
            r_outer = np.append(r_outer, r_outer[0])
            z_outer = np.append(z_outer, z_outer[0])
        # add connections
        inner_points = [[r, z, "spline"] for r, z in zip(r_inner, z_inner)]
        outer_points = [[r, z, "spline"] for r, z in zip(r_outer, z_outer)]
        if self.with_inner_leg:
            # The closing segments of the leg are straight lines, not
            # spline-interpolated.
            outer_points[-2][2] = "straight"
            inner_points[-2][2] = "straight"
        inner_points[-1][2] = "straight"
        outer_points[-1][2] = "straight"
        points = inner_points + outer_points
        self.outer_points = np.vstack((r_outer, z_outer)).T
        self.inner_points = np.vstack((r_inner, z_inner)).T
        self.points = points
    def find_azimuth_placement_angle(self):
        """Calculates the azimuth placement angles based on the number of tf
        coils"""
        angles = list(np.linspace(0, 360, self.number_of_coils, endpoint=False))
        self.azimuth_placement_angle = angles
| [
"scipy.optimize.minimize",
"numpy.flip",
"numpy.log",
"scipy.integrate.odeint",
"numpy.append",
"paramak.utils.add_thickness",
"numpy.diff",
"numpy.linspace",
"numpy.vstack"
] | [((3059, 3094), 'scipy.optimize.minimize', 'minimize', (['error', 'z_0'], {'args': '(R0, R2)'}), '(error, z_0, args=(R0, R2))\n', (3067, 3094), False, 'from scipy.optimize import minimize\n'), ((4188, 4248), 'paramak.utils.add_thickness', 'add_thickness', (['r_inner', 'z_inner', 'self.thickness'], {'dy_dx': 'dz_dr'}), '(r_inner, z_inner, self.thickness, dy_dx=dz_dr)\n', (4201, 4248), False, 'from paramak.utils import add_thickness\n'), ((2605, 2645), 'numpy.linspace', 'np.linspace', (['a', 'b'], {'num': '(70)', 'endpoint': '(True)'}), '(a, b, num=70, endpoint=True)\n', (2616, 2645), True, 'import numpy as np\n'), ((2665, 2703), 'scipy.integrate.odeint', 'integrate.odeint', (['solvr', '[z_0, 0]', 'a_R'], {}), '(solvr, [z_0, 0], a_R)\n', (2681, 2703), False, 'from scipy import integrate\n'), ((2895, 2910), 'numpy.log', 'np.log', (['(R2 / R1)'], {}), '(R2 / R1)\n', (2901, 2910), True, 'import numpy as np\n'), ((4045, 4061), 'numpy.diff', 'np.diff', (['z_inner'], {}), '(z_inner)\n', (4052, 4061), True, 'import numpy as np\n'), ((4064, 4080), 'numpy.diff', 'np.diff', (['r_inner'], {}), '(r_inner)\n', (4071, 4080), True, 'import numpy as np\n'), ((4276, 4292), 'numpy.flip', 'np.flip', (['r_outer'], {}), '(r_outer)\n', (4283, 4292), True, 'import numpy as np\n'), ((4294, 4310), 'numpy.flip', 'np.flip', (['z_outer'], {}), '(z_outer)\n', (4301, 4310), True, 'import numpy as np\n'), ((4856, 4886), 'numpy.append', 'np.append', (['r_inner', 'r_inner[0]'], {}), '(r_inner, r_inner[0])\n', (4865, 4886), True, 'import numpy as np\n'), ((4909, 4939), 'numpy.append', 'np.append', (['z_inner', 'z_inner[0]'], {}), '(z_inner, z_inner[0])\n', (4918, 4939), True, 'import numpy as np\n'), ((4963, 4993), 'numpy.append', 'np.append', (['r_outer', 'r_outer[0]'], {}), '(r_outer, r_outer[0])\n', (4972, 4993), True, 'import numpy as np\n'), ((5016, 5046), 'numpy.append', 'np.append', (['z_outer', 'z_outer[0]'], {}), '(z_outer, z_outer[0])\n', (5025, 5046), True, 'import numpy as np\n'), 
((5504, 5533), 'numpy.vstack', 'np.vstack', (['(r_outer, z_outer)'], {}), '((r_outer, z_outer))\n', (5513, 5533), True, 'import numpy as np\n'), ((5564, 5593), 'numpy.vstack', 'np.vstack', (['(r_inner, z_inner)'], {}), '((r_inner, z_inner))\n', (5573, 5593), True, 'import numpy as np\n'), ((5787, 5844), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', 'self.number_of_coils'], {'endpoint': '(False)'}), '(0, 360, self.number_of_coils, endpoint=False)\n', (5798, 5844), True, 'import numpy as np\n'), ((3313, 3333), 'numpy.flip', 'np.flip', (['segment1[0]'], {}), '(segment1[0])\n', (3320, 3333), True, 'import numpy as np\n'), ((3532, 3552), 'numpy.flip', 'np.flip', (['segment1[1]'], {}), '(segment1[1])\n', (3539, 3552), True, 'import numpy as np\n'), ((3384, 3404), 'numpy.flip', 'np.flip', (['segment2[0]'], {}), '(segment2[0])\n', (3391, 3404), True, 'import numpy as np\n'), ((3604, 3624), 'numpy.flip', 'np.flip', (['segment2[1]'], {}), '(segment2[1])\n', (3611, 3624), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from packaging import version
import qcodes as qc
from qcodes import load_or_create_experiment, initialise_or_create_database_at
from plottr.data.datadict import DataDict
from plottr.utils import testdata
from plottr.node.tools import linearFlowchart
from plottr.data.qcodes_dataset import (
QCodesDSLoader,
get_ds_structure,
get_ds_info,
get_runs_from_db,
ds_to_datadict)
@pytest.fixture(scope='function')
def empty_db_path(tmp_path):
    """Create an initialised qcodes database file and yield its path."""
    location = str(tmp_path / 'some.db')
    initialise_or_create_database_at(location)
    yield location
@pytest.fixture
def experiment(empty_db_path):
    """Yield a '2d_softsweep' experiment; close its connection on teardown."""
    softsweep_exp = load_or_create_experiment('2d_softsweep', sample_name='no sample')
    yield softsweep_exp
    softsweep_exp.conn.close()
@pytest.fixture
def database_with_three_datasets(empty_db_path):
    """Fixture of a database file with 3 DataSets.

    The first experiment holds an empty run plus a run with one result row
    (x/y/foo and two dependent z parameters); the second experiment holds
    one run with two result rows (a/b/c).  Yields the database path and the
    three datasets, closing every connection on teardown.
    """
    first_exp = load_or_create_experiment('get_runs_from_db', sample_name='qubit')
    meas_one = qc.Measurement(exp=first_exp)
    meas_one.register_custom_parameter('x', unit='cm')
    meas_one.register_custom_parameter('y')
    meas_one.register_custom_parameter('foo')
    for z_index in range(2):
        meas_one.register_custom_parameter(f'z_{z_index}', setpoints=['x', 'y'])
    # First run: opened and closed without adding any results.
    with meas_one.run() as saver:
        dataset11 = saver.dataset
    # Second run: exactly one result row.
    with meas_one.run() as saver:
        saver.add_result(('x', 1.), ('y', 2.), ('z_0', 42.), ('z_1', 0.2))
        dataset12 = saver.dataset
    second_exp = load_or_create_experiment('give_em', sample_name='now')
    meas_two = qc.Measurement(exp=second_exp)
    meas_two.register_custom_parameter('a')
    meas_two.register_custom_parameter('b', unit='mm')
    meas_two.register_custom_parameter('c', setpoints=['a', 'b'])
    with meas_two.run() as saver:
        saver.add_result(('a', 1.), ('b', 2.), ('c', 42.))
        saver.add_result(('a', 4.), ('b', 5.), ('c', 77.))
        dataset2 = saver.dataset
    all_datasets = (dataset11, dataset12, dataset2)
    yield empty_db_path, all_datasets
    for stored in all_datasets:
        stored.conn.close()
    first_exp.conn.close()
    second_exp.conn.close()
def test_load_2dsoftsweep(experiment):
N = 5
m = qc.Measurement(exp=experiment)
m.register_custom_parameter('x', unit='cm')
m.register_custom_parameter('y')
# check that unused parameters don't mess with
m.register_custom_parameter('foo')
dd_expected = DataDict(x=dict(values=np.array([]), unit='cm'),
y=dict(values=np.array([])))
for n in range(N):
m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])
dd_expected[f'z_{n}'] = dict(values=np.array([]), axes=['x', 'y'])
dd_expected.validate()
with m.run() as datasaver:
for result in testdata.generate_2d_scalar_simple(3, 3, N):
row = [(k, v) for k, v in result.items()] + [('foo', 1)]
datasaver.add_result(*row)
dd_expected.add_data(**result)
# retrieve data as data dict
ddict = ds_to_datadict(datasaver.dataset)
assert ddict == dd_expected
@pytest.mark.skipif(version.parse(qc.__version__)
< version.parse("0.20.0"),
reason="Requires QCoDes 0.20.0 or later")
def test_load_2dsoftsweep_known_shape(experiment):
N = 1
m = qc.Measurement(exp=experiment)
m.register_custom_parameter('x', unit='cm')
m.register_custom_parameter('y')
# check that unused parameters don't mess with
m.register_custom_parameter('foo')
dd_expected = DataDict(x=dict(values=np.array([]), unit='cm'),
y=dict(values=np.array([])))
for n in range(N):
m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])
dd_expected[f'z_{n}'] = dict(values=np.array([]), axes=['x', 'y'])
dd_expected.validate()
shape = (3, 3)
m.set_shapes({'z_0': shape})
with m.run() as datasaver:
for result in testdata.generate_2d_scalar_simple(*shape, N):
row = [(k, v) for k, v in result.items()] + [('foo', 1)]
datasaver.add_result(*row)
dd_expected.add_data(**result)
dd_expected['x']['values'] = dd_expected['x']['values'].reshape(*shape)
dd_expected['y']['values'] = dd_expected['y']['values'].reshape(*shape)
dd_expected['z_0']['values'] = dd_expected['z_0']['values'].reshape(*shape)
# retrieve data as data dict
ddict = ds_to_datadict(datasaver.dataset)
assert ddict == dd_expected
def test_get_ds_structure(experiment):
N = 5
m = qc.Measurement(exp=experiment)
m.register_custom_parameter('x', unit='cm',label='my_x_param')
m.register_custom_parameter('y')
# check that unused parameters don't mess with
m.register_custom_parameter('foo')
for n in range(N):
m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])
with m.run() as datasaver:
dataset = datasaver.dataset
# test dataset structure function
expected_structure = {
'x': {
'unit': 'cm',
'label': 'my_x_param',
'values': []
},
'y': {
'unit': '',
'label': '',
'values': []
}
# note that parameter 'foo' is not expected to be included
# because it's a "standalone" parameter
}
for n in range(N):
expected_structure.update(
{f'z_{n}': {
'unit': '',
'label': '',
'axes': ['x', 'y'],
'values': []
}
}
)
structure = get_ds_structure(dataset)
assert structure == expected_structure
def test_get_ds_info(experiment):
N = 5
m = qc.Measurement(exp=experiment)
m.register_custom_parameter('x', unit='cm')
m.register_custom_parameter('y')
m.register_custom_parameter('foo')
for n in range(N):
m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])
with m.run() as datasaver:
dataset = datasaver.dataset
ds_info_with_empty_timestamps = get_ds_info(dataset,
get_structure=False)
assert ds_info_with_empty_timestamps['completed_date'] == ''
assert ds_info_with_empty_timestamps['completed_time'] == ''
# timestamps are difficult to test for, so we will cheat here and
# instead of hard-coding timestamps we will just get them from the dataset
# The same applies to the guid as it contains the timestamp
started_ts = dataset.run_timestamp()
completed_ts = dataset.completed_timestamp()
expected_ds_info = {
'experiment': '2d_softsweep',
'sample': 'no sample',
'completed_date': completed_ts[:10],
'completed_time': completed_ts[11:],
'started_date': started_ts[:10],
'started_time': started_ts[11:],
'name': 'results',
'structure': None,
'records': 0,
'guid': dataset.guid
}
ds_info = get_ds_info(dataset, get_structure=False)
assert ds_info == expected_ds_info
expected_ds_info_with_structure = expected_ds_info.copy()
expected_ds_info_with_structure['structure'] = get_ds_structure(dataset)
ds_info_with_structure = get_ds_info(dataset)
assert ds_info_with_structure == expected_ds_info_with_structure
def test_get_runs_from_db(database_with_three_datasets):
db_path, datasets = database_with_three_datasets
# Prepare an expected overview of the created database
expected_overview = {ds.run_id: get_ds_info(ds, get_structure=False)
for ds in datasets}
# Get the actual overview of the created database
overview = get_runs_from_db(db_path) # get_structure=False is the default
# Finally, assert
assert overview == expected_overview
# Prepare an expected overview of the created database WITH STRUCTURE
expected_overview_with_structure = {
ds.run_id: get_ds_info(ds, get_structure=True)
for ds in datasets
}
# Get the actual overview of the created database WITH STRUCTURE
overview_with_structure = get_runs_from_db(db_path, get_structure=True)
# Finally, assert WITH STRUCTURE
assert overview_with_structure == expected_overview_with_structure
def test_update_qcloader(qtbot, empty_db_path):
db_path = empty_db_path
exp = load_or_create_experiment('2d_softsweep', sample_name='no sample')
N = 2
m = qc.Measurement(exp=exp)
m.register_custom_parameter('x')
m.register_custom_parameter('y')
dd_expected = DataDict(x=dict(values=np.array([])),
y=dict(values=np.array([])))
for n in range(N):
m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])
dd_expected[f'z_{n}'] = dict(values=np.array([]), axes=['x', 'y'])
dd_expected.validate()
# setting up the flowchart
fc = linearFlowchart(('loader', QCodesDSLoader))
loader = fc.nodes()['loader']
def check():
nresults = ds.number_of_results
loader.update()
ddict = fc.output()['dataOut']
if ddict is not None and nresults > 0:
z_in = dd_expected.data_vals('z_1')
z_out = ddict.data_vals('z_1')
if z_out is not None:
assert z_in.size == z_out.size
assert np.allclose(z_in, z_out, atol=1e-15)
with m.run() as datasaver:
ds = datasaver.dataset
run_id = datasaver.dataset.captured_run_id
loader.pathAndId = db_path, run_id
for result in testdata.generate_2d_scalar_simple(3, 3, N):
row = [(k, v) for k, v in result.items()]
datasaver.add_result(*row)
dd_expected.add_data(**result)
check()
check()
# insert data in small chunks, and check
# while True:
# try:
# ninsertions = np.random.randint(0, 5)
# for n in range(ninsertions):
# _ds.add_result(next(results))
# except StopIteration:
# _ds.mark_complete()
# break
# check()
# check()
| [
"qcodes.Measurement",
"plottr.data.qcodes_dataset.get_ds_info",
"qcodes.load_or_create_experiment",
"packaging.version.parse",
"numpy.allclose",
"pytest.fixture",
"plottr.data.qcodes_dataset.get_ds_structure",
"plottr.data.qcodes_dataset.ds_to_datadict",
"plottr.data.qcodes_dataset.get_runs_from_db"... | [((431, 463), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (445, 463), False, 'import pytest\n'), ((537, 578), 'qcodes.initialise_or_create_database_at', 'initialise_or_create_database_at', (['db_path'], {}), '(db_path)\n', (569, 578), False, 'from qcodes import load_or_create_experiment, initialise_or_create_database_at\n'), ((656, 722), 'qcodes.load_or_create_experiment', 'load_or_create_experiment', (['"""2d_softsweep"""'], {'sample_name': '"""no sample"""'}), "('2d_softsweep', sample_name='no sample')\n", (681, 722), False, 'from qcodes import load_or_create_experiment, initialise_or_create_database_at\n'), ((889, 955), 'qcodes.load_or_create_experiment', 'load_or_create_experiment', (['"""get_runs_from_db"""'], {'sample_name': '"""qubit"""'}), "('get_runs_from_db', sample_name='qubit')\n", (914, 955), False, 'from qcodes import load_or_create_experiment, initialise_or_create_database_at\n'), ((965, 989), 'qcodes.Measurement', 'qc.Measurement', ([], {'exp': 'exp1'}), '(exp=exp1)\n', (979, 989), True, 'import qcodes as qc\n'), ((1444, 1499), 'qcodes.load_or_create_experiment', 'load_or_create_experiment', (['"""give_em"""'], {'sample_name': '"""now"""'}), "('give_em', sample_name='now')\n", (1469, 1499), False, 'from qcodes import load_or_create_experiment, initialise_or_create_database_at\n'), ((1509, 1533), 'qcodes.Measurement', 'qc.Measurement', ([], {'exp': 'exp2'}), '(exp=exp2)\n', (1523, 1533), True, 'import qcodes as qc\n'), ((2114, 2144), 'qcodes.Measurement', 'qc.Measurement', ([], {'exp': 'experiment'}), '(exp=experiment)\n', (2128, 2144), True, 'import qcodes as qc\n'), ((2933, 2966), 'plottr.data.qcodes_dataset.ds_to_datadict', 'ds_to_datadict', (['datasaver.dataset'], {}), '(datasaver.dataset)\n', (2947, 2966), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((3229, 
3259), 'qcodes.Measurement', 'qc.Measurement', ([], {'exp': 'experiment'}), '(exp=experiment)\n', (3243, 3259), True, 'import qcodes as qc\n'), ((4337, 4370), 'plottr.data.qcodes_dataset.ds_to_datadict', 'ds_to_datadict', (['datasaver.dataset'], {}), '(datasaver.dataset)\n', (4351, 4370), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((4463, 4493), 'qcodes.Measurement', 'qc.Measurement', ([], {'exp': 'experiment'}), '(exp=experiment)\n', (4477, 4493), True, 'import qcodes as qc\n'), ((5510, 5535), 'plottr.data.qcodes_dataset.get_ds_structure', 'get_ds_structure', (['dataset'], {}), '(dataset)\n', (5526, 5535), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((5634, 5664), 'qcodes.Measurement', 'qc.Measurement', ([], {'exp': 'experiment'}), '(exp=experiment)\n', (5648, 5664), True, 'import qcodes as qc\n'), ((6919, 6960), 'plottr.data.qcodes_dataset.get_ds_info', 'get_ds_info', (['dataset'], {'get_structure': '(False)'}), '(dataset, get_structure=False)\n', (6930, 6960), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((7115, 7140), 'plottr.data.qcodes_dataset.get_ds_structure', 'get_ds_structure', (['dataset'], {}), '(dataset)\n', (7131, 7140), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((7171, 7191), 'plottr.data.qcodes_dataset.get_ds_info', 'get_ds_info', (['dataset'], {}), '(dataset)\n', (7182, 7191), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((7622, 7647), 'plottr.data.qcodes_dataset.get_runs_from_db', 'get_runs_from_db', (['db_path'], {}), '(db_path)\n', (7638, 7647), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, 
get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((8054, 8099), 'plottr.data.qcodes_dataset.get_runs_from_db', 'get_runs_from_db', (['db_path'], {'get_structure': '(True)'}), '(db_path, get_structure=True)\n', (8070, 8099), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((8298, 8364), 'qcodes.load_or_create_experiment', 'load_or_create_experiment', (['"""2d_softsweep"""'], {'sample_name': '"""no sample"""'}), "('2d_softsweep', sample_name='no sample')\n", (8323, 8364), False, 'from qcodes import load_or_create_experiment, initialise_or_create_database_at\n'), ((8384, 8407), 'qcodes.Measurement', 'qc.Measurement', ([], {'exp': 'exp'}), '(exp=exp)\n', (8398, 8407), True, 'import qcodes as qc\n'), ((8828, 8871), 'plottr.node.tools.linearFlowchart', 'linearFlowchart', (["('loader', QCodesDSLoader)"], {}), "(('loader', QCodesDSLoader))\n", (8843, 8871), False, 'from plottr.node.tools import linearFlowchart\n'), ((2691, 2734), 'plottr.utils.testdata.generate_2d_scalar_simple', 'testdata.generate_2d_scalar_simple', (['(3)', '(3)', 'N'], {}), '(3, 3, N)\n', (2725, 2734), False, 'from plottr.utils import testdata\n'), ((3860, 3905), 'plottr.utils.testdata.generate_2d_scalar_simple', 'testdata.generate_2d_scalar_simple', (['*shape', 'N'], {}), '(*shape, N)\n', (3894, 3905), False, 'from plottr.utils import testdata\n'), ((3021, 3050), 'packaging.version.parse', 'version.parse', (['qc.__version__'], {}), '(qc.__version__)\n', (3034, 3050), False, 'from packaging import version\n'), ((3073, 3096), 'packaging.version.parse', 'version.parse', (['"""0.20.0"""'], {}), "('0.20.0')\n", (3086, 3096), False, 'from packaging import version\n'), ((5990, 6031), 'plottr.data.qcodes_dataset.get_ds_info', 'get_ds_info', (['dataset'], {'get_structure': '(False)'}), '(dataset, get_structure=False)\n', (6001, 6031), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, 
get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((7470, 7506), 'plottr.data.qcodes_dataset.get_ds_info', 'get_ds_info', (['ds'], {'get_structure': '(False)'}), '(ds, get_structure=False)\n', (7481, 7506), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((7885, 7920), 'plottr.data.qcodes_dataset.get_ds_info', 'get_ds_info', (['ds'], {'get_structure': '(True)'}), '(ds, get_structure=True)\n', (7896, 7920), False, 'from plottr.data.qcodes_dataset import QCodesDSLoader, get_ds_structure, get_ds_info, get_runs_from_db, ds_to_datadict\n'), ((9487, 9530), 'plottr.utils.testdata.generate_2d_scalar_simple', 'testdata.generate_2d_scalar_simple', (['(3)', '(3)', 'N'], {}), '(3, 3, N)\n', (9521, 9530), False, 'from plottr.utils import testdata\n'), ((2579, 2591), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2587, 2591), True, 'import numpy as np\n'), ((3694, 3706), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3702, 3706), True, 'import numpy as np\n'), ((8729, 8741), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8737, 8741), True, 'import numpy as np\n'), ((9270, 9306), 'numpy.allclose', 'np.allclose', (['z_in', 'z_out'], {'atol': '(1e-15)'}), '(z_in, z_out, atol=1e-15)\n', (9281, 9306), True, 'import numpy as np\n'), ((2362, 2374), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2370, 2374), True, 'import numpy as np\n'), ((2429, 2441), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2437, 2441), True, 'import numpy as np\n'), ((3477, 3489), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3485, 3489), True, 'import numpy as np\n'), ((3544, 3556), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3552, 3556), True, 'import numpy as np\n'), ((8523, 8535), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8531, 8535), True, 'import numpy as np\n'), ((8579, 8591), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8587, 8591), True, 'import 
numpy as np\n')] |
import random
from collections import deque
import numpy as np
import torch
from flatland.envs.malfunction_generators import malfunction_from_params
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool
from importlib_resources import path
import torch_training.Nets
from torch_training.dueling_double_dqn import Agent
from utils.observation_utils import normalize_observation
random.seed(1)
np.random.seed(1)
"""
file_name = "./railway/complex_scene.pkl"
env = RailEnv(width=10,
height=20,
rail_generator=rail_from_file(file_name),
obs_builder_object=TreeObsForRailEnv(max_depth=3, predictor=ShortestPathPredictorForRailEnv()))
x_dim = env.width
y_dim = env.height
"""
# Parameters for the Environment
x_dim = 25
y_dim = 25
n_agents = 10
# We are training an Agent using the Tree Observation with depth 2
observation_builder = TreeObsForRailEnv(max_depth=2)
# Use a the malfunction generator to break agents from time to time
stochastic_data = {'malfunction_rate': 8000, # Rate of malfunction occurence of single agent
'min_duration': 15, # Minimal duration of malfunction
'max_duration': 50 # Max duration of malfunction
}
# Custom observation builder
TreeObservation = TreeObsForRailEnv(max_depth=2, predictor=ShortestPathPredictorForRailEnv(30))
# Different agent types (trains) with different speeds.
speed_ration_map = {1.: 0.25, # Fast passenger train
1. / 2.: 0.25, # Fast freight train
1. / 3.: 0.25, # Slow commuter train
1. / 4.: 0.25} # Slow freight train
env = RailEnv(width=x_dim,
height=y_dim,
rail_generator=sparse_rail_generator(max_num_cities=3,
# Number of cities in map (where train stations are)
seed=1, # Random seed
grid_mode=False,
max_rails_between_cities=2,
max_rails_in_city=2),
schedule_generator=sparse_schedule_generator(speed_ration_map),
number_of_agents=n_agents,
malfunction_generator_and_process_data=malfunction_from_params(stochastic_data),
obs_builder_object=TreeObservation)
env.reset(True, True)
observation_helper = TreeObsForRailEnv(max_depth=3, predictor=ShortestPathPredictorForRailEnv())
env_renderer = RenderTool(env, gl="PILSVG", )
num_features_per_node = env.obs_builder.observation_dim
tree_depth = 2
nr_nodes = 0
for i in range(tree_depth + 1):
nr_nodes += np.power(4, i)
state_size = num_features_per_node * nr_nodes
action_size = 5
# We set the number of episodes we would like to train on
if 'n_trials' not in locals():
n_trials = 60000
max_steps = int(4 * 2 * (20 + env.height + env.width))
eps = 1.
eps_end = 0.005
eps_decay = 0.9995
action_dict = dict()
final_action_dict = dict()
scores_window = deque(maxlen=100)
done_window = deque(maxlen=100)
scores = []
dones_list = []
action_prob = [0] * action_size
agent_obs = [None] * env.get_num_agents()
agent_next_obs = [None] * env.get_num_agents()
agent = Agent(state_size, action_size)
with path(torch_training.Nets, "navigator_checkpoint1200.pth") as file_in:
agent.qnetwork_local.load_state_dict(torch.load(file_in))
record_images = False
frame_step = 0
for trials in range(1, n_trials + 1):
# Reset environment
obs, info = env.reset(True, True)
env_renderer.reset()
# Build agent specific observations
for a in range(env.get_num_agents()):
agent_obs[a] = agent_obs[a] = normalize_observation(obs[a], tree_depth, observation_radius=10)
# Reset score and done
score = 0
env_done = 0
# Run episode
for step in range(max_steps):
# Action
for a in range(env.get_num_agents()):
if info['action_required'][a]:
action = agent.act(agent_obs[a], eps=0.)
else:
action = 0
action_prob[action] += 1
action_dict.update({a: action})
# Environment step
obs, all_rewards, done, _ = env.step(action_dict)
env_renderer.render_env(show=True, show_predictions=True, show_observations=False)
# Build agent specific observations and normalize
for a in range(env.get_num_agents()):
if obs[a]:
agent_obs[a] = normalize_observation(obs[a], tree_depth, observation_radius=10)
if done['__all__']:
break
| [
"numpy.random.seed",
"flatland.envs.predictions.ShortestPathPredictorForRailEnv",
"numpy.power",
"torch.load",
"torch_training.dueling_double_dqn.Agent",
"utils.observation_utils.normalize_observation",
"flatland.envs.malfunction_generators.malfunction_from_params",
"importlib_resources.path",
"flat... | [((682, 696), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (693, 696), False, 'import random\n'), ((697, 714), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (711, 714), True, 'import numpy as np\n'), ((1177, 1207), 'flatland.envs.observations.TreeObsForRailEnv', 'TreeObsForRailEnv', ([], {'max_depth': '(2)'}), '(max_depth=2)\n', (1194, 1207), False, 'from flatland.envs.observations import TreeObsForRailEnv\n'), ((2867, 2895), 'flatland.utils.rendertools.RenderTool', 'RenderTool', (['env'], {'gl': '"""PILSVG"""'}), "(env, gl='PILSVG')\n", (2877, 2895), False, 'from flatland.utils.rendertools import RenderTool\n'), ((3382, 3399), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (3387, 3399), False, 'from collections import deque\n'), ((3414, 3431), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (3419, 3431), False, 'from collections import deque\n'), ((3589, 3619), 'torch_training.dueling_double_dqn.Agent', 'Agent', (['state_size', 'action_size'], {}), '(state_size, action_size)\n', (3594, 3619), False, 'from torch_training.dueling_double_dqn import Agent\n'), ((3031, 3045), 'numpy.power', 'np.power', (['(4)', 'i'], {}), '(4, i)\n', (3039, 3045), True, 'import numpy as np\n'), ((3625, 3682), 'importlib_resources.path', 'path', (['torch_training.Nets', '"""navigator_checkpoint1200.pth"""'], {}), "(torch_training.Nets, 'navigator_checkpoint1200.pth')\n", (3629, 3682), False, 'from importlib_resources import path\n'), ((1625, 1660), 'flatland.envs.predictions.ShortestPathPredictorForRailEnv', 'ShortestPathPredictorForRailEnv', (['(30)'], {}), '(30)\n', (1656, 1660), False, 'from flatland.envs.predictions import ShortestPathPredictorForRailEnv\n'), ((2030, 2147), 'flatland.envs.rail_generators.sparse_rail_generator', 'sparse_rail_generator', ([], {'max_num_cities': '(3)', 'seed': '(1)', 'grid_mode': '(False)', 'max_rails_between_cities': '(2)', 'max_rails_in_city': '(2)'}), 
'(max_num_cities=3, seed=1, grid_mode=False,\n max_rails_between_cities=2, max_rails_in_city=2)\n', (2051, 2147), False, 'from flatland.envs.rail_generators import sparse_rail_generator\n'), ((2501, 2544), 'flatland.envs.schedule_generators.sparse_schedule_generator', 'sparse_schedule_generator', (['speed_ration_map'], {}), '(speed_ration_map)\n', (2526, 2544), False, 'from flatland.envs.schedule_generators import sparse_schedule_generator\n'), ((2640, 2680), 'flatland.envs.malfunction_generators.malfunction_from_params', 'malfunction_from_params', (['stochastic_data'], {}), '(stochastic_data)\n', (2663, 2680), False, 'from flatland.envs.malfunction_generators import malfunction_from_params\n'), ((2817, 2850), 'flatland.envs.predictions.ShortestPathPredictorForRailEnv', 'ShortestPathPredictorForRailEnv', ([], {}), '()\n', (2848, 2850), False, 'from flatland.envs.predictions import ShortestPathPredictorForRailEnv\n'), ((3736, 3755), 'torch.load', 'torch.load', (['file_in'], {}), '(file_in)\n', (3746, 3755), False, 'import torch\n'), ((4042, 4106), 'utils.observation_utils.normalize_observation', 'normalize_observation', (['obs[a]', 'tree_depth'], {'observation_radius': '(10)'}), '(obs[a], tree_depth, observation_radius=10)\n', (4063, 4106), False, 'from utils.observation_utils import normalize_observation\n'), ((4843, 4907), 'utils.observation_utils.normalize_observation', 'normalize_observation', (['obs[a]', 'tree_depth'], {'observation_radius': '(10)'}), '(obs[a], tree_depth, observation_radius=10)\n', (4864, 4907), False, 'from utils.observation_utils import normalize_observation\n')] |
import os
import numpy as np
import esutil as eu
import fitsio
import meds
import piff
import pixmappy
import desmeds
import ngmix
import scipy
from .._pizza_cutter import _build_metadata
from .._constants import MAGZP_REF
from meds.maker import MEDS_FMT_VERSION
from ... import __version__
def test_pizza_cutter_build_metadata(monkeypatch):
monkeypatch.setenv('MEDS_DIR', 'BLAH')
monkeypatch.setenv('PIFF_DATA_DIR', 'BLAHH')
monkeypatch.setenv('DESDATA', 'BLAHHH')
config = 'blah blah blah'
json_info = "tile info"
metadata, json_info_image = _build_metadata(config=config, json_info=json_info)
assert np.all(metadata['numpy_version'] == np.__version__)
assert np.all(metadata['scipy_version'] == scipy.__version__)
assert np.all(metadata['esutil_version'] == eu.__version__)
assert np.all(metadata['ngmix_version'] == ngmix.__version__)
assert np.all(
metadata['fitsio_version'] == fitsio.__version__)
assert np.all(metadata['meds_version'] == meds.__version__)
assert np.all(metadata['piff_version'] == piff.__version__)
assert np.all(
metadata['pixmappy_version'] == pixmappy.__version__)
assert np.all(
metadata['desmeds_version'] == desmeds.__version__)
assert np.all(
metadata['pizza_cutter_version'] == __version__)
assert np.all(metadata['config'] == config)
assert np.all(metadata['magzp_ref'] == MAGZP_REF)
assert np.all(
metadata['meds_fmt_version'] == MEDS_FMT_VERSION)
assert np.all(
metadata['meds_dir'] == os.environ['MEDS_DIR'])
assert np.all(
metadata['piff_data_dir'] ==
os.environ['PIFF_DATA_DIR'])
assert np.all(
metadata['desdata'] == os.environ['DESDATA'])
assert np.array_equal(
json_info_image,
np.frombuffer(json_info.encode("ascii"), dtype='u1'),
)
| [
"numpy.all"
] | [((636, 687), 'numpy.all', 'np.all', (["(metadata['numpy_version'] == np.__version__)"], {}), "(metadata['numpy_version'] == np.__version__)\n", (642, 687), True, 'import numpy as np\n'), ((699, 753), 'numpy.all', 'np.all', (["(metadata['scipy_version'] == scipy.__version__)"], {}), "(metadata['scipy_version'] == scipy.__version__)\n", (705, 753), True, 'import numpy as np\n'), ((765, 817), 'numpy.all', 'np.all', (["(metadata['esutil_version'] == eu.__version__)"], {}), "(metadata['esutil_version'] == eu.__version__)\n", (771, 817), True, 'import numpy as np\n'), ((829, 883), 'numpy.all', 'np.all', (["(metadata['ngmix_version'] == ngmix.__version__)"], {}), "(metadata['ngmix_version'] == ngmix.__version__)\n", (835, 883), True, 'import numpy as np\n'), ((895, 951), 'numpy.all', 'np.all', (["(metadata['fitsio_version'] == fitsio.__version__)"], {}), "(metadata['fitsio_version'] == fitsio.__version__)\n", (901, 951), True, 'import numpy as np\n'), ((972, 1024), 'numpy.all', 'np.all', (["(metadata['meds_version'] == meds.__version__)"], {}), "(metadata['meds_version'] == meds.__version__)\n", (978, 1024), True, 'import numpy as np\n'), ((1036, 1088), 'numpy.all', 'np.all', (["(metadata['piff_version'] == piff.__version__)"], {}), "(metadata['piff_version'] == piff.__version__)\n", (1042, 1088), True, 'import numpy as np\n'), ((1100, 1160), 'numpy.all', 'np.all', (["(metadata['pixmappy_version'] == pixmappy.__version__)"], {}), "(metadata['pixmappy_version'] == pixmappy.__version__)\n", (1106, 1160), True, 'import numpy as np\n'), ((1181, 1239), 'numpy.all', 'np.all', (["(metadata['desmeds_version'] == desmeds.__version__)"], {}), "(metadata['desmeds_version'] == desmeds.__version__)\n", (1187, 1239), True, 'import numpy as np\n'), ((1260, 1315), 'numpy.all', 'np.all', (["(metadata['pizza_cutter_version'] == __version__)"], {}), "(metadata['pizza_cutter_version'] == __version__)\n", (1266, 1315), True, 'import numpy as np\n'), ((1336, 1372), 'numpy.all', 'np.all', 
(["(metadata['config'] == config)"], {}), "(metadata['config'] == config)\n", (1342, 1372), True, 'import numpy as np\n'), ((1384, 1426), 'numpy.all', 'np.all', (["(metadata['magzp_ref'] == MAGZP_REF)"], {}), "(metadata['magzp_ref'] == MAGZP_REF)\n", (1390, 1426), True, 'import numpy as np\n'), ((1438, 1494), 'numpy.all', 'np.all', (["(metadata['meds_fmt_version'] == MEDS_FMT_VERSION)"], {}), "(metadata['meds_fmt_version'] == MEDS_FMT_VERSION)\n", (1444, 1494), True, 'import numpy as np\n'), ((1515, 1569), 'numpy.all', 'np.all', (["(metadata['meds_dir'] == os.environ['MEDS_DIR'])"], {}), "(metadata['meds_dir'] == os.environ['MEDS_DIR'])\n", (1521, 1569), True, 'import numpy as np\n'), ((1590, 1654), 'numpy.all', 'np.all', (["(metadata['piff_data_dir'] == os.environ['PIFF_DATA_DIR'])"], {}), "(metadata['piff_data_dir'] == os.environ['PIFF_DATA_DIR'])\n", (1596, 1654), True, 'import numpy as np\n'), ((1683, 1735), 'numpy.all', 'np.all', (["(metadata['desdata'] == os.environ['DESDATA'])"], {}), "(metadata['desdata'] == os.environ['DESDATA'])\n", (1689, 1735), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import glob
import random
import numpy as np
from multiprocessing import Pool
from dvs_utils.prepareData import prepareData
NUM_CLASSES = 4
NUM_POINTS = 2**14
DATASET_TRAIN_DIR = "/bigdata_hdd/klein/FrKlein_PoC/data/TrainFiles/"
DATASET_PREP_TRAIN_DIR = "/bigdata_hdd/klein/FrKlein_PoC/data/prepared/TrainFiles/"
DATASET_VALIDATION_DIR = "/bigdata_hdd/klein/FrKlein_PoC/data/ValidationFiles/"
DATASET_TEST_DIR = "/bigdata_hdd/klein/FrKlein_PoC/data/TestFiles/"
CLASS_MAPPING = {
1: 0, # original PERSON(1) --> 0
2: 1, # original DOG(2) --> 1
5: 2, # original BICYCLE(5) --> 2
6: 3, # original SPORTSBALL(6) --> 3
}
def load_ascii_cloud_prepared(fname):
points = []
labels = []
instances = []
with open(fname, 'r') as fd:
for line in fd.readlines():
if "//" in line:
continue
x, y, t, class_label, instance_label = line.strip().split(' ')
x, y, t, class_label, instance_label = float(x), float(y), float(t), int(class_label), int(instance_label)
points.append(np.array([x, y, t], dtype=np.float32))
labels.append(class_label)
instances.append(instance_label)
npPoints = np.array(points, dtype=np.float32)
npSeg = np.array(labels, dtype=np.uint8)
npIns = np.array(instances, dtype=np.uint16)
if len(npIns) != NUM_POINTS:
raise ValueError("Wrong NUM_POINTS of cloud: ", fname)
return npPoints, npSeg, npIns
class DVSDataset():
def __init__(self, input_list_txt = 'none', npoints=16384, split='train', batchsize=16):
random.seed(1337) # same result every time
if split not in ['train', 'validation', 'test', 'prepared_train', 'prepared_test']:
raise ValueError("unknown split")
self.input_list_txt = input_list_txt
self.split = split
self.batch_count = 0
self.batchsize = batchsize
if npoints != NUM_POINTS:
raise ValueError("npoints != NUM_POINTS")
if(input_list_txt == 'none'):
if(split == 'train'):
self.files_to_use = glob.glob(os.path.join(DATASET_TRAIN_DIR, "*.csv"))
elif(split == 'validation'):
self.files_to_use = glob.glob(os.path.join(DATASET_VALIDATION_DIR, "*.csv"))
elif(split == 'test'):
self.files_to_use = glob.glob(os.path.join(DATASET_TEST_DIR, "*.csv"))
elif(split == 'prepared_train'):
self.files_to_use = glob.glob(os.path.join(DATASET_PREP_TRAIN_DIR, "*.csv"))
else:
if(split == 'test' or split == 'prepared_test'):
self.files_to_use = []
self.files_to_use.append(input_list_txt)
else:
self.input_list_txt = input_list_txt
self.files_to_use = self.get_input_list()
random.shuffle(self.files_to_use)
self.length = len(self.files_to_use)
self.batch_num = self.length // batchsize
# --------------------------------------------------------------------------------------------------------------
# parallel csv read...
print("Start to read files...")
if split == 'prepared_train' or split == 'prepared_test':
self.length = len(self.files_to_use)
if(len(self.files_to_use) == 1):
points, labels, instances = load_ascii_cloud_prepared(self.files_to_use[0])
else:
pool = Pool(processes=None)
points, labels, instances = zip(*pool.map(load_ascii_cloud_prepared, self.files_to_use))
self.point_list = np.asarray(points)
self.semantic_label_list = np.asarray(labels)
self.instance_label_list = np.asarray(instances)
else:
self.point_list, self.semantic_label_list, self.instance_label_list = prepareData(self.files_to_use)
self.length = len(self.point_list.shape[0])
self.batch_num = self.length // batchsize
print(len(self.point_list), len(self.semantic_label_list), len(self.instance_label_list))
def get_input_list(self):
input_list = [line.strip() for line in open(self.input_list_txt, 'r')]
input_list = [os.path.join(self.data_root, item) for item in input_list]
return input_list
def get_all(self):
return self.point_list, self.semantic_label_list, self.instance_label_list
def get_batch(self, data_aug=False):
points = self.point_list[(self.batch_count*self.batchsize):((self.batch_count+1)*self.batchsize)][:][:]
sem = self.semantic_label_list[(self.batch_count*self.batchsize):((self.batch_count+1)*self.batchsize)][:][:]
inst = self.instance_label_list[(self.batch_count*self.batchsize):((self.batch_count+1)*self.batchsize)][:][:]
self.batch_count = self.batch_count + 1
if(self.batch_count == self.batch_num):
self.batch_count = 0
return points, sem, inst
def get_length(self):
return self.length
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # ------------------------------------------------------------------------------
    # Smoke test: build the train split from a hard-coded file list and pull
    # three consecutive batches, printing the array shapes of each.
    dvsDataset = DVSDataset('/home/klein/neural_networks/jsnet/JSNet_LK/data/train_csv_dvs.txt', split='train', batchsize=16)
    points, sem, inst = dvsDataset.get_batch()
    print(points.shape)
    print(sem.shape)
    print(inst.shape)
    points, sem, inst = dvsDataset.get_batch()
    print(points.shape)
    print(sem.shape)
    print(inst.shape)
    points, sem, inst = dvsDataset.get_batch()
    print(points.shape)
    print(sem.shape)
print(inst.shape) | [
"random.shuffle",
"numpy.asarray",
"dvs_utils.prepareData.prepareData",
"random.seed",
"numpy.array",
"multiprocessing.Pool",
"os.path.join"
] | [((1248, 1282), 'numpy.array', 'np.array', (['points'], {'dtype': 'np.float32'}), '(points, dtype=np.float32)\n', (1256, 1282), True, 'import numpy as np\n'), ((1295, 1327), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.uint8'}), '(labels, dtype=np.uint8)\n', (1303, 1327), True, 'import numpy as np\n'), ((1340, 1376), 'numpy.array', 'np.array', (['instances'], {'dtype': 'np.uint16'}), '(instances, dtype=np.uint16)\n', (1348, 1376), True, 'import numpy as np\n'), ((1635, 1652), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (1646, 1652), False, 'import random\n'), ((2918, 2951), 'random.shuffle', 'random.shuffle', (['self.files_to_use'], {}), '(self.files_to_use)\n', (2932, 2951), False, 'import random\n'), ((3733, 3751), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (3743, 3751), True, 'import numpy as np\n'), ((3791, 3809), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (3801, 3809), True, 'import numpy as np\n'), ((3849, 3870), 'numpy.asarray', 'np.asarray', (['instances'], {}), '(instances)\n', (3859, 3870), True, 'import numpy as np\n'), ((3976, 4006), 'dvs_utils.prepareData.prepareData', 'prepareData', (['self.files_to_use'], {}), '(self.files_to_use)\n', (3987, 4006), False, 'from dvs_utils.prepareData import prepareData\n'), ((4361, 4395), 'os.path.join', 'os.path.join', (['self.data_root', 'item'], {}), '(self.data_root, item)\n', (4373, 4395), False, 'import os\n'), ((1109, 1146), 'numpy.array', 'np.array', (['[x, y, t]'], {'dtype': 'np.float32'}), '([x, y, t], dtype=np.float32)\n', (1117, 1146), True, 'import numpy as np\n'), ((3564, 3584), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'None'}), '(processes=None)\n', (3568, 3584), False, 'from multiprocessing import Pool\n'), ((2171, 2211), 'os.path.join', 'os.path.join', (['DATASET_TRAIN_DIR', '"""*.csv"""'], {}), "(DATASET_TRAIN_DIR, '*.csv')\n", (2183, 2211), False, 'import os\n'), ((2301, 2346), 'os.path.join', 'os.path.join', 
(['DATASET_VALIDATION_DIR', '"""*.csv"""'], {}), "(DATASET_VALIDATION_DIR, '*.csv')\n", (2313, 2346), False, 'import os\n'), ((2429, 2468), 'os.path.join', 'os.path.join', (['DATASET_TEST_DIR', '"""*.csv"""'], {}), "(DATASET_TEST_DIR, '*.csv')\n", (2441, 2468), False, 'import os\n'), ((2561, 2606), 'os.path.join', 'os.path.join', (['DATASET_PREP_TRAIN_DIR', '"""*.csv"""'], {}), "(DATASET_PREP_TRAIN_DIR, '*.csv')\n", (2573, 2606), False, 'import os\n')] |
"""
Based on: https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
"""
import random
from typing import List, Tuple
import numpy as np
from pyderl.utils.data_structures import SumSegmentTree, MinSegmentTree
class PrioritizedReplayBuffer:
    """ Prioritized replay buffer.

    Transitions live in a ring buffer of capacity ``size``; two segment
    trees mirror the per-transition priorities so that proportional
    sampling and importance-weight computation are both O(log n).

    Args:
        size (int): Max number of transitions to store in the buffer. When
            the buffer overflows the old memories are dropped.
        alpha (float): How much prioritization is used (0 for no
            prioritization and 1 for full prioritization).
    """

    def __init__(self, size: int, alpha: float) -> None:
        self._storage = []
        self._maxsize = size
        self._next_idx = 0
        assert alpha >= 0
        self._alpha = alpha
        # Segment trees require a power-of-two capacity.
        it_capacity = 1
        while it_capacity < size:
            it_capacity *= 2
        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        self._max_priority = 1.0

    def __len__(self):
        return len(self._storage)

    def _add_to_storage(self, obs_t, action, reward, obs_tp1, done):
        """Append a transition, overwriting the oldest slot once full."""
        transition = (obs_t, action, reward, obs_tp1, done)
        if self._next_idx < len(self._storage):
            self._storage[self._next_idx] = transition
        else:
            self._storage.append(transition)
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def _encode_sample(self, idxes):
        """Gather the transitions at ``idxes`` into five stacked arrays."""
        obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
        for idx in idxes:
            obs_t, action, reward, obs_tp1, done = self._storage[idx]
            obses_t.append(np.asarray(obs_t))
            actions.append(np.asarray(action))
            rewards.append(reward)
            obses_tp1.append(np.asarray(obs_tp1))
            dones.append(done)
        return (np.array(obses_t),
                np.array(actions),
                np.array(rewards),
                np.array(obses_tp1),
                np.array(dones))

    def add(self, obs_t, action, reward, obs_tp1, done):
        """Store a transition with the current maximum priority."""
        idx = self._next_idx
        self._add_to_storage(obs_t, action, reward, obs_tp1, done)
        priority = self._max_priority ** self._alpha
        self._it_sum[idx] = priority
        self._it_min[idx] = priority

    def _sample_proportional(self, batch_size):
        """Draw one index per equal-mass segment of the priority sum."""
        total = self._it_sum.sum(0, len(self._storage) - 1)
        segment = total / batch_size
        idxes = []
        for i in range(batch_size):
            mass = random.random() * segment + i * segment
            idxes.append(self._it_sum.find_prefixsum_idx(mass))
        return idxes

    def sample(self, batch_size: int, beta: float) -> Tuple[np.ndarray, ...]:
        """ Sample a batch of experiences.

        Compared to uniform sampling, this method also returns the importance
        weights and idxes of sampled experiences.

        Args:
            batch_size (int): How many transitions to sample.
            beta (float): To what degree to use importance weights (0 means no
                corrections and 1 means full correction).

        Returns:
            Tuple of numpy arrays: obs_batch, act_batch, rew_batch,
            next_obs_batch, done_mask, weights (shape (batch_size,),
            importance weight per transition) and idxes (buffer indices of
            the sampled experiences).
        """
        assert beta > 0
        idxes = self._sample_proportional(batch_size)
        n = len(self._storage)
        # Normalize weights by the largest possible weight so they are <= 1.
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * n) ** (-beta)
        weights = np.array([
            ((self._it_sum[idx] / self._it_sum.sum()) * n) ** (-beta) / max_weight
            for idx in idxes
        ])
        return tuple(list(self._encode_sample(idxes)) + [weights, idxes])

    def update_priorities(self,
                          idxes: List[int],
                          priorities: List[float]) -> None:
        """ Update priorities of the sampled transitions.

        Sets the priority of the transition at index idxes[i] in the buffer
        to priorities[i].

        Args:
            idxes (List[int]): List with the indices of the sampled transitions.
            priorities (List[float]): List with the updated priorities
                corresponding to the transitions at the sampled indices.
        """
        assert len(idxes) == len(priorities)
        for idx, priority in zip(idxes, priorities):
            assert priority > 0
            assert 0 <= idx < len(self._storage)
            scaled = priority ** self._alpha
            self._it_sum[idx] = scaled
            self._it_min[idx] = scaled
            self._max_priority = max(self._max_priority, priority)
| [
"numpy.array",
"random.random",
"pyderl.utils.data_structures.SumSegmentTree",
"pyderl.utils.data_structures.MinSegmentTree"
] | [((895, 922), 'pyderl.utils.data_structures.SumSegmentTree', 'SumSegmentTree', (['it_capacity'], {}), '(it_capacity)\n', (909, 922), False, 'from pyderl.utils.data_structures import SumSegmentTree, MinSegmentTree\n'), ((946, 973), 'pyderl.utils.data_structures.MinSegmentTree', 'MinSegmentTree', (['it_capacity'], {}), '(it_capacity)\n', (960, 973), False, 'from pyderl.utils.data_structures import SumSegmentTree, MinSegmentTree\n'), ((4443, 4460), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (4451, 4460), True, 'import numpy as np\n'), ((1887, 1904), 'numpy.array', 'np.array', (['obses_t'], {}), '(obses_t)\n', (1895, 1904), True, 'import numpy as np\n'), ((1922, 1939), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (1930, 1939), True, 'import numpy as np\n'), ((1957, 1974), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (1965, 1974), True, 'import numpy as np\n'), ((1992, 2011), 'numpy.array', 'np.array', (['obses_tp1'], {}), '(obses_tp1)\n', (2000, 2011), True, 'import numpy as np\n'), ((2029, 2044), 'numpy.array', 'np.array', (['dones'], {}), '(dones)\n', (2037, 2044), True, 'import numpy as np\n'), ((1658, 1685), 'numpy.array', 'np.array', (['obs_t'], {'copy': '(False)'}), '(obs_t, copy=False)\n', (1666, 1685), True, 'import numpy as np\n'), ((1714, 1742), 'numpy.array', 'np.array', (['action'], {'copy': '(False)'}), '(action, copy=False)\n', (1722, 1742), True, 'import numpy as np\n'), ((1808, 1837), 'numpy.array', 'np.array', (['obs_tp1'], {'copy': '(False)'}), '(obs_tp1, copy=False)\n', (1816, 1837), True, 'import numpy as np\n'), ((2556, 2571), 'random.random', 'random.random', ([], {}), '()\n', (2569, 2571), False, 'import random\n')] |
import numpy as np
import scipy.sparse as sp
import SimPEG
from SimPEG import Utils
from SimPEG.EM.Utils import omega
from SimPEG.Utils import Zero, Identity
class Fields(SimPEG.Problem.Fields):
    """
    Field storage for an FDEM survey. Exactly one field type is stored per
    problem; all other fields are computed from it on demand. The fields
    object indexes like an array:

    .. code-block:: python

        f = problem.fields(m)
        e = f[srcList,'e']
        b = f[srcList,'b']

    Use :code:`:` to pull a field for every source:

    .. code-block:: python

        f = problem.fields(m)
        e = f[:,'e']
        b = f[:,'b']

    The returned array has size (nE or nF, nSrcs :math:`\\times` nFrequencies).
    """
    # Subclasses declare which field they actually solve for.
    knownFields = {}
    # Frequency-domain fields are complex-valued.
    dtype = complex
class Fields_e(Fields):
    """
    Fields object for Problem_e.
    :param Mesh mesh: mesh
    :param Survey survey: survey
    """
    # The electric field on edges is the solved quantity; every other field
    # (primaries, secondaries, b) is derived through the alias mechanism.
    knownFields = {'eSolution':'E'}
    aliasFields = {
                    'e' : ['eSolution','E','_e'],
                    'ePrimary' : ['eSolution','E','_ePrimary'],
                    'eSecondary' : ['eSolution','E','_eSecondary'],
                    'b' : ['eSolution','F','_b'],
                    'bPrimary' : ['eSolution','F','_bPrimary'],
                    'bSecondary' : ['eSolution','F','_bSecondary']
                  }
    def __init__(self,mesh,survey,**kwargs):
        Fields.__init__(self,mesh,survey,**kwargs)
    def startup(self):
        # Cache the problem and its discrete curl once survey/problem pair up.
        self.prob = self.survey.prob
        self._edgeCurl = self.survey.prob.mesh.edgeCurl
    def _ePrimary(self, eSolution, srcList):
        """
        Primary electric field from source
        :param numpy.ndarray eSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: primary electric field as defined by the sources
        """
        ePrimary = np.zeros_like(eSolution)
        for i, src in enumerate(srcList):
            ep = src.ePrimary(self.prob)
            # one column per source
            ePrimary[:,i] = ePrimary[:,i] + ep
        return ePrimary
    def _eSecondary(self, eSolution, srcList):
        """
        Secondary electric field is the thing we solved for
        :param numpy.ndarray eSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: secondary electric field
        """
        return eSolution
    def _e(self, eSolution, srcList):
        """
        Total electric field is sum of primary and secondary
        :param numpy.ndarray eSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: total electric field
        """
        return self._ePrimary(eSolution,srcList) + self._eSecondary(eSolution,srcList)
    def _eDeriv_u(self, src, v, adjoint = False):
        """
        Derivative of the total electric field with respect to the thing we
        solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the electric field with respect to the field we solved for with a vector
        """
        # e is the solution itself, so the derivative is the identity.
        return Identity()*v
    def _eDeriv_m(self, src, v, adjoint = False):
        """
        Derivative of the total electric field with respect to the inversion model. Here, we assume that the primary does not depend on the model.
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: SimPEG.Utils.Zero
        :return: product of the electric field derivative with respect to the inversion model with a vector
        """
        # assuming primary does not depend on the model
        return Zero()
    def _bPrimary(self, eSolution, srcList):
        """
        Primary magnetic flux density from source
        :param numpy.ndarray eSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: primary magnetic flux density as defined by the sources
        """
        bPrimary = np.zeros([self._edgeCurl.shape[0],eSolution.shape[1]],dtype = complex)
        for i, src in enumerate(srcList):
            bp = src.bPrimary(self.prob)
            bPrimary[:,i] = bPrimary[:,i] + bp
        return bPrimary
    def _bSecondary(self, eSolution, srcList):
        """
        Secondary magnetic flux density from eSolution
        :param numpy.ndarray eSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: secondary magnetic flux density
        """
        # NOTE(review): b = -(1/(i*omega)) * (C*e - S_m); presumably the
        # frequency-domain Faraday's law -- confirm against the formulation.
        C = self._edgeCurl
        b = (C * eSolution)
        for i, src in enumerate(srcList):
            b[:,i] *= - 1./(1j*omega(src.freq))
            S_m, _ = src.eval(self.prob)
            b[:,i] = b[:,i]+ 1./(1j*omega(src.freq)) * S_m
        return b
    def _bSecondaryDeriv_u(self, src, v, adjoint = False):
        """
        Derivative of the secondary magnetic flux density with respect to the thing we solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the secondary magnetic flux density with respect to the field we solved for with a vector
        """
        C = self._edgeCurl
        if adjoint:
            return - 1./(1j*omega(src.freq)) * (C.T * v)
        return - 1./(1j*omega(src.freq)) * (C * v)
    def _bSecondaryDeriv_m(self, src, v, adjoint = False):
        """
        Derivative of the secondary magnetic flux density with respect to the inversion model.
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the secondary magnetic flux density derivative with respect to the inversion model with a vector
        """
        # Only the source term S_m carries model dependence here.
        S_mDeriv, _ = src.evalDeriv(self.prob, v, adjoint)
        return 1./(1j * omega(src.freq)) * S_mDeriv
    def _b(self, eSolution, srcList):
        """
        Total magnetic flux density is sum of primary and secondary
        :param numpy.ndarray eSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: total magnetic flux density
        """
        return self._bPrimary(eSolution, srcList) + self._bSecondary(eSolution, srcList)
    def _bDeriv_u(self, src, v, adjoint=False):
        """
        Derivative of the total magnetic flux density with respect to the thing we solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic flux density with respect to the field we solved for with a vector
        """
        # Primary does not depend on u
        return self._bSecondaryDeriv_u(src, v, adjoint)
    def _bDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the total magnetic flux density with respect to the inversion model.
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: SimPEG.Utils.Zero
        :return: product of the magnetic flux density derivative with respect to the inversion model with a vector
        """
        # Assuming the primary does not depend on the model
        return self._bSecondaryDeriv_m(src, v, adjoint)
class Fields_b(Fields):
    """
    Fields object for Problem_b.
    :param Mesh mesh: mesh
    :param Survey survey: survey
    """
    # The magnetic flux density on faces is the solved quantity; e is
    # reconstructed from it through the inner-product matrices.
    knownFields = {'bSolution':'F'}
    aliasFields = {
                    'b' : ['bSolution','F','_b'],
                    'bPrimary' : ['bSolution','F','_bPrimary'],
                    'bSecondary' : ['bSolution','F','_bSecondary'],
                    'e' : ['bSolution','E','_e'],
                    'ePrimary' : ['bSolution','E','_ePrimary'],
                    'eSecondary' : ['bSolution','E','_eSecondary'],
                  }
    def __init__(self,mesh,survey,**kwargs):
        Fields.__init__(self,mesh,survey,**kwargs)
    def startup(self):
        # Cache problem operators (curl, inner-product matrices and the
        # conductivity-matrix derivative) once survey/problem pair up.
        self.prob = self.survey.prob
        self._edgeCurl = self.survey.prob.mesh.edgeCurl
        self._MeSigmaI = self.survey.prob.MeSigmaI
        self._MfMui = self.survey.prob.MfMui
        self._MeSigmaIDeriv = self.survey.prob.MeSigmaIDeriv
        self._Me = self.survey.prob.Me
    def _bPrimary(self, bSolution, srcList):
        """
        Primary magnetic flux density from source
        :param numpy.ndarray bSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: primary electric field as defined by the sources
        """
        bPrimary = np.zeros_like(bSolution)
        for i, src in enumerate(srcList):
            bp = src.bPrimary(self.prob)
            bPrimary[:,i] = bPrimary[:,i] + bp
        return bPrimary
    def _bSecondary(self, bSolution, srcList):
        """
        Secondary magnetic flux density is the thing we solved for
        :param numpy.ndarray bSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: secondary magnetic flux density
        """
        return bSolution
    def _b(self, bSolution, srcList):
        """
        Total magnetic flux density is sum of primary and secondary
        :param numpy.ndarray bSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: total magnetic flux density
        """
        return self._bPrimary(bSolution, srcList) + self._bSecondary(bSolution, srcList)
    def _bDeriv_u(self, src, v, adjoint=False):
        """
        Derivative of the total magnetic flux density with respect to the thing we
        solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic flux density with respect to the field we solved for with a vector
        """
        # b is the solution itself, so the derivative is the identity.
        return Identity()*v
    def _bDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the total magnetic flux density with respect to the inversion model. Here, we assume that the primary does not depend on the model.
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: SimPEG.Utils.Zero
        :return: product of the magnetic flux density derivative with respect to the inversion model with a vector
        """
        # assuming primary does not depend on the model
        return Zero()
    def _ePrimary(self, bSolution, srcList):
        """
        Primary electric field from source
        :param numpy.ndarray bSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: primary electric field as defined by the sources
        """
        ePrimary = np.zeros([self._edgeCurl.shape[1],bSolution.shape[1]],dtype = complex)
        for i,src in enumerate(srcList):
            ep = src.ePrimary(self.prob)
            ePrimary[:,i] = ePrimary[:,i] + ep
        return ePrimary
    def _eSecondary(self, bSolution, srcList):
        """
        Secondary electric field from bSolution
        :param numpy.ndarray bSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: secondary electric field
        """
        # e = MeSigmaI * (C.T * (MfMui * b)) minus the electric source term.
        e = self._MeSigmaI * ( self._edgeCurl.T * ( self._MfMui * bSolution))
        for i,src in enumerate(srcList):
            _,S_e = src.eval(self.prob)
            e[:,i] = e[:,i]+ -self._MeSigmaI * S_e
        return e
    def _eSecondaryDeriv_u(self, src, v, adjoint=False):
        """
        Derivative of the secondary electric field with respect to the thing we solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the secondary electric field with respect to the field we solved for with a vector
        """
        if not adjoint:
            return self._MeSigmaI * ( self._edgeCurl.T * ( self._MfMui * v) )
        else:
            return self._MfMui.T * (self._edgeCurl * (self._MeSigmaI.T * v))
    def _eSecondaryDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the secondary electric field with respect to the inversion model
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the secondary electric field with respect to the model with a vector
        """
        bSolution = self[[src],'bSolution']
        _,S_e = src.eval(self.prob)
        Me = self._Me
        if adjoint:
            Me = Me.T
        # w is the quantity MeSigmaI acts on; its model derivative comes
        # through MeSigmaIDeriv, plus the source's own model dependence.
        w = self._edgeCurl.T * (self._MfMui * bSolution)
        w = w - Utils.mkvc(Me * S_e,2)
        if not adjoint:
            de_dm = self._MeSigmaIDeriv(w) * v
        elif adjoint:
            de_dm = self._MeSigmaIDeriv(w).T * v
        _, S_eDeriv = src.evalDeriv(self.prob, v, adjoint)
        de_dm = de_dm - self._MeSigmaI * S_eDeriv
        return de_dm
    def _e(self, bSolution, srcList):
        """
        Total electric field is sum of primary and secondary
        :param numpy.ndarray eSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: total electric field
        """
        return self._ePrimary(bSolution, srcList) + self._eSecondary(bSolution, srcList)
    def _eDeriv_u(self, src, v, adjoint=False):
        """
        Derivative of the total electric field with respect to the thing we solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the electric field with respect to the field we solved for with a vector
        """
        return self._eSecondaryDeriv_u(src, v, adjoint)
    def _eDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the total electric field density with respect to the inversion model.
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the electric field derivative with respect to the inversion model with a vector
        """
        # assuming primary doesn't depend on model
        return self._eSecondaryDeriv_m(src, v, adjoint)
class Fields_j(Fields):
    """
    Fields object for Problem_j.
    :param Mesh mesh: mesh
    :param Survey survey: survey
    """
    # The current density on faces is the solved quantity; h is
    # reconstructed from it through the inner-product matrices.
    knownFields = {'jSolution':'F'}
    aliasFields = {
                    'j' : ['jSolution','F','_j'],
                    'jPrimary' : ['jSolution','F','_jPrimary'],
                    'jSecondary' : ['jSolution','F','_jSecondary'],
                    'h' : ['jSolution','E','_h'],
                    'hPrimary' : ['jSolution','E','_hPrimary'],
                    'hSecondary' : ['jSolution','E','_hSecondary'],
                  }
    def __init__(self,mesh,survey,**kwargs):
        Fields.__init__(self,mesh,survey,**kwargs)
    def startup(self):
        # Cache problem operators (curl, inner-product matrices and the
        # resistivity-matrix derivative) once survey/problem pair up.
        self.prob = self.survey.prob
        self._edgeCurl = self.survey.prob.mesh.edgeCurl
        self._MeMuI = self.survey.prob.MeMuI
        self._MfRho = self.survey.prob.MfRho
        self._MfRhoDeriv = self.survey.prob.MfRhoDeriv
        self._Me = self.survey.prob.Me
    def _jPrimary(self, jSolution, srcList):
        """
        Primary current density from source
        :param numpy.ndarray jSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: primary current density as defined by the sources
        """
        jPrimary = np.zeros_like(jSolution,dtype = complex)
        for i, src in enumerate(srcList):
            jp = src.jPrimary(self.prob)
            jPrimary[:,i] = jPrimary[:,i] + jp
        return jPrimary
    def _jSecondary(self, jSolution, srcList):
        """
        Secondary current density is the thing we solved for
        :param numpy.ndarray jSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: secondary current density
        """
        return jSolution
    def _j(self, jSolution, srcList):
        """
        Total current density is sum of primary and secondary
        :param numpy.ndarray jSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: total current density
        """
        return self._jPrimary(jSolution, srcList) + self._jSecondary(jSolution, srcList)
    def _jDeriv_u(self, src, v, adjoint=False):
        """
        Derivative of the total current density with respect to the thing we
        solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the current density with respect to the field we solved for with a vector
        """
        # j is the solution itself, so the derivative is the identity.
        return Identity()*v
    def _jDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the total current density with respect to the inversion model. Here, we assume that the primary does not depend on the model.
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: SimPEG.Utils.Zero
        :return: product of the current density derivative with respect to the inversion model with a vector
        """
        # assuming primary does not depend on the model
        return Zero()
    def _hPrimary(self, jSolution, srcList):
        """
        Primary magnetic field from source
        :param numpy.ndarray hSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: primary magnetic field as defined by the sources
        """
        hPrimary = np.zeros([self._edgeCurl.shape[1],jSolution.shape[1]],dtype = complex)
        for i, src in enumerate(srcList):
            hp = src.hPrimary(self.prob)
            hPrimary[:,i] = hPrimary[:,i] + hp
        return hPrimary
    def _hSecondary(self, jSolution, srcList):
        """
        Secondary magnetic field from bSolution
        :param numpy.ndarray jSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: secondary magnetic field
        """
        # NOTE(review): h = -(1/(i*omega)) * MeMuI * (C.T*(MfRho*j) - S_m);
        # presumably the frequency-domain Faraday's law -- confirm.
        h = self._MeMuI * (self._edgeCurl.T * (self._MfRho * jSolution) )
        for i, src in enumerate(srcList):
            h[:,i] *= -1./(1j*omega(src.freq))
            S_m,_ = src.eval(self.prob)
            h[:,i] = h[:,i]+ 1./(1j*omega(src.freq)) * self._MeMuI * (S_m)
        return h
    def _hSecondaryDeriv_u(self, src, v, adjoint=False):
        """
        Derivative of the secondary magnetic field with respect to the thing we solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the secondary magnetic field with respect to the field we solved for with a vector
        """
        if not adjoint:
            return -1./(1j*omega(src.freq)) * self._MeMuI * (self._edgeCurl.T * (self._MfRho * v) )
        elif adjoint:
            return -1./(1j*omega(src.freq)) * self._MfRho.T * (self._edgeCurl * ( self._MeMuI.T * v))
    def _hSecondaryDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the secondary magnetic field with respect to the inversion model
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the secondary magnetic field with respect to the model with a vector
        """
        jSolution = self[[src],'jSolution']
        MeMuI = self._MeMuI
        C = self._edgeCurl
        MfRho = self._MfRho
        MfRhoDeriv = self._MfRhoDeriv
        Me = self._Me
        # Model dependence enters through MfRho (resistivity) and the source.
        if not adjoint:
            hDeriv_m = -1./(1j*omega(src.freq)) * MeMuI * (C.T * (MfRhoDeriv(jSolution)*v ) )
        elif adjoint:
            hDeriv_m = -1./(1j*omega(src.freq)) * MfRhoDeriv(jSolution).T * ( C * (MeMuI.T * v ) )
        # NOTE(review): evalDeriv is called without v here, so S_mDeriv is
        # presumably returned as a callable -- confirm against the Src API.
        S_mDeriv,_ = src.evalDeriv(self.prob, adjoint = adjoint)
        if not adjoint:
            S_mDeriv = S_mDeriv(v)
            hDeriv_m = hDeriv_m + 1./(1j*omega(src.freq)) * MeMuI * (Me * S_mDeriv)
        elif adjoint:
            S_mDeriv = S_mDeriv(Me.T * (MeMuI.T * v))
            hDeriv_m = hDeriv_m + 1./(1j*omega(src.freq)) * S_mDeriv
        return hDeriv_m
    def _h(self, jSolution, srcList):
        """
        Total magnetic field is sum of primary and secondary
        :param numpy.ndarray eSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: total magnetic field
        """
        return self._hPrimary(jSolution, srcList) + self._hSecondary(jSolution, srcList)
    def _hDeriv_u(self, src, v, adjoint=False):
        """
        Derivative of the total magnetic field with respect to the thing we solved for
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the derivative of the magnetic field with respect to the field we solved for with a vector
        """
        return self._hSecondaryDeriv_u(src, v, adjoint)
    def _hDeriv_m(self, src, v, adjoint=False):
        """
        Derivative of the total magnetic field density with respect to the inversion model.
        :param SimPEG.EM.FDEM.Src src: source
        :param numpy.ndarray v: vector to take product with
        :param bool adjoint: adjoint?
        :rtype: numpy.ndarray
        :return: product of the magnetic field derivative with respect to the inversion model with a vector
        """
        # assuming the primary doesn't depend on the model
        return self._hSecondaryDeriv_m(src, v, adjoint)
class Fields_h(Fields):
"""
Fields object for Problem_h.
:param Mesh mesh: mesh
:param Survey survey: survey
"""
knownFields = {'hSolution':'E'}
aliasFields = {
'h' : ['hSolution','E','_h'],
'hPrimary' : ['hSolution','E','_hPrimary'],
'hSecondary' : ['hSolution','E','_hSecondary'],
'j' : ['hSolution','F','_j'],
'jPrimary' : ['hSolution','F','_jPrimary'],
'jSecondary' : ['hSolution','F','_jSecondary']
}
    def __init__(self,mesh,survey,**kwargs):
        # Delegate all storage setup to the generic FDEM Fields base class.
        Fields.__init__(self,mesh,survey,**kwargs)
    def startup(self):
        """Cache problem operators once the survey/problem pairing exists."""
        self.prob = self.survey.prob
        self._edgeCurl = self.survey.prob.mesh.edgeCurl
        self._MeMuI = self.survey.prob.MeMuI
        self._MfRho = self.survey.prob.MfRho
    def _hPrimary(self, hSolution, srcList):
        """
        Primary magnetic field from source
        :param numpy.ndarray hSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: primary magnetic field as defined by the sources
        """
        hPrimary = np.zeros_like(hSolution,dtype = complex)
        for i, src in enumerate(srcList):
            hp = src.hPrimary(self.prob)
            # one column per source
            hPrimary[:,i] = hPrimary[:,i] + hp
        return hPrimary
def _hSecondary(self, hSolution, srcList):
"""
Secondary magnetic field is the thing we solved for
:param numpy.ndarray hSolution: field we solved for
:param list srcList: list of sources
:rtype: numpy.ndarray
:return: secondary magnetic field
"""
return hSolution
def _h(self, hSolution, srcList):
"""
Total magnetic field is sum of primary and secondary
:param numpy.ndarray hSolution: field we solved for
:param list srcList: list of sources
:rtype: numpy.ndarray
:return: total magnetic field
"""
return self._hPrimary(hSolution, srcList) + self._hSecondary(hSolution, srcList)
def _hDeriv_u(self, src, v, adjoint=False):
"""
Derivative of the total magnetic field with respect to the thing we
solved for
:param SimPEG.EM.FDEM.Src src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the derivative of the magnetic field with respect to the field we solved for with a vector
"""
return Identity()*v
def _hDeriv_m(self, src, v, adjoint=False):
"""
Derivative of the total magnetic field with respect to the inversion model. Here, we assume that the primary does not depend on the model.
:param SimPEG.EM.FDEM.Src src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: SimPEG.Utils.Zero
:return: product of the magnetic field derivative with respect to the inversion model with a vector
"""
# assuming primary does not depend on the model
return Zero()
    def _jPrimary(self, hSolution, srcList):
        """
        Primary current density from source
        :param numpy.ndarray hSolution: field we solved for
        :param list srcList: list of sources
        :rtype: numpy.ndarray
        :return: primary current density as defined by the sources
        """
        # j lives on faces (edgeCurl rows), one column per source column.
        jPrimary = np.zeros([self._edgeCurl.shape[0], hSolution.shape[1]], dtype = complex)
        for i, src in enumerate(srcList):
            jp = src.jPrimary(self.prob)
            jPrimary[:,i] = jPrimary[:,i] + jp
        return jPrimary
def _jSecondary(self, hSolution, srcList):
"""
Secondary current density from eSolution
:param numpy.ndarray hSolution: field we solved for
:param list srcList: list of sources
:rtype: numpy.ndarray
:return: secondary current density
"""
j = self._edgeCurl*hSolution
for i, src in enumerate(srcList):
_,S_e = src.eval(self.prob)
j[:,i] = j[:,i]+ -S_e
return j
def _jSecondaryDeriv_u(self, src, v, adjoint=False):
"""
Derivative of the secondary current density with respect to the thing we solved for
:param SimPEG.EM.FDEM.Src src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the derivative of the secondary current density with respect to the field we solved for with a vector
"""
if not adjoint:
return self._edgeCurl*v
elif adjoint:
return self._edgeCurl.T*v
def _jSecondaryDeriv_m(self, src, v, adjoint=False):
"""
Derivative of the secondary current density with respect to the inversion model.
:param SimPEG.EM.FDEM.Src src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the secondary current density derivative with respect to the inversion model with a vector
"""
_,S_eDeriv = src.evalDeriv(self.prob, v, adjoint)
return -S_eDeriv
def _j(self, hSolution, srcList):
"""
Total current density is sum of primary and secondary
:param numpy.ndarray eSolution: field we solved for
:param list srcList: list of sources
:rtype: numpy.ndarray
:return: total current density
"""
return self._jPrimary(hSolution, srcList) + self._jSecondary(hSolution, srcList)
def _jDeriv_u(self, src, v, adjoint=False):
"""
Derivative of the total current density with respect to the thing we solved for
:param SimPEG.EM.FDEM.Src src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
:return: product of the derivative of the current density with respect to the field we solved for with a vector
"""
return self._jSecondaryDeriv_u(src,v,adjoint)
def _jDeriv_m(self, src, v, adjoint=False):
"""
Derivative of the total current density with respect to the inversion model.
:param SimPEG.EM.FDEM.Src src: source
:param numpy.ndarray v: vector to take product with
:param bool adjoint: adjoint?
:rtype: SimPEG.Utils.Zero
:return: product of the current density with respect to the inversion model with a vector
"""
# assuming the primary does not depend on the model
return self._jSecondaryDeriv_m(src,v,adjoint)
| [
"numpy.zeros_like",
"SimPEG.Utils.mkvc",
"numpy.zeros",
"SimPEG.Utils.Identity",
"SimPEG.Utils.Zero",
"SimPEG.EM.Utils.omega"
] | [((1924, 1948), 'numpy.zeros_like', 'np.zeros_like', (['eSolution'], {}), '(eSolution)\n', (1937, 1948), True, 'import numpy as np\n'), ((3926, 3932), 'SimPEG.Utils.Zero', 'Zero', ([], {}), '()\n', (3930, 3932), False, 'from SimPEG.Utils import Zero, Identity\n'), ((4282, 4352), 'numpy.zeros', 'np.zeros', (['[self._edgeCurl.shape[0], eSolution.shape[1]]'], {'dtype': 'complex'}), '([self._edgeCurl.shape[0], eSolution.shape[1]], dtype=complex)\n', (4290, 4352), True, 'import numpy as np\n'), ((9238, 9262), 'numpy.zeros_like', 'np.zeros_like', (['bSolution'], {}), '(bSolution)\n', (9251, 9262), True, 'import numpy as np\n'), ((11294, 11300), 'SimPEG.Utils.Zero', 'Zero', ([], {}), '()\n', (11298, 11300), False, 'from SimPEG.Utils import Zero, Identity\n'), ((11636, 11706), 'numpy.zeros', 'np.zeros', (['[self._edgeCurl.shape[1], bSolution.shape[1]]'], {'dtype': 'complex'}), '([self._edgeCurl.shape[1], bSolution.shape[1]], dtype=complex)\n', (11644, 11706), True, 'import numpy as np\n'), ((16848, 16887), 'numpy.zeros_like', 'np.zeros_like', (['jSolution'], {'dtype': 'complex'}), '(jSolution, dtype=complex)\n', (16861, 16887), True, 'import numpy as np\n'), ((18872, 18878), 'SimPEG.Utils.Zero', 'Zero', ([], {}), '()\n', (18876, 18878), False, 'from SimPEG.Utils import Zero, Identity\n'), ((19214, 19284), 'numpy.zeros', 'np.zeros', (['[self._edgeCurl.shape[1], jSolution.shape[1]]'], {'dtype': 'complex'}), '([self._edgeCurl.shape[1], jSolution.shape[1]], dtype=complex)\n', (19222, 19284), True, 'import numpy as np\n'), ((24755, 24794), 'numpy.zeros_like', 'np.zeros_like', (['hSolution'], {'dtype': 'complex'}), '(hSolution, dtype=complex)\n', (24768, 24794), True, 'import numpy as np\n'), ((26771, 26777), 'SimPEG.Utils.Zero', 'Zero', ([], {}), '()\n', (26775, 26777), False, 'from SimPEG.Utils import Zero, Identity\n'), ((27115, 27185), 'numpy.zeros', 'np.zeros', (['[self._edgeCurl.shape[0], hSolution.shape[1]]'], {'dtype': 'complex'}), '([self._edgeCurl.shape[0], 
hSolution.shape[1]], dtype=complex)\n', (27123, 27185), True, 'import numpy as np\n'), ((3323, 3333), 'SimPEG.Utils.Identity', 'Identity', ([], {}), '()\n', (3331, 3333), False, 'from SimPEG.Utils import Zero, Identity\n'), ((10679, 10689), 'SimPEG.Utils.Identity', 'Identity', ([], {}), '()\n', (10687, 10689), False, 'from SimPEG.Utils import Zero, Identity\n'), ((13753, 13776), 'SimPEG.Utils.mkvc', 'Utils.mkvc', (['(Me * S_e)', '(2)'], {}), '(Me * S_e, 2)\n', (13763, 13776), False, 'from SimPEG import Utils\n'), ((18270, 18280), 'SimPEG.Utils.Identity', 'Identity', ([], {}), '()\n', (18278, 18280), False, 'from SimPEG.Utils import Zero, Identity\n'), ((26170, 26180), 'SimPEG.Utils.Identity', 'Identity', ([], {}), '()\n', (26178, 26180), False, 'from SimPEG.Utils import Zero, Identity\n'), ((4948, 4963), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (4953, 4963), False, 'from SimPEG.EM.Utils import omega\n'), ((5712, 5727), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (5717, 5727), False, 'from SimPEG.EM.Utils import omega\n'), ((6311, 6326), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (6316, 6326), False, 'from SimPEG.EM.Utils import omega\n'), ((19885, 19900), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (19890, 19900), False, 'from SimPEG.EM.Utils import omega\n'), ((5659, 5674), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (5664, 5674), False, 'from SimPEG.EM.Utils import omega\n'), ((5042, 5057), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (5047, 5057), False, 'from SimPEG.EM.Utils import omega\n'), ((20572, 20587), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (20577, 20587), False, 'from SimPEG.EM.Utils import omega\n'), ((21483, 21498), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (21488, 21498), False, 'from SimPEG.EM.Utils import omega\n'), ((19978, 19993), 
'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (19983, 19993), False, 'from SimPEG.EM.Utils import omega\n'), ((20695, 20710), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (20700, 20710), False, 'from SimPEG.EM.Utils import omega\n'), ((21600, 21615), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (21605, 21615), False, 'from SimPEG.EM.Utils import omega\n'), ((21835, 21850), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (21840, 21850), False, 'from SimPEG.EM.Utils import omega\n'), ((21995, 22010), 'SimPEG.EM.Utils.omega', 'omega', (['src.freq'], {}), '(src.freq)\n', (22000, 22010), False, 'from SimPEG.EM.Utils import omega\n')] |
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy import linalg
from ..dmd import get_dmd, exact_dmd, get_amplitude_spectrum
class TestDMD:
    """Unit tests for the `dmd` module."""

    def test__synthetic_example_1__should_find_two_eigenvalues(self):
        # Iterate a 2x2 upper-triangular map; exact_dmd should recover its
        # eigenvalues (3 and 1) exactly.
        t = np.linspace(0, 1, num=11)
        A = np.array([[1, -2],
                      [0, +3]])
        y = np.empty((2, len(t)))
        y[:, 0] = [1, 1]
        for k in range(1, len(t)):
            y[:, k] = A.dot(y[:, k - 1])
        X, Y = y[:, :-1], y[:, 1:]
        eigvals, Phi, s = exact_dmd(X, Y)
        assert_allclose(eigvals, np.array([3, 1]), rtol=1e-8, atol=0)

    def test__synthetic_example_2__should_find_two_eigenvalues(self):
        # A single damped oscillation yields the conjugate pair
        # alpha -/+ i*omega.
        alpha, phi = -0.093, np.pi / 3
        omega = 2 * np.pi * 0.14
        t, dt = np.linspace(0, 50, num=501, retstep=True)
        z = np.exp(alpha * t) * np.cos(omega * t + phi)
        X, Y = self._build_input_matrices(z, L=2)
        lamda, Phi, Psi, amps, z_hat, err_res = get_dmd(t, X, Y)
        err_fit = linalg.norm(z_hat - z) / linalg.norm(z)
        assert_allclose(lamda, np.array([alpha - 1j * omega, alpha + 1j * omega]),
                        rtol=1e-12, atol=0)
        assert_allclose(err_res, 0, rtol=0, atol=1e-12)
        assert_allclose(err_fit, 0, rtol=0, atol=1e-12)

    def test__synthetic_example_3__should_find_six_eigenvalues(self):
        # Three damped modes -> six eigenvalues (three conjugate pairs).
        alpha = np.array([0.029, 0.057, -0.01])
        a = np.array([1e-05, 2e-05, 1e-05])
        omega = np.array([0.9, 0.125, 0.01])
        phi = np.array([np.pi / 2, np.pi / 3, np.pi])
        t = np.linspace(0, 50, num=501)
        z = 0 * t
        for k in range(3):
            z = z + a[k] * np.exp(alpha[k] * t) * np.cos(omega[k] * t + phi[k])
        X, Y = self._build_input_matrices(z, L=40)
        lamda, Phi, Psi, amps, z_hat, err_res = get_dmd(t, X, Y)
        # Eigenvalues come back sorted by increasing imaginary part.
        expect_eigvals = np.concatenate([alpha - 1j * omega,
                                         (alpha + 1j * omega)[::-1]])
        err_fit = linalg.norm(z_hat - z) / linalg.norm(z)
        assert_allclose(lamda, expect_eigvals, rtol=1e-4, atol=0)
        assert_allclose(err_res, 0, rtol=0, atol=1e-12)
        assert_allclose(err_fit, 0, rtol=0, atol=1e-8)

    def test__synthetic_example_4__should_find_two_eigenvalues(self):
        # Almost neutrally stable mode with a tiny amplitude.
        alpha, a = -2.52e-3, 1e-10
        omega, phi = 3.7e-1, np.pi / 2.6
        t = np.linspace(0, 50, num=50 * 200 + 1)
        z = a * np.exp(alpha * t) * np.cos(omega * t + phi)
        X, Y = self._build_input_matrices(z, L=500)
        lamda, Phi, Psi, amps, z_hat, err_res = get_dmd(t, X, Y)
        err_fit = linalg.norm(z_hat - z) / linalg.norm(z)
        assert_allclose(lamda, np.array([alpha - 1j * omega, alpha + 1j * omega]),
                        rtol=1e-12, atol=0)
        assert_allclose(err_res, 0, rtol=0, atol=1e-12)
        assert_allclose(err_fit, 0, rtol=0, atol=1e-10)

    def _build_input_matrices(self, z, L=0):
        # Delay-embed the scalar signal into snapshot matrices:
        # X holds L shifted copies, Y the same copies one step ahead.
        n_cols = len(z) - L
        X = np.zeros((L, n_cols))
        Y = np.zeros((L, n_cols))
        for row in range(L):
            X[row, :] = z[row:-L + row]
        for row in range(L - 1):
            Y[row, :] = z[row + 1:-L + row + 1]
        Y[L - 1, :] = z[L:]
        return X, Y
class TestDMD_get_amplitude_spectrum:
    def test__periodic_signal__should_restore_spectrum_correctly(self):
        # A pure sine of amplitude `a` at frequency `f` should produce a
        # two-sided spectrum with peaks of height a/2 at +/- f.
        t = np.linspace(0, 6, num=61)
        a, f = 1.0, 2.1
        z = a * np.sin(2 * np.pi * f * t)
        L = 57
        # Delay-embed the signal into the X/Y snapshot matrices.
        n_cols = len(z) - L
        X = np.zeros((L, n_cols))
        Y = np.zeros((L, n_cols))
        for row in range(L):
            X[row, :] = z[row:-L + row]
        for row in range(L - 1):
            Y[row, :] = z[row + 1:-L + row + 1]
        Y[L - 1, :] = z[L:]
        lamdas, __, __, amps, __, __ = get_dmd(t, X, Y)
        freqs, spectrum = get_amplitude_spectrum(lamdas, amps, L)
        assert_equal(len(freqs), 2)
        assert_equal(len(spectrum), 2)
        assert_allclose(np.abs(freqs), f, rtol=1e-8)
        assert_allclose(spectrum, a / 2, rtol=1e-8)
| [
"numpy.abs",
"numpy.empty",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"numpy.cos",
"scipy.linalg.norm",
"numpy.testing.assert_allclose"
] | [((306, 331), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(11)'}), '(0, 1, num=11)\n', (317, 331), True, 'import numpy as np\n'), ((405, 433), 'numpy.array', 'np.array', (['[[1, -2], [0, +3]]'], {}), '([[1, -2], [0, +3]])\n', (413, 433), True, 'import numpy as np\n'), ((493, 509), 'numpy.array', 'np.array', (['[3, 1]'], {}), '([3, 1])\n', (501, 509), True, 'import numpy as np\n'), ((680, 747), 'numpy.testing.assert_allclose', 'assert_allclose', (['actual_eigvals', 'expect_eigvals'], {'rtol': '(1e-08)', 'atol': '(0)'}), '(actual_eigvals, expect_eigvals, rtol=1e-08, atol=0)\n', (695, 747), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((911, 952), 'numpy.linspace', 'np.linspace', (['(0)', '(50)'], {'num': '(501)', 'retstep': '(True)'}), '(0, 50, num=501, retstep=True)\n', (922, 952), True, 'import numpy as np\n'), ((1151, 1205), 'numpy.array', 'np.array', (['[alpha - 1.0j * omega, alpha + 1.0j * omega]'], {}), '([alpha - 1.0j * omega, alpha + 1.0j * omega])\n', (1159, 1205), True, 'import numpy as np\n'), ((1265, 1323), 'numpy.testing.assert_allclose', 'assert_allclose', (['lamda', 'expect_eigvals'], {'rtol': '(1e-12)', 'atol': '(0)'}), '(lamda, expect_eigvals, rtol=1e-12, atol=0)\n', (1280, 1323), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((1332, 1379), 'numpy.testing.assert_allclose', 'assert_allclose', (['err_res', '(0)'], {'rtol': '(0)', 'atol': '(1e-12)'}), '(err_res, 0, rtol=0, atol=1e-12)\n', (1347, 1379), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((1388, 1435), 'numpy.testing.assert_allclose', 'assert_allclose', (['err_fit', '(0)'], {'rtol': '(0)', 'atol': '(1e-12)'}), '(err_fit, 0, rtol=0, atol=1e-12)\n', (1403, 1435), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((1523, 1534), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (1531, 1534), True, 'import numpy as np\n'), ((1623, 1634), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', 
(1631, 1634), True, 'import numpy as np\n'), ((1712, 1723), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (1720, 1723), True, 'import numpy as np\n'), ((1811, 1822), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (1819, 1822), True, 'import numpy as np\n'), ((1909, 1936), 'numpy.linspace', 'np.linspace', (['(0)', '(50)'], {'num': '(501)'}), '(0, 50, num=501)\n', (1920, 1936), True, 'import numpy as np\n'), ((2263, 2449), 'numpy.array', 'np.array', (['[alpha[0] - 1.0j * omega[0], alpha[1] - 1.0j * omega[1], alpha[2] - 1.0j *\n omega[2], alpha[2] + 1.0j * omega[2], alpha[1] + 1.0j * omega[1], alpha\n [0] + 1.0j * omega[0]]'], {}), '([alpha[0] - 1.0j * omega[0], alpha[1] - 1.0j * omega[1], alpha[2] -\n 1.0j * omega[2], alpha[2] + 1.0j * omega[2], alpha[1] + 1.0j * omega[1],\n alpha[0] + 1.0j * omega[0]])\n', (2271, 2449), True, 'import numpy as np\n'), ((2533, 2592), 'numpy.testing.assert_allclose', 'assert_allclose', (['lamda', 'expect_eigvals'], {'rtol': '(0.0001)', 'atol': '(0)'}), '(lamda, expect_eigvals, rtol=0.0001, atol=0)\n', (2548, 2592), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2599, 2646), 'numpy.testing.assert_allclose', 'assert_allclose', (['err_res', '(0)'], {'rtol': '(0)', 'atol': '(1e-12)'}), '(err_res, 0, rtol=0, atol=1e-12)\n', (2614, 2646), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2655, 2702), 'numpy.testing.assert_allclose', 'assert_allclose', (['err_fit', '(0)'], {'rtol': '(0)', 'atol': '(1e-08)'}), '(err_fit, 0, rtol=0, atol=1e-08)\n', (2670, 2702), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2923, 2959), 'numpy.linspace', 'np.linspace', (['(0)', '(50)'], {'num': '(50 * 200 + 1)'}), '(0, 50, num=50 * 200 + 1)\n', (2934, 2959), True, 'import numpy as np\n'), ((3234, 3288), 'numpy.array', 'np.array', (['[alpha - 1.0j * omega, alpha + 1.0j * omega]'], {}), '([alpha - 1.0j * omega, alpha + 1.0j * omega])\n', (3242, 3288), True, 'import numpy as np\n'), 
((3372, 3430), 'numpy.testing.assert_allclose', 'assert_allclose', (['lamda', 'expect_eigvals'], {'rtol': '(1e-12)', 'atol': '(0)'}), '(lamda, expect_eigvals, rtol=1e-12, atol=0)\n', (3387, 3430), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3439, 3486), 'numpy.testing.assert_allclose', 'assert_allclose', (['err_res', '(0)'], {'rtol': '(0)', 'atol': '(1e-12)'}), '(err_res, 0, rtol=0, atol=1e-12)\n', (3454, 3486), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3495, 3542), 'numpy.testing.assert_allclose', 'assert_allclose', (['err_fit', '(0)'], {'rtol': '(0)', 'atol': '(1e-10)'}), '(err_fit, 0, rtol=0, atol=1e-10)\n', (3510, 3542), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3958, 3983), 'numpy.linspace', 'np.linspace', (['(0)', '(6)'], {'num': '(61)'}), '(0, 6, num=61)\n', (3969, 3983), True, 'import numpy as np\n'), ((4554, 4598), 'numpy.testing.assert_allclose', 'assert_allclose', (['spectrum', '(a / 2)'], {'rtol': '(1e-08)'}), '(spectrum, a / 2, rtol=1e-08)\n', (4569, 4598), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((965, 982), 'numpy.exp', 'np.exp', (['(alpha * t)'], {}), '(alpha * t)\n', (971, 982), True, 'import numpy as np\n'), ((985, 1008), 'numpy.cos', 'np.cos', (['(omega * t + phi)'], {}), '(omega * t + phi)\n', (991, 1008), True, 'import numpy as np\n'), ((1216, 1238), 'scipy.linalg.norm', 'linalg.norm', (['(z_hat - z)'], {}), '(z_hat - z)\n', (1227, 1238), False, 'from scipy import linalg\n'), ((1241, 1255), 'scipy.linalg.norm', 'linalg.norm', (['z'], {}), '(z)\n', (1252, 1255), False, 'from scipy import linalg\n'), ((2484, 2506), 'scipy.linalg.norm', 'linalg.norm', (['(z_hat - z)'], {}), '(z_hat - z)\n', (2495, 2506), False, 'from scipy import linalg\n'), ((2509, 2523), 'scipy.linalg.norm', 'linalg.norm', (['z'], {}), '(z)\n', (2520, 2523), False, 'from scipy import linalg\n'), ((3011, 3034), 'numpy.cos', 'np.cos', (['(omega * t + phi)'], {}), 
'(omega * t + phi)\n', (3017, 3034), True, 'import numpy as np\n'), ((3323, 3345), 'scipy.linalg.norm', 'linalg.norm', (['(z_hat - z)'], {}), '(z_hat - z)\n', (3334, 3345), False, 'from scipy import linalg\n'), ((3348, 3362), 'scipy.linalg.norm', 'linalg.norm', (['z'], {}), '(z)\n', (3359, 3362), False, 'from scipy import linalg\n'), ((4032, 4057), 'numpy.sin', 'np.sin', (['(2 * np.pi * f * t)'], {}), '(2 * np.pi * f * t)\n', (4038, 4057), True, 'import numpy as np\n'), ((4517, 4530), 'numpy.abs', 'np.abs', (['freqs'], {}), '(freqs)\n', (4523, 4530), True, 'import numpy as np\n'), ((2993, 3010), 'numpy.exp', 'np.exp', (['(alpha * t)'], {}), '(alpha * t)\n', (2999, 3010), True, 'import numpy as np\n'), ((2035, 2064), 'numpy.cos', 'np.cos', (['(omega[i] * t + phi[i])'], {}), '(omega[i] * t + phi[i])\n', (2041, 2064), True, 'import numpy as np\n'), ((2014, 2034), 'numpy.exp', 'np.exp', (['(alpha[i] * t)'], {}), '(alpha[i] * t)\n', (2020, 2034), True, 'import numpy as np\n')] |
from collections import deque

import numpy as np
class RollingCircularMean(object):
    """Rolling circular mean over the most recent `size` angles.

    Angles are supplied in degrees via :meth:`insert_data`, stored
    internally in radians, and the current mean is returned in degrees
    by :meth:`value`.
    """

    def __init__(self, size=800):
        # A deque with maxlen discards the oldest sample automatically in
        # O(1), replacing the previous list + O(n) list.pop(0) ring buffer.
        self.size = size
        self.data = deque(maxlen=size)

    def insert_data(self, item):
        """Add one angle, given in degrees."""
        self.data.append(np.deg2rad(item))

    def value(self):
        """Return the circular mean (degrees) of the stored window.

        Returns 0.0 when no data has been inserted yet.
        """
        if self.data:
            return np.rad2deg(circmean(self.data))
        return 0.0
# utility func
# ---------------------------------------------------------------------------------
def circmean(alpha, axis=None):
    """Return the circular mean (radians) of the angles in `alpha` along `axis`."""
    mean_sin = np.mean(np.sin(alpha), axis)
    mean_cos = np.mean(np.cos(alpha), axis)
    return np.arctan2(mean_sin, mean_cos)
| [
"numpy.sin",
"numpy.cos",
"numpy.deg2rad"
] | [((196, 212), 'numpy.deg2rad', 'np.deg2rad', (['item'], {}), '(item)\n', (206, 212), True, 'import numpy as np\n'), ((582, 595), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (588, 595), True, 'import numpy as np\n'), ((610, 623), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (616, 623), True, 'import numpy as np\n')] |
import os
import cv2
import numpy as np
start_idx = 1
PREDEFINE_LEN = 6
anno_num = 0
total_anno = 0
class generating:
    """Extract fixed-length action annotations (and optionally clips) from a
    video using per-frame bounding-box labels.

    Reads ``<label_path>/<video-stem>.txt`` (rows of ``frame x1 y1 x2 y2``),
    merges per-frame boxes into whole actions of the form
    ``[start_frame, end_frame, x1, y1, x2, y2]`` and writes them as
    ``<stem>.txt`` into ``dst_dir``. :meth:`start` additionally cuts the
    matching frame ranges into small ``.mp4`` clips.
    """

    def __init__(self, path, label_path, dst_dir):
        self.Path = path  # path of the source video
        self.base_name = os.path.basename(self.Path)
        self.base_name = os.path.splitext(self.base_name)[0]
        # per-video bounding-box annotation file
        self.label_path = os.path.join(label_path, self.base_name + '.txt')
        self.dst_dir = dst_dir
        self.target_actions = []
        os.makedirs(self.dst_dir, exist_ok=True)
        global anno_num
        if os.path.exists(self.label_path):
            self._all_boxes = np.loadtxt(self.label_path, dtype='float32')
            self._all_boxes = self._all_boxes.reshape([-1, 5])
            # sort annotations by frame index before merging
            self._all_boxes = self._all_boxes[np.argsort(self._all_boxes[:, 0])]
            self.target_actions = self.extract_whole_action(self._all_boxes)
            with open(os.path.join(self.dst_dir, self.base_name + '.txt'), 'w') as outfile:
                for action in self.target_actions:
                    anno_num += 1
                    outfile.write('{} {}'.format(int(action[0]), int(action[1])) + " " + " ".join([str(a) for a in action[2:]]) + '\n')
        else:
            print('this video has none box annotation')

    def bbox_iou(self, box1, box2, x1y1x2y2=True, GIoU=False):
        """Return the IoU of `box1` and `box2`.

        Boxes are corner (x1, y1, x2, y2) coordinates when `x1y1x2y2` is
        True, otherwise center (x, y, w, h). `GIoU` is accepted for
        signature compatibility but unused.
        """
        if x1y1x2y2:
            b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
            b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        else:
            # convert center/width/height to corner coordinates
            b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
            b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
            b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
            b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
        # Clamp the overlap extents at 0. The original multiplied two
        # possibly-negative extents, so diagonally disjoint boxes got a
        # positive "IoU" and could be merged spuriously.
        inter_w = max(0, min(b1_x2, b2_x2) - max(b1_x1, b2_x1))
        inter_h = max(0, min(b1_y2, b2_y2) - max(b1_y1, b2_y1))
        inter_area = inter_w * inter_h
        union_area = ((b1_x2 - b1_x1) * (b1_y2 - b1_y1) + 1e-16) + \
                     (b2_x2 - b2_x1) * (b2_y2 - b2_y1) - inter_area
        iou = inter_area / union_area  # iou
        return iou

    def extract_whole_action(self, anno_boxes):
        """Merge per-frame boxes into whole actions.

        A new box extends an existing action when it starts at that
        action's current end frame and overlaps it (IoU > 0.1); otherwise
        it opens a new PREDEFINE_LEN-frame action.
        """
        sorted_anno = anno_boxes[np.argsort(anno_boxes[:, 0])]
        target_actions = np.ones([0, 6], 'float32')
        global total_anno
        for anno in sorted_anno:
            total_anno += 1
            loc = np.where(target_actions[:, 0] == anno[0])   # actions starting at this frame
            loc2 = np.where(target_actions[:, 1] == anno[0])  # actions ending at this frame
            if loc[0].shape[0] == 0:
                if loc2[0].shape[0] == 0:
                    # no action touches this frame: open a new one
                    target_action = np.ones([1, 6], 'float32')
                    target_action[0, 0] = anno[0]
                    target_action[0, 1] = anno[0] + PREDEFINE_LEN
                    target_action[0, 2:6] = anno[1:]
                    target_actions = np.concatenate((target_actions, target_action), axis=0)
                else:
                    cur_actions = target_actions[loc2]
                    is_exist = False  # does this box continue an existing action?
                    for cur_action in cur_actions:
                        iou = self.bbox_iou(anno[1:], cur_action[2:])
                        if (iou > 0.1):
                            # extend the action and grow its box to the union
                            cur_action[1] = anno[0] + PREDEFINE_LEN
                            cur_action[2], cur_action[3] = min(anno[1], cur_action[2]), min(anno[2], cur_action[3])
                            cur_action[4], cur_action[5] = max(anno[3], cur_action[4]), max(anno[4], cur_action[5])
                            is_exist = True
                    if not is_exist:
                        target_action = np.ones([1, 6], 'float32')
                        target_action[0, 0] = anno[0]
                        target_action[0, 1] = anno[0] + PREDEFINE_LEN
                        target_action[0, 2:6] = anno[1:]
                        target_actions = np.concatenate((target_actions, target_action), axis=0)
                    else:
                        target_actions[loc2] = cur_actions
            else:
                # an action already starts at this frame: open another one
                target_action = np.ones([1, 6], 'float32')
                target_action[0, 0] = anno[0]
                target_action[0, 1] = anno[0] + PREDEFINE_LEN
                target_action[0, 2:6] = anno[1:]
                target_actions = np.concatenate((target_actions, target_action), axis=0)
        return target_actions

    def generate_single_sample(self, img_list, labels):
        """Write `img_list` as .mp4 clip(s) under ``dst_dir/pos``.

        NOTE(review): `labels` is a single 6-element action row, so this
        loop writes the same clip once per element; presumably it was
        meant to receive a list of actions. It also reads the module-level
        global `dst_dir` instead of `self.dst_dir` — confirm before
        changing either.
        """
        standard_with = img_list[0].shape[1]
        standard_height = img_list[0].shape[0]
        global start_idx
        if (len(labels) != 0):
            for label in labels:
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                pos_path = os.path.join(dst_dir, 'pos', '%05d.mp4' % start_idx)
                os.makedirs(os.path.join(dst_dir, 'pos'), exist_ok=True)
                print(pos_path)
                vw = cv2.VideoWriter(pos_path, fourcc, 6, (standard_with, standard_height))
                for image in img_list:
                    vw.write(image)
                vw.release()
                start_idx += 1

    def start(self):
        """Cut every merged action's frame range out of the video."""
        if os.path.exists(self.label_path) and len(self.target_actions) != 0:
            video_cap = cv2.VideoCapture(self.Path)
            frame_list = []
            frame_idx = 0
            global start_idx
            action_id = 0
            self.target_actions = self.target_actions[np.argsort(self.target_actions[:, 0])]
            cur_action = self.target_actions[action_id]
            while True:
                is_read, img = video_cap.read()
                if not is_read:
                    break
                if cur_action[0] <= frame_idx < cur_action[1]:
                    frame_list.append(img)
                    # Emit once the clip is full. The original additionally
                    # required frame_idx == cur_action[1], which contradicts
                    # the enclosing frame_idx < cur_action[1] test, so no
                    # clip was ever written.
                    if len(frame_list) == (cur_action[1] - cur_action[0]):
                        self.generate_single_sample(frame_list, cur_action)
                        frame_list = []
                        action_id += 1
                        if action_id >= len(self.target_actions):
                            break  # original indexed past the last action here
                        cur_action = self.target_actions[action_id]
                frame_idx += 1
            video_cap.release()
if __name__ == '__main__':
    # Daily annotation folders: videos in <root>, labels in <root>_label,
    # merged action files written to <root>_newlabel.
    target_roots = [r'/media/hzh/NO.6/Q3_RIGHT_newest/Q3/fall/fall20200617/fall20200617',
                    r'/media/hzh/NO.6/Q3_RIGHT_newest/Q3/fall/fall20200618/fall20200618',
                    r'/media/hzh/NO.6/Q3_RIGHT_newest/Q3/fall/fall20200619/fall20200619',
                    r'/media/hzh/NO.6/Q3_RIGHT_newest/Q3/fall/fall20200620/fall20200620',
                    r'/media/hzh/NO.6/Q3_RIGHT_newest/Q3/fall/fall20200621/fall20200621',
                    r'/media/hzh/NO.6/Q3_RIGHT_newest/Q3/fall/fall20200622/fall20200622']
    for tar_path in target_roots:
        label_path = tar_path + '_label'
        # NOTE: `dst_dir` stays a module-level name on purpose —
        # generating.generate_single_sample reads it as a global.
        dst_dir = tar_path + '_newlabel'
        for sub_video in os.listdir(tar_path):
            if sub_video.endswith(('.mp4', '.flv')):
                print("process " + sub_video + "..")
                full_sub_video = os.path.join(tar_path, sub_video)
                instance_ = generating(full_sub_video, label_path, dst_dir)
                # instance_.start()  # clip extraction disabled
    print('total fall action:', anno_num)
    print('total src fall action:', total_anno)
"os.makedirs",
"cv2.VideoWriter_fourcc",
"os.path.basename",
"os.path.exists",
"numpy.ones",
"numpy.argsort",
"cv2.VideoCapture",
"numpy.where",
"os.path.splitext",
"numpy.loadtxt",
"cv2.VideoWriter",
"os.path.join",
"os.listdir",
"numpy.concatenate"
] | [((232, 259), 'os.path.basename', 'os.path.basename', (['self.Path'], {}), '(self.Path)\n', (248, 259), False, 'import os\n'), ((347, 396), 'os.path.join', 'os.path.join', (['label_path', "(self.base_name + '.txt')"], {}), "(label_path, self.base_name + '.txt')\n", (359, 396), False, 'import os\n'), ((477, 517), 'os.makedirs', 'os.makedirs', (['self.dst_dir'], {'exist_ok': '(True)'}), '(self.dst_dir, exist_ok=True)\n', (488, 517), False, 'import os\n'), ((553, 584), 'os.path.exists', 'os.path.exists', (['self.label_path'], {}), '(self.label_path)\n', (567, 584), False, 'import os\n'), ((2473, 2499), 'numpy.ones', 'np.ones', (['[0, 6]', '"""float32"""'], {}), "([0, 6], 'float32')\n", (2480, 2499), True, 'import numpy as np\n'), ((7246, 7266), 'os.listdir', 'os.listdir', (['tar_path'], {}), '(tar_path)\n', (7256, 7266), False, 'import os\n'), ((285, 317), 'os.path.splitext', 'os.path.splitext', (['self.base_name'], {}), '(self.base_name)\n', (301, 317), False, 'import os\n'), ((616, 660), 'numpy.loadtxt', 'np.loadtxt', (['self.label_path'], {'dtype': '"""float32"""'}), "(self.label_path, dtype='float32')\n", (626, 660), True, 'import numpy as np\n'), ((2418, 2446), 'numpy.argsort', 'np.argsort', (['anno_boxes[:, 0]'], {}), '(anno_boxes[:, 0])\n', (2428, 2446), True, 'import numpy as np\n'), ((2603, 2644), 'numpy.where', 'np.where', (['(target_actions[:, 0] == anno[0])'], {}), '(target_actions[:, 0] == anno[0])\n', (2611, 2644), True, 'import numpy as np\n'), ((2671, 2712), 'numpy.where', 'np.where', (['(target_actions[:, 1] == anno[0])'], {}), '(target_actions[:, 1] == anno[0])\n', (2679, 2712), True, 'import numpy as np\n'), ((5299, 5330), 'os.path.exists', 'os.path.exists', (['self.label_path'], {}), '(self.label_path)\n', (5313, 5330), False, 'import os\n'), ((5390, 5417), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.Path'], {}), '(self.Path)\n', (5406, 5417), False, 'import cv2\n'), ((768, 801), 'numpy.argsort', 'np.argsort', (['self._all_boxes[:, 0]'], 
{}), '(self._all_boxes[:, 0])\n', (778, 801), True, 'import numpy as np\n'), ((4263, 4289), 'numpy.ones', 'np.ones', (['[1, 6]', '"""float32"""'], {}), "([1, 6], 'float32')\n", (4270, 4289), True, 'import numpy as np\n'), ((4479, 4534), 'numpy.concatenate', 'np.concatenate', (['(target_actions, target_action)'], {'axis': '(0)'}), '((target_actions, target_action), axis=0)\n', (4493, 4534), True, 'import numpy as np\n'), ((4825, 4856), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (4847, 4856), False, 'import cv2\n'), ((4884, 4936), 'os.path.join', 'os.path.join', (['dst_dir', '"""pos"""', "('%05d.mp4' % start_idx)"], {}), "(dst_dir, 'pos', '%05d.mp4' % start_idx)\n", (4896, 4936), False, 'import os\n'), ((5061, 5131), 'cv2.VideoWriter', 'cv2.VideoWriter', (['pos_path', 'fourcc', '(6)', '(standard_with, standard_height)'], {}), '(pos_path, fourcc, 6, (standard_with, standard_height))\n', (5076, 5131), False, 'import cv2\n'), ((5582, 5619), 'numpy.argsort', 'np.argsort', (['self.target_actions[:, 0]'], {}), '(self.target_actions[:, 0])\n', (5592, 5619), True, 'import numpy as np\n'), ((7505, 7538), 'os.path.join', 'os.path.join', (['tar_path', 'sub_video'], {}), '(tar_path, sub_video)\n', (7517, 7538), False, 'import os\n'), ((902, 953), 'os.path.join', 'os.path.join', (['self.dst_dir', "(self.base_name + '.txt')"], {}), "(self.dst_dir, self.base_name + '.txt')\n", (914, 953), False, 'import os\n'), ((2835, 2861), 'numpy.ones', 'np.ones', (['[1, 6]', '"""float32"""'], {}), "([1, 6], 'float32')\n", (2842, 2861), True, 'import numpy as np\n'), ((3061, 3116), 'numpy.concatenate', 'np.concatenate', (['(target_actions, target_action)'], {'axis': '(0)'}), '((target_actions, target_action), axis=0)\n', (3075, 3116), True, 'import numpy as np\n'), ((4964, 4992), 'os.path.join', 'os.path.join', (['dst_dir', '"""pos"""'], {}), "(dst_dir, 'pos')\n", (4976, 4992), False, 'import os\n'), ((3824, 3850), 'numpy.ones', 'np.ones', (['[1, 6]', 
'"""float32"""'], {}), "([1, 6], 'float32')\n", (3831, 3850), True, 'import numpy as np\n'), ((4072, 4127), 'numpy.concatenate', 'np.concatenate', (['(target_actions, target_action)'], {'axis': '(0)'}), '((target_actions, target_action), axis=0)\n', (4086, 4127), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from .source_pos import gauss
def lc_nodriftcorr(meta, wave_1d, optspec, log):
    '''Plot a 2D light curve without drift correction.

    Parameters
    ----------
    meta: MetaClass
        The metadata object.
    wave_1d:
        Wavelength array with trimmed edges depending on xwindow and ywindow
        which have been set in the S3 ecf.
    optspec:
        The optimally extracted spectrum.
    log: logedit.Logedit
        The open log in which notes from this step can be added.

    Returns
    -------
    None
    '''
    optspec = np.ma.masked_invalid(optspec)
    n_int, nx = optspec.shape
    # Normalize each wavelength column by its mean over all integrations.
    normspec = optspec / np.ma.mean(optspec, axis=0)

    plt.figure(3101, figsize=(8, 8))
    plt.clf()
    plt.imshow(normspec, origin='lower', aspect='auto',
               extent=[wave_1d.min(), wave_1d.max(), 0, n_int],
               vmin=0.97, vmax=1.03, cmap=plt.cm.RdYlBu_r)

    # Median absolute first difference of each integration, in ppm.
    ediff = np.ma.zeros(n_int)
    for m in range(n_int):
        ediff[m] = 1e6 * np.ma.median(np.ma.abs(np.ma.ediff1d(normspec[m])))
    MAD = np.ma.mean(ediff)
    mad_label = "MAD = " + str(np.round(MAD, 0).astype(int)) + " ppm"
    log.writelog(mad_label)
    plt.title(mad_label)

    plt.ylabel('Integration Number')
    plt.xlabel(r'Wavelength ($\mu m$)')
    plt.colorbar(label='Normalized Flux')
    plt.tight_layout()
    plt.savefig(meta.outputdir + 'figs/fig3101-2D_LC.png')
    if not meta.hide_plots:
        plt.pause(0.2)
def image_and_background(data, meta, n):
    '''Make image+background plot.

    Parameters
    ----------
    data: DataClass
        The data object.
    meta: MetaClass
        The metadata object.
    n: int
        The integration number.

    Returns
    -------
    None
    '''
    intstart, subdata, submask, subbg = data.intstart, data.subdata, data.submask, data.subbg

    plt.figure(3301, figsize=(8, 8))
    plt.clf()
    plt.suptitle(f'Integration {intstart + n}')

    plt.subplot(211)
    plt.title('Background-Subtracted Flux')
    flux = subdata[n] * submask[n]
    # Renamed from `max`, which shadowed the builtin. The color scale tops
    # out at a tenth of the peak so faint structure stays visible.
    flux_max = np.max(flux)
    plt.imshow(flux, origin='lower', aspect='auto', vmin=0, vmax=flux_max / 10)
    plt.colorbar()
    plt.ylabel('Pixel Position')

    plt.subplot(212)
    plt.title('Subtracted Background')
    # Clip the color range to median +/- 3 sigma of the background.
    median = np.median(subbg[n])
    std = np.std(subbg[n])
    plt.imshow(subbg[n], origin='lower', aspect='auto',
               vmin=median - 3 * std, vmax=median + 3 * std)
    plt.colorbar()
    plt.ylabel('Pixel Position')
    plt.xlabel('Pixel Position')
    plt.tight_layout()
    plt.savefig(meta.outputdir + 'figs/fig3301-' + str(intstart + n) + '-Image+Background.png')
    if not meta.hide_plots:
        plt.pause(0.2)
def optimal_spectrum(data, meta, n):
    '''Plot the standard and optimally extracted 1D spectra for one integration.

    Parameters
    ----------
    data: DataClass
        The data object.
    meta: MetaClass
        The metadata object.
    n: int
        The integration number.

    Returns
    -------
    None
    '''
    intstart, subnx, stdspec, optspec, opterr = data.intstart, meta.subnx, data.stdspec, data.optspec, data.opterr
    pixels = range(subnx)
    plt.figure(3302)
    plt.clf()
    plt.suptitle(f'1D Spectrum - Integration {intstart + n}')
    # Standard (box) extraction on a log flux scale.
    plt.semilogy(pixels, stdspec[n], '-', color='C1', label='Standard Spec')
    # Optimal extraction with its per-pixel uncertainties.
    plt.errorbar(pixels, optspec[n], opterr[n], fmt='-', color='C2', ecolor='C2', label='Optimal Spec')
    plt.ylabel('Flux')
    plt.xlabel('Pixel Position')
    plt.legend(loc='best')
    plt.tight_layout()
    plt.savefig(meta.outputdir + 'figs/fig3302-' + str(intstart + n) + '-Spectrum.png')
    if not meta.hide_plots:
        plt.pause(0.2)
def source_position(meta, x_dim, pos_max, m,
                    isgauss=False, x=None, y=None, popt=None,
                    isFWM=False, y_pixels=None, sum_row=None, y_pos=None):
    '''Plot the fitted source position for MIRI data.

    Parameters
    ----------
    meta: MetaClass
        The metadata object.
    x_dim: int
        The number of pixels in the y-direction in the image.
    pos_max: float
        The brightest row.
    m: int
        The file number.
    isgauss: bool
        Used a Gaussian centring method.
    popt: list
        The fitted Gaussian terms.
    isFWM: bool
        Used a flux-weighted mean centring method.
    y_pixels: 1darray
        The indices of the y-pixels.
    sum_row: 1darray
        The sum over each row.
    y_pos: float
        The FWM central position of the star.

    Returns
    -------
    None
    '''
    plt.figure(3303)
    plt.clf()
    plt.plot(y_pixels, sum_row, 'o', label='Data')
    if isgauss:
        # Overlay the Gaussian fit and its fitted centre.
        grid = np.linspace(0, x_dim, 500)
        fit = gauss(grid, *popt)
        plt.plot(grid, fit, '-', label='Gaussian Fit')
        plt.axvline(popt[1], ls=':', label='Gaussian Center', c='C2')
        plt.xlim(pos_max - meta.spec_hw, pos_max + meta.spec_hw)
    elif isFWM:
        plt.axvline(y_pos, ls='-', label='Weighted Row')
    plt.axvline(pos_max, ls='--', label='Brightest Row', c='C3')
    plt.ylabel('Row Flux')
    plt.xlabel('Row Pixel Position')
    plt.legend()
    plt.tight_layout()
    plt.savefig(meta.outputdir + 'figs/fig3303-file' + str(m + 1) + '-source_pos.png')
    if not meta.hide_plots:
        plt.pause(0.2)
def profile(eventdir, profile, submask, n, hide_plots=False):
    '''Plot the weighting profile from the optimal spectral extraction routine.

    Parameters
    ----------
    eventdir: str
        Directory in which to save outputs.
    profile: ndarray
        Fitted profile in the same shape as the data array.
    submask: ndarray
        Outlier mask.
    n: int
        The current integration number.
    hide_plots: bool
        If True, plots will automatically be closed rather than popping up.

    Returns
    -------
    None
    '''
    profile = np.ma.masked_invalid(profile)
    masked_profile = profile * submask
    # Saturate the colour scale at 5% of the peak so the profile wings are
    # visible.
    vmax = 0.05 * np.ma.max(masked_profile)
    plt.figure(3305)
    plt.clf()
    plt.suptitle(f"Profile - Integration {n}")
    plt.imshow(masked_profile, aspect='auto', origin='lower', vmax=vmax)
    plt.ylabel('Pixel Position')  # fixed label typo ('Postion')
    plt.xlabel('Pixel Position')
    plt.tight_layout()
    plt.savefig(eventdir + 'figs/fig3305-' + str(n) + '-Profile.png')
    if not hide_plots:
        plt.pause(0.2)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.suptitle",
"numpy.ma.mean",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.round",
"matplotlib.pyplot.axvline",
"numpy.std",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"numpy.max",
"nu... | [((605, 634), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['optspec'], {}), '(optspec)\n', (625, 634), True, 'import numpy as np\n'), ((639, 671), 'matplotlib.pyplot.figure', 'plt.figure', (['(3101)'], {'figsize': '(8, 8)'}), '(3101, figsize=(8, 8))\n', (649, 671), True, 'import matplotlib.pyplot as plt\n'), ((676, 685), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (683, 685), True, 'import matplotlib.pyplot as plt\n'), ((855, 985), 'matplotlib.pyplot.imshow', 'plt.imshow', (['normspec'], {'origin': '"""lower"""', 'aspect': '"""auto"""', 'extent': '[wmin, wmax, 0, n_int]', 'vmin': 'vmin', 'vmax': 'vmax', 'cmap': 'plt.cm.RdYlBu_r'}), "(normspec, origin='lower', aspect='auto', extent=[wmin, wmax, 0,\n n_int], vmin=vmin, vmax=vmax, cmap=plt.cm.RdYlBu_r)\n", (865, 985), True, 'import matplotlib.pyplot as plt\n'), ((1009, 1027), 'numpy.ma.zeros', 'np.ma.zeros', (['n_int'], {}), '(n_int)\n', (1020, 1027), True, 'import numpy as np\n'), ((1142, 1159), 'numpy.ma.mean', 'np.ma.mean', (['ediff'], {}), '(ediff)\n', (1152, 1159), True, 'import numpy as np\n'), ((1305, 1337), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Integration Number"""'], {}), "('Integration Number')\n", (1315, 1337), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength ($\\\\mu m$)"""'], {}), "('Wavelength ($\\\\mu m$)')\n", (1352, 1377), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1419), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""Normalized Flux"""'}), "(label='Normalized Flux')\n", (1394, 1419), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1442), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1440, 1442), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1501), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(meta.outputdir + 'figs/fig3101-2D_LC.png')"], {}), "(meta.outputdir + 'figs/fig3101-2D_LC.png')\n", (1458, 1501), True, 'import 
matplotlib.pyplot as plt\n'), ((1952, 1984), 'matplotlib.pyplot.figure', 'plt.figure', (['(3301)'], {'figsize': '(8, 8)'}), '(3301, figsize=(8, 8))\n', (1962, 1984), True, 'import matplotlib.pyplot as plt\n'), ((1988, 1997), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1995, 1997), True, 'import matplotlib.pyplot as plt\n'), ((2002, 2045), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Integration {intstart + n}"""'], {}), "(f'Integration {intstart + n}')\n", (2014, 2045), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2066), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2061, 2066), True, 'import matplotlib.pyplot as plt\n'), ((2071, 2110), 'matplotlib.pyplot.title', 'plt.title', (['"""Background-Subtracted Flux"""'], {}), "('Background-Subtracted Flux')\n", (2080, 2110), True, 'import matplotlib.pyplot as plt\n'), ((2121, 2152), 'numpy.max', 'np.max', (['(subdata[n] * submask[n])'], {}), '(subdata[n] * submask[n])\n', (2127, 2152), True, 'import numpy as np\n'), ((2157, 2250), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(subdata[n] * submask[n])'], {'origin': '"""lower"""', 'aspect': '"""auto"""', 'vmin': '(0)', 'vmax': '(max / 10)'}), "(subdata[n] * submask[n], origin='lower', aspect='auto', vmin=0,\n vmax=max / 10)\n", (2167, 2250), True, 'import matplotlib.pyplot as plt\n'), ((2251, 2265), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2263, 2265), True, 'import matplotlib.pyplot as plt\n'), ((2270, 2298), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pixel Position"""'], {}), "('Pixel Position')\n", (2280, 2298), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2319), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2314, 2319), True, 'import matplotlib.pyplot as plt\n'), ((2324, 2358), 'matplotlib.pyplot.title', 'plt.title', (['"""Subtracted Background"""'], {}), "('Subtracted Background')\n", (2333, 2358), True, 'import matplotlib.pyplot as plt\n'), ((2372, 
2391), 'numpy.median', 'np.median', (['subbg[n]'], {}), '(subbg[n])\n', (2381, 2391), True, 'import numpy as np\n'), ((2402, 2418), 'numpy.std', 'np.std', (['subbg[n]'], {}), '(subbg[n])\n', (2408, 2418), True, 'import numpy as np\n'), ((2423, 2524), 'matplotlib.pyplot.imshow', 'plt.imshow', (['subbg[n]'], {'origin': '"""lower"""', 'aspect': '"""auto"""', 'vmin': '(median - 3 * std)', 'vmax': '(median + 3 * std)'}), "(subbg[n], origin='lower', aspect='auto', vmin=median - 3 * std,\n vmax=median + 3 * std)\n", (2433, 2524), True, 'import matplotlib.pyplot as plt\n'), ((2525, 2539), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2537, 2539), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pixel Position"""'], {}), "('Pixel Position')\n", (2554, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2605), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pixel Position"""'], {}), "('Pixel Position')\n", (2587, 2605), True, 'import matplotlib.pyplot as plt\n'), ((2610, 2628), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2626, 2628), True, 'import matplotlib.pyplot as plt\n'), ((3193, 3209), 'matplotlib.pyplot.figure', 'plt.figure', (['(3302)'], {}), '(3302)\n', (3203, 3209), True, 'import matplotlib.pyplot as plt\n'), ((3214, 3223), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3221, 3223), True, 'import matplotlib.pyplot as plt\n'), ((3228, 3285), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""1D Spectrum - Integration {intstart + n}"""'], {}), "(f'1D Spectrum - Integration {intstart + n}')\n", (3240, 3285), True, 'import matplotlib.pyplot as plt\n'), ((3483, 3501), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (3493, 3501), True, 'import matplotlib.pyplot as plt\n'), ((3506, 3534), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pixel Position"""'], {}), "('Pixel Position')\n", (3516, 3534), True, 'import 
matplotlib.pyplot as plt\n'), ((3539, 3561), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3549, 3561), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3584), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3582, 3584), True, 'import matplotlib.pyplot as plt\n'), ((4782, 4798), 'matplotlib.pyplot.figure', 'plt.figure', (['(3303)'], {}), '(3303)\n', (4792, 4798), True, 'import matplotlib.pyplot as plt\n'), ((4803, 4812), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4810, 4812), True, 'import matplotlib.pyplot as plt\n'), ((4817, 4863), 'matplotlib.pyplot.plot', 'plt.plot', (['y_pixels', 'sum_row', '"""o"""'], {'label': '"""Data"""'}), "(y_pixels, sum_row, 'o', label='Data')\n", (4825, 4863), True, 'import matplotlib.pyplot as plt\n'), ((5248, 5308), 'matplotlib.pyplot.axvline', 'plt.axvline', (['pos_max'], {'ls': '"""--"""', 'label': '"""Brightest Row"""', 'c': '"""C3"""'}), "(pos_max, ls='--', label='Brightest Row', c='C3')\n", (5259, 5308), True, 'import matplotlib.pyplot as plt\n'), ((5314, 5336), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Row Flux"""'], {}), "('Row Flux')\n", (5324, 5336), True, 'import matplotlib.pyplot as plt\n'), ((5341, 5373), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Row Pixel Position"""'], {}), "('Row Pixel Position')\n", (5351, 5373), True, 'import matplotlib.pyplot as plt\n'), ((5378, 5390), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5388, 5390), True, 'import matplotlib.pyplot as plt\n'), ((5395, 5413), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5411, 5413), True, 'import matplotlib.pyplot as plt\n'), ((6114, 6143), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['profile'], {}), '(profile)\n', (6134, 6143), True, 'import numpy as np\n'), ((6191, 6207), 'matplotlib.pyplot.figure', 'plt.figure', (['(3305)'], {}), '(3305)\n', (6201, 6207), True, 'import matplotlib.pyplot as plt\n'), ((6212, 
6221), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6219, 6221), True, 'import matplotlib.pyplot as plt\n'), ((6226, 6268), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Profile - Integration {n}"""'], {}), "(f'Profile - Integration {n}')\n", (6238, 6268), True, 'import matplotlib.pyplot as plt\n'), ((6273, 6344), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(profile * submask)'], {'aspect': '"""auto"""', 'origin': '"""lower"""', 'vmax': 'vmax'}), "(profile * submask, aspect='auto', origin='lower', vmax=vmax)\n", (6283, 6344), True, 'import matplotlib.pyplot as plt\n'), ((6346, 6373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pixel Postion"""'], {}), "('Pixel Postion')\n", (6356, 6373), True, 'import matplotlib.pyplot as plt\n'), ((6378, 6406), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pixel Position"""'], {}), "('Pixel Position')\n", (6388, 6406), True, 'import matplotlib.pyplot as plt\n'), ((6411, 6429), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6427, 6429), True, 'import matplotlib.pyplot as plt\n'), ((823, 850), 'numpy.ma.mean', 'np.ma.mean', (['optspec'], {'axis': '(0)'}), '(optspec, axis=0)\n', (833, 850), True, 'import numpy as np\n'), ((1538, 1552), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.2)'], {}), '(0.2)\n', (1547, 1552), True, 'import matplotlib.pyplot as plt\n'), ((2761, 2775), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.2)'], {}), '(0.2)\n', (2770, 2775), True, 'import matplotlib.pyplot as plt\n'), ((3709, 3723), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.2)'], {}), '(0.2)\n', (3718, 3723), True, 'import matplotlib.pyplot as plt\n'), ((4902, 4928), 'numpy.linspace', 'np.linspace', (['(0)', 'x_dim', '(500)'], {}), '(0, x_dim, 500)\n', (4913, 4928), True, 'import numpy as np\n'), ((4979, 5036), 'matplotlib.pyplot.plot', 'plt.plot', (['x_gaussian', 'gaussian', '"""-"""'], {'label': '"""Gaussian Fit"""'}), "(x_gaussian, gaussian, '-', label='Gaussian Fit')\n", (4987, 5036), True, 
'import matplotlib.pyplot as plt\n'), ((5046, 5107), 'matplotlib.pyplot.axvline', 'plt.axvline', (['popt[1]'], {'ls': '""":"""', 'label': '"""Gaussian Center"""', 'c': '"""C2"""'}), "(popt[1], ls=':', label='Gaussian Center', c='C2')\n", (5057, 5107), True, 'import matplotlib.pyplot as plt\n'), ((5117, 5173), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(pos_max - meta.spec_hw)', '(pos_max + meta.spec_hw)'], {}), '(pos_max - meta.spec_hw, pos_max + meta.spec_hw)\n', (5125, 5173), True, 'import matplotlib.pyplot as plt\n'), ((5535, 5549), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.2)'], {}), '(0.2)\n', (5544, 5549), True, 'import matplotlib.pyplot as plt\n'), ((6160, 6188), 'numpy.ma.max', 'np.ma.max', (['(profile * submask)'], {}), '(profile * submask)\n', (6169, 6188), True, 'import numpy as np\n'), ((6525, 6539), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.2)'], {}), '(0.2)\n', (6534, 6539), True, 'import matplotlib.pyplot as plt\n'), ((5194, 5242), 'matplotlib.pyplot.axvline', 'plt.axvline', (['y_pos'], {'ls': '"""-"""', 'label': '"""Weighted Row"""'}), "(y_pos, ls='-', label='Weighted Row')\n", (5205, 5242), True, 'import matplotlib.pyplot as plt\n'), ((1103, 1129), 'numpy.ma.ediff1d', 'np.ma.ediff1d', (['normspec[m]'], {}), '(normspec[m])\n', (1116, 1129), True, 'import numpy as np\n'), ((1192, 1208), 'numpy.round', 'np.round', (['MAD', '(0)'], {}), '(MAD, 0)\n', (1200, 1208), True, 'import numpy as np\n'), ((1261, 1277), 'numpy.round', 'np.round', (['MAD', '(0)'], {}), '(MAD, 0)\n', (1269, 1277), True, 'import numpy as np\n')] |
from typing import Dict
import pytest
import numpy as np
import string
from hyperminhash.perf import estimate_error
from hyperminhash.hyperminhash import HyperMinHash
def rnd_str(size: int):
    """Return a random string of ``size`` ASCII letters."""
    letters = list(string.ascii_letters)
    picks = np.random.choice(letters, size)
    return "".join(list(picks))
def test_zeros(exp: float = 0.0):
    """Fill every register with random values; the sketch's zero count must
    match the number of values with no leading zeros."""
    hll = HyperMinHash()
    upper = np.iinfo(np.uint16).max
    for idx in range(hll.m):
        reg_val = np.uint16(np.random.randint(0, upper))
        if hll.lz(reg_val) == 0:
            exp += 1
        hll.reg[idx] = reg_val
    _, got = hll.reg_sum_and_zeros()
    assert got == exp, f"expected {exp:.2f}, got {got:.2f}"
def test_all_zeros(exp: float = 16384.0):
    """A freshly created sketch must report every register as zero."""
    _, got = HyperMinHash().reg_sum_and_zeros()
    assert got == exp, f"expected {exp:.2f}, got {got:.2f}"
def test_cardinality(step_init: int = 10000, iters: int = 1000000):
    """Stream random strings into a sketch and check the cardinality estimate
    at exponentially spaced checkpoints."""
    sketch = HyperMinHash()
    seen = set()
    checkpoint = step_init
    for i in range(iters):
        word = rnd_str(32)
        sketch.add(word.encode())
        seen.add(word)
        if len(seen) % checkpoint == 0:
            exact = np.uint64(len(seen))
            estimate = np.uint64(sketch.cardinality())
            checkpoint *= 10
            ratio = estimate_error(estimate, exact)
            assert ratio <= 2, f"Exact {exact}, got {estimate} which is {ratio:.2f} error. String: {word}."
            print(f"PASS iter {i}.")
@pytest.mark.slow
def test_merge(num_items: int = 3500000):
    """Merging two sketches must estimate the cardinality of the union,
    in both merge directions."""
    first = HyperMinHash()
    second = HyperMinHash()
    seen = set()
    for _ in range(num_items):
        for sketch in (first, second):
            word = rnd_str(32)
            sketch.add(word.encode())
            seen.add(word)
    print("Populated sketches")
    exact = np.uint64(len(seen))
    for left, right in ((first, second), (second, first)):
        merged = left.merge(right)
        estimate = merged.cardinality()
        ratio = estimate_error(estimate, exact)
        assert ratio <= 2, f"Exact {exact}, got {estimate} which is {ratio:.2f} error."
@pytest.mark.slow
@pytest.mark.parametrize("j", range(1, 21))
def test_intersection(j, k: int = 1000000):
    """Check the intersection estimate for two integer streams whose overlap
    fraction is swept from 1/20 to 20/20."""
    sk1 = HyperMinHash()
    sk2 = HyperMinHash()
    counts: Dict[str, int] = {}
    frac = np.float64(j) / np.float64(20)
    cutoff = int(np.float64(k) * frac)
    for i in range(k):
        key = str(i)
        sk1.add(key.encode())
        counts[key] = counts.get(key, 0) + 1
    for i in range(cutoff, 2 * k):
        key = str(i)
        sk2.add(key.encode())
        counts[key] = counts.get(key, 0) + 1
    # Number of keys seen by both streams (kept for parity with the original).
    col = sum(1 for c in counts.values() if c > 1)
    exact = np.uint64(k - cutoff)
    res = sk1.intersection(sk2)
    ratio = estimate_error(res, exact)
    assert ratio <= 100, f"Exact {exact}, got {res} which is {ratio:.2f} error."
    print(f"PASS iter {j}.")
@pytest.mark.slow
def test_no_intersection(num_items1: int = 1000000, num_items2: int = 2000000):
    """Two disjoint integer streams must report an empty intersection."""
    sk1 = HyperMinHash()
    sk2 = HyperMinHash()
    for i in range(num_items1):
        sk1.add(str(i).encode())
    print("Populated sketch 1")
    for i in range(num_items1, num_items2):
        sk2.add(str(i).encode())
    print("Populated sketch 2")
    got = sk1.intersection(sk2)
    assert got == 0, f"Expected no intersection, got {got}."
| [
"hyperminhash.perf.estimate_error",
"hyperminhash.hyperminhash.HyperMinHash",
"numpy.iinfo",
"numpy.random.choice",
"numpy.float64"
] | [((203, 260), 'numpy.random.choice', 'np.random.choice', (['[_ for _ in string.ascii_letters]', 'size'], {}), '([_ for _ in string.ascii_letters], size)\n', (219, 260), True, 'import numpy as np\n'), ((331, 345), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (343, 345), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((633, 647), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (645, 647), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((816, 830), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (828, 830), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((1303, 1317), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (1315, 1317), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((1325, 1339), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (1337, 1339), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((1873, 1887), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (1885, 1887), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((1895, 1909), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (1907, 1909), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((2381, 2407), 'hyperminhash.perf.estimate_error', 'estimate_error', (['res', 'exact'], {}), '(res, exact)\n', (2395, 2407), False, 'from hyperminhash.perf import estimate_error\n'), ((2620, 2634), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (2632, 2634), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((2642, 2656), 'hyperminhash.hyperminhash.HyperMinHash', 'HyperMinHash', ([], {}), '()\n', (2654, 2656), False, 'from hyperminhash.hyperminhash import HyperMinHash\n'), ((1653, 1679), 'hyperminhash.perf.estimate_error', 'estimate_error', (['res', 'exact'], {}), '(res, 
exact)\n', (1667, 1679), False, 'from hyperminhash.perf import estimate_error\n'), ((1948, 1961), 'numpy.float64', 'np.float64', (['j'], {}), '(j)\n', (1958, 1961), True, 'import numpy as np\n'), ((1964, 1978), 'numpy.float64', 'np.float64', (['(20)'], {}), '(20)\n', (1974, 1978), True, 'import numpy as np\n'), ((1087, 1113), 'hyperminhash.perf.estimate_error', 'estimate_error', (['res', 'exact'], {}), '(res, exact)\n', (1101, 1113), False, 'from hyperminhash.perf import estimate_error\n'), ((2106, 2119), 'numpy.float64', 'np.float64', (['k'], {}), '(k)\n', (2116, 2119), True, 'import numpy as np\n'), ((410, 429), 'numpy.iinfo', 'np.iinfo', (['np.uint16'], {}), '(np.uint16)\n', (418, 429), True, 'import numpy as np\n'), ((2319, 2332), 'numpy.float64', 'np.float64', (['k'], {}), '(k)\n', (2329, 2332), True, 'import numpy as np\n')] |
#
# Copyright © 2020 <NAME> <<EMAIL>>
#
# Distributed under terms of the GPLv3 license.
"""
"""
import numpy as np
import pytest
import pyronn_torch
def test_init():
    # Smoke test: the compiled extension must have been built and imported
    # (the attribute must be truthy).
    assert pyronn_torch.cpp_extension
# NOTE(review): the truthy string 'with_texture' / 'with_backward' stands in
# for True in the parametrize values.
@pytest.mark.parametrize('with_texture', ('with_texture', False))
@pytest.mark.parametrize('with_backward', ('with_backward', False))
def test_projection(with_texture, with_backward):
    # Forward cone-beam projection of a constant volume; optionally also
    # checks that gradients flow back through the projector.
    # Arguments: volume shape, voxel spacing, volume origin, projection
    # shape, pixel spacing, detector origin, and per-view projection
    # matrices (2 views of 3x4 each).
    projector = pyronn_torch.ConeBeamProjector(
        (128, 128, 128), (2.0, 2.0, 2.0), (-127.5, -127.5, -127.5),
        (2, 480, 620), [1.0, 1.0], (0, 0),
        np.array(
            [[[-3.10e+2, -1.20e+03, 0.00e+00, 1.86e+5],
              [-2.40e+2, 0.00e+00, 1.20e+03, 1.44e+5],
              [-1.00e+00, 0.00e+00, 0.00e+00, 6.00e+2]],
             [[-2.89009888e+2, -1.20522754e+3, -1.02473585e-13, 1.86000000e+5],
              [-2.39963440e+2, -4.18857765e+0, 1.20000000e+3, 1.44000000e+5],
              [-9.99847710e-01, -1.74524058e-2, 0.00000000e+0,
               6.00000000e+2]]]))
    volume = projector.new_volume_tensor(
        requires_grad=True if with_backward else False)
    volume += 1.
    result = projector.project_forward(volume, use_texture=with_texture)
    assert result is not None
    if with_backward:
        assert volume.requires_grad
        assert result.requires_grad
        # Backpropagate a scalar loss to exercise the custom backward pass.
        loss = result.mean()
        loss.backward()
# NOTE(review): the truthy string 'with_texture' / 'with_backward' stands in
# for True in the parametrize values.
@pytest.mark.parametrize('with_texture', ('with_texture', False))
@pytest.mark.parametrize('with_backward', ('with_backward', False))
def test_projection_backward(with_texture, with_backward):
    # Backprojection of constant projection data into the volume; optionally
    # also checks that gradients flow back through the projector.
    # Same projector geometry as test_projection above.
    projector = pyronn_torch.ConeBeamProjector(
        (128, 128, 128), (2.0, 2.0, 2.0), (-127.5, -127.5, -127.5),
        (2, 480, 620), [1.0, 1.0], (0, 0),
        np.array(
            [[[-3.10e+2, -1.20e+03, 0.00e+00, 1.86e+5],
              [-2.40e+2, 0.00e+00, 1.20e+03, 1.44e+5],
              [-1.00e+00, 0.00e+00, 0.00e+00, 6.00e+2]],
             [[-2.89009888e+2, -1.20522754e+3, -1.02473585e-13, 1.86000000e+5],
              [-2.39963440e+2, -4.18857765e+0, 1.20000000e+3, 1.44000000e+5],
              [-9.99847710e-01, -1.74524058e-2, 0.00000000e+0,
               6.00000000e+2]]]))
    projection = projector.new_projection_tensor(
        requires_grad=True if with_backward else False)
    projection += 1.
    result = projector.project_backward(projection, use_texture=with_texture)
    # The reconstruction must match the configured volume shape.
    assert result.shape == projector._volume_shape
    assert result is not None
    if with_backward:
        assert projection.requires_grad
        assert result.requires_grad
        loss = result.mean()
        loss.backward()
@pytest.mark.parametrize('with_backward', ('with_backward', False))
def test_conrad_config(with_backward, with_texture=True):
    """Forward projection with geometry loaded from a CONRAD configuration.

    Skipped entirely when pyconrad is not installed.
    """
    # pytest is imported at module level; the former function-local
    # re-import was redundant and has been removed.
    pytest.importorskip("pyconrad")
    projector = pyronn_torch.ConeBeamProjector.from_conrad_config()
    volume = projector.new_volume_tensor(requires_grad=bool(with_backward))
    volume += 1.
    result = projector.project_forward(volume, use_texture=with_texture)
    # Imported here because pyconrad is only guaranteed after importorskip.
    import pyconrad.autoinit
    pyconrad.imshow(result)
    assert result is not None
    if with_backward:
        assert volume.requires_grad
        assert result.requires_grad
        loss = result.mean()
        loss.backward()
def test_projection_backward_conrad(with_texture=True, with_backward=True):
    """Backprojection with CONRAD geometry; checks shape and gradient flow.

    Skipped entirely when pyconrad is not installed.
    """
    # pytest is imported at module level; the former function-local
    # re-import was redundant and has been removed.
    pytest.importorskip("pyconrad")
    projector = pyronn_torch.ConeBeamProjector.from_conrad_config()
    projection = projector.new_projection_tensor(
        requires_grad=bool(with_backward))
    projection += 1000.
    result = projector.project_backward(projection, use_texture=with_texture)
    # Imported here because pyconrad is only guaranteed after importorskip.
    import pyconrad.autoinit
    pyconrad.imshow(result)
    assert result.shape == projector._volume_shape
    assert result is not None
    if with_backward:
        assert projection.requires_grad
        assert result.requires_grad
        loss = result.mean()
        loss.backward()
def test_conrad_forward_backward():
    """Round-trip: forward projection followed by backprojection with CONRAD
    geometry; both results must exist.

    Skipped entirely when pyconrad is not installed.
    """
    # pytest is imported at module level; the former function-local
    # re-import was redundant and has been removed (as was dead
    # commented-out debugging code).
    pytest.importorskip("pyconrad")
    projector = pyronn_torch.ConeBeamProjector.from_conrad_config()
    volume = projector.new_volume_tensor()
    volume += 1.
    result = projector.project_forward(volume, use_texture=False)
    reco = projector.project_backward(result, use_texture=False)
    assert result is not None
    assert reco is not None
def test_register_hook():
    # Verifies that a gradient hook registered on a non-leaf tensor created
    # by the projector actually fires during backward().
    was_executed = False
    def require_nonleaf_grad(v):
        # Registers a hook that records the gradient on the tensor itself
        # and flips the outer flag when invoked.
        def hook(g):
            nonlocal was_executed
            was_executed = True
            v.grad_nonleaf = g
        v.register_hook(hook)
    # Same projector geometry as the other tests in this file.
    projector = pyronn_torch.ConeBeamProjector(
        (128, 128, 128), (2.0, 2.0, 2.0), (-127.5, -127.5, -127.5),
        (2, 480, 620), [1.0, 1.0], (0, 0),
        np.array(
            [[[-3.10e+2, -1.20e+03, 0.00e+00, 1.86e+5],
              [-2.40e+2, 0.00e+00, 1.20e+03, 1.44e+5],
              [-1.00e+00, 0.00e+00, 0.00e+00, 6.00e+2]],
             [[-2.89009888e+2, -1.20522754e+3, -1.02473585e-13, 1.86000000e+5],
              [-2.39963440e+2, -4.18857765e+0, 1.20000000e+3, 1.44000000e+5],
              [-9.99847710e-01, -1.74524058e-2, 0.00000000e+0,
               6.00000000e+2]]]))
    x = projector.new_volume_tensor(requires_grad=True)
    require_nonleaf_grad(x)
    loss = projector.project_forward(x)
    loss.mean().backward()
    # Attribute access raises AttributeError if the hook never ran.
    x.grad_nonleaf
    assert was_executed
| [
"pyronn_torch.ConeBeamProjector.from_conrad_config",
"pytest.mark.parametrize",
"pytest.importorskip",
"numpy.array"
] | [((212, 276), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_texture"""', "('with_texture', False)"], {}), "('with_texture', ('with_texture', False))\n", (235, 276), False, 'import pytest\n'), ((278, 344), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_backward"""', "('with_backward', False)"], {}), "('with_backward', ('with_backward', False))\n", (301, 344), False, 'import pytest\n'), ((1367, 1431), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_texture"""', "('with_texture', False)"], {}), "('with_texture', ('with_texture', False))\n", (1390, 1431), False, 'import pytest\n'), ((1433, 1499), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_backward"""', "('with_backward', False)"], {}), "('with_backward', ('with_backward', False))\n", (1456, 1499), False, 'import pytest\n'), ((2603, 2669), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_backward"""', "('with_backward', False)"], {}), "('with_backward', ('with_backward', False))\n", (2626, 2669), False, 'import pytest\n'), ((2750, 2781), 'pytest.importorskip', 'pytest.importorskip', (['"""pyconrad"""'], {}), "('pyconrad')\n", (2769, 2781), False, 'import pytest\n'), ((2799, 2850), 'pyronn_torch.ConeBeamProjector.from_conrad_config', 'pyronn_torch.ConeBeamProjector.from_conrad_config', ([], {}), '()\n', (2848, 2850), False, 'import pyronn_torch\n'), ((3377, 3408), 'pytest.importorskip', 'pytest.importorskip', (['"""pyconrad"""'], {}), "('pyconrad')\n", (3396, 3408), False, 'import pytest\n'), ((3426, 3477), 'pyronn_torch.ConeBeamProjector.from_conrad_config', 'pyronn_torch.ConeBeamProjector.from_conrad_config', ([], {}), '()\n', (3475, 3477), False, 'import pyronn_torch\n'), ((4040, 4071), 'pytest.importorskip', 'pytest.importorskip', (['"""pyconrad"""'], {}), "('pyconrad')\n", (4059, 4071), False, 'import pytest\n'), ((4089, 4140), 'pyronn_torch.ConeBeamProjector.from_conrad_config', 
'pyronn_torch.ConeBeamProjector.from_conrad_config', ([], {}), '()\n', (4138, 4140), False, 'import pyronn_torch\n'), ((562, 824), 'numpy.array', 'np.array', (['[[[-310.0, -1200.0, 0.0, 186000.0], [-240.0, 0.0, 1200.0, 144000.0], [-1.0,\n 0.0, 0.0, 600.0]], [[-289.009888, -1205.22754, -1.02473585e-13, \n 186000.0], [-239.96344, -4.18857765, 1200.0, 144000.0], [-0.99984771, -\n 0.0174524058, 0.0, 600.0]]]'], {}), '([[[-310.0, -1200.0, 0.0, 186000.0], [-240.0, 0.0, 1200.0, 144000.0\n ], [-1.0, 0.0, 0.0, 600.0]], [[-289.009888, -1205.22754, -\n 1.02473585e-13, 186000.0], [-239.96344, -4.18857765, 1200.0, 144000.0],\n [-0.99984771, -0.0174524058, 0.0, 600.0]]])\n', (570, 824), True, 'import numpy as np\n'), ((1726, 1988), 'numpy.array', 'np.array', (['[[[-310.0, -1200.0, 0.0, 186000.0], [-240.0, 0.0, 1200.0, 144000.0], [-1.0,\n 0.0, 0.0, 600.0]], [[-289.009888, -1205.22754, -1.02473585e-13, \n 186000.0], [-239.96344, -4.18857765, 1200.0, 144000.0], [-0.99984771, -\n 0.0174524058, 0.0, 600.0]]]'], {}), '([[[-310.0, -1200.0, 0.0, 186000.0], [-240.0, 0.0, 1200.0, 144000.0\n ], [-1.0, 0.0, 0.0, 600.0]], [[-289.009888, -1205.22754, -\n 1.02473585e-13, 186000.0], [-239.96344, -4.18857765, 1200.0, 144000.0],\n [-0.99984771, -0.0174524058, 0.0, 600.0]]])\n', (1734, 1988), True, 'import numpy as np\n'), ((5010, 5272), 'numpy.array', 'np.array', (['[[[-310.0, -1200.0, 0.0, 186000.0], [-240.0, 0.0, 1200.0, 144000.0], [-1.0,\n 0.0, 0.0, 600.0]], [[-289.009888, -1205.22754, -1.02473585e-13, \n 186000.0], [-239.96344, -4.18857765, 1200.0, 144000.0], [-0.99984771, -\n 0.0174524058, 0.0, 600.0]]]'], {}), '([[[-310.0, -1200.0, 0.0, 186000.0], [-240.0, 0.0, 1200.0, 144000.0\n ], [-1.0, 0.0, 0.0, 600.0]], [[-289.009888, -1205.22754, -\n 1.02473585e-13, 186000.0], [-239.96344, -4.18857765, 1200.0, 144000.0],\n [-0.99984771, -0.0174524058, 0.0, 600.0]]])\n', (5018, 5272), True, 'import numpy as np\n')] |
# coding: utf-8
# In[1]:
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
# In[25]:
# STL-10 images are 96x96 RGB; SIZE is the byte count of one image record.
HEIGHT = 96
WIDTH = 96
DEPTH = 3
SIZE = HEIGHT*WIDTH*DEPTH
# In[26]:
# Paths to the STL-10 binary training images and labels
# (relative to the notebook's working directory).
DATA_PATH = '../dataset/stl10_binary/train_X.bin'
LABEL_PATH = '../dataset/stl10_binary/train_y.bin'
# In[27]:
def read_labels(path_to_labels):
    """Read the STL-10 label file and return the labels as a uint8 array."""
    with open(path_to_labels, 'rb') as label_file:
        return np.fromfile(label_file, dtype=np.uint8)
# In[28]:
def read_all_images(path_to_data):
    """Read every STL-10 image from ``path_to_data``.

    Returns an array of shape (n_images, WIDTH, HEIGHT, DEPTH) in a standard
    channels-last image layout (transposed from the planar binary format).
    """
    with open(path_to_data, 'rb') as data_file:
        raw = np.fromfile(data_file, dtype=np.uint8)
    # The binary stores each image as 3 channel planes of HEIGHT x WIDTH;
    # -1 lets the image count follow from the file size.
    planar = np.reshape(raw, (-1, 3, HEIGHT, WIDTH))
    # Move channels last and swap the spatial axes for display libraries.
    # Comment this transpose out before training algorithms like CNNs.
    return np.transpose(planar, (0, 3, 2, 1))
# In[29]:
def read_single_image(image_file):
    """Read one image record from an open STL-10 binary file object."""
    flat = np.fromfile(image_file, dtype=np.uint8, count=SIZE)
    planar = np.reshape(flat, (3, HEIGHT, WIDTH))
    # Channels-last with spatial axes swapped to match plotting conventions.
    return np.transpose(planar, (2, 1, 0))
# In[30]:
def plot_image(image):
    # Render one image array in a matplotlib window (blocks until closed).
    plt.imshow(image)
    plt.show()
# In[34]:
def display_one_image():
    """Read the first image from DATA_PATH and display it."""
    with open(DATA_PATH, 'rb') as data_file:
        first_image = read_single_image(data_file)
    plot_image(first_image)
# In[36]:
def get_shape_of_dataset():
    """Return the shape tuple of the full training image array."""
    return read_all_images(DATA_PATH).shape
# In[ ]:
# In[ ]:
| [
"matplotlib.pyplot.show",
"numpy.fromfile",
"matplotlib.pyplot.imshow",
"numpy.transpose",
"numpy.reshape"
] | [((1002, 1053), 'numpy.fromfile', 'np.fromfile', (['image_file'], {'dtype': 'np.uint8', 'count': 'SIZE'}), '(image_file, dtype=np.uint8, count=SIZE)\n', (1013, 1053), True, 'import numpy as np\n'), ((1070, 1107), 'numpy.reshape', 'np.reshape', (['image', '(3, HEIGHT, WIDTH)'], {}), '(image, (3, HEIGHT, WIDTH))\n', (1080, 1107), True, 'import numpy as np\n'), ((1122, 1152), 'numpy.transpose', 'np.transpose', (['image', '(2, 1, 0)'], {}), '(image, (2, 1, 0))\n', (1134, 1152), True, 'import numpy as np\n'), ((1208, 1225), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1218, 1225), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1240), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1238, 1240), True, 'import matplotlib.pyplot as plt\n'), ((392, 422), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.uint8'}), '(f, dtype=np.uint8)\n', (403, 422), True, 'import numpy as np\n'), ((551, 581), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.uint8'}), '(f, dtype=np.uint8)\n', (562, 581), True, 'import numpy as np\n'), ((705, 749), 'numpy.reshape', 'np.reshape', (['all_data', '(-1, 3, HEIGHT, WIDTH)'], {}), '(all_data, (-1, 3, HEIGHT, WIDTH))\n', (715, 749), True, 'import numpy as np\n'), ((888, 922), 'numpy.transpose', 'np.transpose', (['images', '(0, 3, 2, 1)'], {}), '(images, (0, 3, 2, 1))\n', (900, 922), True, 'import numpy as np\n')] |
import contextlib
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
class MPLBoss:
    """Matplotlib rendering backend that writes one PNG per animation frame.

    Frames go through init_png() -> drawing primitives -> close_png(), or the
    make_png() context manager which wraps the open/close pair.
    """

    # Map a 0 / 0.5 / 1.0 relative anchor onto matplotlib alignment keywords.
    x_alignments = {0: "left", 0.5: "center", 1.0: "right"}
    y_alignments = {0: "baseline", 0.5: "center", 1.0: "top"}

    def __init__(self, settings):
        self.outf_dirname = settings._temp_r_dirname
        self.png_dirname = settings.output_dirname
        # Filename template with a zero-padded frame-number placeholder.
        self.png_fname_base = (
            settings.png_fname_base
            + f"{{png_fnumber:0{settings.png_fnum_digits}d}}.png"
        )
        self.plot_count = 0
        # Figure/axes of the frame currently being drawn; None while closed.
        self._fig = self._ax = None
        # dpi is more or less arbitrary -- it only serves to convert the
        # requested pixel dimensions into matplotlib's inch-based figsize.
        # The realized PNG comes out slightly smaller than requested
        # (e.g. 1270x710 when asking for 1280x720); cause unknown.
        self._dpi = 96
        self.out_width = settings.out_width / self._dpi
        self.out_height = settings.out_height / self._dpi

    @contextlib.contextmanager
    def make_png(self, window):
        """Open a frame for *window*, yield for drawing, always close it."""
        self.init_png(window)
        try:
            yield
        finally:
            self.close_png(window)

    def init_png(self, window):
        """Create a fresh axis-less figure whose data limits match *window*."""
        print(f"Writing frame {self.plot_count} \r", end="")
        assert self._ax is None
        self._fig, self._ax = plt.subplots(
            figsize=(self.out_width, self.out_height), dpi=self._dpi
        )
        plt.axis("off")
        self._ax.set_xlim(window.start, window.end)
        self._ax.set_ylim(window.bottom, window.top)

    def close_png(self, window):
        """Save the current figure to its numbered PNG and reset frame state."""
        out_fname = self.png_fname_base.format(png_fnumber=self.plot_count + 1)
        self._fig.tight_layout()
        # savefig overrides any facecolor configured earlier, so it has to be
        # passed explicitly at save time.
        self._fig.savefig(
            out_fname,
            dpi="figure",
            facecolor=self.hex_color(window.bg_color),
        )
        plt.close(self._fig)
        self._fig = self._ax = None
        self.plot_count += 1

    @staticmethod
    def hex_color(color):
        """Format a 4-channel int color as '#rrggbbaa'.

        Raises ValueError if the channels are floats rather than ints.
        """
        return "#" + "".join(format(color[i], "02x") for i in range(4))

    def now_line(self, now, window):
        """Draw the vertical 'current time' cursor across the window height."""
        cursor = lines.Line2D([now, now], [window.bottom, window.top], zorder=30)
        self._ax.add_line(cursor)

    def plot_rect(self, x1, x2, y1, y2, color, zorder):
        """Draw a filled axis-aligned rectangle."""
        corners = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
        patch = patches.Polygon(
            xy=corners,
            color=self.hex_color(color),
            zorder=zorder,
        )
        self._ax.add_patch(patch)

    def plot_line(self, x1, x2, y1, y2, color, width, zorder):
        """Draw a straight line segment.

        matplotlib line widths look roughly twice as thick as the same value
        in R, hence the halving.  Patches default to zorder 1 and lines to 2;
        explicit zorders let lines sit behind patches when needed.
        """
        segment = lines.Line2D(
            [x1, x2],
            [y1, y2],
            color=self.hex_color(color),
            linewidth=width / 2,
            zorder=zorder,
        )
        self._ax.add_line(segment)

    def text(self, text, x, y, color, size, position=(0.5, 0), zorder=20):
        """Draw *text* anchored at (x, y).

        *size* mirrors R text()'s relative 'cex' scaling; multiplying by 8
        points was eyeballed to match the original output.  *position* picks
        the anchor via the x/y alignment tables.
        """
        self._ax.text(
            x,
            y,
            text,
            color=self.hex_color(color),
            fontsize=8 * size,
            horizontalalignment=self.x_alignments[position[0]],
            verticalalignment=self.y_alignments[position[1]],
            zorder=zorder,
        )

    def bracket(self, x1, x2, y1, y2, color, width, zorder):
        """Draw a square bracket: verticals at x1 and x2 joined along y2."""
        path = lines.Line2D(
            [x1, x1, x2, x2],
            [y1, y2, y2, y1],
            color=self.hex_color(color),
            linewidth=width / 2,
            zorder=zorder,
        )
        self._ax.add_line(path)

    def run(self):
        """No-op kept for interface compatibility with the previous R renderer."""
        pass
| [
"matplotlib.lines.Line2D",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.subplots"
] | [((1443, 1513), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(self.out_width, self.out_height)', 'dpi': 'self._dpi'}), '(figsize=(self.out_width, self.out_height), dpi=self._dpi)\n', (1455, 1513), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1559), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1552, 1559), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2073), 'matplotlib.pyplot.close', 'plt.close', (['self._fig'], {}), '(self._fig)\n', (2062, 2073), True, 'import matplotlib.pyplot as plt\n'), ((2386, 2450), 'matplotlib.lines.Line2D', 'lines.Line2D', (['[now, now]', '[window.bottom, window.top]'], {'zorder': '(30)'}), '([now, now], [window.bottom, window.top], zorder=30)\n', (2398, 2450), True, 'import matplotlib.lines as lines\n'), ((2587, 2637), 'numpy.array', 'np.array', (['[[x1, y1], [x2, y1], [x2, y2], [x1, y2]]'], {}), '([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])\n', (2595, 2637), True, 'import numpy as np\n')] |
import numpy as np
from engine.optimizers.base_sgd import BaseSGD
def square_loss(x, y, w, c):
    """Regularised total squared error: c * sum_i (w . x_i - y_i)^2 + ||w||^2 / 2."""
    residuals = [(np.dot(w.T, sample) - target) ** 2 for sample, target in zip(x, y)]
    return c * np.sum(residuals) + np.dot(w, w) / 2
def square_increment(x_i, y_i, w, c, eps):
    """Negative gradient step (learning rate *eps*) for one sample of the regularised square loss."""
    gradient = c * 2 * x_i * (np.dot(w.T, x_i) - y_i) + w
    return -eps * gradient
class SquareSGD(BaseSGD):
    """SGD specialisation for the L2-regularised square loss.

    Binds the hyper-parameters *c* (loss weight) and *eps* (step size) into
    the loss/increment closures handed to the BaseSGD machinery.
    """

    def __init__(self, c, eps):
        self.c = c
        self.eps = eps

        # Fixed: the original had a stray "\" line continuation after the
        # "def loss(...):" header, collapsing header and body onto one line.
        def loss(x, y, w):
            return square_loss(x, y, w, self.c)

        def increment(x_i, y_i, w):
            return square_increment(x_i, y_i, w, self.c, self.eps)

        super().__init__(loss, increment)
"numpy.dot"
] | [((183, 195), 'numpy.dot', 'np.dot', (['w', 'w'], {}), '(w, w)\n', (189, 195), True, 'import numpy as np\n'), ((280, 296), 'numpy.dot', 'np.dot', (['w.T', 'x_i'], {}), '(w.T, x_i)\n', (286, 296), True, 'import numpy as np\n'), ((121, 138), 'numpy.dot', 'np.dot', (['w.T', 'x[i]'], {}), '(w.T, x[i])\n', (127, 138), True, 'import numpy as np\n')] |
from collections import defaultdict
from copy import deepcopy
import time
import pandas as pd
import pickle
import os
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
from xgboost import XGBRegressor
import numpy as np
# One-hot dummy columns identifying a race: 22 circuit indicators plus the
# first two tyre-compound indicators.  get_current_circuit() scans this list
# to find which column is set for every lap of a race.
CIRCUIT_LIST = ["circuitId_1", "circuitId_2", "circuitId_3", "circuitId_4", "circuitId_6",
                "circuitId_7", "circuitId_9", "circuitId_10", "circuitId_11", "circuitId_13", "circuitId_14",
                "circuitId_15", "circuitId_17", "circuitId_18", "circuitId_22", "circuitId_24", "circuitId_32",
                "circuitId_34", "circuitId_69", "circuitId_70", "circuitId_71", "circuitId_73", "tyre_1", "tyre_2"]
def get_current_circuit(df: pd.DataFrame):
    """Return the first column from CIRCUIT_LIST that is set on every row of *df*."""
    match = next((column for column in CIRCUIT_LIST if df[column].all() == 1), None)
    if match is None:
        raise ValueError('Something wrong with the race dataframe, multiple circuits in the same race')
    return match
def fix_data_types(to_fix):
    """Cast every object-dtype column of *to_fix* to int, in place, and return it."""
    for column in to_fix.select_dtypes(include='object').columns:
        # str first so that mixed object content survives the int conversion
        to_fix[column] = to_fix[column].astype(str).astype(int)
    return to_fix
def load_dataset():
    """ Load the dataset and build the pandas dataframe """
    # TODO load from absolute path
    frame = pd.read_csv('./envs/race_strategy_model/dataset/finalDataset.csv', delimiter=',')
    # One-hot encode each categorical column and drop the original.
    for column in ('tyre', 'circuitId', 'year'):
        dummies = pd.get_dummies(frame[column], prefix=column)
        frame = pd.concat([frame, dummies], axis=1).drop([column], axis=1)
    return frame
def discard_wet(data: pd.DataFrame):
    """ Discard the wet races, as we don't have enough data to predict correctly
    the wetness and performance of the track"""
    races = data['raceId'].unique()
    for race in races:
        race_laps = data[data['raceId'] == race]
        # A race is wet if ANY lap used a wet compound (tyre_7 / tyre_8) or was
        # flagged rainy.  Fixed: the original test `col.all() == 0` actually
        # checked "not every lap is flagged", so races with only some wet laps
        # slipped through the filter.
        if race_laps['tyre_7'].any() or race_laps['tyre_8'].any() or race_laps['rainy'].any():
            data = data.loc[data['raceId'] != race]
    # Drop all wet-related information from the dataframe
    data = data.drop(columns=['tyre_7', 'tyre_8', 'rainy'])
    return data
def discard_suspended_races(data):
""" Remove races containing laps slower than double the pole lap, they have probably been interrupted """
# Divide by the pole time
data['nextLap'] = data['nextLap'] / data['pole']
# Find lap times abnormally high
anomalies = data[data['nextLap'] > 2]
races_to_discard = anomalies['raceId'].unique()
for race in races_to_discard:
data = data[data['raceId'] != race]
# Undo the pole time division
data['nextLap'] = data['nextLap'] * data['pole']
return data
class RaceStrategyModel(object):
    """Lap-time prediction models for F1 race-strategy simulation.

    Wraps three XGBoost regressors -- one each for regular laps, pit-stop laps
    and safety-car laps -- plus the MinMaxScaler used to normalize features
    and a per-circuit pit-stop time distribution extracted from the data.
    """

    def __init__(self, year: int, verbose=False, n_cores=1):
        print("XGB using {} threads".format(n_cores))
        self.regular_model = XGBRegressor(n_jobs=n_cores)
        self.pit_model = XGBRegressor(n_jobs=n_cores)
        self.safety_model = XGBRegressor(n_jobs=n_cores)
        self.test_race = None
        self.scaler = None
        self.test_race_pit_model = None
        self.dummy_columns = None
        self.n_cores = n_cores
        # self.start_lap = start_lap

        # Map the calendar year onto the one-hot "year_N" column of the dataset
        year_columns = {2014: "year_1", 2015: "year_2", 2016: "year_3",
                        2017: "year_4", 2018: "year_5", 2019: "year_6"}
        if year not in year_columns:
            raise ValueError("No race available for year " + str(year))
        self.year = year_columns[year]
        self.verbose = verbose

    def split_train_test(self, df: pd.DataFrame, split_fraction: float):
        """ Split the dataset randomly but keeping whole races together """
        test_data = pd.DataFrame(columns=df.columns)
        races = df[df[self.year] == 1]['raceId'].unique()
        if split_fraction != 0:
            split_size = int(round(split_fraction * len(races)))
        else:
            # Leave only one race out from the training
            split_size = 1
        test_races = np.random.choice(races, size=split_size)
        for race in test_races:
            race_laps = df.loc[df['raceId'] == race]
            # Fixed: DataFrame.append was removed in pandas 2.0; concat is the
            # equivalent replacement
            test_data = pd.concat([test_data, race_laps])
            df = df[df.raceId != race]
        return df, test_data

    def normalize_dataset(self, df):
        """ Normalize integer-valued columns of the dataset """
        data = df.copy()
        # Columns excluded from scaling: one-hot flags, targets and
        # bookkeeping columns keep their original values
        zero_one = ['battle', 'drs', "circuitId_1", "circuitId_2", "circuitId_3", "circuitId_4", "circuitId_6",
                    "circuitId_7", "circuitId_9", "circuitId_10", "circuitId_11", "circuitId_13", "circuitId_14",
                    "circuitId_15", "circuitId_17", "circuitId_18", "circuitId_22", "circuitId_24", "circuitId_32",
                    "circuitId_34", "circuitId_69", "circuitId_70", "circuitId_71", "circuitId_73", "tyre_1", "tyre_2",
                    "tyre_3", "tyre_4", "tyre_5", "tyre_6",
                    "year_1", "year_2", "year_3", "year_4", "year_5", "year_6", "nextLap", 'pit', 'safety', "unnorm_lap"]
        temp_df = data[zero_one].copy()
        data.drop(zero_one, axis=1, inplace=True)
        if not self.scaler:
            # First call (training data): fit the scaler but keep the data
            # unscaled, preserving the original pipeline's behaviour
            self.scaler = MinMaxScaler(feature_range=(-1, 1))
            self.scaler.fit(data)
            scaled = data
        else:
            scaled = self.scaler.transform(data)
        data.loc[:, :] = scaled
        data = data.join(temp_df)
        del temp_df
        return data

    def __process_dataset(self, dataset):
        """ Pre-process the dataset to obtain training data and its labels"""
        # Discard wet and suspended races
        old_races = len(dataset['raceId'].unique())
        dataset = discard_wet(dataset)
        dataset = discard_suspended_races(dataset)
        new_races = len(dataset['raceId'].unique())
        if self.verbose:
            print("{} wet and suspended races were discarded".format(old_races - new_races))
        # Eliminate the last lap from the training data, as it has 0 target
        dataset = dataset[dataset['nextLap'] > 0]
        # Express the next lap target as a delta to the pole lap
        dataset['nextLap'] = (dataset['nextLap'] - dataset['pole'])
        # Duplicate columns to use them after normalization
        dataset['base'] = dataset['pole'].astype(int)
        dataset['true'] = dataset['milliseconds'].astype(int)
        dataset['true_cumulative'] = dataset['cumulative'].astype(int)
        # Remove the duplicated unnormalized columns from the train data
        dataset = dataset.drop(columns=['base', 'true', 'true_cumulative'])
        dataset = self.normalize_dataset(dataset)
        _, self.test_race = self.split_train_test(dataset, split_fraction=0)
        self.__compute_pitstop_model(dataset)
        self.dummy_columns = dataset.columns
        # NOTE(review): the dataset goes through normalize_dataset a second
        # time here (the scaler is already fitted after the first call) --
        # preserved as-is to avoid changing trained-model behaviour
        train_data = self.normalize_dataset(dataset)
        # Remove columns used only to identify the laps in testing
        train_data = train_data.drop(columns=['unnorm_lap', "raceId", "driverId", "race_length"])
        # Split the dataset into three separate datasets, one per each model to be trained
        train_pit = deepcopy(train_data.loc[train_data['pit'] != 0])
        train_safety = deepcopy(train_data.loc[(train_data['safety'] != 0) & (train_data['pit'] == 0)])
        train_regular = deepcopy(train_data.loc[(train_data['pit'] == 0) & (train_data['safety'] == 0)])
        # Remove features related to pit and safety in the "regular" laps model
        train_regular = train_regular.drop(columns=['safety', 'pit', 'pit-cost', 'pitstop-milliseconds'])
        # Extract the target labels
        labels_pit = train_pit.pop('nextLap')
        labels_safety = train_safety.pop('nextLap')
        labels_regular = train_regular.pop('nextLap')
        train_data = {'regular': train_regular, 'safety': train_safety, 'pit': train_pit}
        labels = {'regular': labels_regular, 'safety': labels_safety, 'pit': labels_pit}
        return train_data, labels

    def __compute_pitstop_model(self, full_dataset: pd.DataFrame):
        """Compute a normal distribution's parameters for each driver's pit-stop times"""
        circuit = get_current_circuit(self.test_race)
        pits = []
        pits_safety = []
        stop_laps = full_dataset[(full_dataset['pitstop-milliseconds'] > 0) & (full_dataset[circuit] == 1)].sort_values('lap')
        pit_times = stop_laps[stop_laps['safety'] == 0]['pitstop-milliseconds'].values
        pit_safety_times = stop_laps[stop_laps['safety'] > 0]['pitstop-milliseconds'].values
        pits.extend(pit_times.tolist())
        pits_safety.extend(pit_safety_times.tolist())
        # Guard against circuits with no recorded stops of either kind
        safety_mean = np.mean(pit_safety_times) if len(pit_safety_times) > 0 else 0
        safety_std = np.std(pit_safety_times) if len(pit_safety_times) > 0 else 0
        mean = np.mean(pit_times) if len(pit_times) > 0 else 0
        std = np.std(pit_times) if len(pit_times) > 0 else 0
        self.test_race_pit_model = {'regular': (mean, std), 'safety': (safety_mean, safety_std)}

    def train(self):
        """ Train the regression models """
        if self.verbose:
            print('Training models...')
        # Invalidate the scaler so it is re-fitted on the fresh training data
        self.scaler = None
        if self.verbose:
            print("Model uses {} cores".format(self.n_cores))
        dataset = load_dataset()
        datasets, labels = self.__process_dataset(dataset)
        self.regular_model.fit(datasets['regular'], labels['regular'])
        self.pit_model.fit(datasets['pit'], labels['pit'])
        self.safety_model.fit(datasets['safety'], labels['safety'])
        if self.verbose:
            print('Done!\n')

    def resplit(self):
        """Re-run the preprocessing to draw a fresh train/test race split."""
        # TODO fix the invalidation of scaler to avoid the normalization of test races
        self.scaler = None
        dataset = load_dataset()
        self.__process_dataset(dataset)
        # Fixed: the result was assigned to the unused attribute `_test_race`,
        # leaving `test_race` with its object-typed columns
        self.test_race = fix_data_types(self.test_race)
        self.laps_database = defaultdict(lambda: None)
        self.race_id = self.test_race["raceId"].values[0]
        for i in range(self.test_race["lap"].count()):
            row = self.test_race.iloc[[i]]
            self.laps_database[(row["driverId"].values[0], row["lap"].values[0])] = row

    def load(self):
        """ Restore prediction models from previously pickled files to avoid retraining """
        if self.verbose:
            print("Loading prediction models from pickled files...")
        # One file per model, mirroring save()
        for name, model in (('regular', self.regular_model),
                            ('safety', self.safety_model),
                            ('pit', self.pit_model)):
            path = './envs/race_strategy_model/pickled_models/{}.model'.format(name)
            if not os.path.isfile(path):
                print("ERROR: {}.model is missing".format(name))
                exit(-1)
            model.load_model(path)
        if not os.path.isfile("./envs/race_strategy_model/pickled_models/scaler.pickle"):
            print("ERROR: scaler.pickle is missing")
            exit(-1)
        else:
            with open('./envs/race_strategy_model/pickled_models/scaler.pickle', 'rb') as scaler_file:
                self.scaler = pickle.load(scaler_file)
                scaler_file.close()
        if self.verbose:
            print("Done!\n")
        print(self.regular_model.get_params())

    def save(self):
        """ Pickle the model objects to avoid retraining """
        for model, name in zip([self.regular_model, self.safety_model, self.pit_model],
                               ['regular', 'safety', 'pit']):
            model.save_model('./envs/race_strategy_model/pickled_models/{}.model'.format(name))
        with open('./envs/race_strategy_model/pickled_models/scaler.pickle', 'wb') as savefile:
            pickle.dump(self.scaler, savefile)
            savefile.close()

    def predict(self, state, lap_type):
        """Predict the next lap-time delta for *state* with the model for *lap_type*."""
        if lap_type == 'regular':
            # Fixed: drop() is not in-place -- the result must be reassigned so
            # the regular model receives the feature set it was trained on
            state = state.drop(columns=['safety', 'pit', 'pit-cost', 'pitstop-milliseconds'])
            return self.regular_model.predict(state)
        elif lap_type == 'pit':
            # Fixed: pit laps are scored by the pit model (was regular_model)
            return self.pit_model.predict(state)
        else:
            return self.safety_model.predict(state)

    def get_prediction_model(self, state: str):
        """Return the regressor associated with the given lap state."""
        if state == 'regular':
            return self.regular_model
        if state == 'safety':
            return self.safety_model
        if state == 'pit':
            return self.pit_model
        else:
            raise ValueError("The specified state is not valid, allowed model states are 'regular', 'safety' and 'pit'")
# Script entry point: train the 2019-season models from the CSV dataset and
# pickle the fitted regressors and scaler to disk.
if __name__ == '__main__':
    model = RaceStrategyModel(2019)
    #model.load()
    #model.resplit()
    model.train()
    model.save()
    print(model.test_race['driverId'].unique())
| [
"pandas.DataFrame",
"copy.deepcopy",
"pickle.dump",
"pandas.read_csv",
"numpy.std",
"pandas.get_dummies",
"sklearn.preprocessing.MinMaxScaler",
"collections.defaultdict",
"os.path.isfile",
"numpy.mean",
"pickle.load",
"xgboost.XGBRegressor",
"numpy.random.choice"
] | [((1230, 1315), 'pandas.read_csv', 'pd.read_csv', (['"""./envs/race_strategy_model/dataset/finalDataset.csv"""'], {'delimiter': '""","""'}), "('./envs/race_strategy_model/dataset/finalDataset.csv',\n delimiter=',')\n", (1241, 1315), True, 'import pandas as pd\n'), ((3206, 3234), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'n_jobs': 'n_cores'}), '(n_jobs=n_cores)\n', (3218, 3234), False, 'from xgboost import XGBRegressor\n'), ((3260, 3288), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'n_jobs': 'n_cores'}), '(n_jobs=n_cores)\n', (3272, 3288), False, 'from xgboost import XGBRegressor\n'), ((3317, 3345), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'n_jobs': 'n_cores'}), '(n_jobs=n_cores)\n', (3329, 3345), False, 'from xgboost import XGBRegressor\n'), ((4187, 4219), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (4199, 4219), True, 'import pandas as pd\n'), ((4496, 4536), 'numpy.random.choice', 'np.random.choice', (['races'], {'size': 'split_size'}), '(races, size=split_size)\n', (4512, 4536), True, 'import numpy as np\n'), ((8210, 8258), 'copy.deepcopy', 'deepcopy', (["train_data.loc[train_data['pit'] != 0]"], {}), "(train_data.loc[train_data['pit'] != 0])\n", (8218, 8258), False, 'from copy import deepcopy\n'), ((8282, 8367), 'copy.deepcopy', 'deepcopy', (["train_data.loc[(train_data['safety'] != 0) & (train_data['pit'] == 0)]"], {}), "(train_data.loc[(train_data['safety'] != 0) & (train_data['pit'] == 0)]\n )\n", (8290, 8367), False, 'from copy import deepcopy\n'), ((8387, 8472), 'copy.deepcopy', 'deepcopy', (["train_data.loc[(train_data['pit'] == 0) & (train_data['safety'] == 0)]"], {}), "(train_data.loc[(train_data['pit'] == 0) & (train_data['safety'] == 0)]\n )\n", (8395, 8472), False, 'from copy import deepcopy\n'), ((11189, 11215), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (11200, 11215), False, 'from collections import defaultdict\n'), ((5995, 6030), 
'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (6007, 6030), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((9742, 9767), 'numpy.mean', 'np.mean', (['pit_safety_times'], {}), '(pit_safety_times)\n', (9749, 9767), True, 'import numpy as np\n'), ((9825, 9849), 'numpy.std', 'np.std', (['pit_safety_times'], {}), '(pit_safety_times)\n', (9831, 9849), True, 'import numpy as np\n'), ((9902, 9920), 'numpy.mean', 'np.mean', (['pit_times'], {}), '(pit_times)\n', (9909, 9920), True, 'import numpy as np\n'), ((9964, 9981), 'numpy.std', 'np.std', (['pit_times'], {}), '(pit_times)\n', (9970, 9981), True, 'import numpy as np\n'), ((11684, 11757), 'os.path.isfile', 'os.path.isfile', (['"""./envs/race_strategy_model/pickled_models/regular.model"""'], {}), "('./envs/race_strategy_model/pickled_models/regular.model')\n", (11698, 11757), False, 'import os\n'), ((11964, 12036), 'os.path.isfile', 'os.path.isfile', (['"""./envs/race_strategy_model/pickled_models/safety.model"""'], {}), "('./envs/race_strategy_model/pickled_models/safety.model')\n", (11978, 12036), False, 'import os\n'), ((12240, 12309), 'os.path.isfile', 'os.path.isfile', (['"""./envs/race_strategy_model/pickled_models/pit.model"""'], {}), "('./envs/race_strategy_model/pickled_models/pit.model')\n", (12254, 12309), False, 'import os\n'), ((12504, 12577), 'os.path.isfile', 'os.path.isfile', (['"""./envs/race_strategy_model/pickled_models/scaler.pickle"""'], {}), "('./envs/race_strategy_model/pickled_models/scaler.pickle')\n", (12518, 12577), False, 'import os\n'), ((13937, 13971), 'pickle.dump', 'pickle.dump', (['self.scaler', 'savefile'], {}), '(self.scaler, savefile)\n', (13948, 13971), False, 'import pickle\n'), ((12800, 12824), 'pickle.load', 'pickle.load', (['scaler_file'], {}), '(scaler_file)\n', (12811, 12824), False, 'import pickle\n'), ((1359, 1411), 'pandas.get_dummies', 'pd.get_dummies', (["data_complete['tyre']"], {'prefix': 
'"""tyre"""'}), "(data_complete['tyre'], prefix='tyre')\n", (1373, 1411), True, 'import pandas as pd\n'), ((1500, 1562), 'pandas.get_dummies', 'pd.get_dummies', (["data_complete['circuitId']"], {'prefix': '"""circuitId"""'}), "(data_complete['circuitId'], prefix='circuitId')\n", (1514, 1562), True, 'import pandas as pd\n'), ((1677, 1729), 'pandas.get_dummies', 'pd.get_dummies', (["data_complete['year']"], {'prefix': '"""year"""'}), "(data_complete['year'], prefix='year')\n", (1691, 1729), True, 'import pandas as pd\n')] |
from control import lqr
import numpy as np
# -------------------------------------------------------------------------------------------------
def Correction2D(K):
    """Zero out, in place, every entry of the 2-D gain matrix K with magnitude below 1e-6."""
    for row in K:
        for col, value in enumerate(row):
            if abs(value) < 1e-6:
                row[col] = 0
    return K
# -------------------------------------------------------------------------------------------------
def Correction(K):
    """Zero out, in place, every entry of the 1-D gain vector K with magnitude below 1e-3."""
    for idx, value in enumerate(K):
        if abs(value) < 1e-3:
            K[idx] = 0
    return K
# -------------------------------------------------------------------------------------------------
def hover(g, m, Ix, Iy, Iz):
    """LQR gain matrix for the hover linearisation (12 states, 4 inputs).

    The state and input matrices are sparse, so they are assembled by
    assigning the few non-zero entries instead of spelling out full literals.
    """
    A = np.zeros((12, 12))
    A[0, 3] = A[1, 4] = A[2, 5] = 1.0
    A[6, 1] = -g
    A[7, 0] = g
    A[9, 6] = A[10, 7] = A[11, 8] = 1.0

    B = np.zeros((12, 4))
    B[3, 1] = 1.0 / Ix
    B[4, 2] = 1.0 / Iy
    B[5, 3] = 1.0 / Iz
    B[8, 0] = -1.0 / m

    # State weights: 5 on the first three states, 10 on the last three.
    Q = np.diag([5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0])
    # Uniform input weight of 100.
    R = 100.0 * np.eye(4)

    # Solve the LQR problem and clean up numerical noise in the gain.
    k, S, E = lqr(A, B, Q, R)
    return np.array(Correction2D(k))
# -------------------------------------------------------------------------------------------------
def cruise(g, m, Ix, Iy, Iz, Cd, u_max):
    """LQR gain schedule for forward flight at 10 evenly spaced speeds up to u_max.

    Returns a nested list of shape (10, 4, 9): one (4, 9) gain matrix per
    speed point.
    """
    gains = np.zeros((10, 4, 9))
    for idx in range(10):
        u = u_max * (idx + 1) / 10
        # Pitch angle and vertical-rate term at this speed -- presumably the
        # drag-balancing trim condition; derivation not shown here.
        theta = np.arcsin(-Cd * (u ** 2) / (m * g))
        w = u * np.tan(theta)

        # Sparse assembly of the 9-state linearised dynamics.
        A = np.zeros((9, 9))
        A[0, 3] = 1.0
        A[0, 5] = np.tan(theta)
        A[1, 4] = 1.0
        A[2, 5] = 1.0 / np.cos(theta)
        A[6, 1] = -g * np.cos(theta)
        A[6, 4] = -w
        A[6, 6] = -2 * Cd * u / m
        A[7, 0] = g * np.cos(theta)
        A[7, 3] = w
        A[7, 5] = -u
        A[8, 1] = -g * np.sin(theta)
        A[8, 4] = u
        A[8, 8] = 2 * Cd * w / m

        B = np.zeros((9, 4))
        B[3, 1] = 1.0 / Ix
        B[4, 2] = 1.0 / Iy
        B[5, 3] = 1.0 / Iz
        B[8, 0] = -1.0 / m

        Q = np.diag([5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 10.0, 1.0, 5.0])
        R = np.eye(4)

        K, S, E = lqr(A, B, Q, R)
        gains[idx] = np.array(Correction2D(K))
    return gains.tolist()
# -------------------------------------------------------------------------------------------------
def spin():
    """Placeholder for a spin-manoeuvre controller; not implemented yet."""
    return None
| [
"control.lqr",
"numpy.zeros",
"numpy.arcsin",
"numpy.tan",
"numpy.sin",
"numpy.cos"
] | [((2379, 2394), 'control.lqr', 'lqr', (['A', 'B', 'Q', 'R'], {}), '(A, B, Q, R)\n', (2382, 2394), False, 'from control import lqr\n'), ((2641, 2661), 'numpy.zeros', 'np.zeros', (['(10, 4, 9)'], {}), '((10, 4, 9))\n', (2649, 2661), True, 'import numpy as np\n'), ((2714, 2747), 'numpy.arcsin', 'np.arcsin', (['(-Cd * u ** 2 / (m * g))'], {}), '(-Cd * u ** 2 / (m * g))\n', (2723, 2747), True, 'import numpy as np\n'), ((4468, 4483), 'control.lqr', 'lqr', (['A', 'B', 'Q', 'R'], {}), '(A, B, Q, R)\n', (4471, 4483), False, 'from control import lqr\n'), ((2750, 2763), 'numpy.tan', 'np.tan', (['theta'], {}), '(theta)\n', (2756, 2763), True, 'import numpy as np\n'), ((2856, 2869), 'numpy.tan', 'np.tan', (['theta'], {}), '(theta)\n', (2862, 2869), True, 'import numpy as np\n'), ((3045, 3058), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3051, 3058), True, 'import numpy as np\n'), ((3390, 3403), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3396, 3403), True, 'import numpy as np\n'), ((3465, 3478), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3471, 3478), True, 'import numpy as np\n'), ((3576, 3589), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3582, 3589), True, 'import numpy as np\n')] |
"""PMSG_disc.py
Created by <NAME>, <NAME>.
Copyright (c) NREL. All rights reserved.
Electromagnetic design based on conventional magnetic circuit laws
Structural design based on McDonald's thesis """
from openmdao.api import Group, Problem, Component,ExecComp,IndepVarComp,ScipyOptimizer,pyOptSparseDriver
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
from openmdao.drivers import *
import numpy as np
from numpy import array,float,min,sign
from math import pi, cos,cosh, sqrt, radians, sin,sinh, exp, log10, log, tan, atan
import pandas as pd
class PMSG(Component):
""" Estimates overall mass dimensions and Efficiency of PMSG -disc rotor generator. """
def __init__(self):
super(PMSG, self).__init__()
# PMSG_disc generator design inputs
self.add_param('r_s', val=0.0, units ='m', desc='airgap radius r_s')
self.add_param('l_s', val=0.0, units ='m', desc='Stator core length l_s')
self.add_param('h_s', val=0.0, units ='m', desc='Yoke height h_s')
self.add_param('tau_p',val=0.0, units ='m', desc='Pole pitch self.tau_p')
self.add_param('machine_rating',val=0.0, units ='W', desc='Machine rating')
self.add_param('n_nom',val=0.0, units ='rpm', desc='rated speed')
self.add_param('Torque',val=0.0, units ='Nm', desc='Rated torque ')
self.add_param('h_m',val=0.0, units ='m', desc='magnet height')
self.add_param('h_ys',val=0.0, units ='m', desc='Yoke height')
self.add_param('h_yr',val=0.0, units ='m', desc='rotor yoke height')
# structural design variables
self.add_param('n_s' ,val=0.0, desc='number of stator arms n_s')
self.add_param('b_st' , val=0.0, units ='m', desc='arm width b_st')
self.add_param('d_s',val=0.0,units ='m', desc='arm depth d_s')
self.add_param('t_ws' ,val=0.0,units ='m', desc='arm depth thickness ')
self.add_param('t_d',val=0.0, units='m', desc='disc thickness')
self.add_param('R_o',val=0.0, units ='m',desc='Shaft radius')
# PMSG_disc generator design outputs
# Magnetic loading
self.add_output('B_symax' ,val=0.0, desc='Peak Stator Yoke flux density B_ymax')
self.add_output('B_tmax',val=0.0, desc='Peak Teeth flux density')
self.add_output('B_rymax',val=0.0, desc='Peak Rotor yoke flux density')
self.add_output('B_smax',val=0.0, desc='Peak Stator flux density')
self.add_output('B_pm1',val=0.0, desc='Fundamental component of peak air gap flux density')
self.add_output('B_g' ,val=0.0, desc='Peak air gap flux density B_g')
#Stator design
self.add_output('N_s' ,val=0.0, desc='Number of turns in the stator winding')
self.add_output('b_s',val=0.0, desc='slot width')
self.add_output('b_t',val=0.0, desc='tooth width')
self.add_output('A_Cuscalc',val=0.0, desc='Conductor cross-section mm^2')
#Rotor magnet dimension
self.add_output('b_m',val=0.0, desc='magnet width')
self.add_output('p',val=0.0, desc='No of pole pairs')
# Electrical performance
self.add_output('E_p',val=0.0, desc='Stator phase voltage')
self.add_output('f',val=0.0, desc='Generator output frequency')
self.add_output('I_s',val=0.0, desc='Generator output phase current')
self.add_output('R_s',val=0.0, desc='Stator resistance')
self.add_output('L_s',val=0.0, desc='Stator synchronising inductance')
self.add_output('A_1' ,val=0.0, desc='Electrical loading')
self.add_output('J_s',val=0.0, desc='Current density')
# Objective functions
self.add_output('Mass',val=0.0, desc='Actual mass')
self.add_output('K_rad',val=0.0, desc='K_rad')
self.add_output('Losses',val=0.0, desc='Total loss')
self.add_output('gen_eff',val=0.0, desc='Generator efficiency')
# Structural performance
self.add_output('u_Ar',val=0.0, desc='Rotor radial deflection')
self.add_output('y_Ar',val=0.0, desc='Rotor axial deflection')
self.add_output('u_As',val=0.0, desc='Stator radial deflection')
self.add_output('y_As',val=0.0, desc='Stator axial deflection')
self.add_output('z_A_s',val=0.0, desc='Stator circumferential deflection')
self.add_output('u_all_r',val=0.0, desc='Allowable radial rotor')
self.add_output('u_all_s',val=0.0, desc='Allowable radial stator')
self.add_output('y_all',val=0.0, desc='Allowable axial')
self.add_output('z_all_s',val=0.0, desc='Allowable circum stator')
self.add_output('z_all_r',val=0.0, desc='Allowable circum rotor')
self.add_output('b_all_s',val=0.0, desc='Allowable arm')
self.add_output('TC1',val=0.0, desc='Torque constraint')
self.add_output('TC2',val=0.0, desc='Torque constraint-rotor')
self.add_output('TC3',val=0.0, desc='Torque constraint-stator')
# Other parameters
self.add_output('R_out',val=0.0, desc='Outer radius')
self.add_output('S' ,val=0.0, desc='Stator slots')
self.add_output('Slot_aspect_ratio',val=0.0, desc='Slot aspect ratio')
# Mass Outputs
self.add_output('mass_PM',val=0.0, desc='Magnet mass')
self.add_output('Copper',val=0.0, desc='Copper Mass')
self.add_output('Iron',val=0.0, desc='Electrical Steel Mass')
self.add_output('Structural_mass' ,val=0.0, desc='Structural Mass')
# Material properties
self.add_param('rho_Fes',val=0.0,units='kg*m**-3', desc='Structural Steel density ')
self.add_param('rho_Fe',val=0.0,units='kg*m**-3', desc='Magnetic Steel density ')
self.add_param('rho_Copper',val=0.0,units='kg*m**-3', desc='Copper density ')
self.add_param('rho_PM',val=0.0,units='kg*m**-3', desc='Magnet density ')
#inputs/outputs for interface with drivese
self.add_param('main_shaft_cm',val= np.array([0.0, 0.0, 0.0]), desc='Main Shaft CM')
self.add_param('main_shaft_length',val=0.0, desc='main shaft length')
self.add_output('I',val=np.array([0.0, 0.0, 0.0]),desc='Moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('cm', val=np.array([0.0, 0.0, 0.0]),desc='COM [x,y,z]')
self.gen_sizing = generator_sizing()
 def solve_nonlinear(self, inputs, outputs, resid):
  """Run the analytical sizing model and copy its results into ``outputs``.

  Delegates all computation to ``generator_sizing.compute``; the 45-element
  result tuple is unpacked positionally, so the ordering here MUST stay in
  lockstep with the ``return`` statement of ``generator_sizing.compute``.
  """
  # NOTE: positional unpack — do not reorder any of these keys independently
  # of generator_sizing.compute's return tuple.
  (outputs['B_symax'], outputs['B_tmax'], outputs['B_rymax'], outputs['B_smax'], outputs['B_pm1'], outputs['B_g'], outputs['N_s'], outputs['b_s'], \
  outputs['b_t'], outputs['A_Cuscalc'], outputs['b_m'], outputs['p'], outputs['E_p'], outputs['f'], \
  outputs['I_s'], outputs['R_s'], outputs['L_s'], outputs['A_1'], outputs['J_s'], outputs['Mass'],\
  outputs['K_rad'],outputs['Losses'], outputs['gen_eff'], outputs['u_Ar'], outputs['y_Ar'], \
  outputs['u_As'], outputs['y_As'], outputs['z_A_s'], outputs['u_all_r'], outputs['u_all_s'], \
  outputs['y_all'], outputs['z_all_s'],outputs['z_all_r'], outputs['b_all_s'], outputs['TC1'], \
  outputs['TC2'], outputs['TC3'], outputs['R_out'], outputs['S'],outputs['Slot_aspect_ratio'],\
  outputs['cm'], outputs['I'],outputs['mass_PM'],outputs['Copper'],outputs['Iron'],outputs['Structural_mass'])\
  = self.gen_sizing.compute(inputs['r_s'], inputs['l_s'], inputs['h_s'], inputs['tau_p'], inputs['machine_rating'],
  inputs['n_nom'], inputs['Torque'], inputs['h_m'],inputs['h_ys'], inputs['h_yr'],inputs['rho_Fe'], inputs['rho_Copper'],inputs['b_st'], inputs['d_s'], \
  inputs['t_ws'], inputs['n_s'], inputs['t_d'],\
  inputs['R_o'], inputs['rho_Fes'],inputs['rho_PM'],inputs['main_shaft_cm'],inputs['main_shaft_length'])
  return outputs
class generator_sizing(object):
    """Analytical electromagnetic and structural sizing of a disc-rotor PMSG.

    ``compute`` takes the machine geometry, rating and material densities and
    returns a 45-element tuple of flux densities, electrical quantities,
    deflections, constraint values, masses and inertias.  The caller
    (``PMSG.solve_nonlinear``) unpacks this tuple positionally, so the order
    of the ``return`` statement must not change.

    Fix vs. original: ``(1/12)`` in the inertia term and ``(1/16)`` in the
    plate constant ``L_14`` were integer divisions that evaluate to 0 under
    Python 2 (this code base is py2-era), silently zeroing those terms; they
    are written as ``1.0/12`` and ``1.0/16`` so the result is correct under
    both Python 2 and 3.
    """

    def __init__(self):
        pass

    def compute(self, r_s, l_s, h_s, tau_p, machine_rating, n_nom, Torque, h_m, h_ys, h_yr,
                rho_Fe, rho_Copper, b_st, d_s, t_ws, n_s, t_d, R_o, rho_Fes, rho_PM,
                main_shaft_cm, main_shaft_length):
        """Size the generator; returns the 45-tuple consumed by PMSG.solve_nonlinear."""
        # Retain inputs as attributes (legacy interface relied on elsewhere)
        self.r_s = r_s
        self.l_s = l_s
        self.h_s = h_s
        self.tau_p = tau_p
        self.h_m = h_m
        self.h_ys = h_ys
        self.h_yr = h_yr
        self.machine_rating = machine_rating
        self.n_nom = n_nom
        self.Torque = Torque
        self.b_st = b_st
        self.d_s = d_s
        self.t_ws = t_ws
        self.n_s = n_s
        self.t_d = t_d
        self.R_o = R_o
        self.rho_Fe = rho_Fe
        self.rho_Copper = rho_Copper
        self.rho_Fes = rho_Fes
        self.rho_PM = rho_PM
        self.main_shaft_cm = main_shaft_cm
        self.main_shaft_length = main_shaft_length
        # Assign values to universal constants
        B_r = 1.2                  # Tesla remnant flux density
        g1 = 9.81                  # m/s^2 acceleration due to gravity
        E = 2e11                   # N/m^2 young's modulus
        sigma = 40000.0            # shear stress assumed
        ratio = 0.7                # ratio of magnet width to pole pitch(bm/self.tau_p)
        mu_0 = pi*4e-7             # permeability of free space
        mu_r = 1.06                # relative permeability
        phi = 90*2*pi/360          # tilt angle (rotor tilt -90 degrees during transportation)
        cofi = 0.85                # power factor
        # Assign values to design constants
        h_w = 0.005                # wedge height
        y_tau_p = 1.0              # coil span to pole pitch
        m = 3.0                    # no of phases
        q1 = 1.0                   # no of slots per pole per phase
        b_s_tau_s = 0.45           # slot width/slot pitch ratio
        k_sfil = 0.65              # Slot fill factor
        P_Fe0h = 4.0               # specific hysteresis losses W/kg @ 1.5 T
        P_Fe0e = 1.0               # specific eddy losses W/kg @ 1.5 T
        rho_Cu = 1.8*10**(-8)*1.4  # resistivity of copper
        b_so = 0.004               # stator slot opening
        k_fes = 0.9                # useful iron stack length
        T = self.Torque
        v = 0.3                    # poisson's ratio
        # back iron thickness for rotor and stator
        self.t_s = self.h_ys
        self.t = self.h_yr
        # Aspect ratio
        self.K_rad = self.l_s/(2*self.r_s)
        # ------------------- Electromagnetic design -------------------
        dia = 2*self.r_s            # air gap diameter
        g = 0.001*dia               # air gap length
        self.b_m = 0.7*self.tau_p   # magnet width
        l_u = k_fes*self.l_s        # useful iron stack length
        We = self.tau_p
        l_b = 2*self.tau_p          # end winding length
        l_e = self.l_s+2*0.001*self.r_s  # equivalent core length
        r_r = self.r_s-g            # rotor radius
        self.p = round(pi*dia/(2*self.tau_p))  # pole pairs
        self.f = self.n_nom*self.p/60          # frequency
        self.S = 2*self.p*q1*m                 # Stator slots
        N_conductors = self.S*2
        self.N_s = N_conductors/2/3            # Stator turns per phase
        tau_s = pi*dia/self.S                  # slot pitch
        self.b_s = b_s_tau_s*tau_s             # slot width
        self.b_t = tau_s-(self.b_s)            # tooth width
        self.Slot_aspect_ratio = self.h_s/self.b_s
        alpha_p = pi/2*0.7
        # Carter factor for stator and effective air gap length
        gamma = 4/pi*(b_so/2/(g+self.h_m/mu_r)*atan(b_so/2/(g+self.h_m/mu_r))-log(sqrt(1+(b_so/2/(g+self.h_m/mu_r))**2)))
        k_C = tau_s/(tau_s-gamma*(g+self.h_m/mu_r))  # carter coefficient
        g_eff = k_C*(g+self.h_m/mu_r)
        # angular frequency in radians
        om_m = 2*pi*self.n_nom/60
        om_e = self.p*om_m/2
        # Calculating magnetic loading
        self.B_pm1 = B_r*self.h_m/mu_r/(g_eff)
        self.B_g = B_r*self.h_m/mu_r/(g_eff)*(4.0/pi)*sin(alpha_p)
        self.B_symax = self.B_g*self.b_m*l_e/(2*self.h_ys*l_u)
        self.B_rymax = self.B_g*self.b_m*l_e/(2*self.h_yr*self.l_s)
        self.B_tmax = self.B_g*tau_s/self.b_t
        k_wd = sin(pi/6)/q1/sin(pi/6/q1)  # winding factor
        L_t = self.l_s+2*self.tau_p
        # Stator winding length, cross-section and resistance
        l_Cus = 2*(self.N_s)*(2*self.tau_p+L_t)
        A_s = self.b_s*(self.h_s-h_w)*q1*self.p
        A_scalc = self.b_s*1000*(self.h_s*1000-h_w*1000)*q1*self.p
        A_Cus = A_s*k_sfil/(self.N_s)
        self.A_Cuscalc = A_scalc*k_sfil/(self.N_s)
        self.R_s = l_Cus*rho_Cu/A_Cus
        # Leakage inductances in stator
        L_m = 2*m*k_wd**2*(self.N_s)**2*mu_0*self.tau_p*L_t/pi**2/g_eff/self.p
        L_ssigmas = 2*mu_0*self.l_s*self.N_s**2/self.p/q1*((self.h_s-h_w)/(3*self.b_s)+h_w/b_so)  # slot leakage
        L_ssigmaew = (2*mu_0*self.l_s*self.N_s**2/self.p/q1)*0.34*g*(l_e-0.64*self.tau_p*y_tau_p)/self.l_s  # end winding leakage
        L_ssigmag = 2*mu_0*self.l_s*self.N_s**2/self.p/q1*(5*(g*k_C/b_so)/(5+4*(g*k_C/b_so)))  # tooth tip leakage
        L_ssigma = (L_ssigmas+L_ssigmaew+L_ssigmag)
        self.L_s = L_m+L_ssigma
        # No-load voltage induced in the stator and stator current
        self.E_p = 2*(self.N_s)*L_t*self.r_s*k_wd*om_m*self.B_g/sqrt(2)
        Z = (self.machine_rating/(m*self.E_p))
        G = (self.E_p**2-(om_e*self.L_s*Z)**2)
        self.I_s = sqrt(Z**2+(((self.E_p-G**0.5)/(om_e*self.L_s)**2)**2))
        self.B_smax = sqrt(2)*self.I_s*mu_0/g_eff
        # Stator current density and electrical loading
        self.J_s = self.I_s/self.A_Cuscalc
        I_snom = (self.machine_rating/m/self.E_p/cofi)  # rated current
        I_qnom = self.machine_rating/(m*self.E_p)
        X_snom = om_e*(L_m+L_ssigma)
        self.A_1 = 6*self.N_s*self.I_s/(pi*dia)
        # Electromagnetically active mass
        V_Cus = m*l_Cus*A_Cus                                                        # copper volume
        V_Fest = L_t*2*self.p*q1*m*self.b_t*self.h_s                                 # iron volume, stator tooth
        V_Fesy = L_t*pi*((self.r_s+self.h_s+self.h_ys)**2-(self.r_s+self.h_s)**2)    # iron volume, stator yoke
        V_Fery = L_t*pi*((r_r-self.h_m)**2-(r_r-self.h_m-self.h_yr)**2)              # iron volume, rotor yoke
        self.Copper = V_Cus*self.rho_Copper
        M_Fest = V_Fest*self.rho_Fe    # mass of stator tooth
        M_Fesy = V_Fesy*self.rho_Fe    # mass of stator yoke
        M_Fery = V_Fery*self.rho_Fe    # mass of rotor yoke
        self.Iron = M_Fest+M_Fesy+M_Fery
        # Losses: copper
        K_R = 1.2  # Skin effect correction co-efficient
        P_Cu = m*I_snom**2*self.R_s*K_R
        # Iron losses (hysteresis and eddy currents)
        P_Hyys = M_Fesy*(self.B_symax/1.5)**2*(P_Fe0h*om_e/(2*pi*60))         # hysteresis, stator yoke
        P_Ftys = M_Fesy*((self.B_symax/1.5)**2)*(P_Fe0e*(om_e/(2*pi*60))**2)  # eddy, stator yoke
        P_Fesynom = P_Hyys+P_Ftys
        P_Hyd = M_Fest*(self.B_tmax/1.5)**2*(P_Fe0h*om_e/(2*pi*60))           # hysteresis, stator teeth
        P_Ftd = M_Fest*(self.B_tmax/1.5)**2*(P_Fe0e*(om_e/(2*pi*60))**2)      # eddy, stator teeth
        P_Festnom = P_Hyd+P_Ftd
        P_ad = 0.2*(P_Hyys + P_Ftys + P_Hyd + P_Ftd)  # additional stray losses due to leakage flux
        pFtm = 300                                    # specific magnet loss
        P_Ftm = pFtm*2*self.p*self.b_m*self.l_s       # magnet losses
        self.Losses = P_Cu+P_Festnom+P_Fesynom+P_ad+P_Ftm
        self.gen_eff = self.machine_rating*100/(self.machine_rating+self.Losses)
        # ------------------- Structural design -------------------
        # rotor structure
        R = self.r_s-g-self.h_m-0.5*self.t  # mean radius of the rotor rim
        l = L_t
        b = self.R_o           # Shaft radius
        R_b = R-0.5*self.t     # Inner radius of the rotor
        R_a = R+0.5*self.h_yr  # Outer radius of rotor yoke
        a = R-0.5*self.t
        a_1 = R_b
        c = R/500
        self.u_all_r = c/20    # allowable radial deflection
        self.y_all = 2*l/100   # allowable axial deflection
        R_1 = R-self.t*0.5     # inner radius of rotor cylinder
        K = 4*(sin(ratio*pi/2))/pi
        q3 = self.B_g**2/2/mu_0  # normal component of Maxwell's stress
        self.mass_PM = (2*pi*(R+0.5*self.t)*l*self.h_m*ratio*self.rho_PM)  # magnet mass
        mass_st_lam = self.rho_Fe*2*pi*R*l*self.h_yr                       # rotor yoke steel mass
        # Cylindrical shell / circular plate constants (Roark's formulas, Table 11.2)
        lamb = ((3*(1-v**2)/R_a**2/self.h_yr**2)**0.25)
        x1 = lamb*l
        C_2 = cosh(x1)*sin(x1)+sinh(x1)*cos(x1)
        C_3 = sinh(x1)*sin(x1)
        C_4 = cosh(x1)*sin(x1)-sinh(x1)*cos(x1)
        C_11 = (sinh(x1))**2-(sin(x1))**2
        C_13 = cosh(x1)*sinh(x1)-cos(x1)*sin(x1)
        C_14 = (sinh(x1)**2+sin(x1)**2)
        C_a1 = cosh(x1*0.5)*cos(x1*0.5)
        C_a2 = cosh(x1*0.5)*sin(x1*0.5)+sinh(x1*0.5)*cos(x1*0.5)
        F_1_x0 = cosh(lamb*0)*cos(lamb*0)
        F_1_ls2 = cosh(lamb*0.5*self.l_s)*cos(lamb*0.5*self.l_s)
        F_2_x0 = cosh(lamb*0)*sin(lamb*0)+sinh(lamb*0)*cos(lamb*0)
        F_2_ls2 = cosh(x1/2)*sin(x1/2)+sinh(x1/2)*cos(x1/2)
        if (self.l_s < 2*a):
            a = self.l_s/2
        else:
            a = self.l_s*0.5-1
        F_a4_x0 = cosh(lamb*(0))*sin(lamb*(0))-sinh(lamb*(0))*cos(lamb*(0))
        F_a4_ls2 = cosh(pi/180*lamb*(0.5*self.l_s-a))*sin(pi/180*lamb*(0.5*self.l_s-a))-sinh(pi/180*lamb*(0.5*self.l_s-a))*cos(pi/180*lamb*(0.5*self.l_s-a))
        D_r = E*self.h_yr**3/(12*(1-v**2))
        D_ax = E*self.t_d**3/(12*(1-v**2))
        # Radial deflection analytical model (McDonald's thesis) in parts
        Part_1 = R_b*((1-v)*R_b**2+(1+v)*self.R_o**2)/(R_b**2-self.R_o**2)/E
        Part_2 = (C_2*C_a2-2*C_3*C_a1)/2/C_11
        Part_3 = (C_3*C_a2-C_4*C_a1)/C_11
        Part_4 = ((0.25/D_r/lamb**3))
        Part_5 = q3*R_b**2/(E*(R_a-R_b))
        f_d = Part_5/(Part_1-self.t_d*(Part_4*Part_2*F_2_ls2-Part_3*2*Part_4*F_1_ls2-Part_4*F_a4_ls2))
        fr = f_d*self.t_d
        self.u_Ar = abs(Part_5+fr/2/D_r/lamb**3*((-F_1_x0/C_11)*(C_3*C_a2-C_4*C_a1)+(F_2_x0/2/C_11)*(C_2*C_a2-2*C_3*C_a1)-F_a4_x0/2))
        # Axial deflection of rotor
        W = 0.5*g1*sin(phi)*((l-self.t_d)*self.h_yr*self.rho_Fes)  # annular line load on rotor cylinder
        w = self.rho_Fes*g1*sin(phi)*self.t_d                      # uniformly distributed pressure on disc
        a_i = self.R_o
        # Flat circular plate constants (Roark's table 11.2)
        C_2p = 0.25*(1-(((self.R_o/R)**2)*(1+(2*log(R/self.R_o)))))
        C_3p = (self.R_o/4/R)*((1+(self.R_o/R)**2)*log(R/self.R_o)+(self.R_o/R)**2-1)
        C_6 = (self.R_o/4/R_a)*((self.R_o/R_a)**2-1+2*log(R_a/self.R_o))
        C_5 = 0.5*(1-(self.R_o/R)**2)
        C_8 = 0.5*(1+v+(1-v)*((self.R_o/R)**2))
        C_9 = (self.R_o/R)*(0.5*(1+v)*log(R/self.R_o) + (1-v)/4*(1-(self.R_o/R)**2))
        # Flat circular plate loading constants
        L_11 = (1 + 4*(self.R_o/a_1)**2 - 5*(self.R_o/a_1)**4 - 4*((self.R_o/a_1)**2)*log(a_1/self.R_o)*(2+(self.R_o/a_1)**2))/64
        # BUGFIX: was (1/16) -> 0 under Python 2 integer division
        L_14 = (1.0/16)*(1-(self.R_o/R_b)**4-4*(self.R_o/R_b)**2*log(R_b/self.R_o))
        y_ai = -W*(a_1**3)*(C_2p*(C_6*a_1/self.R_o - C_6)/C_5 - a_1*C_3p/self.R_o + C_3p)/D_ax  # deflection from annular line load
        # Axial deflection due to uniformly distributed pressure load
        M_rb = -w*R**2*(C_6*(R**2-self.R_o**2)*0.5/R/self.R_o-L_14)/C_5
        Q_b = w*0.5*(R**2-self.R_o**2)/self.R_o
        y_aii = M_rb*R_a**2*C_2p/D_ax+Q_b*R_a**3*C_3p/D_ax-w*R_a**4*L_11/D_ax
        self.y_Ar = abs(y_ai+y_aii)
        self.z_all_r = 0.05*2*pi*R/360  # allowable torsional deflection of rotor
        # stator structure deflection calculation
        self.R_out = (R/0.995+self.h_s+self.h_ys)
        a_s = (self.b_st*self.d_s)-((self.b_st-2*self.t_ws)*(self.d_s-2*self.t_ws))  # cross-sectional area of stator arms
        A_st = l*self.t_s                                                            # cross-sectional area of stator cylinder
        N_st = round(self.n_s)
        theta_s = pi*1/N_st            # half angle between spokes
        I_st = l*self.t_s**3/12        # second moment of area of stator cylinder
        I_arm_axi_s = ((self.b_st*self.d_s**3)-((self.b_st-2*self.t_ws)*(self.d_s-2*self.t_ws)**3))/12  # arm, axial
        I_arm_tor_s = ((self.d_s*self.b_st**3)-((self.d_s-2*self.t_ws)*(self.b_st-2*self.t_ws)**3))/12  # arm, torsion
        R_st = self.r_s+self.h_s+self.h_ys*0.5
        k_2 = sqrt(I_st/A_st)          # radius of gyration
        self.b_all_s = 2*pi*self.R_o/N_st
        m2 = (k_2/R_st)**2
        c1 = R_st/500
        R_1s = R_st-self.t_s*0.5
        d_se = dia+2*(self.h_ys+self.h_s+h_w)  # stator outer diameter
        # Radial deflection of stator
        Numers = R_st**3*((0.25*(sin(theta_s)-(theta_s*cos(theta_s)))/(sin(theta_s))**2)-(0.5/sin(theta_s))+(0.5/theta_s))
        Povs = ((theta_s/(sin(theta_s))**2)+1/tan(theta_s))*((0.25*R_st/A_st)+(0.25*R_st**3/I_st))
        Qovs = R_st**3/(2*I_st*theta_s*(m2+1))
        Lovs = (R_1s-self.R_o)*0.5/a_s
        Denoms = I_st*(Povs-Qovs+Lovs)
        self.u_As = (q3*R_st**2/E/self.t_s)*(1+Numers/Denoms)
        # Axial deflection of stator
        mass_st_lam_s = M_Fest+pi*l*self.rho_Fe*((R_st+0.5*self.h_ys)**2-(R_st-0.5*self.h_ys)**2)
        W_is = 0.5*g1*sin(phi)*(self.rho_Fes*l*self.d_s**2)              # self-weight load on stator arm beam
        W_iis = g1*sin(phi)*(mass_st_lam_s+V_Cus*self.rho_Copper)/2/N_st  # weight of stator cylinder and teeth
        w_s = self.rho_Fes*g1*sin(phi)*a_s*N_st                           # uniformly distributed load of the arms
        l_is = R_st-self.R_o   # lever arm of the stator cylinder weight
        l_iis = l_is
        l_iiis = l_is
        self.u_all_s = c1/20
        X_comp1 = (W_is*l_is**3/12/E/I_arm_axi_s)    # deflection due to arm self-weight
        X_comp2 = (W_iis*l_iis**4/24/E/I_arm_axi_s)  # deflection due to 1/nth of stator cylinder
        X_comp3 = w_s*l_iiis**4/24/E/I_arm_axi_s     # deflection due to weight of arms
        self.y_As = X_comp1+X_comp2+X_comp3          # axial deflection
        # Stator circumferential deflection
        self.z_all_s = 0.05*2*pi*R_st/360            # allowable torsional deflection
        self.z_A_s = 2*pi*(R_st+0.5*self.t_s)*l/(2*N_st)*sigma*(l_is+0.5*self.t_s)**3/3/E/I_arm_tor_s
        mass_stru_steel = 2*(N_st*(R_1s-self.R_o)*a_s*self.rho_Fes)
        self.TC1 = T*1.0/(2*pi*sigma)  # Torque/shear stress
        self.TC2 = R**2*l              # Torque constraint for rotor
        self.TC3 = R_st**2*l           # Torque constraint for stator
        self.Structural_mass = mass_stru_steel+(pi*(R**2-R_o**2)*self.t_d*self.rho_Fes)
        self.Mass = self.Structural_mass+self.Iron+self.Copper+self.mass_PM
        self.I = np.array([0.0, 0.0, 0.0])
        # Mass moments of inertia and center of mass
        self.I[0] = (0.5*self.Mass*self.R_out**2)
        # BUGFIX: was (1/12) -> 0 under Python 2 integer division
        self.I[1] = (0.25*self.Mass*self.R_out**2+(1.0/12)*self.Mass*self.l_s**2)
        self.I[2] = self.I[1]
        self.cm = np.array([0.0, 0.0, 0.0])
        self.cm[0] = self.main_shaft_cm[0] + self.main_shaft_length/2. + self.l_s/2
        self.cm[1] = self.main_shaft_cm[1]
        self.cm[2] = self.main_shaft_cm[2]
        # NOTE: order must match the unpack in PMSG.solve_nonlinear
        return(self.B_symax, self.B_tmax, self.B_rymax, self.B_smax, self.B_pm1, self.B_g, self.N_s, self.b_s,
               self.b_t, self.A_Cuscalc, self.b_m, self.p, self.E_p, self.f, self.I_s, self.R_s, self.L_s, self.A_1,
               self.J_s, self.Mass, self.K_rad, self.Losses, self.gen_eff, self.u_Ar, self.y_Ar,
               self.u_As, self.y_As, self.z_A_s, self.u_all_r, self.u_all_s, self.y_all, self.z_all_s,
               self.z_all_r, self.b_all_s,
               self.TC1, self.TC2, self.TC3, self.R_out, self.S, self.Slot_aspect_ratio,
               self.cm, self.I, self.mass_PM, self.Copper, self.Iron, self.Structural_mass)
####################################################Cost Analysis#######################################################################
class PMSG_Cost(Component):
 """ Provides a material cost estimate for a PMSG _disc generator. Manufacturing costs are excluded.

 Inputs are the specific cost (per unit mass) and the mass of each material
 type; the single output 'Costs' is the total material cost.  The arithmetic
 is delegated to ``generator_costing``.
 """
 def __init__(self):
  super(PMSG_Cost, self).__init__()
  # Inputs
  # Specific cost of material by type
  self.add_param('C_Cu',val=0.0, desc='Specific cost of copper')
  self.add_param('C_Fe',val=0.0,desc='Specific cost of magnetic steel/iron')
  self.add_param('C_Fes',val=0.0,desc='Specific cost of structural steel')
  self.add_param('C_PM',val=0.0,desc='Specific cost of Magnet')
  # Mass of each material type
  self.add_param('Copper',val=0.0, desc='Copper mass')
  self.add_param('Iron',val=0.0, desc='Iron mass')
  self.add_param('mass_PM' ,val=0.0, desc='Magnet mass')
  self.add_param('Structural_mass',val=0.0, desc='Structural mass')
  # Outputs
  self.add_output('Costs',val=0.0,desc='Total cost')
  # plain-Python helper that holds the cost arithmetic
  self.gen_costs=generator_costing()
 def solve_nonlinear(self,inputs,outputs,resid):
  """Evaluate total material cost from the masses and specific costs."""
  (outputs['Costs'])=self.gen_costs.compute(inputs['Copper'],inputs['C_Cu'], \
  inputs['Iron'],inputs['C_Fe'],inputs['C_Fes'],inputs['mass_PM'],inputs['C_PM'],inputs['Structural_mass'])
  return outputs
class generator_costing(object):
    """Material cost model for the PMSG disc generator.

    Total cost = (mass x specific cost) summed over the active materials
    (copper, electrical steel, magnets) plus the structural steel.
    Manufacturing costs are excluded.
    """

    def __init__(self):
        pass

    def compute(self, Copper, C_Cu, Iron, C_Fe, C_Fes, mass_PM, C_PM, Structural_mass):
        """Return the total material cost for the given masses and unit costs."""
        # retain the mass inputs as attributes (legacy interface)
        self.Copper = Copper
        self.mass_PM = mass_PM
        self.Iron = Iron
        self.Structural_mass = Structural_mass
        # active-material cost plus structural-steel cost
        active_cost = C_Cu * self.Copper + C_Fe * self.Iron + C_PM * self.mass_PM
        structural_cost = C_Fes * self.Structural_mass
        return active_cost + structural_cost
####################################################OPTIMISATION SET_UP ###############################################################
class PMSG_disc_Opt(Group):
    """OpenMDAO Group wiring the PMSG sizing model, its cost model,
    the independent design variables and the constraint equations."""

    def __init__(self):
        super(PMSG_disc_Opt, self).__init__()
        # operating-point inputs
        for name in ('machine_rating', 'Torque', 'n_nom'):
            self.add(name, IndepVarComp(name, 0.0), promotes=['*'])
        # drivetrain interface inputs
        self.add('main_shaft_cm', IndepVarComp('main_shaft_cm', np.array([0.0, 0.0, 0.0])), promotes=['*'])
        self.add('main_shaft_length', IndepVarComp('main_shaft_length', val=0.0), promotes=['*'])
        # geometric design variables and material densities
        for name in ('r_s', 'l_s', 'h_s', 'tau_p', 'h_m', 'h_ys', 'h_yr',
                     'n_s', 'b_st', 'd_s', 't_ws', 't_d', 'R_o',
                     'rho_Fes', 'rho_Fe', 'rho_Copper', 'rho_PM'):
            self.add(name, IndepVarComp(name, 0.0), promotes=['*'])
        # PMSG component and its constraint equations
        self.add('PMSG', PMSG(), promotes=['*'])
        for con_name, expression in (
                ('con_uAs', 'con_uAs =u_all_s-u_As'),
                ('con_zAs', 'con_zAs =z_all_s-z_A_s'),
                ('con_yAs', 'con_yAs =y_all-y_As'),
                ('con_bst', 'con_bst =b_all_s-b_st'),
                ('con_uAr', 'con_uAr =u_all_r-u_Ar'),
                ('con_yAr', 'con_yAr =y_all-y_Ar'),
                ('con_TC2', 'con_TC2 =TC2-TC1'),
                ('con_TC3', 'con_TC3 =TC3-TC1'),
                ('con_Bsmax', 'con_Bsmax =B_g-B_smax')):
            self.add(con_name, ExecComp(expression), promotes=['*'])
        # PMSG_Cost component and its specific-cost inputs
        self.add('PMSG_Cost', PMSG_Cost(), promotes=['*'])
        for name in ('C_Cu', 'C_Fe', 'C_Fes', 'C_PM'):
            self.add(name, IndepVarComp(name, val=0.0), promotes=['*'])
        #self.connect('PMSG.Copper','PMSG_Cost.Copper')
def PMSG_disc_Opt_example():
    """Example optimization of a PMSG_disc rotor generator for cost on a
    5 MW reference turbine, driven by pyOptSparse/CONMIN."""
    opt_problem = Problem(root=PMSG_disc_Opt())
    # attach the optimizer and its settings
    driver = pyOptSparseDriver()
    opt_problem.driver = driver
    driver.options['optimizer'] = 'CONMIN'
    for option, value in (('IPRINT', 4), ('ITRM', 3), ('ITMAX', 10),
                          ('DELFUN', 1e-3), ('DABFUN', 1e-3),
                          ('IFILE', 'CONMIN_PMSG_disc.out')):
        driver.opt_settings[option] = value
    opt_problem.root.deriv_options['type'] = 'fd'
    # design-variable bounds for a PMSG designed for a 5MW turbine
    for name, lower, upper in (('r_s', 0.5, 9.0), ('l_s', 0.5, 2.5),
                               ('h_s', 0.04, 0.1), ('tau_p', 0.04, 0.1),
                               ('h_m', 0.005, 0.10), ('h_yr', 0.045, 0.25),
                               ('h_ys', 0.045, 0.25), ('t_d', 0.1, 0.25),
                               ('n_s', 5., 15.), ('b_st', 0.1, 1.5),
                               ('d_s', 0.1, 1.5), ('t_ws', 0.001, 0.2)):
        driver.add_desvar(name, lower=lower, upper=upper)
    # target efficiency (%)
    Eta_Target = 93.0
    # constraints for the PMSG_arms generator
    for name in ('B_symax', 'B_rymax', 'B_tmax'):
        driver.add_constraint(name, upper=2.0-1.0e-6)
    driver.add_constraint('B_g', lower=0.70, upper=1.20)
    driver.add_constraint('con_Bsmax', lower=0.0+1.0e-6)
    driver.add_constraint('E_p', lower=500.0, upper=5000.0)
    for name in ('con_uAs', 'con_zAs', 'con_yAs', 'con_uAr',
                 'con_yAr', 'con_TC2', 'con_TC3', 'con_bst'):
        driver.add_constraint(name, lower=0.0+1.0e-6)
    driver.add_constraint('A_1', upper=60000.0-1.0e-6)
    driver.add_constraint('J_s', upper=6.0)
    driver.add_constraint('A_Cuscalc', lower=5.0)
    driver.add_constraint('K_rad', lower=0.2+1.0e-6, upper=0.27)
    driver.add_constraint('Slot_aspect_ratio', lower=4.0, upper=10.0)
    driver.add_constraint('gen_eff', lower=Eta_Target)
    # objective: total material cost
    driver.add_objective('Costs')
    opt_problem.setup()
    # target machine parameters, initial design variables, specific costs
    # and material properties
    initial_values = (
        ('machine_rating', 5000000.0),
        ('Torque', 4.143289e6),
        ('n_nom', 12.1),
        ('r_s', 3.49),
        ('l_s', 1.5),
        ('h_s', 0.06),
        ('tau_p', 0.07),
        ('h_m', 0.0105),
        ('h_ys', 0.085),
        ('h_yr', 0.055),
        ('n_s', 5.0),
        ('b_st', 0.460),
        ('t_d', 0.105),
        ('d_s', 0.350),
        ('t_ws', 0.150),
        ('R_o', 0.43),
        ('C_Cu', 4.786),
        ('C_Fe', 0.556),
        ('C_Fes', 0.50139),
        ('C_PM', 95.0),
        ('main_shaft_length', 2.0),
        ('rho_Fe', 7700.0),      # Steel density
        ('rho_Fes', 7850.0),     # Structural steel density
        ('rho_Copper', 8900.0),  # Kg/m3 copper density
        ('rho_PM', 7450.0),      # Magnet density
    )
    for name, value in initial_values:
        opt_problem[name] = value
    opt_problem['main_shaft_cm'] = np.array([0.0, 0.0, 0.0])
    # run the optimization
    opt_problem.run()
    """ Uncomment to print solution to screen/an excel file
    raw_data = {'Parameters': ['Rating','Stator Arms', 'Stator Axial arm dimension','Stator Circumferential arm dimension',' Stator arm Thickness' ,'Rotor disc Thickness',' Stator Radial deflection', 'Stator Axial deflection','Stator circum deflection',' Rotor Radial deflection', 'Rotor Axial deflection','Air gap diameter',\
    'Overall Outer diameter', 'Stator length', 'l/d ratio','Slot_aspect_ratio','Pole pitch', 'Stator slot height','Stator slotwidth','Stator tooth width', 'Stator yoke height', 'Rotor yoke height', 'Magnet height', 'Magnet width', 'Peak air gap flux density fundamental','Peak stator yoke flux density','Peak rotor yoke flux density',\
    'Flux density above magnet','Maximum Stator flux density','Maximum tooth flux density','Pole pairs', 'Generator output frequency', 'Generator output phase voltage', 'Generator Output phase current', 'Stator resistance','Synchronous inductance', 'Stator slots','Stator turns','Conductor cross-section','Stator Current density ',\
    'Specific current loading','Generator Efficiency ','Iron mass','Magnet mass','Copper mass','Mass of Arms and disc', 'Total Mass', 'Total Material Cost'],
    'Values': [opt_problem['machine_rating']/1000000,opt_problem['n_s'],opt_problem['d_s']*1000,opt_problem['b_st']*1000,opt_problem['t_ws']*1000,opt_problem['t_d']*1000,opt_problem['u_As']*1000,opt_problem['y_As']*1000,opt_problem['z_A_s']*1000,opt_problem['u_Ar']*1000,opt_problem['y_Ar']*1000,2*opt_problem['r_s'],\
    opt_problem['R_out']*2,opt_problem['l_s'],opt_problem['K_rad'],opt_problem['Slot_aspect_ratio'],opt_problem['tau_p']*1000,opt_problem['h_s']*1000,opt_problem['b_s']*1000,opt_problem['b_t']*1000,opt_problem['h_ys']*1000,opt_problem['h_yr']*1000,opt_problem['h_m']*1000,opt_problem['b_m']*1000,opt_problem['B_g'],opt_problem['B_symax'],\
    opt_problem['B_rymax'],opt_problem['B_pm1'],opt_problem['B_smax'],opt_problem['B_tmax'],opt_problem['p'],opt_problem['f'],opt_problem['E_p'],opt_problem['I_s'],opt_problem['R_s'],opt_problem['L_s'],opt_problem['S'],opt_problem['N_s'],opt_problem['A_Cuscalc'],opt_problem['J_s'],opt_problem['A_1']/1000,opt_problem['gen_eff'],\
    opt_problem['Iron']/1000,opt_problem['mass_PM']/1000,opt_problem['Copper']/1000,opt_problem['Structural_mass']/1000,opt_problem['Mass']/1000,opt_problem['Costs']/1000],
    'Limit': ['','','',opt_problem['b_all_s']*1000,'','',opt_problem['u_all_s']*1000,opt_problem['y_all']*1000,opt_problem['z_all_s']*1000,opt_problem['u_all_r']*1000,opt_problem['y_all']*1000,'','','','(0.2-0.27)','(4-10)','','','','','','','','','<2','<2','<2','<2','<2',opt_problem['B_g'],'<2','','>500','','','','','','5',\
    '3-6','60','>93%','','','','','',''],
    'Units':['MW','unit','mm','mm','mm','','mm','mm','mm','mm','mm','m','m','m','','','mm','mm','mm','mm','mm','mm','mm','mm','T','T','T','T','T','T','-','Hz','V','A','ohm/phase','p.u','slots','turns','mm^2','A/mm^2','kA/m','%','tons','tons','tons','tons','tons','k$']}
    df=pd.DataFrame(raw_data, columns=['Parameters','Values','Limit','Units'])
    print df
    df.to_excel('PMSG_'+str(opt_problem['machine_rating']/1e6)+'_discRotor_MW_1.7.x.xlsx')
    """
if __name__=="__main__":
 # Script entry point: run an example cost optimization of the PMSG disc generator
 PMSG_disc_Opt_example()
| [
"openmdao.api.IndepVarComp",
"openmdao.api.ExecComp",
"math.atan",
"math.sqrt",
"math.tan",
"math.sin",
"math.cosh",
"math.log",
"numpy.array",
"math.cos",
"math.sinh",
"openmdao.drivers.pyoptsparse_driver.pyOptSparseDriver"
] | [((28444, 28463), 'openmdao.drivers.pyoptsparse_driver.pyOptSparseDriver', 'pyOptSparseDriver', ([], {}), '()\n', (28461, 28463), False, 'from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver\n'), ((32459, 32484), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (32467, 32484), True, 'import numpy as np\n'), ((12629, 12697), 'math.sqrt', 'sqrt', (['(Z ** 2 + ((self.E_p - G ** 0.5) / (om_e * self.L_s) ** 2) ** 2)'], {}), '(Z ** 2 + ((self.E_p - G ** 0.5) / (om_e * self.L_s) ** 2) ** 2)\n', (12633, 12697), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((19799, 19816), 'math.sqrt', 'sqrt', (['(I_st / A_st)'], {}), '(I_st / A_st)\n', (19803, 19816), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((22414, 22439), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (22422, 22439), True, 'import numpy as np\n'), ((22672, 22697), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (22680, 22697), True, 'import numpy as np\n'), ((11187, 11199), 'math.sin', 'sin', (['alpha_p'], {}), '(alpha_p)\n', (11190, 11199), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((11390, 11406), 'math.sin', 'sin', (['(pi / 6 / q1)'], {}), '(pi / 6 / q1)\n', (11393, 11406), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((12520, 12527), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (12524, 12527), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16004, 16012), 'math.sinh', 'sinh', (['x1'], {}), '(x1)\n', (16008, 16012), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16013, 16020), 'math.sin', 'sin', (['x1'], {}), '(x1)\n', (16016, 16020), False, 'from math import pi, cos, cosh, sqrt, radians, sin, 
sinh, exp, log10, log, tan, atan\n'), ((16180, 16194), 'math.cosh', 'cosh', (['(x1 * 0.5)'], {}), '(x1 * 0.5)\n', (16184, 16194), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16193, 16206), 'math.cos', 'cos', (['(x1 * 0.5)'], {}), '(x1 * 0.5)\n', (16196, 16206), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16273, 16287), 'math.cosh', 'cosh', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16277, 16287), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16286, 16299), 'math.cos', 'cos', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16289, 16299), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16309, 16336), 'math.cosh', 'cosh', (['(lamb * 0.5 * self.l_s)'], {}), '(lamb * 0.5 * self.l_s)\n', (16313, 16336), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16333, 16359), 'math.cos', 'cos', (['(lamb * 0.5 * self.l_s)'], {}), '(lamb * 0.5 * self.l_s)\n', (16336, 16359), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((25579, 25614), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""machine_rating"""', '(0.0)'], {}), "('machine_rating', 0.0)\n", (25591, 25614), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((25651, 25678), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""Torque"""', '(0.0)'], {}), "('Torque', 0.0)\n", (25663, 25678), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((25716, 25742), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""n_nom"""', '(0.0)'], {}), "('n_nom', 0.0)\n", (25728, 25742), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, 
pyOptSparseDriver\n'), ((25896, 25938), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""main_shaft_length"""'], {'val': '(0.0)'}), "('main_shaft_length', val=0.0)\n", (25908, 25938), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((25976, 26000), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""r_s"""', '(0.0)'], {}), "('r_s', 0.0)\n", (25988, 26000), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26034, 26058), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""l_s"""', '(0.0)'], {}), "('l_s', 0.0)\n", (26046, 26058), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26092, 26116), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_s"""', '(0.0)'], {}), "('h_s', 0.0)\n", (26104, 26116), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26152, 26178), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""tau_p"""', '(0.0)'], {}), "('tau_p', 0.0)\n", (26164, 26178), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26213, 26237), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_m"""', '(0.0)'], {}), "('h_m', 0.0)\n", (26225, 26237), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26272, 26297), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_ys"""', '(0.0)'], {}), "('h_ys', 0.0)\n", (26284, 26297), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26332, 26357), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""h_yr"""', '(0.0)'], {}), "('h_yr', 0.0)\n", (26344, 26357), False, 'from openmdao.api import Group, Problem, Component, 
ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26395, 26419), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""n_s"""', '(0.0)'], {}), "('n_s', 0.0)\n", (26407, 26419), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26454, 26479), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""b_st"""', '(0.0)'], {}), "('b_st', 0.0)\n", (26466, 26479), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26513, 26537), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""d_s"""', '(0.0)'], {}), "('d_s', 0.0)\n", (26525, 26537), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26572, 26597), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""t_ws"""', '(0.0)'], {}), "('t_ws', 0.0)\n", (26584, 26597), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26631, 26655), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""t_d"""', '(0.0)'], {}), "('t_d', 0.0)\n", (26643, 26655), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26689, 26713), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""R_o"""', '(0.0)'], {}), "('R_o', 0.0)\n", (26701, 26713), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26755, 26783), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Fes"""', '(0.0)'], {}), "('rho_Fes', 0.0)\n", (26767, 26783), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26820, 26847), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Fe"""', '(0.0)'], {}), "('rho_Fe', 0.0)\n", (26832, 26847), False, 'from openmdao.api import Group, Problem, 
Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26888, 26919), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_Copper"""', '(0.0)'], {}), "('rho_Copper', 0.0)\n", (26900, 26919), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((26956, 26983), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""rho_PM"""', '(0.0)'], {}), "('rho_PM', 0.0)\n", (26968, 26983), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27121, 27154), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_uAs =u_all_s-u_As"""'], {}), "('con_uAs =u_all_s-u_As')\n", (27129, 27154), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27194, 27228), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_zAs =z_all_s-z_A_s"""'], {}), "('con_zAs =z_all_s-z_A_s')\n", (27202, 27228), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27268, 27299), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_yAs =y_all-y_As"""'], {}), "('con_yAs =y_all-y_As')\n", (27276, 27299), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27339, 27372), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_bst =b_all_s-b_st"""'], {}), "('con_bst =b_all_s-b_st')\n", (27347, 27372), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27412, 27445), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_uAr =u_all_r-u_Ar"""'], {}), "('con_uAr =u_all_r-u_Ar')\n", (27420, 27445), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27485, 27516), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_yAr =y_all-y_Ar"""'], {}), 
"('con_yAr =y_all-y_Ar')\n", (27493, 27516), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27556, 27584), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_TC2 =TC2-TC1"""'], {}), "('con_TC2 =TC2-TC1')\n", (27564, 27584), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27624, 27652), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_TC3 =TC3-TC1"""'], {}), "('con_TC3 =TC3-TC1')\n", (27632, 27652), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27694, 27727), 'openmdao.api.ExecComp', 'ExecComp', (['"""con_Bsmax =B_g-B_smax"""'], {}), "('con_Bsmax =B_g-B_smax')\n", (27702, 27727), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27861, 27890), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Cu"""'], {'val': '(0.0)'}), "('C_Cu', val=0.0)\n", (27873, 27890), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27925, 27954), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Fe"""'], {'val': '(0.0)'}), "('C_Fe', val=0.0)\n", (27937, 27954), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((27990, 28020), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_Fes"""'], {'val': '(0.0)'}), "('C_Fes', val=0.0)\n", (28002, 28020), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((28055, 28084), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""C_PM"""'], {'val': '(0.0)'}), "('C_PM', val=0.0)\n", (28067, 28084), False, 'from openmdao.api import Group, Problem, Component, ExecComp, IndepVarComp, ScipyOptimizer, pyOptSparseDriver\n'), ((5596, 5621), 
'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5604, 5621), True, 'import numpy as np\n'), ((5745, 5770), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5753, 5770), True, 'import numpy as np\n'), ((5887, 5912), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5895, 5912), True, 'import numpy as np\n'), ((11377, 11388), 'math.sin', 'sin', (['(pi / 6)'], {}), '(pi / 6)\n', (11380, 11388), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((15410, 15429), 'math.sin', 'sin', (['(ratio * pi / 2)'], {}), '(ratio * pi / 2)\n', (15413, 15429), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((15963, 15971), 'math.cosh', 'cosh', (['x1'], {}), '(x1)\n', (15967, 15971), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((15972, 15979), 'math.sin', 'sin', (['x1'], {}), '(x1)\n', (15975, 15979), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((15980, 15988), 'math.sinh', 'sinh', (['x1'], {}), '(x1)\n', (15984, 15988), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((15989, 15996), 'math.cos', 'cos', (['x1'], {}), '(x1)\n', (15992, 15996), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16028, 16036), 'math.cosh', 'cosh', (['x1'], {}), '(x1)\n', (16032, 16036), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16037, 16044), 'math.sin', 'sin', (['x1'], {}), '(x1)\n', (16040, 16044), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16045, 16053), 'math.sinh', 'sinh', (['x1'], {}), '(x1)\n', (16049, 16053), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, 
atan\n'), ((16054, 16061), 'math.cos', 'cos', (['x1'], {}), '(x1)\n', (16057, 16061), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16071, 16079), 'math.sinh', 'sinh', (['x1'], {}), '(x1)\n', (16075, 16079), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16085, 16092), 'math.sin', 'sin', (['x1'], {}), '(x1)\n', (16088, 16092), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16105, 16113), 'math.cosh', 'cosh', (['x1'], {}), '(x1)\n', (16109, 16113), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16114, 16122), 'math.sinh', 'sinh', (['x1'], {}), '(x1)\n', (16118, 16122), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16123, 16130), 'math.cos', 'cos', (['x1'], {}), '(x1)\n', (16126, 16130), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16131, 16138), 'math.sin', 'sin', (['x1'], {}), '(x1)\n', (16134, 16138), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16148, 16156), 'math.sinh', 'sinh', (['x1'], {}), '(x1)\n', (16152, 16156), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16160, 16167), 'math.sin', 'sin', (['x1'], {}), '(x1)\n', (16163, 16167), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16213, 16227), 'math.cosh', 'cosh', (['(x1 * 0.5)'], {}), '(x1 * 0.5)\n', (16217, 16227), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16226, 16239), 'math.sin', 'sin', (['(x1 * 0.5)'], {}), '(x1 * 0.5)\n', (16229, 16239), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16238, 16252), 'math.sinh', 'sinh', 
(['(x1 * 0.5)'], {}), '(x1 * 0.5)\n', (16242, 16252), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16251, 16264), 'math.cos', 'cos', (['(x1 * 0.5)'], {}), '(x1 * 0.5)\n', (16254, 16264), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16366, 16380), 'math.cosh', 'cosh', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16370, 16380), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16379, 16392), 'math.sin', 'sin', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16382, 16392), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16391, 16405), 'math.sinh', 'sinh', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16395, 16405), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16404, 16417), 'math.cos', 'cos', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16407, 16417), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16427, 16439), 'math.cosh', 'cosh', (['(x1 / 2)'], {}), '(x1 / 2)\n', (16431, 16439), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16438, 16449), 'math.sin', 'sin', (['(x1 / 2)'], {}), '(x1 / 2)\n', (16441, 16449), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16448, 16460), 'math.sinh', 'sinh', (['(x1 / 2)'], {}), '(x1 / 2)\n', (16452, 16460), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16459, 16470), 'math.cos', 'cos', (['(x1 / 2)'], {}), '(x1 / 2)\n', (16462, 16470), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16558, 16572), 'math.cosh', 'cosh', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16562, 16572), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, 
log10, log, tan, atan\n'), ((16573, 16586), 'math.sin', 'sin', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16576, 16586), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16587, 16601), 'math.sinh', 'sinh', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16591, 16601), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16602, 16615), 'math.cos', 'cos', (['(lamb * 0)'], {}), '(lamb * 0)\n', (16605, 16615), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16628, 16672), 'math.cosh', 'cosh', (['(pi / 180 * lamb * (0.5 * self.l_s - a))'], {}), '(pi / 180 * lamb * (0.5 * self.l_s - a))\n', (16632, 16672), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16663, 16706), 'math.sin', 'sin', (['(pi / 180 * lamb * (0.5 * self.l_s - a))'], {}), '(pi / 180 * lamb * (0.5 * self.l_s - a))\n', (16666, 16706), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16697, 16741), 'math.sinh', 'sinh', (['(pi / 180 * lamb * (0.5 * self.l_s - a))'], {}), '(pi / 180 * lamb * (0.5 * self.l_s - a))\n', (16701, 16741), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((16732, 16775), 'math.cos', 'cos', (['(pi / 180 * lamb * (0.5 * self.l_s - a))'], {}), '(pi / 180 * lamb * (0.5 * self.l_s - a))\n', (16735, 16775), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((17450, 17458), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (17453, 17458), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((17607, 17615), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (17610, 17615), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20610, 20618), 'math.sin', 'sin', (['phi'], {}), 
'(phi)\n', (20613, 20618), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((25821, 25846), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (25829, 25846), True, 'import numpy as np\n'), ((10772, 10810), 'math.atan', 'atan', (['(b_so / 2 / (g + self.h_m / mu_r))'], {}), '(b_so / 2 / (g + self.h_m / mu_r))\n', (10776, 10810), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((10807, 10856), 'math.sqrt', 'sqrt', (['(1 + (b_so / 2 / (g + self.h_m / mu_r)) ** 2)'], {}), '(1 + (b_so / 2 / (g + self.h_m / mu_r)) ** 2)\n', (10811, 10856), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((12703, 12710), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (12707, 12710), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((17998, 18017), 'math.log', 'log', (['(R_a / self.R_o)'], {}), '(R_a / self.R_o)\n', (18001, 18017), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((18121, 18138), 'math.log', 'log', (['(R / self.R_o)'], {}), '(R / self.R_o)\n', (18124, 18138), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((18394, 18413), 'math.log', 'log', (['(R_b / self.R_o)'], {}), '(R_b / self.R_o)\n', (18397, 18413), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20229, 20241), 'math.tan', 'tan', (['theta_s'], {}), '(theta_s)\n', (20232, 20241), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20875, 20883), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (20878, 20883), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((17915, 17932), 'math.log', 'log', (['(R / self.R_o)'], {}), '(R / self.R_o)\n', (17918, 17932), 
False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((18294, 18313), 'math.log', 'log', (['(a_1 / self.R_o)'], {}), '(a_1 / self.R_o)\n', (18297, 18313), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20161, 20173), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (20164, 20173), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20209, 20221), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (20212, 20221), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20744, 20752), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (20747, 20752), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((17851, 17868), 'math.log', 'log', (['(R / self.R_o)'], {}), '(R / self.R_o)\n', (17854, 17868), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20138, 20150), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (20141, 20150), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20100, 20112), 'math.sin', 'sin', (['theta_s'], {}), '(theta_s)\n', (20103, 20112), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n'), ((20122, 20134), 'math.cos', 'cos', (['theta_s'], {}), '(theta_s)\n', (20125, 20134), False, 'from math import pi, cos, cosh, sqrt, radians, sin, sinh, exp, log10, log, tan, atan\n')] |
import unittest
import numpy as np
from decouple import config
from pysony.graph import GraphDistance
class TestGraphDistance(unittest.TestCase):
    """Smoke test: GraphDistance.compute runs on random geographic points."""

    def testGraphDistance(self):
        # Distance-graph helper with a 20-unit threshold.
        graph_distance = GraphDistance(threshold=20)

        # Ten random (lon, lat) points scattered around a fixed anchor.
        points = np.random.rand(10, 2) / 2
        points[:, 0] += -0.1729636
        points[:, 1] += 51.5214588

        nodes, edges = graph_distance.compute(points)

        # Print the raw outputs and their sizes for manual inspection.
        for item in (nodes, edges, len(nodes), len(edges)):
            print(item)
if __name__ == '__main__':
unittest.main(verbosity = 2) | [
"unittest.main",
"numpy.random.rand",
"pysony.graph.GraphDistance"
] | [((523, 549), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (536, 549), False, 'import unittest\n'), ((207, 234), 'pysony.graph.GraphDistance', 'GraphDistance', ([], {'threshold': '(20)'}), '(threshold=20)\n', (220, 234), False, 'from pysony.graph import GraphDistance\n'), ((271, 292), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (285, 292), True, 'import numpy as np\n')] |
import argparse
import json
from pathlib import Path
import numpy as np
from model import MultiTaskModel
import parse
import create_data
import asyncio
import websockets
import logging
import logging.handlers
class QueryModel:
    """A single pre-trained multi-task model loaded from a saved model directory.

    The directory is expected to contain ``model_config.json`` (model layout),
    ``name_to_index.json`` (class-name -> class-index maps per task) and the
    ``nn`` weights subdirectory.
    """

    def __init__(self,model_file,identifier):
        """Load the model at *model_file*.

        model_file -- path to the saved model directory.
        identifier -- integer used only to name this model's logger
        ('root.Model-<identifier>').
        """
        self.config = None
        self.model = None
        self.sentence_length = None
        self.name_to_name_to_indices = None
        self.logger = logging.getLogger('root.Model-{}'.format(identifier))
        load_path = Path(model_file)
        # NOTE(review): a missing directory is only logged here; the open()
        # below will then raise FileNotFoundError anyway.
        if (not load_path.exists()) or (not load_path.is_dir()):
            self.logger.error("model directory {} doesn't exist".format(model_file))
        config_filename = load_path.joinpath("model_config.json")
        with config_filename.open('r',encoding='utf8') as fp:
            self.config = json.load(fp)
        index_filename = load_path.joinpath("name_to_index.json")
        with index_filename.open('r',encoding='utf8') as fp:
            self.name_to_name_to_indices = json.load(fp)
        self.sentence_length = self.config['sentence_length']
        self.model = MultiTaskModel(self.config,self.sentence_length,{},{})
        self.model.load_model(load_path.joinpath("nn"))
        # Index the config's inputs and tasks by name for fast lookup,
        # and invert the class-name -> index map for decoding predictions.
        self.input_names = []
        self.target_name_to_def = {}
        self.input_name_to_def = {}
        self.name_to_index_to_name = {}
        for i in self.config['inputs']:
            input_name = i['name']
            self.input_names.append(input_name)
            self.input_name_to_def[input_name] = i
        for t in self.config['tasks']:
            target_name = t['target']
            self.target_name_to_def[target_name] = t
            index_to_name = {}
            for x,y in self.name_to_name_to_indices[target_name].items():
                index_to_name[y] = x
            self.name_to_index_to_name[target_name] = index_to_name

    def query(self,query_input):
        """Run one parsed query through the model and return its raw predictions.

        query_input -- preprocessed query data as produced by
        create_data.DataProcessor.get_data (format defined by the external
        ``parse`` module).
        Returns whatever MultiTaskModel.query returns: a mapping from target
        name to prediction arrays.
        """
        num_examples, sentences, inputs, targets = parse.parse_json_file_with_index(query_input,self.name_to_name_to_indices,self.input_names,[],self.sentence_length)
        # Any model input missing from the parsed data is fed as zeros of the
        # shape the config declares for that input type.
        for input_name in self.input_names:
            if not input_name in inputs:
                self.logger.warning("problem: model input \"{}\" not found in dataset file, feeding zero values".format(input_name))
                input_def = self.input_name_to_def[input_name]
                input_type = input_def['type']
                array_shape = []
                if input_type == "vector_sequence":
                    array_shape = [num_examples,self.sentence_length,input_def['vector_length']]
                elif input_type == "class_sequence":
                    array_shape = [num_examples,self.sentence_length]
                elif input_type == "graph_structure":
                    array_shape = [num_examples,self.sentence_length,self.sentence_length]
                inputs[input_name] = (input_type,np.zeros(array_shape))
        # Strip the (type, array) tuples down to the bare arrays for the model.
        data = {}
        for x,y in inputs.items():
            data[x] = y[1]
        results = self.model.query(data)
        return results
class QueryServer:
    """Websocket server answering NL classification queries with a model ensemble.

    Each query is preprocessed (CoreNLP + word vectors), run through every
    loaded QueryModel, and the per-model predictions are combined by majority
    vote before being returned to the client as a JSON string.
    """

    def __init__(self,args):
        """Read the JSON configuration, set up logging and load all models.

        args -- argparse namespace; args.query_config names the configuration
        file and args.model_file optionally overrides the configured model list
        with a single model.

        Raises FileNotFoundError if the configured word vector file is missing.
        """
        self.query_config = None
        with open(args.query_config,'r',encoding='utf8') as fp:
            self.query_config = json.load(fp)
        log_filename = None
        if 'log_filename' in self.query_config:
            log_filename = self.query_config['log_filename']
        self.init_logging(log_filename)
        self.logger.info("initializing server...")
        #Load models
        self.models = []
        identifier = 0
        if not args.model_file is None:
            # Bug fix: QueryModel.__init__ requires (model_file, identifier);
            # this path previously called QueryModel(args.model_file) and
            # raised a TypeError whenever --model_file was given.
            self.models.append(QueryModel(args.model_file,identifier))
            self.logger.info("loading model {} from {}".format(identifier,args.model_file))
        else:
            for m in self.query_config['models']:
                self.models.append(QueryModel(m,identifier))
                self.logger.info("loading model {} from {}".format(identifier,m))
                identifier += 1
        self.logger.info("models loaded successfully")
        # Map each annotation (target) name to the indices of the models that
        # predict it, so voting only consults capable models.
        self.target_name_to_models = {}
        for i in range(len(self.models)):
            m = self.models[i]
            for target_name in m.target_name_to_def:
                if not target_name in self.target_name_to_models:
                    self.target_name_to_models[target_name] = [i]
                else:
                    self.target_name_to_models[target_name].append(i)
        # Optional CoreNLP preprocessing service; port defaults to 9000.
        self.corenlp_server = None
        self.corenlp_port = 9000
        if 'corenlp_server' in self.query_config:
            self.corenlp_server = self.query_config['corenlp_server']
        if 'corenlp_port' in self.query_config:
            self.corenlp_port = self.query_config['corenlp_port']
        self.wordvector_file = self.query_config['wordvector_file']
        wv_path = Path(self.wordvector_file)
        if (not wv_path.exists()) or wv_path.is_dir():
            self.logger.critical("word vector file does not exist: {}".format(self.wordvector_file))
            raise FileNotFoundError
        self.hostname = self.query_config['hostname']
        self.port = self.query_config['port']
        self.dp = create_data.DataProcessor(self.wordvector_file)

    def query(self,text):
        """Run *text* through every model and return the voted result as JSON."""
        query_input = self.dp.get_data(text,self.corenlp_server,self.corenlp_port,self.wordvector_file)
        results = []
        for i in range(len(self.models)):
            model = self.models[i]
            result = model.query(query_input)
            results.append(result)
        averaged_result = self.average_results(results,text)
        return json.dumps(averaged_result,indent=4,ensure_ascii=False)

    async def handler(self,websocket,path):
        """Per-connection loop: answer every incoming message until the client closes."""
        while True:
            try:
                message = await websocket.recv()
                self.logger.info("received query: {}".format(message))
            except websockets.ConnectionClosed:
                break
            else:
                result = self.query(message)
                self.logger.info("sent reply: {}".format(result))
                await websocket.send(result)

    def start(self):
        """Start the websocket server and run the asyncio event loop forever."""
        start_server = websockets.serve(self.handler, self.hostname, self.port)
        self.logger.info("listening on {}:{}".format(self.hostname,self.port))
        asyncio.get_event_loop().run_until_complete(start_server)
        asyncio.get_event_loop().run_forever()

    #TODO break ties in a special way?
    def find_max_in_dict(self,dictionary):
        """Return the key with the strictly highest positive count, or None.

        Ties are broken by iteration order (the first key reaching the maximal
        count wins); an empty dict or all-zero counts yield None.
        """
        best_class = None
        max_votes = 0
        for class_name, num_votes in dictionary.items():
            if num_votes > max_votes:
                max_votes = num_votes
                best_class = class_name
        return best_class

    def average_results(self,results,query_string):
        """Majority-vote the per-model predictions for one query.

        results -- list of per-model prediction dicts, indexed like self.models.
        query_string -- the raw query text (re-tokenized here so per-token
        annotations can be paired with their words).
        Returns a dict mapping each target name to its voted prediction.
        """
        query_string = self.dp.clean_string(query_string)
        query_string = query_string.split(" ")
        query_length = len(query_string)
        json_result = {}
        for target_name, model_indices in self.target_name_to_models.items():
            target_type = self.models[model_indices[0]].target_name_to_def[target_name]['type']
            if target_type == "sentence_class":
                # One class per sentence: tally each model's vote.
                result = {}
                for i in model_indices:
                    model = self.models[i]
                    target_value = results[i][target_name]
                    predicted_class = target_value[0]
                    predicted_class_name = model.name_to_index_to_name[target_name][predicted_class]
                    if predicted_class_name in result:
                        result[predicted_class_name] += 1
                    else:
                        result[predicted_class_name] = 1
                best_class = self.find_max_in_dict(result)
                json_result[target_name] = best_class
            elif target_type == "class_sequence":
                # One class per token: tally votes per token position.
                result = []
                for i in range(query_length):
                    result.append({})
                for i in model_indices:
                    model = self.models[i]
                    target_value = results[i][target_name]
                    for j in range(query_length):
                        predicted_class = target_value[0,j]
                        predicted_class_name = model.name_to_index_to_name[target_name][predicted_class]
                        if predicted_class_name in result[j]:
                            result[j][predicted_class_name] += 1
                        else:
                            result[j][predicted_class_name] = 1
                # Pair every token with its winning class.
                best_class_name_list = []
                for i in range(query_length):
                    best_class = self.find_max_in_dict(result[i])
                    best_class_name_list.append((query_string[i],best_class))
                json_result[target_name] = best_class_name_list
            elif target_type == "fixed_length_class_sequence":
                #TODO possible error here if different models have different fixed lengths for this annotation, if they were trained on the same dataset this should not occur
                sequence_length = self.models[model_indices[0]].target_name_to_def[target_name]['sequence_length']
                result = []
                for i in range(sequence_length):
                    result.append({})
                for i in model_indices:
                    model = self.models[i]
                    target_value = results[i][target_name]
                    for j in range(sequence_length):
                        predicted_class = target_value[0,j]
                        predicted_class_name = model.name_to_index_to_name[target_name][predicted_class]
                        if predicted_class_name in result[j]:
                            result[j][predicted_class_name] += 1
                        else:
                            result[j][predicted_class_name] = 1
                best_class_name_list = []
                for i in range(sequence_length):
                    best_class = self.find_max_in_dict(result[i])
                    best_class_name_list.append(best_class)
                json_result[target_name] = best_class_name_list
        return json_result

    def init_logging(self,log_filename):
        """Configure the 'root' logger: INFO console handler plus an optional
        rotating file handler (10 MiB x 5 backups) at *log_filename*."""
        #set up logging
        root_logger = logging.getLogger('root')
        root_logger.setLevel(logging.DEBUG)
        #create formatter
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        #create console handler
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        root_logger.addHandler(ch)
        #create rotating file handler
        try:
            if not log_filename is None:
                maxBytes = 1024*1024*10
                backupCount=5
                fh = logging.handlers.RotatingFileHandler(log_filename, maxBytes=maxBytes, backupCount=backupCount,mode='a')
                fh.setFormatter(formatter)
                root_logger.addHandler(fh)
        except Exception as ex:
            root_logger.error("Could not create log file {}, reason: {}".format(log_filename,ex))
        self.logger = logging.getLogger('root.QueryServer')
def parse_args():
    """Parse command line options and return the resulting namespace."""
    arg_parser = argparse.ArgumentParser(description="Perform NL classification with pre-trained neural networks")
    # Configuration file location; defaults to the file next to the script.
    arg_parser.add_argument("--query_config",type=str,default="query_config.json",help="json file containing configuration")
    # Optional single-model override of the configured model list.
    arg_parser.add_argument("--model_file", help="path to saved model, overrides models specified in query config file")
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Script entry point: parse CLI options, build the server (loads all
    # configured models), then block forever serving websocket queries.
    args = parse_args()
    query_server = QueryServer(args)
    query_server.start()
query_server.start()
| [
"json.load",
"websockets.serve",
"argparse.ArgumentParser",
"asyncio.get_event_loop",
"logging.StreamHandler",
"numpy.zeros",
"logging.handlers.RotatingFileHandler",
"json.dumps",
"logging.Formatter",
"parse.parse_json_file_with_index",
"pathlib.Path",
"create_data.DataProcessor",
"logging.g... | [((11317, 11419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform NL classification with pre-trained neural networks"""'}), "(description=\n 'Perform NL classification with pre-trained neural networks')\n", (11340, 11419), False, 'import argparse\n'), ((509, 525), 'pathlib.Path', 'Path', (['model_file'], {}), '(model_file)\n', (513, 525), False, 'from pathlib import Path\n'), ((1115, 1172), 'model.MultiTaskModel', 'MultiTaskModel', (['self.config', 'self.sentence_length', '{}', '{}'], {}), '(self.config, self.sentence_length, {}, {})\n', (1129, 1172), False, 'from model import MultiTaskModel\n'), ((1969, 2092), 'parse.parse_json_file_with_index', 'parse.parse_json_file_with_index', (['query_input', 'self.name_to_name_to_indices', 'self.input_names', '[]', 'self.sentence_length'], {}), '(query_input, self.name_to_name_to_indices,\n self.input_names, [], self.sentence_length)\n', (2001, 2092), False, 'import parse\n'), ((4803, 4829), 'pathlib.Path', 'Path', (['self.wordvector_file'], {}), '(self.wordvector_file)\n', (4807, 4829), False, 'from pathlib import Path\n'), ((5142, 5189), 'create_data.DataProcessor', 'create_data.DataProcessor', (['self.wordvector_file'], {}), '(self.wordvector_file)\n', (5167, 5189), False, 'import create_data\n'), ((5578, 5635), 'json.dumps', 'json.dumps', (['averaged_result'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(averaged_result, indent=4, ensure_ascii=False)\n', (5588, 5635), False, 'import json\n'), ((6125, 6181), 'websockets.serve', 'websockets.serve', (['self.handler', 'self.hostname', 'self.port'], {}), '(self.handler, self.hostname, self.port)\n', (6141, 6181), False, 'import websockets\n'), ((10327, 10352), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (10344, 10352), False, 'import logging\n'), ((10445, 10518), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), 
"('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (10462, 10518), False, 'import logging\n'), ((10565, 10588), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (10586, 10588), False, 'import logging\n'), ((11238, 11275), 'logging.getLogger', 'logging.getLogger', (['"""root.QueryServer"""'], {}), "('root.QueryServer')\n", (11255, 11275), False, 'import logging\n'), ((831, 844), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (840, 844), False, 'import json\n'), ((1016, 1029), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1025, 1029), False, 'import json\n'), ((3296, 3309), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (3305, 3309), False, 'import json\n'), ((6269, 6293), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6291, 6293), False, 'import asyncio\n'), ((6335, 6359), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6357, 6359), False, 'import asyncio\n'), ((10894, 11002), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['log_filename'], {'maxBytes': 'maxBytes', 'backupCount': 'backupCount', 'mode': '"""a"""'}), "(log_filename, maxBytes=maxBytes,\n backupCount=backupCount, mode='a')\n", (10930, 11002), False, 'import logging\n'), ((2931, 2952), 'numpy.zeros', 'np.zeros', (['array_shape'], {}), '(array_shape)\n', (2939, 2952), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from unittest import mock
import numpy as np
from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.observation import Observation, ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ComparisonOp, OutcomeConstraint
from ax.core.parameter import (
ChoiceParameter,
FixedParameter,
ParameterType,
RangeParameter,
)
from ax.core.search_space import SearchSpace
from ax.modelbridge.discrete import DiscreteModelBridge, _get_parameter_values
from ax.models.discrete_base import DiscreteModel
from ax.utils.common.testutils import TestCase
class DiscreteModelBridgeTest(TestCase):
    """Unit tests for ``DiscreteModelBridge`` and ``_get_parameter_values``.

    ``DiscreteModelBridge.__init__`` is patched out in each test so the
    bridge's private ``_fit``/``_predict``/``_gen``/``_cross_validate``
    methods can be exercised directly against a mocked ``DiscreteModel``.
    """

    def setUp(self):
        """Build a small discrete search space and matching observations."""
        # x: float choice, y: string choice, z: fixed bool.
        self.parameters = [
            ChoiceParameter("x", ParameterType.FLOAT, values=[0, 1]),
            ChoiceParameter("y", ParameterType.STRING, values=["foo", "bar"]),
            FixedParameter("z", ParameterType.BOOL, value=True),
        ]
        parameter_constraints = []
        self.search_space = SearchSpace(self.parameters, parameter_constraints)
        self.observation_features = [
            ObservationFeatures(parameters={"x": 0, "y": "foo", "z": True}),
            ObservationFeatures(parameters={"x": 1, "y": "foo", "z": True}),
            ObservationFeatures(parameters={"x": 1, "y": "bar", "z": True}),
        ]
        # Third observation deliberately reports metric "a" only, so the
        # per-metric training sets for "a" and "b" have different sizes.
        self.observation_data = [
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([1.0, -1.0]),
                covariance=np.array([[1.0, 4.0], [4.0, 6.0]]),
            ),
            ObservationData(
                metric_names=["a", "b"],
                means=np.array([2.0, -2.0]),
                covariance=np.array([[2.0, 5.0], [5.0, 7.0]]),
            ),
            ObservationData(
                metric_names=["a"], means=np.array([3.0]), covariance=np.array([[3.0]])
            ),
        ]
        self.observations = [
            Observation(
                features=self.observation_features[i],
                data=self.observation_data[i],
                arm_name=str(i),
            )
            for i in range(3)
        ]
        self.pending_observations = {
            "b": [ObservationFeatures(parameters={"x": 0, "y": "foo", "z": True})]
        }
        self.model_gen_options = {"option": "yes"}
    @mock.patch(
        "ax.modelbridge.discrete.DiscreteModelBridge.__init__", return_value=None
    )
    def testFit(self, mock_init):
        """_fit forwards per-metric training data and parameter values."""
        ma = DiscreteModelBridge()
        ma._training_data = self.observations
        model = mock.create_autospec(DiscreteModel, instance=True)
        ma._fit(
            model, self.search_space, self.observation_features, self.observation_data
        )
        self.assertEqual(ma.parameters, ["x", "y", "z"])
        self.assertEqual(sorted(ma.outcomes), ["a", "b"])
        # Expected per-metric training sets (metric "b" is missing the
        # third observation, see setUp).
        Xs = {
            "a": [[0, "foo", True], [1, "foo", True], [1, "bar", True]],
            "b": [[0, "foo", True], [1, "foo", True]],
        }
        Ys = {"a": [[1.0], [2.0], [3.0]], "b": [[-1.0], [-2.0]]}
        Yvars = {"a": [[1.0], [2.0], [3.0]], "b": [[6.0], [7.0]]}
        parameter_values = [[0.0, 1.0], ["foo", "bar"], [True]]
        model_fit_args = model.fit.mock_calls[0][2]
        for i, x in enumerate(model_fit_args["Xs"]):
            self.assertEqual(x, Xs[ma.outcomes[i]])
        for i, y in enumerate(model_fit_args["Ys"]):
            self.assertEqual(y, Ys[ma.outcomes[i]])
        for i, v in enumerate(model_fit_args["Yvars"]):
            self.assertEqual(v, Yvars[ma.outcomes[i]])
        self.assertEqual(model_fit_args["parameter_values"], parameter_values)
        # An observation with no parameters (status-quo-like) must be rejected.
        sq_feat = ObservationFeatures({})
        sq_data = self.observation_data[0]
        with self.assertRaises(ValueError):
            ma._fit(
                model,
                self.search_space,
                self.observation_features + [sq_feat],
                self.observation_data + [sq_data],
            )
    @mock.patch(
        "ax.modelbridge.discrete.DiscreteModelBridge.__init__", return_value=None
    )
    def testPredict(self, mock_init):
        """_predict converts model output arrays back to ObservationData."""
        ma = DiscreteModelBridge()
        model = mock.MagicMock(DiscreteModel, autospec=True, instance=True)
        model.predict.return_value = (
            np.array([[1.0, -1], [2.0, -2]]),
            np.stack(
                (np.array([[1.0, 4.0], [4.0, 6]]), np.array([[2.0, 5.0], [5.0, 7]]))
            ),
        )
        ma.model = model
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        observation_data = ma._predict(self.observation_features)
        X = [[0, "foo", True], [1, "foo", True], [1, "bar", True]]
        # BUGFIX: was assertTrue(actual, X), which treats X as the failure
        # message and never compares the two values; use assertEqual.
        self.assertEqual(model.predict.mock_calls[0][2]["X"], X)
        for i, od in enumerate(observation_data):
            self.assertEqual(od, self.observation_data[i])
    @mock.patch(
        "ax.modelbridge.discrete.DiscreteModelBridge.__init__", return_value=None
    )
    def testGen(self, mock_init):
        """_gen translates config/constraints to model.gen arguments."""
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = DiscreteModelBridge()
        model = mock.MagicMock(DiscreteModel, autospec=True, instance=True)
        model.gen.return_value = ([[0.0, 2.0, 3.0], [1.0, 1.0, 3.0]], [1.0, 2.0], {})
        ma.model = model
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        observation_features, weights, best_observation, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = model.gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(
            gen_args["parameter_values"], [[0.0, 1.0], ["foo", "bar"], [True]]
        )
        # Minimized objective "a" becomes weight -1; "b" gets 0.
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0, 0.0]))
        )
        # "b" >= 2 is encoded as -b <= -2.
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0], np.array([[0.0, -1.0]]))
        )
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1], np.array([[-2]]))
        )
        self.assertEqual(gen_args["pending_observations"][0], [])
        self.assertEqual(gen_args["pending_observations"][1], [[0, "foo", True]])
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(
            observation_features[0].parameters, {"x": 0.0, "y": 2.0, "z": 3.0}
        )
        self.assertEqual(
            observation_features[1].parameters, {"x": 1.0, "y": 1.0, "z": 3.0}
        )
        self.assertEqual(weights, [1.0, 2.0])
        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=optimization_config,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options={},
        )
        gen_args = model.gen.mock_calls[1][2]
        self.assertEqual(gen_args["parameter_values"], [[0.0, 1.0], ["foo", "bar"]])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["pending_observations"])
        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, True)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
                model_gen_options={},
            )
    @mock.patch(
        "ax.modelbridge.discrete.DiscreteModelBridge.__init__", return_value=None
    )
    def testCrossValidate(self, mock_init):
        """_cross_validate round-trips training/test data through the model."""
        ma = DiscreteModelBridge()
        model = mock.MagicMock(DiscreteModel, autospec=True, instance=True)
        model.cross_validate.return_value = (
            np.array([[1.0, -1], [2.0, -2]]),
            np.stack(
                (np.array([[1.0, 4.0], [4.0, 6]]), np.array([[2.0, 5.0], [5.0, 7]]))
            ),
        )
        ma.model = model
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        observation_data = ma._cross_validate(
            self.observation_features, self.observation_data, self.observation_features
        )
        Xs = [
            [[0, "foo", True], [1, "foo", True], [1, "bar", True]],
            [[0, "foo", True], [1, "foo", True]],
        ]
        Ys = [[[1.0], [2.0], [3.0]], [[-1.0], [-2.0]]]
        Yvars = [[[1.0], [2.0], [3.0]], [[6.0], [7.0]]]
        Xtest = [[0, "foo", True], [1, "foo", True], [1, "bar", True]]
        # Transform to arrays:
        model_cv_args = model.cross_validate.mock_calls[0][2]
        for i, x in enumerate(model_cv_args["Xs_train"]):
            self.assertEqual(x, Xs[i])
        for i, y in enumerate(model_cv_args["Ys_train"]):
            self.assertEqual(y, Ys[i])
        for i, v in enumerate(model_cv_args["Yvars_train"]):
            self.assertEqual(v, Yvars[i])
        self.assertEqual(model_cv_args["X_test"], Xtest)
        # Transform from arrays:
        for i, od in enumerate(observation_data):
            self.assertEqual(od, self.observation_data[i])
    def testGetParameterValues(self):
        """_get_parameter_values lists values and rejects non-discrete params."""
        parameter_values = _get_parameter_values(self.search_space, ["x", "y", "z"])
        self.assertEqual(parameter_values, [[0.0, 1.0], ["foo", "bar"], [True]])
        search_space = SearchSpace(self.parameters)
        search_space._parameters["x"] = RangeParameter(
            "x", ParameterType.FLOAT, 0.1, 0.4
        )
        with self.assertRaises(ValueError):
            _get_parameter_values(search_space, ["x", "y", "z"])
| [
"ax.core.observation.ObservationFeatures",
"unittest.mock.create_autospec",
"ax.core.parameter.RangeParameter",
"unittest.mock.MagicMock",
"ax.modelbridge.discrete.DiscreteModelBridge",
"ax.core.metric.Metric",
"ax.core.parameter.FixedParameter",
"unittest.mock.patch",
"numpy.array",
"ax.modelbrid... | [((2469, 2558), 'unittest.mock.patch', 'mock.patch', (['"""ax.modelbridge.discrete.DiscreteModelBridge.__init__"""'], {'return_value': 'None'}), "('ax.modelbridge.discrete.DiscreteModelBridge.__init__',\n return_value=None)\n", (2479, 2558), False, 'from unittest import mock\n'), ((4115, 4204), 'unittest.mock.patch', 'mock.patch', (['"""ax.modelbridge.discrete.DiscreteModelBridge.__init__"""'], {'return_value': 'None'}), "('ax.modelbridge.discrete.DiscreteModelBridge.__init__',\n return_value=None)\n", (4125, 4204), False, 'from unittest import mock\n'), ((4991, 5080), 'unittest.mock.patch', 'mock.patch', (['"""ax.modelbridge.discrete.DiscreteModelBridge.__init__"""'], {'return_value': 'None'}), "('ax.modelbridge.discrete.DiscreteModelBridge.__init__',\n return_value=None)\n", (5001, 5080), False, 'from unittest import mock\n'), ((8460, 8549), 'unittest.mock.patch', 'mock.patch', (['"""ax.modelbridge.discrete.DiscreteModelBridge.__init__"""'], {'return_value': 'None'}), "('ax.modelbridge.discrete.DiscreteModelBridge.__init__',\n return_value=None)\n", (8470, 8549), False, 'from unittest import mock\n'), ((1143, 1194), 'ax.core.search_space.SearchSpace', 'SearchSpace', (['self.parameters', 'parameter_constraints'], {}), '(self.parameters, parameter_constraints)\n', (1154, 1194), False, 'from ax.core.search_space import SearchSpace\n'), ((2616, 2637), 'ax.modelbridge.discrete.DiscreteModelBridge', 'DiscreteModelBridge', ([], {}), '()\n', (2635, 2637), False, 'from ax.modelbridge.discrete import DiscreteModelBridge, _get_parameter_values\n'), ((2700, 2750), 'unittest.mock.create_autospec', 'mock.create_autospec', (['DiscreteModel'], {'instance': '(True)'}), '(DiscreteModel, instance=True)\n', (2720, 2750), False, 'from unittest import mock\n'), ((3799, 3822), 'ax.core.observation.ObservationFeatures', 'ObservationFeatures', (['{}'], {}), '({})\n', (3818, 3822), False, 'from ax.core.observation import Observation, ObservationData, 
ObservationFeatures\n'), ((4266, 4287), 'ax.modelbridge.discrete.DiscreteModelBridge', 'DiscreteModelBridge', ([], {}), '()\n', (4285, 4287), False, 'from ax.modelbridge.discrete import DiscreteModelBridge, _get_parameter_values\n'), ((4304, 4363), 'unittest.mock.MagicMock', 'mock.MagicMock', (['DiscreteModel'], {'autospec': '(True)', 'instance': '(True)'}), '(DiscreteModel, autospec=True, instance=True)\n', (4318, 4363), False, 'from unittest import mock\n'), ((5415, 5436), 'ax.modelbridge.discrete.DiscreteModelBridge', 'DiscreteModelBridge', ([], {}), '()\n', (5434, 5436), False, 'from ax.modelbridge.discrete import DiscreteModelBridge, _get_parameter_values\n'), ((5453, 5512), 'unittest.mock.MagicMock', 'mock.MagicMock', (['DiscreteModel'], {'autospec': '(True)', 'instance': '(True)'}), '(DiscreteModel, autospec=True, instance=True)\n', (5467, 5512), False, 'from unittest import mock\n'), ((7217, 7249), 'ax.core.search_space.SearchSpace', 'SearchSpace', (['self.parameters[:2]'], {}), '(self.parameters[:2])\n', (7228, 7249), False, 'from ax.core.search_space import SearchSpace\n'), ((8617, 8638), 'ax.modelbridge.discrete.DiscreteModelBridge', 'DiscreteModelBridge', ([], {}), '()\n', (8636, 8638), False, 'from ax.modelbridge.discrete import DiscreteModelBridge, _get_parameter_values\n'), ((8655, 8714), 'unittest.mock.MagicMock', 'mock.MagicMock', (['DiscreteModel'], {'autospec': '(True)', 'instance': '(True)'}), '(DiscreteModel, autospec=True, instance=True)\n', (8669, 8714), False, 'from unittest import mock\n'), ((10162, 10219), 'ax.modelbridge.discrete._get_parameter_values', '_get_parameter_values', (['self.search_space', "['x', 'y', 'z']"], {}), "(self.search_space, ['x', 'y', 'z'])\n", (10183, 10219), False, 'from ax.modelbridge.discrete import DiscreteModelBridge, _get_parameter_values\n'), ((10324, 10352), 'ax.core.search_space.SearchSpace', 'SearchSpace', (['self.parameters'], {}), '(self.parameters)\n', (10335, 10352), False, 'from ax.core.search_space 
import SearchSpace\n'), ((10393, 10443), 'ax.core.parameter.RangeParameter', 'RangeParameter', (['"""x"""', 'ParameterType.FLOAT', '(0.1)', '(0.4)'], {}), "('x', ParameterType.FLOAT, 0.1, 0.4)\n", (10407, 10443), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((867, 923), 'ax.core.parameter.ChoiceParameter', 'ChoiceParameter', (['"""x"""', 'ParameterType.FLOAT'], {'values': '[0, 1]'}), "('x', ParameterType.FLOAT, values=[0, 1])\n", (882, 923), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((937, 1002), 'ax.core.parameter.ChoiceParameter', 'ChoiceParameter', (['"""y"""', 'ParameterType.STRING'], {'values': "['foo', 'bar']"}), "('y', ParameterType.STRING, values=['foo', 'bar'])\n", (952, 1002), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((1016, 1067), 'ax.core.parameter.FixedParameter', 'FixedParameter', (['"""z"""', 'ParameterType.BOOL'], {'value': '(True)'}), "('z', ParameterType.BOOL, value=True)\n", (1030, 1067), False, 'from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType, RangeParameter\n'), ((1246, 1309), 'ax.core.observation.ObservationFeatures', 'ObservationFeatures', ([], {'parameters': "{'x': 0, 'y': 'foo', 'z': True}"}), "(parameters={'x': 0, 'y': 'foo', 'z': True})\n", (1265, 1309), False, 'from ax.core.observation import Observation, ObservationData, ObservationFeatures\n'), ((1323, 1386), 'ax.core.observation.ObservationFeatures', 'ObservationFeatures', ([], {'parameters': "{'x': 1, 'y': 'foo', 'z': True}"}), "(parameters={'x': 1, 'y': 'foo', 'z': True})\n", (1342, 1386), False, 'from ax.core.observation import Observation, ObservationData, ObservationFeatures\n'), ((1400, 1463), 'ax.core.observation.ObservationFeatures', 'ObservationFeatures', ([], {'parameters': "{'x': 1, 'y': 'bar', 'z': True}"}), "(parameters={'x': 1, 'y': 'bar', 'z': True})\n", 
(1419, 1463), False, 'from ax.core.observation import Observation, ObservationData, ObservationFeatures\n'), ((4415, 4447), 'numpy.array', 'np.array', (['[[1.0, -1], [2.0, -2]]'], {}), '([[1.0, -1], [2.0, -2]])\n', (4423, 4447), True, 'import numpy as np\n'), ((8773, 8805), 'numpy.array', 'np.array', (['[[1.0, -1], [2.0, -2]]'], {}), '([[1.0, -1], [2.0, -2]])\n', (8781, 8805), True, 'import numpy as np\n'), ((10522, 10574), 'ax.modelbridge.discrete._get_parameter_values', '_get_parameter_values', (['search_space', "['x', 'y', 'z']"], {}), "(search_space, ['x', 'y', 'z'])\n", (10543, 10574), False, 'from ax.modelbridge.discrete import DiscreteModelBridge, _get_parameter_values\n'), ((2337, 2400), 'ax.core.observation.ObservationFeatures', 'ObservationFeatures', ([], {'parameters': "{'x': 0, 'y': 'foo', 'z': True}"}), "(parameters={'x': 0, 'y': 'foo', 'z': True})\n", (2356, 2400), False, 'from ax.core.observation import Observation, ObservationData, ObservationFeatures\n'), ((5968, 5991), 'ax.core.observation.ObservationFeatures', 'ObservationFeatures', (['{}'], {}), '({})\n', (5987, 5991), False, 'from ax.core.observation import Observation, ObservationData, ObservationFeatures\n'), ((6344, 6365), 'numpy.array', 'np.array', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (6352, 6365), True, 'import numpy as np\n'), ((6465, 6488), 'numpy.array', 'np.array', (['[[0.0, -1.0]]'], {}), '([[0.0, -1.0]])\n', (6473, 6488), True, 'import numpy as np\n'), ((6588, 6604), 'numpy.array', 'np.array', (['[[-2]]'], {}), '([[-2]])\n', (6596, 6604), True, 'import numpy as np\n'), ((7528, 7551), 'ax.core.observation.ObservationFeatures', 'ObservationFeatures', (['{}'], {}), '({})\n', (7547, 7551), False, 'from ax.core.observation import Observation, ObservationData, ObservationFeatures\n'), ((1601, 1622), 'numpy.array', 'np.array', (['[1.0, -1.0]'], {}), '([1.0, -1.0])\n', (1609, 1622), True, 'import numpy as np\n'), ((1651, 1685), 'numpy.array', 'np.array', (['[[1.0, 4.0], [4.0, 6.0]]'], 
{}), '([[1.0, 4.0], [4.0, 6.0]])\n', (1659, 1685), True, 'import numpy as np\n'), ((1794, 1815), 'numpy.array', 'np.array', (['[2.0, -2.0]'], {}), '([2.0, -2.0])\n', (1802, 1815), True, 'import numpy as np\n'), ((1844, 1878), 'numpy.array', 'np.array', (['[[2.0, 5.0], [5.0, 7.0]]'], {}), '([[2.0, 5.0], [5.0, 7.0]])\n', (1852, 1878), True, 'import numpy as np\n'), ((1966, 1981), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (1974, 1981), True, 'import numpy as np\n'), ((1994, 2011), 'numpy.array', 'np.array', (['[[3.0]]'], {}), '([[3.0]])\n', (2002, 2011), True, 'import numpy as np\n'), ((4488, 4520), 'numpy.array', 'np.array', (['[[1.0, 4.0], [4.0, 6]]'], {}), '([[1.0, 4.0], [4.0, 6]])\n', (4496, 4520), True, 'import numpy as np\n'), ((4522, 4554), 'numpy.array', 'np.array', (['[[2.0, 5.0], [5.0, 7]]'], {}), '([[2.0, 5.0], [5.0, 7]])\n', (4530, 4554), True, 'import numpy as np\n'), ((5239, 5250), 'ax.core.metric.Metric', 'Metric', (['"""a"""'], {}), "('a')\n", (5245, 5250), False, 'from ax.core.metric import Metric\n'), ((7956, 7967), 'ax.core.metric.Metric', 'Metric', (['"""a"""'], {}), "('a')\n", (7962, 7967), False, 'from ax.core.metric import Metric\n'), ((8377, 8400), 'ax.core.observation.ObservationFeatures', 'ObservationFeatures', (['{}'], {}), '({})\n', (8396, 8400), False, 'from ax.core.observation import Observation, ObservationData, ObservationFeatures\n'), ((8846, 8878), 'numpy.array', 'np.array', (['[[1.0, 4.0], [4.0, 6]]'], {}), '([[1.0, 4.0], [4.0, 6]])\n', (8854, 8878), True, 'import numpy as np\n'), ((8880, 8912), 'numpy.array', 'np.array', (['[[2.0, 5.0], [5.0, 7]]'], {}), '([[2.0, 5.0], [5.0, 7]])\n', (8888, 8912), True, 'import numpy as np\n'), ((5336, 5347), 'ax.core.metric.Metric', 'Metric', (['"""b"""'], {}), "('b')\n", (5342, 5347), False, 'from ax.core.metric import Metric\n'), ((8054, 8065), 'ax.core.metric.Metric', 'Metric', (['"""b"""'], {}), "('b')\n", (8060, 8065), False, 'from ax.core.metric import Metric\n')] |
import numpy as np
import scipy.stats as sts
class Correlation:
    """
    Rank automatic-metric candidates against human DA scores.

    Given a dataframe with one column per candidate metric plus a 'Human'
    column, candidates are ordered by Pearson correlation with the human
    scores; the Wilcoxon rank sum test then clusters the ranks, treating
    any pair with p-value >= 0.05 as statistically tied.
    """
    def __init__(self, frame):
        # DataFrame: candidate metric columns plus a 'Human' DA column.
        self.frame = frame
    def to_array(self, col):
        """Return column ``col`` of the frame as a float64 numpy array."""
        return np.array(self.frame[col], dtype=np.float64)
    def ranksums_test(self, col1: str, col2: str) -> float:
        """Wilcoxon rank-sum p-value of col1 vs col2 (-1 when identical)."""
        if col1 == col2:
            return -1
        return sts.ranksums(self.to_array(col1), self.to_array(col2)).pvalue
    def column_by_Pearson(self):
        """Candidate columns, sorted by descending Pearson correlation with Human."""
        assert 'Human' in self.frame.columns, 'Absent: "Human" scores'
        corr = self.frame.corr('pearson')['Human']
        descending = np.argsort([v for _, v in corr.items()])[::-1]
        ranked = np.array(corr.keys())[descending]
        # drop the Human column itself from the ranking
        return np.delete(ranked, np.where(ranked == 'Human'))
    def rank_cluster(self):
        """Pair each candidate with its cluster rank (ties share a rank)."""
        candidates = self.column_by_Pearson()
        n_cand = len(candidates)
        pvals = np.array([[self.ranksums_test(a, b) for b in candidates]
                          for a in candidates])
        ranks = np.zeros(n_cand).astype(int)
        for i in range(n_cand - 1):
            # demote everyone below i only if i differs significantly
            # from all of them
            if all(pvals[i, i + 1:] < 0.05):
                ranks[i + 1:] += 1
        return list(zip(candidates, ranks))
| [
"numpy.argsort",
"numpy.zeros",
"numpy.where",
"numpy.array"
] | [((582, 625), 'numpy.array', 'np.array', (['self.frame[col]'], {'dtype': 'np.float64'}), '(self.frame[col], dtype=np.float64)\n', (590, 625), True, 'import numpy as np\n'), ((1228, 1246), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (1238, 1246), True, 'import numpy as np\n'), ((1344, 1372), 'numpy.where', 'np.where', (["(sortedK == 'Human')"], {}), "(sortedK == 'Human')\n", (1352, 1372), True, 'import numpy as np\n'), ((1737, 1754), 'numpy.zeros', 'np.zeros', (['num_col'], {}), '(num_col)\n', (1745, 1754), True, 'import numpy as np\n')] |
"""Module implementing point transformations and their matrices."""
import numpy as np
def axis_angle_rotation(axis, angle, point=None, deg=True):
    r"""Build the 4x4 homogeneous matrix rotating space about an arbitrary axis.

    The rotation follows Rodrigues' formula: for a unit axis ``n`` with
    cross-product matrix ``K``, the 3x3 rotation is

        R = I + sin(phi) K + (1 - cos(phi)) K^2

    When the axis passes through ``point`` instead of the origin, the
    transform also carries the translation ``point - R @ point`` so that
    ``p' = R @ (p - point) + point``.

    Parameters
    ----------
    axis : 3-length sequence
        Direction vector of the rotation axis; any nonzero vector.
    angle : float
        Counterclockwise rotation angle about the axis (when facing the
        axis direction vector).
    point : 3-length sequence, optional
        A reference point the rotation axis passes through; by default
        the axis contains the origin.
    deg : bool, optional
        Interpret ``angle`` in degrees (default) rather than radians.

    Returns
    -------
    numpy.ndarray
        A 4x4 homogeneous transformation matrix.

    Examples
    --------
    >>> import numpy as np
    >>> from pyvista import transformations
    >>> trans = transformations.axis_angle_rotation([1, 1, 1], 120)
    >>> corners = np.array([
    ...     [1, 0, 0],
    ...     [0, 1, 0],
    ...     [0, 0, 1],
    ... ])
    >>> rotated = transformations.apply_transformation_to_points(trans, corners)
    >>> np.allclose(rotated, corners[[1, 2, 0], :])
    True
    """
    if deg:
        angle *= np.pi / 180
    # Exact whole turns are the identity; this fast path deliberately runs
    # before any argument validation, matching the documented behavior.
    if angle % (2 * np.pi) == 0:
        return np.eye(4)
    axis = np.asarray(axis, dtype='float64')
    if axis.shape != (3,):
        raise ValueError('Axis must be a 3-length array-like.')
    if point is not None:
        point = np.asarray(point)
        if point.shape != (3,):
            raise ValueError('Rotation center must be a 3-length array-like.')
    norm = np.linalg.norm(axis)
    if np.isclose(norm, 0):
        raise ValueError('Cannot rotate around zero vector axis.')
    if not np.isclose(norm, 1):
        axis = axis / norm
    # Cross-product matrix of the unit axis, written out explicitly.
    nx, ny, nz = axis
    K = np.array([
        [0.0, -nz, ny],
        [nz, 0.0, -nx],
        [-ny, nx, 0.0],
    ])
    # Rodrigues' rotation formula.
    R = np.eye(3) + np.sin(angle) * K + (1 - np.cos(angle)) * K @ K
    transform = np.eye(4)
    transform[:3, :3] = R
    if point is not None:
        # Translation part b = point - R @ point shifts the axis so it
        # passes through `point`.
        transform[:3, 3] = point - R @ point
    return transform
def reflection(normal, point=None):
    """Build the 4x4 homogeneous matrix reflecting space across a plane.

    With a unit plane normal ``n`` and its outer (dyadic) product
    ``P = n n^T``, the 3x3 reflection is ``R = I - 2 P``: it reverses the
    component of each point along the normal.  When the plane passes
    through ``point`` rather than the origin, the transform also carries
    the translation ``point - R @ point`` so that
    ``p' = R @ (p - point) + point``.

    Parameters
    ----------
    normal : 3-length sequence
        Normal vector of the reflection plane; any nonzero vector.
    point : 3-length sequence, optional
        A reference point the reflection plane passes through; by default
        the plane contains the origin.

    Returns
    -------
    numpy.ndarray
        A 4x4 homogeneous transformation matrix.

    Examples
    --------
    >>> import numpy as np
    >>> from pyvista import transformations
    >>> trans = transformations.reflection([0, 1, 0])
    >>> verts = np.array([
    ...     [ 1, -1,  1],
    ...     [-1, -1,  1],
    ...     [-1, -1, -1],
    ...     [-1, -1,  1],
    ...     [ 1,  1,  1],
    ...     [-1,  1,  1],
    ...     [-1,  1, -1],
    ...     [-1,  1,  1],
    ... ])
    >>> mirrored = transformations.apply_transformation_to_points(trans, verts)
    >>> np.allclose(mirrored, verts[[np.r_[4:8, 0:4]], :])
    True
    """
    normal = np.asarray(normal, dtype='float64')
    if normal.shape != (3,):
        raise ValueError('Normal must be a 3-length array-like.')
    if point is not None:
        point = np.asarray(point)
        if point.shape != (3,):
            raise ValueError('Plane reference point must be a 3-length array-like.')
    norm = np.linalg.norm(normal)
    if np.isclose(norm, 0):
        raise ValueError('Plane normal cannot be zero.')
    if not np.isclose(norm, 1):
        normal = normal / norm
    # R = I - 2 n n^T flips the normal component of every point.
    R = np.eye(3) - 2 * np.outer(normal, normal)
    transform = np.eye(4)
    transform[:3, :3] = R
    if point is not None:
        # Translation part b = point - R @ point shifts the plane so it
        # passes through `point`.
        transform[:3, 3] = point - R @ point
    return transform
def apply_transformation_to_points(transformation, points, inplace=False):
    """Apply a given transformation matrix (3x3 or 4x4) to a set of points.

    Parameters
    ----------
    transformation : np.ndarray
        Transformation matrix of shape (3, 3) or (4, 4).  The caller's
        matrix is never modified.
    points : np.ndarray
        Array of points to be transformed of shape (N, 3).
    inplace : bool, optional
        Updates points in-place while returning nothing.

    Returns
    -------
    numpy.ndarray or None
        Transformed points, or ``None`` when ``inplace=True``.

    Raises
    ------
    ValueError
        If ``transformation`` is not (3, 3) or (4, 4), or ``points`` is
        not (N, 3).

    Examples
    --------
    Scale a set of points in-place.

    >>> import numpy as np
    >>> import pyvista
    >>> from pyvista import examples
    >>> points = examples.load_airplane().points
    >>> points_orig = points.copy()
    >>> scale_factor = 2
    >>> tf = scale_factor * np.eye(4)
    >>> tf[3, 3,] = 1
    >>> pyvista.transformations.apply_transformation_to_points(tf, points, inplace=True)
    >>> assert np.all(np.isclose(points, scale_factor * points_orig))
    """
    transformation_shape = transformation.shape
    if transformation_shape not in ((3, 3), (4, 4)):
        raise ValueError('`transformation` must be of shape (3, 3) or (4, 4).')

    if points.shape[1] != 3:
        raise ValueError('`points` must be of shape (N, 3).')

    if transformation_shape[0] == 4:
        # Normalize by the homogeneous scale factor.  BUGFIX: the previous
        # in-place `transformation /= ...` silently mutated the caller's
        # matrix (and raised TypeError for integer-dtype input); divide
        # out-of-place instead.
        transformation = transformation / transformation[3, 3]

        # Append the homogeneous coordinate.
        # `points_2` is a copy of the data, not a view.
        points_2 = np.empty((len(points), 4))
        points_2[:, :-1] = points
        points_2[:, -1] = 1
    else:
        points_2 = points

    # Transform all points with a single matrix product, then drop the
    # homogeneous coordinate (a no-op in the 3x3 case).
    points_2 = (transformation @ points_2.T).T[:, :3]

    # If inplace, set the points
    if inplace:
        points[:] = points_2
    else:
        # otherwise return the new points
        return points_2
| [
"numpy.outer",
"numpy.asarray",
"numpy.zeros",
"numpy.isclose",
"numpy.sin",
"numpy.linalg.norm",
"numpy.cos",
"numpy.eye"
] | [((2953, 2986), 'numpy.asarray', 'np.asarray', (['axis'], {'dtype': '"""float64"""'}), "(axis, dtype='float64')\n", (2963, 2986), True, 'import numpy as np\n'), ((3292, 3312), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (3306, 3312), True, 'import numpy as np\n'), ((3320, 3344), 'numpy.isclose', 'np.isclose', (['axis_norm', '(0)'], {}), '(axis_norm, 0)\n', (3330, 3344), True, 'import numpy as np\n'), ((3530, 3546), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3538, 3546), True, 'import numpy as np\n'), ((3680, 3689), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3686, 3689), True, 'import numpy as np\n'), ((6327, 6362), 'numpy.asarray', 'np.asarray', (['normal'], {'dtype': '"""float64"""'}), "(normal, dtype='float64')\n", (6337, 6362), True, 'import numpy as np\n'), ((6680, 6702), 'numpy.linalg.norm', 'np.linalg.norm', (['normal'], {}), '(normal)\n', (6694, 6702), True, 'import numpy as np\n'), ((6710, 6736), 'numpy.isclose', 'np.isclose', (['normal_norm', '(0)'], {}), '(normal_norm, 0)\n', (6720, 6736), True, 'import numpy as np\n'), ((6920, 6944), 'numpy.outer', 'np.outer', (['normal', 'normal'], {}), '(normal, normal)\n', (6928, 6944), True, 'import numpy as np\n'), ((6996, 7005), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7002, 7005), True, 'import numpy as np\n'), ((2931, 2940), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2937, 2940), True, 'import numpy as np\n'), ((3120, 3137), 'numpy.asarray', 'np.asarray', (['point'], {}), '(point)\n', (3130, 3137), True, 'import numpy as np\n'), ((3424, 3448), 'numpy.isclose', 'np.isclose', (['axis_norm', '(1)'], {}), '(axis_norm, 1)\n', (3434, 3448), True, 'import numpy as np\n'), ((6500, 6517), 'numpy.asarray', 'np.asarray', (['point'], {}), '(point)\n', (6510, 6517), True, 'import numpy as np\n'), ((6806, 6832), 'numpy.isclose', 'np.isclose', (['normal_norm', '(1)'], {}), '(normal_norm, 1)\n', (6816, 6832), True, 'import numpy as np\n'), ((6953, 6962), 'numpy.eye', 
'np.eye', (['(3)'], {}), '(3)\n', (6959, 6962), True, 'import numpy as np\n'), ((3604, 3613), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3610, 3613), True, 'import numpy as np\n'), ((3616, 3629), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3622, 3629), True, 'import numpy as np\n'), ((3641, 3654), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3647, 3654), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
import scipy.linalg
import scipy.sparse
import numba
import time
from .tree import Tree
from .misc.mkl_sparse import SpMV_viaMKL
def get_level_information(node_width, theta):
    """Return check/equivalent surface coordinates and radii for one tree level.

    Parameters
    ----------
    node_width : float
        Width of a node at this level of the tree.
    theta : np.ndarray
        Angular discretization points for the circular surfaces.

    Returns
    -------
    tuple
        ``(small_x, small_y, large_x, large_y, r1, r2)`` — the x/y base
        coordinates of the small (radius ``r1``) and large (radius
        ``r2``) circular surfaces.
    """
    # small safety margin dd keeps the surfaces off the node boundary
    dd = 0.01
    half_width = 0.5 * node_width
    r1 = half_width * (np.sqrt(2) + dd)
    r2 = half_width * (4 - np.sqrt(2) - 2 * dd)
    cos_theta = np.cos(theta)
    sin_theta = np.sin(theta)
    return r1 * cos_theta, r1 * sin_theta, r2 * cos_theta, r2 * sin_theta, r1, r2
def fake_print(*args, **kwargs):
    """Accept any arguments and silently discard them (no-op stand-in for print)."""
    return None

def get_print_function(verbose):
    """Return ``print`` when ``verbose`` is truthy, else the no-op ``fake_print``."""
    if verbose:
        return print
    return fake_print
def on_the_fly_fmm(x, y, tau, Nequiv, Ncutoff, Kernel_Form, numba_functions, verbose=False):
    """
    On-the-fly KIFMM
        computes sum_{i!=j} G(x_i,x_j) * tau_j
        for greens function G specified in the functions:
            Kernel_Apply and Kernel_Form
    Inputs (all required except Kernel Apply and verbose):
        x,       float(nsource): x-coordinates of sources
        y,       float(nsource): y-coordinates of sources
        tau,     float(nsource): density
        Nequiv,  int: number of points used in check/equiv surfaces
        Ncutoff, int: maximum number of points per leaf node
        Kernel_Apply, function: Kernel Apply function
        Kernel_Form,  function: Kernel Form function
        verbose, bool: enable verbose output
    Outputs:
        potential, float(nsource)
    Notes on inputs:
        Nequiv determines the precision of the solution
            For the Laplace problem, N=64 gives ~machine precision
        Ncutoff determines the balance of work between FMM and local evals
            The best value for this depends on your machine and how efficient
            your Kernel_Apply/Kernel_Form functions are. If your functions are
            very efficient, set this high (like 2000) for best efficiency. If
            your functions are very slow, set this low but larger than Nequiv.
    Kernel_Form:
        This is a function that evaluates a density, with inputs:
            (sx, sy, tau, tx=None, ty=None)
            where sx, sy are source nodes; tx, ty are target nodes
            and tau is the density
            if tx and ty are not provided, should provide a 'self-eval',
            i.e. not computing the diagonal terms
    Kernel_Apply:
        This is a function that outputs an evaluation matrix, with inputs:
            (sx, sy, tx=None, ty=None)
            where sx, sy are source nodes; tx, ty are target nodes
            if tx and ty are not provided, should provide a 'self-eval'
            matrix, i.e. with zero on the diagonal
        If this function is not provided, one will be generated using the
        kernel_form function, instead.
    See examples for more information on how to construct the Kernel_Form
    and the Kernel_Apply functions

    NOTE(review): despite the text above, Kernel_Apply is NOT a parameter of
    this function; the shallow-tree branch below references an undefined name
    and will raise NameError for trees with <= 2 levels -- confirm intended
    call path before relying on the direct-evaluation fallback.
    """
    my_print = get_print_function(verbose)
    my_print('\nBeginning FMM')
    # build the tree
    st = time.time()
    tree = Tree(x, y, Ncutoff)
    tree_formation_time = (time.time() - st)*1000
    my_print('....Tree formed in: {:0.1f}'.format(tree_formation_time))
    if tree.levels <= 2:
        # just do a direct evaluation in this case
        # NOTE(review): Kernel_Apply is undefined in this scope (see docstring)
        solution = np.zeros(tau.shape[0], dtype=float)
        Kernel_Apply(x, y, tau, solution)
    else:
        solution = _on_the_fly_fmm(tree, tau, Nequiv, Kernel_Form, numba_functions, verbose)
    fmm_time = (time.time()-st)*1000
    my_print('FMM completed in {:0.1f}'.format(fmm_time))
    return solution, tree
def prepare_numba_functions(Kernel_Apply, Kernel_Self_Apply, Kernel_Eval):
    """Compile and return the numba kernels used by the FMM drivers.

    Parameters
    ----------
    Kernel_Apply : callable (numba-jittable)
        Source-to-target apply; called as
        (sx, sy, tx, ty, xshift, yshift, tau, out) at the call sites below.
    Kernel_Self_Apply : callable (numba-jittable)
        Self-interaction apply that skips the diagonal;
        called as (sx, sy, tau, out).
    Kernel_Eval : callable (numba-jittable)
        Pointwise kernel evaluation; called as (sx, sy, tx, ty).

    Returns
    -------
    tuple
        (evaluate_neighbor_interactions, build_neighbor_interactions,
         build_upwards_pass, numba_upwards_pass, numba_downwards_pass2)
    """
    @numba.njit("(f8[:],f8[:],b1[:],i8[:],i8[:],f8[:],i8[:,:],f8[:])", parallel=True)
    def evaluate_neighbor_interactions(x, y, leaf, botind, topind, tau, colleagues, sol):
        # For every leaf node, apply the kernel from each of its (up to 9)
        # colleagues' sources into its own targets; the self box uses the
        # diagonal-free apply.
        n = botind.shape[0]
        for i in numba.prange(n):
            if leaf[i]:
                bind1 = botind[i]
                tind1 = topind[i]
                for j in range(9):
                    ci = colleagues[i,j]
                    if ci >= 0:
                        bind2 = botind[ci]
                        tind2 = topind[ci]
                        if ci == i:
                            Kernel_Self_Apply(x[bind1:tind1], y[bind1:tind1], tau[bind1:tind1], sol[bind1:tind1])
                        else:
                            Kernel_Apply(x[bind2:tind2], y[bind2:tind2], x[bind1:tind1], y[bind1:tind1], 0.0, 0.0, tau[bind2:tind2], sol[bind1:tind1])
    @numba.njit("(f8[:],f8[:],b1[:],i8[:],i8[:],i8[:],i8[:,:],i8,i8[:],i8[:],f8[:])", parallel=True)
    def build_neighbor_interactions(x, y, leaf, ns, botind, topind, colleagues, n_data, iis, jjs, data):
        # Fill COO triplets (iis, jjs, data) for the sparse neighbor matrix.
        # (n_data is accepted but not read here; the caller sizes the arrays.)
        n = botind.shape[0]
        # leaf_vals[i]: number of matrix entries contributed by node i
        leaf_vals = np.zeros(n, dtype=np.int64)
        for i in range(n):
            track_val = 0
            if leaf[i]:
                for j in range(9):
                    ci = colleagues[i,j]
                    if ci >= 0:
                        leaf_vals[i] += ns[i]*ns[ci]
        # exclusive prefix sum gives each node a private slice of the output
        # arrays, so the prange below writes without races
        start_vals = np.empty(n, dtype=np.int64)
        start_vals[0] = 0
        for i in range(1,n):
            start_vals[i] = start_vals[i-1] + leaf_vals[i-1]
        for i in numba.prange(n):
            track_val = 0
            if leaf[i]:
                bind1 = botind[i]
                tind1 = topind[i]
                n1 = tind1 - bind1
                for j in range(9):
                    ci = colleagues[i,j]
                    if ci >= 0:
                        if ci == i:
                            # self block: write all entries except the diagonal
                            for iki, ki in enumerate(range(bind1, tind1)):
                                for ikj, kj in enumerate(range(bind1, tind1)):
                                    if ki != kj:
                                        data[start_vals[i]+track_val+ikj*n1+iki] = Kernel_Eval(x[kj],y[kj],x[ki],y[ki])
                                        iis[start_vals[i]+track_val+ikj*n1+iki] = ki
                                        jjs[start_vals[i]+track_val+ikj*n1+iki] = kj
                            track_val += n1*n1
                        else:
                            bind2 = botind[ci]
                            tind2 = topind[ci]
                            n2 = tind2 - bind2
                            for iki, ki in enumerate(range(bind1, tind1)):
                                for ikj, kj in enumerate(range(bind2, tind2)):
                                    data[start_vals[i]+track_val+ikj*n1+iki] = Kernel_Eval(x[kj],y[kj],x[ki],y[ki])
                                    iis[start_vals[i]+track_val+ikj*n1+iki] = ki
                                    jjs[start_vals[i]+track_val+ikj*n1+iki] = kj
                            track_val += n1*n2
    @numba.njit("(f8[:],f8[:],i8[:],i8[:],f8[:],f8[:],f8[:],f8[:],i8[:],i8[:],f8[:],b1[:],i8)", parallel=True)
    def build_upwards_pass(x, y, botind, topind, xmid, ymid, xring, yring, iis, jjs, data, doit, track_val):
        # Fill COO triplets mapping node source densities to check-surface
        # values (ring points shifted to each node's midpoint); returns the
        # total number of entries written.
        n = botind.shape[0]
        n1 = xring.shape[0]
        # exclusive prefix sum over active nodes -> race-free prange writes
        start_vals = np.empty(n, dtype=np.int64)
        start_vals[0] = 0
        for i in range(1,n):
            adder = n1*(topind[i-1]-botind[i-1]) if doit[i-1] else 0
            start_vals[i] = start_vals[i-1] + adder
        for i in numba.prange(n):
            if doit[i]:
                bi = botind[i]
                ti = topind[i]
                n2 = ti - bi
                for ki in range(n1):
                    for ikj, kj in enumerate(range(bi, ti)):
                        data[start_vals[i]+ki*n2+ikj] = Kernel_Eval(x[kj],y[kj],xring[ki]+xmid[i],yring[ki]+ymid[i])
                        iis [start_vals[i]+ki*n2+ikj] = ki + i*n1
                        jjs [start_vals[i]+ki*n2+ikj] = kj
                track_val += n1*n2
        return track_val
    @numba.njit("(f8[:],f8[:],i8[:],i8[:],i8[:],b1[:],f8[:],f8[:],f8[:],f8[:],f8[:],f8[:,:])",parallel=True)
    def numba_upwards_pass(x, y, botind, topind, ns, compute_upwards, xtarg, ytarg, xmid, ymid, tau, ucheck):
        # Evaluate each active node's sources on its (midpoint-shifted) check
        # surface, accumulating into ucheck[i].
        n = botind.shape[0]
        for i in numba.prange(n):
            if compute_upwards[i] and (ns[i] > 0):
                bi = botind[i]
                ti = topind[i]
                Kernel_Apply(x[bi:ti], y[bi:ti], xtarg, ytarg, xmid[i], ymid[i], tau[bi:ti], ucheck[i])
    @numba.njit("(f8[:],f8[:],i8[:],i8[:],i8[:],b1[:],f8[:],f8[:],f8[:],f8[:],f8[:,:],f8[:])",parallel=True)
    def numba_downwards_pass2(x, y, botind, topind, ns, leaf, xsrc, ysrc, xmid, ymid, local_expansions, sol):
        # Evaluate each leaf's local expansion (equivalent densities on the
        # ring sources) at its own target points, accumulating into sol.
        n = botind.shape[0]
        for i in numba.prange(n):
            if leaf[i] and (ns[i] > 0):
                bi = botind[i]
                ti = topind[i]
                Kernel_Apply(xsrc, ysrc, x[bi:ti], y[bi:ti], -xmid[i], -ymid[i], local_expansions[i], sol[bi:ti])
    return evaluate_neighbor_interactions, build_neighbor_interactions, build_upwards_pass, numba_upwards_pass, numba_downwards_pass2
def _on_the_fly_fmm(tree, tau, Nequiv, Kernel_Form, numba_functions, verbose):
    """Core KIFMM evaluation on an already-built tree (> 2 levels).

    Parameters
    ----------
    tree : Tree
        Quadtree over the source points.
    tau : ndarray(float)
        Densities in the caller's (unsorted) ordering.
    Nequiv : int
        Number of points on each check/equivalent surface.
    Kernel_Form : callable
        Builds a dense kernel matrix between point sets.
    numba_functions : tuple
        Output of prepare_numba_functions.
    verbose : bool
        Enable timing output.

    Returns
    -------
    ndarray(float)
        Potential at each source point, in the caller's ordering.
    """
    my_print = get_print_function(verbose)
    (evaluate_neighbor_interactions, build_neighbor_interactions, \
        build_upwards_pass, numba_upwards_pass, numba_downwards_pass2) \
        = numba_functions
    # allocate workspace in tree
    if not tree.workspace_allocated:
        tree.allocate_workspace(Nequiv)
    st = time.time()
    theta = np.linspace(0, 2*np.pi, Nequiv, endpoint=False)
    # need to reorder tau to match tree order
    tau_ordered = tau[tree.ordv]
    solution_ordered = np.zeros_like(tau)
    # get check/equiv surfaces for every level
    small_xs = []
    small_ys = []
    large_xs = []
    large_ys = []
    small_radii = []
    large_radii = []
    widths = []
    for ind in range(tree.levels):
        Level = tree.Levels[ind]
        width = Level.width
        small_x, small_y, large_x, large_y, small_radius, large_radius = \
            get_level_information(width, theta)
        small_xs.append(small_x)
        small_ys.append(small_y)
        large_xs.append(large_x)
        large_ys.append(large_y)
        small_radii.append(small_radius)
        large_radii.append(large_radius)
        widths.append(width)
    # get C2E (check solution to equivalent density) operator for each level
    E2C_LUs = []
    for ind in range(tree.levels):
        equiv_to_check = Kernel_Form(small_xs[ind], small_ys[ind], \
                                        large_xs[ind], large_ys[ind])
        E2C_LUs.append(sp.linalg.lu_factor(equiv_to_check))
    # get Collected Equivalent Coordinates for each level
    # (the four children's equivalent surfaces, stacked, as seen from a parent)
    M2MC = []
    for ind in range(tree.levels-1):
        collected_equiv_xs = np.concatenate([
                small_xs[ind+1] - 0.5*widths[ind+1],
                small_xs[ind+1] - 0.5*widths[ind+1],
                small_xs[ind+1] + 0.5*widths[ind+1],
                small_xs[ind+1] + 0.5*widths[ind+1],
            ])
        collected_equiv_ys = np.concatenate([
                small_ys[ind+1] - 0.5*widths[ind+1],
                small_ys[ind+1] + 0.5*widths[ind+1],
                small_ys[ind+1] - 0.5*widths[ind+1],
                small_ys[ind+1] + 0.5*widths[ind+1],
            ])
        Kern = Kernel_Form(collected_equiv_xs, collected_equiv_ys, \
                                            large_xs[ind], large_ys[ind])
        M2MC.append(Kern)
    # get all required M2L translations
    # (7x7 grid of box offsets; near-neighbor offsets stay None)
    M2LS = []
    M2LS.append(None)
    for ind in range(1, tree.levels):
        M2Lhere = np.empty([7,7], dtype=object)
        for indx in range(7):
            for indy in range(7):
                if indx-3 in [-1, 0, 1] and indy-3 in [-1, 0, 1]:
                    M2Lhere[indx, indy] = None
                else:
                    small_xhere = small_xs[ind] + (indx - 3)*widths[ind]
                    small_yhere = small_ys[ind] + (indy - 3)*widths[ind]
                    M2Lhere[indx,indy] = Kernel_Form(small_xhere, \
                                        small_yhere, small_xs[ind], small_ys[ind])
        M2LS.append(M2Lhere)
    # get all Collected M2L translations (4-children blocks of the above)
    CM2LS = []
    CM2LS.append(None)
    base_shifts_x = np.empty([3,3], dtype=int)
    base_shifts_y = np.empty([3,3], dtype=int)
    for kkx in range(3):
        for kky in range(3):
            base_shifts_x[kkx, kky] = 2*(kkx-1)
            base_shifts_y[kkx, kky] = 2*(kky-1)
    for ind in range(1, tree.levels):
        CM2Lhere = np.empty([3,3], dtype=object)
        M2Lhere = M2LS[ind]
        for kkx in range(3):
            for kky in range(3):
                if not (kkx-1 == 0 and kky-1 == 0):
                    CM2Lh = np.empty([4*Nequiv, 4*Nequiv], dtype=float)
                    base_shift_x = base_shifts_x[kkx, kky]
                    base_shift_y = base_shifts_y[kkx, kky]
                    for ii in range(2):
                        for jj in range(2):
                            shiftx = base_shift_x - ii + 3
                            shifty = base_shift_y - jj + 3
                            base = 2*ii + jj
                            for iii in range(2):
                                for jjj in range(2):
                                    full_shift_x = shiftx + iii
                                    full_shift_y = shifty + jjj
                                    bb = 2*iii + jjj
                                    if full_shift_x-3 in [-1,0,1] and full_shift_y-3 in [-1,0,1]:
                                        # near-neighbor: handled by direct eval
                                        CM2Lh[base*Nequiv:(base+1)*Nequiv,bb*Nequiv:(bb+1)*Nequiv] = 0.0
                                    else:
                                        CM2Lh[base*Nequiv:(base+1)*Nequiv,bb*Nequiv:(bb+1)*Nequiv] = \
                                            M2Lhere[full_shift_x, full_shift_y]
                    CM2Lhere[kkx, kky] = CM2Lh
        CM2LS.append(CM2Lhere)
    et = time.time()
    my_print('....Time for prep work: {:0.2f}'.format(1000*(et-st)))
    # upwards pass - start at bottom leaf nodes and build multipoles up
    st = time.time()
    for ind in reversed(range(tree.levels)[1:]):
        Level = tree.Levels[ind]
        u_check_surfaces = Level.Check_Us
        # check if there is a level below us, if there is, lift all its expansions
        if ind != tree.levels-1:
            ancestor_level = tree.Levels[ind+1]
        # NOTE(review): the guard and assignment above are immediately repeated
        # below; the first if-statement appears to be a redundant leftover
        if ind != tree.levels-1:
            ancestor_level = tree.Levels[ind+1]
            temp1 = M2MC[ind].dot(ancestor_level.RSEQD.T).T
            numba_distribute(u_check_surfaces, temp1, ancestor_level.short_parent_ind, int(ancestor_level.n_node/4))
        numba_upwards_pass(tree.x, tree.y, Level.bot_ind, Level.top_ind, Level.ns, Level.compute_upwards, large_xs[ind], large_ys[ind], Level.xmid, Level.ymid, tau_ordered, u_check_surfaces)
        # solve check-surface values for equivalent densities
        Level.Equiv_Densities[:] = sp.linalg.lu_solve(E2C_LUs[ind], u_check_surfaces.T).T
    et = time.time()
    my_print('....Time for upwards pass: {:0.2f}'.format(1000*(et-st)))
    # downwards pass 1 - start at top and work down to build up local expansions
    st = time.time()
    for ind in range(1, tree.levels-1):
        # first move local expansions downward
        Level = tree.Levels[ind]
        descendant_level = tree.Levels[ind+1]
        doit = np.logical_and(np.logical_or(Level.not_leaf, Level.Xlist), np.logical_not(Level.fake_leaf))
        local_expansions = sp.linalg.lu_solve(E2C_LUs[ind], Level.Local_Solutions[doit].T).T
        local_solutions = M2MC[ind].T.dot(local_expansions.T).T
        # restore child ordering before writing into the descendant level
        sorter = np.argsort(Level.children_ind[doit])
        local_solutions = local_solutions[sorter]
        # now we have not leaves in the descendant_level.Local_Solutions...
        descendant_level.Local_Solutions[:] = local_solutions.reshape(descendant_level.Local_Solutions.shape)
        # compute all possible interactions
        M2Ms = np.empty([3,3,doit.sum(),4*Nequiv], dtype=float)
        CM2Lh = CM2LS[ind+1]
        for kkx in range(3):
            for kky in range(3):
                if not (kkx-1 == 0 and kky-1 == 0):
                    M2Ms[kkx, kky, :, :] = CM2Lh[kkx, kky].dot(descendant_level.RSEQD.T).T
        ci4 = (Level.children_ind/4).astype(int)
        numba_add_interactions(doit, ci4, Level.colleagues, Level.xmid, Level.ymid, descendant_level.Local_Solutions, M2Ms, Nequiv)
    et = time.time()
    my_print('....Time for downwards pass 1: {:0.2f}'.format(1000*(et-st)))
    # downwards pass 2 - start at top and evaluate local expansions
    st = time.time()
    for ind in range(1,tree.levels):
        Level = tree.Levels[ind]
        local_expansions = sp.linalg.lu_solve(E2C_LUs[ind], Level.Local_Solutions.T).T
        numba_downwards_pass2(tree.x, tree.y, Level.bot_ind, Level.top_ind, Level.ns, Level.leaf, large_xs[ind], large_ys[ind], Level.xmid, Level.ymid, local_expansions, solution_ordered)
    et = time.time()
    my_print('....Time for downwards pass 2: {:0.2f}'.format(1000*(et-st)))
    # NOTE(review): solution_save is never read afterwards (debug leftover?)
    solution_save = solution_ordered.copy()
    # downwards pass 3 - start at top and evaluate neighbor interactions
    st = time.time()
    for ind in range(1,tree.levels):
        Level = tree.Levels[ind]
        evaluate_neighbor_interactions(tree.x, tree.y, Level.leaf, Level.bot_ind, Level.top_ind, tau_ordered, Level.colleagues, solution_ordered)
    et = time.time()
    my_print('....Time for downwards pass 3: {:0.2f}'.format(1000*(et-st)))
    # deorder the solution
    desorter = np.argsort(tree.ordv)
    return solution_ordered[desorter]
@numba.njit("(b1[:],i8[:],i8[:,:],f8[:],f8[:],f8[:,:],f8[:,:,:,:],i8)",parallel=True)
def numba_add_interactions(doit, ci4, colleagues, xmid, ymid, Local_Solutions, M2Ms, Nequiv):
    """Accumulate precomputed M2L blocks into the children's Local_Solutions.

    For each active node i, the sign of the midpoint offset to colleague ci
    (in {-1, 0, 1} per axis) selects one of the 3x3 precomputed interaction
    blocks in M2Ms; row ci4[ci] of that block is split into four Nequiv-length
    pieces and added into child rows 4*ci4[i] .. 4*ci4[i]+3.
    """
    n = doit.shape[0]
    for i in numba.prange(n):
        if doit[i]:
            dii = ci4[i]
            for j in range(9):
                ci = colleagues[i,j]
                if ci >= 0 and ci != i:
                    # offset sign selects the interaction direction
                    xdist = int(np.sign(xmid[ci]-xmid[i]))
                    ydist = int(np.sign(ymid[ci]-ymid[i]))
                    di = ci4[ci]
                    # explicit element loops: add one Nequiv-length slice of
                    # the precomputed row per child
                    for k in range(4):
                        for ll in range(Local_Solutions.shape[1]):
                            Local_Solutions[4*dii+k, ll] += \
                                M2Ms[xdist+1,ydist+1,di,k*Nequiv+ll]
class FMM_Plan(object):
    """Bundle of precomputed operators for repeated FMM evaluations.

    Holds the tree, surface discretization, per-level LU factors, translation
    matrices, and the sparse neighbor/upwards/downwards operators produced by
    fmm_planner, for consumption by planned_fmm via extract().
    """
    def __init__(self, tree, theta, large_xs, large_ys, E2C_LUs, M2MC, M2LS, CM2LS, neighbor_mats, upwards_mats, downwards_mats, numba_functions, verbose):
        # stored in exactly the order extract() hands them back
        self.tree = tree
        self.theta = theta
        self.large_xs = large_xs
        self.large_ys = large_ys
        self.E2C_LUs = E2C_LUs
        self.M2MC = M2MC
        self.M2LS = M2LS
        self.CM2LS = CM2LS
        self.neighbor_mats = neighbor_mats
        self.upwards_mats = upwards_mats
        self.downwards_mats = downwards_mats
        self.numba_functions = numba_functions
        self.verbose = verbose
    def extract(self):
        """Return every stored field as a tuple, in constructor order."""
        return (self.tree, self.theta, self.large_xs, self.large_ys,
                self.E2C_LUs, self.M2MC, self.M2LS, self.CM2LS,
                self.neighbor_mats, self.upwards_mats, self.downwards_mats,
                self.numba_functions, self.verbose)
def fmm_planner(x, y, Nequiv, Ncutoff, Kernel_Form, numba_functions, verbose=False):
    """Precompute all level operators and sparse matrices for repeated FMM solves.

    Builds the tree, the per-level check/equivalent surface operators, the
    M2M/M2L translation matrices, and the sparse neighbor/upwards/downwards
    matrices, and bundles everything into an FMM_Plan for use with planned_fmm.

    Inputs:
        x, y,    float(nsource): source coordinates
        Nequiv,  int: number of points used in check/equiv surfaces
        Ncutoff, int: maximum number of points per leaf node
        Kernel_Form, function: builds a dense kernel matrix between point sets
        numba_functions: output of prepare_numba_functions
        verbose, bool: enable timing output
    Output:
        FMM_Plan
    """
    my_print = get_print_function(verbose)
    my_print('\nPlanning FMM')
    (evaluate_neighbor_interactions, build_neighbor_interactions, \
        build_upwards_pass, numba_upwards_pass, numba_downwards_pass2) \
        = numba_functions
    # building a tree
    st = time.time()
    tree = Tree(x, y, Ncutoff)
    tree_formation_time = (time.time() - st)*1000
    my_print('....Tree formed in: {:0.1f}'.format(tree_formation_time))
    # allocate workspace in tree
    if not tree.workspace_allocated:
        tree.allocate_workspace(Nequiv)
    st = time.time()
    theta = np.linspace(0, 2*np.pi, Nequiv, endpoint=False)
    # get check/equiv surfaces for every level
    small_xs = []
    small_ys = []
    large_xs = []
    large_ys = []
    small_radii = []
    large_radii = []
    widths = []
    for ind in range(tree.levels):
        Level = tree.Levels[ind]
        width = Level.width
        small_x, small_y, large_x, large_y, small_radius, large_radius = \
            get_level_information(width, theta)
        small_xs.append(small_x)
        small_ys.append(small_y)
        large_xs.append(large_x)
        large_ys.append(large_y)
        small_radii.append(small_radius)
        large_radii.append(large_radius)
        widths.append(width)
    # get C2E (check solution to equivalent density) operator for each level
    E2C_LUs = []
    for ind in range(tree.levels):
        equiv_to_check = Kernel_Form(small_xs[ind], small_ys[ind], \
                                        large_xs[ind], large_ys[ind])
        E2C_LUs.append(sp.linalg.lu_factor(equiv_to_check))
    # get Collected Equivalent Coordinates for each level
    # (four children's equivalent surfaces, stacked, as seen from a parent)
    M2MC = []
    for ind in range(tree.levels-1):
        collected_equiv_xs = np.concatenate([
                small_xs[ind+1] - 0.5*widths[ind+1],
                small_xs[ind+1] - 0.5*widths[ind+1],
                small_xs[ind+1] + 0.5*widths[ind+1],
                small_xs[ind+1] + 0.5*widths[ind+1],
            ])
        collected_equiv_ys = np.concatenate([
                small_ys[ind+1] - 0.5*widths[ind+1],
                small_ys[ind+1] + 0.5*widths[ind+1],
                small_ys[ind+1] - 0.5*widths[ind+1],
                small_ys[ind+1] + 0.5*widths[ind+1],
            ])
        Kern = Kernel_Form(collected_equiv_xs, collected_equiv_ys, \
                                            large_xs[ind], large_ys[ind])
        M2MC.append(Kern)
    # get all required M2L translations
    # (7x7 grid of box offsets; near-neighbor offsets stay None)
    M2LS = []
    M2LS.append(None)
    for ind in range(1, tree.levels):
        M2Lhere = np.empty([7,7], dtype=object)
        for indx in range(7):
            for indy in range(7):
                if indx-3 in [-1, 0, 1] and indy-3 in [-1, 0, 1]:
                    M2Lhere[indx, indy] = None
                else:
                    small_xhere = small_xs[ind] + (indx - 3)*widths[ind]
                    small_yhere = small_ys[ind] + (indy - 3)*widths[ind]
                    M2Lhere[indx,indy] = Kernel_Form(small_xhere, \
                                        small_yhere, small_xs[ind], small_ys[ind])
        M2LS.append(M2Lhere)
    # get all Collected M2L translations (4-children blocks of the above)
    CM2LS = []
    CM2LS.append(None)
    base_shifts_x = np.empty([3,3], dtype=int)
    base_shifts_y = np.empty([3,3], dtype=int)
    for kkx in range(3):
        for kky in range(3):
            base_shifts_x[kkx, kky] = 2*(kkx-1)
            base_shifts_y[kkx, kky] = 2*(kky-1)
    for ind in range(1, tree.levels):
        CM2Lhere = np.empty([3,3], dtype=object)
        M2Lhere = M2LS[ind]
        for kkx in range(3):
            for kky in range(3):
                if not (kkx-1 == 0 and kky-1 == 0):
                    CM2Lh = np.empty([4*Nequiv, 4*Nequiv], dtype=float)
                    base_shift_x = base_shifts_x[kkx, kky]
                    base_shift_y = base_shifts_y[kkx, kky]
                    for ii in range(2):
                        for jj in range(2):
                            shiftx = base_shift_x - ii + 3
                            shifty = base_shift_y - jj + 3
                            base = 2*ii + jj
                            for iii in range(2):
                                for jjj in range(2):
                                    full_shift_x = shiftx + iii
                                    full_shift_y = shifty + jjj
                                    bb = 2*iii + jjj
                                    if full_shift_x-3 in [-1,0,1] and full_shift_y-3 in [-1,0,1]:
                                        # near-neighbor: handled by direct eval
                                        CM2Lh[base*Nequiv:(base+1)*Nequiv,bb*Nequiv:(bb+1)*Nequiv] = 0.0
                                    else:
                                        CM2Lh[base*Nequiv:(base+1)*Nequiv,bb*Nequiv:(bb+1)*Nequiv] = \
                                            M2Lhere[full_shift_x, full_shift_y]
                    # stored TRANSPOSED (unlike _on_the_fly_fmm): planned_fmm
                    # applies it as np.dot(RSEQD, CM2Lh), which equals
                    # (CM2Lh_untransposed @ RSEQD.T).T
                    CM2Lhere[kkx, kky] = CM2Lh.T
        CM2LS.append(CM2Lhere)
    et = time.time()
    my_print('....Time for basic work: {:0.2f}'.format(1000*(et-st)))
    # generate sparse matrix for neighbor interactions for each level
    st = time.time()
    neighbor_mats = []
    # NOTE(review): memory and base_ranges are unused remnants of an older
    # dense per-box assembly path
    memory = np.empty([4*Ncutoff,4*Ncutoff], dtype=float)
    base_ranges = np.arange(4*Ncutoff)
    for Level in tree.Levels:
        # count nnz, then fill COO triplets with the numba builder
        n_data = numba_get_neighbor_length(Level.leaf, Level.ns, Level.colleagues)
        iis = np.zeros(n_data, dtype=int)
        jjs = np.zeros(n_data, dtype=int)
        data = np.zeros(n_data, dtype=float)
        build_neighbor_interactions(tree.x, tree.y, Level.leaf, Level.ns,
            Level.bot_ind, Level.top_ind, Level.colleagues, n_data, iis, jjs, data)
        level_matrix = sp.sparse.coo_matrix((data,(iis,jjs)),shape=[tree.x.shape[0],tree.x.shape[0]])
        neighbor_mats.append(level_matrix.tocsr())
    # sum the per-level matrices into a single CSR operator
    neighbor_mat = neighbor_mats[0]
    for ind in range(1,tree.levels):
        neighbor_mat += neighbor_mats[ind]
    et = time.time()
    my_print('....Time to make neighbor mats {:0.2f}'.format(1000*(et-st)))
    # generate sparse matrix for upwards pass for each level
    st = time.time()
    upwards_mats = []
    for ind, Level in enumerate(tree.Levels):
        # over-allocate COO buffers; build_upwards_pass returns the fill count
        iis = np.empty(Level.n_node*Ncutoff*Nequiv, dtype=int)
        jjs = np.empty(Level.n_node*Ncutoff*Nequiv, dtype=int)
        data = np.empty(Level.n_node*Ncutoff*Nequiv, dtype=float)
        track_val = 0
        doit = np.logical_and(Level.compute_upwards, Level.ns>0)
        track_val = build_upwards_pass(tree.x, tree.y, Level.bot_ind, Level.top_ind, \
                Level.xmid, Level.ymid, large_xs[ind], large_ys[ind], iis, jjs, \
                data, doit, track_val)
        # trim the buffers to the entries actually written
        iis = iis[:track_val]
        jjs = jjs[:track_val]
        data = data[:track_val]
        level_matrix = sp.sparse.coo_matrix((data,(iis,jjs)),shape=[Nequiv*Level.n_node,tree.x.shape[0]])
        upwards_mats.append(level_matrix.tocsr())
    et = time.time()
    my_print('....Time to make upwards mats {:0.2f}'.format(1000*(et-st)))
    # generate sparse matrix for downwards pass for each level
    st = time.time()
    downwards_mats = []
    for ind, Level in enumerate(tree.Levels):
        iis = np.empty(Level.n_node*Ncutoff*Nequiv, dtype=int)
        jjs = np.empty(Level.n_node*Ncutoff*Nequiv, dtype=int)
        data = np.empty(Level.n_node*Ncutoff*Nequiv, dtype=float)
        track_val = 0
        # same builder as the upwards pass, but over leaf nodes
        doit = np.logical_and(Level.leaf, Level.ns>0)
        track_val = build_upwards_pass(tree.x, tree.y, Level.bot_ind, Level.top_ind, \
                Level.xmid, Level.ymid, large_xs[ind], large_ys[ind], iis, jjs, \
                data, doit, track_val)
        iis = iis[:track_val]
        jjs = jjs[:track_val]
        data = data[:track_val]
        level_matrix = sp.sparse.coo_matrix((data,(iis,jjs)),shape=[Nequiv*Level.n_node,tree.x.shape[0]])
        # transposed: the downwards pass maps equivalent densities to points
        downwards_mats.append(level_matrix.T.tocsr())
    et = time.time()
    my_print('....Time to make downwards mats {:0.2f}'.format(1000*(et-st)))
    # note: the SUMMED neighbor_mat (not the per-level list) goes into the plan
    fmm_plan = FMM_Plan(tree, theta, large_xs, large_ys, E2C_LUs, M2MC, M2LS, CM2LS, neighbor_mat, upwards_mats, downwards_mats, numba_functions, verbose)
    return fmm_plan
@numba.njit("i8(b1[:],i8[:],i8[:,:])",parallel=False)
def numba_get_neighbor_length(leaf, ns, colleagues):
    """Count the total number of source/target pairs over all leaf-colleague
    interactions, i.e. the number of entries in the neighbor matrix."""
    total = 0
    n_nodes = leaf.shape[0]
    for node in range(n_nodes):
        # only populated leaves contribute neighbor interactions
        if (not leaf[node]) or ns[node] <= 0:
            continue
        for slot in range(9):
            colleague = colleagues[node, slot]
            if colleague >= 0:
                total += ns[colleague] * ns[node]
    return total
def set_matval(xx, xn, ti):
    """Write xn into xx[ti:ti+len(xn)], growing the buffer if needed.

    Parameters
    ----------
    xx : ndarray
        Destination buffer; may be replaced by a larger array.
    xn : ndarray
        Values to write.
    ti : int
        Offset at which to write.

    Returns
    -------
    ndarray
        The buffer holding the data: xx itself when it was large enough,
        otherwise a freshly allocated larger array. On growth, only the
        first ti elements of the original buffer are preserved (matching
        the original behavior).
    """
    nn = xn.shape[0]
    if ti + nn <= xx.shape[0]:
        xx[ti:ti+nn] = xn
    else:
        # Fixes two defects of the original: a bare `except:` that swallowed
        # any error, and a doubling policy that could still be too small for
        # a large xn (causing a second, uncaught broadcast error). Grow
        # geometrically but always enough to hold the new data.
        xxn = np.empty(max(xx.shape[0]*2, ti+nn), dtype=xx.dtype)
        xxn[:ti] = xx[:ti]
        xxn[ti:ti+nn] = xn
        xx = xxn
    return xx
@numba.njit("(f8[:,:],f8[:,:],i8[:],i8)",parallel=True)
def numba_distribute(ucs, temp, pi, n):
    """Parallel scatter: copy row k of temp into row pi[k] of ucs, for k < n."""
    for k in numba.prange(n):
        target_row = pi[k]
        ucs[target_row] = temp[k]
def planned_fmm(fmm_plan, tau):
    """Execute a precomputed FMM plan for a new density tau.

    Same passes as _on_the_fly_fmm, but all kernel matrices were prebuilt by
    fmm_planner, so the upwards/downwards/neighbor evaluations reduce to
    sparse matrix-vector products (via MKL).

    Inputs:
        fmm_plan, FMM_Plan: output of fmm_planner
        tau, float(nsource): density (caller's ordering)
    Output:
        potential, float(nsource), in the caller's ordering
    """
    tree, theta, large_xs, large_ys, E2C_LUs, M2MC, M2LS, CM2LS, neighbor_mats, upwards_mats, downwards_mats, numba_functions, verbose \
        = fmm_plan.extract()
    Nequiv = theta.shape[0]
    my_print = get_print_function(verbose)
    (evaluate_neighbor_interactions, build_neighbor_interactions, \
        build_upwards_pass, numba_upwards_pass, numba_downwards_pass2) \
        = numba_functions
    my_print('Executing FMM')
    # reorder tau to match tree ordering
    tau_ordered = tau[tree.ordv]
    solution_ordered = np.zeros_like(tau)
    # upwards pass - start at bottom leaf nodes and build multipoles up
    st = time.time()
    mat_time = 0
    lu_time = 0
    for ind in reversed(range(tree.levels)[1:]):
        Level = tree.Levels[ind]
        stt = time.time()
        # check-surface values via one sparse matvec over the whole level
        u_check_surfaces = SpMV_viaMKL(upwards_mats[ind], tau_ordered).reshape([Level.n_node, Nequiv])
        mat_time += time.time() - stt
        # if a level exists below us, lift its equivalent densities
        if ind != tree.levels-1:
            ancestor_level = tree.Levels[ind+1]
            temp1 = M2MC[ind].dot(ancestor_level.RSEQD.T).T
            numba_distribute(u_check_surfaces, temp1, ancestor_level.short_parent_ind, int(ancestor_level.n_node/4))
        stt = time.time()
        # solve check-surface values for equivalent densities
        Level.Equiv_Densities[:] = sp.linalg.lu_solve(E2C_LUs[ind], u_check_surfaces.T).T
        lu_time += time.time() - stt
    et = time.time()
    my_print('....Time for upwards pass: {:0.2f}'.format(1000*(et-st)))
    # my_print('....Time for matvecs: {:0.2f}'.format(1000*mat_time))
    # my_print('....Time for lus: {:0.2f}'.format(1000*lu_time))
    # downwards pass 1 - start at top and work down to build up local expansions
    st = time.time()
    for ind in range(1, tree.levels-1):
        # first move local expansions downward
        Level = tree.Levels[ind]
        descendant_level = tree.Levels[ind+1]
        doit = np.logical_and(np.logical_or(Level.not_leaf, Level.Xlist), np.logical_not(Level.fake_leaf))
        local_expansions = sp.linalg.lu_solve(E2C_LUs[ind], Level.Local_Solutions[doit].T).T
        local_solutions = M2MC[ind].T.dot(local_expansions.T).T
        # restore child ordering before writing into the descendant level
        sorter = np.argsort(Level.children_ind[doit])
        local_solutions = local_solutions[sorter]
        # now we have not leaves in the descendant_level.Local_Solutions...
        descendant_level.Local_Solutions[:] = local_solutions.reshape(descendant_level.Local_Solutions.shape)
        # compute all possible interactions
        # do we actually need to do all these? probably not...
        M2Ms = np.empty([3,3,doit.sum(),4*Nequiv], dtype=float)
        CM2Lh = CM2LS[ind+1]
        for kkx in range(3):
            for kky in range(3):
                if not (kkx-1 == 0 and kky-1 == 0):
                    # CM2Lh blocks were stored transposed by fmm_planner, so
                    # this equals (CM2Lh_untransposed @ RSEQD.T).T
                    np.dot(descendant_level.RSEQD, CM2Lh[kkx, kky], out=M2Ms[kkx, kky])
        ci4 = (Level.children_ind/4).astype(int)
        numba_add_interactions(doit, ci4, Level.colleagues, Level.xmid, Level.ymid, descendant_level.Local_Solutions, M2Ms, Nequiv)
    et = time.time()
    my_print('....Time for downwards pass 1: {:0.2f}'.format(1000*(et-st)))
    # downwards pass 2 - start at top and evaluate local expansions
    st = time.time()
    for ind in range(1,tree.levels):
        Level = tree.Levels[ind]
        local_expansions = sp.linalg.lu_solve(E2C_LUs[ind], Level.Local_Solutions.T).T
        solution_ordered += SpMV_viaMKL(downwards_mats[ind], local_expansions.ravel())
    et = time.time()
    my_print('....Time for downwards pass 2: {:0.2f}'.format(1000*(et-st)))
    # NOTE(review): solution_save is never read afterwards (debug leftover?)
    solution_save = solution_ordered.copy()
    # downwards pass 3 - start at top and evaluate neighbor interactions
    # (neighbor_mats here is the single summed CSR matrix built by fmm_planner)
    st = time.time()
    solution_ordered += SpMV_viaMKL(neighbor_mats, tau_ordered)
    et = time.time()
    my_print('....Time for downwards pass 3: {:0.2f}'.format(1000*(et-st)))
    # deorder the solution
    desorter = np.argsort(tree.ordv)
    return solution_ordered[desorter]
| [
"numpy.empty",
"numba.njit",
"numpy.argsort",
"numpy.sin",
"numpy.arange",
"numba.prange",
"numpy.zeros_like",
"numpy.logical_not",
"scipy.sparse.coo_matrix",
"numpy.linspace",
"numpy.cos",
"numpy.dot",
"numpy.concatenate",
"numpy.logical_and",
"scipy.linalg.lu_solve",
"numpy.zeros",
... | [((17713, 17802), 'numba.njit', 'numba.njit', (['"""(b1[:],i8[:],i8[:,:],f8[:],f8[:],f8[:,:],f8[:,:,:,:],i8)"""'], {'parallel': '(True)'}), "('(b1[:],i8[:],i8[:,:],f8[:],f8[:],f8[:,:],f8[:,:,:,:],i8)',\n parallel=True)\n", (17723, 17802), False, 'import numba\n'), ((31306, 31359), 'numba.njit', 'numba.njit', (['"""i8(b1[:],i8[:],i8[:,:])"""'], {'parallel': '(False)'}), "('i8(b1[:],i8[:],i8[:,:])', parallel=False)\n", (31316, 31359), False, 'import numba\n'), ((31878, 31933), 'numba.njit', 'numba.njit', (['"""(f8[:,:],f8[:,:],i8[:],i8)"""'], {'parallel': '(True)'}), "('(f8[:,:],f8[:,:],i8[:],i8)', parallel=True)\n", (31888, 31933), False, 'import numba\n'), ((3245, 3256), 'time.time', 'time.time', ([], {}), '()\n', (3254, 3256), False, 'import time\n'), ((3914, 3999), 'numba.njit', 'numba.njit', (['"""(f8[:],f8[:],b1[:],i8[:],i8[:],f8[:],i8[:,:],f8[:])"""'], {'parallel': '(True)'}), "('(f8[:],f8[:],b1[:],i8[:],i8[:],f8[:],i8[:,:],f8[:])', parallel=True\n )\n", (3924, 3999), False, 'import numba\n'), ((4770, 4870), 'numba.njit', 'numba.njit', (['"""(f8[:],f8[:],b1[:],i8[:],i8[:],i8[:],i8[:,:],i8,i8[:],i8[:],f8[:])"""'], {'parallel': '(True)'}), "('(f8[:],f8[:],b1[:],i8[:],i8[:],i8[:],i8[:,:],i8,i8[:],i8[:],f8[:])'\n , parallel=True)\n", (4780, 4870), False, 'import numba\n'), ((6977, 7092), 'numba.njit', 'numba.njit', (['"""(f8[:],f8[:],i8[:],i8[:],f8[:],f8[:],f8[:],f8[:],i8[:],i8[:],f8[:],b1[:],i8)"""'], {'parallel': '(True)'}), "(\n '(f8[:],f8[:],i8[:],i8[:],f8[:],f8[:],f8[:],f8[:],i8[:],i8[:],f8[:],b1[:],i8)'\n , parallel=True)\n", (6987, 7092), False, 'import numba\n'), ((8028, 8142), 'numba.njit', 'numba.njit', (['"""(f8[:],f8[:],i8[:],i8[:],i8[:],b1[:],f8[:],f8[:],f8[:],f8[:],f8[:],f8[:,:])"""'], {'parallel': '(True)'}), "(\n '(f8[:],f8[:],i8[:],i8[:],i8[:],b1[:],f8[:],f8[:],f8[:],f8[:],f8[:],f8[:,:])'\n , parallel=True)\n", (8038, 8142), False, 'import numba\n'), ((8527, 8641), 'numba.njit', 'numba.njit', 
(['"""(f8[:],f8[:],i8[:],i8[:],i8[:],b1[:],f8[:],f8[:],f8[:],f8[:],f8[:,:],f8[:])"""'], {'parallel': '(True)'}), "(\n '(f8[:],f8[:],i8[:],i8[:],i8[:],b1[:],f8[:],f8[:],f8[:],f8[:],f8[:,:],f8[:])'\n , parallel=True)\n", (8537, 8641), False, 'import numba\n'), ((9570, 9581), 'time.time', 'time.time', ([], {}), '()\n', (9579, 9581), False, 'import time\n'), ((9594, 9643), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'Nequiv'], {'endpoint': '(False)'}), '(0, 2 * np.pi, Nequiv, endpoint=False)\n', (9605, 9643), True, 'import numpy as np\n'), ((9744, 9762), 'numpy.zeros_like', 'np.zeros_like', (['tau'], {}), '(tau)\n', (9757, 9762), True, 'import numpy as np\n'), ((12384, 12411), 'numpy.empty', 'np.empty', (['[3, 3]'], {'dtype': 'int'}), '([3, 3], dtype=int)\n', (12392, 12411), True, 'import numpy as np\n'), ((12431, 12458), 'numpy.empty', 'np.empty', (['[3, 3]'], {'dtype': 'int'}), '([3, 3], dtype=int)\n', (12439, 12458), True, 'import numpy as np\n'), ((14080, 14091), 'time.time', 'time.time', ([], {}), '()\n', (14089, 14091), False, 'import time\n'), ((14249, 14260), 'time.time', 'time.time', ([], {}), '()\n', (14258, 14260), False, 'import time\n'), ((15097, 15108), 'time.time', 'time.time', ([], {}), '()\n', (15106, 15108), False, 'import time\n'), ((15275, 15286), 'time.time', 'time.time', ([], {}), '()\n', (15284, 15286), False, 'import time\n'), ((16539, 16550), 'time.time', 'time.time', ([], {}), '()\n', (16548, 16550), False, 'import time\n'), ((16704, 16715), 'time.time', 'time.time', ([], {}), '()\n', (16713, 16715), False, 'import time\n'), ((17070, 17081), 'time.time', 'time.time', ([], {}), '()\n', (17079, 17081), False, 'import time\n'), ((17284, 17295), 'time.time', 'time.time', ([], {}), '()\n', (17293, 17295), False, 'import time\n'), ((17521, 17532), 'time.time', 'time.time', ([], {}), '()\n', (17530, 17532), False, 'import time\n'), ((17651, 17672), 'numpy.argsort', 'np.argsort', (['tree.ordv'], {}), '(tree.ordv)\n', (17661, 17672), True, 
'import numpy as np\n'), ((17927, 17942), 'numba.prange', 'numba.prange', (['n'], {}), '(n)\n', (17939, 17942), False, 'import numba\n'), ((20364, 20375), 'time.time', 'time.time', ([], {}), '()\n', (20373, 20375), False, 'import time\n'), ((20663, 20674), 'time.time', 'time.time', ([], {}), '()\n', (20672, 20674), False, 'import time\n'), ((20687, 20736), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'Nequiv'], {'endpoint': '(False)'}), '(0, 2 * np.pi, Nequiv, endpoint=False)\n', (20698, 20736), True, 'import numpy as np\n'), ((23356, 23383), 'numpy.empty', 'np.empty', (['[3, 3]'], {'dtype': 'int'}), '([3, 3], dtype=int)\n', (23364, 23383), True, 'import numpy as np\n'), ((23403, 23430), 'numpy.empty', 'np.empty', (['[3, 3]'], {'dtype': 'int'}), '([3, 3], dtype=int)\n', (23411, 23430), True, 'import numpy as np\n'), ((25054, 25065), 'time.time', 'time.time', ([], {}), '()\n', (25063, 25065), False, 'import time\n'), ((25224, 25235), 'time.time', 'time.time', ([], {}), '()\n', (25233, 25235), False, 'import time\n'), ((25272, 25321), 'numpy.empty', 'np.empty', (['[4 * Ncutoff, 4 * Ncutoff]'], {'dtype': 'float'}), '([4 * Ncutoff, 4 * Ncutoff], dtype=float)\n', (25280, 25321), True, 'import numpy as np\n'), ((25335, 25357), 'numpy.arange', 'np.arange', (['(4 * Ncutoff)'], {}), '(4 * Ncutoff)\n', (25344, 25357), True, 'import numpy as np\n'), ((27383, 27394), 'time.time', 'time.time', ([], {}), '()\n', (27392, 27394), False, 'import time\n'), ((27544, 27555), 'time.time', 'time.time', ([], {}), '()\n', (27553, 27555), False, 'import time\n'), ((29192, 29203), 'time.time', 'time.time', ([], {}), '()\n', (29201, 29203), False, 'import time\n'), ((29355, 29366), 'time.time', 'time.time', ([], {}), '()\n', (29364, 29366), False, 'import time\n'), ((31038, 31049), 'time.time', 'time.time', ([], {}), '()\n', (31047, 31049), False, 'import time\n'), ((31986, 32001), 'numba.prange', 'numba.prange', (['n'], {}), '(n)\n', (31998, 32001), False, 'import numba\n'), 
((32563, 32581), 'numpy.zeros_like', 'np.zeros_like', (['tau'], {}), '(tau)\n', (32576, 32581), True, 'import numpy as np\n'), ((32663, 32674), 'time.time', 'time.time', ([], {}), '()\n', (32672, 32674), False, 'import time\n'), ((33377, 33388), 'time.time', 'time.time', ([], {}), '()\n', (33386, 33388), False, 'import time\n'), ((33718, 33729), 'time.time', 'time.time', ([], {}), '()\n', (33727, 33729), False, 'import time\n'), ((35043, 35054), 'time.time', 'time.time', ([], {}), '()\n', (35052, 35054), False, 'import time\n'), ((35210, 35221), 'time.time', 'time.time', ([], {}), '()\n', (35219, 35221), False, 'import time\n'), ((35475, 35486), 'time.time', 'time.time', ([], {}), '()\n', (35484, 35486), False, 'import time\n'), ((35691, 35702), 'time.time', 'time.time', ([], {}), '()\n', (35700, 35702), False, 'import time\n'), ((35776, 35787), 'time.time', 'time.time', ([], {}), '()\n', (35785, 35787), False, 'import time\n'), ((35908, 35929), 'numpy.argsort', 'np.argsort', (['tree.ordv'], {}), '(tree.ordv)\n', (35918, 35929), True, 'import numpy as np\n'), ((379, 392), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (385, 392), True, 'import numpy as np\n'), ((423, 436), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (429, 436), True, 'import numpy as np\n'), ((467, 480), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (473, 480), True, 'import numpy as np\n'), ((511, 524), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (517, 524), True, 'import numpy as np\n'), ((3517, 3552), 'numpy.zeros', 'np.zeros', (['tau.shape[0]'], {'dtype': 'float'}), '(tau.shape[0], dtype=float)\n', (3525, 3552), True, 'import numpy as np\n'), ((4130, 4145), 'numba.prange', 'numba.prange', (['n'], {}), '(n)\n', (4142, 4145), False, 'import numba\n'), ((5019, 5046), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (5027, 5046), True, 'import numpy as np\n'), ((5306, 5333), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.int64'}), 
'(n, dtype=np.int64)\n', (5314, 5333), True, 'import numpy as np\n'), ((5467, 5482), 'numba.prange', 'numba.prange', (['n'], {}), '(n)\n', (5479, 5482), False, 'import numba\n'), ((7269, 7296), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (7277, 7296), True, 'import numpy as np\n'), ((7490, 7505), 'numba.prange', 'numba.prange', (['n'], {}), '(n)\n', (7502, 7505), False, 'import numba\n'), ((8287, 8302), 'numba.prange', 'numba.prange', (['n'], {}), '(n)\n', (8299, 8302), False, 'import numba\n'), ((8786, 8801), 'numba.prange', 'numba.prange', (['n'], {}), '(n)\n', (8798, 8801), False, 'import numba\n'), ((10908, 11106), 'numpy.concatenate', 'np.concatenate', (['[small_xs[ind + 1] - 0.5 * widths[ind + 1], small_xs[ind + 1] - 0.5 *\n widths[ind + 1], small_xs[ind + 1] + 0.5 * widths[ind + 1], small_xs[\n ind + 1] + 0.5 * widths[ind + 1]]'], {}), '([small_xs[ind + 1] - 0.5 * widths[ind + 1], small_xs[ind + 1\n ] - 0.5 * widths[ind + 1], small_xs[ind + 1] + 0.5 * widths[ind + 1], \n small_xs[ind + 1] + 0.5 * widths[ind + 1]])\n', (10922, 11106), True, 'import numpy as np\n'), ((11181, 11379), 'numpy.concatenate', 'np.concatenate', (['[small_ys[ind + 1] - 0.5 * widths[ind + 1], small_ys[ind + 1] + 0.5 *\n widths[ind + 1], small_ys[ind + 1] - 0.5 * widths[ind + 1], small_ys[\n ind + 1] + 0.5 * widths[ind + 1]]'], {}), '([small_ys[ind + 1] - 0.5 * widths[ind + 1], small_ys[ind + 1\n ] + 0.5 * widths[ind + 1], small_ys[ind + 1] - 0.5 * widths[ind + 1], \n small_ys[ind + 1] + 0.5 * widths[ind + 1]])\n', (11195, 11379), True, 'import numpy as np\n'), ((11726, 11756), 'numpy.empty', 'np.empty', (['[7, 7]'], {'dtype': 'object'}), '([7, 7], dtype=object)\n', (11734, 11756), True, 'import numpy as np\n'), ((12665, 12695), 'numpy.empty', 'np.empty', (['[3, 3]'], {'dtype': 'object'}), '([3, 3], dtype=object)\n', (12673, 12695), True, 'import numpy as np\n'), ((15734, 15770), 'numpy.argsort', 'np.argsort', (['Level.children_ind[doit]'], {}), 
'(Level.children_ind[doit])\n', (15744, 15770), True, 'import numpy as np\n'), ((21880, 22078), 'numpy.concatenate', 'np.concatenate', (['[small_xs[ind + 1] - 0.5 * widths[ind + 1], small_xs[ind + 1] - 0.5 *\n widths[ind + 1], small_xs[ind + 1] + 0.5 * widths[ind + 1], small_xs[\n ind + 1] + 0.5 * widths[ind + 1]]'], {}), '([small_xs[ind + 1] - 0.5 * widths[ind + 1], small_xs[ind + 1\n ] - 0.5 * widths[ind + 1], small_xs[ind + 1] + 0.5 * widths[ind + 1], \n small_xs[ind + 1] + 0.5 * widths[ind + 1]])\n', (21894, 22078), True, 'import numpy as np\n'), ((22153, 22351), 'numpy.concatenate', 'np.concatenate', (['[small_ys[ind + 1] - 0.5 * widths[ind + 1], small_ys[ind + 1] + 0.5 *\n widths[ind + 1], small_ys[ind + 1] - 0.5 * widths[ind + 1], small_ys[\n ind + 1] + 0.5 * widths[ind + 1]]'], {}), '([small_ys[ind + 1] - 0.5 * widths[ind + 1], small_ys[ind + 1\n ] + 0.5 * widths[ind + 1], small_ys[ind + 1] - 0.5 * widths[ind + 1], \n small_ys[ind + 1] + 0.5 * widths[ind + 1]])\n', (22167, 22351), True, 'import numpy as np\n'), ((22698, 22728), 'numpy.empty', 'np.empty', (['[7, 7]'], {'dtype': 'object'}), '([7, 7], dtype=object)\n', (22706, 22728), True, 'import numpy as np\n'), ((23637, 23667), 'numpy.empty', 'np.empty', (['[3, 3]'], {'dtype': 'object'}), '([3, 3], dtype=object)\n', (23645, 23667), True, 'import numpy as np\n'), ((25483, 25510), 'numpy.zeros', 'np.zeros', (['n_data'], {'dtype': 'int'}), '(n_data, dtype=int)\n', (25491, 25510), True, 'import numpy as np\n'), ((25525, 25552), 'numpy.zeros', 'np.zeros', (['n_data'], {'dtype': 'int'}), '(n_data, dtype=int)\n', (25533, 25552), True, 'import numpy as np\n'), ((25568, 25597), 'numpy.zeros', 'np.zeros', (['n_data'], {'dtype': 'float'}), '(n_data, dtype=float)\n', (25576, 25597), True, 'import numpy as np\n'), ((27128, 27215), 'scipy.sparse.coo_matrix', 'sp.sparse.coo_matrix', (['(data, (iis, jjs))'], {'shape': '[tree.x.shape[0], tree.x.shape[0]]'}), '((data, (iis, jjs)), shape=[tree.x.shape[0], tree.x.\n 
shape[0]])\n', (27148, 27215), True, 'import scipy as sp\n'), ((27639, 27691), 'numpy.empty', 'np.empty', (['(Level.n_node * Ncutoff * Nequiv)'], {'dtype': 'int'}), '(Level.n_node * Ncutoff * Nequiv, dtype=int)\n', (27647, 27691), True, 'import numpy as np\n'), ((27703, 27755), 'numpy.empty', 'np.empty', (['(Level.n_node * Ncutoff * Nequiv)'], {'dtype': 'int'}), '(Level.n_node * Ncutoff * Nequiv, dtype=int)\n', (27711, 27755), True, 'import numpy as np\n'), ((27767, 27821), 'numpy.empty', 'np.empty', (['(Level.n_node * Ncutoff * Nequiv)'], {'dtype': 'float'}), '(Level.n_node * Ncutoff * Nequiv, dtype=float)\n', (27775, 27821), True, 'import numpy as np\n'), ((27855, 27906), 'numpy.logical_and', 'np.logical_and', (['Level.compute_upwards', '(Level.ns > 0)'], {}), '(Level.compute_upwards, Level.ns > 0)\n', (27869, 27906), True, 'import numpy as np\n'), ((29050, 29143), 'scipy.sparse.coo_matrix', 'sp.sparse.coo_matrix', (['(data, (iis, jjs))'], {'shape': '[Nequiv * Level.n_node, tree.x.shape[0]]'}), '((data, (iis, jjs)), shape=[Nequiv * Level.n_node, tree\n .x.shape[0]])\n', (29070, 29143), True, 'import scipy as sp\n'), ((29452, 29504), 'numpy.empty', 'np.empty', (['(Level.n_node * Ncutoff * Nequiv)'], {'dtype': 'int'}), '(Level.n_node * Ncutoff * Nequiv, dtype=int)\n', (29460, 29504), True, 'import numpy as np\n'), ((29516, 29568), 'numpy.empty', 'np.empty', (['(Level.n_node * Ncutoff * Nequiv)'], {'dtype': 'int'}), '(Level.n_node * Ncutoff * Nequiv, dtype=int)\n', (29524, 29568), True, 'import numpy as np\n'), ((29580, 29634), 'numpy.empty', 'np.empty', (['(Level.n_node * Ncutoff * Nequiv)'], {'dtype': 'float'}), '(Level.n_node * Ncutoff * Nequiv, dtype=float)\n', (29588, 29634), True, 'import numpy as np\n'), ((29668, 29708), 'numpy.logical_and', 'np.logical_and', (['Level.leaf', '(Level.ns > 0)'], {}), '(Level.leaf, Level.ns > 0)\n', (29682, 29708), True, 'import numpy as np\n'), ((30831, 30924), 'scipy.sparse.coo_matrix', 'sp.sparse.coo_matrix', (['(data, (iis, 
jjs))'], {'shape': '[Nequiv * Level.n_node, tree.x.shape[0]]'}), '((data, (iis, jjs)), shape=[Nequiv * Level.n_node, tree\n .x.shape[0]])\n', (30851, 30924), True, 'import scipy as sp\n'), ((32804, 32815), 'time.time', 'time.time', ([], {}), '()\n', (32813, 32815), False, 'import time\n'), ((33229, 33240), 'time.time', 'time.time', ([], {}), '()\n', (33238, 33240), False, 'import time\n'), ((34177, 34213), 'numpy.argsort', 'np.argsort', (['Level.children_ind[doit]'], {}), '(Level.children_ind[doit])\n', (34187, 34213), True, 'import numpy as np\n'), ((290, 300), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (297, 300), True, 'import numpy as np\n'), ((3315, 3326), 'time.time', 'time.time', ([], {}), '()\n', (3324, 3326), False, 'import time\n'), ((3714, 3725), 'time.time', 'time.time', ([], {}), '()\n', (3723, 3725), False, 'import time\n'), ((10733, 10768), 'scipy.linalg.lu_factor', 'sp.linalg.lu_factor', (['equiv_to_check'], {}), '(equiv_to_check)\n', (10752, 10768), True, 'import scipy as sp\n'), ((15033, 15085), 'scipy.linalg.lu_solve', 'sp.linalg.lu_solve', (['E2C_LUs[ind]', 'u_check_surfaces.T'], {}), '(E2C_LUs[ind], u_check_surfaces.T)\n', (15051, 15085), True, 'import scipy as sp\n'), ((15483, 15525), 'numpy.logical_or', 'np.logical_or', (['Level.not_leaf', 'Level.Xlist'], {}), '(Level.not_leaf, Level.Xlist)\n', (15496, 15525), True, 'import numpy as np\n'), ((15527, 15558), 'numpy.logical_not', 'np.logical_not', (['Level.fake_leaf'], {}), '(Level.fake_leaf)\n', (15541, 15558), True, 'import numpy as np\n'), ((15587, 15650), 'scipy.linalg.lu_solve', 'sp.linalg.lu_solve', (['E2C_LUs[ind]', 'Level.Local_Solutions[doit].T'], {}), '(E2C_LUs[ind], Level.Local_Solutions[doit].T)\n', (15605, 15650), True, 'import scipy as sp\n'), ((16813, 16870), 'scipy.linalg.lu_solve', 'sp.linalg.lu_solve', (['E2C_LUs[ind]', 'Level.Local_Solutions.T'], {}), '(E2C_LUs[ind], Level.Local_Solutions.T)\n', (16831, 16870), True, 'import scipy as sp\n'), ((20434, 20445), 
'time.time', 'time.time', ([], {}), '()\n', (20443, 20445), False, 'import time\n'), ((21705, 21740), 'scipy.linalg.lu_factor', 'sp.linalg.lu_factor', (['equiv_to_check'], {}), '(equiv_to_check)\n', (21724, 21740), True, 'import scipy as sp\n'), ((31751, 31792), 'numpy.empty', 'np.empty', (['(xx.shape[0] * 2)'], {'dtype': 'xx.dtype'}), '(xx.shape[0] * 2, dtype=xx.dtype)\n', (31759, 31792), True, 'import numpy as np\n'), ((32939, 32950), 'time.time', 'time.time', ([], {}), '()\n', (32948, 32950), False, 'import time\n'), ((33276, 33328), 'scipy.linalg.lu_solve', 'sp.linalg.lu_solve', (['E2C_LUs[ind]', 'u_check_surfaces.T'], {}), '(E2C_LUs[ind], u_check_surfaces.T)\n', (33294, 33328), True, 'import scipy as sp\n'), ((33350, 33361), 'time.time', 'time.time', ([], {}), '()\n', (33359, 33361), False, 'import time\n'), ((33926, 33968), 'numpy.logical_or', 'np.logical_or', (['Level.not_leaf', 'Level.Xlist'], {}), '(Level.not_leaf, Level.Xlist)\n', (33939, 33968), True, 'import numpy as np\n'), ((33970, 34001), 'numpy.logical_not', 'np.logical_not', (['Level.fake_leaf'], {}), '(Level.fake_leaf)\n', (33984, 34001), True, 'import numpy as np\n'), ((34030, 34093), 'scipy.linalg.lu_solve', 'sp.linalg.lu_solve', (['E2C_LUs[ind]', 'Level.Local_Solutions[doit].T'], {}), '(E2C_LUs[ind], Level.Local_Solutions[doit].T)\n', (34048, 34093), True, 'import scipy as sp\n'), ((35319, 35376), 'scipy.linalg.lu_solve', 'sp.linalg.lu_solve', (['E2C_LUs[ind]', 'Level.Local_Solutions.T'], {}), '(E2C_LUs[ind], Level.Local_Solutions.T)\n', (35337, 35376), True, 'import scipy as sp\n'), ((332, 342), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (339, 342), True, 'import numpy as np\n'), ((12865, 12912), 'numpy.empty', 'np.empty', (['[4 * Nequiv, 4 * Nequiv]'], {'dtype': 'float'}), '([4 * Nequiv, 4 * Nequiv], dtype=float)\n', (12873, 12912), True, 'import numpy as np\n'), ((23837, 23884), 'numpy.empty', 'np.empty', (['[4 * Nequiv, 4 * Nequiv]'], {'dtype': 'float'}), '([4 * Nequiv, 4 * Nequiv], 
dtype=float)\n', (23845, 23884), True, 'import numpy as np\n'), ((34785, 34852), 'numpy.dot', 'np.dot', (['descendant_level.RSEQD', 'CM2Lh[kkx, kky]'], {'out': 'M2Ms[kkx, kky]'}), '(descendant_level.RSEQD, CM2Lh[kkx, kky], out=M2Ms[kkx, kky])\n', (34791, 34852), True, 'import numpy as np\n'), ((18129, 18156), 'numpy.sign', 'np.sign', (['(xmid[ci] - xmid[i])'], {}), '(xmid[ci] - xmid[i])\n', (18136, 18156), True, 'import numpy as np\n'), ((18188, 18215), 'numpy.sign', 'np.sign', (['(ymid[ci] - ymid[i])'], {}), '(ymid[ci] - ymid[i])\n', (18195, 18215), True, 'import numpy as np\n')] |
import numpy as np
import properties
from .... import survey
from ....utils import Zero
class BaseSrc(survey.BaseSrc):
    """
    Base class for direct-current (DC) resistivity sources.

    Holds one or more source-electrode locations together with the current
    amplitude(s) injected at each, and lazily builds the discrete
    right-hand-side vector ``q`` for a given simulation.
    """

    # Cached discretized source vector; built on first call to ``eval``.
    _q = None

    def __init__(self, receiver_list, location, current=1.0, **kwargs):
        super().__init__(receiver_list=receiver_list, **kwargs)
        self.location = location
        self.current = current

    @property
    def location(self):
        """location of the source electrodes"""
        return self._location

    @location.setter
    def location(self, other):
        # Coerce to a float 2D array so a single electrode and a list of
        # electrodes are handled uniformly downstream.
        self._location = np.atleast_2d(np.asarray(other, dtype=float))

    @property
    def current(self):
        """amplitudes of the source currents"""
        return self._current

    @current.setter
    def current(self, other):
        currents = np.atleast_1d(np.asarray(other, dtype=float))
        if currents.ndim > 1:
            raise ValueError("Too many dimensions for current array")
        n_locs = self.location.shape[0]
        # Either one shared amplitude, or exactly one amplitude per electrode.
        if len(currents) > 1 and len(currents) != n_locs:
            raise ValueError(
                "Current must be constant or equal to the number of specified source locations."
                f" saw {len(currents)} current sources and {n_locs} locations."
            )
        self._current = currents

    def eval(self, sim):
        """Discretize the source into a right-hand-side vector for ``sim``.

        The result is cached, so repeated calls are cheap.
        """
        if self._q is None:
            if sim._formulation == "HJ":
                # Cell-centered formulation: inject current at the cells
                # closest to the electrode locations.
                cell_inds = sim.mesh.closest_points_index(self.location, grid_loc="CC")
                q = np.zeros(sim.mesh.nC)
                q[cell_inds] = self.current
                self._q = q
            elif sim._formulation == "EB":
                # Nodal formulation: distribute the currents onto the mesh
                # nodes with the nodal interpolation matrix.
                P = sim.mesh.get_interpolation_matrix(
                    self.location, locType="N"
                ).toarray()
                self._q = np.sum(self.current[:, np.newaxis] * P, axis=0)
        return self._q

    def evalDeriv(self, sim):
        """The source term does not depend on the model, so the derivative is Zero."""
        return Zero()
class Multipole(BaseSrc):
    """
    Generic multipole source: an arbitrary number of current electrodes.
    """

    @property
    def location_a(self):
        """Locations of the A electrode"""
        return self.location

    @property
    def location_b(self):
        """Location of the B electrode"""
        # A multipole has no distinct return electrode; report NaNs with the
        # same shape (and float dtype) as the A locations.
        return np.full(np.shape(self.location), np.nan)
class Dipole(BaseSrc):
    """
    Dipole source: a positive (A) and a negative (B) current electrode.
    """

    def __init__(
        self,
        receiver_list,
        location_a=None,
        location_b=None,
        location=None,
        **kwargs,
    ):
        # A dipole injects +I at the A electrode and -I at the B electrode.
        if "current" in kwargs:
            amp = kwargs.pop("current")
            current = [amp, -amp]
        else:
            current = [1.0, -1.0]

        # Locations may be given either as location=(a, b) or as the pair
        # location_a=..., location_b=..., but not both.
        if location_a is not None:
            if location_b is None:
                raise ValueError(
                    "For a dipole source both location_a and location_b must be set"
                )
            if location is not None:
                raise ValueError(
                    "Cannot set both location and location_a, location_b. "
                    "Please provide either location=(location_a, location_b) "
                    "or both location_a=location_a, location_b=location_b"
                )
            location = [location_a, location_b]
        elif location is not None and len(location) != 2:
            raise ValueError(
                "location must be a list or tuple of length 2: "
                "[location_a, location_b]. The input location has "
                f"length {len(location)}"
            )

        super().__init__(
            receiver_list=receiver_list, location=location, current=current, **kwargs
        )

    def __repr__(self):
        return f"{self.__class__.__name__}(a: {self.location_a}; b: {self.location_b})"

    @property
    def location_a(self):
        """Location of the A-electrode"""
        return self.location[0]

    @property
    def location_b(self):
        """Location of the B-electrode"""
        return self.location[1]
class Pole(BaseSrc):
    """Pole source: a single current electrode (return electrode at infinity)."""

    def __init__(self, receiver_list, location=None, **kwargs):
        super().__init__(receiver_list=receiver_list, location=location, **kwargs)
        n_locs = len(self.location)
        if n_locs != 1:
            raise ValueError(
                f"Pole sources only have a single location, not {n_locs}"
            )

    @property
    def location_a(self):
        """Location of the A electrode"""
        return self.location[0]

    @property
    def location_b(self):
        """Location of the B electrode"""
        # There is no physical B electrode; return NaNs shaped like A.
        return np.full(np.shape(self.location[0]), np.nan)
| [
"numpy.full_like",
"numpy.sum",
"numpy.asarray",
"numpy.zeros",
"numpy.atleast_2d"
] | [((559, 589), 'numpy.asarray', 'np.asarray', (['other'], {'dtype': 'float'}), '(other, dtype=float)\n', (569, 589), True, 'import numpy as np\n'), ((606, 626), 'numpy.atleast_2d', 'np.atleast_2d', (['other'], {}), '(other)\n', (619, 626), True, 'import numpy as np\n'), ((2354, 2389), 'numpy.full_like', 'np.full_like', (['self.location', 'np.nan'], {}), '(self.location, np.nan)\n', (2366, 2389), True, 'import numpy as np\n'), ((4786, 4824), 'numpy.full_like', 'np.full_like', (['self.location[0]', 'np.nan'], {}), '(self.location[0], np.nan)\n', (4798, 4824), True, 'import numpy as np\n'), ((854, 884), 'numpy.asarray', 'np.asarray', (['other'], {'dtype': 'float'}), '(other, dtype=float)\n', (864, 884), True, 'import numpy as np\n'), ((1564, 1585), 'numpy.zeros', 'np.zeros', (['sim.mesh.nC'], {}), '(sim.mesh.nC)\n', (1572, 1585), True, 'import numpy as np\n'), ((1904, 1961), 'numpy.sum', 'np.sum', (['(cur[:, np.newaxis] * interpolation_matrix)'], {'axis': '(0)'}), '(cur[:, np.newaxis] * interpolation_matrix, axis=0)\n', (1910, 1961), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# adapted from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/transforms/augmentation.py
import sys
import inspect
import random
import numpy as np
import pprint
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple, Union
from PIL import Image
from .transform import (
Transform,
TransformList,
BlendTransform,
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
VFlipTransform,
ExtentTransform,
ResizeTransform,
RotationTransform
)
# Public API of this module: augmentation policies plus the helper entry
# points (aliases kept for backward compatibility with older callers).
__all__ = [
    "Augmentation",
    "TransformGen",
    "apply_transform_gens",
    "AugInput",
    "StandardAugInput",
    "apply_augmentations",
    "RandomApply",
    "RandomBrightness",
    "RandomContrast",
    "RandomCrop",
    "RandomExtent",
    "RandomFlip",
    "RandomSaturation",
    "RandomLighting",
    "RandomRotation",
    "Resize",
    "ResizeShortestEdge",
    "RandomCropWithInstance",
    "PadAugmentation",
]
"""
Overview of the augmentation system:
We have a design goal that aims at allowing:
(1) Arbitrary structures of input data (e.g. list[list[boxes]], dict[str, boxes],
multiple semantic segmentations for each image, etc) and arbitrary new data types
(rotated boxes, 3D meshes, densepose, etc)
(2) A list of augmentations to be applied sequentially
`Augmentation` defines policies to create deterministic transforms from input data.
An augmentation policy may need to access arbitrary input data, so it declares the input
data needed, to be provided by users when calling its `get_transform` method.
`Augmentation` is not able to apply transforms to data: data associated with
one sample may be much more than what `Augmentation` gets. For example, most
augmentation policies only need an image, but the actual input samples can be
much more complicated.
`AugInput` manages all inputs needed by `Augmentation` and implements the logic
to apply a sequence of augmentations. It has to define how the inputs are transformed,
because arguments needed by one `Augmentation` needs to be transformed to become arguments
of the next `Augmentation` in the sequence.
`AugInput` does not need to contain all input data, because most augmentation policies
only need very few fields (e.g., most only need "image"). We provide `StandardAugInput`
that only contains "images", "boxes", "sem_seg", that are enough to create transforms
for most cases. In this way, users keep the responsibility to apply transforms to other
potentially new data types and structures, e.g. keypoints, proposals boxes.
To extend the system, one can do:
1. To add a new augmentation policy that only needs to use standard inputs
("image", "boxes", "sem_seg"), writing a subclass of `Augmentation` is sufficient.
2. To use new data types or custom data structures, `StandardAugInput` can still be used as long
as the new data types or custom data structures are not needed by any augmentation policy.
The new data types or data structures can be transformed using the
transforms returned by `AugInput.apply_augmentations`.
3. To add new augmentation policies that need new data types or data structures, in addition to
implementing new `Augmentation`, a new `AugInput` is needed as well.
"""
def _check_img_dtype(img):
assert isinstance(img, np.ndarray), "[Augmentation] Needs an numpy array, but got a {}!".format(
type(img)
)
assert not isinstance(img.dtype, np.integer) or (
img.dtype == np.uint8
), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format(
img.dtype
)
assert img.ndim in [2, 3], img.ndim
class Augmentation(metaclass=ABCMeta):
    """
    Augmentation defines policies/strategies to generate :class:`Transform` from data.
    It is often used for pre-processing of input data. A policy typically contains
    randomness, but it can also choose to deterministically generate a :class:`Transform`.
    A "policy" that generates a :class:`Transform` may, in the most general case,
    need arbitrary information from input data in order to determine what transforms
    to apply. Therefore, each :class:`Augmentation` instance defines the arguments
    needed by its :meth:`get_transform` method with the :attr:`input_args` attribute.
    When called with the positional arguments defined by the :attr:`input_args`,
    the :meth:`get_transform` method executes the policy.
    Examples:
    ::
        # if a policy needs to know both image and semantic segmentation
        assert aug.input_args == ("image", "sem_seg")
        tfm: Transform = aug.get_transform(image, sem_seg)
        new_image = tfm.apply_image(image)
    To implement a custom :class:`Augmentation`, define its :attr:`input_args` and
    implement :meth:`get_transform`.
    Note that :class:`Augmentation` defines the policies to create a :class:`Transform`,
    but not how to apply the actual transform to those data.
    """

    input_args: Tuple[str] = ("image",)
    """
    Attribute of class instances that defines the argument(s) needed by
    :meth:`get_transform`. Default to only "image", because most policies only
    require knowing the image in order to determine the transform.
    Users can freely define arbitrary new args and their types in custom
    :class:`Augmentation`. In detectron2 we use the following convention:
    * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
      floating point in range [0, 1] or [0, 255].
    * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes
      of N instances. Each is in XYXY format in unit of absolute coordinates.
    * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel.
    We do not specify convention for other types and do not include builtin
    :class:`Augmentation` that uses other types in detectron2.
    """

    def _init(self, params=None):
        # Copy constructor arguments onto the instance as attributes, skipping
        # "self" and private (underscore-prefixed) names. Subclasses typically
        # call ``self._init(locals())`` at the end of their ``__init__`` so
        # that ``__repr__`` below can mirror the constructor signature.
        if params:
            for k, v in params.items():
                if k != "self" and not k.startswith("_"):
                    setattr(self, k, v)

    # NOTE: in the future, can allow it to return list[Augmentation],
    # to delegate augmentation to others

    @abstractmethod
    def get_transform(self, *args) -> Transform:
        """
        Execute the policy to use input data to create transform(s).
        Args:
            arguments must follow what's defined in :attr:`input_args`.
        Returns:
            Return a :class:`Transform` instance, which is the transform to apply to inputs.
        """
        pass

    def _rand_range(self, low=1.0, high=None, size=None):
        """
        Uniform float random number between low and high.
        """
        # Single-argument form: _rand_range(x) samples from [0, x).
        if high is None:
            low, high = 0, low
        if size is None:
            size = []
        return np.random.uniform(low, high, size)

    def __repr__(self):
        """
        Produce something like:
        "MyAugmentation(field1={self.field1}, field2={self.field2})"
        """
        try:
            sig = inspect.signature(self.__init__)
            classname = type(self).__name__
            argstr = []
            for name, param in sig.parameters.items():
                # Only works for plain named parameters; variadic signatures
                # cannot be reconstructed from instance attributes.
                assert (
                    param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
                ), "The default __repr__ doesn't support *args or **kwargs"
                assert hasattr(self, name), (
                    "Attribute {} not found! "
                    "Default __repr__ only works if attributes match the constructor.".format(name)
                )
                attr = getattr(self, name)
                default = param.default
                # Skip attributes still at their constructor default (identity
                # check) so the repr stays short.
                if default is attr:
                    continue
                argstr.append("{}={}".format(name, pprint.pformat(attr)))
            return "{}({})".format(classname, ", ".join(argstr))
        except AssertionError:
            # Fall back to the default object repr when the attributes don't
            # mirror the constructor signature.
            return super().__repr__()

    __str__ = __repr__
# Backward-compatible alias: older code refers to Augmentation as TransformGen.
TransformGen = Augmentation
"""
Alias for Augmentation, since it is something that generates :class:`Transform`s
"""
class AugInput:
    """
    A base class for anything on which a list of :class:`Augmentation` can be applied.
    This class provides input arguments for :class:`Augmentation` to use, and defines how
    to apply transforms to these data.
    An instance of this class must satisfy the following:
    * :class:`Augmentation` declares some data it needs as arguments. A :class:`AugInput`
      must provide access to these data in the form of attribute access (``getattr``).
      For example, if a :class:`Augmentation` to be applied needs "image" and "sem_seg"
      arguments, this class must have the attribute "image" and "sem_seg" whose content
      is as required by the :class:`Augmentation`s.
    * This class must have a :meth:`transform(tfm: Transform) -> None` method which
      in-place transforms all attributes stored in the class.
    """

    def transform(self, tfm: Transform) -> None:
        """In-place transform all attributes held by this input. Subclasses must implement."""
        raise NotImplementedError

    def apply_augmentations(
        self, augmentations: List[Union[Augmentation, Transform]]
    ) -> TransformList:
        """
        Apply a list of Transform/Augmentation in-place and return the applied transforms.
        Attributes of this class will be modified.
        Returns:
            TransformList:
                returns transformed inputs and the list of transforms applied.
                The TransformList can then be applied to other data associated with the inputs.
        """
        tfms = []
        for aug in augmentations:
            if isinstance(aug, Augmentation):
                # Gather the attributes this augmentation declared it needs.
                args = []
                for f in aug.input_args:
                    try:
                        args.append(getattr(self, f))
                    except AttributeError:
                        raise AttributeError(
                            f"Augmentation {aug} needs '{f}', which is not an attribute of {self}!"
                        )
                tfm = aug.get_transform(*args)
                # BUGFIX: the second literal was missing its f-prefix, so the
                # error message printed "{type(tfm)}" verbatim instead of the
                # offending type.
                assert isinstance(tfm, Transform), (
                    f"{type(aug)}.get_transform must return an instance of Transform! "
                    f"Got {type(tfm)} instead."
                )
            else:
                # A plain Transform is applied deterministically as-is.
                tfm = aug
            self.transform(tfm)
            tfms.append(tfm)
        return TransformList(tfms)
class StandardAugInput(AugInput):
    """
    The stock :class:`AugInput` covering the common case.

    It exposes the three attributes that built-in augmentation policies ask
    for — "image", "boxes" and "sem_seg" — and knows how to transform each of
    them. Transforms returned by :meth:`apply_augmentations` can then be used
    on any other data the caller holds.

    Attributes:
        image (ndarray): image in HW or HWC format. The meaning of C is up to users
        boxes (ndarray or None): Nx4 boxes in XYXY_ABS mode
        sem_seg (ndarray or None): HxW semantic segmentation mask

    Examples:
    ::
        input = StandardAugInput(image, boxes=boxes)
        tfms = input.apply_augmentations(list_of_augmentations)
        transformed_image = input.image
        transformed_boxes = input.boxes
        transformed_other_data = tfms.apply_other(other_data)

    Projects with new data types or different transform semantics should
    subclass :class:`AugInput` with their own attributes and ``transform``.
    """

    def __init__(
        self,
        image: np.ndarray,
        *,
        boxes: Optional[np.ndarray] = None,
        sem_seg: Optional[np.ndarray] = None,
    ):
        """
        Args:
            image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
                floating point in range [0, 1] or [0, 255].
            boxes: (N,4) ndarray of float32. It represents the instance bounding boxes
                of N instances. Each is in XYXY format in unit of absolute coordinates.
            sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel.
        """
        _check_img_dtype(image)
        self.image, self.boxes, self.sem_seg = image, boxes, sem_seg

    def transform(self, tfm: Transform) -> None:
        """
        In-place transform all attributes of this class.
        """
        self.image = tfm.apply_image(self.image)
        # boxes and sem_seg are optional; transform only what is present.
        for attr, apply_fn in (("boxes", tfm.apply_box), ("sem_seg", tfm.apply_segmentation)):
            value = getattr(self, attr)
            if value is not None:
                setattr(self, attr, apply_fn(value))
def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs):
    """
    Use :meth:`AugInput.apply_augmentations` instead.
    """
    # Backward compatibility: a bare ndarray means "image-only" and is wrapped
    # in a StandardAugInput; the image (not the wrapper) is returned.
    image_only = isinstance(inputs, np.ndarray)
    if image_only:
        inputs = StandardAugInput(inputs)
    tfms = inputs.apply_augmentations(augmentations)
    result = inputs.image if image_only else inputs
    return result, tfms
# Backward-compatible alias for the old function name.
apply_transform_gens = apply_augmentations
"""
Alias for backward-compatibility.
"""
class RandomApply(Augmentation):
    """
    Wrap another Transform/Augmentation and apply it with probability ``prob``.
    """

    def __init__(self, transform, prob=0.5):
        """
        Args:
            transform (Transform, Augmentation): the transform to be wrapped
                by the `RandomApply`. The `transform` can either be a
                `Transform` or `Augmentation` instance.
            prob (float): probability between 0.0 and 1.0 that
                the wrapper transformation is applied
        """
        super().__init__()
        assert isinstance(transform, (Transform, Augmentation)), (
            f"The given transform must either be a Transform or Augmentation instance. "
            f"Not {type(transform)}"
        )
        assert 0.0 <= prob <= 1.0, f"Probablity must be between 0.0 and 1.0 (given: {prob})"
        self.prob = prob
        self.transform = transform
        # Forward the wrapped augmentation's declared inputs so that
        # apply_augmentations passes the right arguments through the wrapper.
        if isinstance(transform, Augmentation):
            self.input_args = transform.input_args

    def get_transform(self, img):
        if self._rand_range() >= self.prob:
            # Skipped this time: emit a no-op.
            return NoOpTransform()
        if isinstance(self.transform, Augmentation):
            return self.transform.get_transform(img)
        return self.transform
class RandomFlip(Augmentation):
    """
    Flip the image horizontally or vertically with the given probability.
    """

    def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
        """
        Args:
            prob (float): probability of flip.
            horizontal (boolean): whether to apply horizontal flipping
            vertical (boolean): whether to apply vertical flipping
        """
        super().__init__()
        # Exactly one flip direction must be selected.
        if horizontal and vertical:
            raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
        if not horizontal and not vertical:
            raise ValueError("At least one of horiz or vert has to be True!")
        self._init(locals())

    def get_transform(self, img):
        height, width = img.shape[:2]
        if self._rand_range() >= self.prob:
            return NoOpTransform()
        # __init__ guarantees exactly one of the two flags is set.
        return HFlipTransform(width) if self.horizontal else VFlipTransform(height)
class Resize(Augmentation):
    """Resize the image to a fixed target size."""

    def __init__(self, shape, interp=Image.BILINEAR):
        """
        Args:
            shape: (h, w) tuple or a int
            interp: PIL interpolation method
        """
        # A single int means a square (shape, shape) target.
        shape = (shape, shape) if isinstance(shape, int) else tuple(shape)
        self._init(locals())

    def get_transform(self, img):
        old_h, old_w = img.shape[:2]
        return ResizeTransform(old_h, old_w, self.shape[0], self.shape[1], self.interp)
class ResizeShortestEdge(Augmentation):
    """
    Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
    If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
    """

    def __init__(
        self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
    ):
        """
        Args:
            short_edge_length (list[int]): If ``sample_style=="range"``,
                a [min, max] interval from which to sample the shortest edge length.
                If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
            max_size (int): maximum allowed longest edge length.
            sample_style (str): either "range" or "choice".
        """
        super().__init__()
        assert sample_style in ["range", "choice"], sample_style
        self.is_range = sample_style == "range"
        # Normalize a single int into a degenerate [n, n] range.
        if isinstance(short_edge_length, int):
            short_edge_length = (short_edge_length, short_edge_length)
        self._init(locals())

    def get_transform(self, img):
        h, w = img.shape[:2]
        if self.is_range:
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
        else:
            size = np.random.choice(self.short_edge_length)
        if size == 0:
            # A sampled size of 0 means "do not resize".
            return NoOpTransform()

        # Scale so that the shorter edge becomes ``size``.
        scale = size * 1.0 / min(h, w)
        new_h, new_w = (size, scale * w) if h < w else (scale * h, size)
        # If the longer edge overshoots max_size, shrink both dimensions.
        longest = max(new_h, new_w)
        if longest > self.max_size:
            shrink = self.max_size * 1.0 / longest
            new_h = new_h * shrink
            new_w = new_w * shrink
        return ResizeTransform(h, w, int(new_h + 0.5), int(new_w + 0.5), self.interp)
class RandomRotation(Augmentation):
    """
    This method returns a copy of this image, rotated the given
    number of degrees counter clockwise around the given center.
    """
    def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
        """
        Args:
            angle (list[float]): If ``sample_style=="range"``,
                a [min, max] interval from which to sample the angle (in degrees).
                If ``sample_style=="choice"``, a list of angles to sample from
            expand (bool): choose if the image should be resized to fit the whole
                rotated image (default), or simply cropped
            center (list[[float, float]]):  If ``sample_style=="range"``,
                a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
                [0, 0] being the top left of the image and [1, 1] the bottom right.
                If ``sample_style=="choice"``, a list of centers to sample from
                Default: None, which means that the center of rotation is the center of the image
                center has no effect if expand=True because it only affects shifting
        """
        super().__init__()
        assert sample_style in ["range", "choice"], sample_style
        self.is_range = sample_style == "range"
        # Normalize a single angle to a degenerate [a, a] range.
        if isinstance(angle, (float, int)):
            angle = (angle, angle)
        # Likewise, a single (x, y) center becomes a degenerate interval.
        if center is not None and isinstance(center[0], (float, int)):
            center = (center, center)
        self._init(locals())
    def get_transform(self, img):
        """Sample an angle (and optionally a center) and return the transform."""
        h, w = img.shape[:2]
        center = None
        if self.is_range:
            angle = np.random.uniform(self.angle[0], self.angle[1])
            if self.center is not None:
                # Sample x and y independently from their relative intervals.
                center = (
                    np.random.uniform(self.center[0][0], self.center[1][0]),
                    np.random.uniform(self.center[0][1], self.center[1][1]),
                )
        else:
            angle = np.random.choice(self.angle)
            if self.center is not None:
                center = np.random.choice(self.center)
        if center is not None:
            center = (w * center[0], h * center[1])  # Convert to absolute coordinates
        if angle % 360 == 0:
            # A multiple of 360 degrees leaves the image unchanged.
            return NoOpTransform()
        return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
class RandomCrop(Augmentation):
    """
    Randomly crop a subimage out of an image.
    """
    def __init__(self, crop_type: str, crop_size):
        """
        Args:
            crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
                See `config/defaults.py` for explanation.
            crop_size (tuple[float]): the relative ratio or absolute pixels of
                height and width
        """
        super().__init__()
        assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
        self._init(locals())
    def get_transform(self, img):
        """Sample a crop window fully inside the image and return the CropTransform."""
        h, w = img.shape[:2]
        croph, cropw = self.get_crop_size((h, w))
        assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
        # Top-left corner sampled so the window stays inside the image.
        h0 = np.random.randint(h - croph + 1)
        w0 = np.random.randint(w - cropw + 1)
        return CropTransform(w0, h0, cropw, croph)
    def get_crop_size(self, image_size):
        """
        Args:
            image_size (tuple): height, width
        Returns:
            crop_size (tuple): height, width in absolute pixels
        """
        h, w = image_size
        if self.crop_type == "relative":
            ch, cw = self.crop_size
            return int(h * ch + 0.5), int(w * cw + 0.5)
        elif self.crop_type == "relative_range":
            # Sample each ratio uniformly from [crop_size, 1].
            crop_size = np.asarray(self.crop_size, dtype=np.float32)
            ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
            return int(h * ch + 0.5), int(w * cw + 0.5)
        elif self.crop_type == "absolute":
            return (min(self.crop_size[0], h), min(self.crop_size[1], w))
        elif self.crop_type == "absolute_range":
            assert self.crop_size[0] <= self.crop_size[1]
            ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
            cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
            return ch, cw
        else:
            # BUG FIX: the exception was constructed but never raised, so an
            # unknown crop_type silently returned None and crashed later in
            # get_transform. Raise it instead.
            raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
class RandomExtent(Augmentation):
    """
    Outputs an image by cropping a random "subrect" of the source image.
    The subrect can be parameterized to include pixels outside the source image,
    in which case they will be set to zeros (i.e. black). The size of the output
    image will vary with the size of the random subrect.
    """
    def __init__(self, scale_range, shift_range):
        """
        Args:
            scale_range (l, h): Range of input-to-output size scaling factor
            shift_range (x, y): Range of shifts of the cropped subrect. The rect
                is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
                where (w, h) is the (width, height) of the input image. Set each
                component to zero to crop at the image's center.
        """
        super().__init__()
        self._init(locals())
    def get_transform(self, img):
        """Sample a scaled, shifted subrect and return the ExtentTransform."""
        img_h, img_w = img.shape[:2]
        # Initialize src_rect to fit the input image.
        # Coordinates are (x0, y0, x1, y1) relative to the image center.
        src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
        # Apply a random scaling to the src_rect.
        src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
        # Apply a random shift to the coordinates origin.
        # (np.random.rand() - 0.5) is Uniform(-0.5, 0.5), so the shift spans
        # +/- shift_range * dim / 2, as documented in __init__.
        src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
        src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
        # Map src_rect coordinates into image coordinates (center at corner).
        src_rect[0::2] += 0.5 * img_w
        src_rect[1::2] += 0.5 * img_h
        return ExtentTransform(
            src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
            output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
        )
class RandomContrast(Augmentation):
    """
    Randomly transforms image contrast.
    Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
    - intensity < 1 will reduce contrast
    - intensity = 1 will preserve the input image
    - intensity > 1 will increase contrast
    See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
    """

    def __init__(self, intensity_min, intensity_max):
        """
        Args:
            intensity_min (float): Minimum augmentation
            intensity_max (float): Maximum augmentation
        """
        super().__init__()
        self._init(locals())

    def get_transform(self, img):
        """Blend the image with its mean intensity by a random weight."""
        weight = np.random.uniform(self.intensity_min, self.intensity_max)
        return BlendTransform(src_image=img.mean(), src_weight=1 - weight, dst_weight=weight)
class RandomBrightness(Augmentation):
    """
    Randomly transforms image brightness.
    Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
    - intensity < 1 will reduce brightness
    - intensity = 1 will preserve the input image
    - intensity > 1 will increase brightness
    See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
    """

    def __init__(self, intensity_min, intensity_max):
        """
        Args:
            intensity_min (float): Minimum augmentation
            intensity_max (float): Maximum augmentation
        """
        super().__init__()
        self._init(locals())

    def get_transform(self, img):
        """Blend the image with black (0) by a random weight."""
        weight = np.random.uniform(self.intensity_min, self.intensity_max)
        return BlendTransform(src_image=0, src_weight=1 - weight, dst_weight=weight)
class RandomSaturation(Augmentation):
    """
    Randomly transforms saturation of an RGB image.
    Input images are assumed to have 'RGB' channel order.
    Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
    - intensity < 1 will reduce saturation (make the image more grayscale)
    - intensity = 1 will preserve the input image
    - intensity > 1 will increase saturation
    See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
    """

    def __init__(self, intensity_min, intensity_max):
        """
        Args:
            intensity_min (float): Minimum augmentation (1 preserves input).
            intensity_max (float): Maximum augmentation (1 preserves input).
        """
        super().__init__()
        self._init(locals())

    def get_transform(self, img):
        """Blend the image with its ITU-R 601 luma by a random weight."""
        assert img.shape[-1] == 3, "RandomSaturation only works on RGB images"
        weight = np.random.uniform(self.intensity_min, self.intensity_max)
        luma = img.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
        return BlendTransform(src_image=luma, src_weight=1 - weight, dst_weight=weight)
class RandomLighting(Augmentation):
    """
    The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
    Input images are assumed to have 'RGB' channel order.
    The degree of color jittering is randomly sampled via a normal distribution,
    with standard deviation given by the scale parameter.
    """

    def __init__(self, scale):
        """
        Args:
            scale (float): Standard deviation of principal component weighting.
        """
        super().__init__()
        self._init(locals())
        # Fixed ImageNet PCA basis: eigenvectors (one per row) and eigenvalues.
        self.eigen_vecs = np.array([
            [-0.5675, 0.7192, 0.4009],
            [-0.5808, -0.0045, -0.8140],
            [-0.5836, -0.6948, 0.4203],
        ])
        self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])

    def get_transform(self, img):
        """Sample per-component weights and return the additive blend."""
        assert img.shape[-1] == 3, "RandomLighting only works on RGB images"
        rgb_weights = np.random.normal(scale=self.scale, size=3)
        return BlendTransform(
            src_image=self.eigen_vecs.dot(rgb_weights * self.eigen_vals),
            src_weight=1.0,
            dst_weight=1.0,
        )
def _gen_crop_transform_with_instance(crop_size, image_size, instances, crop_box=True):
    """
    Generate a CropTransform so that the cropping region contains
    the center of the given instance.
    Args:
        crop_size (tuple): h, w in pixels
        image_size (tuple): h, w
        instances: sequence of instance bounding boxes in (x0, y0, x1, y1)
            order; one box is chosen at random and its center constrains the
            crop window.
        crop_box (bool): if False, iteratively grow the crop (via
            ``adjust_crop``) so that no instance box is sliced through.
    """
    bbox = random.choice(instances)
    crop_size = np.asarray(crop_size, dtype=np.int32)
    # Center of the chosen box, in (y, x) order.
    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
    assert (
        image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
    ), "The annotation bounding box is outside of the image!"
    assert (
        image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
    ), "Crop size is larger than image size!"
    # Valid (y, x) range for the crop's top-left corner: the window must stay
    # inside the image and still contain the box center.
    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
    # if some instance is cropped extend the box
    if not crop_box:
        num_modifications = 0
        modified = True
        # convert crop_size to float
        crop_size = crop_size.astype(np.float32)
        # Keep growing the window until no instance box is sliced, bailing
        # out after 100 adjustment rounds.
        while modified:
            modified, x0, y0, crop_size = adjust_crop(x0, y0, crop_size, instances)
            num_modifications += 1
            if num_modifications > 100:
                raise ValueError(
                    "Cannot finished cropping adjustment within 100 tries (#instances {}).".format(
                        len(instances)
                    )
                )
                # NOTE(review): unreachable — the raise above always exits first.
                return CropTransform(0, 0, image_size[1], image_size[0])
    return CropTransform(*map(int, (x0, y0, crop_size[1], crop_size[0])))
def adjust_crop(x0, y0, crop_size, instances, eps=1e-3):
    """Grow a crop window in place so it does not slice through any box.

    The window is [x0, x0 + crop_size[1]) x [y0, y0 + crop_size[0]).
    For each instance box that straddles one of the window's four edges,
    that edge is pushed outward (mutating ``crop_size``) until the box is
    fully included on that side.

    Returns:
        Tuple (modified, x0, y0, crop_size) where ``modified`` is True when
        any edge moved; callers loop until it comes back False.
    """
    right = x0 + crop_size[1]
    bottom = y0 + crop_size[0]
    modified = False
    for box in instances:
        # Box straddles the left edge -> move the edge left to box[0].
        if box[0] < x0 - eps and box[2] > x0 + eps:
            crop_size[1] += x0 - box[0]
            x0 = box[0]
            modified = True
        # Box straddles the right edge -> push it right to box[2].
        if box[0] < right - eps and box[2] > right + eps:
            crop_size[1] += box[2] - right
            right = box[2]
            modified = True
        # Box straddles the top edge -> move the edge up to box[1].
        if box[1] < y0 - eps and box[3] > y0 + eps:
            crop_size[0] += y0 - box[1]
            y0 = box[1]
            modified = True
        # Box straddles the bottom edge -> push it down to box[3].
        if box[1] < bottom - eps and box[3] > bottom + eps:
            crop_size[0] += box[3] - bottom
            bottom = box[3]
            modified = True
    return modified, x0, y0, crop_size
class RandomCropWithInstance(RandomCrop):
    """Instance-aware cropping: the sampled window is constrained by the
    instance boxes supplied alongside the image."""

    def __init__(self, crop_type, crop_size, crop_instance=True):
        """
        Args:
            crop_instance (bool): if False, extend cropping boxes to avoid cropping instances
        """
        super().__init__(crop_type, crop_size)
        self.crop_instance = crop_instance
        # Declare the extra "boxes" input required by get_transform.
        self.input_args = ("image", "boxes")

    def get_transform(self, img, boxes):
        """Sample a crop that contains the center of a random instance box."""
        image_size = img.shape[:2]
        return _gen_crop_transform_with_instance(
            self.get_crop_size(image_size),
            image_size,
            boxes,
            crop_box=self.crop_instance,
        )
class PadAugmentation(Augmentation):
    """Pad an image up to ``crop_size`` (height, width)."""

    def __init__(self, crop_size):
        """
        Args:
            crop_size: target (height, width) to pad the image up to.
        """
        super().__init__()
        self.crop_size = crop_size

    def get_crop_size(self, image_size):
        """Clip the configured target size to the actual image size."""
        h, w = image_size
        return (min(self.crop_size[0], h), min(self.crop_size[1], w))

    def get_transform(self, img):
        """Return the deterministic padding transform for this image."""
        clipped = self.get_crop_size(img.shape[:2])
        return _PadTransform(clipped[0], clipped[1], self.crop_size[1], self.crop_size[0])
class _PadTransform(Transform):
    """Deterministic zero-padding of an (h, w) image up to (crop_h, crop_w)."""

    def __init__(self, h: int, w: int, crop_h: int, crop_w: int):
        super().__init__()
        self._set_attributes(locals())

    def apply_image(self, img: np.ndarray) -> np.ndarray:
        """Pad `img` on the bottom/right to the stored crop size."""
        h, w = img.shape[:2]
        assert (
            self.h == h and self.w == w
        ), "Input size mismatch h w {}:{} -> {}:{}".format(self.h, self.w, h, w)
        pad_spec = ((0, self.crop_h - h), (0, self.crop_w - w), (0, 0))
        return np.pad(img, pad_width=pad_spec)

    def apply_coords(self, coords: np.ndarray) -> np.ndarray:
        # Padding is applied at the bottom/right, so coordinates are unchanged.
        return coords
| [
"numpy.pad",
"numpy.random.uniform",
"pprint.pformat",
"numpy.ceil",
"numpy.asarray",
"numpy.floor",
"random.choice",
"numpy.random.randint",
"numpy.array",
"inspect.signature",
"numpy.random.normal",
"numpy.random.choice",
"numpy.random.rand"
] | [((28939, 28963), 'random.choice', 'random.choice', (['instances'], {}), '(instances)\n', (28952, 28963), False, 'import random\n'), ((28980, 29017), 'numpy.asarray', 'np.asarray', (['crop_size'], {'dtype': 'np.int32'}), '(crop_size, dtype=np.int32)\n', (28990, 29017), True, 'import numpy as np\n'), ((29601, 29644), 'numpy.random.randint', 'np.random.randint', (['min_yx[0]', '(max_yx[0] + 1)'], {}), '(min_yx[0], max_yx[0] + 1)\n', (29618, 29644), True, 'import numpy as np\n'), ((29654, 29697), 'numpy.random.randint', 'np.random.randint', (['min_yx[1]', '(max_yx[1] + 1)'], {}), '(min_yx[1], max_yx[1] + 1)\n', (29671, 29697), True, 'import numpy as np\n'), ((6872, 6906), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high', 'size'], {}), '(low, high, size)\n', (6889, 6906), True, 'import numpy as np\n'), ((21533, 21565), 'numpy.random.randint', 'np.random.randint', (['(h - croph + 1)'], {}), '(h - croph + 1)\n', (21550, 21565), True, 'import numpy as np\n'), ((21579, 21611), 'numpy.random.randint', 'np.random.randint', (['(w - cropw + 1)'], {}), '(w - cropw + 1)\n', (21596, 21611), True, 'import numpy as np\n'), ((23881, 23945), 'numpy.array', 'np.array', (['[-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h]'], {}), '([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])\n', (23889, 23945), True, 'import numpy as np\n'), ((24017, 24076), 'numpy.random.uniform', 'np.random.uniform', (['self.scale_range[0]', 'self.scale_range[1]'], {}), '(self.scale_range[0], self.scale_range[1])\n', (24034, 24076), True, 'import numpy as np\n'), ((25354, 25411), 'numpy.random.uniform', 'np.random.uniform', (['self.intensity_min', 'self.intensity_max'], {}), '(self.intensity_min, self.intensity_max)\n', (25371, 25411), True, 'import numpy as np\n'), ((26199, 26256), 'numpy.random.uniform', 'np.random.uniform', (['self.intensity_min', 'self.intensity_max'], {}), '(self.intensity_min, self.intensity_max)\n', (26216, 26256), True, 'import numpy as np\n'), ((27256, 
27313), 'numpy.random.uniform', 'np.random.uniform', (['self.intensity_min', 'self.intensity_max'], {}), '(self.intensity_min, self.intensity_max)\n', (27273, 27313), True, 'import numpy as np\n'), ((28035, 28133), 'numpy.array', 'np.array', (['[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.814], [-0.5836, -0.6948, \n 0.4203]]'], {}), '([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.814], [-0.5836, \n -0.6948, 0.4203]])\n', (28043, 28133), True, 'import numpy as np\n'), ((28178, 28212), 'numpy.array', 'np.array', (['[0.2175, 0.0188, 0.0045]'], {}), '([0.2175, 0.0188, 0.0045])\n', (28186, 28212), True, 'import numpy as np\n'), ((28343, 28385), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'self.scale', 'size': '(3)'}), '(scale=self.scale, size=3)\n', (28359, 28385), True, 'import numpy as np\n'), ((33038, 33068), 'numpy.pad', 'np.pad', (['img'], {'pad_width': 'padding'}), '(img, pad_width=padding)\n', (33044, 33068), True, 'import numpy as np\n'), ((7088, 7120), 'inspect.signature', 'inspect.signature', (['self.__init__'], {}), '(self.__init__)\n', (7105, 7120), False, 'import inspect\n'), ((17648, 17723), 'numpy.random.randint', 'np.random.randint', (['self.short_edge_length[0]', '(self.short_edge_length[1] + 1)'], {}), '(self.short_edge_length[0], self.short_edge_length[1] + 1)\n', (17665, 17723), True, 'import numpy as np\n'), ((17757, 17797), 'numpy.random.choice', 'np.random.choice', (['self.short_edge_length'], {}), '(self.short_edge_length)\n', (17773, 17797), True, 'import numpy as np\n'), ((19987, 20034), 'numpy.random.uniform', 'np.random.uniform', (['self.angle[0]', 'self.angle[1]'], {}), '(self.angle[0], self.angle[1])\n', (20004, 20034), True, 'import numpy as np\n'), ((20308, 20336), 'numpy.random.choice', 'np.random.choice', (['self.angle'], {}), '(self.angle)\n', (20324, 20336), True, 'import numpy as np\n'), ((29467, 29505), 'numpy.asarray', 'np.asarray', (['image_size'], {'dtype': 'np.int32'}), '(image_size, dtype=np.int32)\n', 
(29477, 29505), True, 'import numpy as np\n'), ((20402, 20431), 'numpy.random.choice', 'np.random.choice', (['self.center'], {}), '(self.center)\n', (20418, 20431), True, 'import numpy as np\n'), ((22103, 22147), 'numpy.asarray', 'np.asarray', (['self.crop_size'], {'dtype': 'np.float32'}), '(self.crop_size, dtype=np.float32)\n', (22113, 22147), True, 'import numpy as np\n'), ((24193, 24209), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (24207, 24209), True, 'import numpy as np\n'), ((24274, 24290), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (24288, 24290), True, 'import numpy as np\n'), ((29554, 29572), 'numpy.ceil', 'np.ceil', (['center_yx'], {}), '(center_yx)\n', (29561, 29572), True, 'import numpy as np\n'), ((20122, 20177), 'numpy.random.uniform', 'np.random.uniform', (['self.center[0][0]', 'self.center[1][0]'], {}), '(self.center[0][0], self.center[1][0])\n', (20139, 20177), True, 'import numpy as np\n'), ((20199, 20254), 'numpy.random.uniform', 'np.random.uniform', (['self.center[0][1]', 'self.center[1][1]'], {}), '(self.center[0][1], self.center[1][1])\n', (20216, 20254), True, 'import numpy as np\n'), ((29390, 29409), 'numpy.floor', 'np.floor', (['center_yx'], {}), '(center_yx)\n', (29398, 29409), True, 'import numpy as np\n'), ((7846, 7866), 'pprint.pformat', 'pprint.pformat', (['attr'], {}), '(attr)\n', (7860, 7866), False, 'import pprint\n'), ((22181, 22198), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (22195, 22198), True, 'import numpy as np\n')] |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Low Rank Trotter steps.
There are a variety of tunable parameters for the low rank
Trotter steps--L, M, epsilon, lambda.
This set of routines interfaces with OpenFermion to provide
data containers and transformers from molecular integrals to
low-rank trotter step data structures.
"""
from typing import Optional
import numpy as np
from scipy.linalg import expm
from openfermion import MolecularData
from openfermion import (
low_rank_two_body_decomposition,
prepare_one_body_squared_evolution,
)
class LowRankTrotter:
    """Holds data for low rank Trotter step generation and analysis specific
    to running analysis through the FQE. Determination of basis transforms
    and matrices relies heavily on OpenFermion low-rank routines.
    """

    def __init__(
        self,
        molecule: Optional[MolecularData] = None,
        oei: Optional[np.ndarray] = None,
        tei: Optional[np.ndarray] = None,
        integral_cutoff: Optional[float] = 1.0e-8,
        basis_cutoff: Optional[float] = 1.0e-8,
        spin_basis: Optional[bool] = False,
    ):
        """
        Args:
            molecule: OpenFermion MolecularData; if given without oei/tei,
                its integrals are used.
            oei: one-electron integrals.
            tei: two-electron integrals (physics notation).
            integral_cutoff: truncation threshold for the first (two-body)
                factorization.
            basis_cutoff: truncation threshold for the second factorization.
            spin_basis: whether the integrals are in the spin-orbital basis.
        """
        self.molecule = molecule
        self.oei = oei
        self.tei = tei
        self.icut = integral_cutoff
        # lmax/mmax are optional hard caps on the factorization ranks.
        self.lmax = None
        self.mcut = basis_cutoff
        self.mmax = None
        self.spin_basis = spin_basis
        # if only molecule is provided get the spatial-MO matrices
        if molecule is not None and oei is None and tei is None:
            self.oei, self.tei = molecule.get_integrals()

    def first_factorization(self, threshold: Optional[float] = None):
        r"""Factorize :math:`V = 1/2 \sum_{ijkl, st}V_{ijkl} is^ jt^ kt ls` by
        transforming to chemist notation.

        Args:
            threshold: threshold for factorization; defaults to self.icut.

        Returns:
            Tuple of (eigenvalues of factors, one-body ops in factors, one
            body correction).
        """
        if threshold is None:
            threshold = self.icut
        # convert physics notation integrals into chemist notation
        # and determine the first low-rank factorization
        if self.spin_basis:
            (
                eigenvalues,
                one_body_squares,
                one_body_correction,
                _,
            ) = low_rank_two_body_decomposition(
                self.tei,
                truncation_threshold=threshold,
                final_rank=self.lmax,
                spin_basis=self.spin_basis,
            )
        else:
            # Spatial-basis integrals require the 1/2 prefactor here.
            (
                eigenvalues,
                one_body_squares,
                one_body_correction,
                _,
            ) = low_rank_two_body_decomposition(
                0.5 * self.tei,  # type: ignore
                truncation_threshold=threshold,
                final_rank=self.lmax,
                spin_basis=self.spin_basis,
            )
        return eigenvalues, one_body_squares, one_body_correction

    def second_factorization(
        self,
        eigenvalues: np.ndarray,
        one_body_squares: np.ndarray,
        threshold: Optional[float] = None,
    ):
        r"""
        Get Givens angles and DiagonalHamiltonian to simulate squared one-body.

        The goal here will be to prepare to simulate evolution under
        :math:`(\sum_{pq} h_{pq} a^{\dagger}_p a_q)^2` by decomposing as
        :math:`R e^{-i \sum_{pq} V_{pq} n_p n_q} R^\dagger` where
        :math:`R` is a basis transformation matrix.

        Args:
            eigenvalues: eigenvalues of 2nd quantized op
            one_body_squares: one-body-ops to square
            threshold: cutoff threshold.
                TODO: WARNING: THIS IS NOT USED CURRENTLY

        Returns:
            Tuple(List[np.ndarray], List[np.ndarray]) scaled-rho-rho spatial
            matrix and list of spatial basis transformations
        """
        if threshold is None:
            # TODO: update OpenFermion to take cutoff
            threshold = self.mcut
        scaled_density_density_matrices = []
        basis_change_matrices = []
        for j in range(len(eigenvalues)):
            # Use only the alpha-spin block ([::2, ::2]) since the operator
            # is spin-symmetric in this representation.
            (
                sdensity_density_matrix,
                sbasis_change_matrix,
            ) = prepare_one_body_squared_evolution(
                one_body_squares[j][::2, ::2], spin_basis=False)
            scaled_density_density_matrices.append(
                np.real(eigenvalues[j]) * sdensity_density_matrix)
            basis_change_matrices.append(sbasis_change_matrix)
        return scaled_density_density_matrices, basis_change_matrices

    def get_l_and_m(self, first_factor_cutoff, second_factor_cutoff):
        """
        Determine the L rank and M rank for an integral matrix

        Args:
            first_factor_cutoff: First factorization cumulative eigenvalue
                cutoff.
            second_factor_cutoff: Second factorization cumulative error cutoff.

        Returns:
            Return L and list of lists with M values for each L.
        """
        (
            _,
            one_body_squares,
            _,
        ) = self.first_factorization(first_factor_cutoff)
        m_factors = []
        for l in range(one_body_squares.shape[0]):
            w, _ = np.linalg.eigh(one_body_squares[l][::2, ::2])
            # Determine upper-bound on truncation errors that would occur
            # if we dropped the eigenvalues lower than some cumulative error
            cumulative_error_sum = np.cumsum(sorted(np.abs(w))[::-1])
            truncation_errors = cumulative_error_sum[-1] - cumulative_error_sum
            # Smallest rank whose truncation error is within the cutoff.
            max_rank = 1 + np.argmax(truncation_errors <= second_factor_cutoff)
            m_factors.append(max_rank)
        return one_body_squares.shape[0], m_factors

    def prepare_trotter_sequence(self, delta_t: float):
        """Build the Trotter sequence for FQE Evolution.

        Args:
            delta_t: Time to evolve.

        Returns:
            Tuple(List[np.ndarray], List[np.ndarray]) both sets of lists are
            n x n matrices. The first list is the list of basis change
            operators and the second list is the density-density matrix-spatial
            format.
        """
        (
            eigenvalues,
            one_body_squares,
            one_body_correction,
        ) = self.first_factorization(self.icut)
        (
            scaled_density_density_matrices,
            basis_change_matrices,
        ) = self.second_factorization(eigenvalues, one_body_squares, self.mcut)
        # Fold the one-body evolution (bare integrals plus the one-body
        # correction from the factorization) into the first basis change.
        trotter_basis_change = [
            basis_change_matrices[0] @ expm(
                -1j * delta_t * (self.oei + one_body_correction[::2, ::2]))
        ]
        time_scaled_rho_rho_matrices = []
        # Between consecutive factors only the relative rotation
        # R_{l+1} R_l^dagger needs to be applied.
        for ii in range(len(basis_change_matrices) - 1):
            trotter_basis_change.append(basis_change_matrices[ii + 1]
                                        @ basis_change_matrices[ii].conj().T)
            time_scaled_rho_rho_matrices.append(
                delta_t * scaled_density_density_matrices[ii])
        # get the last element
        time_scaled_rho_rho_matrices.append(
            delta_t * scaled_density_density_matrices[-1].astype(np.complex128))
        # BUG FIX: the original appended basis_change_matrices[ii + 1] here,
        # which raises NameError when there is only a single factor (the loop
        # above never runs, so `ii` is undefined). The final inverse rotation
        # is always that of the last factor, i.e. index -1.
        trotter_basis_change.append(basis_change_matrices[-1].conj().T)
        return trotter_basis_change, time_scaled_rho_rho_matrices
| [
"scipy.linalg.expm",
"numpy.abs",
"numpy.argmax",
"numpy.linalg.eigh",
"openfermion.prepare_one_body_squared_evolution",
"numpy.real",
"openfermion.low_rank_two_body_decomposition"
] | [((2905, 3032), 'openfermion.low_rank_two_body_decomposition', 'low_rank_two_body_decomposition', (['self.tei'], {'truncation_threshold': 'threshold', 'final_rank': 'self.lmax', 'spin_basis': 'self.spin_basis'}), '(self.tei, truncation_threshold=threshold,\n final_rank=self.lmax, spin_basis=self.spin_basis)\n', (2936, 3032), False, 'from openfermion import low_rank_two_body_decomposition, prepare_one_body_squared_evolution\n'), ((3271, 3405), 'openfermion.low_rank_two_body_decomposition', 'low_rank_two_body_decomposition', (['(0.5 * self.tei)'], {'truncation_threshold': 'threshold', 'final_rank': 'self.lmax', 'spin_basis': 'self.spin_basis'}), '(0.5 * self.tei, truncation_threshold=\n threshold, final_rank=self.lmax, spin_basis=self.spin_basis)\n', (3302, 3405), False, 'from openfermion import low_rank_two_body_decomposition, prepare_one_body_squared_evolution\n'), ((4896, 4983), 'openfermion.prepare_one_body_squared_evolution', 'prepare_one_body_squared_evolution', (['one_body_squares[j][::2, ::2]'], {'spin_basis': '(False)'}), '(one_body_squares[j][::2, ::2],\n spin_basis=False)\n', (4930, 4983), False, 'from openfermion import low_rank_two_body_decomposition, prepare_one_body_squared_evolution\n'), ((5908, 5953), 'numpy.linalg.eigh', 'np.linalg.eigh', (['one_body_squares[l][::2, ::2]'], {}), '(one_body_squares[l][::2, ::2])\n', (5922, 5953), True, 'import numpy as np\n'), ((6282, 6334), 'numpy.argmax', 'np.argmax', (['(truncation_errors <= second_factor_cutoff)'], {}), '(truncation_errors <= second_factor_cutoff)\n', (6291, 6334), True, 'import numpy as np\n'), ((7259, 7325), 'scipy.linalg.expm', 'expm', (['(-1.0j * delta_t * (self.oei + one_body_correction[::2, ::2]))'], {}), '(-1.0j * delta_t * (self.oei + one_body_correction[::2, ::2]))\n', (7263, 7325), False, 'from scipy.linalg import expm\n'), ((5065, 5088), 'numpy.real', 'np.real', (['eigenvalues[j]'], {}), '(eigenvalues[j])\n', (5072, 5088), True, 'import numpy as np\n'), ((6157, 6166), 'numpy.abs', 
'np.abs', (['w'], {}), '(w)\n', (6163, 6166), True, 'import numpy as np\n')] |
"""
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: <NAME> <<EMAIL>>
This package is distributed under New BSD license.
"""
import numpy as np
def compute_precision(Y_pred, Y_true):
    """Compute precision for binary classification.

    precision = TP / (TP + FP), i.e. true positives over all predicted
    positives. Applies to binary (0/1) labels only.

    Arguments:
        Y_pred -- predictions, numpy array of shape (n_y, m) where n_y = no. of outputs, m = no. of examples
        Y_true -- true values, numpy array of shape (n_y, m) where n_y = no. of outputs, m = no. of examples

    Return:
        precision -- numpy array of shape (n_y, 1), or the scalar 0 when
                     there are no true positives (0 is bad, 1 is good)
    """
    # A predicted 1 with a true 1 sums to 2 (true positive); a predicted 1
    # with a true 0 differs by +1 (false positive).
    tp = np.sum((Y_pred + Y_true == 2).astype(float), axis=1, keepdims=True)
    fp = np.sum((Y_pred - Y_true == 1).astype(float), axis=1, keepdims=True)
    if tp == 0:
        return 0
    return tp / (tp + fp)
def compute_recall(Y_pred, Y_true):
    """Compute recall for binary classification.

    recall = TP / (TP + FN), i.e. true positives over all actual positives.
    Applies to binary (0/1) labels only.

    Arguments:
        Y_pred -- predictions, numpy array of shape (n_y, m) where n_y = no. of outputs, m = no. of examples
        Y_true -- true values, numpy array of shape (n_y, m) where n_y = no. of outputs, m = no. of examples

    Return:
        recall -- numpy array of shape (n_y, 1), or the scalar 0 when there
                  are no true positives (0 is bad, 1 is good)
    """
    # A predicted 1 with a true 1 sums to 2 (true positive); a true 1 with a
    # predicted 0 differs by +1 (false negative).
    tp = np.sum((Y_pred + Y_true == 2).astype(float), axis=1, keepdims=True)
    fn = np.sum((Y_true - Y_pred == 1).astype(float), axis=1, keepdims=True)
    if tp == 0:
        return 0
    return tp / (tp + fn)
def compute_Fscore(Y_pred, Y_true):
    """Compute the F-score, F = 2*P*R / (P + R), from precision P and recall R.

    Applies to binary classification only.

    Arguments:
        Y_pred -- predictions, numpy array of shape (n_y, m) where n_y = no. of outputs, m = no. of examples
        Y_true -- true values, numpy array of shape (n_y, m) where n_y = no. of outputs, m = no. of examples

    Return:
        F -- F-score, numpy array of shape (n_y, 1), or the scalar 0 when
             precision + recall is zero (0 is bad, 1 is good)
    """
    precision = compute_precision(Y_pred, Y_true)
    recall = compute_recall(Y_pred, Y_true)
    denominator = precision + recall
    # Guard the harmonic mean against a zero denominator.
    if denominator == 0:
        return 0
    return 2 * precision * recall / denominator
def goodness_fit_regression(Y_pred, Y_true):
    """
    Compute goodness of fit metrics: R2, std(error), avg(error).
    Note: these metrics only apply to regression
    Arguments:
        Y_pred -- numpy array of size (K, m) where K = num outputs, m = num examples
        Y_true -- numpy array of size (K, m) where K = num outputs, m = num examples
    Return:
        R2 -- numpy array of shape (K, 1), RSquare value per output
        sig -- numpy array of shape (K, 1), standard deviation of error per output
        mu -- numpy array of shape (K, 1), avg value of error per output
    """
    K = Y_true.shape[0]
    R2 = rsquare(Y_pred, Y_true)
    error = Y_pred - Y_true
    # BUG FIX: compute the statistics per output (axis=1). The original
    # np.std/np.mean over the flattened array returned scalars, so the
    # .reshape(K, 1) below raised for K > 1; for K == 1 values are unchanged.
    sig = np.std(error, axis=1)
    mu = np.mean(error, axis=1)
    return R2.reshape(K, 1), sig.reshape(K, 1), mu.reshape(K, 1)
def rsquare(Y_pred, Y_true):
    """Compute the R-square value per output row.

    R2 = 1 - SSE / SSTO, where SSE is the sum of squared errors and SSTO the
    total sum of squares about the mean of Y_true. A small epsilon is added
    to each SSTO term to avoid division by zero.

    Arguments:
        Y_pred -- predictions, numpy array of shape (K, m) where K = no. of outputs, m = no. of examples
        Y_true -- true values, numpy array of shape (K, m) where K = no. of outputs, m = no. of examples

    Return:
        R2 -- the R-square value, numpy array of shape (K, 1)
    """
    eps = 1e-8  # small number to avoid division by zero
    mean_true = np.mean(Y_true)
    residual_ss = np.sum((Y_pred - Y_true) ** 2, axis=1)
    total_ss = np.sum((Y_true - mean_true) ** 2 + eps, axis=1)
    return 1 - residual_ss / total_ss
| [
"numpy.std",
"numpy.mean",
"numpy.square"
] | [((3429, 3452), 'numpy.std', 'np.std', (['(Y_pred - Y_true)'], {}), '(Y_pred - Y_true)\n', (3435, 3452), True, 'import numpy as np\n'), ((3462, 3486), 'numpy.mean', 'np.mean', (['(Y_pred - Y_true)'], {}), '(Y_pred - Y_true)\n', (3469, 3486), True, 'import numpy as np\n'), ((4215, 4230), 'numpy.mean', 'np.mean', (['Y_true'], {}), '(Y_true)\n', (4222, 4230), True, 'import numpy as np\n'), ((4248, 4274), 'numpy.square', 'np.square', (['(Y_pred - Y_true)'], {}), '(Y_pred - Y_true)\n', (4257, 4274), True, 'import numpy as np\n'), ((4302, 4327), 'numpy.square', 'np.square', (['(Y_true - Y_bar)'], {}), '(Y_true - Y_bar)\n', (4311, 4327), True, 'import numpy as np\n')] |
import numpy as np
import pickle
import tensorflow as tf
def DataNormalisationZeroCentred(InputData, AverageData=None):
    """Zero-centre data by subtracting a per-feature average.

    Args:
        InputData: numpy array of samples (first axis = samples).
        AverageData: pre-computed average to subtract. If None, the
            column-wise mean of InputData is computed and returned so it can
            be reused later (e.g. to normalise test data consistently).

    Returns:
        Tuple (NormalisedData, AverageData).
    """
    # Both branches of the original performed the identical subtraction;
    # only the computation of the average is conditional.
    if AverageData is None:
        AverageData = np.mean(InputData, axis=0)
    return InputData - AverageData, AverageData
def ReadModels(model_folder):
    """Load the binary-classification and regression Keras models plus
    their pickled image-normalisation arrays from *model_folder*.

    Returns (model_binary, aveImg_binary, model_regression,
    aveImg_regression).
    """
    model_binary = tf.keras.models.load_model(
        model_folder + "BinaryClassification/saved_model.model")
    # Context managers guarantee the pickle files are closed.
    with open(
            model_folder + "BinaryClassification/saved_image_norm.model",
            "rb") as reader_fid:
        aveImg_binary = pickle.load(reader_fid)

    model_regression = tf.keras.models.load_model(
        model_folder + "Regression/saved_model.model")
    with open(
            model_folder + "Regression/saved_image_norm.model",
            "rb") as reader_fid:
        aveImg_regression = pickle.load(reader_fid)

    return model_binary, aveImg_binary, model_regression, aveImg_regression
| [
"numpy.mean",
"pickle.load",
"tensorflow.keras.models.load_model"
] | [((399, 486), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (["(model_folder + 'BinaryClassification/saved_model.model')"], {}), "(model_folder +\n 'BinaryClassification/saved_model.model')\n", (425, 486), True, 'import tensorflow as tf\n'), ((611, 634), 'pickle.load', 'pickle.load', (['reader_fid'], {}), '(reader_fid)\n', (622, 634), False, 'import pickle\n'), ((682, 755), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (["(model_folder + 'Regression/saved_model.model')"], {}), "(model_folder + 'Regression/saved_model.model')\n", (708, 755), True, 'import tensorflow as tf\n'), ((878, 901), 'pickle.load', 'pickle.load', (['reader_fid'], {}), '(reader_fid)\n', (889, 901), False, 'import pickle\n'), ((172, 198), 'numpy.mean', 'np.mean', (['InputData'], {'axis': '(0)'}), '(InputData, axis=0)\n', (179, 198), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Plot module for Seispy Toolbox
"""
import numpy as np
import matplotlib.pyplot as plt
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
__all__ = ['wiggle', 'traces', 'show']
def insert_zeros(trace, tt=None):
    """Insert zero-crossing samples into a trace.

    For every sign change in *trace*, the exact crossing time is found
    by linear interpolation and a zero-amplitude sample is inserted at
    that time.

    Returns (trace_with_zeros, tt_with_zeros).
    """
    if tt is None:
        tt = np.arange(len(trace))

    # Indices of the sample immediately before each sign change.
    crossings = np.where(np.diff(np.signbit(trace)))[0]

    t_before = tt[crossings]
    t_after = tt[crossings + 1]
    v_before = trace[crossings]
    v_after = trace[crossings + 1]
    # Linear interpolation: time at which the trace crosses zero.
    slope = (v_after - v_before) / (t_after - t_before)
    t_cross = t_before - v_before / slope

    # Break both vectors into chunks between crossings.
    tt_chunks = np.split(tt, crossings + 1)
    trace_chunks = np.split(trace, crossings + 1)

    tt_out = tt_chunks[0]
    trace_out = trace_chunks[0]

    # Stitch the chunks back together with a zero sample between each.
    for k in range(len(t_cross)):
        tt_out = np.hstack(
            (tt_out, np.array([t_cross[k]]), tt_chunks[k + 1]))
        trace_out = np.hstack(
            (trace_out, np.zeros(1), trace_chunks[k + 1]))

    return trace_out, tt_out
def wiggle_input_check(data, tt, xx, sf, verbose):
    """Validate and normalise the inputs of wiggle() and traces().

    Parameters
    ----------
    data : 2D numpy array of samples, one trace per column
    tt : 1D numpy array of time values, or None (auto: 0..rows-1)
    xx : 1D numpy array of trace offsets, or None (auto: 0..cols-1)
    sf : stretch factor (number) used to rescale trace amplitudes
    verbose : bool, print generated/validated vectors

    Returns
    -------
    (data, tt, xx, ts) -- data rescaled by trace spacing * sf, the
    (possibly generated) tt and xx vectors, and ts, the minimum
    horizontal spacing between traces.

    Raises
    ------
    TypeError / ValueError when an input is malformed.
    """
    # Input check for verbose
    if not isinstance(verbose, bool):
        raise TypeError("verbose must be a bool")

    # Input check for data
    if type(data).__module__ != np.__name__:
        raise TypeError("data must be a numpy array")
    if len(data.shape) != 2:
        raise ValueError("data must be a 2D array")

    # Input check for tt
    if tt is None:
        tt = np.arange(data.shape[0])
        if verbose:
            print("tt is automatically generated.")
            print(tt)
    else:
        if type(tt).__module__ != np.__name__:
            raise TypeError("tt must be a numpy array")
        if len(tt.shape) != 1:
            raise ValueError("tt must be a 1D array")
        if tt.shape[0] != data.shape[0]:
            raise ValueError("tt must have same as data's rows")

    # Input check for xx
    if xx is None:
        xx = np.arange(data.shape[1])
        if verbose:
            print("xx is automatically generated.")
            print(xx)
    else:
        # BUGFIX: this branch previously re-validated tt (copy-paste
        # error); it now validates xx against the number of traces.
        if type(xx).__module__ != np.__name__:
            raise TypeError("xx must be a numpy array")
        if len(xx.shape) != 1:
            raise ValueError("xx must be a 1D array")
        if xx.shape[0] != data.shape[1]:
            raise ValueError("xx must have same as data's columns")
        if verbose:
            print(xx)

    # Input check for stretch factor (sf)
    if not isinstance(sf, (int, float)):
        raise TypeError("Strech factor(sf) must be a number")

    # Compute trace horizontal spacing
    ts = np.min(np.diff(xx))

    # Rescale data by trace spacing and stretch factor so the widest
    # trace spans at most sf * ts.
    data_max_std = np.max(np.std(data, axis=0))
    data = data / data_max_std * ts * sf

    return data, tt, xx, ts
def wiggle(data, tt=None, xx=None, color='k', sf=0.15, verbose=False):
    """Wiggle plot of a seismic data section.

    Syntax examples:
        wiggle(data)
        wiggle(data, tt)
        wiggle(data, tt, xx)
        wiggle(data, tt, xx, color)
        wiggle(data, tt, xx, color, sf, verbose)

    Parameters
    ----------
    data : 2D numpy array, one trace per column
    tt : 1D numpy array of time values (auto-generated when None)
    xx : 1D numpy array of trace offsets (auto-generated when None)
    color : matplotlib color used for the trace lines and the fill of
        positive lobes
    sf : stretch factor applied to trace amplitudes
    verbose : when True, print each trace offset as it is drawn

    Use the column major order for array as in Fortran to optimal performance.

    The following color abbreviations are supported:

    ==========  ========
    character   color
    ==========  ========
    'b'         blue
    'g'         green
    'r'         red
    'c'         cyan
    'm'         magenta
    'y'         yellow
    'k'         black
    'w'         white
    ==========  ========
    """

    # Input check (also rescales data and derives the trace spacing ts)
    data, tt, xx, ts = wiggle_input_check(data, tt, xx, sf, verbose)

    # Plot data using matplotlib.pyplot
    Ntr = data.shape[1]

    ax = plt.gca()
    for ntr in range(Ntr):
        trace = data[:, ntr]
        offset = xx[ntr]

        if verbose:
            print(offset)

        # Insert interpolated zero crossings so the positive-lobe fill
        # starts and stops exactly at the zero line.
        trace_zi, tt_zi = insert_zeros(trace, tt)
        ax.fill_betweenx(tt_zi, offset, trace_zi + offset,
                         where=trace_zi >= 0,
                         facecolor=color)
        ax.plot(trace_zi + offset, tt_zi, color)

    # One trace spacing of margin on each side; time axis pointing down
    # (seismic convention).
    ax.set_xlim(xx[0] - ts, xx[-1] + ts)
    ax.set_ylim(tt[0], tt[-1])
    ax.invert_yaxis()
def traces(data, tt=None, xx=None, color='k', sf=0.15, verbose=False, shade=False):
    """Plot large seismic dataset in real time using pyqtgraph.

    Parameters mirror wiggle(); additionally:
    shade -- when True, fill the positive lobes of each trace (slower,
        uses insert_zeros() plus a FillBetweenItem per trace)

    Returns the pyqtgraph PlotWindow/PlotItem created by pg.plot().
    """

    # Input check (also rescales data and derives the trace spacing)
    data, tt, xx, ts = wiggle_input_check(data, tt, xx, sf, verbose)

    Ntr = data.shape[1]

    pg.setConfigOption('background', 'w')
    pg.setConfigOption('foreground', 'k')
    pg.setConfigOptions(antialias=True)  # Enable antialiasing

    p = pg.plot()

    for ntr in range(Ntr):
        trace = data[:, ntr]
        offset = xx[ntr]

        if shade:
            # Insert zeros
            trace_zi, tt_zi = insert_zeros(trace, tt)
            # Seperate top and bottom line. None entries break the
            # lines so only the positive (resp. negative) lobes are
            # drawn; pyqtgraph treats NaN/None as gaps.
            trace_top = np.array(
                [i + offset if i >= 0 else None for i in trace_zi],
                dtype='float64')
            trace_line = np.array(
                [offset if i >= 0 else None for i in trace_zi],
                dtype='float64')
            trace_bot = np.array(
                [i + offset if i <= 0 else None for i in trace_zi],
                dtype='float64')
            # Plot top and bottom, then fill between the baseline and
            # the positive lobe.
            top = p.plot(x=trace_top, y=tt_zi, pen=color)
            bot = p.plot(x=trace_line, y=tt_zi, pen=color)
            p.plot(x=trace_bot, y=tt_zi, pen=color)
            fill = pg.FillBetweenItem(bot, top, brush=color)
            p.addItem(fill)
        else:
            # Fast path: one polyline per trace, no fill.
            p.plot(x=trace+offset, y=tt, pen=color)

    p.showGrid(x=True, y=True, alpha=0.3)
    # Time axis pointing down (seismic convention).
    p.invertY(True)
    p.setRange(yRange=[np.min(tt), np.max(tt)], padding=0)

    return p
def show():
    """Enter the Qt event loop so pyqtgraph windows stay responsive."""
    app = QtGui.QApplication.instance()
    app.exec_()
if __name__ == '__main__':
    # Demo: plot 100 random traces of 1000 samples each.
    data = np.random.randn(1000, 100)
    traces(data)
    show()
| [
"pyqtgraph.setConfigOption",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"numpy.random.randn",
"numpy.std",
"numpy.zeros",
"numpy.split",
"numpy.min",
"numpy.diff",
"numpy.arange",
"numpy.array",
"numpy.signbit",
"matplotlib.pyplot.gca",
"numpy.max",
"pyqtgraph.plot",
"pyqtgraph.setConfi... | [((651, 675), 'numpy.split', 'np.split', (['tt', '(zc_idx + 1)'], {}), '(tt, zc_idx + 1)\n', (659, 675), True, 'import numpy as np\n'), ((694, 721), 'numpy.split', 'np.split', (['trace', '(zc_idx + 1)'], {}), '(trace, zc_idx + 1)\n', (702, 721), True, 'import numpy as np\n'), ((3781, 3790), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3788, 3790), True, 'import matplotlib.pyplot as plt\n'), ((4537, 4574), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""background"""', '"""w"""'], {}), "('background', 'w')\n", (4555, 4574), True, 'import pyqtgraph as pg\n'), ((4579, 4616), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""foreground"""', '"""k"""'], {}), "('foreground', 'k')\n", (4597, 4616), True, 'import pyqtgraph as pg\n'), ((4621, 4656), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'antialias': '(True)'}), '(antialias=True)\n', (4640, 4656), True, 'import pyqtgraph as pg\n'), ((4689, 4698), 'pyqtgraph.plot', 'pg.plot', ([], {}), '()\n', (4696, 4698), True, 'import pyqtgraph as pg\n'), ((5974, 6000), 'numpy.random.randn', 'np.random.randn', (['(1000)', '(100)'], {}), '(1000, 100)\n', (5989, 6000), True, 'import numpy as np\n'), ((1563, 1587), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (1572, 1587), True, 'import numpy as np\n'), ((2044, 2068), 'numpy.arange', 'np.arange', (['data.shape[1]'], {}), '(data.shape[1])\n', (2053, 2068), True, 'import numpy as np\n'), ((2710, 2721), 'numpy.diff', 'np.diff', (['xx'], {}), '(xx)\n', (2717, 2721), True, 'import numpy as np\n'), ((2804, 2824), 'numpy.std', 'np.std', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2810, 2824), True, 'import numpy as np\n'), ((4948, 5027), 'numpy.array', 'np.array', (['[(i + offset if i >= 0 else None) for i in trace_zi]'], {'dtype': '"""float64"""'}), "([(i + offset if i >= 0 else None) for i in trace_zi], dtype='float64')\n", (4956, 5027), True, 'import numpy as np\n'), ((5084, 5159), 
'numpy.array', 'np.array', (['[(offset if i >= 0 else None) for i in trace_zi]'], {'dtype': '"""float64"""'}), "([(offset if i >= 0 else None) for i in trace_zi], dtype='float64')\n", (5092, 5159), True, 'import numpy as np\n'), ((5215, 5294), 'numpy.array', 'np.array', (['[(i + offset if i <= 0 else None) for i in trace_zi]'], {'dtype': '"""float64"""'}), "([(i + offset if i <= 0 else None) for i in trace_zi], dtype='float64')\n", (5223, 5294), True, 'import numpy as np\n'), ((5548, 5589), 'pyqtgraph.FillBetweenItem', 'pg.FillBetweenItem', (['bot', 'top'], {'brush': 'color'}), '(bot, top, brush=color)\n', (5566, 5589), True, 'import pyqtgraph as pg\n'), ((5896, 5925), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (5923, 5925), False, 'from pyqtgraph.Qt import QtGui\n'), ((437, 454), 'numpy.signbit', 'np.signbit', (['trace'], {}), '(trace)\n', (447, 454), True, 'import numpy as np\n'), ((893, 915), 'numpy.array', 'np.array', (['[tt_zero[i]]'], {}), '([tt_zero[i]])\n', (901, 915), True, 'import numpy as np\n'), ((988, 999), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (996, 999), True, 'import numpy as np\n'), ((5770, 5780), 'numpy.min', 'np.min', (['tt'], {}), '(tt)\n', (5776, 5780), True, 'import numpy as np\n'), ((5782, 5792), 'numpy.max', 'np.max', (['tt'], {}), '(tt)\n', (5788, 5792), True, 'import numpy as np\n')] |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
import random
import numpy as np
import psutil
from .lidar_sensor_params import SensorParams
from .utils import pybullet
from .utils.math import batches, rotate_quat
from .utils.pybullet import bullet_client as bc
class Lidar:
    """Simulated lidar sensor that traces batched rays via pybullet.

    A set of "base rays" (origin/direction pairs relative to the
    sensor) is computed lazily once and then translated to the current
    origin for every scan, avoiding per-scan trigonometry.
    """

    def __init__(
        self, origin, sensor_params: SensorParams, bullet_client: bc.BulletClient
    ):
        """origin -- sensor position (3-vector) in world coordinates;
        sensor_params -- lidar geometry/noise description;
        bullet_client -- pybullet client used for the ray tests.
        """
        self._origin = origin
        self._sensor_params = sensor_params
        self._bullet_client = bullet_client
        # Physical (non-hyperthreaded) core count for parallel ray tests.
        self._n_threads = psutil.cpu_count(logical=False)

        # As an optimization we compute a set of "base rays" once and shift translate
        # them to follow the user, and then trace for collisions.
        self._base_rays = None
        self._static_lidar_noise = self._compute_static_lidar_noise()

    @property
    def origin(self):
        """Current sensor position in world coordinates."""
        return self._origin

    @origin.setter
    def origin(self, value):
        self._origin = value

    def _compute_static_lidar_noise(self):
        """Sample one fixed Gaussian noise value per ray.

        Sampled once at construction so each ray carries a constant
        bias across scans, complementing the per-scan dynamic noise in
        ``_apply_noise``.
        """
        n_rays = int(
            (self._sensor_params.end_angle - self._sensor_params.start_angle)
            / self._sensor_params.angle_resolution
        )
        n_points = n_rays * len(self._sensor_params.laser_angles)

        static_lidar_noise = [
            random.gauss(
                self._sensor_params.noise_mu, self._sensor_params.noise_sigma
            )
            for _ in range(n_points)
        ]
        # BUGFIX: ``np.float`` was deprecated in NumPy 1.20 and removed
        # in 1.24; the builtin ``float`` yields the same float64 dtype.
        return np.array(static_lidar_noise, dtype=float)

    def compute_point_cloud(self):
        """Trace all rays from the current origin.

        Returns (point_cloud, hits, rays): per-ray hit positions
        (``inf`` vector for misses), per-ray hit booleans, and the
        traced rays themselves.
        """
        rays = self._compute_rays()
        point_cloud, hits = self._trace_rays(rays)
        # point_cloud = self._apply_noise(point_cloud)
        assert (
            len(point_cloud) == len(hits) == len(rays) == len(self._static_lidar_noise)
        )
        return point_cloud, hits, rays

    def _compute_rays(self):
        """Translate the cached base rays to the current origin."""
        if self._base_rays is None:
            # First call: build the direction fan once, covering every
            # laser elevation angle crossed with every horizontal step.
            self._base_rays = []
            n_rays = int(
                (self._sensor_params.end_angle - self._sensor_params.start_angle)
                / self._sensor_params.angle_resolution
            )

            yaws = -self._sensor_params.laser_angles
            rolls = np.arange(n_rays) * self._sensor_params.angle_resolution
            for yaw, roll in itertools.product(yaws, rolls):
                rot = pybullet.getQuaternionFromEuler((roll, 0, yaw))
                origin = np.array([0, 0, 0])
                direction = rotate_quat(
                    np.asarray(rot, dtype=float),
                    np.asarray((0, self._sensor_params.max_distance, 0), dtype=float),
                )
                self._base_rays.append((origin, direction))

        rays = [
            (origin + self._origin, direction + self._origin)
            for origin, direction in self._base_rays
        ]
        return rays

    def _trace_rays(self, rays):
        """Batch-trace rays, respecting pybullet's batch-size limit.

        Returns (positions, hits); misses are reported as
        ``[inf, inf, inf]``.
        """
        results = []
        for batched_rays in batches(
            rays, int(pybullet.MAX_RAY_INTERSECTION_BATCH_SIZE - 1)
        ):
            origins, directions = zip(*batched_rays)
            results.extend(
                self._bullet_client.rayTestBatch(origins, directions, self._n_threads)
            )

        hit_ids, _, _, positions, _ = zip(*results)
        positions = list(positions)
        hits = []
        for i, position in enumerate(positions):
            hit = hit_ids[i] != -1  # -1 marks "no object hit"
            hits.append(hit)
            positions[i] = (
                np.array(position) if hit else np.array([np.inf, np.inf, np.inf])
            )

        return positions, hits

    def _apply_noise(self, point_cloud):
        """Perturb each point along its ray by static + dynamic noise.

        Currently unused — see the commented call in
        ``compute_point_cloud``.
        """
        dynamic_noise = np.random.normal(
            self._sensor_params.noise_mu,
            self._sensor_params.noise_sigma,
            size=len(point_cloud),
        )
        local_pc = point_cloud - self._origin
        noise = self._static_lidar_noise + dynamic_noise
        # Shift every point along its unit direction from the sensor.
        return point_cloud + (
            local_pc
            / np.linalg.norm(local_pc, axis=1)[:, np.newaxis]
            * noise[:, np.newaxis]
        )
| [
"numpy.asarray",
"numpy.array",
"numpy.arange",
"numpy.linalg.norm",
"itertools.product",
"random.gauss",
"psutil.cpu_count"
] | [((1629, 1660), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (1645, 1660), False, 'import psutil\n'), ((2594, 2638), 'numpy.array', 'np.array', (['static_lidar_noise'], {'dtype': 'np.float'}), '(static_lidar_noise, dtype=np.float)\n', (2602, 2638), True, 'import numpy as np\n'), ((3407, 3437), 'itertools.product', 'itertools.product', (['yaws', 'rolls'], {}), '(yaws, rolls)\n', (3424, 3437), False, 'import itertools\n'), ((2451, 2526), 'random.gauss', 'random.gauss', (['self._sensor_params.noise_mu', 'self._sensor_params.noise_sigma'], {}), '(self._sensor_params.noise_mu, self._sensor_params.noise_sigma)\n', (2463, 2526), False, 'import random\n'), ((3321, 3338), 'numpy.arange', 'np.arange', (['n_rays'], {}), '(n_rays)\n', (3330, 3338), True, 'import numpy as np\n'), ((3534, 3553), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3542, 3553), True, 'import numpy as np\n'), ((4592, 4610), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (4600, 4610), True, 'import numpy as np\n'), ((4623, 4657), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (4631, 4657), True, 'import numpy as np\n'), ((3616, 3644), 'numpy.asarray', 'np.asarray', (['rot'], {'dtype': 'float'}), '(rot, dtype=float)\n', (3626, 3644), True, 'import numpy as np\n'), ((3666, 3731), 'numpy.asarray', 'np.asarray', (['(0, self._sensor_params.max_distance, 0)'], {'dtype': 'float'}), '((0, self._sensor_params.max_distance, 0), dtype=float)\n', (3676, 3731), True, 'import numpy as np\n'), ((5089, 5121), 'numpy.linalg.norm', 'np.linalg.norm', (['local_pc'], {'axis': '(1)'}), '(local_pc, axis=1)\n', (5103, 5121), True, 'import numpy as np\n')] |
import array
import random
import numpy as np
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
# Parameter definition table (Ax-style specification)
PARAMETERS = [
    {
        "name": "x1",
        "type": "range",
        "bounds": [-10.0, 10.0],
        "value_type": "float",
    },
    {
        "name": "x2",
        "type": "range",
        "bounds": [-10.0, 10.0],
        "value_type": "float",
    },
]
LOCUS = np.array([128, 64, 32, 16, 8, 4, 2, 1])  # bit-weight (binary-to-number) table, MSB first
NLOCUS = len(LOCUS)  # number of bits per parameter
NPARAM = len(PARAMETERS)  # number of parameters
NBIT = NLOCUS*NPARAM  # total chromosome length in bits
NGEN = 40  # number of generations
NPOP = 300  # number of individuals in the population
CXPB = 0.5  # crossover probability
MUTPB = 0.2  # mutation probability (per individual)
INDPB = 0.05  # mutation probability (per bit)
# Optimizer class
class Optimizer():
    """Binary-coded genetic-algorithm optimizer built on DEAP.

    Each parameter declared in PARAMETERS is encoded as an NLOCUS-bit
    gene; the chromosome concatenates all parameter genes (NBIT bits).
    The objective is minimised (FitnessMin, weight -1.0). By default it
    optimises the built-in Booth function; a cb_step callback can
    replace the objective with an external evaluation.
    """

    def __init__(self, cb_step=None, cb_end=None):
        """Set up the DEAP toolbox, individual encoding and GA operators.

        cb_step -- optional callback called with the candidate parameter
            vector each evaluation; must return the score
        cb_end -- optional callback called with the best parameter
            vector when optimisation finishes
        """
        self.cb_step = cb_step  # per-step callback
        self.cb_end = cb_end  # end-of-run callback
        # Minimisation: single objective with weight -1.0
        creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
        creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMin)
        toolbox = base.Toolbox()
        # Gene type
        # Attribute generator
        toolbox.register("attr_bool", random.randint, 0, 1)  # one bit
        # Initialisation
        # Structure initializers
        # individual
        toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, NBIT)
        # population
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        # evaluation function
        toolbox.register("evaluate", self.evaluate)
        # crossover
        toolbox.register("mate", tools.cxTwoPoint)  # two-point crossover
        # mutation
        toolbox.register("mutate", tools.mutFlipBit, indpb=INDPB)  # bit flip
        # selection
        toolbox.register("select", tools.selTournament, tournsize=3)  # tournament
        # toolbox.register("select", tools.selRoulette)  # roulette
        # toolbox.register("select", tools.selBest)  # ranking
        self.toolbox = toolbox

    # Binary-to-number conversion
    def b2n(self, l):
        """Decode one parameter's bit string via the LOCUS weights."""
        # l: bit string for a single parameter
        return sum(l*LOCUS)

    # Range conversion
    def scale(self, a, fromBound, toBound, t):
        """Linearly rescale *a* from fromBound to toBound, cast with *t*
        and clamp to toBound."""
        # a: source value
        # fromBound, toBound: ranges before/after conversion
        # t: cast type (e.g. int or float)
        (min1, max1) = fromBound
        (min2, max2) = toBound
        ret = a/(max1-min1)*(max2-min2)+min2
        ret = t(ret)
        ret = max(min2, ret)
        ret = min(max2, ret)
        return ret

    # Chromosome-to-parameter-vector conversion
    def getX(self, individual):
        """Decode a chromosome into the parameter vector declared in
        PARAMETERS (one value per parameter)."""
        # individual: bit string for one whole individual
        # Collect the decoded value of every optimised parameter
        ls = np.array(individual).reshape([NPARAM, NLOCUS])
        ret = []
        for i, l in enumerate(ls):
            # Ax-style parameter definition table
            p = PARAMETERS[i]
            type = p['type']
            if type == 'range':
                # type == 'range': continuous interval
                bounds = p['bounds']  # range
                value_type = p['value_type']  # type name
                # NOTE(review): eval() on value_type is only safe while
                # PARAMETERS is a trusted, hard-coded table.
                t = eval(value_type)
                xmin = t(bounds[0])
                xmax = t(bounds[1])
                # decode bits and rescale into [xmin, xmax]
                x = self.scale(self.b2n(l), (0, sum(LOCUS)), (xmin, xmax), t)
            elif type == 'choice':
                # type == 'choice': pick one of several values
                values = p['values']  # choices
                bounds = [values[0], values[-1]]  # range
                value_type = p['value_type']  # type name
                # type cast (see NOTE above about eval)
                t = eval(value_type)
                xmin = t(bounds[0])
                xmax = t(bounds[1])
                # decode bits and rescale into [xmin, xmax]
                x = self.scale(self.b2n(l), (0, sum(LOCUS)), (xmin, xmax), t)
                # bucket the scaled value into one of the choices
                n = len(values)
                for j in range(n):
                    a = xmin + (xmax - xmin)/n * j
                    b = xmin + (xmax - xmin)/n * (j+1)
                    if x >= a and x < b:
                        x = values[j]
                        break
            elif type == 'fixed':
                # type == 'fixed': constant value
                value = p['value']  # fixed value
                x = value
            else:
                raise ValueError("unknown parameter type", type)
            ret.append(x)
        return np.array(ret)

    # count = 0
    # Evaluation function
    def evaluate(self, individual):
        """Score one individual; DEAP expects a fitness tuple."""
        # self.count += 1
        # print(self.count)
        # Decode the chromosome into the parameter vector
        x = self.getX(individual)
        score = 0
        if self.cb_step:
            # Step callback:
            # send the parameters to the client and wait for the score
            score = self.cb_step(x)
        else:
            # Stand-alone mode: built-in test objective
            x1 = x[0]
            x2 = x[1]
            # Booth Function (global minimum 0 at (1, 3))
            score = (x1 + 2*x2 - 7)**2 + (2*x1 + x2 - 5)**2
        return score,

    # Optimisation entry point
    def optimize(self, ngen=NGEN):
        """Run the GA for *ngen* generations and report the best
        parameter vector (also passed to cb_end when set)."""
        # random.seed(64)
        pop = self.toolbox.population(n=NPOP)
        # Hall of fame: keeps the single best individual seen
        hof = tools.HallOfFame(1)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)
        pop, log = algorithms.eaSimple(pop, self.toolbox, cxpb=CXPB, mutpb=MUTPB, ngen=ngen,
                                       stats=stats, halloffame=hof, verbose=True)
        # Best parameters: the individual closest to the optimum
        best_ind = hof.items[0]
        print(best_ind)
        print(best_ind.fitness.values)
        best_parameters = self.getX(best_ind)
        print(best_parameters)
        if self.cb_end:
            # End callback: notify the client of the best parameters
            self.cb_end(best_parameters)
        # return pop, log, hof
if __name__ == "__main__":
    # Stand-alone run: optimise the built-in Booth function demo.
    obj = Optimizer()
    obj.optimize()
| [
"deap.base.Toolbox",
"deap.tools.Statistics",
"deap.creator.create",
"numpy.array",
"deap.algorithms.eaSimple",
"deap.tools.HallOfFame"
] | [((441, 480), 'numpy.array', 'np.array', (['[128, 64, 32, 16, 8, 4, 2, 1]'], {}), '([128, 64, 32, 16, 8, 4, 2, 1])\n', (449, 480), True, 'import numpy as np\n'), ((900, 959), 'deap.creator.create', 'creator.create', (['"""FitnessMin"""', 'base.Fitness'], {'weights': '(-1.0,)'}), "('FitnessMin', base.Fitness, weights=(-1.0,))\n", (914, 959), False, 'from deap import creator\n'), ((968, 1056), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'array.array'], {'typecode': '"""b"""', 'fitness': 'creator.FitnessMin'}), "('Individual', array.array, typecode='b', fitness=creator.\n FitnessMin)\n", (982, 1056), False, 'from deap import creator\n'), ((1071, 1085), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (1083, 1085), False, 'from deap import base\n'), ((4108, 4121), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (4116, 4121), True, 'import numpy as np\n'), ((4824, 4843), 'deap.tools.HallOfFame', 'tools.HallOfFame', (['(1)'], {}), '(1)\n', (4840, 4843), False, 'from deap import tools\n'), ((4860, 4908), 'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values)'], {}), '(lambda ind: ind.fitness.values)\n', (4876, 4908), False, 'from deap import tools\n'), ((5082, 5202), 'deap.algorithms.eaSimple', 'algorithms.eaSimple', (['pop', 'self.toolbox'], {'cxpb': 'CXPB', 'mutpb': 'MUTPB', 'ngen': 'ngen', 'stats': 'stats', 'halloffame': 'hof', 'verbose': '(True)'}), '(pop, self.toolbox, cxpb=CXPB, mutpb=MUTPB, ngen=ngen,\n stats=stats, halloffame=hof, verbose=True)\n', (5101, 5202), False, 'from deap import algorithms\n'), ((2506, 2526), 'numpy.array', 'np.array', (['individual'], {}), '(individual)\n', (2514, 2526), True, 'import numpy as np\n')] |
"""
Module to process and analyse rheology data
containing stress ramps
Created: March 24th, 2020
Author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class Rstressramp():
"""
Class with the functions relevant to stress ramps
Main focus on extracting data from .csv files,
computing K' and data visualization
"""
def readcsv_full2ramp(filename,
export = True,
file_export = None,
action_name = 'viscometry ramp',
variables = ['sample des', 'stress', 'strain (sample)'],
sep = ',',
dec = '.'):
"""
Function to select the desired data from raw .csv files.
TO DO: combine this and Rtimedep.readcsv_full2time to a general function
INPUT
filename : string, file to read
export : if True, the selected data is exported to a .csv file
file_export : string, name of the file where the data will be exported.
if None, then attaches the suffix '_clean_stress_ramp'
to the file name.
action_name : string, name of the dataset where the ramp data is
variables : list of strings, desired variables to be extracted. The
name can be a partial match of the column name, and is
case insensitive. If more than one column matches a given
variable name, all the corresponding columns are included.
sep : string, character used as a delimiter in the .csv file
dec : string, character used as a decimal separator in the .csv file
OUTPUT
select_data : data frame with the selected data. Only returns the value
if export = False. When export = True, the function only
exports the data without returning any values.
"""
# Import the file as a data frame
data_input = pd.read_csv(filename,
sep = sep, decimal = dec)
print('\n Successfully imported the file: ')
print(filename)
# Because there is more than one action in the file,
# select only the data for the stress ramp
# TO DO: make this selection optional for the user
data_frame = Rstressramp.splitaction_ramp(data_input,
action_name = action_name)
# Find the columns that match the desired variable names
# and select the data within.
columns = []
for ivar in variables:
print('\n Variable to search:', ivar)
column_names = [x for x in data_frame.columns if ivar in x.lower()]
print('Variables found:', column_names)
columns.extend(column_names)
select_data = data_frame[columns]
# Export the data to the file specified in file_export or
# return the data frame if export == False.
if export == True:
if file_export == None:
file_export = filename.replace('.csv','_clean_stress_ramp.csv')
select_data.to_csv(file_export,
index=False,
sep = sep, decimal = dec)
print('\n Selected data exported to:', file_export)
else:
return select_data
def splitaction_ramp(data_frame,
action_header = 'Action Name',
action_name = 'viscometry ramp'):
"""
Function to extract the stress ramp data from
a file with multiple types of measurement
INPUT
data_frame : pandas data frame with the full data
action_header : string with the name of the column
containing the type of measurement, or action,
action_name : string with the name of the measurement,
or action. It accepts a partial match,
and is case insensitive.
OUTPUT
select_data : pandas data frame containing only the
stress ram[] data.
"""
print('\n Splitting data by action name: ', action_name)
# Gets all the actions within the data frame
iaction = [x for x in data_frame[action_header].unique() if action_name in x.lower()]
print(iaction)
data_frame.set_index(action_header, inplace = True)
# Find the location of the desired action, and save to a data frame
# If the action name is not found, it prints an error message
try:
select_data = data_frame.loc[iaction]
select_data.reset_index(inplace = True)
except IndexError:
print('ERROR: Action name not found')
select_data = None
return select_data
def compute_k(stress, strain, show = None,
remove_neg = True):
"""
Function to compute the differential storage modulus
from the slope of the stress vs strain curve.
INPUT
stress : numpy array or list, Shear Stress (in Pa) data
strain : numpy array or list, Shear Strain (in %) data
show : 'stress', 'strain', 'both', or None. Plots the result
remove_neg : if True, removes data where strain is negative
OUTPUT
stress : numpy array, mean value of the stress (in Pa) where k is computed
strain : numpy array, mean value of strain (in %) where k is computed
k : numpy array, differential storage modulus, (in Pa)
"""
# Work with numpy arrays
stress = np.array(stress)
strain = np.array(strain)
# Start by cleaning the data from any NaN value
ind_nan = np.isnan(strain) | np.isnan(stress)
stress = stress[~ind_nan]
strain = strain[~ind_nan]
# Clean the data from values after rupture, strain must be
# less than 5000%
ind_nonrupture = np.where(strain < 5e3)[0]
stress = stress[ind_nonrupture]
strain = strain[ind_nonrupture]
# Remove data where strain is negative. Note that if recording
# the absolute strain of the sample, strain can be negative
# in the initial interval. This data is tipically not useful
# and therefore not desired.
if remove_neg == True:
ind_positive = np.where(strain >= 0)
stress = stress[ind_positive]
strain = strain[ind_positive]
# Compute the differential values of strain and stress
diff_stress = stress[1:] - stress[:-1]
diff_strain = strain[1:] - strain[:-1]
# Compute k' and the mean values of stress and strain
k = diff_stress / diff_strain * 100 # multiplied by 100, because strain is in %
stress = (stress[1:] + stress[:-1])/2
strain = (strain[1:] + strain[:-1])/2
# Show the results if desired
if show == 'stress': Rstressramp.plot_k([stress], k)
elif show == 'strain': Rstressramp.plot_k([strain], k)
elif show == 'both': Rstressramp.plot_k([stress, strain], k)
elif show is not None: print('Error: cannot plot: ', show)
return [stress, strain, k]
def plot_k(x, k, linewidth = 1.5,
marker = 'o', color = 'k', marker_facecolor = 'k'):
"""
Function to plot, in log scale, the differential storage modulus, k
as a function of stress, strain, or both.
INPUT
x : list of numpy arrays of dependent variables
k : numpy array, differential storage modulus
linewidth : float, width of the line to plot
marker : string, marker of the lineplot, needs to be compatible with
matplotlib.pyplot
color : color for the lineplot, and marker border, needs to
be compatible with matplotlib.pyplot
marker_facecolor : color of the marker, compatible with
matplotlib.pyplot
"""
# Plot the first variable
x1 = x[0]
plt.figure(figsize = (9,5))
plt.plot(x1, k, c = color, lw = linewidth, marker = marker,
mec = color, mfc = marker_facecolor)
plt.loglog()
plt.ylabel('$K\'$ (Pa)')
# If there is more than one dependent variable,
# Plot also the second variable in a different figure
try:
x2 = x[1]
plt.xlabel('$\sigma$ (Pa)')
plt.pause(0.1)
plt.figure(figsize =(9, 5))
plt.plot(x2, k, c = color, lw = linewidth, marker = marker,
mec = color, mfc = marker_facecolor)
plt.loglog()
plt.ylabel('$K\'$ (Pa)')
plt.xlabel('$\gamma$ (%)')
except IndexError: pass
def export_kall(data_frame, file_export = None,
remove_neg = True,
group_header = 'Sample Description',
subgroup_header = None,
stress_header = 'Shear stress(Pa)',
strain_header = 'Shear strain (sample)(%)'):
"""
Function to compute the differential storage modulus
for all the data groups (e.g. samples, interals, experiments)
within a data_frame
INPUT
data_frame : pandas data frame with the full data
file_export : string, name of the file where data will be exported
if None, it saves to 'All_k_curves.csv'
remove_neg : if True, removes data where strain is negative
group_header : string, name of the column where the data group label are
subgroup_header : string, name of the column where the sub dataset label are
stress_header : string, name of the column where the stress data is
strain_header : string, name of the column where the strain data is
OUTPUT
all_data : data frame with the computed stress, strain, k'
It also saves the data_rame to file_export.
"""
groups_all = []
subgroups_all = []
s_all = []
y_all = []
k_all = []
for igroup in data_frame[group_header].unique():
data_group = data_frame.loc[data_frame[group_header] == igroup]
try:
list_subgroups = data_group[subgroup_header].unique()
subset_header = subgroup_header
except KeyError:
list_subgroups = [igroup]
subset_header = group_header
for isubset in list_subgroups:
data_subgroup = data_group.loc[data_group[subset_header] == isubset]
stress = np.array(data_group[stress_header])
strain = np.array(data_group[strain_header])
[s, y, k] = Rstressramp.compute_k(stress, strain, remove_neg = remove_neg)
groups_all.extend([igroup]*len(s))
subgroups_all.extend([isubset]*len(s))
s_all.extend(s)
y_all.extend(y)
k_all.extend(k)
all_data = pd.DataFrame()
all_data[group_header] = groups_all
try: subgroup_header[0]; all_data[subgroup_header] = subgroups_all
except TypeError: pass
all_data['Stress (Pa)'] = s_all
all_data['Strain (%)'] = y_all
all_data['K prime (Pa)'] = k_all
if file_export is None: file_export = 'All_k_curves.csv'
all_data.to_csv(file_export, index = False)
return all_data
def mean_kall_interp(filename, xvariable, num_interp = 100, show_plot = True,
                     sample_header = 'Sample Description',
                     stress_header = 'Stress (Pa)',
                     strain_header = 'Strain (%)',
                     k_header = 'K prime (Pa)',
                     sep = ',', dec = '.'):
    """
    Compute the mean differential elastic modulus (K') curve over all
    samples in a file, by interpolating each sample onto a common
    log-spaced axis.
    INPUT
    filename : string, name of the csv file with the whole data
    xvariable : string, must contain 'stress' or 'strain', indicating
        over which variable to compute the mean.
    num_interp : int, number of points of the common interpolation axis
    show_plot : if True, shows the results in a plot
    sample_header : string, name of the column with the sample label
    stress_header : string, name of the column with the stress data
    strain_header : string, name of the column with the strain data
    k_header : string, name of the column with the K' data
    sep : string, character used as delimiter in csv file
    dec : string, character used as decimal separator in csv file
    OUTPUT
    xinterp : numpy array, common axis used for interpolation
    kmean : numpy array, mean curve of k
    kstd : numpy array, standard deviation curve of k
    RAISES
    ValueError : if xvariable contains neither 'stress' nor 'strain'
    """
    # Read data and get all the samples within the data frame
    data = pd.read_csv(filename, sep = sep, decimal = dec)
    all_samples = data[sample_header].unique()
    # Define which dependent variable to extract
    if 'stress' in xvariable:
        xvar = stress_header
    elif 'strain' in xvariable:
        xvar = strain_header
    else:
        # Previously an unrecognized value left `xvar` unbound and caused a
        # confusing NameError further down; fail early and clearly instead.
        raise ValueError("xvariable must contain 'stress' or 'strain', "
                         "got %r" % (xvariable,))
    # Loop to get mean values of minimum and maximum xdata for the samples
    xmin = []
    xmax = []
    for isample in all_samples:
        data_sample = data.loc[data[sample_header] == isample]
        xsample = np.array(data_sample[xvar])
        xmin.append(np.min(xsample))
        xmax.append(np.max(xsample))
    xmin_avg = np.mean(np.array(xmin))
    xmax_avg = np.mean(np.array(xmax))
    xmax_std = np.std(np.array(xmax))
    print('Rupture: ', xmax_avg, '+/-', xmax_std)
    # Build the common log-spaced interpolation vector
    xmin_log = np.log10(xmin_avg)
    xmax_log = np.log10(xmax_avg)
    xinterp = np.logspace(xmin_log, xmax_log, num = num_interp)
    # Interpolate every sample curve onto the common axis.
    # NOTE(review): np.interp assumes each sample's x values are increasing,
    # which holds for a monotonic stress/strain ramp -- confirm for new data.
    k_all = []
    for isample in all_samples:
        data_sample = data.loc[data[sample_header] == isample]
        xsample = data_sample[xvar]
        ksample = data_sample[k_header]
        k_all.append(np.interp(xinterp, xsample, ksample))
    k_all = np.array(k_all)
    kmean = np.mean(k_all, axis = 0)
    kstd = np.std(k_all, axis = 0)
    # Plot the average curve and standard deviation, if desired
    if show_plot == True:
        plt.fill_between(xinterp, kmean - kstd, kmean + kstd, color = 'lightgray',
                         alpha = 0.8)
        plt.plot(xinterp, kmean, c = 'darkgray', marker = 'o', mfc = 'w')
        plt.ylabel('$K\'$ (Pa)')
        plt.xlabel(xvar)
        plt.loglog()
    return [xinterp, kmean, kstd]
def mean_kall_window(filename, xvariable, xmin_log = -1, xmax_log = 5, winavg_number = 50,
                     show_plot = True,
                     sample_header = 'Sample Description',
                     stress_header = 'Stress (Pa)',
                     strain_header = 'Strain (%)',
                     k_header = 'K prime (Pa)',
                     sep = ',', dec = '.'):
    """
    Compute the mean curve for the differential elastic modulus for all the
    data within a file, based on window averaging (not interpolation).
    INPUT
    filename : string, name of the csv file with the whole data
    xvariable : string, must contain 'stress' or 'strain', indicating
        over which variable to compute the mean.
    xmin_log : float, minimum value for average -> 10**xmin_log
    xmax_log : float, maximum value for average -> 10**xmax_log
    winavg_number : number of window edges used to average, in logspace
        (the output has winavg_number - 1 entries, one per window)
    show_plot : if True, shows the results in a plot
    sample_header : string, name of the column with the sample label
    stress_header : string, name of the column with the stress data
    strain_header : string, name of the column with the strain data
    k_header : string, name of the column with the K' data
    sep : string, character used as delimiter in csv file
    dec : string, character used as decimal separator in csv file
    OUTPUT
    xmean : numpy array, mean value of the xvariable per window (nan where
        a window contains no data)
    kmean : numpy array, mean curve of k per window (nan where empty)
    kstd : numpy array, standard deviation curve of k per window
    RAISES
    ValueError : if xvariable contains neither 'stress' nor 'strain'
    """
    # Read data and get all the samples within the data frame
    data = pd.read_csv(filename, sep = sep, decimal = dec)
    all_samples = data[sample_header].unique()
    # Define which dependent variable to extract
    if 'stress' in xvariable:
        xvar = stress_header
    elif 'strain' in xvariable:
        xvar = strain_header
    else:
        # Previously an unrecognized value left `xvar` unbound and caused a
        # confusing NameError further down; fail early and clearly instead.
        raise ValueError("xvariable must contain 'stress' or 'strain', "
                         "got %r" % (xvariable,))
    xmean = []
    kmean = []
    kstd = []
    # Window edges, log-spaced; rounded as in the original implementation
    avg_windows = np.logspace(xmin_log, xmax_log, num = winavg_number)
    avg_windows = [round(x, 3) for x in avg_windows]
    for dw in range(len(avg_windows) - 1):
        x_all = []
        k_all = []
        for isample in all_samples:
            # Extract the xvariable and the k data for a given sample
            data_sample = data.loc[data[sample_header] == isample]
            xdata = data_sample[xvar]
            kdata = data_sample[k_header]
            # Select the data within the avg window and store it
            ind_selec = (xdata > avg_windows[dw]) & (xdata <= avg_windows[dw + 1])
            x_all.extend(xdata[ind_selec])
            k_all.extend(kdata[ind_selec])
        # Convert list to numpy array for mean and isnan to work properly
        x_all = np.array(x_all)
        k_all = np.array(k_all)
        if x_all.size:
            # Mean/std over the non-nan values within the window
            xmean.append(np.mean(x_all[~np.isnan(x_all)]))
            kmean.append(np.mean(k_all[~np.isnan(k_all)]))
            kstd.append(np.std(k_all[~np.isnan(k_all)]))
        else:
            # Empty window: keep one nan entry so output length is stable.
            # (The old code got the same nan from np.mean of an empty slice,
            # plus a spurious RuntimeWarning; its `except TypeError` never
            # fired because no TypeError is raised in that case.)
            xmean.append(np.nan)
            kmean.append(np.nan)
            kstd.append(np.nan)
    # Convert from list to numpy array
    xmean = np.array(xmean)
    kmean = np.array(kmean)
    kstd = np.array(kstd)
    # Plot the average curve and standard deviation, if desired
    if show_plot == True:
        plt.fill_between(xmean, kmean - kstd, kmean + kstd, color = 'lightgray',
                         alpha = 0.8)
        plt.plot(xmean, kmean, c = 'darkgray', marker = 'o', mfc = 'w')
        plt.ylabel('$K\'$ (Pa)')
        plt.xlabel(xvar)
        plt.loglog()
    return [xmean, kmean, kstd]
| [
"matplotlib.pyplot.loglog",
"pandas.read_csv",
"numpy.logspace",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.interp",
"matplotlib.pyplot.fill_between",
"pandas.DataFrame",
"numpy.std",
"numpy.max",
"numpy.log10",
"matplotlib.pyplot.pause",
"numpy.min",
"matplotlib.pyp... | [((2099, 2142), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': 'sep', 'decimal': 'dec'}), '(filename, sep=sep, decimal=dec)\n', (2110, 2142), True, 'import pandas as pd\n'), ((5868, 5884), 'numpy.array', 'np.array', (['stress'], {}), '(stress)\n', (5876, 5884), True, 'import numpy as np\n'), ((5902, 5918), 'numpy.array', 'np.array', (['strain'], {}), '(strain)\n', (5910, 5918), True, 'import numpy as np\n'), ((8340, 8366), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 5)'}), '(figsize=(9, 5))\n', (8350, 8366), True, 'import matplotlib.pyplot as plt\n'), ((8376, 8467), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'k'], {'c': 'color', 'lw': 'linewidth', 'marker': 'marker', 'mec': 'color', 'mfc': 'marker_facecolor'}), '(x1, k, c=color, lw=linewidth, marker=marker, mec=color, mfc=\n marker_facecolor)\n', (8384, 8467), True, 'import matplotlib.pyplot as plt\n'), ((8498, 8510), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (8508, 8510), True, 'import matplotlib.pyplot as plt\n'), ((8519, 8542), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K\'$ (Pa)"""'], {}), '("$K\'$ (Pa)")\n', (8529, 8542), True, 'import matplotlib.pyplot as plt\n'), ((11421, 11435), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11433, 11435), True, 'import pandas as pd\n'), ((13343, 13386), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': 'sep', 'decimal': 'dec'}), '(filename, sep=sep, decimal=dec)\n', (13354, 13386), True, 'import pandas as pd\n'), ((14192, 14210), 'numpy.log10', 'np.log10', (['xmin_avg'], {}), '(xmin_avg)\n', (14200, 14210), True, 'import numpy as np\n'), ((14230, 14248), 'numpy.log10', 'np.log10', (['xmax_avg'], {}), '(xmax_avg)\n', (14238, 14248), True, 'import numpy as np\n'), ((14267, 14314), 'numpy.logspace', 'np.logspace', (['xmin_log', 'xmax_log'], {'num': 'num_interp'}), '(xmin_log, xmax_log, num=num_interp)\n', (14278, 14314), True, 'import numpy as np\n'), ((14725, 14740), 'numpy.array', 
'np.array', (['k_all'], {}), '(k_all)\n', (14733, 14740), True, 'import numpy as np\n'), ((14757, 14779), 'numpy.mean', 'np.mean', (['k_all'], {'axis': '(0)'}), '(k_all, axis=0)\n', (14764, 14779), True, 'import numpy as np\n'), ((14797, 14818), 'numpy.std', 'np.std', (['k_all'], {'axis': '(0)'}), '(k_all, axis=0)\n', (14803, 14818), True, 'import numpy as np\n'), ((16990, 17033), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': 'sep', 'decimal': 'dec'}), '(filename, sep=sep, decimal=dec)\n', (17001, 17033), True, 'import pandas as pd\n'), ((17395, 17445), 'numpy.logspace', 'np.logspace', (['xmin_log', 'xmax_log'], {'num': 'winavg_number'}), '(xmin_log, xmax_log, num=winavg_number)\n', (17406, 17445), True, 'import numpy as np\n'), ((18697, 18712), 'numpy.array', 'np.array', (['xmean'], {}), '(xmean)\n', (18705, 18712), True, 'import numpy as np\n'), ((18729, 18744), 'numpy.array', 'np.array', (['kmean'], {}), '(kmean)\n', (18737, 18744), True, 'import numpy as np\n'), ((18760, 18774), 'numpy.array', 'np.array', (['kstd'], {}), '(kstd)\n', (18768, 18774), True, 'import numpy as np\n'), ((5994, 6010), 'numpy.isnan', 'np.isnan', (['strain'], {}), '(strain)\n', (6002, 6010), True, 'import numpy as np\n'), ((6013, 6029), 'numpy.isnan', 'np.isnan', (['stress'], {}), '(stress)\n', (6021, 6029), True, 'import numpy as np\n'), ((6217, 6242), 'numpy.where', 'np.where', (['(strain < 5000.0)'], {}), '(strain < 5000.0)\n', (6225, 6242), True, 'import numpy as np\n'), ((6631, 6652), 'numpy.where', 'np.where', (['(strain >= 0)'], {}), '(strain >= 0)\n', (6639, 6652), True, 'import numpy as np\n'), ((8712, 8740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\sigma$ (Pa)"""'], {}), "('$\\\\sigma$ (Pa)')\n", (8722, 8740), True, 'import matplotlib.pyplot as plt\n'), ((8752, 8766), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (8761, 8766), True, 'import matplotlib.pyplot as plt\n'), ((8779, 8805), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': '(9, 5)'}), '(figsize=(9, 5))\n', (8789, 8805), True, 'import matplotlib.pyplot as plt\n'), ((8819, 8910), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'k'], {'c': 'color', 'lw': 'linewidth', 'marker': 'marker', 'mec': 'color', 'mfc': 'marker_facecolor'}), '(x2, k, c=color, lw=linewidth, marker=marker, mec=color, mfc=\n marker_facecolor)\n', (8827, 8910), True, 'import matplotlib.pyplot as plt\n'), ((8945, 8957), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (8955, 8957), True, 'import matplotlib.pyplot as plt\n'), ((8970, 8993), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K\'$ (Pa)"""'], {}), '("$K\'$ (Pa)")\n', (8980, 8993), True, 'import matplotlib.pyplot as plt\n'), ((9007, 9034), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma$ (%)"""'], {}), "('$\\\\gamma$ (%)')\n", (9017, 9034), True, 'import matplotlib.pyplot as plt\n'), ((13842, 13869), 'numpy.array', 'np.array', (['data_sample[xvar]'], {}), '(data_sample[xvar])\n', (13850, 13869), True, 'import numpy as np\n'), ((13980, 13994), 'numpy.array', 'np.array', (['xmin'], {}), '(xmin)\n', (13988, 13994), True, 'import numpy as np\n'), ((14023, 14037), 'numpy.array', 'np.array', (['xmax'], {}), '(xmax)\n', (14031, 14037), True, 'import numpy as np\n'), ((14065, 14079), 'numpy.array', 'np.array', (['xmax'], {}), '(xmax)\n', (14073, 14079), True, 'import numpy as np\n'), ((14624, 14660), 'numpy.interp', 'np.interp', (['xinterp', 'xsample', 'ksample'], {}), '(xinterp, xsample, ksample)\n', (14633, 14660), True, 'import numpy as np\n'), ((14932, 15019), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xinterp', '(kmean - kstd)', '(kmean + kstd)'], {'color': '"""lightgray"""', 'alpha': '(0.8)'}), "(xinterp, kmean - kstd, kmean + kstd, color='lightgray',\n alpha=0.8)\n", (14948, 15019), True, 'import matplotlib.pyplot as plt\n'), ((15056, 15115), 'matplotlib.pyplot.plot', 'plt.plot', (['xinterp', 'kmean'], {'c': '"""darkgray"""', 'marker': '"""o"""', 'mfc': 
'"""w"""'}), "(xinterp, kmean, c='darkgray', marker='o', mfc='w')\n", (15064, 15115), True, 'import matplotlib.pyplot as plt\n'), ((15134, 15157), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K\'$ (Pa)"""'], {}), '("$K\'$ (Pa)")\n', (15144, 15157), True, 'import matplotlib.pyplot as plt\n'), ((15171, 15187), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xvar'], {}), '(xvar)\n', (15181, 15187), True, 'import matplotlib.pyplot as plt\n'), ((15200, 15212), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (15210, 15212), True, 'import matplotlib.pyplot as plt\n'), ((18258, 18273), 'numpy.array', 'np.array', (['x_all'], {}), '(x_all)\n', (18266, 18273), True, 'import numpy as np\n'), ((18294, 18309), 'numpy.array', 'np.array', (['k_all'], {}), '(k_all)\n', (18302, 18309), True, 'import numpy as np\n'), ((18886, 18971), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xmean', '(kmean - kstd)', '(kmean + kstd)'], {'color': '"""lightgray"""', 'alpha': '(0.8)'}), "(xmean, kmean - kstd, kmean + kstd, color='lightgray',\n alpha=0.8)\n", (18902, 18971), True, 'import matplotlib.pyplot as plt\n'), ((19008, 19065), 'matplotlib.pyplot.plot', 'plt.plot', (['xmean', 'kmean'], {'c': '"""darkgray"""', 'marker': '"""o"""', 'mfc': '"""w"""'}), "(xmean, kmean, c='darkgray', marker='o', mfc='w')\n", (19016, 19065), True, 'import matplotlib.pyplot as plt\n'), ((19084, 19107), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K\'$ (Pa)"""'], {}), '("$K\'$ (Pa)")\n', (19094, 19107), True, 'import matplotlib.pyplot as plt\n'), ((19121, 19137), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xvar'], {}), '(xvar)\n', (19131, 19137), True, 'import matplotlib.pyplot as plt\n'), ((19150, 19162), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (19160, 19162), True, 'import matplotlib.pyplot as plt\n'), ((10997, 11032), 'numpy.array', 'np.array', (['data_group[stress_header]'], {}), '(data_group[stress_header])\n', (11005, 11032), True, 'import numpy as np\n'), 
((11058, 11093), 'numpy.array', 'np.array', (['data_group[strain_header]'], {}), '(data_group[strain_header])\n', (11066, 11093), True, 'import numpy as np\n'), ((13894, 13909), 'numpy.min', 'np.min', (['xsample'], {}), '(xsample)\n', (13900, 13909), True, 'import numpy as np\n'), ((13935, 13950), 'numpy.max', 'np.max', (['xsample'], {}), '(xsample)\n', (13941, 13950), True, 'import numpy as np\n'), ((18429, 18444), 'numpy.isnan', 'np.isnan', (['x_all'], {}), '(x_all)\n', (18437, 18444), True, 'import numpy as np\n'), ((18492, 18507), 'numpy.isnan', 'np.isnan', (['k_all'], {}), '(k_all)\n', (18500, 18507), True, 'import numpy as np\n'), ((18553, 18568), 'numpy.isnan', 'np.isnan', (['k_all'], {}), '(k_all)\n', (18561, 18568), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from numpy.linalg import inv
from scipy.linalg import schur, sqrtm
import numpy as np
def invSqrt(a,b,c):
eps = 1e-12
mask = (b != 0)
r1 = mask * (c - a) / (2. * b + eps)
t1 = np.sign(r1) / (np.abs(r1) + np.sqrt(1. + r1*r1));
r = 1.0 / np.sqrt( 1. + t1*t1)
t = t1*r;
r = r * mask + 1.0 * (1.0 - mask);
t = t * mask;
x = 1. / np.sqrt( r*r*a - 2*r*t*b + t*t*c)
z = 1. / np.sqrt( t*t*a + 2*r*t*b + r*r*c)
d = np.sqrt( x * z)
x = x / d
z = z / d
new_a = r*r*x + t*t*z
new_b = -r*t*x + t*r*z
new_c = t*t*x + r*r *z
return new_a, new_b, new_c
def Ell2LAF(ell):
    """
    Convert one ellipse, given as [x, y, a, b, c] (center plus the unique
    entries of the symmetric conic matrix [[a, b], [b, c]]), into a 2x3
    local affine frame [A | t].
    """
    laf = np.zeros((2, 3))
    # Translation part: the ellipse center.
    laf[0, 2] = ell[0]
    laf[1, 2] = ell[1]
    a, b, c = ell[2], ell[3], ell[4]
    # Isotropic scale from the conic determinant: (a*c - b^2)^(1/4).
    sc = np.sqrt(np.sqrt(a * c - b * b))
    # Shape from the (det-normalized) inverse square root of the conic.
    ia, ib, ic = invSqrt(a, b, c)
    shape = np.array([[ia, ib], [ib, ic]]) / sc
    # Re-extract the residual scale, rectify the unit-determinant part so
    # that "up is up", then reapply the scale.
    det_sc = np.sqrt(shape[0, 0] * shape[1, 1] - shape[1, 0] * shape[0, 1])
    laf[0:2, 0:2] = rectifyAffineTransformationUpIsUp(shape / det_sc) * det_sc
    return laf
def rectifyAffineTransformationUpIsUp(A):
    """
    Remove the rotational component of the 2x2 affine shape A, returning an
    equivalent lower-triangular matrix (zero in the upper-right entry) with
    the same determinant, so that the frame's "up" direction is preserved.
    """
    # abs + tiny epsilon keep the sqrt defined for degenerate inputs.
    det = np.sqrt(np.abs(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1] + 1e-10))
    # Norm of the first row of A.
    first_row_norm = np.sqrt(A[0, 1] * A[0, 1] + A[0, 0] * A[0, 0])
    return np.array([
        [first_row_norm / det, 0.0],
        [(A[1, 1] * A[0, 1] + A[1, 0] * A[0, 0]) / (first_row_norm * det),
         det / first_row_norm],
    ])
def ells2LAFs(ells):
    """Convert an n x 5 array of ellipses [x, y, a, b, c] into n x 2 x 3 LAFs."""
    n = len(ells)
    out = np.zeros((n, 2, 3))
    for idx in range(n):
        out[idx, :, :] = Ell2LAF(ells[idx, :])
    return out
def LAF2pts(LAF, n_pts = 50):
    """
    Sample the boundary of a local affine frame for drawing.
    Maps the unit circle (plus the frame center as the first point) through
    the 2x3 frame LAF and returns an (n_pts + 1) x 2 array of points.
    """
    angles = np.linspace(0, 2 * np.pi, n_pts)
    # First point is the center (0, 0); the rest trace the unit circle.
    xs = np.concatenate([[0], np.sin(angles)]).reshape(1, -1)
    ys = np.concatenate([[0], np.cos(angles)]).reshape(1, -1)
    # Promote the frame to a 3x3 homography and the points to homogeneous coords.
    H = np.concatenate([LAF, np.array([[0, 0, 1]])])
    pts_h = np.concatenate([xs, ys, np.ones(xs.shape)])
    projected = np.matmul(H, pts_h).T
    # Dehomogenize.
    projected[:, 0] = projected[:, 0] / projected[:, 2]
    projected[:, 1] = projected[:, 1] / projected[:, 2]
    return projected[:, 0:2]
def convertLAFs_to_A23format(LAFs):
    """
    Normalize several LAF encodings to the n x 2 x 3 [A | t] form.
    Accepted inputs:
      * n x 2 x 3 -- already in [A, (x;y)] form; a deep copy is returned.
      * n x 7  -- flat rows: x, y, scale, a11, a12, a21, a22.
      * n x 6  -- flat rows: x, y, s*a11, s*a12, s*a21, s*a22.
    Anything else prints a message and returns None.
    """
    sh = LAFs.shape
    if len(sh) == 3 and sh[1] == 2 and sh[2] == 3:
        # Classical [A, (x;y)] matrix: copy so the caller's array is untouched.
        return deepcopy(LAFs)
    if len(sh) == 2 and sh[1] == 7:
        # x y scale a11 a12 a21 a22: multiply the scale into the affine part.
        out = np.zeros((sh[0], 2, 3))
        out[:, 0, 2] = LAFs[:, 0]
        out[:, 1, 2] = LAFs[:, 1]
        out[:, 0, 0] = LAFs[:, 2] * LAFs[:, 3]
        out[:, 0, 1] = LAFs[:, 2] * LAFs[:, 4]
        out[:, 1, 0] = LAFs[:, 2] * LAFs[:, 5]
        out[:, 1, 1] = LAFs[:, 2] * LAFs[:, 6]
        return out
    if len(sh) == 2 and sh[1] == 6:
        # x y s*a11 s*a12 s*a21 s*a22: scale already folded in.
        out = np.zeros((sh[0], 2, 3))
        out[:, 0, 2] = LAFs[:, 0]
        out[:, 1, 2] = LAFs[:, 1]
        out[:, 0, 0] = LAFs[:, 2]
        out[:, 0, 1] = LAFs[:, 3]
        out[:, 1, 0] = LAFs[:, 4]
        out[:, 1, 1] = LAFs[:, 5]
        return out
    print ('Unknown LAF format')
    return None
def LAFs2ell(in_LAFs):
    """
    Convert local affine frames (any format accepted by
    convertLAFs_to_A23format) back to ellipses [x, y, a, b, c], where
    [[a, b], [b, c]] is the symmetric conic matrix of each frame.
    """
    LAFs = convertLAFs_to_A23format(in_LAFs)
    ellipses = np.zeros((len(LAFs), 5))
    for idx in range(len(LAFs)):
        frame = deepcopy(LAFs[idx, :, :])
        # Isotropic scale of the frame (epsilon keeps the sqrt defined).
        scale = np.sqrt(frame[0, 0] * frame[1, 1]
                        - frame[0, 1] * frame[1, 0] + 1e-10)
        # Conic = U * diag(1 / (s_i * scale)^2) * U^T from the SVD of the
        # scale-normalized affine part.
        u, singular, v = np.linalg.svd(frame[0:2, 0:2] / scale,
                                        full_matrices=True)
        inv_sq = 1. / (singular * singular * scale * scale)
        conic = np.matmul(np.matmul(u, np.diag(inv_sq)), u.transpose())
        ellipses[idx, 0] = frame[0, 2]
        ellipses[idx, 1] = frame[1, 2]
        ellipses[idx, 2] = conic[0, 0]
        ellipses[idx, 3] = conic[0, 1]
        ellipses[idx, 4] = conic[1, 1]
    return ellipses
def visualize_LAFs(img, LAFs):
    """Draw every local affine frame in LAFs as a red ellipse on top of img."""
    frames = convertLAFs_to_A23format(LAFs)
    plt.figure()
    # Inverted image, as in the original visualization.
    plt.imshow(255 - img)
    for idx in range(len(frames)):
        boundary = LAF2pts(frames[idx, :, :])
        plt.plot(boundary[:, 0], boundary[:, 1], 'r')
    plt.show()
    return
def readMODS_keypointsFile(fname):
    """
    Parse a MODS keypoints file into per-detector / per-descriptor features.

    Expected plain-text layout (whitespace separated):
        n_detectors
        <detector_name> <n_descriptors>
          <descriptor_name> <n_features>
          <descriptor_length>
          x y scale a11 a12 a21 a22 [descriptor values...]   (n_features lines)

    Returns:
        dict mapping detector name -> descriptor name -> (LAFs, descriptors),
        where LAFs is an (n_features, 7) array of
        [x, y, mrSize * scale, a11, a12, a21, a22] and descriptors is an
        (n_features, descriptor_length) array, or None when the length is 0.
    """
    mrSize = 3.0 * np.sqrt(3.0)  # measurement-region multiplier applied to the scale
    features_dict = {}
    # Bug fix: the file is plain text and must be opened in text mode.
    # With 'rb' every line is bytes, so int(line) and line.split(' ') with a
    # str separator raise TypeError under Python 3.
    with open(fname, 'r') as f:
        lines = f.readlines()
    det_num = int(lines[0])
    current_pos = 1
    for det_idx in range(det_num):
        dd = lines[current_pos]
        dd = dd.strip().split(' ')
        det_name = dd[0]
        desc_num = int(dd[1])
        features_dict[det_name] = {}
        current_pos += 1
        print(det_name, desc_num)
        for desc_idx in range(desc_num):
            dd2 = lines[current_pos]
            dd2 = dd2.strip().split(' ')
            desc_name = dd2[0]
            features_num = int(dd2[1])
            print(desc_name, features_num)
            current_pos += 1
            desc_len = int(lines[current_pos])
            print(desc_len)
            LAFs = np.zeros((features_num, 7))
            if desc_len > 0:
                descriptors = np.zeros((features_num, desc_len))
            else:
                descriptors = None
            for feat_idx in range(features_num):
                current_pos += 1
                l = lines[current_pos].strip().split(' ')
                LAFs[feat_idx, 0:2] = np.array(l[0:2])
                LAFs[feat_idx, 2] = mrSize * np.array(float(l[2]))
                LAFs[feat_idx, 3:] = np.array(l[3:3 + 4])
                if desc_len > 0:
                    descriptors[feat_idx, :] = np.array(l[8:])
            features_dict[det_name][desc_name] = (LAFs, descriptors)
            current_pos += 1
    return features_dict
def readMODS_ExtractFeaturesFile(fname):
    """
    Parse a MODS "extract features" log file into per-detector /
    per-descriptor features.

    Same header layout as readMODS_keypointsFile, but each feature line is a
    wider record: x, y live at columns 14-15, the affine entries at 16-19,
    the scale at column 23, and the descriptor (if any) from column 26 on.

    Returns:
        dict mapping detector name -> descriptor name -> (LAFs, descriptors),
        where LAFs is an (n_features, 7) array of
        [x, y, mrSize * scale, a11, a12, a21, a22] and descriptors is an
        (n_features, descriptor_length) array, or None when the length is 0.
    """
    mrSize = 3.0 * np.sqrt(3.0)  # measurement-region multiplier applied to the scale
    features_dict = {}
    # Bug fix: the file is plain text and must be opened in text mode.
    # With 'rb' every line is bytes, so int(line) and line.split(' ') with a
    # str separator raise TypeError under Python 3.
    with open(fname, 'r') as f:
        lines = f.readlines()
    det_num = int(lines[0])
    current_pos = 1
    for det_idx in range(det_num):
        dd = lines[current_pos]
        dd = dd.strip().split(' ')
        det_name = dd[0]
        desc_num = int(dd[1])
        features_dict[det_name] = {}
        current_pos += 1
        print(det_name, desc_num)
        for desc_idx in range(desc_num):
            dd2 = lines[current_pos]
            dd2 = dd2.strip().split(' ')
            desc_name = dd2[0]
            features_num = int(dd2[1])
            print(desc_name, features_num)
            current_pos += 1
            desc_len = int(lines[current_pos])
            print(desc_len)
            LAFs = np.zeros((features_num, 7))
            if desc_len > 0:
                descriptors = np.zeros((features_num, desc_len))
            else:
                descriptors = None
            for feat_idx in range(features_num):
                current_pos += 1
                l = lines[current_pos].strip().split(' ')
                LAFs[feat_idx, 0:2] = np.array(l[14:16])
                LAFs[feat_idx, 2] = mrSize * np.array(float(l[23]))
                LAFs[feat_idx, 3:] = np.array(l[16:20])
                if desc_len > 0:
                    descriptors[feat_idx, :] = np.array(l[26:])
            features_dict[det_name][desc_name] = (LAFs, descriptors)
            current_pos += 1
    return features_dict
| [
"copy.deepcopy",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.linalg.svd",
"numpy.array",
"numpy.sin",
"numpy.linspace",
"numpy.sign",
"numpy.matmul",
"numpy.cos",
"numpy.dia... | [((550, 564), 'numpy.sqrt', 'np.sqrt', (['(x * z)'], {}), '(x * z)\n', (557, 564), True, 'import numpy as np\n'), ((748, 764), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (756, 764), True, 'import numpy as np\n'), ((974, 1020), 'numpy.sqrt', 'np.sqrt', (['(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1])'], {}), '(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1])\n', (981, 1020), True, 'import numpy as np\n'), ((1217, 1263), 'numpy.sqrt', 'np.sqrt', (['(A[0, 1] * A[0, 1] + A[0, 0] * A[0, 0])'], {}), '(A[0, 1] * A[0, 1] + A[0, 0] * A[0, 0])\n', (1224, 1263), True, 'import numpy as np\n'), ((1272, 1288), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1280, 1288), True, 'import numpy as np\n'), ((1625, 1657), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_pts'], {}), '(0, 2 * np.pi, n_pts)\n', (1636, 1657), True, 'import numpy as np\n'), ((3875, 3887), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3885, 3887), True, 'import matplotlib.pyplot as plt\n'), ((3892, 3913), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(255 - img)'], {}), '(255 - img)\n', (3902, 3913), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4047), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4045, 4047), True, 'import matplotlib.pyplot as plt\n'), ((277, 288), 'numpy.sign', 'np.sign', (['r1'], {}), '(r1)\n', (284, 288), True, 'import numpy as np\n'), ((341, 363), 'numpy.sqrt', 'np.sqrt', (['(1.0 + t1 * t1)'], {}), '(1.0 + t1 * t1)\n', (348, 363), True, 'import numpy as np\n'), ((456, 502), 'numpy.sqrt', 'np.sqrt', (['(r * r * a - 2 * r * t * b + t * t * c)'], {}), '(r * r * a - 2 * r * t * b + t * t * c)\n', (463, 502), True, 'import numpy as np\n'), ((503, 549), 'numpy.sqrt', 'np.sqrt', (['(t * t * a + 2 * r * t * b + r * r * c)'], {}), '(t * t * a + 2 * r * t * b + r * r * c)\n', (510, 549), True, 'import numpy as np\n'), ((870, 892), 'numpy.sqrt', 'np.sqrt', (['(a * c - b * b)'], {}), '(a * c - b * b)\n', (877, 892), True, 'import 
numpy as np\n'), ((929, 959), 'numpy.array', 'np.array', (['[[ia, ib], [ib, ic]]'], {}), '([[ia, ib], [ib, ic]])\n', (937, 959), True, 'import numpy as np\n'), ((1159, 1212), 'numpy.abs', 'np.abs', (['(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1] + 1e-10)'], {}), '(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1] + 1e-10)\n', (1165, 1212), True, 'import numpy as np\n'), ((1953, 1975), 'numpy.matmul', 'np.matmul', (['HLAF', 'H_pts'], {}), '(HLAF, H_pts)\n', (1962, 1975), True, 'import numpy as np\n'), ((2288, 2302), 'copy.deepcopy', 'deepcopy', (['LAFs'], {}), '(LAFs)\n', (2296, 2302), False, 'from copy import deepcopy\n'), ((3292, 3315), 'copy.deepcopy', 'deepcopy', (['LAFs[i, :, :]'], {}), '(LAFs[i, :, :])\n', (3300, 3315), False, 'from copy import deepcopy\n'), ((3330, 3392), 'numpy.sqrt', 'np.sqrt', (['(LAF[0, 0] * LAF[1, 1] - LAF[0, 1] * LAF[1, 0] + 1e-10)'], {}), '(LAF[0, 0] * LAF[1, 1] - LAF[0, 1] * LAF[1, 0] + 1e-10)\n', (3337, 3392), True, 'import numpy as np\n'), ((3405, 3461), 'numpy.linalg.svd', 'np.linalg.svd', (['(LAF[0:2, 0:2] / scale)'], {'full_matrices': '(True)'}), '(LAF[0:2, 0:2] / scale, full_matrices=True)\n', (3418, 3461), True, 'import numpy as np\n'), ((3998, 4033), 'matplotlib.pyplot.plot', 'plt.plot', (['ell[:, 0]', 'ell[:, 1]', '"""r"""'], {}), "(ell[:, 0], ell[:, 1], 'r')\n", (4006, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4115, 4127), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (4122, 4127), True, 'import numpy as np\n'), ((5774, 5786), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5781, 5786), True, 'import numpy as np\n'), ((292, 302), 'numpy.abs', 'np.abs', (['r1'], {}), '(r1)\n', (298, 302), True, 'import numpy as np\n'), ((305, 327), 'numpy.sqrt', 'np.sqrt', (['(1.0 + r1 * r1)'], {}), '(1.0 + r1 * r1)\n', (312, 327), True, 'import numpy as np\n'), ((1687, 1696), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1693, 1696), True, 'import numpy as np\n'), ((1707, 1718), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1715, 
1718), True, 'import numpy as np\n'), ((1763, 1772), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1769, 1772), True, 'import numpy as np\n'), ((1783, 1794), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1791, 1794), True, 'import numpy as np\n'), ((1905, 1921), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (1912, 1921), True, 'import numpy as np\n'), ((2406, 2429), 'numpy.zeros', 'np.zeros', (['(sh[0], 2, 3)'], {}), '((sh[0], 2, 3))\n', (2414, 2429), True, 'import numpy as np\n'), ((2805, 2828), 'numpy.zeros', 'np.zeros', (['(sh[0], 2, 3)'], {}), '((sh[0], 2, 3))\n', (2813, 2828), True, 'import numpy as np\n'), ((3585, 3595), 'numpy.diag', 'np.diag', (['W'], {}), '(W)\n', (3592, 3595), True, 'import numpy as np\n'), ((4951, 4978), 'numpy.zeros', 'np.zeros', (['(features_num, 7)'], {}), '((features_num, 7))\n', (4959, 4978), True, 'import numpy as np\n'), ((6610, 6637), 'numpy.zeros', 'np.zeros', (['(features_num, 7)'], {}), '((features_num, 7))\n', (6618, 6637), True, 'import numpy as np\n'), ((1841, 1860), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1849, 1860), True, 'import numpy as np\n'), ((5046, 5080), 'numpy.zeros', 'np.zeros', (['(features_num, desc_len)'], {}), '((features_num, desc_len))\n', (5054, 5080), True, 'import numpy as np\n'), ((5333, 5349), 'numpy.array', 'np.array', (['l[0:2]'], {}), '(l[0:2])\n', (5341, 5349), True, 'import numpy as np\n'), ((5462, 5482), 'numpy.array', 'np.array', (['l[3:3 + 4]'], {}), '(l[3:3 + 4])\n', (5470, 5482), True, 'import numpy as np\n'), ((6705, 6739), 'numpy.zeros', 'np.zeros', (['(features_num, desc_len)'], {}), '((features_num, desc_len))\n', (6713, 6739), True, 'import numpy as np\n'), ((6992, 7010), 'numpy.array', 'np.array', (['l[14:16]'], {}), '(l[14:16])\n', (7000, 7010), True, 'import numpy as np\n'), ((7124, 7142), 'numpy.array', 'np.array', (['l[16:20]'], {}), '(l[16:20])\n', (7132, 7142), True, 'import numpy as np\n'), ((5569, 5584), 'numpy.array', 'np.array', 
(['l[8:]'], {}), '(l[8:])\n', (5577, 5584), True, 'import numpy as np\n'), ((7231, 7247), 'numpy.array', 'np.array', (['l[26:]'], {}), '(l[26:])\n', (7239, 7247), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
from hypothesis import assume
def sample_program_configs(draw):
    """
    Hypothesis strategy body for the Paddle-Lite `increment` op test.
    Draws a single-element input shape, a float `step` attribute, and an
    input dtype variant, then builds the matching ProgramConfig.
    """
    # The number of elements in Input(X) should be 1 (op requirement).
    in_shape = draw(st.lists(st.integers(
        min_value=1, max_value=1), min_size=1, max_size=1))
    # `step` attribute of the increment op.
    step_data = draw(st.floats(min_value=0.1, max_value=0.5))
    input_type = draw(st.sampled_from(["type_int", "type_int64", "type_float"]))
    def generate_input1(*args, **kwargs):
        # float32 tensor of shape in_shape with values in [0, 1).
        return np.random.random(in_shape).astype(np.float32)
    def generate_input2(*args, **kwargs):
        # NOTE(review): np.random.randint(in_shape) treats `in_shape` as the
        # exclusive upper bound (high=[1] -> always 0), not as a size
        # argument; presumably np.random.randint(low, high, size=in_shape)
        # was intended -- confirm against the other op tests in this suite.
        return np.random.randint(in_shape).astype(np.int32)
    def generate_input3(*args, **kwargs):
        # NOTE(review): same suspicious randint usage as generate_input2.
        return np.random.randint(in_shape).astype(np.int64)
    # The single `increment` op under test: Out = X + step.
    build_ops = OpConfig(
        type = "increment",
        inputs = {
            "X" : ["input_data"],
            },
        outputs = {
            "Out": ["output_data"],
            },
        attrs = {
            "step" : step_data,
            })
    # Wire the op into a program, choosing the input generator per dtype.
    if input_type == "type_int":
        program_config = ProgramConfig(
            ops=[build_ops],
            weights={},
            inputs={
                "input_data":
                TensorConfig(data_gen=partial(generate_input2)),
            },
            outputs=["output_data"])
    elif input_type == "type_int64":
        program_config = ProgramConfig(
            ops=[build_ops],
            weights={},
            inputs={
                "input_data":
                TensorConfig(data_gen=partial(generate_input3)),
            },
            outputs=["output_data"])
    elif input_type == "type_float":
        program_config = ProgramConfig(
            ops=[build_ops],
            weights={},
            inputs={
                "input_data":
                TensorConfig(data_gen=partial(generate_input1)),
            },
            outputs=["output_data"])
    return program_config
| [
"sys.path.append",
"functools.partial",
"program_config.OpConfig",
"hypothesis.strategies.sampled_from",
"numpy.random.random",
"numpy.random.randint",
"hypothesis.strategies.integers",
"hypothesis.strategies.floats"
] | [((622, 643), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (637, 643), False, 'import sys\n'), ((1647, 1769), 'program_config.OpConfig', 'OpConfig', ([], {'type': '"""increment"""', 'inputs': "{'X': ['input_data']}", 'outputs': "{'Out': ['output_data']}", 'attrs': "{'step': step_data}"}), "(type='increment', inputs={'X': ['input_data']}, outputs={'Out': [\n 'output_data']}, attrs={'step': step_data})\n", (1655, 1769), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\n'), ((1193, 1232), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.1)', 'max_value': '(0.5)'}), '(min_value=0.1, max_value=0.5)\n', (1202, 1232), True, 'import hypothesis.strategies as st\n'), ((1256, 1313), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (["['type_int', 'type_int64', 'type_float']"], {}), "(['type_int', 'type_int64', 'type_float'])\n", (1271, 1313), True, 'import hypothesis.strategies as st\n'), ((1095, 1132), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(1)'}), '(min_value=1, max_value=1)\n', (1106, 1132), True, 'import hypothesis.strategies as st\n'), ((1373, 1399), 'numpy.random.random', 'np.random.random', (['in_shape'], {}), '(in_shape)\n', (1389, 1399), True, 'import numpy as np\n'), ((1476, 1503), 'numpy.random.randint', 'np.random.randint', (['in_shape'], {}), '(in_shape)\n', (1493, 1503), True, 'import numpy as np\n'), ((1578, 1605), 'numpy.random.randint', 'np.random.randint', (['in_shape'], {}), '(in_shape)\n', (1595, 1605), True, 'import numpy as np\n'), ((2076, 2100), 'functools.partial', 'partial', (['generate_input2'], {}), '(generate_input2)\n', (2083, 2100), False, 'from functools import partial\n'), ((2346, 2370), 'functools.partial', 'partial', (['generate_input3'], {}), '(generate_input3)\n', (2353, 2370), False, 'from functools import partial\n'), ((2616, 2640), 
'functools.partial', 'partial', (['generate_input1'], {}), '(generate_input1)\n', (2623, 2640), False, 'from functools import partial\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard
from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
from config import word_emb_param_names, pos_enc_param_names
def position_encoding_init(n_position, d_pos_vec):
    """
    Generate the initial values for the sinusoid position encoding table.

    Follows "Attention Is All You Need": each position is encoded by sin/cos
    waves over ``d_pos_vec // 2`` geometrically spaced timescales ranging
    from 1 down to 1e-4.

    :param n_position: number of positions (rows) in the table.
    :param d_pos_vec: encoding dimension (columns); odd dims are zero-padded.
    :return: float32 ndarray of shape [n_position, d_pos_vec].
    """
    channels = d_pos_vec
    position = np.arange(n_position)
    num_timescales = channels // 2
    log_timescale_increment = (np.log(float(1e4) / float(1)) /
                               (num_timescales - 1))
    # BUG FIX: the decay factor must be applied *inside* exp(); the previous
    # form `np.exp(np.arange(...)) * -log_timescale_increment` yielded
    # negative, exponentially growing values instead of the intended
    # geometric decay exp(-k * log(1e4) / (num_timescales - 1)).
    inv_timescales = np.exp(
        np.arange(num_timescales) * -log_timescale_increment)
    scaled_time = np.expand_dims(position, 1) * np.expand_dims(
        inv_timescales, 0)
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    # Zero-pad the last column when the encoding dimension is odd.
    signal = np.pad(signal, [[0, 0], [0, np.mod(channels, 2)]], 'constant')
    position_enc = signal
    return position_enc.astype("float32")
class NoamDecay(LearningRateDecay):
    """
    Noam learning-rate schedule: the rate grows linearly during warmup and
    then decays with the inverse square root of the step number, scaled by
    ``d_model**-0.5`` and a static multiplier.
    """

    def __init__(self,
                 d_model,
                 warmup_steps,
                 static_lr=2.0,
                 begin=1,
                 step=1,
                 dtype='float32'):
        super(NoamDecay, self).__init__(begin, step, dtype)
        self.d_model = d_model
        self.warmup_steps = warmup_steps
        self.static_lr = static_lr

    def step(self):
        """Return the learning rate for the current ``step_num``."""
        decay_part = self.create_lr_var(self.step_num**-0.5)
        warmup_part = self.create_lr_var(self.step_num *
                                         (self.warmup_steps**-1.5))
        # min() selects warmup growth early and sqrt decay later.
        lr_value = (self.d_model**-0.5) * layers.elementwise_min(
            decay_part, warmup_part) * self.static_lr
        return lr_value
class PrePostProcessLayer(Layer):
    """
    Applies a command string around a sub-layer: "a" adds a residual
    connection, "n" applies layer normalization, "d" applies dropout.
    """

    def __init__(self, process_cmd, normalized_shape=None):
        super(PrePostProcessLayer, self).__init__()
        # Only "n" needs a trainable sub-layer; "a" and "d" are stateless.
        for cmd in process_cmd:
            if cmd != "n":
                continue
            self._layer_norm = LayerNorm(
                normalized_shape=normalized_shape,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(1.)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.)))

    def forward(self, prev_out, out, process_cmd, dropout_rate=0.):
        """
        Apply each command in ``process_cmd`` to ``out`` in order.

        :param prev_out: residual input for the "a" command (skipped when
            falsy/None).
        :param out: tensor being processed.
        :param process_cmd: string of commands drawn from "a", "n", "d".
        :param dropout_rate: dropout probability for "d"; 0 disables it.
        :return: the processed tensor.
        """
        for cmd in process_cmd:
            if cmd == "a":
                # Residual connection; skipped when there is no prev_out.
                if prev_out:
                    out = out + prev_out
            elif cmd == "n":
                out = self._layer_norm(out)
            elif cmd == "d" and dropout_rate:
                out = layers.dropout(out,
                                     dropout_prob=dropout_rate,
                                     is_test=False)
        return out
class PositionwiseFeedForwardLayer(Layer):
    """
    Position-wise feed-forward network: a ReLU expansion to ``d_inner_hid``
    followed by a linear projection back to ``d_hid``, with optional
    dropout on the hidden activations.
    """

    def __init__(self, input_hid, d_inner_hid, d_hid, dropout_rate):
        super(PositionwiseFeedForwardLayer, self).__init__()
        self._i2h = Linear(input_dim=input_hid,
                           output_dim=d_inner_hid,
                           act="relu")
        self._h2o = Linear(input_dim=d_inner_hid, output_dim=d_hid)
        self._dropout_rate = dropout_rate

    def forward(self, x):
        """Run ``x`` through the expansion and projection; returns output."""
        h = self._i2h(x)
        if self._dropout_rate:
            h = layers.dropout(h,
                               dropout_prob=self._dropout_rate,
                               is_test=False)
        return self._h2o(h)
class MultiHeadAttentionLayer(Layer):
    """
    Multi-head scaled dot-product attention.

    Projects queries/keys/values into ``n_head`` heads, attends with an
    optional additive bias and k/v cache, then projects the concatenated
    heads back to ``d_model``.
    """

    def __init__(self,
                 d_key,
                 d_value,
                 d_model,
                 n_head=1,
                 dropout_rate=0.,
                 cache=None,
                 gather_idx=None,
                 static_kv=False):
        super(MultiHeadAttentionLayer, self).__init__()
        self._n_head = n_head
        self._d_key = d_key
        self._d_value = d_value
        self._d_model = d_model
        self._dropout_rate = dropout_rate
        # Per-head projections for queries, keys and values.
        self._q_fc = Linear(input_dim=d_model,
                            output_dim=d_key * n_head,
                            bias_attr=False)
        self._k_fc = Linear(input_dim=d_model,
                            output_dim=d_key * n_head,
                            bias_attr=False)
        self._v_fc = Linear(input_dim=d_model,
                            output_dim=d_value * n_head,
                            bias_attr=False)
        # BUG FIX: the output projection consumes the concatenated heads,
        # whose width is d_value * n_head (previously declared as d_model,
        # which only works when d_value * n_head == d_model).
        self._proj_fc = Linear(input_dim=d_value * n_head,
                               output_dim=self._d_model,
                               bias_attr=False)

    def forward(self,
                queries,
                keys,
                values,
                attn_bias,
                cache=None,
                gather_idx=None):
        """
        Compute multi-head attention.

        :param queries: query tensor.
        :param keys: key tensor; None means self-attention on ``queries``.
        :param values: value tensor; None falls back to ``keys``.
        :param attn_bias: additive bias on the attention logits (e.g. a
            padding/causality mask); may be falsy to skip.
        :param cache: optional dict holding "k"/"v" tensors from previous
            decoding steps; extended and updated in place.
        :param gather_idx: unused here; kept for interface compatibility.
        :return: attention output projected to ``d_model``.
        """
        # Self-attention defaults: keys/values fall back to queries.
        keys = queries if keys is None else keys
        values = keys if values is None else values
        q = self._q_fc(queries)
        k = self._k_fc(keys)
        v = self._v_fc(values)
        # Split projections into heads: [batch, n_head, seq, d_*].
        reshaped_q = layers.reshape(
            x=q,
            shape=[q.shape[0], q.shape[1], self._n_head, self._d_key],
            inplace=False)
        transpose_q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
        reshaped_k = layers.reshape(
            x=k,
            shape=[k.shape[0], k.shape[1], self._n_head, self._d_key],
            inplace=False)
        transpose_k = layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3])
        reshaped_v = layers.reshape(
            x=v,
            shape=[v.shape[0], v.shape[1], self._n_head, self._d_value],
            inplace=False)
        transpose_v = layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3])
        # Prepend cached keys/values (incremental decoding) and refresh
        # the cache with the extended tensors.
        if cache is not None:
            cache_k, cache_v = cache["k"], cache["v"]
            transpose_k = layers.concat([cache_k, transpose_k], axis=2)
            transpose_v = layers.concat([cache_v, transpose_v], axis=2)
            cache["k"], cache["v"] = transpose_k, transpose_v
        # Scaled dot-product attention.
        # NOTE(review): the canonical scale is d_key**-0.5; this code uses
        # d_model**-0.5 -- kept as-is to preserve trained behavior.
        product = layers.matmul(x=transpose_q,
                                y=transpose_k,
                                transpose_y=True,
                                alpha=self._d_model**-0.5)
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if self._dropout_rate:
            weights_droped = layers.dropout(weights,
                                           dropout_prob=self._dropout_rate,
                                           is_test=False)
            out = layers.matmul(weights_droped, transpose_v)
        else:
            out = layers.matmul(weights, transpose_v)
        # Combine heads back: [batch, q_len, n_head * d_value].
        if len(out.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")
        trans_x = layers.transpose(out, perm=[0, 2, 1, 3])
        final_out = layers.reshape(
            x=trans_x,
            shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
            inplace=False)
        # Final linear projection back to the model width.
        proj_out = self._proj_fc(final_out)
        return proj_out
class EncoderSubLayer(Layer):
    """
    One transformer encoder sub-layer: pre/post-processed multi-head
    self-attention followed by a pre/post-processed position-wise
    feed-forward network.
    """

    def __init__(self,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd="n",
                 postprocess_cmd="da"):
        super(EncoderSubLayer, self).__init__()
        self._preprocess_cmd = preprocess_cmd
        self._postprocess_cmd = postprocess_cmd
        self._prepostprocess_dropout = prepostprocess_dropout
        # Self-attention branch: preprocess -> attention -> postprocess.
        self._preprocess_layer = PrePostProcessLayer(self._preprocess_cmd,
                                                    [d_model])
        self._multihead_attention_layer = MultiHeadAttentionLayer(
            d_key, d_value, d_model, n_head, attention_dropout)
        self._postprocess_layer = PrePostProcessLayer(self._postprocess_cmd,
                                                     None)
        # Feed-forward branch: preprocess -> FFN -> postprocess.
        self._preprocess_layer2 = PrePostProcessLayer(self._preprocess_cmd,
                                                      [d_model])
        self._positionwise_feed_forward = PositionwiseFeedForwardLayer(
            d_model, d_inner_hid, d_model, relu_dropout)
        self._postprocess_layer2 = PrePostProcessLayer(self._postprocess_cmd,
                                                       None)

    def forward(self, enc_input, attn_bias):
        """
        Run self-attention and the feed-forward network over ``enc_input``.

        :param enc_input: encoder input / previous sub-layer output.
        :param attn_bias: additive attention bias (e.g. padding mask).
        :return: sub-layer output tensor.
        """
        normed = self._preprocess_layer(None, enc_input,
                                        self._preprocess_cmd,
                                        self._prepostprocess_dropout)
        attn_out = self._multihead_attention_layer(normed, None, None,
                                                   attn_bias)
        attn_out = self._postprocess_layer(enc_input, attn_out,
                                           self._postprocess_cmd,
                                           self._prepostprocess_dropout)
        normed2 = self._preprocess_layer2(None, attn_out,
                                          self._preprocess_cmd,
                                          self._prepostprocess_dropout)
        ffd_out = self._positionwise_feed_forward(normed2)
        return self._postprocess_layer2(attn_out, ffd_out,
                                        self._postprocess_cmd,
                                        self._prepostprocess_dropout)
class EncoderLayer(Layer):
    """
    Stack of ``n_layer`` EncoderSubLayer instances, with a final
    pre-process (layer-norm) step applied to the stack output.
    """

    def __init__(self,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd="n",
                 postprocess_cmd="da"):
        super(EncoderLayer, self).__init__()
        self._preprocess_cmd = preprocess_cmd
        self._prepostprocess_dropout = prepostprocess_dropout
        self._n_layer = n_layer
        # Final normalization applied after the whole stack.
        self._preprocess_layer = PrePostProcessLayer(self._preprocess_cmd,
                                                    [d_model])
        self._encoder_sublayers = [
            self.add_sublayer(
                'esl_%d' % idx,
                EncoderSubLayer(n_head, d_key, d_value, d_model, d_inner_hid,
                                prepostprocess_dropout, attention_dropout,
                                relu_dropout, preprocess_cmd,
                                postprocess_cmd)) for idx in range(n_layer)
        ]

    def forward(self, enc_input, attn_bias):
        """Feed ``enc_input`` through every sub-layer, then normalize."""
        out = enc_input
        for sublayer in self._encoder_sublayers:
            out = sublayer(out, attn_bias)
        return self._preprocess_layer(None, out, self._preprocess_cmd,
                                      self._prepostprocess_dropout)
class PrepareEncoderDecoderLayer(Layer):
    """
    Embeds token ids, scales them by sqrt(emb_dim), adds frozen sinusoid
    position embeddings and optionally applies dropout.
    """

    def __init__(self,
                 src_vocab_size,
                 src_emb_dim,
                 src_max_len,
                 dropout_rate,
                 word_emb_param_name=None,
                 pos_enc_param_name=None):
        super(PrepareEncoderDecoderLayer, self).__init__()
        self._src_max_len = src_max_len
        self._src_emb_dim = src_emb_dim
        self._src_vocab_size = src_vocab_size
        self._dropout_rate = dropout_rate
        # Learned word embedding; index 0 is the padding token.
        self._input_emb = Embedding(
            size=[src_vocab_size, src_emb_dim],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                name=word_emb_param_name,
                initializer=fluid.initializer.Normal(0., src_emb_dim**-0.5)))
        # Fixed sinusoid position table, not trainable.
        pos_inp = position_encoding_init(src_max_len, src_emb_dim)
        self._pos_emb = Embedding(
            size=[self._src_max_len, src_emb_dim],
            param_attr=fluid.ParamAttr(
                name=pos_enc_param_name,
                initializer=fluid.initializer.NumpyArrayInitializer(pos_inp),
                trainable=False))
        # use in dygraph_mode to fit different length batch
        # self._pos_emb._w = to_variable(
        #     position_encoding_init(self._src_max_len, self._src_emb_dim))

    def forward(self, src_word, src_pos):
        """
        Combine scaled word embeddings with position embeddings.

        :param src_word: token id tensor.
        :param src_pos: position id tensor.
        :return: embedded input, with dropout when configured.
        """
        word_emb = self._input_emb(src_word)
        word_emb = layers.scale(x=word_emb, scale=self._src_emb_dim**0.5)
        pos_emb = self._pos_emb(src_pos)
        # Position table is fixed; keep gradients away from it.
        pos_emb.stop_gradient = True
        combined = word_emb + pos_emb
        combined = layers.reshape(
            combined, shape=[combined.shape[0], combined.shape[1], -1])
        if self._dropout_rate:
            return layers.dropout(combined,
                                  dropout_prob=self._dropout_rate,
                                  is_test=False)
        return combined
class WrapEncoderLayer(Layer):
    """
    Assembles embedding preparation and the encoder stack into one layer
    taking ``(src_word, src_pos, src_slf_attn_bias)``.
    """

    def __init__(self, src_vocab_size, max_length, n_layer, n_head,
                 d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout,
                 attention_dropout, relu_dropout, preprocess_cmd,
                 postprocess_cmd, weight_sharing):
        """Wire together all layers needed by the encoder."""
        super(WrapEncoderLayer, self).__init__()
        self._prepare_encoder_layer = PrepareEncoderDecoderLayer(
            src_vocab_size,
            d_model,
            max_length,
            prepostprocess_dropout,
            word_emb_param_name=word_emb_param_names[0],
            pos_enc_param_name=pos_enc_param_names[0])
        self._encoder = EncoderLayer(n_layer, n_head, d_key, d_value,
                                     d_model, d_inner_hid,
                                     prepostprocess_dropout,
                                     attention_dropout, relu_dropout,
                                     preprocess_cmd, postprocess_cmd)

    def forward(self, enc_inputs):
        """Encode ``enc_inputs``; returns the encoder output tensor."""
        src_word, src_pos, src_slf_attn_bias = enc_inputs
        prepared = self._prepare_encoder_layer(src_word, src_pos)
        return self._encoder(prepared, src_slf_attn_bias)
class DecoderSubLayer(Layer):
    """
    One transformer decoder sub-layer: masked self-attention,
    encoder-decoder cross-attention and a position-wise feed-forward
    network, each wrapped in pre/post processing.
    """

    def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
                 prepostprocess_dropout, attention_dropout, relu_dropout,
                 preprocess_cmd, postprocess_cmd):
        super(DecoderSubLayer, self).__init__()
        self._postprocess_cmd = postprocess_cmd
        self._preprocess_cmd = preprocess_cmd
        self._prepostprcess_dropout = prepostprocess_dropout
        # Masked self-attention over the decoder input.
        self._pre_process_layer = PrePostProcessLayer(preprocess_cmd,
                                                      [d_model])
        self._multihead_attention_layer = MultiHeadAttentionLayer(
            d_key, d_value, d_model, n_head, attention_dropout)
        self._post_process_layer = PrePostProcessLayer(postprocess_cmd, None)
        # Cross-attention over the encoder output.
        self._pre_process_layer2 = PrePostProcessLayer(preprocess_cmd,
                                                       [d_model])
        self._multihead_attention_layer2 = MultiHeadAttentionLayer(
            d_key, d_value, d_model, n_head, attention_dropout)
        self._post_process_layer2 = PrePostProcessLayer(postprocess_cmd,
                                                        [d_model])
        # Position-wise feed-forward network.
        self._pre_process_layer3 = PrePostProcessLayer(preprocess_cmd,
                                                       [d_model])
        self._positionwise_feed_forward_layer = PositionwiseFeedForwardLayer(
            d_model, d_inner_hid, d_model, relu_dropout)
        self._post_process_layer3 = PrePostProcessLayer(postprocess_cmd, None)

    def forward(self,
                dec_input,
                enc_output,
                slf_attn_bias,
                dec_enc_attn_bias,
                cache=None,
                gather_idx=None):
        """
        Run masked self-attention, cross-attention and the FFN.

        :param dec_input: decoder input / previous sub-layer output.
        :param enc_output: encoder output (keys/values for cross-attention).
        :param slf_attn_bias: additive bias masking future positions.
        :param dec_enc_attn_bias: additive bias masking source padding.
        :param cache: optional k/v cache used during inference.
        :param gather_idx: optional beam gather indices for the cache.
        :return: sub-layer output tensor.
        """
        normed = self._pre_process_layer(None, dec_input,
                                         self._preprocess_cmd,
                                         self._prepostprcess_dropout)
        self_attn = self._multihead_attention_layer(normed, None, None,
                                                    slf_attn_bias, cache,
                                                    gather_idx)
        self_attn = self._post_process_layer(dec_input, self_attn,
                                             self._postprocess_cmd,
                                             self._prepostprcess_dropout)
        normed2 = self._pre_process_layer2(None, self_attn,
                                           self._preprocess_cmd,
                                           self._prepostprcess_dropout)
        cross_attn = self._multihead_attention_layer2(normed2, enc_output,
                                                      enc_output,
                                                      dec_enc_attn_bias)
        cross_attn = self._post_process_layer2(self_attn, cross_attn,
                                               self._postprocess_cmd,
                                               self._prepostprcess_dropout)
        normed3 = self._pre_process_layer3(None, cross_attn,
                                           self._preprocess_cmd,
                                           self._prepostprcess_dropout)
        ffd_out = self._positionwise_feed_forward_layer(normed3)
        return self._post_process_layer3(cross_attn, ffd_out,
                                         self._postprocess_cmd,
                                         self._prepostprcess_dropout)
class DecoderLayer(Layer):
    """
    Stack of ``n_layer`` DecoderSubLayer instances, with a final
    pre-process (layer-norm) step applied to the stack output.
    """

    def __init__(self, n_layer, n_head, d_key, d_value, d_model,
                 d_inner_hid, prepostprocess_dropout, attention_dropout,
                 relu_dropout, preprocess_cmd, postprocess_cmd):
        super(DecoderLayer, self).__init__()
        # Final normalization applied after the whole stack.
        self._pre_process_layer = PrePostProcessLayer(preprocess_cmd,
                                                      [d_model])
        self._n_layer = n_layer
        self._preprocess_cmd = preprocess_cmd
        self._prepostprocess_dropout = prepostprocess_dropout
        self._decoder_sub_layers = [
            self.add_sublayer(
                'dsl_%d' % idx,
                DecoderSubLayer(n_head, d_key, d_value, d_model, d_inner_hid,
                                prepostprocess_dropout, attention_dropout,
                                relu_dropout, preprocess_cmd,
                                postprocess_cmd)) for idx in range(n_layer)
        ]

    def forward(self,
                dec_input,
                enc_output,
                dec_slf_attn_bias,
                dec_enc_attn_bias,
                caches=None,
                gather_idx=None):
        """
        Feed through every decoder sub-layer, then normalize the output.

        :param dec_input: decoder input tensor.
        :param enc_output: encoder stack output.
        :param dec_slf_attn_bias: self-attention (causality) bias.
        :param dec_enc_attn_bias: cross-attention (padding) bias.
        :param caches: optional per-layer k/v caches for inference.
        :param gather_idx: optional beam gather indices for the caches.
        :return: decoder stack output tensor.
        """
        out = dec_input
        for idx in range(self._n_layer):
            layer_cache = None if caches is None else caches[idx]
            out = self._decoder_sub_layers[idx](out, enc_output,
                                                dec_slf_attn_bias,
                                                dec_enc_attn_bias,
                                                layer_cache, gather_idx)
        return self._pre_process_layer(None, out, self._preprocess_cmd,
                                       self._prepostprocess_dropout)
class WrapDecoderLayer(Layer):
    """
    Assembles embedding preparation, the decoder stack and the output
    projection into one layer producing target-vocabulary logits.
    """

    def __init__(self,
                 trg_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd,
                 postprocess_cmd,
                 weight_sharing,
                 gather_idx=None):
        """Wire together all layers needed by the decoder."""
        super(WrapDecoderLayer, self).__init__()
        self._prepare_decoder_layer = PrepareEncoderDecoderLayer(
            trg_vocab_size,
            d_model,
            max_length,
            prepostprocess_dropout,
            word_emb_param_name=word_emb_param_names[1],
            pos_enc_param_name=pos_enc_param_names[1])
        self._decoder_layer = DecoderLayer(n_layer, n_head, d_key, d_value,
                                           d_model, d_inner_hid,
                                           prepostprocess_dropout,
                                           attention_dropout, relu_dropout,
                                           preprocess_cmd, postprocess_cmd)
        self._weight_sharing = weight_sharing
        if not weight_sharing:
            # Separate output projection when embeddings are not shared.
            self._fc = Linear(input_dim=d_model,
                              output_dim=trg_vocab_size,
                              bias_attr=False)

    def forward(self, dec_inputs, enc_output, caches=None, gather_idx=None):
        """
        Decode against ``enc_output``.

        :param dec_inputs: tuple (trg_word, trg_pos, trg_slf_attn_bias,
            trg_src_attn_bias).
        :param enc_output: encoder stack output.
        :param caches: optional per-layer k/v caches for inference.
        :param gather_idx: optional beam gather indices for the caches.
        :return: logits over the target vocabulary.
        """
        trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs
        prepared = self._prepare_decoder_layer(trg_word, trg_pos)
        dec_output = self._decoder_layer(prepared, enc_output,
                                         trg_slf_attn_bias,
                                         trg_src_attn_bias, caches,
                                         gather_idx)
        flat = layers.reshape(dec_output,
                              shape=[-1, dec_output.shape[-1]],
                              inplace=False)
        if self._weight_sharing:
            # Tie the output projection to the transposed word embedding.
            predict = layers.matmul(
                x=flat,
                y=self._prepare_decoder_layer._input_emb.weight,
                transpose_y=True)
        else:
            predict = self._fc(flat)
        if dec_inputs is None:
            # NOTE(review): effectively unreachable -- ``dec_inputs`` was
            # already unpacked above, so it cannot be None here.
            return layers.softmax(predict)
        return predict
class TransFormer(Layer):
    """
    Full transformer model: wraps the encoder and decoder stacks, computes
    the training loss in ``forward`` and performs beam-search decoding in
    ``beam_search``.
    """
    def __init__(self,
                 src_vocab_size,
                 trg_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd,
                 postprocess_cmd,
                 weight_sharing,
                 label_smooth_eps=0.0):
        super(TransFormer, self).__init__()
        self._label_smooth_eps = label_smooth_eps
        self._trg_vocab_size = trg_vocab_size
        if weight_sharing:
            assert src_vocab_size == trg_vocab_size, (
                "Vocabularies in source and target should be same for weight sharing."
            )
        self._wrap_encoder_layer = WrapEncoderLayer(
            src_vocab_size, max_length, n_layer, n_head,
            d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout,
            attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd,
            weight_sharing)
        self._wrap_decoder_layer = WrapDecoderLayer(
            trg_vocab_size, max_length, n_layer, n_head,
            d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout,
            attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd,
            weight_sharing)
        if weight_sharing:
            # Tie the decoder input embedding to the encoder's.
            self._wrap_decoder_layer._prepare_decoder_layer._input_emb.weight = self._wrap_encoder_layer._prepare_encoder_layer._input_emb.weight
        # Kept for building the beam-search decoder caches.
        self.n_layer = n_layer
        self.n_head = n_head
        self.d_key = d_key
        self.d_value = d_value
    def forward(self, enc_inputs, dec_inputs, label, weights):
        """
        Training forward pass.

        :param enc_inputs: (src_word, src_pos, src_slf_attn_bias).
        :param dec_inputs: (trg_word, trg_pos, trg_slf_attn_bias,
            trg_src_attn_bias).
        :param label: gold target token ids.
        :param weights: per-token weights (e.g. a padding mask).
        :return: (sum_cost, avg_cost, predict, token_num).
        """
        enc_output = self._wrap_encoder_layer(enc_inputs)
        predict = self._wrap_decoder_layer(dec_inputs, enc_output)
        if self._label_smooth_eps:
            label_out = layers.label_smooth(label=layers.one_hot(
                input=label, depth=self._trg_vocab_size),
                                                epsilon=self._label_smooth_eps)
        # NOTE(review): when label_smooth_eps == 0, `label_out` is never
        # assigned but is still used below (NameError) -- this code appears
        # to assume smoothing is always enabled; confirm before disabling.
        cost = layers.softmax_with_cross_entropy(
            logits=predict,
            label=label_out,
            soft_label=True if self._label_smooth_eps else False)
        weighted_cost = cost * weights
        sum_cost = layers.reduce_sum(weighted_cost)
        token_num = layers.reduce_sum(weights)
        token_num.stop_gradient = True
        avg_cost = sum_cost / token_num
        return sum_cost, avg_cost, predict, token_num
    def beam_search(self,
                    enc_inputs,
                    dec_inputs,
                    bos_id=0,
                    eos_id=1,
                    beam_size=4,
                    max_len=30,
                    alpha=0.6):
        """
        Beam search with the alive and finished two queues, both have a beam size
        capicity separately. It includes `grow_topk` `grow_alive` `grow_finish` as
        steps.
        1. `grow_topk` selects the top `2*beam_size` candidates to avoid all getting
        EOS.
        2. `grow_alive` selects the top `beam_size` non-EOS candidates as the inputs
        of next decoding step.
        3. `grow_finish` compares the already finished candidates in the finished queue
        and newly added finished candidates from `grow_topk`, and selects the top
        `beam_size` finished candidates.

        Returns (finished_seq, finished_scores).
        """
        def expand_to_beam_size(tensor, beam_size):
            # Insert a beam axis at dim 1 and tile it `beam_size` times.
            tensor = layers.reshape(tensor,
                                    [tensor.shape[0], 1] + tensor.shape[1:])
            tile_dims = [1] * len(tensor.shape)
            tile_dims[1] = beam_size
            return layers.expand(tensor, tile_dims)
        def merge_beam_dim(tensor):
            # Fold [batch, beam, ...] into [batch * beam, ...].
            return layers.reshape(tensor, [-1] + tensor.shape[2:])
        # run encoder
        enc_output = self._wrap_encoder_layer(enc_inputs)
        # constant number used as "minus infinity" for masked scores
        inf = float(1. * 1e7)
        batch_size = enc_output.shape[0]
        ### initialize states of beam search ###
        ## init for the alive ##
        initial_ids, trg_src_attn_bias = dec_inputs  # (batch_size, 1)
        # Only beam 0 starts alive; others get -inf log prob.
        initial_log_probs = to_variable(
            np.array([[0.] + [-inf] * (beam_size - 1)], dtype="float32"))
        alive_log_probs = layers.expand(initial_log_probs, [batch_size, 1])
        alive_seq = to_variable(
            np.tile(np.array([[[bos_id]]], dtype="int64"),
                    (batch_size, beam_size, 1)))
        ## init for the finished ##
        finished_scores = to_variable(
            np.array([[-inf] * beam_size], dtype="float32"))
        finished_scores = layers.expand(finished_scores, [batch_size, 1])
        finished_seq = to_variable(
            np.tile(np.array([[[bos_id]]], dtype="int64"),
                    (batch_size, beam_size, 1)))
        finished_flags = layers.zeros_like(finished_scores)
        ### initialize inputs and states of transformer decoder ###
        ## init inputs for decoder, shaped `[batch_size*beam_size, ...]`
        trg_word = layers.reshape(alive_seq[:, :, -1],
                                  [batch_size * beam_size, 1, 1])
        trg_pos = layers.zeros_like(trg_word)
        trg_src_attn_bias = merge_beam_dim(
            expand_to_beam_size(trg_src_attn_bias, beam_size))
        enc_output = merge_beam_dim(expand_to_beam_size(enc_output, beam_size))
        ## init states (caches) for transformer, need to be updated according to selected beam
        # Empty (length-0) k/v caches, one dict per decoder layer.
        caches = [{
            "k":
            layers.fill_constant(
                shape=[batch_size * beam_size, self.n_head, 0, self.d_key],
                dtype=enc_output.dtype,
                value=0),
            "v":
            layers.fill_constant(
                shape=[batch_size * beam_size, self.n_head, 0, self.d_value],
                dtype=enc_output.dtype,
                value=0),
        } for i in range(self.n_layer)]
        def update_states(caches, beam_idx, beam_size):
            # Re-gather every layer cache after beams have been reordered.
            for cache in caches:
                cache["k"] = gather_2d_by_gather(cache["k"], beam_idx,
                                                 beam_size, batch_size, False)
                cache["v"] = gather_2d_by_gather(cache["v"], beam_idx,
                                                 beam_size, batch_size, False)
            return caches
        def gather_2d_by_gather(tensor_nd,
                                beam_idx,
                                beam_size,
                                batch_size,
                                need_flat=True):
            # Gather along the (batch, beam) axes by converting per-batch
            # beam indices into flat indices over batch*beam.
            batch_idx = layers.range(0, batch_size, 1,
                                     dtype="int64") * beam_size
            flat_tensor = merge_beam_dim(tensor_nd) if need_flat else tensor_nd
            idx = layers.reshape(layers.elementwise_add(beam_idx, batch_idx, 0),
                                 [-1])
            new_flat_tensor = layers.gather(flat_tensor, idx)
            new_tensor_nd = layers.reshape(
                new_flat_tensor,
                shape=[batch_size, beam_idx.shape[1]] +
                tensor_nd.shape[2:]) if need_flat else new_flat_tensor
            return new_tensor_nd
        def early_finish(alive_log_probs, finished_scores,
                         finished_in_finished):
            # Length penalty at the maximum possible length.
            max_length_penalty = np.power(((5. + max_len) / 6.), alpha)
            # The best possible score of the most likely alive sequence
            lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty
            # Now to compute the lowest score of a finished sequence in finished
            # If the sequence isn't finished, we multiply it's score by 0. since
            # scores are all -ve, taking the min will give us the score of the lowest
            # finished item.
            lowest_score_of_fininshed_in_finished = layers.reduce_min(
                finished_scores * finished_in_finished, 1)
            # If none of the sequences have finished, then the min will be 0 and
            # we have to replace it by -ve INF if it is. The score of any seq in alive
            # will be much higher than -ve INF and the termination condition will not
            # be met.
            lowest_score_of_fininshed_in_finished += (
                1. - layers.reduce_max(finished_in_finished, 1)) * -inf
            bound_is_met = layers.reduce_all(
                layers.greater_than(lowest_score_of_fininshed_in_finished,
                                    lower_bound_alive_scores))
            return bound_is_met
        def grow_topk(i, logits, alive_seq, alive_log_probs, states):
            # Expand each alive beam by the 2*beam_size best next tokens.
            logits = layers.reshape(logits, [batch_size, beam_size, -1])
            candidate_log_probs = layers.log(layers.softmax(logits, axis=2))
            log_probs = layers.elementwise_add(candidate_log_probs,
                                               alive_log_probs, 0)
            # NOTE(review): this computes 5.0 + (i + 1.0)/6.0, which differs
            # from early_finish's ((5 + len)/6)**alpha form (GNMT length
            # penalty) -- likely a misplaced parenthesis; confirm intent.
            length_penalty = np.power(5.0 + (i + 1.0) / 6.0, alpha)
            curr_scores = log_probs / length_penalty
            flat_curr_scores = layers.reshape(curr_scores, [batch_size, -1])
            topk_scores, topk_ids = layers.topk(flat_curr_scores,
                                                k=beam_size * 2)
            # NOTE(review): debug print left in; noisy during inference.
            print( "topk ids", topk_ids)
            topk_log_probs = topk_scores * length_penalty
            # Flat ids encode (beam, token): recover both components.
            topk_beam_index = topk_ids // self._trg_vocab_size
            topk_ids = topk_ids % self._trg_vocab_size
            print( "topk ids2", topk_ids)
            # use gather as gather_nd, TODO: use gather_nd
            topk_seq = gather_2d_by_gather(alive_seq, topk_beam_index,
                                           beam_size, batch_size)
            print( "topk ids", topk_ids )
            # Append the chosen token to each candidate sequence.
            reshape_temp = layers.reshape(topk_ids, topk_ids.shape + [1])
            topk_seq = layers.concat(
                [topk_seq,
                 reshape_temp],
                axis=2)
            states = update_states(states, topk_beam_index, beam_size)
            eos = layers.fill_constant(shape=topk_ids.shape,
                                       dtype="int64",
                                       value=eos_id)
            topk_finished = layers.cast(layers.equal(topk_ids, eos), "float32")
            #topk_seq: [batch_size, 2*beam_size, i+1]
            #topk_log_probs, topk_scores, topk_finished: [batch_size, 2*beam_size]
            return topk_seq, topk_log_probs, topk_scores, topk_finished, states
        def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished,
                       states):
            # Keep the best `beam_size` candidates that have NOT emitted EOS.
            curr_scores += curr_finished * -inf
            _, topk_indexes = layers.topk(curr_scores, k=beam_size)
            alive_seq = gather_2d_by_gather(curr_seq, topk_indexes,
                                            beam_size * 2, batch_size)
            alive_log_probs = gather_2d_by_gather(curr_log_probs, topk_indexes,
                                                 beam_size * 2, batch_size)
            states = update_states(states, topk_indexes, beam_size * 2)
            return alive_seq, alive_log_probs, states
        def grow_finished(finished_seq, finished_scores, finished_flags,
                          curr_seq, curr_scores, curr_finished):
            # finished scores
            # Pad previously finished sequences with EOS to match length.
            finished_seq = layers.concat([
                finished_seq,
                layers.fill_constant(shape=[batch_size, beam_size, 1],
                                     dtype="int64",
                                     value=eos_id)
            ],
                                         axis=2)
            # Set the scores of the unfinished seq in curr_seq to large negative
            # values
            curr_scores += (1. - curr_finished) * -inf
            # concatenating the sequences and scores along beam axis
            curr_finished_seq = layers.concat([finished_seq, curr_seq], axis=1)
            curr_finished_scores = layers.concat([finished_scores, curr_scores],
                                                 axis=1)
            curr_finished_flags = layers.concat([finished_flags, curr_finished],
                                                axis=1)
            # Keep the best `beam_size` out of the 3*beam_size candidates.
            _, topk_indexes = layers.topk(curr_finished_scores, k=beam_size)
            finished_seq = gather_2d_by_gather(curr_finished_seq, topk_indexes,
                                               beam_size * 3, batch_size)
            finished_scores = gather_2d_by_gather(curr_finished_scores,
                                                  topk_indexes, beam_size * 3,
                                                  batch_size)
            finished_flags = gather_2d_by_gather(curr_finished_flags,
                                                 topk_indexes, beam_size * 3,
                                                 batch_size)
            return finished_seq, finished_scores, finished_flags
        # Main decoding loop: one target token per iteration.
        for i in range(max_len):
            logits = self._wrap_decoder_layer(
                (trg_word, trg_pos, None, trg_src_attn_bias), enc_output,
                caches)
            topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk(
                i, logits, alive_seq, alive_log_probs, caches)
            alive_seq, alive_log_probs, states = grow_alive(
                topk_seq, topk_scores, topk_log_probs, topk_finished, states)
            finished_seq, finished_scores, finished_flags = grow_finished(
                finished_seq, finished_scores, finished_flags, topk_seq,
                topk_scores, topk_finished)
            # Feed the last emitted token back as the next decoder input.
            trg_word = layers.reshape(alive_seq[:, :, -1],
                                      [batch_size * beam_size, 1, 1])
            # NOTE(review): position of the token decoded at step i is i+1
            # in the sequence (bos at 0); `value=i` may be off by one --
            # confirm against the position-embedding convention.
            trg_pos = layers.fill_constant(shape=trg_word.shape,
                                           dtype="int64",
                                           value=i)
            if early_finish(alive_log_probs, finished_scores,
                            finished_flags).numpy():
                break
        return finished_seq, finished_scores
| [
"paddle.fluid.layers.reduce_min",
"paddle.fluid.initializer.Constant",
"numpy.sin",
"numpy.arange",
"paddle.fluid.layers.transpose",
"paddle.fluid.layers.softmax_with_cross_entropy",
"paddle.fluid.layers.concat",
"paddle.fluid.layers.reduce_sum",
"paddle.fluid.layers.greater_than",
"paddle.fluid.l... | [((1145, 1166), 'numpy.arange', 'np.arange', (['n_position'], {}), '(n_position)\n', (1154, 1166), True, 'import numpy as np\n'), ((1427, 1454), 'numpy.expand_dims', 'np.expand_dims', (['position', '(1)'], {}), '(position, 1)\n', (1441, 1454), True, 'import numpy as np\n'), ((1457, 1490), 'numpy.expand_dims', 'np.expand_dims', (['inv_timescales', '(0)'], {}), '(inv_timescales, 0)\n', (1471, 1490), True, 'import numpy as np\n'), ((4008, 4071), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'input_hid', 'output_dim': 'd_inner_hid', 'act': '"""relu"""'}), "(input_dim=input_hid, output_dim=d_inner_hid, act='relu')\n", (4014, 4071), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((4141, 4188), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_inner_hid', 'output_dim': 'd_hid'}), '(input_dim=d_inner_hid, output_dim=d_hid)\n', (4147, 4188), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((5218, 5287), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': '(d_key * n_head)', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=d_key * n_head, bias_attr=False)\n', (5224, 5287), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((5361, 5430), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': '(d_key * n_head)', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=d_key * n_head, bias_attr=False)\n', (5367, 5430), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((5504, 5575), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': '(d_value * n_head)', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=d_value * n_head, bias_attr=False)\n', (5510, 5575), False, 'from paddle.fluid.dygraph import Embedding, 
LayerNorm, Linear, to_variable, Layer, guard\n'), ((5652, 5720), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': 'self._d_model', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=self._d_model, bias_attr=False)\n', (5658, 5720), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((6384, 6482), 'paddle.fluid.layers.reshape', 'layers.reshape', ([], {'x': 'q', 'shape': '[q.shape[0], q.shape[1], self._n_head, self._d_key]', 'inplace': '(False)'}), '(x=q, shape=[q.shape[0], q.shape[1], self._n_head, self.\n _d_key], inplace=False)\n', (6398, 6482), True, 'import paddle.fluid.layers as layers\n'), ((6573, 6622), 'paddle.fluid.layers.transpose', 'layers.transpose', ([], {'x': 'reshaped_q', 'perm': '[0, 2, 1, 3]'}), '(x=reshaped_q, perm=[0, 2, 1, 3])\n', (6589, 6622), True, 'import paddle.fluid.layers as layers\n'), ((6644, 6742), 'paddle.fluid.layers.reshape', 'layers.reshape', ([], {'x': 'k', 'shape': '[k.shape[0], k.shape[1], self._n_head, self._d_key]', 'inplace': '(False)'}), '(x=k, shape=[k.shape[0], k.shape[1], self._n_head, self.\n _d_key], inplace=False)\n', (6658, 6742), True, 'import paddle.fluid.layers as layers\n'), ((6833, 6882), 'paddle.fluid.layers.transpose', 'layers.transpose', ([], {'x': 'reshaped_k', 'perm': '[0, 2, 1, 3]'}), '(x=reshaped_k, perm=[0, 2, 1, 3])\n', (6849, 6882), True, 'import paddle.fluid.layers as layers\n'), ((6904, 7004), 'paddle.fluid.layers.reshape', 'layers.reshape', ([], {'x': 'v', 'shape': '[v.shape[0], v.shape[1], self._n_head, self._d_value]', 'inplace': '(False)'}), '(x=v, shape=[v.shape[0], v.shape[1], self._n_head, self.\n _d_value], inplace=False)\n', (6918, 7004), True, 'import paddle.fluid.layers as layers\n'), ((7095, 7144), 'paddle.fluid.layers.transpose', 'layers.transpose', ([], {'x': 'reshaped_v', 'perm': '[0, 2, 1, 3]'}), '(x=reshaped_v, perm=[0, 2, 1, 3])\n', (7111, 7144), True, 'import paddle.fluid.layers as 
layers\n'), ((7493, 7588), 'paddle.fluid.layers.matmul', 'layers.matmul', ([], {'x': 'transpose_q', 'y': 'transpose_k', 'transpose_y': '(True)', 'alpha': '(self._d_model ** -0.5)'}), '(x=transpose_q, y=transpose_k, transpose_y=True, alpha=self.\n _d_model ** -0.5)\n', (7506, 7588), True, 'import paddle.fluid.layers as layers\n'), ((7751, 7774), 'paddle.fluid.layers.softmax', 'layers.softmax', (['product'], {}), '(product)\n', (7765, 7774), True, 'import paddle.fluid.layers as layers\n'), ((8264, 8304), 'paddle.fluid.layers.transpose', 'layers.transpose', (['out'], {'perm': '[0, 2, 1, 3]'}), '(out, perm=[0, 2, 1, 3])\n', (8280, 8304), True, 'import paddle.fluid.layers as layers\n'), ((8325, 8420), 'paddle.fluid.layers.reshape', 'layers.reshape', ([], {'x': 'trans_x', 'shape': '[0, 0, trans_x.shape[2] * trans_x.shape[3]]', 'inplace': '(False)'}), '(x=trans_x, shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],\n inplace=False)\n', (8339, 8420), True, 'import paddle.fluid.layers as layers\n'), ((14495, 14555), 'paddle.fluid.layers.scale', 'layers.scale', ([], {'x': 'src_word_emb', 'scale': '(self._src_emb_dim ** 0.5)'}), '(x=src_word_emb, scale=self._src_emb_dim ** 0.5)\n', (14507, 14555), True, 'import paddle.fluid.layers as layers\n'), ((14801, 14878), 'paddle.fluid.layers.reshape', 'layers.reshape', (['enc_input'], {'shape': '[enc_input.shape[0], enc_input.shape[1], -1]'}), '(enc_input, shape=[enc_input.shape[0], enc_input.shape[1], -1])\n', (14815, 14878), True, 'import paddle.fluid.layers as layers\n'), ((24251, 24326), 'paddle.fluid.layers.reshape', 'layers.reshape', (['dec_output'], {'shape': '[-1, dec_output.shape[-1]]', 'inplace': '(False)'}), '(dec_output, shape=[-1, dec_output.shape[-1]], inplace=False)\n', (24265, 24326), True, 'import paddle.fluid.layers as layers\n'), ((27277, 27401), 'paddle.fluid.layers.softmax_with_cross_entropy', 'layers.softmax_with_cross_entropy', ([], {'logits': 'predict', 'label': 'label_out', 'soft_label': '(True if 
self._label_smooth_eps else False)'}), '(logits=predict, label=label_out,\n soft_label=True if self._label_smooth_eps else False)\n', (27310, 27401), True, 'import paddle.fluid.layers as layers\n'), ((27493, 27525), 'paddle.fluid.layers.reduce_sum', 'layers.reduce_sum', (['weighted_cost'], {}), '(weighted_cost)\n', (27510, 27525), True, 'import paddle.fluid.layers as layers\n'), ((27546, 27572), 'paddle.fluid.layers.reduce_sum', 'layers.reduce_sum', (['weights'], {}), '(weights)\n', (27563, 27572), True, 'import paddle.fluid.layers as layers\n'), ((29483, 29532), 'paddle.fluid.layers.expand', 'layers.expand', (['initial_log_probs', '[batch_size, 1]'], {}), '(initial_log_probs, [batch_size, 1])\n', (29496, 29532), True, 'import paddle.fluid.layers as layers\n'), ((29837, 29884), 'paddle.fluid.layers.expand', 'layers.expand', (['finished_scores', '[batch_size, 1]'], {}), '(finished_scores, [batch_size, 1])\n', (29850, 29884), True, 'import paddle.fluid.layers as layers\n'), ((30054, 30088), 'paddle.fluid.layers.zeros_like', 'layers.zeros_like', (['finished_scores'], {}), '(finished_scores)\n', (30071, 30088), True, 'import paddle.fluid.layers as layers\n'), ((30250, 30317), 'paddle.fluid.layers.reshape', 'layers.reshape', (['alive_seq[:, :, -1]', '[batch_size * beam_size, 1, 1]'], {}), '(alive_seq[:, :, -1], [batch_size * beam_size, 1, 1])\n', (30264, 30317), True, 'import paddle.fluid.layers as layers\n'), ((30370, 30397), 'paddle.fluid.layers.zeros_like', 'layers.zeros_like', (['trg_word'], {}), '(trg_word)\n', (30387, 30397), True, 'import paddle.fluid.layers as layers\n'), ((1355, 1380), 'numpy.arange', 'np.arange', (['num_timescales'], {}), '(num_timescales)\n', (1364, 1380), True, 'import numpy as np\n'), ((1529, 1548), 'numpy.sin', 'np.sin', (['scaled_time'], {}), '(scaled_time)\n', (1535, 1548), True, 'import numpy as np\n'), ((1550, 1569), 'numpy.cos', 'np.cos', (['scaled_time'], {}), '(scaled_time)\n', (1556, 1569), True, 'import numpy as np\n'), ((4441, 
4511), 'paddle.fluid.layers.dropout', 'layers.dropout', (['hidden'], {'dropout_prob': 'self._dropout_rate', 'is_test': '(False)'}), '(hidden, dropout_prob=self._dropout_rate, is_test=False)\n', (4455, 4511), True, 'import paddle.fluid.layers as layers\n'), ((7256, 7301), 'paddle.fluid.layers.concat', 'layers.concat', (['[cache_k, transpose_k]'], {'axis': '(2)'}), '([cache_k, transpose_k], axis=2)\n', (7269, 7301), True, 'import paddle.fluid.layers as layers\n'), ((7328, 7373), 'paddle.fluid.layers.concat', 'layers.concat', (['[cache_v, transpose_v]'], {'axis': '(2)'}), '([cache_v, transpose_v], axis=2)\n', (7341, 7373), True, 'import paddle.fluid.layers as layers\n'), ((7835, 7906), 'paddle.fluid.layers.dropout', 'layers.dropout', (['weights'], {'dropout_prob': 'self._dropout_rate', 'is_test': '(False)'}), '(weights, dropout_prob=self._dropout_rate, is_test=False)\n', (7849, 7906), True, 'import paddle.fluid.layers as layers\n'), ((8013, 8055), 'paddle.fluid.layers.matmul', 'layers.matmul', (['weights_droped', 'transpose_v'], {}), '(weights_droped, transpose_v)\n', (8026, 8055), True, 'import paddle.fluid.layers as layers\n'), ((8088, 8123), 'paddle.fluid.layers.matmul', 'layers.matmul', (['weights', 'transpose_v'], {}), '(weights, transpose_v)\n', (8101, 8123), True, 'import paddle.fluid.layers as layers\n'), ((14896, 14969), 'paddle.fluid.layers.dropout', 'layers.dropout', (['enc_input'], {'dropout_prob': 'self._dropout_rate', 'is_test': '(False)'}), '(enc_input, dropout_prob=self._dropout_rate, is_test=False)\n', (14910, 14969), True, 'import paddle.fluid.layers as layers\n'), ((23560, 23629), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': 'trg_vocab_size', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=trg_vocab_size, bias_attr=False)\n', (23566, 23629), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((24471, 24578), 'paddle.fluid.layers.matmul', 
'layers.matmul', ([], {'x': 'dec_output_reshape', 'y': 'self._prepare_decoder_layer._input_emb.weight', 'transpose_y': '(True)'}), '(x=dec_output_reshape, y=self._prepare_decoder_layer.\n _input_emb.weight, transpose_y=True)\n', (24484, 24578), True, 'import paddle.fluid.layers as layers\n'), ((24829, 24852), 'paddle.fluid.layers.softmax', 'layers.softmax', (['predict'], {}), '(predict)\n', (24843, 24852), True, 'import paddle.fluid.layers as layers\n'), ((28668, 28731), 'paddle.fluid.layers.reshape', 'layers.reshape', (['tensor', '([tensor.shape[0], 1] + tensor.shape[1:])'], {}), '(tensor, [tensor.shape[0], 1] + tensor.shape[1:])\n', (28682, 28731), True, 'import paddle.fluid.layers as layers\n'), ((28872, 28904), 'paddle.fluid.layers.expand', 'layers.expand', (['tensor', 'tile_dims'], {}), '(tensor, tile_dims)\n', (28885, 28904), True, 'import paddle.fluid.layers as layers\n'), ((28961, 29008), 'paddle.fluid.layers.reshape', 'layers.reshape', (['tensor', '([-1] + tensor.shape[2:])'], {}), '(tensor, [-1] + tensor.shape[2:])\n', (28975, 29008), True, 'import paddle.fluid.layers as layers\n'), ((29395, 29456), 'numpy.array', 'np.array', (['[[0.0] + [-inf] * (beam_size - 1)]'], {'dtype': '"""float32"""'}), "([[0.0] + [-inf] * (beam_size - 1)], dtype='float32')\n", (29403, 29456), True, 'import numpy as np\n'), ((29762, 29809), 'numpy.array', 'np.array', (['[[-inf] * beam_size]'], {'dtype': '"""float32"""'}), "([[-inf] * beam_size], dtype='float32')\n", (29770, 29809), True, 'import numpy as np\n'), ((32115, 32146), 'paddle.fluid.layers.gather', 'layers.gather', (['flat_tensor', 'idx'], {}), '(flat_tensor, idx)\n', (32128, 32146), True, 'import paddle.fluid.layers as layers\n'), ((32525, 32563), 'numpy.power', 'np.power', (['((5.0 + max_len) / 6.0)', 'alpha'], {}), '((5.0 + max_len) / 6.0, alpha)\n', (32533, 32563), True, 'import numpy as np\n'), ((33048, 33108), 'paddle.fluid.layers.reduce_min', 'layers.reduce_min', (['(finished_scores * finished_in_finished)', 
'(1)'], {}), '(finished_scores * finished_in_finished, 1)\n', (33065, 33108), True, 'import paddle.fluid.layers as layers\n'), ((33838, 33889), 'paddle.fluid.layers.reshape', 'layers.reshape', (['logits', '[batch_size, beam_size, -1]'], {}), '(logits, [batch_size, beam_size, -1])\n', (33852, 33889), True, 'import paddle.fluid.layers as layers\n'), ((33991, 34054), 'paddle.fluid.layers.elementwise_add', 'layers.elementwise_add', (['candidate_log_probs', 'alive_log_probs', '(0)'], {}), '(candidate_log_probs, alive_log_probs, 0)\n', (34013, 34054), True, 'import paddle.fluid.layers as layers\n'), ((34132, 34170), 'numpy.power', 'np.power', (['(5.0 + (i + 1.0) / 6.0)', 'alpha'], {}), '(5.0 + (i + 1.0) / 6.0, alpha)\n', (34140, 34170), True, 'import numpy as np\n'), ((34255, 34300), 'paddle.fluid.layers.reshape', 'layers.reshape', (['curr_scores', '[batch_size, -1]'], {}), '(curr_scores, [batch_size, -1])\n', (34269, 34300), True, 'import paddle.fluid.layers as layers\n'), ((34338, 34384), 'paddle.fluid.layers.topk', 'layers.topk', (['flat_curr_scores'], {'k': '(beam_size * 2)'}), '(flat_curr_scores, k=beam_size * 2)\n', (34349, 34384), True, 'import paddle.fluid.layers as layers\n'), ((34976, 35022), 'paddle.fluid.layers.reshape', 'layers.reshape', (['topk_ids', '(topk_ids.shape + [1])'], {}), '(topk_ids, topk_ids.shape + [1])\n', (34990, 35022), True, 'import paddle.fluid.layers as layers\n'), ((35046, 35093), 'paddle.fluid.layers.concat', 'layers.concat', (['[topk_seq, reshape_temp]'], {'axis': '(2)'}), '([topk_seq, reshape_temp], axis=2)\n', (35059, 35093), True, 'import paddle.fluid.layers as layers\n'), ((35233, 35304), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': 'topk_ids.shape', 'dtype': '"""int64"""', 'value': 'eos_id'}), "(shape=topk_ids.shape, dtype='int64', value=eos_id)\n", (35253, 35304), True, 'import paddle.fluid.layers as layers\n'), ((35869, 35906), 'paddle.fluid.layers.topk', 'layers.topk', (['curr_scores'], {'k': 
'beam_size'}), '(curr_scores, k=beam_size)\n', (35880, 35906), True, 'import paddle.fluid.layers as layers\n'), ((37068, 37115), 'paddle.fluid.layers.concat', 'layers.concat', (['[finished_seq, curr_seq]'], {'axis': '(1)'}), '([finished_seq, curr_seq], axis=1)\n', (37081, 37115), True, 'import paddle.fluid.layers as layers\n'), ((37151, 37204), 'paddle.fluid.layers.concat', 'layers.concat', (['[finished_scores, curr_scores]'], {'axis': '(1)'}), '([finished_scores, curr_scores], axis=1)\n', (37164, 37204), True, 'import paddle.fluid.layers as layers\n'), ((37288, 37342), 'paddle.fluid.layers.concat', 'layers.concat', (['[finished_flags, curr_finished]'], {'axis': '(1)'}), '([finished_flags, curr_finished], axis=1)\n', (37301, 37342), True, 'import paddle.fluid.layers as layers\n'), ((37421, 37467), 'paddle.fluid.layers.topk', 'layers.topk', (['curr_finished_scores'], {'k': 'beam_size'}), '(curr_finished_scores, k=beam_size)\n', (37432, 37467), True, 'import paddle.fluid.layers as layers\n'), ((38791, 38858), 'paddle.fluid.layers.reshape', 'layers.reshape', (['alive_seq[:, :, -1]', '[batch_size * beam_size, 1, 1]'], {}), '(alive_seq[:, :, -1], [batch_size * beam_size, 1, 1])\n', (38805, 38858), True, 'import paddle.fluid.layers as layers\n'), ((38919, 38985), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': 'trg_word.shape', 'dtype': '"""int64"""', 'value': 'i'}), "(shape=trg_word.shape, dtype='int64', value=i)\n", (38939, 38985), True, 'import paddle.fluid.layers as layers\n'), ((1621, 1640), 'numpy.mod', 'np.mod', (['channels', '(2)'], {}), '(channels, 2)\n', (1627, 1640), True, 'import numpy as np\n'), ((2360, 2388), 'paddle.fluid.layers.elementwise_min', 'layers.elementwise_min', (['a', 'b'], {}), '(a, b)\n', (2382, 2388), True, 'import paddle.fluid.layers as layers\n'), ((29586, 29623), 'numpy.array', 'np.array', (['[[[bos_id]]]'], {'dtype': '"""int64"""'}), "([[[bos_id]]], dtype='int64')\n", (29594, 29623), True, 'import numpy as 
np\n'), ((29941, 29978), 'numpy.array', 'np.array', (['[[[bos_id]]]'], {'dtype': '"""int64"""'}), "([[[bos_id]]], dtype='int64')\n", (29949, 29978), True, 'import numpy as np\n'), ((30729, 30847), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': '[batch_size * beam_size, self.n_head, 0, self.d_key]', 'dtype': 'enc_output.dtype', 'value': '(0)'}), '(shape=[batch_size * beam_size, self.n_head, 0, self.\n d_key], dtype=enc_output.dtype, value=0)\n', (30749, 30847), True, 'import paddle.fluid.layers as layers\n'), ((30922, 31042), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': '[batch_size * beam_size, self.n_head, 0, self.d_value]', 'dtype': 'enc_output.dtype', 'value': '(0)'}), '(shape=[batch_size * beam_size, self.n_head, 0, self.\n d_value], dtype=enc_output.dtype, value=0)\n', (30942, 31042), True, 'import paddle.fluid.layers as layers\n'), ((31790, 31835), 'paddle.fluid.layers.range', 'layers.range', (['(0)', 'batch_size', '(1)'], {'dtype': '"""int64"""'}), "(0, batch_size, 1, dtype='int64')\n", (31802, 31835), True, 'import paddle.fluid.layers as layers\n'), ((31998, 32044), 'paddle.fluid.layers.elementwise_add', 'layers.elementwise_add', (['beam_idx', 'batch_idx', '(0)'], {}), '(beam_idx, batch_idx, 0)\n', (32020, 32044), True, 'import paddle.fluid.layers as layers\n'), ((32175, 32271), 'paddle.fluid.layers.reshape', 'layers.reshape', (['new_flat_tensor'], {'shape': '([batch_size, beam_idx.shape[1]] + tensor_nd.shape[2:])'}), '(new_flat_tensor, shape=[batch_size, beam_idx.shape[1]] +\n tensor_nd.shape[2:])\n', (32189, 32271), True, 'import paddle.fluid.layers as layers\n'), ((33591, 33679), 'paddle.fluid.layers.greater_than', 'layers.greater_than', (['lowest_score_of_fininshed_in_finished', 'lower_bound_alive_scores'], {}), '(lowest_score_of_fininshed_in_finished,\n lower_bound_alive_scores)\n', (33610, 33679), True, 'import paddle.fluid.layers as layers\n'), ((33935, 33965), 'paddle.fluid.layers.softmax', 
'layers.softmax', (['logits'], {'axis': '(2)'}), '(logits, axis=2)\n', (33949, 33965), True, 'import paddle.fluid.layers as layers\n'), ((35423, 35450), 'paddle.fluid.layers.equal', 'layers.equal', (['topk_ids', 'eos'], {}), '(topk_ids, eos)\n', (35435, 35450), True, 'import paddle.fluid.layers as layers\n'), ((27111, 27166), 'paddle.fluid.layers.one_hot', 'layers.one_hot', ([], {'input': 'label', 'depth': 'self._trg_vocab_size'}), '(input=label, depth=self._trg_vocab_size)\n', (27125, 27166), True, 'import paddle.fluid.layers as layers\n'), ((33478, 33520), 'paddle.fluid.layers.reduce_max', 'layers.reduce_max', (['finished_in_finished', '(1)'], {}), '(finished_in_finished, 1)\n', (33495, 33520), True, 'import paddle.fluid.layers as layers\n'), ((36588, 36676), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': '[batch_size, beam_size, 1]', 'dtype': '"""int64"""', 'value': 'eos_id'}), "(shape=[batch_size, beam_size, 1], dtype='int64', value\n =eos_id)\n", (36608, 36676), True, 'import paddle.fluid.layers as layers\n'), ((13577, 13627), 'paddle.fluid.initializer.Normal', 'fluid.initializer.Normal', (['(0.0)', '(src_emb_dim ** -0.5)'], {}), '(0.0, src_emb_dim ** -0.5)\n', (13601, 13627), True, 'import paddle.fluid as fluid\n'), ((13935, 13983), 'paddle.fluid.initializer.NumpyArrayInitializer', 'fluid.initializer.NumpyArrayInitializer', (['pos_inp'], {}), '(pos_inp)\n', (13974, 13983), True, 'import paddle.fluid as fluid\n'), ((3601, 3662), 'paddle.fluid.layers.dropout', 'layers.dropout', (['out'], {'dropout_prob': 'dropout_rate', 'is_test': '(False)'}), '(out, dropout_prob=dropout_rate, is_test=False)\n', (3615, 3662), True, 'import paddle.fluid.layers as layers\n'), ((2877, 2908), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(1.0)'], {}), '(1.0)\n', (2903, 2908), True, 'import paddle.fluid as fluid\n'), ((2993, 3024), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(0.0)'], {}), '(0.0)\n', 
(3019, 3024), True, 'import paddle.fluid as fluid\n')] |
import numpy as np
from scipy import optimize
import logging
logger = logging.getLogger(__name__)
def scipyFit(x, y, method,p0 = None,boundaries = (-np.inf, np.inf),sigma = None):
if boundaries is not None and len(boundaries) != 2:
raise ValueError("Boundaries need to be a two 2D tuple")
if p0 is not None and boundaries is not None and boundaries != (-np.inf, np.inf) and len(p0) != len(boundaries[0]) :
raise ValueError("P0 and Fixed Array have to have the same length")
popt, pcov = optimize.curve_fit(method, x, y,p0=p0,bounds = boundaries,sigma=sigma)
perr = np.sqrt(np.diag(pcov))
return popt, perr
def gaussian_amp(x, y0, amp, cen, wid):
'''
Fitting function used. Fits a Gaussian using the following function:
.. math::
y(x)=y_0+\frac{amp}{\sqrt{2\pi wid}}\text{exp}(-\frac{(x-cen)^2}{2*wid^2})
:param x:x-Axis against which we will approximate the function
:type x:1-D numpy array
:param y0:y-Offset of the function
:type y0:float
:param amp:Amplitude of the gaussian
:type amp:float
:param cen:x-Value of center of distribution
:type cen:float
:param wid:Standard deviation of the distribution
:type wid:float
:return:y-Array of a gaussian distribution
:rtype:1-D numpy array
'''
return y0 + (amp / (np.sqrt(2 * np.pi) * wid)) * np.exp(-(x - cen) ** 2 / (2 * wid ** 2))
def gaussian(x, y0, cen, wid):
'''
Fitting function used. Fits a Gaussian using the following function:
.. math::
y(x)=y_0+\frac{amp}{\sqrt{2\pi wid}}\text{exp}(-\frac{(x-cen)^2}{2*wid^2})
:param x:x-Axis against which we will approximate the function
:type x:1-D numpy array
:param y0:y-Offset of the function
:type y0:float
:param amp:Amplitude of the gaussian
:type amp:float
:param cen:x-Value of center of distribution
:type cen:float
:param wid:Standard deviation of the distribution
:type wid:float
:return:y-Array of a gaussian distribution
:rtype:1-D numpy array
'''
return y0 + (1 / (np.sqrt(2 * np.pi) * wid)) * np.exp(-(x - cen) ** 2 / (2 * wid ** 2))
def sinOffset(x,amp,tau,offset,phi):
return offset+amp*np.sin(2*np.pi*x/tau+phi)
def linearPolynomial(x, a, b):
return a + b * x
def exponentialDistribution(x,A,B,u):
return A+B*np.exp(-x*u)*u
def quadraticPolynomial(x, a, b, c,d):
return a + b * x + c * x ** 2 +d * x ** 3
def sin(x,amp,tau):
'''
Represents the used sin within our Fit
:type x: 1-D numpy array
:param amp: Amplitude of sin
:type amp: float
:type tau: float
:return: the functional values for the array x
:rtype: 1-D numpy array
'''
return amp*np.sin(2*np.pi*4*x/tau)
def sinc(x, a, tau_acf):
'''
Represents the used sinc within our Fit
:param x: 1-D numpy array
:param a: float, amplitude of the sinc
:param tau_acf: float
:return: the functional value for the array x
:rtype: 1-D numpy array
'''
return a * np.sinc(4 * x / tau_acf)**2
def sinc_sin(x,a,tau,a_s):
return sinc(x,a,tau) + sin(x,a_s,tau)
def trismooth(x,window_width):
'''
This function is implemented to create a similar function to the Trismooth function of idl
:rtype: 1-D numpy array
:type window_width: int
:param x: The array containg the data which should be filtered. In our case this represents the Flux within the
lightCurve
:type x: 1-D numpy array
:param window_width: The bin size which the function will look at
:return: The smoothed variant of x
'''
if window_width%2 != 0:
window_width = window_width+1
lend = len(x)-1
if (lend+1) < window_width:
raise ValueError("Window_width cannot be bigger than length -1")
halfWeights = np.arange(window_width/2)
weights = np.append(halfWeights,[window_width/2])
weights = np.append(weights,halfWeights[::-1])
weights +=1
tot = np.sum(weights)
smoothed = np.zeros(lend+1)
offset = int(window_width/2)
for i in range(offset,lend-offset):
smoothed[i]=np.sum(x[i-offset:i+offset+1]*weights)
smoothed /=tot
for i in range(0,offset):
smoothed[i] = np.sum(x[0:i+offset+1]*weights[offset-i:]) / np.sum(weights[offset-i:])
for i in range(lend-offset,lend-1,-1):
smoothed[i] = np.sum(x[i-offset:]*weights[0:offset+(lend-i)]) / np.sum(weights[0:offset+(lend-i)])
return smoothed | [
"numpy.sum",
"numpy.zeros",
"scipy.optimize.curve_fit",
"numpy.append",
"numpy.sinc",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"numpy.diag",
"logging.getLogger",
"numpy.sqrt"
] | [((71, 98), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (88, 98), False, 'import logging\n'), ((519, 590), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['method', 'x', 'y'], {'p0': 'p0', 'bounds': 'boundaries', 'sigma': 'sigma'}), '(method, x, y, p0=p0, bounds=boundaries, sigma=sigma)\n', (537, 590), False, 'from scipy import optimize\n'), ((3802, 3829), 'numpy.arange', 'np.arange', (['(window_width / 2)'], {}), '(window_width / 2)\n', (3811, 3829), True, 'import numpy as np\n'), ((3842, 3884), 'numpy.append', 'np.append', (['halfWeights', '[window_width / 2]'], {}), '(halfWeights, [window_width / 2])\n', (3851, 3884), True, 'import numpy as np\n'), ((3896, 3933), 'numpy.append', 'np.append', (['weights', 'halfWeights[::-1]'], {}), '(weights, halfWeights[::-1])\n', (3905, 3933), True, 'import numpy as np\n'), ((3959, 3974), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3965, 3974), True, 'import numpy as np\n'), ((3991, 4009), 'numpy.zeros', 'np.zeros', (['(lend + 1)'], {}), '(lend + 1)\n', (3999, 4009), True, 'import numpy as np\n'), ((609, 622), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (616, 622), True, 'import numpy as np\n'), ((2713, 2744), 'numpy.sin', 'np.sin', (['(2 * np.pi * 4 * x / tau)'], {}), '(2 * np.pi * 4 * x / tau)\n', (2719, 2744), True, 'import numpy as np\n'), ((4102, 4148), 'numpy.sum', 'np.sum', (['(x[i - offset:i + offset + 1] * weights)'], {}), '(x[i - offset:i + offset + 1] * weights)\n', (4108, 4148), True, 'import numpy as np\n'), ((1357, 1397), 'numpy.exp', 'np.exp', (['(-(x - cen) ** 2 / (2 * wid ** 2))'], {}), '(-(x - cen) ** 2 / (2 * wid ** 2))\n', (1363, 1397), True, 'import numpy as np\n'), ((2098, 2138), 'numpy.exp', 'np.exp', (['(-(x - cen) ** 2 / (2 * wid ** 2))'], {}), '(-(x - cen) ** 2 / (2 * wid ** 2))\n', (2104, 2138), True, 'import numpy as np\n'), ((2199, 2232), 'numpy.sin', 'np.sin', (['(2 * np.pi * x / tau + phi)'], {}), '(2 * np.pi * x / tau + phi)\n', 
(2205, 2232), True, 'import numpy as np\n'), ((3015, 3039), 'numpy.sinc', 'np.sinc', (['(4 * x / tau_acf)'], {}), '(4 * x / tau_acf)\n', (3022, 3039), True, 'import numpy as np\n'), ((4214, 4264), 'numpy.sum', 'np.sum', (['(x[0:i + offset + 1] * weights[offset - i:])'], {}), '(x[0:i + offset + 1] * weights[offset - i:])\n', (4220, 4264), True, 'import numpy as np\n'), ((4259, 4287), 'numpy.sum', 'np.sum', (['weights[offset - i:]'], {}), '(weights[offset - i:])\n', (4265, 4287), True, 'import numpy as np\n'), ((4352, 4407), 'numpy.sum', 'np.sum', (['(x[i - offset:] * weights[0:offset + (lend - i)])'], {}), '(x[i - offset:] * weights[0:offset + (lend - i)])\n', (4358, 4407), True, 'import numpy as np\n'), ((4402, 4440), 'numpy.sum', 'np.sum', (['weights[0:offset + (lend - i)]'], {}), '(weights[0:offset + (lend - i)])\n', (4408, 4440), True, 'import numpy as np\n'), ((2333, 2347), 'numpy.exp', 'np.exp', (['(-x * u)'], {}), '(-x * u)\n', (2339, 2347), True, 'import numpy as np\n'), ((1328, 1346), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1335, 1346), True, 'import numpy as np\n'), ((2069, 2087), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2076, 2087), True, 'import numpy as np\n')] |
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""
Use the jpeg_ls (CharPyLS) python package to decode pixel transfer syntaxes.
"""
try:
import numpy
HAVE_NP = True
except ImportError:
HAVE_NP = False
try:
import jpeg_ls
HAVE_JPEGLS = True
except ImportError:
HAVE_JPEGLS = False
import pydicom.encaps
from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness
import pydicom.uid
HANDLER_NAME = 'JPEG-LS'
DEPENDENCIES = {
'numpy': ('http://www.numpy.org/', 'NumPy'),
'jpeg_ls': ('https://github.com/Who8MyLunch/CharPyLS', 'CharPyLS'),
}
SUPPORTED_TRANSFER_SYNTAXES = [
pydicom.uid.JPEGLSLossless,
pydicom.uid.JPEGLSLossy,
]
def is_available():
"""Return True if the handler has its dependencies met."""
return HAVE_NP and HAVE_JPEGLS
def needs_to_convert_to_RGB(dicom_dataset):
return False
def should_change_PhotometricInterpretation_to_RGB(dicom_dataset):
should_change = dicom_dataset.SamplesPerPixel == 3
return False
def supports_transfer_syntax(transfer_syntax):
"""
Returns
-------
bool
True if this pixel data handler might support this transfer syntax.
False to prevent any attempt to try to use this handler
to decode the given transfer syntax
"""
return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES
def get_pixeldata(dicom_dataset):
"""
Use the jpeg_ls package to decode the PixelData attribute
Returns
-------
numpy.ndarray
A correctly sized (but not shaped) numpy array
of the entire data volume
Raises
------
ImportError
if the required packages are not available
NotImplementedError
if the transfer syntax is not supported
TypeError
if the pixel data type is unsupported
"""
if (dicom_dataset.file_meta.TransferSyntaxUID
not in SUPPORTED_TRANSFER_SYNTAXES):
msg = ("The jpeg_ls does not support "
"this transfer syntax {0}.".format(
dicom_dataset.file_meta.TransferSyntaxUID.name))
raise NotImplementedError(msg)
if not HAVE_JPEGLS:
msg = ("The jpeg_ls package is required to use pixel_array "
"for this transfer syntax {0}, and jpeg_ls could not "
"be imported.".format(
dicom_dataset.file_meta.TransferSyntaxUID.name))
raise ImportError(msg)
# Make NumPy format code, e.g. "uint16", "int32" etc
# from two pieces of info:
# dicom_dataset.PixelRepresentation -- 0 for unsigned, 1 for signed;
# dicom_dataset.BitsAllocated -- 8, 16, or 32
if dicom_dataset.PixelRepresentation == 0:
format_str = 'uint{}'.format(dicom_dataset.BitsAllocated)
elif dicom_dataset.PixelRepresentation == 1:
format_str = 'int{}'.format(dicom_dataset.BitsAllocated)
else:
format_str = 'bad_pixel_representation'
try:
numpy_format = numpy.dtype(format_str)
except TypeError:
msg = ("Data type not understood by NumPy: "
"format='{}', PixelRepresentation={}, "
"BitsAllocated={}".format(
format_str,
dicom_dataset.PixelRepresentation,
dicom_dataset.BitsAllocated))
raise TypeError(msg)
numpy_format = dtype_corrected_for_endianness(
dicom_dataset.is_little_endian, numpy_format)
# decompress here
UncompressedPixelData = bytearray()
if ('NumberOfFrames' in dicom_dataset and
dicom_dataset.NumberOfFrames > 1):
# multiple compressed frames
CompressedPixelDataSeq = pydicom.encaps.decode_data_sequence(
dicom_dataset.PixelData)
# print len(CompressedPixelDataSeq)
for frame in CompressedPixelDataSeq:
decompressed_image = jpeg_ls.decode(
numpy.frombuffer(frame, dtype=numpy.uint8))
UncompressedPixelData.extend(decompressed_image.tobytes())
else:
# single compressed frame
CompressedPixelData = pydicom.encaps.defragment_data(
dicom_dataset.PixelData)
decompressed_image = jpeg_ls.decode(
numpy.frombuffer(CompressedPixelData, dtype=numpy.uint8))
UncompressedPixelData.extend(decompressed_image.tobytes())
pixel_array = numpy.frombuffer(UncompressedPixelData, numpy_format)
if should_change_PhotometricInterpretation_to_RGB(dicom_dataset):
dicom_dataset.PhotometricInterpretation = "RGB"
return pixel_array
| [
"numpy.frombuffer",
"numpy.dtype",
"pydicom.pixel_data_handlers.util.dtype_corrected_for_endianness"
] | [((3358, 3434), 'pydicom.pixel_data_handlers.util.dtype_corrected_for_endianness', 'dtype_corrected_for_endianness', (['dicom_dataset.is_little_endian', 'numpy_format'], {}), '(dicom_dataset.is_little_endian, numpy_format)\n', (3388, 3434), False, 'from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness\n'), ((4357, 4410), 'numpy.frombuffer', 'numpy.frombuffer', (['UncompressedPixelData', 'numpy_format'], {}), '(UncompressedPixelData, numpy_format)\n', (4373, 4410), False, 'import numpy\n'), ((2979, 3002), 'numpy.dtype', 'numpy.dtype', (['format_str'], {}), '(format_str)\n', (2990, 3002), False, 'import numpy\n'), ((4213, 4269), 'numpy.frombuffer', 'numpy.frombuffer', (['CompressedPixelData'], {'dtype': 'numpy.uint8'}), '(CompressedPixelData, dtype=numpy.uint8)\n', (4229, 4269), False, 'import numpy\n'), ((3898, 3940), 'numpy.frombuffer', 'numpy.frombuffer', (['frame'], {'dtype': 'numpy.uint8'}), '(frame, dtype=numpy.uint8)\n', (3914, 3940), False, 'import numpy\n')] |
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# test
outName = 'Silica64-Y8090-00955-opt1'
master = basins.loadMaster(outName)
dataName = master['dataName']
wqData = waterQuality.DataModelWQ(dataName)
trainset = '00955-Y8090'
testset = '00955-Y0010'
if master['varY'] is not None:
plotVar = ['00060', '00955']
else:
plotVar = ['00955']
# point test
yP1, ycP1 = basins.testModel(outName, trainset, wqData=wqData)
errMatC1 = wqData.errBySiteC(ycP1, subset=trainset, varC=master['varYC'])
if master['varY'] is not None:
errMatQ1 = wqData.errBySiteQ(yP1, subset=trainset, varQ=master['varY'])
yP2, ycP2 = basins.testModel(outName, testset, wqData=wqData)
errMatC2 = wqData.errBySiteC(ycP2, subset=testset, varC=master['varYC'])
if master['varY'] is not None:
errMatQ2 = wqData.errBySiteQ(yP2, subset=testset, varQ=master['varY'])
# box
dataBox = list()
for k in range(2):
for var in plotVar:
if var == '00060':
temp = [errMatQ1[:, 0, k], errMatQ2[:, 0, k]]
else:
ic = master['varYC'].index(var)
temp = [errMatC1[:, ic, k], errMatC2[:, ic, k]]
dataBox.append(temp)
fig = figplot.boxPlot(dataBox, label1=['RMSE', 'Corr'], label2=[
'train', 'test'], sharey=False)
fig.show()
# seq test
siteNoLst = wqData.info['siteNo'].unique().tolist()
basins.testModelSeq(outName, siteNoLst, wqData=wqData)
# time series map
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
codePdf = usgs.codePdf
def funcMap():
nM = len(plotVar)
figM, axM = plt.subplots(nM, 1, figsize=(8, 6))
axM = np.array([axM]) if nM == 1 else axM
for k, var in enumerate(plotVar):
if var == '00060':
axplot.mapPoint(axM[k], lat, lon, errMatQ2[:, 0, 1], s=12)
axM[k].set_title('streamflow')
else:
ic = master['varYC'].index(var)
shortName = codePdf.loc[var]['shortName']
title = '{} {}'.format(shortName, var)
axplot.mapPoint(axM[k], lat, lon, errMatC2[:, ic, 1], s=12)
axM[k].set_title(title)
figP, axP = plt.subplots(nM, 1, figsize=(8, 6))
axP = np.array([axP]) if nM == 1 else axP
return figM, axM, figP, axP, lon, lat
def funcPoint(iP, axP):
siteNo = siteNoLst[iP]
dfPred, dfObs = basins.loadSeq(outName, siteNo)
t = dfPred.index.values.astype(np.datetime64)
tBar = np.datetime64('2000-01-01')
info1 = wqData.subsetInfo(trainset)
info2 = wqData.subsetInfo(testset)
ind1 = info1[info1['siteNo'] == siteNo].index
ind2 = info2[info2['siteNo'] == siteNo].index
t1 = info1['date'][ind1].values.astype(np.datetime64)
t2 = info2['date'][ind2].values.astype(np.datetime64)
tp = np.concatenate([t1, t2])
yp = np.concatenate([ycP1[ind1], ycP2[ind2]])
for k, var in enumerate(plotVar):
rmse, corr = waterQuality.calErrSeq(dfPred[var], dfObs[var])
tStr = '{}, rmse [{:.2f} {:.2f}], corr [{:.2f} {:.2f}]'.format(
siteNo, rmse[0], rmse[1], corr[0], corr[1])
if var == '00060':
styLst = '--'
title = 'streamflow '+tStr
axplot.plotTS(axP[k], t, [dfPred[var], dfObs[var]], tBar=tBar,
legLst=['LSTM', 'observation'], styLst=styLst, cLst='br')
axP[k].set_title(title)
else:
styLst = '-*'
shortName = codePdf.loc[var]['shortName']
title = shortName + ' ' + tStr
axplot.plotTS(axP[k], t, dfPred[var], tBar=tBar,
legLst=['LSTM-sequence'], styLst='-', cLst='b')
axplot.plotTS(axP[k], tp, yp, legLst=[
'LSTM-sample'], styLst='*', cLst='g')
axplot.plotTS(axP[k], t, dfObs[var],
legLst=['observation'], styLst='*', cLst='r')
axP[k].set_title(title)
importlib.reload(figplot)
figM, figP = figplot.clickMap(funcMap, funcPoint)
for ax in figP.axes:
ax.set_xlim(np.datetime64('2015-01-01'), np.datetime64('2020-01-01'))
figP.canvas.draw()
for ax in figP.axes:
ax.set_xlim(np.datetime64('1990-01-01'), np.datetime64('1995-01-01'))
figP.canvas.draw()
for ax in figP.axes:
ax.set_xlim(np.datetime64('1980-01-01'), np.datetime64('2020-01-01'))
figP.canvas.draw()
| [
"hydroDL.post.axplot.plotTS",
"hydroDL.app.waterQuality.calErrSeq",
"hydroDL.post.figplot.clickMap",
"numpy.datetime64",
"hydroDL.master.basins.testModel",
"hydroDL.app.waterQuality.DataModelWQ",
"hydroDL.master.basins.loadSeq",
"importlib.reload",
"hydroDL.master.basins.loadMaster",
"numpy.array"... | [((389, 415), 'hydroDL.master.basins.loadMaster', 'basins.loadMaster', (['outName'], {}), '(outName)\n', (406, 415), False, 'from hydroDL.master import basins\n'), ((455, 489), 'hydroDL.app.waterQuality.DataModelWQ', 'waterQuality.DataModelWQ', (['dataName'], {}), '(dataName)\n', (479, 489), False, 'from hydroDL.app import waterQuality\n'), ((659, 709), 'hydroDL.master.basins.testModel', 'basins.testModel', (['outName', 'trainset'], {'wqData': 'wqData'}), '(outName, trainset, wqData=wqData)\n', (675, 709), False, 'from hydroDL.master import basins\n'), ((903, 952), 'hydroDL.master.basins.testModel', 'basins.testModel', (['outName', 'testset'], {'wqData': 'wqData'}), '(outName, testset, wqData=wqData)\n', (919, 952), False, 'from hydroDL.master import basins\n'), ((1441, 1534), 'hydroDL.post.figplot.boxPlot', 'figplot.boxPlot', (['dataBox'], {'label1': "['RMSE', 'Corr']", 'label2': "['train', 'test']", 'sharey': '(False)'}), "(dataBox, label1=['RMSE', 'Corr'], label2=['train', 'test'],\n sharey=False)\n", (1456, 1534), False, 'from hydroDL.post import axplot, figplot\n'), ((1629, 1683), 'hydroDL.master.basins.testModelSeq', 'basins.testModelSeq', (['outName', 'siteNoLst'], {'wqData': 'wqData'}), '(outName, siteNoLst, wqData=wqData)\n', (1648, 1683), False, 'from hydroDL.master import basins\n'), ((1711, 1780), 'hydroDL.data.gageII.readData', 'gageII.readData', ([], {'varLst': "['LAT_GAGE', 'LNG_GAGE']", 'siteNoLst': 'siteNoLst'}), "(varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)\n", (1726, 1780), False, 'from hydroDL.data import gageII, usgs\n'), ((4231, 4256), 'importlib.reload', 'importlib.reload', (['figplot'], {}), '(figplot)\n', (4247, 4256), False, 'import importlib\n'), ((4270, 4306), 'hydroDL.post.figplot.clickMap', 'figplot.clickMap', (['funcMap', 'funcPoint'], {}), '(funcMap, funcPoint)\n', (4286, 4306), False, 'from hydroDL.post import axplot, figplot\n'), ((1926, 1961), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nM', 
'(1)'], {'figsize': '(8, 6)'}), '(nM, 1, figsize=(8, 6))\n', (1938, 1961), True, 'import matplotlib.pyplot as plt\n'), ((2474, 2509), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nM', '(1)'], {'figsize': '(8, 6)'}), '(nM, 1, figsize=(8, 6))\n', (2486, 2509), True, 'import matplotlib.pyplot as plt\n'), ((2671, 2702), 'hydroDL.master.basins.loadSeq', 'basins.loadSeq', (['outName', 'siteNo'], {}), '(outName, siteNo)\n', (2685, 2702), False, 'from hydroDL.master import basins\n'), ((2764, 2791), 'numpy.datetime64', 'np.datetime64', (['"""2000-01-01"""'], {}), "('2000-01-01')\n", (2777, 2791), True, 'import numpy as np\n'), ((3097, 3121), 'numpy.concatenate', 'np.concatenate', (['[t1, t2]'], {}), '([t1, t2])\n', (3111, 3121), True, 'import numpy as np\n'), ((3131, 3171), 'numpy.concatenate', 'np.concatenate', (['[ycP1[ind1], ycP2[ind2]]'], {}), '([ycP1[ind1], ycP2[ind2]])\n', (3145, 3171), True, 'import numpy as np\n'), ((1972, 1987), 'numpy.array', 'np.array', (['[axM]'], {}), '([axM])\n', (1980, 1987), True, 'import numpy as np\n'), ((2520, 2535), 'numpy.array', 'np.array', (['[axP]'], {}), '([axP])\n', (2528, 2535), True, 'import numpy as np\n'), ((3232, 3279), 'hydroDL.app.waterQuality.calErrSeq', 'waterQuality.calErrSeq', (['dfPred[var]', 'dfObs[var]'], {}), '(dfPred[var], dfObs[var])\n', (3254, 3279), False, 'from hydroDL.app import waterQuality\n'), ((4345, 4372), 'numpy.datetime64', 'np.datetime64', (['"""2015-01-01"""'], {}), "('2015-01-01')\n", (4358, 4372), True, 'import numpy as np\n'), ((4374, 4401), 'numpy.datetime64', 'np.datetime64', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (4387, 4401), True, 'import numpy as np\n'), ((4460, 4487), 'numpy.datetime64', 'np.datetime64', (['"""1990-01-01"""'], {}), "('1990-01-01')\n", (4473, 4487), True, 'import numpy as np\n'), ((4489, 4516), 'numpy.datetime64', 'np.datetime64', (['"""1995-01-01"""'], {}), "('1995-01-01')\n", (4502, 4516), True, 'import numpy as np\n'), ((4575, 4602), 'numpy.datetime64', 
'np.datetime64', (['"""1980-01-01"""'], {}), "('1980-01-01')\n", (4588, 4602), True, 'import numpy as np\n'), ((4604, 4631), 'numpy.datetime64', 'np.datetime64', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (4617, 4631), True, 'import numpy as np\n'), ((2085, 2143), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['axM[k]', 'lat', 'lon', 'errMatQ2[:, 0, 1]'], {'s': '(12)'}), '(axM[k], lat, lon, errMatQ2[:, 0, 1], s=12)\n', (2100, 2143), False, 'from hydroDL.post import axplot, figplot\n'), ((2362, 2421), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['axM[k]', 'lat', 'lon', 'errMatC2[:, ic, 1]'], {'s': '(12)'}), '(axM[k], lat, lon, errMatC2[:, ic, 1], s=12)\n', (2377, 2421), False, 'from hydroDL.post import axplot, figplot\n'), ((3512, 3637), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP[k]', 't', '[dfPred[var], dfObs[var]]'], {'tBar': 'tBar', 'legLst': "['LSTM', 'observation']", 'styLst': 'styLst', 'cLst': '"""br"""'}), "(axP[k], t, [dfPred[var], dfObs[var]], tBar=tBar, legLst=[\n 'LSTM', 'observation'], styLst=styLst, cLst='br')\n", (3525, 3637), False, 'from hydroDL.post import axplot, figplot\n'), ((3844, 3944), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP[k]', 't', 'dfPred[var]'], {'tBar': 'tBar', 'legLst': "['LSTM-sequence']", 'styLst': '"""-"""', 'cLst': '"""b"""'}), "(axP[k], t, dfPred[var], tBar=tBar, legLst=['LSTM-sequence'],\n styLst='-', cLst='b')\n", (3857, 3944), False, 'from hydroDL.post import axplot, figplot\n'), ((3979, 4054), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP[k]', 'tp', 'yp'], {'legLst': "['LSTM-sample']", 'styLst': '"""*"""', 'cLst': '"""g"""'}), "(axP[k], tp, yp, legLst=['LSTM-sample'], styLst='*', cLst='g')\n", (3992, 4054), False, 'from hydroDL.post import axplot, figplot\n'), ((4084, 4170), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP[k]', 't', 'dfObs[var]'], {'legLst': "['observation']", 'styLst': '"""*"""', 'cLst': '"""r"""'}), "(axP[k], t, dfObs[var], legLst=['observation'], 
styLst='*',\n cLst='r')\n", (4097, 4170), False, 'from hydroDL.post import axplot, figplot\n')] |
import os
import typing
import numpy as np
from aocd import get_data
from dotenv import load_dotenv
from utils import timeit
def get_session() -> str:
    """Return the AoC session cookie, loading a .env file first if present."""
    load_dotenv()
    return os.environ.get('SESSION_COOKIE')
def get_list(data: str = None, day: int = None, year: int = None) -> typing.List:
    """Parse comma-separated puzzle input into a list of ints.

    When no explicit data string is given, the input is fetched from
    Advent of Code using the session cookie from the environment.
    """
    raw = data if data else get_data(get_session(), day=day, year=year)
    return [int(token) for token in raw.split(',')]
# This method works for 80 days and does not scale for 256 days
@timeit
def part1(aoc_input: typing.List, days: int) -> int:
    """Naively simulate every fish individually and count the final school."""
    final_population = flash(aoc_input, [], days)
    return int(len(final_population))
def flash(aoc_input: list, aoc_input_copy: typing.List, days: int) -> typing.List:
    """Advance the lanternfish timers day by day and return the final list.

    A fish at timer 0 resets to 6 and spawns a new fish at timer 8;
    every other fish simply counts down by one.
    """
    result = aoc_input_copy
    current = aoc_input
    for _ in range(days):
        result = []
        for timer in current:
            if timer == 0:
                result.extend((6, 8))
            else:
                result.append(timer - 1)
        current = result
    return result
@timeit
def part2(aoc_input: typing.List, days: int) -> int:
    """Count fish after `days` days using a 9-slot timer histogram.

    Scales to 256 days because only nine counters are tracked instead of
    one list entry per fish.
    """
    counts = np.zeros(9, dtype=np.float64)
    for timer in aoc_input:
        counts[timer] += 1
    for _ in range(days):
        # Rotating left ages every cohort; the old slot-0 count lands in
        # slot 8 (the newborns).  The parents restart at timer 6.
        counts = np.roll(counts, -1)
        counts[6] += counts[8]
    return int(np.sum(counts))
if __name__ == '__main__':
    # Day 6, 2021: part 1 simulates each fish individually (80 days);
    # part 2 uses the histogram approach so 256 days stays tractable.
    # Input is fetched from the AoC API (network access required).
    print(f'Part 1: {part1(get_list(data=None, day=6, year=2021), 80)}')
    print(f'Part 2: {part2(get_list(data=None, day=6, year=2021), 256)}')
| [
"numpy.sum",
"numpy.roll",
"numpy.zeros",
"dotenv.load_dotenv",
"os.getenv"
] | [((159, 172), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (170, 172), False, 'from dotenv import load_dotenv\n'), ((184, 211), 'os.getenv', 'os.getenv', (['"""SESSION_COOKIE"""'], {}), "('SESSION_COOKIE')\n", (193, 211), False, 'import os\n'), ((1342, 1371), 'numpy.zeros', 'np.zeros', (['(9)'], {'dtype': 'np.float64'}), '(9, dtype=np.float64)\n', (1350, 1371), True, 'import numpy as np\n'), ((1513, 1534), 'numpy.roll', 'np.roll', (['np_array', '(-1)'], {}), '(np_array, -1)\n', (1520, 1534), True, 'import numpy as np\n'), ((1661, 1677), 'numpy.sum', 'np.sum', (['np_array'], {}), '(np_array)\n', (1667, 1677), True, 'import numpy as np\n')] |
import pickle
from keras.models import load_model
from sklearn.preprocessing import MultiLabelBinarizer
from gensim import models
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
import numpy as np
import subprocess
# Download the slimmed word2vec binary if it is not already present.
subprocess.call(['sh', 'src/models/get_word2vec.sh'])
# Mapping from TMDB genre id to human-readable genre name.
with open('data/processed/Genredict.pkl','rb') as f:
    Genre_ID_to_name=pickle.load(f)
# Trained genre classifier operating on mean word vectors of an overview.
model_textual = load_model('models/overview_nn.h5')
w2v_model = models.KeyedVectors.load_word2vec_format('data/external/GoogleNews-vectors-negative300-SLIM.bin', binary=True)
tokenizer = RegexpTokenizer(r'\w+')  # keeps word characters only, drops punctuation
en_stop = get_stop_words('en')
# MultiLabelBinarizer fitted at training time; maps genre-id tuples <-> one-hot rows.
with open('models/mlb.pkl','rb') as f:
    mlb=pickle.load(f)
genre_list=sorted(list(Genre_ID_to_name.keys()))
def nn_predict(input_string):
    """Predict the top-3 genre names for a free-text plot overview.

    The overview is tokenized, stop words removed, and the remaining
    in-vocabulary tokens averaged into a single 300-d word vector that is
    fed to the trained classifier.
    """
    mean_wordvec = np.zeros((1, 300))
    raw_tokens = tokenizer.tokenize(input_string)
    filtered_tokens = [tok for tok in raw_tokens if tok not in en_stop]
    in_vocab = 0
    vector_sum = 0
    for token in filtered_tokens:
        word = token.lower()
        if word in w2v_model.vocab:
            in_vocab += 1
            vector_sum += w2v_model[word]
    if in_vocab != 0:
        mean_wordvec[0] = vector_sum / float(in_vocab)
    pred_array = model_textual.predict(mean_wordvec)
    # Highest-probability three class indices.
    top3 = np.argsort(pred_array[0])[::-1][:3]
    one_hot = np.array([[1 if k in top3 else 0 for k in range(len(pred_array[0]))]])
    predicted_genre_ids = mlb.inverse_transform(one_hot)[0]
    return list(map(Genre_ID_to_name.get, predicted_genre_ids))
| [
"keras.models.load_model",
"nltk.tokenize.RegexpTokenizer",
"stop_words.get_stop_words",
"numpy.zeros",
"numpy.argsort",
"pickle.load",
"subprocess.call",
"gensim.models.KeyedVectors.load_word2vec_format"
] | [((248, 301), 'subprocess.call', 'subprocess.call', (["['sh', 'src/models/get_word2vec.sh']"], {}), "(['sh', 'src/models/get_word2vec.sh'])\n", (263, 301), False, 'import subprocess\n'), ((413, 448), 'keras.models.load_model', 'load_model', (['"""models/overview_nn.h5"""'], {}), "('models/overview_nn.h5')\n", (423, 448), False, 'from keras.models import load_model\n'), ((461, 576), 'gensim.models.KeyedVectors.load_word2vec_format', 'models.KeyedVectors.load_word2vec_format', (['"""data/external/GoogleNews-vectors-negative300-SLIM.bin"""'], {'binary': '(True)'}), "(\n 'data/external/GoogleNews-vectors-negative300-SLIM.bin', binary=True)\n", (501, 576), False, 'from gensim import models\n'), ((584, 607), 'nltk.tokenize.RegexpTokenizer', 'RegexpTokenizer', (['"""\\\\w+"""'], {}), "('\\\\w+')\n", (599, 607), False, 'from nltk.tokenize import RegexpTokenizer\n'), ((618, 638), 'stop_words.get_stop_words', 'get_stop_words', (['"""en"""'], {}), "('en')\n", (632, 638), False, 'from stop_words import get_stop_words\n'), ((377, 391), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (388, 391), False, 'import pickle\n'), ((686, 700), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (697, 700), False, 'import pickle\n'), ((806, 824), 'numpy.zeros', 'np.zeros', (['(1, 300)'], {}), '((1, 300))\n', (814, 824), True, 'import numpy as np\n'), ((1257, 1282), 'numpy.argsort', 'np.argsort', (['pred_array[0]'], {}), '(pred_array[0])\n', (1267, 1282), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 02:27:29 2018
@author: james
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 15:00:40 2018
@author: JamesChiou
"""
import os
import random
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
import math
import matplotlib.pyplot as plt
# Define dataset class
class ImageDataset(Dataset):
    """MNIST-style 28x28 image dataset backed by a CSV file.

    A 'label' column marks the file as training data; without it the file
    is treated as test data (first column is skipped either way).
    """

    def __init__(self, file_path, transform = None):
        frame = pd.read_csv(file_path)
        self.is_train = 'label' in frame.columns
        # Pixels -> (N, 28, 28, 1) uint8 images; column 0 (label/id) dropped.
        self.X = frame.iloc[:, 1:].values.reshape((-1, 28, 28)).astype(np.uint8)[:, :, :, None]
        self.y = torch.from_numpy(frame.iloc[:, 0].values) if self.is_train else None
        self.transform = transform

    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, idx):
        sample = self.transform(self.X[idx])
        return (sample, self.y[idx]) if self.y is not None else sample
# Define WideResNet network
##############################
class BasicBlock(nn.Module):
    """Pre-activation residual block: (BN -> ReLU -> 3x3 conv) twice.

    When in/out channel counts differ, a strided 1x1 conv projects the
    pre-activated input for the shortcut; otherwise the identity is used.
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                          stride=stride, padding=0, bias=False)

    def forward(self, x):
        if self.equalInOut:
            preact = self.relu1(self.bn1(x))
            shortcut = x                        # identity shortcut
        else:
            x = self.relu1(self.bn1(x))         # shared pre-activation
            preact = x
            shortcut = self.convShortcut(x)     # projection shortcut
        out = self.relu2(self.bn2(self.conv1(preact)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(shortcut, out)
class NetworkBlock(nn.Module):
    """A sequential stack of `nb_layers` residual blocks.

    Only the first block may change the channel count and stride; the
    remaining ones map out_planes -> out_planes at stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        stages = []
        for idx in range(nb_layers):
            if idx == 0:
                stages.append(block(in_planes, out_planes, stride, dropRate))
            else:
                stages.append(block(out_planes, out_planes, 1, dropRate))
        return nn.Sequential(*stages)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor); depth must satisfy depth = 6n + 4.

    Expects single-channel 28x28 input (the final 7x7 average pool assumes
    two stride-2 stages after a 28x28 feature map).
    """

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        widths = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # Stem conv before any residual block (1 input channel).
        self.conv1 = nn.Conv2d(1, widths[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # Three stages; the 2nd and 3rd halve the spatial resolution.
        self.block1 = NetworkBlock(n, widths[0], widths[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, widths[1], widths[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, widths[2], widths[3], block, 2, dropRate)
        # Global average pooling and classifier head.
        self.bn1 = nn.BatchNorm2d(widths[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(widths[3], num_classes)
        self.nChannels = widths[3]
        self._reset_parameters()

    def _reset_parameters(self):
        """He init for convs, affine identity for BN, zero bias for the head."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = (module.kernel_size[0] * module.kernel_size[1]
                           * module.out_channels)
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.bias.data.zero_()

    def forward(self, x):
        out = self.block3(self.block2(self.block1(self.conv1(x))))
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 7)
        return self.fc(out.view(-1, self.nChannels))
def wrn(**kwargs):
    """
    Constructs a Wide Residual Networks.
    All keyword arguments are forwarded to WideResNet unchanged.
    """
    return WideResNet(**kwargs)
# Model save dir
if not os.path.isdir('models'):
    os.mkdir('models')
modeldir = 'models'  # checkpoints are expected/written under this directory
# Use cuda or cpu
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')
# Fix random seed for reproducibility
randomSeed = 2018
random.seed(randomSeed)
torch.manual_seed(randomSeed)
np.random.seed(randomSeed)
# Best valid accuracy
best_acc = 0
best_epoch = 0
# Training parameters
n_epoches = 300
# Record
# NOTE(review): these history buffers look like leftovers from the
# training script; this inference/ensembling script never fills them.
losses = np.zeros((n_epoches))
valid_losses = np.zeros((int(n_epoches/2)))
valid_accuracy = np.zeros((int(n_epoches/2),2))
y_pred = []  # filled by main() with the ensemble's argmax predictions
def _predict_member(checkpoint_path, drop_rate, loaders):
    """Evaluate one ensemble member.

    Builds a WRN-28-10 with the dropout rate it was trained with, loads the
    checkpoint, runs it over every (test-time-augmented) dataloader in
    `loaders`, and returns the class probabilities averaged across loaders.
    """
    model = wrn(num_classes=10,
                depth=28,
                widen_factor=10,
                dropRate=drop_rate).to(device)
    best_para = torch.load(checkpoint_path)
    probs = [test(loader, model, None, device, best_para, best_epoch)
             for loader in loaders]
    return sum(probs) / len(probs)


def main():
    """Predict test labels with a 4-model WRN-28-10 ensemble plus
    horizontal-flip test-time augmentation, and write the submission CSV."""
    global best_acc, best_epoch, losses, valid_losses, valid_accuracy
    global n_epoches
    global y_pred
    # Two test-time augmentations: always-flipped and unmodified images.
    transform1 = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(p=1.),  # p=1 -> deterministic flip
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    transform2 = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    print('Start loading data')
    test_dataset1 = ImageDataset('data/test.csv', transform1)
    test_dataset2 = ImageDataset('data/test.csv', transform2)
    testloader1 = DataLoader(test_dataset1, batch_size=100,
                             num_workers=2, shuffle=False, pin_memory=True)
    testloader2 = DataLoader(test_dataset2, batch_size=100,
                             num_workers=2, shuffle=False, pin_memory=True)
    loaders = (testloader1, testloader2)
    y_pred_probs = []
    print('Start predict')
    # (checkpoint path, dropout rate used at training time) per member.
    # The previously quadruplicated load/predict code now lives in
    # _predict_member.
    members = [
        ('models/best/CC_epoch_258_nodropout.pth', 0.),
        ('models/best/CC_epoch_184_1211_9602.pth', 0.3),
        ('models/best/CC_epoch_192_1212_9580.pth', 0.3),
        ('models/best/CC_epoch_240_1217_9595_pseudolabel.pth', 0.3),
    ]
    for member_idx, (checkpoint_path, drop_rate) in enumerate(members, start=1):
        y_pred_probs.append(_predict_member(checkpoint_path, drop_rate, loaders))
        print('predict complete: %d' % member_idx)
    # Average the members, move to CPU, and take the argmax class per sample.
    y_pred_probs_mean = (sum(y_pred_probs) / len(y_pred_probs)).cpu().data.numpy()
    y_pred = np.argmax(y_pred_probs_mean, axis=1)
    sub = pd.DataFrame(y_pred, columns=['label'])
    sub.index.name = 'id'
    sub.to_csv('ensemble_4_hflip.csv', index=True)
def test(testloader, model, criterion, device, best_param, best_epoch):
    """Run inference over `testloader` with `best_param` loaded into `model`.

    Returns the softmax class probabilities of every batch concatenated
    along the batch dimension.  `criterion` and `best_epoch` are unused
    here; they are kept for call-site compatibility.
    """
    model.load_state_dict(best_param)
    model.eval()
    softmax = torch.nn.Softmax(dim=1)
    batch_probs = []
    for batch in testloader:
        images = batch.to(device)
        logits = model(images).detach()
        batch_probs.append(softmax(logits))
    return torch.cat(batch_probs)
'''
y_pred = y_pred.astype(int)
sub = pd.DataFrame(y_pred, columns=['label'])
sub.index.name='id'
sub.to_csv('answer_wrn_28_10_%d.csv'%best_epoch, index=True)
'''
if __name__ == '__main__':
    main()  # builds the 4-model ensemble submission CSV
"os.mkdir",
"numpy.random.seed",
"numpy.argmax",
"pandas.read_csv",
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.Softmax",
"torch.device",
"torchvision.transforms.Normalize",
"pandas.DataFrame",
"torch.utils.data.DataLoader",
"torch.nn.functional.avg_pool2d",
"torch.load",
"torchv... | [((5450, 5475), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5473, 5475), False, 'import torch\n'), ((5609, 5632), 'random.seed', 'random.seed', (['randomSeed'], {}), '(randomSeed)\n', (5620, 5632), False, 'import random\n'), ((5633, 5662), 'torch.manual_seed', 'torch.manual_seed', (['randomSeed'], {}), '(randomSeed)\n', (5650, 5662), False, 'import torch\n'), ((5663, 5689), 'numpy.random.seed', 'np.random.seed', (['randomSeed'], {}), '(randomSeed)\n', (5677, 5689), True, 'import numpy as np\n'), ((5799, 5818), 'numpy.zeros', 'np.zeros', (['n_epoches'], {}), '(n_epoches)\n', (5807, 5818), True, 'import numpy as np\n'), ((5360, 5383), 'os.path.isdir', 'os.path.isdir', (['"""models"""'], {}), "('models')\n", (5373, 5383), False, 'import os\n'), ((5389, 5407), 'os.mkdir', 'os.mkdir', (['"""models"""'], {}), "('models')\n", (5397, 5407), False, 'import os\n'), ((5490, 5512), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (5502, 5512), False, 'import torch\n'), ((5532, 5551), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5544, 5551), False, 'import torch\n'), ((6714, 6806), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset1'], {'batch_size': '(100)', 'num_workers': '(2)', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(test_dataset1, batch_size=100, num_workers=2, shuffle=False,\n pin_memory=True)\n', (6724, 6806), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6846, 6938), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset2'], {'batch_size': '(100)', 'num_workers': '(2)', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(test_dataset2, batch_size=100, num_workers=2, shuffle=False,\n pin_memory=True)\n', (6856, 6938), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((7169, 7221), 'torch.load', 'torch.load', (['"""models/best/CC_epoch_258_nodropout.pth"""'], {}), "('models/best/CC_epoch_258_nodropout.pth')\n", (7179, 7221), False, 
'import torch\n'), ((7648, 7700), 'torch.load', 'torch.load', (['"""models/best/CC_epoch_184_1211_9602.pth"""'], {}), "('models/best/CC_epoch_184_1211_9602.pth')\n", (7658, 7700), False, 'import torch\n'), ((8127, 8179), 'torch.load', 'torch.load', (['"""models/best/CC_epoch_192_1212_9580.pth"""'], {}), "('models/best/CC_epoch_192_1212_9580.pth')\n", (8137, 8179), False, 'import torch\n'), ((8606, 8670), 'torch.load', 'torch.load', (['"""models/best/CC_epoch_240_1217_9595_pseudolabel.pth"""'], {}), "('models/best/CC_epoch_240_1217_9595_pseudolabel.pth')\n", (8616, 8670), False, 'import torch\n'), ((9112, 9148), 'numpy.argmax', 'np.argmax', (['y_pred_probs_mean'], {'axis': '(1)'}), '(y_pred_probs_mean, axis=1)\n', (9121, 9148), True, 'import numpy as np\n'), ((9159, 9198), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {'columns': "['label']"}), "(y_pred, columns=['label'])\n", (9171, 9198), True, 'import pandas as pd\n'), ((9670, 9692), 'torch.cat', 'torch.cat', (['y_pred_prob'], {}), '(y_pred_prob)\n', (9679, 9692), False, 'import torch\n'), ((686, 708), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (697, 708), True, 'import pandas as pd\n'), ((1682, 1707), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_planes'], {}), '(in_planes)\n', (1696, 1707), True, 'import torch.nn as nn\n'), ((1729, 1750), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1736, 1750), True, 'import torch.nn as nn\n'), ((1772, 1861), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (1781, 1861), True, 'import torch.nn as nn\n'), ((1908, 1934), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {}), '(out_planes)\n', (1922, 1934), True, 'import torch.nn as nn\n'), ((1956, 1977), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), 
'(inplace=True)\n', (1963, 1977), True, 'import torch.nn as nn\n'), ((1999, 2085), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=\n False)\n', (2008, 2085), True, 'import torch.nn as nn\n'), ((3352, 3374), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3365, 3374), True, 'import torch.nn as nn\n'), ((3834, 3908), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nChannels[0]'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(1, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)\n', (3843, 3908), True, 'import torch.nn as nn\n'), ((4325, 4353), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nChannels[3]'], {}), '(nChannels[3])\n', (4339, 4353), True, 'import torch.nn as nn\n'), ((4374, 4395), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4381, 4395), True, 'import torch.nn as nn\n'), ((4414, 4450), 'torch.nn.Linear', 'nn.Linear', (['nChannels[3]', 'num_classes'], {}), '(nChannels[3], num_classes)\n', (4423, 4450), True, 'import torch.nn as nn\n'), ((5116, 5136), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out', '(7)'], {}), '(out, 7)\n', (5128, 5136), True, 'import torch.nn.functional as F\n'), ((999, 1037), 'torch.from_numpy', 'torch.from_numpy', (['df.iloc[:, 0].values'], {}), '(df.iloc[:, 0].values)\n', (1015, 1037), False, 'import torch\n'), ((2656, 2711), 'torch.nn.functional.dropout', 'F.dropout', (['out'], {'p': 'self.droprate', 'training': 'self.training'}), '(out, p=self.droprate, training=self.training)\n', (2665, 2711), True, 'import torch.nn.functional as F\n'), ((6127, 6150), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6148, 6150), False, 'from torchvision import transforms\n'), ((6168, 6206), 'torchvision.transforms.RandomHorizontalFlip', 
'transforms.RandomHorizontalFlip', ([], {'p': '(1.0)'}), '(p=1.0)\n', (6199, 6206), False, 'from torchvision import transforms\n'), ((6223, 6244), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6242, 6244), False, 'from torchvision import transforms\n'), ((6262, 6304), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (6282, 6304), False, 'from torchvision import transforms\n'), ((6362, 6385), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6383, 6385), False, 'from torchvision import transforms\n'), ((6404, 6425), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6423, 6425), False, 'from torchvision import transforms\n'), ((6444, 6486), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (6464, 6486), False, 'from torchvision import transforms\n'), ((9581, 9604), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (9597, 9604), False, 'import torch\n'), ((2251, 2340), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(0)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=1, stride=stride, padding=0,\n bias=False)\n', (2260, 2340), True, 'import torch.nn as nn\n'), ((4678, 4696), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (4687, 4696), False, 'import math\n')] |
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import imageio
def correct_line_shift(img: np.ndarray, value: int):
    """Correct the even-line shift of a bidirectionally scanned image.

    Every second row (0, 2, 4, ...) is rolled `value` pixels along the
    x axis.  The array is modified in place and also returned.
    """
    shifted_rows = np.roll(img[::2, :], value, axis=1)
    img[::2, :] = shifted_rows
    return img
def show_corrected_image(img: np.ndarray):
    """Display `img` in grayscale on a fresh figure with the axes hidden.

    Returns the (figure, axes) pair so the caller can tweak or save it.
    """
    fig, ax = plt.subplots()
    # Bug fix: previously this plotted the module-level global `corrected`
    # instead of the `img` argument, so it only worked by accident when
    # called from this script's __main__ block.
    ax.imshow(img, cmap="gray")
    ax.axis("off")
    return fig, ax
if __name__ == "__main__":
    fname = pathlib.Path("/data/Amit_QNAP/Calcium_FXS/x10/")
    # One averaged image per genotype folder; next() raises StopIteration
    # if the expected AVG*.png file is missing.
    images = [
        next((fname / "WT_674").glob("AVG*WT*.png")),
        next((fname / "FXS_614").glob("AVG*FXS*.png")),
    ]
    for image in images:
        data = imageio.imread(image)
        # Shift of 3 pixels — presumably calibrated for these acquisitions;
        # confirm per dataset before reuse.
        corrected = correct_line_shift(data, 3)
        fig, ax = show_corrected_image(corrected)
        fig.savefig(image.with_suffix(".corrected.png"), transparent=True, dpi=300)
| [
"imageio.imread",
"pathlib.Path",
"matplotlib.pyplot.subplots",
"numpy.roll"
] | [((201, 236), 'numpy.roll', 'np.roll', (['img[::2, :]', 'value'], {'axis': '(1)'}), '(img[::2, :], value, axis=1)\n', (208, 236), True, 'import numpy as np\n'), ((336, 350), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (348, 350), True, 'import matplotlib.pyplot as plt\n'), ((468, 516), 'pathlib.Path', 'pathlib.Path', (['"""/data/Amit_QNAP/Calcium_FXS/x10/"""'], {}), "('/data/Amit_QNAP/Calcium_FXS/x10/')\n", (480, 516), False, 'import pathlib\n'), ((688, 709), 'imageio.imread', 'imageio.imread', (['image'], {}), '(image)\n', (702, 709), False, 'import imageio\n')] |
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, Sequence
import numpy as np
import pandas as pd
import scipy.optimize
from invoice_net.parsers import (
parses_as_full_date,
parses_as_amount,
parses_as_invoice_number,
)
from invoice_net.data_handler import DataHandler
def __inner_filter_out_mistakes(
    tokens: Iterable[str],
    filter_func: Callable[[str], Any],
    ignore_exceptions: bool = False,
) -> np.ndarray:
    """Build a boolean mask: True where filter_func(token) is truthy.

    An exception raised by filter_func marks the token False when
    `ignore_exceptions` is set, and propagates otherwise.
    """
    flags = []
    for token in tokens:
        try:
            keep = bool(filter_func(token))
        except Exception:
            if not ignore_exceptions:
                raise
            keep = False
        flags.append(keep)
    return np.array(flags)
def _filter_out_mistakes(token_predictions: pd.DataFrame) -> pd.DataFrame:
    """Filter out obvious mistakes, like Foo bar -> date prediction.

    Each predicted class is validated with a class-specific parser; classes
    without a dedicated parser keep every token (identity validator).
    """
    validators: Dict[str, Callable[[str], Any]] = defaultdict(
        lambda: lambda x: x
    )
    validators["document_date"] = parses_as_full_date
    validators["document_id"] = parses_as_invoice_number
    validators["amount_total"] = parses_as_amount
    kept_groups = [
        group[__inner_filter_out_mistakes(group.word, validators[label])]
        for label, group in token_predictions.groupby("pred")
    ]
    return pd.concat(kept_groups)
def _get_token_predictions(
    predictions: np.ndarray, raw_text: Sequence[str], file_names: pd.Series
) -> pd.DataFrame:
    """Take model predictions and flatten to one record per confident token.

    A (sample, token, class) triple is kept when its probability exceeds
    0.5 and the token index falls inside the sample's actual text.
    """
    assert predictions.shape[0] == len(raw_text) == len(file_names), (
        f"Number of samples does not match; ({predictions.shape[0]}, "
        f"{len(raw_text)}, {len(file_names)})"
    )
    assert predictions.ndim == 3
    tokens = [line.split() for line in raw_text]
    records = []
    for sample_idx, token_idx, class_idx in zip(*np.where(predictions > 0.5)):
        # Skip predictions that point past the sample's real tokens (padding).
        if token_idx < len(tokens[sample_idx]):
            records.append(
                {
                    "word": tokens[sample_idx][token_idx],
                    "pred": class_idx,
                    "confidence": predictions[sample_idx, token_idx, class_idx],
                    "file_name": file_names.iloc[sample_idx],
                }
            )
    return pd.DataFrame.from_records(records)
def hungarian_prediction(token_predictions):
    """Assign one word per predicted class per file via minimum-cost matching.

    For each file a word-by-class cost table is built (missing pairs cost 1)
    and solved with the Hungarian algorithm.  Returns a DataFrame indexed by
    file name whose cells are (word, confidence) tuples, with the columns
    sorted alphabetically.
    """
    per_file = defaultdict(dict)
    for file_name, file_df in token_predictions.groupby("file_name"):
        cost_table = pd.pivot_table(
            file_df,
            values=["cost"],
            index=["word"],
            columns=["pred"],
            aggfunc=np.min,
            fill_value=1,
        )
        row_idxs, col_idxs = scipy.optimize.linear_sum_assignment(
            cost_table
        )
        for row, col in zip(row_idxs, col_idxs):
            class_name = cost_table.columns[col][1]
            word = cost_table.iloc[row].name
            confidence = 1 - cost_table.iloc[row, col]
            per_file[file_name][class_name] = (word, confidence)
    result = pd.DataFrame(per_file).transpose()
    return result.reindex(columns=sorted(result.columns))
def get_predicted_classes(
    predictions: np.ndarray, data_handler: DataHandler
) -> pd.DataFrame:
    """Reduce raw per-token model output to one label per class per file.

    Pipeline: flatten predictions to confident tokens, turn confidences
    into costs, map class indices to names, drop 'unclassified' tokens,
    discard tokens that cannot be parsed as their predicted class, then
    solve a minimum-cost assignment per file.
    """
    tokens = _get_token_predictions(
        predictions, data_handler.data.raw_text, data_handler.data.file_name
    )
    tokens["cost"] = 1 - tokens["confidence"]
    tokens["pred"] = data_handler.to_human_readable_classes(tokens.pred)
    unclassified_rows = tokens[tokens.pred == "unclassified"].index
    tokens.drop(unclassified_rows, inplace=True)
    tokens = _filter_out_mistakes(tokens)
    return hungarian_prediction(tokens)
| [
"pandas.DataFrame",
"pandas.pivot_table",
"collections.defaultdict",
"numpy.where",
"numpy.array",
"pandas.DataFrame.from_records",
"pandas.concat"
] | [((723, 737), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (731, 737), True, 'import numpy as np\n'), ((939, 972), 'collections.defaultdict', 'defaultdict', (['(lambda : lambda x: x)'], {}), '(lambda : lambda x: x)\n', (950, 972), False, 'from collections import defaultdict\n'), ((1434, 1451), 'pandas.concat', 'pd.concat', (['groups'], {}), '(groups)\n', (1443, 1451), True, 'import pandas as pd\n'), ((1893, 1920), 'numpy.where', 'np.where', (['(predictions > 0.5)'], {}), '(predictions > 0.5)\n', (1901, 1920), True, 'import numpy as np\n'), ((2468, 2498), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['tmp'], {}), '(tmp)\n', (2493, 2498), True, 'import pandas as pd\n'), ((2564, 2581), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (2575, 2581), False, 'from collections import defaultdict\n'), ((2673, 2776), 'pandas.pivot_table', 'pd.pivot_table', (['df'], {'values': "['cost']", 'index': "['word']", 'columns': "['pred']", 'aggfunc': 'np.min', 'fill_value': '(1)'}), "(df, values=['cost'], index=['word'], columns=['pred'],\n aggfunc=np.min, fill_value=1)\n", (2687, 2776), True, 'import pandas as pd\n'), ((3273, 3298), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {}), '(predictions)\n', (3285, 3298), True, 'import pandas as pd\n')] |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from akg.utils import kernel_exec as utils
import numpy as np
from akg.topi.util import get_const_tuple
from tests.common.test_op import prelu
from tests.common.tensorio import compare_tensor
from tests.common.gen_random import random_gaussian
def prelu_run(shape, w_shape, dtype, rtol, attrs):
    """Build, launch and verify the prelu kernel.

    Args:
        shape (tuple): shape of the input tensor.
        w_shape (tuple): shape of the slope/weight tensor.
        dtype (str): data type of input and weights (e.g. 'float16').
        rtol (float): relative tolerance used when comparing the result.
        attrs (dict): build attributes; may contain 'tuning' and
            'kernel_name' to enable tuning mode.

    Returns:
        In tuning mode, the built module (plus generated data when tuning
        is enabled); otherwise a tuple of
        (inputs, output, expect, comparison result).
    """
    if 'tuning' in attrs:
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(prelu.prelu, [shape, w_shape], [dtype, dtype],
                                  kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            expect, input_data, w_data = gen_data(dtype, shape, w_shape)
            return mod, expect, (input_data, w_data, expect)
        return mod
    mod = utils.op_build_test(prelu.prelu, [shape, w_shape], [dtype, dtype],
                              kernel_name='prelu', attrs=attrs)
    expect, input_data, w_data = gen_data(dtype, shape, w_shape)
    # The expect buffer also serves as the output placeholder for mod_launch.
    output = utils.mod_launch(mod, (input_data, w_data, expect), expect=expect)
    return (input_data, w_data), output, expect, compare_tensor(output, expect, rtol=rtol)
def gen_data(dtype, shape, w_shape):
    """Generate random input, weights and the expected prelu output.

    Args:
        dtype (str): data type of the generated arrays (e.g. 'float16').
        shape (tuple): shape of the input tensor; presumably NCHW, since
            the per-channel branch broadcasts weights as (1, C, 1, 1).
        w_shape (tuple): shape of the weight tensor; (1,) for a single
            shared slope, otherwise (C,) for per-channel slopes.

    Returns:
        tuple: (expect, input_data, w_data) where expect is the numpy
        reference result of prelu(input_data, w_data).
    """
    input_data = np.random.uniform(
        low=-1.0, high=1.0, size=get_const_tuple(shape)).astype(dtype)
    w_data = random_gaussian(w_shape, miu=1, sigma=2.0).astype(dtype.lower())
    if w_shape[0] == 1:
        # Single shared slope: prelu(x) = x if x > 0 else w * x.
        expect = input_data * (input_data > 0) + \
            input_data * (input_data < 0) * w_data[0]
    else:
        # Per-channel slopes: broadcast the weights over the input shape.
        w_broadcast = np.broadcast_to(w_data.reshape(1, w_shape[0], 1, 1), shape)
        expect = input_data * (input_data > 0) + \
            input_data * (input_data < 0) * w_broadcast
    return expect, input_data, w_data
| [
"tests.common.tensorio.compare_tensor",
"tests.common.gen_random.random_gaussian",
"akg.utils.kernel_exec.op_build_test",
"numpy.broadcast_to",
"akg.topi.util.get_const_tuple",
"akg.utils.kernel_exec.mod_launch"
] | [((1025, 1143), 'akg.utils.kernel_exec.op_build_test', 'utils.op_build_test', (['prelu.prelu', '[shape, w_shape]', '[dtype, dtype]'], {'kernel_name': 'kernel_name', 'attrs': 'attrs', 'tuning': 't'}), '(prelu.prelu, [shape, w_shape], [dtype, dtype],\n kernel_name=kernel_name, attrs=attrs, tuning=t)\n', (1044, 1143), True, 'from akg.utils import kernel_exec as utils\n'), ((1349, 1453), 'akg.utils.kernel_exec.op_build_test', 'utils.op_build_test', (['prelu.prelu', '[shape, w_shape]', '[dtype, dtype]'], {'kernel_name': '"""prelu"""', 'attrs': 'attrs'}), "(prelu.prelu, [shape, w_shape], [dtype, dtype],\n kernel_name='prelu', attrs=attrs)\n", (1368, 1453), True, 'from akg.utils import kernel_exec as utils\n'), ((1536, 1602), 'akg.utils.kernel_exec.mod_launch', 'utils.mod_launch', (['mod', '(input_data, w_data, expect)'], {'expect': 'expect'}), '(mod, (input_data, w_data, expect), expect=expect)\n', (1552, 1602), True, 'from akg.utils import kernel_exec as utils\n'), ((2689, 2722), 'numpy.broadcast_to', 'np.broadcast_to', (['w_reshape', 'shape'], {}), '(w_reshape, shape)\n', (2704, 2722), True, 'import numpy as np\n'), ((2042, 2083), 'tests.common.tensorio.compare_tensor', 'compare_tensor', (['output', 'expect'], {'rtol': 'rtol'}), '(output, expect, rtol=rtol)\n', (2056, 2083), False, 'from tests.common.tensorio import compare_tensor\n'), ((2317, 2359), 'tests.common.gen_random.random_gaussian', 'random_gaussian', (['w_shape'], {'miu': '(1)', 'sigma': '(2.0)'}), '(w_shape, miu=1, sigma=2.0)\n', (2332, 2359), False, 'from tests.common.gen_random import random_gaussian\n'), ((2266, 2288), 'akg.topi.util.get_const_tuple', 'get_const_tuple', (['shape'], {}), '(shape)\n', (2281, 2288), False, 'from akg.topi.util import get_const_tuple\n')] |
import gensim
import pandas as pd
import numpy as np
from gensim.models.wrappers import LdaMallet
import sys
""" This class creates a list of n recommendations that are the most similar to a list of paintings
liked by the user. It uses a Latent Dirichlet Allocation (LDA) approach, which expresses each painting as
a distribution over topics. Topics are themselves distributions over words. """
class QueryLdaModel:
    """Recommend the n paintings most similar to a set of liked paintings.

    Uses a Latent Dirichlet Allocation (LDA) topic model in which each
    painting is expressed as a distribution over topics.  Pairwise
    similarity between paintings is read from a precomputed
    cosine-similarity matrix.
    """

    # Paths to the pre-trained model and precomputed matrices.
    path_to_model = 'resources/models/lda.model'
    path_to_cos_mat = 'resources/matrices/lda/cosine-mat.npy'
    path_to_topdoc_mat = 'resources/matrices/lda/lda-output.npy'
    # NOTE(review): loaded at class-definition time, so this CSV must exist
    # when the module is imported.
    painting_df = pd.read_csv('resources/datasets/ng-dataset.csv')

    def __init__(self, painting_list, n):
        self.painting_list = painting_list  # painting IDs liked by the user
        self.n = n  # number of recommendations to return

    def load_model(self, path_to_model):
        """Load and return the trained LDA (Mallet) model."""
        lda_model = LdaMallet.load(path_to_model)
        return lda_model

    def load_cosine_matrix(self, path_to_cos_mat):
        """Load and return the precomputed cosine-similarity matrix."""
        cos_sim_mat = np.load(path_to_cos_mat)
        return cos_sim_mat

    def load_topdoc_matrix(self, path_to_topdoc_mat):
        """Load and return the topic-document matrix."""
        topdoc_mat = np.load(path_to_topdoc_mat)
        return topdoc_mat

    def pid2index(self, painting_df, painting_id):
        """From the painting ID, return the index of the painting in the dataframe.

        Input:
            painting_df: dataframe of paintings
            painting_id: painting ID (e.g. '000-02T4-0000')
        Output:
            index: the painting's index in the dataframe, or an error
            message string when the ID is unknown (kept for backward
            compatibility with existing callers).
        """
        try:
            index = painting_df.loc[painting_df['painting_id'] == painting_id].index[0]
        except IndexError:
            index = "Painting ID '" + painting_id + "' not found in dataset."
        return index

    def pidlist2indexlist(self, painting_df, painting_list):
        """From a list of painting IDs, return the indexes of the paintings.

        Input:
            painting_df: dataframe of paintings
            painting_list: list of painting IDs (e.g. ['000-02T4-0000', ...])
        Output:
            index_list: list of the paintings' indexes (e.g. [32, 45, ...])
        """
        index_list = [self.pid2index(painting_df, painting_id) for painting_id in painting_list]
        return index_list

    def index2pid(self, painting_df, index):
        """From the index, return the painting ID from the paintings dataframe.

        Input:
            painting_df: dataframe of paintings
            index: index of the painting in the dataframe
        Output:
            pid: the painting ID (e.g. '000-02T4-0000'), or an error
            message string when the index is out of range.
        """
        try:
            pid = painting_df.iloc[index].painting_id
        except IndexError:
            pid = "Index '" + index + "' not found in dataset."
        return pid

    def indexlist2pidlist(self, painting_df, index_list):
        """From a list of indexes, return the painting IDs.

        Input:
            painting_df: dataframe of paintings
            index_list: list of the painting indexes in the dataframe
        Output:
            pids_list: list of painting IDs
        """
        pids_list = [self.index2pid(painting_df, index) for index in index_list]
        return pids_list

    def recommend_paintings(self, painting_df, painting_list, cos_mat, n):
        """Recommend paintings for a user based on a list of liked items.

        Input:
            painting_df: dataframe of paintings
            painting_list: list of painting IDs liked by the user
            cos_mat: cosine-similarity matrix (left unmodified)
            n: number of recommendations wanted
        Output:
            the n painting IDs with the highest mean similarity
        """
        n_painting = len(painting_list)
        score_list = []
        index_list = self.pidlist2indexlist(painting_df, painting_list)
        for index in index_list:
            # Copy the row: cos_mat[index] is a view, so zeroing the
            # self-similarity below must not mutate the caller's matrix.
            score = cos_mat[index].copy()
            score[index] = 0
            score_list.append(score)
        # Average similarity of every painting to the liked set.
        score_list = np.sum(score_list, 0) / n_painting
        top_n_index = sorted(range(len(score_list)), key=lambda i: score_list[i], reverse=True)[:n]
        top_n_pids = self.indexlist2pidlist(painting_df, top_n_index)
        return top_n_pids

    def main(self):
        """Load artifacts and return the top-n recommendations."""
        # model and topdoc_mat are loaded but not used by the
        # recommendation step; kept for backward compatibility.
        model = self.load_model(self.path_to_model)
        cos_mat = self.load_cosine_matrix(self.path_to_cos_mat)
        topdoc_mat = self.load_topdoc_matrix(self.path_to_topdoc_mat)
        pids_list = self.recommend_paintings(self.painting_df, self.painting_list, cos_mat, self.n)
        return pids_list
if __name__ == "__main__":
    # Example usage: recommend 10 paintings similar to four liked works.
    lda = QueryLdaModel(['000-01DF-0000', '000-0168-0000', '000-019M-0000', '000-043Q-0000'], 10)
    pids_list = lda.main()
    print(pids_list)
| [
"pandas.read_csv",
"numpy.load",
"numpy.sum",
"gensim.models.wrappers.LdaMallet.load"
] | [((636, 684), 'pandas.read_csv', 'pd.read_csv', (['"""resources/datasets/ng-dataset.csv"""'], {}), "('resources/datasets/ng-dataset.csv')\n", (647, 684), True, 'import pandas as pd\n'), ((943, 972), 'gensim.models.wrappers.LdaMallet.load', 'LdaMallet.load', (['path_to_model'], {}), '(path_to_model)\n', (957, 972), False, 'from gensim.models.wrappers import LdaMallet\n'), ((1144, 1168), 'numpy.load', 'np.load', (['path_to_cos_mat'], {}), '(path_to_cos_mat)\n', (1151, 1168), True, 'import numpy as np\n'), ((1341, 1368), 'numpy.load', 'np.load', (['path_to_topdoc_mat'], {}), '(path_to_topdoc_mat)\n', (1348, 1368), True, 'import numpy as np\n'), ((4690, 4711), 'numpy.sum', 'np.sum', (['score_list', '(0)'], {}), '(score_list, 0)\n', (4696, 4711), True, 'import numpy as np\n')] |
#!/usr/bin/python
"""
Write random data to the data.txt file
takes in a universe size M, writes n
random digits to the universe M
"""
import argparse
import numpy as np
def main():
    """Parse CLI arguments and write n random integers from [0, M-1] to data.txt.

    Command-line options:
        --M: size of the universe; written values range over 0, ..., M-1.
        --n: number of integers to write.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--M', metavar='M', type=int, default=100,
                        help='The size of the universe - data written ranges from 0, ..., M-1')
    parser.add_argument('--n', metavar='n', type=int, default=300,
                        help='The number of integers to write to the data.txt file')
    args = parser.parse_args()

    # np.random.randint's `high` bound is exclusive, so pass M (not M-1)
    # to actually cover the documented range 0, ..., M-1.
    datastream = np.random.randint(0, args.M, args.n)

    # Context manager guarantees the file is closed even on error.
    with open('data.txt', 'w+') as file_object:
        for data in datastream:
            file_object.write(str(data) + ' ')
if __name__ == '__main__':
main() | [
"numpy.random.randint",
"argparse.ArgumentParser"
] | [((196, 221), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (219, 221), False, 'import argparse\n'), ((603, 633), 'numpy.random.randint', 'np.random.randint', (['(0)', '(M - 1)', 'n'], {}), '(0, M - 1, n)\n', (620, 633), True, 'import numpy as np\n')] |
import os
from collections import Counter
from itertools import islice, combinations
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
import numpy as np
import pandas as pd
import nltk
# Ensure the nltk POS tagger model is available; trigger a download on
# the first run (a missing model raises LookupError).
try:
    nltk.pos_tag(nltk.word_tokenize('This is a test sentence.'))
except LookupError:
    print('Installing nltk perceptron tagger.')
    nltk.download('averaged_perceptron_tagger')
class CalculateScores():
    """Calculates ngram scores for documents.
    Considered parts of speech are (see `nltk` docs for details)
    - Nouns: 'NN', 'NNS', 'NNP', 'NNPS'
    - Adjectives: 'JJ', 'JJR', 'JJS'
    All texts of the corpus are tokenized and POS tags are generated.
    A global dictionary of counts of different ngrams is built in `allNGrams`.
    The ngram relations of every text are listed in `outputDict`.
    Scoring counts occurrence of different words left and right of each single
    token in each ngram, weighted by ngram size.
    :param sourceDataframe: Dataframe containing the basic corpus
    :type sourceDataframe: class:`pandas.DataFrame`
    :param textColumn: Column name to use for ngram calculation
    :type textColumn: str
    :param pubIDColumn: Column name to use for publication identification (assumed to be unique)
    :type pubIDColumn: str
    :param yearColumn: Column name for temporal ordering publications, used during writing the scoring files
    :type yearColumn: str
    :param ngramsize: Maximum of considered ngrams (default: 5-gram)
    :type ngramsize: int
    """
    def __init__(
        self,
        sourceDataframe,
        textColumn="text",
        pubIDColumn="pubID",
        yearColumn='year',
        ngramsize=5,
        debug=False
    ):
        self.baseDF = sourceDataframe
        self.textCol = textColumn
        self.pubIDCol = pubIDColumn
        self.yearCol = yearColumn
        self.ngramEnd = ngramsize
        # pubID -> list of ngrams (later replaced by [ngram, score] pairs in run()).
        self.outputDict = {}
        # ngram size -> list of all ngrams of that size; populated by getTermPatterns().
        self.allNGrams = []
        self.counts = {}       # Counter of every ngram across the corpus
        self.corpussize = 1    # total number of ngrams seen
        self.uniqueNGrams = ()  # set of distinct ngrams
        self.debug = debug
    def getTermPatterns(self):
        """Create dictionaries of occurring ngrams."""
        allNGrams = {x: [] for x in range(1, self.ngramEnd + 1, 1)}
        # Only keep noun and adjective tokens for ngram building.
        pos_tag = ["NN", "NNS", "NNP", "NNPS", "JJ", "JJR", "JJS"]
        for _, row in tqdm(self.baseDF.iterrows()):
            tokens = nltk.word_tokenize(row[self.textCol])
            pos = nltk.pos_tag(tokens)
            nnJJtokens = [x[0].lower() for x in pos if x[1] in pos_tag]
            tempNGram = []
            # Collect 1- to ngramEnd-grams, both globally and per document.
            for i in range(1, self.ngramEnd + 1, 1):
                val = allNGrams[i]
                newngrams = list(nltk.ngrams(nnJJtokens, i))
                val.extend(newngrams)
                tempNGram.extend(newngrams)
                allNGrams.update({i: val})
            self.outputDict[row[self.pubIDCol]] = tempNGram
        self.allNGrams = allNGrams
        # Flatten all ngram lists into a single list for counting.
        allgrams = [x for y in [y for x, y in self.allNGrams.items()] for x in y]
        self.corpussize = len(allgrams)
        self.counts = Counter(allgrams)
        self.uniqueNGrams = set(allgrams)
    def getScore(self, target):
        """Calculate ngram score.

        For each token of `target`, counts the distinct bigrams in which
        the token appears on the left (rvalue) and on the right (lvalue).
        The products (lvalue + 1) * (rvalue + 1) are combined into
        1 / count(target) * prod(...) ** (1 / (2 * len(target))).
        Returns a single-entry dict {target: score}.
        """
        valueList = []
        for _, subgram in enumerate(target):
            contains = [x for x in self.allNGrams[2] if subgram in x]
            rvalue = len(set(x for x in contains if x[0] == subgram))
            lvalue = len(set(x for x in contains if x[1] == subgram))
            valueList.append((lvalue + 1) * (rvalue + 1))
        return {
            target: 1 / self.counts[target] * (np.prod(valueList)) ** (1 / (2.0 * len(target)))
        }
    def _calcBatch(self, batch):
        # Worker routine: score every ngram of one batch (run in a Pool).
        res = []
        for elem in tqdm(batch):
            res.append(self.getScore(elem))
        return res
    def run(self, write=False, outpath='./', recreate=False, limitCPUs=True):
        """Get score for all documents.

        Scores all unique ngrams in parallel, attaches the scores to each
        publication's ngram list and optionally writes one TSV per year.
        Returns (scores, outputDict).
        """
        scores = {}
        self.getTermPatterns()
        if self.debug is True:
            print(f'Found {len(self.uniqueNGrams)} unique {self.ngramEnd}-grams.')
        # Use a quarter of the cores by default to limit machine load.
        if limitCPUs is True:
            ncores = int(cpu_count() * 1 / 4)
        else:
            ncores = cpu_count() - 2
        pool = Pool(ncores)
        # Split the unique ngrams into one batch per core.
        chunk_size = int(len(self.uniqueNGrams) / ncores)
        batches = [
            list(self.uniqueNGrams)[i:i + chunk_size] for i in range(0, len(self.uniqueNGrams), chunk_size)
        ]
        ncoresResults = pool.map(self._calcBatch, batches)
        results = [x for y in ncoresResults for x in y]
        for elem in results:
            scores.update(elem)
        # Replace each publication's ngram list with [ngram, score] pairs.
        for key, val in self.outputDict.items():
            tmpList = []
            for elem in val:
                tmpList.append([elem, scores[elem]])
            self.outputDict.update({key: tmpList})
        if write is True:
            # One TSV file per year: pubID <TAB> ngram <TAB> score.
            for year, df in self.baseDF.groupby(self.yearCol):
                filePath = f'{outpath}{str(year)}.tsv'
                if os.path.isfile(filePath):
                    if recreate is False:
                        raise IOError(
                            f'File at {filePath} exists. Set recreate = True to rewrite file.'
                        )
                    if recreate is True:
                        os.remove(filePath)
                with open(filePath, 'a') as yearfile:
                    for pub in df[self.pubIDCol].unique():
                        for elem in self.outputDict[pub]:
                            yearfile.write(f'{pub}\t{elem[0]}\t{elem[1]}\n')
        return scores, self.outputDict
class LinksOverTime():
    """Create multilayer pajek files for corpus.
    To keep track of nodes over time, we need a global register of node names.
    This class takes care of this, by adding new keys of authors, papers or
    ngrams to the register.
    :param dataframe: Source dataframe containing metadata of texts (authors, publicationID and year)
    :type dataframe: class:`pandas.DataFrame`
    :param authorColumn: Column name for author information
    :param pubIDColumn: Column name to identify publications
    :param yearColumn: Column name with year information
    """
    def __init__(
        self,
        dataframe,
        authorColumn='authors',
        pubIDColumn="pubID",
        yearColumn='year',
        debug=False
    ):
        self.dataframe = dataframe
        self.authorCol = authorColumn
        self.pubIDCol = pubIDColumn
        self.yearColumn = yearColumn
        # Global node register: node name -> 1-based Pajek vertex number,
        # shared across all time slices so node IDs stay stable over time.
        self.nodeMap = {}
        self.debug = debug
    def _window(self, seq, n):
        """Return a sliding window (of width n) over data from the iterable.
        s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...
        """
        it = iter(seq)
        result = tuple(islice(it, n))
        if len(result) == n:
            yield result
        for elem in it:
            result = result[1:] + (elem,)
            yield result
    def _createSlices(self, windowsize):
        # Build sliding windows of consecutive years, e.g. windowsize=2
        # over [2000, 2001, 2002] -> [(2000, 2001), (2001, 2002)].
        slices = []
        years = sorted(self.dataframe[self.yearColumn].unique())
        for x in self._window(years, windowsize):
            slices.append(x)
        return slices
    def createNodeRegister(self, sl, scorePath, scoreLimit):
        """Create multilayer node register for time slice.

        Adds every author, publication and ngram (with score > scoreLimit)
        of the slice `sl` to `self.nodeMap`, assigning each new node the
        next free vertex number. Ngram scores are read from the per-year
        TSV files under `scorePath`.
        """
        if self.debug is True:
            print(f'Slice: {sl[0]}')
        dataframe = self.dataframe[self.dataframe[self.yearColumn].isin(sl)]
        # Load and concatenate the score TSVs of all years in the slice;
        # columns are 0: pubID, 1: ngram, 2: score (no header).
        dfNgramsList = [pd.read_csv(
            scorePath + str(slN) + '.tsv',
            sep='\t',
            header=None
        ) for slN in sl]
        ngramdataframe = pd.concat(dfNgramsList)
        ngramdataframe = ngramdataframe[ngramdataframe[2] > scoreLimit]
        # Authors are stored as a semicolon-separated string per row.
        authorList = [x for y in [x.split(';') for x in dataframe[self.authorCol].values] for x in y]
        authors = [x for x in set(authorList) if x]
        pubs = dataframe[self.pubIDCol].fillna('None').unique()
        ngrams = ngramdataframe[1].unique()
        for authorval in authors:
            if not self.nodeMap.values():
                # First node ever registered gets vertex number 1.
                self.nodeMap.update({authorval: 1})
            else:
                if authorval not in self.nodeMap.keys():
                    self.nodeMap.update(
                        {authorval: max(self.nodeMap.values()) + 1}
                    )
        for pubval in list(pubs):
            if pubval not in self.nodeMap.keys():
                self.nodeMap.update({pubval: max(self.nodeMap.values()) + 1})
        for ngramval in list(ngrams):
            if ngramval not in self.nodeMap.keys():
                self.nodeMap.update({ngramval: max(self.nodeMap.values()) + 1})
        if self.debug is True:
            print(
                '\tNumber of vertices (authors, papers and ngrams) {0}'.format(
                    max(self.nodeMap.values())
                )
            )
    def writeLinks(self, sl, scorePath, scoreLimit, outpath='./', recreate=False):
        """Write multilayer links to file in Pajek format.

        Produces one file per slice (named after the slice's first year)
        with layer 1 = authors, layer 2 = papers, layer 3 = ngrams.
        Raises IOError if the file exists and recreate is False.
        """
        dataframe = self.dataframe[self.dataframe[self.yearColumn].isin(sl)]
        filePath = outpath + 'multilayerPajek_{0}.net'.format(sl[0])
        if os.path.isfile(filePath):
            if recreate is False:
                raise IOError(
                    f'File at {filePath} exists. Set recreate = True to rewrite file.'
                )
            if recreate is True:
                os.remove(filePath)
        # Reload the slice's ngram scores (columns 0: pubID, 1: ngram, 2: score).
        dfNgramsList = [pd.read_csv(
            scorePath + str(slN) + '.tsv',
            sep='\t',
            header=None
        ) for slN in sl]
        ngramdataframe = pd.concat(dfNgramsList)
        ngramdataframe = ngramdataframe[ngramdataframe[2] > scoreLimit]
        with open(filePath, 'a') as file:
            file.write("# A network in a general multiplex format\n")
            file.write("*Vertices {0}\n".format(max(self.nodeMap.values())))
            for x, y in self.nodeMap.items():
                tmpStr = '{0} "{1}"\n'.format(y, x)
                if tmpStr:
                    file.write(tmpStr)
            file.write("*Multiplex\n")
            file.write("# layer node layer node [weight]\n")
            if self.debug is True:
                print('\tWriting inter-layer links to file.')
            for _, row in dataframe.fillna('').iterrows():
                authors = row[self.authorCol].split(';')
                paper = row[self.pubIDCol]
                if paper not in self.nodeMap.keys():
                    print(f'Cannot find {paper}')
                ngramsList = ngramdataframe[ngramdataframe[0] == paper]
                paperNr = self.nodeMap[paper]
                # Co-authorship links within layer 1 (author-author).
                if len(authors) >= 2:
                    # pairs = [x for x in combinations(authors, 2)]
                    for pair in combinations(authors, 2):  # pairs:
                        file.write(
                            '{0} {1} {2} {3} 1\n'.format(
                                1,
                                self.nodeMap[pair[0]],
                                1,
                                self.nodeMap[pair[1]]
                            )
                        )
                # Authorship links: layer 1 (author) -> layer 2 (paper).
                for author in authors:
                    try:
                        authNr = self.nodeMap[author]
                        file.write(
                            '{0} {1} {2} {3} 1\n'.format(
                                1,
                                authNr,
                                2,
                                paperNr
                            )
                        )
                    except KeyError:
                        pass
                # Content links: layer 2 (paper) -> layer 3 (ngram),
                # weighted by the ngram's score.
                for _, ngramrow in ngramsList.iterrows():
                    try:
                        ngramNr = self.nodeMap[ngramrow[1]]
                        weight = ngramrow[2]
                        file.write(
                            '{0} {1} {2} {3} {4}\n'.format(
                                2,
                                paperNr,
                                3,
                                ngramNr,
                                weight
                            )
                        )
                    except KeyError:
                        pass
    def run(self, recreate=False, windowsize=1, scorePath='./', outPath='./', scoreLimit=1.0):
        """Create data for all slices."""
        for sl in tqdm(self._createSlices(windowsize)):
            self.createNodeRegister(sl, scorePath, scoreLimit)
            self.writeLinks(sl, scorePath, scoreLimit, outpath=outPath, recreate=recreate)
| [
"tqdm.tqdm",
"os.remove",
"nltk.pos_tag",
"collections.Counter",
"nltk.ngrams",
"os.path.isfile",
"itertools.combinations",
"itertools.islice",
"multiprocessing.Pool",
"nltk.download",
"pandas.concat",
"nltk.word_tokenize",
"numpy.prod",
"multiprocessing.cpu_count"
] | [((225, 271), 'nltk.word_tokenize', 'nltk.word_tokenize', (['"""This is a test sentence."""'], {}), "('This is a test sentence.')\n", (243, 271), False, 'import nltk\n'), ((345, 388), 'nltk.download', 'nltk.download', (['"""averaged_perceptron_tagger"""'], {}), "('averaged_perceptron_tagger')\n", (358, 388), False, 'import nltk\n'), ((3045, 3062), 'collections.Counter', 'Counter', (['allgrams'], {}), '(allgrams)\n', (3052, 3062), False, 'from collections import Counter\n'), ((3705, 3716), 'tqdm.tqdm', 'tqdm', (['batch'], {}), '(batch)\n', (3709, 3716), False, 'from tqdm import tqdm\n'), ((4210, 4222), 'multiprocessing.Pool', 'Pool', (['ncores'], {}), '(ncores)\n', (4214, 4222), False, 'from multiprocessing import Pool, cpu_count\n'), ((7577, 7600), 'pandas.concat', 'pd.concat', (['dfNgramsList'], {}), '(dfNgramsList)\n', (7586, 7600), True, 'import pandas as pd\n'), ((9117, 9141), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (9131, 9141), False, 'import os\n'), ((9559, 9582), 'pandas.concat', 'pd.concat', (['dfNgramsList'], {}), '(dfNgramsList)\n', (9568, 9582), True, 'import pandas as pd\n'), ((2356, 2393), 'nltk.word_tokenize', 'nltk.word_tokenize', (['row[self.textCol]'], {}), '(row[self.textCol])\n', (2374, 2393), False, 'import nltk\n'), ((2412, 2432), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (2424, 2432), False, 'import nltk\n'), ((6744, 6757), 'itertools.islice', 'islice', (['it', 'n'], {}), '(it, n)\n', (6750, 6757), False, 'from itertools import islice, combinations\n'), ((4179, 4190), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4188, 4190), False, 'from multiprocessing import Pool, cpu_count\n'), ((4965, 4989), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (4979, 4989), False, 'import os\n'), ((9362, 9381), 'os.remove', 'os.remove', (['filePath'], {}), '(filePath)\n', (9371, 9381), False, 'import os\n'), ((2653, 2679), 'nltk.ngrams', 'nltk.ngrams', 
(['nnJJtokens', 'i'], {}), '(nnJJtokens, i)\n', (2664, 2679), False, 'import nltk\n'), ((3575, 3593), 'numpy.prod', 'np.prod', (['valueList'], {}), '(valueList)\n', (3582, 3593), True, 'import numpy as np\n'), ((10724, 10748), 'itertools.combinations', 'combinations', (['authors', '(2)'], {}), '(authors, 2)\n', (10736, 10748), False, 'from itertools import islice, combinations\n'), ((4123, 4134), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4132, 4134), False, 'from multiprocessing import Pool, cpu_count\n'), ((5258, 5277), 'os.remove', 'os.remove', (['filePath'], {}), '(filePath)\n', (5267, 5277), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
# <img style="float: left;" src="earth-lab-logo-rgb.png" width="150" height="150" />
#
# # Earth Analytics Education - EA Python Course Spring 2021
# ## Important - Assignment Guidelines
#
# 1. Before you submit your assignment to GitHub, make sure to run the entire notebook with a fresh kernel. To do this first, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart & Run All)
# 2. Always replace the `raise NotImplementedError()` code with your code that addresses the activity challenge. If you don't replace that code, your notebook will not run.
#
# ```
# # YOUR CODE HERE
# raise NotImplementedError()
# ```
#
# 3. Any open ended questions will have a "YOUR ANSWER HERE" within a markdown cell. Replace that text with your answer also formatted using Markdown.
# 4. **DO NOT RENAME THIS NOTEBOOK File!** If the file name changes, the autograder will not grade your assignment properly.
# 6. When you create a figure, comment out `plt.show()` to ensure the autograder can grade your plots. For figure cells, DO NOT DELETE the code that says `DO NOT REMOVE LINE BELOW`.
#
# ```
# ### DO NOT REMOVE LINE BELOW ###
# student_plot1_ax = nb.convert_axes(plt)
# ```
#
# * Only include the package imports, code, and outputs that are required to run your homework assignment.
# * Be sure that your code can be run on any operating system. This means that:
# 1. the data should be downloaded in the notebook to ensure it's reproducible
# 2. all paths should be created dynamically using the `os.path.join`
#
# ## Follow to PEP 8 Syntax Guidelines & Documentation
#
# * Run the `autopep8` tool on all cells prior to submitting (HINT: hit shift + the tool to run it on all cells at once!
# * Use clear and expressive names for variables.
# * Organize your code to support readability.
# * Check for code line length
# * Use comments and white space sparingly where it is needed
# * Make sure all python imports are at the top of your notebook and follow PEP 8 order conventions
# * Spell check your Notebook before submitting it.
#
# For all of the plots below, be sure to do the following:
#
# * Make sure each plot has a clear TITLE and, where appropriate, label the x and y axes. Be sure to include UNITS in your labels.
#
# ### Add Your Name Below
# **Your Name:** <NAME>
# <img style="float: left;" src="colored-bar.png"/>
# ---
# # Week 04 and 05 Homework - Automate NDVI Workflow
#
# For this assignment, you will write code to generate a plot of the mean normalized difference vegetation index (NDVI) for two different sites in the United States across one year of data:
#
# * San Joaquin Experimental Range (SJER) in Southern California, United States
# * Harvard Forest (HARV) in the Northeastern United States
#
# The data that you will use for this week is available from **earthpy** using the following download:
#
# `et.data.get_data('ndvi-automation')`
#
# ## Assignment Goals
#
# Your goal in this assignment is to create the most efficient and concise workflow that you can that allows for:
#
# 1. The code to scale if you added new sites or more time periods to the analysis.
# 2. Someone else to understand your workflow.
# 3. The LEAST and most efficient (i.e. runs fast, minimize repetition) amount of code that completes the task.
#
# ### HINTS
#
# * Remove values outside of the landsat valid range of values as specified in the metadata, as needed.
# * Keep any output files SEPARATE FROM input files. Outputs should be created in an outputs directory that is created in the code (if needed) and/or tested for.
# * Use the functions that we demonstrated during class to make your workflow more efficient.
# * BONUS - if you chose - you can export your data as a csv file. You will get bonus points for doing this.
#
#
# ## Assignment Requirements
#
# Your submission to the GitHub repository should include:
# * This Jupyter Notebook file (.ipynb) with:
# * The code to create a plot of mean NDVI across a year for 2 NEON Field Sites:
# * NDVI on the x axis and formatted dates on the y for both NEON sites on one figure/axis object
# * The **data should be cleaned to remove the influence of clouds**. See the [earthdatascience website for an example of what your plot might look like with and without removal of clouds](https://www.earthdatascience.org/courses/earth-analytics-python/create-efficient-data-workflows/).
# * BONUS: Create one output `.csv` file that has 3 columns - NDVI, Date and Site Name - with values for SJER and HARV.
#
# Your notebook should:
# * Have *at least* 2 well documented and well named functions with docstrings.
# * Include a Markdown cell at the top of the notebook that outlines the overall workflow using pseudocode (i.e. plain language, not code)
# * Include additional Markdown cells throughout the notebook to describe:
# * the data that you used - and where it is from
# * how data are being processing
# * how the code is optimized to run fast and be more concise
# # Replace this cell with your pseudocode for this workflow
#
# If you happen to be a diagram person a diagram is ok too
#
#
# 1. Import required packages
# 2. Download the data
# 3. Set the working directory.
# 4. Create paths to sites
# 5. Create cloud mask – get the cloud pixel values from earthpy
# 6. Create a function to extract site name and datetime from directory path names, using the path to the directory that contains the information of interest and the date and site name location within that directory path as index lists as the function parameters
# 7. Create a function that will open, crop and specify valid ranges of a landsat band, using the path to the band, the cropping extent, and the valid range as function parameters
# 8. Create dataframe of mean NDVI
# a. Create an empty list that will hold site, date, and mean NDVI information
# b. Create a for loop to loop through site paths
# i. Get list of scene paths of both sites using glob
# ii. Get shapefiles for each site using glob and pulling out index 0
# iii. Open shapefiles
# iv. Create a nested for loop to loop through each scene
# 1. Go through each scene directory and pull out date and site information using the function created earlier in the notebook
# 2. Go through each scene and create sorted list of bands in each scene using glob. Only bands 4 and 5 are needed for calculating NDVI
# 3. Go through each scene and get qa pixel layers using glob and pulling out index 0. This will pop out each qa pixel layer as the loop loops through each scene so that it's not in list form and can be worked with
# 4. Open the qa layer
# 5. Crop the qa layer using the shapefile opened in the first layer of the loop
# 6. Create an empty list that will hold bands 4 and 5 once they are cleaned and free of clouds
# 7. Create another for loop inside the already nested loop
# a. Clean the bands using the previously created function that will open the band, crop it using its associate shapefile, and specify landsat's valid range
# b. Apply cloud mask to band
# c. Append list so that it holds the cloud free bands. This list will be used to calculate mean NDVI
# 8. Calculate mean NDVI
# 9. Append the mean NDVI to the list holding the site information (the function that pulled site and date information from scene directory paths created a list as the output)
# 10. Append this list of lists to the empty list created outside the for loop at the top
# 9. Convert list into a pandas dataframe
# 10. Set index on date
# 11. Create figure
# a. Set figure space
# b. Create overall figure title
# c. Create a for loop to loop through dataframe and create individual dataframes grouped by site for plotting
# d. Set axes labels
# e. Format date on x axis
# f. Create a legend
# 12. Drop na values from dataframe for exporting
# 13. Export pandas dataframe to .csv file
# 14. Create a figure that displays mean NDVI at the HARV and SJER locations over a year, with mean NDVI on the y-axis and the month on the x-axis using the pandas dataframe created in the previous step.
# In[1]:
# Autograding imports - do not modify this cell
import matplotcheck.autograde as ag
import matplotcheck.notebook as nb
import matplotcheck.timeseries as ts
from datetime import datetime
# In[2]:
# Import needed packages in PEP 8 order
# and no unused imports listed (10 points total)
import os
from glob import glob
import matplotlib.pyplot as plt
import pandas as pd
import rioxarray as rxr
import xarray as xr
import geopandas as gpd
import earthpy as et
import earthpy.mask as em
from datetime import datetime
import numpy as np
from matplotlib.dates import DateFormatter
# Download the data
# Download the data (earthpy fetches the 'ndvi-automation' dataset into
# ~/earth-analytics/data when it is not already present)
et.data.get_data('ndvi-automation')
# Create a path to the directory
directory_path = os.path.join(et.io.HOME, "earth-analytics", "data")
# Set working directory
os.chdir(directory_path)
# In[3]:
# DO NOT MODIFY THIS CELL
# Tests that the working directory is set to earth-analytics/data
path = os.path.normpath(os.getcwd())
student_wd_parts = path.split(os.sep)
if student_wd_parts[-2:] == ['earth-analytics', 'data']:
    print("\u2705 Great - it looks like your working directory is set correctly to ~/earth-analytics/data")
else:
    print("\u274C Oops, the autograder will not run unless your working directory is set to earth-analytics/data")
# In[4]:
# Create paths to sites (one entry per NEON site directory, e.g. HARV, SJER)
site_paths = glob(os.path.join("ndvi-automation", "sites", "*"))
# Bare expression: displays the list when run as a notebook cell
site_paths
# Create cloud mask
# Get the cloud-related pixel-QA values from earthpy's Landsat 8 flag table
high_cloud_confidence = (
    em.pixel_flags["pixel_qa"]["L8"]["High Cloud Confidence"])
cloud = em.pixel_flags["pixel_qa"]["L8"]["Cloud"]
cloud_shadow = em.pixel_flags["pixel_qa"]["L8"]["Cloud Shadow"]
# Any pixel whose QA value appears in this combined list is treated as cloudy
all_masked_values = cloud_shadow + cloud + high_cloud_confidence
# # Figure 1: Plot 1 - Mean NDVI For Each Site Across the Year (50 points)
#
# Create a plot of the mean normalized difference vegetation index (NDVI) for the two different sites in the United States across the year:
#
# * NDVI on the x axis and formatted dates on the y for both NEON sites on one figure/axis object.
# * Each site should be identified with a different color in the plot and legend.
# * The final plot **data should be cleaned to remove the influence of clouds**.
# * Be sure to include appropriate title and axes labels.
#
# Add additional cells as needed for processing data (e.g. defining functions, etc), but be sure to:
# * follow the instructions in the code cells that have been provided to ensure that you are able to use the sanity check tests that are provided.
# * include only the plot code in the cell identified for the final plot code below
# ## Task 1:
#
# In the cell below, create a single dataframe containing MEAN NDVI, the site name,
# and the date of the data for the HARV site
# scene `HARV/landsat-crop/LC080130302017031701T1-SC20181023151837`. The column names for the final
# DataFrame should be`mean_ndvi`, and `site`, and the data should be **indexed on the date**.
#
# Use the functions that we reviewed in class (or create your own versions of them) to implement your code
#
# ### In the Cell below Place All Functions Needed to Run this Notebook (20 points)
# In[5]:
### DO NOT REMOVE THIS LINE OR EDIT / MOVE THIS CELL ###
# Record the start time; the elapsed processing time is reported near the
# end of the notebook as part of the autograded output
start_time = datetime.now()
# Create functions to extract site name and datetime from directory path names and open, crop and specify valid ranges of a landsat band.
# In[6]:
# In this cell place all of the functions needed to run your notebook
# You will be graded here on function application, docstrings, efficiency so ensure
# All functions are placed here!
# Function to extract sitename and datetime from directory path names
def extract_sitename_date(directory_path,
                          sitename_location,
                          datetime_location):
    """Extract the site name and acquisition date from a directory path.

    Parameters
    -----------
    directory_path : str
        Path to the scene directory.
    sitename_location : list of int
        [start, stop] character indices of the site name within
        ``directory_path``.
    datetime_location : list of int
        [start, stop] character indices of the acquisition date
        (formatted as ``YYYYMMDD``) within ``directory_path``.

    Returns
    -----------
    list
        Two-element list ``[site_name, date]`` where ``date`` is a
        ``datetime.datetime`` object.
    """
    # Slice the YYYYMMDD date substring out of the path
    date_string = directory_path[datetime_location[0]: datetime_location[1]]
    # Parse it with an explicit format string (passed inline so the
    # builtin name ``format`` is no longer shadowed, as it was before)
    date = datetime.strptime(date_string, "%Y%m%d")
    # Slice the site name substring out of the path
    site = directory_path[sitename_location[0]: sitename_location[1]]
    # Return the pair directly instead of appending to a list one item
    # at a time
    return [site, date]
# Function to clean landsat bands
def open_clean_bands(band_path,
                     crop_extent,
                     valid_range=None):
    """Open, crop, and optionally range-mask a Landsat band.

    Parameters
    -----------
    band_path : str
        A path to the band raster to be opened.
    crop_extent : geopandas.GeoDataFrame
        Boundary used to clip the band; its ``geometry`` column is used.
    valid_range : tuple (optional)
        A tuple of (min, max) valid values for the data; pixels outside
        the range are set to NaN. Default = None (no range masking).

    Returns
    -----------
    band : xarray DataArray
        The cropped band with any out-of-range values masked to NaN.
    """
    # TODO add tests to ensure the arrays are the same .shape
    band = rxr.open_rasterio(band_path, masked=True).rio.clip(
        crop_extent.geometry, from_disk=True).squeeze()
    # Only run this step if a valid range tuple is provided
    if valid_range:
        # ``mask`` is already a boolean DataArray, so it can be fed to
        # ``where`` directly - the former round-trip through
        # ``xr.where(mask, True, False)`` was a no-op
        mask = (band < valid_range[0]) | (band > valid_range[1])
        band = band.where(~mask)
    return band
# In[7]:
# Create dataframe of mean NDVI in this cell using the functions created above
# Create path to HARV data
harv_path = glob(os.path.join("ndvi-automation", "sites", "HARV"))
# List that will collect one [site, date, mean_ndvi] row per scene
harv_scene_info = []
# Create a loop to establish the scene directory path
# glob is not necessary here, however it is for the larger workflow, which is
# what is being demonstrated here
for path in harv_path:
    # Establish the scene directory path that is of interest
    scene_path = glob(os.path.join(path, "landsat-crop",
                                   "LC080130302017031701T1-SC20181023151837"))
    # Set the path to the associated shapefile
    bound = os.path.join(path, "vector", "HARV-crop.shp")
    # Open the shapefile
    harv_boundary = gpd.read_file(bound)
    # Create a nested for loop to be able to work with each .tif file (band)
    # in the scene, again this is necessary when working with multiple scenes
    for tif in scene_path:
        # Get site and date info from the scene directory path
        # (character indices [22, 26] -> site name, [50, 58] -> YYYYMMDD)
        site_info = extract_sitename_date(tif, [22, 26], [50, 58])
        # Sort the bands using glob so that they are in the right order
        # Only bands 4 (red) and 5 (NIR) are needed
        harv_bands = sorted(glob(os.path.join(tif, "*band[4-5]*")))
        # Set the path to the qa layer in the scene directory
        qa_layer_path = os.path.join(tif,
                                     "LC08_L1TP_013030_20170317_20170328_01_T1_pixel_qa.tif")
        # Open the qa layer
        opened_layer = rxr.open_rasterio(qa_layer_path, masked=True)
        # Crop the qa layer using the boundary associated with the scene and
        # opened in a previous step
        cropped_layer = opened_layer.rio.clip(harv_boundary.geometry).squeeze()
        # Create an empty list to store bands after they are cleaned of clouds
        tif_bands = []
        # Create an additional loop that is nested inside the other two that will
        # be used to work with each band inside the scene directory
        for a_band in harv_bands:
            # Clean the band using the previously created function
            # The function opens, crops, and sets landsat's valid range
            clean_band = open_clean_bands(
                a_band, harv_boundary, valid_range=(0, 10000))
            # Apply the cloud mask to the clean band
            cloud_free_band = clean_band.where(
                ~cropped_layer.isin(all_masked_values))
            # Append the band to the list that will be used to calculate
            # mean NDVI
            tif_bands.append(cloud_free_band)
        # Calculate mean NDVI using the list that is storing the clean bands
        # that are free of clouds; after the sort above, index 1 is band 5
        # (NIR) and index 0 is band 4 (red)
        mean_ndvi = np.nanmean(
            (tif_bands[1]-tif_bands[0]) / (tif_bands[1]+tif_bands[0]))
        # Append the mean NDVI to the list that was the result of the function
        # that grabbed site and date information from the scene directory path name
        site_info.append(mean_ndvi)
        # Append this lists of lists to the list outside of the nested for
        # loops at the top
        harv_scene_info.append(site_info)
# Convert list into a pandas dataframe
harv_info_df = pd.DataFrame(harv_scene_info, columns=[
                           "site", "date", "mean_ndvi"])
# Set index
harv_date_as_index = harv_info_df.set_index("date")
# Bare expression: displays the dataframe and makes it the notebook's
# last output so the grading cell below can capture it via `_`
harv_date_as_index
# In[8]:
# This cell is testing your data output above
# NOTE: `_` is the IPython/Jupyter "last cell output" variable, so this
# only works when run as a notebook with the dataframe displayed above
student_ndvi_ts_single_site = _
single_scene_points = 0
# Ensure the data is stored in a dataframe.
if isinstance(student_ndvi_ts_single_site, pd.DataFrame):
    print('\u2705 Your data is stored in a DataFrame!')
    single_scene_points += 1
else:
    print('\u274C It appears your data is not stored in a DataFrame. ',
          'To see what type of object your data is stored in, check its type with type(object)')
# Ensure that the date column is the index
if isinstance(student_ndvi_ts_single_site.index, pd.core.indexes.datetimes.DatetimeIndex):
    print('\u2705 You have the index set to the date column!')
    single_scene_points += 2
else:
    print('\u274C You do not have the index set to the date column.')
# Ensure that the date column is datetime
if isinstance(student_ndvi_ts_single_site.index[0], pd._libs.tslibs.timestamps.Timestamp):
    print('\u2705 The data in your date column is datetime!')
    single_scene_points += 2
else:
    print('\u274C The data in your date column is not datetime.')
# Ensure the site name is correct
if student_ndvi_ts_single_site.site.values[0] == 'HARV':
    print('\u2705 You have the correct site name!')
    single_scene_points += 5
else:
    print('\u274C You do not have the correct site name.')
# Check the single-scene mean NDVI against the expected reference value
if np.allclose(0.281131628228094, student_ndvi_ts_single_site.mean_ndvi.values[0]):
    print('\u2705 You have the correct mean NDVI value!')
    single_scene_points += 5
else:
    print('\u274C You do not have the correct mean ndvi value.')
print("\n \u27A1 You received {} out of 15 points for creating a dataframe.".format(
    single_scene_points))
# Bare expression: displays the score when run as a notebook cell
single_scene_points
# ## Task 2:
#
# In the cell below, process all of the landsat scenes. Create a DataFrame that contains the following
# information for each scene
#
#
# | | index | site | mean_ndvi |
# |---|---|---|---|
# | Date | | | |
# | 2017-01-07 | 0 | SJER | .4 |
#
# Be sure to call your dataframe at the end of the cell to ensure autograding works.
# HINT: FOR THIS STEP, leave any rows containing missing values (`NAN`).
# 1. Create dataframe of mean NDVI a. Create an empty list that will hold site, date, and mean NDVI information b. Create a for loop to loop through site paths
# i. Get list of scene paths of both sites using glob
# ii. Get shapefiles for each site using glob and pulling out index 0
# iii. Open shapefiles
# iv. Create a nested for loop to loop through each scene
# 1. Go through each scene directory and pull out date and site information using the function created earlier in the notebook
# 2. Go through each scene and create sorted list of bands in each scene using glob. Only bands 4 and 5 are needed for calculating NDVI
# 3. Go through each scene and get qa pixel layers using glob and pulling out index 0. This will pop out each qa pixel layer as the loop loops through each scene so that it's not in list form and can be worked with
# 4. Open the qa layer
# 5. Crop the qa layer using the shapefile opened in the first layer of the loop
# 6. Create an empty list that will hold bands 4 and 5 once they are cleaned and free of clouds
# 7. Create another for loop inside the already nested loop
# a. Clean the bands using the previously created function that will open the band, crop it using its associate shapefile, and specify landsat's valid range
# b. Apply cloud mask to band
# c. Append list so that it holds the cloud free bands. This list will be used to calculate mean NDVI
# 8. Calculate mean NDVI
# 9. Append the mean NDVI to the list holding the site information (the function that pulled site and date information from scene directory paths created a list as the output)
# 10. Append this list of lists to the empty list created outside the for loop at the top
#
# The below cell runs quickly and efficiently by using loops and functions to process data, which minimize repetition.
# In[9]:
# Create dataframe of NDVI including the cleaning data to deal with clouds
# Create an empty list that will hold site, date, and mean ndvi information
all_site_info = []
# Create a for loop to loop through site paths
for site in site_paths:
    # Get list of scene paths of both sites using glob
    dirs = glob(os.path.join(site, "landsat-crop", "*"))
    # Get shapefiles for each site using glob and pulling out index 0
    bounds = glob(os.path.join(site, "vector", "*-crop.shp"))[0]
    # Open shapefiles
    opened_bound = gpd.read_file(bounds)
    # Create a nested for loop to loop through each scene
    for all_dirs in dirs:
        # Go through each scene directory and pull out date and site
        # information using the function created earlier in the notebook
        # (character indices [22, 26] -> site name, [50, 58] -> YYYYMMDD)
        site_info = extract_sitename_date(all_dirs, [22, 26], [50, 58])
        # Go through each scene and create sorted list of bands in each scene
        # using glob. Only bands 4 and 5 are needed for calculating NDVI
        scene_bands = sorted(glob(os.path.join(all_dirs, "*band[4-5]*")))
        # Go through each scene and get qa pixel layers using glob and pulling
        # out index 0. This will pop out each qa pixel layer as the loop loops
        # through each scene so that it's not in list form and can be worked with
        qa_layer_paths = glob(os.path.join(all_dirs, "*pixel_qa*"))[0]
        # Open the qa layer
        opened_layer = rxr.open_rasterio(qa_layer_paths, masked=True)
        # Crop the qa layer using the shapefile opened in the first layer of
        # the loop
        cropped_layer = opened_layer.rio.clip(opened_bound.geometry).squeeze()
        # Create an empty list that will hold bands 4 and 5 once they are
        # cleaned and free of clouds
        site_bands = []
        # Create another for loop inside the already nested loop
        for band in scene_bands:
            # Clean the bands using the previously created function that will
            # open the band, crop it using its associate shapefile, and specify
            # landsat's valid range
            clean_band = open_clean_bands(
                band, opened_bound, valid_range=(0, 10000))
            # Apply cloud mask to band
            cloud_free_band = clean_band.where(
                ~cropped_layer.isin(all_masked_values))
            # Append list so that it holds the cloud free bands. This list will
            # be used to calculate mean NDVI
            site_bands.append(cloud_free_band)
        # Calculate mean NDVI; after the sort above, index 1 is band 5 (NIR)
        # and index 0 is band 4 (red)
        mean_ndvi = np.nanmean(
            (site_bands[1]-site_bands[0]) / (site_bands[1]+site_bands[0]))
        # Append the mean NDVI to the list holding the site information (the
        # function that pulled site and date information from scene directory
        # paths created a list as the output)
        site_info.append(mean_ndvi)
        # Append this list of lists to the empty list created outside the for
        # loop at the top
        all_site_info.append(site_info)
# Convert list into a pandas dataframe
site_info_df = pd.DataFrame(all_site_info, columns=[
                            "site", "date", "mean_ndvi"])
# Set index on date
indexed_site_info_df = site_info_df.set_index("date")
# Bare expression: displays the dataframe and makes it the notebook's
# last output so the grading cell below can capture it via `_`
indexed_site_info_df
# In[10]:
# Last sanity check before creating your plot (10 points)
# Ensure that you call your dataframe at the bottom of the cell above
# and that it has columns called: mean_ndvi and site
# Ensure the data is stored in a dataframe.
# NOTE: `_` is the IPython/Jupyter "last cell output" variable, so this
# only works when run as a notebook with the dataframe displayed above
student_ndvi_df = _
df_points = 0
if isinstance(student_ndvi_df, pd.DataFrame):
    print('\u2705 Your data is stored in a DataFrame!')
    df_points += 2
else:
    print('\u274C It appears your data is not stored in a DataFrame. ',
          'To see what type of object your data is stored in, check its type with type(object)')
# Check that dataframe contains the appropriate number of NAN values
if student_ndvi_df.mean_ndvi.isna().sum() == 15:
    print('\u2705 Correct number of masked data values!')
    df_points += 2
else:
    print('\u274C The amount of null data in your dataframe is incorrect.')
# Ensure that the date column is the index
if isinstance(student_ndvi_df.index, pd.core.indexes.datetimes.DatetimeIndex):
    print('\u2705 You have the index set to the date column!')
    df_points += 3
else:
    print('\u274C You do not have the index set to the date column.')
# Ensure that the date column is datetime
if isinstance(student_ndvi_df.index[0], pd._libs.tslibs.timestamps.Timestamp):
    print('\u2705 The data in your date column is datetime!')
    df_points += 3
else:
    print('\u274C The data in your date column is not datetime.')
# Output for timer, # DO NOT MODIFY
# (start_time is set near the top of the notebook)
end_time = datetime.now()
total_time = end_time - start_time
print(
    "Your total run time for processing the data was {0}.".format(total_time))
print("\n \u27A1 You received {} out of 10 points for creating a dataframe.".format(
    df_points))
# Bare expression: displays the score when run as a notebook cell
df_points
# Create a figure that displays mean NDVI at the HARV and SJER locations over a year, with mean NDVI on the y-axis and the month on the x-axis using the pandas dataframe created above.
# In[11]:
# Add only the plot code to this cell
# Set figure space
fig, ax = plt.subplots(figsize=(12, 7))
# Create overall figure title (typo "Vegetaion" fixed to "Vegetation")
fig.suptitle(
    "Mean Normalized Difference Vegetation Index (NDVI) \nJan 2017 - Dec 2017 \nLandsat 8 with Clouds Removed")
# Create a for loop to loop through dataframe and create individual dataframes
# grouped by site for plotting; NaN rows are dropped so lines do not break
for site, site_name_df in indexed_site_info_df.dropna().groupby("site"):
    ax.plot(site_name_df.index, site_name_df.mean_ndvi, marker="o", label=site)
# Set axes labels
ax.set(xlabel="Month",
       ylabel="Mean NDVI")
# Format date on x axis as abbreviated month names
ax.xaxis.set_major_formatter(DateFormatter("%b"))
# Create a legend
ax.legend()
### DO NOT REMOVE LINES BELOW ###
# Capture the current axes for the matplotcheck autograder
final_masked_solution = nb.convert_axes(plt, which_axes="current")
# In[12]:
# Ignore this cell for the autograding tests
# In[13]:
# Ignore this cell for the autograding tests
# # Question 1 (10 points)
#
# Imagine that you are planning NEON’s upcoming flight season to capture remote sensing data in these locations and want to ensure that you fly the area when the vegetation is the most green.
#
# When would you recommend the flights take place for each site?
#
# Answer the question in 2-3 sentences in the Markdown cell below.
# I would recommend that the flights take place in April for the SJER site. I would recommend that HARV flights take place in July.
# # Question 2 (10 points)
#
# How could you modify your workflow to look at vegetation changes over time in each site?
#
# Answer the question in 2-3 sentences in the Markdown cell below.
# I could possibly create NDVI difference maps to examine changes between time points (months, years, etc.). Due to the way my code is set up, I could also continue to add data to the HARV and SJER directories as it becomes available and run this same code to continue to monitor changes.
# # Do not edit this cell! (10 points)
#
# The notebook includes:
# * additional Markdown cells throughout the notebook to describe:
# * the data that you used - and where it is from
# * how data are being processing
# * how the code is optimized to run fast and be more concise
# # Do not edit this cell! (20 points)
#
# The notebook will also be checked for overall clean code requirements as specified at the **top** of this notebook. Some of these requirements include (review the top cells for more specifics):
#
# * Notebook begins at cell [1] and runs on any machine in its entirety.
# * PEP 8 format is applied throughout (including lengths of comment and code lines).
# * No additional code or imports in the notebook that is not needed for the workflow.
# * Notebook is fully reproducible. This means:
# * reproducible paths using the os module.
# * data downloaded using code in the notebook.
# * all imports at top of notebook.
# ## BONUS - Export a .CSV File to Share (10 points possible)
#
# This is optional - if you export a **.csv** file with the columns specified above: Site, Date and NDVI Value you can get an additional 10 points.
#
# * FULL CREDIT: File exists in csv format and contains the columns specified.
# We will check your github repo for this file!
#
# In[14]:
# Drop na values from dataframe for exporting
no_nan_df = indexed_site_info_df.dropna()
# Export pandas dataframe to csv file
# Reproducible output
# NOTE(review): assumes the "ndvi-automation/outputs" directory already
# exists under directory_path -- to_csv does not create it; confirm
no_nan_df.to_csv(os.path.join(directory_path, "ndvi-automation", "outputs",
                              "ndvi_df.csv"))
# Export to my local repository
# no_nan_df.to_csv(os.path.join(et.io.HOME, "earth-analytics",
#                               "2022_spring",
#                               "assignments",
#                               "04_assignment",
#                               "ea-2022-04-ndvi-automation-rami8797",
#                               "ndvi_df.csv"))
| [
"pandas.DataFrame",
"earthpy.data.get_data",
"rioxarray.open_rasterio",
"os.getcwd",
"numpy.allclose",
"matplotcheck.notebook.convert_axes",
"datetime.datetime.now",
"matplotlib.pyplot.subplots",
"datetime.datetime.strptime",
"matplotlib.dates.DateFormatter",
"numpy.nanmean",
"xarray.where",
... | [((9028, 9063), 'earthpy.data.get_data', 'et.data.get_data', (['"""ndvi-automation"""'], {}), "('ndvi-automation')\n", (9044, 9063), True, 'import earthpy as et\n'), ((9115, 9166), 'os.path.join', 'os.path.join', (['et.io.HOME', '"""earth-analytics"""', '"""data"""'], {}), "(et.io.HOME, 'earth-analytics', 'data')\n", (9127, 9166), False, 'import os\n'), ((9192, 9216), 'os.chdir', 'os.chdir', (['directory_path'], {}), '(directory_path)\n', (9200, 9216), False, 'import os\n'), ((11636, 11650), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11648, 11650), False, 'from datetime import datetime\n'), ((17529, 17597), 'pandas.DataFrame', 'pd.DataFrame', (['harv_scene_info'], {'columns': "['site', 'date', 'mean_ndvi']"}), "(harv_scene_info, columns=['site', 'date', 'mean_ndvi'])\n", (17541, 17597), True, 'import pandas as pd\n'), ((19052, 19131), 'numpy.allclose', 'np.allclose', (['(0.281131628228094)', 'student_ndvi_ts_single_site.mean_ndvi.values[0]'], {}), '(0.281131628228094, student_ndvi_ts_single_site.mean_ndvi.values[0])\n', (19063, 19131), True, 'import numpy as np\n'), ((25027, 25093), 'pandas.DataFrame', 'pd.DataFrame', (['all_site_info'], {'columns': "['site', 'date', 'mean_ndvi']"}), "(all_site_info, columns=['site', 'date', 'mean_ndvi'])\n", (25039, 25093), True, 'import pandas as pd\n'), ((26693, 26707), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26705, 26707), False, 'from datetime import datetime\n'), ((27210, 27239), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (27222, 27239), True, 'import matplotlib.pyplot as plt\n'), ((27894, 27936), 'matplotcheck.notebook.convert_axes', 'nb.convert_axes', (['plt'], {'which_axes': '"""current"""'}), "(plt, which_axes='current')\n", (27909, 27936), True, 'import matplotcheck.notebook as nb\n'), ((9347, 9358), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9356, 9358), False, 'import os\n'), ((9740, 9785), 'os.path.join', 
'os.path.join', (['"""ndvi-automation"""', '"""sites"""', '"""*"""'], {}), "('ndvi-automation', 'sites', '*')\n", (9752, 9785), False, 'import os\n'), ((12972, 13012), 'datetime.datetime.strptime', 'datetime.strptime', (['date_location', 'format'], {}), '(date_location, format)\n', (12989, 13012), False, 'from datetime import datetime\n'), ((14468, 14516), 'os.path.join', 'os.path.join', (['"""ndvi-automation"""', '"""sites"""', '"""HARV"""'], {}), "('ndvi-automation', 'sites', 'HARV')\n", (14480, 14516), False, 'import os\n'), ((15005, 15050), 'os.path.join', 'os.path.join', (['path', '"""vector"""', '"""HARV-crop.shp"""'], {}), "(path, 'vector', 'HARV-crop.shp')\n", (15017, 15050), False, 'import os\n'), ((15096, 15116), 'geopandas.read_file', 'gpd.read_file', (['bound'], {}), '(bound)\n', (15109, 15116), True, 'import geopandas as gpd\n'), ((22476, 22497), 'geopandas.read_file', 'gpd.read_file', (['bounds'], {}), '(bounds)\n', (22489, 22497), True, 'import geopandas as gpd\n'), ((27783, 27802), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%b"""'], {}), "('%b')\n", (27796, 27802), False, 'from matplotlib.dates import DateFormatter\n'), ((30529, 30602), 'os.path.join', 'os.path.join', (['directory_path', '"""ndvi-automation"""', '"""outputs"""', '"""ndvi_df.csv"""'], {}), "(directory_path, 'ndvi-automation', 'outputs', 'ndvi_df.csv')\n", (30541, 30602), False, 'import os\n'), ((14845, 14922), 'os.path.join', 'os.path.join', (['path', '"""landsat-crop"""', '"""LC080130302017031701T1-SC20181023151837"""'], {}), "(path, 'landsat-crop', 'LC080130302017031701T1-SC20181023151837')\n", (14857, 14922), False, 'import os\n'), ((15696, 15770), 'os.path.join', 'os.path.join', (['tif', '"""LC08_L1TP_013030_20170317_20170328_01_T1_pixel_qa.tif"""'], {}), "(tif, 'LC08_L1TP_013030_20170317_20170328_01_T1_pixel_qa.tif')\n", (15708, 15770), False, 'import os\n'), ((15859, 15904), 'rioxarray.open_rasterio', 'rxr.open_rasterio', (['qa_layer_path'], {'masked': '(True)'}), 
'(qa_layer_path, masked=True)\n', (15876, 15904), True, 'import rioxarray as rxr\n'), ((17047, 17120), 'numpy.nanmean', 'np.nanmean', (['((tif_bands[1] - tif_bands[0]) / (tif_bands[1] + tif_bands[0]))'], {}), '((tif_bands[1] - tif_bands[0]) / (tif_bands[1] + tif_bands[0]))\n', (17057, 17120), True, 'import numpy as np\n'), ((22259, 22298), 'os.path.join', 'os.path.join', (['site', '"""landsat-crop"""', '"""*"""'], {}), "(site, 'landsat-crop', '*')\n", (22271, 22298), False, 'import os\n'), ((23384, 23430), 'rioxarray.open_rasterio', 'rxr.open_rasterio', (['qa_layer_paths'], {'masked': '(True)'}), '(qa_layer_paths, masked=True)\n', (23401, 23430), True, 'import rioxarray as rxr\n'), ((24504, 24581), 'numpy.nanmean', 'np.nanmean', (['((site_bands[1] - site_bands[0]) / (site_bands[1] + site_bands[0]))'], {}), '((site_bands[1] - site_bands[0]) / (site_bands[1] + site_bands[0]))\n', (24514, 24581), True, 'import numpy as np\n'), ((22388, 22430), 'os.path.join', 'os.path.join', (['site', '"""vector"""', '"""*-crop.shp"""'], {}), "(site, 'vector', '*-crop.shp')\n", (22400, 22430), False, 'import os\n'), ((14286, 14313), 'xarray.where', 'xr.where', (['mask', '(True)', '(False)'], {}), '(mask, True, False)\n', (14294, 14313), True, 'import xarray as xr\n'), ((15575, 15607), 'os.path.join', 'os.path.join', (['tif', '"""*band[4-5]*"""'], {}), "(tif, '*band[4-5]*')\n", (15587, 15607), False, 'import os\n'), ((22982, 23019), 'os.path.join', 'os.path.join', (['all_dirs', '"""*band[4-5]*"""'], {}), "(all_dirs, '*band[4-5]*')\n", (22994, 23019), False, 'import os\n'), ((23292, 23328), 'os.path.join', 'os.path.join', (['all_dirs', '"""*pixel_qa*"""'], {}), "(all_dirs, '*pixel_qa*')\n", (23304, 23328), False, 'import os\n'), ((13950, 13991), 'rioxarray.open_rasterio', 'rxr.open_rasterio', (['band_path'], {'masked': '(True)'}), '(band_path, masked=True)\n', (13967, 13991), True, 'import rioxarray as rxr\n')] |
# -*- coding: utf-8 -*-
# @Time : 2018/8/23 22:21
# @Author : zhoujun
import os
import cv2
import numpy as np
import torch
from utils import CTCLabelConverter,AttnLabelConverter
from data_loader import get_transforms
class PytorchNet:
    def __init__(self, model_path, gpu_id=None):
        """Load a trained text-recognition model from a checkpoint.

        :param model_path: path to the checkpoint file; must contain
            'epoch', 'config' and 'state_dict' entries
        :param gpu_id: integer index of the GPU to run on; falls back to
            CPU when None, not an int, or CUDA is unavailable
        """
        # NOTE(review): get_model is only imported inside the __main__
        # block at the bottom of this file, so this class works when the
        # file is run as a script but raises NameError when imported --
        # confirm before reusing from another module.
        checkpoint = torch.load(model_path)
        print(f"load {checkpoint['epoch']} epoch params")
        config = checkpoint['config']
        alphabet = config['dataset']['alphabet']
        # Pick the device: a specific GPU only when a valid integer id is
        # given and CUDA is actually available, otherwise CPU
        if gpu_id is not None and isinstance(gpu_id, int) and torch.cuda.is_available():
            self.device = torch.device("cuda:%s" % gpu_id)
        else:
            self.device = torch.device("cpu")
        print('device:', self.device)
        # Keep only the tensor-level transforms from the training config
        # (ToTensor / Normalize); geometric preprocessing is done manually
        # in pre_processing below
        self.transform = []
        for t in config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] in ['ToTensor', 'Normalize']:
                self.transform.append(t)
        self.transform = get_transforms(self.transform)
        self.gpu_id = gpu_id
        # Default network input size, overridden by the Resize step from
        # the training pre-processing config when present
        img_h, img_w = 32, 100
        for process in config['dataset']['train']['dataset']['args']['pre_processes']:
            if process['type'] == "Resize":
                img_h = process['args']['img_h']
                img_w = process['args']['img_w']
                break
        self.img_w = img_w
        self.img_h = img_h
        self.img_mode = config['dataset']['train']['dataset']['args']['img_mode']
        self.alphabet = alphabet
        img_channel = 3 if config['dataset']['train']['dataset']['args']['img_mode'] != 'GRAY' else 1
        # Choose the label converter matching the prediction head type
        if config['arch']['args']['prediction']['type'] == 'CTC':
            self.converter = CTCLabelConverter(config['dataset']['alphabet'])
        elif config['arch']['args']['prediction']['type'] == 'Attn':
            self.converter = AttnLabelConverter(config['dataset']['alphabet'])
        self.net = get_model(img_channel, len(self.converter.character), config['arch']['args'])
        self.net.load_state_dict(checkpoint['state_dict'])
        # self.net = torch.jit.load('crnn_lite_gpu.pt')
        self.net.to(self.device)
        self.net.eval()
        # Warm-up / shape-registration pass with a dummy batch of two
        sample_input = torch.zeros((2, img_channel, img_h, img_w)).to(self.device)
        self.net.get_batch_max_length(sample_input)
    def predict(self, img_path, model_save_path=None):
        """Run text recognition on the given image.

        The original (Chinese) docstring claimed both an image path and a
        numpy array are supported, but the assert below only accepts an
        existing file path.

        :param img_path: path to the image file
        :param model_save_path: optional path; when given, the model is
            additionally traced and exported for deployment
        :return: tuple of (decoded result, input image tensor)
        """
        assert os.path.exists(img_path), 'file is not exists'
        img = self.pre_processing(img_path)
        tensor = self.transform(img)
        tensor = tensor.unsqueeze(dim=0)
        tensor = tensor.to(self.device)
        preds, tensor_img = self.net(tensor)
        preds = preds.softmax(dim=2).detach().cpu().numpy()
        # result = decode(preds, self.alphabet, raw=True)
        # print(result)
        result = self.converter.decode(preds)
        if model_save_path is not None:
            # Export the traced model for deployment
            save(self.net, tensor, model_save_path)
        return result, tensor_img
    def pre_processing(self, img_path):
        """Resize an image to the network input size.

        The image is first resized proportionally by height; if the
        resulting width is below the target width, black pixels are padded
        on the right, otherwise the image is force-scaled to the target
        width.

        :param img_path: path to the image file
        :return: the resized (and possibly padded) image array
        """
        # Read as color unless the model was trained on grayscale input
        img = cv2.imread(img_path, 1 if self.img_mode != 'GRAY' else 0)
        if self.img_mode == 'RGB':
            # OpenCV loads BGR; convert to the RGB order the model expects
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        h, w = img.shape[:2]
        ratio_h = float(self.img_h) / h
        new_w = int(w * ratio_h)
        if new_w < self.img_w:
            img = cv2.resize(img, (new_w, self.img_h))
            # Pad the right side with zeros (black) up to the target width
            step = np.zeros((self.img_h, self.img_w - new_w, img.shape[-1]), dtype=img.dtype)
            img = np.column_stack((img, step))
        else:
            img = cv2.resize(img, (self.img_w, self.img_h))
        return img
def save(net, input, save_path):
    """Trace *net* with the example *input* and write the TorchScript
    module to *save_path*.

    Note: a model traced on GPU can only be loaded on GPU, and one traced
    on CPU only on CPU.
    """
    net.eval()
    torch.jit.trace(net, input).save(save_path)
if __name__ == '__main__':
    from modeling import get_model
    import time
    from matplotlib import pyplot as plt
    from matplotlib.font_manager import FontProperties
    # Font for rendering CJK text in matplotlib (used by the commented-out
    # plotting code at the bottom)
    font = FontProperties(fname=r"msyh.ttc", size=14)
    img_path = '0.jpg'
    model_path = 'crnn_None_VGG_RNN_Attn/checkpoint/model_latest.pth'
    crnn_net = PytorchNet(model_path=model_path, gpu_id=0)
    start = time.time()
    # Single timing iteration; the loop form is kept so more runs can be
    # benchmarked by changing range() and removing the break
    for i in range(1):
        result, img = crnn_net.predict(img_path)
        break
    # Elapsed time per run in milliseconds
    print((time.time() - start) *1000/ 1)
    label = result[0][0]
    print(result)
    # plt.title(label, fontproperties=font)
    # plt.imshow(img.detach().cpu().numpy().squeeze().transpose((1, 2, 0)), cmap='gray')
    # plt.show()
| [
"torch.jit.trace",
"matplotlib.font_manager.FontProperties",
"cv2.cvtColor",
"torch.load",
"data_loader.get_transforms",
"os.path.exists",
"utils.CTCLabelConverter",
"numpy.zeros",
"time.time",
"numpy.column_stack",
"cv2.imread",
"utils.AttnLabelConverter",
"torch.cuda.is_available",
"torc... | [((4020, 4047), 'torch.jit.trace', 'torch.jit.trace', (['net', 'input'], {}), '(net, input)\n', (4035, 4047), False, 'import torch\n'), ((4277, 4318), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'fname': '"""msyh.ttc"""', 'size': '(14)'}), "(fname='msyh.ttc', size=14)\n", (4291, 4318), False, 'from matplotlib.font_manager import FontProperties\n'), ((4486, 4497), 'time.time', 'time.time', ([], {}), '()\n', (4495, 4497), False, 'import time\n'), ((415, 437), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (425, 437), False, 'import torch\n'), ((1057, 1087), 'data_loader.get_transforms', 'get_transforms', (['self.transform'], {}), '(self.transform)\n', (1071, 1087), False, 'from data_loader import get_transforms\n'), ((2544, 2568), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (2558, 2568), False, 'import os\n'), ((3333, 3390), 'cv2.imread', 'cv2.imread', (['img_path', "(1 if self.img_mode != 'GRAY' else 0)"], {}), "(img_path, 1 if self.img_mode != 'GRAY' else 0)\n", (3343, 3390), False, 'import cv2\n'), ((645, 670), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (668, 670), False, 'import torch\n'), ((698, 730), 'torch.device', 'torch.device', (["('cuda:%s' % gpu_id)"], {}), "('cuda:%s' % gpu_id)\n", (710, 730), False, 'import torch\n'), ((771, 790), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (783, 790), False, 'import torch\n'), ((1767, 1815), 'utils.CTCLabelConverter', 'CTCLabelConverter', (["config['dataset']['alphabet']"], {}), "(config['dataset']['alphabet'])\n", (1784, 1815), False, 'from utils import CTCLabelConverter, AttnLabelConverter\n'), ((3444, 3480), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3456, 3480), False, 'import cv2\n'), ((3633, 3669), 'cv2.resize', 'cv2.resize', (['img', '(new_w, self.img_h)'], {}), '(img, (new_w, self.img_h))\n', (3643, 3669), False, 'import 
cv2\n'), ((3689, 3763), 'numpy.zeros', 'np.zeros', (['(self.img_h, self.img_w - new_w, img.shape[-1])'], {'dtype': 'img.dtype'}), '((self.img_h, self.img_w - new_w, img.shape[-1]), dtype=img.dtype)\n', (3697, 3763), True, 'import numpy as np\n'), ((3782, 3810), 'numpy.column_stack', 'np.column_stack', (['(img, step)'], {}), '((img, step))\n', (3797, 3810), True, 'import numpy as np\n'), ((3843, 3884), 'cv2.resize', 'cv2.resize', (['img', '(self.img_w, self.img_h)'], {}), '(img, (self.img_w, self.img_h))\n', (3853, 3884), False, 'import cv2\n'), ((1914, 1963), 'utils.AttnLabelConverter', 'AttnLabelConverter', (["config['dataset']['alphabet']"], {}), "(config['dataset']['alphabet'])\n", (1932, 1963), False, 'from utils import CTCLabelConverter, AttnLabelConverter\n'), ((2256, 2299), 'torch.zeros', 'torch.zeros', (['(2, img_channel, img_h, img_w)'], {}), '((2, img_channel, img_h, img_w))\n', (2267, 2299), False, 'import torch\n'), ((4595, 4606), 'time.time', 'time.time', ([], {}), '()\n', (4604, 4606), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 20:37:03 2020
@author: Shiro
"""
import pandas as pd
import copy
import numpy as np
## put inside listes the csv file representing the prediction of a model
listes = ["model_pl_A-5folds-CV-seed42-bs16-mixup.csv", "model_pl_B-5folds-CV-seed42-bs16-mixup.csv"]
# Load one dataframe per model submission
frames = [pd.read_csv(name) for name in listes]
# Every column after the first (the id column) holds prediction values
pred_cols = frames[0].columns[1:]
# Stack the prediction matrices and average element-wise across models
stacked = np.stack([frame[pred_cols].values for frame in frames])
blended = stacked.mean(0)
# Reuse the first frame's id column and column layout for the output
final = copy.deepcopy(frames[0])
final[pred_cols] = blended
# csv filename output
final.to_csv("submission_ensemblingv7-PL.csv", index=False)
"pandas.read_csv",
"copy.deepcopy",
"numpy.stack"
] | [((460, 480), 'copy.deepcopy', 'copy.deepcopy', (['df[0]'], {}), '(df[0])\n', (473, 480), False, 'import copy\n'), ((337, 351), 'pandas.read_csv', 'pd.read_csv', (['l'], {}), '(l)\n', (348, 351), True, 'import pandas as pd\n'), ((402, 440), 'numpy.stack', 'np.stack', (['[d[cols].values for d in df]'], {}), '([d[cols].values for d in df])\n', (410, 440), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# true parameters of the data-generating process used by `dataset` below:
# intercept ('one'), slopes on the continuous regressors ('x1', 'x2'),
# per-level scales for the two fixed effects ('id1', 'id2'), and the
# zero-inflation probability ('pz') for the zero-inflated Poisson outcome.
c = {
    'one': 0.0,
    'x1': 0.6,
    'x2': 0.2,
    'id1': 0.2,
    'id2': 0.5,
    'pz': 0.2
}
# poisson dampening: the fixed-effect contributions to the linear predictor
# are divided by this factor (see df['yhat'] in `dataset`), which keeps
# exp(yhat) at a moderate scale for the Poisson outcomes.
pfact = 100
def dataset(N=1_000_000, K1=10, K2=100, seed=89320432, params=None, damp=None):
    """Simulate a dataset with two continuous regressors and two fixed-effect
    ids, plus matched outcomes for OLS, Poisson, zero-inflated Poisson, and
    logit estimators.

    Parameters
    ----------
    N : int
        Number of observations.
    K1, K2 : int
        Number of levels for the 'id1' and 'id2' fixed effects.
    seed : int
        Seed for the internal RandomState; results are reproducible.
    params : dict, optional
        Coefficients with keys 'one', 'x1', 'x2', 'id1', 'id2', 'pz'.
        Defaults to the module-level ``c``.
    damp : float, optional
        Dampening factor dividing the fixed-effect contributions.
        Defaults to the module-level ``pfact``.

    Returns
    -------
    pandas.DataFrame
        Columns: regressors, linear predictors (yhat0/yhat), outcomes for
        each model family (y0/y, p0/p, pz, b0/b) and their expectations.
    """
    if params is None:
        params = c
    if damp is None:
        damp = pfact
    # init random
    st = np.random.RandomState(seed)
    # core regressors
    df = pd.DataFrame({
        'one': 1,
        'id1': st.randint(K1, size=N),
        'id2': st.randint(K2, size=N),
        'x1': st.randn(N),
        'x2': st.randn(N)
    })
    # linear predictors without (yhat0) and with (yhat) the fixed effects
    df['yhat0'] = params['one'] + params['x1']*df['x1'] + params['x2']*df['x2']
    df['yhat'] = df['yhat0'] + params['id1']*df['id1']/damp + params['id2']*df['id2']/damp
    # ols outcomes
    df['y0'] = df['yhat0'] + st.randn(N)
    df['y'] = df['yhat'] + st.randn(N)
    # poisson outcomes
    df['Ep0'] = np.exp(df['yhat0'])
    df['Ep'] = np.exp(df['yhat'])
    df['p0'] = st.poisson(df['Ep0'])
    df['p'] = st.poisson(df['Ep'])
    # zero-inflated poisson: zero out a `pz` fraction of the counts
    df['pz'] = np.where(st.rand(N) < params['pz'], 0, df['p'])
    # logit
    df['Eb0'] = 1/(1+np.exp(-df['yhat0']))
    df['Eb'] = 1/(1+np.exp(-df['yhat']))
    # `np.int` was removed in NumPy 1.24; the builtin `int` is its
    # documented replacement (platform default integer dtype).
    # NOTE(review): comparing N(0,1) draws (`randn`) against a probability
    # in (0, 1) does not produce Bernoulli(Eb) outcomes -- `st.rand` would
    # (cf. the 'pz' line above); confirm intent before changing.
    df['b0'] = (st.randn(N) < df['Eb0']).astype(int)
    df['b'] = (st.randn(N) < df['Eb']).astype(int)
    return df
def plot_coeff(beta):
    """Scatter the demeaned estimated id2 coefficients (`beta`) against the
    demeaned true coefficients implied by `c` and `pfact`, with a 45-degree
    reference line."""
    coeff = pd.DataFrame({'id2': np.arange(len(beta)), 'beta1': beta})
    # true coefficients from the data-generating process
    coeff['beta0'] = c['id2'] * coeff['id2'] / pfact
    # compare both series net of their means
    coeff['beta0'] = coeff['beta0'] - coeff['beta0'].mean()
    coeff['beta1'] = coeff['beta1'] - coeff['beta1'].mean()
    # common range spanning both series, for the reference line
    both = coeff[['beta0', 'beta1']]
    lo = both.min().min()
    hi = both.max().max()
    line = np.linspace(lo, hi, 100)
    # plot estimates against truth
    fig, ax = plt.subplots(figsize=(6, 5))
    coeff.plot.scatter(x='beta0', y='beta1', ax=ax, alpha=0.5)
    ax.plot(line, line, c='r', linewidth=1, zorder=1)
    fig.show()
def test_ols(data, y='y', x=['x1', 'x2'], fe=['id1', 'id2'], plot=False, **kwargs):
    """Fit an OLS model with fixed effects via `linear.ols` and return its
    result table; with `plot=True`, scatter the estimated id2 coefficients."""
    from . import linear
    result = linear.ols(y=y, x=x, fe=fe, data=data, **kwargs)
    if plot:
        id2_coeffs = result['coeff'].filter(regex='id2')
        plot_coeff(id2_coeffs)
    return result
def test_jax(data, estim='poisson', y='p', x=['x1', 'x2'], fe=['id1', 'id2'], plot=False, **kwargs):
    """Fit `data` with an estimator from the `general` backend module and
    return its result table. `estim` may be a callable or the name of a
    function in that module; `plot=True` scatters the id2 coefficients."""
    from . import general
    if type(estim) is str:
        # resolve the estimator function by name on the backend module
        estim = getattr(general, estim)
    result = estim(y=y, x=x, fe=fe, data=data, **kwargs)
    if plot:
        id2_coeffs = result['coeff'].filter(regex='id2')
        plot_coeff(id2_coeffs)
    return result
def test_torch(data, estim='poisson', y='p', x=['x1', 'x2'], fe=['id1', 'id2'], plot=False, **kwargs):
    """Fit `data` with an estimator from the `gentorch` backend module and
    return its result table. `estim` may be a callable or the name of a
    function in that module; `plot=True` scatters the id2 coefficients."""
    from . import gentorch
    if type(estim) is str:
        # resolve the estimator function by name on the backend module
        estim = getattr(gentorch, estim)
    result = estim(y=y, x=x, fe=fe, data=data, **kwargs)
    if plot:
        id2_coeffs = result['coeff'].filter(regex='id2')
        plot_coeff(id2_coeffs)
    return result
| [
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.exp",
"numpy.random.RandomState"
] | [((307, 334), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (328, 334), True, 'import numpy as np\n'), ((844, 863), 'numpy.exp', 'np.exp', (["df['yhat0']"], {}), "(df['yhat0'])\n", (850, 863), True, 'import numpy as np\n'), ((879, 897), 'numpy.exp', 'np.exp', (["df['yhat']"], {}), "(df['yhat'])\n", (885, 897), True, 'import numpy as np\n'), ((1664, 1692), 'numpy.linspace', 'np.linspace', (['bmin', 'bmax', '(100)'], {}), '(bmin, bmax, 100)\n', (1675, 1692), True, 'import numpy as np\n'), ((1729, 1757), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (1741, 1757), True, 'import matplotlib.pyplot as plt\n'), ((1091, 1111), 'numpy.exp', 'np.exp', (["(-df['yhat0'])"], {}), "(-df['yhat0'])\n", (1097, 1111), True, 'import numpy as np\n'), ((1133, 1152), 'numpy.exp', 'np.exp', (["(-df['yhat'])"], {}), "(-df['yhat'])\n", (1139, 1152), True, 'import numpy as np\n')] |
import h5py
import numpy as np
import uuid
import yaml
import os, sys
class SaveData:
    """Dump collection/partition vectors to HDF5 files plus a companion
    YAML description consumable by the H2M (HDF5-to-Milvus) importer.

    All output is written under ``<data_dir>/<timestamp>/`` with the YAML
    files in a ``yamls/`` subdirectory; both directories are created on
    construction if missing."""
    def __init__(self, logger, data_dir, timestamp):
        self.logger = logger
        self.data_dir = data_dir
        self.timestamp = timestamp
        self.dirs = self.data_dir + '/' + timestamp
        if not os.path.exists(self.dirs):
            os.makedirs(self.dirs)
        if not os.path.exists(self.dirs + '/yamls'):
            os.makedirs(self.dirs + '/yamls')
    def save_hdf5_data(self, collection_name, partition_tag, vectors, ids):
        """Write `vectors` and `ids` to an HDF5 file named after the
        collection/partition and return its path; logs and exits the
        process on any failure (preserving the original behaviour)."""
        hdf5_filename = self.dirs + '/' + collection_name + '_' + str(partition_tag) + '.h5'
        try:
            # Context manager guarantees the handle is closed on every path;
            # the original opened the file and never closed it (leaked on
            # success and on exceptions).
            with h5py.File(hdf5_filename, 'w') as f:
                if isinstance(vectors[0], bytes):
                    # Binary vectors: expand each byte string into a uint8 row.
                    data = np.array([list(v) for v in vectors], dtype=np.uint8)
                else:
                    data = np.array(vectors)
                f.create_dataset(name='embeddings', data=data)
                f.create_dataset(name='ids', data=ids)
            self.logger.debug('Successfully saved data of collection: {}/partition: {} data in {}!'.format(collection_name, partition_tag, hdf5_filename))
            return hdf5_filename
        except Exception as e:
            self.logger.error("Error with {}".format(e))
            sys.exit(1)
    def save_yaml(self, collection_name, partition_tag, collection_parameter, version, save_hdf5_name):
        """Write the H2M import YAML describing a previously saved HDF5
        file (`save_hdf5_name`); logs and exits the process on failure."""
        try:
            hdf2_ymal = {
                'H2M':{
                    'milvus_version': version,
                    'data_path': [save_hdf5_name],
                    'data_dir': None,
                    'dest_host': '127.0.0.1',
                    'dest_port': 19530,
                    'mode': 'skip',
                    'dest_collection_name': collection_name,
                    'dest_partition_name': partition_tag,
                    'collection_parameter': collection_parameter,
                }
            }
            yaml_filename = self.dirs + '/yamls/' + collection_name + '_' + str(partition_tag) + '.yaml'
            with open(yaml_filename, 'w') as f:
                f.write(yaml.dump(hdf2_ymal))
            self.logger.debug('Successfully saved yamls of collection: {}/partition: {} data in {}!'.format(collection_name, partition_tag, yaml_filename))
        except Exception as e:
            self.logger.error("Error with {}".format(e))
sys.exit(1) | [
"h5py.File",
"os.makedirs",
"yaml.dump",
"os.path.exists",
"numpy.array",
"sys.exit"
] | [((304, 329), 'os.path.exists', 'os.path.exists', (['self.dirs'], {}), '(self.dirs)\n', (318, 329), False, 'import os, sys\n'), ((343, 365), 'os.makedirs', 'os.makedirs', (['self.dirs'], {}), '(self.dirs)\n', (354, 365), False, 'import os, sys\n'), ((381, 417), 'os.path.exists', 'os.path.exists', (["(self.dirs + '/yamls')"], {}), "(self.dirs + '/yamls')\n", (395, 417), False, 'import os, sys\n'), ((431, 464), 'os.makedirs', 'os.makedirs', (["(self.dirs + '/yamls')"], {}), "(self.dirs + '/yamls')\n", (442, 464), False, 'import os, sys\n'), ((673, 702), 'h5py.File', 'h5py.File', (['hdf5_filename', '"""w"""'], {}), "(hdf5_filename, 'w')\n", (682, 702), False, 'import h5py\n'), ((868, 895), 'numpy.array', 'np.array', (['v'], {'dtype': 'np.uint8'}), '(v, dtype=np.uint8)\n', (876, 895), True, 'import numpy as np\n'), ((968, 985), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (976, 985), True, 'import numpy as np\n'), ((1394, 1405), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1402, 1405), False, 'import os, sys\n'), ((2504, 2515), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2512, 2515), False, 'import os, sys\n'), ((2226, 2246), 'yaml.dump', 'yaml.dump', (['hdf2_ymal'], {}), '(hdf2_ymal)\n', (2235, 2246), False, 'import yaml\n')] |
#!/usr/bin/env python
import argparse
import os.path as osp
import re
import chainer
from chainer import cuda
import fcn
import numpy as np
import tqdm
def main():
    """Evaluate a trained FCN semantic-segmentation checkpoint on the
    PASCAL VOC2011 'seg11valid' split and print accuracy, per-class
    accuracy, mean IoU, and frequency-weighted accuracy."""
    parser = argparse.ArgumentParser()
    parser.add_argument('model_file')
    parser.add_argument('-g', '--gpu', default=0, type=int,
                        help='if -1, use cpu only (default: 0)')
    args = parser.parse_args()
    dataset = fcn.datasets.VOC2011ClassSeg('seg11valid')
    n_class = len(dataset.class_names)
    # Infer the architecture class name from the checkpoint filename.
    basename = osp.basename(args.model_file).lower()
    if basename.startswith('fcn8s-atonce') or \
            basename.startswith('fcn8satonce'):
        model_name = 'FCN8sAtOnce'
    else:
        match = re.match('^fcn(32|16|8)s.*$', basename)
        if match is None:
            print('Unsupported model filename: %s' % args.model_file)
            quit(1)
        model_name = 'FCN%ss' % match.groups()[0]
    model_class = getattr(fcn.models, model_name)
    model = model_class(n_class=n_class)
    chainer.serializers.load_npz(args.model_file, model)
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()
    lbl_preds, lbl_trues = [], []
    for i in tqdm.trange(len(dataset)):
        # Apply fcn's LSVRC2012/VGG16 input transform to the raw example.
        datum, lbl_true = fcn.datasets.transform_lsvrc2012_vgg16(
            dataset.get_example(i))
        # Add a leading batch dimension of size 1.
        x_data = np.expand_dims(datum, axis=0)
        if args.gpu >= 0:
            x_data = cuda.to_gpu(x_data)
        # Inference only: no gradient graph, and train-mode behaviour off.
        with chainer.no_backprop_mode():
            x = chainer.Variable(x_data)
            with chainer.using_config('train', False):
                model(x)
        # Per-pixel class prediction: argmax over the channel axis.
        lbl_pred = chainer.functions.argmax(model.score, axis=1)[0]
        lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
        lbl_preds.append(lbl_pred)
        lbl_trues.append(lbl_true)
    acc, acc_cls, mean_iu, fwavacc = \
        fcn.utils.label_accuracy_score(lbl_trues, lbl_preds, n_class)
    print('Accuracy: %.4f' % (100 * acc))
    print('AccClass: %.4f' % (100 * acc_cls))
    print('Mean IoU: %.4f' % (100 * mean_iu))
    print('Fwav Acc: %.4f' % (100 * fwavacc))
if __name__ == '__main__':
    main()
| [
"fcn.utils.label_accuracy_score",
"chainer.Variable",
"argparse.ArgumentParser",
"chainer.serializers.load_npz",
"os.path.basename",
"chainer.cuda.get_device",
"chainer.functions.argmax",
"re.match",
"numpy.expand_dims",
"chainer.cuda.to_cpu",
"chainer.no_backprop_mode",
"chainer.cuda.to_gpu",... | [((181, 206), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (204, 206), False, 'import argparse\n'), ((416, 458), 'fcn.datasets.VOC2011ClassSeg', 'fcn.datasets.VOC2011ClassSeg', (['"""seg11valid"""'], {}), "('seg11valid')\n", (444, 458), False, 'import fcn\n'), ((1010, 1062), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.model_file', 'model'], {}), '(args.model_file, model)\n', (1038, 1062), False, 'import chainer\n'), ((1860, 1921), 'fcn.utils.label_accuracy_score', 'fcn.utils.label_accuracy_score', (['lbl_trues', 'lbl_preds', 'n_class'], {}), '(lbl_trues, lbl_preds, n_class)\n', (1890, 1921), False, 'import fcn\n'), ((709, 748), 're.match', 're.match', (['"""^fcn(32|16|8)s.*$"""', 'basename'], {}), "('^fcn(32|16|8)s.*$', basename)\n", (717, 748), False, 'import re\n'), ((1343, 1372), 'numpy.expand_dims', 'np.expand_dims', (['datum'], {'axis': '(0)'}), '(datum, axis=0)\n', (1357, 1372), True, 'import numpy as np\n'), ((514, 543), 'os.path.basename', 'osp.basename', (['args.model_file'], {}), '(args.model_file)\n', (526, 543), True, 'import os.path as osp\n'), ((1420, 1439), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['x_data'], {}), '(x_data)\n', (1431, 1439), False, 'from chainer import cuda\n'), ((1454, 1480), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (1478, 1480), False, 'import chainer\n'), ((1498, 1522), 'chainer.Variable', 'chainer.Variable', (['x_data'], {}), '(x_data)\n', (1514, 1522), False, 'import chainer\n'), ((1094, 1119), 'chainer.cuda.get_device', 'cuda.get_device', (['args.gpu'], {}), '(args.gpu)\n', (1109, 1119), False, 'from chainer import cuda\n'), ((1540, 1576), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (1560, 1576), False, 'import chainer\n'), ((1706, 1740), 'chainer.cuda.to_cpu', 'chainer.cuda.to_cpu', (['lbl_pred.data'], {}), '(lbl_pred.data)\n', (1725, 1740), False, 'import 
chainer\n'), ((1630, 1675), 'chainer.functions.argmax', 'chainer.functions.argmax', (['model.score'], {'axis': '(1)'}), '(model.score, axis=1)\n', (1654, 1675), False, 'import chainer\n')] |
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Surface and contour charts of (three times) the 2-D Alpine N.1
    # function |x sin(x) + 0.1 x + y sin(y) + 0.1 y| over [-10, 10)^2.
    fig = plt.figure()
    # fig.gca(projection='3d') was deprecated in matplotlib 3.4 and
    # removed in 3.6; request the 3-D axes at creation time instead.
    ax = fig.add_subplot(projection='3d')
    x = np.arange(-10, 10, 0.1)
    y = np.arange(-10, 10, 0.1)
    x, y = np.meshgrid(x, y)
    res = 0
    # NOTE(review): the summand does not depend on i, so this loop just
    # scales the surface by 3 -- presumably a leftover from summing over
    # dimensions; confirm intent before simplifying.
    for i in range(3):
        res = res + np.abs(x * np.sin(x) + 0.1 * x + y * np.sin(y) + 0.1 * y)
    ax.plot_surface(x, y, res, cmap='jet')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('Alpine function surface chart')
    plt.show()
    plt.contour(x, y, res)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('Alpine function contour chart')
    # Re-plot the surface solely to obtain a mappable for the colorbar.
    plt.colorbar(ax.plot_surface(x, y, res, cmap='jet'), shrink=1, aspect=5)
    plt.show()
| [
"matplotlib.pyplot.title",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((89, 101), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (99, 101), True, 'import matplotlib.pyplot as plt\n'), ((144, 167), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(0.1)'], {}), '(-10, 10, 0.1)\n', (153, 167), True, 'import numpy as np\n'), ((176, 199), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(0.1)'], {}), '(-10, 10, 0.1)\n', (185, 199), True, 'import numpy as np\n'), ((211, 228), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (222, 228), True, 'import numpy as np\n'), ((415, 430), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (425, 430), True, 'import matplotlib.pyplot as plt\n'), ((435, 450), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (445, 450), True, 'import matplotlib.pyplot as plt\n'), ((455, 497), 'matplotlib.pyplot.title', 'plt.title', (['"""Alpine function surface chart"""'], {}), "('Alpine function surface chart')\n", (464, 497), True, 'import matplotlib.pyplot as plt\n'), ((502, 512), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (510, 512), True, 'import matplotlib.pyplot as plt\n'), ((517, 539), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', 'res'], {}), '(x, y, res)\n', (528, 539), True, 'import matplotlib.pyplot as plt\n'), ((544, 559), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (554, 559), True, 'import matplotlib.pyplot as plt\n'), ((564, 579), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (574, 579), True, 'import matplotlib.pyplot as plt\n'), ((584, 626), 'matplotlib.pyplot.title', 'plt.title', (['"""Alpine function contour chart"""'], {}), "('Alpine function contour chart')\n", (593, 626), True, 'import matplotlib.pyplot as plt\n'), ((708, 718), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (716, 718), True, 'import matplotlib.pyplot as plt\n'), ((321, 330), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (327, 330), True, 'import numpy as np\n'), ((295, 
304), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (301, 304), True, 'import numpy as np\n')] |
# SPDX-FileCopyrightText: 2014-2020 <NAME>
#
# SPDX-License-Identifier: MIT
from __future__ import division, print_function
import pytest
import pickle
from collections import OrderedDict
import numpy as np
from symfit import (
Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model
)
from symfit.distributions import BivariateGaussian
from symfit.core.minimizers import (
BaseMinimizer, MINPACK, BFGS, NelderMead, ChainedMinimizer, BasinHopping
)
from symfit.core.objectives import (
LogLikelihood, LeastSquares, VectorLeastSquares, MinimizeModel
)
def ge_constraint(a):  # Has to be in the global namespace for pickle.
    """Inequality constraint body: enforces a >= 1 via a - 1 >= 0."""
    shifted = a - 1
    return shifted
class TestTestResult():
    """Integration tests for symfit's FitResults: checks attribute types,
    goodness-of-fit bookkeeping, pickling, and that the minimizer,
    objective, and constraints used in a fit are carried in the result."""
    @classmethod
    def setup_class(cls):
        # Noise-free power-law data y = 3 * x**2, fitted below with a
        # variety of minimizers/objectives whose results the tests inspect.
        xdata = np.linspace(1, 10, 10)
        ydata = 3 * xdata ** 2
        cls.a = Parameter('a')
        cls.b = Parameter('b')
        x = Variable('x')
        y = Variable('y')
        model = Model({y: cls.a * x ** cls.b})
        fit = Fit(model, x=xdata, y=ydata)
        cls.fit_result = fit.execute()
        fit = Fit(model, x=xdata, y=ydata, minimizer=MINPACK)
        cls.minpack_result = fit.execute()
        fit = Fit(model, x=xdata, objective=LogLikelihood)
        cls.likelihood_result = fit.execute()
        fit = Fit(model, x=xdata, y=ydata, minimizer=[BFGS, NelderMead])
        cls.chained_result = fit.execute()
        z = Variable('z')
        # One symbolic and one numerical (>=) constraint; ge_constraint must
        # live at module level so the result remains picklable.
        constraints = [
            Eq(cls.a, cls.b),
            CallableNumericalModel.as_constraint(
                {z: ge_constraint}, connectivity_mapping={z: {cls.a}},
                constraint_type=Ge, model=model
            )
        ]
        fit = Fit(model, x=xdata, y=ydata, constraints=constraints)
        cls.constrained_result = fit.execute()
        fit = Fit(model, x=xdata, y=ydata, constraints=constraints,
                  minimizer=BasinHopping)
        cls.constrained_basinhopping_result = fit.execute()
    def test_params_type(self):
        assert isinstance(self.fit_result.params, OrderedDict)
    def test_minimizer_output_type(self):
        # The raw minimizer output must always be exposed as a dict.
        assert isinstance(self.fit_result.minimizer_output, dict)
        assert isinstance(self.minpack_result.minimizer_output, dict)
        assert isinstance(self.likelihood_result.minimizer_output, dict)
    def test_fitting(self):
        """
        Test if the fitting worked in the first place.
        """
        assert isinstance(self.fit_result, FitResults)
        assert self.fit_result.value(self.a) == pytest.approx(3.0)
        assert self.fit_result.value(self.b) == pytest.approx(2.0)
        assert isinstance(self.fit_result.stdev(self.a), float)
        assert isinstance(self.fit_result.stdev(self.b), float)
        assert isinstance(self.fit_result.r_squared, float)
        # by definition since there's no fuzzyness
        assert self.fit_result.r_squared == 1.0
    def test_fitting_2(self):
        """Fit a mixture of two bivariate Gaussians to binned samples and
        check r_squared plus the variance/covariance accessors."""
        np.random.seed(43)
        mean = (0.62, 0.71) # x, y mean 0.7, 0.7
        cov = [
            [0.102**2, 0],
            [0, 0.07**2]
        ]
        data_1 = np.random.multivariate_normal(mean, cov, 10**5)
        mean = (0.33, 0.28) # x, y mean 0.3, 0.3
        cov = [ # rho = 0.25
            [0.05 ** 2, 0.25 * 0.05 * 0.101],
            [0.25 * 0.05 * 0.101, 0.101 ** 2]
        ]
        data_2 = np.random.multivariate_normal(mean, cov, 10**5)
        data = np.vstack((data_1, data_2))
        # Pass (y, x): histogram2d follows matrix (row, column) order
        # rather than Cartesian (x, y) conventions.
        ydata, xedges, yedges = np.histogram2d(data[:, 1], data[:, 0], bins=200,
                                               range=[[0.0, 1.0], [0.0, 1.0]],
                                               density=True)
        xcentres = (xedges[:-1] + xedges[1:]) / 2
        ycentres = (yedges[:-1] + yedges[1:]) / 2
        # Make a valid grid to match ydata
        xx, yy = np.meshgrid(xcentres, ycentres, sparse=False)
        x = Variable('x')
        y = Variable('y')
        x0_1 = Parameter('x0_1', value=0.6, min=0.5, max=0.7)
        sig_x_1 = Parameter('sig_x_1', value=0.1, min=0.0, max=0.2)
        y0_1 = Parameter('y0_1', value=0.7, min=0.6, max=0.8)
        sig_y_1 = Parameter('sig_y_1', value=0.05, min=0.0, max=0.2)
        rho_1 = Parameter('rho_1', value=0.0, min=-0.5, max=0.5)
        A_1 = Parameter('A_1', value=0.5, min=0.3, max=0.7)
        g_1 = A_1 * BivariateGaussian(x=x, y=y, mu_x=x0_1, mu_y=y0_1,
                                      sig_x=sig_x_1, sig_y=sig_y_1, rho=rho_1)
        x0_2 = Parameter('x0_2', value=0.3, min=0.2, max=0.4)
        sig_x_2 = Parameter('sig_x_2', value=0.05, min=0.0, max=0.2)
        y0_2 = Parameter('y0_2', value=0.3, min=0.2, max=0.4)
        sig_y_2 = Parameter('sig_y_2', value=0.1, min=0.0, max=0.2)
        rho_2 = Parameter('rho_2', value=0.26, min=0.0, max=0.8)
        A_2 = Parameter('A_2', value=0.5, min=0.3, max=0.7)
        g_2 = A_2 * BivariateGaussian(x=x, y=y, mu_x=x0_2, mu_y=y0_2,
                                      sig_x=sig_x_2, sig_y=sig_y_2, rho=rho_2)
        model = g_1 + g_2
        fit = Fit(model, xx, yy, ydata)
        fit_result = fit.execute()
        assert fit_result.r_squared > 0.95
        for param in fit.model.params:
            try:
                assert fit_result.stdev(param)**2 == pytest.approx(fit_result.variance(param))
            except AssertionError:
                # Numerically the variance can come out non-positive, in
                # which case the standard deviation is reported as NaN.
                assert fit_result.variance(param) <= 0.0
                assert np.isnan(fit_result.stdev(param))
        # Covariance matrix should be symmetric
        for param_1 in fit.model.params:
            for param_2 in fit.model.params:
                assert fit_result.covariance(param_1, param_2) == pytest.approx(fit_result.covariance(param_2, param_1), rel=1e-3)
    def test_minimizer_included(self):
        """The minimizer used should be included in the results."""
        assert isinstance(self.constrained_result.minimizer, BaseMinimizer)
        assert isinstance(self.constrained_basinhopping_result.minimizer, BaseMinimizer)
        assert isinstance(self.likelihood_result.minimizer, BaseMinimizer)
        assert isinstance(self.fit_result.minimizer, BaseMinimizer)
        assert isinstance(self.chained_result.minimizer, ChainedMinimizer)
        for minimizer, cls in zip(self.chained_result.minimizer.minimizers, [BFGS, NelderMead]):
            assert isinstance(minimizer, cls)
    def test_objective_included(self):
        """The objective used should be included in the results."""
        assert isinstance(self.fit_result.objective, LeastSquares)
        assert isinstance(self.minpack_result.objective, VectorLeastSquares)
        assert isinstance(self.likelihood_result.objective, LogLikelihood)
        assert isinstance(self.constrained_result.objective, LeastSquares)
        assert isinstance(self.constrained_basinhopping_result.objective, LeastSquares)
    def test_constraints_included(self):
        """
        Test if the constraints have been properly fed to the results object so
        we can easily print their compliance.
        """
        # For a constrained fit we expect a list of MinimizeModel objectives.
        for constrained_result in [self.constrained_result, self.constrained_basinhopping_result]:
            assert isinstance(constrained_result.constraints, list)
            # NOTE(review): the inner loop reads self.constrained_result
            # instead of the loop variable -- looks unintended; confirm.
            for constraint in self.constrained_result.constraints:
                assert isinstance(constraint, MinimizeModel)
    def test_message_included(self):
        """Status message should be included."""
        assert isinstance(self.fit_result.status_message, str)
        assert isinstance(self.minpack_result.status_message, str)
        assert isinstance(self.likelihood_result.status_message, str)
        assert isinstance(self.constrained_result.status_message, str)
        assert isinstance(self.constrained_basinhopping_result.status_message, str)
    def test_pickle(self):
        # Every FitResults flavour must survive a pickle round trip with
        # the same attribute set (the minimizer is compared by type only).
        for fit_result in [self.fit_result, self.chained_result,
                           self.constrained_basinhopping_result,
                           self.constrained_result, self.likelihood_result]:
            dumped = pickle.dumps(fit_result)
            new_result = pickle.loads(dumped)
            assert sorted(fit_result.__dict__.keys()) == sorted(new_result.__dict__.keys())
            for k, v1 in fit_result.__dict__.items():
                v2 = new_result.__dict__[k]
                if k == 'minimizer':
                    assert type(v1) == type(v2)
                elif k != 'minimizer_output': # Ignore minimizer_output
                    if isinstance(v1, np.ndarray):
                        assert v1 == pytest.approx(v2, nan_ok=True)
    def test_gof_presence(self):
        """
        Test if the expected goodness of fit estimators are present.
        """
        assert hasattr(self.fit_result, 'objective_value')
        assert hasattr(self.fit_result, 'r_squared')
        assert hasattr(self.fit_result, 'chi_squared')
        assert not hasattr(self.fit_result, 'log_likelihood')
        assert not hasattr(self.fit_result, 'likelihood')
        assert hasattr(self.minpack_result, 'objective_value')
        assert hasattr(self.minpack_result, 'r_squared')
        assert hasattr(self.minpack_result, 'chi_squared')
        assert not hasattr(self.minpack_result, 'log_likelihood')
        assert not hasattr(self.minpack_result, 'likelihood')
        assert hasattr(self.likelihood_result, 'objective_value')
        assert not hasattr(self.likelihood_result, 'r_squared')
        assert not hasattr(self.likelihood_result, 'chi_squared')
        assert hasattr(self.likelihood_result, 'log_likelihood')
        assert hasattr(self.likelihood_result, 'likelihood')
| [
"pickle.loads",
"numpy.meshgrid",
"symfit.Parameter",
"symfit.Model",
"numpy.random.seed",
"symfit.Eq",
"symfit.CallableNumericalModel.as_constraint",
"numpy.histogram2d",
"symfit.Fit",
"symfit.distributions.BivariateGaussian",
"numpy.random.multivariate_normal",
"numpy.linspace",
"pytest.ap... | [((761, 783), 'numpy.linspace', 'np.linspace', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (772, 783), True, 'import numpy as np\n'), ((832, 846), 'symfit.Parameter', 'Parameter', (['"""a"""'], {}), "('a')\n", (841, 846), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((863, 877), 'symfit.Parameter', 'Parameter', (['"""b"""'], {}), "('b')\n", (872, 877), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((891, 904), 'symfit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (899, 904), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((917, 930), 'symfit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (925, 930), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((947, 977), 'symfit.Model', 'Model', (['{y: cls.a * x ** cls.b}'], {}), '({y: cls.a * x ** cls.b})\n', (952, 977), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((993, 1021), 'symfit.Fit', 'Fit', (['model'], {'x': 'xdata', 'y': 'ydata'}), '(model, x=xdata, y=ydata)\n', (996, 1021), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((1075, 1122), 'symfit.Fit', 'Fit', (['model'], {'x': 'xdata', 'y': 'ydata', 'minimizer': 'MINPACK'}), '(model, x=xdata, y=ydata, minimizer=MINPACK)\n', (1078, 1122), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((1180, 1224), 'symfit.Fit', 'Fit', (['model'], {'x': 'xdata', 'objective': 'LogLikelihood'}), '(model, x=xdata, objective=LogLikelihood)\n', (1183, 1224), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((1285, 1343), 'symfit.Fit', 'Fit', (['model'], {'x': 'xdata', 'y': 
'ydata', 'minimizer': '[BFGS, NelderMead]'}), '(model, x=xdata, y=ydata, minimizer=[BFGS, NelderMead])\n', (1288, 1343), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((1400, 1413), 'symfit.Variable', 'Variable', (['"""z"""'], {}), "('z')\n", (1408, 1413), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((1675, 1728), 'symfit.Fit', 'Fit', (['model'], {'x': 'xdata', 'y': 'ydata', 'constraints': 'constraints'}), '(model, x=xdata, y=ydata, constraints=constraints)\n', (1678, 1728), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((1790, 1867), 'symfit.Fit', 'Fit', (['model'], {'x': 'xdata', 'y': 'ydata', 'constraints': 'constraints', 'minimizer': 'BasinHopping'}), '(model, x=xdata, y=ydata, constraints=constraints, minimizer=BasinHopping)\n', (1793, 1867), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((2919, 2937), 'numpy.random.seed', 'np.random.seed', (['(43)'], {}), '(43)\n', (2933, 2937), True, 'import numpy as np\n'), ((3083, 3132), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', '(10 ** 5)'], {}), '(mean, cov, 10 ** 5)\n', (3112, 3132), True, 'import numpy as np\n'), ((3330, 3379), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', '(10 ** 5)'], {}), '(mean, cov, 10 ** 5)\n', (3359, 3379), True, 'import numpy as np\n'), ((3393, 3420), 'numpy.vstack', 'np.vstack', (['(data_1, data_2)'], {}), '((data_1, data_2))\n', (3402, 3420), True, 'import numpy as np\n'), ((3526, 3625), 'numpy.histogram2d', 'np.histogram2d', (['data[:, 1]', 'data[:, 0]'], {'bins': '(200)', 'range': '[[0.0, 1.0], [0.0, 1.0]]', 'density': '(True)'}), '(data[:, 1], data[:, 0], bins=200, range=[[0.0, 1.0], [0.0, \n 1.0]], density=True)\n', (3540, 3625), True, 'import numpy as np\n'), 
((3876, 3921), 'numpy.meshgrid', 'np.meshgrid', (['xcentres', 'ycentres'], {'sparse': '(False)'}), '(xcentres, ycentres, sparse=False)\n', (3887, 3921), True, 'import numpy as np\n'), ((3935, 3948), 'symfit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (3943, 3948), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((3961, 3974), 'symfit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (3969, 3974), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((3991, 4037), 'symfit.Parameter', 'Parameter', (['"""x0_1"""'], {'value': '(0.6)', 'min': '(0.5)', 'max': '(0.7)'}), "('x0_1', value=0.6, min=0.5, max=0.7)\n", (4000, 4037), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4056, 4105), 'symfit.Parameter', 'Parameter', (['"""sig_x_1"""'], {'value': '(0.1)', 'min': '(0.0)', 'max': '(0.2)'}), "('sig_x_1', value=0.1, min=0.0, max=0.2)\n", (4065, 4105), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4121, 4167), 'symfit.Parameter', 'Parameter', (['"""y0_1"""'], {'value': '(0.7)', 'min': '(0.6)', 'max': '(0.8)'}), "('y0_1', value=0.7, min=0.6, max=0.8)\n", (4130, 4167), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4186, 4236), 'symfit.Parameter', 'Parameter', (['"""sig_y_1"""'], {'value': '(0.05)', 'min': '(0.0)', 'max': '(0.2)'}), "('sig_y_1', value=0.05, min=0.0, max=0.2)\n", (4195, 4236), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4253, 4301), 'symfit.Parameter', 'Parameter', (['"""rho_1"""'], {'value': '(0.0)', 'min': '(-0.5)', 'max': '(0.5)'}), "('rho_1', value=0.0, min=-0.5, max=0.5)\n", (4262, 4301), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, 
CallableNumericalModel, Model\n'), ((4316, 4361), 'symfit.Parameter', 'Parameter', (['"""A_1"""'], {'value': '(0.5)', 'min': '(0.3)', 'max': '(0.7)'}), "('A_1', value=0.5, min=0.3, max=0.7)\n", (4325, 4361), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4527, 4573), 'symfit.Parameter', 'Parameter', (['"""x0_2"""'], {'value': '(0.3)', 'min': '(0.2)', 'max': '(0.4)'}), "('x0_2', value=0.3, min=0.2, max=0.4)\n", (4536, 4573), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4592, 4642), 'symfit.Parameter', 'Parameter', (['"""sig_x_2"""'], {'value': '(0.05)', 'min': '(0.0)', 'max': '(0.2)'}), "('sig_x_2', value=0.05, min=0.0, max=0.2)\n", (4601, 4642), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4658, 4704), 'symfit.Parameter', 'Parameter', (['"""y0_2"""'], {'value': '(0.3)', 'min': '(0.2)', 'max': '(0.4)'}), "('y0_2', value=0.3, min=0.2, max=0.4)\n", (4667, 4704), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4723, 4772), 'symfit.Parameter', 'Parameter', (['"""sig_y_2"""'], {'value': '(0.1)', 'min': '(0.0)', 'max': '(0.2)'}), "('sig_y_2', value=0.1, min=0.0, max=0.2)\n", (4732, 4772), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4789, 4837), 'symfit.Parameter', 'Parameter', (['"""rho_2"""'], {'value': '(0.26)', 'min': '(0.0)', 'max': '(0.8)'}), "('rho_2', value=0.26, min=0.0, max=0.8)\n", (4798, 4837), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((4852, 4897), 'symfit.Parameter', 'Parameter', (['"""A_2"""'], {'value': '(0.5)', 'min': '(0.3)', 'max': '(0.7)'}), "('A_2', value=0.5, min=0.3, max=0.7)\n", (4861, 4897), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, 
CallableNumericalModel, Model\n'), ((5088, 5113), 'symfit.Fit', 'Fit', (['model', 'xx', 'yy', 'ydata'], {}), '(model, xx, yy, ydata)\n', (5091, 5113), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((1450, 1466), 'symfit.Eq', 'Eq', (['cls.a', 'cls.b'], {}), '(cls.a, cls.b)\n', (1452, 1466), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((1480, 1608), 'symfit.CallableNumericalModel.as_constraint', 'CallableNumericalModel.as_constraint', (['{z: ge_constraint}'], {'connectivity_mapping': '{z: {cls.a}}', 'constraint_type': 'Ge', 'model': 'model'}), '({z: ge_constraint},\n connectivity_mapping={z: {cls.a}}, constraint_type=Ge, model=model)\n', (1516, 1608), False, 'from symfit import Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model\n'), ((2505, 2523), 'pytest.approx', 'pytest.approx', (['(3.0)'], {}), '(3.0)\n', (2518, 2523), False, 'import pytest\n'), ((2572, 2590), 'pytest.approx', 'pytest.approx', (['(2.0)'], {}), '(2.0)\n', (2585, 2590), False, 'import pytest\n'), ((4382, 4477), 'symfit.distributions.BivariateGaussian', 'BivariateGaussian', ([], {'x': 'x', 'y': 'y', 'mu_x': 'x0_1', 'mu_y': 'y0_1', 'sig_x': 'sig_x_1', 'sig_y': 'sig_y_1', 'rho': 'rho_1'}), '(x=x, y=y, mu_x=x0_1, mu_y=y0_1, sig_x=sig_x_1, sig_y=\n sig_y_1, rho=rho_1)\n', (4399, 4477), False, 'from symfit.distributions import BivariateGaussian\n'), ((4918, 5013), 'symfit.distributions.BivariateGaussian', 'BivariateGaussian', ([], {'x': 'x', 'y': 'y', 'mu_x': 'x0_2', 'mu_y': 'y0_2', 'sig_x': 'sig_x_2', 'sig_y': 'sig_y_2', 'rho': 'rho_2'}), '(x=x, y=y, mu_x=x0_2, mu_y=y0_2, sig_x=sig_x_2, sig_y=\n sig_y_2, rho=rho_2)\n', (4935, 5013), False, 'from symfit.distributions import BivariateGaussian\n'), ((8148, 8172), 'pickle.dumps', 'pickle.dumps', (['fit_result'], {}), '(fit_result)\n', (8160, 8172), False, 'import pickle\n'), ((8198, 8218), 'pickle.loads', 
'pickle.loads', (['dumped'], {}), '(dumped)\n', (8210, 8218), False, 'import pickle\n'), ((8655, 8685), 'pytest.approx', 'pytest.approx', (['v2'], {'nan_ok': '(True)'}), '(v2, nan_ok=True)\n', (8668, 8685), False, 'import pytest\n')] |
# coding: utf-8
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.misc import toimage
import pandas as pd
import time
#from sklearn.model_selection import KFold
#from sklearn.model_selection import train_test_split
from keras.datasets import cifar10
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
# ---------------------------------------------------------------------------
# Experiment configuration
# ---------------------------------------------------------------------------
# Number of CIFAR-10 classes.
N_CLASS = 10
N_EPOCH = 50 # 100
BATCH_SIZE = 128
# CIFAR-10 image shape (height, width, channels).
INPUT_DIM = (32, 32, 3)
# If True, train with a real-time ImageDataGenerator instead of raw arrays.
DATA_AUGMENTATION = True
# Keyword arguments forwarded verbatim to keras ImageDataGenerator.
# NOTE: Test.save() mutates this dict to log the run's hyper-parameters.
IDG_PARAM = {'featurewise_center': False,
             'samplewise_center': False,
             'featurewise_std_normalization': False,
             'samplewise_std_normalization': False,
             'zca_whitening': True, # False
             'rotation_range': 0.,
             'width_shift_range': 0.1, # 0.,
             'height_shift_range': 0.1, # 0.,
             'shear_range': 0.,
             'zoom_range': 0.,
             'channel_shift_range': 0.,
             'fill_mode': 'nearest',
             'cval': 0.,
             'horizontal_flip': True,
             'vertical_flip': False,
             'rescale': None,
             'preprocessing_function': None
             }
# Output locations for the model, weights, history and evaluation artifacts.
DIR = './result/'
MODEL_FILE = 'model.json'
WEIGHT_FILE = 'weights.h5'
HISTORY_DATA_FILE = 'history.csv'
HISTORY_IMAGE_FILE = 'history.jpg'
PARAM_EVAL_FILE = 'param_eval.csv'
class Test:
    """End-to-end CIFAR-10 CNN experiment.

    Loads the data, builds and trains a Keras Sequential CNN (optionally with
    real-time data augmentation), saves the model/weights/history/evaluation
    under ``DIR``, then reloads the saved model and re-evaluates it.

    NOTE(review): written against the Keras 1.x API (``Convolution2D`` with
    ``border_mode``, ``nb_epoch``, ``fit_generator``) — confirm the installed
    Keras version before running.
    """
    def __init__(self):
        """No instance state; the list below is the original author's notes.

        - data augmentation
        - normalize
        - zca whitening
        - make validation data from training data
        - change learning rate on a way
        """
        pass
    def main(self):
        """Run the full pipeline: train, save artifacts, report timing, test."""
        # Training
        start = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8
        data = self.get_data()
        model = self.design_model(data[0])
        result = self.train_model(data, model)
        self.save(result)
        print('Training Time: %s min' % round((time.clock()-start)/60., 1))
        print('')
        # Test
        self.test_model(data)
    def get_data(self):
        """Load CIFAR-10, scale pixels to [0, 1] and one-hot encode labels.

        Returns:
            Tuple ``(X_train, Y_train, X_test, Y_test)`` where images are
            float32 arrays and labels are one-hot matrices with N_CLASS cols.
        """
        # Load CIFAR-10
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        self.__draw_sample_images(X_train, y_train)
        # Normalize data
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255.0
        X_test /= 255.0
        # Onehot label
        Y_train = np_utils.to_categorical(y_train, N_CLASS)
        Y_test = np_utils.to_categorical(y_test, N_CLASS)
        print('X_train.shape:', X_train.shape, 'Y_train.shape:', Y_train.shape)
        print('X_test.shape:', X_test.shape, 'Y_test.shape:', Y_test.shape)
        return X_train, Y_train, X_test, Y_test
    def __draw_sample_images(self, X_train, y_train, stdout=False):
        """Save a 10x10 grid (10 random images per class) to DIR/cifar10.jpg.

        Args:
            X_train: training images.
            y_train: integer class labels of shape (n, 1).
            stdout: if True, also show the figure interactively.
        """
        # Set background color to white
        fig = plt.figure()
        fig.patch.set_facecolor('white')
        # Draw sample images
        n_class = 10
        pos = 1
        for target_class in range(n_class):
            # Get index list of a class
            target_idx = []
            for i in range(len(y_train)):
                if y_train[i][0] == target_class:
                    target_idx.append(i)
            # Draw random ten images for each class
            np.random.shuffle(target_idx)
            for idx in target_idx[:10]:
                img = toimage(X_train[idx])  # NOTE(review): scipy.misc.toimage removed in SciPy 1.2
                plt.subplot(10, 10, pos)
                plt.imshow(img)
                plt.axis('off')
                pos += 1
        plt.savefig(DIR+'cifar10.jpg', dpi=100)
        if stdout == True:
            plt.show()
    def design_model(self, X_train):
        """Build and compile the CNN.

        Architecture: [Conv-ReLU x2 -> MaxPool -> Dropout] x2 -> Flatten ->
        Dense(512)-ReLU-Dropout -> Dense(N_CLASS)-Softmax, compiled with Adam
        and categorical cross-entropy.

        Args:
            X_train: training images; only ``X_train.shape[1:]`` is used.

        Returns:
            The compiled Keras Sequential model.
        """
        # Initialize
        model = Sequential()
        # (Conv -> Relu) * 2 -> Pool -> Dropout
        model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=X_train.shape[1:]))
        model.add(Activation('relu'))
        model.add(Convolution2D(32, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        # (Conv -> Relu) * 2 -> Pool -> Dropout
        model.add(Convolution2D(64, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(Convolution2D(64, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        # Flatten
        model.add(Flatten()) # 6*6*64
        # FC -> Relu -> Dropout
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        # FC -> Softmax
        model.add(Dense(N_CLASS))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        # NOTE: plotting the model graph via keras.utils.visualize_util.plot
        # requires pydot + graphviz; it raised ImportError here, so only the
        # text summary is printed.
        model.summary()
        return model
    def train_model(self, data, model):
        """Train the model and evaluate it on the test set.

        When DATA_AUGMENTATION is False the raw arrays are used directly;
        otherwise both train and test data flow through ImageDataGenerator
        (the test generator applies only ZCA whitening, matching training).

        Args:
            data: (X_train, Y_train, X_test, Y_test) tuple from get_data().
            model: compiled Keras model from design_model().

        Returns:
            Dict with keys 'model', 'history', 'loss', 'acc'.
        """
        X_train, Y_train, X_test, Y_test = data
        if not DATA_AUGMENTATION:
            print('Not using data augmentation')
            # Train the model
            history = model.fit(X_train, Y_train,
                                batch_size=BATCH_SIZE,
                                nb_epoch=N_EPOCH,
                                verbose=1,
                                validation_data=(X_test, Y_test),
                                shuffle=True)
        else:
            print('Using real-time data augmentation')
            # Make a generator for training data
            train_datagen = ImageDataGenerator(featurewise_center=IDG_PARAM['featurewise_center'],
                                               samplewise_center=IDG_PARAM['samplewise_center'],
                                               featurewise_std_normalization=IDG_PARAM['featurewise_std_normalization'],
                                               samplewise_std_normalization=IDG_PARAM['samplewise_std_normalization'],
                                               zca_whitening=IDG_PARAM['zca_whitening'],
                                               rotation_range=IDG_PARAM['rotation_range'],
                                               width_shift_range=IDG_PARAM['width_shift_range'],
                                               height_shift_range=IDG_PARAM['height_shift_range'],
                                               shear_range=IDG_PARAM['shear_range'],
                                               zoom_range=IDG_PARAM['zoom_range'],
                                               channel_shift_range=IDG_PARAM['channel_shift_range'],
                                               fill_mode=IDG_PARAM['fill_mode'],
                                               cval=IDG_PARAM['cval'],
                                               horizontal_flip=IDG_PARAM['horizontal_flip'],
                                               vertical_flip=IDG_PARAM['vertical_flip'],
                                               rescale=IDG_PARAM['rescale'],
                                               preprocessing_function=IDG_PARAM['preprocessing_function'])
            train_datagen.fit(X_train)
            train_generator = train_datagen.flow(X_train, Y_train, batch_size=BATCH_SIZE)
            # Make a generator for test data
            test_datagen = ImageDataGenerator(zca_whitening=IDG_PARAM['zca_whitening'])
            test_datagen.fit(X_test)
            test_generator = test_datagen.flow(X_test, Y_test)
            # Train the model
            history = model.fit_generator(train_generator,
                                          samples_per_epoch=X_train.shape[0],
                                          nb_epoch=N_EPOCH,
                                          validation_data=test_generator,
                                          nb_val_samples=X_test.shape[0])
        # Evaluate the model
        if not DATA_AUGMENTATION:
            loss, acc = model.evaluate(X_test, Y_test, verbose=0)
        else:
            loss, acc = model.evaluate_generator(test_generator, val_samples=X_test.shape[0])
        print('Test loss: %s, Test acc: %s' % (loss, acc))
        result = {'model': model, 'history': history, 'loss': loss, 'acc': acc}
        return result
    def save(self, result):
        """
        Save model, weight, history, parameter and evaluation under DIR.

        Args:
            result: dict from train_model() with keys
                'model', 'history', 'loss', 'acc'.
        """
        model = result['model']
        history = result['history']
        loss = result['loss']
        acc = result['acc']
        # Serialize the architecture to JSON
        model_json = model.to_json()
        # Write the architecture, then the trained weights
        with open(os.path.join(DIR, MODEL_FILE), 'w') as json_file:
            json_file.write(model_json)
        model.save_weights(os.path.join(DIR, WEIGHT_FILE))
        # History
        self.__save_history(history)
        self.__plot_history(history)
        # Param and evaluation
        dic = IDG_PARAM  # NOTE: aliases (and mutates) the module-level IDG_PARAM dict
        dic.update({'n_epoch': N_EPOCH, 'batch_size': BATCH_SIZE, 'loss': loss, 'acc': acc})
        # Append this run's row to the parameter/evaluation CSV if it exists
        if os.path.exists(DIR+PARAM_EVAL_FILE):
            df = pd.read_csv(DIR+PARAM_EVAL_FILE)
            df = pd.concat([df, pd.DataFrame([dic])])
        else:
            df = pd.DataFrame([dic])
        df.to_csv(DIR+PARAM_EVAL_FILE, index=False)
    def __save_history(self, history, stdout=False):
        """Dump per-epoch loss/accuracy curves to DIR/HISTORY_DATA_FILE."""
        df = pd.DataFrame()
        df['train_loss'] = history.history['loss']
        df['train_acc'] = history.history['acc']
        df['valid_loss'] = history.history['val_loss']
        df['valid_acc'] = history.history['val_acc']
        df.to_csv(DIR+HISTORY_DATA_FILE, index=False)
        if stdout == True:
            print(df)
    def __plot_history(self, history, stdout=False):
        """Plot accuracy and loss curves side by side to DIR/HISTORY_IMAGE_FILE."""
        # Set background color to white
        fig = plt.figure()
        fig.patch.set_facecolor('white')
        fig.set_size_inches(16.0, 9.0, forward=True)
        # Plot accuracy history
        plt.subplot(1, 2, 1)
        plt.plot(history.history['acc'], "o-", label="train_acc")
        plt.plot(history.history['val_acc'], "o-", label="valid_acc")
        plt.title('model accuracy')
        plt.xlabel('epoch')
        plt.ylabel('accuracy')
        plt.xlim(0)
        plt.ylim(0, 1)
        plt.legend(loc="lower right")
        # Plot loss history
        plt.subplot(1, 2, 2)
        plt.plot(history.history['loss'], "o-", label="train_loss",)
        plt.plot(history.history['val_loss'], "o-", label="valid_loss")
        plt.title('model loss')
        plt.xlabel('epoch')
        plt.ylabel('loss')
        plt.xlim(0)
        plt.ylim(0, max([history.history['loss'][0], history.history['val_loss'][0]]))
        plt.legend(loc='upper right')
        plt.savefig(DIR+HISTORY_IMAGE_FILE, dpi=100)
        if stdout == True:
            plt.show()
    def test_model(self, data):
        """Reload the saved model/weights and re-evaluate on the test set.

        Args:
            data: (X_train, Y_train, X_test, Y_test) tuple from get_data().
        """
        X_train, Y_train, X_test, Y_test = data
        model_file = os.path.join(DIR, MODEL_FILE)
        weight_file = os.path.join(DIR, WEIGHT_FILE)
        with open(model_file, 'r') as fp:
            model = model_from_json(fp.read())
        model.load_weights(weight_file)
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        if not DATA_AUGMENTATION:
            loss, acc = model.evaluate(X_test, Y_test, verbose=0)
        else:
            # Make a generator for test data
            test_datagen = ImageDataGenerator(zca_whitening=True)
            test_datagen.fit(X_test)
            test_generator = test_datagen.flow(X_test, Y_test)
            loss, acc = model.evaluate_generator(test_generator, val_samples=X_test.shape[0])
        print('Test loss: %s, Test acc: %s' % (loss, acc))
        print('')
if __name__ == "__main__":
Test().main() | [
"matplotlib.pyplot.title",
"keras.preprocessing.image.ImageDataGenerator",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"os.path.join",
"pandas.DataFrame",
"scipy.misc.toimage",
"keras.datasets.cifar10.load_data",
"matplotlib.pyplot.imshow",
"os.path.exists",
"keras.layers.Flatten",
"time.cl... | [((1581, 1593), 'time.clock', 'time.clock', ([], {}), '()\n', (1591, 1593), False, 'import time\n'), ((1915, 1934), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (1932, 1934), False, 'from keras.datasets import cifar10\n'), ((2142, 2183), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'N_CLASS'], {}), '(y_train, N_CLASS)\n', (2165, 2183), False, 'from keras.utils import np_utils\n'), ((2195, 2235), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'N_CLASS'], {}), '(y_test, N_CLASS)\n', (2218, 2235), False, 'from keras.utils import np_utils\n'), ((2533, 2545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2543, 2545), True, 'import matplotlib.pyplot as plt\n'), ((3040, 3081), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(DIR + 'cifar10.jpg')"], {'dpi': '(100)'}), "(DIR + 'cifar10.jpg', dpi=100)\n", (3051, 3081), True, 'import matplotlib.pyplot as plt\n'), ((3176, 3188), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3186, 3188), False, 'from keras.models import Sequential\n'), ((7566, 7603), 'os.path.exists', 'os.path.exists', (['(DIR + PARAM_EVAL_FILE)'], {}), '(DIR + PARAM_EVAL_FILE)\n', (7580, 7603), False, 'import os\n'), ((7830, 7844), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7842, 7844), True, 'import pandas as pd\n'), ((8207, 8219), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8217, 8219), True, 'import matplotlib.pyplot as plt\n'), ((8331, 8351), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (8342, 8351), True, 'import matplotlib.pyplot as plt\n'), ((8354, 8411), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']", '"""o-"""'], {'label': '"""train_acc"""'}), "(history.history['acc'], 'o-', label='train_acc')\n", (8362, 8411), True, 'import matplotlib.pyplot as plt\n'), ((8414, 8475), 'matplotlib.pyplot.plot', 'plt.plot', 
(["history.history['val_acc']", '"""o-"""'], {'label': '"""valid_acc"""'}), "(history.history['val_acc'], 'o-', label='valid_acc')\n", (8422, 8475), True, 'import matplotlib.pyplot as plt\n'), ((8478, 8505), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (8487, 8505), True, 'import matplotlib.pyplot as plt\n'), ((8508, 8527), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (8518, 8527), True, 'import matplotlib.pyplot as plt\n'), ((8530, 8552), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (8540, 8552), True, 'import matplotlib.pyplot as plt\n'), ((8555, 8566), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)'], {}), '(0)\n', (8563, 8566), True, 'import matplotlib.pyplot as plt\n'), ((8569, 8583), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (8577, 8583), True, 'import matplotlib.pyplot as plt\n'), ((8586, 8615), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (8596, 8615), True, 'import matplotlib.pyplot as plt\n'), ((8641, 8661), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (8652, 8661), True, 'import matplotlib.pyplot as plt\n'), ((8664, 8723), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']", '"""o-"""'], {'label': '"""train_loss"""'}), "(history.history['loss'], 'o-', label='train_loss')\n", (8672, 8723), True, 'import matplotlib.pyplot as plt\n'), ((8727, 8790), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']", '"""o-"""'], {'label': '"""valid_loss"""'}), "(history.history['val_loss'], 'o-', label='valid_loss')\n", (8735, 8790), True, 'import matplotlib.pyplot as plt\n'), ((8793, 8816), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (8802, 8816), True, 'import matplotlib.pyplot as plt\n'), ((8819, 8838), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (8829, 8838), True, 'import matplotlib.pyplot as plt\n'), ((8841, 8859), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (8851, 8859), True, 'import matplotlib.pyplot as plt\n'), ((8862, 8873), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)'], {}), '(0)\n', (8870, 8873), True, 'import matplotlib.pyplot as plt\n'), ((8957, 8986), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (8967, 8986), True, 'import matplotlib.pyplot as plt\n'), ((8990, 9036), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(DIR + HISTORY_IMAGE_FILE)'], {'dpi': '(100)'}), '(DIR + HISTORY_IMAGE_FILE, dpi=100)\n', (9001, 9036), True, 'import matplotlib.pyplot as plt\n'), ((9159, 9188), 'os.path.join', 'os.path.join', (['DIR', 'MODEL_FILE'], {}), '(DIR, MODEL_FILE)\n', (9171, 9188), False, 'import os\n'), ((9205, 9235), 'os.path.join', 'os.path.join', (['DIR', 'WEIGHT_FILE'], {}), '(DIR, WEIGHT_FILE)\n', (9217, 9235), False, 'import os\n'), ((2862, 2891), 'numpy.random.shuffle', 'np.random.shuffle', (['target_idx'], {}), '(target_idx)\n', (2879, 2891), True, 'import numpy as np\n'), ((3104, 3114), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3112, 3114), True, 'import matplotlib.pyplot as plt\n'), ((3246, 3320), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'border_mode': '"""same"""', 'input_shape': 'X_train.shape[1:]'}), "(32, 3, 3, border_mode='same', input_shape=X_train.shape[1:])\n", (3259, 3320), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3334, 3352), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3344, 3352), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3366, 3409), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'border_mode': '"""same"""'}), "(32, 3, 3, border_mode='same')\n", (3379, 3409), False, 'from 
keras.layers import Convolution2D, MaxPooling2D\n'), ((3423, 3441), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3433, 3441), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3455, 3485), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3467, 3485), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3499, 3512), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3506, 3512), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3569, 3612), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'border_mode': '"""same"""'}), "(64, 3, 3, border_mode='same')\n", (3582, 3612), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3626, 3644), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3636, 3644), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3658, 3701), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'border_mode': '"""same"""'}), "(64, 3, 3, border_mode='same')\n", (3671, 3701), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3715, 3733), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3725, 3733), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3747, 3777), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3759, 3777), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3791, 3804), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3798, 3804), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3831, 3840), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3838, 3840), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3893, 3903), 'keras.layers.Dense', 'Dense', 
(['(512)'], {}), '(512)\n', (3898, 3903), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3917, 3935), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3927, 3935), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3949, 3961), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3956, 3961), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3994, 4008), 'keras.layers.Dense', 'Dense', (['N_CLASS'], {}), '(N_CLASS)\n', (3999, 4008), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((4022, 4043), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (4032, 4043), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((5008, 5880), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'featurewise_center': "IDG_PARAM['featurewise_center']", 'samplewise_center': "IDG_PARAM['samplewise_center']", 'featurewise_std_normalization': "IDG_PARAM['featurewise_std_normalization']", 'samplewise_std_normalization': "IDG_PARAM['samplewise_std_normalization']", 'zca_whitening': "IDG_PARAM['zca_whitening']", 'rotation_range': "IDG_PARAM['rotation_range']", 'width_shift_range': "IDG_PARAM['width_shift_range']", 'height_shift_range': "IDG_PARAM['height_shift_range']", 'shear_range': "IDG_PARAM['shear_range']", 'zoom_range': "IDG_PARAM['zoom_range']", 'channel_shift_range': "IDG_PARAM['channel_shift_range']", 'fill_mode': "IDG_PARAM['fill_mode']", 'cval': "IDG_PARAM['cval']", 'horizontal_flip': "IDG_PARAM['horizontal_flip']", 'vertical_flip': "IDG_PARAM['vertical_flip']", 'rescale': "IDG_PARAM['rescale']", 'preprocessing_function': "IDG_PARAM['preprocessing_function']"}), "(featurewise_center=IDG_PARAM['featurewise_center'],\n samplewise_center=IDG_PARAM['samplewise_center'],\n featurewise_std_normalization=IDG_PARAM['featurewise_std_normalization'\n ], 
samplewise_std_normalization=IDG_PARAM[\n 'samplewise_std_normalization'], zca_whitening=IDG_PARAM[\n 'zca_whitening'], rotation_range=IDG_PARAM['rotation_range'],\n width_shift_range=IDG_PARAM['width_shift_range'], height_shift_range=\n IDG_PARAM['height_shift_range'], shear_range=IDG_PARAM['shear_range'],\n zoom_range=IDG_PARAM['zoom_range'], channel_shift_range=IDG_PARAM[\n 'channel_shift_range'], fill_mode=IDG_PARAM['fill_mode'], cval=\n IDG_PARAM['cval'], horizontal_flip=IDG_PARAM['horizontal_flip'],\n vertical_flip=IDG_PARAM['vertical_flip'], rescale=IDG_PARAM['rescale'],\n preprocessing_function=IDG_PARAM['preprocessing_function'])\n", (5026, 5880), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6217, 6277), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'zca_whitening': "IDG_PARAM['zca_whitening']"}), "(zca_whitening=IDG_PARAM['zca_whitening'])\n", (6235, 6277), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((7319, 7349), 'os.path.join', 'os.path.join', (['DIR', 'WEIGHT_FILE'], {}), '(DIR, WEIGHT_FILE)\n', (7331, 7349), False, 'import os\n'), ((7611, 7645), 'pandas.read_csv', 'pd.read_csv', (['(DIR + PARAM_EVAL_FILE)'], {}), '(DIR + PARAM_EVAL_FILE)\n', (7622, 7645), True, 'import pandas as pd\n'), ((7705, 7724), 'pandas.DataFrame', 'pd.DataFrame', (['[dic]'], {}), '([dic])\n', (7717, 7724), True, 'import pandas as pd\n'), ((9059, 9069), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9067, 9069), True, 'import matplotlib.pyplot as plt\n'), ((9587, 9625), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'zca_whitening': '(True)'}), '(zca_whitening=True)\n', (9605, 9625), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2933, 2954), 'scipy.misc.toimage', 'toimage', (['X_train[idx]'], {}), '(X_train[idx])\n', (2940, 2954), False, 'from scipy.misc import toimage\n'), ((2959, 2983), 'matplotlib.pyplot.subplot', 'plt.subplot', 
(['(10)', '(10)', 'pos'], {}), '(10, 10, pos)\n', (2970, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3003), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2998, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3008, 3023), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3016, 3023), True, 'import matplotlib.pyplot as plt\n'), ((7217, 7246), 'os.path.join', 'os.path.join', (['DIR', 'MODEL_FILE'], {}), '(DIR, MODEL_FILE)\n', (7229, 7246), False, 'import os\n'), ((7667, 7686), 'pandas.DataFrame', 'pd.DataFrame', (['[dic]'], {}), '([dic])\n', (7679, 7686), True, 'import pandas as pd\n'), ((1758, 1770), 'time.clock', 'time.clock', ([], {}), '()\n', (1768, 1770), False, 'import time\n')] |
# coding=utf-8
from __future__ import division
from pprint import pprint
import logging
import numpy as np
import onnx
import onnxruntime
import torch
import torch.onnx
import cv2
from utils.image import get_affine_transform
from utils.post_process import ctdet_post_process
from models.decode import mot_decode
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Per-channel normalization constants used by pre_process
# (presumably the FairMOT training statistics — TODO confirm).
mean = [0.408, 0.447, 0.470] # coco and kitti not same
std = [0.289, 0.274, 0.278]
# Network output resolution = input resolution // down_ratio (see pre_process).
down_ratio = 4
test_scales = [1]
def to_numpy(tensor):
    """Return *tensor* as a numpy array, detaching from autograd if needed."""
    if tensor.requires_grad:
        return tensor.detach().cpu().numpy()
    return tensor.cpu().numpy()
def pre_process(image, scale, meta=None):
    """Prepare an image for network inference.

    Scales the image, warps it into a fixed 512x512 input with an affine
    transform, normalizes with the module-level ``mean``/``std``, and returns
    a (1, 3, 512, 512) float32 array plus the metadata needed by
    post-processing.

    Args:
        image: input image as an HxWxC array (as read by cv2.imread).
        scale: resize factor applied before warping.
        meta: accepted for API compatibility but ignored — rebuilt below.

    Returns:
        Tuple ``(images, meta)`` where ``meta`` holds the affine center 'c',
        scale 's' and the down-sampled output size.
    """
    src_h, src_w = image.shape[0], image.shape[1]
    scaled_w, scaled_h = int(src_w * scale), int(src_h * scale)
    inp_h, inp_w = 512, 512
    center = np.array([scaled_w / 2., scaled_h / 2.], dtype=np.float32)
    size = max(src_h, src_w) * 1.0
    affine = get_affine_transform(center, size, 0, [inp_w, inp_h])
    resized = cv2.resize(image, (scaled_w, scaled_h))
    warped = cv2.warpAffine(
        resized, affine, (inp_w, inp_h),
        flags=cv2.INTER_LINEAR)
    normalized = ((warped / 255. - mean) /
                  std).astype(np.float32)
    # HWC -> CHW, plus a leading batch dimension of 1.
    images = normalized.transpose(2, 0, 1).reshape(
        1, 3, inp_h, inp_w)
    meta = {'c': center, 's': size,
            'out_height': inp_h // down_ratio,
            'out_width': inp_w // down_ratio}
    return images, meta
if __name__ == "__main__":
    # Smoke-test an exported FairMOT DLA-34 ONNX model on a single image.
    onnx_path = r'D:\DeepLearning\ObjectTrackingMethod\FairMOT\models\all_dla34.onnx'
    # load onnx model and verify it is well-formed
    onnx_model = onnx.load(onnx_path)
    onnx.checker.check_model(onnx_model)
    img = cv2.imread(r'F:\图片\me.PNG')
    images, meta = pre_process(img, 1, None)
    #images = images.to(device)
    # forward onnx model
    ort_session = onnxruntime.InferenceSession(onnx_path)
    for ii in ort_session.get_inputs():
        print('onnx input: {}'.format(ii.name))
    # ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(image).astype(np.float32)}
    ort_inputs = {ort_session.get_inputs()[0].name: images}
    ort_outs = ort_session.run(None, ort_inputs)
    print(ort_outs[0])
    # logger.info("ONNX model forwarded successfully")
    # Post-processing: decode detections from the raw network head outputs
    # (presumably hm=heatmap, wh=box size, reg=center offset — TODO confirm
    # output ordering against the export script).
    # heads = {'hm': 80, 'reg': 2, 'wh': 2}
    hm = torch.from_numpy(ort_outs[0]).sigmoid_()
    wh = torch.from_numpy(ort_outs[2])
    reg = torch.from_numpy(ort_outs[1])
    dets = mot_decode(hm, wh, reg=reg, K=100)
print(dets) | [
"onnxruntime.InferenceSession",
"cv2.warpAffine",
"cv2.imread",
"numpy.array",
"torch.cuda.is_available",
"utils.image.get_affine_transform",
"torch.device",
"onnx.checker.check_model",
"models.decode.mot_decode",
"onnx.load",
"cv2.resize",
"torch.from_numpy"
] | [((347, 372), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (370, 372), False, 'import torch\n'), ((323, 343), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (335, 343), False, 'import torch\n'), ((378, 397), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (390, 397), False, 'import torch\n'), ((841, 904), 'numpy.array', 'np.array', (['[new_width / 2.0, new_height / 2.0]'], {'dtype': 'np.float32'}), '([new_width / 2.0, new_height / 2.0], dtype=np.float32)\n', (849, 904), True, 'import numpy as np\n'), ((955, 1009), 'utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', '(0)', '[inp_width, inp_height]'], {}), '(c, s, 0, [inp_width, inp_height])\n', (975, 1009), False, 'from utils.image import get_affine_transform\n'), ((1030, 1072), 'cv2.resize', 'cv2.resize', (['image', '(new_width, new_height)'], {}), '(image, (new_width, new_height))\n', (1040, 1072), False, 'import cv2\n'), ((1089, 1185), 'cv2.warpAffine', 'cv2.warpAffine', (['resized_image', 'trans_input', '(inp_width, inp_height)'], {'flags': 'cv2.INTER_LINEAR'}), '(resized_image, trans_input, (inp_width, inp_height), flags=\n cv2.INTER_LINEAR)\n', (1103, 1185), False, 'import cv2\n'), ((1722, 1742), 'onnx.load', 'onnx.load', (['onnx_path'], {}), '(onnx_path)\n', (1731, 1742), False, 'import onnx\n'), ((1747, 1783), 'onnx.checker.check_model', 'onnx.checker.check_model', (['onnx_model'], {}), '(onnx_model)\n', (1771, 1783), False, 'import onnx\n'), ((1795, 1823), 'cv2.imread', 'cv2.imread', (['"""F:\\\\图片\\\\me.PNG"""'], {}), "('F:\\\\图片\\\\me.PNG')\n", (1805, 1823), False, 'import cv2\n'), ((1944, 1983), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['onnx_path'], {}), '(onnx_path)\n', (1972, 1983), False, 'import onnxruntime\n'), ((2464, 2493), 'torch.from_numpy', 'torch.from_numpy', (['ort_outs[2]'], {}), '(ort_outs[2])\n', (2480, 2493), False, 'import torch\n'), ((2504, 2533), 'torch.from_numpy', 
'torch.from_numpy', (['ort_outs[1]'], {}), '(ort_outs[1])\n', (2520, 2533), False, 'import torch\n'), ((2545, 2579), 'models.decode.mot_decode', 'mot_decode', (['hm', 'wh'], {'reg': 'reg', 'K': '(100)'}), '(hm, wh, reg=reg, K=100)\n', (2555, 2579), False, 'from models.decode import mot_decode\n'), ((2414, 2443), 'torch.from_numpy', 'torch.from_numpy', (['ort_outs[0]'], {}), '(ort_outs[0])\n', (2430, 2443), False, 'import torch\n')] |
import argparse
import logging
import sys
from faker import Faker
import numpy as np
import pandas as pd
import user_agents
from ndc.utils import get_device_class, DEVICE_CLASS_NAMES
logger = logging.getLogger(__name__)


def fake_data(n=1000):
    """Build a DataFrame of *n* synthetic device-fingerprint records.

    Each row is derived from a random fake user-agent string:
      - device_class: human-readable class name from DEVICE_CLASS_NAMES
      - oui: fake MAC OUI built from the first 3 characters of the device
        brand, so the same brand always maps to the same OUI
      - dhcp_options: comma-separated random integers, seeded by the brand
        length so the sequence is reproducible per brand
      - dhcp_vendor: lower-cased, space-padded brand string

    Args:
        n: number of rows to generate (default 1000).

    Returns:
        pandas.DataFrame with columns
        ('device_class', 'oui', 'dhcp_options', 'dhcp_vendor').
    """
    fake = Faker()
    random_state = np.random.RandomState()
    # Collect dicts and build the frame once at the end: calling
    # DataFrame.append per row was O(n^2) and the method was removed
    # entirely in pandas 2.0.
    rows = []
    for _ in range(n):
        ua_str = fake.user_agent()
        ua = user_agents.parse(ua_str)
        device_class = get_device_class(ua_str)
        try:
            brand = str(ua.device.brand).ljust(3)
        except Exception:
            brand = 'None'
        # Create a fake MAC address, using the first 3 characters from the
        # device brand to have a consistent OUI
        oui = ':'.join('%02x' % x for x in (
            ord(brand[0]),
            ord(brand[1]),
            ord(brand[2]),
        ))
        # Create a random comma-separated list of integers, seeded with the
        # length of the brand
        random_state.seed(len(brand))
        dhcp_options = ','.join('%s' % x for x in random_state.randint(
            1, 25, len(ua.os.family) + device_class, int))
        rows.append({
            'device_class': DEVICE_CLASS_NAMES[device_class],
            'oui': oui,
            'dhcp_options': dhcp_options,
            'dhcp_vendor': brand.lower(),
        })
    return pd.DataFrame(
        rows, columns=('device_class', 'oui', 'dhcp_options', 'dhcp_vendor'))
if __name__ == '__main__':
    # CLI entry point: generate N fake fingerprint rows and write them as CSV.
    cli = argparse.ArgumentParser()
    cli.add_argument('-n', '--samples', type=int, default='10')
    cli.add_argument('-o', '--output_file', type=argparse.FileType('w+'),
                     default=sys.stdout)
    options = cli.parse_args()
    frame = fake_data(options.samples)
    frame.to_csv(options.output_file, index=False)
| [
"pandas.DataFrame",
"user_agents.parse",
"argparse.ArgumentParser",
"faker.Faker",
"ndc.utils.get_device_class",
"numpy.random.RandomState",
"argparse.FileType",
"logging.getLogger"
] | [((195, 222), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (212, 222), False, 'import logging\n'), ((259, 266), 'faker.Faker', 'Faker', ([], {}), '()\n', (264, 266), False, 'from faker import Faker\n'), ((276, 352), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "('device_class', 'oui', 'dhcp_options', 'dhcp_vendor')"}), "(columns=('device_class', 'oui', 'dhcp_options', 'dhcp_vendor'))\n", (288, 352), True, 'import pandas as pd\n'), ((382, 405), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (403, 405), True, 'import numpy as np\n'), ((1490, 1515), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1513, 1515), False, 'import argparse\n'), ((478, 503), 'user_agents.parse', 'user_agents.parse', (['ua_str'], {}), '(ua_str)\n', (495, 503), False, 'import user_agents\n'), ((528, 552), 'ndc.utils.get_device_class', 'get_device_class', (['ua_str'], {}), '(ua_str)\n', (544, 552), False, 'from ndc.utils import get_device_class, DEVICE_CLASS_NAMES\n'), ((1660, 1683), 'argparse.FileType', 'argparse.FileType', (['"""w+"""'], {}), "('w+')\n", (1677, 1683), False, 'import argparse\n')] |
################################################################################
# Copyright (c) 2009-2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import katpoint
ant = katpoint.Antenna('KAT7, -30:43:17.34, 21:24:38.46, 1038, 12.0')
freq = 1800.0
freq_range = np.arange(900.0, 2100.0, 10.0)
old_all = katpoint.Catalogue(open('/var/kat/conf/source_list.csv'), antenna=ant, flux_freq_MHz=freq)
old = old_all.filter(flux_limit_Jy=10)
pks10 = katpoint.Catalogue(open('parkes_source_list.csv'), antenna=ant, flux_freq_MHz=freq)
pks = pks10.filter(flux_limit_Jy=10)
jy1_all = katpoint.Catalogue(open('kuehr1Jy_source_list.csv'), antenna=ant, flux_freq_MHz=freq)
jy1 = jy1_all.filter(flux_limit_Jy=10)
plt.figure(1)
plt.clf()
for n, src in enumerate(old):
names = [src.name] + src.aliases
print('OLD: %s %s' % (names, ('%.1f Jy' % (src.flux_density(freq),))
if not np.isnan(src.flux_density(freq)) else ''))
print(src.description)
plt.subplot(5, 6, n + 1)
plt.plot(np.log10(freq_range), np.log10(src.flux_density(freq_range)), 'b')
jy1_src, dist_deg = jy1.closest_to(src)
if dist_deg < 3 / 60.:
print(' --> 1JY: %s %s' %
([jy1_src.name] + jy1_src.aliases,
('%.1f Jy' % (jy1_src.flux_density(freq),)) if not np.isnan(jy1_src.flux_density(freq)) else ''))
print(' %s' % jy1_src.description)
plt.plot(np.log10(freq_range), np.log10(jy1_src.flux_density(freq_range)), 'r')
jy1.remove(jy1_src.name)
pks_src, dist_deg = pks.closest_to(src)
if dist_deg < 3 / 60.:
print(' --> PKS: %s %s' %
([pks_src.name] + pks_src.aliases,
('%.1f Jy' % (pks_src.flux_density(freq),)) if not np.isnan(pks_src.flux_density(freq)) else ''))
print(' %s' % (pks_src.description))
plt.plot(np.log10(freq_range), np.log10(pks_src.flux_density(freq_range)), 'g')
pks.remove(pks_src.name)
plt.axis((np.log10(freq_range[0]), np.log10(freq_range[-1]), 0, 4))
plt.xticks([])
plt.yticks([])
print()
plt.figtext(0.5, 0.93, 'Spectra (log S vs. log v) old=b, 1Jy=r, pks=g', ha='center', va='center')
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figtext",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.log10",
"matplotlib.pyplot.xticks",
"katpoint.Antenna"
] | [((858, 921), 'katpoint.Antenna', 'katpoint.Antenna', (['"""KAT7, -30:43:17.34, 21:24:38.46, 1038, 12.0"""'], {}), "('KAT7, -30:43:17.34, 21:24:38.46, 1038, 12.0')\n", (874, 921), False, 'import katpoint\n'), ((949, 979), 'numpy.arange', 'np.arange', (['(900.0)', '(2100.0)', '(10.0)'], {}), '(900.0, 2100.0, 10.0)\n', (958, 979), True, 'import numpy as np\n'), ((1386, 1399), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1396, 1399), True, 'import matplotlib.pyplot as plt\n'), ((1400, 1409), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1407, 1409), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2861), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.5)', '(0.93)', '"""Spectra (log S vs. log v) old=b, 1Jy=r, pks=g"""'], {'ha': '"""center"""', 'va': '"""center"""'}), "(0.5, 0.93, 'Spectra (log S vs. log v) old=b, 1Jy=r, pks=g', ha=\n 'center', va='center')\n", (2770, 2861), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1682), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(6)', '(n + 1)'], {}), '(5, 6, n + 1)\n', (1669, 1682), True, 'import matplotlib.pyplot as plt\n'), ((2712, 2726), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2722, 2726), True, 'import matplotlib.pyplot as plt\n'), ((2731, 2745), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2741, 2745), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1716), 'numpy.log10', 'np.log10', (['freq_range'], {}), '(freq_range)\n', (1704, 1716), True, 'import numpy as np\n'), ((2094, 2114), 'numpy.log10', 'np.log10', (['freq_range'], {}), '(freq_range)\n', (2102, 2114), True, 'import numpy as np\n'), ((2532, 2552), 'numpy.log10', 'np.log10', (['freq_range'], {}), '(freq_range)\n', (2540, 2552), True, 'import numpy as np\n'), ((2650, 2673), 'numpy.log10', 'np.log10', (['freq_range[0]'], {}), '(freq_range[0])\n', (2658, 2673), True, 'import numpy as np\n'), ((2675, 2699), 'numpy.log10', 'np.log10', (['freq_range[-1]'], {}), 
'(freq_range[-1])\n', (2683, 2699), True, 'import numpy as np\n')] |
"""
pso.py
This code is part of Optimization of Hardware Parameters on a Real-Time Sound
Localization System paper
It contains the implementation of Particle Swarm Optimization, the strategy of
finding the best configuration
Authors:
<NAME>
<NAME>
<NAME>
<NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import itertools
import time
import os
import datetime
import mle
from classes import Swarm
import pickle
###############PARAMETERS###################
mics = 4
sampleRate = 56000
particles = 150
maxit = 200
wmax = 0.9
wmin = 0.4
c1 = 2
c2 = 2
### optimization parameters ###
k_mse = 0.6
k_g = 1
N_mse = 4
N_g = 0.49
r_dist = 0.7 #radius parameter
p_dist = 15 * mle.propSpeed/sampleRate #proximity parameter
SWARM_PATH = '' # '' if no swarm to load
###############################
part_dim = mics * 3
ub = [3] * part_dim
lb = [-3] * part_dim
############################################
#Cloud of Points
R = np.linspace(1,10,10)#15
phi = np.linspace(0, 2*np.pi, 10, endpoint=False)#24
theta = np.linspace(0, np.pi/2, 5, endpoint=True)#12
nPoints = len(R) * len(phi) * len(theta)
#Cost function
def cost(x):
mse1 = 0
mse2 = 0
array = np.reshape(x, newshape=(mics, 3))
#array = np.round(array, decimals=2)
M = mle.arrayMatrix(array)
semi_sphere = itertools.product(R, phi, theta)
for (r, p, t) in semi_sphere:
sources = mle.sph2car(r, p, t) + np.random.randn(2,3)*0.05
delay1 = mle.tdoa(sources[0], array, sr=sampleRate)
delay2 = mle.tdoa(sources[1], array, sr=sampleRate)
result1 = mle.mle_hls(delay1, array, M)
result2 = mle.mle_hls(delay2, array, M)
error1 = float(mle.dist(sources[0], result1))
error2 = float(mle.dist(sources[1], result2))
mse1 += error1**2
mse2 += error2**2
mse = max(mse1, mse2)/nPoints
radius = np.sqrt(np.sum(array**2, axis=1))
mask = radius > r_dist
dist_cost = np.sum(((radius-0.7)*mask)**2)
proximity_cost = 0
for i,j in itertools.combinations(range(mics), 2):
d = float(mle.dist(array[i], array[j])) + 1e-17
proximity_cost += p_dist*(1/d - 1/p_dist) if d < p_dist else 0
return k_mse*mse/N_mse, k_g*dist_cost/N_g, proximity_cost
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d-%H:%M:%S")
directory = "simulationsR1/" + f"mic{mics}_sr{sampleRate}_2_" + date
if not os.path.exists(directory):
os.makedirs(directory)
if SWARM_PATH: print(f"SWARM_PATH: {SWARM_PATH}")
print(f"Number of microphones: {mics}")
print(f"sampleRate: {sampleRate}")
print(f"nPoints: R({len(R)}) * phi({len(phi)}) * theta({len(theta)}) = {nPoints}")
print(f"Iterations: {maxit}")
print(f"Particles: {particles}")
print(f"wmax: {wmax} wmin: {wmin}")
print(f"c1: {c1} c2: {c2}")
print(f"k_mse: {k_mse:.3f} k_g: {k_g:.3f} N_mse: {N_mse:.3f} N_g: {N_g:.3f} r_dist: {r_dist:.3f} p_dist: {p_dist:.3f}")
arquivo = open(directory + "/scores.txt", "w")
if SWARM_PATH: arquivo.write(f"SWARM_PATH: {SWARM_PATH}\n")
arquivo.write(f"Number of microphones: {mics}\n")
arquivo.write(f"sampleRate: {sampleRate}\n")
arquivo.write(f"nPoints: R({len(R)}) * phi({len(phi)}) * theta({len(theta)}) = {nPoints}\n")
arquivo.write(f"Iterations: {maxit}\n")
arquivo.write(f"Particles: {particles}\n")
arquivo.write(f"wmax: {wmax} wmin: {wmin}\n")
arquivo.write(f"c1: {c1} c2: {c2}\n")
arquivo.write(f"k_mse: {k_mse:.3f} k_g: {k_g:.3f} N_mse: {N_mse:.3f} N_g: {N_g:.3f} r_dist: {r_dist:.3f} p_dist: {p_dist:.3f}\n\n")
arquivo.close()
ini = time.time()
ub = np.array(ub)
lb = np.array(lb)
if SWARM_PATH:
swarm_file = open(SWARM_PATH, 'rb')
swarm = pickle.load(swarm_file)
positions = swarm.getPositions()
velocities = swarm.getVelocities()
best_positions = swarm.getBestPositions()
best_costs = swarm.getBestCosts()
current_costs = swarm.getCurrentCosts()
best_position = swarm.getBestPosition()
best_cost = swarm.getBestCost()
best_mse_cost = swarm.getBestMseCost()
best_dist_cost = swarm.getBestDistCost()
best_prox_cost = swarm.getBestProxCost()
else:
positions = mle.randParticle(particles, part_dim, radius=1)
#positions =np.random.randn(individuos, dimention_individuo)
#positions = np.random.rand(particles, part_dim)
#positions = lb + positions * (ub - lb)
velocities = np.zeros_like(positions)
#velocities = np.random.rand(particles, part_dim)
#velocities = -np.abs(ub - lb) + velocities * 2 * np.abs(ub - lb)
best_positions = positions.copy()
best_costs = np.ones(particles) * np.inf
current_costs = np.zeros(particles)
best_position = []
best_cost = np.inf
best_mse_cost = np.inf
best_dist_cost = np.inf
best_prox_cost = np.inf
for i in range(0,particles):
c_mse, c_dist, c_prox = cost(positions[i])
c = c_mse + c_dist + c_prox
current_costs[i] = c
if c < best_costs[i]:
best_positions[i] = positions[i].copy()
best_costs[i] = c
if c < best_cost:
best_position = positions[i].copy()
best_cost = c
best_mse_cost = c_mse
best_dist_cost = c_dist
best_prox_cost = c_prox
global_bests = []
for it in range(0,maxit):
if(it==0):
r0=0
while(r0== 0 or r0== 0.25 or r0== 0.5 or r0== 0.75):
r0 = np.random.rand()
r=r0
else:
r = 4*r*(1-r)
w = r*wmin + (wmax-wmin)*it/(maxit)
r1 = np.random.rand(particles, part_dim)
r2 = np.random.rand(particles, part_dim)
velocities = w*velocities + c1*r1*(best_positions - positions) + c2*r2*(best_position - positions)
positions = positions + velocities
for i in range(0,particles):
c_mse, c_dist, c_prox = cost(positions[i])
c = c_mse + c_dist + c_prox
current_costs[i] = c
if c < best_costs[i]:
best_positions[i] = positions[i].copy()
best_costs[i] = c
if c < best_cost:
best_position = positions[i].copy()
best_cost = c
best_mse_cost = c_mse
best_dist_cost = c_dist
best_prox_cost = c_prox
if (it+1) % 5 ==0:
swarm = Swarm(positions=positions, velocities=velocities, bestPositions=best_positions,
bestCosts=best_costs, currentCosts=current_costs, bestPosition=best_position,
bestCost=best_cost, bestMseCost=best_mse_cost, bestDistCost=best_dist_cost,
bestProxCost=best_prox_cost)
now = datetime.datetime.now()
date = now.strftime(f"%Y-%m-%d-%H:%M:%S")
f = open(directory + f"/{mics}_{sampleRate}_it{it}_" + date + ".obj", "wb")
pickle.dump(swarm, f)
f.close()
max_cost = np.max(best_costs)
min_cost = np.min(best_costs)
mean_cost = np.mean(best_costs)
std_cost = np.std(best_costs)
global_bests.append(best_cost)
global_best = np.round(best_position, decimals=2).reshape(-1, 3)
s = f"""Iteration {it}, \nbest:{np.array_str(global_best)}, cost:{best_cost:.3f}, mse cost{best_mse_cost:.3f},
dist cost{best_dist_cost:.3f}, prox cost{best_prox_cost:.3f}\n mean:{mean_cost:.3f}, std:{std_cost:.3f},
max:{max_cost:.3f}, min:{min_cost:.3f}"""
arquivo = open(directory + "/scores.txt", "a")
arquivo.write(s + '\n')
arquivo.close()
print(s)
arquivo = open(directory + "/scores.txt", "a")
arquivo.write(f"costs:{global_bests}\n")
arquivo.write("\nBest Geometry:\n")
arquivo.write(np.array_str(global_best)+"\n")
arquivo.write(f"Geometry Cost:{best_cost}\n\n")
end= time.time()
print(f"Time {end-ini} s")
arquivo.write(f"Time {end-ini} s")
arquivo.close() | [
"pickle.dump",
"numpy.sum",
"numpy.array_str",
"numpy.ones",
"mle.mle_hls",
"pickle.load",
"numpy.mean",
"numpy.round",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.std",
"os.path.exists",
"mle.randParticle",
"numpy.max",
"mle.arrayMatrix",
"numpy.reshape",
"numpy.linspace",
"i... | [((946, 968), 'numpy.linspace', 'np.linspace', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (957, 968), True, 'import numpy as np\n'), ((976, 1021), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(10)'], {'endpoint': '(False)'}), '(0, 2 * np.pi, 10, endpoint=False)\n', (987, 1021), True, 'import numpy as np\n'), ((1031, 1074), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)', '(5)'], {'endpoint': '(True)'}), '(0, np.pi / 2, 5, endpoint=True)\n', (1042, 1074), True, 'import numpy as np\n'), ((2298, 2321), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2319, 2321), False, 'import datetime\n'), ((3570, 3581), 'time.time', 'time.time', ([], {}), '()\n', (3579, 3581), False, 'import time\n'), ((3588, 3600), 'numpy.array', 'np.array', (['ub'], {}), '(ub)\n', (3596, 3600), True, 'import numpy as np\n'), ((3606, 3618), 'numpy.array', 'np.array', (['lb'], {}), '(lb)\n', (3614, 3618), True, 'import numpy as np\n'), ((7755, 7766), 'time.time', 'time.time', ([], {}), '()\n', (7764, 7766), False, 'import time\n'), ((1184, 1217), 'numpy.reshape', 'np.reshape', (['x'], {'newshape': '(mics, 3)'}), '(x, newshape=(mics, 3))\n', (1194, 1217), True, 'import numpy as np\n'), ((1268, 1290), 'mle.arrayMatrix', 'mle.arrayMatrix', (['array'], {}), '(array)\n', (1283, 1290), False, 'import mle\n'), ((1309, 1341), 'itertools.product', 'itertools.product', (['R', 'phi', 'theta'], {}), '(R, phi, theta)\n', (1326, 1341), False, 'import itertools\n'), ((1980, 2016), 'numpy.sum', 'np.sum', (['(((radius - 0.7) * mask) ** 2)'], {}), '(((radius - 0.7) * mask) ** 2)\n', (1986, 2016), True, 'import numpy as np\n'), ((2440, 2465), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (2454, 2465), False, 'import os\n'), ((2471, 2493), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (2482, 2493), False, 'import os\n'), ((3686, 3709), 'pickle.load', 'pickle.load', (['swarm_file'], {}), '(swarm_file)\n', (3697, 
3709), False, 'import pickle\n'), ((4154, 4201), 'mle.randParticle', 'mle.randParticle', (['particles', 'part_dim'], {'radius': '(1)'}), '(particles, part_dim, radius=1)\n', (4170, 4201), False, 'import mle\n'), ((4382, 4406), 'numpy.zeros_like', 'np.zeros_like', (['positions'], {}), '(positions)\n', (4395, 4406), True, 'import numpy as np\n'), ((4634, 4653), 'numpy.zeros', 'np.zeros', (['particles'], {}), '(particles)\n', (4642, 4653), True, 'import numpy as np\n'), ((5559, 5594), 'numpy.random.rand', 'np.random.rand', (['particles', 'part_dim'], {}), '(particles, part_dim)\n', (5573, 5594), True, 'import numpy as np\n'), ((5604, 5639), 'numpy.random.rand', 'np.random.rand', (['particles', 'part_dim'], {}), '(particles, part_dim)\n', (5618, 5639), True, 'import numpy as np\n'), ((6910, 6928), 'numpy.max', 'np.max', (['best_costs'], {}), '(best_costs)\n', (6916, 6928), True, 'import numpy as np\n'), ((6944, 6962), 'numpy.min', 'np.min', (['best_costs'], {}), '(best_costs)\n', (6950, 6962), True, 'import numpy as np\n'), ((6979, 6998), 'numpy.mean', 'np.mean', (['best_costs'], {}), '(best_costs)\n', (6986, 6998), True, 'import numpy as np\n'), ((7014, 7032), 'numpy.std', 'np.std', (['best_costs'], {}), '(best_costs)\n', (7020, 7032), True, 'import numpy as np\n'), ((1469, 1511), 'mle.tdoa', 'mle.tdoa', (['sources[0]', 'array'], {'sr': 'sampleRate'}), '(sources[0], array, sr=sampleRate)\n', (1477, 1511), False, 'import mle\n'), ((1529, 1571), 'mle.tdoa', 'mle.tdoa', (['sources[1]', 'array'], {'sr': 'sampleRate'}), '(sources[1], array, sr=sampleRate)\n', (1537, 1571), False, 'import mle\n'), ((1599, 1628), 'mle.mle_hls', 'mle.mle_hls', (['delay1', 'array', 'M'], {}), '(delay1, array, M)\n', (1610, 1628), False, 'import mle\n'), ((1647, 1676), 'mle.mle_hls', 'mle.mle_hls', (['delay2', 'array', 'M'], {}), '(delay2, array, M)\n', (1658, 1676), False, 'import mle\n'), ((1911, 1937), 'numpy.sum', 'np.sum', (['(array ** 2)'], {'axis': '(1)'}), '(array ** 2, axis=1)\n', 
(1917, 1937), True, 'import numpy as np\n'), ((4586, 4604), 'numpy.ones', 'np.ones', (['particles'], {}), '(particles)\n', (4593, 4604), True, 'import numpy as np\n'), ((6340, 6616), 'classes.Swarm', 'Swarm', ([], {'positions': 'positions', 'velocities': 'velocities', 'bestPositions': 'best_positions', 'bestCosts': 'best_costs', 'currentCosts': 'current_costs', 'bestPosition': 'best_position', 'bestCost': 'best_cost', 'bestMseCost': 'best_mse_cost', 'bestDistCost': 'best_dist_cost', 'bestProxCost': 'best_prox_cost'}), '(positions=positions, velocities=velocities, bestPositions=\n best_positions, bestCosts=best_costs, currentCosts=current_costs,\n bestPosition=best_position, bestCost=best_cost, bestMseCost=\n best_mse_cost, bestDistCost=best_dist_cost, bestProxCost=best_prox_cost)\n', (6345, 6616), False, 'from classes import Swarm\n'), ((6684, 6707), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6705, 6707), False, 'import datetime\n'), ((6850, 6871), 'pickle.dump', 'pickle.dump', (['swarm', 'f'], {}), '(swarm, f)\n', (6861, 6871), False, 'import pickle\n'), ((7669, 7694), 'numpy.array_str', 'np.array_str', (['global_best'], {}), '(global_best)\n', (7681, 7694), True, 'import numpy as np\n'), ((1394, 1414), 'mle.sph2car', 'mle.sph2car', (['r', 'p', 't'], {}), '(r, p, t)\n', (1405, 1414), False, 'import mle\n'), ((1705, 1734), 'mle.dist', 'mle.dist', (['sources[0]', 'result1'], {}), '(sources[0], result1)\n', (1713, 1734), False, 'import mle\n'), ((1759, 1788), 'mle.dist', 'mle.dist', (['sources[1]', 'result2'], {}), '(sources[1], result2)\n', (1767, 1788), False, 'import mle\n'), ((5434, 5450), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5448, 5450), True, 'import numpy as np\n'), ((7091, 7126), 'numpy.round', 'np.round', (['best_position'], {'decimals': '(2)'}), '(best_position, decimals=2)\n', (7099, 7126), True, 'import numpy as np\n'), ((7183, 7208), 'numpy.array_str', 'np.array_str', (['global_best'], {}), 
'(global_best)\n', (7195, 7208), True, 'import numpy as np\n'), ((1417, 1438), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1432, 1438), True, 'import numpy as np\n'), ((2113, 2141), 'mle.dist', 'mle.dist', (['array[i]', 'array[j]'], {}), '(array[i], array[j])\n', (2121, 2141), False, 'import mle\n')] |
import numpy as np
# KERAS: neural network lib
import keras.backend as K
from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN
from keras.layers import GlobalAveragePooling1D, merge, Flatten
from keras.layers import TimeDistributed
from keras.optimizers import RMSprop, Nadam
from keras.models import Model
from keras.utils.visualize_util import plot
from ad_model import ActionDecisionModel
class HistoryQLearner(ActionDecisionModel):
def __init__(self, seq_len, vocab_size, embd_size, hist_size,
hidden1, hidden2,
num_actions, num_objects,
alpha,gamma,exp_id="model"):
self.seq_length = seq_len
self.vocab_size = vocab_size
self.embd_size = embd_size
self.hist_size = hist_size
self.h1 = hidden1
self.h2 = hidden2
self.action_size = num_actions
self.object_size = num_objects
self.alpha = alpha
self.gamma = gamma
self.model = self.defineModels()
self.model.compile(loss="mse",optimizer=Nadam(clipvalue=0.1))
plot(self.model, show_shapes=True, to_file=exp_id+'.png')
def defineModels(self):
x = Input(shape=(self.hist_size,self.seq_length,), dtype="uint8") # (STATES x SEQUENCE)
# State Representation
w_k = TimeDistributed(Embedding(output_dim=self.embd_size, mask_zero=True,
input_dim=self.vocab_size,
input_length=self.seq_length), name="embedding")(x) # (STATES x SEQUENCE x EMBEDDING)
w_k = TimeDistributed(LSTM(self.h1, return_sequences=True), name="lstm1")(w_k) # (STATES x SEQUENCE x H1)
v_s = TimeDistributed(LSTM(self.h1, activation="relu"), name="lstm2")(w_k) # (STATES x H1)
# history based Q function approximation
q_hidden = SimpleRNN(self.h2, activation="relu", name="history_rnn")(v_s) # (H2)
# action value
qsa = Dense(self.action_size, name="action_dense")(q_hidden) # (ACTIONS)
# object value
qso = Dense(self.object_size, name="object_dense")(q_hidden) # (OBJECTS)
q = merge( [qsa,qso],
mode=lambda x: (K.expand_dims(x[0],2)+K.expand_dims(x[1],1))/2,
output_shape=lambda x: (x[0][0],x[0][1],x[1][1]))
q_model = Model(input=x,output=q)
return q_model
def defineModels_old(self):
x = Input(shape=(self.hist_size,self.seq_length,), dtype="uint8") # (STATES x SEQUENCE)
# State Representation
w_k = TimeDistributed(Embedding(output_dim=self.embd_size, mask_zero=True,
input_dim=self.vocab_size,
input_length=self.seq_length), name="embedding")(x) # (STATES x SEQUENCE x EMBEDDING)
x_k = TimeDistributed(LSTM(self.h1, return_sequences=True), name="lstm1")(w_k) # (STATES x SEQUENCE x H1)
v_s = TimeDistributed(GlobalAveragePooling1D(), name="avg")(x_k) # (STATES x H1)
# history based Q function approximation
q_hidden = SimpleRNN(self.h2, activation="relu", name="history_rnn")(v_s) # (H2)
# action value
qsa = Dense(self.action_size, name="action_dense")(q_hidden) # (ACTIONS)
# object value
qso = Dense(self.object_size, name="object_dense")(q_hidden) # (OBJECTS)
q = merge( [qsa,qso],
mode=lambda x: (K.expand_dims(x[0],2)+K.expand_dims(x[1],1))/2,
output_shape=lambda x: (x[0][0],x[0][1],x[1][1]))
q_model = Model(input=x,output=q)
return q_model
def predictQval(self,s):
return self.model.predict(np.atleast_3d(s))
def predictAction(self,s):
q = self.predictQval([s])[0]
return np.unravel_index(q.argmax(),q.shape)
def randomAction(self):
act = np.random.randint(0, self.action_size)
obj = np.random.randint(0, self.object_size)
return (act,obj)
def predictQmax(self,s):
q = self.predictQval(s)
return q.max(axis=(1,2))
def calculateTargets(self,s_batch,a_batch,r_batch,t_batch,s2_batch):
batch_size = s_batch.shape[0]
# split action tuple
act_batch, obj_batch = a_batch[:,0], a_batch[:,1]
# Calculate targets
target = self.predictQval(s_batch)
qmax = self.predictQmax(s2_batch)
# discount state values using the calculated targets
for k in xrange(batch_size):
a,o = act_batch[k],obj_batch[k]
if t_batch[k]:
# just the true reward if game is over
target[k,a,o] = r_batch[k]
else:
# reward + gamma * max a'{ Q(s', a') }
target[k,a,o] = r_batch[k] + self.gamma * qmax[k]
return target
def trainOnBatch(self,s_batch,a_batch,r_batch,t_batch,s2_batch):
target = self.calculateTargets(s_batch,a_batch,r_batch,t_batch,s2_batch)
loss = self.model.train_on_batch(s_batch,target)
return loss
def save(self,name,overwrite):
self.model.save("q_"+name, overwrite=overwrite)
| [
"keras.layers.SimpleRNN",
"keras.utils.visualize_util.plot",
"numpy.atleast_3d",
"keras.layers.GlobalAveragePooling1D",
"keras.backend.expand_dims",
"keras.layers.LSTM",
"keras.models.Model",
"keras.optimizers.Nadam",
"numpy.random.randint",
"keras.layers.Dense",
"keras.layers.Embedding",
"ker... | [((1110, 1169), 'keras.utils.visualize_util.plot', 'plot', (['self.model'], {'show_shapes': '(True)', 'to_file': "(exp_id + '.png')"}), "(self.model, show_shapes=True, to_file=exp_id + '.png')\n", (1114, 1169), False, 'from keras.utils.visualize_util import plot\n'), ((1209, 1270), 'keras.layers.Input', 'Input', ([], {'shape': '(self.hist_size, self.seq_length)', 'dtype': '"""uint8"""'}), "(shape=(self.hist_size, self.seq_length), dtype='uint8')\n", (1214, 1270), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((2333, 2357), 'keras.models.Model', 'Model', ([], {'input': 'x', 'output': 'q'}), '(input=x, output=q)\n', (2338, 2357), False, 'from keras.models import Model\n'), ((2426, 2487), 'keras.layers.Input', 'Input', ([], {'shape': '(self.hist_size, self.seq_length)', 'dtype': '"""uint8"""'}), "(shape=(self.hist_size, self.seq_length), dtype='uint8')\n", (2431, 2487), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((3539, 3563), 'keras.models.Model', 'Model', ([], {'input': 'x', 'output': 'q'}), '(input=x, output=q)\n', (3544, 3563), False, 'from keras.models import Model\n'), ((3833, 3871), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.action_size'], {}), '(0, self.action_size)\n', (3850, 3871), True, 'import numpy as np\n'), ((3886, 3924), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.object_size'], {}), '(0, self.object_size)\n', (3903, 3924), True, 'import numpy as np\n'), ((1850, 1907), 'keras.layers.SimpleRNN', 'SimpleRNN', (['self.h2'], {'activation': '"""relu"""', 'name': '"""history_rnn"""'}), "(self.h2, activation='relu', name='history_rnn')\n", (1859, 1907), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((1957, 2001), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'name': '"""action_dense"""'}), "(self.action_size, name='action_dense')\n", (1962, 2001), False, 'from keras.layers import Input, Dense, Embedding, 
LSTM, SimpleRNN\n'), ((2061, 2105), 'keras.layers.Dense', 'Dense', (['self.object_size'], {'name': '"""object_dense"""'}), "(self.object_size, name='object_dense')\n", (2066, 2105), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((3056, 3113), 'keras.layers.SimpleRNN', 'SimpleRNN', (['self.h2'], {'activation': '"""relu"""', 'name': '"""history_rnn"""'}), "(self.h2, activation='relu', name='history_rnn')\n", (3065, 3113), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((3163, 3207), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'name': '"""action_dense"""'}), "(self.action_size, name='action_dense')\n", (3168, 3207), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((3267, 3311), 'keras.layers.Dense', 'Dense', (['self.object_size'], {'name': '"""object_dense"""'}), "(self.object_size, name='object_dense')\n", (3272, 3311), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((3651, 3667), 'numpy.atleast_3d', 'np.atleast_3d', (['s'], {}), '(s)\n', (3664, 3667), True, 'import numpy as np\n'), ((1080, 1100), 'keras.optimizers.Nadam', 'Nadam', ([], {'clipvalue': '(0.1)'}), '(clipvalue=0.1)\n', (1085, 1100), False, 'from keras.optimizers import RMSprop, Nadam\n'), ((1354, 1468), 'keras.layers.Embedding', 'Embedding', ([], {'output_dim': 'self.embd_size', 'mask_zero': '(True)', 'input_dim': 'self.vocab_size', 'input_length': 'self.seq_length'}), '(output_dim=self.embd_size, mask_zero=True, input_dim=self.\n vocab_size, input_length=self.seq_length)\n', (1363, 1468), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((1598, 1634), 'keras.layers.LSTM', 'LSTM', (['self.h1'], {'return_sequences': '(True)'}), '(self.h1, return_sequences=True)\n', (1602, 1634), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((1713, 1745), 'keras.layers.LSTM', 'LSTM', (['self.h1'], {'activation': 
'"""relu"""'}), "(self.h1, activation='relu')\n", (1717, 1745), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((2571, 2685), 'keras.layers.Embedding', 'Embedding', ([], {'output_dim': 'self.embd_size', 'mask_zero': '(True)', 'input_dim': 'self.vocab_size', 'input_length': 'self.seq_length'}), '(output_dim=self.embd_size, mask_zero=True, input_dim=self.\n vocab_size, input_length=self.seq_length)\n', (2580, 2685), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((2815, 2851), 'keras.layers.LSTM', 'LSTM', (['self.h1'], {'return_sequences': '(True)'}), '(self.h1, return_sequences=True)\n', (2819, 2851), False, 'from keras.layers import Input, Dense, Embedding, LSTM, SimpleRNN\n'), ((2929, 2953), 'keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (2951, 2953), False, 'from keras.layers import GlobalAveragePooling1D, merge, Flatten\n'), ((2196, 2218), 'keras.backend.expand_dims', 'K.expand_dims', (['x[0]', '(2)'], {}), '(x[0], 2)\n', (2209, 2218), True, 'import keras.backend as K\n'), ((2218, 2240), 'keras.backend.expand_dims', 'K.expand_dims', (['x[1]', '(1)'], {}), '(x[1], 1)\n', (2231, 2240), True, 'import keras.backend as K\n'), ((3402, 3424), 'keras.backend.expand_dims', 'K.expand_dims', (['x[0]', '(2)'], {}), '(x[0], 2)\n', (3415, 3424), True, 'import keras.backend as K\n'), ((3424, 3446), 'keras.backend.expand_dims', 'K.expand_dims', (['x[1]', '(1)'], {}), '(x[1], 1)\n', (3437, 3446), True, 'import keras.backend as K\n')] |
import numpy as np
import h5py
import matplotlib
from matplotlib import colors
import matplotlib.pyplot as plt
import matplotlib.patches as ptch
from matplotlib.artist import setp
from matplotlib.collections import PatchCollection
from matplotlib.lines import Line2D
import os
from shapely.geometry import Point, Polygon, LineString
from mpl_toolkits.axes_grid1 import ImageGrid
# Room dimensions
width = 20
height = 20
# Number of pedestrians in room initially
n_a = 1000
# Number of pedestrians exited
stat_reg_start = 9
# Initialize figure
fig = plt.figure(0)
# Create a color map of fixed colors
cmap = colors.ListedColormap(['red', 'blue'])
bounds=[0,0.5,1]
norm = colors.BoundaryNorm(bounds, cmap.N)
# Load data of which pedestrians are in the room
if os.path.exists('bigequilibrium/in_room1.npy.gz'):
in_room = np.loadtxt('bigequilibrium/in_room1.npy.gz')
sum_in_room = np.sum(in_room, axis=1)
# Time when n_a - stat_reg_start pedestrians have exited
time_stat_reg_start = np.where(sum_in_room == (n_a -stat_reg_start))[0][0]
agents_in_room = np.where(in_room[time_stat_reg_start, :] == 1)[0]
# Load pedestrian's x-positions
if os.path.exists('bigequilibrium/positions_x.npy.gz'):
positions_x = np.loadtxt('bigequilibrium/positions_x.npy.gz')
positions_x = positions_x[0::2]
# Load pedestrian's y-positions
if os.path.exists('bigequilibrium/positions_y.npy.gz'):
positions_y = np.loadtxt('bigequilibrium/positions_y.npy.gz')
positions_y = positions_y[0::2]
# Load pedestrian's radii
if os.path.exists('bigequilibrium/radius.npy.gz'):
radius = np.loadtxt('bigequilibrium/radius.npy.gz')
# Load pedestrian's strategies
if os.path.exists('bigequilibrium/strategy.npy.gz'):
strategy = np.loadtxt('bigequilibrium/strategy.npy.gz')
# Create cricles based on pedestrian's positions and radius
patches = []
for k in agents_in_room:
circle = ptch.Circle((positions_y[time_stat_reg_start, k], -positions_x[time_stat_reg_start, k] + width),
radius[k])
patches.append(circle)
# Change figure settings
ax = plt.gca()
ax.set_xlim([5.53, 34.43])
ax.set_ylim([-21, -5.92])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_aspect('equal')
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
# Add colored circles to represent different strategists
p = PatchCollection(patches, cmap=cmap, norm=norm, edgecolor='black', lw=0.2)
p.set_array(strategy[time_stat_reg_start, agents_in_room])
ax.add_collection(p)
# Add legend
ax.add_patch(ptch.Circle((6.1,-6.6), 0.5, edgecolor='black', facecolor='red', lw=0.2),)
ax.add_patch(ptch.Circle((6.1,-8), 0.5, edgecolor='black', facecolor='blue', lw=0.2),)
ax.text(6.8, -7, 'Impatient', fontsize=20)
ax.text(6.8, -8.5, 'Patient', fontsize=20)
# Plot a "bottom floor"
floor_left_x = np.arange(5.5,19.5,0.1)
floor_left_y = -20.08*np.ones(len(floor_left_x))
floor_right_x = np.arange(21.5,34.5,0.1)
floor_right_y = -20.08*np.ones(len(floor_right_x))
ax.plot(floor_left_x, floor_left_y, color='black', linewidth=2.5)
ax.plot(floor_right_x, floor_right_y, color='black', linewidth=2.5)
# Plot 3 half-circles
x0 = 20.3
y0 = -20
radius0 = 9
x = np.arange(x0-radius0,x0+radius0,0.001)
y = np.sqrt(radius0**2-(x-x0)**2) + y0
ax.plot(x, y, color='black', linewidth=5)
x1 = 20.3
y1 = -20
radius1 = 6.5
x = np.arange(x1-radius1,x1+radius1,0.001)
y = np.sqrt(radius1**2-(x-x1)**2) + y1
ax.plot(x, y, color='black', linewidth=5)
x2 = 20.3
y2 = -20
radius2 = 3.5
x = np.arange(x2-radius2,x2+radius2,0.001)
y = np.sqrt(radius2**2-(x-x2)**2) + y2
ax.plot(x, y, color='black', linewidth=5)
# Plot black rectangle to represent the exit
ax.add_patch(ptch.Rectangle((19.3,-20.73), 2.4,0.7, edgecolor='black', facecolor='black'),)
# Plot EXIT sign
ax.text(18.85, -22, 'EXIT', fontsize=20, fontweight='bold')
# Label the half-circles
ax.text(x0-radius0-0.5, -21.4, 'A', fontsize=20, fontweight='bold')
ax.text(x1-radius1-0.5, -21.4, 'B', fontsize=20, fontweight='bold')
ax.text(x2-radius2-0.5, -21.4, 'C', fontsize=20, fontweight='bold')
# Save figure as pdf
plt.savefig('figure_1.pdf',
bbox_inches='tight'
)
| [
"numpy.sum",
"matplotlib.patches.Rectangle",
"matplotlib.colors.BoundaryNorm",
"os.path.exists",
"matplotlib.patches.Circle",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"numpy.loadtxt",
"matplotlib.pyplot.gca",
"matplotlib.collections.PatchCollection",
"matplotlib.colors.Listed... | [((553, 566), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (563, 566), True, 'import matplotlib.pyplot as plt\n'), ((612, 650), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["['red', 'blue']"], {}), "(['red', 'blue'])\n", (633, 650), False, 'from matplotlib import colors\n'), ((675, 710), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (694, 710), False, 'from matplotlib import colors\n'), ((764, 812), 'os.path.exists', 'os.path.exists', (['"""bigequilibrium/in_room1.npy.gz"""'], {}), "('bigequilibrium/in_room1.npy.gz')\n", (778, 812), False, 'import os\n'), ((1163, 1214), 'os.path.exists', 'os.path.exists', (['"""bigequilibrium/positions_x.npy.gz"""'], {}), "('bigequilibrium/positions_x.npy.gz')\n", (1177, 1214), False, 'import os\n'), ((1354, 1405), 'os.path.exists', 'os.path.exists', (['"""bigequilibrium/positions_y.npy.gz"""'], {}), "('bigequilibrium/positions_y.npy.gz')\n", (1368, 1405), False, 'import os\n'), ((1539, 1585), 'os.path.exists', 'os.path.exists', (['"""bigequilibrium/radius.npy.gz"""'], {}), "('bigequilibrium/radius.npy.gz')\n", (1553, 1585), False, 'import os\n'), ((1678, 1726), 'os.path.exists', 'os.path.exists', (['"""bigequilibrium/strategy.npy.gz"""'], {}), "('bigequilibrium/strategy.npy.gz')\n", (1692, 1726), False, 'import os\n'), ((2096, 2105), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2103, 2105), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2568), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'cmap': 'cmap', 'norm': 'norm', 'edgecolor': '"""black"""', 'lw': '(0.2)'}), "(patches, cmap=cmap, norm=norm, edgecolor='black', lw=0.2)\n", (2510, 2568), False, 'from matplotlib.collections import PatchCollection\n'), ((2964, 2989), 'numpy.arange', 'np.arange', (['(5.5)', '(19.5)', '(0.1)'], {}), '(5.5, 19.5, 0.1)\n', (2973, 2989), True, 'import numpy as np\n'), ((3053, 
3079), 'numpy.arange', 'np.arange', (['(21.5)', '(34.5)', '(0.1)'], {}), '(21.5, 34.5, 0.1)\n', (3062, 3079), True, 'import numpy as np\n'), ((3321, 3365), 'numpy.arange', 'np.arange', (['(x0 - radius0)', '(x0 + radius0)', '(0.001)'], {}), '(x0 - radius0, x0 + radius0, 0.001)\n', (3330, 3365), True, 'import numpy as np\n'), ((3479, 3523), 'numpy.arange', 'np.arange', (['(x1 - radius1)', '(x1 + radius1)', '(0.001)'], {}), '(x1 - radius1, x1 + radius1, 0.001)\n', (3488, 3523), True, 'import numpy as np\n'), ((3637, 3681), 'numpy.arange', 'np.arange', (['(x2 - radius2)', '(x2 + radius2)', '(0.001)'], {}), '(x2 - radius2, x2 + radius2, 0.001)\n', (3646, 3681), True, 'import numpy as np\n'), ((4225, 4273), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure_1.pdf"""'], {'bbox_inches': '"""tight"""'}), "('figure_1.pdf', bbox_inches='tight')\n", (4236, 4273), True, 'import matplotlib.pyplot as plt\n'), ((828, 872), 'numpy.loadtxt', 'np.loadtxt', (['"""bigequilibrium/in_room1.npy.gz"""'], {}), "('bigequilibrium/in_room1.npy.gz')\n", (838, 872), True, 'import numpy as np\n'), ((891, 914), 'numpy.sum', 'np.sum', (['in_room'], {'axis': '(1)'}), '(in_room, axis=1)\n', (897, 914), True, 'import numpy as np\n'), ((1234, 1281), 'numpy.loadtxt', 'np.loadtxt', (['"""bigequilibrium/positions_x.npy.gz"""'], {}), "('bigequilibrium/positions_x.npy.gz')\n", (1244, 1281), True, 'import numpy as np\n'), ((1425, 1472), 'numpy.loadtxt', 'np.loadtxt', (['"""bigequilibrium/positions_y.npy.gz"""'], {}), "('bigequilibrium/positions_y.npy.gz')\n", (1435, 1472), True, 'import numpy as np\n'), ((1600, 1642), 'numpy.loadtxt', 'np.loadtxt', (['"""bigequilibrium/radius.npy.gz"""'], {}), "('bigequilibrium/radius.npy.gz')\n", (1610, 1642), True, 'import numpy as np\n'), ((1743, 1787), 'numpy.loadtxt', 'np.loadtxt', (['"""bigequilibrium/strategy.npy.gz"""'], {}), "('bigequilibrium/strategy.npy.gz')\n", (1753, 1787), True, 'import numpy as np\n'), ((1900, 2012), 'matplotlib.patches.Circle', 
'ptch.Circle', (['(positions_y[time_stat_reg_start, k], -positions_x[time_stat_reg_start, k] +\n width)', 'radius[k]'], {}), '((positions_y[time_stat_reg_start, k], -positions_x[\n time_stat_reg_start, k] + width), radius[k])\n', (1911, 2012), True, 'import matplotlib.patches as ptch\n'), ((2676, 2749), 'matplotlib.patches.Circle', 'ptch.Circle', (['(6.1, -6.6)', '(0.5)'], {'edgecolor': '"""black"""', 'facecolor': '"""red"""', 'lw': '(0.2)'}), "((6.1, -6.6), 0.5, edgecolor='black', facecolor='red', lw=0.2)\n", (2687, 2749), True, 'import matplotlib.patches as ptch\n'), ((2764, 2836), 'matplotlib.patches.Circle', 'ptch.Circle', (['(6.1, -8)', '(0.5)'], {'edgecolor': '"""black"""', 'facecolor': '"""blue"""', 'lw': '(0.2)'}), "((6.1, -8), 0.5, edgecolor='black', facecolor='blue', lw=0.2)\n", (2775, 2836), True, 'import matplotlib.patches as ptch\n'), ((3364, 3401), 'numpy.sqrt', 'np.sqrt', (['(radius0 ** 2 - (x - x0) ** 2)'], {}), '(radius0 ** 2 - (x - x0) ** 2)\n', (3371, 3401), True, 'import numpy as np\n'), ((3522, 3559), 'numpy.sqrt', 'np.sqrt', (['(radius1 ** 2 - (x - x1) ** 2)'], {}), '(radius1 ** 2 - (x - x1) ** 2)\n', (3529, 3559), True, 'import numpy as np\n'), ((3680, 3717), 'numpy.sqrt', 'np.sqrt', (['(radius2 ** 2 - (x - x2) ** 2)'], {}), '(radius2 ** 2 - (x - x2) ** 2)\n', (3687, 3717), True, 'import numpy as np\n'), ((3816, 3894), 'matplotlib.patches.Rectangle', 'ptch.Rectangle', (['(19.3, -20.73)', '(2.4)', '(0.7)'], {'edgecolor': '"""black"""', 'facecolor': '"""black"""'}), "((19.3, -20.73), 2.4, 0.7, edgecolor='black', facecolor='black')\n", (3830, 3894), True, 'import matplotlib.patches as ptch\n'), ((1077, 1123), 'numpy.where', 'np.where', (['(in_room[time_stat_reg_start, :] == 1)'], {}), '(in_room[time_stat_reg_start, :] == 1)\n', (1085, 1123), True, 'import numpy as np\n'), ((1003, 1048), 'numpy.where', 'np.where', (['(sum_in_room == n_a - stat_reg_start)'], {}), '(sum_in_room == n_a - stat_reg_start)\n', (1011, 1048), True, 'import numpy as 
np\n')] |
#!/usr/bin/env python3
# -*-coding: utf-8 -*-
#
# NOTE(review): `help` shadows the builtin; it is used below as the argparse
# description (runtime string, left unchanged).
help = 'モデルとモデルパラメータを利用して推論実行する'
#
import logging
# basicConfig() must be called *before* the first debug()/info() call.
logging.basicConfig(format='%(message)s')
level = logging.INFO
logging.getLogger('Tools').setLevel(level=level)
import cv2
import time
import argparse
import numpy as np
try:
import cupy
except ImportError:
print('not import cupy')
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.links.model.vision import resnet
import Tools.func as FNC
import Tools.getfunc as GET
from Lib.network import CNT
from create_dataset import create
def command():
    """Parse the command-line arguments for the inference run.

    Returns:
        argparse.Namespace: parsed arguments, echoed via FNC.argsPrint.
    """
    parser = argparse.ArgumentParser(description=help)
    # Positional arguments: trained model snapshot and its json parameters.
    parser.add_argument('model',
                        help='使用する学習済みモデル')
    parser.add_argument('param',
                        help='使用するモデルパラメータ')
    # Image source folders used to generate the evaluation images.
    parser.add_argument('-ot', '--other_path', default='./Image/other/',
                        help='動物、怪獣の画像フォルダ (default: ./Image/other/')
    parser.add_argument('-hu', '--human_path', default='./Image/people/',
                        help='人間の画像フォルダ (default: ./Image/people/')
    parser.add_argument('-bg', '--background_path', default='./Image/background/',
                        help='背景の画像フォルダ (default: ./Image/background/')
    # Generation sizes/counts.
    parser.add_argument('-os', '--obj_size', type=int, default=64,
                        help='挿入する画像サイズ [default: 64 pixel]')
    parser.add_argument('-on', '--obj_num', type=int, default=3,
                        help='画像を生成する数 [default: 3]')
    parser.add_argument('-is', '--img_size', type=int, default=256,
                        help='生成される画像サイズ [default: 256 pixel]')
    parser.add_argument('-in', '--img_num', type=int, default=20,
                        help='1種類あたりの画像数 [default: 20]')
    # Device selection (-1 = CPU) and output directory.
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID [default -1]')
    parser.add_argument('--out_path', '-o', default='./result/',
                        help='生成物の保存先[default: ./result/]')
    args = parser.parse_args()
    FNC.argsPrint(args)
    return args
def imgs2resnet(imgs, xp=np):
    """Convert a batch of BGR images to ResNet input format.

    Each image is converted BGR -> RGB and passed through
    ``resnet.prepare``; the results are stacked with ``xp.array``
    (xp is numpy on CPU, cupy on GPU).
    """
    prepared = []
    for img in imgs:
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        prepared.append(resnet.prepare(rgb))
    return xp.array(prepared)
def main(args):
    """Run inference with a trained model and report precision/recall/F-score.

    Args:
        args: parsed command-line arguments (see ``command()``): the model
            snapshot, its json parameter file, the image source folders,
            generation sizes/counts and the GPU id.
    """
    # Read the network hyper-parameters from the json parameter file.
    n_out, n_unit, actfun = GET.jsonData(
        args.param, ['n_out', 'n_unit', 'actfun']
    )
    # Build the network (CNT head on an uninitialised ResNet50 base).
    model = L.Classifier(
        CNT(n_out, n_unit, GET.actfun(actfun), base=L.ResNet50Layers(None))
    )
    # Resolve the npz path layout and load the trained weights.
    load_path = FNC.checkModelType(args.model)
    try:
        chainer.serializers.load_npz(args.model, model, path=load_path)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; exit non-zero so callers can detect the failure
        # (the original exited with status 0).
        import traceback
        traceback.print_exc()
        print(FNC.fileFuncLine())
        exit(1)
    # GPU setup: move the model to the device and pick the array module
    # (cupy on GPU, numpy on CPU).
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
        xp = cupy
    else:
        xp = np
        # model.to_intel64()
    # Generate evaluation images: args.img_num images per class label i.
    x = []
    t = []
    for i in range(n_out):
        x.extend(
            create(args.other_path, args.human_path, args.background_path,
                   args.obj_size, args.img_size, args.obj_num, i, args.img_num)
        )
        t.extend([i]*args.img_num)
    x = imgs2resnet(np.array(x), xp)
    t = xp.array(t, dtype=np.int8)
    print(x.shape, t.shape)
    # Run the model in inference mode and time the forward pass.
    with chainer.using_config('train', False):
        st = time.time()
        y = model.predictor(x)
        print('exec time: {0:.2f}[s]'.format(time.time() - st))
    # Evaluate precision, recall and F-score:
    # precision: of the samples predicted as a class, the fraction that
    #            truly belongs to it (over-predicting a class lowers it).
    # recall:    of the samples truly in a class, the fraction predicted
    #            as that class (missing a class lowers it).
    # F score:   2/((1/recall)+(1/precision))
    print('t:', t)
    print('y:', y.data.argmax(axis=1))
    p, r, f, _ = F.classification_summary(y, t)
    precision = p.data.tolist()
    recall = r.data.tolist()
    F_score = f.data.tolist()
    print('num|precision|recall|F')
    # Plain loop instead of a list comprehension used only for its side
    # effect (printing).
    for i, elem in enumerate(zip(precision, recall, F_score)):
        print('{0:3}| {1:4.3f}| {2:4.3f}| {3:4.3f}'.format(i, elem[0], elem[1], elem[2]))
if __name__ == '__main__':
    # Entry point: parse CLI arguments, then run inference.
    main(command())
| [
"traceback.print_exc",
"chainer.functions.classification_summary",
"argparse.ArgumentParser",
"logging.basicConfig",
"chainer.serializers.load_npz",
"cv2.cvtColor",
"chainer.cuda.get_device_from_id",
"Tools.func.argsPrint",
"Tools.func.checkModelType",
"time.time",
"Tools.getfunc.jsonData",
"n... | [((150, 191), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""'}), "(format='%(message)s')\n", (169, 191), False, 'import logging\n'), ((657, 698), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'help'}), '(description=help)\n', (680, 698), False, 'import argparse\n'), ((2070, 2089), 'Tools.func.argsPrint', 'FNC.argsPrint', (['args'], {}), '(args)\n', (2083, 2089), True, 'import Tools.func as FNC\n'), ((2334, 2389), 'Tools.getfunc.jsonData', 'GET.jsonData', (['args.param', "['n_out', 'n_unit', 'actfun']"], {}), "(args.param, ['n_out', 'n_unit', 'actfun'])\n", (2346, 2389), True, 'import Tools.getfunc as GET\n'), ((2584, 2614), 'Tools.func.checkModelType', 'FNC.checkModelType', (['args.model'], {}), '(args.model)\n', (2602, 2614), True, 'import Tools.func as FNC\n'), ((3902, 3932), 'chainer.functions.classification_summary', 'F.classification_summary', (['y', 't'], {}), '(y, t)\n', (3926, 3932), True, 'import chainer.functions as F\n'), ((213, 239), 'logging.getLogger', 'logging.getLogger', (['"""Tools"""'], {}), "('Tools')\n", (230, 239), False, 'import logging\n'), ((2632, 2695), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.model', 'model'], {'path': 'load_path'}), '(args.model, model, path=load_path)\n', (2660, 2695), False, 'import chainer\n'), ((3301, 3312), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3309, 3312), True, 'import numpy as np\n'), ((3408, 3444), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (3428, 3444), False, 'import chainer\n'), ((3459, 3470), 'time.time', 'time.time', ([], {}), '()\n', (3468, 3470), False, 'import time\n'), ((2164, 2200), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2176, 2200), False, 'import cv2\n'), ((2474, 2492), 'Tools.getfunc.actfun', 'GET.actfun', (['actfun'], {}), '(actfun)\n', (2484, 2492), True, 'import 
Tools.getfunc as GET\n'), ((2741, 2762), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2760, 2762), False, 'import traceback\n'), ((3092, 3220), 'create_dataset.create', 'create', (['args.other_path', 'args.human_path', 'args.background_path', 'args.obj_size', 'args.img_size', 'args.obj_num', 'i', 'args.img_num'], {}), '(args.other_path, args.human_path, args.background_path, args.\n obj_size, args.img_size, args.obj_num, i, args.img_num)\n', (3098, 3220), False, 'from create_dataset import create\n'), ((2499, 2521), 'chainer.links.ResNet50Layers', 'L.ResNet50Layers', (['None'], {}), '(None)\n', (2515, 2521), True, 'import chainer.links as L\n'), ((2777, 2795), 'Tools.func.fileFuncLine', 'FNC.fileFuncLine', ([], {}), '()\n', (2793, 2795), True, 'import Tools.func as FNC\n'), ((2856, 2897), 'chainer.cuda.get_device_from_id', 'chainer.cuda.get_device_from_id', (['args.gpu'], {}), '(args.gpu)\n', (2887, 2897), False, 'import chainer\n'), ((3547, 3558), 'time.time', 'time.time', ([], {}), '()\n', (3556, 3558), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
"""
import numpy as np
class FibonacciCalculator:
    """
    The class calculates Fibonacci series using various dynamic programming
    algorithms: naive exponential recursion, memoised linear recursion, and
    logarithmic matrix exponentiation by squaring.
    """
    def __init__(self):
        # Bookkeeping counters; not updated by the current algorithms, kept
        # for interface compatibility.
        self.number_of_sum = 0
        self.number_of_mul = 0
        self.number_of_dict_query = 0
        self.memory_size = 0
    def calculate_fibonacci_exp(self, n) -> int:
        """
        Solve fibonacci sequence as an EXP problem (naive recursion).
        Returns None for n < 1 (preserved behaviour).
        """
        if n >= 1 and n <= 2:
            return 1
        elif n > 2:
            return self.calculate_fibonacci_exp(n-1) \
                + self.calculate_fibonacci_exp(n-2)
    def calculate_fibonacci_p(self, n) -> int:
        """
        Solve fibonacci sequence as a P problem (memoised recursion).
        Raises KeyError for n < 1 (preserved behaviour).
        """
        self.fibonacci_p_dict = {}
        if n >= 1:
            self._calculate_fibonacci_p_adddict(n)
        return self.fibonacci_p_dict[n]
    def _calculate_fibonacci_p_adddict(self, n):
        """
        Append dictionary when solving the fibonacci problem as a P problem.
        """
        if n in self.fibonacci_p_dict:  # idiomatic membership, no .keys()
            pass
        elif n >= 1 and n <= 2:
            self.fibonacci_p_dict[n] = 1
        else:
            self._calculate_fibonacci_p_adddict(n-1)
            self._calculate_fibonacci_p_adddict(n-2)
            self.fibonacci_p_dict[n] = self.fibonacci_p_dict[n-1] \
                + self.fibonacci_p_dict[n-2]
    def calculate_fibonacci_log(self, n) -> int:
        """
        Solve the fibonacci problem as a LOG-P problem via matrix powers.

        Bug fix: the original entered infinite recursion for n == 1
        (it requested the 0-th matrix power, and the even branch looped on
        n == 0 forever); n == 1 and n == 2 are now answered directly.
        Raises KeyError for n < 1 (preserved behaviour).
        """
        self.fibonacci_log_dict = {}
        if n == 1 or n == 2:
            return 1
        if n >= 1:
            self._calculate_fibonacci_log_adddict(n-1)
        return int(np.dot(self.fibonacci_log_dict[n-1],
                          np.array([[0.], [1.]]))[1][0])
    def _calculate_fibonacci_log_adddict(self, n):
        """
        Append dictionary when solving the fibonacci problem as a
        LOG-P problem: exponentiation by squaring of [[0,1],[1,1]].
        """
        if n in self.fibonacci_log_dict:
            pass
        elif n == 1:
            self.fibonacci_log_dict[n] = np.array([[0, 1], [1, 1]])
        else:
            if (n % 2) == 0:
                # Integer floor division keeps dict keys int (the original
                # used n/2, producing float keys).
                half = n // 2
                self._calculate_fibonacci_log_adddict(half)
                self.fibonacci_log_dict[n] = \
                    np.dot(self.fibonacci_log_dict[half],
                           self.fibonacci_log_dict[half])
            else:
                self._calculate_fibonacci_log_adddict(n-1)
                self.fibonacci_log_dict[n] = \
                    np.dot(self.fibonacci_log_dict[n-1],
                           np.array([[0, 1], [1, 1]]))
| [
"numpy.dot",
"numpy.array"
] | [((2090, 2116), 'numpy.array', 'np.array', (['[[0, 1], [1, 1]]'], {}), '([[0, 1], [1, 1]])\n', (2098, 2116), True, 'import numpy as np\n'), ((2284, 2354), 'numpy.dot', 'np.dot', (['self.fibonacci_log_dict[n / 2]', 'self.fibonacci_log_dict[n / 2]'], {}), '(self.fibonacci_log_dict[n / 2], self.fibonacci_log_dict[n / 2])\n', (2290, 2354), True, 'import numpy as np\n'), ((2586, 2612), 'numpy.array', 'np.array', (['[[0, 1], [1, 1]]'], {}), '([[0, 1], [1, 1]])\n', (2594, 2612), True, 'import numpy as np\n'), ((1770, 1794), 'numpy.array', 'np.array', (['[[0.0], [1.0]]'], {}), '([[0.0], [1.0]])\n', (1778, 1794), True, 'import numpy as np\n')] |
import itertools
import torch
from scipy.stats import describe
import numpy as np
from src import CONFIG
from src.train import MetaOptimizer
WEIGHT_DECAY = 1e-3
def test():
    """Run the meta-learner against the configured model and record losses.

    Iterates until i == 4000 (4001 recorded evaluations). Unless
    ``update_params`` is True, the model parameters are updated manually
    with the meta-learner's predicted deltas plus L2 weight decay.

    Returns:
        list[float]: per-iteration model evaluation losses.
    """
    update_params = False
    meta_learner = MetaOptimizer()
    model = CONFIG.model_class()
    state = None  # recurrent state threaded through the meta-learner
    results = []
    for i in itertools.count():
        # 64 — presumably the evaluation batch size; confirm in model API.
        results.append(model.evaluate(64).item())
        if i == 4000:
            break
        grads, deltas_opt, model_losses = model.step(update_params=update_params)
        deltas_pred, state = meta_learner(grads, state)
        # Flatten all parameter tensors into one 1-D vector.
        params = torch.cat([p.reshape(-1) for p in model.params])
        # Norm of the prediction error between optimiser deltas and
        # meta-learner-predicted deltas.
        l = (deltas_opt - deltas_pred).norm()
        if torch.isnan(l).any():
            # Debug hook: drop into ipdb when the error diverges to NaN.
            params_stats = describe(params.abs().data.numpy(), axis=None)
            print(i, params_stats)
            import ipdb; ipdb.set_trace()
        if i % 100 == 0:
            # Periodic progress report: relative prediction error stats.
            perc_diff = (deltas_opt - deltas_pred) / (deltas_opt + 1e-8)
            stats = describe(perc_diff.abs().data.numpy(), axis=None)
            params_stats = describe(params.abs().data.numpy(), axis=None)
            print(i,
                  l.item(),
                  model_losses[0].item(),
                  (stats.minmax, stats.mean, stats.variance),
                  (params_stats.minmax, params_stats.mean, params_stats.variance),
                  )
        if update_params:
            continue
        # Apply the predicted deltas slice-by-slice to each parameter
        # tensor, with L2 weight decay.
        j = 0
        for param in model.params:
            size = np.prod(param.shape)
            # delta = -0.01 * grads[j: j + size].reshape(param.shape)
            delta = deltas_pred[j: j + size].reshape(param.shape)
            delta -= WEIGHT_DECAY * params[j: j + size].reshape(param.shape)
            param.data.add_(delta)
            j += size
    return results
def graph(results, title=''):
    """Plot the smoothed loss curve over the raw per-iteration losses.

    A Savitzky-Golay-filtered line (seaborn) is drawn on top of a scatter
    of the raw values; the figure is shown interactively.
    """
    import seaborn as sns
    from matplotlib import pyplot as plt
    from scipy.signal import savgol_filter
    smoothed = savgol_filter(results, 31, 2)
    sns.tsplot(smoothed, color=sns.xkcd_rgb['pale red'])
    iterations = np.arange(len(results))
    plt.scatter(iterations, results, s=2)
    plt.xlabel('Iteration')
    plt.ylabel('Model Loss')
    if title:
        plt.title(title)
    plt.show()
if __name__ == '__main__':
    from main import proc_flags
    # NOTE(review): hard-coded absolute path to a local checkout — only
    # works on the original author's machine; parameterise before reuse.
    CONFIG.test = '/Users/alex/ml/lstm_learn_optimizer/saved/multivargauss_binary_adam_sgd_1/config.txt'
    proc_flags()
    CONFIG.num_steps_model = 1
    results = test()
    graph(results, 'multivariate gaussian binary classifier')
| [
"matplotlib.pyplot.title",
"scipy.signal.savgol_filter",
"main.proc_flags",
"matplotlib.pyplot.show",
"src.CONFIG.model_class",
"ipdb.set_trace",
"itertools.count",
"src.train.MetaOptimizer",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"torch.isnan",
"numpy.prod"
] | [((226, 241), 'src.train.MetaOptimizer', 'MetaOptimizer', ([], {}), '()\n', (239, 241), False, 'from src.train import MetaOptimizer\n'), ((255, 275), 'src.CONFIG.model_class', 'CONFIG.model_class', ([], {}), '()\n', (273, 275), False, 'from src import CONFIG\n'), ((326, 343), 'itertools.count', 'itertools.count', ([], {}), '()\n', (341, 343), False, 'import itertools\n'), ((2086, 2109), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2096, 2109), True, 'from matplotlib import pyplot as plt\n'), ((2114, 2138), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Model Loss"""'], {}), "('Model Loss')\n", (2124, 2138), True, 'from matplotlib import pyplot as plt\n'), ((2184, 2194), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2192, 2194), True, 'from matplotlib import pyplot as plt\n'), ((2366, 2378), 'main.proc_flags', 'proc_flags', ([], {}), '()\n', (2376, 2378), False, 'from main import proc_flags\n'), ((1963, 1992), 'scipy.signal.savgol_filter', 'savgol_filter', (['results', '(31)', '(2)'], {}), '(results, 31, 2)\n', (1976, 1992), False, 'from scipy.signal import savgol_filter\n'), ((2162, 2178), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2171, 2178), True, 'from matplotlib import pyplot as plt\n'), ((859, 875), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (873, 875), False, 'import ipdb\n'), ((1493, 1513), 'numpy.prod', 'np.prod', (['param.shape'], {}), '(param.shape)\n', (1500, 1513), True, 'import numpy as np\n'), ((703, 717), 'torch.isnan', 'torch.isnan', (['l'], {}), '(l)\n', (714, 717), False, 'import torch\n')] |
import os
import unittest
import numpy as np
import shutil
from pylipid.plot import plot_koff
from pylipid.func import cal_koff
from pylipid.util import check_dir
class TestPlot(unittest.TestCase):
    """Smoke tests: run cal_koff on synthetic durations and render plots."""
    def setUp(self):
        # Create a per-test output directory next to this file.
        file_dir = os.path.dirname(os.path.abspath(__file__))
        self.save_dir = os.path.join(file_dir, "test_plot")
        check_dir(self.save_dir)
    def test_koff(self):
        """cal_koff + plot_koff should run end-to-end and write PDFs."""
        t_total = 150
        timestep = 1
        # Synthetic residence durations, normally distributed around 50.
        durations = np.random.normal(loc=50, scale=15, size=400)
        koff, restime, properties = cal_koff(durations, t_total, timestep, nbootstrap=10,
                                             initial_guess=[1., 1., 1., 1.], cap=True)
        # First plot: no annotation text on the right.
        plot_koff(durations, properties["delta_t_list"], properties["survival_rates"],
                  properties["n_fitted"], survival_rates_bootstraps=properties["survival_rates_boot_set"],
                  fig_fn=os.path.join(self.save_dir, "test_koff_plot.pdf"), title="test koff",
                  timeunit="ns", t_total=t_total, text=None)
        # set the text printed on the right
        tu = "ns"
        text = "{:18s} = {:.3f} {:2s}$^{{-1}} $\n".format("$k_{{off1}}$", properties["ks"][0], tu)
        text += "{:18s} = {:.3f} {:2s}$^{{-1}} $\n".format("$k_{{off2}}$", properties["ks"][1], tu)
        text += "{:14s} = {:.4f}\n".format("$R^2$", properties["r_squared"])
        # Bootstrap means and coefficients of variation (percent).
        ks_boot_avg = np.mean(properties["ks_boot_set"], axis=0)
        cv_avg = 100 * np.std(properties["ks_boot_set"], axis=0) / np.mean(properties["ks_boot_set"], axis=0)
        text += "{:18s} = {:.3f} {:2s}$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off1, boot}}$",
                                                                     ks_boot_avg[0], tu, cv_avg[0])
        text += "{:18s} = {:.3f} {:2s}$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off2, boot}}$",
                                                                     ks_boot_avg[1], tu, cv_avg[1])
        text += "{:18s} = {:.3f} {:2s}".format("$Res. Time$", properties["res_time"], tu)
        # Second plot: with the assembled annotation text.
        plot_koff(durations, properties["delta_t_list"], properties["survival_rates"],
                  properties["n_fitted"], survival_rates_bootstraps=properties["survival_rates_boot_set"],
                  fig_fn=os.path.join(self.save_dir, "test_koff_plot_withText.pdf"), title="test koff",
                  timeunit="ns", t_total=t_total, text=text)
    def tearDown(self):
        # Remove the generated figures after each test.
        shutil.rmtree(self.save_dir)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"os.path.abspath",
"numpy.std",
"pylipid.util.check_dir",
"pylipid.func.cal_koff",
"numpy.mean",
"numpy.random.normal",
"shutil.rmtree",
"os.path.join"
] | [((2479, 2494), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2492, 2494), False, 'import unittest\n'), ((307, 342), 'os.path.join', 'os.path.join', (['file_dir', '"""test_plot"""'], {}), "(file_dir, 'test_plot')\n", (319, 342), False, 'import os\n'), ((351, 375), 'pylipid.util.check_dir', 'check_dir', (['self.save_dir'], {}), '(self.save_dir)\n', (360, 375), False, 'from pylipid.util import check_dir\n'), ((465, 509), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(50)', 'scale': '(15)', 'size': '(400)'}), '(loc=50, scale=15, size=400)\n', (481, 509), True, 'import numpy as np\n'), ((546, 650), 'pylipid.func.cal_koff', 'cal_koff', (['durations', 't_total', 'timestep'], {'nbootstrap': '(10)', 'initial_guess': '[1.0, 1.0, 1.0, 1.0]', 'cap': '(True)'}), '(durations, t_total, timestep, nbootstrap=10, initial_guess=[1.0, \n 1.0, 1.0, 1.0], cap=True)\n', (554, 650), False, 'from pylipid.func import cal_koff\n'), ((1399, 1441), 'numpy.mean', 'np.mean', (["properties['ks_boot_set']"], {'axis': '(0)'}), "(properties['ks_boot_set'], axis=0)\n", (1406, 1441), True, 'import numpy as np\n'), ((2418, 2446), 'shutil.rmtree', 'shutil.rmtree', (['self.save_dir'], {}), '(self.save_dir)\n', (2431, 2446), False, 'import shutil\n'), ((256, 281), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (271, 281), False, 'import os\n'), ((1509, 1551), 'numpy.mean', 'np.mean', (["properties['ks_boot_set']"], {'axis': '(0)'}), "(properties['ks_boot_set'], axis=0)\n", (1516, 1551), True, 'import numpy as np\n'), ((907, 956), 'os.path.join', 'os.path.join', (['self.save_dir', '"""test_koff_plot.pdf"""'], {}), "(self.save_dir, 'test_koff_plot.pdf')\n", (919, 956), False, 'import os\n'), ((1465, 1506), 'numpy.std', 'np.std', (["properties['ks_boot_set']"], {'axis': '(0)'}), "(properties['ks_boot_set'], axis=0)\n", (1471, 1506), True, 'import numpy as np\n'), ((2244, 2302), 'os.path.join', 'os.path.join', (['self.save_dir', 
'"""test_koff_plot_withText.pdf"""'], {}), "(self.save_dir, 'test_koff_plot_withText.pdf')\n", (2256, 2302), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# import six
import numpy as np
import pytest
import sksurgerycore.algorithms.pivot as p
from glob import glob
def test_empty_matrices():
    """Passing None instead of a matrix stack must raise TypeError."""
    pytest.raises(TypeError, p.pivot_calibration, None)
def test_rank_lt_six():
    """A single tracking file yields a rank-deficient system -> ValueError."""
    with pytest.raises(ValueError):
        files = glob('tests/data/PivotCalibration/1378476416922755200.txt')
        stacked = np.concatenate([np.loadtxt(name) for name in files])
        count = int(stacked.size / 16)
        p.pivot_calibration(stacked.reshape(count, 4, 4))
def test_four_columns_matrices4x4():
    # NOTE(review): this builds a 3x3 array, identical to
    # test_four_rows_matrices4x4 — the name suggests an input with a wrong
    # column count (e.g. (3, 4)) was intended; confirm against
    # pivot_calibration's shape validation.
    with pytest.raises(ValueError):
        p.pivot_calibration(np.arange(2, 11, dtype=float).reshape(3, 3))
def test_four_rows_matrices4x4():
    # NOTE(review): identical body to test_four_columns_matrices4x4 (both
    # pass a 3x3 array) — the name suggests a wrong row count (e.g. (4, 3))
    # was intended; confirm against pivot_calibration's shape validation.
    with pytest.raises(ValueError):
        p.pivot_calibration(np.arange(2, 11, dtype=float).reshape(3, 3))
def test_return_value():
    """Regression-check the calibration output on the bundled data set."""
    file_names = glob('tests/data/PivotCalibration/*')
    stacked = np.concatenate([np.loadtxt(f) for f in file_names])
    n_matrices = int(stacked.size / 16)
    tracking = stacked.reshape(n_matrices, 4, 4)
    x_values, residual_error = p.pivot_calibration(tracking)
    assert 1.838 == round(residual_error, 3)
    # Expected pivot-calibration solution, one value per row of x_values.
    expected = [-14.476, 395.143, -7.558, -805.285, -85.448, -2112.066]
    for row, value in enumerate(expected):
        assert value == round(x_values[row, 0], 3)
def test_rank_if_condition():
    # This test will be checking a specific if condition.
    # But at the moment I dont know what data I need
    # To get proper s_values to cover that if condition.
    # NOTE(review): if the glob matches no files, np.concatenate([]) itself
    # raises ValueError, so this test can pass without ever reaching
    # pivot_calibration — verify the data file exists.
    with pytest.raises(ValueError):
        file_names = glob('tests/data/test_case_data.txt')
        arrays = [np.loadtxt(f) for f in file_names]
        matrices = np.concatenate(arrays)
        number_of_matrices = int(matrices.size/16)
        matrices = matrices.reshape(number_of_matrices, 4, 4)
        p.pivot_calibration(matrices)
def test_pivot_with_ransac():
    """RANSAC calibration should reduce the residual versus the plain fit."""
    file_names = glob('tests/data/PivotCalibration/*')
    arrays = [np.loadtxt(f) for f in file_names]
    matrices = np.concatenate(arrays)
    number_of_matrices = int(matrices.size/16)
    matrices = matrices.reshape(number_of_matrices, 4, 4)
    # Baseline: plain least-squares pivot calibration.
    model_1, residual_1 = p.pivot_calibration(matrices)
    print("Without RANSAC:" + str(model_1) + ", RMS=" + str(residual_1))
    model_2, residual_2 = p.pivot_calibration_with_ransac(matrices, 10, 4, 0.25)
    print("With RANSAC:" + str(model_2) + ", RMS=" + str(residual_2))
    assert residual_2 < residual_1
    # Early-exit variant is only exercised and printed, not asserted.
    model_3, residual_3 = p.pivot_calibration_with_ransac(matrices, 10, 4, 0.25, early_exit=True)
    print("With Early Exit RANSAC:" + str(model_3) + ", RMS=" + str(residual_3))
| [
"sksurgerycore.algorithms.pivot.pivot_calibration",
"pytest.raises",
"numpy.arange",
"numpy.loadtxt",
"glob.glob",
"numpy.concatenate",
"sksurgerycore.algorithms.pivot.pivot_calibration_with_ransac"
] | [((966, 1003), 'glob.glob', 'glob', (['"""tests/data/PivotCalibration/*"""'], {}), "('tests/data/PivotCalibration/*')\n", (970, 1003), False, 'from glob import glob\n'), ((1068, 1090), 'numpy.concatenate', 'np.concatenate', (['arrays'], {}), '(arrays)\n', (1082, 1090), True, 'import numpy as np\n'), ((1226, 1255), 'sksurgerycore.algorithms.pivot.pivot_calibration', 'p.pivot_calibration', (['matrices'], {}), '(matrices)\n', (1245, 1255), True, 'import sksurgerycore.algorithms.pivot as p\n'), ((2177, 2214), 'glob.glob', 'glob', (['"""tests/data/PivotCalibration/*"""'], {}), "('tests/data/PivotCalibration/*')\n", (2181, 2214), False, 'from glob import glob\n'), ((2279, 2301), 'numpy.concatenate', 'np.concatenate', (['arrays'], {}), '(arrays)\n', (2293, 2301), True, 'import numpy as np\n'), ((2433, 2462), 'sksurgerycore.algorithms.pivot.pivot_calibration', 'p.pivot_calibration', (['matrices'], {}), '(matrices)\n', (2452, 2462), True, 'import sksurgerycore.algorithms.pivot as p\n'), ((2562, 2616), 'sksurgerycore.algorithms.pivot.pivot_calibration_with_ransac', 'p.pivot_calibration_with_ransac', (['matrices', '(10)', '(4)', '(0.25)'], {}), '(matrices, 10, 4, 0.25)\n', (2593, 2616), True, 'import sksurgerycore.algorithms.pivot as p\n'), ((2748, 2819), 'sksurgerycore.algorithms.pivot.pivot_calibration_with_ransac', 'p.pivot_calibration_with_ransac', (['matrices', '(10)', '(4)', '(0.25)'], {'early_exit': '(True)'}), '(matrices, 10, 4, 0.25, early_exit=True)\n', (2779, 2819), True, 'import sksurgerycore.algorithms.pivot as p\n'), ((176, 200), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (189, 200), False, 'import pytest\n'), ((210, 235), 'sksurgerycore.algorithms.pivot.pivot_calibration', 'p.pivot_calibration', (['None'], {}), '(None)\n', (229, 235), True, 'import sksurgerycore.algorithms.pivot as p\n'), ((272, 297), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (285, 297), False, 'import pytest\n'), ((320, 379), 
'glob.glob', 'glob', (['"""tests/data/PivotCalibration/1378476416922755200.txt"""'], {}), "('tests/data/PivotCalibration/1378476416922755200.txt')\n", (324, 379), False, 'from glob import glob\n'), ((452, 474), 'numpy.concatenate', 'np.concatenate', (['arrays'], {}), '(arrays)\n', (466, 474), True, 'import numpy as np\n'), ((596, 625), 'sksurgerycore.algorithms.pivot.pivot_calibration', 'p.pivot_calibration', (['matrices'], {}), '(matrices)\n', (615, 625), True, 'import sksurgerycore.algorithms.pivot as p\n'), ((675, 700), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (688, 700), False, 'import pytest\n'), ((821, 846), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (834, 846), False, 'import pytest\n'), ((1018, 1031), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (1028, 1031), True, 'import numpy as np\n'), ((1795, 1820), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1808, 1820), False, 'import pytest\n'), ((1843, 1880), 'glob.glob', 'glob', (['"""tests/data/test_case_data.txt"""'], {}), "('tests/data/test_case_data.txt')\n", (1847, 1880), False, 'from glob import glob\n'), ((1953, 1975), 'numpy.concatenate', 'np.concatenate', (['arrays'], {}), '(arrays)\n', (1967, 1975), True, 'import numpy as np\n'), ((2097, 2126), 'sksurgerycore.algorithms.pivot.pivot_calibration', 'p.pivot_calibration', (['matrices'], {}), '(matrices)\n', (2116, 2126), True, 'import sksurgerycore.algorithms.pivot as p\n'), ((2229, 2242), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (2239, 2242), True, 'import numpy as np\n'), ((398, 411), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (408, 411), True, 'import numpy as np\n'), ((1899, 1912), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (1909, 1912), True, 'import numpy as np\n'), ((730, 759), 'numpy.arange', 'np.arange', (['(2)', '(11)'], {'dtype': 'float'}), '(2, 11, dtype=float)\n', (739, 759), True, 'import numpy as np\n'), ((876, 
905), 'numpy.arange', 'np.arange', (['(2)', '(11)'], {'dtype': 'float'}), '(2, 11, dtype=float)\n', (885, 905), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import astropy.units as u
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest, assert_quantity_allclose
from ...datasets import gammapy_extra
from ...utils.testing import requires_dependency, requires_data
from ...spectrum import SpectrumObservation, models
@requires_data('gammapy-extra')
@requires_dependency('matplotlib')
@requires_dependency('scipy')
def test_spectrum_observation():
phafile = gammapy_extra.filename("datasets/hess-crab4_pha/pha_obs23523.fits")
obs = SpectrumObservation.read(phafile)
obs.peek()
@requires_dependency('scipy')
@requires_data('gammapy-extra')
def test_observation_stacking():
obs1 = SpectrumObservation.read(
'$GAMMAPY_EXTRA/datasets/hess-crab4_pha/pha_obs23523.fits')
obs2 = SpectrumObservation.read(
'$GAMMAPY_EXTRA/datasets/hess-crab4_pha/pha_obs23592.fits')
# Change threshold to make stuff more interesing
obs1.on_vector.lo_threshold = 1.2 * u.TeV
stacked_obs = SpectrumObservation.stack([obs1, obs2])
# Veryfing npred is preserved during the stacking
pwl = models.PowerLaw(index=2 * u.Unit(''),
amplitude=2e-11 * u.Unit('cm-2 s-1 TeV-1'),
reference=1 * u.TeV)
npred1 = obs1.predicted_counts(model=pwl)
npred2 = obs2.predicted_counts(model=pwl)
npred_stacked = stacked_obs.predicted_counts(model=pwl)
# Set npred outside safe range to 0
npred1.data[np.nonzero(obs1.on_vector.quality)] = 0
npred2.data[np.nonzero(obs2.on_vector.quality)] = 0
npred_summed = npred1 + npred2
assert_allclose(npred_stacked.data, npred_summed.data)
| [
"numpy.nonzero",
"numpy.testing.assert_allclose",
"astropy.units.Unit"
] | [((1759, 1813), 'numpy.testing.assert_allclose', 'assert_allclose', (['npred_stacked.data', 'npred_summed.data'], {}), '(npred_stacked.data, npred_summed.data)\n', (1774, 1813), False, 'from numpy.testing import assert_allclose\n'), ((1622, 1656), 'numpy.nonzero', 'np.nonzero', (['obs1.on_vector.quality'], {}), '(obs1.on_vector.quality)\n', (1632, 1656), True, 'import numpy as np\n'), ((1678, 1712), 'numpy.nonzero', 'np.nonzero', (['obs2.on_vector.quality'], {}), '(obs2.on_vector.quality)\n', (1688, 1712), True, 'import numpy as np\n'), ((1283, 1293), 'astropy.units.Unit', 'u.Unit', (['""""""'], {}), "('')\n", (1289, 1293), True, 'import astropy.units as u\n'), ((1339, 1363), 'astropy.units.Unit', 'u.Unit', (['"""cm-2 s-1 TeV-1"""'], {}), "('cm-2 s-1 TeV-1')\n", (1345, 1363), True, 'import astropy.units as u\n')] |
import math
import numpy as np
from parameter import *
if using_salome:
from parameter_salome import *
else:
from parameter_gmsh import *
if workpiece_type_id == 1:
disc_H = 0.01; #same with cutter now
length_scale = disc_R;
#if is_straight_chip:
# mesh_file = meshfolder + "/metal_cut_straight_chip.h5"
else:
#rect work coord, already provided in salome
length_scale = disc_R
if is_straight_chip:
if using_salome:
chip_Y = chip_top_y # chip_length * math.cos(cutter_angle_v*math.pi/180);
else:
chip_Y = length_scale;
# still needed velocity expression
chip_speed = cutting_speed * (feed_thickness/chip_thickness)
frequency = cutting_speed / disc_R / 2 / math.pi
direction = -1
omega = cutting_speed / (disc_R+0.5*feed_thickness)
chip_omega = cutting_speed * (feed_thickness/chip_thickness) / (chip_radius ) # correct
chip_sliding_vel_x = chip_speed*math.sin(cutter_angle_v*math.pi/180)
chip_sliding_vel_y = chip_speed*math.cos(cutter_angle_v*math.pi/180)
#print("chip_speed == chip_omega*(chip_radius+0.5*chip_thickness)?", chip_speed, chip_omega*(chip_radius+0.5*chip_thickness))
#chip_shear_angle = math.atan(-chip_start_y/chip_start_x); # phi, recently renamed in gmsh
chip_friction_end_x = chip_friction_distance*math.sin(cutter_angle_v*math.pi/180);
chip_friction_end_y = chip_friction_distance*math.cos(cutter_angle_v*math.pi/180);
#chip_sliding_end_x = chip_sliding_distance*math.sin(cutter_angle_v*math.pi/180);
#chip_sliding_end_y = chip_sliding_distance*math.cos(cutter_angle_v*math.pi/180);
chip_center_x = chip_friction_end_x - (chip_radius+chip_thickness)*math.cos(cutter_angle_v*math.pi/180);
chip_center_y = chip_friction_end_y + (chip_radius+chip_thickness)*math.sin(cutter_angle_v*math.pi/180);
# need only by python code
p_chip_end_center_x = chip_center_x + (chip_radius + 0.5*chip_thickness) * math.cos(chip_end_angle*math.pi/180);
p_chip_end_center_y = chip_center_y + (chip_radius + 0.5*chip_thickness) * math.sin(chip_end_angle*math.pi/180);
p_chip_end_center_z = 0.5*cutter_thickness;
p_chip_end_xyz = p_chip_end_center_x, p_chip_end_center_y, p_chip_end_center_z
##################################################
if is_straight_chip:
p_chip_end_o_x = chip_Y * math.tan(cutter_angle_v*math.pi/180);
p_chip_end_o_y = chip_Y;
p_chip_end_i_x = chip_Y * math.tan(cutter_angle_v*math.pi/180) - chip_thickness/math.cos(cutter_angle_v*math.pi/180);
p_chip_end_i_y = chip_Y;
def check_distance():
#line p,q
p = np.array([chip_friction_end_x, chip_friction_end_y, 0])
q = np.array([p_chip_end_o_x, p_chip_end_o_y, 0])
r1 = np.array([p_chip_end_i_x, p_chip_end_i_y, 0])
r2 = np.array([chip_start_x, chip_start_y, 0])
def t(p, q, r):
x = p-q
return np.dot(r-q, x)/np.dot(x, x)
def d(p, q, r):
return np.linalg.norm(t(p, q, r)*(p-q)+q-r)
print('check distannce must match, ', d(p, q, r1), d(p, q, r2))
# Prints 1.0
check_distance()
if using_3D:
dim = 3
else:
dim = 2
###########################################
# Code for C++ evaluation of velocity
velocity_code = '''
class Velocity : public Expression
{
public:
// Create expression with any components
Velocity() : Expression(%d) {}
// Function for evaluating expression on each cell
void eval(Array<double>& values, const Array<double>& x, const ufc::cell& cell) const
{
const double x0 = %f;
const double y0 = %f;
const double feed_thickness = %f;
const double omega = %f;
const uint cell_index = cell.index;
const size_t did = (*subdomain_id)[cell_index];
const double cx0 = %f;
const double cy0 = %f;
const double comega = %f;
const double chip_sliding_end_x = %f;
const double chip_sliding_end_y = %f;
const double chip_sliding_vel_x = %f;
const double chip_sliding_vel_y = %f;
const double cutter_angle_v = %f;
const double work_vel_x = y0*omega;
const double turning_center_x = %f;
const double turning_center_y = %f;
const double fillet_r = %f;
const double a_phi = %f;
const double a_start = -pi/2;
double a_delta = pi/2.0 - cutter_angle_v*pi/180;
const int workpiece_type_id = %d;
const int is_straight_chip = %d;
const int using_fillet_shear_zone = %d;
const int using_double_shear_heat_layer = %d;
const int work_subdomain_id = %d;
const int chip_subdomain_id = %d;
const int shear_subdomain_id = %d;
const int friction_subdomain_id = %d;
values[0] = 0.0;
values[1] = 0.0;
//values[2] = 0.0;
if(did == work_subdomain_id) { // workpiece, left and right has diff radius an center
if(workpiece_type_id == 1) { // is disc
double r = sqrt((x[0]-x0)*(x[0]-x0) + (x[1]-y0)*(x[1]-y0));
double v = omega * r;
double a = atan2((x[1]-y0), (x[0]-x0));
if (x[0]<0) {
double y0_1 = y0 + feed_thickness/2.0;
r = sqrt((x[0]-x0)*(x[0]-x0) + (x[1]-y0_1)*(x[1]-y0_1));
v = omega * r + feed_thickness/2.0;
a = atan2((x[1]-y0_1), (x[0]-x0));
}
values[0] = -v * sin(a);
values[1] = v * cos(a);
}
else { // workpiece rectangle
values[0] = work_vel_x; // only x-axis speed
values[1] = 0.0;
}
}
else if(did == chip_subdomain_id) { // chip, consisting of straight and arc sections
if (is_straight_chip == 0) {
double a = atan2((x[1]-cy0), (x[0]-cx0));
//if (x[0] < chip_sliding_end_x && x[1] > chip_sliding_end_y) {
if (a > (-cutter_angle_v*pi/180.0)) {
double r = sqrt((x[0]-cx0)*(x[0]-cx0) + (x[1]-cy0)*(x[1]-cy0));
double v = comega * r;
values[0] = v * sin(a);
values[1] = -v * cos(a);
}
}
else {
values[0] = chip_sliding_vel_x;
values[1] = chip_sliding_vel_y;
}
}
else if(did == shear_subdomain_id) {
if(using_fillet_shear_zone) {// shear zone has the fillet
//double a_intersection = a_t - (pi - a_phi);
double dist = sqrt((x[0]-turning_center_x)*(x[0]-turning_center_x) + (x[1]-turning_center_y)*(x[1]-turning_center_y));
double shift = dist - fillet_r;
double shifted_tcx = turning_center_x + shift * cos(a_phi);
double shifted_tcy = turning_center_y - shift * sin(a_phi);
double shifted_a = atan2((x[1]-shifted_tcy), (x[0]-shifted_tcx));
double v_chip = sqrt(chip_sliding_vel_x*chip_sliding_vel_x + chip_sliding_vel_y*chip_sliding_vel_y);
double v_feed = y0*omega;
double v = v_chip + (1.0 - (shifted_a - a_start)/a_delta) * (v_feed - v_chip);
values[0] = v * sin(-shifted_a);
values[1] = v * cos(-shifted_a);
}
else {
values[0] = chip_sliding_vel_x;
values[1] = chip_sliding_vel_y;
if(using_double_shear_heat_layer) { // double mapping bc
double a_t = atan2((x[1]), (x[0]));
if ((a_t - (pi - a_phi)) > 1e-3) {
values[0] = work_vel_x; // only x-axis speed
values[1] = 0;
}
}
}
}
else if(did == friction_subdomain_id) { // friction thin layer inside chip
values[0] = chip_sliding_vel_x;
values[1] = chip_sliding_vel_y;
}
else {
values[0] = 0.0;
values[1] = 0.0;
}
}
// The data stored in mesh functions
std::shared_ptr<MeshFunction<std::size_t> > subdomain_id;
};
'''%(dim, 0, -disc_R, feed_thickness, omega*direction, chip_center_x, chip_center_y, chip_omega*direction,
chip_friction_end_x, chip_friction_end_y, chip_sliding_vel_x, chip_sliding_vel_y, cutter_angle_v,
turning_center_x, turning_center_y, fillet_r, shear_angle * math.pi/180,
workpiece_type_id, is_straight_chip, int(using_fillet_shear_zone), int(using_double_shear_heat_layer),
work_subdomain_id, chip_subdomain_id, shear_subdomain_id, friction_subdomain_id
)
#print(velocity_code) | [
"math.tan",
"math.sin",
"numpy.array",
"math.cos",
"numpy.dot"
] | [((921, 961), 'math.sin', 'math.sin', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (929, 961), False, 'import math\n'), ((990, 1030), 'math.cos', 'math.cos', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (998, 1030), False, 'import math\n'), ((1293, 1333), 'math.sin', 'math.sin', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (1301, 1333), False, 'import math\n'), ((1376, 1416), 'math.cos', 'math.cos', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (1384, 1416), False, 'import math\n'), ((1646, 1686), 'math.cos', 'math.cos', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (1654, 1686), False, 'import math\n'), ((1751, 1791), 'math.sin', 'math.sin', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (1759, 1791), False, 'import math\n'), ((1892, 1932), 'math.cos', 'math.cos', (['(chip_end_angle * math.pi / 180)'], {}), '(chip_end_angle * math.pi / 180)\n', (1900, 1932), False, 'import math\n'), ((2005, 2045), 'math.sin', 'math.sin', (['(chip_end_angle * math.pi / 180)'], {}), '(chip_end_angle * math.pi / 180)\n', (2013, 2045), False, 'import math\n'), ((2269, 2309), 'math.tan', 'math.tan', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (2277, 2309), False, 'import math\n'), ((2543, 2598), 'numpy.array', 'np.array', (['[chip_friction_end_x, chip_friction_end_y, 0]'], {}), '([chip_friction_end_x, chip_friction_end_y, 0])\n', (2551, 2598), True, 'import numpy as np\n'), ((2611, 2656), 'numpy.array', 'np.array', (['[p_chip_end_o_x, p_chip_end_o_y, 0]'], {}), '([p_chip_end_o_x, p_chip_end_o_y, 0])\n', (2619, 2656), True, 'import numpy as np\n'), ((2670, 2715), 'numpy.array', 'np.array', (['[p_chip_end_i_x, p_chip_end_i_y, 0]'], {}), '([p_chip_end_i_x, p_chip_end_i_y, 0])\n', (2678, 2715), True, 'import numpy as np\n'), ((2729, 
2770), 'numpy.array', 'np.array', (['[chip_start_x, chip_start_y, 0]'], {}), '([chip_start_x, chip_start_y, 0])\n', (2737, 2770), True, 'import numpy as np\n'), ((2366, 2406), 'math.tan', 'math.tan', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (2374, 2406), False, 'import math\n'), ((2420, 2460), 'math.cos', 'math.cos', (['(cutter_angle_v * math.pi / 180)'], {}), '(cutter_angle_v * math.pi / 180)\n', (2428, 2460), False, 'import math\n'), ((2835, 2851), 'numpy.dot', 'np.dot', (['(r - q)', 'x'], {}), '(r - q, x)\n', (2841, 2851), True, 'import numpy as np\n'), ((2850, 2862), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (2856, 2862), True, 'import numpy as np\n')] |
import torch
from acquisition.acquisition_functions import expected_improvement
from acquisition.acquisition_marginalization import acquisition_expectation
import numpy as np
import cma
import time
import scipy.optimize as spo
from functools import partial
def continuous_acquisition_expectation(x_continuous, discrete_part, inference_samples, partition_samples,
n_vertices, acquisition_func, reference, batch=False):
if batch:
eval_x = torch.from_numpy(np.concatenate((np.tile(discrete_part, (len(x_continuous), 1)), x_continuous), axis = 1)).float()
results = acquisition_expectation(eval_x,inference_samples, partition_samples, n_vertices,acquisition_func, reference)
return np.array(results)
else:
eval_x = torch.from_numpy(np.concatenate((discrete_part, x_continuous))).float()
print(acquisition_expectation(eval_x, inference_samples, partition_samples, n_vertices,
acquisition_func, reference)[0].numpy())
return acquisition_expectation(eval_x, inference_samples, partition_samples, n_vertices,
acquisition_func, reference)[0].numpy()
def cma_es_optimizer(objective, x_init, max_acquisition, inference_samples, partition_samples, n_vertices, acquisition_func=expected_improvement, reference=None):
cont_bounds = [objective.problem.lower_bounds[objective.num_discrete:], objective.problem.upper_bounds[objective.num_discrete:]]
start_time = time.time()
es = cma.CMAEvolutionStrategy(x0=x_init[objective.num_discrete:],sigma0=0.1,inopts={'bounds': cont_bounds, "popsize": 50},)
iter = 1
total_time_in_acq = 0
while not es.stop():
iter += 1
xs = es.ask()
X = torch.tensor(xs).float()
# evaluate the acquisition function (optimizer assumes we're minimizing)
temp_time = time.time()
Y = -1 * continuous_acquisition_expectation(xs, x_init[:objective.num_discrete].numpy(),
inference_samples, partition_samples, n_vertices, acquisition_func, reference, batch=True)
total_time_in_acq += time.time() - temp_time
es.tell(xs, Y) # return the result to the optimizer
if (iter > 10):
break
best_x = torch.from_numpy(es.best.x).float()
if -1*es.best.f > max_acquisition:
return torch.cat((x_init[:objective.num_discrete], best_x), dim=0), -1*es.best.f
else:
return x_init, max_acquisition
| [
"numpy.concatenate",
"cma.CMAEvolutionStrategy",
"torch.cat",
"time.time",
"numpy.array",
"acquisition.acquisition_marginalization.acquisition_expectation",
"torch.tensor",
"torch.from_numpy"
] | [((1589, 1600), 'time.time', 'time.time', ([], {}), '()\n', (1598, 1600), False, 'import time\n'), ((1610, 1733), 'cma.CMAEvolutionStrategy', 'cma.CMAEvolutionStrategy', ([], {'x0': 'x_init[objective.num_discrete:]', 'sigma0': '(0.1)', 'inopts': "{'bounds': cont_bounds, 'popsize': 50}"}), "(x0=x_init[objective.num_discrete:], sigma0=0.1,\n inopts={'bounds': cont_bounds, 'popsize': 50})\n", (1634, 1733), False, 'import cma\n'), ((653, 767), 'acquisition.acquisition_marginalization.acquisition_expectation', 'acquisition_expectation', (['eval_x', 'inference_samples', 'partition_samples', 'n_vertices', 'acquisition_func', 'reference'], {}), '(eval_x, inference_samples, partition_samples,\n n_vertices, acquisition_func, reference)\n', (676, 767), False, 'from acquisition.acquisition_marginalization import acquisition_expectation\n'), ((776, 793), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (784, 793), True, 'import numpy as np\n'), ((1973, 1984), 'time.time', 'time.time', ([], {}), '()\n', (1982, 1984), False, 'import time\n'), ((2216, 2227), 'time.time', 'time.time', ([], {}), '()\n', (2225, 2227), False, 'import time\n'), ((2356, 2383), 'torch.from_numpy', 'torch.from_numpy', (['es.best.x'], {}), '(es.best.x)\n', (2372, 2383), False, 'import torch\n'), ((2446, 2505), 'torch.cat', 'torch.cat', (['(x_init[:objective.num_discrete], best_x)'], {'dim': '(0)'}), '((x_init[:objective.num_discrete], best_x), dim=0)\n', (2455, 2505), False, 'import torch\n'), ((1847, 1863), 'torch.tensor', 'torch.tensor', (['xs'], {}), '(xs)\n', (1859, 1863), False, 'import torch\n'), ((838, 883), 'numpy.concatenate', 'np.concatenate', (['(discrete_part, x_continuous)'], {}), '((discrete_part, x_continuous))\n', (852, 883), True, 'import numpy as np\n'), ((1100, 1214), 'acquisition.acquisition_marginalization.acquisition_expectation', 'acquisition_expectation', (['eval_x', 'inference_samples', 'partition_samples', 'n_vertices', 'acquisition_func', 'reference'], {}), 
'(eval_x, inference_samples, partition_samples,\n n_vertices, acquisition_func, reference)\n', (1123, 1214), False, 'from acquisition.acquisition_marginalization import acquisition_expectation\n'), ((907, 1021), 'acquisition.acquisition_marginalization.acquisition_expectation', 'acquisition_expectation', (['eval_x', 'inference_samples', 'partition_samples', 'n_vertices', 'acquisition_func', 'reference'], {}), '(eval_x, inference_samples, partition_samples,\n n_vertices, acquisition_func, reference)\n', (930, 1021), False, 'from acquisition.acquisition_marginalization import acquisition_expectation\n')] |
import os
import math
import numpy as np
from PIL import Image
import skimage.transform as trans
import cv2
import torch
from data import dataset_info
from data.base_dataset import BaseDataset
import util.util as util
dataset_info = dataset_info()
class AllFaceDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--no_pairing_check', action='store_true',
help='If specified, skip sanity check of correct label-image file pairing')
return parser
def cv2_loader(self, img_str):
img_array = np.frombuffer(img_str, dtype=np.uint8)
return cv2.imdecode(img_array, cv2.IMREAD_COLOR)
def fill_list(self, tmp_list):
length = len(tmp_list)
if length % self.opt.batchSize != 0:
end = math.ceil(length / self.opt.batchSize) * self.opt.batchSize
tmp_list = tmp_list + tmp_list[-1 * (end - length) :]
return tmp_list
def initialize(self, opt):
self.opt = opt
dataset_num = dataset_info.get_dataset(opt)
self.prefix = [dataset_info.prefix[num] for num in dataset_num]
file_list = [dataset_info.file_list[num] for num in dataset_num]
land_mark_list = [dataset_info.land_mark_list[num] for num in dataset_num]
self.params_dir = [dataset_info.params_dir[num] for num in dataset_num]
self.folder_level = [dataset_info.folder_level[num] for num in dataset_num]
self.num_datasets = len(file_list)
assert len(land_mark_list) == self.num_datasets, \
'num of landmk dir should be the num of datasets'
assert len(self.params_dir) == self.num_datasets, \
'num of params_dir should be the num of datasets'
self.dataset_lists = []
self.landmark_paths = []
self.sizes = []
for n in range(self.num_datasets):
with open(file_list[n]) as f:
img_lists = f.readlines()
img_lists = self.fill_list(img_lists)
self.sizes.append(len(img_lists))
self.dataset_lists.append(sorted(img_lists))
with open(land_mark_list[n]) as f:
landmarks = f.readlines()
landmarks = self.fill_list(landmarks)
self.landmark_paths.append(sorted(landmarks))
self.dataset_size = min(self.sizes)
self.initialized = False
def get_landmarks(self, landmark, img_list):
landmark_split = landmark.strip().split(' ')
filename1_without_ext = os.path.basename(img_list.strip())
filename2_without_ext = os.path.basename(landmark_split[0])
assert (filename1_without_ext == filename2_without_ext), \
"The image_path %s and params_path %s don't match." % \
(img_list, landmark_split[0])
label = landmark_split[1]
landmarks = landmark_split[2:]
landmarks = list(map(float, landmarks))
landmarks_array = np.array(landmarks).reshape(5, 2)
return landmarks_array, label
def get_param_file(self, img_list, dataset_num):
img_name = os.path.splitext(img_list)[0]
name_split = img_name.split("/")
folder_level = self.folder_level[dataset_num]
param_folder = os.path.join(self.params_dir[dataset_num],
"/".join([name_split[i] for i in range(len(name_split) - folder_level, len(name_split))]) + ".txt")
# params = np.loadtxt(param_folder)
return param_folder
def paths_match(self, path1, path2):
filename1_without_ext = os.path.splitext(os.path.basename(path1)[-10:])[0]
filename2_without_ext = os.path.splitext(os.path.basename(path2)[-10:])[0]
return filename1_without_ext == filename2_without_ext
def affine_align(self, img, landmark=None, **kwargs):
M = None
h, w, c = img.shape
src = np.array([
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041]], dtype=np.float32)
src = src * 290 / 112
src[:, 0] += 50
src[:, 1] += 60
src = src / 400 * self.opt.crop_size
dst = landmark
# dst = landmark.astype(np.float32)
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2, :]
warped = cv2.warpAffine(img, M, (self.opt.crop_size, self.opt.crop_size), borderValue=0.0)
return warped, M
def __getitem__(self, index):
# Label Image
randnum = np.random.randint(sum(self.sizes))
dataset_num = np.random.randint(self.num_datasets)
image_path = self.dataset_lists[dataset_num][index].strip()
image_path = os.path.join(self.prefix[dataset_num], image_path)
img = cv2.imread(image_path)
if img is None:
raise Exception('None Image')
param_path = self.get_param_file(image_path, dataset_num)
# img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
M = None
landmark_path = self.landmark_paths[dataset_num][index].strip()
landmarks, label = self.get_landmarks(landmark_path, image_path)
wrapped_img, M = self.affine_align(img, landmarks)
M = torch.from_numpy(M).float()
wrapped_img = wrapped_img.transpose(2, 0, 1) / 255.0
wrapped_img = torch.from_numpy(wrapped_img).float()
input_dict = {
'image': wrapped_img,
'param_path': param_path,
'M': M,
'path': image_path
}
# Give subclasses a chance to modify the final output
self.postprocess(input_dict)
return input_dict
def postprocess(self, input_dict):
return input_dict
def __len__(self):
return self.dataset_size
| [
"data.dataset_info",
"os.path.basename",
"cv2.cvtColor",
"numpy.frombuffer",
"math.ceil",
"cv2.imdecode",
"skimage.transform.SimilarityTransform",
"data.dataset_info.get_dataset",
"cv2.warpAffine",
"numpy.random.randint",
"numpy.array",
"cv2.imread",
"os.path.splitext",
"os.path.join",
"... | [((234, 248), 'data.dataset_info', 'dataset_info', ([], {}), '()\n', (246, 248), False, 'from data import dataset_info\n'), ((610, 648), 'numpy.frombuffer', 'np.frombuffer', (['img_str'], {'dtype': 'np.uint8'}), '(img_str, dtype=np.uint8)\n', (623, 648), True, 'import numpy as np\n'), ((664, 705), 'cv2.imdecode', 'cv2.imdecode', (['img_array', 'cv2.IMREAD_COLOR'], {}), '(img_array, cv2.IMREAD_COLOR)\n', (676, 705), False, 'import cv2\n'), ((1063, 1092), 'data.dataset_info.get_dataset', 'dataset_info.get_dataset', (['opt'], {}), '(opt)\n', (1087, 1092), False, 'from data import dataset_info\n'), ((2629, 2664), 'os.path.basename', 'os.path.basename', (['landmark_split[0]'], {}), '(landmark_split[0])\n', (2645, 2664), False, 'import os\n'), ((3927, 4060), 'numpy.array', 'np.array', (['[[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, \n 92.3655], [70.7299, 92.2041]]'], {'dtype': 'np.float32'}), '([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [\n 41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)\n', (3935, 4060), True, 'import numpy as np\n'), ((4323, 4350), 'skimage.transform.SimilarityTransform', 'trans.SimilarityTransform', ([], {}), '()\n', (4348, 4350), True, 'import skimage.transform as trans\n'), ((4434, 4519), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(self.opt.crop_size, self.opt.crop_size)'], {'borderValue': '(0.0)'}), '(img, M, (self.opt.crop_size, self.opt.crop_size),\n borderValue=0.0)\n', (4448, 4519), False, 'import cv2\n'), ((4674, 4710), 'numpy.random.randint', 'np.random.randint', (['self.num_datasets'], {}), '(self.num_datasets)\n', (4691, 4710), True, 'import numpy as np\n'), ((4801, 4851), 'os.path.join', 'os.path.join', (['self.prefix[dataset_num]', 'image_path'], {}), '(self.prefix[dataset_num], image_path)\n', (4813, 4851), False, 'import os\n'), ((4867, 4889), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (4877, 4889), False, 'import cv2\n'), ((5077, 5113), 
'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (5089, 5113), False, 'import cv2\n'), ((3135, 3161), 'os.path.splitext', 'os.path.splitext', (['img_list'], {}), '(img_list)\n', (3151, 3161), False, 'import os\n'), ((836, 874), 'math.ceil', 'math.ceil', (['(length / self.opt.batchSize)'], {}), '(length / self.opt.batchSize)\n', (845, 874), False, 'import math\n'), ((2990, 3009), 'numpy.array', 'np.array', (['landmarks'], {}), '(landmarks)\n', (2998, 3009), True, 'import numpy as np\n'), ((5347, 5366), 'torch.from_numpy', 'torch.from_numpy', (['M'], {}), '(M)\n', (5363, 5366), False, 'import torch\n'), ((5460, 5489), 'torch.from_numpy', 'torch.from_numpy', (['wrapped_img'], {}), '(wrapped_img)\n', (5476, 5489), False, 'import torch\n'), ((3630, 3653), 'os.path.basename', 'os.path.basename', (['path1'], {}), '(path1)\n', (3646, 3653), False, 'import os\n'), ((3713, 3736), 'os.path.basename', 'os.path.basename', (['path2'], {}), '(path2)\n', (3729, 3736), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Read gslib file format
Created on Wen Sep 5th 2018
"""
from __future__ import absolute_import, division, print_function
__author__ = "yuhao"
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from mpl_toolkits.mplot3d import Axes3D
class SpatialData(object):
def __init__(self, file_path):
self.datafl = file_path
self.vr = None
self.property_name = None
self._2d = False
self._read_data()
def _read_data(self):
"""
read gslib file
"""
column_name = []
with open(self.datafl, 'r') as fin:
_ = fin.readline().strip()
ncols = int(fin.readline().strip())
for _ in range(ncols):
column_name.append(fin.readline().strip())
self.property_name = [item for item in column_name
if item not in ['x', 'y', 'z']]
df = pd.read_csv(self.datafl, sep='\t', header=None, names=column_name,
skiprows=ncols+2)
if 'z' not in column_name:
self._2d = True
column_name.append('z')
df['z'] = 0
self.df = df
data_dtype = np.dtype({
'names': column_name,
'formats': ['f8'] * len(column_name)})
self.vr = np.core.records.fromarrays(
df.values.transpose(), dtype=data_dtype)
def preview(self):
return self.vr.head(20)
def pdf(self, ax, bins=15):
hist, bin_edges = np.histogram(self.vr[self.property_name[0]],
bins=bins)
ax.set_title("pdf")
ax.bar(bin_edges[:-1], hist, width=bin_edges[1]-bin_edges[0],
color='red', alpha=0.5)
def cdf(self, ax):
data = self.vr[self.property_name[0]]
data = np.sort(data)
cdf = np.arange(1, len(data) + 1) / len(data)
ax.set_title("cdf")
ax.plot(data, cdf)
@property
def maximum(self):
return self.df[self.property_name[0]].max()
@property
def minimum(self):
return self.df[self.property_name[0]].min()
@property
def mean(self):
return self.df[self.property_name[0]].mean()
@property
def variance(self):
return self.df[self.property_name[0]].var()
@property
def meadian(self):
return np.median(self.vr[self.property_name[0]])
@property
def upper_quartile(self):
return self.df[self.property_name[0]].quantile(0.75)
@property
def lower_quartile(self):
return self.df[self.property_name[0]].quantile(0.25)
@property
def num(self):
return self.vr.shape[0]
def distance(self):
num = self.vr.shape[0]
return pdist(np.concatenate((self.vr['x'].reshape((num, 1)),
self.vr['y'].reshape((num, 1))), axis=1))
@property
def summary(self):
return (
"Summary\n"
"-------\n"
"Number of Points: {}\n"
"Mean: {}\n"
"Variance: {}\n"
"Minimum: {}\n"
"Lower Quartile: {}\n"
"Median: {}\n"
"Upper Quartile: {}\n"
"Maximum: {}\n").format(
self.num,
self.mean,
self.variance,
self.minimum,
self.lower_quartile,
self.meadian,
self.upper_quartile,
self.maximum)
def scatter(self, ax, prop=None):
"""
Plot scatter of data points on given axis
Parameters
----------
ax : AxesSubplot or Axes3DSubplot
axis on which the scatter plot is drawn
prop : str
property to display with colormap
"""
sc = None
prop = self.property_name[0] if prop is None else prop
if not self._2d and isinstance(ax, Axes3D):
sc = ax.scatter(
self.vr['x'], self.vr['y'], self.vr['z'],
c=prop)
else:
sc = ax.scatter(
self.vr['x'], self.vr['y'], c=prop)
return sc
| [
"pandas.read_csv",
"numpy.sort",
"numpy.histogram",
"numpy.median"
] | [((955, 1046), 'pandas.read_csv', 'pd.read_csv', (['self.datafl'], {'sep': '"""\t"""', 'header': 'None', 'names': 'column_name', 'skiprows': '(ncols + 2)'}), "(self.datafl, sep='\\t', header=None, names=column_name, skiprows\n =ncols + 2)\n", (966, 1046), True, 'import pandas as pd\n'), ((1542, 1597), 'numpy.histogram', 'np.histogram', (['self.vr[self.property_name[0]]'], {'bins': 'bins'}), '(self.vr[self.property_name[0]], bins=bins)\n', (1554, 1597), True, 'import numpy as np\n'), ((1859, 1872), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (1866, 1872), True, 'import numpy as np\n'), ((2394, 2435), 'numpy.median', 'np.median', (['self.vr[self.property_name[0]]'], {}), '(self.vr[self.property_name[0]])\n', (2403, 2435), True, 'import numpy as np\n')] |
# 数据处理
# pickle是一个将任意复杂的对象转成对象的文本或二进制表示的过程
# 也可以将这些字符串、文件或任何类似于文件的对象 unpickle 成原来的对象
import pickle
import os
import random
import numpy as np
# 标签字典
tag2label = {"O": 0,
"B-PER": 1, "I-PER": 2,
"B-LOC": 3, "I-LOC": 4,
"B-ORG": 5, "I-ORG": 6
}
def read_corpus(corpus_path): # 输入train_data文件的路径,读取训练集的语料,输出train_data
data = []
with open(corpus_path, encoding='utf-8') as fr:
lines = fr.readlines() # 返回的是一个列表,一行数据一个元素
sent_, tag_ = [], []
for line in lines:
if line != '\n':
[char, label] = line.strip().split()
sent_.append(char) # 字放进sent_
tag_.append(label) # tag放进tag_
else:
data.append((sent_, tag_))
sent_, tag_ = [], []
return data
# 由train_data来构造一个(统计非重复字)字典{'第一个字':[对应的id,该字出现的次数],'第二个字':[对应的id,该字出现的次数], , ,}
# 去除低频词,生成一个word_id的字典并保存在输入的vocab_path的路径下,
# 保存的方法是pickle模块自带的dump方法,保存后的文件格式是word2id.pkl文件
def vocab_build(vocab_path, corpus_path, min_count): # min_count设为3
data = read_corpus(corpus_path)
word2id = {}
for sent_, tag_ in data:
for word in sent_:
if word.isdigit(): # 字符是数字
word = '<NUM>'
elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'): # 字符是字母
word = '<ENG>'
if word not in word2id: # 如果不在字典中,就加入到字典中
word2id[word] = [len(word2id)+1, 1]
else: # 在字典中就次数+1
word2id[word][1] += 1
low_freq_words = [] # 低频词
for word, [word_id, word_freq] in word2id.items():
if word_freq < min_count and word != '<NUM>' and word != '<ENG>': # 统计低频词
low_freq_words.append(word)
for word in low_freq_words:
del word2id[word] # 从字典中删除低频词
new_id = 1 # 重构字典
for word in word2id.keys():
word2id[word] = new_id
new_id += 1
word2id['<UNK>'] = new_id
word2id['<PAD>'] = 0
print(len(word2id))
with open(vocab_path, 'wb') as fw:
pickle.dump(word2id, fw) # 序列化到名字为word2id.pkl文件中
def sentence2id(sent, word2id): # 输入一句话,生成一个 sentence_id
sentence_id = []
for word in sent:
if word.isdigit():
word = '<NUM>'
elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'):
word = '<ENG>'
if word not in word2id: # 在字典中找不到就用<UNK>表示
word = '<UNK>'
sentence_id.append(word2id[word])
return sentence_id
def read_dictionary(vocab_path): # 通过pickle模块自带的load方法(反序列化方法)加载输出word2id.pkl文件
vocab_path = os.path.join(vocab_path)
with open(vocab_path, 'rb') as fr:
word2id = pickle.load(fr)
print('vocab_size:', len(word2id))
return word2id
def random_embedding(vocab, embedding_dim): # 输入vocab,vocab就是前面得到的word2id,embedding_dim=300
embedding_mat = np.random.uniform(-0.25, 0.25, (len(vocab), embedding_dim))
embedding_mat = np.float32(embedding_mat)
# 返回一个len(vocab)*embedding_dim=3905*300的矩阵(每个字投射到300维)作为初始值
return embedding_mat
# padding,输入一句话,不够标准的样本用pad_mark来补齐
"""输入:seqs的形状为二维矩阵,形状为[[33,12,17,88,50]-第一句话
[52,19,14,48,66,31,89]-第二句话]
输出:seq_list为seqs经过padding后的序列
seq_len_list保留了padding之前每条样本的真实长度
seq_list和seq_len_list用来喂给feed_dict"""
def pad_sequences(sequences, pad_mark=0):
max_len = max(map(lambda x: len(x), sequences)) # 返回一个序列中长度最长的那条样本的长度
seq_list, seq_len_list = [], []
for seq in sequences:
seq = list(seq)
# 不够最大长度的样本用0补上放到列表seq_list
seq_ = seq[:max_len] + [pad_mark] * max(max_len - len(seq), 0)
seq_list.append(seq_)
seq_len_list.append(min(len(seq), max_len))
return seq_list, seq_len_list
''' seqs的形状为二维矩阵,形状为[[33,12,17,88,50....]...第一句话
[52,19,14,48,66....]...第二句话
]
labels的形状为二维矩阵,形状为[[0, 0, 3, 4]....第一句话
[0, 0, 3, 4]...第二句话
]
'''
def batch_yield(data, batch_size, vocab, tag2label, shuffle=False): # 生成batch
if shuffle: # 乱序数据
random.shuffle(data)
seqs, labels = [], []
for (sent_, tag_) in data:
sent_ = sentence2id(sent_, vocab) # 返回在字典中的编号
label_ = [tag2label[tag] for tag in tag_] # 返回tag的value值
if len(seqs) == batch_size:
yield seqs, labels # yield 是一个类似 return 的关键字,只是这个函数返回的是个生成器
seqs, labels = [], []
seqs.append(sent_)
labels.append(label_)
if len(seqs) != 0:
yield seqs, labels
| [
"pickle.dump",
"random.shuffle",
"numpy.float32",
"pickle.load",
"os.path.join"
] | [((2661, 2685), 'os.path.join', 'os.path.join', (['vocab_path'], {}), '(vocab_path)\n', (2673, 2685), False, 'import os\n'), ((3015, 3040), 'numpy.float32', 'np.float32', (['embedding_mat'], {}), '(embedding_mat)\n', (3025, 3040), True, 'import numpy as np\n'), ((2085, 2109), 'pickle.dump', 'pickle.dump', (['word2id', 'fw'], {}), '(word2id, fw)\n', (2096, 2109), False, 'import pickle\n'), ((2743, 2758), 'pickle.load', 'pickle.load', (['fr'], {}), '(fr)\n', (2754, 2758), False, 'import pickle\n'), ((4251, 4271), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (4265, 4271), False, 'import random\n')] |
import cv2
import numpy as np
import argparse
# we are not going to bother with objects less than 30% probability
THRESHOLD = 0.3
# the lower the value: the fewer bounding boxes will remain
SUPPRESSION_THRESHOLD = 0.3
YOLO_IMAGE_SIZE = 320
DATA_FOLDER = './data/'
CFG_FOLDER = './cfg/'
MODEL_FOLDER = './models/'
def find_objects(model_outputs):
    """
    Extract detections from the raw YOLOv3 prediction vectors.

    Returns:
        kept_indexes: indexes of the boxes surviving "Non-max suppression"
        locations: all candidate boxes as [x, y, w, h]
        class_ids: predicted class index (COCO) for each candidate box
        confidences: probability that the predicted class is correct
    """
    locations = []
    class_ids = []
    confidences = []
    # The network produces three output layers; scan every prediction in each.
    for layer_output in model_outputs:
        for prediction in layer_output:
            class_scores = prediction[5:]
            # Class with the highest probability for this box.
            best_class = np.argmax(class_scores)
            best_score = class_scores[best_class]
            # Keep only detections above the confidence threshold.
            if best_score > THRESHOLD:
                # Predictions are normalised to [0, 1]; rescale to the
                # YOLO_IMAGE_SIZE x YOLO_IMAGE_SIZE network resolution.
                box_w = int(prediction[2] * YOLO_IMAGE_SIZE)
                box_h = int(prediction[3] * YOLO_IMAGE_SIZE)
                # prediction[0:2] holds the box centre; shift to the top-left corner.
                box_x = int(prediction[0] * YOLO_IMAGE_SIZE - box_w / 2)
                box_y = int(prediction[1] * YOLO_IMAGE_SIZE - box_h / 2)
                locations.append([box_x, box_y, box_w, box_h])
                class_ids.append(best_class)
                confidences.append(float(best_score))
    # Drop overlapping boxes that describe the same object.
    kept_indexes = cv2.dnn.NMSBoxes(locations, confidences, THRESHOLD, SUPPRESSION_THRESHOLD)
    return kept_indexes, locations, class_ids, confidences
def show_detected_images(img, bounding_box_ids, all_bounding_boxes, classes, class_ids,
                         confidence_values, width_ratio, height_ratio, colors):
    """
    Draw the surviving bounding boxes and their class labels onto the image.

    The image is modified in place (cv2.rectangle / cv2.putText draw on it).

    Args:
        img: Original image (drawn on in place)
        bounding_box_ids: Idx of predicted bounding boxes after applying "Non-max suppression"
        all_bounding_boxes: all vec (x, y, w, h) of each chosen bounding box
        classes: list of all classes in COCO dataset
        class_ids: idx for each predicted class of each bounding box based on COCO dataset's classes
        confidence_values: Probability that the predicted class is correct
        width_ratio: = original_width / YOLO_IMAGE_SIZE
        height_ratio: = original_height / YOLO_IMAGE_SIZE
        colors: per-class colour table indexed by class id
    """
    # Iterate each bounding box's idx which is kept after 'non-max suppression'
    for idx in bounding_box_ids.flatten():
        bounding_box = all_bounding_boxes[idx]
        x, y, w, h = int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]), int(bounding_box[3])
        # Transform (x,y,w,h) from resized image (320*320) to original image size
        x = int(x * width_ratio)
        y = int(y * height_ratio)
        w = int(w * width_ratio)
        h = int(h * height_ratio)
        # Colour for this box, chosen by the predicted class id
        color_box_current = colors[class_ids[idx]].tolist()
        # Draw bounding box for each detected object
        cv2.rectangle(img, (x, y), (x + w, y + h), color_box_current, 2)
        # Label text: "<class name> <confidence %>"
        text_box = classes[int(class_ids[idx])] + ' ' + str(int(confidence_values[idx] * 100)) + '%'
        cv2.putText(img, text_box, (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, color_box_current, 1)
def parse_opt(known=False):
    """
    Build and evaluate the command-line argument parser.

    Args:
        known: when True, ignore unrecognised arguments instead of erroring.

    Returns:
        argparse.Namespace with video_path, class_path, cfg_path and weights_path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_path', type=str, default='', help='initial image path')
    parser.add_argument('--class_path', type=str, default=DATA_FOLDER + 'coco.names', help='initial class file path')
    parser.add_argument('--cfg_path', type=str, default=CFG_FOLDER + 'yolov3.cfg', help='initial cfg file path')
    parser.add_argument('--weights_path', type=str, default=MODEL_FOLDER + 'yolov3.weights',
                        help='initial pre-trained weights file path')
    if known:
        return parser.parse_known_args()[0]
    return parser.parse_args()
def main(opt):
    """Run YOLOv3 object detection on the video given by ``opt.video_path``.

    Loads the class labels, builds the Darknet network from the config and
    weight files, then reads the video frame by frame, drawing the detected
    bounding boxes until the stream ends or the user presses ESC.
    """
    # Label objects for prediction (80 classes for the COCO model).
    with open(opt.class_path) as f:
        labels = list(line.strip() for line in f)
    # One random colour per class so each label is drawn consistently.
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
    # Read the configuration file & initialize the weights of the YOLOv3 model.
    neural_network = cv2.dnn.readNetFromDarknet(opt.cfg_path, opt.weights_path)
    # Run inference on the CPU with the OpenCV backend.
    neural_network.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    neural_network.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    # VIDEO PROCESSING
    video_capture = cv2.VideoCapture(opt.video_path)
    while video_capture.isOpened():
        # Read each frame of video.
        is_grab, frame = video_capture.read()
        # FIX: stop as soon as a frame cannot be read (end of stream).
        # Previously frame.shape was accessed before checking is_grab,
        # raising AttributeError on None when the video ended.
        if not is_grab:
            break
        original_width, original_height = frame.shape[1], frame.shape[0]
        # Preprocess the frame into a normalised 320x320 blob for the network.
        blob = cv2.dnn.blobFromImage(frame, 1 / 255, (YOLO_IMAGE_SIZE, YOLO_IMAGE_SIZE), True, crop=False)
        neural_network.setInput(blob)
        # Resolve the names of the (three) YOLO output layers.
        layer_names = neural_network.getLayerNames()
        # np.ravel copes with both OpenCV APIs: getUnconnectedOutLayers()
        # returns [[i]]-shaped arrays before 4.5.4 and flat ints afterwards.
        output_names = [layer_names[int(np.ravel(idx)[0]) - 1]
                        for idx in neural_network.getUnconnectedOutLayers()]
        # Apply "Forward propagation" for the selected output layers.
        outputs = neural_network.forward(output_names)
        # Extract boxes/classes/confidences from the prediction vectors.
        predicted_objects_idx, bbox_locations, class_label_ids, conf_values = find_objects(outputs)
        # Draw the surviving bounding boxes on the original-size frame.
        show_detected_images(frame, predicted_objects_idx, bbox_locations, labels, class_label_ids, conf_values,
                             original_width / YOLO_IMAGE_SIZE, original_height / YOLO_IMAGE_SIZE, colors)
        cv2.imshow('YOLO Algorithm', frame)
        # Press "ESC" to quit the video.
        key = cv2.waitKey(1) & 0xff
        if key == 27:  # 27 is the key code for "ESC"
            break
    # Release the capture device and close all OpenCV windows.
    video_capture.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Entry point: parse command-line options and start the detection loop.
    opt = parse_opt()
    main(opt)
| [
"cv2.putText",
"cv2.dnn.NMSBoxes",
"argparse.ArgumentParser",
"numpy.argmax",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"cv2.imshow",
"cv2.dnn.readNetFromDarknet",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.destroyAllWindows"
] | [((2191, 2288), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['bounding_box_locations', 'confidence_values', 'THRESHOLD', 'SUPPRESSION_THRESHOLD'], {}), '(bounding_box_locations, confidence_values, THRESHOLD,\n SUPPRESSION_THRESHOLD)\n', (2207, 2288), False, 'import cv2\n'), ((4244, 4269), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4267, 4269), False, 'import argparse\n'), ((5392, 5450), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['opt.cfg_path', 'opt.weights_path'], {}), '(opt.cfg_path, opt.weights_path)\n', (5418, 5450), False, 'import cv2\n'), ((5732, 5764), 'cv2.VideoCapture', 'cv2.VideoCapture', (['opt.video_path'], {}), '(opt.video_path)\n', (5748, 5764), False, 'import cv2\n'), ((7246, 7269), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7267, 7269), False, 'import cv2\n'), ((3892, 3956), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', 'color_box_current', '(2)'], {}), '(img, (x, y), (x + w, y + h), color_box_current, 2)\n', (3905, 3956), False, 'import cv2\n'), ((4098, 4200), 'cv2.putText', 'cv2.putText', (['img', 'text_box', '(x, y - 10)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', '(0.5)', 'color_box_current', '(1)'], {}), '(img, text_box, (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5,\n color_box_current, 1)\n', (4109, 4200), False, 'import cv2\n'), ((6035, 6131), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1 / 255)', '(YOLO_IMAGE_SIZE, YOLO_IMAGE_SIZE)', '(True)'], {'crop': '(False)'}), '(frame, 1 / 255, (YOLO_IMAGE_SIZE, YOLO_IMAGE_SIZE), \n True, crop=False)\n', (6056, 6131), False, 'import cv2\n'), ((6971, 7006), 'cv2.imshow', 'cv2.imshow', (['"""YOLO Algorithm"""', 'frame'], {}), "('YOLO Algorithm', frame)\n", (6981, 7006), False, 'import cv2\n'), ((1297, 1327), 'numpy.argmax', 'np.argmax', (['class_probabilities'], {}), '(class_probabilities)\n', (1306, 1327), True, 'import numpy as np\n'), ((7065, 7079), 'cv2.waitKey', 'cv2.waitKey', 
(['(1)'], {}), '(1)\n', (7076, 7079), False, 'import cv2\n')] |
# ----------------------------------------------------
# Generate a random correlations
# ----------------------------------------------------
import numpy as np
def randCorr(size, lower=-1, upper=1, m=1000):
    """
    Generate a random correlation matrix.

    A random matrix T of shape (size, m) is drawn from U(lower, upper),
    its rows are normalised to lie on the unit sphere (r = r / sqrt(r'r)),
    and the correlation matrix is T T'. The diagonal is then forced to
    exactly 1 to remove floating-point noise.

    Fixes the previous docstring, which claimed m was 10000 while the code
    used 1000; m is now an explicit parameter (default 1000, so existing
    callers are unaffected).

    @param size: size of the (size x size) correlation matrix
    @param lower: lower limit of the uniform distribution used to create the corr matrix
    @param upper: upper limit of the uniform distribution used to create the corr matrix
    @param m: number of columns of T; larger m concentrates off-diagonal entries nearer 0
    @return: numpy ndarray, symmetric positive semi-definite correlation matrix
    """
    randomMatrix = np.random.uniform(lower, upper, (size, m))
    # Squared Euclidean norm of each row.
    norms = np.sum(randomMatrix ** 2, axis=1)
    # Normalise each row onto the unit sphere.
    T = np.divide(randomMatrix, np.sqrt(norms).reshape(size, 1))
    c = np.dot(T, T.T)
    # Unit-norm rows make the diagonal 1 up to rounding; pin it exactly.
    c[np.diag_indices(size)] = 1.
    return c
| [
"numpy.random.uniform",
"numpy.sum",
"numpy.diag_indices",
"numpy.dot",
"numpy.sqrt"
] | [((704, 746), 'numpy.random.uniform', 'np.random.uniform', (['lower', 'upper', '(size, m)'], {}), '(lower, upper, (size, m))\n', (721, 746), True, 'import numpy as np\n'), ((759, 792), 'numpy.sum', 'np.sum', (['(randomMatrix ** 2)'], {'axis': '(1)'}), '(randomMatrix ** 2, axis=1)\n', (765, 792), True, 'import numpy as np\n'), ((863, 877), 'numpy.dot', 'np.dot', (['T', 'T.T'], {}), '(T, T.T)\n', (869, 877), True, 'import numpy as np\n'), ((884, 905), 'numpy.diag_indices', 'np.diag_indices', (['size'], {}), '(size)\n', (899, 905), True, 'import numpy as np\n'), ((823, 837), 'numpy.sqrt', 'np.sqrt', (['norms'], {}), '(norms)\n', (830, 837), True, 'import numpy as np\n')] |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class Test_Bitwise_Not(TestCase):
    """Accuracy tests comparing torch.bitwise_not on the NPU backend vs CPU."""

    def generate_data(self, min_d, max_d, shape, dtype):
        """Return a tensor of the given shape/dtype, uniform in [min_d, max_d)."""
        input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
        npu_input1 = torch.from_numpy(input1)
        return npu_input1

    def generate_bool_data(self, shape):
        """Return a random boolean tensor of the given shape."""
        input1 = np.random.randint(0, 2, shape).astype(np.bool_)
        npu_input1 = torch.from_numpy(input1)
        return npu_input1

    def cpu_op_exec(self, input1):
        """Run bitwise_not on the CPU and return the result as a numpy array.

        Outputs other than int32/int8/bool are cast to int32 so the CPU and
        NPU results share a comparable dtype.
        """
        output = torch.bitwise_not(input1)
        if output.dtype not in [torch.int32, torch.int8, torch.bool]:
            output = output.to(torch.int32)
        output = output.numpy()
        return output

    def npu_op_exec(self, input1):
        """Run bitwise_not on the NPU and return the result as a numpy array."""
        input1 = input1.to("npu")
        output = torch.bitwise_not(input1)
        output = output.to("cpu")
        if output.dtype not in [torch.int32, torch.int8, torch.bool]:
            output = output.to(torch.int32)
        output = output.numpy()
        return output

    def npu_op_exec_out(self, input1, input2):
        """Run the out= variant of bitwise_not on the NPU, writing into input2."""
        input1 = input1.to("npu")
        input2 = input2.to("npu")
        torch.bitwise_not(input1, out = input2)
        output = input2.to("cpu")
        if output.dtype not in [torch.int32, torch.int8, torch.bool]:
            output = output.to(torch.int32)
        output = output.numpy()
        return output

    def test_bitwise_not_bool(self, device):
        npu_input1 = self.generate_bool_data((2, 3))
        cpu_output = self.cpu_op_exec(npu_input1)
        npu_output = self.npu_op_exec(npu_input1)
        self.assertRtolEqual(cpu_output, npu_output)

    def test_bitwise_not_int16(self, device):
        npu_input1 = self.generate_data(0, 2342, (2, 3), np.int16)
        cpu_output = self.cpu_op_exec(npu_input1)
        npu_output = self.npu_op_exec(npu_input1)
        self.assertRtolEqual(cpu_output, npu_output)

    def test_bitwise_not_int32(self, device):
        npu_input1 = self.generate_data(0, 34222, (2, 3), np.int32)
        cpu_output = self.cpu_op_exec(npu_input1)
        npu_output = self.npu_op_exec(npu_input1)
        self.assertRtolEqual(cpu_output, npu_output)

    def test_bitwise_not_int64(self, device):
        npu_input1 = self.generate_data(0, 355553, (2, 3), np.int64)
        cpu_output = self.cpu_op_exec(npu_input1)
        npu_output = self.npu_op_exec(npu_input1)
        self.assertRtolEqual(cpu_output, npu_output)

    def test_bitwise_not_out(self, device):
        shape_format = [
            [[0, 2342, [2, 3], np.int16], [0, 2342, [10, 20], np.int16]],
            [[0, 34222, [2, 3], np.int32], [0, 34222, [10, 20], np.int32]],
            [[0, 355553, [2, 3], np.int64], [0, 355553, [1, 1], np.int64]],
        ]
        for item in shape_format:
            npu_input1 = self.generate_data(item[0][0], item[0][1], item[0][2], item[0][3])
            npu_input2 = self.generate_data(item[1][0], item[1][1], item[1][2], item[1][3])
            cpu_output = self.cpu_op_exec(npu_input1)
            npu_output1 = self.npu_op_exec_out(npu_input1, npu_input1)
            npu_output2 = self.npu_op_exec_out(npu_input1, npu_input2)
            self.assertRtolEqual(cpu_output, npu_output1)
            # Fix: the second assertion previously re-checked npu_output1,
            # leaving the differently-shaped out= result (npu_output2) unverified.
            self.assertRtolEqual(cpu_output, npu_output2)
# Register device-specific variants of the test class for every backend
# except the plain CPU (the NPU vs CPU comparison is what is under test).
instantiate_device_type_tests(Test_Bitwise_Not, globals(), except_for='cpu')
if __name__ == "__main__":
    run_tests()
| [
"numpy.random.uniform",
"torch.bitwise_not",
"numpy.random.randint",
"common_utils.run_tests",
"torch.from_numpy"
] | [((4174, 4185), 'common_utils.run_tests', 'run_tests', ([], {}), '()\n', (4183, 4185), False, 'from common_utils import TestCase, run_tests\n'), ((998, 1022), 'torch.from_numpy', 'torch.from_numpy', (['input1'], {}), '(input1)\n', (1014, 1022), False, 'import torch\n'), ((1178, 1202), 'torch.from_numpy', 'torch.from_numpy', (['input1'], {}), '(input1)\n', (1194, 1202), False, 'import torch\n'), ((1282, 1307), 'torch.bitwise_not', 'torch.bitwise_not', (['input1'], {}), '(input1)\n', (1299, 1307), False, 'import torch\n'), ((1564, 1589), 'torch.bitwise_not', 'torch.bitwise_not', (['input1'], {}), '(input1)\n', (1581, 1589), False, 'import torch\n'), ((1917, 1954), 'torch.bitwise_not', 'torch.bitwise_not', (['input1'], {'out': 'input2'}), '(input1, out=input2)\n', (1934, 1954), False, 'import torch\n'), ((924, 962), 'numpy.random.uniform', 'np.random.uniform', (['min_d', 'max_d', 'shape'], {}), '(min_d, max_d, shape)\n', (941, 962), True, 'import numpy as np\n'), ((1109, 1139), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'shape'], {}), '(0, 2, shape)\n', (1126, 1139), True, 'import numpy as np\n')] |
import numpy as np
from cyvcf2 import VCF, Variant, Writer
import os.path
# Directory that contains this test module.
HERE = os.path.dirname(__file__)
# VCF fixture containing hemizygous genotype calls (exercised by test_hemi).
HEM_PATH = os.path.join(HERE, "test-hemi.vcf")
# Gzip-compressed general VCF fixture.
VCF_PATH = os.path.join(HERE, "test.vcf.gz")
def check_var(v):
    """Assert that v.gt_types matches the expectation derived from its GT fields.

    The expected genotype codes are decoded from the sample columns
    (column 10 onward) of the variant's textual VCF representation.
    """
    gt_fields = [col.split(":")[0] for col in str(v).split("\t")[9:]]
    lookup = {'0/0': 0, '0/1': 1, './1': 1, '1/.': 1, '0/.': 0, './0': 0, '1/1': 3, '.': 2, './.': 2}
    expected = np.array([lookup[g] for g in gt_fields])
    observed = v.gt_types
    assert np.all(expected == observed), zip(expected, observed)
def test_hemi():
    """
    Check gt_types for every variant in both fixtures, including the
    hemizygous calls in the dedicated hemi fixture.
    """
    for vcf_path in (HEM_PATH, VCF_PATH):
        reader = VCF(vcf_path)
        for variant in reader:
            check_var(variant)
| [
"cyvcf2.VCF",
"numpy.array",
"numpy.all"
] | [((396, 430), 'numpy.array', 'np.array', (['[lookup[ss] for ss in s]'], {}), '([lookup[ss] for ss in s])\n', (404, 430), True, 'import numpy as np\n'), ((463, 486), 'numpy.all', 'np.all', (['(expected == obs)'], {}), '(expected == obs)\n', (469, 486), True, 'import numpy as np\n'), ((675, 681), 'cyvcf2.VCF', 'VCF', (['p'], {}), '(p)\n', (678, 681), False, 'from cyvcf2 import VCF, Variant, Writer\n')] |
import random
import numpy as np
import torch
from torch.utils import data
from torch.utils.data.dataset import Dataset
"""
Example of how to make your own dataset
"""
class ToyDataSet(Dataset):
    """
    Toy binary-classification dataset.

    All samples are generated up front in __init__: each sample is a 2-D
    point drawn from a normal distribution centred on its randomly chosen
    binary label; __getitem__ returns one sample and __len__ the dataset size.
    """

    def __init__(self, length: int):
        """Generate `length` random (point, label) pairs."""
        # Dataset size; here data is generated rather than loaded from file.
        self.length = length
        # Random binary label per sample.
        self.classes = [random.choice([0, 1]) for _ in range(length)]
        # 2-D feature vector centred on the label (std-dev 0.2).
        self.data = [np.random.normal(label, 0.2, 2) for label in self.classes]

    def __getitem__(self, item_index):
        """Return one (features, label) sample as torch tensors."""
        label_tensor = torch.tensor(self.classes[item_index])    # python scalar -> torch tensor
        feature_tensor = torch.from_numpy(self.data[item_index])  # numpy array -> torch tensor
        return feature_tensor, label_tensor

    def __len__(self):
        """Number of samples in an epoch, independent of batch size."""
        return self.length
def get_toy_loaders(length: int, batch_size: int):
    """
    Build batched train/test DataLoaders over ToyDataSet with an 80/20 split.

    Returns:
        (train_loader, test_loader)
    """
    def _make_loader(num_samples: int):
        # Same DataLoader configuration for both splits.
        return torch.utils.data.DataLoader(
            ToyDataSet(num_samples),
            batch_size=batch_size,
            shuffle=True,
            pin_memory=True,
        )

    return _make_loader(int(length * 0.8)), _make_loader(int(length * 0.2))
| [
"numpy.random.normal",
"random.choice",
"torch.tensor",
"torch.from_numpy"
] | [((981, 1019), 'torch.tensor', 'torch.tensor', (['self.classes[item_index]'], {}), '(self.classes[item_index])\n', (993, 1019), False, 'import torch\n'), ((1070, 1109), 'torch.from_numpy', 'torch.from_numpy', (['self.data[item_index]'], {}), '(self.data[item_index])\n', (1086, 1109), False, 'import torch\n'), ((701, 722), 'random.choice', 'random.choice', (['[0, 1]'], {}), '([0, 1])\n', (714, 722), False, 'import random\n'), ((811, 852), 'numpy.random.normal', 'np.random.normal', (['self.classes[i]', '(0.2)', '(2)'], {}), '(self.classes[i], 0.2, 2)\n', (827, 852), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.