# Copyright 2022 Maximilien Le Clei.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from bots.network.dynamic.base import DynamicNetworkBotBase
from nets.dynamic.convolutional import Net as ConvolutionalNet
from nets.dynamic.recurrent import Net as RecurrentNet
class Bot(DynamicNetworkBotBase):
def initialize_nets(self):
self.d_input = [1, 16, 16]
if self.pop_nb == 0:
self.function = 'generator'
else: # self.pop_nb == 1:
self.function = 'discriminator'
self.convolutional_net = ConvolutionalNet(self.d_input, 'forward')
if self.function == 'generator':
self.transpose_convolution_net = ConvolutionalNet(
self.d_input, 'backward')
if self.function == 'discriminator':
self.recurrent_net = RecurrentNet(self.convolutional_net, 1)
else: # self.function == 'generator':
self.recurrent_net = RecurrentNet(
self.convolutional_net, self.transpose_convolution_net)
self.convolutional_net.output_net = self.recurrent_net
if self.function == 'generator':
self.transpose_convolution_net.output_net = self.recurrent_net
self.nets = [self.convolutional_net, self.recurrent_net]
if self.function == 'generator':
self.nets.append(self.transpose_convolution_net)
def __call__(self, x):
x = self.env_to_convolutional_net(x)
x = self.convolutional_net(x)
x = self.convolutional_net_to_recurrent_net(x)
x = self.recurrent_net(x)
if self.function == 'generator':
x = self.recurrent_net_to_transpose_convolution_net(x)
x = self.transpose_convolution_net(x)
x = self.transpose_convolution_net_to_env(x)
else: # self.function == 'discriminator':
x = self.recurrent_net_to_env(x)
return x
def env_to_convolutional_net(self, x):
if isinstance(x, np.ndarray):
            x = x[None, None, :, :]  # add batch and channel dims: (H, W) -> (1, 1, H, W)
x = torch.Tensor(x)
return x
def convolutional_net_to_recurrent_net(self, x):
        x = np.array(torch.Tensor(x))  # hand the conv output to the recurrent net as a plain numpy array
return x
def recurrent_net_to_env(self, x):
x = np.array(x).squeeze()
x = np.minimum(x, 1)
return x
def recurrent_net_to_transpose_convolution_net(self, x):
for i in range(len(x)):
            x[i] = torch.Tensor(x[i][None, None, None, :])  # reshape each output to (1, 1, 1, D)
return x
def transpose_convolution_net_to_env(self, x):
x = np.array(x).squeeze()
x = np.minimum(x, 1)
return x
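
# A minimal sketch of the array/tensor conversions performed by the adapter
# methods above, assuming a single-channel 16x16 observation as declared in
# d_input ('obs' below is a hypothetical stand-in for a real environment
# output):
#
#     obs = np.zeros((16, 16))      # raw environment observation
#     x = obs[None, None, :, :]     # add batch & channel dims -> (1, 1, 16, 16)
#     x = torch.Tensor(x)           # torch input for the convolutional net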
|
{"hexsha": "cfbae4e5c55bb13a487a0d2dab4a9a780cb016b1", "size": 3175, "ext": "py", "lang": "Python", "max_stars_repo_path": "bots/network/dynamic/conv_rnn/gravity.py", "max_stars_repo_name": "MaximilienLC/nevo", "max_stars_repo_head_hexsha": "c701a1202bc18d89a622472918733bf78ba5e304", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bots/network/dynamic/conv_rnn/gravity.py", "max_issues_repo_name": "MaximilienLC/nevo", "max_issues_repo_head_hexsha": "c701a1202bc18d89a622472918733bf78ba5e304", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bots/network/dynamic/conv_rnn/gravity.py", "max_forks_repo_name": "MaximilienLC/nevo", "max_forks_repo_head_hexsha": "c701a1202bc18d89a622472918733bf78ba5e304", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T20:44:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T20:44:09.000Z", "avg_line_length": 29.128440367, "max_line_length": 74, "alphanum_fraction": 0.6456692913, "include": true, "reason": "import numpy", "num_tokens": 721}
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
with open("../data/raw/tasks_train_addprim_jump.txt", "r", encoding="utf8") as f:
IN_seq = []
OUT_seq = []
for i, line in enumerate(f):
line = line.split(' OUT: ')
        IN_seq.append(line[0][4:].strip())  # drop the leading "IN: " prefix
OUT_seq.append(line[1].strip())
def get_voc(SEQ):
voc = []
seq_len = []
for seq in SEQ:
seq_len.append(len(seq))
wd_list = seq.split(' ')
for wd in wd_list:
if wd not in voc:
voc.append(wd)
return voc, seq_len
voc_in, inSeq_len = get_voc(IN_seq)
voc_out, outSeq_len = get_voc(OUT_seq)
with open("../data/processed/train-addprim-jump_in-out.txt", "a", encoding="utf8") as f:
for i, j in zip(IN_seq, OUT_seq):
f.write(i+'\t'+j+'\n')
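
# A minimal sanity check of the parsing above, assuming SCAN lines have the
# form "IN: <command sequence> OUT: <action sequence>" (the example below is
# illustrative, not taken from the data file):
example = "IN: jump twice OUT: I_JUMP I_JUMP"
left, right = example.split(' OUT: ')
assert left[4:].strip() == "jump twice"
assert right.strip() == "I_JUMP I_JUMP"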
|
{"hexsha": "3f85fb7b5a794ec1a1921c41dcafcc3e55fc375a", "size": 856, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data_process.py", "max_stars_repo_name": "QianyunZhang/SCAN", "max_stars_repo_head_hexsha": "6d42007daecb5e6b27db230ccb2fee39fa4ad4cc", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data_process.py", "max_issues_repo_name": "QianyunZhang/SCAN", "max_issues_repo_head_hexsha": "6d42007daecb5e6b27db230ccb2fee39fa4ad4cc", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data_process.py", "max_forks_repo_name": "QianyunZhang/SCAN", "max_forks_repo_head_hexsha": "6d42007daecb5e6b27db230ccb2fee39fa4ad4cc", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-24T20:18:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-30T17:23:15.000Z", "avg_line_length": 23.7777777778, "max_line_length": 88, "alphanum_fraction": 0.5771028037, "include": true, "reason": "import numpy", "num_tokens": 239}
|
import numpy as np
import matplotlib.pyplot as plt
plt.subplots_adjust(hspace=0.4)
t = np.arange(0.01, 20.0, 0.01)
# log y axis
plt.subplot(221)
plt.semilogy(t, np.exp(-t/5.0))
plt.title('semilogy')
plt.grid(True)
# log x axis
plt.subplot(222)
plt.semilogx(t, np.sin(2*np.pi*t))
plt.title('semilogx')
plt.grid(True)
# log x and y axis
plt.subplot(223)
plt.loglog(t, 20*np.exp(-t/10.0), basex=2)  # 'basex' became 'base' in Matplotlib 3.3+
plt.grid(True)
plt.title('loglog base 2 on x')
# with errorbars: clip non-positive values
ax = plt.subplot(224)
ax.set_xscale("log", nonposx='clip')  # 'nonposx'/'nonposy' became 'nonpositive' in Matplotlib 3.3+
ax.set_yscale("log", nonposy='clip')
x = 10.0**np.linspace(0.0, 2.0, 20)
y = x**2.0
plt.errorbar(x, y, xerr=0.1*x, yerr=5.0 + 0.75*y)
ax.set_ylim(ymin=0.1)  # 'ymin' became 'bottom' in newer Matplotlib versions
ax.set_title('Errorbars go negative')
plt.show()
|
{"hexsha": "bf7372191fc1c7c230556e12b4682d3ad508f791", "size": 758, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/pylab_examples/log_demo.py", "max_stars_repo_name": "argriffing/matplotlib", "max_stars_repo_head_hexsha": "5555f5463fb5f995a59f7651c0034a5d6a4c7e84", "max_stars_repo_licenses": ["MIT", "BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-18T21:53:55.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-18T21:53:55.000Z", "max_issues_repo_path": "examples/pylab_examples/log_demo.py", "max_issues_repo_name": "argriffing/matplotlib", "max_issues_repo_head_hexsha": "5555f5463fb5f995a59f7651c0034a5d6a4c7e84", "max_issues_repo_licenses": ["MIT", "BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-10T17:57:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-26T16:23:09.000Z", "max_forks_repo_path": "examples/pylab_examples/log_demo.py", "max_forks_repo_name": "kdavies4/matplotlib", "max_forks_repo_head_hexsha": "330aefbd031ee227213afe655c5158320015d45b", "max_forks_repo_licenses": ["MIT", "BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2015-12-21T07:24:54.000Z", "max_forks_repo_forks_event_max_datetime": "2015-12-21T07:24:54.000Z", "avg_line_length": 19.4358974359, "max_line_length": 49, "alphanum_fraction": 0.6833773087, "include": true, "reason": "import numpy", "num_tokens": 281}
|
#*****************************************************************************
#
# Project: Automatic Mosaicing of Rectified, Collared Historic Aerial Imagery
# Purpose: Automatically tile and merge a directory of overlapping
#          georectified aerial images, choosing from overlapping tiles based
# on distance to the center of the parent image and the amount of
# NoData points in the tile in order to remove collars, edges, and
# areas of most probable distortion.
# Author: Jacob Adams, jacob.adams@cachecounty.org
#
#*****************************************************************************
# MIT License
#
# Copyright (c) 2018 Cache County
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#*****************************************************************************
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import os
import csv
import numpy as np
def ceildiv(a, b):
'''
Ceiling division, from user dlitz, https://stackoverflow.com/a/17511341/674039
'''
return -(-a // b)
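# For example, ceildiv(7, 2) == 4 while 7 // 2 == 3: negating twice makes
# floor division round toward positive infinity instead of negative infinity.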
def GetBoundingBox(in_path):
s_fh = gdal.Open(in_path, gdal.GA_ReadOnly)
trans = s_fh.GetGeoTransform()
ulx = trans[0]
uly = trans[3]
# Calculate lower right x/y with rows/cols * cell size + origin
lrx = s_fh.RasterXSize * trans[1] + ulx
lry = s_fh.RasterYSize * trans[5] + uly
s_fh = None
return (ulx, uly, lrx, lry)
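# A worked example with hypothetical values: for a 1000 x 800 pixel raster
# with origin (400000, 4500000), trans[1] = 2.5 and trans[5] = -2.5,
# GetBoundingBox returns
# (400000, 4500000, 1000 * 2.5 + 400000, 800 * -2.5 + 4500000)
# = (400000, 4500000, 402500, 4498000).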
def CreateFishnetIndices(ulx, uly, lrx, lry, dimension, pixels=False, pixel_size=2.5):
'''
    Creates a list of indices that cover the given bounding box (may extend
beyond the lrx/y point) with a spacing specified by 'dimension'.
If pixels is true, assumes dimensions are in pixels and uses pixel_size.
Otherwise, dimension is in raster coordinate system.
'''
chunks = []
ref_width = lrx - ulx
ref_height = uly - lry
if pixels:
chunk_ref_size = dimension * pixel_size
else:
chunk_ref_size = dimension
num_x_chunks = int(ceildiv(ref_width, chunk_ref_size))
num_y_chunks = int(ceildiv(ref_height, chunk_ref_size))
for y_chunk in range(0, num_y_chunks):
for x_chunk in range(0, num_x_chunks):
x_index = x_chunk
y_index = y_chunk
chunk_ulx = ulx + (chunk_ref_size * x_index)
chunk_uly = uly + (-chunk_ref_size * y_index)
chunk_lrx = ulx + (chunk_ref_size * (x_index + 1))
chunk_lry = uly + (-chunk_ref_size * (y_index + 1))
chunks.append((x_index, y_index, chunk_ulx, chunk_uly, chunk_lrx,
chunk_lry))
return chunks
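# A minimal usage sketch with hypothetical coordinates: a 1000 x 1000 box
# anchored at (0, 1000) with 500-unit cells yields a 2 x 2 fishnet:
#
#     CreateFishnetIndices(0, 1000, 1000, 0, 500)
#     # -> [(0, 0, 0, 1000, 500, 500), (1, 0, 500, 1000, 1000, 500),
#     #     (0, 1, 0, 500, 500, 0),    (1, 1, 500, 500, 1000, 0)]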
def create_polygon(coords):
'''
Creates a WKT polygon from a list of coordinates
'''
ring = ogr.Geometry(ogr.wkbLinearRing)
for coord in coords:
ring.AddPoint(coord[0], coord[1])
# Create polygon
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
return poly.ExportToWkt()
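# For example, create_polygon([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]) returns
# a WKT string of the form 'POLYGON ((0 0 0,1 0 0,1 1 0,0 1 0,0 0 0))'
# (AddPoint with two arguments produces 3D points with z = 0).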
def CopyTilesFromRaster(root, rastername, fishnet, shp_layer, target_dir):
'''
Given a fishnet of a certain size, copy any chunks of the source raster
into individual files corresponding to the fishnet cells. Calculates
the distance from the cell center to the raster's center, and stores in
a shapefile containing the bounding box of each cell.
Returns a dictionary containing the distance to center for each sub-chunk
in the form {cell_name: (rastername, distance, nodata found in chunk) ...}
'''
distances = {}
raster_path = os.path.join(root, rastername)
# Get data from source raster
s_fh = gdal.Open(raster_path, gdal.GA_ReadOnly)
trans = s_fh.GetGeoTransform()
projection = s_fh.GetProjection()
band1 = s_fh.GetRasterBand(1)
s_nodata = band1.GetNoDataValue()
bands = s_fh.RasterCount
raster_xmin = trans[0]
raster_ymax = trans[3]
raster_xwidth = trans[1]
raster_yheight = trans[5]
driver = s_fh.GetDriver()
lzw_opts = ["compress=lzw", "tiled=yes"]
band1 = None
# Calculate lower right x/y with rows/cols * cell size + origin
raster_xmax = s_fh.RasterXSize * raster_xwidth + raster_xmin
raster_ymin = s_fh.RasterYSize * raster_yheight + raster_ymax
# Calculate raster middle
raster_xmid = (s_fh.RasterXSize / 2.) * raster_xwidth + raster_xmin
raster_ymid = (s_fh.RasterYSize / 2.) * raster_yheight + raster_ymax
# Loop through the cells in the fishnet, copying over any relevant bits of
# raster to new subchunks.
for cell in fishnet:
cell_name = "{}-{}".format(cell[0], cell[1])
cell_xmin = cell[2]
cell_xmax = cell[4]
cell_ymin = cell[5]
cell_ymax = cell[3]
cell_xmid = (cell_xmax - cell_xmin) / 2. + cell_xmin
cell_ymid = (cell_ymax - cell_ymin) / 2. + cell_ymin
# Check to see if some part of raster is inside a given fishnet
# cell.
# If cell x min or max and y min or max are inside the raster
xmin_inside = cell_xmin > raster_xmin and cell_xmin < raster_xmax
xmax_inside = cell_xmax > raster_xmin and cell_xmax < raster_xmax
ymin_inside = cell_ymin > raster_ymin and cell_ymin < raster_ymax
ymax_inside = cell_ymax > raster_ymin and cell_ymax < raster_ymax
if (xmin_inside or xmax_inside) and (ymin_inside or ymax_inside):
            # Translate cell coords to raster pixels, create a numpy array
            # initialized to nodatas, ReadAsArray, save as cell_raster.tif
#print("{} {} {} {}".format(cell_xmin, raster_xmin, cell_ymax, raster_ymax))
# Fishnet cell origin and size as pixel indices
x_off = int((cell_xmin - raster_xmin) / raster_xwidth)
y_off = int((cell_ymax - raster_ymax) / raster_yheight)
# Add 5 pixels to x/y_size to handle gaps
x_size = int((cell_xmax - cell_xmin) / raster_xwidth) + 5
y_size = int((cell_ymin - cell_ymax) / raster_yheight) + 5
#print("{} {} {} {}".format(x_off, y_off, x_size, y_size))
            # Values for ReadAsArray; these aren't changed later unless
            # the border case checks change them.
            # These are all in pixels.
            # x_size and y_size were padded above (by 5 pixels) to slightly
            # overread and catch small one- or two-pixel gaps in the combined
            # rasters.
read_x_off = x_off
read_y_off = y_off
read_x_size = x_size
read_y_size = y_size
# Slice values for copying read data into slice_array
# These are the indices in the slice array where the actual
# read data should be copied to.
            # These should be 0 and max size (i.e., same dimension as
            # read_array) unless the border case checks change them.
sa_x_start = 0
sa_x_end = x_size
sa_y_start = 0
sa_y_end = y_size
# Edge logic
# If read exceeds bounds of image:
# Adjust x/y offset to appropriate place
# Change slice indices
# Checks both x and y, setting read and slice values for each dimension if
# needed
if x_off < 0:
read_x_off = 0
read_x_size = x_size + x_off # x_off would be negative
sa_x_start = -x_off # shift inwards -x_off spaces
if x_off + x_size > s_fh.RasterXSize:
read_x_size = s_fh.RasterXSize - x_off
sa_x_end = read_x_size # end after read_x_size spaces
if y_off < 0:
read_y_off = 0
read_y_size = y_size + y_off
sa_y_start = -y_off
if y_off + y_size > s_fh.RasterYSize:
read_y_size = s_fh.RasterYSize - y_off
sa_y_end = read_y_size
# Set up output raster
t_rastername = "{}_{}.tif".format(cell_name, rastername[:-4])
#print(t_rastername)
t_path = os.path.join(target_dir, t_rastername)
t_fh = driver.Create(t_path, x_size, y_size, bands, gdal.GDT_Int16, options=lzw_opts)
t_fh.SetProjection(projection)
            # TO FIX WEIRD OFFSETS:
            # Make sure the transform is set based on the top left corner of the
            # top left pixel of the source raster, not the fishnet. Using the
            # fishnet translates the whole raster to the fishnet's grid, which
            # isn't consistent with the rasters' pixel grids (i.e., cell_x/ymin
            # is not the top left corner of the top left pixel of the raster).
# Translate from x/y_off (pixels) to raster's GCS
raster_chunk_xmin = x_off * raster_xwidth + raster_xmin
raster_chunk_ymax = y_off * raster_yheight + raster_ymax
# Transform:
# 0: x coord, top left corner of top left pixel
# 1: pixel width
# 2: 0 (for north up)
# 3: y coord, top left corner of top left pixel
# 4: 0 (for north up)
# 5: pixel height
# t_trans = (cell_xmin, raster_xwidth, 0, cell_ymax, 0, raster_yheight)
t_trans = (raster_chunk_xmin, raster_xwidth, 0, raster_chunk_ymax, 0, raster_yheight)
t_fh.SetGeoTransform(t_trans)
num_nodata = 0
# Loop through all the bands of the raster and copy to a new chunk
for band in range(1, bands + 1):
# Prep target band
t_band = t_fh.GetRasterBand(band)
t_band.SetNoDataValue(s_nodata)
# Initialize slice array to nodata (for areas of the new chunk
# that are outside the source raster)
slice_array = np.full((y_size, x_size), s_nodata)
# Read the source raster
s_band = s_fh.GetRasterBand(band)
read_array = s_band.ReadAsArray(read_x_off, read_y_off,
read_x_size, read_y_size)
num_nodata += (read_array == s_nodata).sum()
# Put source raster data into appropriate place of slice array
slice_array[sa_y_start:sa_y_end, sa_x_start:sa_x_end] = read_array
# Write source array to disk
t_band.WriteArray(slice_array)
t_band = None
s_band = None
# Close target file handle
t_fh = None
# Calculate distance from cell center to raster center
cell_center = np.array((cell_xmid, cell_ymid))
raster_center = np.array((raster_xmid, raster_ymid))
distance = np.linalg.norm(cell_center - raster_center)
            new_num_nodata = num_nodata / 3.  # average nodata count per band (assumes 3-band imagery)
print("{}, {}, {}, {}".format(cell_name, rastername, distance, new_num_nodata))
            # Create cell bounding boxes as shapefile, with distance from the
            # middle of the cell to the middle of its parent raster saved as a
            # field for future evaluation
coords = [(cell_xmin, cell_ymax),
(cell_xmax, cell_ymax),
(cell_xmax, cell_ymin),
(cell_xmin, cell_ymin),
(cell_xmin, cell_ymax)]
defn = shp_layer.GetLayerDefn()
feature = ogr.Feature(defn)
feature.SetField('raster', rastername)
feature.SetField('cell', cell_name)
feature.SetField('d_to_cent', distance)
feature.SetField('nodatas', new_num_nodata)
poly = create_polygon(coords)
geom = ogr.CreateGeometryFromWkt(poly)
feature.SetGeometry(geom)
shp_layer.CreateFeature(feature)
feature = None
poly = None
geom = None
distances[cell_name] = (rastername, distance, new_num_nodata)
# close source raster
s_fh = None
return distances
def TileRectifiedRasters(rectified_dir, shp_path, tiled_dir, fishnet_size):
'''
    Tiles all the rasters in rectified_dir based on a fishnet that is anchored
    at the upper left of all the rasters and has cells of fishnet_size,
    saving the tiles in tiled_dir. Each fishnet cell will have
multiple tiles associated with it if two or more rasters overlap. The
following information is calculated for each tile, stored in the fishnet
shapefile, and returned from the method: the parent raster, the fishnet
cell index, the distance from the center of the tile to the center of the
parent raster, and the number of nodata pixels in the tile.
Returns: A list of dictionaries containing the tile information like so:
[{cell_index: (rastername, distance, nodatas)}, {}, ...]
'''
#directory = r'e:\a_imagery\1981\rectified'
# Loop through rectified rasters, check for ul/lr x/y to get bounding box
# ulx is the smallest x value, so we set it high and check if the current
# one is lower
ulx = 999999999
# uly is the largest y, so we set low and check if greater
uly = 0
# lrx is largest x, so we set low and check if greater
lrx = 0
# lry is smallest y, so we set high and check if lower
lry = 999999999
for root, dirs, files in os.walk(rectified_dir):
for fname in files:
if fname[-4:] == ".tif":
img_path = os.path.join(root, fname)
bounds = GetBoundingBox(img_path)
if bounds[0] < ulx:
ulx = bounds[0]
if bounds[1] > uly:
uly = bounds[1]
if bounds[2] > lrx:
lrx = bounds[2]
if bounds[3] < lry:
lry = bounds[3]
# print("{}, {}; {}, {}".format(ulx, uly, lrx, lry))
# Create tiling scheme
fishnet = CreateFishnetIndices(ulx, uly, lrx, lry, fishnet_size)
# for cell in fishnet:
# print(cell)
# Set up fishnet polygons shapefile
#poly_shp = r'e:\a_imagery\1981\00fishnet.shp'
shp_driver = ogr.GetDriverByName('ESRI Shapefile')
shp_ds = shp_driver.CreateDataSource(shp_path)
srs = osr.SpatialReference()
srs.ImportFromEPSG(102742)
layer = shp_ds.CreateLayer('', srs, ogr.wkbPolygon)
layer.CreateField(ogr.FieldDefn('raster', ogr.OFTString))
layer.CreateField(ogr.FieldDefn('cell', ogr.OFTString))
layer.CreateField(ogr.FieldDefn('d_to_cent', ogr.OFTReal))
layer.CreateField(ogr.FieldDefn('nodatas', ogr.OFTReal))
# Retiled directory
#tile_dir = r'e:\a_imagery\1981\tiled'
# list containing all chunk dictionaries
all_cells = []
# Loop through rectified rasters, create tiles named by index
for root, dirs, files in os.walk(rectified_dir):
for fname in files:
if fname[-4:] == ".tif":
#print(fname)
distances = CopyTilesFromRaster(root, fname, fishnet, layer,
tiled_dir)
all_cells.append(distances)
                # # Update, add, or overwrite cell in chunks dictionary if it isn't
                # # present already or if the distance is shorter than the current one
                # # and the new chunk has fewer nodata values
                # for cell, rname_distance in distances.items():
                #     if cell in chunks:  # Is there a chunk for this cell already?
                #         if rname_distance[1] < chunks[cell][1]:  # is this one closer to the center of the raster than the existing one?
                #             if rname_distance[2] <= chunks[cell][2]:  # does this one have fewer nodatas than (or the same as) the existing one?
                #                 chunks[cell] = rname_distance
                #     elif cell not in chunks:
                #         chunks[cell] = rname_distance
# Cleanup shapefile handles
layer = None
shp_ds = None
return all_cells
def ReadChunkFromShapefile(shp_path):
driver = ogr.GetDriverByName('ESRI Shapefile')
shape_s_dh = driver.Open(shp_path, 0)
layer = shape_s_dh.GetLayer()
#[{cell_index: (rastername, distance, nodatas)}, {}, ...]
cells = []
for feature in layer:
cell_index = feature.GetField("cell")
rastername = feature.GetField("raster")
distance = feature.GetField("d_to_cent")
nodatas = feature.GetField("nodatas")
celldict = {}
celldict[cell_index] = (rastername, distance, nodatas)
cells.append(celldict)
layer = None
shape_s_dh = None
return cells
if "__main__" in __name__:
directory = r'e:\a_imagery\1981\1_rectified'
poly_shp = r'e:\a_imagery\1981\00fishnet.shp'
tile_dir = r'e:\a_imagery\1981\tiled'
csv_path = r'e:\a_imagery\1981\00cells.csv'
# directory = r'f:\1978plats'
# poly_shp = r'f:\1978plats\00fishnet.shp'
# tile_dir = r'f:\1978plats\tiled'
fishnet_size = 750
tile = True
# Retile if needed; otherwise, just read the shapefile
if tile:
all_cells = TileRectifiedRasters(directory, poly_shp, tile_dir, fishnet_size)
else:
all_cells = ReadChunkFromShapefile(poly_shp)
# dictionary containing rasternames, chunknames, distances, nodata counts
chunks = {}
    # Update, add, or overwrite cell in chunks dictionary if it isn't
    # present already or if the distance is shorter than the current one
    # and the new chunk has fewer nodata values
for cell in all_cells:
for cell_number, cell_info in cell.items():
if cell_number in chunks: # Is there a chunk for this cell already?
                if cell_info[1] < chunks[cell_number][1]:  # is this one closer to the center of the raster than the existing one?
                    if cell_info[2] <= chunks[cell_number][2]:  # does this one have fewer nodatas than (or the same as) the existing one?
chunks[cell_number] = cell_info
elif cell_number not in chunks:
chunks[cell_number] = cell_info
# Do a second pass, so that when we build a vrt and the most desirable
# chunk has an area of nodata but there's a less-desirable chunk that
# covers part of that nodata, gdalbuildvrt will add this second chunk
# underneath it and honor the nodata setting of the upper, more desirable
# chunk.
second_chunks = {}
for cell in all_cells:
for cell_number, cell_info in cell.items():
if cell_number not in chunks: # can't be in first pass dictionary
if cell_number in second_chunks:
                    if cell_info[1] < second_chunks[cell_number][1]:  # is this one closer to the center of the raster than the existing one?
                        if cell_info[2] <= second_chunks[cell_number][2]:  # does this one have fewer nodatas than (or the same as) the existing one?
second_chunks[cell_number] = cell_info
else:
second_chunks[cell_number] = cell_info
with open(csv_path, 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
        # Write second-pass chunks first, so that first-pass chunks will be
        # seen later and placed on top by gdalbuildvrt
for key, value in second_chunks.items():
chunk_name = "{}_{}".format(key, value[0])
chunk_path = os.path.join(tile_dir, chunk_name)
writer.writerow([chunk_path])
# Now first-pass, most desirable chunks:
for key, value in chunks.items():
chunk_name = "{}_{}".format(key, value[0])
chunk_path = os.path.join(tile_dir, chunk_name)
writer.writerow([chunk_path])
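    # The CSV written above is intended as an input file list for gdalbuildvrt,
    # e.g. (hypothetical output name):
    #     gdalbuildvrt -input_file_list e:\a_imagery\1981\00cells.csv mosaic.vrt
    # gdalbuildvrt renders later entries in the list on top of earlier ones,
    # which is why the less desirable second-pass chunks are written first.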
|
{"hexsha": "bc86c8e5ec16958c5219d467c7f9b99b9addea5c", "size": 20723, "ext": "py", "lang": "Python", "max_stars_repo_path": "rectified_mosaic.py", "max_stars_repo_name": "cachecounty/general_scripts", "max_stars_repo_head_hexsha": "c1d3bb24c1fa4a36128728870147f4f07d10fceb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rectified_mosaic.py", "max_issues_repo_name": "cachecounty/general_scripts", "max_issues_repo_head_hexsha": "c1d3bb24c1fa4a36128728870147f4f07d10fceb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rectified_mosaic.py", "max_forks_repo_name": "cachecounty/general_scripts", "max_forks_repo_head_hexsha": "c1d3bb24c1fa4a36128728870147f4f07d10fceb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-04-07T21:56:39.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-07T21:56:39.000Z", "avg_line_length": 42.1199186992, "max_line_length": 243, "alphanum_fraction": 0.6201322202, "include": true, "reason": "import numpy", "num_tokens": 5116}
|
# ******************
# MODULE DOCSTRING
# ******************
"""
LOMAP: Graph generation
=======================
Alchemical free energy calculations hold increasing promise as an aid to drug
discovery efforts. However, applications of these techniques in discovery
projects have been relatively few, partly because of the difficulty of planning
and setting up calculations. The Lead Optimization Mapper (LOMAP) is an
automated algorithm to plan efficient relative free energy calculations between
potential ligands within a substantial set of compounds.
"""
# *****************************************************************************
# Lomap2: A toolkit to plan alchemical relative binding affinity calculations
# Copyright 2015 - 2016 UC Irvine and the Authors
#
# Authors: Dr Gaetano Calabro' and Dr David Mobley
#
# This part of the code has been originally made by Jonathan Redmann,
# and Christopher Summa at Summa Lab, Dept. of Computer Science,
# University of New Orleans and it has just been adapted to the new Lomap code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see http://www.gnu.org/licenses/
# *****************************************************************************
# ****************
# MODULE IMPORTS
# ****************
import networkx as nx
import numpy as np
import subprocess
import matplotlib.pyplot as plt
import copy
from operator import itemgetter
from rdkit.Chem import Draw
from rdkit.Chem import AllChem
import os.path
import logging
import tempfile
import shutil
import traceback
__all__ = ['GraphGen']
# *************************
# Graph Class
# *************************
class GraphGen(object):
"""
    This class is used to set up and generate the graph used to plan
    binding free energy calculations
"""
def __init__(self, dbase):
"""
        Initialization function
Parameters
----------
dbase : dbase object
the molecule container
"""
self.dbase = dbase
self.maxPathLength = dbase.options.max
self.maxDistFromActive = dbase.options.max_dist_from_actives
self.similarityScoresLimit = dbase.options.cutoff
self.requireCycleCovering = not dbase.options.allow_tree
if dbase.options.radial:
self.lead_index = self.pick_lead()
else:
self.lead_index = None
# A set of nodes that will be used to save nodes that are not a cycle cover for a given subgraph
self.nonCycleNodesSet = set()
# A set of edges that will be used to save edges that are acyclic for given subgraph
self.nonCycleEdgesSet = set()
# A count of the number of nodes that are not within self.maxDistFromActive edges
# of an active
self.distanceToActiveFailures = 0
# Draw Parameters
# THIS PART MUST BE CHANGED
# Max number of displayed chemical compound images as graph nodes
self.max_images = 2000
# Max number of displayed nodes in the graph
self.max_nodes = 100
# The maximum threshold distance in angstroms unit used to select if a molecule is depicted
self.max_mol_size = 50.0
self.edge_labels = True
# The following Section has been strongly copied/adapted from the original implementation
# Generate a list related to the disconnected graphs present in the initial graph
if dbase.options.fast and dbase.options.radial:
# only enable the fast map option if use the radial option
self.initialSubgraphList = self.generate_initial_subgraph_list(fast_map=True)
else:
self.initialSubgraphList = self.generate_initial_subgraph_list()
# A list of elements made of [edge, weights] for each subgraph
self.subgraphScoresLists = self.generate_subgraph_scores_lists(self.initialSubgraphList)
# Eliminates from each subgraph those edges whose weights are less than the hard limit
self.remove_edges_below_hard_limit()
# Make a new master list of subgraphs now that there may be more disconnected components
self.workingSubgraphsList = self.generate_working_subgraphs_list()
# Make a new sorted list of [edge, weights] for each subgraph now that there may be new subgraphs
self.workingSubgraphScoresLists = self.generate_subgraph_scores_lists(self.workingSubgraphsList)
# Remove edges, whose removal does not violate constraints, from the subgraphs,
# starting with lowest similarity score first
if dbase.options.fast and dbase.options.radial:
# if we use the fast and radial option, just need to add the surrounding edges from the initial graph
self.resultGraph = self.add_surrounding_edges()
# after adding the surround edges, some subgraphs may merge into a larger graph and so need to update the
# current subgraphs
# self.resultingSubgraphsList = copy.deepcopy(self.workingSubgraphsList)
# merge all Subgraphs together for layout
# self.resultGraph = self.merge_all_subgraphs()
else:
# >>>>>>>>>>>>>>>>>>>>>>>>>>>ISSUE ORDER PROBLEM<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
self.minimize_edges()
# >>>>>>>>>>>>>>>>>>>>>>>>>>>ISSUE ORDER PROBLEM<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Collect together disjoint subgraphs of like charge into subgraphs
self.resultingSubgraphsList = copy.deepcopy(self.workingSubgraphsList)
# Combine separate subgraphs into a single resulting graph
self.resultGraph = self.merge_all_subgraphs()
# Make a copy of the resulting graph for later processing in connectResultingComponents()
self.copyResultGraph = self.resultGraph.copy()
# Holds list of edges that were added in the connect components phase
self.edgesAddedInFirstTreePass = []
# Add edges to the resultingGraph to connect its components
self.connect_subgraphs()
return
def pick_lead(self):
if (self.dbase.nums() * (self.dbase.nums() - 1) / 2) != self.dbase.strict_mtx.size:
raise ValueError("There are errors in the similarity score matrices")
        if self.dbase.options.hub != "None":
# hub radial option. Use the provided reference compound as a hub
hub_index = None
for i in range(0, self.dbase.nums()):
if os.path.basename(self.dbase[i].getName()) == self.dbase.options.hub:
hub_index = i
if hub_index is None:
logging.info(
"Warning: the specified center ligand %s is not in the ligand database, will not use the radial option." % self.dbase.options.hub)
return hub_index
else:
# complete radial option. Pick the compound with the highest total similarity to all other compounds to use as a hub
all_sum_i = []
for i in range(0, self.dbase.nums()):
sum_i = 0
for j in range(0, self.dbase.nums()):
sum_i += self.dbase.strict_mtx[i, j]
all_sum_i.append(sum_i)
max_value = max(all_sum_i)
max_index = [i for i, x in enumerate(all_sum_i) if x == max_value]
max_index_final = max_index[0]
return max_index_final
def generate_initial_subgraph_list(self, fast_map=False):
"""
This function generates a starting graph connecting with edges all the
compounds with a positive strict similarity score
Returns
-------
initialSubgraphList : list of NetworkX graph
the list of connected component graphs
"""
compound_graph = nx.Graph()
if (self.dbase.nums() * (self.dbase.nums() - 1) / 2) != self.dbase.strict_mtx.size:
raise ValueError("There are errors in the similarity score matrices")
if not fast_map:
# if not fast map option, connect all possible nodes to generate the initial graph
for i in range(0, self.dbase.nums()):
if i == 0:
compound_graph.add_node(i, ID=self.dbase[i].getID(),
fname_comp=os.path.basename(self.dbase[i].getName()),
active=self.dbase[i].isActive())
for j in range(i + 1, self.dbase.nums()):
if i == 0:
compound_graph.add_node(j, ID=self.dbase[j].getID(),
fname_comp=os.path.basename(self.dbase[j].getName()),
active=self.dbase[j].isActive())
wgt = self.dbase.strict_mtx[i, j]
if wgt > 0.0:
compound_graph.add_edge(i, j, similarity=wgt, strict_flag=True)
else:
# if fast map option, then add all possible radial edges as the initial graph
for i in range(0, self.dbase.nums()):
# add the node for i
compound_graph.add_node(i, ID=self.dbase[i].getID(),
fname_comp=os.path.basename(self.dbase[i].getName()))
if i != self.lead_index:
wgt = self.dbase.strict_mtx[i, self.lead_index]
if wgt > 0:
compound_graph.add_edge(i, self.lead_index, similarity=wgt, strict_flag=True)
initialSubgraphGen = [compound_graph.subgraph(c).copy() for c in nx.connected_components(compound_graph)]
initialSubgraphList = [x for x in initialSubgraphGen]
return initialSubgraphList
def generate_subgraph_scores_lists(self, subgraphList):
"""
        This function generates a list of lists where each inner list holds the
        weights of each edge in a given subgraph in the subgraphList,
        sorted from lowest to highest
        Returns
        -------
        subgraphScoresLists : list of lists
            each list contains a tuple with the graph node indexes and their
            similarity as weight
"""
subgraphScoresLists = []
for subgraph in subgraphList:
weightsDictionary = nx.get_edge_attributes(subgraph, 'similarity')
subgraphWeightsList = [(edge[0], edge[1], weightsDictionary[edge]) for edge in weightsDictionary.keys()]
subgraphWeightsList.sort(key=lambda entry: entry[2])
subgraphScoresLists.append(subgraphWeightsList)
return subgraphScoresLists
def remove_edges_below_hard_limit(self):
"""
This function removes edges below the set hard limit from each subGraph
and from each weightsList
"""
        totalEdges = 0
        for subgraph in self.initialSubgraphList:
            weightsList = self.subgraphScoresLists[self.initialSubgraphList.index(subgraph)]
            removed = 0
            for edge in weightsList:
                if edge[2] < self.similarityScoresLimit:
                    subgraph.remove_edge(edge[0], edge[1])
                    removed += 1
            # weightsList is sorted ascending, so the removed edges are exactly
            # the first 'removed' entries; drop them from the list as well
            del weightsList[:removed]
            totalEdges = totalEdges + subgraph.number_of_edges()
def generate_working_subgraphs_list(self):
"""
        After the deletion of edges whose weight is less than the selected
        threshold, a subgraph may become disconnected, so a new master
        list of connected subgraphs is generated
Returns
-------
workingSubgraphsList : list of lists
each list contains a tuple with the graph node indexes and their
similatiry as weigth
"""
workingSubgraphsList = []
for subgraph in self.initialSubgraphList:
newSubgraphList = [subgraph.subgraph(c).copy() for c in nx.connected_components(subgraph)]
for newSubgraph in newSubgraphList:
workingSubgraphsList.append(newSubgraph)
return workingSubgraphsList
def minimize_edges(self):
"""
Minimize edges in each subgraph while ensuring constraints are met
"""
for subgraph in self.workingSubgraphsList:
weightsList = self.workingSubgraphScoresLists[self.workingSubgraphsList.index(subgraph)]
# ISSUE ORDER IS ORIGINATED HERE
# weightsList = sorted(weightsList, key = itemgetter(1))
# This part has been copied from the original code
self.nonCycleNodesSet = self.find_non_cyclic_nodes(subgraph)
self.nonCycleEdgesSet = self.find_non_cyclic_edges(subgraph)
numberOfComponents = nx.number_connected_components(subgraph)
self.distanceToActiveFailures = self.count_distance_to_active_failures(subgraph)
            if len(subgraph.edges()) > 2:  # Graphs must have at least 3 edges to be minimized
for edge in weightsList:
if self.lead_index is not None:
                        # Here the radial option is applied: an edge is only
                        # removed if it is not connected to the hub (lead)
                        # compound; edges touching the lead compound are kept.
if self.lead_index not in [edge[0], edge[1]]:
subgraph.remove_edge(edge[0], edge[1])
if self.check_constraints(subgraph, numberOfComponents) == False:
subgraph.add_edge(edge[0], edge[1], similarity=edge[2], strict_flag=True)
elif edge[2] < 1.0: # Don't remove edges with similarity 1
logging.info("Trying to remove edge %d-%d with similarity %f" % (edge[0],edge[1],edge[2]))
subgraph.remove_edge(edge[0], edge[1])
if self.check_constraints(subgraph, numberOfComponents) == False:
subgraph.add_edge(edge[0], edge[1], similarity=edge[2], strict_flag=True)
else:
logging.info("Removed edge %d-%d" % (edge[0],edge[1]))
else:
logging.info("Skipping edge %d-%d as it has similarity 1" % (edge[0],edge[1]))
def add_surrounding_edges(self):
"""
        Add surrounding edges in each subgraph to make sure all nodes are in a cycle
"""
for subgraph in self.workingSubgraphsList:
subgraph_nodes = subgraph.nodes()
if self.lead_index in subgraph_nodes:
# here we only consider the subgraph with lead compound
self.nonCycleNodesSet = self.find_non_cyclic_nodes(subgraph)
self.nonCycleEdgesSet = self.find_non_cyclic_edges(subgraph)
for node in self.nonCycleNodesSet:
                    # For each node in the non-cycle node set, compare its
                    # similarity to all the other surrounding nodes and connect
                    # it to the one with the maximum score.
node_score_list = []
for i in range(0, self.dbase.nums()):
if i != node and i != self.lead_index:
node_score_list.append(self.dbase.strict_mtx[node, i])
else:
node_score_list.append(0.0)
max_value = max(node_score_list)
if max_value > self.similarityScoresLimit:
max_index = [i for i, x in enumerate(node_score_list) if x == max_value]
max_index_final = max_index[0]
subgraph.add_edge(node, max_index_final,
similarity=self.dbase.strict_mtx[node, max_index_final], strict_flag=True)
return subgraph
def find_non_cyclic_nodes(self, subgraph):
"""
Generates a list of nodes of the subgraph that are not in a cycle
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for not cycle nodes
Returns
-------
missingNodesSet : set of graph nodes
the set of graph nodes that are not in a cycle
"""
missingNodesSet = set()
cycleNodes = []
cycleList = nx.cycle_basis(subgraph)
cycleNodes = [node for cycle in cycleList for node in cycle]
missingNodesSet = set([node for node in subgraph.nodes() if node not in cycleNodes])
return missingNodesSet
def find_non_cyclic_edges(self, subgraph):
"""
        Generates a set of edges of the subgraph that are not in a cycle (called
        "bridges" in NetworkX terminology).
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for not cycle nodes
Returns
-------
missingEdgesSet : set of graph edges
the set of edges that are not in a cycle
"""
missingEdgesSet = set(nx.bridges(subgraph))
return missingEdgesSet
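
    # A minimal sketch of the two helpers above on a toy graph: a triangle
    # (0-1-2) with a pendant node 3 attached to node 2. Node 3 lies outside
    # every cycle and edge (2, 3) is a bridge:
    #
    #     g = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3)])
    #     {n for cyc in nx.cycle_basis(g) for n in cyc}   # -> {0, 1, 2}
    #     set(nx.bridges(g))                              # -> {(2, 3)}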
def check_constraints(self, subgraph, numComp):
"""
Determine if the given subgraph still meets the constraints
Parameters
----------
subgraph : NetworkX subgraph obj
the subgraph to check for the constraints
numComp : int
            the number of connected components
Returns
-------
constraintsMet : bool
True if all the constraints are met, False otherwise
"""
constraintsMet = True
if not self.remains_connected(subgraph, numComp):
constraintsMet = False
# The requirement to keep a cycle covering is now optional
if constraintsMet and self.requireCycleCovering:
if not self.check_cycle_covering(subgraph):
constraintsMet = False
if constraintsMet:
if not self.check_max_distance(subgraph):
constraintsMet = False
if constraintsMet:
if not self.check_distance_to_active(subgraph):
constraintsMet = False
return constraintsMet
def remains_connected(self, subgraph, numComponents):
"""
Determine if the subgraph remains connected after an edge has been
removed
Parameters
---------
        subgraph : NetworkX subgraph obj
            the subgraph to check for connection after the edge deletion
        numComponents : int
            the number of connected components
        Returns
        -------
        isConnected : bool
            True if the subgraph is connected, False otherwise
        """
isConnected = False
if numComponents == nx.number_connected_components(subgraph):
isConnected = True
else:
logging.info("Rejecting edge deletion on graph connectivity")
return isConnected
def check_cycle_covering(self, subgraph):
"""
Checks if the subgraph has a cycle covering. Note that this has been extended from
the original algorithm: we not only care if the number of acyclic nodes has
increased, but we also care if the number of acyclic edges (bridges) has increased.
Note that if the number of acyclic edges hasn't increased, then the number of
acyclic nodes hasn't either, so that test is included in the edges test.
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for connection after the edge deletion
Returns
-------
hasCovering : bool
True if the subgraph has a cycle covering, False otherwise
"""
hasCovering = True
# Have we increased the number of non-cyclic edges?
if self.find_non_cyclic_edges(subgraph).difference(self.nonCycleEdgesSet):
hasCovering = False
logging.info("Rejecting edge deletion on cycle covering")
return hasCovering
def check_max_distance(self, subgraph):
"""
Check to see if the graph has paths from all compounds to all other
compounds within the specified limit
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for the max distance between nodes
Returns
-------
withinMaxDistance : bool
True if the subgraph has all the nodes within the specified
max distance
"""
withinMaxDistance = True
for node in subgraph:
eccentricity = nx.eccentricity(subgraph, node)
if eccentricity > self.maxPathLength:
withinMaxDistance = False
logging.info("Rejecting edge deletion on graph diameter for node %d" % (node))
return withinMaxDistance
def count_distance_to_active_failures(self, subgraph):
"""
Count the number of compounds that don't have a minimum-length path to an active
within the specified limit
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for the max distance between nodes
Returns
-------
failures : int
Number of nodes that are not within the max distance to any active node
"""
failures = 0
hasActives=False
for node in subgraph.nodes():
if (subgraph.nodes[node]["active"]):
hasActives=True
if (not hasActives):
return 0 # No actives, so don't bother checking
paths = nx.shortest_path(subgraph)
for node in subgraph.nodes():
if (not subgraph.nodes[node]["active"]):
ok=False
for node2 in subgraph.nodes():
if (subgraph.nodes[node2]["active"]):
pathlen = len(paths[node][node2]) - 1 # No. edges is 1 less than no. nodes
if (pathlen <= self.maxDistFromActive): ok=True
if (not ok):
failures = failures + 1
return failures
def check_distance_to_active(self, subgraph):
"""
Check to see if we have increased the number of distance-to-active failures
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for the max distance between nodes
Returns
-------
ok : bool
True if we have not increased the number of failed nodes
"""
count = self.count_distance_to_active_failures(subgraph)
failed = (count > self.distanceToActiveFailures)
if (failed): logging.info("Rejecting edge deletion on distance-to-actives %d vs %d" % (count,self.distanceToActiveFailures))
logging.info("Checking edge deletion on distance-to-actives %d vs %d" % (count,self.distanceToActiveFailures))
return not failed
def merge_all_subgraphs(self):
"""Generates a single networkx graph object from the subgraphs that have
been processed
Returns
-------
finalGraph : NetworkX graph obj
            the final graph produced by merging all the subgraphs. The produced
            graph may have disconnected parts
"""
finalGraph = nx.Graph()
for subgraph in self.workingSubgraphsList:
finalGraph = nx.union(finalGraph, subgraph)
return finalGraph
def connect_subgraphs(self):
"""
        Adds edges to the resultGraph to connect as many components of the
        final graph as possible
"""
connectSuccess = self.connect_graph_components_brute_force()
while connectSuccess:
connectSuccess = self.connect_graph_components_brute_force()
# WARNING: The self.workingSubgraphsList at this point is different from
# the copy self.resultingSubgraphsList made before
connectSuccess = self.connect_graph_components_brute_force_2()
while connectSuccess:
connectSuccess = self.connect_graph_components_brute_force_2()
def connect_graph_components_brute_force(self):
"""
Adds edges to the resultGraph to connect all components that can be
connected, only one edge is added per component, to form a tree like
structure between the different components of the resultGraph
Returns
-------
bool
True if the addition of edges was possible in strict mode, False otherwise
"""
generator_graph = [self.resultGraph.subgraph(c).copy() for c in nx.connected_components(self.resultGraph)]
self.workingSubgraphsList = [x for x in generator_graph]
if len(self.workingSubgraphsList) == 1:
return False
edgesToCheck = []
edgesToCheckAdditionalInfo = []
numzeros = 0
for i in range(0, len(self.workingSubgraphsList)):
nodesOfI = self.workingSubgraphsList[i].nodes()
for j in range(i + 1, len(self.workingSubgraphsList)):
nodesOfJ = self.workingSubgraphsList[j].nodes()
# change the following lines to be compatible with networkx 2.0
for k in nodesOfI.keys():
for l in nodesOfJ.keys():
                        # Produce an edge from nodesOfI[k] to nodesOfJ[l] if the
                        # weight is nonzero, and push this edge into edgesToCheck.
                        # The score matrix is assumed to be symmetric; in the
                        # Graph part this does not seem to be true:
similarity = self.dbase.loose_mtx[nodesOfI[k]["ID"], nodesOfJ[l]["ID"]]
if similarity > 0.0:
edgesToCheck.append((nodesOfI[k]["ID"], nodesOfJ[l]["ID"], similarity))
edgesToCheckAdditionalInfo.append((nodesOfI[k]["ID"], nodesOfJ[l]["ID"], similarity, i, j))
else:
numzeros = numzeros + 1
if len(edgesToCheck) > 0:
sortedList = sorted(edgesToCheck, key=itemgetter(2), reverse=True)
sortedListAdditionalInfo = sorted(edgesToCheckAdditionalInfo, key=itemgetter(2), reverse=True)
edgeToAdd = sortedList[0]
# self.edgeFile.write("\n" + str(edgeToAdd))
edgeToAddAdditionalInfo = sortedListAdditionalInfo[0]
self.edgesAddedInFirstTreePass.append(edgeToAdd)
self.resultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag=False)
generator_graph = [self.resultGraph.subgraph(c).copy() for c in nx.connected_components(self.resultGraph)]
self.workingSubgraphsList = [x for x in generator_graph]
return True
else:
return False
def connect_graph_components_brute_force_2(self):
"""
Adds a second edge between each of the (former) components of the
resultGraph to try to provide cycles between (former) components
Returns
-------
bool
True if the addition of edges was possible in loose mode, False otherwise
"""
if len(self.resultingSubgraphsList) == 1:
return False
edgesToCheck = []
for i in range(0, len(self.resultingSubgraphsList)):
nodesOfI = self.resultingSubgraphsList[i].nodes()
for j in range(i + 1, len(self.resultingSubgraphsList)):
nodesOfJ = self.resultingSubgraphsList[j].nodes()
for k in nodesOfI.keys():
for l in nodesOfJ.keys():
                        # Produce an edge from nodesOfI[k] to nodesOfJ[l] if the
                        # weight is nonzero, and push this edge into edgesToCheck.
                        # The score matrix is assumed to be symmetric; in the
                        # Graph part this does not seem to be true: <<<<<DEBUG>>>>>
similarity = self.dbase.loose_mtx[nodesOfI[k]["ID"], nodesOfJ[l]["ID"]]
if similarity > 0.0:
edgesToCheck.append((nodesOfI[k]["ID"], nodesOfJ[l]["ID"], similarity))
finalEdgesToCheck = [edge for edge in edgesToCheck if edge not in self.edgesAddedInFirstTreePass]
if len(finalEdgesToCheck) > 0:
sortedList = sorted(finalEdgesToCheck, key=itemgetter(2), reverse=True)
edgeToAdd = sortedList[0]
self.resultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag=False)
self.copyResultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag=False)
generator_graph = [self.copyResultGraph.subgraph(c).copy() for c in nx.connected_components(self.copyResultGraph)]
self.resultingSubgraphsList = [x for x in generator_graph]
return True
else:
return False
def get_graph(self):
"""
Returns the final generated NetworkX graph
"""
return self.resultGraph
def generate_depictions(self):
def max_dist_mol(mol):
max_dist = 0.0
conf = mol.GetConformer()
for i in range(0, conf.GetNumAtoms()):
crdi = np.array([conf.GetAtomPosition(i).x, conf.GetAtomPosition(i).y, conf.GetAtomPosition(i).z])
for j in range(i + 1, conf.GetNumAtoms()):
                    crdj = np.array([conf.GetAtomPosition(j).x, conf.GetAtomPosition(j).y, conf.GetAtomPosition(j).z])
dist = np.linalg.norm(crdi - crdj)
if dist > max_dist:
max_dist = dist
return max_dist
directory_name = tempfile.mkdtemp()
temp_graph = self.resultGraph.copy()
if nx.number_of_nodes(temp_graph) <= self.max_images:
# Draw.DrawingOptions.atomLabelFontSize=30
# Draw.DrawingOptions.dotsPerAngstrom=100
for n in temp_graph:
id_mol = temp_graph.nodes[n]['ID']
mol = self.dbase[id_mol].getMolecule()
max_dist = max_dist_mol(mol)
if max_dist < self.max_mol_size:
fname = os.path.join(directory_name, self.dbase[id_mol].getName() + ".png")
# 1, modify here to calculate the 2D structure for ligands cannot remove Hydrogens by rdkit
# 2, change the graph size to get better resolution
try:
mol = AllChem.RemoveHs(mol)
AllChem.Compute2DCoords(mol)
from rdkit.Chem.Draw.MolDrawing import DrawingOptions
DrawingOptions.bondLineWidth = 2.5
Draw.MolToFile(mol, fname, size=(200, 200), kekulize=False, fitimage=True, imageType='png',
#options=DrawingOptions
)
                    except Exception:
###### need to ask RDKit to fix this if possible, see the code
# issue tracker for more details######
logging.info(
"Error attempting to remove hydrogens for molecule %s using RDKit. RDKit cannot kekulize the molecule" %
self.dbase[id_mol].getName())
AllChem.Compute2DCoords(mol)
from rdkit.Chem.Draw.MolDrawing import DrawingOptions
DrawingOptions.bondLineWidth = 2.5
Draw.MolToFile(mol, fname, size=(200, 200), kekulize=False, fitimage=True, imageType='png',
#options=DrawingOptions
)
temp_graph.nodes[n]['image'] = fname
# self.resultGraph.nodes[n]['label'] = ''
temp_graph.nodes[n]['labelloc'] = 't'
temp_graph.nodes[n]['penwidth'] = 2.5
# self.resultGraph.node[n]['xlabel'] = self.resultGraph.nodes[n]['ID']
for u, v, d in temp_graph.edges(data=True):
if d['strict_flag'] == True:
temp_graph[u][v]['color'] = 'blue'
temp_graph[u][v]['penwidth'] = 2.5
else:
temp_graph[u][v]['color'] = 'red'
temp_graph[u][v]['penwidth'] = 2.5
if self.edge_labels:
temp_graph[u][v]['label'] = round(d['similarity'],2)
nx.nx_agraph.write_dot(temp_graph, self.dbase.options.name + '_tmp.dot')
cmd = 'dot -Tpng ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.png'
os.system(cmd)
cmd = 'dot -Teps ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.eps'
os.system(cmd)
cmd = 'dot -Tpdf ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.pdf'
os.system(cmd)
os.remove(self.dbase.options.name + '_tmp.dot')
shutil.rmtree(directory_name, ignore_errors=True)
# The function to output the score and connectivity txt file
def layout_info(self):
# pass the lead compound index if the radial option is on and generate the
# morph type of output required by FESetup
if self.lead_index is not None:
morph_txt = open(self.dbase.options.name + "_morph.txt", "w")
morph_data = "morph_pairs = "
with open(self.dbase.options.name + "_score_with_connection.txt", "w") as info_txt:
all_key_id = self.dbase.dic_mapping.keys()
data = ["%-10s,%-10s,%-25s,%-25s,%-15s,%-15s,%-15s,%-10s\n" % (
"Index_1", "Index_2", "Filename_1", "Filename_2", "Str_sim", "Eff_sim", "Loose_sim", "Connect")]
for i in range(len(all_key_id) - 1):
for j in range(i + 1, len(all_key_id)):
morph_string = None
connected = False
similarity=0
try:
edgedata=[d for (u,v,d) in self.resultGraph.edges(data=True) if ((u==i and v==j) or (u==j and v==i))]
similarity = edgedata[0]['similarity']
connected = True
except IndexError:
pass
Filename_i = self.dbase.dic_mapping[i]
Filename_j = self.dbase.dic_mapping[j]
MCmap = self.dbase.get_MCSmap(i,j)
mapString=""
if MCmap is not None:
mapString = MCmap
# print "Check the filename", Filename_i, Filename_j
strict_similarity = self.dbase.strict_mtx[i, j]
loose_similarity = self.dbase.loose_mtx[i, j]
true_strict_similarity = self.dbase.true_strict_mtx[i, j]
if connected:
new_line = "%-10s,%-10s,%-25s,%-25s,%-15.5f,%-15.5f,%-15.5f,%-10s,%s\n" % (
i, j, Filename_i, Filename_j, true_strict_similarity, strict_similarity, loose_similarity, "Yes",mapString)
# generate the morph type, and pick the start ligand based on the similarity
if self.lead_index is not None:
morph_i = Filename_i.split(".")[0]
morph_j = Filename_j.split(".")[0]
if i == self.lead_index:
morph_string = "%s > %s, " % (morph_i, morph_j)
elif j == self.lead_index:
morph_string = "%s > %s, " % (morph_j, morph_i)
else:
# compare i and j with the lead compound, and
# pick the one with the higher similarity as the start ligand
similarity_i = self.dbase.strict_mtx[self.lead_index, i]
similarity_j = self.dbase.strict_mtx[self.lead_index, j]
if similarity_i > similarity_j:
morph_string = "%s > %s, " % (morph_i, morph_j)
else:
morph_string = "%s > %s, " % (morph_j, morph_i)
morph_data += morph_string
else:
new_line = "%-10s,%-10s,%-25s,%-25s,%-15.5f,%-15.5f,%-15.5f,%-10s,%s\n" % (
i, j, Filename_i, Filename_j, true_strict_similarity, strict_similarity, loose_similarity, "No",mapString)
data.append(new_line)
info_txt.writelines(data)
if self.lead_index is not None:
morph_txt.write(morph_data)
def write_graph(self, output_no_images, output_no_graph):
"""
This function writes to a file the final generated NetworkX graph as
.dot and the .ps files. The mapping between molecule IDs and compounds
name is saved as text file
"""
try:
self.dbase.write_dic()
self.layout_info()
except Exception as e:
traceback.print_exc()
raise IOError("%s: %s.txt" % (str(e), self.dbase.options.name))
try:
if not output_no_images:
self.generate_depictions()
if not output_no_graph:
nx.nx_agraph.write_dot(self.resultGraph, self.dbase.options.name + '.dot')
except Exception as e:
traceback.print_exc()
raise IOError('Problems during the file generation: %s' % str(e))
        logging.info(30 * '-')
        log = 'The following files have been generated:'
        if not output_no_graph:
            log += f'\n{self.dbase.options.name}.dot\tGraph file'
        if not output_no_images:
            log += f'\n{self.dbase.options.name}.png\tPng file'
        log += f'\n{self.dbase.options.name}.txt\tMapping Text file'
        logging.info(log)
        logging.info(30 * '-')
return
    ###### Still in development ######
def draw(self):
"""
This function plots the NetworkX graph by using Matplotlib
"""
logging.info('\nDrawing....')
if nx.number_of_nodes(self.resultGraph) > self.max_nodes:
            logging.info('The number of generated graph nodes %d exceeds the max number of drawable nodes %s' % (
                nx.number_of_nodes(self.resultGraph), self.max_nodes))
return
def max_dist_mol(mol):
max_dist = 0.0
conf = mol.GetConformer()
for i in range(0, conf.GetNumAtoms()):
crdi = np.array([conf.GetAtomPosition(i).x, conf.GetAtomPosition(i).y, conf.GetAtomPosition(i).z])
for j in range(i + 1, conf.GetNumAtoms()):
                    crdj = np.array([conf.GetAtomPosition(j).x, conf.GetAtomPosition(j).y, conf.GetAtomPosition(j).z])
dist = np.linalg.norm(crdi - crdj)
if dist > max_dist:
max_dist = dist
return max_dist
        # Determine the screen resolution using xdpyinfo, avoiding a heavy Qt dependency
command = ('xdpyinfo | grep dimensions')
p = subprocess.run(command, stdout=subprocess.PIPE, shell=True, executable='/bin/bash')
width = int(p.stdout.split()[1].split(b'x')[0])
height = int(p.stdout.split()[1].split(b'x')[1])
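        # Note: this assumes a running X display; on a headless system the
        # xdpyinfo call produces no output and the parsing above will fail.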
# Canvas scale factor
scale_canvas = 0.75
# Canvas resolution
max_canvas_size = (int(width * scale_canvas), int(height * scale_canvas))
fig = plt.figure(1, facecolor='white')
fig.set_dpi(100)
fig.set_size_inches(max_canvas_size[0] / fig.get_dpi(), max_canvas_size[1] / fig.get_dpi(), forward=True)
ax = plt.subplot(111)
plt.axis('off')
pos = nx.nx_agraph.graphviz_layout(self.resultGraph, prog="neato")
strict_edges = [(u, v) for (u, v, d) in self.resultGraph.edges(data=True) if d['strict_flag'] == True]
loose_edges = [(u, v) for (u, v, d) in self.resultGraph.edges(data=True) if d['strict_flag'] == False]
node_labels = dict([(u, d['ID']) for u, d in self.resultGraph.nodes(data=True)])
# Draw nodes
nx.draw_networkx_nodes(self.resultGraph, pos, node_size=500, node_color='r')
# Draw node labels
nx.draw_networkx_labels(self.resultGraph, pos, labels=node_labels, font_size=10)
if self.edge_labels:
edge_weight_strict = dict([((u, v,), d['similarity']) for u, v, d in self.resultGraph.edges(data=True) if
d['strict_flag'] == True])
edge_weight_loose = dict([((u, v,), d['similarity']) for u, v, d in self.resultGraph.edges(data=True) if
d['strict_flag'] == False])
for key in edge_weight_strict:
edge_weight_strict[key] = round(edge_weight_strict[key], 2)
for key in edge_weight_loose:
edge_weight_loose[key] = round(edge_weight_loose[key], 2)
# edge strict
nx.draw_networkx_edge_labels(self.resultGraph, pos, edge_labels=edge_weight_strict, font_color='g')
# edge loose
nx.draw_networkx_edge_labels(self.resultGraph, pos, edge_labels=edge_weight_loose, font_color='r')
# edges strict
nx.draw_networkx_edges(self.resultGraph, pos, edgelist=strict_edges, edge_color='g')
# edges loose
nx.draw_networkx_edges(self.resultGraph, pos, edgelist=loose_edges, edge_color='r')
if nx.number_of_nodes(self.resultGraph) <= self.max_images:
trans = ax.transData.transform
trans2 = fig.transFigure.inverted().transform
cut = 1.0
frame = 10
xmax = cut * max(xx for xx, yy in pos.values()) + frame
ymax = cut * max(yy for xx, yy in pos.values()) + frame
xmin = cut * min(xx for xx, yy in pos.values()) - frame
ymin = cut * min(yy for xx, yy in pos.values()) - frame
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
h = 20
w = 20
mol_size = (200, 200)
for each_node in self.resultGraph:
id_mol = self.resultGraph.nodes[each_node]['ID']
                # Skip hydrogen removal if RDKit cannot sanitize the molecule
                try:
                    mol = AllChem.RemoveHs(self.dbase[id_mol].getMolecule())
                except Exception:
                    ###### need to ask RDKit to fix this if possible, see the
                    # code issue tracker for more details ######
                    mol = self.dbase[id_mol].getMolecule()
                    logging.info(
                        "Error attempting to remove hydrogens for molecule %s using RDKit: the molecule cannot be kekulized" %
                        self.dbase[id_mol].getName())
# max_dist = max_dist_mol(mol)
# if max_dist > 7.0:
# continue
AllChem.Compute2DCoords(mol)
                # Wrap drawing in try/except: some molecules cannot be drawn
                try:
                    img_mol = Draw.MolToImage(mol, mol_size, kekulize=False)
                except Exception:
                    logging.exception(
                        "This molecule cannot be drawn with the RDKit Draw function; skipping its depiction")
                    continue
xx, yy = trans(pos[each_node])
xa, ya = trans2((xx, yy))
nodesize_1 = (300.0 / (h * 100))
nodesize_2 = (300.0 / (w * 100))
p2_2 = nodesize_2 / 2
p2_1 = nodesize_1 / 2
a = plt.axes([xa - p2_2, ya - p2_1, nodesize_2, nodesize_1])
# self.resultGraph.nodes[id_mol]['image'] = img_mol
# a.imshow(self.resultGraph.node[each_node]['image'])
a.imshow(img_mol)
a.axis('off')
# plt.savefig('graph.png', facecolor=fig.get_facecolor())
# print 'Graph .png file has been generated...'
plt.show()
return
|
{"hexsha": "0a96cdffdfdbecd376dd8fdb1b0191ae182b8eaa", "size": 45403, "ext": "py", "lang": "Python", "max_stars_repo_path": "lomap/graphgen.py", "max_stars_repo_name": "MobleyLab/Lomap", "max_stars_repo_head_hexsha": "20bf91dced66b15c48f62e0ee1274b58ea1a8be2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2017-05-08T01:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T17:32:45.000Z", "max_issues_repo_path": "lomap/graphgen.py", "max_issues_repo_name": "MobleyLab/Lomap", "max_issues_repo_head_hexsha": "20bf91dced66b15c48f62e0ee1274b58ea1a8be2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 50, "max_issues_repo_issues_event_min_datetime": "2016-07-12T08:29:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T22:34:06.000Z", "max_forks_repo_path": "lomap/graphgen.py", "max_forks_repo_name": "MobleyLab/Lomap", "max_forks_repo_head_hexsha": "20bf91dced66b15c48f62e0ee1274b58ea1a8be2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2017-04-15T14:59:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T16:42:49.000Z", "avg_line_length": 38.9391080617, "max_line_length": 171, "alphanum_fraction": 0.5792128273, "include": true, "reason": "import numpy,import networkx", "num_tokens": 9716}
|
! { dg-do compile }
!
! Fixes of "accepts invalid".
! Note that the undeclared parameter 'y' in 't1' was originally in the
! type 't'. It turned out to be convenient to defer the error until the
! type is used in the declaration of 'z'.
!
! Contributed by Janus Weil <janus@gcc.gnu.org>
!
implicit none
type :: t(i,a,x) ! { dg-error "does not|has neither" }
integer, kind :: k ! { dg-error "does not not appear in the type parameter list" }
integer :: i ! { dg-error "has neither the KIND nor LEN attribute" }
integer, kind :: a(3) ! { dg-error "must be a scalar" }
real, kind :: x ! { dg-error "must be INTEGER" }
end type
type :: t1(k,y) ! { dg-error "does not have a component" }
integer, kind :: k
end type
! This is a knock-on from the previous error
type(t1(4,4)) :: z ! { dg-error "Invalid character in name" }
end
|
{"hexsha": "aeec407fb4bea637992d7342011a91faaecd004f", "size": 880, "ext": "f03", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/pdt_8.f03", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/pdt_8.f03", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/pdt_8.f03", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 35.2, "max_line_length": 88, "alphanum_fraction": 0.625, "num_tokens": 254}
|
from sklearn.metrics import davies_bouldin_score
from glob import glob
import pandas as pd
import numpy as np
def _ravel_and_annotate(df1, df2, df1_class, cat, div, e1, e2, f1_e1, f1_e2):
df = pd.DataFrame({
"x": df1.values.ravel(),
"y": df2.values.ravel(),
"class": df1_class.values.ravel()})
df.dropna(inplace=True)
dbs = davies_bouldin_score(df.iloc[:, :2].values, df["class"])
df["diversity"] = \
f"{cat} ({np.round(div, 2)}), " \
f"e1: {e1} ({np.round(f1_e1, 1)}), " \
f"e2: {e2} ({np.round(f1_e2, 1)}) - " \
f"DBS: {np.round(dbs, 2)}"
df["diversity_e1"], df["diversity_e2"] = f"{e1} - F1={np.round(f1_e1, 1)}", f"{e2} - F1={np.round(f1_e2, 1)}"
df["diversity_score"], df["diversity_dbs"] = np.round(div, 2), np.round(dbs, 2)
return df
def _get_data(base_dir, e1, e2):
df1 = pd.read_csv(glob(base_dir + f"*/y_prob_cv_{e1}.csv")[0], index_col=0)
df1_class = pd.read_csv(glob(base_dir + f"*/y_true_cv_{e1}.csv")[0], index_col=0)
df2 = pd.read_csv(glob(base_dir + f"*/y_prob_cv_{e2}.csv")[0], index_col=0)
return df1, df2, df1_class
def get_crap_combination(base_dir, df_div, medians):
i_crap, c_crap, div_crap = \
sorted([(s, i, df_div.loc[s, i])
for i, s in df_div.idxmin().items()],
key=lambda x: x[2],
reverse=True)[0]
df1_crap, df2_crap, df1_crap_class = _get_data(base_dir, i_crap, c_crap)
return _ravel_and_annotate(df1_crap, df2_crap, df1_crap_class,
cat=f"(a) crap", div=div_crap, e1=i_crap, e2=c_crap,
f1_e1=medians[i_crap], f1_e2=medians[c_crap])
def get_low_combination(base_dir, df_div_sub, medians):
i_low, c_low, div_low = \
sorted([(s, i, df_div_sub.loc[s, i])
for i, s in df_div_sub.idxmin().items()],
key=lambda x: x[2])[0]
df1_low, df2_low, df1_low_class = _get_data(base_dir, i_low, c_low)
return _ravel_and_annotate(df1_low, df2_low, df1_low_class,
cat=f"(b) low", div=div_low, e1=i_low, e2=c_low,
f1_e1=medians[i_low], f1_e2=medians[c_low])
def get_mid_combination(base_dir, df_div_sub, medians):
s = pd.Series([df_div_sub.loc[i, j]
for i, j in [(i, j)
for i in df_div_sub.index
for j in df_div_sub.columns]])
min_limit, max_limit = s.describe(percentiles=[0.3, 0.7])[["30%", "70%"]]
i_mid, c_mid, div_mid = sorted([(i, j, df_div_sub.loc[i, j])
for i, j in [(i, j) for i in df_div_sub.index for j in df_div_sub.columns]
if min_limit < df_div_sub.loc[i, j] < max_limit], key=lambda x: x[2])[0]
df1_mid, df2_mid, df1_mid_class = _get_data(base_dir, i_mid, c_mid)
return _ravel_and_annotate(df1_mid, df2_mid, df1_mid_class,
cat=f"(c) mid", div=div_mid, e1=i_mid, e2=c_mid,
f1_e1=medians[i_mid], f1_e2=medians[c_mid])
def get_high_combination(base_dir, df_div_sub, medians):
i_high, c_high, div_high = \
sorted([(s, i, df_div_sub.loc[s, i])
for i, s in df_div_sub.idxmax().items()],
key=lambda x: x[2])[-1]
df1_high, df2_high, df1_high_class = _get_data(base_dir, i_high, c_high)
return _ravel_and_annotate(df1_high, df2_high, df1_high_class,
cat=f"(d) high", div=div_high, e1=i_high, e2=c_high,
f1_e1=medians[i_high], f1_e2=medians[c_high])
def get_highest_f1_combination(base_dir, i_highest_f1, c_highest_f1, df_div_sub, medians):
div_highest_f1 = df_div_sub.loc[i_highest_f1, c_highest_f1]
df1_highest_f1, df2_highest_f1, df1_highest_f1_class = \
_get_data(base_dir, i_highest_f1, c_highest_f1)
return _ravel_and_annotate(df1_highest_f1, df2_highest_f1, df1_highest_f1_class,
cat=f"(e) highest f1", div=div_highest_f1, e1=i_highest_f1, e2=c_highest_f1,
f1_e1=medians[i_highest_f1], f1_e2=medians[c_highest_f1])
|
{"hexsha": "c8380d63822a25d1c96b06324aadf6b257c0d3ee", "size": 4242, "ext": "py", "lang": "Python", "max_stars_repo_path": "nodes/vis/sds_5_Diversity/scripts/pairwise_diversity.py", "max_stars_repo_name": "spaenigs/peptidereactor", "max_stars_repo_head_hexsha": "17efcb993505934f5b9c2d63f5cc040bb244dde9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-03T12:30:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-07T07:03:38.000Z", "max_issues_repo_path": "nodes/vis/sds_5_Diversity/scripts/pairwise_diversity.py", "max_issues_repo_name": "spaenigs/peptidereactor", "max_issues_repo_head_hexsha": "17efcb993505934f5b9c2d63f5cc040bb244dde9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-04T14:52:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-04T14:52:27.000Z", "max_forks_repo_path": "nodes/vis/sds_5_Diversity/scripts/pairwise_diversity.py", "max_forks_repo_name": "spaenigs/peptidereactor", "max_forks_repo_head_hexsha": "17efcb993505934f5b9c2d63f5cc040bb244dde9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-09T16:16:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-09T16:16:16.000Z", "avg_line_length": 48.2045454545, "max_line_length": 113, "alphanum_fraction": 0.5907590759, "include": true, "reason": "import numpy", "num_tokens": 1356}
|
function val=intCotPow(u,n)
%%INTCOTPOW Evaluate the integral of cot(u)^n du. A definite integral
% can be evaluated, or an indefinite integral (with a
% particular additive constant).
%
%INPUTS: u A 2XN (for definite integral) or a 1XN (for indefinite
% integrals) set of N points. For definite integrals, u(1,:) are
% the real lower bounds and u(2,:) are the real upper bounds. For
% indefinite integrals, the integral is evaluated at the points in
% u. The values in u should be real.
% n The positive integer exponent of the cotangent term.
%
%OUTPUTS: val The 1XN set of values of the integral of cot(u)^n.
%
%This function simply implements the recursion that arises from integration
%by parts from basic calculus, as are given in the tables in the back of
%[1].
%
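%Specifically, for n >= 2 the recursion implemented below is the standard
%reduction formula (restating the table identity from [1], in LaTeX form):
%   \int \cot^n u \, du = -\frac{\cot^{n-1} u}{n-1} - \int \cot^{n-2} u \, du
%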
%REFERENCES:
%[1] J. Stewart, Calculus, 7th ed. Belmont, CA: Brooks/Cole, 2012.
%
%October 2016 David F. Crouse, Naval Research Laboratory, Washington D.C.
%(UNCLASSIFIED) DISTRIBUTION STATEMENT A. Approved for public release.
numDim=size(u,1);
if(isempty(u))
val=[];
return;
end
if(numDim==1)%An indefinite integral
val=indefIntCotPow(u,n);
else%A definite integral
val=indefIntCotPow(u(2,:),n)-indefIntCotPow(u(1,:),n);
end
end
function val=indefIntCotPow(u,n)
if(n==0)
val=u;
return;
elseif(n==1)
val=log(abs(sin(u)));
return;
end
%If here, n>=2
cotVal=cot(u);
val=0;
if(mod(n,2)==0)%If n is even
endVal=2;
else
endVal=3;
end
coeffProd=1;
for k=n:-2:endVal
val=val-coeffProd*(1/(k-1))*cotVal.^(k-1);
coeffProd=-coeffProd;
end
if(endVal==2)
%The final integral is over cot^0
val=val+coeffProd*u;
else
%The final integral is over cot^1
val=val+coeffProd*log(abs(sin(u)));
end
end
%LICENSE:
%
%The source code is in the public domain and not licensed or under
%copyright. The information and software may be used freely by the public.
%As required by 17 U.S.C. 403, third parties producing copyrighted works
%consisting predominantly of the material produced by U.S. government
%agencies must provide notice with such work(s) identifying the U.S.
%Government material incorporated and stating that such material is not
%subject to copyright protection.
%
%Derived works shall not identify themselves in a manner that implies an
%endorsement by or an affiliation with the Naval Research Laboratory.
%
%RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE
%SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY THE NAVAL
%RESEARCH LABORATORY FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE ACTIONS
%OF RECIPIENT IN THE USE OF THE SOFTWARE.
|
{"author": "USNavalResearchLaboratory", "repo": "TrackerComponentLibrary", "sha": "9f6e329de5be06a371757c4b853200beb6def2d0", "save_path": "github-repos/MATLAB/USNavalResearchLaboratory-TrackerComponentLibrary", "path": "github-repos/MATLAB/USNavalResearchLaboratory-TrackerComponentLibrary/TrackerComponentLibrary-9f6e329de5be06a371757c4b853200beb6def2d0/Mathematical_Functions/Specific_Integrals/intCotPow.m"}
|
@testset "$TEST $G" begin
vm = ConstVertexMap(0)
@test typeof(vm) <: AVertexMap
@test get(vm, 1, 1) == 0
@test get(vm, -1, 1) == 0
vm[1] = 1
@test vm[1] == 0
@test haskey(vm, 1)
@test haskey(vm, -1)
@test length(vm) == typemax(Int)
g = G()
vm = VertexMap(g, rand(1:10,10))
@test typeof(vm) <: AVertexMap
@test haskey(vm, 2)
@test !haskey(vm, -1)
@test valtype(vm) == Int
@test get(vm, 1, -100) != -100
@test length(Vector(vm)) == 0
g = G(10,20)
v = rand(10)
vm = VertexMap(g, v)
@test Vector(vm) == v
vm = VertexMap(g, Float64)
@test valtype(vm) == Float64
@test typeof(vm) <: AVertexMap
vm[1] = 2.
@test vm[1] == 2
@test haskey(vm, 1)
@test !haskey(vm, 2)
@test get(vm, -1, -100) == -100
vm2 = deepcopy(vm)
@test vm2 == vm
# @test sprint(show, vm) == "VertexMap: $(vm.data)"
g = G(10,20)
m = VertexMap(g, i -> i^2)
@test typeof(m.data) == Vector{V}
@test length(m.data) == 10
@test length(m) == 10
for i=1:10
@test m[i] == i^2
end
m = VertexMap(g, i -> rand(2))
@test typeof(m.data) == Vector{Vector{Float64}}
@test length(m.data) == length(m) == nv(g)
@test eltype(m) == valtype(m) == eltype(m.data) == Vector{Float64}
end # testset
|
{"hexsha": "578f651687d2c39c844e0182b02d20f7bebe56c6", "size": 1150, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/maps/vertexmap.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_stars_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2017-02-24T15:54:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T19:59:23.000Z", "max_issues_repo_path": "test/maps/vertexmap.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_issues_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 76, "max_issues_repo_issues_event_min_datetime": "2017-02-23T09:31:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T09:10:31.000Z", "max_forks_repo_path": "test/maps/vertexmap.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Erdos.jl-90d7349d-81aa-5495-813a-883243abfe31", "max_forks_repo_head_hexsha": "2eb248772a05eac35823a07373dd5644913c6dbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2017-03-04T21:05:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T12:54:44.000Z", "avg_line_length": 20.1754385965, "max_line_length": 67, "alphanum_fraction": 0.5982608696, "num_tokens": 437}
|
import warnings
import numpy as np
import scipy
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter1d
from scipy.interpolate import UnivariateSpline
from astropy import log
from astropy.table import Table
from ..crossspectrum import AveragedCrossspectrum, normalize_crossspectrum
from ..powerspectrum import AveragedPowerspectrum
from ..gti import cross_two_gtis, bin_intervals_from_gtis
__all__ = ["calculate_FAD_correction", "get_periodograms_from_FAD_results"]
def _get_fourier_intv(lc, start_ind, end_ind):
"""Calculate the Fourier transform of a light curve chunk.
Parameters
----------
lc : a :class:`Lightcurve` object
Input light curve
start_ind : int
Start index of the light curve chunk
end_ind : int
End index of the light curve chunk
Returns
-------
freq : array of floats
Frequencies of the Fourier transform
fft : array of complex numbers
The Fourier transform
nph : int
Number of photons in the interval of the light curve
    nbins : int
        Number of bins in the light curve segment
"""
time = lc.time[start_ind:end_ind]
counts = lc.counts[start_ind:end_ind]
fourier = scipy.fftpack.fft(counts)
freq = scipy.fftpack.fftfreq(len(time), lc.dt)
good = freq > 0
nbins = time.size
return freq[good], fourier[good], np.sum(counts), nbins
def calculate_FAD_correction(lc1, lc2, segment_size, norm="none", gti=None,
plot=False, ax=None, smoothing_alg='gauss',
smoothing_length=None, verbose=False,
tolerance=0.05, strict=False, all_leahy=False,
output_file=None, return_objects=False):
"""Calculate Frequency Amplitude Difference-corrected (cross)power spectra.
    Reference: Bachetti & Huppenkothen, 2018, ApJ, 853L, 21
    The two input light curves must be strictly simultaneous, and recorded by
two independent detectors with similar responses, so that the count rates
are similar and dead time is independent.
The method does not apply to different energy channels of the same
instrument, or to the signal observed by two instruments with very
different responses. See the paper for caveats.
Parameters
----------
    lc1: class:`stingray.lightcurve.Lightcurve`
        Light curve from channel 1
    lc2: class:`stingray.lightcurve.Lightcurve`
Light curve from channel 2. Must be strictly simultaneous to ``lc1``
and have the same binning time. Also, it must be strictly independent,
e.g. from a different detector. There must be no dead time cross-talk
between the two light curves.
segment_size: float
The final Fourier products are averaged over many segments of the
input light curves. This is the length of each segment being averaged.
Note that the light curve must be long enough to have at least 30
segments, as the result gets better as one averages more and more
segments.
    norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``
        The normalization of the (real part of the) cross spectrum.
    gti: [[gti0_0, gti0_1], [gti1_0, gti1_1], ...], default None
        The common Good Time Intervals. If None, the intersection of the
        GTIs of the two input light curves is used.
Other parameters
----------------
plot : bool, default False
Plot diagnostics: check if the smoothed Fourier difference scatter is
a good approximation of the data scatter.
ax : :class:`matplotlib.axes.axes` object
If not None and ``plot`` is True, use this axis object to produce
the diagnostic plot. Otherwise, create a new figure.
smoothing_alg : {'gauss', ...}
Smoothing algorithm. For now, the only smoothing algorithm allowed is
``gauss``, which applies a Gaussian Filter from `scipy`.
smoothing_length : int, default ``segment_size * 3``
Number of bins to smooth in gaussian window smoothing
verbose: bool, default False
Print out information on the outcome of the algorithm (recommended)
tolerance : float, default 0.05
Accepted relative error on the FAD-corrected Fourier amplitude, to be
used as success diagnostics.
Should be
```
stdtheor = 2 / np.sqrt(n)
std = (average_corrected_fourier_diff / n).std()
np.abs((std - stdtheor) / stdtheor) < tolerance
```
strict : bool, default False
Decide what to do if the condition on tolerance is not met. If True,
raise a ``RuntimeError``. If False, just throw a warning.
all_leahy : **deprecated** bool, default False
Save all spectra in Leahy normalization. Otherwise, leave unnormalized.
output_file : str, default None
Name of an output file (any extension automatically recognized by
Astropy is fine)
Returns
-------
results : class:`astropy.table.Table` object or ``dict`` or ``str``
The content of ``results`` depends on whether ``return_objects`` is
True or False.
If ``return_objects==False``,
``results`` is a `Table` with the following columns:
+ pds1: the corrected PDS of ``lc1``
+ pds2: the corrected PDS of ``lc2``
+ cs: the corrected cospectrum
+ ptot: the corrected PDS of lc1 + lc2
If ``return_objects`` is True, ``results`` is a ``dict``, with keys
named like the columns
listed above but with `AveragePowerspectrum` or
`AverageCrossspectrum` objects instead of arrays.
"""
if smoothing_length is None:
smoothing_length = segment_size * 3
if gti is None:
gti = cross_two_gtis(lc1.gti, lc2.gti)
if all_leahy:
warnings.warn("`all_leahy` is deprecated. Use `norm` instead! " +
" Setting `norm`=`leahy`.", DeprecationWarning)
norm="leahy"
lc1.gti = gti
lc2.gti = gti
lc1.apply_gtis()
lc2.apply_gtis()
summed_lc = lc1 + lc2
start_inds, end_inds = \
bin_intervals_from_gtis(gti, segment_size, lc1.time,
dt=lc1.dt)
freq = 0
# These will be the final averaged periodograms. Initializing with a single
# scalar 0, but the final products will be arrays.
pds1 = 0
pds2 = 0
ptot = 0
cs = 0
n = 0
nph1_tot = nph2_tot = nph_tot = 0
average_diff = average_diff_uncorr = 0
if plot:
if ax is None:
fig, ax = plt.subplots()
for start_ind, end_ind in zip(start_inds, end_inds):
freq, f1, nph1, nbins1 = _get_fourier_intv(lc1, start_ind,
end_ind)
f1_leahy = f1 * np.sqrt(2 / nph1)
freq, f2, nph2, nbins2 = _get_fourier_intv(lc2, start_ind,
end_ind)
f2_leahy = f2 * np.sqrt(2 / nph2)
freq, ftot, nphtot, nbinstot = \
_get_fourier_intv(summed_lc, start_ind, end_ind)
ftot_leahy = ftot * np.sqrt(2 / nphtot)
nph1_tot += nph1
nph2_tot += nph2
nph_tot += nphtot
fourier_diff = f1_leahy - f2_leahy
if smoothing_alg == 'gauss':
smooth_real = gaussian_filter1d(fourier_diff.real ** 2,
smoothing_length)
else:
raise ValueError("Unknown smoothing algorithm: {}".format(
smoothing_alg))
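        # The smoothed squared Fourier differences trace the local (dead-time
        # distorted) noise power; dividing the raw powers by this curve below
        # is the FAD correction itself (see the reference in the docstring).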
if plot:
ax.scatter(freq, fourier_diff.real, s=1)
p1 = (f1 * f1.conj()).real
p1 = p1 / smooth_real * 2
p2 = (f2 * f2.conj()).real
p2 = p2 / smooth_real * 2
pt = (ftot * ftot.conj()).real
pt = pt / smooth_real * 2
c = (f2 * f1.conj()).real
c = c / smooth_real * 2
power1 = normalize_crossspectrum(p1, segment_size, nbins1, nph1,
nph1, norm=norm)
power2 = normalize_crossspectrum(p2, segment_size, nbins2, nph2,
nph2, norm=norm)
power_tot = normalize_crossspectrum(pt, segment_size, nbinstot, nphtot,
nphtot, norm=norm)
cs_power = normalize_crossspectrum(c, segment_size, nbins1, nph1,
nph2, norm=norm)
if n == 0 and plot:
ax.plot(freq, smooth_real, zorder=10, lw=3)
ax.plot(freq, f1_leahy.real, zorder=5, lw=1)
ax.plot(freq, f2_leahy.real, zorder=5, lw=1)
ptot += power_tot
pds1 += power1
pds2 += power2
cs += cs_power
average_diff += fourier_diff / smooth_real ** 0.5 * np.sqrt(2)
average_diff_uncorr += fourier_diff
n += 1
std = (average_diff / n).std()
stdtheor = 2 / np.sqrt(n)
stduncorr = (average_diff_uncorr / n).std()
is_compliant = np.abs((std - stdtheor) / stdtheor) < tolerance
verbose_string = \
'''
-------- FAD correction ----------
I smoothed over {smoothing_length} power spectral bins
{n} intervals averaged.
The uncorrected standard deviation of the Fourier
differences is {stduncorr} (dead-time affected!)
The final standard deviation of the FAD-corrected
Fourier differences is {std}. For the results to be
acceptable, this should be close to {stdtheor}
to within {tolerance} %.
In this case, the results ARE {compl}complying.
{additional}
----------------------------------
'''.format(smoothing_length=smoothing_length,
n=n,
stduncorr=stduncorr,
std=std,
stdtheor=stdtheor,
tolerance=tolerance * 100,
compl='NOT ' if not is_compliant else '',
additional='Maybe something is not right.' if not is_compliant else '')
if verbose and is_compliant:
log.info(verbose_string)
elif not is_compliant:
warnings.warn(verbose_string)
if strict and not is_compliant:
raise RuntimeError('Results are not compliant, and `strict` mode '
'selected. Exiting.')
results = Table()
print("n: " + str(n))
results['freq'] = freq
results['pds1'] = pds1 / n
results['pds2'] = pds2 / n
results['cs'] = cs / n
results['ptot'] = ptot / n
results['fad'] = average_diff / n
results.meta['fad_delta'] = (std - stdtheor) / stdtheor
results.meta['is_compliant'] = is_compliant
results.meta['n'] = n
results.meta['nph1'] = nph1_tot
results.meta['nph2'] = nph2_tot
results.meta['nph'] = nph_tot
    results.meta['norm'] = norm
results.meta['smoothing_length'] = smoothing_length
results.meta['df'] = np.mean(np.diff(freq))
if output_file is not None:
results.write(output_file, overwrite=True)
if return_objects:
result_table = results
results = {}
results['pds1'] = \
get_periodograms_from_FAD_results(result_table, kind='pds1')
results['pds2'] = \
get_periodograms_from_FAD_results(result_table, kind='pds2')
results['cs'] = \
get_periodograms_from_FAD_results(result_table, kind='cs')
results['ptot'] = \
get_periodograms_from_FAD_results(result_table, kind='ptot')
results['fad'] = result_table['fad']
return results
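# Minimal usage sketch (a hedged example, not part of the API: ``lc1`` and
# ``lc2`` below stand for two strictly simultaneous Lightcurve objects from
# independent detectors, and the segment size is an arbitrary value):
#
#     fad = calculate_FAD_correction(lc1, lc2, segment_size=256.,
#                                    norm="leahy", return_objects=True)
#     pds1 = fad['pds1']    # FAD-corrected power spectrum of lc1
#     cs = fad['cs']        # FAD-corrected cospectrum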
def get_periodograms_from_FAD_results(FAD_results, kind='ptot'):
"""Get Stingray periodograms from FAD results.
Parameters
----------
FAD_results : :class:`astropy.table.Table` object or `str`
Results from `calculate_FAD_correction`, either as a Table or an output
file name
kind : :class:`str`, one of ['ptot', 'pds1', 'pds2', 'cs']
Kind of periodogram to get (E.g., 'ptot' -> PDS from the sum of the two
light curves, 'cs' -> cospectrum, etc.)
Returns
-------
results : `AveragedCrossspectrum` or `Averagedpowerspectrum` object
The periodogram.
"""
if isinstance(FAD_results, str):
FAD_results = Table.read(FAD_results)
if kind.startswith('p') and kind in FAD_results.colnames:
powersp = AveragedPowerspectrum()
powersp.nphot = FAD_results.meta['nph']
if '1' in kind:
powersp.nphots = FAD_results.meta['nph1']
elif '2' in kind:
powersp.nphots = FAD_results.meta['nph2']
elif kind == 'cs':
powersp = AveragedCrossspectrum(power_type='real')
powersp.nphots1 = FAD_results.meta['nph1']
powersp.nphots2 = FAD_results.meta['nph2']
else:
raise ValueError("Unknown periodogram type")
powersp.freq = FAD_results['freq']
powersp.power = FAD_results[kind]
powersp.power_err = np.zeros_like(powersp.power)
powersp.m = FAD_results.meta['n']
powersp.df = FAD_results.meta['df']
powersp.n = len(powersp.freq)
powersp.norm = FAD_results.meta['norm']
return powersp
|
{"hexsha": "162dcab3605ff57bb719e60a8d38e0295ed5f2bd", "size": 13024, "ext": "py", "lang": "Python", "max_stars_repo_path": "stingray/deadtime/fad.py", "max_stars_repo_name": "jdswinbank/stingray", "max_stars_repo_head_hexsha": "1a96e9fb78d1129feb7f28a4c49bd4bac25bdba6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stingray/deadtime/fad.py", "max_issues_repo_name": "jdswinbank/stingray", "max_issues_repo_head_hexsha": "1a96e9fb78d1129feb7f28a4c49bd4bac25bdba6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-24T21:21:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-24T21:21:54.000Z", "max_forks_repo_path": "stingray/deadtime/fad.py", "max_forks_repo_name": "jdswinbank/stingray", "max_forks_repo_head_hexsha": "1a96e9fb78d1129feb7f28a4c49bd4bac25bdba6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8787878788, "max_line_length": 86, "alphanum_fraction": 0.6161701474, "include": true, "reason": "import numpy,import scipy,from scipy,from astropy", "num_tokens": 3333}
|
import numpy as np
from sklearn.preprocessing import LabelBinarizer
class GaussianNB(object):
    """
    Naive Bayes classifier for continuous-valued features.
    """
    @staticmethod
    def gaussfunc(x, mu, var):
        """Joint Gaussian density over independent features.
        :param x: data, shape = [n_samples, n_features]
        :param mu: per-feature means, shape = [n_features]
        :param var: per-feature variances, shape = [n_features]
        :return: density of each sample, shape = [n_samples]
        """
        var = var + 1e-9  # guard against zero-variance features
        numerator = np.exp(-np.sum((x - mu) ** 2 / (2 * var), axis=1))
        return numerator / np.sqrt(np.prod(2 * np.pi * var))
def fit(self, X, y):
"""
        :param X: shape = [n_samples, n_features]
:param y: shape = [n_samples]
:return: self
"""
self.classes, self.classes_count = np.unique(y, return_counts=True)
self.mean = np.zeros((self.classes_count.shape[0],
X.shape[1]), dtype=np.float64)
self.var = np.zeros((self.classes_count.shape[0],
X.shape[1]), dtype=np.float64)
for i, label in enumerate(self.classes):
x_i = X[y == label]
self.mean[i, :] = np.mean(x_i, axis=0)
self.var[i, :] = np.var(x_i, axis=0)
return self
def predict(self, X):
"""
:param X: shape = [n_samples, n_features]
:return: shape = [n_samples]
"""
likelihood = []
for i in range(self.classes.shape[0]):
likelihood.append(self.classes_count[i] *
GaussianNB.gaussfunc(X, self.mean[i, :],
self.var[i, :]))
likelihood = np.array(likelihood).T
        return self.classes[np.argmax(likelihood, axis=1)]
class MultinomialNB(object):
"""
    Naive Bayes classifier for discrete (count-valued) features.
"""
def __init__(self, alpha=1.0):
self.alpha = alpha
def fit(self, X, y):
"""
        :param X: shape = [n_samples, n_features]
:param y: shape = [n_samples]
:return: self
"""
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes = labelbin.classes_
self.class_count = np.zeros(Y.shape[1], dtype=np.float64)
self.feature_count = np.zeros((Y.shape[1], X.shape[1]),
dtype=np.float64)
self.feature_count += Y.T @ X
self.class_count += Y.sum(axis=0)
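        # Laplace/Lidstone smoothing: add pseudo-count alpha to every feature
        # count before taking logs, so unseen features cannot zero out the
        # class likelihood.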
smoothed_fc = self.feature_count + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def predict(self, X):
"""
:param X: shape = [n_samples, n_features]
:return: shape = [n_samples]
"""
        # include the log class prior so more frequent classes are favored
        class_log_prior = np.log(self.class_count / self.class_count.sum())
        likelihood = X @ self.feature_log_prob.T + class_log_prior
        return self.classes[np.argmax(likelihood, axis=1)]
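# Minimal smoke test (a sketch with synthetic data, not part of the module;
# the sample sizes and distribution parameters below are arbitrary choices):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(4, 1, (50, 2))])
    y = np.array([0] * 50 + [1] * 50)
    clf = GaussianNB()
    clf.fit(X, y)
    # two well-separated blobs: training accuracy should be close to 1.0
    print((clf.predict(X) == y).mean())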
|
{"hexsha": "ebec2d5979871f692178f522264b4f6542c093ee", "size": 2781, "ext": "py", "lang": "Python", "max_stars_repo_path": "plume/naive_bayes.py", "max_stars_repo_name": "KEVINYZY/plume", "max_stars_repo_head_hexsha": "ae7b688d0d7b5f3dd5eeb975a302e8a5524d4255", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2018-03-06T18:01:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-09T16:17:02.000Z", "max_issues_repo_path": "plume/naive_bayes.py", "max_issues_repo_name": "liuslnlp/plume", "max_issues_repo_head_hexsha": "dbd523861bfb9abad8a52b1de28de85c0f128807", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plume/naive_bayes.py", "max_forks_repo_name": "liuslnlp/plume", "max_forks_repo_head_hexsha": "dbd523861bfb9abad8a52b1de28de85c0f128807", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2017-08-12T10:24:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-27T03:15:30.000Z", "avg_line_length": 30.9, "max_line_length": 75, "alphanum_fraction": 0.5195972672, "include": true, "reason": "import numpy", "num_tokens": 727}
|
[STATEMENT]
lemma store_instr_privilege:
assumes a1: "s' = snd (fst (store_instr instr
(s::(('a::len) sparc_state))))
\<and> (((get_S (cpu_reg_val PSR s)))::word1) = 0"
shows "(((get_S (cpu_reg_val PSR
(s'::(('a::len) sparc_state)))))::word1) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. get_S (cpu_reg_val PSR s') = 0
[PROOF STEP]
using a1
[PROOF STATE]
proof (prove)
using this:
s' = snd (fst (store_instr instr s)) \<and> get_S (cpu_reg_val PSR s) = 0
goal (1 subgoal):
1. get_S (cpu_reg_val PSR s') = 0
[PROOF STEP]
apply (simp add: store_instr_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s' = snd (fst ((let instr_name = fst instr; op_list = snd instr in do psr_val \<leftarrow> gets (cpu_reg_val PSR);
if (instr_name = load_store_type STA \<or> instr_name = load_store_type STDA \<or> instr_name = load_store_type STHA \<or> instr_name = load_store_type STBA) \<and> get_S psr_val = 0 then do raise_trap privileged_instruction;
return ()
od else if instr_name \<in> {load_store_type STA, load_store_type STDA, load_store_type STHA, load_store_type STBA} \<and> get_operand_flag (op_list ! 0) = 1 then do raise_trap illegal_instruction;
return ()
od else store_sub1 instr (if instr_name = load_store_type STA \<or> instr_name = load_store_type STBA \<or> instr_name = load_store_type STHA \<or> instr_name = load_store_type STDA then get_operand_w5 (op_list ! 4) else get_operand_w5 (op_list ! 3)) (get_S psr_val)
od) s)) \<and> get_S (cpu_reg_val PSR s) = 0 \<Longrightarrow> get_S (cpu_reg_val PSR (snd (fst ((let instr_name = fst instr; op_list = snd instr in do psr_val \<leftarrow> gets (cpu_reg_val PSR);
if (instr_name = load_store_type STA \<or> instr_name = load_store_type STDA \<or> instr_name = load_store_type STHA \<or> instr_name = load_store_type STBA) \<and> get_S psr_val = 0 then do raise_trap privileged_instruction;
return ()
od else if instr_name \<in> {load_store_type STA, load_store_type STDA, load_store_type STHA, load_store_type STBA} \<and> get_operand_flag (op_list ! 0) = 1 then do raise_trap illegal_instruction;
return ()
od else store_sub1 instr (if instr_name = load_store_type STA \<or> instr_name = load_store_type STBA \<or> instr_name = load_store_type STHA \<or> instr_name = load_store_type STDA then get_operand_w5 (op_list ! 4) else get_operand_w5 (op_list ! 3)) (get_S psr_val)
od) s)))) = 0
[PROOF STEP]
apply (simp add: simpler_gets_def bind_def h1_def h2_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s' = snd (fst ((let instr_name = fst instr; op_list = snd instr in (\<lambda>s. (if (instr_name = load_store_type STA \<or> instr_name = load_store_type STDA \<or> instr_name = load_store_type STHA \<or> instr_name = load_store_type STBA) \<and> get_S (cpu_reg_val PSR s) = 0 then do raise_trap privileged_instruction;
return ()
od else if instr_name \<in> {load_store_type STA, load_store_type STDA, load_store_type STHA, load_store_type STBA} \<and> get_operand_flag (op_list ! 0) = 1 then do raise_trap illegal_instruction;
return ()
od else store_sub1 instr (if instr_name = load_store_type STA \<or> instr_name = load_store_type STBA \<or> instr_name = load_store_type STHA \<or> instr_name = load_store_type STDA then get_operand_w5 (op_list ! 4) else get_operand_w5 (op_list ! 3)) (get_S (cpu_reg_val PSR s))) s)) s)) \<and> get_S (cpu_reg_val PSR s) = 0 \<Longrightarrow> get_S (cpu_reg_val PSR (snd (fst ((let instr_name = fst instr; op_list = snd instr in (\<lambda>s. (if (instr_name = load_store_type STA \<or> instr_name = load_store_type STDA \<or> instr_name = load_store_type STHA \<or> instr_name = load_store_type STBA) \<and> get_S (cpu_reg_val PSR s) = 0 then do raise_trap privileged_instruction;
return ()
od else if instr_name \<in> {load_store_type STA, load_store_type STDA, load_store_type STHA, load_store_type STBA} \<and> get_operand_flag (op_list ! 0) = 1 then do raise_trap illegal_instruction;
return ()
od else store_sub1 instr (if instr_name = load_store_type STA \<or> instr_name = load_store_type STBA \<or> instr_name = load_store_type STHA \<or> instr_name = load_store_type STDA then get_operand_w5 (op_list ! 4) else get_operand_w5 (op_list ! 3)) (get_S (cpu_reg_val PSR s))) s)) s)))) = 0
[PROOF STEP]
apply (simp add: Let_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s' = snd (fst ((if (fst instr = load_store_type STA \<or> fst instr = load_store_type STDA \<or> fst instr = load_store_type STHA \<or> fst instr = load_store_type STBA) \<and> get_S (cpu_reg_val PSR s) = 0 then do raise_trap privileged_instruction;
return ()
od else if fst instr \<in> {load_store_type STA, load_store_type STDA, load_store_type STHA, load_store_type STBA} \<and> get_operand_flag (snd instr ! 0) = 1 then do raise_trap illegal_instruction;
return ()
od else store_sub1 instr (if fst instr = load_store_type STA \<or> fst instr = load_store_type STBA \<or> fst instr = load_store_type STHA \<or> fst instr = load_store_type STDA then get_operand_w5 (snd instr ! 4) else get_operand_w5 (snd instr ! 3)) (get_S (cpu_reg_val PSR s))) s)) \<and> get_S (cpu_reg_val PSR s) = 0 \<Longrightarrow> (fst instr = load_store_type STA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (raise_trap privileged_instruction s)))) = 0) \<and> (fst instr = load_store_type STDA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (raise_trap privileged_instruction s)))) = 0) \<and> (fst instr = load_store_type STHA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (raise_trap privileged_instruction s)))) = 0) \<and> (fst instr = load_store_type STBA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (raise_trap privileged_instruction s)))) = 0) \<and> (fst instr \<noteq> load_store_type STA \<and> fst instr \<noteq> load_store_type STDA \<and> fst instr \<noteq> load_store_type STHA \<and> fst instr \<noteq> load_store_type STBA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (store_sub1 instr (get_operand_w5 (snd instr ! 3)) 0 s)))) = 0)
[PROOF STEP]
using raise_trap_privilege store_sub1_privilege
[PROOF STATE]
proof (prove)
using this:
get_S (cpu_reg_val PSR ?s) = 0 \<and> ?s' = snd (fst (raise_trap ?t ?s)) \<Longrightarrow> get_S (cpu_reg_val PSR ?s') = 0
?s' = snd (fst (store_sub1 ?instr ?rd ?s_val ?s)) \<and> get_S (cpu_reg_val PSR ?s) = 0 \<Longrightarrow> get_S (cpu_reg_val PSR ?s') = 0
goal (1 subgoal):
1. s' = snd (fst ((if (fst instr = load_store_type STA \<or> fst instr = load_store_type STDA \<or> fst instr = load_store_type STHA \<or> fst instr = load_store_type STBA) \<and> get_S (cpu_reg_val PSR s) = 0 then do raise_trap privileged_instruction;
return ()
od else if fst instr \<in> {load_store_type STA, load_store_type STDA, load_store_type STHA, load_store_type STBA} \<and> get_operand_flag (snd instr ! 0) = 1 then do raise_trap illegal_instruction;
return ()
od else store_sub1 instr (if fst instr = load_store_type STA \<or> fst instr = load_store_type STBA \<or> fst instr = load_store_type STHA \<or> fst instr = load_store_type STDA then get_operand_w5 (snd instr ! 4) else get_operand_w5 (snd instr ! 3)) (get_S (cpu_reg_val PSR s))) s)) \<and> get_S (cpu_reg_val PSR s) = 0 \<Longrightarrow> (fst instr = load_store_type STA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (raise_trap privileged_instruction s)))) = 0) \<and> (fst instr = load_store_type STDA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (raise_trap privileged_instruction s)))) = 0) \<and> (fst instr = load_store_type STHA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (raise_trap privileged_instruction s)))) = 0) \<and> (fst instr = load_store_type STBA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (raise_trap privileged_instruction s)))) = 0) \<and> (fst instr \<noteq> load_store_type STA \<and> fst instr \<noteq> load_store_type STDA \<and> fst instr \<noteq> load_store_type STHA \<and> fst instr \<noteq> load_store_type STBA \<longrightarrow> get_S (cpu_reg_val PSR (snd (fst (store_sub1 instr (get_operand_w5 (snd instr ! 3)) 0 s)))) = 0)
[PROOF STEP]
by blast
|
{"llama_tokens": 3299, "file": "SPARCv8_SparcModel_MMU_Sparc_Properties", "length": 6}
|
C @(#)newptibs.f 20.2 3/29/99
C****************************************************************
C
C File: newptibs.f
C
C       Purpose: Routine to create a new, unique bus name
C
c Return code: n = 0 : Success
c n = 1 : Error - cannot create a unique name
c                    n = 2 : Error - more than MAXBUS entities
c
C Author: Walt Powell Date: 14 Jan 1999
C Called by: ldptibus.f
C
C****************************************************************
integer function newptibs (busname, basekv, kx, indx)
integer kx, indx
character *(*) busname
real basekv
include 'ipfinc/parametr.inc'
include 'ipfinc/prt.inc'
include 'ipfinc/pti_data.inc'
include 'ipfinc/blank.inc'
include 'ipfinc/alt_case.inc'
include 'ipfinc/alpha.inc'
include 'ipfinc/bus.inc'
include 'ipfinc/cbus.inc'
include 'ipfinc/area.inc'
include 'ipfinc/wsccbase.inc'
logical finished
integer add_bus
newptibs = 0
finished = .false.
itag = 48 ! Begin with "0,1,2, ..."
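c     itag holds an ASCII code: 48-57 map to "0"-"9", 65-90 to "A"-"Z"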
do while (.not. finished)
write (busname, 10040) busname(1:6), '#', char(itag)
10040 format (a, a, a)
indx = add_bus (busname, basekv, kx)
if (indx .gt. 0) then
finished = .true.
else if (indx .eq. 0) then
finished = .true.
newptibs = 2
else if (itag .eq. 57) then
          itag = 65            ! Change to "A,B,C ..."
else if (itag .ge. 90) then
finished = .true.
newptibs = 1
else
itag = itag + 1
endif
enddo
return
end
|
{"hexsha": "3fb0af42f59f7c2e541837c4aa25da36c225e928", "size": 1812, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ipf/newptibs.f", "max_stars_repo_name": "mbheinen/bpa-ipf-tsp", "max_stars_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-04-02T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T08:57:45.000Z", "max_issues_repo_path": "ipf/newptibs.f", "max_issues_repo_name": "cuihantao/bpa-ipf-tsp", "max_issues_repo_head_hexsha": "cb2d0917ae42eff571017e9162f550f87900b83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-08T14:21:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-13T01:27:56.000Z", "max_forks_repo_path": "ipf/newptibs.f", "max_forks_repo_name": "mbheinen/bpa-ipf-tsp", "max_forks_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-03T04:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:04:31.000Z", "avg_line_length": 29.7049180328, "max_line_length": 67, "alphanum_fraction": 0.460816777, "num_tokens": 514}
|
! =============================================================================
! Test netCDF time series
!
! This unit test checks to write multiple time steps.
! =============================================================================
program test_netcdf_time_series_2
use unit_test
use netcdf_writer
implicit none
integer, parameter :: nx = 5, ny = 10, nt = 3
integer :: ix, iy, ncid, dimids(3), t
integer :: var_id = -1, cnt(3), start(3)
double precision :: dset(ny, nx)
logical :: passed
do ix = 1, nx
do iy = 1, ny
dset(iy, ix) = iy + (ix-1) * ny
enddo
enddo
call create_netcdf_file(ncfname='nctest.nc', &
overwrite=.true., &
ncid=ncid)
call define_netcdf_dimension(ncid, "x", nx, dimids(1))
call define_netcdf_dimension(ncid, "y", ny, dimids(2))
call define_netcdf_dimension(ncid, "t", NF90_UNLIMITED, dimids(3))
call define_netcdf_dataset(ncid, 'x_velocity', '', '', 'm/s', NF90_DOUBLE, dimids, var_id)
call close_definition(ncid)
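    ! The "t" dimension is unlimited, so each pass through the loop below
    ! appends one new time slice by advancing start(3)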
do t = 1, nt
cnt = (/ nx, ny, 1 /)
start = (/ 1, 1, t /)
call open_netcdf_file(ncfname='nctest.nc', &
access_flag=NF90_WRITE, &
ncid=ncid)
call write_netcdf_dataset(ncid, var_id, dset, start, cnt)
call close_netcdf_file(ncid)
dset = 1.0d0 + dset
enddo
passed = (ncerr == 0)
call delete_netcdf_file(ncfname='nctest.nc')
call print_result_logical('Test netCDF write time series 2', passed)
end program test_netcdf_time_series_2
|
{"hexsha": "93c6fb232eb8ea4ed3f4b6c42e908d533bb2f1cb", "size": 1740, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "unit-tests/netcdf/test_netcdf_time_series_2.f90", "max_stars_repo_name": "matt-frey/epic", "max_stars_repo_head_hexsha": "954ebc44f2c041eee98bd14e22a85540a0c6c4bb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-11-11T10:50:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T00:11:41.000Z", "max_issues_repo_path": "unit-tests/netcdf/test_netcdf_time_series_2.f90", "max_issues_repo_name": "matt-frey/epic", "max_issues_repo_head_hexsha": "954ebc44f2c041eee98bd14e22a85540a0c6c4bb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 47, "max_issues_repo_issues_event_min_datetime": "2021-11-12T17:09:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T16:50:58.000Z", "max_forks_repo_path": "unit-tests/netcdf/test_netcdf_time_series_2.f90", "max_forks_repo_name": "matt-frey/epic", "max_forks_repo_head_hexsha": "954ebc44f2c041eee98bd14e22a85540a0c6c4bb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4915254237, "max_line_length": 94, "alphanum_fraction": 0.5040229885, "num_tokens": 468}
|
# -*- coding: utf-8 -*-
"""
Created on 10/25/2021
@author: maxcurie
"""
import pandas as pd
import numpy as np
import csv
import time
import random
import concurrent.futures as future #for CPU parallelization
import sys
sys.path.insert(1, './../Tools')
from DispersionRelationDeterminantFullConductivityZeff import Dispersion
#**********Start of user block***************
path='.'
Output_csv=path+'/0MTM_scan.csv'
nu_list=np.arange(1.,10,1.)
zeff_list=np.arange(1,2.5,0.5)
eta_list=np.arange(0.5,5,0.8)
shat_list=np.arange(0.02,0.1,0.02)
beta_list=np.arange(0.0005,0.003,0.0005)
ky_list=np.arange(0.01,0.1,0.01)
mu_list=np.arange(0,2,0.3)
xstar=10.
ModIndex=1 #global dispersion
#**********end of user block****************
def Dispersion_calc(para_list):
[nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar,Output_csv]=para_list
w0=Dispersion(nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar)
    omega = np.real(w0)
    gamma = np.imag(w0)
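    # Note: every worker process appends to the same CSV file. Small
    # single-row appends are usually atomic on POSIX systems, but that is an
    # assumption about the platform, not a guarantee of the csv module.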
with open(Output_csv, 'a+', newline='') as csvfile: #adding a row
csv_data = csv.writer(csvfile, delimiter=',')
csv_data.writerow([ omega, gamma,nu,zeff,eta,shat,beta,ky,\
ModIndex,mu,xstar ])
csvfile.close()
return w0
if __name__ == '__main__':
with open(Output_csv, 'w', newline='') as csvfile: #clear all and then write a row
csv_data = csv.writer(csvfile, delimiter=',')
csv_data.writerow(['omega_omega_n','gamma_omega_n',\
'nu','zeff','eta','shat','beta','ky',\
'ModIndex','mu','xstar'])
csvfile.close()
para_list=[]
for nu in nu_list:
for zeff in zeff_list:
for eta in eta_list:
for shat in shat_list:
for beta in beta_list:
for ky in ky_list:
for mu in mu_list:
para_list.append([nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar,Output_csv])
print(para_list[0])
random.shuffle(para_list)
print(para_list[0])
print('***********************************')
print('*********paraellel calcuation******')
start=time.time()
with future.ProcessPoolExecutor() as executor:
results = executor.map(Dispersion_calc, para_list)
for result0 in results:
print( 'omega='+str(result0) )
end=time.time()
print(f"Runtime of the program is {end - start} s")
|
{"hexsha": "4988be8eafcc9f1eea84b1b23d9ebec5d2a20217", "size": 2413, "ext": "py", "lang": "Python", "max_stars_repo_path": "SLiM_NN/0MTMDispersion_list_Calc_parallel.py", "max_stars_repo_name": "maxcurie1996/SLiM", "max_stars_repo_head_hexsha": "bdb480f7e73ce9d9b3ff58e4b0245c514e2c64ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SLiM_NN/0MTMDispersion_list_Calc_parallel.py", "max_issues_repo_name": "maxcurie1996/SLiM", "max_issues_repo_head_hexsha": "bdb480f7e73ce9d9b3ff58e4b0245c514e2c64ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SLiM_NN/0MTMDispersion_list_Calc_parallel.py", "max_forks_repo_name": "maxcurie1996/SLiM", "max_forks_repo_head_hexsha": "bdb480f7e73ce9d9b3ff58e4b0245c514e2c64ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1625, "max_line_length": 105, "alphanum_fraction": 0.6021549938, "include": true, "reason": "import numpy", "num_tokens": 676}
|
import os
import numpy as np
import matplotlib as mpl
mpl.rcParams['axes.formatter.useoffset'] = False
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.collections as collections
import matplotlib.lines as mpll
import matplotlib.colors as mplc
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import atm
def emaps(planet, eigeny, outdir, proj='ortho'):
ncurves, ny = eigeny.shape
if proj == 'ortho':
extent = (-90, 90, -90, 90)
fname = 'emaps-ecl.png'
elif proj == 'rect':
extent = (-180, 180, -90, 90)
fname = 'emaps-rect.png'
elif proj == 'moll':
extent = (-180, 180, -90, 90)
fname = 'emaps-moll.png'
    # np.int was removed in NumPy 1.24; use the builtin int instead
    lmax = int(ny**0.5 - 1)
    ncols = int(np.sqrt(ncurves) // 1)
    nrows = int(ncurves // ncols + (ncurves % ncols != 0))
npane = ncols * nrows
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, squeeze=False,
sharex=True, sharey=True)
for j in range(ncurves):
planet.map[1:,:] = 0
xloc = j % ncols
yloc = j // ncols
ax = axes[yloc, xloc]
yi = 1
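        # eigeny[j, 0] is the constant (l = 0) term and is skipped here;
        # the remaining coefficients fill the l >= 1 harmonics in order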
for l in range(1, lmax + 1):
for m in range(-l, l + 1):
planet.map[l, m] = eigeny[j, yi]
yi += 1
ax.imshow(planet.map.render(theta=0, projection=proj).eval(),
origin="lower",
cmap="plasma",
extent=extent)
# Axes are wrong for non-rectangular projections
if proj == 'ortho' or proj == 'moll':
ax.axis('off')
# Empty subplots
for j in range(ncurves, npane):
xloc = j % ncols
yloc = j // ncols
ax = axes[yloc, xloc]
ax.axis('off')
fig.tight_layout()
plt.savefig(os.path.join(outdir, fname))
plt.close(fig)
def lightcurves(t, lcs, outdir):
nharm, nt = lcs.shape
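    # Spherical-harmonic bookkeeping: the curves arrive in +/- pairs for
    # each (l, m), so the sign flag toggles every curve and (l, m) only
    # advances every second curve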
l = 1
m = -1
pos = True
fig, ax = plt.subplots(1, figsize=(8,5))
for i in range(nharm):
plt.plot(t, lcs[i], label=r"${}Y_{{{}{}}}$".format(["-", "+"][pos],
l, m))
if pos:
pos = False
else:
pos = True
if l == m:
l += 1
m = -l
else:
m += 1
plt.ylabel('Normalized Flux')
plt.xlabel('Time (days)')
plt.legend(ncol=l, fontsize=6)
fig.tight_layout()
plt.savefig(os.path.join(outdir, 'lightcurves.png'))
plt.close(fig)
def eigencurves(t, lcs, outdir, ncurves=None):
if type(ncurves) == type(None):
ncurves = lcs.shape[0]
fig, ax = plt.subplots(1, figsize=(8,5))
for i in range(ncurves):
plt.plot(t, lcs[i], label="E-curve {}".format(i+1))
plt.ylabel('Normalized Flux')
plt.xlabel('Time (days)')
plt.legend(fontsize=6)
fig.tight_layout()
plt.savefig(os.path.join(outdir, 'eigencurves.png'))
plt.close(fig)
def ecurvepower(evalues, outdir):
ncurves = len(evalues)
num = np.arange(1, ncurves + 1)
fig, axes = plt.subplots(nrows=2)
axes[0].plot(num, evalues / np.sum(evalues), 'ob')
axes[0].set_xlabel('E-curve Number')
axes[0].set_ylabel('Normalized Power')
axes[1].semilogy(num, evalues / np.sum(evalues), 'ob')
axes[1].set_xlabel('E-curve Number')
axes[1].set_ylabel('Normalized Power')
fig.tight_layout()
plt.savefig(os.path.join(outdir, 'ecurvepower.png'))
plt.close(fig)
def pltmaps(fit, proj='rect'):
nmaps = len(fit.wlmid)
    ncols = int(np.sqrt(nmaps) // 1)
nrows = nmaps // ncols + (nmaps % ncols != 0)
xsize = 7. / 3. * ncols
if proj == 'rect':
ysize = 7. / 3. / 2. * nrows
elif proj == 'ortho':
ysize = 7. / 3. * nrows
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
                             sharey=True, squeeze=False,
                             figsize=(xsize, ysize))
naxes = nrows * ncols
extra = nmaps % ncols
vmax = np.max([np.max(m.tmap[~np.isnan(m.tmap)]) for m in fit.maps])
vmin = np.min([np.min(m.tmap[~np.isnan(m.tmap)]) for m in fit.maps])
if proj == 'rect':
extent = (-180, 180, -90, 90)
elif proj == 'ortho':
extent = (-90, 90, -90, 90)
# The weird placement of the subplots in this figure is a long-
# standing known bug in matplotlib with no straightforward
# solution. Probably not worth fixing here. See
# https://github.com/matplotlib/matplotlib/issues/5463
for i in range(naxes):
irow = i // ncols
icol = i % ncols
ax = axes[irow,icol]
if i >= nmaps:
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top=False, bottom=False,
left=False, right=False)
continue
im = ax.imshow(fit.maps[i].tmap, origin='lower', cmap='plasma',
extent=extent, vmin=vmin, vmax=vmax)
ax.set_title('{:.2f} um'.format(fit.wlmid[i]))
if icol == 0:
ax.set_ylabel(r'Latitude ($^\circ$)')
if i >= naxes - ncols - (ncols - extra):
ax.set_xlabel(r'Longitude ($^\circ$)')
fig.tight_layout()
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.85, 0.15, 0.03, 0.75])
fig.colorbar(im, cax=cax, label='Temperature (K)')
plt.savefig(os.path.join(fit.cfg.outdir,
'bestfit-{}-maps.png'.format(proj)))
plt.close(fig)
def tmap_unc(fit, proj='rect'):
nmaps = len(fit.wlmid)
    ncols = int(np.sqrt(nmaps) // 1)
nrows = nmaps // ncols + (nmaps % ncols != 0)
xsize = 7. / 3. * ncols
if proj == 'rect':
ysize = 7. / 3. / 2. * nrows
elif proj == 'ortho':
ysize = 7. / 3. * nrows
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
                             sharey=True, squeeze=False,
                             figsize=(xsize, ysize))
naxes = nrows * ncols
extra = nmaps % ncols
vmax = np.max([np.max(m.tmapunc[~np.isnan(m.tmap)]) for m in fit.maps])
vmin = np.min([np.min(m.tmapunc[~np.isnan(m.tmap)]) for m in fit.maps])
if proj == 'rect':
extent = (-180, 180, -90, 90)
elif proj == 'ortho':
extent = (-90, 90, -90, 90)
# The weird placement of the subplots in this figure is a long-
# standing known bug in matplotlib with no straightforward
# solution. Probably not worth fixing here. See
# https://github.com/matplotlib/matplotlib/issues/5463
for i in range(naxes):
irow = i // ncols
icol = i % ncols
ax = axes[irow,icol]
if i >= nmaps:
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top=False, bottom=False,
left=False, right=False)
continue
im = ax.imshow(fit.maps[i].tmapunc, origin='lower', cmap='plasma',
extent=extent, vmin=vmin, vmax=vmax)
ax.set_title('{:.2f} um'.format(fit.wlmid[i]))
if icol == 0:
ax.set_ylabel(r'Latitude ($^\circ$)')
if i >= naxes - ncols - (ncols - extra):
ax.set_xlabel(r'Longitude ($^\circ$)')
fig.tight_layout()
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.85, 0.15, 0.03, 0.75])
fig.colorbar(im, cax=cax, label='Temperature Uncertainty (K)')
plt.savefig(os.path.join(fit.cfg.outdir,
'bestfit-{}-maps-unc.png'.format(proj)))
plt.close(fig)
def bestfit(fit):
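    '''
    Plots the observed light curves with their best-fit models, plus
    one residual panel per filter.
    '''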
t = fit.t
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
nfilt = len(fit.wlmid)
nt = len(t)
hratios = np.zeros(nfilt+1)
hratios[0] = 0.5
hratios[1:] = 0.5 / nfilt
gridspec_kw = {'height_ratios':hratios}
fig, axes = plt.subplots(nrows=nfilt+1, ncols=1, sharex=True,
gridspec_kw=gridspec_kw, figsize=(8,10))
for i in range(nfilt):
axes[0].plot(t, fit.maps[i].bestfit, zorder=2, color=colors[i],
label='{:.2f} um'.format(fit.wlmid[i]))
axes[0].scatter(t, fit.flux[i], s=0.1, zorder=1, color=colors[i])
axes[0].legend()
axes[0].set_ylabel(r'($F_s + F_p$)/$F_s$')
for i in range(nfilt):
axes[i+1].scatter(t, fit.flux[i] - fit.maps[i].bestfit, s=0.1,
color=colors[i])
axes[i+1].set_ylabel('Residuals')
axes[i+1].axhline(0, 0, 1, color='black', linestyle='--')
if i == nfilt-1:
axes[i+1].set_xlabel('Time (days)')
fig.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, 'bestfit-lcs.png'))
plt.close(fig)
def ecurveweights(fit):
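    '''
    Plots the best-fit eigencurve weights and their significances
    (|weight / uncertainty|) for each wavelength.
    '''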
nwl = len(fit.wlmid)
maxweight = -np.inf
minweight = np.inf
maxcurves = np.max([m.ncurves for m in fit.maps])
if nwl == 1:
shifts = [0]
else:
shifts = np.linspace(-0.2, 0.2, num=nwl, endpoint=True)
fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True)
for i in range(nwl):
ncurves = fit.maps[i].ncurves
npar = ncurves + 2
weights = fit.maps[i].bestp[:ncurves]
uncs = fit.maps[i].stdp[:ncurves]
axes[0].errorbar(np.arange(ncurves) + shifts[i] + 1,
weights, uncs, fmt='o',
label="{:.2f} um".format(fit.wlmid[i]))
axes[0].set_ylabel("E-curve weight")
maxweight = np.max((maxweight, np.max(weights)))
minweight = np.min((minweight, np.min(weights)))
axes[1].scatter(np.arange(ncurves) + shifts[i] + 1,
np.abs(weights / uncs))
axes[1].set_ylabel("E-curve Significance")
axes[1].set_xlabel("E-curve number")
axes[1].set_yscale('log')
yrange = maxweight - minweight
axes[0].set_ylim((minweight - 0.1 * yrange,
maxweight + 0.1 * yrange))
axes[0].legend()
xlim = axes[1].get_xlim()
axes[1].hlines(3, 0, nwl*maxcurves+1, linestyles='--',
label=r'3$\sigma$')
axes[1].set_xlim(xlim)
axes[1].legend()
plt.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, 'ecurveweight.png'))
plt.close(fig)
def hshist(fit):
'''
Makes a plot of hotspot location posterior distribution
'''
nmaps = len(fit.maps)
fig, axes = plt.subplots(nrows=2, ncols=nmaps, sharey='row',
squeeze=False)
for i in range(nmaps):
# Latitude
ax = axes[0][i]
ax.hist(fit.maps[i].hslocpost[0], bins=20)
ax.set_xlabel('Latitude (deg)')
ylim = ax.get_ylim()
ax.vlines(fit.maps[i].hslocbest[0], ylim[0], ylim[1], color='red')
ax.set_ylim(ylim)
if i == 0:
ax.set_ylabel('Samples')
# Longitude
ax = axes[1][i]
ax.hist(fit.maps[i].hslocpost[1], bins=20)
ax.set_xlabel('Longitude (deg)')
ylim = ax.get_ylim()
ax.vlines(fit.maps[i].hslocbest[1], ylim[0], ylim[1], color='red')
ax.set_ylim(ylim)
if i == 0:
ax.set_ylabel('Samples')
plt.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, 'hotspot-hist.png'))
plt.close(fig)
def bestfitlcsspec(fit):
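    '''
    Plots the light curves with the best-fit spectral models, plus
    one residual panel per filter.
    '''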
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
nfilt, nt = fit.specbestmodel.shape
hratios = np.zeros(nfilt+1)
hratios[0] = 0.5
hratios[1:] = 0.5 / nfilt
gridspec_kw = {'height_ratios':hratios}
fig, axes = plt.subplots(nrows=nfilt+1, ncols=1, sharex=True,
gridspec_kw=gridspec_kw, figsize=(8,10))
for i in range(nfilt):
axes[0].scatter(fit.t, fit.flux[i], s=0.1, zorder=1,
color=colors[i])
axes[0].plot(fit.t, fit.specbestmodel[i],
label='{:.2f} um'.format(fit.wlmid[i]), zorder=2,
color=colors[i])
axes[0].legend()
axes[0].set_ylabel(r'($F_s + F_p$)/$F_s$')
for i in range(nfilt):
axes[i+1].scatter(fit.t, fit.flux[i] - fit.specbestmodel[i], s=0.1,
color=colors[i])
axes[i+1].set_ylabel('Residuals')
axes[i+1].axhline(0, 0, 1, color='black', linestyle='--')
if i == nfilt-1:
axes[i+1].set_xlabel('Time (days)')
plt.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, 'bestfit-lcs-spec.png'))
plt.close(fig)
def bestfittgrid(fit):
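    '''
    Plots the best-fit temperature grid as temperature-pressure
    profiles, colored by longitude along the equator, with line
    shading set by the normalized contribution functions.
    '''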
fig, ax = plt.subplots(figsize=(6,8))
# Match colors to light curves
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Line colors from colormap
cmap = mpl.cm.get_cmap('hsv')
nmaps = len(fit.maps)
# Latitude index
ieq = fit.cfg.twod.nlat // 2
cfnorm_lines = np.nanmax(fit.cf)
cfnorm_dots = np.nanmax(np.sum(fit.cf, axis=2))
for i in range(fit.cfg.twod.nlat):
for j in range(fit.cfg.twod.nlon):
lat = fit.lat[i,j]
lon = fit.lon[i,j]
if i == ieq:
label = "Lat: {:.1f}, Lon: {:.1f}".format(lat, lon)
ic = lon / 360.
if ic < 0.0:
ic += 1.0
color = cmap(ic)
zorder = 2
else:
label = None
color = 'gray'
zorder = 1
if ((lon + fit.dlon < fit.minvislon) or
(lon - fit.dlon > fit.maxvislon)):
linestyle = '--'
else:
linestyle = '-'
points = np.array([fit.besttgrid[:,i,j], fit.p]).T.reshape(-1,1,2)
segments = np.concatenate([points[:-1], points[1:]],
axis=1)
norm = plt.Normalize(0, 1)
lc = collections.LineCollection(segments,
cmap=gradient_cmap(color),
norm=norm, zorder=zorder)
lc.set_array(np.max(fit.cf[i,j,:-1], axis=1) / cfnorm_lines)
line = ax.add_collection(lc)
if linestyle != '--':
for k in range(nmaps):
alpha = np.sum(fit.cf[i,j,:,k]) / cfnorm_dots
alpha = np.round(alpha, 2)
ax.scatter(fit.tmaps[k,i,j], fit.pmaps[k,i,j],
c=colors[k], marker='o', zorder=3, s=1,
alpha=alpha)
# Build custom legend
legend_elements = []
for i in range(nmaps):
label = str(np.round(fit.wlmid[i], 2)) + ' um'
legend_elements.append(mpll.Line2D([0], [0], color='w',
label=label,
marker='o',
markerfacecolor=colors[i],
markersize=4))
ax.set_yscale('log')
ax.invert_yaxis()
ax.legend(handles=legend_elements, loc='best')
ax.set_xlabel("Temperature (K)")
ax.set_ylabel("Pressure (bars)")
plt.tight_layout()
cax = inset_axes(plt.gca(), width='5%', height='25%',
loc='lower right')
sm = plt.cm.ScalarMappable(cmap=cmap,
norm=plt.Normalize(vmin=0, vmax=360))
cbar = plt.colorbar(sm, cax=cax, label=r'Longitude ($^\circ$)')
cbar.set_ticks(np.linspace(0, 360, 5, endpoint=True))
cax.yaxis.set_ticks_position('left')
cax.yaxis.set_label_position('left')
plt.savefig(os.path.join(fit.cfg.outdir, 'bestfit-tp.png'))
plt.close(fig)
def visanimation(fit, fps=60, step=10):
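    '''
    Saves an animation of the visibility grid over time as a GIF.
    '''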
fig = plt.figure()
ims = []
Writer = animation.writers['pillow']
writer = Writer(fps=fps)
plt.xlabel('Longitude (deg)')
plt.ylabel('Latitude (deg)')
    plt.yticks(np.linspace(-90, 90, 13, endpoint=True))
plt.xticks(np.linspace(-180, 180, 13, endpoint=True))
nt = len(fit.t)
for i in range(0, nt, step):
im = plt.imshow(fit.vis[i], animated=True,
vmax=np.max(fit.vis), vmin=np.min(fit.vis),
extent=(-180, 180, -90, 90))
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=50,
blit=True, repeat_delay=1000)
ani.save(os.path.join(fit.cfg.outdir, 'vis.gif'), dpi=300, writer=writer)
plt.close(fig)
def fluxmapanimation(fit, fps=60, step=10):
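    '''
    Saves an animation of the flux maps modulated by the visibility
    grid over time as a GIF.
    '''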
nmaps = len(fit.wlmid)
ncols = np.min((nmaps, 3))
nrows = nmaps // ncols + (nmaps % ncols != 0)
xsize = 7. / 3. * ncols
ysize = 7. / 3. / 2. * nrows
figsize = (xsize, ysize)
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
sharey=True, squeeze=False, figsize=figsize)
vmax = np.max(fit.fmaps[~np.isnan(fit.fmaps)])
vmin = np.min(fit.fmaps[~np.isnan(fit.fmaps)])
extent = (-180, 180, -90, 90)
all_ims = []
Writer = animation.writers['pillow']
writer = Writer(fps=fps)
for j in range(0, len(fit.t), step):
frame_ims = []
for i in range(nmaps):
irow = i // ncols
icol = i % ncols
ax = axes[irow,icol]
im = ax.imshow(fit.fmaps[i]*fit.vis[j],
origin='lower', cmap='plasma',
extent=extent,
vmin=vmin, vmax=vmax)
#plt.colorbar(im, ax=ax)
ax.set_title('{:.2f} um'.format(fit.wlmid[i]))
frame_ims.append(im)
all_ims.append(frame_ims)
ani = animation.ArtistAnimation(fig, all_ims, interval=50,
blit=True, repeat_delay=1000)
ani.save(os.path.join(fit.cfg.outdir, 'fmaps.gif'), dpi=300, writer=writer)
plt.close(fig)
def tau(fit, ilat=None, ilon=None):
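    '''
    Plots the transmission, exp(-tau), as a function of wavelength
    and pressure at the given grid cell, with the filter transmission
    curves overlaid.
    '''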
fig, ax = plt.subplots()
cfg = fit.cfg
    if ilat is None:
        ilat = cfg.twod.nlat // 2
    if ilon is None:
        ilon = cfg.twod.nlon // 2
nlat, nlon = fit.taugrid.shape
npress, nwn = fit.taugrid[0,0].shape
wn = fit.modelwngrid
wl = 10000 / fit.modelwngrid
p = fit.p
logp = np.log10(p)
maxlogp = np.max(logp)
minlogp = np.min(logp)
logwl = np.log10(wl)
maxlogwl = np.max(logwl)
minlogwl = np.min(logwl)
tau = fit.taugrid[ilat,ilon]
plt.imshow(np.flip(np.exp(-tau)), aspect='auto',
extent=(minlogwl, maxlogwl, maxlogp, minlogp),
cmap='magma')
yticks = plt.yticks()[0]
plt.yticks(yticks, [r"$10^{{{:.0f}}}$".format(y) for y in yticks])
plt.ylim((maxlogp, minlogp))
xticks = plt.xticks()[0]
plt.xticks(xticks, np.round(10.**xticks, 2))
plt.xlim((minlogwl, maxlogwl))
plt.xlabel('Wavelength (um)')
plt.ylabel('Pressure (bars)')
nfilt = len(fit.filtwl)
ax = plt.gca()
transform = mpl.transforms.blended_transform_factory(
ax.transData, ax.transAxes)
# Note: assumes all filters are normalized to 1, and plots them
# in the top tenth of the image.
for i in range(nfilt):
plt.plot(np.log10(fit.filtwl[i]), 1.0 - fit.filttrans[i]/10.,
transform=transform, label='{:.2f} um'.format(fit.wlmid[i]),
linestyle='--')
leg = plt.legend(frameon=False, ncol=4, fontsize=8)
for text in leg.get_texts():
text.set_color("white")
plt.colorbar(label=r'$e^{-\tau}$')
plt.savefig(os.path.join(fit.cfg.outdir, 'transmission.png'))
plt.close(fig)
def pmaps3d(fit, animate=False):
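    '''
    Plots the photospheric pressure maps as 3D surfaces colored by
    temperature, optionally animating the viewing angle.
    '''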
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
nmaps = fit.pmaps.shape[0]
tmax = np.nanmax(fit.tmaps)
tmin = np.nanmin(fit.tmaps)
def init():
for i in range(nmaps):
cm = mpl.cm.coolwarm((fit.tmaps[i] - tmin)/(tmax - tmin))
ax.plot_surface(fit.lat, fit.lon, np.log10(fit.pmaps[i]),
facecolors=cm, linewidth=3, shade=False)
ax.plot_wireframe(fit.lat, fit.lon,
np.log10(fit.pmaps[i]), linewidth=0.5,
color=colors[i])
ax.invert_zaxis()
ax.set_xlabel('Latitude (deg)')
ax.set_ylabel('Longitude (deg)')
ax.set_zlabel('log(p) (bars)')
plt.tight_layout()
return fig,
init()
plt.savefig(os.path.join(fit.cfg.outdir, 'pmaps.png'))
plt.close(fig)
if not animate:
return
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
nframes = 80
Writer = animation.writers['pillow']
writer = Writer(fps=15)
base_azim = 45.0
base_elev = 15.0
azim_vary = np.concatenate((np.linspace(0., 45., nframes // 4),
np.linspace(45., 0., nframes // 4),
np.zeros(nframes // 2)))
azim = base_azim + azim_vary
elev_vary = np.concatenate((np.zeros(nframes // 2),
np.linspace(0., 30., nframes // 4),
np.linspace(30., 0., nframes // 4)))
elev = base_elev + elev_vary
def animate(i):
ax.view_init(elev=elev[i], azim=azim[i])
return fig,
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=nframes, interval=20, blit=True)
anim.save(os.path.join(fit.cfg.outdir, 'pmaps3d.gif'), dpi=300,
writer=writer)
plt.close(fig)
def tgrid_unc(fit):
'''
Plots the temperature profiles of the atmosphere at various
important locations, with uncertainties.
'''
ncols = 2
nrows = 2
fig, axes = plt.subplots(ncols=ncols, nrows=nrows, sharex=True,
sharey=True)
mcmcout = np.load(fit.cfg.outdir + '/3dmcmc.npz')
niter, npar = fit.posterior3d.shape
nlev, nlat, nlon = fit.besttgrid.shape
# Limit calculations if large number of samples
ncalc = np.min((5000, niter))
tgridpost = np.zeros((ncalc, nlev, nlat, nlon))
for i in range(ncalc):
ipost = i * niter // ncalc
pmaps = atm.pmaps(fit.posterior3d[ipost], fit)
tgridpost[i], p = atm.tgrid(nlev, nlat, nlon, fit.tmaps,
pmaps, fit.cfg.threed.pbot,
fit.cfg.threed.ptop,
fit.posterior3d[ipost],
fit.nparams3d, fit.modeltype3d,
interptype=fit.cfg.threed.interp,
oob=fit.cfg.threed.oob,
smooth=fit.cfg.threed.smooth)
# Collapse to 1D for easier indexing
lat = np.unique(fit.lat)
lon = np.unique(fit.lon)
for i in range(ncols*nrows):
        irow = i // ncols
icol = i % ncols
ax = axes[irow, icol]
# Hotspot
if i == 0:
# Average over all maps
hslatavg = np.mean([a.hslocbest[0] for a in fit.maps])
hslonavg = np.mean([a.hslocbest[1] for a in fit.maps])
ilat = np.abs(lat - hslatavg).argmin()
ilon = np.abs(lon - hslonavg).argmin()
title = 'Hotspot'
# Substellar point
if i == 1:
ilat = np.abs(lat - 0.0).argmin()
ilon = np.abs(lon - 0.0).argmin()
title = 'Substellar'
# West terminator
if i == 2:
ilat = np.abs(lat - 0.0).argmin()
ilon = np.abs(lon + 90.0).argmin()
title = 'West Terminator'
# East terminator
if i == 3:
ilat = np.abs(lat - 0.0).argmin()
ilon = np.abs(lon - 90.0).argmin()
title = 'East Terminator'
tdist = tgridpost[:,:,ilat,ilon]
l1 = np.percentile(tdist, 15.87, axis=0)
l2 = np.percentile(tdist, 2.28, axis=0)
h1 = np.percentile(tdist, 84.13, axis=0)
h2 = np.percentile(tdist, 97.72, axis=0)
bf = fit.besttgrid[:,ilat,ilon]
ax.fill_betweenx(fit.p, l2, h2, facecolor='royalblue')
ax.fill_betweenx(fit.p, l1, h1, facecolor='cornflowerblue')
ax.semilogy(bf, fit.p, label='Best Fit', color='black')
if irow == 1:
ax.set_xlabel('Temperature (K)')
if icol == 0:
ax.set_ylabel('Pressure (bars)')
if i == 0:
plt.gca().invert_yaxis()
subtitle = r'$\theta={}, \phi={}$'.format(lat[ilat], lon[ilon])
ax.set_title(title + '\n' + subtitle)
plt.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, 'tgrid_unc.png'))
plt.close(fig)
def tmapunc(fit):
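    '''
    Plots the equatorial temperature map posterior draws and
    uncertainties against longitude for each wavelength.
    '''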
nmaps = len(fit.maps)
    ncols = int(np.sqrt(nmaps))
    nrows = (nmaps // ncols) + (nmaps % ncols != 0)
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
sharey=True)
fig.set_size_inches(2*ncols, 2*nrows)
for i in range(nmaps):
irow = i // ncols
icol = i % ncols
ax = axes[irow, icol]
        m = fit.maps[i]
        npost, nlat, nlon = m.tmappost.shape
        ilat = nlat // 2
        for j in range(npost):
            ax.plot(fit.lon[ilat], m.tmappost[j,ilat], color='gray',
                    alpha=0.01)
        ax.errorbar(fit.lon[ilat], m.tmap[ilat], m.tmapunc[ilat])
        #plt.autoscale(False)
        #ax.vlines((fit.minvislon, fit.maxvislon), 0.0, 5000.,
        #          color='red')
        #plt.autoscale(True)
        ax.set_title(r"{:.2f} $\mu$m".format(m.wlmid))
        if irow == nrows - 1:
ax.set_xlabel("Longitude (deg)")
if icol == 0:
ax.set_ylabel("Temperature (K)")
plt.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, 'tmapunc.png'))
plt.close(fig)
def cf_by_location(fit):
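    '''
    Plots contribution functions against pressure at every grid cell,
    one subplot per (latitude, longitude) location.
    '''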
nlat, nlon, nlev, nfilt = fit.cf.shape
fig, axes = plt.subplots(nrows=nlat, ncols=nlon, sharey=True, sharex=True)
fig.set_size_inches(16, 8)
# Place labels on a single large axes object
bigax = fig.add_subplot(111, frameon=False)
bigax.spines['top'].set_color('none')
bigax.spines['bottom'].set_color('none')
bigax.spines['left'].set_color('none')
bigax.spines['right'].set_color('none')
bigax.tick_params(labelcolor='w', top=False, bottom=False,
left=False, right=False)
bigax.set_ylabel('Pressure (bars)', labelpad=20)
bigax.set_xlabel('Contribution (arbitrary)', labelpad=10)
cmap = mpl.cm.get_cmap('rainbow')
for i in range(nlat):
for j in range(nlon):
ax = axes[i,j]
for k in range(nfilt):
color = cmap(k / nfilt)
label = os.path.split(fit.cfg.twod.filtfiles[k])[1]
ax.semilogy(fit.cf[i,j,:,k], fit.p, color=color,
label=label)
if i == nlat - 1:
ax.set_xlabel(r'{}$^\circ$'.format(np.round(fit.lon[i,j], 2)))
if j == 0:
ax.set_ylabel(r'{}$^\circ$'.format(np.round(fit.lat[i,j], 2)))
            if i == nlat - 1 and j == nlon - 1:
ax.invert_yaxis()
ax.set_xticklabels([])
ax.tick_params(axis='y', labelsize=6)
# Since we share y axes, this inverts them all
#plt.gca().invert_yaxis()
#plt.legend()
plt.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, 'cf.png'))
plt.close(fig)
def cf_by_filter(fit):
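    '''
    Plots contribution functions against pressure for each filter,
    highlighting equatorial cells in color.
    '''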
nlat, nlon, nlev, nfilt = fit.cf.shape
    ncols = int(np.sqrt(nfilt))
    nrows = (nfilt // ncols) + (nfilt % ncols != 0)
naxes = nrows * ncols
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
sharey=True)
fig.set_size_inches(8, 8)
extra = nfilt % ncols
ieq = nlat // 2
for i in range(naxes):
irow = i // ncols
icol = i % ncols
ax = axes[irow, icol]
# Hide extra axes and move on
if i >= nfilt:
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top=False, bottom=False,
left=False, right=False)
continue
cmap = mpl.cm.get_cmap('hsv')
for j in range(nlat):
for k in range(nlon):
if j == ieq:
ic = fit.lon[j,k] / 360.
if ic < 0:
ic += 1
color = cmap(ic)
label = r"${} ^\circ$".format(np.round(fit.lon[j,k], 2))
zorder = 1
else:
color = 'gray'
label = None
zorder = 0
ax.semilogy(fit.cf[j,k,:,i], fit.p, color=color,
label=label, zorder=zorder)
if icol == 0:
ax.set_ylabel('Pressure (bars)')
if i >= naxes - ncols - (ncols - extra):
ax.set_xlabel('Contribution (arbitrary)')
ax.set_title("{} um".format(np.round(fit.wlmid[i], 2)))
plt.gca().invert_yaxis()
plt.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, 'cf-by-filter.png'))
plt.close(fig)
def cf_slice(fit, ilat=None, ilon=None, fname=None):
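    '''
    Plots a slice of the contribution functions along the equator
    (default), or along the latitude or longitude index given by
    ilat or ilon, with the photospheric pressure overplotted.
    '''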
if ilat is not None and ilon is not None:
print("Must specify either ilat or ilon, not both.")
return
nlat, nlon, nlev, nfilt = fit.cf.shape
logp = np.log10(fit.p)
minlogp = np.min(logp)
maxlogp = np.max(logp)
# Default behavior is slice along the equator
if ilat is None and ilon is None:
latslice = nlat // 2
lonslice = np.arange(nlon)
xmin = -180.
xmax = 180.
xlabel = 'Longitude (deg)'
elif ilat is None and ilon is not None:
latslice = np.arange(nlat)
lonslice = ilon
xmin = -90.
xmax = 90.
xlabel = 'Latitude (deg)'
elif ilat is not None and ilon is None:
latslice = ilat
lonslice = np.arange(nlon)
xmin = -180.
xmax = 180.
xlabel = 'Longitude (deg)'
if fname is None:
fname = 'cf-slice.png'
gridspec_kw = {}
gridspec_kw['width_ratios'] = np.concatenate((np.ones(nfilt), [0.1]))
fig, axes = plt.subplots(ncols=nfilt + 1, gridspec_kw=gridspec_kw)
fig.set_size_inches(3*nfilt+1, 5)
vmin = np.nanmin(fit.cf[latslice, lonslice])
vmax = np.nanmax(fit.cf[latslice, lonslice])
extent = (xmin, xmax, maxlogp, minlogp)
for i in range(nfilt):
ax = axes[i]
im = ax.imshow(fit.cf[latslice, lonslice,:,i].T, vmin=vmin,
vmax=vmax, origin='lower', extent=extent,
aspect='auto')
if ilon is None:
ax.plot(fit.lon[latslice],
np.log10(fit.pmaps[i,latslice,lonslice]), color='red')
else:
ax.plot(fit.lat[:,lonslice],
                    np.log10(fit.pmaps[i,latslice,lonslice]), color='red')
if i == 0:
ax.set_ylabel('Log(p) (bars)')
ax.set_xlabel(xlabel)
fig.colorbar(im, cax=axes[-1], label='Contribution')
plt.tight_layout()
plt.savefig(os.path.join(fit.cfg.outdir, fname))
plt.close(fig)
# Function adapted from https://towardsdatascience.com/beautiful-custom-colormaps-with-matplotlib-5bab3d1f0e72
def gradient_cmap(color):
'''
Utility function to make colormaps which are a
gradient from white to the specified color.
'''
rgb_color = mplc.to_rgb(color)
dec_color = np.array(rgb_color) #/ 256
white = [1., 1., 1.]
dec_colors = [white, dec_color]
cdict = {}
# Just two colors
loclist = [0,1]
for num, col in enumerate(['red', 'green', 'blue']):
col_list = [[loclist[i], dec_colors[i][num], dec_colors[i][num]] \
for i in range(2)]
cdict[col] = col_list
cmap = mplc.LinearSegmentedColormap(color, segmentdata=cdict, N=256)
return cmap
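# A minimal usage sketch (illustrative only): build a white-to-red colormap,
# as used for the per-profile line collections in bestfittgrid() above.
#
#   cmap = gradient_cmap('red')
#   rgba = cmap(np.linspace(0, 1, 10))  # ten colors fading from white to red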
|
{"hexsha": "a6fde66bfcadb7ddda978b60dd5828be90d7de88", "size": 32861, "ext": "py", "lang": "Python", "max_stars_repo_path": "theresa/lib/plots.py", "max_stars_repo_name": "rychallener/theresa", "max_stars_repo_head_hexsha": "886c6b74bee2edef7df9b6b54ce6d97de4aa4421", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-16T19:37:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T20:16:05.000Z", "max_issues_repo_path": "theresa/lib/plots.py", "max_issues_repo_name": "rychallener/theresa", "max_issues_repo_head_hexsha": "886c6b74bee2edef7df9b6b54ce6d97de4aa4421", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "theresa/lib/plots.py", "max_forks_repo_name": "rychallener/theresa", "max_forks_repo_head_hexsha": "886c6b74bee2edef7df9b6b54ce6d97de4aa4421", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6275264678, "max_line_length": 110, "alphanum_fraction": 0.5277076169, "include": true, "reason": "import numpy", "num_tokens": 9265}
|
from BurstCube.NoahSim import burstutils
import numpy as np
def test_length():
x = [0,1,0]
testmag = burstutils.length(x)
assert (np.abs(testmag - 1) < 1e-7)
def test_angle():
    #used to find the angular separation between two vectors
x = [1,0,0]
y = [0,1,0]
testang = burstutils.angle(x,y)
assert (np.abs(testang - np.pi/2) < 1e-7)
"""
def test_chiresponse():
testAs = burstutils.chiresponse(np.array([np.pi/4,7*np.pi/4]))
np.testing.assert_allclose(testAs,(0.768438,0),1e-3)
"""
def test_response():
    #These functions take arrays; the mask inside burstutils.response handles separations that are too large.
Atest= np.array([0.7853981633974483])
xtest = np.array([0.76])
testR = burstutils.response(Atest,xtest)
assert (np.abs(testR- 0.768438) < 1e-3)
def test_lookupA():
    #Testing just one of the lookup tables; they were built in parallel, so this should suffice for all.
fakenorm = np.array([0,0,1])
fakesource = np.array([0,0,1])
    testx = burstutils.look_up_A(fakenorm,fakesource,array=False) #named testx because the function normally returns x; the cosine exponent is multiplied in later to match the response of the scintillator.
    assert (np.abs(testx - .76) < 1e-7) #currently all set to 0.76; these will need refining later if MEGAlib says otherwise.
|
{"hexsha": "347dd6f16bd47bf14b71adfd89cc3c22761c7708", "size": 1270, "ext": "py", "lang": "Python", "max_stars_repo_path": "BurstCube/tests/test_utils.py", "max_stars_repo_name": "nkasmanoff/Simulation", "max_stars_repo_head_hexsha": "38d47db79cebe8504a03424c564f2207ae2275ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BurstCube/tests/test_utils.py", "max_issues_repo_name": "nkasmanoff/Simulation", "max_issues_repo_head_hexsha": "38d47db79cebe8504a03424c564f2207ae2275ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BurstCube/tests/test_utils.py", "max_forks_repo_name": "nkasmanoff/Simulation", "max_forks_repo_head_hexsha": "38d47db79cebe8504a03424c564f2207ae2275ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0212765957, "max_line_length": 179, "alphanum_fraction": 0.7118110236, "include": true, "reason": "import numpy", "num_tokens": 391}
|
/*
* This file is part of the Geneva library collection.
*
* See the NOTICE file in the top-level directory of the Geneva library
* collection for a list of contributors and copyright information.
*
* The following license applies to the code IN THIS FILE:
*
* ***************************************************************************
*
* Boost Software License - Version 1.0 - August 17th, 2003
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (the "Software") to use, reproduce, display, distribute,
* execute, and transmit the Software, and to prepare derivative works of the
* Software, and to permit third-parties to whom the Software is furnished to
* do so, all subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* ***************************************************************************
*
* NOTE THAT THE BOOST-LICENSE DOES NOT APPLY TO ANY OTHER FILES OF THE
* GENEVA LIBRARY, UNLESS THIS IS EXPLICITLY STATED IN THE CORRESPONDING FILE!
*/
#pragma once
// Global checks, defines and includes needed for all of Geneva
#include "common/GGlobalDefines.hpp"
// Standard header files go here
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <exception>
#include <vector>
#include <memory>
#include <tuple>
#include <mutex>
#include <cstdlib>
#include <cmath>
#include <chrono>
#include <iomanip>
#include <time.h>
#include <ctime>
#include <cstdio>
// Boost header files go here
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/lexical_cast.hpp>
// Geneva header files go here
#include "common/GSingletonT.hpp"
#include "common/GCommonEnums.hpp"
#include "common/GTupleIO.hpp"
#include "common/GExceptions.hpp"
namespace Gem {
namespace Common {
/******************************************************************************/
////////////////////////////////////////////////////////////////////////////////
/******************************************************************************/
/**
* This class defines the interface of log targets, i.e. targets for the logging of
* messages through the GLogStreamer class. Essentially all that is needed is
* the log function. Pointers to this class are stored in the GLogStreamer. They
* point to objects of the GConsoleLogger or GFileLogger classes, or other log targets
* defined by the user.
*/
class GBaseLogTarget {
public:
/*************************************************************************/
// Defaulted or deleted constructors, destructor and assignment operators
G_API_COMMON GBaseLogTarget() = default;
G_API_COMMON GBaseLogTarget(GBaseLogTarget const&) = default;
G_API_COMMON GBaseLogTarget(GBaseLogTarget &&) = default;
virtual G_API_COMMON ~GBaseLogTarget() BASE = default;
G_API_COMMON GBaseLogTarget& operator=(GBaseLogTarget const&) = default;
G_API_COMMON GBaseLogTarget& operator=(GBaseLogTarget &&) = default;
/*************************************************************************/
/** @brief The logging interface */
virtual G_API_COMMON void log(std::string const&) const BASE = 0;
/** @brief Adds an extension to the output */
virtual G_API_COMMON void logWithSource(std::string const&, std::string const&) const BASE = 0;
};
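/*
 * A minimal sketch (illustrative only, not part of the library): a custom log
 * target derives from GBaseLogTarget and implements the two pure virtual
 * functions, e.g.
 *
 *   class GNullLogger : public GBaseLogTarget {
 *   public:
 *       void log(std::string const&) const override {}
 *       void logWithSource(std::string const&,
 *                          std::string const&) const override {}
 *   };
 */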
/******************************************************************************/
////////////////////////////////////////////////////////////////////////////////
/******************************************************************************/
/**
* The console logger writes log messages to the console.
*/
class GConsoleLogger : public GBaseLogTarget {
public:
/*************************************************************************/
// Defaulted or deleted constructors, destructor and assignment operators
// rule of five
G_API_COMMON GConsoleLogger() = default;
G_API_COMMON GConsoleLogger(GConsoleLogger const&) = delete;
G_API_COMMON GConsoleLogger(GConsoleLogger &&) = default;
G_API_COMMON ~GConsoleLogger() override = default;
G_API_COMMON GConsoleLogger& operator=(GConsoleLogger const&) = delete;
G_API_COMMON GConsoleLogger& operator=(GConsoleLogger &&) = default;
/*************************************************************************/
/** @brief Implements the logging to the console */
G_API_COMMON void log(std::string const&) const override;
/** @brief Adds a specifier to the output */
G_API_COMMON void logWithSource(
std::string const&, std::string const&
) const override;
};
/******************************************************************************/
////////////////////////////////////////////////////////////////////////////////
/******************************************************************************/
/**
* The file logger writes log messages to a file.
*/
class GFileLogger : public GBaseLogTarget {
public:
/** @brief This constructor accepts a boost path to a file name as argument */
explicit G_API_COMMON GFileLogger(boost::filesystem::path const&);
/*************************************************************************/
// Defaulted or deleted constructors, destructor and assignment operators
// rule of five
G_API_COMMON GFileLogger() = default;
G_API_COMMON GFileLogger(GFileLogger const&) = delete;
G_API_COMMON GFileLogger(GFileLogger &&) = default;
G_API_COMMON ~GFileLogger() override = default;
G_API_COMMON GFileLogger& operator=(GFileLogger const&) = delete;
G_API_COMMON GFileLogger& operator=(GFileLogger &&) = default;
/*************************************************************************/
/** @brief Implements logging to a file on disk */
G_API_COMMON void log(std::string const&) const override;
/** @brief Adds an extension to the output file */
G_API_COMMON void logWithSource(
std::string const&, std::string const&
) const override;
private:
std::string m_fname = "Geneva-Library-Collection.log"; ///< The name of the log file
mutable bool m_first = true; ///< Indicates whether any logging has already been done
};
/******************************************************************************/
////////////////////////////////////////////////////////////////////////////////
/******************************************************************************/
/**
* This class serves as the front end of the logging infrastructure. An object of
* this type is accessible through a singleton to all entities in the program.
* Upon invocation of the streaming operator it produces an object which is supposed
* to handle the rest of the work, either using the log targets stored in the
 * GLogger object or letting manipulators direct the output.
*/
template<class S> // "S" means "streamer"
class GLogger
{
public:
/***************************************************************************/
// Defaulted or deleted constructors / destructor and assignment operators
// Rule of five
GLogger() = default;
GLogger(GLogger<S> const&) = delete;
GLogger(GLogger<S> &&) noexcept(false) = default;
~GLogger() = default;
GLogger<S>& operator=(GLogger<S> const&) = delete;
GLogger<S>& operator=(GLogger<S> &&) noexcept(false) = default;
/***************************************************************************/
/**
* This function will forward all arguments to a newly created object
* of type S. Note that the function returns the S object by value. It
* will not survive beyond the end of the stream-chain.
*/
template<typename T>
S operator<<(T const& t) {
S s;
s << t;
return s;
}
/******************************************************************************/
/**
* Needed for ostringstream
*/
S operator<<(std::ostream &( *val )(std::ostream &)) {
S s;
s << val;
return s;
}
/******************************************************************************/
/**
* Needed for ostringstream
*/
S operator<<(std::ios &( *val )(std::ios &)) {
S s;
s << val;
return s;
}
/******************************************************************************/
/**
* Needed for ostringstream
*/
S operator<<(std::ios_base &( *val )(std::ios_base &)) {
S s;
s << val;
return s;
}
/***************************************************************************/
/**
* This function instructs the logger architecture to emit additional
* specifications for the data being logged. When writing to the console,
* a corresponding text will be emitted. When writing to a file, the
* modifier will be appended with an underscore to the filename.
*/
S operator()(std::string const& extension) {
S s(extension);
return s;
}
/***************************************************************************/
/**
* This function instructs the logger architecture to emit data to the file
* specified by the boost::path object
*/
S operator()(boost::filesystem::path p) {
S s(p);
return s;
}
/***************************************************************************/
/**
* Allows to set the default log target
*/
void setDefaultLogTarget(std::shared_ptr <GBaseLogTarget> gblt) {
if (gblt) {
m_default_logger = gblt;
} else {
raiseException(
"In GLogger::setDefaultLogTarget(): Error!" << std::endl
<< "Tried to register empty default logger" << std::endl
);
}
}
/***************************************************************************/
/**
* Adds a log target, such as console or file
*/
void addLogTarget(std::shared_ptr <GBaseLogTarget> gblt) {
if (gblt) {
m_log_cnt.push_back(gblt);
} else {
raiseException(
"In GLogger::addLogTarget(): Error!" << std::endl
<< "Tried to register empty logger" << std::endl
);
}
}
/***************************************************************************/
/**
* Checks whether any log targets are present
*/
bool hasLogTargets() const {
return not m_log_cnt.empty();
}
/***************************************************************************/
/**
* Clears local log-targets
*/
void resetLogTargets() {
m_log_cnt.clear();
}
/***************************************************************************/
/**
* Allows S-objects to submit strings to the log targets. Note that this
* function is thread-safe and thus may be called from different threads.
 * Note that this function throws if neither explicit log targets nor a
 * default logger have been registered.
*/
void log(std::string const& message) const {
// Make sure only one entity outputs data
std::unique_lock<std::mutex> lk(m_logger_mutex);
if (not m_log_cnt.empty()) {
// Do the actual logging
for(auto const& cit: m_log_cnt) {
cit->log(message);
}
} else {
if (m_default_logger) {
m_default_logger->log(message);
} else {
raiseException(
"In GLogger::log(): Error!" << std::endl
<< "No loggers found" << std::endl
);
}
}
}
/***************************************************************************/
/**
* Allows S-objects to submit strings to the log targets. Note that this
* function is thread-safe and thus may be called from different threads.
 * Note that this function throws if neither explicit log targets nor a
 * default logger have been registered.
*/
void logWithSource(std::string const& message, std::string const& extension) const {
// Make sure only one entity outputs data
std::unique_lock<std::mutex> lk(m_logger_mutex);
if (not m_log_cnt.empty()) {
// Do the actual logging
            for(auto cit: m_log_cnt) { // std::shared_ptr may be copied
cit->logWithSource(message, extension);
}
} else {
if (m_default_logger) {
m_default_logger->logWithSource(message, extension);
} else {
raiseException(
"In GLogger::logWithSource(): Error!" << std::endl
<< "No loggers found" << std::endl
);
}
}
}
/***************************************************************************/
/**
* Throws an exception from a global position. This prevents exceptions thrown
* from within threads from getting lost.
*/
void throwException(std::string const& error) {
// Make sure only one entity outputs data
std::unique_lock<std::mutex> lk(m_logger_mutex);
throw(gemfony_exception(error));
}
/***************************************************************************/
/**
* Initiates the termination sequence
*/
void terminateApplication(std::string const& error) {
// Make sure only one entity outputs data
std::unique_lock<std::mutex> lk(m_logger_mutex);
std::cerr << error;
std::terminate();
}
/***************************************************************************/
/**
* Output to stdout
*/
void toStdOut(std::string const& message) {
// Make sure only one entity outputs data
std::unique_lock<std::mutex> lk(m_logger_mutex);
std::cout << message;
}
/***************************************************************************/
/**
* Output to stderr
*/
void toStdErr(std::string const& message) {
// Make sure only one entity outputs data
std::unique_lock<std::mutex> lk(m_logger_mutex);
std::cerr << message;
}
private:
/***************************************************************************/
std::vector<std::shared_ptr<GBaseLogTarget>> m_log_cnt; ///< Contains the log targets
mutable std::mutex m_logger_mutex; ///< Needed for concurrent access to the log targets
std::shared_ptr<GBaseLogTarget> m_default_logger = std::make_shared<GConsoleLogger>(); ///< The default log target
};
/******************************************************************************/
////////////////////////////////////////////////////////////////////////////////
/******************************************************************************/
/**
* Objects of this class need to be added as the last element of a logging or
* exception chain, possibly wrapped into a macro giving it information about
* the file and lines from which it has been called.
*/
class GManipulator {
public:
/** @brief A constructor that stores the logging type only */
explicit G_API_COMMON GManipulator(logType);
/** @brief A constructor that stores both accompanying information and the logging type */
G_API_COMMON GManipulator(
std::string const&, logType
);
/*************************************************************************/
// Deleted and defaulted constructors, destructor and assignment operators.
// Rule of five ...
G_API_COMMON GManipulator() = delete; ///< Intentionally deleted -- enforce specific log types
G_API_COMMON GManipulator(GManipulator const&) = default;
G_API_COMMON GManipulator(GManipulator &&) noexcept = default;
G_API_COMMON GManipulator& operator=(GManipulator const&) = default;
G_API_COMMON GManipulator& operator=(GManipulator &&) = default;
/*************************************************************************/
/** @brief Retrieves the stored logging type */
G_API_COMMON logType getLogType() const;
/** @brief Retrieves stored accompanying information (if any) */
G_API_COMMON std::string getAccompInfo() const;
/** @brief Checks whether any accompanying information is available */
G_API_COMMON bool hasAccompInfo() const;
private:
std::string m_accomp_info; ///< Holds accompanying information
logType m_log_type; ///< Holds the type of logging event used for instantiating the manipulator
};
/******************************************************************************/
////////////////////////////////////////////////////////////////////////////////
/******************************************************************************/
/**
* Every entity in Geneva should be able to throw exceptions, regardless of whether
 * this happens from within a thread or in the context of serial execution. The output
* should go to different log targets defined by the user, such as stdout or a file
* (or possibly both). Emitting as much information as possible should be encouraged.
* Hence adding information to the exception handler should be as easy as adding
* data to a stream.
*/
class GLogStreamer {
public:
/** @brief A constructor that adds an extension string to the output */
explicit G_API_COMMON GLogStreamer(std::string const&);
/** @brief A constructor that logs data to a file specified by a boost::filesystem::path object */
explicit G_API_COMMON GLogStreamer(boost::filesystem::path);
/*************************************************************************/
// Deleted and defaulted constructors, destructor and assignment operators.
// Rule of five ...
G_API_COMMON GLogStreamer() = default;
G_API_COMMON GLogStreamer(GLogStreamer const&) = delete;
G_API_COMMON GLogStreamer(GLogStreamer &&) = default;
/** @brief A standard destructor */
virtual G_API_COMMON ~GLogStreamer() BASE = default;
G_API_COMMON GLogStreamer& operator=(GLogStreamer const&) = delete;
G_API_COMMON GLogStreamer& operator=(GLogStreamer &&) = default;
/*************************************************************************/
/** @brief Needed for std::ostringstream */
G_API_COMMON GLogStreamer &operator<<(std::ostream &(*val)(std::ostream &));
/** @brief Needed for std::ostringstream */
G_API_COMMON GLogStreamer &operator<<(std::ios &(*val)(std::ios &));
/** @brief Needed for std::ostringstream */
G_API_COMMON GLogStreamer &operator<<(std::ios_base &(*val)(std::ios_base &));
/** @brief A GManipulator object triggers the actual logging procedure */
G_API_COMMON void operator<<(GManipulator const& gm);
/** @brief Returns the content of the stream */
G_API_COMMON std::string content() const;
/** @brief Resets the stream content */
G_API_COMMON void reset();
/** @brief Checks whether an extension string has been registered */
G_API_COMMON bool hasExtension() const;
    /** @brief The content of the m_extension string */
G_API_COMMON std::string getExtension() const;
/** @brief Checks whether a log file name has been registered */
G_API_COMMON bool hasOneTimeLogFile() const;
/** @brief The name of the manually specified file */
G_API_COMMON boost::filesystem::path getOneTimeLogFile() const;
/****************************************************************************/
/**
* Output of all standard values and types with a predefined operator<<
*/
template<typename T>
GLogStreamer &operator<<(T const& val) {
m_oss << val;
return *this;
}
/****************************************************************************/
private:
/**
* Retrieve a string representing the current time and date. Note that
* this function is duplicated from a function in GCommonHelperFunctions.hpp
* in order to break circular header inclusion.
*
* @return A string representing the current time and date
*/
static std::string currentTimeAsString() {
#if BOOST_COMP_GNUC && (BOOST_COMP_GNUC < BOOST_VERSION_NUMBER(5,0,0))
return std::string("Dummy (g++ < 5.0 does not support put_time)");
#else
std::ostringstream oss;
std::time_t now = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
struct tm time_info{};
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
localtime_s(&time_info, &now);
#else // We assume a POSIX-compliant platform
localtime_r(&now, &time_info);
#endif
oss << std::put_time(&time_info, "%c");
return oss.str();
#endif
}
std::ostringstream m_oss; ///< Holds the actual streamed data
std::string m_extension; ///< Additional information about the logging source
boost::filesystem::path m_log_file; ///< The name of a manually specified log file
};
/******************************************************************************/
////////////////////////////////////////////////////////////////////////////////
/******************************************************************************/
} /* namespace Common */
} /* namespace Gem */
/******************************************************************************/
/**
 * We currently require the global GLogger object to be a singleton
*/
using log_singleton = Gem::Common::GSingletonT<Gem::Common::GLogger<Gem::Common::GLogStreamer>>;
#define glogger_ptr log_singleton::Instance(0)
#define glogger (*(log_singleton::Instance(0)))
/******************************************************************************/
////////////////////////////////////////////////////////////////////////////////
/******************************************************************************/
// Some related defines
#define LOCATIONSTRING std::string("in file ") + std::string(__FILE__) + std::string(" near line ") + std::to_string(__LINE__)
#define GEXCEPTION Gem::Common::GManipulator( LOCATIONSTRING, Gem::Common::logType::EXCEPTION)
#define GTERMINATION Gem::Common::GManipulator( LOCATIONSTRING, Gem::Common::logType::TERMINATION)
#define GWARNING Gem::Common::GManipulator( LOCATIONSTRING, Gem::Common::logType::WARNING)
#define GLOGGING Gem::Common::GManipulator( Gem::Common::logType::LOGGING)
#define GFILE Gem::Common::GManipulator( Gem::Common::logType::FILE)
#define GSTDOUT Gem::Common::GManipulator( Gem::Common::logType::STDOUT)
#define GSTDERR Gem::Common::GManipulator( LOCATIONSTRING, Gem::Common::logType::STDERR)
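// A minimal usage sketch (illustrative only):
//
//   glogger << "Starting optimization" << std::endl << GLOGGING;   // routed to the registered log targets
//   glogger << "Invalid parameter"     << std::endl << GEXCEPTION; // the manipulator carries file/line info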
/******************************************************************************/
|
{"hexsha": "92ae767ec4be6b8aaf93fd8d05d7d4e09a55c089", "size": 22622, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/common/GLogger.hpp", "max_stars_repo_name": "madmongo1/geneva", "max_stars_repo_head_hexsha": "15f1046ce578cb83f3ed5c2b3ae9f52f7cf4934f", "max_stars_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/common/GLogger.hpp", "max_issues_repo_name": "madmongo1/geneva", "max_issues_repo_head_hexsha": "15f1046ce578cb83f3ed5c2b3ae9f52f7cf4934f", "max_issues_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/common/GLogger.hpp", "max_forks_repo_name": "madmongo1/geneva", "max_forks_repo_head_hexsha": "15f1046ce578cb83f3ed5c2b3ae9f52f7cf4934f", "max_forks_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4536423841, "max_line_length": 126, "alphanum_fraction": 0.5609583591, "num_tokens": 4438}
|
import unittest
from vnpy.analyze.util.cal_returns import CalReturns
import pandas as pd
from sympy import *
class TestDict(unittest.TestCase):
def test_cal_annual_returns(self):
trades = {pd.Timestamp('2015-01-01'): 50000, pd.Timestamp('2016-01-01'): 50000, pd.Timestamp('2017-01-01'): 50000}
end_dates = pd.Timestamp('2021-01-01')
end_cash = 192000
result = CalReturns.annual_returns(trades, end_dates, end_cash)
print(result)
def test_annual_returns(self):
print('xxx')
def test_dataframe(self):
trade_date = [pd.Timestamp('2018-01-01'), pd.Timestamp('2018-05-01'), pd.Timestamp('2019-06-01')]
invest = [1, 2, 4]
trade_year = [date.year for date in trade_date]
df = pd.DataFrame(columns=('date', 'invest'))
df['date'] = trade_year
df['invest'] = invest
result = df.groupby('date').sum()
print(result)
def test_return(self):
        # Expected annualized return = payout ratio/PE + (1 - payout ratio)*PB/PE,
        # with the dividend payout ratio uniformly set to 25%
        # Equivalently: ROE*(1 - d) + (1/PE)*d, where d is the payout ratio
# PE 45.0994 PB 5.68441
hong = 0.25
pe = 45.0994
pb = 5.68441
print(hong/pe + 0.75*pb/pe)
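        # With these inputs: 0.25/45.0994 + 0.75*5.68441/45.0994 ≈ 0.100, i.e. ~10% per year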
def test_pos(self):
# abs(2 - 10 * value_total) ** 1.618
print(abs(2 - 10 * 0.15) ** 1.618)
print(1.9 ** 1.618)
|
{"hexsha": "0c0e4ad2305d1d537759261079c6eede1f0b5ca9", "size": 1321, "ext": "py", "lang": "Python", "max_stars_repo_path": "vnpy/analyze/test/cal_test.py", "max_stars_repo_name": "CatTiger/vnpy", "max_stars_repo_head_hexsha": "7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vnpy/analyze/test/cal_test.py", "max_issues_repo_name": "CatTiger/vnpy", "max_issues_repo_head_hexsha": "7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-21T02:42:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-21T02:42:32.000Z", "max_forks_repo_path": "vnpy/analyze/test/cal_test.py", "max_forks_repo_name": "CatTiger/vnpy", "max_forks_repo_head_hexsha": "7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2195121951, "max_line_length": 122, "alphanum_fraction": 0.5897047691, "include": true, "reason": "from sympy", "num_tokens": 438}
|
module Othermethods
using Random,Distributions
using LinearAlgebra
using FastGaussQuadrature
#using DoubleExponentialFormulas
#using DifferentialEquations
#using QuadGK
import ..CGmethods:bicg,cg,shiftedcg,reducedshiftedcg
import ..Diracoperators:DdagD_operator,DdagD_Staggered_operator,DdagDND_Staggered_operator
import ..Fermionfields:gauss_distribution_fermi_Z2!
import ..Fermionfields
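# Tn(n, x) = cos(n*acos(x)) is the Chebyshev polynomial of the first kind,
# T_n(x), for x in [-1, 1].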
function Tn(n,x)
return cos(n*acos(x))
end
function fitfunc(an,x)
val = 0
for n=1:2:length(an)-1
val += an[n+1]*Tn(n,x)
end
return val
end
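# Integrates a Chebyshev series sum_k a_k T_k(x) from a to b via the standard
# identity 2*int T_k dx = T_{k+1}/(k+1) - T_{k-1}/(k-1), which collapses to the
# (a_{k-1} - a_{k+1})/(2k) weights used below.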
function integcheb(an,a,b)
wa = 0
for k=1:length(an)-2
wa += (Tn(k,b)-Tn(k,a))*(an[k-1+1]-an[k+1+1])/k
end
wa /= 2
return wa
end
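# Evaluates the stochastic trace estimator at every quadrature node in vec_x at
# once: reducedshiftedcg solves all shifted systems (D/t)*I + N with a single
# Krylov sequence per Z2 noise vector.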
function calc_vecfunc_cg(func,vec_x,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
DN = Adiag .+ ANmat
numdata = length(vec_x)
data = zeros(Float64,numdata)
#Aϕ2 = zero(Aϕ)
#ϕtemp2 = zero(ϕtemp1)
#x = zero(ϕ)
#Nx = zero(x)
#=
vec_Ax = Array{typeof(x),1}(undef,numdata)
for l=1:numdata
vec_Ax[l] = zero(x)
end
=#
    vec_tinv = Adiag[1,1]*(1 ./ vec_x .- 1) # (D/t)*I + A = D*(1/t - 1)*I + (D*I + A), i.e. the shifts are D*(1/t - 1)
for k=1:M
for i=1:N
ϕ[i] = rand([-1,1])
#ϕ[i] = randn()
#ϕ[i] = rand([-1,1,-im,im])
#r = rand()*2π
#ϕ[i] = cos(r)+im*sin(r)
end
mul!(Aϕ,ANmat,ϕ)
#cg(ϕtemp1,DtN,ϕ,eps = 1e-15,maxsteps= 3000)
#shiftedcg(vec_x,vec_β,x,WdagW,univ.η,eps = univ.fparam.eps,maxsteps= univ.fparam.MaxCGstep)
#@time shiftedcg(vec_Ax,vec_tinv,x,DN,ϕ,eps = 1e-15,maxsteps= 3000)
ϕtemp1 .= 0
θ = reducedshiftedcg(Aϕ,vec_tinv,ϕtemp1,DN,ϕ,eps = 1e-15,maxsteps= 3000)
θ ./= (M .* vec_x)
data .+= real.(θ)
#=
for l=1:numdata
t = vec_x[l]
DtN = Adiag .+ t*ANmat
cg(ϕtemp2,DtN,ϕ,eps = 1e-15,maxsteps= 3000)
mul!(Aϕ2,ANmat,ϕtemp2)
val = real(ϕ ⋅ Aϕ2 /M)
println("$t data,val ",real(θ[l]),"\t",val)
end
=#
#=
for l=1:numdata
#mul!(Aϕ,ANmat,vec_Ax[l])
valshift = real(θ[l]) #real(Nx ⋅ vec_Ax[l] /(M*vec_x[l]))
#println("val1 ",valshift,"\t theta ",real(θ[l]))
#=
mul!(Aϕ,ANmat,vec_Ax[l])
valshift = real(ϕ ⋅ Aϕ /(M*vec_x[l]))
println("val2 ",valshift)
=#
data[l] += valshift
t = vec_x[l]
DtN = Adiag .+ t*ANmat
cg(ϕtemp1,DtN,ϕ,eps = 1e-15,maxsteps= 3000)
mul!(Aϕ,ANmat,ϕtemp1)
val = real(ϕ ⋅ Aϕ /M)
println("$t data,val ",valshift,"\t",val)
#val += ϕ ⋅ Aϕ /M
end
ϕtemp1 .= 0
=#
#=
for l=1:numdata
vec_Ax[l] .= 0
end
=#
end
#exit()
return data
exit()
for (i,x) in enumerate(vec_x)
#val1 = func(x)
#println("1 ",val1)
val1,val2 = calc_fa(x,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
#println("2 ",val1)
#exit()
println("1 ",data[i])
data[i] = val1
println("2 ",data[i])
end
return data
end
function calc_vecfunc(func,vec_x)
N = length(vec_x)
data = zeros(Float64,N)
for (i,x) in enumerate(vec_x)
data[i] = func(x)
end
return data
end
function calc_fa(t,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
DtN = Adiag .+ t*ANmat
if t < 1e-15
DtN2 = Adiag
else
DtN2 = Adiag ./t .+ ANmat
end
#count += 1
#println(count)
val = 0
val2 = 0
#println("random")
for k=1:M
#gauss_distribution_fermi_Z2!(ϕ)
for i=1:N
ϕ[i] = rand([-1,1])
end
#rand!(d,ϕ)
#Dinvϕ = DtN \ ϕ
cg(ϕtemp1,DtN,ϕ,eps = 1e-15,maxsteps= 3000)
mul!(Aϕ,ANmat,ϕtemp1)
val += ϕ ⋅ Aϕ /M
if t < 1e-15
ϕtemp1 = ϕ/Adiag[1,1]
mul!(Aϕ,ANmat,ϕtemp1)
val2 += ϕ ⋅ Aϕ /M
else
cg(ϕtemp1,DtN2,ϕ,eps = 1e-15,maxsteps= 3000)
mul!(Aϕ,ANmat,ϕtemp1)
val2 += ϕ ⋅ Aϕ /(M*t)
end
#exit()
#val += ϕ'*Aϕ/M#ANmat*Dinvϕ/M
end
println("ts: $t ",real(val),"\t",real(val2))
return real(val),real(val2)
end
#calc_f,m,nc,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1
function chebyshevfit(func,m::T,nc,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1) where T <: Int
Δt = (π/2)/2m
vec_x = zeros(Float64,2m+1)
for i=0:2m
t = i*Δt
x = cos(t)
vec_x[i+1] = x
end
@time data = calc_vecfunc_cg(func,vec_x,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
#fp = open("hiseki.dat","w")
#close(fp)
#=
data1 = calc_vecfunc(func,vec_x)
for i=1:length(data)
println(data[i],"\t",data1[i])
end
exit()
=#
an = zeros(Float64,nc+1)
for n=1:2:nc
for i=1:m
k = 2i-2
k1 = 2i-1
k2 = 2i
t = k*Δt
t1 = k1*Δt
t2 = k2*Δt
val = (2/π)*2*cos(n*t)*data[k+1]
val1 = (2/π)*2*cos(n*t1)*data[k1+1]
val2 = (2/π)*2*cos(n*t2)*data[k2+1]
an[n+1] += Δt*(val + 4*val1 + val2)/3
end
end
return an
end
function chebyshevfit(func,m,nc)
Δt = (π/2)/2m
vec_x = zeros(Float64,2m+1)
for i=0:2m
t = i*Δt
x = cos(t)
vec_x[i+1] = x
end
data = calc_vecfunc(vec_x,func)
an = zeros(Float64,nc+1)
for n=1:2:nc
for i=1:m
k = 2i-2
k1 = 2i-1
k2 = 2i
t = k*Δt
t1 = k1*Δt
t2 = k2*Δt
val = (2/π)*2*cos(n*t)*data[k+1]
val1 = (2/π)*2*cos(n*t1)*data[k1+1]
val2 = (2/π)*2*cos(n*t2)*data[k2+1]
an[n+1] += Δt*(val + 4*val1 + val2)/3
end
end
return an
data = zeros(Float64,m)
data1 = zeros(Float64,m)
data2 = zeros(Float64,m)
count = 0
for i=1:m
t = (2i-2)*Δt
x = cos(t)
t1 = (2i-1)*Δt
x1 = cos(t1)
t2 = 2i*Δt
x2 = cos(t2)
val = func(x)
val1 = func(x1)
val2 = func(x2)
data[i] = val
data1[i] = val1
data2[i] = val2
end
an = zeros(Float64,nc+1)
for n=1:2:nc
for i=1:m
t = (2i-2)*Δt
t1 = (2i-1)*Δt
t2 = 2i*Δt
val = (2/π)*2*cos(n*t)*data[i]
val1 = (2/π)*2*cos(n*t1)*data1[i]
val2 = (2/π)*2*cos(n*t2)*data2[i]
an[n+1] += Δt*(val + 4*val1 + val2)/3
end
end
return an
fp = open("hiseki.dat","w")
for i=1:m
t = (2i-2)*Δt
x = cos(t)
t1 = (2i-1)*Δt
x1 = cos(t1)
t2 = 2i*Δt
x2 = cos(t2)
val = data[i]
val1 = data1[i]
val2 = data2[i]
valx = 0
for n=1:2:nc
valx += an[n+1]*Tn(n,x)
end
println("$x $val $valx")
println(fp,"$x $val $valx")
end
close(fp)
wa2 = 0
Δx = 1/2m
for i=1:m
x = (2i-2)*Δx
x1 = (2i-1)*Δx
x2 = 2i*Δx
val = fitfunc(an,x)
val1 = fitfunc(an,x1)
val2 = fitfunc(an,x2)
wa2 += Δx*(val + 4*val1 + val2)/3
end
println(wa2)
return wa2
exit()
return wa
end
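# Stochastic estimate of log(det(A)) for A = D*I + N (diagonal plus off-diagonal
# part): uses log det A = log det(D*I) + ∫₀¹ tr[N (D*I + t*N)⁻¹] dt, with the
# trace estimated from Z2 noise vectors and CG solves.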
function tdlogdet(A,M,m,tempvecs;filename=nothing,nc = 20,nonc = false)
N,_ = size(A)
Adiag = zero(A)
d = Normal(0,1)
ϕ = tempvecs[1]
Aϕ = tempvecs[2]
ϕtemp1 = tempvecs[3]
#=
for i=1:M
rand!(d,ϕ)
mul!(Aϕ,A,ϕ)
for k=1:N
Adiag[k,k] += real(conj(ϕ[k])*Aϕ[k]/M)
end
end
=#
for k=1:N
Adiag[k,k] = A[1,1]
end
#for k=1:N
# println("$k $(A[k,k]) $(Adiag[k,k])")
#end
ANmat = A .- Adiag
Δ0 = 0
for k=1:N
Δ0 += real(log(Adiag[k,k]))
end
Δx = 1/2m
#@time ldet = real(logdet(Matrix(A)))
#println("log det A(exact): $ldet")
count = 0
function calc_f(t)
DtN = Adiag .+ t*ANmat
if t < 1e-15
DtN2 = Adiag
else
DtN2 = Adiag ./t .+ ANmat
end
#count += 1
#println(count)
val = 0
val2 = 0
#println("random")
for k=1:M
#gauss_distribution_fermi_Z2!(ϕ)
for i=1:N
ϕ[i] = rand([-1,1])
end
#rand!(d,ϕ)
#Dinvϕ = DtN \ ϕ
cg(ϕtemp1,DtN,ϕ,eps = 1e-15,maxsteps= 3000)
mul!(Aϕ,ANmat,ϕtemp1)
val += ϕ ⋅ Aϕ /M
if t < 1e-15
ϕtemp1 = ϕ/Adiag[1,1]
mul!(Aϕ,ANmat,ϕtemp1)
val2 += ϕ ⋅ Aϕ /M
else
cg(ϕtemp1,DtN2,ϕ,eps = 1e-15,maxsteps= 3000)
mul!(Aϕ,ANmat,ϕtemp1)
val2 += ϕ ⋅ Aϕ /(M*t)
end
#exit()
#val += ϕ'*Aϕ/M#ANmat*Dinvϕ/M
end
println("t: $t ",real(val),"\t",real(val2))
return real(val)
end
function calc_finv(t)
if t == 0
DtN = Adiag
else
DtN = Adiag ./t .+ ANmat
end
#count += 1
#println(count)
val = 0
#println("random")
for k=1:M
#gauss_distribution_fermi_Z2!(ϕ)
for i=1:N
ϕ[i] = rand([-1,1])
end
#rand!(d,ϕ)
#Dinvϕ = DtN \ ϕ
if t == 0
ϕtemp1 = ϕ/Adiag[1,1]
mul!(Aϕ,ANmat,ϕtemp1)
val += ϕ ⋅ Aϕ /M
else
cg(ϕtemp1,DtN,ϕ,eps = 1e-15,maxsteps= 3000)
mul!(Aϕ,ANmat,ϕtemp1)
val += ϕ ⋅ Aϕ /(M*t)
end
#exit()
#val += ϕ'*Aϕ/M#ANmat*Dinvϕ/M
end
println("t: $t ",val)
return real(val)
end
Δx = 1/2m
Δ = Δ0
Δc = Δ0
#I,E = quadgk(calc_f, 0,1,maxevals=3m)
#println(I)
#println(E)
#exit()
nodes, weights = gausslegendre( 1000 )
vec_x = nodes ./ 2 .+ 1/2
@time data = calc_vecfunc_cg(calc_f,vec_x,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
Δ += dot(weights,data)/2
return Δ
#nc = 20
if nonc == false
#@time ldet = real(logdet(Matrix(A)))
ldet = 0
println("log det A(exact): $ldet")
nodes, weights = gausslegendre( 1000 )
vec_x = nodes ./ 2 .+ 1/2
#=
@time data = calc_vecfunc_cg(calc_f,vec_x,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
Δ += dot(weights,data)/2
println(" Δ ",Δ)
exit()
=#
#=
fpk = open("kdep_L8_nc10_complex.dat","w")
kmax=32
for k=1:kmax
M2 = 10
nc2 = 10
m2 = 1000
@time an = chebyshevfit(calc_f,m2,nc2,M2,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
Δ = integcheb(an,0,1) + Δ0
#@time data = calc_vecfunc_cg(calc_f,vec_x,M2,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
#Δ = dot(weights,data)/2 + Δ0
println("$k $Δ $ldet")
println(fpk,"$k $Δ $ldet")
end
close(fpk)
exit()
=#
fp4 = open("MGL10_complex_k4.dat","w")
kmax=4
for M2 in [16,32,64,128,256,512,1024]
Δ = Δ0
for k=1:kmax
@time data = calc_vecfunc_cg(calc_f,vec_x,M2,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
Δ += (dot(weights,data)/2)/kmax
end
#Δ = dot(weights,data)/2 + Δ0
println("M = $M2, Δ = $Δ, exact: $ldet ")
println(fp4,"$M2 $Δ $ldet ")
end
close(fp4)
exit()
#=
vec_x = zeros(Float64,2m+1)
for i=0:2m
x = i*Δx
vec_x[i+1] = x
end
@time data = calc_vecfunc_cg(calc_f,vec_x,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
for i=1:m
k = 2i-2
k1 = 2i-1
k2 = 2i
x = k*Δx
x1 = k1*Δx
x2 = k2*Δx
val = data[k+1]
val1 = data[k1+1]
val2 = data[k2+1]
Δ += Δx*(val + 4*val1 + val2)/3
end
println(" Δ ",Δ)
=#
fp4 = open("Mnc.dat","w")
an = chebyshevfit(calc_f,m,nc,M,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
for M2 in [16,32,64,128,256]
for nc2 in [10,20,50,100,150,200]
an = chebyshevfit(calc_f,m,nc2,M2,N,ANmat,Adiag,ϕ,Aϕ,ϕtemp1)
Δ3 = integcheb(an,0,1) + Δ0
println("M = $M2, nc = $nc2, Δ = $Δ3, exact: $ldet ")
println(fp4,"$M2 $nc2 $Δ3 $ldet ")
end
end
close(fp4)
exit()
Δ3 = integcheb(an,0,1) + Δ0
println("an: ",an," Δ ",Δ3)
#aninv = chebyshevfit(calc_finv,m,nc)
#println("aninv: ",aninv)
#Δ3 = integcheb(aninv,0,1) + Δ0
#println("aninv: ",aninv," Δ ",Δ3)
exit()
end
if filename != nothing
fp = open(filename,"w")
end
for i=1:m
x = (2i-2)*Δx
x1 = (2i-1)*Δx
x2 = 2i*Δx
if nonc
val = calc_f(x)
val1 = calc_f(x1)
val2 = calc_f(x2)
else
valc = fitfunc(an,x)
val1c = fitfunc(an,x1)
val2c = fitfunc(an,x2)
end
if filename != nothing
if nonc
println(fp,x,"\t",val)
println(fp,x1,"\t",val1)
println(fp,x2,"\t",val2)
else
println(fp,x,"\t",valc)
println(fp,x1,"\t",val1c)
println(fp,x2,"\t",val2c)
end
flush(fp)
end
if nonc
Δ += Δx*(val + 4*val1 + val2)/3
else
Δc += Δx*(valc + 4*val1c + val2c)/3
end
#println("i=$i ",Δ,"\t",ldet)
end
    if filename != nothing
        close(fp)
    end
    if nonc
        return real(Δ)
    else
        return real(Δ3)
    end
end
function tdlogdet(A::T,M,m;filename=nothing,nc = 20,nonc = false) where T <: DdagD_operator
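    # Operator variant for DdagD: probe the (assumed uniform) diagonal entry
    # of A with a unit source vector, then apply the same integral identity
    # with the non-diagonal part wrapped as a DdagDND_Staggered_operator.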
ic,ix,iy,iz,it,α = 1,1,1,1,1,1
temps = A.dirac._temporal_fermi
xi = temps[7]
x0 = temps[8]
Fermionfields.clear!(x0)
Fermionfields.clear!(xi)
x0[ic,ix,iy,iz,it,α] = 1
mul!(xi,A,x0,(ix,iy,iz,it,α))
diagvalue = xi[ic,ix,iy,iz,it,α]
#println(diagvalue)
N,_ = size(A)
    Δ0 = N*real(log(diagvalue))
Δx = 1/2m
ϕ = temps[7]
ϕtemp1 = temps[8]
Aϕ = temps[9]
ANmat = DdagDND_Staggered_operator(A) #non-diagonal part of A
#@time ldet = real(logdet(Matrix(A)))
#println("log det A(exact): $ldet")
count = 0
function calc_f(t)
DtN = DdagD_Staggered_operator(A,t)
#DtN = Adiag .+ t*ANmat
#count += 1
#println(count)
val = 0
#println("random")
for k=1:M
gauss_distribution_fermi_Z2!(ϕ)
cg(ϕtemp1,DtN,ϕ,eps = 1e-10,maxsteps= 3000)
mul!(Aϕ,ANmat,ϕtemp1)
val += ϕ ⋅ Aϕ /M
println("t: $t ",val)
exit()
#val += ϕ'*Aϕ/M#ANmat*Dinvϕ/M
end
return real(val)
end
Δ = Δ0
Δc = Δ0
#I,E = quadgk(calc_f, 0,1,maxevals=3m)
#println(I)
#println(E)
#exit()
#nc = 20
if nonc == false
an = chebyshevfit(calc_f,m,nc)
Δ3 = integcheb(an,0,1) + Δ0
end
if filename != nothing
fp = open(filename,"w")
end
for i=1:m
x = (2i-2)*Δx
x1 = (2i-1)*Δx
x2 = 2i*Δx
if nonc
val = calc_f(x)
val1 = calc_f(x1)
val2 = calc_f(x2)
else
valc = fitfunc(an,x)
val1c = fitfunc(an,x1)
val2c = fitfunc(an,x2)
end
if filename != nothing
if nonc
println(fp,x,"\t",val)
println(fp,x1,"\t",val1)
println(fp,x2,"\t",val2)
else
println(fp,x,"\t",valc)
println(fp,x1,"\t",val1c)
println(fp,x2,"\t",val2c)
end
flush(fp)
end
if nonc
Δ += Δx*(val + 4*val1 + val2)/3
else
Δc += Δx*(valc + 4*val1c + val2c)/3
end
#println("i=$i ",Δ,"\t",ldet)
end
    if filename != nothing
        close(fp)
    end
    if nonc
        return real(Δ)
    else
        return real(Δ3)
    end
end
end
|
{"hexsha": "648489c121a655e49ae9e20965ee908733ff6aaa", "size": 20498, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/fermions/misc.jl", "max_stars_repo_name": "RJaBi/LatticeQCD.jl", "max_stars_repo_head_hexsha": "a900545295a981a50a33c8aea8f5994b5bf1650a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-12-05T06:23:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T19:23:38.000Z", "max_issues_repo_path": "src/fermions/misc.jl", "max_issues_repo_name": "RJaBi/LatticeQCD.jl", "max_issues_repo_head_hexsha": "a900545295a981a50a33c8aea8f5994b5bf1650a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-10-19T07:24:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T23:07:43.000Z", "max_forks_repo_path": "src/fermions/misc.jl", "max_forks_repo_name": "RJaBi/LatticeQCD.jl", "max_forks_repo_head_hexsha": "a900545295a981a50a33c8aea8f5994b5bf1650a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-12-08T04:29:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-22T15:58:15.000Z", "avg_line_length": 25.0280830281, "max_line_length": 105, "alphanum_fraction": 0.3682310469, "num_tokens": 6419}
|
#define BOOST_TEST_MODULE MyTest
#include <boost/test/unit_test.hpp>
#include "../src/shared/helpers.hpp"
#include <ostream>
std::ostream& operator<<(std::ostream& os, const vec_t& vec) {
os << "[";
int sz = vec.size();
for (const auto& v : vec) {
os << v;
sz--;
if (sz) {
os << ", ";
}
}
os << "]";
return os;
}
// vector serialization for BOOST_REQUIRE_EQUAL
// https://stackoverflow.com/questions/17572583/boost-check-fails-to-compile-operator-for-custom-types
namespace boost { namespace test_tools { namespace tt_detail {
template<>
struct print_log_value<vec_t> {
void operator()(std::ostream & os, const vec_t& vec) {
::operator<<(os, vec);
}
};
}}}
BOOST_AUTO_TEST_SUITE(ultra_ha)  // was "ulta_ha", presumably a typo for the project name
BOOST_AUTO_TEST_CASE(add_non_negative) {
BOOST_REQUIRE_EQUAL(0, add(0, 0));
BOOST_REQUIRE_EQUAL(5, add(2, 3));
BOOST_REQUIRE_EQUAL(201, add(123, 78));
}
BOOST_AUTO_TEST_CASE(add_non_negative_overflow) {
// ...
}
// ===================================
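// The cases below exercise find_largest_sum which, judging by the expected
// outputs, should select a subset whose sum is as large as possible without
// exceeding the given limit.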
BOOST_AUTO_TEST_CASE(empty_set) {
vec_t result;
uint64_t largest_sum;
find_largest_sum(11, {}, result, largest_sum);
const vec_t expected{};
BOOST_REQUIRE_EQUAL(expected, result);
BOOST_REQUIRE_EQUAL(0, largest_sum);
}
BOOST_AUTO_TEST_CASE(duplicate_elements) {
vec_t result;
uint64_t largest_sum;
find_largest_sum(3, {1, 1}, result, largest_sum);
const vec_t expected{1, 1};
BOOST_REQUIRE_EQUAL(expected, result);
BOOST_REQUIRE_EQUAL(2, largest_sum);
}
BOOST_AUTO_TEST_CASE(sample) {
vec_t result;
uint64_t largest_sum;
find_largest_sum(11, {1, 2, 3, 4, 5, 6, 7}, result, largest_sum);
// one possible answer would be
const vec_t expected{5, 6};
BOOST_REQUIRE_EQUAL(expected, result);
BOOST_REQUIRE_EQUAL(11, largest_sum);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "9f38521c28ec315b0f00d7bbad3d9781e5672253", "size": 1866, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/test_main.cpp", "max_stars_repo_name": "justefg/ultra_ha", "max_stars_repo_head_hexsha": "681d4a5db7c03742fad8e0357d4d3c818afb7737", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_main.cpp", "max_issues_repo_name": "justefg/ultra_ha", "max_issues_repo_head_hexsha": "681d4a5db7c03742fad8e0357d4d3c818afb7737", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_main.cpp", "max_forks_repo_name": "justefg/ultra_ha", "max_forks_repo_head_hexsha": "681d4a5db7c03742fad8e0357d4d3c818afb7737", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5616438356, "max_line_length": 102, "alphanum_fraction": 0.6623794212, "num_tokens": 486}
|
{-# OPTIONS --without-K --safe #-}
module PlainPi where
open import Data.Empty
open import Data.Unit
open import Data.Sum
open import Data.Product
open import Relation.Binary.PropositionalEquality
infixr 70 _×ᵤ_
infixr 60 _+ᵤ_
infixr 50 _⊚_
------------------------------------------------------------------------------
-- Pi
data 𝕌 : Set
⟦_⟧ : (A : 𝕌) → Set
data _⟷_ : 𝕌 → 𝕌 → Set
eval : {A B : 𝕌} → (A ⟷ B) → ⟦ A ⟧ → ⟦ B ⟧
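-- 𝕌 codes the types of Π, ⟦_⟧ interprets codes as Agda sets, _⟷_ is the
-- type of reversible combinators between codes, and eval gives their
-- semantics; the four are declared mutually above and defined below.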
data 𝕌 where
𝟘 : 𝕌
𝟙 : 𝕌
_+ᵤ_ : 𝕌 → 𝕌 → 𝕌
_×ᵤ_ : 𝕌 → 𝕌 → 𝕌
⟦ 𝟘 ⟧ = ⊥
⟦ 𝟙 ⟧ = ⊤
⟦ t₁ +ᵤ t₂ ⟧ = ⟦ t₁ ⟧ ⊎ ⟦ t₂ ⟧
⟦ t₁ ×ᵤ t₂ ⟧ = ⟦ t₁ ⟧ × ⟦ t₂ ⟧
data _⟷_ where
unite₊l : {t : 𝕌} → 𝟘 +ᵤ t ⟷ t
uniti₊l : {t : 𝕌} → t ⟷ 𝟘 +ᵤ t
unite₊r : {t : 𝕌} → t +ᵤ 𝟘 ⟷ t
uniti₊r : {t : 𝕌} → t ⟷ t +ᵤ 𝟘
swap₊ : {t₁ t₂ : 𝕌} → t₁ +ᵤ t₂ ⟷ t₂ +ᵤ t₁
assocl₊ : {t₁ t₂ t₃ : 𝕌} → t₁ +ᵤ (t₂ +ᵤ t₃) ⟷ (t₁ +ᵤ t₂) +ᵤ t₃
assocr₊ : {t₁ t₂ t₃ : 𝕌} → (t₁ +ᵤ t₂) +ᵤ t₃ ⟷ t₁ +ᵤ (t₂ +ᵤ t₃)
unite⋆l : {t : 𝕌} → 𝟙 ×ᵤ t ⟷ t
uniti⋆l : {t : 𝕌} → t ⟷ 𝟙 ×ᵤ t
unite⋆r : {t : 𝕌} → t ×ᵤ 𝟙 ⟷ t
uniti⋆r : {t : 𝕌} → t ⟷ t ×ᵤ 𝟙
swap⋆ : {t₁ t₂ : 𝕌} → t₁ ×ᵤ t₂ ⟷ t₂ ×ᵤ t₁
assocl⋆ : {t₁ t₂ t₃ : 𝕌} → t₁ ×ᵤ (t₂ ×ᵤ t₃) ⟷ (t₁ ×ᵤ t₂) ×ᵤ t₃
assocr⋆ : {t₁ t₂ t₃ : 𝕌} → (t₁ ×ᵤ t₂) ×ᵤ t₃ ⟷ t₁ ×ᵤ (t₂ ×ᵤ t₃)
absorbr : {t : 𝕌} → 𝟘 ×ᵤ t ⟷ 𝟘
absorbl : {t : 𝕌} → t ×ᵤ 𝟘 ⟷ 𝟘
factorzr : {t : 𝕌} → 𝟘 ⟷ t ×ᵤ 𝟘
factorzl : {t : 𝕌} → 𝟘 ⟷ 𝟘 ×ᵤ t
dist : {t₁ t₂ t₃ : 𝕌} → (t₁ +ᵤ t₂) ×ᵤ t₃ ⟷ (t₁ ×ᵤ t₃) +ᵤ (t₂ ×ᵤ t₃)
factor : {t₁ t₂ t₃ : 𝕌} → (t₁ ×ᵤ t₃) +ᵤ (t₂ ×ᵤ t₃) ⟷ (t₁ +ᵤ t₂) ×ᵤ t₃
distl : {t₁ t₂ t₃ : 𝕌} → t₁ ×ᵤ (t₂ +ᵤ t₃) ⟷ (t₁ ×ᵤ t₂) +ᵤ (t₁ ×ᵤ t₃)
factorl : {t₁ t₂ t₃ : 𝕌 } → (t₁ ×ᵤ t₂) +ᵤ (t₁ ×ᵤ t₃) ⟷ t₁ ×ᵤ (t₂ +ᵤ t₃)
id⟷ : {t : 𝕌} → t ⟷ t
_⊚_ : {t₁ t₂ t₃ : 𝕌} → (t₁ ⟷ t₂) → (t₂ ⟷ t₃) → (t₁ ⟷ t₃)
_⊕_ : {t₁ t₂ t₃ t₄ : 𝕌} → (t₁ ⟷ t₃) → (t₂ ⟷ t₄) → (t₁ +ᵤ t₂ ⟷ t₃ +ᵤ t₄)
_⊗_ : {t₁ t₂ t₃ t₄ : 𝕌} → (t₁ ⟷ t₃) → (t₂ ⟷ t₄) → (t₁ ×ᵤ t₂ ⟷ t₃ ×ᵤ t₄)
eval unite₊l (inj₂ v) = v
eval uniti₊l v = inj₂ v
eval unite₊r (inj₁ v) = v
eval uniti₊r v = inj₁ v
eval swap₊ (inj₁ v) = inj₂ v
eval swap₊ (inj₂ v) = inj₁ v
eval assocl₊ (inj₁ v) = inj₁ (inj₁ v)
eval assocl₊ (inj₂ (inj₁ v)) = inj₁ (inj₂ v)
eval assocl₊ (inj₂ (inj₂ v)) = inj₂ v
eval assocr₊ (inj₁ (inj₁ v)) = inj₁ v
eval assocr₊ (inj₁ (inj₂ v)) = inj₂ (inj₁ v)
eval assocr₊ (inj₂ v) = inj₂ (inj₂ v)
eval unite⋆l (tt , v) = v
eval uniti⋆l v = (tt , v)
eval unite⋆r (v , tt) = v
eval uniti⋆r v = (v , tt)
eval swap⋆ (v₁ , v₂) = (v₂ , v₁)
eval assocl⋆ (v₁ , (v₂ , v₃)) = ((v₁ , v₂) , v₃)
eval assocr⋆ ((v₁ , v₂) , v₃) = (v₁ , (v₂ , v₃))
eval absorbl ()
eval absorbr ()
eval factorzl ()
eval factorzr ()
eval dist (inj₁ v₁ , v₃) = inj₁ (v₁ , v₃)
eval dist (inj₂ v₂ , v₃) = inj₂ (v₂ , v₃)
eval factor (inj₁ (v₁ , v₃)) = (inj₁ v₁ , v₃)
eval factor (inj₂ (v₂ , v₃)) = (inj₂ v₂ , v₃)
eval distl (v , inj₁ v₁) = inj₁ (v , v₁)
eval distl (v , inj₂ v₂) = inj₂ (v , v₂)
eval factorl (inj₁ (v , v₁)) = (v , inj₁ v₁)
eval factorl (inj₂ (v , v₂)) = (v , inj₂ v₂)
eval id⟷ v = v
eval (c₁ ⊚ c₂) v = eval c₂ (eval c₁ v)
eval (c₁ ⊕ c₂) (inj₁ v) = inj₁ (eval c₁ v)
eval (c₁ ⊕ c₂) (inj₂ v) = inj₂ (eval c₂ v)
eval (c₁ ⊗ c₂) (v₁ , v₂) = (eval c₁ v₁ , eval c₂ v₂)
|
{"hexsha": "6589a2e144c3fd7bb0922286f07048f5d57d9e99", "size": 3159, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "fracGC/PlainPi.agda", "max_stars_repo_name": "JacquesCarette/pi-dual", "max_stars_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2015-08-18T21:40:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-05T01:07:57.000Z", "max_issues_repo_path": "fracGC/PlainPi.agda", "max_issues_repo_name": "JacquesCarette/pi-dual", "max_issues_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-06-07T16:27:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-29T20:41:23.000Z", "max_forks_repo_path": "fracGC/PlainPi.agda", "max_forks_repo_name": "JacquesCarette/pi-dual", "max_forks_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-05-29T01:56:33.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-10T09:47:13.000Z", "avg_line_length": 32.2346938776, "max_line_length": 78, "alphanum_fraction": 0.4795821462, "num_tokens": 2015}
|
#! /usr/bin/env python
import math
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
import macrodensity as md
job_identifier = "minimization1"
input_file = f'../../thesis/Slab/Doped/HER_H20/hydrogen_binding/0001/{job_identifier}/LOCPOT'
lattice_vector = 29.966237
output_file = f'../results/{job_identifier}_planar.dat'
# No need to alter anything after here
#------------------------------------------------------------------
# Get the potential
# This section should not be altered
#------------------------------------------------------------------
vasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density(input_file)
vector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)
resolution_x = vector_a/NGX
resolution_y = vector_b/NGY
resolution_z = vector_c/NGZ
grid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)
#------------------------------------------------------------------
## POTENTIAL
planar = md.planar_average(grid_pot,NGX,NGY,NGZ)
## MACROSCOPIC AVERAGE
macro = md.macroscopic_average(planar,lattice_vector,resolution_z)
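# The macroscopic average smooths the planar average over one lattice period
# along z, suppressing the atomic-scale oscillations of the potential.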
plt.plot(planar)
plt.plot(macro)
# Save before plt.show(): once the interactive window is closed the current
# figure is empty, so a later savefig would write a blank image.
plt.savefig('Planar.eps')
plt.savefig(f"../results/{job_identifier}.jpg")
plt.show()
np.savetxt(output_file,planar)
##------------------------------------------------------------------
|
{"hexsha": "15dbf08db1b6584619e8fc545269ae1070aea1fd", "size": 2012, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/PlanarAverage.py", "max_stars_repo_name": "Zarand3r/MacroDensity", "max_stars_repo_head_hexsha": "dc1afd60e2c3e8f6f81ef0cad85619bc1469c7ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/PlanarAverage.py", "max_issues_repo_name": "Zarand3r/MacroDensity", "max_issues_repo_head_hexsha": "dc1afd60e2c3e8f6f81ef0cad85619bc1469c7ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/PlanarAverage.py", "max_forks_repo_name": "Zarand3r/MacroDensity", "max_forks_repo_head_hexsha": "dc1afd60e2c3e8f6f81ef0cad85619bc1469c7ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7111111111, "max_line_length": 195, "alphanum_fraction": 0.680417495, "include": true, "reason": "import numpy", "num_tokens": 499}
|
[STATEMENT]
lemma fmap_resT_simps [simp]:
"fmap\<cdot>f\<cdot>(\<bottom>::'a\<cdot>'f::functor resT) = \<bottom>"
"fmap\<cdot>f\<cdot>(Done\<cdot>x :: 'a\<cdot>'f::functor resT) = Done\<cdot>(f\<cdot>x)"
"fmap\<cdot>f\<cdot>(More\<cdot>m :: 'a\<cdot>'f::functor resT) = More\<cdot>(fmap\<cdot>(fmap\<cdot>f)\<cdot>m)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fmap\<cdot>f\<cdot>\<bottom> = \<bottom> &&& fmap\<cdot>f\<cdot>(Done\<cdot>x) = Done\<cdot>(f\<cdot>x) &&& fmap\<cdot>f\<cdot>(More\<cdot>m) = More\<cdot>(fmap\<cdot>(fmap\<cdot>f)\<cdot>m)
[PROOF STEP]
unfolding fmap_def [where 'f="'f resT"]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. COERCE((udom \<rightarrow> udom) \<rightarrow> udom\<cdot>'f resT \<rightarrow> udom\<cdot>'f resT, ('a \<rightarrow> 'b) \<rightarrow> 'a\<cdot>'f resT \<rightarrow> 'b\<cdot>'f resT)\<cdot>fmapU\<cdot>f\<cdot>\<bottom> = \<bottom> &&& COERCE((udom \<rightarrow> udom) \<rightarrow> udom\<cdot>'f resT \<rightarrow> udom\<cdot>'f resT, ('a \<rightarrow> 'b) \<rightarrow> 'a\<cdot>'f resT \<rightarrow> 'b\<cdot>'f resT)\<cdot>fmapU\<cdot>f\<cdot>(Done\<cdot>x) = Done\<cdot>(f\<cdot>x) &&& COERCE((udom \<rightarrow> udom) \<rightarrow> udom\<cdot>'f resT \<rightarrow> udom\<cdot>'f resT, ('a \<rightarrow> 'b) \<rightarrow> 'a\<cdot>'f resT \<rightarrow> 'b\<cdot>'f resT)\<cdot>fmapU\<cdot>f\<cdot>(More\<cdot>m) = More\<cdot>(fmap\<cdot>(COERCE((udom \<rightarrow> udom) \<rightarrow> udom\<cdot>'f resT \<rightarrow> udom\<cdot>'f resT, ('a \<rightarrow> 'b) \<rightarrow> 'a\<cdot>'f resT \<rightarrow> 'b\<cdot>'f resT)\<cdot>fmapU\<cdot>f)\<cdot>m)
[PROOF STEP]
by (simp_all add: coerce_simp)
|
{"llama_tokens": 682, "file": "Tycon_Resumption_Transformer", "length": 2}
|
# _*_coding:utf-8_*_
# @auther:FelixFu
# @Date: 2021.4.14
# @github:https://github.com/felixfu520
import os
import time
import numpy as np
from tqdm import tqdm
import torch
from torchvision import transforms
from utils import transforms as local_transforms
from base import BaseTrainer, DataPrefetcher
from utils.metrics import eval_metrics, AverageMeter, confusionMatrix
import models
# ---------------------------- mixed models
# def get_instance(module, name, config, *args):
# # GET THE CORRESPONDING CLASS / FCT
# return getattr(module, config[name]['type'])(*args, **config[name]['args'])
#
#
# from collections import OrderedDict
# def get_model():
# # print(f'\nLoading checkpoint : /home/felixfu/cds/classification/saved/BDD-Densenet161_assist/05-17_13-59/best_model.pth')
# checkpoint_a = torch.load("/home/felixfu/cds/classification/saved/BDD-Resnet34/05-11_12-55/best_model.pth")
#
# model_a = get_instance(models, 'arch', checkpoint_a["configs"], 29)
#
# # for cpu inference, remove module
# new_state_dict = OrderedDict()
# for k, v in checkpoint_a['state_dict'].items():
# name = k[7:]
# new_state_dict[name] = v
# checkpoint_a = new_state_dict
# model_a.load_state_dict(checkpoint_a)
#
# return model_a
# ----------------------------------------------------------------
class Trainer(BaseTrainer):
def __init__(self, model, loss, resume, config, train_loader, val_loader=None, train_logger=None, prefetch=True):
""" Trainer 类
__init__:
1、TRANSORMS FOR VISUALIZATION
2、预读取
_train_epoch:
"""
super(Trainer, self).__init__(model, loss, resume, config, train_loader, val_loader, train_logger)
self.wrt_mode, self.wrt_step = 'train_', 0
self.log_step = config['trainer'].get('log_per_iter', int(np.sqrt(self.train_loader.batch_size)))
if config['trainer']['log_per_iter']:
self.log_step = int(self.log_step / self.train_loader.batch_size) + 1
self.num_classes = self.train_loader.dataset.num_classes
        # Transforms for visualization
self.restore_transform = transforms.Compose([
local_transforms.DeNormalize(self.train_loader.MEAN, self.train_loader.STD),
transforms.ToPILImage()])
self.viz_transform = transforms.Compose([
transforms.Resize((400, 400)),
transforms.ToTensor()])
        # Prefetch batches onto the GPU
if self.device == torch.device('cpu'):
prefetch = False
if prefetch:
self.train_loader = DataPrefetcher(train_loader, device=self.device)
self.val_loader = DataPrefetcher(val_loader, device=self.device)
torch.backends.cudnn.benchmark = True
def _train_epoch(self, epoch):
self.logger.info('\n')
        # Optionally freeze batch-norm layers
self.model.train()
if self.config['arch']['args']['freeze_bn']:
if isinstance(self.model, torch.nn.DataParallel):
self.model.module.freeze_bn()
else:
self.model.freeze_bn()
self.wrt_mode = 'train'
tic = time.time()
        self._reset_metrics()  # reset metrics: loss, top1, top2
tbar = tqdm(self.train_loader, ncols=130)
for batch_idx, (data, target, image_path) in enumerate(tbar):
            self.data_time.update(time.time() - tic)  # time spent loading the batch
if self.device == torch.device('cuda:0'):
data, target = data.to(self.device), target.to(self.device)
self.loss.to(self.device)
# LOSS & OPTIMIZE
self.optimizer.zero_grad()
output = self.model(data)
loss = self.loss(output, target)
            # average the per-GPU losses when the loss module is data-parallel
if isinstance(self.loss, torch.nn.DataParallel):
loss = loss.mean()
loss.backward()
self.optimizer.step()
            self.total_loss.update(loss.item())  # update the running loss
# measure elapsed time
            self.batch_time.update(time.time() - tic)  # time for this training step
tic = time.time()
# LOGGING & TENSORBOARD
if batch_idx % self.log_step == 0:
self.wrt_step = (epoch - 1) * len(self.train_loader) + batch_idx
self.writer.add_scalar(f'{self.wrt_mode}/losses', loss.item(), self.wrt_step)
# FOR EVAL
topk = eval_metrics(output, target, topk=(1, 2)) # topk is tensor
self._update_metrics(topk)
# PRINT INFO
tbar.set_description(
'TRAIN ({}) | Loss: {:.3f} | Top1Acc {:.2f} Top2Acc {:.2f} | B {:.2f} D {:.2f} |'.format(
epoch, self.total_loss.average,
self.precision_top1.average.item(), self.precision_top2.average.item(),
self.batch_time.average, self.data_time.average))
# METRICS TO TENSORBOARD
self.writer.add_scalar(f'{self.wrt_mode}/top1', self.precision_top1.average.item(),
self.wrt_step) # self.wrt_step
self.writer.add_scalar(f'{self.wrt_mode}/top2', self.precision_top2.average.item(),
self.wrt_step) # self.wrt_step
for i, opt_group in enumerate(self.optimizer.param_groups):
self.writer.add_scalar(f'{self.wrt_mode}/Learning_rate_{i}', opt_group['lr'],
self.wrt_step) # self.wrt_step
# RETURN LOSS & METRICS
log = {'losses': self.total_loss.average,
"top1": self.precision_top1.average.item(),
"top2": self.precision_top2.average.item()}
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
if self.val_loader is None:
self.logger.warning('Not data loader was passed for the validation step, No validation is performed !')
return {}
self.logger.info('\n###### EVALUATION ######')
self.model.eval()
self.wrt_mode = 'val'
self._reset_metrics()
tbar = tqdm(self.val_loader, ncols=130)
with torch.no_grad():
for batch_idx, (data, target, image_path) in enumerate(tbar):
if self.device == torch.device("cuda:0"):
data, target = data.to(self.device), target.to(self.device)
# LOSS
output = self.model(data)
# -----------------------mixed model
# model_a = get_model().to(self.device)
# output = 0.6 * output + 0.4 * model_a(data)
# for i, o in enumerate(torch.argmax(output, dim=1).cpu().numpy()):
# if o in [7,8,12,15]:
# output_a = model_a(torch.unsqueeze(data[i],dim=0))
# output[i] = output[i] + output_a*0.5
# ******
loss = self.loss(output, target)
if isinstance(self.loss, torch.nn.DataParallel):
loss = loss.mean()
self.total_loss.update(loss.item())
topk = eval_metrics(output, target, topk=(1, 2))
self._update_metrics(topk)
self.confusion_matrix = confusionMatrix(output, target, self.confusion_matrix)
# PRINT INFO
tbar.set_description(
'EVAL ({}) | Loss: {:.3f} | Top1Acc {:.2f} Top2Acc {:.2f} |'.format(
epoch, self.total_loss.average,
self.precision_top1.average.item(), self.precision_top2.average.item()))
# METRICS TO TENSORBOARD
self.wrt_step = (epoch) * len(self.val_loader)
self.writer.add_scalar(f'{self.wrt_mode}/losses', self.total_loss.average, self.wrt_step) # self.wrt_step
self.writer.add_scalar(f'{self.wrt_mode}/top1', self.precision_top1.average.item(),
self.wrt_step) # self.wrt_step
self.writer.add_scalar(f'{self.wrt_mode}/top2', self.precision_top2.average.item(),
self.wrt_step) # self.wrt_step
# RETURN LOSS & METRICS
log = {'losses': self.total_loss.average,
"top1": self.precision_top1.average.item(),
"top2": self.precision_top2.average.item()}
# print confusion matrix
confusion_file = open(os.path.join(self.checkpoint_dir, "confusion.txt"), 'a+')
label_path = os.path.join(self.config["train_loader"]["args"]["data_dir"], "labels.txt")
labels = []
with open(label_path, 'r') as f:
for line in f:
labels.append(line.split()[0])
print("{0:10}".format(""), end="")
confusion_file.write("{0:8}".format(""))
for name in labels:
print("{0:10}".format(name), end="")
confusion_file.write("{0:8}".format(name))
print("{0:10}".format("Precision"))
confusion_file.write("{0:8}\n".format("Precision"))
for i in range(self.train_loader.dataset.num_classes):
print("{0:10}".format(labels[i]), end="")
confusion_file.write("{0:8}".format(labels[i]))
for j in range(self.train_loader.dataset.num_classes):
if i == j:
print("{0:10}".format(str("-" + str(self.confusion_matrix[i][j])) + "-"), end="")
confusion_file.write("{0:8}".format(str("-" + str(self.confusion_matrix[i][j])) + "-"))
else:
print("{0:10}".format(str(self.confusion_matrix[i][j])), end="")
confusion_file.write("{0:8}".format(str(self.confusion_matrix[i][j])))
            row_total = sum(self.confusion_matrix[i])
            precision = self.confusion_matrix[i][i] / row_total if row_total else 0.0
            print("{0:.4f}".format(precision))
            confusion_file.write("{0:8}\n".format(precision))
        confusion_file.close()
        return log
def _reset_metrics(self):
        self.batch_time = AverageMeter()  # time per training step
        self.data_time = AverageMeter()  # time spent loading data
self.total_loss = AverageMeter()
self.precision_top1, self.precision_top2 = AverageMeter(), AverageMeter()
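        # class-by-class confusion matrix, accumulated by utils.metrics.confusionMatrix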
self.confusion_matrix = [[0 for j in range(self.train_loader.dataset.num_classes)] for i in
range(self.train_loader.dataset.num_classes)]
def _update_metrics(self, tops):
self.precision_top1.update(tops[0].item())
self.precision_top2.update(tops[1].item())
|
{"hexsha": "ec89576629d21c42f3167d580722ce02904e0e3a", "size": 10658, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer.py", "max_stars_repo_name": "FelixFu520/classification", "max_stars_repo_head_hexsha": "2e8119c229bc978ecaecac0361bf8b573c553d43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trainer.py", "max_issues_repo_name": "FelixFu520/classification", "max_issues_repo_head_hexsha": "2e8119c229bc978ecaecac0361bf8b573c553d43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trainer.py", "max_forks_repo_name": "FelixFu520/classification", "max_forks_repo_head_hexsha": "2e8119c229bc978ecaecac0361bf8b573c553d43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1497975709, "max_line_length": 129, "alphanum_fraction": 0.5676487146, "include": true, "reason": "import numpy", "num_tokens": 2421}
|
import cv2
import numpy as np
class SiftMatchScorer:
# score_names = 'sift_matches', 'sift_inliers'
score_names = 'SMatches', 'SPrecision'
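    # SMatches: capped count of SIFT matches between target and scene image;
    # SPrecision: fraction of matched scene keypoints that fall inside the
    # predicted detection box.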
def __init__(self, example):
self.example = example
self.sift_matches = example['pred_sift']
resized_tg_arr, resized_tg_mask = np.array(example['resized_tg_im']), example['resized_tg_mask']
self.tg_arr, self.tg_mask = resized_tg_arr, resized_tg_mask
self.kp_cs, self.kp_tg = example['kp_cs'], example['kp_tg']
def _sm(self):
# """Ratio of sift matches wrt total pixels in target mask"""
# ratio = len(self.sift_matches) / self.tg_mask.sum()
# assert(0. <= ratio <= 1.)
# return ratio
"""Number of sift matches divided by 300"""
num_matches = min(len(self.sift_matches) / 300.0, 1)
assert(0. <= num_matches <= 1.)
return num_matches
def _sift_inliers(self):
"""Ratio of inliers wrt total number of sift matches"""
if len(self.sift_matches) == 0:
return 0. # give worst possible score
src_pts = np.float32([self.kp_tg[m.queryIdx].pt for m in self.sift_matches]).reshape(-1, 1, 2)
dst_pts = np.float32([self.kp_cs[m.trainIdx].pt for m in self.sift_matches]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
ratio_inliers = mask.mean()
assert(0. <= ratio_inliers <= 1.)
return ratio_inliers
def _sp(self):
"""SIFT Precision"""
bbox = self.example['cs_dt_uv_bbox']
# Size of dst_pts is [num_matches, 2]
dst_pts = np.float32([self.kp_cs[m.trainIdx].pt for m in self.sift_matches])
# Corner case when there are zero matches.
# In this case, sift precision should be zero.
if len(dst_pts) == 0:
return 0.
resized_cs_im_size = self.example['resized_cs_im'].size
dst_pts_uv = dst_pts / resized_cs_im_size
        u1, v1, u2, v2 = bbox
# Checking which dst_pts_uv lie inside cs_dt_uv_bbox
dst_pts_u, dst_pts_v = dst_pts_uv[:, 0], dst_pts_uv[:, 1]
# np.logical_and.reduce because np.logical_and only takes two arguments.
is_dst_pt_inside_dt = np.logical_and.reduce([u1 <= dst_pts_u,
u2 >= dst_pts_u,
v1 <= dst_pts_v,
v2 >= dst_pts_v])
sp_score = is_dst_pt_inside_dt.mean()
assert(0. <= sp_score <= 1.)
return sp_score
def get_scores(self):
# return self._sift_matches(), self._sift_inliers()
return self._sm(), self._sp()
if __name__ == '__main__':
import pickle
from baselines.sift.test import test
from flowmatch.networks.flownet import FlowNet
from flowmatch.utils import load_config
# Loading an (un-preprocessed) example that was saved as a pickle file.
with open('/home/sancha/repos/osid/sandbox/example.pkl', 'rb') as f:
example = pickle.load(f)
cfg = load_config('../../pipeline/configs/tdid_avd2_manual_easy.yaml')
net = FlowNet(cfg.flownet)
sift_matches = test(net, example)
example['pred_sift'] = sift_matches
# Reclassification.
reclass_scorer = SiftMatchScorer(example)
scores = reclass_scorer.get_scores()
|
{"hexsha": "8f2a80ae27733a763741b54626e9d814aacdcbb6", "size": 3412, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/sift/siftscorer.py", "max_stars_repo_name": "siddancha/FlowVerify", "max_stars_repo_head_hexsha": "a1b80e7c47a23479b91e87fd12b09b59a346c464", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-09T14:33:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-09T14:33:41.000Z", "max_issues_repo_path": "baselines/sift/siftscorer.py", "max_issues_repo_name": "siddancha/FlowVerify", "max_issues_repo_head_hexsha": "a1b80e7c47a23479b91e87fd12b09b59a346c464", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "baselines/sift/siftscorer.py", "max_forks_repo_name": "siddancha/FlowVerify", "max_forks_repo_head_hexsha": "a1b80e7c47a23479b91e87fd12b09b59a346c464", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2978723404, "max_line_length": 104, "alphanum_fraction": 0.6148886284, "include": true, "reason": "import numpy", "num_tokens": 894}
|
from __future__ import division, print_function
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from utils import *
from loss_functions import *
import SimpleITK as sitk  # used below via sitk.ReadImage; previously presumably re-exported by utils
# from scipy.misc import imsave  # only needed for the commented-out debug dumps below
class MR2CT(object):
def __init__(self, sess, batch_size=10, depth_MR=32, height_MR=32,
width_MR=32, depth_CT=32, height_CT=24,
width_CT=24, l_num=2, wd=0.0005, checkpoint_dir=None, path_patients_h5=None, learning_rate=2e-8):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
output_size: (optional) The resolution in pixels of the images. [64]
y_dim: (optional) Dimension of dim for y. [None]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.sess = sess
self.l_num=l_num
self.wd=wd
self.learning_rate=learning_rate
self.batch_size=batch_size
self.depth_MR=depth_MR
self.height_MR=height_MR
self.width_MR=width_MR
self.depth_CT=depth_CT
self.height_CT=height_CT
self.width_CT=width_CT
self.checkpoint_dir = checkpoint_dir
self.data_generator = Generator_3D_patches(path_patients_h5,self.batch_size)
self.build_model()
def build_model(self):
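        # Assemble the TF graph: placeholders for the MR input patch and the
        # ground-truth CT patch, the generator network, and the lp loss.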
self.inputMR=tf.placeholder(tf.float32, shape=[None, self.depth_MR, self.height_MR, self.width_MR, 1])
self.CT_GT=tf.placeholder(tf.float32, shape=[None, self.depth_CT, self.height_CT, self.width_CT, 1])
batch_size_tf = tf.shape(self.inputMR)[0] #variable batchsize so we can test here
self.train_phase = tf.placeholder(tf.bool, name='phase_train')
self.G = self.generator(self.inputMR,batch_size_tf)
        print('shape output G ', self.G.get_shape())
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.g_loss=lp_loss(self.G, self.CT_GT, self.l_num, batch_size_tf)
        print('learning rate ', self.learning_rate)
#self.g_optim =tf.train.AdamOptimizer(self.learning_rate).minimize(self.g_loss)
#tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.g_loss)
self.merged = tf.merge_all_summaries()
self.writer = tf.train.SummaryWriter("./summaries", self.sess.graph)
self.saver = tf.train.Saver()
def generator(self,inputMR,batch_size_tf):
######## FCN for the 32x32x32 to 24x24x24 , added dilaion by yourself####################################
conv1_a = conv_op_3d_bn(inputMR, name="conv1_a", kh=5, kw=5, kz=5, n_out=48, dh=1, dw=1, dz=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)#30
conv2_a = conv_op_3d_bn(conv1_a, name="conv2_a", kh=3, kw=3, kz=3, n_out=96, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)
conv3_a = conv_op_3d_bn(conv2_a, name="conv3_a", kh=3, kw=3, kz=3, n_out=128, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)#28
conv4_a = conv_op_3d_bn(conv3_a, name="conv4_a", kh=5, kw=5, kz=5, n_out=96, dh=1, dw=1, dz=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)
conv5_a = conv_op_3d_bn(conv4_a, name="conv5_a", kh=3, kw=3, kz=3, n_out=48, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)#26
conv6_a = conv_op_3d_bn(conv5_a, name="conv6_a", kh=3, kw=3, kz=3, n_out=32, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)
#conv7_a = conv_op_3d_bn(conv6_a, name="conv7_a", kh=3, kw=3, kz=3, n_out=1, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)#24
conv7_a = conv_op_3d_norelu(conv6_a, name="conv7_a", kh=3, kw=3, kz=3, n_out=1, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME')#24 I modified it here,dong
self.MR_16_downsampled=conv7_a#JUST FOR TEST
return conv7_a
def train(self, config):
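        """Training loop: restore the latest checkpoint if one exists, run
        Adam updates on mini-batches from the HDF5 generator, and periodically
        log, test on a held-out subject and save checkpoints."""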
path_test='/home/dongnie/warehouse/prostate/ganData64to24Test'
        print('global_step ', self.global_step.name)
        print('trainable vars ')
        for v in tf.trainable_variables():
            print(v.name)
if self.load(self.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
self.sess.run(tf.initialize_all_variables())
temp = set(tf.all_variables())
start = self.global_step.eval() # get last global_step
print("Start from:", start)
############ This is for only initializing adam vars####################
temp = set(tf.all_variables())
self.g_optim =tf.train.AdamOptimizer(self.learning_rate).minimize(self.g_loss)
self.sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
print("Start after adam (should be the same):", start)
#####################################
for it in range(start,config.iterations):
X,y=self.data_generator.next()
# Update G network
_, loss_eval, layer_out_eval = self.sess.run([self.g_optim, self.g_loss, self.MR_16_downsampled],
feed_dict={ self.inputMR: X, self.CT_GT:y, self.train_phase: True })
self.global_step.assign(it).eval() # set and update(eval) global_step with index, i
if it%config.show_every==0:#show loss every show_every its
                print('it ', it, 'loss ', loss_eval)
                print('layer min ', np.min(layer_out_eval))
                print('layer max ', np.max(layer_out_eval))
                print('layer mean ', np.mean(layer_out_eval))
# print 'trainable vars '
# for v in tf.trainable_variables():
# print v.name
# data_var=self.sess.run(v)
# grads = tf.gradients(self.g_loss, v)
# var_grad_val = self.sess.run(grads, feed_dict={self.inputMR: X, self.CT_GT:y })
# print 'grad min ', np.min(var_grad_val)
# print 'grad max ', np.max(var_grad_val)
# print 'grad mean ', np.mean(var_grad_val)
# #print 'shape ',data_var.shape
# print 'filter min ', np.min(data_var)
# print 'filter max ', np.max(data_var)
# print 'filter mean ', np.mean(data_var)
#self.writer.add_summary(summary, it)
# print 'trainable vars '
if it%config.test_every==0 and it!=0:#==0:#test one subject
mr_test_itk=sitk.ReadImage(os.path.join(path_test,'prostate_1to1_MRI.nii'))
ct_test_itk=sitk.ReadImage(os.path.join(path_test,'prostate_1to1_CT.nii'))
mrnp=sitk.GetArrayFromImage(mr_test_itk)
#mu=np.mean(mrnp)
#mrnp=(mrnp-mu)/(np.max(mrnp)-np.min(mrnp))
ctnp=sitk.GetArrayFromImage(ct_test_itk)
                print(mrnp.dtype)
                print(ctnp.dtype)
ct_estimated=self.test_1_subject(mrnp,ctnp,[32,32,32],[24,24,24],[5,5,2])
psnrval=psnr(ct_estimated,ctnp)
                print(ct_estimated.dtype)
                print(ctnp.dtype)
                print('psnr= ', psnrval)
volout=sitk.GetImageFromArray(ct_estimated)
sitk.WriteImage(volout,'ct_estimated_{}'.format(it)+'.nii.gz')
if it%config.save_every==0:#save weights every save_every iterations
self.save(self.checkpoint_dir, it)
def evaluate(self,patch_MR):
""" patch_MR is a np array of shape [H,W,nchans]
"""
patch_MR=np.expand_dims(patch_MR,axis=0)#[1,H,W,nchans]
patch_MR=np.expand_dims(patch_MR,axis=4)#[1,H,W,nchans]
patch_CT_pred, MR16_eval= self.sess.run([self.G,self.MR_16_downsampled],
feed_dict={ self.inputMR: patch_MR, self.train_phase: False})
patch_CT_pred=np.squeeze(patch_CT_pred)#[Z,H,W]
#imsave('mr32.png',np.squeeze(MR16_eval[0,:,:,2]))
#imsave('ctpred.png',np.squeeze(patch_CT_pred[0,:,:,0]))
#print 'mean of layer ',np.mean(MR16_eval)
#print 'min ct estimated ',np.min(patch_CT_pred)
#print 'max ct estimated ',np.max(patch_CT_pred)
#print 'mean of ctpatch estimated ',np.mean(patch_CT_pred)
return patch_CT_pred
def test_1_subject(self,MR_image,CT_GT,MR_patch_sz,CT_patch_sz,step):
"""
receives an MR image and returns an estimated CT image of the same size
"""
matFA=MR_image
matSeg=CT_GT
dFA=MR_patch_sz
dSeg=CT_patch_sz
eps=1e-5
[row,col,leng]=matFA.shape
margin1=int((dFA[0]-dSeg[0])/2)
margin2=int((dFA[1]-dSeg[1])/2)
margin3=int((dFA[2]-dSeg[2])/2)
cubicCnt=0
marginD=[margin1,margin2,margin3]
        print('matFA shape is ', matFA.shape)
matFAOut=np.zeros([row+2*marginD[0],col+2*marginD[1],leng+2*marginD[2]])
        print('matFAOut shape is ', matFAOut.shape)
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA
# matFAOut[0:marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[0:marginD[0],:,:] #we'd better flip it along the first dimension
# matFAOut[row+marginD[0]:matFAOut.shape[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[row-marginD[0]:matFA.shape[0],:,:] #we'd better flip it along the 1st dimension
# matFAOut[marginD[0]:row+marginD[0],0:marginD[1],marginD[2]:leng+marginD[2]]=matFA[:,0:marginD[1],:] #we'd better flip it along the 2nd dimension
# matFAOut[marginD[0]:row+marginD[0],col+marginD[1]:matFAOut.shape[1],marginD[2]:leng+marginD[2]]=matFA[:,col-marginD[1]:matFA.shape[1],:] #we'd better to flip it along the 2nd dimension
# matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],0:marginD[2]]=matFA[:,:,0:marginD[2]] #we'd better flip it along the 3rd dimension
# matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]+leng:matFAOut.shape[2]]=matFA[:,:,leng-marginD[2]:matFA.shape[2]]
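        # Mirror-pad the margins along each axis so that patches extracted
        # near the volume boundary are fully defined.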
if margin1!=0:
matFAOut[0:marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[marginD[0]-1::-1,:,:] #reverse 0:marginD[0]
matFAOut[row+marginD[0]:matFAOut.shape[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[matFA.shape[0]-1:row-marginD[0]-1:-1,:,:] #we'd better flip it along the 1st dimension
if margin2!=0:
matFAOut[marginD[0]:row+marginD[0],0:marginD[1],marginD[2]:leng+marginD[2]]=matFA[:,marginD[1]-1::-1,:] #we'd flip it along the 2nd dimension
matFAOut[marginD[0]:row+marginD[0],col+marginD[1]:matFAOut.shape[1],marginD[2]:leng+marginD[2]]=matFA[:,matFA.shape[1]-1:col-marginD[1]-1:-1,:] #we'd flip it along the 2nd dimension
if margin3!=0:
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],0:marginD[2]]=matFA[:,:,marginD[2]-1::-1] #we'd better flip it along the 3rd dimension
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]+leng:matFAOut.shape[2]]=matFA[:,:,matFA.shape[2]-1:leng-marginD[2]-1:-1]
matOut=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))
used=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))+eps
#fid=open('trainxxx_list.txt','a');
for i in range(0,row-dSeg[0],step[0]):
for j in range(0,col-dSeg[1],step[1]):
for k in range(0,leng-dSeg[2],step[2]):
volSeg=matSeg[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]
#print 'volSeg shape is ',volSeg.shape
volFA=matFAOut[i:i+dSeg[0]+2*marginD[0],j:j+dSeg[1]+2*marginD[1],k:k+dSeg[2]+2*marginD[2]]
#print 'volFA shape is ',volFA.shape
#mynet.blobs['dataMR'].data[0,0,...]=volFA
#mynet.forward()
#temppremat = mynet.blobs['softmax'].data[0].argmax(axis=0) #Note you have add softmax layer in deploy prototxt
temppremat=self.evaluate(volFA)
#print 'patchout shape ',temppremat.shape
#temppremat=volSeg
matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+temppremat;
used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+1;
matOut=matOut/used
return matOut
def save(self, checkpoint_dir, step):
model_name = "MR2CT.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
return True
else:
return False
|
{"hexsha": "f726815c9d9c875e611e685946fa5bb15c9a3094", "size": 13843, "ext": "py", "lang": "Python", "max_stars_repo_path": "3dversion/g_model.py", "max_stars_repo_name": "ginobilinie/medSynthesis", "max_stars_repo_head_hexsha": "fee24e955313b4032855901327a7485390866e91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 77, "max_stars_repo_stars_event_min_datetime": "2017-03-24T11:51:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T00:14:52.000Z", "max_issues_repo_path": "3dversion/g_model.py", "max_issues_repo_name": "liu123-t/medSynthesis", "max_issues_repo_head_hexsha": "7373f2974356bf1a944ed25183273c3e132d63af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2018-02-03T15:55:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-16T10:27:22.000Z", "max_forks_repo_path": "3dversion/g_model.py", "max_forks_repo_name": "liu123-t/medSynthesis", "max_forks_repo_head_hexsha": "7373f2974356bf1a944ed25183273c3e132d63af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2017-03-18T09:17:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-05T02:59:22.000Z", "avg_line_length": 53.2423076923, "max_line_length": 200, "alphanum_fraction": 0.609549953, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4035}
|
[STATEMENT]
lemma wf_design_implies:
assumes "(\<And> b . b \<in># \<B> \<Longrightarrow> b \<subseteq> V)"
assumes "\<And> b . b \<in># \<B> \<Longrightarrow> b \<noteq> {}"
assumes "finite V"
assumes "\<B> \<noteq> {#}"
assumes "V \<noteq> {}"
shows "design V \<B>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. design V \<B>
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
?b \<in># \<B> \<Longrightarrow> ?b \<subseteq> V
?b \<in># \<B> \<Longrightarrow> ?b \<noteq> {}
finite V
\<B> \<noteq> {#}
V \<noteq> {}
goal (1 subgoal):
1. design V \<B>
[PROOF STEP]
by (unfold_locales) simp_all
|
{"llama_tokens": 262, "file": "Design_Theory_Design_Basics", "length": 2}
|
'''This module provides functions to visualise the terrain'''
from terrain import Hex, HexGrid
from matplotlib.patches import RegularPolygon
import matplotlib.pyplot as plt
import numpy as np
def display_graph(colour_map):
    '''Display a hex-grid colour map whose keys are Hex cells with cube
    co-ordinates (i, j, k) and whose values are matplotlib colours.'''
# Define plot figure and axes
fig, ax = plt.subplots(1)
ax.set_aspect('equal')
# Add hexagons
for h in colour_map:
# Convert from hexagonal co-ordinate to cartesian co-ordinate
x = h.j * np.sqrt(3) / 2 - h.k * np.sqrt(3) / 2
y = h.i + h.j / 2 + h.k / 2
# Create hexagon
hexagon = RegularPolygon((x,y),
numVertices=6,
radius=np.sqrt(3) / 3.0,
orientation=np.radians(30),
facecolor=colour_map[h],
alpha=0.2,
edgecolor='k')
ax.add_patch(hexagon)
ax.scatter(x, y, alpha=0)
# Display plot
plt.show()
|
{"hexsha": "6b566f185c2fe01b7c83242a544e48a25016ca34", "size": 1053, "ext": "py", "lang": "Python", "max_stars_repo_path": "nuclear_confusion/visualisation.py", "max_stars_repo_name": "will-albuquerque/nuclear-confusion", "max_stars_repo_head_hexsha": "9e717aac6a29acd8c66632ea9ddb16bf510c8ca1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nuclear_confusion/visualisation.py", "max_issues_repo_name": "will-albuquerque/nuclear-confusion", "max_issues_repo_head_hexsha": "9e717aac6a29acd8c66632ea9ddb16bf510c8ca1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nuclear_confusion/visualisation.py", "max_forks_repo_name": "will-albuquerque/nuclear-confusion", "max_forks_repo_head_hexsha": "9e717aac6a29acd8c66632ea9ddb16bf510c8ca1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0857142857, "max_line_length": 69, "alphanum_fraction": 0.5270655271, "include": true, "reason": "import numpy", "num_tokens": 243}
|
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import numpy.random as npr
import torch
from ..model.bbox_transform import bbox_transform
from ..model.config import cfg
from ..utils.bbox import bbox_overlaps
def anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):
"""Same as the anchor target layer in original Fast/er RCNN """
    A = num_anchors
    total_anchors = all_anchors.shape[0]
# allow boxes to sit over the edge by a small amount
_allowed_border = 0
# map of shape (..., H, W)
height, width = rpn_cls_score.shape[1:3]
# only keep anchors inside the image
inds_inside = np.where(
(all_anchors[:, 0] >= -_allowed_border) &
(all_anchors[:, 1] >= -_allowed_border) &
(all_anchors[:, 2] < im_info[1] + _allowed_border) & # width
(all_anchors[:, 3] < im_info[0] + _allowed_border) # height
)[0]
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
# label: 1 is positive, 0 is negative, -1 is dont care
labels = np.empty((len(inds_inside),), dtype=np.float32)
labels.fill(-1)
# overlaps between the anchors and the gt boxes
# overlaps (ex, gt)
overlaps = bbox_overlaps(
        np.ascontiguousarray(anchors, dtype=np.float64),
        np.ascontiguousarray(gt_boxes, dtype=np.float64))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps,
np.arange(overlaps.shape[1])]
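    # recompute so that every anchor tying for the best overlap with a gt box is kept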
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
# first set the negatives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IOU
labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# subsample positive labels if we have too many
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = npr.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = npr.choice(
bg_inds, size=(len(bg_inds) - num_bg), replace=False)
labels[disable_inds] = -1
bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])
bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
# only the positive ones have regression targets
bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)
bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(labels >= 0)
positive_weights = np.ones((1, 4)) * 1.0 / num_examples
negative_weights = np.ones((1, 4)) * 1.0 / num_examples
else:
assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
(cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /
np.sum(labels == 1))
negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /
np.sum(labels == 0))
bbox_outside_weights[labels == 1, :] = positive_weights
bbox_outside_weights[labels == 0, :] = negative_weights
# map up to original set of anchors
labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)
# labels
labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, 1, A * height, width))
rpn_labels = labels
# bbox_targets
bbox_targets = bbox_targets \
.reshape((1, height, width, A * 4))
rpn_bbox_targets = bbox_targets
# bbox_inside_weights
bbox_inside_weights = bbox_inside_weights \
.reshape((1, height, width, A * 4))
rpn_bbox_inside_weights = bbox_inside_weights
# bbox_outside_weights
bbox_outside_weights = bbox_outside_weights \
.reshape((1, height, width, A * 4))
rpn_bbox_outside_weights = bbox_outside_weights
return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights
def _unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if len(data.shape) == 1:
ret = np.empty((count,), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count,) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
def _compute_targets(ex_rois, gt_rois):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 5
return bbox_transform(torch.from_numpy(ex_rois), torch.from_numpy(gt_rois[:, :4])).numpy()
|
{"hexsha": "e671d162088a81205cf4fcc171851e7e16dbccf2", "size": 5992, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/frcnn/frcnn/layer_utils/anchor_target_layer.py", "max_stars_repo_name": "TTTREE/Trackor_base", "max_stars_repo_head_hexsha": "12ed78c736695bf039cd8b94f7010c815938b86c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-09-05T08:57:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T13:31:49.000Z", "max_issues_repo_path": "src/frcnn/frcnn/layer_utils/anchor_target_layer.py", "max_issues_repo_name": "TTTREE/Trackor_base", "max_issues_repo_head_hexsha": "12ed78c736695bf039cd8b94f7010c815938b86c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:14:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:57:51.000Z", "max_forks_repo_path": "src/frcnn/frcnn/layer_utils/anchor_target_layer.py", "max_forks_repo_name": "TTTREE/Trackor_base", "max_forks_repo_head_hexsha": "12ed78c736695bf039cd8b94f7010c815938b86c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-28T07:48:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-28T07:48:20.000Z", "avg_line_length": 36.3151515152, "max_line_length": 98, "alphanum_fraction": 0.6949265688, "include": true, "reason": "import numpy", "num_tokens": 1695}
|
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Second Chapter *********************************
%*******************************************************************************
\chapter{\ont{} sequencing for \mtb{} transmission clustering}
\label{chap:clustering}
\ifpdf
\graphicspath{{Chapter2/Figs/Raster/}{Chapter2/Figs/PDF/}{Chapter2/Figs/}}
\else
\graphicspath{{Chapter2/Figs/Vector/}{Chapter2/Figs/}}
\fi
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\setcounter{section}{-1}
\section{Publication and collaboration acknowledgements}
\label{sec:ch2-acknowledge}
A manuscript comprising the work in this chapter and \autoref{chap:dst} is currently in preparation. The DNA extractions and sequencing of the data in this chapter were performed by: Marie Sylvianne Rabodoarivelo and Simon Grandjean Lapierre for the Madagascar samples; Anzaan Dippenaar, Anastasia Koch, and Helen Cox for the South African samples; Sara Goodwin at the Next Generation Genomics Core within Cold Spring Harbor Laboratory performed the PacBio sequencing for the Madagascar samples; and Sophie George, Grace Smith and Esther Robinson for the English samples. Fan Yang-Turner of the Nuffield Department of Medicine, University of Oxford, performed the variant-calling of all Illumina samples. All other work in this chapter is my own.
%=========================================================================
\section{Introduction}
\textit{Mycobacterium tuberculosis} is the causative agent of the infectious disease tuberculosis (TB). It accounts for more deaths than any other pathogen each year \cite{who2020}; as such, the epidemiology of \mtb{} transmission is of the utmost importance. Whole-genome sequencing (WGS) has established itself as a vital tool for identifying possible transmission clusters and is being used by some leading public health agencies to aid contact tracing \cite{phe-tb-england,brooks2020}. Illumina is considered the gold standard for this type of WGS work. However, Illumina is not readily available in many high-burden TB settings and requires considerable time and resources to start and maintain. \ont{} has shown itself to be adept in these types of settings, having been used to notable effect during recent Zika \cite{faria2016} and Ebola \cite{quick2016,Hoenen2016} outbreaks. Even in environments where resource availability is not an obstacle, \ont{}'s rapid turnaround time has been used for monitoring COVID-19 and informing infection control measures \cite{meredith2020}.
The time and resources required to set up \ont{} sequencing are far lower than for Illumina, but despite this, little work has been done to assess its suitability for \mtb{} WGS-based transmission clustering. The lack of work in this space likely stems from the long-held belief that, due to its higher sequencing error rate, \ont{} is not capable of such fine-grained analyses. However, \ont{} has seen considerable improvements in accuracy in recent years \cite{wick2019}, and studies using variant calls from the technology are becoming increasingly common \cite{sanderson2020,watson2020}. In particular, Public Health England (PHE) has investigated the use of \ont{} for the analysis of Shiga toxin-producing \ecoli{} and found it to be well suited to the application \cite{greig2021}.
In this chapter, we evaluate whether \ont{} sequencing can provide \mtb{} transmission clusters consistent with Illumina. To facilitate this investigation, we collect a new dataset of 150 samples, from Madagascar, South Africa, and England, sequenced on both Illumina and \ont{} platforms. We first assess \ont{} variant calls and outline a filtering strategy that provides Illumina-level precision. Next, we use these variant calls to cluster samples based on single nucleotide polymorphism (SNP) distance thresholds and find that \ont{} does not miss any samples from their expected cluster. Finally, we confirm that reliable clustering of samples from a mixture of Illumina and \ont{} modalities is achievable.
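For reference, threshold-based clustering of this kind is usually formulated as single-linkage clustering: given pairwise SNP distances $d(i,j)$ and a threshold $k$, samples $i$ and $j$ are placed in the same cluster whenever a chain of samples $i = s_0, s_1, \ldots, s_n = j$ exists with $d(s_m, s_{m+1}) \le k$ for all $m$. Equivalently, clusters are the connected components of the graph that joins every pair of samples at most $k$ SNPs apart.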
\mtb{} has a "closed" pan-genome; all species members share most (but not all) gene content. In \autoref{chap:denovo} we sought to improve variant calling of bacterial pan-genomes with genome graphs. The work in that chapter concentrated on \ecoli{}, which has an "open" pan-genome. In the interest of understanding how such genome graph methods can aid in closed pan-genomes, we additionally assess transmission clusters produced from \pandora{} variant calls. We construct two \mtb{} reference graphs from different densities of population variation towards this end. While the clustering from \pandora{} does not perform to the standards of the single-reference caller BCFtools, we gain many insights for the improvement of \pandora{} and the construction of genome graphs.
%=========================================================================
\section{Dataset}
\label{sec:ch2-dataset}
The data used for the work in this chapter, and \autoref{chap:dst}, are patient-derived \mtb{} isolates from culture. We gathered samples from Madagascar (118), South Africa (83), and England's National Mycobacteria Reference Service in Birmingham (46), giving us a total of 247 samples.
Each sample was sequenced on both \ont{} and Illumina platforms. We aimed to perform all sequencing for a sample from a single DNA extraction; using the same DNA extract ensures that any variation identified between technologies for the same sample is due to differences in the sequencing platform and not \textit{in vitro} evolution.
As these samples are not reference isolates, and we want to be able to compare both Illumina and \ont{} to a "truth", we also sequenced 35 of the Malagasy isolates with PacBio.
\subsection{Illumina sequencing}
An extended description of isolate selection, DNA extraction, and sequencing methods is provided in \autoref{app:dna-sequencing}.
\subsubsection{Madagascar}
Illumina sequencing was carried out on the HiSeq 2500 platform at the Wellcome Trust Centre for Human Genetics, Oxford, and paired-end libraries were prepared according to the manufacturer’s instructions.
\subsubsection{England}
Illumina sequencing was performed on a MiSeq instrument at Public Health England (Birmingham) by Grace Smith, Esther Robinson and their team. Sample preparation and sequencing methodology were as described previously \cite{Votintseva2015}.
\subsubsection{South Africa}
% note from anzaan
% Many of these WGS will be published soon. I am waiting for the final list and then I’ll include their accession numbers so that we can refer to them here. If all were published, we also don’t have to upload them to ENA (or which ever data repository)
Paired-end genomic libraries were prepared using the Illumina Nextera XT or NEBNext Ultra II FS DNA Library Preparation Kits (Illumina Inc, San Diego, CA, USA) according to the manufacturers’ instructions. Pooled samples were sequenced on an Illumina HiSeq2500 or NextSeq500 instrument.
\subsection{\ont{} sequencing}
\subsubsection{Madagascar}
\ont{} library preparation was carried out using the Oxford Nanopore Technologies (ONT) Ligation Sequencing Kit 1D (SQK-LSK108) and the Native Barcoding Kit 1D (EXP-NBD103) according to the ONT standard protocols. One microgram of DNA was used as input for each library. Multiplexed sequencing was performed by pooling 6-8 barcoded DNA samples. Prepared libraries were loaded onto an R9.4 flow cell and sequenced on a MinION device with ONT MinKNOW software.
\subsubsection{England}
\ont{} sequencing was performed at the John Radcliffe Hospital, Oxford, by Sophie George.
\subsubsection{South Africa}
Remnant stored DNA used for Illumina WGS from each isolate was retrieved from storage and used for Nanopore library preparation. Per isolate, one microgram of undigested DNA was prepared for Nanopore sequencing using the ligation sequencing kit (SQK-LSK109). In addition, the native barcoding expansion kit (EXP-NBD104) was used for multiplexing. The protocols for sequencing genomic DNA by ligation and native barcoding were carried out according to the manufacturers’ instructions. Multiplexed sequencing libraries consisted of 6-12 barcoded DNA samples, and all libraries were sequenced using SpotON R9.4.1 flow cells on a MinION device.
\subsection{PacBio sequencing}
Thirty-five of the Malagasy samples were sequenced and processed at the Next Generation Genomics Core within Cold Spring Harbor Laboratory with PacBio Sequel 1M V2 SMRT cells. The circular consensus was generated via the SMRTlink graphical user interface version 6.0.0.47841. \autoref{app:pacbio-seq} outlines the full details of the sequencing protocol.
\subsection{Data preprocessing}
All \ont{} data for this project was basecalled and de-multiplexed using the \ont{} proprietary software tool \guppy{} (version 3.4.5). We used default parameters for basecalling, and the only non-default parameter used for de-multiplexing was to trim barcodes from the resulting sequences.
%=========================================================================
\section{Genome assemblies for validating variant calls}
\label{sec:asm_results}
A central component of the work in this chapter is validating the quality of variant calls - without being biased by assuming short reads are the "truth". In \autoref{sec:var-calls} we compare the precision and recall of \ont{} and Illumina variant calls. A necessary component of such an analysis is a reference genome for each sample. Thirty-five of the Malagasy samples in the dataset were sent for PacBio Circular Consensus Sequencing (CCS) in addition to the matched sequencing on both the \ont{} and Illumina platforms. PacBio CCS produces so-called high-fidelity sequencing reads with a base-level accuracy greater than 99.8\% \cite{wenger2019}. These reads achieve such high accuracy because each one is the consensus of multiple polymerase passes around a circularised copy of the original double-stranded DNA fragment. As the CCS reads are both long and accurate, they are regularly used to produce high-quality \denovo{} assemblies and complete existing reference genomes \cite{garg2021,masonbrink2021}. For the samples with available PacBio data, we construct high-quality assemblies to use as reference genomes.
We chose samples with greater than 30x coverage across all three sequencing technologies to produce high-quality assemblies. In total, this left us with 9 Malagasy samples. There have been many new genome assembly methods produced since the last known assessment of \mtb{} long-read assemblies \cite{bainomugisa2018}. As such, we benchmarked five assemblers and chose the best for each sample. The reason for this comparison is that different assembly algorithms can produce quite varied results depending on the sequencing technology used, species, or computational resource availability \cite{wick2020,demaio2019}. The assembly tools used were Canu (v2.0;\cite{koren2017,nurk2020}), Flye (v2.8;\cite{flye2019}), Unicycler (v0.4.8;\cite{wick2017}), HASLR (v0.8a1;\cite{haslr2020}), and SPAdes (v3.14.0;\cite{antipov2016}). \autoref{app:asm} presents the complete benchmark.
In summary, we use the unpolished PacBio-only assemblies produced by \flye{} as reference genomes for eight samples. Although we assembled nine samples, we exclude one from further analysis due to significant contamination.
%=========================================================================
\section{Quality control}
\label{sec:ch2-qc}
Before any variant calling, all samples were subjected to a quality control (QC) pipeline to ensure all data used was of the highest quality.
The first step in this QC was decontamination of both Illumina and \ont{} sequencing reads. We use the decontamination database from \vrb{clockwork} (\url{https://github.com/iqbal-lab-org/clockwork}), which contains a wide range of organisms, including viral, human, \mtb{}, Nontuberculous Mycobacteria (NTM), and nasopharyngeal-associated bacterial genomes. Next, reads are mapped to the database using \vrb{bwa mem} (Illumina; v0.7.17) \cite{li2013} and \vrb{minimap2} (\ont{}; v2.17) \cite{li2018}. A read is retained and written to the final decontaminated fastq file if it has any mapping to a non-contamination (\mtb{}) genome in the database; all other reads are considered contamination.
All decontaminated fastq files were randomly subsampled to a depth of 60x (Illumina) and 150x (\ont{}) using \vrb{rasusa} \cite{rasusa2019}. Subsampling limits excessively large read sets, which can drastically slow down later steps in the analysis without providing any benefit \cite{demaio2019}. Any sample with a depth less than the maximum threshold remains unchanged by this subsampling.
The last step in the QC pipeline is to assign lineages for each sample. A panel of lineage-defining SNPs \cite{Shitikov2017,Rutaihwa2019,stucki2016} is used in conjunction with a sample's Illumina VCF from \autoref{sec:illumina-var-call} for the lineage assignment. At each lineage-defining position in the sample's VCF, we determine if the called allele is the same as the panel allele. If it is, we add the full lineage that allele defines (e.g. $4.1.1$) to a list of called lineages. We abandon lineage assignment for a sample if more than one heterozygous call is made at lineage-defining positions. After classifying all of a sample's lineage-defining positions, we then produce a lineage assignment based on the list of called lineages. We use the most recent common ancestor of all the called lineages as the lineage assignment. For example, if the called lineages are $[4, 4.2.3, 4.2.5]$, the lineage assignment would be $4.2$. Finally, a mixed-lineage assignment is made if more than one called lineage is from a different major lineage group. For example, $[4, 4.2.3, 4.2.5, 3.2]$ would still be called lineage $4.2$; however, $[4, 4.2.3, 4.2.5, 3.2, 3.1]$ would be deemed mixed.
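To make the most-recent-common-ancestor rule concrete, the following Python sketch shows one possible implementation that reproduces the examples above; the function name and the handling of off-majority calls are illustrative assumptions rather than the exact code used in the pipeline.
\begin{verbatim}
from collections import Counter

def assign_lineage(called_lineages):
    # Count calls per major lineage group (the part before the first dot).
    majors = Counter(lin.split(".")[0] for lin in called_lineages)
    primary = majors.most_common(1)[0][0]
    off_primary = [l for l in called_lineages if l.split(".")[0] != primary]
    if len(off_primary) > 1:  # >1 call from another major lineage group
        return "mixed"
    calls = [l.split(".") for l in called_lineages
             if l.split(".")[0] == primary]
    # Drop calls that are ancestors (prefixes) of deeper calls.
    deepest = [c for c in calls
               if not any(len(d) > len(c) and d[:len(c)] == c for d in calls)]
    # The assignment is the longest prefix shared by the remaining calls.
    mrca = []
    for parts in zip(*deepest):
        if len(set(parts)) > 1:
            break
        mrca.append(parts[0])
    return ".".join(mrca)

assert assign_lineage(["4", "4.2.3", "4.2.5"]) == "4.2"
assert assign_lineage(["4", "4.2.3", "4.2.5", "3.2"]) == "4.2"
assert assign_lineage(["4", "4.2.3", "4.2.5", "3.2", "3.1"]) == "mixed"
\end{verbatim}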
The purpose of QC is to ensure that all samples used in later analyses are of the highest quality. By highest quality, we mean all samples have perfectly matched Illumina and \ont{} data, sufficient read depth of coverage on both sequencing technologies (Illumina $\ge 20$ and \ont{} $\ge 30$), no contamination, and no evidence of a mixed \mtb{} population. Fifty-eight samples failed to pass our QC measures, and 39 had non-matched Illumina and \ont{} data. One of the samples that failed QC is part of the eight PacBio samples we generated assemblies for in \autoref{sec:asm_results} - we exclude this sample from any further analysis. In total, we use the 150 samples that have passed QC in the remainder of this chapter and \autoref{chap:dst}.
%=========================================================================
\section{Construction of \mtb{} reference graphs}
\label{sec:tbprg}
A parallel line of investigation in this chapter is to assess the benefit of \pandora{} for \mtb{} transmission cluster inference. In particular, we focus on its use of prior knowledge about variation in a population and its ability to genotype collections of samples against a reference chosen to be maximally close to all samples (\autoref{sec:pandora-compare}). \pandora{} requires a pan-genome reference graph (\panrg{}) in order to operate; for the work in this chapter, we chose to construct a reference \panrg{} based on the \mtb{} reference genome, H37Rv (accession NC\_000962.3). We add variants sampled from 15,211 global \mtb{} isolates gathered by the \cryptic{} consortium \cite{cryptic2021data}. We sampled at two different rates to evaluate how varying \panrg{} complexity affects variant-calling precision and recall.
To ensure the reference \panrg{} is not biased towards a particular lineage, we first split the global \cryptic{} VCF into separate lineage VCFs. We assign lineages for each of the global samples using the same approach as in \autoref{sec:ch2-qc}. In addition, we include variant calls from 14 high-quality \mtb{} assemblies, representing lineages 1-7 \cite{chiner2019,gramtools2021}. The two \panrg{} complexities we construct were termed "sparse" and "dense". From each lineage VCF, we took a random subsample of 50 and 200 samples and combined them into single sparse and dense VCFs, respectively. Note, we use the same fixed random seed for the subsampling to ensure the sparse \panrg{} is a subset of the dense \panrg{}. Finally, we filtered the resulting VCFs to remove any positions with no alternate allele calls or that failed the filtering applied by the \cryptic{} pipeline \vrb{clockwork}. One exception is that we did not remove positions that overlap a genome mask of repetitive regions in H37Rv \cite{tbmask2014}.
The reference \panrg{} that \pandora{} uses is actually a collection of local \prg{}s (loci). These loci are effectively partitions of the original genome; one can partition based on any criteria. We chose to split the H37Rv genome based on the genomic features outlined in the accompanying General Feature Format (GFF) file from the NCBI database. We also retain the segments \emph{between} the features - so-called intergenic regions (IGRs). We combine genomic features with overlapping coordinates (e.g., transcribed on opposite strands or different reading frames) into a single locus and join any locus (feature or IGR) shorter than 500bp with its 3' neighbour. By building the reference \panrg{} in this manner, we ensure representation of every position in the H37Rv genome amongst all loci. We then remove any locus with 30\% or more of its positions overlapping the H37Rv genome mask mentioned above \cite{tbmask2014}. Refer to \autoref{app:mask} for a detailed description of how we chose this masking strategy.
We form the sparse and dense \panrg{}s by applying the variants from the respective VCF to the template (reference) sequence of each locus; for each position in the VCF, we infer the locus it corresponds to. We then take all (called) alternate alleles and create a sequence for each; that is, the template sequence, with the reference allele replaced by the alternate allele. Note, we disregard any indels longer than 20bp or that span a locus boundary. Finally, all of these sequences are pooled into a single fasta file for each locus.
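As a minimal illustration of this step, the Python sketch below applies a single alternate allele to a locus template sequence and encodes the exclusion rule for long or boundary-spanning indels; the coordinate conventions and function names are assumptions made for the example, not the code used to build the \panrg{}s.
\begin{verbatim}
def keep_variant(locus_start, locus_end, pos, ref, alt, max_indel=20):
    # Discard indels longer than 20bp or variants spanning a locus boundary.
    if abs(len(ref) - len(alt)) > max_indel:
        return False
    return locus_start <= pos and pos + len(ref) - 1 <= locus_end

def apply_alt(template, locus_start, pos, ref, alt):
    # Return the locus template with the reference allele at `pos`
    # (1-based H37Rv coordinate) replaced by the alternate allele.
    offset = pos - locus_start
    assert template[offset:offset + len(ref)] == ref
    return template[:offset] + alt + template[offset + len(ref):]

# One sequence is produced per called alternate allele at a site, e.g.
# sequences = [apply_alt(template, start, pos, ref, alt) for alt in alts]
\end{verbatim}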
The multi-sequence fasta file for each locus is then subjected to multiple sequence alignment (MSA) using MAFFT (v7.471; \cite{nakamura2018}). We use the accurate global alignment setting, G-INS-i \cite{katoh2016}, with default parameters, using the \vrb{ginsi} script provided with MAFFT. The resulting MSA is then converted to a \pandora{}-compatible \prg{} using the \makeprg{} program (v0.1.1; \autoref{sec:make_prg}; \cite{pandora}) with a maximum nesting level of 5 and minimum match length of 7. All of the local \prg{}s are then combined into a single \panrg{} file and indexed with \pandora{} using a \kmer{} size of 15 and window size of 14. In the end, we have two \panrg{} files - sparse and dense.
\subsection{Computational performance}
\label{sec:tbprg-comp-perf}
An important consideration for the usability of any genomic method is the computational cost in terms of time and memory resources. The construction process just outlined need only be run once, and then it can be used as a reference for subsequent \pandora{} usage. However, it is necessary to understand the time and resources required in order to identify bottlenecks. Additionally, if the resource usage is high enough, it may also limit who can build a reference graph. We outline the time and memory requirements for each step of the graph construction in \autoref{tab:build-prg}. All times are on a single compute node with 32 CPU cores. We only report the MSA, \makeprg{}, and \pandora{} index steps as these are necessary steps; those preceding use little time and memory and can be done in several different ways.
\begin{table}
\centering
\resizebox{\textwidth}{!}{%
\begin{tabular}{@{}lllllll@{}}
\toprule
& \multicolumn{3}{l}{Sparse} & \multicolumn{3}{l}{Dense} \\ \midrule
Step & CPU time (sec) & Real time (H:m) & Max. RAM (GB) & CPU time (sec) & Real time (H:m) & Max. RAM (GB) \\
MSA & 138576 & 1:16 & \textbf{209} & 445284 & 3:56 & \textbf{301} \\
Make PRG & 3746 & 0:04 & 0.9 & 4269 & 0:05 & 0.9 \\
Index & 142 & 0:01 & 1.5 & 361 & 0:01 & 1.7 \\ \bottomrule
\end{tabular}%
}
\caption{Computational time and memory (RAM) usage for the main steps of building a \mtb{} reference graph. Sparse and Dense refer to two different densities with respect to the number of variants used. All steps were run on a single compute node with 32 CPU cores. MSA=multiple sequence alignment;PRG=population reference graph.}
\label{tab:build-prg}
\end{table}
The most notable computational usage value from \autoref{tab:build-prg} is the fact that the sparse and dense MSAs require 209 and 301GB of memory, respectively. The exact cause of this high memory usage is not precisely known because we run the MSA of all loci in parallel and do not receive memory usage for each CPU core. However, MSA memory usage is known to scale exponentially with the number of sequences \cite{Wang1994}, and the sparse and dense loci with the highest number of sequences had 180 and 335 sequences, respectively. Therefore, we could reduce memory usage by placing a cap on the number of sequences in a locus. In addition, there are memory-saving options within MAFFT that could also be used, along with reducing the number of CPU cores, at the cost of increasing the runtime.
%=========================================================================
\section{Variant calling and filtering assessment}
\label{sec:var-calls}
One approach to determining genetic distance is to count the number of SNPs that differentiate two samples. These distances enable the identification of possible transmission clusters based on some predefined number of expected SNPs. Filtering of variant calls is integral to creating trusted variant calls on which to base such distances. However, there are many such filters used for Illumina genomic data, and they can produce inconsistent results \cite{walter2020}. As our focus is on whether \ont{} can be used for public health applications, we use the PHE Illumina pipeline - COMPASS \cite{Jajou2019} - as a comparison.
Before attempting to define SNP thresholds for \ont{} data, we explore the impact of a range of filtering parameters for both \bcftools{} and \pandora{}. The aim of this filter calibration is ultimately to determine whether SNP-calling precision for \ont{} is comparable with Illumina, and, if not, how close we can get.
We evaluate the resulting, filtered SNP calls against the COMPASS Illumina SNP calls for the seven samples with high-quality PacBio assemblies (see \autoref{sec:asm_results}) to ensure no bias for Illumina or \ont{}. For others interested in investigating variant filters for \ont{} data, we also hope this calibration acts as a good starting point for deeper analysis.
\subsection{Validating variant calls}
\label{sec:validate-var-calls}
We evaluate the precision and recall of SNPs using the method outlined in \autoref{sec:denovo-empirical-eval} - \vrb{varifier} - with a flank length of 100bp \cite{minos}. The samples we evaluated are those seven with PacBio assemblies that passed QC. As a truth genome for each, we use the unpolished \flye{} PacBio assembly, along with a mask for low-quality regions. These low-quality regions were identified by aligning the sample's Illumina reads to the assembly with \vrb{bwa mem} and flagging any position with less than 10 reads mapping to it or less than 90\% agreement (see \autoref{app:asm_disagree}).
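As an illustration of how such a mask can be derived, the following Python sketch flags assembly positions with insufficient Illumina depth or agreement, assuming per-position read depth and match counts have already been extracted from the alignment (e.g. from a pileup); the function and array names are purely illustrative.
\begin{verbatim}
import numpy as np

def low_quality_positions(depth, matches, min_depth=10, min_agreement=0.9):
    # depth[i]   = number of Illumina reads covering assembly position i
    # matches[i] = number of those reads agreeing with the assembly base
    depth = np.asarray(depth, dtype=float)
    matches = np.asarray(matches, dtype=float)
    agreement = np.divide(matches, depth,
                          out=np.zeros_like(depth), where=depth > 0)
    return np.where((depth < min_depth) | (agreement < min_agreement))[0]
\end{verbatim}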
\subsection{Illumina variant calling}
\label{sec:illumina-var-call}
Fan Yang-Turner performed the Illumina variant calls (see \autoref{sec:ch2-acknowledge}) with the COMPASS pipeline used by PHE. Briefly, reads are mapped to H37Rv, and \vrb{samtools mpileup} is used to identify SNPs \cite{samtools2009}. SNPs are filtered based on the following criteria: i) must have at least five high-quality supporting reads, ii) must have at least one read in each direction, iii) 75\% of reads must be high-quality, iv) the diploid genotype must be homozygous, v) fraction of reads supporting the major allele must be at least 90\%. In addition, any SNPs falling within masked sites - as defined by aligning the H37Rv to itself and identifying repetitive regions \cite{tbmask2014} - are excluded. This mask is the same as in \autoref{sec:tbprg}.
\subsection{\ont{} variant calling: BCFtools}
\label{sec:bcftools-filters}
As there is no standard variant caller used for \mtb{} \ont{} data, we chose to use BCFtools (v1.11; \cite{bcftools2021}), as it has a long history of use in bioinformatics and is one of the main variant callers used for Illumina data. It is readily available to all users and is much more user-friendly than other available tools, some of which have only been trained on human data \cite{clair2020}, or require the raw \ont{} data \cite{nanopolish2015}. Another main reason for its use is that it is the updated form of the \vrb{samtools} pipeline used by COMPASS and thus provides a somewhat "fair" comparison.
\ont{} reads were aligned to H37Rv using \vrb{minimap2}, with options to produce SAM output containing no secondary alignments. The resulting SAM file is provided as input to the \bcftools{} subcommand \vrb{mpileup} with the option to skip indels. The resulting pileup is then used to call SNPs with \vrb{bcftools call} using the multiallelic caller with a haploid model and an option to skip indels.
There are many fields in the resulting VCF relating to the information about the reads that support each position. After a thorough examination of how filtering based on each field impacts precision and recall, we settled on five filters for \bcftools{}. First, we filter out positions with a quality (QUAL field) score less than 60. The quality is a log-scaled probability for the assertion made by the alternate allele. Second, a read position bias (RPB) of at least 0.05 is required. RPB indicates a bias for support from the ends of reads, as they are usually low quality. Third, we filter out positions with a segregation-based metric (SGB) less than -0.5. SGB is a measure of how read depths across alleles match expected depths. Fourth, variant distance bias (VDB) less than 0.002 is filtered out. VDB measures whether a variant's position is randomly distributed within the reads that support it or biased (e.g. near the start). Fifth, the fraction of reads supporting the called allele (FRS) must be 90\% or more.
\autoref{fig:bcftools-filters} shows how the addition of each of these filters impacts precision (proportion of calls that are correct) and recall (proportion of variants found) for \bcftools{} compared with COMPASS (Illumina). The trade-off between precision and recall is dependent on the question one is trying to answer. For transmission clustering, we place greater importance on precision as we seek to ensure the SNPs used are of the highest quality. The consequence of this is we miss some variants - compared to COMPASS.
The filtering we use for the remainder of the transmission inference
work is to apply all five filters mentioned above. These filters, represented by the yellow box in \autoref{fig:bcftools-filters}, lead to median precision and recall of 99.94\% and 84.26\%, respectively, for the seven validation samples with PacBio assemblies. This is compared to the COMPASS median precision and recall values of 100\% and 92.58\%, respectively.
In summary, we produce \ont{} variant calls with equivalent precision to Illumina but with lower recall.
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/bcftools-precision-recall-filters.png}
\caption{{Precision (left) and recall (right) of SNPs for COMPASS (red) and a selection of \bcftools{} filters. \vrb{\#nofilter} (blue) is \bcftools{} with no filtering of variants. \vrb{QUAL} (purple) is \bcftools{} SNPs with a quality score of 60 or more. \vrb{+RPB+VDB+SGB} (grey) indicates \bcftools{} variants with the INFO field values $\ge 0.05$, $\ge 0.002$, and $\le -0.5$, respectively, plus QUAL. \vrb{+FRS} (yellow) shows \bcftools{} SNPs with all previous filters, plus only SNPs where the fraction of reads supporting the variant is at least 90\%. Note: the precision plot y-axis was cut causing some \vrb{\#nofilter} points to be hidden.
{\label{fig:bcftools-filters}}
}}
\end{center}
\end{figure}
\subsection{\ont{} variant calling: Pandora}
\label{sec:pandora-filters}
When assessing the best filters for increasing the precision of variant calls from \pandora{}, we are also interested in determining whether \panrg{} density has a noticeable impact on performance. Therefore, we use the sparse and dense \panrg{}s from \autoref{sec:tbprg} and look at the precision and recall these produce for the same filters.
\subsubsection{Single-sample}
\label{sec:map-var-calls}
For each sample, we discover \denovo{} variants using the method outlined in \autoref{sec:denovo-insert} using the \vrb{discover} command of \pandora{} (version 0.8.0). We use default parameters, except limiting the number of novel variants from a candidate region to 10. Novel variants are added to the original MSAs from \autoref{sec:tbprg} with the \vrb{--add} routine in MAFFT \cite{katoh2012}. Next, \makeprg{} is run on the subsequent alignments, and the resulting updated \panrg{} is indexed with \pandora{}. Finally, the \vrb{map} routine of \pandora{} genotypes a sample's reads and produces a VCF. To be able to compare the \pandora{} VCF to the truth assemblies, we instruct \pandora{} to output coordinates with respect to the H37Rv reference sequence for each locus. Running \pandora{} in this way leads to some alleles being quite long and redundant, so we use \vrb{bcftools norm} to trim unused alleles and reduce variants down to their most succinct representation.
We apply four filters to the \pandora{} variant calls. First, there must be at least 3 reads supporting the called allele. Second, we keep positions with a strand bias of at least 1\%, which we define as the lower of the forward and reverse strand depths divided by the total depth. This is a somewhat different definition of strand bias to that used in human genetics \cite{guo2012effect}; our definition tests whether there are significantly more reads on one strand. For example, in this definition, if the forward and reverse strand have read depths of 1 and 9, respectively, the strand bias is 10\%. Therefore, this example position would not be filtered out. Third, the genotype confidence score must be no less than 5. This score is the difference between the log-likelihoods of the called allele and the next most likely allele. Fourth, the fraction of reads supporting the called allele (FRS) must be at least 90\% - calculated the same way as in \autoref{sec:bcftools-filters}.
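To make the strand bias calculation explicit, the short Python sketch below reproduces the definition and the worked example above; it is purely illustrative.
\begin{verbatim}
def strand_bias(fwd_depth, rev_depth):
    # Lower of the forward/reverse depths divided by the total depth.
    total = fwd_depth + rev_depth
    return min(fwd_depth, rev_depth) / total if total else 0.0

assert strand_bias(1, 9) == 0.10  # above the 1% threshold, so not filtered
\end{verbatim}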
The results of incrementally applying these filters, along with no filters and COMPASS, are shown in \autoref{fig:pandora-filters-snps}. Of the two \panrg{}s used, \pandora{}'s best median precision (100\%) is with the sparse \panrg{} and all filters applied. With all filters, the sparse \panrg{} leads to a median recall of 71.99\%. When compared to the COMPASS median precision and recall values of 100\% and 92.58\%, respectively, \pandora{} produces \ont{} SNP calls with equivalent precision to Illumina, but with 20.59\% less recall (SNPs \emph{and} indels are assessed in \autoref{app:pandora-all-vars}). Part of the recall disparity between \pandora{} and COMPASS is explained by the masking of loci in the reference graph (see \autoref{sec:tbprg} and \autoref{app:mask}). Despite this large difference in recall, we chose to use all of the filters outlined above because, as mentioned earlier, we value \ont{} precision more than recall for this transmission cluster work - our genomic epidemiologist collaborators in Oxford/PHE, with whom we are doing this, prefer precision. (We note that others argue for allowing more SNPs and are happy to deal with the higher SNP error rates \cite{walter2020}.)
In nearly every filtering combination, the sparse \panrg{} led to higher recall \emph{and} precision, albeit marginally. As a result, the remaining work featuring \pandora{} in this chapter will use the sparse \panrg{}, given the increased computational cost of using the dense \panrg{} (\autoref{sec:tbprg-comp-perf} and \autoref{sec:var-call-comp-perf}), without any benefit for precision and recall.
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/pandora-precision-recall-filters-snps.png}
\caption{{Precision (bottom) and recall (top) of SNPs for COMPASS (purple) and \pandora{} with sparse (red) and dense (blue) \panrg{}s. The \pandora{} boxes start with no filters on the left, with each box moving to the right adding a filter to the previous box. The COMPASS box is a reference to the precision and recall of Illumina variant calls. Linear PRG density refers to the fact that COMPASS uses a single, linear reference genome as opposed to \pandora{}, which uses a genome graph. The black points refer to single data points for the seven samples used. MIN\_COV=minimum depth of coverage;MIN\_SB=minimum strand bias;MIN\_GT\_CONF=minimum genotype confidence score;MIN\_FRS=minimum fraction of read support.
{\label{fig:pandora-filters-snps}}%
}}
\end{center}
\end{figure}
\subsubsection{Multi-sample}
\pandora{}'s \vrb{map} routine infers a consensus sequence for a single sample and outputs variant calls with respect to that consensus. However, \pandora{} also has a multi-sample counterpart - the \vrb{compare} command. The \compare{} routine infers a single reference sequence that is maximally close to \emph{all} samples. It outputs a locus presence/absence matrix, along with a VCF of genotypes for all samples with respect to the inferred reference sequence (see \autoref{sec:pandora-compare} for a description of the \compare{} method). As \compare{} was designed for analysing collections of (potentially divergent) samples, we assess its ability to describe \mtb{} transmission clusters.
The process for calling variants using \compare{} is first to aggregate the novel variants discovered for each sample in \autoref{sec:map-var-calls}. Then, instead of creating an updated \panrg{} for each sample, we use the aggregated novel variants to update the MSAs and \prg{}s as in \autoref{sec:map-var-calls}. Thus, in the end, we have a \panrg{} that has segregating variants for all samples, rather than the \panrg{}s used by \map{}, which only has the novel variants for a single sample. Next, we run \vrb{pandora compare} using these updated sparse and dense \panrg{}s and filter the resulting VCF as per \autoref{sec:pandora-filters}.
As a result of its design, it is not possible to provide \compare{} with a reference to base VCF coordinates on (as in \autoref{sec:map-var-calls}). Consequently, we cannot assess the precision and recall for the seven samples as above. However, we have evaluated the accuracy of \compare{} in \autoref{sec:denovo-empirical}; therefore, in this chapter, we assess its usefulness for calculating genetic relatedness.
\subsection{Computational performance}
\label{sec:var-call-comp-perf}
\subsubsection{Single sample methods}
In addition to the quality of the variant calls, the computational cost of producing them is also important. The CPU time and maximum memory usage for performing the \ont{} variant calling is shown in \autoref{fig:var-comp-perf}. \pandora{}'s performance is broken down into the individual stages, while \bcftools{} is represented by a single job (\vrb{pileup\_nanopore}). The median maximum memory for \bcftools{} was 8.2GB, although the maximum was as high as 58.5GB. This is compared to the highest \pandora{} step - updating the MSAs with novel variants - with a median maximum memory usage of 9.7GB and 13.3GB for the sparse and dense \panrg{}s respectively. The highest memory usage for \pandora{} was 18.6GB during the updating of MSAs, nearly 40GB lower than the peak of \bcftools{}. However, we note that the peak memory usage for \pandora{}'s sparse and dense \panrg{} construction (\autoref{sec:tbprg-comp-perf}) was 209 and 301GB, respectively, although this is a one-off cost and does not need to be run for each sample.
The median CPU time for \bcftools{} was 35129 seconds, or 9.75 hours, with the longest run coming in at 138364 seconds (38.4 hours). To be able to compare \bcftools{} with \pandora{} as a whole, we can sum the median CPU time over each step, which gives 21704 and 53194 seconds, or 6.0 and 14.7 hours, for the sparse and dense \panrg{}s respectively. As with the memory usage, the longest runtime component of the \pandora{} pipeline was updating the MSAs with \denovo{} variants. Note, \bcftools{}' \vrb{mpileup} command is not parallelised, while \pandora{}'s steps are, so the wall clock time for the two is dependent on the number of CPU cores available.
\subsubsection{Multi sample method}
The time and memory usage of \compare{} are not directly comparable to \bcftools{} and \pandora{} \vrb{map} as it runs on all samples at the same time. Additionally, the novel variant discovery phase of \pandora{} \vrb{map} for all samples technically contributes to the overall runtime of \compare{}. As the performance of this discovery step has already been reported, we outline the remainder of the \compare{} steps in \autoref{tab:compare-perf}. In total, the remainder of the sparse and dense \panrg{} steps took 58.4 and 83.1 CPU hours, respectively. The actual wall clock time for these steps was 5.5 and 7.3 hours using 32 CPUs. Maximum memory usage occurred while updating the MSAs and peaked at 38 and 37GB for the sparse and dense \panrg{}s, respectively.
\begin{figure}
\centering
\includegraphics[width=0.95\textwidth]{Chapter2/Figs/cpu_and_memory.png}
\caption{The CPU time (in seconds; y-axis; top) and maximum memory usage (in megabytes; y-axis; bottom) for each \ont{} variant-calling job. Sparse (red) and dense (blue) refer to \pandora{} steps with the respective density \panrg{}. \vrb{bcftools} (purple) only has one step (\vrb{pileup nanopore}). The violins represent the distribution of CPU time over all samples. Note, the y-axis for both plots is log-scaled.}
\label{fig:var-comp-perf}
\end{figure}
\begin{table}
\centering
\resizebox{\textwidth}{!}{%
\begin{tabular}{@{}lllllll@{}}
\toprule
& \multicolumn{3}{l}{Sparse} & \multicolumn{3}{l}{Dense} \\ \midrule
Step & CPU time (sec) & Real time (H:m) & Max. RAM (GB) & CPU time (sec) & Real time (H:m) & Max. RAM (GB) \\
Update MSA & 114677 & 1:01 & 38 & 130221 & 1:15 & 37 \\
Make PRG & 4700 & 0:05 & 1.2 & 5403 & 0:06 & 1.1 \\
Index & 538 & 0:01 & 2.1 & 1224 & 0:02 & 2.4 \\
Compare & 90486 & 4:25 & 5.4 & 162294 & 6:04 & 6.1 \\ \bottomrule
\end{tabular}%
}
\caption{CPU and wall clock time, and memory (RAM) usage for the main steps of running \pandora{}'s multi-sample routine \vrb{compare}. Sparse and Dense refer to two different densities with respect to the number of variants used. All steps were run on a single compute node with 32 CPU cores. MSA=multiple sequence alignment; PRG=population reference graph.}
\label{tab:compare-perf}
\end{table}
\subsection{Summary}
\label{sec:var-summary}
In summary, \autoref{fig:prec-recall-filters} shows that our selection of filters for \ont{} variant callers provides precision on-par with Illumina. However, this precision comes at the cost of a loss in recall. Additionally, both \bcftools{} and \pandora{} have considerable computational costs compared to what is typical for Illumina data.
The remainder of this chapter explores how the SNP calls from \ont{} can be used to calculate distances between samples and define putative transmission clusters from these distances. To recapitulate, we are using SNP calls from \bcftools{} (per-sample), \map{} (per-sample; sparse \panrg{}), and \compare{} (multi-sample; sparse \panrg{}). We are especially interested in how similar the pairwise distances are between samples and sequencing modality and whether the same distance thresholds used for Illumina can also be used for \ont{}.
\begin{figure}
\begin{center}
\includegraphics[width=0.9\columnwidth]{Chapter2/Figs/combined-precision-recall-filters-snps.png}
\caption{{Precision (left) and recall (right) of filtered SNPs for COMPASS (red), \bcftools{} (blue), and \pandora{} (purple). Each black point represents one of seven evaluation samples.
{\label{fig:prec-recall-filters}}%
}}
\end{center}
\end{figure}
%=========================================================================
\section{Pairwise SNP distance comparison}
\label{sec:snp-dist}
When attempting to infer transmission clusters, one approach defines a SNP distance threshold and says that any genomes within this distance of each other are clustered (possible transmissions) \cite{walker2013}. It follows that the SNPs used must be trusted. Having shown we can achieve SNP precision on-par with Illumina using \ont{} data (see \autoref{sec:var-calls}), we investigate the pairwise SNP distance between samples produced by both Illumina and \ont{} sequencing technologies. The intention here is to determine whether the thresholds typically used for Illumina data can also be used for \ont{}, or whether adjustments are required.
To determine the distance between samples, we first generate sample consensus sequences. We do this for each variant-caller: COMPASS (Illumina), \bcftools{} (\ont{}), and \pandora{} \vrb{map} (\ont{}) (not \compare{} - see below). A consensus sequence is obtained by applying the calls from a given VCF (from \autoref{sec:var-calls}) to the \mtb{} reference genome. We nullify (mark as \vrb{N}) any positions where: i) the position failed filtering, ii) the reference genome position does not appear in the VCF file (except for \map{}), iii) the called genotype is null, or iv) the position is within the reference genome mask.
Next, all sample consensus sequences for a variant-caller are joined into a single FASTA file and a pairwise distance matrix is calculated using \vrb{snp-dists} (version 0.7.0) \cite{snp-dists}. In the case of \compare{} (multi-sample mode), we cannot follow this approach for generating a consensus sequence and distance matrix due to the inability to translate the coordinates from a graph to a linear reference. However, as \compare{} selects a cohort-specific reference, it effectively allows one to go directly to a distance matrix. Therefore, we generate a genotype array instead of a consensus sequence by extracting the called genotype for each sample at each site (VCF entry). Where a site has failed a filter, we use a genotype value of -2. To calculate the distance between two samples, we compare their genotype arrays; if either sample's genotype is $<0$ (i.e., null or filtered) or the genotypes are the same, we record a distance of 0, otherwise 1. The sum of these comparisons for each genotype is the distance between the two samples.
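To make the \compare{} distance calculation concrete, the following Python sketch implements the rule just described; the encoding of null calls as -1 is an assumption for the example, and the function is illustrative rather than the exact implementation used.
\begin{verbatim}
import numpy as np

def compare_distance(gt_a, gt_b):
    # One integer per VCF site: the called allele index, with -1 for a
    # null call (assumed encoding) and -2 for a site that failed filtering.
    a, b = np.asarray(gt_a), np.asarray(gt_b)
    usable = (a >= 0) & (b >= 0)           # both genotyped and unfiltered
    return int(np.sum(usable & (a != b)))  # count sites where calls differ

# e.g. compare_distance([1, 0, -2, 2], [1, 1, 0, -1]) == 1
\end{verbatim}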
The pairwise SNP distance relationship is presented in \autoref{fig:dotplot}. For a given pair of samples, we plot their SNP distance, based on the COMPASS (Illumina) variant calls (x-axis), against the SNP distance for the same pair, based on the \ont{} variant calls (y-axis). All pairwise comparisons between a sample and itself are absent from the visualisation, and only a single value was used for each pair (i.e., we keep sample1 vs. sample2 and discard sample2 vs. sample1 as they are the same). RANSAC Robust Linear Regression \cite{fischler1981}, as implemented in the Python library \vrb{scikit-learn} \cite{scikitlearn}, was used for determining a linear equation and line-of-best fit for the relationship between pairwise Illumina and \ont{} SNP distance.
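For reference, fitting such a robust linear model takes only a few lines of Python with \vrb{scikit-learn}; the distance vectors below are dummy values standing in for the real pairwise distances.
\begin{verbatim}
import numpy as np
from sklearn.linear_model import RANSACRegressor

# Matched vectors of pairwise SNP distances (one value per unordered pair);
# dummy data shown here in place of the real distance matrices.
illumina = np.array([0, 3, 7, 12, 25, 60], dtype=float).reshape(-1, 1)
nanopore = np.array([0, 2, 6, 10, 21, 49], dtype=float)

model = RANSACRegressor(random_state=0).fit(illumina, nanopore)
slope = model.estimator_.coef_[0]
intercept = model.estimator_.intercept_
print(f"ONT distance ~ {slope:.3f} * Illumina distance + {intercept:.3f}")
\end{verbatim}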
If the same thresholds used for Illumina can also be used for \ont{}, we would expect the distances to be the same and the bulk of the points in the plot to fall on the dashed, diagonal identity line in \autoref{fig:dotplot}. What we see instead is a linear relationship that falls \emph{under} this identity line - for all \ont{} variant callers. Given the filtered \ont{} SNP calls made by \bcftools{} and \pandora{} have lower recall than Illumina (\autoref{sec:var-summary}), this is expected, as they miss some SNPs found by Illumina.
We highlight one important observation in the zoomed inset of \autoref{fig:dotplot}. As SNP thresholds used for \mtb{} are generally well below 100 \cite{stimson2019}, it makes more sense to base SNP distance relationships on those samples that are "close". And indeed, when we zoom in on pairs of samples within 100 (Illumina) SNPs of each other, we see an association that is closer to the identity line. Fitting a linear model to this close subset of pairwise distances yields a relationship defined by the equation $y=0.806x+0.593$ for \bcftools{}, $y=0.575x+13.544$ for \pandora{} \vrb{map}, and $y=0.342x+0.765$ for \compare{}. Replacing $x$ with an Illumina SNP threshold gives the (predicted) equivalent \ont{} SNP threshold based on these relationships. For example, at an Illumina SNP distance of 12, the linear equation would predict a corresponding \bcftools{} \ont{} SNP distance of 10.
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/combined-dotplots.png}
\caption{{Pairwise SNP distance relationship between Illumina (COMPASS; x-axis) and \ont{} (\bcftools{} (red), and \pandora{} single-sample (blue) and multi-sample (purple) mode; y-axis) data. Each point represents the SNP distance between two samples for the two sequencing modalities. The black, dashed line shows the identity line (i.e. $y=x$) and the coloured lines show the lines of best fit from the robust linear model fit to the data. The zoomed inset shows all pairs where the COMPASS distance is $\le 100$.
{\label{fig:dotplot}}
}}
\end{center}
\end{figure}
% see https://github.com/mbhall88/head_to_head_pipeline/issues/61 for a full investigation of the outliers discussed below
In the middle-left of the inset in \autoref{fig:dotplot} a small cluster of \pandora{} \vrb{map} (blue) points can be seen. These have an approximate pairwise \ont{} distance of 100, but \texttildelow10 for Illumina. Upon further investigation, the cause of the large discrepancy in the distance was due to \pandora{} \vrb{map} failing to identify (and filter) some heterozygous calls. Two samples, in particular, occur as one member in all of the major outlying pairs. 94\% of the false-positive differences leading to the large \ont{} distances occur at positions that are filtered due to evidence of heterozygosity in COMPASS. That is, in the Illumina consensus sequence, these positions are ignored due to filtering and do not count as a difference. However, \pandora{} did not have sufficient read depth on both alleles to trigger the FRS filter (\autoref{sec:pandora-filters}) - leading to a passing variant call that differs from the sample it is being compared with.
\noindent
The relationship between Illumina and \ont{} distances is indeed linear for all three variant-calling methodologies. While the relationship is not identical, we will attempt to use a linear model fit to the relationship to infer what \ont{} SNP distance threshold is likely to align with a given Illumina threshold for defining putative transmission clusters.
%=========================================================================
\section{\ont{} transmission clustering}
\label{sec:clustering}
While the relationship between Illumina and \ont{} pairwise SNP distance is enlightening, ultimately, the fundamental question is: do \ont{} SNPs lead to transmission clusters consistent with those obtained with Illumina SNPs? To answer this question, we compare Illumina- and \ont{}-based clusters for four Illumina SNP thresholds.
A variety of SNP distance thresholds have been recommended for inferring transmission clusters \cite{stimson2019}. As we seek to show concordance of \ont{} data with PHE's Illumina-based strategy, we opt to investigate Illumina threshold values 0, 2, 5, and 12. PHE define two cases as clustered if they have a SNP distance $\le 12$ as "\emph{12 SNPs represents the maximum SNP difference between 2 isolates for which epidemiological links have previously been identified \cite{walker2013} and is a conservative measure for reporting isolate relatedness}" \cite{phe-tb-england}. A threshold of five was likewise selected because Walker \etal{} \cite{walker2013} found it to indicate membership in a recent transmission chain. Finally, threshold values 0 and 2 were chosen to provide insight into the level of granularity possible and are of clinical interest in some settings (personal correspondence with Tim Peto). For each of these four thresholds, we investigate what corresponding \ont{} SNP distance threshold yields the most similar clustering.
\subsection{Transmission cluster similarity}
\label{sec:cluster-similarity}
We use the distance matrices from \autoref{sec:snp-dist} to infer transmission clusters. To cluster samples, for a given SNP threshold $t$, we use pairs of samples with a distance $\le t$ to define a graph, $G=(V,E)$, where samples (nodes, $V$) are connected by weighted edges ($E$), with the weight of an edge indicating the distance between the two samples it connects. We define clusters as the set of connected components $\{C_1, C_2, \ldots, C_N\}$ of $G$, where $N$ is the number of clusters. That is, a cluster (connected component), $C_i$, is a subgraph of $G$ where a path exists between any two samples in $C_i$, but no path exists to any samples in the rest of $G$. With this definition, all clusters have a minimum of two members.
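As a minimal sketch of this procedure, the snippet below builds the clustering graph with the Python \vrb{networkx} library (also used for the similarity calculations later in this section); the sample names and distance-matrix format are assumptions made for the example.
\begin{verbatim}
import networkx as nx

def cluster(samples, dist, threshold):
    # dist[a][b] is the SNP distance between samples a and b.
    G = nx.Graph()
    G.add_nodes_from(samples)
    for i, a in enumerate(samples):
        for b in samples[i + 1:]:
            if dist[a][b] <= threshold:
                G.add_edge(a, b, weight=dist[a][b])
    # Clusters are the connected components with at least two members;
    # isolated nodes are the singletons.
    return [c for c in nx.connected_components(G) if len(c) > 1]
\end{verbatim}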
To assess how closely \ont{} SNP-based clustering approximates Illumina SNP-based clustering, we adapt a similarity measure on sets; the Tversky Index \cite{tversky1977}. We define the Illumina clustering as $G$ and the \ont{} clustering as $H$. We are interested in being able to quantify the recall and precision of the \ont{} clustering with respect to Illumina. In this sense, recall describes the proportion of clustered samples in $G$ clustered with the expected (correct) samples in $H$. Likewise, precision in this context tells us when extra samples are added to existing clusters by $H$ or when clusters in $G$ are joined in $H$.
In order to be able to define precision and recall when comparing two clustering graphs $G$ and $H$, we define the Tversky Index
\begin{equation}
\label{eq:tversky-index}
TI(n, G, H)=\frac{\left|C_{n,G}\cap C_{n,H}\right|}{\left|C_{n,G}\cap C_{n,H}\right|+\alpha |C_{n,G}-C_{n,H}|+\beta |C_{n,H}-C_{n,G}|}
\end{equation}
where $C_{n,G}$ is the cluster in $G$ that sample $n$ is a member of. When $\alpha = 1$ and $\beta=0$ in \autoref{eq:tversky-index}, we get a metric analogous to recall - as described above. Therefore, we define recall, $R$, for a single sample $n$ as
\begin{equation}
\label{eq:recall}
R(n, G, H)=\frac{\left|C_{n,G}\cap C_{n,H}\right|}{\left|C_{n,G}\cap C_{n,H}\right|+|C_{n,G}-C_{n,H}|}=\frac{\left|C_{n,G}\cap C_{n,H}\right|}{|C_{n,G}|}
\end{equation}
When $\alpha = 0$ and $\beta = 1$ in \autoref{eq:tversky-index}, we get a metric analogous to precision. As such, we define precision $P$, for a single sample $n$ as
\begin{equation}
\label{eq:precision}
P(n, G, H)=\frac{\left|C_{n,G}\cap C_{n,H}\right|}{\left|C_{n,G}\cap C_{n,H}\right|+|C_{n,H}-C_{n,G}|}=\frac{\left|C_{n,G}\cap C_{n,H}\right|}{|C_{n,H}|}
\end{equation}
With these definitions for a single sample, we can assess the recall and precision of the \ont{} clustering, $H$, with respect to the Illumina clustering, $G$, by averaging each metric over all samples in $G$. This gives us the Sample-Averaged Cluster Recall (SACR)
\begin{equation}
\label{eq:sacr}
SACR=\frac{\sum_{n}^{V_G}R(n, G, H)}{|V_G|}
\end{equation}
where $V_G$ is the set of samples (nodes) in $G$ (Illumina graph). Likewise, we define the Sample-Averaged Cluster Precision (SACP) as
\begin{equation}
\label{eq:sacp}
SACP=\frac{\sum_{n}^{V_G}P(n, G, H)}{|V_G|}
\end{equation}
SACR states, on average, what proportion of the samples clustered together in $G$ are also clustered together in $H$ (\ont{}) - it is a measure of how many true positives \ont{} retains. Inversely, SACP states, on average, what proportion of the samples clustered together in $H$ are also clustered together in $G$ - it is a measure of how many extra samples \ont{} adds to clusters.
However, SACR and SACP do not inherently account for when $H$ has clusters containing only samples deemed non-clustered (singleton) in $G$. In order to quantify any extra clustering by $H$, we establish the Excess Clustering Rate (XCR) as the proportion of singletons (disconnected nodes) in $G$ that are connected in $H$. We define XCR as
\begin{equation}
\label{eq:xcr}
XCR = \frac{|S_G-S_H|}{|S_G|}
\end{equation}
where $S_G$ and $S_H$ are the sets of singletons in the respective graphs.
\noindent
We assess the cluster similarities using the Python programming language with the \vrb{networkx} library \cite{networkx}. For a given threshold, we create the Illumina clustering (graph), $G$, and the \ont{} clustering, $H$ - as described above - and use these to calculate the SACR, SACP, and XCR using \autoref{eq:sacr}, \autoref{eq:sacp}, and \autoref{eq:xcr} respectively.
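For completeness, the following Python sketch shows how SACR, SACP, and XCR can be computed from the two clusterings; it assumes each clustering is given as a list of sets of sample names and that the averages are taken over the Illumina-clustered samples, and it is an illustration of the definitions rather than the exact implementation used.
\begin{verbatim}
def find_cluster(sample, clusters):
    # Return the cluster containing `sample`, or an empty set (singleton).
    return next((c for c in clusters if sample in c), set())

def cluster_metrics(clusters_G, clusters_H, all_samples):
    # clusters_G: Illumina clusters; clusters_H: ONT clusters (lists of sets).
    clustered_G = set().union(*clusters_G) if clusters_G else set()
    clustered_H = set().union(*clusters_H) if clusters_H else set()
    recalls, precisions = [], []
    for n in clustered_G:
        Cg = find_cluster(n, clusters_G)
        Ch = find_cluster(n, clusters_H)
        shared = len(Cg & Ch)
        recalls.append(shared / len(Cg))
        precisions.append(shared / len(Ch) if Ch else 0.0)
    sacr = sum(recalls) / len(recalls) if recalls else 1.0
    sacp = sum(precisions) / len(precisions) if precisions else 1.0
    singletons_G = set(all_samples) - clustered_G
    xcr = (len(singletons_G & clustered_H) / len(singletons_G)
           if singletons_G else 0.0)
    return sacr, sacp, xcr
\end{verbatim}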
% ===========================================================
\subsubsection{An illustrated example of cluster similarity metrics}
\label{sec:cluster-example}
\autoref{sec:cluster-similarity} outlines three metrics - SACR, SACP and XCR - for evaluating the similarity between two different strategies for transmission clustering. In order to provide the reader with greater intuition for the purpose of each metric, we present an illustrated example in \autoref{fig:cluster-example}.
We take \autoref{fig:example-truth} to be the truth clusters and \autoref{fig:example-test} to be test clusters. These are akin to Illumina and \ont{} clusters, respectively, in \autoref{sec:cluster-similarity}. The individual recall and precision values (defined in \autoref{eq:recall} and \autoref{eq:precision}) for each sample in \autoref{fig:example-truth} are shown in \autoref{tab:cluster-example}. SACR and SACP (defined in \autoref{eq:sacr} and \autoref{eq:sacp}) are \emph{sample-averaged}, so their values for this example are 0.82 and 0.83 respectively.
To highlight the objective of SACR, we use the truth and test clusters containing the sample $F$. Samples $F$, $G$, $H$ and $I$ are shared between both, but $J$ is missing from the test cluster. To calculate the individual recall for $F$, we take the intersection size of the truth and test clusters it exists in and divide it by the size of the truth cluster - $\frac{4}{5}=0.8$. We do the same for the precision of sample $D$, except we divide by the size of the test cluster - giving $\frac{2}{3}=0.66$.
The relevance of the XCR metric is best exemplified by the test cluster containing samples $L$ and $M$. As we calculate SACR and SACP for all samples in the \emph{truth} clusters, these two samples would be ignored. However, they are samples that - according to the truth - should not be part of any cluster (singletons). Therefore, SACR and SACP cannot capture these extra clusterings if they do not contain clustered truth samples. XCR covers this limitation and is the proportion of singletons in the truth that are clustered in the test (see \autoref{eq:xcr}). As \autoref{fig:cluster-example} does not show singletons, let us pretend there are 20 singletons in the truth (including samples $L$ and $M$). This would give an XCR of $2/20=0.1$.
\begin{figure}
\centering
\begin{subfigure}[b]{0.4\textwidth}
\centering
\includegraphics[width=\textwidth]{Chapter2/Figs/illumina-cluster-example.png}
\caption{Truth clusters}
\label{fig:example-truth}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.4\textwidth}
\centering
\includegraphics[width=\textwidth]{Chapter2/Figs/ont-cluster-example.png}
\caption{Test clusters}
\label{fig:example-test}
\end{subfigure}
\caption{Illustrative examples of transmission clustering. \textbf{a)} represents truth clusters, while \textbf{b)} is clustering from some "test" method we would like to compare to \textbf{a}. The nodes represent samples with the numbers on the edges connecting them indicating the distance between those two samples. The red nodes indicate samples with a clustering disparity between the two clusterings. Note, we do not show singletons (disconnected nodes) - e.g., $J$ is missing from \textbf{(b)}.}
\label{fig:cluster-example}
\end{figure}
\begin{table}
\centering
\begin{tabular}{@{}lll@{}}
\toprule
sample & recall & precision \\ \midrule
A & 1.0 & 1.0 \\
B & 1.0 & 1.0 \\
C & 1.0 & 1.0 \\
D & 1.0 & 0.66 \\
E & 1.0 & 0.66 \\
F & 0.8 & 1.0 \\
G & 0.8 & 1.0 \\
H & 0.8 & 1.0 \\
I & 0.8 & 1.0 \\
J & 0.0 & 0.0 \\ \bottomrule
\end{tabular}
\caption{Cluster recall and precision results for each sample in \autoref{fig:cluster-example}.}
\label{tab:cluster-example}
\end{table}
\subsubsection{Summary}
To summarise, for each sample in an Illumina-defined cluster, SACR is the proportion of samples in its Illumina cluster also in its \ont{} cluster - averaged over all samples. SACP is the proportion of samples in its \ont{} cluster also in its Illumina cluster - averaged over all samples. SACR indicates whether samples have been missed from \ont{} clustering (false negatives), and SACP reveals if additional samples are being added to \ont{} clusters (false positives). One shortcoming of SACR and SACP is that they do not account for when the \ont{} clustering contains clusters where no member of the cluster is part of an Illumina cluster. To that end, XCR is the proportion of Illumina non-clustered (singleton) samples added to a cluster by \ont{}. For example, an XCR value of 0.1 would indicate that 10\% of non-clustered samples were part of a cluster in the \ont{} clustering. We provide an illustrated, worked example of these metrics in \autoref{sec:cluster-example}.
Of the metrics outlined above, our primary focus is SACR, as samples missed from clusters are of particular concern for public health agencies.
\subsection{Evaluation of transmission clusters}
\label{sec:eval-clusters}
The clusterings produced for the four Illumina SNP thresholds of 0, 2, 5, and 12 are shown in \autoref{fig:clustering-t0}, \autoref{fig:clustering-t2}, \autoref{fig:clustering-t5}, and \autoref{fig:clustering-t12}, respectively. We discuss the results for each \ont{} variant caller below.
Note, we initially evaluated how well the linear model-based thresholds (from \autoref{sec:snp-dist}) performed for each variant caller but found that hand-picked thresholds produced more accurate clusters (see \autoref{app:dist-sweep}).
\subsubsection{BCFtools}
\label{sec:bcftools-clustering}
For the four Illumina SNP distance thresholds of interest - 0, 2, 5, and 12 - the corresponding \bcftools{} thresholds we use are 0, 2, 5, and 11. We chose to forgo the model-based predicted thresholds and instead use the hand-picked ones based on a threshold parameter-sweep outlined in \autoref{app:dist-sweep-bcftools}.
The \bcftools{} clustering results are summarised in \autoref{tab:bcftools-cluster-summary} for all four SNP thresholds analysed. Of note, \bcftools{} achieves a SACR of 1.0 at all thresholds - meaning \ont{} does not miss any samples from their correct clustering.
For the SNP threshold of 0 (\autoref{fig:clustering-t0}; top-right), \bcftools{} perfectly recreated the Illumina clusters, with the addition of a cluster of two samples that were singletons (not clustered) in Illumina. At the SNP threshold of 2 (\autoref{fig:clustering-t2}; top-right), \bcftools{} clustering only differed from Illumina by the addition of one singleton to a cluster of three (cluster 1). SNP threshold 5 (\autoref{fig:clustering-t5}; top-right) had the highest XCR (0.057) due to two new singleton clusters of size 2 and 3 and the addition of 2 singletons to a cluster of 5 (cluster 1). The lowest SACP was at threshold 12 (\autoref{fig:clustering-t12}; top-right) due to the joining of clusters 1 and 2, and clusters 7 and 8, and with three singletons being added to existing clusters.
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/clustering-t0.png}
\caption{{Agreement of Illumina and \ont{} transmission clustering at an Illumina SNP threshold of \textbf{0}. The expected (Illumina) clusters are shown in the top-left panel. The other panels show the \ont{}-based clustering from \bcftools{} (top-right), \map{} (bottom-left), and \compare{} (bottom-right), with the title indicating the SNP threshold used for clustering. Nodes are coloured and numbered according to their Illumina cluster membership. Samples not clustered (singletons) in Illumina are represented as white boxes with red stripes and are named "S". Clusters are horizontally aligned and connected with black lines. Where a sample that Illumina clustered is \emph{not} clustered by \ont{}, the sample retains its original colour and number but is represented as an unconnected node on the top row of the plot. Each \ont{} panel has a legend showing the SACR, SACP, and XCR value with respect to the Illumina clustering. Note, the order of nodes and length of edges has no significance. SACR=sample-averaged cluster recall; SACP=sample-averaged cluster precision; XCR=excess clustering rate.
{\label{fig:clustering-t0}}
}}
\end{center}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/clustering-t2.png}
\caption{{Agreement of Illumina and \ont{} transmission clustering at an Illumina SNP threshold of \textbf{2}. The expected (Illumina) clusters are shown in the top-left panel. The other panels show the \ont{}-based clustering from \bcftools{} (top-right), \map{} (bottom-left), and \compare{} (bottom-right), with the title indicating the SNP threshold used for clustering. Nodes are coloured and numbered according to their Illumina cluster membership. Samples not clustered (singletons) in Illumina are represented as white boxes with red stripes and are named "S". Clusters are horizontally aligned and connected with black lines. Where a sample that Illumina clustered is \emph{not} clustered by \ont{}, the sample retains its original colour and number but is represented as an unconnected node on the top row of the plot. Each \ont{} panel has a legend showing the SACR, SACP, and XCR value with respect to the Illumina clustering. Note, the order of nodes and length of edges has no significance. SACR=sample-averaged cluster recall; SACP=sample-averaged cluster precision; XCR=excess clustering rate.
{\label{fig:clustering-t2}}
}}
\end{center}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/clustering-t5.png}
\caption{{Agreement of Illumina and \ont{} transmission clustering at an Illumina SNP threshold of \textbf{5}. The expected (Illumina) clusters are shown in the top-left panel. The other panels show the \ont{}-based clustering from \bcftools{} (top-right), \map{} (bottom-left), and \compare{} (bottom-right), with the title indicating the SNP threshold used for clustering. Nodes are coloured and numbered according to their Illumina cluster membership. Samples not clustered (singletons) in Illumina are represented as white boxes with red stripes and are named "S". Clusters are horizontally aligned and connected with black lines. Where a sample that Illumina clustered is \emph{not} clustered by \ont{}, the sample retains its original colour and number but is represented as an unconnected node on the top row of the plot. Each \ont{} panel has a legend showing the SACR, SACP, and XCR values with respect to the Illumina clustering. Note: the order of nodes and the lengths of edges have no significance. SACR=sample-averaged cluster recall; SACP=sample-averaged cluster precision; XCR=excess clustering rate.
{\label{fig:clustering-t5}}
}}
\end{center}
\end{figure}
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/clustering-t12.png}
\caption{{Agreement of Illumina and \ont{} transmission clustering at an Illumina SNP threshold of \textbf{12}. The expected (Illumina) clusters are shown in the top-left panel. The other panels show the \ont{}-based clustering from \bcftools{} (top-right), \map{} (bottom-left), and \compare{} (bottom-right), with the title indicating the SNP threshold used for clustering. Nodes are coloured and numbered according to their Illumina cluster membership. Samples not clustered (singletons) in Illumina are represented as white boxes with red stripes and are named "S". Clusters are horizontally aligned and connected with black lines. Where a sample that Illumina clustered is \emph{not} clustered by \ont{}, the sample retains its original colour and number but is represented as an unconnected node on the top row of the plot. Each \ont{} panel has a legend showing the SACR, SACP, and XCR values with respect to the Illumina clustering. Note: the order of nodes and the lengths of edges have no significance. SACR=sample-averaged cluster recall; SACP=sample-averaged cluster precision; XCR=excess clustering rate.
{\label{fig:clustering-t12}}
}}
\end{center}
\end{figure}
\begin{table}
\centering
\begin{tabular}{@{}llll@{}}
\toprule
Threshold & SACR & SACP & XCR \\ \midrule
0 & 1.0 & 1.0 & 0.015 (2/137) \\
2 & 1.0 & 0.966 & 0.008 (1/128) \\
5 & 1.0 & 0.949 & 0.057 (7/122) \\
12 (11) & 1.0 & 0.845 & 0.031 (3/97) \\ \bottomrule
\end{tabular}
\caption{Summary of \bcftools{} clustering metrics for four (Illumina) SNP distance thresholds. Thresholds in parentheses are the equivalent \ont{} thresholds used. The fractions in parentheses for XCR give the underlying counts. SACR=sample-averaged cluster recall; SACP=sample-averaged cluster precision; XCR=excess clustering rate.}
\label{tab:bcftools-cluster-summary}
\end{table}
\subsubsection{Pandora single-sample}
For \pandora{} single-sample (\map{}), we also chose to use the hand-picked SNP distance thresholds from the analysis in \autoref{app:dist-sweep}. These are 16, 18, 18, and 27 for the Illumina thresholds of interest (0, 2, 5, and 12), respectively. The clustering results for each of these thresholds are summarised in \autoref{tab:map-cluster-summary}.
At no threshold was \map{} clustering able to achieve perfect SACR, SACP, or XCR. In particular, all thresholds had an SACP value less than 0.69 and an XCR greater than 0.11. These results reflect the fact that many singletons were erroneously clustered and many clusters merged. This is largely expected given the much wider spread of \map{} distances along the y-axis in the inset of \autoref{fig:dotplot}, compared to \bcftools{} or \compare{}. Although the SACR values are not as low as the SACP values, we place a higher value on recall, so even these smaller losses are concerning.
For the (Illumina) SNP threshold of 0 (\autoref{fig:clustering-t0}; bottom-left), \map{} failed to recreate cluster 4. In addition, clusters 1 and 2 were merged, with a singleton added, and four new clusters were created from singletons (one with nine members). At the SNP threshold of 2 (\autoref{fig:clustering-t2}; bottom-left), \map{} clustering failed to recreate cluster 7, joined clusters 4 and 5, and clustered 18 singletons. SNP threshold 5 (\autoref{fig:clustering-t5}; bottom-left) failed to recreate clusters 8 and 9; merged clusters 4, 5, and 6; and clustered an additional 14 singletons. Finally, at threshold 12 (\autoref{fig:clustering-t12}; bottom-left), \map{} failed to recreate clusters 11, 12, and 13; missed one sample from cluster 9; merged clusters 1, 2, and 5, as well as clusters 7 and 8; and clustered 12 singletons.
\begin{table}
\centering
\begin{tabular}{@{}llll@{}}
\toprule
Threshold & SACR & SACP & XCR \\ \midrule
0 (16) & 0.846 & 0.628 & 0.146 (20/137) \\
2 (18) & 0.909 & 0.688 & 0.141 (18/128) \\
5 (18) & 0.857 & 0.643 & 0.115 (11/122) \\
12 (27) & 0.852 & 0.621 & 0.124 (12/97) \\ \bottomrule
\end{tabular}
\caption{Summary of \pandora{} single-sample clustering metrics for four (Illumina) SNP distance thresholds. Thresholds in parentheses are the equivalent \ont{} thresholds used. The fractions in parentheses for XCR give the underlying counts. SACR=sample-averaged cluster recall; SACP=sample-averaged cluster precision; XCR=excess clustering rate.}
\label{tab:map-cluster-summary}
\end{table}
\subsubsection{Pandora multi-sample}
The SNP thresholds we use for \compare{} (multi-sample) clustering are 0, 1, 3, and 7. The results of this clustering are summarised in \autoref{tab:compare-cluster-summary}. One important result is that, unlike the single-sample approach of \pandora{}, the multi-sample mode leads to perfect SACR across all thresholds. Additionally, clustering at the threshold of 0 (\autoref{fig:clustering-t0}; bottom-right) perfectly mirrors Illumina. At a threshold of 2 (\autoref{fig:clustering-t2}; bottom-right), one singleton was added to an otherwise perfect cluster (cluster 1), and two new clusters of two singletons each (doubletons) were formed.
For threshold 5 (\autoref{fig:clustering-t5}; bottom-right), \compare{} merged clusters 4, 5, and 6, as well as clusters 1 and 2. Three singletons were added to the merged cluster 1/2 and four new doubletons were created. Threshold 12 (\autoref{fig:clustering-t12}; bottom-right) likewise saw cluster mergers (1/2/5 and 7/8), two new doubletons, and six singletons added to existing clusters; however, SACR remained perfect.
\begin{table}
\centering
\begin{tabular}{@{}llll@{}}
\toprule
Threshold & SACR & SACP & XCR \\ \midrule
0 & 1.0 & 1.0 & 0.0 (0/137) \\
2 (1) & 1.0 & 0.966 & 0.039 (5/128) \\
5 (3) & 1.0 & 0.690 & 0.090 (11/122) \\
12 (7) & 1.0 & 0.772 & 0.103 (10/97) \\ \bottomrule
\end{tabular}
\caption{Summary of \pandora{} multi-sample clustering metrics for four (Illumina) SNP distance thresholds. Thresholds in parentheses are the equivalent \ont{} thresholds used. The fractions in parentheses for XCR give the underlying counts. SACR=sample-averaged cluster recall; SACP=sample-averaged cluster precision; XCR=excess clustering rate.}
\label{tab:compare-cluster-summary}
\end{table}
\subsection{Summary}
\label{sec:cluster-summary}
The results presented in this section show that when using \bcftools{} for variant-calling, \ont{} is capable of producing transmission clusters with a high degree of similarity to Illumina. Most importantly, no samples deemed part of a cluster by Illumina were missed by \bcftools{} (i.e., $SACR=1.0$). However, as the SNP threshold increases, \bcftools{} erroneously adds more samples to clusters - or joins existing clusters - with an SACP of 0.845 at a SNP threshold of 12. That is, on average, 84.5\% of the members in a sample's \ont{} cluster are also in its Illumina cluster.
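To make these metrics concrete, the following is a minimal Python sketch of how SACP and XCR could be computed from two clusterings (each a list of sets of sample names). The function names, and the choice to average over all samples, are illustrative assumptions rather than the exact implementation used in our pipeline.
\begin{verbatim}
def cluster_of(sample, clustering):
    # The set of samples sharing a cluster with `sample`;
    # unclustered samples (singletons) map to just {sample}.
    for cluster in clustering:
        if sample in cluster:
            return set(cluster)
    return {sample}

def sacp(truth, test, samples):
    # Sample-averaged cluster precision: the mean, over samples, of
    # the fraction of a sample's `test` (e.g., ONT) cluster that is
    # also in its `truth` (e.g., Illumina) cluster.
    fractions = [
        len(cluster_of(s, test) & cluster_of(s, truth))
        / len(cluster_of(s, test))
        for s in samples
    ]
    return sum(fractions) / len(fractions)

def xcr(truth, test, samples):
    # Excess clustering rate: the fraction of samples unclustered in
    # `truth` that end up clustered in `test`.
    unclustered = [s for s in samples if len(cluster_of(s, truth)) == 1]
    excess = [s for s in unclustered if len(cluster_of(s, test)) > 1]
    return len(excess) / len(unclustered) if unclustered else 0.0
\end{verbatim}
SACR follows the same pattern as SACP, with the roles of the two clusterings reversed.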
We have also shown that the Illumina SNP thresholds of 0, 2, and 5 remain valid for \ont{} variant calls from \bcftools{}, and the threshold of 12 needs only to decrease to 11.
We additionally investigated whether the genome graph method of \pandora{} can produce accurate transmission clusters. While the single-sample approach did not yield outstanding results, the multi-sample method shows promise. For all SNP thresholds assessed, \compare{} did not miss any samples from clustering. The SACP values for thresholds 0 and 2 were as good as \bcftools{}, but at thresholds 5 and 12, \compare{} did not perform as well. For example, at threshold 12, \bcftools{} erroneously added only three singletons to larger clusters, while \compare{} created two new doubleton clusters and added six singletons to existing clusters.
\noindent
In conclusion, we recommend clustering \ont{} data based on \bcftools{} SNP calls for concordant clusters with Illumina.
%=========================================================================
\section{Mixed Illumina and \ont{} transmission clusters}
\label{sec:mixed-clustering}
Having established that \ont{} data can recreate Illumina-defined transmission clusters with high recall and acceptable precision, we turn to the question of whether this holds when mixing Illumina and \ont{} data.
Inferring transmission clusters from a mixture of sequencing modalities would allow greater integration across datasets from various sources and prevent laboratories from being locked into one sequencing technology. As the uptake of \ont{} sequencing increases, it seems inevitable that there will be cases where comparisons between these sequencing modalities are necessary. To address this question, we simulate varying degrees of \ont{}/Illumina mixtures and investigate the impact (if any) of combining Illumina and \ont{} datasets on SACR, SACP, and XCR (see \autoref{sec:cluster-similarity} for definitions). For the \ont{} data, we use the \bcftools{} distance matrices, as they were shown to be the most concordant with Illumina (\autoref{sec:cluster-summary}).
First, we get a sense of how comparable the distances are likely to be by looking at the "self-distance" for each sample - the distance between a sample's Illumina and \ont{} data. As the sequencing data originate from the same source, we know the self-distance for any sample \emph{should} be 0. However, we also know there are major technical differences between Illumina and \ont{}; therefore, some small variability in self-distance is likely. We plot the self-distances in \autoref{fig:self-dist} and see that 64\% (96/150) of the samples have a distance of 0 between their Illumina (COMPASS) and \ont{} (\bcftools{}) data, with 84\% (126/150) less than 2 SNPs apart. All samples have a self-distance of less than 9, except one (\vrb{mada\_1-33}), which has a self-distance of 53. We investigated the possibility of a sample mix-up being the cause of this discrepancy but could not find convincing evidence of one.
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/mixed_self_dist.png}
\caption{{Mixed modality "self-distance". This plot shows the SNP distance (x-axis) between each sample's COMPASS (Illumina) and \bcftools{} (\ont{}) VCF calls.
\label{fig:self-dist}
}}
\end{center}
\end{figure}
\noindent
Next, we look at the pairwise SNP distance relationship, akin to that in \autoref{sec:snp-dist}. \autoref{fig:mixed-dotplot} shows that the mixed SNP distances have a similar relationship to the single-technology correlation in \autoref{fig:dotplot}. The difference, however, is that in \autoref{fig:mixed-dotplot}, the y-axis represents the distance between one sample's Illumina data and the other's \ont{}. There are twice as many data points in this plot because the distance between two samples is not necessarily reciprocal for mixed-modality distances (as we saw with the self-distances). That is, for two samples $a$ and $b$, $distance(a_I,b_N) \neq distance(a_N, b_I)$, where $I$ and $N$ refer to Illumina and \ont{} data, respectively.
In the zoomed inset window of \autoref{fig:mixed-dotplot}, there is a cluster of outlying points with a higher mixed distance than Illumina distance. All of these points relate to combinations of six particular samples. We investigated these samples for evidence of a sample swap or low data quality, but found nothing to support such a claim. It seems simply that the \ont{} data for some of these samples are quite different from the Illumina data of the others.
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/mixed-dotplot.png}
\caption{{The relationship of the distance between all pairs of samples based on Illumina (COMPASS) VCF calls (x-axis) and mixed COMPASS-\bcftools{} calls (y-axis). The black dashed line indicates the relationship we would expect if the distance between a pair of samples were the same for both approaches. The blue line indicates the line of best fit based on fitting a robust linear regression model to the data. The inset gives a closer look at the relationship for all sample pairs where the COMPASS distance is less than or equal to 100 SNPs. The legend indicates the linear equations for the lines. Note: to prevent model skew, we do not include self-distance pairs.
{\label{fig:mixed-dotplot}}
}}
\end{center}
\end{figure}
\noindent
We now examine transmission clusters for mixtures of \ont{} and Illumina data using the same SNP thresholds from \autoref{sec:clustering}. The SNP threshold we use when comparing different modalities is the Illumina SNP threshold. The mixture ratios we investigate are 0.01, 0.05, 0.1, 0.25, 0.5, 0.75, and 0.9. That is, for a ratio of 0.25, we \emph{randomly} allocate 25\% of the samples to \ont{} and the remainder to Illumina. For each SNP threshold and ratio, we calculate the XCR, SACR, and SACP that the clustering produces. We repeat this process 1000 times for each threshold and ratio to simulate different mixtures of sample/technology pairs. The intention of simulating so many different mixed pairs is to provide insight into how robust clustering is to different ratios of sequencing datasets.
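To illustrate the procedure, one replicate of the simulation could look like the sketch below, assuming clusters are formed as connected components of the graph linking all sample pairs whose SNP distance is at or below the threshold. The \vrb{dist} helper and all names are hypothetical.
\begin{verbatim}
import random
import networkx as nx

def one_replicate(samples, ratio, threshold, dist):
    # `dist(a, tech_a, b, tech_b)` is assumed to return the SNP
    # distance between sample `a` sequenced with `tech_a` and
    # sample `b` sequenced with `tech_b` ("illumina" or "ont").
    n_ont = round(len(samples) * ratio)
    ont = set(random.sample(samples, n_ont))  # random allocation
    tech = {s: "ont" if s in ont else "illumina" for s in samples}
    graph = nx.Graph()
    graph.add_nodes_from(samples)
    for i, a in enumerate(samples):
        for b in samples[i + 1:]:
            if dist(a, tech[a], b, tech[b]) <= threshold:
                graph.add_edge(a, b)
    # Clusters are connected components with more than one member;
    # SACR, SACP, and XCR are then computed against the
    # Illumina-only clustering.
    return [c for c in nx.connected_components(graph) if len(c) > 1]
\end{verbatim}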
The results of these simulations are shown in \autoref{fig:mixed-sims} (full summary statistics in \autoref{tab:mixed-sims-full}). We found that for all SNP thresholds and ratios, the median SACR was 1.0. In other words, regardless of the \ont{}/Illumina mixture ratio, for all thresholds we used, no sample is missed from its expected clustering - on average. The SACP values decrease somewhat as the \ont{} ratio increases. However, the lowest median SACP value was 0.845 (threshold 12, ratio 0.9), which is also the SACP value obtained for the \ont{}-only clustering in \autoref{sec:bcftools-clustering} with the same threshold. The XCR values tend to increase slightly with the addition of more \ont{} samples. In the most extreme case, 0.057 was the highest XCR value in any simulation (SNP threshold 5). Incidentally, this is the same as the XCR obtained for the \ont{}-only clustering of the same SNP threshold, which equates to 7 of the 122 non-clustered samples being clustered. However, regardless of the XCR, no samples that should have been clustered were missed (on average).
\begin{figure}
\begin{center}
\includegraphics[width=0.90\columnwidth]{Chapter2/Figs/mixed_simulations.png}
\caption{{Simulating various ratios (x-axis) of \ont{}/Illumina sample mixtures. The different thresholds (subplots) indicate the cutoff for defining samples as part of a cluster. The y-axis depicts the Sample-Averaged Cluster Precision and Recall (SACP/SACR) and Excess Clustering Rate (XCR) distributions over all simulation runs (XCR is shown as (1-XCR) for better axis-scaling). For each ratio/threshold combination, we run 1000 simulations in which the \ont{} and Illumina data are randomly split into the relevant ratio and clusters are defined based on the relevant threshold. The titles for each subplot indicate the SNP threshold used when comparing Illumina (Ill.), \ont{} (NP), or mixed-technology sample pairs.
{\label{fig:mixed-sims}}%
}}
\end{center}
\end{figure}
\subsection{Summary}
In this section, we have shown that putative transmission clusters constructed using mixtures of Illumina and \ont{} data are consistent with those produced by Illumina data alone. As such, datasets from different sequencing technologies can be combined for transmission clustering analysis using the methods in this chapter.
%=========================================================================
\section{Discussion}
Recent work from Smith \etal{} is the first effort to assess \ont{} for the clustering of \mtb{} samples based on genetic distance \cite{smith2020}. While their work had more samples than ours (431 versus our 150), the SNP distance comparison details were very brief and only presented for a subset of 14 samples. They present the results as a distance matrix and leave it as an exercise for the reader to compare the Illumina and \ont{} matrices. There is no quantification of the clustering similarities, nor any investigation into whether Illumina and \ont{} data can be mixed for this application. In contrast, the work presented in this chapter provides a detailed analysis of all of these topics - and more.
In addition to the conventional single-reference variant-calling approach, we also assessed the performance of the genome graph method presented in \autoref{chap:denovo}, for \mtb{}. We built two \mtb{} population reference graphs with different variant densities. Intuition would say that the more variants in the \panrg{}, the better the ability to find and call variants. However, we found the opposite. The sparse \panrg{} produced marginally higher precision and recall, on average, compared to its dense counterpart. As the computational resources required to construct and operate the sparse \panrg{} are far lower than for the dense one, we chose to use it for the subsequent analysis. The lack of improvement from adding more variants is consistent with previous work from Pritt \etal{}, who found a ceiling in the gains from adding more variants \cite{pritt2018}. They note that, eventually, the extra variants cause complexity "blow-ups" that manifest as increased computational resource requirements and reference ambiguity, all of which lead to a decay in overall performance. This decay is precisely what we observe. Many of the errors made by the dense \panrg{} relate to shared \kmer{}s between alternate paths through sites in the graph. These shared \kmer{}s, in turn, confuse the genotyping by adding coverage to multiple alleles. We discuss this further in \autoref{sec:improve-prg} and investigate this complexity problem further in \autoref{chap:dst}.
The initial step in this chapter was the first investigation of the precision and recall of \ont{} variant calls for \mtb{}. Previous work from Bainomugisa \etal{} only looked at one sample and assessed variants in the \ppe{} genes \cite{bainomugisa2018}. While several \ont{} variant callers have recently been published, we chose to use \bcftools{} due to its similarity to the Illumina strategy we are comparing against and for its ease of use. Many of the \ont{} variant callers are neural network-based and require considerable bioinformatics knowledge to operate and, in some cases, require training of variant models. As our goal in this chapter is to investigate the use of \ont{} by public health laboratories (and for clinical purposes), we try to use methods that can be easily duplicated by others who may not have extensive bioinformatics training. It is difficult to directly compare our precision and recall values to other \ont{} variant-calling studies, as we value precision more highly than recall for the work in this chapter, whereas most \ont{} variant-calling benchmarks focus on balancing the two. The precision from both \ont{} variant-calling strategies we analysed was consistent with Illumina and much higher than previous \ont{} benchmarks \cite{clair2020,clairvoyant2019}. However, we acknowledge the unfair comparison to other works given the different focus. Recall for both \bcftools{} and \pandora{} was lower than Illumina - by quite a lot for \pandora{}. Compared to other \ont{} variant-calling work, the recall values we obtain are a few percentage points below the best \cite{sanderson2020,clair2020}. Given that we also report results for various variant filtering levels, we hope these can be used by others who may place a higher value on recall.
One unfortunate limitation of the variant-calling validation was the number of PacBio assemblies we could use. We sent 35 samples for PacBio sequencing, but we only received sufficient data for the assembly of 9 samples - and two of those failed QC due to technical difficulties in the sequencing lab. These results would have been even more robust with 35 validation samples; however, seven is comparable with the numbers used in other \ont{} variant calling evaluation work \cite{sanderson2020,clair2020,clairvoyant2019}.
We have outlined three new metrics for comparing the similarity of two transmission clustering approaches: the sample-averaged cluster recall (SACR) and precision (SACP), and the excess clustering rate (XCR). SACR and SACP are derived from the set-similarity measure known as the Tversky index \cite{tversky1977}. XCR has not, to the best of our knowledge, been described elsewhere. Cluster similarity is a rich field of research, yet there are not many examples of this quantitative approach to comparing transmission cluster methods. Of the studies that \emph{do} compare clusters between methods, none provide the level of information provided by SACR, SACP, and XCR collectively. Meehan \etal{} use a clustering rate metric: the number of samples clustered, minus the number of clusters, divided by the total number of samples \cite{meehan2018}. Roetzer \etal{} focused on manually comparing a single large cluster but did not compare all clusters \cite{roetzer2013}. Perhaps the closest to our approach is that of Stimson \etal{} \cite{stimson2019}, who use an information theory metric called variation of information (VI) \cite{meila2007}. VI works well and is not too dissimilar to our approach; it measures how much information is lost and gained when moving between two clusterings.
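Written out, the clustering rate of Meehan \etal{} described above is
\[
\text{clustering rate} = \frac{n_{c} - k}{n},
\]
where $n_{c}$ is the number of clustered samples, $k$ is the number of clusters, and $n$ is the total number of samples.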
Our main reason to forgo these previous methods in favour of our three has to do with the granularity of information. The studies mentioned all use a single metric to summarise the performance of the clustering. Using SACR, SACP, and XCR, however, we see how changes in the methods for producing clusters impact whether samples are missed from clusters (SACR), wrongfully added to existing clusters (SACP), or whether previously unclustered samples form new clusters (XCR). Such granularity allows users to tweak their clustering approach to suit their situation. For example, while we place a higher value on SACR, others may find the reduction of cluster merging more important and can focus on improving SACP instead. A single metric does not allow for this kind of targeted evaluation.
The first important finding of this chapter is that \ont{} data can produce transmission clusters comparable to Illumina. Indeed, \bcftools{} and \compare{} do not miss any samples from clusters - the most important consideration for transmission chain investigation \cite{walker2013}. This result agrees with the only other \mtb{} study of this kind \cite{smith2020}. Additionally, \ont{}'s suitability for transmission investigation has been confirmed for other pathogens such as Human metapneumovirus \cite{xu2020}, Shiga toxin-producing \ecoli{} (STEC) \cite{greig2021}, and \textit{Neisseria gonorrhoeae} \cite{sanderson2020}.
It is essential to highlight that the focus of this work is not as a variant-calling benchmark for WGS technologies. We acknowledge that COMPASS may not be the best Illumina-based variant calling strategy. Indeed, there are many bioinformatic pipelines available for the analysis of \mtb{} Illumina data, all with different results from one another \cite{walter2020}. Instead, we take an approach used by PHE and ask whether \ont{} can provide information of the same quality. In effect, our study is a "non-inferiority" one; we are attempting to show that \ont{} is not \emph{worse} than Illumina; as such, we can treat Illumina as "truth" in this respect. For the application of clustering \mtb{} genomes based on genetic distance, we find \ont{} does provide comparable information when using \bcftools{} to call variants. In addition, we found that using the multi-sample comparison mode of \pandora{} we also succeed in clustering all samples that should be clustered, albeit at the cost of adding more false-positive connections.
While the precision of variant calls for \pandora{} was as high as Illumina, the clustering produced by the single-sample mode (\vrb{map}) was much worse than the other approaches. In general, the distances between samples based on \pandora{} \vrb{map} variant calls were much higher than the Illumina data implied they should be. One contributing factor is the subtle difference in how we generate the \pandora{} \vrb{map} consensus sequences. Compared to \bcftools{} and Illumina, the main difference is that when a position in the H37Rv reference genome is missing from the \pandora{} \vrb{map} VCF, we assume it is the reference allele, rather than nullifying it as we do with COMPASS and \bcftools{}. We initially took the nullify approach for missing positions but found this led to a substantial under-calling of the distances. The bulk of the extra pairwise differences (false positives) called by \pandora{} \vrb{map} were positions missing from one of the samples and present in the other. 96\% of those false-positive positions were filtered out in the COMPASS and \bcftools{} VCFs due to evidence of heterozygosity. Ultimately, this issue stems from the fact that COMPASS and \bcftools{} make calls at all positions of the genome (with read depth), while \pandora{} only makes calls at sites with alternate alleles. A new approach for calculating the distance between \pandora{} single-sample VCFs certainly warrants further investigation.
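The two missing-position policies can be expressed compactly. The sketch below is illustrative only - the encodings and names are assumptions - but it captures why the choice matters when counting pairwise differences.
\begin{verbatim}
MISSING = "."  # position absent from the VCF
NULL = None    # nulled position, ignored in comparisons

def resolve(call, ref_call, policy):
    # Resolve a missing position: assume the reference allele
    # ("ref", as we do for pandora map) or null it out ("null",
    # as for COMPASS and bcftools).
    if call != MISSING:
        return call
    return ref_call if policy == "ref" else NULL

def snp_distance(a_calls, b_calls, ref, policy):
    distance = 0
    for a, b, r in zip(a_calls, b_calls, ref):
        a, b = resolve(a, r, policy), resolve(b, r, policy)
        if a is not NULL and b is not NULL and a != b:
            distance += 1
    return distance
\end{verbatim}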
The difference in clustering obtained by the two \pandora{} approaches highlights their intended use cases. The multi-sample approach, \compare{}, was designed for allowing the comparison of collections of samples. It integrates information from \emph{all} samples by selecting a consensus sequence that best approximates them and then calls variation against that consensus. This approach allows for easily identifying differences between samples as the VCF produced by \compare{} has genotype information for all samples at all sites. While \compare{} did not miss any samples from their correct clusters, it did incorrectly join some clusters and create new clusters from samples Illumina deemed singletons. This incorrect joining of samples and clusters is not entirely unexpected. Incorrectly joining samples indicates that the distances between samples are lower than expected for \compare{} (this is supported by \autoref{fig:dotplot}). Given the \pandora{} variant calls showed significantly lower recall than COMPASS and \bcftools{} (see \autoref{fig:prec-recall-filters}), a smaller distance between samples is expected. Two obvious ways of improving recall are masking less of the genome (see \autoref{app:mask}) or using less stringent variant filters (\autoref{sec:pandora-filters}).
In addition to acknowledging that this variant-calling approach may not be the absolute best approach, we also acknowledge that SNP distance clustering has shortcomings. Again, our intention is not to claim to be the best clustering method but to mimic the process currently used by PHE - which is the SNP threshold approach used here. Stimson \etal{} recently published a notable study showing that combining a SNP threshold approach with epidemiological data can lead to superior transmission chain reconstruction compared to SNP threshold alone \cite{stimson2019}. With the establishment of \ont{}'s ability to provide accurate SNP threshold-based clusters, it seems certain that the inclusion of epidemiological data using the same approach as Stimson \etal{} can only improve inference for this application.
With the knowledge that \ont{} can detect likely clusters of transmission for \mtb{}, we ask a logical next question: can transmission clusters be accurately constructed from a mixture of \ont{} and Illumina data? As \ont{} sequencing becomes increasingly pervasive, it seems inevitable that groups using different sequencing modalities will want to compare data. We find that they can be mixed and produce clusters consistent with Illumina-only data.
To the author's knowledge, this analysis is the first to test such mixing of data for \mtb{}. The mixing of \ont{} and Illumina consensus sequences has been investigated for hepatitis C \cite{riaz2021} and STEC \cite{greig2021}, with the authors also finding the modalities can be mixed without degradation of results. Others have compared phylogenetic trees constructed from a combination of the two modalities \cite{lijun2020,McNaughton2019,greig2021} with similar findings. Perhaps the unique insight from our work is that we assess the effect of different mixture ratios on clustering.
Another interesting insight from this study of technology mixtures is self-distance (\autoref{fig:self-dist}). In their work on \textit{N. gonorrhoeae}, Sanderson \etal{} found a median self-distance of 5, with a range of 1-10 and an interquartile range (IQR) of 3-6 ($n=8$) \cite{sanderson2020}. Greig \etal{}, meanwhile, saw self-distances of 5 and 6 ($n=4$) in STEC \cite{greig2021}. In contrast, we found a (\bcftools{}) median self-distance of 0 with an IQR of 0-1 ($n=150$). Our range was 0-53, and with the outlier of 53 removed, the range becomes 0-8. Both of these studies used variant filtering strategies similar to ours, but with different variant callers - highlighting the need for continued standardisation of \ont{} variant calling, or even recommendations for specific species.
%=========================================================================
\section{Conclusion}
In conclusion, the work in this chapter has shown that \ont{} data can produce transmission clusters consistent with those from Illumina. Additionally, it is also possible to mix data of the two modalities and produce concordant clusters. Finally, we provide the first evaluation of \ont{} variant-calling for \mtb{}, and three new metrics for assessing transmission cluster similarity.
These results are consistent with another \mtb{} \ont{}-based transmission cluster study and similar work on other bacterial and viral pathogens. As a result, we believe \ont{} sequencing has reached sufficient quality to be considered for public health investigation of transmission clusters.
%=========================================================================
\section{Future work}
\subsection{Dataset with known epidemiological information}
Perhaps the most important follow-up to the work in this chapter is to gather a dataset with epidemiologically linked cases and known transmission clusters. While such datasets do exist for Illumina data, there are none yet with matched Illumina and \ont{} sequencing. Matched sequencing data are necessary to ensure that any differences observed are driven solely by sequencing technology, not by underlying biology. A dataset with solid evidence for transmission clusters would remove the main limitation of this chapter and make an even stronger statement for the use of \ont{} sequencing in public health laboratories.
\subsection{Computational performance of variant calling}
\label{sec:fw-comp-perf}
% bcftools baq work https://github.com/mbhall88/head_to_head_pipeline/issues/38#issuecomment-661680608
In \autoref{sec:var-call-comp-perf} we assessed the time and memory usage for variant calling with \bcftools{} and \pandora{}. \bcftools{}, in the worst case, had the highest peak memory and CPU time of the callers. Nearly all of this time and memory is spent realigning reads in the pileup in order to calculate the base alignment quality (BAQ) score \cite{li2011}. When we disabled this BAQ setting for one sample (via the \vrb{mpileup} option \vrb{-B/--no-BAQ}), the CPU time dropped from 3 hours to 30 minutes (a 6-fold decrease) and peak memory reduced from 58GB to 70MB (an 829-fold decrease). However, this did come at the cost of a slight reduction in precision and recall. As we write this chapter, the newest release of BCFtools (version 1.13) has addressed this problem by only doing the BAQ realignment in regions overlapping problematic indel sites. Their testing shows this drastically reduces the peak memory and overall runtime and \emph{increases} recall (the realignment can sometimes be detrimental). As such, an obvious task for future development is to rerun this analysis with the latest \bcftools{} version and assess the expected changes in computational resource usage and recall.
Much of the memory and CPU time in the \pandora{} pipelines lies in updating the multiple sequence alignments used to build the \panrg{} after novel variants have been added. Recent work by Leandro Ishi in our research group has produced a prototype of the \makeprg{} program that significantly reduces the time and memory required to update the \panrg{} (as discussed in \autoref{denovo-fw-insert}). It remains to be seen whether these updates will also improve \pandora{}'s precision and recall, but they will undoubtedly improve the computational requirements.
\subsection{Improving \panrg{} construction}
\label{sec:improve-prg}
The current process for building the \mtb{} \panrg{} is, for each locus, to apply a single VCF alternate allele at a time to the reference sequence for that locus and collect all of these mutated sequences into a multi-sequence FASTA file. One limitation of this approach is that variants do not always occur in isolation. Where this becomes important is when turning an MSA into a \prg{} with \makeprg{}. An important parameter in this process is the minimum match length, $m$. When two variants occur within $m$ positions of each other, creating two separate sequences for them (as we do) creates alternate paths in the \prg{}, with neither path containing the correct allele combination. This situation is best understood with an example. Suppose we set $m$ to 3 and have two variants, at positions 2 and 4, in a hypothetical genome sequence \vrb{AAAC}. The first variant is a SNP changing an \vrb{A} to a \vrb{T} and the second a \vrb{C} to a \vrb{G}. The two mutated sequences we produce for these variants are \vrb{ATAC} and \vrb{AAAG}. Because these two sequences do not share a match of length 3 or more, they become two alternate paths in the \prg{}. However, both variants come from the same sample, so the true sequence is \vrb{ATAG}. When genotyping with this \prg{}, a sample that carries both variants (i.e., \vrb{ATAG}) matches neither of the two alleles - even though both alleles derive from a sample with \vrb{ATAG} at this site. Ultimately, we rely on the \denovo{} variant discovery from \autoref{chap:denovo} to fix this. Unfortunately, this does not always work and, as we will see in \autoref{chap:dst}, even if \denovo{} discovery adds the correct allele combination, \vrb{ATAG}, we now have three alleles that could share minimizing \kmer{}s. Having shared minimizers over multiple alleles at the same position can lead to read coverage across all of those alleles - ultimately skewing genotyping.
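This combinatorial problem can be reproduced in a few lines of Python. The toy sketch below applies the two example variants one at a time (as our \panrg{} construction does) and as a joint haplotype, showing that neither per-variant sequence matches the true haplotype.
\begin{verbatim}
ref = "AAAC"
# (0-based position, alternate base) for the two example variants
variants = [(1, "T"), (3, "G")]

def apply_variants(seq, muts):
    seq = list(seq)
    for pos, alt in muts:
        seq[pos] = alt
    return "".join(seq)

per_variant = [apply_variants(ref, [v]) for v in variants]
# per_variant == ["ATAC", "AAAG"]: the two alternate PRG paths
haplotype = apply_variants(ref, variants)
# haplotype == "ATAG": carried by the sample, but matching
# neither alternate path in the PRG
\end{verbatim}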
One way to minimise these excess alleles would be to construct the \panrg{} by producing, for each sample, a single sequence at each locus from \emph{all} of that sample's variants - that is, using its actual haplotype rather than one sequence per variant. The reason we did not construct the \panrg{} in this fashion in \autoref{sec:tbprg} is that, for each locus, we would have had to perform an MSA on $n$ sequences - where $n$ is the number of samples. Instead, we chose to apply single variants, as the number of variants in a locus was, in most cases, \emph{much} smaller than $n$; thus, the MSA ran quicker and used much less memory.
In addition to improvements in variant inclusion, some changes can be made in the masking of loci. Our current method of removing loci from the \panrg{} when they have 30\% or more overlap with a genome mask (\autoref{app:mask}) leads to approximately 6\% of loci being removed - 10\% of the genome. As the genome mask used covers only 7.4\% of the genome, we remove more than is necessary, which impacts our recall. A recent study by Marin \etal{} has shown this genome mask to be excessive, and they present a new mask that covers only 4\% of the H37Rv reference genome \cite{marin2021}. A first step towards improving the recall of \pandora{}, then, would be to rebuild the \panrg{} using this new mask.
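For concreteness, the 30\% rule amounts to the check sketched below; the interval conventions (0-based, half-open, with sorted, non-overlapping mask intervals) are assumptions for illustration.
\begin{verbatim}
def masked_fraction(locus, mask):
    # Fraction of the half-open interval `locus` covered by the
    # non-overlapping mask intervals.
    start, end = locus
    covered = 0
    for m_start, m_end in mask:
        covered += max(0, min(end, m_end) - max(start, m_start))
    return covered / (end - start)

# Drop the locus if 30% or more of it is masked
drop = masked_fraction((100, 200), [(90, 140), (180, 260)]) >= 0.30
\end{verbatim}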
%=========================================================================
\section{Availability of data and materials}
The pipelines and scripts used in this chapter are available at \url{https://github.com/mbhall88/head_to_head_pipeline}. A special mention must go to the workflow management program \vrb{snakemake} \cite{snakemake2021}, which was used to coordinate all analyses. All figures were generated using the Python libraries \vrb{matplotlib} \cite{matplotlib}, \vrb{seaborn} \cite{seaborn}, and \vrb{bokeh} \cite{bokeh}.
|
{"hexsha": "484e177400a9bdcc04d68620498e53a092545cb9", "size": 101633, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapter2/chapter2.tex", "max_stars_repo_name": "mbhall88/thesis", "max_stars_repo_head_hexsha": "29d60859a88bd72a7e2233c06e83121b3f8383a6", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-14T05:19:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T14:36:58.000Z", "max_issues_repo_path": "Chapter2/chapter2.tex", "max_issues_repo_name": "mbhall88/thesis", "max_issues_repo_head_hexsha": "29d60859a88bd72a7e2233c06e83121b3f8383a6", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter2/chapter2.tex", "max_forks_repo_name": "mbhall88/thesis", "max_forks_repo_head_hexsha": "29d60859a88bd72a7e2233c06e83121b3f8383a6", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 153.5241691843, "max_line_length": 1999, "alphanum_fraction": 0.7654698769, "num_tokens": 24702}
|
SUBROUTINE A16(X, Y)
!$OMP PARALLEL
! Named critical section: serialises only against other critical
! sections with the same name (XAXIS).
!$OMP CRITICAL(XAXIS)
      CALL DEQUEUE()
!$OMP END CRITICAL(XAXIS)
      CALL WORK()
! Unnamed critical section: serialises against all other unnamed
! critical sections.
!$OMP CRITICAL
      CALL DEQUEUE()
!$OMP END CRITICAL
      CALL WORK()
!$OMP END PARALLEL
END SUBROUTINE A16
|
{"hexsha": "de81e71df1e38d66e3d166c31593dba8a75ec73b", "size": 252, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/CompileTests/OpenMP_tests/fortran/critical.f90", "max_stars_repo_name": "maurizioabba/rose", "max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_issues_repo_path": "tests/CompileTests/OpenMP_tests/fortran/critical.f90", "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_forks_repo_path": "tests/CompileTests/OpenMP_tests/fortran/critical.f90", "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 146, "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "avg_line_length": 14.0, "max_line_length": 26, "alphanum_fraction": 0.6111111111, "num_tokens": 81}
|
subroutine read_seviri(mype,val_sev,ithin,rmesh,jsatid,&
gstime,infile,lunout,obstype,nread,ndata,nodata,twind,sis, &
mype_root,mype_sub,npe_sub,mpi_comm_sub,nobs, &
nrec_start,dval_use)
!$$$ subprogram documentation block
! . . . .
! subprogram: read_seviri read seviri bufr data
! prgmmr: liu, haixia org: np23 date: 2009-08-10
!
! abstract: This routine reads BUFR format SEVIRI 1b radiance (brightness
! temperature) files, which are bufrized from the NESDIS 1b data. Optionally, the
! data are thinned to a specified resolution using simple
! quality control checks.
!
! When running the gsi in regional mode, the code only
! retains those observations that fall within the regional
! domain
!
! program history log:
! 2009-08-10 hliu
! 2011-04-08 li - (1) use nst_gsi, nstinfo, fac_dtl, fac_tsl and add NSST vars
! (2) get zob, tz_tr (call skindepth and cal_tztr)
! (3) interpolate NSST Variables to Obs. location (call deter_nst)
! (4) add more elements (nstinfo) in data array
! 2011-08-01 lueken - added module use deter_sfc_mod
! 2012-03-05 akella - nst now controlled via coupler
! 2013-01-26 parrish - change from grdcrd to grdcrd1 (to allow successful debug compile on WCOSS)
! 2015-02-23 Rancic/Thomas - add thin4d to time window logical
! 2015-10-01 guo - consolidate use of ob location (in deg)
! 2018-05-21 j.jin - added time-thinning. Moved the checking of thin4d into satthin.F90.
!
! input argument list:
! mype - mpi task id
! val_sev - weighting factor applied to super obs
! ithin - flag to thin data
! rmesh - thinning mesh size (km)
! jsatid - satellite to read
! gstime - analysis time in minutes from reference date
! infile - unit from which to read BUFR data
! lunout - unit to which to write data for further processing
! obstype - observation type to process
! twind - input group time window (hours)
! sis - satellite/instrument/sensor indicator
! nrec_start - first subset with useful information
!
! output argument list:
! nread - number of BUFR SEVIRI 1b observations read
! ndata - number of BUFR SEVIRI 1b profiles retained for further processing
! nodata - number of BUFR SEVIRI 1b observations retained for further processing
! nobs - array of observations on each subdomain for each processor
!
! attributes:
! language: f90
! machine: ibm RS/6000 SP
!
!$$$
use kinds, only: r_kind,r_double,i_kind
use satthin, only: super_val,itxmax,makegrids,map2tgrid,destroygrids, &
checkob,finalcheck,score_crit
use satthin, only: radthin_time_info,tdiff2crit
use gridmod, only: diagnostic_reg,regional,nlat,nlon,txy2ll,tll2xy,rlats,rlons
use constants, only: deg2rad,zero,one,rad2deg,r60inv
use obsmod, only: bmiss
use obsmod, only: time_window_max
use radinfo, only: iuse_rad,jpch_rad,nusis
use gsi_4dvar, only: l4dvar,l4densvar,iwinbgn,winlen
use deter_sfc_mod, only: deter_sfc
use gsi_nstcouplermod, only: nst_gsi,nstinfo
use gsi_nstcouplermod, only: gsi_nstcoupler_skindepth, gsi_nstcoupler_deter
use mpimod, only: npe
use mpimod, only: ierror,mpi_itype,mpi_sum
! use radiance_mod, only: rad_obs_type
implicit none
! Declare passed variables
character(len=*),intent(in):: infile,obstype,jsatid
character(len=20),intent(in):: sis
integer(i_kind),intent(in):: mype,lunout,ithin,nrec_start
integer(i_kind),intent(inout):: ndata,nodata
integer(i_kind),intent(inout):: nread
integer(i_kind),dimension(npe),intent(inout):: nobs
real(r_kind),intent(in):: rmesh,gstime,twind
real(r_kind),intent(inout):: val_sev
integer(i_kind),intent(in) :: mype_root
integer(i_kind),intent(in) :: mype_sub
integer(i_kind),intent(in) :: npe_sub
integer(i_kind),intent(in) :: mpi_comm_sub
logical ,intent(in) :: dval_use
! Declare local parameters
real(r_kind),parameter:: r70=70.0_r_kind
real(r_kind),parameter:: r65=65.0_r_kind
real(r_kind),parameter:: r360=360.0_r_kind
real(r_kind),parameter:: tbmin=50.0_r_kind
real(r_kind),parameter:: tbmax=550.0_r_kind
! Declare local variables
logical outside,iuse,assim,clrsky,allsky
character(8) subset,subcsr,subasr
character(80):: hdrsevi ! seviri header
integer(i_kind) nchanl,ilath,ilonh,ilzah,iszah,irec,next
integer(i_kind) nmind,lnbufr,idate,ilat,ilon,nhdr,nchn,ncld,nbrst,jj
integer(i_kind) ireadmg,ireadsb,iret,nreal,nele,itt
integer(i_kind) itx,i,k,isflg,kidsat,n,iscan,idomsfc
integer(i_kind) idate5(5),maxinfo
integer(i_kind),allocatable,dimension(:)::nrec
real(r_kind) dg2ew,sstime,tdiff,t4dv,sfcr
real(r_kind) dlon,dlat,crit1,dist1
real(r_kind) dlon_earth,dlat_earth
real(r_kind) dlon_earth_deg,dlat_earth_deg
real(r_kind) pred
real(r_kind),dimension(0:4):: rlndsea
real(r_kind),dimension(0:3):: sfcpct
real(r_kind),dimension(0:3):: ts
real(r_kind) :: tsavg,vty,vfr,sty,stp,sm,sn,zz,ff10
real(r_kind),allocatable,dimension(:,:):: data_all
real(r_kind),allocatable,dimension(:):: hdr ! seviri imager header
real(r_kind),allocatable,dimension(:,:):: datasev1,datasev2 ! seviri imager data
real(r_kind) rclrsky
real(r_kind) :: zob,tref,dtw,dtc,tz_tr
real(r_kind) cdist,disterr,disterrmax,dlon00,dlat00
integer(i_kind) ntest
logical :: allchnmiss
real(r_kind) :: ptime,timeinflat,crit0
integer(i_kind) :: ithin_time,n_tbin,it_mesh
integer(i_kind),allocatable,dimension(:):: randsd
integer(i_kind),allocatable,dimension(:):: subset_num, subset_nnsb
real(r_kind), allocatable,dimension(:):: rd_tdiffs
integer(i_kind) :: sdsize,jrec,nnmsg,nnsb
!**************************************************************************
! Initialize variables
maxinfo=31
lnbufr = 10
disterrmax=zero
ntest=0
dg2ew = r360*deg2rad
ilon=3
ilat=4
if (nst_gsi > 0 ) then
call gsi_nstcoupler_skindepth(obstype, zob) ! get penetration depth (zob) for the obstype
endif
! HLIU: NEED TO confirm
rlndsea(0) = zero
rlndsea(1) = 15._r_kind
rlndsea(2) = 10._r_kind
rlndsea(3) = 15._r_kind
rlndsea(4) = 30._r_kind
nread=0
ndata=0
nodata=0
nchanl=8 ! the channel number
ilath=8 ! the position of latitude in the header
ilonh=9 ! the position of longitude in the header
ilzah=10 ! satellite zenith angle
iszah=11 ! solar zenith angle
subcsr='NC021043' ! sub message
subasr='NC021042' ! sub message
! If all channels of a given sensor are set to monitor or not
! assimilate mode (iuse_rad<1), reset relative weight to zero.
! We do not want such observations affecting the relative
! weighting between observations within a given thinning group.
assim=.false.
search: do i=1,jpch_rad
if ((trim(nusis(i))==trim(sis)) .and. (iuse_rad(i)>0)) then
assim=.true.
exit search
endif
end do search
if (.not.assim) val_sev=zero
call radthin_time_info(obstype, jsatid, sis, ptime, ithin_time)
if( ptime > 0.0_r_kind) then
n_tbin=nint(2*time_window_max/ptime)
else
n_tbin=1
endif
! Open bufr file.
call closbf(lnbufr)
open(lnbufr,file=trim(infile),form='unformatted')
call openbf(lnbufr,'IN',lnbufr)
call datelen(10)
call readmg(lnbufr,subset,idate,iret)
! Check the data set
if( iret/=0) then
write(6,*) 'READ_SEVIRI: SKIP PROCESSING OF SEVIRI FILE'
write(6,*) 'infile=', lnbufr, infile
return
endif
clrsky=.false.
allsky=.false.
if(subset == subcsr) then
clrsky=.true.
elseif(subset == subasr) then
allsky=.true.
else
write(6,*) 'READ_SEVIRI: SKIP PROCESSING OF SEVIRI FILE'
write(6,*) 'infile=', lnbufr, infile,' subset=', subset
return
endif
! Make thinning grids
call makegrids(rmesh,ithin,n_tbin=n_tbin)
! Set BUFR string based on seviri data set
if (clrsky) then
hdrsevi='SAID YEAR MNTH DAYS HOUR MINU SECO CLATH CLONH SAZA SOZA'
nhdr=11
nchn=12
ncld=nchn
nbrst=nchn
else if (allsky) then
hdrsevi='SAID YEAR MNTH DAYS HOUR MINU SECO CLATH CLONH'
nhdr=9
nchn=11
ncld=2
nbrst=nchn*6 ! channel dependent: all, clear, cloudy, low, middle and high clouds
endif
allocate(datasev1(1,ncld)) ! not channel dependent
allocate(datasev2(1,nbrst)) ! channel dependent: all, clear, cloudy, low, middle and high clouds
allocate(hdr(nhdr))
! Allocate arrays to hold all data for given satellite
if(dval_use) maxinfo = maxinfo + 2
nreal = maxinfo + nstinfo
nele = nreal + nchanl
allocate(data_all(nele,itxmax),nrec(itxmax))
! Reopen unit to bufr file
call closbf(lnbufr)
if(jsatid == 'm08') kidsat = 55
if(jsatid == 'm09') kidsat = 56
if(jsatid == 'm10') kidsat = 57
if(jsatid == 'm11') kidsat = 70
if( ithin_time == 5) then
call read_subset_nnsb
endif
open(lnbufr,file=infile,form='unformatted')
call openbf(lnbufr,'IN',lnbufr)
nrec=999999
irec=0
next=0
jrec=0
! Big loop over bufr file
read_msg: do while (ireadmg(lnbufr,subset,idate) >= 0)
irec=irec+1
if(irec < nrec_start) cycle read_msg
if( ithin_time == 5) then
jrec=jrec+1
if (allocated(rd_tdiffs)) deallocate(rd_tdiffs)
allocate(rd_tdiffs(subset_nnsb(jrec)))
call random_number(harvest=rd_tdiffs)
endif
next=next+1
if(next == npe_sub)next=0
if(next /= mype_sub)cycle
nnsb=0
read_loop: do while (ireadsb(lnbufr) == 0)
if( ithin_time == 5) then
nnsb=nnsb+1
endif
! Read through each record
call ufbint(lnbufr,hdr,nhdr,1,iret,hdrsevi)
if(nint(hdr(1)) /= kidsat) cycle read_loop
if (clrsky) then ! asr bufr has no sza
! remove the obs whose satellite zenith angles larger than 65 degree
if ( hdr(ilzah) > r65 ) cycle read_loop
end if
! Convert obs location from degrees to radians
if (hdr(ilonh)>=r360) hdr(ilonh)=hdr(ilonh)-r360
if (hdr(ilonh)< zero) hdr(ilonh)=hdr(ilonh)+r360
dlon_earth_deg=hdr(ilonh)
dlat_earth_deg=hdr(ilath)
dlon_earth=hdr(ilonh)*deg2rad
dlat_earth=hdr(ilath)*deg2rad
! If regional, map obs lat,lon to rotated grid.
if(regional)then
! Convert to rotated coordinate. dlon centered on 180 (pi),
! so always positive for limited area
call tll2xy(dlon_earth,dlat_earth,dlon,dlat,outside)
if(diagnostic_reg) then
call txy2ll(dlon,dlat,dlon00,dlat00)
ntest=ntest+1
cdist=sin(dlat_earth)*sin(dlat00)+cos(dlat_earth)*cos(dlat00)* &
(sin(dlon_earth)*sin(dlon00)+cos(dlon_earth)*cos(dlon00))
cdist=max(-one,min(cdist,one))
disterr=acos(cdist)*rad2deg
disterrmax=max(disterrmax,disterr)
end if
! Check to see if in domain. outside=.true. if dlon_earth,
! dlat_earth outside domain, =.false. if inside
if(outside) cycle read_loop
! Global case
else
dlon=dlon_earth
dlat=dlat_earth
call grdcrd1(dlat,rlats,nlat,1)
call grdcrd1(dlon,rlons,nlon,1)
endif
! Compare relative obs time with window. If obs
! falls outside of window, don't use this obs
idate5(1) = hdr(2) ! year
idate5(2) = hdr(3) ! month
idate5(3) = hdr(4) ! day
idate5(4) = hdr(5) ! hours
idate5(5) = hdr(6) ! minutes
call w3fs21(idate5,nmind)
t4dv = (real((nmind-iwinbgn),r_kind) + real(hdr(7),r_kind)*r60inv)*r60inv
sstime = real(nmind,r_kind) + real(hdr(7),r_kind)*r60inv
tdiff=(sstime-gstime)*r60inv
if (l4dvar.or.l4densvar) then
if (t4dv<zero .OR. t4dv>winlen) cycle read_loop
else
if (abs(tdiff)>twind) cycle read_loop
endif
crit0=0.01_r_kind
timeinflat=6.0_r_kind
if( ithin_time == 5) then
tdiff = rd_tdiffs(nnsb)
endif
call tdiff2crit(tdiff,ptime,ithin_time,timeinflat,crit0,crit1,it_mesh)
call map2tgrid(dlat_earth,dlon_earth,dist1,crit1,itx,ithin,itt,iuse,sis,it_mesh=it_mesh)
if(.not. iuse)cycle read_loop
nread=nread+nchanl
call ufbrep(lnbufr,datasev1,1,ncld,iret,'NCLDMNT')
rclrsky=bmiss
do n=1,ncld
if(datasev1(1,n)>= zero .and. datasev1(1,n) <= 100.0_r_kind ) then
rclrsky=datasev1(1,n)
! first QC filter out data with less clear sky fraction
if ( rclrsky < r70 ) cycle read_loop
end if
end do
call ufbrep(lnbufr,datasev2,1,nbrst,iret,'TMBRST')
allchnmiss=.true.
do n=4,11
if(datasev2(1,n)<500.) then
allchnmiss=.false.
end if
end do
if(allchnmiss) cycle read_loop
! Locate the observation on the analysis grid. Get sst and land/sea/ice
! mask.
! isflg - surface flag
! 0 sea
! 1 land
! 2 sea ice
! 3 snow
! 4 mixed
call deter_sfc(dlat,dlon,dlat_earth,dlon_earth,t4dv,isflg,idomsfc,sfcpct, &
ts,tsavg,vty,vfr,sty,stp,sm,sn,zz,ff10,sfcr)
crit1=crit1+rlndsea(isflg)
! call checkob(dist1,crit1,itx,iuse)
! if(.not. iuse)cycle read_loop
! Set common predictor parameters
!test
pred=zero
!test
! Compute "score" for observation. All scores>=0.0. Lowest score is "best"
crit1 = crit1+pred
call finalcheck(dist1,crit1,itx,iuse)
if(.not. iuse)cycle read_loop
iscan = nint(hdr(ilzah))+1.001_r_kind ! integer scan position HLIU check this
!
! interpolate NSST variables to Obs. location and get dtw, dtc, tz_tr
!
if ( nst_gsi > 0 ) then
tref = ts(0)
dtw = zero
dtc = zero
tz_tr = one
if ( sfcpct(0) > zero ) then
call gsi_nstcoupler_deter(dlat_earth,dlon_earth,t4dv,zob,tref,dtw,dtc,tz_tr)
endif
endif
! Transfer information to work array
data_all( 1,itx) = hdr(1) ! satellite id
data_all( 2,itx) = t4dv ! analysis relative time
data_all( 3,itx) = dlon ! grid relative longitude
data_all( 4,itx) = dlat ! grid relative latitude
data_all( 5,itx) = hdr(ilzah)*deg2rad ! satellite zenith angle (radians)
data_all( 6,itx) = bmiss ! satellite azimuth angle (radians)
data_all( 7,itx) = rclrsky ! clear sky amount
data_all( 8,itx) = iscan ! integer scan position
data_all( 9,itx) = hdr(iszah) ! solar zenith angle
data_all(10,itx) = bmiss ! solar azimuth angle
data_all(11,itx) = sfcpct(0) ! sea percentage of
data_all(12,itx) = sfcpct(1) ! land percentage
data_all(13,itx) = sfcpct(2) ! sea ice percentage
data_all(14,itx) = sfcpct(3) ! snow percentage
data_all(15,itx)= ts(0) ! ocean skin temperature
data_all(16,itx)= ts(1) ! land skin temperature
data_all(17,itx)= ts(2) ! ice skin temperature
data_all(18,itx)= ts(3) ! snow skin temperature
data_all(19,itx)= tsavg ! average skin temperature
data_all(20,itx)= vty ! vegetation type
data_all(21,itx)= vfr ! vegetation fraction
data_all(22,itx)= sty ! soil type
data_all(23,itx)= stp ! soil temperature
data_all(24,itx)= sm ! soil moisture
data_all(25,itx)= sn ! snow depth
data_all(26,itx)= zz ! surface height
data_all(27,itx)= idomsfc + 0.001_r_kind ! dominate surface type
data_all(28,itx)= sfcr ! surface roughness
data_all(29,itx)= ff10 ! ten meter wind factor
data_all(30,itx) = dlon_earth_deg ! earth relative longitude (degrees)
data_all(31,itx) = dlat_earth_deg ! earth relative latitude (degrees)
if(dval_use)then
data_all(32,itx) = val_sev
data_all(33,itx) = itt
end if
if ( nst_gsi > 0 ) then
data_all(maxinfo+1,itx) = tref ! foundation temperature
data_all(maxinfo+2,itx) = dtw ! dt_warm at zob
data_all(maxinfo+3,itx) = dtc ! dt_cool at zob
data_all(maxinfo+4,itx) = tz_tr ! d(Tz)/d(Tr)
endif
do k=1,nchanl
if (clrsky) then
data_all(k+nreal,itx)=datasev2(1,k+3) ! for chn 4,5,6,7,8,9,10,11
else if (allsky) then
jj=(k+2)*6+1
data_all(k+nreal,itx)=datasev2(1,jj) ! all-sky radiance for chn 4,5,6,7,8,9,10,11
end if
end do
nrec(itx)=irec
! End of satellite read block
enddo read_loop
if(allocated(rd_tdiffs)) deallocate(rd_tdiffs)
enddo read_msg
call closbf(lnbufr)
call combine_radobs(mype_sub,mype_root,npe_sub,mpi_comm_sub,&
nele,itxmax,nread,ndata,data_all,score_crit,nrec)
! Allow single task to check for bad obs, update superobs sum,
! and write out data to scratch file for further processing.
if (mype_sub==mype_root.and.ndata>0) then
do n=1,ndata
do k=1,nchanl
if(data_all(k+nreal,n) > tbmin .and. &
data_all(k+nreal,n) < tbmax)nodata=nodata+1
end do
end do
if(dval_use .and. assim)then
do n=1,ndata
itt=nint(data_all(33,n))
super_val(itt)=super_val(itt)+val_sev
end do
end if
! Write retained data to local file
call count_obs(ndata,nele,ilat,ilon,data_all,nobs)
write(lunout) obstype,sis,nreal,nchanl,ilat,ilon
write(lunout) ((data_all(k,n),k=1,nele),n=1,ndata)
endif
! Deallocate local arrays
deallocate(data_all,nrec)
deallocate(hdr,datasev2,datasev1)
if(allocated(subset_num)) deallocate(subset_num)
if(allocated(subset_nnsb)) deallocate(subset_nnsb)
! Deallocate satthin arrays
call destroygrids
! Print data counts
! write(6,9000) infile,sis,nread,rmesh,ndata
!000 format(' READ_SEVIRI: infile=',a10,&
! ' sis=',a20,&
! ' nread=',i10, &
! ' rmesh=',f7.3,' ndata=',i10)
if(diagnostic_reg.and.ntest>0) write(6,*)'READ_SEVIRI: ',&
'mype,ntest,disterrmax=',mype,ntest,disterrmax
! End of routine
! return
contains
subroutine read_subset_nnsb
implicit none
     open(lnbufr,file=infile,form='unformatted')
     call openbf(lnbufr,'IN',lnbufr)

     call random_seed(size=sdsize)
     allocate(randsd(sdsize))
     do i=1,sdsize
        randsd(i)=int(gstime,i_kind)+kidsat
     end do
     call random_seed(put=randsd)
     deallocate(randsd)

     nnmsg=0
     irec=0
     read_msg1: do while (ireadmg(lnbufr,subset,idate) >= 0)
        irec=irec+1
        if(irec < nrec_start) cycle read_msg1
        nnmsg=nnmsg+1
     enddo read_msg1
     call closbf(lnbufr)

     allocate(subset_num(nnmsg))
     allocate(subset_nnsb(nnmsg))
     subset_num=0
     subset_nnsb=0

     open(lnbufr,file=infile,form='unformatted')
     call openbf(lnbufr,'IN',lnbufr)
     nnmsg=0
     irec=0
     next=0
     read_msg2: do while (ireadmg(lnbufr,subset,idate) >= 0)
        irec=irec+1
        if(irec < nrec_start) cycle read_msg2
        nnmsg=nnmsg+1
        next=next+1
        if(next == npe_sub) next=0
        if(next /= mype_sub) cycle read_msg2
        nnsb=0
        read_loop2: do while (ireadsb(lnbufr) == 0)
           nnsb=nnsb+1
        enddo read_loop2
        subset_num(nnmsg)=nnsb
     enddo read_msg2
     call closbf(lnbufr)

     if (npe_sub > 1 ) then
        call mpi_allreduce(subset_num, subset_nnsb, nnmsg, mpi_itype, mpi_sum, mpi_comm_sub, ierror)
     else
        subset_nnsb = subset_num
     endif

  end subroutine read_subset_nnsb

end subroutine read_seviri
|
{"hexsha": "030002c43339ff7770531242e5d9ffc2ca12de7d", "size": 20818, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "GEOSaana_GridComp/GSI_GridComp/read_seviri.f90", "max_stars_repo_name": "GEOS-ESM/GEOSana_GridComp", "max_stars_repo_head_hexsha": "cf33607613754313a2383bb7e7b3d29c856b9daf", "max_stars_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GEOSaana_GridComp/GSI_GridComp/read_seviri.f90", "max_issues_repo_name": "GEOS-ESM/GEOSana_GridComp", "max_issues_repo_head_hexsha": "cf33607613754313a2383bb7e7b3d29c856b9daf", "max_issues_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2019-08-15T20:38:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T15:20:38.000Z", "max_forks_repo_path": "GEOSaana_GridComp/GSI_GridComp/read_seviri.f90", "max_forks_repo_name": "GEOS-ESM/GEOSana_GridComp", "max_forks_repo_head_hexsha": "cf33607613754313a2383bb7e7b3d29c856b9daf", "max_forks_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-12-20T23:40:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-11T08:20:51.000Z", "avg_line_length": 35.5863247863, "max_line_length": 102, "alphanum_fraction": 0.6156210971, "num_tokens": 6614}
|
import numpy as np
import cv2
import struct
import random
from torchvision.transforms import ToTensor, ToPILImage
img_transform = ToTensor()
def get_pred_data(file_paths, width=1600):
    for file_path in file_paths:
        with open(file_path, 'rb') as f:
            header_size = np.fromfile(f, dtype='uint32', count=1)[0]
            header = np.fromfile(f, dtype='uint8', count=header_size - 4)
            formatcode = "".join([chr(c) for c in header[:8]])
            Illustration_size = header_size - 36
            Illustration = "".join([chr(c) for c in header[8:Illustration_size + 8]])
            Code_type = "".join([chr(c) for c in header[Illustration_size + 8:Illustration_size + 28]])
            # Combine the two bytes of each field (assumed little-endian, as in
            # the DGRL header layout). The original `a + b << 4` parsed as
            # `(a + b) << 4` because `+` binds tighter than `<<`; the int()
            # casts also avoid uint8 overflow in the shift.
            Code_length = int(header[Illustration_size + 28]) + (int(header[Illustration_size + 29]) << 8)
            Bits_per_pixel = int(header[Illustration_size + 30]) + (int(header[Illustration_size + 31]) << 8)
            # print(header_size, formatcode, Illustration)
            # print(Code_type, Code_length, Bits_per_pixel)
            Image_height = np.fromfile(f, dtype='uint32', count=1)[0]
            Image_width = np.fromfile(f, dtype='uint32', count=1)[0]
            Line_number = np.fromfile(f, dtype='uint32', count=1)[0]
            page_np = np.ones((Image_height * 4, Image_width), dtype=np.uint8) * 255
            page_label = []
            boxes = []
            Y1 = 0
            Y2 = 0
            margin = 0
            for ln in range(Line_number):
                Char_number = np.fromfile(f, dtype='uint32', count=1)[0]
                Label = np.fromfile(f, dtype='uint16', count=Char_number)
                # print(Label)
                Label_str = "".join([struct.pack('H', c).decode('GBK', errors='ignore') for c in Label])
                # print(Label_str, Char_number)
                Top_left = np.fromfile(f, dtype='uint32', count=2)
                # plain ints avoid uint32 wraparound in "Top - 64" below
                Top, Left = int(Top_left[0]), int(Top_left[1])
                Height = np.fromfile(f, dtype='uint32', count=1)[0]
                # Top += ln*Image_height//Line_number//8
                Width = np.fromfile(f, dtype='uint32', count=1)[0]
                Bitmap = np.fromfile(f, dtype='uint8', count=Height * Width).reshape([Height, Width])
                contours, hierarchy = cv2.findContours(
                    255 - Bitmap, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
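                # Per-line record layout as parsed above: Char_number (uint32),
                # Label (Char_number x uint16, GBK), Top/Left (2 x uint32),
                # Height and Width (uint32 each), then Bitmap (Height*Width uint8).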
                # if random.random() < 0.5:
                #     Top += random.uniform(-0.2, 0.2)*Height
                # Top = int(Top)
                all_contours = []
                for contour in contours:
                    for points in contour:
                        all_contours.append(points)
                all_contours = np.array(all_contours)
                rect = cv2.minAreaRect(all_contours)
                rect_w = max(rect[1])
                rect_h = min(rect[1])
                # Top -= int(ln*Image_height//Line_number//10)
                if rect_w < Image_width * 0.25:
                    # boundingRect returns (x, y, w, h); the original unpacked it
                    # as two corners, which misplaced the bottom-right point.
                    x, y, w, h = cv2.boundingRect(all_contours)
                    bbox = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
                else:
                    bbox = cv2.boxPoints(rect)
                    bbox = sorted(bbox, key=lambda p: p[0])
                    new_bbox = []
                    new_bbox += sorted(bbox[:2], key=lambda p: p[1])
                    new_bbox += sorted(bbox[2:], key=lambda p: -p[1])
                    bbox = [new_bbox[0], new_bbox[3], new_bbox[2], new_bbox[1]]
                # left_w = random.uniform(-1, 1) * rect_h
                # right_w = random.uniform(-1, 1) * rect_h
                # bbox[0][0] += left_w
                # bbox[1][0] += right_w
                # bbox[2][0] += right_w
                # bbox[3][0] += left_w
                # top_h = random.uniform(-0.2, 0.2) * rect_h
                # bottom_h = random.uniform(-0.2, 0.2) * rect_h
                # bbox[0][1] += top_h
                # bbox[1][1] += top_h
                # bbox[2][1] += bottom_h
                # bbox[3][1] += bottom_h
                bbox = np.array(bbox).astype(np.int64)  # the np.int0 alias was removed in NumPy 2
                bbox[:, 0] += Left
                bbox[:, 1] += Top
                origin_sub = page_np[Top:Top + Height, Left:Left + Width]
                # keep the darker of the existing page pixels and the new bitmap
                page_np[Top:Top + Height, Left:Left + Width] = (origin_sub > Bitmap) * Bitmap + (origin_sub <= Bitmap) * origin_sub
                if ln == 0:
                    Y1 = max(Top - 64, 0)
                if ln == Line_number - 1:
                    Y2 = Top + Height
                # cv2.drawContours(page_np, [bbox], -1, 128, 2)
                # cv2.imshow('1', cv2.resize(page_np[Y1:, :], dsize=None, fx=0.5, fy=0.5))
                # cv2.waitKey()
                bbox[:, 1] -= Y1
                boxes.append(bbox)
                # Normalize the label: drop NULs and fold the full-width
                # punctuation/alphanumeric variants onto ASCII (the identical
                # mapping to the original chain of str.replace calls).
                Label_str = Label_str.translate(str.maketrans(
                    '〔〕"%(),-./0123456789:;?ABCFGHMNOPRSVWadehilmnoprstuy',
                    '()"%(),-./0123456789:;?ABCFGHMNOPRSVWadehilmnoprstuy',
                    '\x00'))
                page_label.append(Label_str)
            Y2 = min(Image_height * 4, Y2 + 64)
            img_np = page_np[Y1:Y2, :]
            # page_np is single-channel, so GRAY2RGB (the original BGR2RGB
            # asserts on 2-D input) produces the 3-channel image expected below.
            img_np = cv2.cvtColor(img_np, cv2.COLOR_GRAY2RGB)
            boxes = np.array(boxes, dtype=float)  # the np.float alias was removed in NumPy 1.24
            h, w, _ = img_np.shape
            long_edge = max(h, w)
            if long_edge > width:
                # downscale so the longer edge does not exceed `width`
                # (original comment said "ensure the short edge >= inputsize")
                scale = width / long_edge
                img_np = cv2.resize(img_np, dsize=None, fx=scale, fy=scale)
                boxes *= scale
            img_tensor = img_transform(img_np).unsqueeze(0)
            yield img_np, img_tensor, [boxes], page_label
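
# Usage sketch (hypothetical file name; get_pred_data is a generator):
#   for img_np, img_tensor, boxes, labels in get_pred_data(['page_0001.dgrl']):
#       print(img_tensor.shape, boxes[0].shape, labels[0])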
|
{"hexsha": "e3b6a5e5964254eae34b6237f7c52a26d485c0c6", "size": 16566, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/get_dgrl_data.py", "max_stars_repo_name": "grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel", "max_stars_repo_head_hexsha": "00334215b63b12284a74e26fa0fbf15f09a046a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2021-05-10T04:10:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T14:36:08.000Z", "max_issues_repo_path": "utils/get_dgrl_data.py", "max_issues_repo_name": "grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel", "max_issues_repo_head_hexsha": "00334215b63b12284a74e26fa0fbf15f09a046a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-07-08T06:29:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-02T08:51:01.000Z", "max_forks_repo_path": "utils/get_dgrl_data.py", "max_forks_repo_name": "grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel", "max_forks_repo_head_hexsha": "00334215b63b12284a74e26fa0fbf15f09a046a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-12-14T02:39:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T02:38:58.000Z", "avg_line_length": 49.0118343195, "max_line_length": 131, "alphanum_fraction": 0.4947482796, "include": true, "reason": "import numpy", "num_tokens": 4246}
|
import numpy as np
class ReplayBuffer():
    '''Experience Replay buffer. Implemented as a cyclic array
    of fixed size for efficiency.
    '''

    def __init__(self, config):
        self.max_size = config['size']
        self.array = []
        self.position = 0
        self.rng = np.random.default_rng(config['seed'])

    def push(self, x, a, r, gamma, xp):
        if len(self.array) < self.max_size:
            self.array.append(None)
        self.array[self.position] = (x, a, r, gamma, xp)
        self.position = (self.position + 1) % self.max_size

    def sample(self, batch_size):
        indices = self.rng.integers(len(self.array), size=batch_size)
        return [self.array[i] for i in indices]
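

# Minimal usage sketch (hypothetical config values, not part of the original
# module): a size-3 buffer wraps around after three pushes, and sample()
# draws uniformly with replacement.
if __name__ == '__main__':
    buf = ReplayBuffer({'size': 3, 'seed': 0})
    for t in range(5):
        buf.push(t, 0, 1.0, 0.99, t + 1)  # (x, a, r, gamma, xp)
    print(buf.sample(batch_size=2))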
|
{"hexsha": "a70c632d436b735c12e9bb7fae4bdfe1008e62df", "size": 721, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "EhsanEI/pytorch-rl", "max_stars_repo_head_hexsha": "d473e465607087b75bc958fb0407fb6c8f41097e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "EhsanEI/pytorch-rl", "max_issues_repo_head_hexsha": "d473e465607087b75bc958fb0407fb6c8f41097e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "EhsanEI/pytorch-rl", "max_forks_repo_head_hexsha": "d473e465607087b75bc958fb0407fb6c8f41097e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7037037037, "max_line_length": 69, "alphanum_fraction": 0.6074895978, "include": true, "reason": "import numpy", "num_tokens": 175}
|
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import logging
import numpy as np
from sklearn.svm import LinearSVC as SVC
from ..hyp_defs import float_cpu
from ..hyp_model import HypModel
from ..utils.math import softmax
class LinearSVMC(HypModel):
    """Linear Support Vector Machine for Classification.

    Attributes:
      A: Linear transformation coefficients (num_feats, num_classes)
      b: biases (num_classes, )
      penalty: str, ‘l1’ or ‘l2’, default: ‘l2’,
      C: Regularization parameter.
         The strength of the regularization is inversely proportional to C.
         Must be strictly positive.
      loss: str, 'hinge' or 'squared_hinge', default: 'squared_hinge'.
      use_bias: if True, it uses bias, otherwise bias is zero.
      bias_scaling: float, default 1.
                    In this case, x becomes [x, bias_scaling], i.e.
                    a “synthetic” feature with constant value equal to
                    intercept_scaling is appended to the instance vector.
                    The intercept becomes intercept_scaling * synthetic_feature_weight.
                    Note! the synthetic feature weight is subject to l1/l2
                    regularization as all other features.
                    To lessen the effect of regularization on the synthetic feature
                    weight, bias_scaling has to be increased.
      class_weight: dict or ‘balanced’, default=None
                    Set the parameter C of class i to class_weight[i]*C for SVC.
                    If not given, all classes are supposed to have weight one.
                    The “balanced” mode uses the values of y to automatically adjust
                    weights inversely proportional to class frequencies in the input
                    data as n_samples / (n_classes * np.bincount(y)).
      random_state: RandomState instance or None, optional, default: None
      max_iter: int, default: 100
                Maximum number of iterations taken for the solver to converge.
      dual: bool, default: False
            Dual or primal formulation.
      tol: float, default: 1e-4
           Tolerance for stopping criteria.
      multi_class: {‘ovr’, ‘crammer_singer’}, default=’ovr’
                   Determines the multi-class strategy if y contains more than
                   two classes. "ovr" trains n_classes one-vs-rest classifiers,
                   while "crammer_singer" optimizes a joint objective over all
                   classes. While crammer_singer is interesting from a theoretical
                   perspective as it is consistent,
                   it is seldom used in practice as it rarely leads to better
                   accuracy and is more expensive to compute.
                   If "crammer_singer" is chosen, the options loss,
                   penalty and dual will be ignored.
      verbose: int, default: 0
      balance_class_weight: if True and class_weight is None, it makes class_weight="balanced".
      lr_seed: seed for RandomState, used when random_state is None.
    """
    def __init__(
        self,
        A=None,
        b=None,
        penalty="l2",
        C=1.0,
        loss="squared_hinge",
        use_bias=True,
        bias_scaling=1,
        class_weight=None,
        random_state=None,
        max_iter=100,
        dual=True,
        tol=0.0001,
        multi_class="ovr",
        verbose=0,
        balance_class_weight=True,
        lr_seed=1024,
        **kwargs
    ):
        super().__init__(**kwargs)

        if class_weight is None and balance_class_weight:
            class_weight = "balanced"
        if random_state is None:
            random_state = np.random.RandomState(seed=lr_seed)

        self.use_bias = use_bias
        self.bias_scaling = bias_scaling
        self.balance_class_weight = balance_class_weight
        logging.debug(class_weight)

        self.svm = SVC(
            penalty=penalty,
            C=C,
            loss=loss,
            dual=dual,
            tol=tol,
            fit_intercept=use_bias,
            intercept_scaling=bias_scaling,
            class_weight=class_weight,
            random_state=random_state,
            max_iter=max_iter,
            multi_class=multi_class,
            verbose=verbose,
        )

        if A is not None:
            self.svm.coef_ = A.T
        if b is not None:
            self.svm.intercept_ = b
    @property
    def A(self):
        return self.svm.coef_.T

    @property
    def b(self):
        return self.svm.intercept_ * self.bias_scaling

    def get_config(self):
        """Gets configuration hyperparams.

        Returns:
          Dictionary with config hyperparams.
        """
        config = {
            "use_bias": self.use_bias,
            "bias_scaling": self.bias_scaling,
            "balance_class_weight": self.balance_class_weight,
        }
        base_config = super(LinearSVMC, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def predict(self, x, eval_type="logit"):
        """Evaluates the SVM.

        Args:
          x: input features (num_samples, feat_dim),
             it can be (num_samples,) if feat_dim=1.
          eval_type: evaluation method: logit (log-likelihood ratio),
                     bin-log-post (binary log-posteriors),
                     bin-post (binary posteriors),
                     cat-log-post (categorical log-posteriors),
                     cat-post (categorical posteriors)

        Returns:
          Output scores (num_samples, num_classes)
        """
        s = np.dot(x, self.A) + self.b
        if eval_type == "bin-log-post":
            return np.log(1 + np.exp(-s))
        if eval_type == "bin-post":
            return 1 / (1 + np.exp(-s))
        if eval_type == "cat-post":
            return softmax(s)
        if eval_type == "cat-log-post":
            return np.log(softmax(s))
        return s

    def __call__(self, x, eval_type="logit"):
        """Evaluates the SVM.

        Args:
          x: input features (num_samples, feat_dim),
             it can be (num_samples,) if feat_dim=1.
          eval_type: evaluation method: logit (log-likelihood ratio),
                     bin-log-post (binary log-posteriors),
                     bin-post (binary posteriors),
                     cat-log-post (categorical log-posteriors),
                     cat-post (categorical posteriors)

        Returns:
          Output scores (num_samples, num_classes)
        """
        return self.predict(x, eval_type)
    def fit(self, x, class_ids, sample_weight=None):
        """Estimates the parameters of the model.

        Args:
          x: input features (num_samples, feat_dim), it can be (num_samples,) if feat_dim=1.
          class_ids: class integer [0, num_classes-1] identifier (num_samples,)
          sample_weight: weight of each sample in the estimation (num_samples,)
        """
        self.svm.fit(x, class_ids, sample_weight=sample_weight)

    def save_params(self, f):
        params = {"A": self.A, "b": self.b}
        self._save_params_from_dict(f, params)

    @classmethod
    def load_params(cls, f, config):
        param_list = ["A", "b"]
        params = cls._load_params_to_dict(f, config["name"], param_list)
        kwargs = dict(list(config.items()) + list(params.items()))
        return cls(**kwargs)
    @staticmethod
    def filter_class_args(prefix=None, **kwargs):
        """Extracts the hyperparams of the class from a dictionary.

        Returns:
          Hyperparameter dictionary to initialize the class.
        """
        valid_args = (
            "penalty",
            "C",
            "loss",
            "use_bias",
            "bias_scaling",
            "class_weight",
            "lr_seed",
            "max_iter",
            "dual",
            "tol",
            "multi_class",
            "verbose",
            "balance_class_weight",
            "name",
        )
        return dict((k, kwargs[k]) for k in valid_args if k in kwargs)

    filter_train_args = filter_class_args
    @staticmethod
    def add_class_args(parser, prefix=None):
        """It adds the arguments corresponding to the class to jsonargparse.

        Args:
          parser: jsonargparse object
          prefix: argument prefix.
        """
        if prefix is None:
            p1 = "--"
            p2 = ""
        else:
            p1 = "--" + prefix + "."
            p2 = prefix + "."

        parser.add_argument(
            p1 + "penalty",
            default="l2",
            choices=["l2", "l1"],
            help="used to specify the norm used in the penalization",
        )
        parser.add_argument(
            p1 + "c",
            dest=(p2 + "C"),
            default=1.0,
            type=float,
            help="inverse of regularization strength",
        )
        parser.add_argument(
            p1 + "loss",
            default="squared_hinge",
            choices=["hinge", "squared_hinge"],
            help="type of loss",
        )
        parser.add_argument(
            p1 + "no-use-bias",
            dest=(p2 + "use_bias"),
            default=True,
            action="store_false",
            help="do not use bias",
        )
        parser.add_argument(
            p1 + "bias-scaling",
            default=1.0,
            type=float,
            help=(
                "useful only when the solver liblinear is used "
                "and use_bias is set to True"
            ),
        )
        parser.add_argument(
            p1 + "lr-seed", default=1024, type=int, help="random number generator seed"
        )
        parser.add_argument(
            p1 + "max-iter",
            default=100,
            type=int,
            help="maximum number of iterations taken for the solver to converge",
        )
        parser.add_argument(
            p1 + "no-dual",
            dest=(p2 + "dual"),
            default=True,
            action="store_false",
            help=(
                "dual or primal formulation. "
                "Dual formulation is only implemented for "
                "l2 penalty with liblinear solver"
            ),
        )
        parser.add_argument(
            p1 + "tol", default=1e-4, type=float, help="tolerance for stopping criteria"
        )
        parser.add_argument(
            p1 + "multi-class",
            default="ovr",
            choices=["ovr", "crammer_singer"],
            help=(
                "ovr fits a binary problem for each class else "
                "it minimizes the multinomial loss."
            ),
        )
        parser.add_argument(
            p1 + "verbose",
            default=0,
            type=int,
            help="verbosity level of the liblinear solver",
        )
        parser.add_argument(
            p1 + "balance-class-weight",
            default=False,
            action="store_true",
            help="balances the weight of each class when computing W",
        )
        parser.add_argument(p1 + "name", default="svc", help="model name")
    @staticmethod
    def filter_eval_args(prefix, **kwargs):
        """Extracts the evaluation time hyperparams of the class from a dictionary.

        Returns:
          Hyperparameters to evaluate the class.
        """
        valid_args = ("model_file", "eval_type")
        return dict((k, kwargs[k]) for k in valid_args if k in kwargs)

    @staticmethod
    def add_eval_args(parser, prefix=None):
        """It adds the arguments needed to evaluate the class to jsonargparse.

        Args:
          parser: jsonargparse object
          prefix: argument prefix.
        """
        if prefix is None:
            p1 = "--"
            p2 = ""
        else:
            p1 = "--" + prefix + "."
            p2 = prefix + "."
        parser.add_argument(p1 + "model-file", required=True, help="model file")
        parser.add_argument(
            p1 + "eval-type",
            default="logit",
            # choices aligned with the eval_type values accepted by predict()
            choices=["logit", "bin-log-post", "bin-post", "cat-log-post", "cat-post"],
            help="type of evaluation",
        )

    # for backward compatibility
    filter_train_args = filter_class_args
    add_argparse_args = add_class_args
    add_argparse_train_args = add_class_args
    add_argparse_eval_args = add_eval_args
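

# Usage sketch (hypothetical data; assumes the HypModel base works with the
# default kwargs, as elsewhere in hyperion):
#   import numpy as np
#   x = np.random.randn(100, 10)
#   y = np.random.randint(0, 3, size=100)
#   model = LinearSVMC(C=1.0)
#   model.fit(x, y)
#   scores = model(x, eval_type="logit")   # (100, 3) decision scores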
|
{"hexsha": "244e0dc01864a058179b70274cb3fd7c2909597f", "size": 12475, "ext": "py", "lang": "Python", "max_stars_repo_path": "hyperion/classifiers/linear_svmc.py", "max_stars_repo_name": "hyperion-ml/hyperion", "max_stars_repo_head_hexsha": "c4c9eee0acab1ba572843373245da12d00dfffaa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-12-19T04:24:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T03:24:04.000Z", "max_issues_repo_path": "hyperion/classifiers/linear_svmc.py", "max_issues_repo_name": "hyperion-ml/hyperion", "max_issues_repo_head_hexsha": "c4c9eee0acab1ba572843373245da12d00dfffaa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hyperion/classifiers/linear_svmc.py", "max_forks_repo_name": "hyperion-ml/hyperion", "max_forks_repo_head_hexsha": "c4c9eee0acab1ba572843373245da12d00dfffaa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-12-14T20:41:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T14:18:11.000Z", "avg_line_length": 33.9918256131, "max_line_length": 95, "alphanum_fraction": 0.5551102204, "include": true, "reason": "import numpy", "num_tokens": 2728}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 00:59:12 2020
@author: Jon
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
true_kon = 0.070353336309323  # reference values, matching the Matlab implementation
true_koff = 0.464397161485740
true_emission = 41354.89875953371
true_noise = 15364.95018917365
kon_256 = 0.0703975370151
koff_256 = 0.464528461567
emission_256 = 41355.4421665
noise_256 = 15383.4553458
kon_128 = 0.0702521547606
koff_128 = 0.462781716998
emission_128 = 41349.5580493
noise_128 = 15425.418704
kon_64 = 0.0693153852427
koff_64 = 0.467115326453
emission_64 = 42188.8476558
noise_64 = 15603.8805624
kon_32 = 0.0670500953392
koff_32 = 0.451529655946
emission_32 = 42357.8369306
noise_32 = 15950.0835477
kon_16 = 0.0629044588372
koff_16 = 0.419355892181
emission_16 = 42031.6615316
noise_16 = 16549.2031647
kon_8 = 0.0620334755086
koff_8 = 0.434113981492
emission_8 = 44399.6417218
noise_8 = 17317.3710659
index = np.array([8, 16, 32, 64, 128, 256])
emissions = np.array([emission_8, emission_16, emission_32, emission_64, emission_128,
                      emission_256])
noise = np.array([noise_8, noise_16, noise_32, noise_64, noise_128, noise_256])
prob_on = np.array([kon_8, kon_16, kon_32, kon_64, kon_128, kon_256])
prob_off = np.array([koff_8, koff_16, koff_32, koff_64, koff_128, koff_256])


def rel_error(true_value, inferred_value):
    rel_calc = abs((inferred_value - true_value) / true_value)
    rel_calc2 = rel_calc * 100
    return rel_calc2


noise_error_holder = np.zeros((len(index),))
for i in np.arange(0, len(index)):
    noise_error_holder[i,] = rel_error(true_noise, noise[i])

prob_on_error_holder = np.zeros((len(index),))
for j in np.arange(0, len(index)):
    prob_on_error_holder[j,] = rel_error(true_kon, prob_on[j])

prob_off_error_holder = np.zeros((len(index),))
for k in np.arange(0, len(index)):
    prob_off_error_holder[k,] = rel_error(true_koff, prob_off[k])

emissions_error_holder = np.zeros((len(index),))
for l in np.arange(0, len(index)):
    emissions_error_holder[l,] = rel_error(true_emission, emissions[l])
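
# Note: rel_error is built from NumPy ufuncs, so each loop above could be
# replaced by a single vectorized call with identical results, e.g.
#   noise_error_holder = rel_error(true_noise, noise)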
#%%
params = {'legend.fontsize': 18}
plt.rcParams.update(params)
plt.rcParams.update({'font.size': 18})
plt.rcParams['font.family'] = 'arial'
noise_dict = {'8': rel_error(true_noise, noise_8), '16': rel_error(true_noise, noise_16),
              '32': rel_error(true_noise, noise_32), '64': rel_error(true_noise, noise_64),
              '128': rel_error(true_noise, noise_128), '256': rel_error(true_noise, noise_256)}
noise_names = list(noise_dict.keys())
noise_values = list(noise_dict.values())

prob_off_dict = {'8': rel_error(true_koff, koff_8), '16': rel_error(true_koff, koff_16), '32': rel_error(true_koff, koff_32),
                 '64': rel_error(true_koff, koff_64), '128': rel_error(true_koff, koff_128),
                 '256': rel_error(true_koff, koff_256)}
prob_off_names = list(prob_off_dict.keys())
prob_off_values = list(prob_off_dict.values())

prob_on_dict = {'8': rel_error(true_kon, kon_8), '16': rel_error(true_kon, kon_16), '32': rel_error(true_kon, kon_32),
                '64': rel_error(true_kon, kon_64), '128': rel_error(true_kon, kon_128),
                '256': rel_error(true_kon, kon_256)}
prob_on_names = list(prob_on_dict.keys())
prob_on_values = list(prob_on_dict.values())

emissions_dict = {'8': rel_error(true_emission, emission_8), '16': rel_error(true_emission, emission_16),
                  '32': rel_error(true_emission, emission_32), '64': rel_error(true_emission, emission_64),
                  '128': rel_error(true_emission, emission_128), '256': rel_error(true_emission, emission_256)}
emissions_names = list(emissions_dict.keys())
emissions_values = list(emissions_dict.values())
plt.figure(1, figsize = (8,6), dpi=300)
plt.plot(noise_names, noise_values, label = 'Noise', marker = 's', linewidth=3)
plt.plot(prob_off_names, prob_off_values, label = '$k_{off}$', marker = '.', linewidth=3)
plt.plot(prob_on_names, prob_on_values, label = '$k_{on}$', marker = 'o', linewidth=3)
plt.plot(emissions_names, emissions_values, label = 'Emission', marker = 'v', linewidth=3)
plt.xlabel('Number of Allowed States (M)')
plt.ylabel('Relative Error (%)')
plt.ylim((0,35))
#plt.xlim((0, 300))
#plt.title('Plot of Convergence of Truncated Model Parameters to Full Model Parameters')
#plt.title('Plot of Convergence of Truncated and Full Model Parameters')
plt.legend()
plt.savefig('pebwt_reborn_convergence.pdf', dpi=300, transparent=True)
plt.savefig('pebwt_reborn_convergence.svg')
|
{"hexsha": "52187ce04c8fd469ae564917b5fbbd69a3a9670a", "size": 4630, "ext": "py", "lang": "Python", "max_stars_repo_path": "example_datasets/reproduce_paper_figures/hindsight_parameter_convergence.py", "max_stars_repo_name": "ManchesterBioinference/burstInfer", "max_stars_repo_head_hexsha": "933bc76ae8e7fadc36bab1b6bf07ed18e5978a01", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-05T05:09:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-05T05:09:53.000Z", "max_issues_repo_path": "example_datasets/reproduce_paper_figures/hindsight_parameter_convergence.py", "max_issues_repo_name": "ManchesterBioinference/burstInfer", "max_issues_repo_head_hexsha": "933bc76ae8e7fadc36bab1b6bf07ed18e5978a01", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-08T20:42:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T17:57:22.000Z", "max_forks_repo_path": "example_datasets/reproduce_paper_figures/hindsight_parameter_convergence.py", "max_forks_repo_name": "ManchesterBioinference/burstInfer", "max_forks_repo_head_hexsha": "933bc76ae8e7fadc36bab1b6bf07ed18e5978a01", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8120300752, "max_line_length": 125, "alphanum_fraction": 0.7172786177, "include": true, "reason": "import numpy", "num_tokens": 1454}
|
#include <vector>
#include "Drone.h"
#include <math.h>
#include <unistd.h>   // sleep/usleep
#include <boost/asio/io_service.hpp>
#include <Fullnavdata.h>
#include <gnuplot_iostream.h>
#include <deque>
/*
* PRIVATE HEADER
*/
#define DRONE_IP "10.42.0.10"
#define CALIBRATION_FILE "res/calib_bd2.xml"
#define HULLPROTECTIONON true
/*
* MAIN
*/
int main(){
    /// ***************************************************************************** CONNECT TO AND CONFIGURE THE DRONE
    Drone d;
    assert(d.connect());

    // Waiting for the drone to be ready
    while(!d.isRunning()){ sleep(1); }
    std::cout << "INITIALISATION IS OK" << std::endl;

    // Flat trim at start. The drone MUST be on the ground at that time
    if(d.blockingFlatTrim()) {
        std::cout << "FLAT TRIM IS OK" << std::endl;
    }else{
        std::cerr << "FLAT TRIM NOT OK" << std::endl;
        return 1;
    }

    assert(d.setHullPresence(HULLPROTECTIONON));
    assert(d.setVideoAutorecord(false));
    //assert(d.stopStreaming());
    assert(d.useFullNavdata());
    sleep(1);
    assert(d.isUsingFullNavdata());

    Gnuplot gp;
    gp << "set title \"Sent timestamp\"\n";
    std::deque<double> uptime;
    int64_t tmp_up = 0;       // initialised so the first delta is well defined
    int64_t prev_tmp_up = 0;
    double delta_time_up;

    Gnuplot gp2;
    gp2 << "set title \"Received timestamp\"\n";
    std::deque<double> rt;
    int64_t tmp_rt = 0;
    int64_t prev_tmp_rt = 0;
    double delta_time_rt;

    while(true)
    {
        if (uptime.size() > 150) {
            uptime.pop_front();
            rt.pop_front();
        }
        prev_tmp_up = tmp_up;
        prev_tmp_rt = tmp_rt;

        d.getFullNavdata()->lock();
        tmp_up = d._navdata->get_sent_drone_uptime();
        tmp_rt = d._navdata->get_received_time_computer();
        d._navdata->release();

        delta_time_rt = tmp_rt - prev_tmp_rt;
        delta_time_up = tmp_up - prev_tmp_up;

        std::cout << prev_tmp_rt << " -> " << tmp_rt << " = " << delta_time_rt << std::endl;
        std::cout << prev_tmp_up << " -> " << tmp_up << " = " << delta_time_up << std::endl;  // was delta_time_rt (copy-paste slip)

        uptime.push_back(delta_time_up);
        rt.push_back(delta_time_rt);

        gp << "plot '-' binary" << gp.binFmt1d(uptime, "array") << "with lines title \"ST\"\n";
        gp.sendBinary1d(uptime);
        gp.flush();

        gp2 << "plot '-' binary" << gp2.binFmt1d(rt, "array") << "with lines title \"RT\"\n";
        gp2.sendBinary1d(rt);
        gp2.flush();

        // 25 ms period, i.e. 40 Hz (the original comment said 100Hz)
        //sleep(1);
        usleep(25 * 1000);
    }
    return 0;
}
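
// Build note (assumption, not from this repo's docs): gnuplot-iostream needs
// Boost (iostreams/system) and a gnuplot binary on PATH at run time;
// Drone/Fullnavdata come from the surrounding Bebop2cpp library.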
|
{"hexsha": "4d4e80515878864461d80c13600a8914d2bdd5e7", "size": 2562, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Bebop2cpp-master/demo/timestamp.cpp", "max_stars_repo_name": "jmenden1/SNAP", "max_stars_repo_head_hexsha": "a253aa052e4568cdc35e7ff5789b3e716ccbb4da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-08-31T23:09:30.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-31T23:09:30.000Z", "max_issues_repo_path": "Bebop2cpp-master/demo/timestamp.cpp", "max_issues_repo_name": "jmenden1/SNAP", "max_issues_repo_head_hexsha": "a253aa052e4568cdc35e7ff5789b3e716ccbb4da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Bebop2cpp-master/demo/timestamp.cpp", "max_forks_repo_name": "jmenden1/SNAP", "max_forks_repo_head_hexsha": "a253aa052e4568cdc35e7ff5789b3e716ccbb4da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2018-01-21T16:21:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-11T18:16:02.000Z", "avg_line_length": 23.9439252336, "max_line_length": 120, "alphanum_fraction": 0.5526932084, "num_tokens": 686}
|
import json, urllib.request
import requests
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import re
import matplotlib.pyplot as plt
# %matplotlib inline  # IPython magic; only valid inside a notebook
Username = input("What's your Username?")
Password = input("What's your Password")
url = input("Enter a URL")
if url == "":
url = "https://knetminer.com/beta/knetspace/api/v1/networks/acf96c4d-74ff-4fb1-a65a-745c20d2981a/?format=json"
api_host = "https://knetminer.com/beta/knetspace" # The Host of the API
#your_knetspace_username = "xhakanai" #Takes Username and password for use in the token.
#your_knetspace_password = "verysecureknetpassword" #Takes Username and password for use in the token.
session = requests.Session() #Requests a session to the server.
token = session.post(api_host + '/auth/jwt/', json={'username_or_email': Username, 'password': Password}).json() #This authenticates the session.
me = session.get(api_host + '/api/v1/me').json()
response = session.get(url) #This establishes a connection to the server.
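# Note: the JWT from /auth/jwt/ is captured in `token` but never reused below;
# the requests.Session presumably stays authenticated via its cookies.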
if response.status_code == 200:
    knetspace_json = response.json()
    print(knetspace_json)  # Prints the response payload, i.e. the network graph JSON for the account connected to the URL.
    knetspace_json['graph'].keys()  # Going through the json
    knetspace_json['graph']['allGraphData']['ondexmetadata']  # Going through parts of the json to find concepts and relations
    meta_data_dict = knetspace_json['graph']['allGraphData']['ondexmetadata']  # Turns out metadata has what is needed
    meta_data_keys = list(meta_data_dict.keys())
    concepts_dict, relationships_dict = {}, {}
    for i, v in enumerate(meta_data_dict.values()):  # For each entry in ondexmetadata, store it as a value
        print(meta_data_keys[i], v)
        if "concepts" in meta_data_keys[i]:
            concepts_dict[meta_data_keys[i]] = v
        if "relations" in meta_data_keys[i]:
            relationships_dict[meta_data_keys[i]] = v

    concept_type_dict, concept_count = {}, []
    concept_id_name = []
    for i in range(0, len(concepts_dict['concepts'])):
        concept_count.append(concepts_dict['concepts'][i]['ofType'])
        concept_id_name.append([concepts_dict['concepts'][i]['ofType'], concepts_dict['concepts'][i]['value']])
        try:
            concept_type_dict[concepts_dict['concepts'][i]['id']] = concept_id_name[i]
        except:
            print(f"Failed for iteration {i}")

    relationships_type_dict, relations_count = {}, []
    relationships_id_name = []
    for i in range(0, len(relationships_dict['relations'])):
        relations_count.append(relationships_dict['relations'][i]['ofType'])
        relationships_id_name.append([relationships_dict['relations'][i]['ofType'], relationships_dict['relations'][i]['toConcept']])
        try:
            relationships_type_dict[relationships_dict['relations'][i]['id']] = relationships_id_name[i]
        except:
            print(f"Failed for iteration {i}")

    abstract_dict = {}
    for i in range(0, len(concepts_dict['concepts'])):
        if concepts_dict['concepts'][i]['ofType'] == 'Publication':
            for j in range(0, len(concepts_dict['concepts'][i]['attributes'])):
                if concepts_dict['concepts'][i]['attributes'][j]['attrname'] == 'Abstract':
                    abstract_dict['value'] = concepts_dict['concepts'][i]['attributes'][j]['value']

    concept_count = dict(Counter(concept_count))
    relations_count = dict(Counter(relations_count))
    stop_words = ['the', 'a', '<span', '', 'is', 'and', 'of', 'are', 'during', 'which', 'both', 'that', 'on', 'two', 'our', 'in', 'well', 'known', 'about', 'We', 'Show', 'Here', 'also', 'has', None]
    abstract_list = []
    # Keep only words that are not stop words (the original nested loop
    # appended each word once per non-matching stop word).
    for word in abstract_dict['value'].split(' '):
        if word not in stop_words:
            abstract_list.append(word)
    abstract_count_dict = dict(Counter(abstract_list))
    af = pd.DataFrame(abstract_count_dict.items(), columns=['Abstract', 'Count'])
    text = " ".join(Abstract for Abstract in af.Abstract)  # built before generate(); the original used `text` one line too early
    word2 = WordCloud(background_color="black", collocations=False).generate(text)
    plt.figure(figsize=(30, 15))
    plt.imshow(word2, interpolation='bilinear')
    plt.axis('off')
    word2.to_file("wordcloud2.png")  # was wordcloud.to_file, but `wordcloud` is only defined further below
    plt.savefig('plot.png', dpi=300, bbox_inches='tight')
    plt.show()

    # Getting relationship counts
    relationship_counts = []
    for i in range(0, len(relationships_dict['relations'])):
        from_concept = relationships_dict['relations'][i]['fromConcept']  # id of the source concept of this relation
        to_concept = relationships_dict['relations'][i]['toConcept']      # id of the target concept of this relation
        for k in concept_type_dict.keys():  # Record the concept value whenever a known concept id matches either end of the relation
            if k == to_concept:
                if "<span style" not in concept_type_dict[to_concept][1]:  # skip values polluted with HTML markup
                    relationship_counts.append(concept_type_dict[k][1])
            if k == from_concept:
                if "<span style" not in concept_type_dict[from_concept][1]:
                    relationship_counts.append(concept_type_dict[k][1])
    relationship_counter = dict(Counter(relationship_counts))
    df = pd.DataFrame(relationship_counter.items(), columns=['Name', 'Count'])
    updated_df = df[df['Name'].apply(lambda x: "PMID" not in x)]
    stopwords = set(STOPWORDS)
    stopwords.update(["Proteins", "Genes", "Relations", "Concepts", "PMID", "Protein"])
    text = " ".join(name for name in updated_df.Name)
    # The original generated three word clouds and kept only the last one; a
    # single generate() call with the stop words applied has the intended effect.
    wordcloud = WordCloud(stopwords=stopwords, max_font_size=150, max_words=1000,
                          background_color="black").generate(text)
    plt.figure(figsize=(30, 15))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    wordcloud.to_file("wordcloud.png")
    plt.savefig('plot.png', dpi=300, bbox_inches='tight')
    plt.show()
else:
    print(f"Response failed due to error code {response.status_code}")
|
{"hexsha": "13c3d5ae79eff8b75243a4b17e0439f7f9333d74", "size": 6802, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "Rothamsted/wordcloud", "max_stars_repo_head_hexsha": "06f26f13126f5599b57485da292a2177b42e2b72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-22T18:07:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-22T18:07:25.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "Rothamsted/wordcloud", "max_issues_repo_head_hexsha": "06f26f13126f5599b57485da292a2177b42e2b72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "Rothamsted/wordcloud", "max_forks_repo_head_hexsha": "06f26f13126f5599b57485da292a2177b42e2b72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-14T10:33:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T10:33:53.000Z", "avg_line_length": 51.5303030303, "max_line_length": 199, "alphanum_fraction": 0.6605410173, "include": true, "reason": "import numpy", "num_tokens": 1609}
|
#!/usr/bin/python
"""Given a GT and a Prediction file, evaluate predictions
"""
import json
import time
import pickle
import sys
import csv
import argparse
import os
import os.path as osp
import shutil
import copy
from collections import defaultdict as dd
import datetime
import numpy as np
import matplotlib.pyplot as plt
from pycocotools import mask as mask_utils
from PIL import Image
from scipy.misc import imread
import pprint
__author__ = "Tribhuvanesh Orekondy"
__maintainer__ = "Tribhuvanesh Orekondy"
__email__ = "orekondy@mpi-inf.mpg.de"
__status__ = "Development"
class VISPRSegEval:
    """
    GT format:
    {
        #--------- One per anno file ---------
        'created_at': '2017-08-29 15:25:11.001926',
        'stats': { ..... },
        'annotations': {
            #--------- One per image ---------
            '2017_235123': {
                'image_id': '2017_235123',
                'image_path': 'images/val2017/2017_18072751.jpg'
                'image_height': 1024,
                'image_width': 2048,
                'attributes': [      #--------- One per instance ---------
                    {
                        'instance_id': 4,
                        'attr_id': 'a105_face_all',
                        'polygons': [[], ],    # polygon [[x1 y1 x2 y2, ...], [x1 y1 x2 y2, ...], ]
                        'area': [float, ...],  # One per region (instance can exist as multiple regions)
                        'bbox': [[x, y, width, height], ...]  # One per region
                        'segmentation': RLE    # polygons encoded as RLE (see MS-COCO format)
                    }
                ]
            }
        }
    }

    Prediction file format:
    (Almost the same as COCO segmentation format: http://cocodataset.org/dataset.htm#format)
    [
        {
            'image_id': '2017_235123',
            'attr_id': 'a105_face_all',
            'segmentation': RLE,
            'score': float,
        }
    ]
    """
    def __init__(self, gt_path, pred_path):
        self.gt_path = gt_path
        self.pred_path = pred_path

        self.vispr_gt_full = json.load(open(gt_path))
        self.vispr_gt = self.vispr_gt_full['annotations']
        self.vispr_pred = json.load(open(pred_path))

        self.evalImgs = dd(list)  # per-image per-category evaluation results [KxAxI] elements
        self.eval = {}            # accumulated evaluation results
        self._gts = dd(list)      # Map (image_id, attr_id) -> [gt_detections, ]
        self._pds = dd(list)      # Map (image_id, attr_id) -> [detections, ]
        self.ious = {}            # Map (image_id, attr_id) -> IoU matrix (preds x gt)

        self.params = Params()
        self.params.imgIds = sorted(np.unique(self.vispr_gt.keys()))
        self.params.attrIds = sorted(np.unique(self.vispr_gt_full['stats']['present_attr']))
        self._paramsEval = {}     # parameters for evaluation

        pred_imgIds = np.unique([e['image_id'] for e in self.vispr_pred])
        print '# Predicted Images = ', len(pred_imgIds)
        print '# GT Images = ', len(self.params.imgIds)
        print '# Common = ', len(set(pred_imgIds) & set(self.params.imgIds))
        print '# Attributes = ', len(self.params.attrIds)

        self.stats = []
        self.stats_str = ""
    def prepare(self):
        """
        Populate _gts and _pds
        :return:
        """
        # --- Prepare GT ---------------------------------------------------------
        next_gt_id = 0
        for image_id, anno_entry in self.vispr_gt.iteritems():
            image_height, image_width = anno_entry['image_height'], anno_entry['image_width']
            for gt in anno_entry['attributes']:
                if gt.get('segmentation', None) is None:
                    # Obtain RLE of mask if this doesn't already exist
                    rles = mask_utils.frPyObjects(gt['polygons'], image_height, image_width)
                    rle = mask_utils.merge(rles)
                    gt['segmentation'] = rle
                    del gt['polygons']  # Free memory
                gt['id'] = '{}_{}'.format(image_id, gt['instance_id'])
                gt['id'] = int(gt['id'].replace('_', ''))
                # gt['id'] = next_gt_id
                # next_gt_id += 1
                gt['iscrowd'] = gt.get('iscrowd', 0)
                gt['area'] = np.sum(gt['area'])
                gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
                gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
                attr_id = gt['attr_id']
                self._gts[(image_id, attr_id)].append(gt)

        # --- Prepare Predictions ------------------------------------------------
        next_pred_id = 0
        next_pred_id_dd = dd(int)
        for pred in self.vispr_pred:
            image_id = pred['image_id']
            attr_id = pred['attr_id']
            assert pred.get('segmentation', None) is not None
            # pred['id'] = next_pred_id
            # next_pred_id += 1
            pred['id'] = '{}_{}'.format(image_id, next_pred_id_dd[image_id])
            pred['id'] = int(pred['id'].replace('_', ''))
            next_pred_id_dd[image_id] += 1
            pred['area'] = mask_utils.area(pred['segmentation'])
            self._pds[(image_id, attr_id)].append(pred)

        self.evalImgs = dd(list)  # per-image per-category evaluation results
        self.eval = {}            # accumulated evaluation results

        # --- Stats ----------------------------------------------------------------
        print
        for idx, (low, high) in enumerate(self.params.areaRng):
            count = 0
            for gts in self._gts.values():
                for gt in gts:
                    if low < gt['area'] < high:
                        count += 1
            print '# GT objects ({}) = {}'.format(self.params.areaRngLbl[idx], count)
            count = 0
            for pds in self._pds.values():
                for pd in pds:
                    if low < pd['area'] < high:
                        count += 1
            print '# PD objects ({}) = {}'.format(self.params.areaRngLbl[idx], count)
    def evaluate(self):
        """
        Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
        :return: None
        """
        tic = time.time()
        print('Running per image evaluation...')
        p = self.params
        p.imgIds = list(np.unique(p.imgIds))
        p.maxDets = sorted(p.maxDets)
        self.params = p

        self.prepare()

        # loop through images, area range, max detection number
        attr_ids = p.attrIds

        computeIoU = self.computeIoU
        self.ious = {(image_id, attr_id): computeIoU(image_id, attr_id)
                     for image_id in p.imgIds
                     for attr_id in attr_ids}

        evaluateImg = self.evaluateImg
        maxDet = p.maxDets[-1]
        self.evalImgs = [evaluateImg(image_id, attr_id, areaRng, maxDet)
                         for attr_id in attr_ids
                         for areaRng in p.areaRng
                         for image_id in p.imgIds
                         ]
        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        print('DONE (t={:0.2f}s).'.format(toc - tic))
    def computeIoU(self, image_id, attr_id):
        """
        If there are <n_g> GT annotations and <n_d> detections, this produces a IoU matrix of size <n_d x n_g>
        :param image_id:
        :param attr_id:
        :return:
        """
        p = self.params
        gt = self._gts[image_id, attr_id]  # List of annotations for this image-category
        dt = self._pds[image_id, attr_id]  # List of predictions for this image-category

        if len(gt) == 0 and len(dt) == 0:
            return []

        inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
        dt = [dt[i] for i in inds]
        if len(dt) > p.maxDets[-1]:
            dt = dt[0:p.maxDets[-1]]

        g = [g['segmentation'] for g in gt]
        d = [d['segmentation'] for d in dt]

        # compute iou between each dt and gt region
        iscrowd = [int(o['iscrowd']) for o in gt]
        ious = mask_utils.iou(d, g, iscrowd)
        return ious
    def evaluateImg(self, imgId, catId, aRng, maxDet):
        """
        perform evaluation for single category and image
        :return: dict (single image results)
        """
        p = self.params
        if p.useCats:
            gt = self._gts[imgId, catId]
            dt = self._pds[imgId, catId]
        else:
            gt = [_ for cId in p.attrIds for _ in self._gts[imgId, cId]]
            dt = [_ for cId in p.attrIds for _ in self._pds[imgId, cId]]

        if len(gt) == 0 and len(dt) == 0:
            return None

        ignore_count = 0
        for g in gt:
            if g['ignore'] or (g['area'] < aRng[0] or g['area'] > aRng[1]):
                g['_ignore'] = 1
                # print "g['ignore'] = {}, (g['area'](={}) < aRng[0](={}) or g['area'](={}) > aRng[1](={}))".format(g['ignore'], g['area'], aRng[0], g['area'], aRng[1])
                ignore_count += 1
            else:
                g['_ignore'] = 0
        # print '{} / {} ignored'.format(ignore_count, len(gt))

        # sort dt highest score first, sort gt ignore last
        gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
        gt = [gt[i] for i in gtind]
        dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
        dt = [dt[i] for i in dtind[0:maxDet]]
        iscrowd = [int(o['iscrowd']) for o in gt]
        # load computed ious
        ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]

        T = len(p.iouThrs)
        G = len(gt)
        D = len(dt)
        gtm = np.zeros((T, G))
        dtm = np.zeros((T, D))
        gtIg = np.array([g['_ignore'] for g in gt])
        dtIg = np.zeros((T, D))
        if not len(ious) == 0:
            for tind, t in enumerate(p.iouThrs):
                for dind, d in enumerate(dt):
                    # information about best match so far (m=-1 -> unmatched)
                    iou = min([t, 1 - 1e-10])
                    m = -1
                    for gind, g in enumerate(gt):
                        # if this gt already matched, and not a crowd, continue
                        if gtm[tind, gind] > 0 and not iscrowd[gind]:
                            continue
                        # if dt matched to reg gt, and on ignore gt, stop
                        if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
                            break
                        # continue to next gt unless better match made
                        if ious[dind, gind] < iou:
                            continue
                        # if match successful and best so far, store appropriately
                        iou = ious[dind, gind]
                        m = gind
                    # if match made store id of match for both dt and gt
                    if m == -1:
                        continue
                    dtIg[tind, dind] = gtIg[m]
                    dtm[tind, dind] = gt[m]['id']
                    gtm[tind, m] = d['id']
        # set unmatched detections outside of area range to ignore
        a = np.array([d['area'] < aRng[0] or d['area'] > aRng[1] for d in dt]).reshape((1, len(dt)))
        dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0)))
        # store results for given image and category
        return {
            'image_id': imgId,
            'category_id': catId,
            'aRng': aRng,
            'maxDet': maxDet,
            'dtIds': [d['id'] for d in dt],
            'gtIds': [g['id'] for g in gt],
            'dtMatches': dtm,
            'gtMatches': gtm,
            'dtScores': [d['score'] for d in dt],
            'gtIgnore': gtIg,
            'dtIgnore': dtIg,
        }
    def accumulate(self, p=None):
        '''
        Accumulate per image evaluation results and store the result in self.eval
        :param p: input params for evaluation
        :return: None
        '''
        print('Accumulating evaluation results...')
        tic = time.time()
        if not self.evalImgs:
            print('Please run evaluate() first')
        # allows input customized parameters
        if p is None:
            p = self.params
        p.catIds = p.attrIds if p.useCats == 1 else [-1]
        T = len(p.iouThrs)
        R = len(p.recThrs)
        K = len(p.catIds) if p.useCats else 1
        A = len(p.areaRng)
        M = len(p.maxDets)
        precision = -np.ones((T, R, K, A, M))  # -1 for the precision of absent categories
        recall = -np.ones((T, K, A, M))

        # create dictionary for future indexing
        _pe = self._paramsEval
        catIds = _pe.attrIds if _pe.useCats else [-1]
        setK = set(catIds)
        setA = set(map(tuple, _pe.areaRng))
        setM = set(_pe.maxDets)
        setI = set(_pe.imgIds)
        # get inds to evaluate
        k_list = [n for n, k in enumerate(p.catIds) if k in setK]
        m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
        a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
        i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
        I0 = len(_pe.imgIds)
        A0 = len(_pe.areaRng)
        # retrieve E at each category, area range, and max number of detections
        for k, k0 in enumerate(k_list):
            Nk = k0 * A0 * I0
            for a, a0 in enumerate(a_list):
                Na = a0 * I0
                for m, maxDet in enumerate(m_list):
                    E = [self.evalImgs[Nk + Na + i] for i in i_list]
                    E = [e for e in E if e is not None]
                    if len(E) == 0:
                        continue
                    dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])

                    # different sorting method generates slightly different results.
                    # mergesort is used to be consistent as Matlab implementation.
                    inds = np.argsort(-dtScores, kind='mergesort')

                    dtm = np.concatenate([e['dtMatches'][:, 0:maxDet] for e in E], axis=1)[:, inds]
                    dtIg = np.concatenate([e['dtIgnore'][:, 0:maxDet] for e in E], axis=1)[:, inds]
                    gtIg = np.concatenate([e['gtIgnore'] for e in E])
                    npig = np.count_nonzero(gtIg == 0)
                    if npig == 0:
                        continue
                    tps = np.logical_and(dtm, np.logical_not(dtIg))
                    fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))

                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
                    for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
                        tp = np.array(tp)
                        fp = np.array(fp)
                        nd = len(tp)
                        rc = tp / npig
                        pr = tp / (fp + tp + np.spacing(1))
                        q = np.zeros((R,))

                        if nd:
                            recall[t, k, a, m] = rc[-1]
                        else:
                            recall[t, k, a, m] = 0

                        # numpy is slow without cython optimization for accessing elements
                        # use python array gets significant speed improvement
                        pr = pr.tolist()
                        q = q.tolist()

                        for i in range(nd - 1, 0, -1):
                            if pr[i] > pr[i - 1]:
                                pr[i - 1] = pr[i]

                        inds = np.searchsorted(rc, p.recThrs, side='left')
                        try:
                            for ri, pi in enumerate(inds):
                                q[ri] = pr[pi]
                        except:
                            pass
                        precision[t, :, k, a, m] = np.array(q)
        self.eval = {
            'params': p,
            'counts': [T, R, K, A, M],
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'precision': precision,
            'recall': recall,
        }
        toc = time.time()
        print('DONE (t={:0.2f}s).'.format(toc - tic))
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied on the default parameter setting
'''
def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100, catind=None):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} | attr_id={:>25s} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap == 1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
catStr = 'all' if catind is None else p.attrIds[catind]
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
if catind is None:
s = s[:, :, :, aind, mind]
else:
s = s[:, :, catind, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
if catind is None:
s = s[:, :, aind, mind]
else:
s = s[:, catind, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
line = iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, catStr, mean_s)
            print(line)
self.stats_str += line + '\n'
return mean_s
def _summarizeDets():
n_attr = len(self.params.attrIds)
stats = np.zeros((12 + n_attr,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
            print()
for k in range(n_attr):
stats[12+k] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2], catind=k)
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
self.stats = _summarizeDets()
    def __str__(self):
        self.summarize()
        return self.stats_str
class Params:
"""
Adapted from coco evaluation api
"""
def setDetParams(self):
self.imgIds = []
self.attrIds = []
        # np.arange causes trouble: its data points can land slightly above the true
        # values, so use np.linspace with an explicit integer sample count instead.
        self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [1, 10, 100]
self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'small', 'medium', 'large']
self.useCats = 1
def __init__(self):
self.setDetParams()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("gt_file", type=str, help="GT File")
parser.add_argument("pred_file", type=str, help="Predicted file")
parser.add_argument("-r", "--row", action='store_true', default=False,
help="Print an additional row to aid pasting results into a spreadsheet")
args = parser.parse_args()
params = vars(args)
vispr = VISPRSegEval(params['gt_file'], params['pred_file'])
    print()
vispr.evaluate()
vispr.accumulate()
vispr.summarize()
if params['row']:
        print()
        # You can now copy-paste this line into a spreadsheet. Seems like this does not work from within tmux.
        print('Overall scores: ')
        print('\t'.join(map(lambda x: '{}'.format(x), vispr.stats[:12].tolist())))
        print('Class scores: ')
        print('\t'.join(map(lambda x: '{}'.format(x), vispr.stats[12:].tolist())))
if __name__ == '__main__':
main()
|
{"hexsha": "c6dd07a811d6e90bb589c2d0e616bf450894f1f6", "size": 21661, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/evaltools/evaluate.py", "max_stars_repo_name": "tribhuvanesh/visual_redactions", "max_stars_repo_head_hexsha": "93fac7b5cd9fc7e81341380408df6a8a4f8f6189", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2018-07-03T09:30:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-23T05:46:11.000Z", "max_issues_repo_path": "tools/evaltools/evaluate.py", "max_issues_repo_name": "tribhuvanesh/visual_redactions", "max_issues_repo_head_hexsha": "93fac7b5cd9fc7e81341380408df6a8a4f8f6189", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-07-03T13:42:33.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-15T13:17:17.000Z", "max_forks_repo_path": "tools/evaltools/evaluate.py", "max_forks_repo_name": "tribhuvanesh/visual_redactions", "max_forks_repo_head_hexsha": "93fac7b5cd9fc7e81341380408df6a8a4f8f6189", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-07-25T02:47:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T13:28:49.000Z", "avg_line_length": 41.1024667932, "max_line_length": 168, "alphanum_fraction": 0.4960066479, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5654}
|
import time
import numpy
import struct
import threading
from . import tunePDNR_covMat_v3
from .touchcomm_manager import TouchcommManager
debug = False
def log(message):
    if debug:
        print(message)
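# Each report payload is a stream of 4-byte little-endian float32 values;
# the helpers below unpack it chunk by chunk.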
def convert_chunk(i):
return struct.unpack('<f', bytearray(i))[0]
def convert_to_float(i, n):
for x in range(0, len(i), n):
chunk = i[x:n+x]
if len(chunk) < n:
break
yield convert_chunk(chunk)
class GearSelectionManager(object):
_instance = None
_lock = threading.Lock()
_initialized = False
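    # Thread-safe singleton: double-checked locking guards instance creation across threads.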
def __new__(cls, *args, **kwargs):
if not cls._instance:
with cls._lock:
if not cls._instance:
cls._instance = super(GearSelectionManager, cls).__new__(cls)
return cls._instance
def __init__(self):
log("GearSelectionManager")
if GearSelectionManager._initialized:
return
self._thread = None
self._stop_event = False
self._tc = None
self._tcm = None
self._total = 0
self._progress = 0
self._sweep = ""
self._pdnr_index = 0
self._pdnr_tuning = []
self._noise_output = [[], [], []]
GearSelectionManager._initialized = True
def _connect_tc(self):
self._tcm = TouchcommManager()
self._tc = self._tcm.getInstance()
def _disconnect_tc(self):
if self._tcm is not None:
self._tcm.disconnect()
self._tcm = None
def _set_static_config(self, static):
self._tc.sendCommand(56)
self._tc.getResponse()
arg = self._tc.decoder.encodeStaticConfig(static)
self._tc.sendCommand(57, arg)
self._tc.getResponse()
self._tc.sendCommand(55)
self._tc.getResponse()
time.sleep(0.1)
def _set_dynamic_config(self, dynamic):
self._tc.setDynamicConfig(dynamic)
def _set_pdnr(self, static, basisAmpStdevTransRx, basisVectorsTransRx, basisAmpStdevAbsRx, basisVectorsAbsRx, basisAmpStdevAbsTx, basisVectorsAbsTx):
static['ifpConfig.pdnrConfigs[0].basisAmpStdevTransRx'] = basisAmpStdevTransRx
static['ifpConfig.pdnrConfigs[0].basisVectorsTransRx'] = basisVectorsTransRx
static['ifpConfig.pdnrConfigs[0].basisAmpStdevAbsRx'] = basisAmpStdevAbsRx
static['ifpConfig.pdnrConfigs[0].basisVectorsAbsRx'] = basisVectorsAbsRx
static['ifpConfig.pdnrConfigs[0].basisAmpStdevAbsTx'] = basisAmpStdevAbsTx
static['ifpConfig.pdnrConfigs[0].basisVectorsAbsTx'] = basisVectorsAbsTx
def _set_pdnr_to_zeros(self, static):
self._set_pdnr(static,
[0] * len(static['ifpConfig.pdnrConfigs[0].basisAmpStdevTransRx']),
[0] * len(static['ifpConfig.pdnrConfigs[0].basisVectorsTransRx']),
[0] * len(static['ifpConfig.pdnrConfigs[0].basisAmpStdevAbsRx']),
[0] * len(static['ifpConfig.pdnrConfigs[0].basisVectorsAbsRx']),
[0] * len(static['ifpConfig.pdnrConfigs[0].basisAmpStdevAbsTx']),
[0] * len(static['ifpConfig.pdnrConfigs[0].basisVectorsAbsTx']))
def _set_trans_sensing_freqs(self, static, integDur, rstretchDur):
static['integDur'][2] = integDur
static['daqParams.freqTable[2].rstretchDur'] = rstretchDur
def _set_absTx_sensing_freqs(self, static, integDur, rstretchDur):
static['integDur'][4] = integDur
static['daqParams.freqTable[4].rstretchDur'] = rstretchDur
def _set_absRx_sensing_freqs(self, static, integDur, rstretchDur):
static['integDur'][3] = integDur
static['daqParams.freqTable[3].rstretchDur'] = rstretchDur
def set_trans_gears(self, gears, num_gears, commit):
if not gears:
return
try:
self._connect_tc()
integDur = gears[0]
rstretchDur = [0] * num_gears
for idx in range(1, len(gears)):
rstretchDur[idx] = gears[idx] - integDur
self._tc.reset()
self._tc.getAppInfo()
static = self._tc.getStaticConfig()
self._set_trans_sensing_freqs(static, integDur, rstretchDur)
self._set_static_config(static)
if commit:
self._tc.commitConfig()
except Exception as e:
print("GearSelectionManager Exception (set_trans_gears): {}".format(e))
self._disconnect_tc()
def set_abs_gears(self, gears, num_gears, commit):
if not gears:
return
try:
self._connect_tc()
integDur = gears[0]
rstretchDur = [0] * num_gears
for idx in range(1, len(gears)):
rstretchDur[idx] = gears[idx] - integDur
self._tc.reset()
self._tc.getAppInfo()
static = self._tc.getStaticConfig()
self._set_absTx_sensing_freqs(static, integDur, rstretchDur)
self._set_absRx_sensing_freqs(static, integDur, rstretchDur)
self._set_static_config(static)
if commit:
self._tc.commitConfig()
except Exception as e:
print("GearSelectionManager Exception (set_abs_gears): {}".format(e))
self._disconnect_tc()
def clear_pdnr_tuning(self):
self._pdnr_tuning = []
self._pdnr_index = 0
def pre_pdnr_sweep(self, int_durs, num_gears, baseline_frames, gram_data_frames):
log("int_durs = {}, num_gears = {}, baseline_frames = {}, gram_data_frames = {}".format(int_durs, num_gears, baseline_frames, gram_data_frames))
try:
time.sleep(0.1)
self._connect_tc()
self._total = len(int_durs) * 3
self._progress = 0
self._sweep = "started"
self._tc.reset()
self._tc.getAppInfo()
static = self._tc.getStaticConfig()
self._set_pdnr_to_zeros(static)
static['adnsEnabled'] = 1
self._set_static_config(static)
dynamic = self._tc.getDynamicConfig()
dynamic['disableNoiseMitigation'] = 1
dynamic['inhibitFrequencyShift'] = 1
dynamic['requestedFrequency'] = 0
dynamic['requestedFrequencyAbs'] = 0
self._set_dynamic_config(dynamic)
rx_count = static['rxCount']
tx_count = static['txCount']
self._pdnr_tuning.append([])
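            # Pack the baseline and gram-data frame counts as two little-endian
            # 16-bit command arguments.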
covmat_cmd_arg = [baseline_frames & 0xff, (baseline_frames >> 8) & 0xff, gram_data_frames & 0xff, (gram_data_frames >> 8) & 0xff]
for int_dur in int_durs:
if self._stop_event:
self._tc.reset()
self._sweep = "stopped"
return
raw_reports = []
float_reports = []
self._set_trans_sensing_freqs(static, int_dur, [0]*num_gears)
self._set_absTx_sensing_freqs(static, int_dur, [0]*num_gears)
self._set_absRx_sensing_freqs(static, int_dur, [0]*num_gears)
self._set_static_config(static)
self._tc.sendCommand(0xC3, covmat_cmd_arg)
self._tc.getResponse()
log('Received response to COMM_CMD_GET_PDNR_COVMAT')
while True:
if self._stop_event:
self._tc.reset()
self._sweep = "stopped"
return
report = self._tc.getReport(10)
log(report)
raw_reports.append(report)
self._progress += 1
if len(raw_reports) >= 3:
break
log('Received %d reports\n' % (len(raw_reports)))
for report in raw_reports:
converted = list(convert_to_float(report[1][8:], 4))
#log(report)
log('Report index %d' % (report[1][0]))
log('%d data entries' % (len(converted)))
log(converted)
log('\n')
float_reports.append(converted)
float_reports[0] = numpy.array(float_reports[0][0:rx_count*rx_count]).reshape(-1, rx_count)
float_reports[1] = numpy.array(float_reports[1][0:rx_count*rx_count]).reshape(-1, rx_count)
float_reports[2] = numpy.array(float_reports[2][0:20*20]).reshape(-1, 20)
config = {
'updatePdnrConfigData': False,
'imageRxes': static['imageRxes'],
'adnsEnabled': static['adnsEnabled'],
'ifpConfig.pdnrConfigs[0].basisAmpStdevAbsRx': static['ifpConfig.pdnrConfigs[0].basisAmpStdevAbsRx']
}
pdnr = tunePDNR_covMat_v3.pdnrTuningFromCovMats(config, 1, gram_data_frames, tx_count, float_reports[0], float_reports[1], float_reports[2])
pdnr['basisAmpStdevTransRx'] = [float(s) for s in pdnr['basisAmpStdevTransRx'].split(',')]
pdnr['basisVectorsTransRx'] = [int(s) for s in pdnr['basisVectorsTransRx'].split(',')]
pdnr['basisAmpStdevAbsRx'] = [float(s) for s in pdnr['basisAmpStdevAbsRx'].split(',')]
pdnr['basisVectorsAbsRx'] = [int(s) for s in pdnr['basisVectorsAbsRx'].split(',')]
pdnr['basisAmpStdevAbsTx'] = [float(s) for s in pdnr['basisAmpStdevAbsTx'].split(',')]
pdnr['basisVectorsAbsTx'] = [int(s) for s in pdnr['basisVectorsAbsTx'].split(',')]
self._pdnr_tuning[-1].append(pdnr)
self._sweep = "completed"
log(self._pdnr_tuning)
except Exception as e:
print("GearSelectionManager Exception (pre_pdnr_sweep): {}".format(e))
self._sweep = "stopped"
self._disconnect_tc()
def pdnr_sweep(self, int_durs, num_gears, baseline_frames, gram_data_frames):
log("int_durs = {}, num_gears = {}, baseline_frames = {}, gram_data_frames = {}".format(int_durs, num_gears, baseline_frames, gram_data_frames))
try:
time.sleep(0.1)
self._connect_tc()
self._noise_output = [[], [], []]
self._total = len(int_durs) * 3
self._progress = 0
self._sweep = "started"
self._tc.reset()
self._tc.getAppInfo()
static = self._tc.getStaticConfig()
dynamic = self._tc.getDynamicConfig()
dynamic['disableNoiseMitigation'] = 0
dynamic['inhibitFrequencyShift'] = 1
dynamic['requestedFrequency'] = 0
dynamic['requestedFrequencyAbs'] = 0
self._set_dynamic_config(dynamic)
covmat_cmd_arg = [baseline_frames & 0xff, (baseline_frames >> 8) & 0xff, gram_data_frames & 0xff, (gram_data_frames >> 8) & 0xff]
for idx, int_dur in enumerate(int_durs):
if self._stop_event:
self._tc.reset()
self._sweep = "stopped"
return
raw_reports = []
self._set_pdnr(static,
self._pdnr_tuning[self._pdnr_index][idx]['basisAmpStdevTransRx'],
self._pdnr_tuning[self._pdnr_index][idx]['basisVectorsTransRx'],
self._pdnr_tuning[self._pdnr_index][idx]['basisAmpStdevAbsRx'],
self._pdnr_tuning[self._pdnr_index][idx]['basisVectorsAbsRx'],
self._pdnr_tuning[self._pdnr_index][idx]['basisAmpStdevAbsTx'],
self._pdnr_tuning[self._pdnr_index][idx]['basisVectorsAbsTx'])
self._set_trans_sensing_freqs(static, int_dur, [0]*num_gears)
self._set_absTx_sensing_freqs(static, int_dur, [0]*num_gears)
self._set_absRx_sensing_freqs(static, int_dur, [0]*num_gears)
static['forceFreshReport'] = 1
self._set_static_config(static)
self._tc.sendCommand(0xC3, covmat_cmd_arg)
self._tc.getResponse()
log('Received response to COMM_CMD_GET_PDNR_COVMAT')
while True:
if self._stop_event:
self._tc.reset()
self._sweep = "stopped"
return
report = self._tc.getReport(10)
raw_reports.append(report)
self._progress += 1
if len(raw_reports) >= 3:
break
log('Received %d reports\n' % (len(raw_reports)))
self._noise_output[0].append(next(convert_to_float(raw_reports[0][1][4:8], 4)))
self._noise_output[1].append(next(convert_to_float(raw_reports[1][1][4:8], 4)))
self._noise_output[2].append(next(convert_to_float(raw_reports[2][1][4:8], 4)))
self._pdnr_index += 1
time.sleep(0.1)
self._sweep = "completed"
log(self._noise_output)
except Exception as e:
print("GearSelectionManager Exception (pdnr_sweep): {}".format(e))
self._sweep = "stopped"
self._disconnect_tc()
def get_noise_output(self):
return self._noise_output
def get_progress(self):
return self._total, self._progress, self._sweep
def reset_progress(self):
self._noise_output = [[], [], []]
self._total = 0
self._progress = 0
self._sweep = ""
def join(self):
if self._thread is not None:
self._thread.join()
self._thread = None
def stop(self, disconnect=False):
self._stop_event = True
self.join()
if disconnect:
self._disconnect_tc()
def function(self, fn, args=None):
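        # Generic dispatcher: sweep functions run on a background thread;
        # everything else is called synchronously and its result returned.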
log(fn)
data = {}
try:
if args is None:
data = getattr(self, fn)()
else:
if "sweep" in fn:
self._thread = threading.Thread(target=getattr(self, fn), args=args)
self._stop_event = False
self._thread.start()
else:
data = getattr(self, fn)(*args)
except Exception as e:
print("GearSelectionManager Exception ({}): {}".format(fn, e))
raise e
return data
|
{"hexsha": "d6fafe56f059cf9fe177c57b57664860c14ca25c", "size": 14432, "ext": "py", "lang": "Python", "max_stars_repo_path": "webds_api/gear_selection_manager.py", "max_stars_repo_name": "qmao/webds_api", "max_stars_repo_head_hexsha": "ca5bf9ad3c1304be223b7e47c57ee10fb40d92e1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "webds_api/gear_selection_manager.py", "max_issues_repo_name": "qmao/webds_api", "max_issues_repo_head_hexsha": "ca5bf9ad3c1304be223b7e47c57ee10fb40d92e1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "webds_api/gear_selection_manager.py", "max_forks_repo_name": "qmao/webds_api", "max_forks_repo_head_hexsha": "ca5bf9ad3c1304be223b7e47c57ee10fb40d92e1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0805970149, "max_line_length": 156, "alphanum_fraction": 0.5722699557, "include": true, "reason": "import numpy", "num_tokens": 3529}
|
[STATEMENT]
lemma path_append_target:
"target q (p1@p2) = target (target q p1) p2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. target q (p1 @ p2) = target (target q p1) p2
[PROOF STEP]
by (induction p1) (simp+)
|
{"llama_tokens": 97, "file": "FSM_Tests_FSM", "length": 1}
|
#!/usr/bin/env python3
import os
import sys
from scipy.spatial.transform import Rotation as R
import numpy as np
import torch as tr
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Line3DCollection
# import kornia.geometry
from raycast import ray_triangle_intersection
import kornia.geometry
import pyqtgraph.opengl as gl
import time
from collections import namedtuple
try:
# sys.path.append(os.path.expanduser(
# '~/Repos/Experiments/PhoneBot/control/'))
from phonebot.core.vis.viewer.proxy_commands import AddPlotCommand, AddLinesCommand
from phonebot.core.vis.viewer.proxy_command import ProxyCommand
from phonebot.core.vis.viewer import ProxyViewer
except ImportError:
    print('Failed to import phonebot viewer modules.')
class AddGridCommand(ProxyCommand):
def __init__(self, name='grid', size=(100, 100, 1), spacing=(1, 1, 1)):
self.size_ = size
self.spacing_ = spacing
super().__init__(name)
def __call__(self, viewer: ProxyViewer):
item = gl.GLGridItem()
item.setSize(*self.size_)
item.setSpacing(*self.spacing_)
viewer.items_[self.name] = item
viewer.widget_.addItem(item)
class AddPointsCommand(ProxyCommand):
def __init__(self, name='points'):
super().__init__(name)
def __call__(self, viewer: ProxyViewer):
item = gl.GLScatterPlotItem()
item.pos = np.empty((0, 3)) # prevent abort due to pyqtgraph bug
viewer.items_[self.name] = item
viewer.handlers_[self.name] = item.setData
viewer.widget_.addItem(item)
def cube_signs():
return np.asarray([(1, -1, 1),
(1, -1, -1),
(1, 1, -1),
(1, 1, 1),
(-1, -1, 1),
(-1, -1, -1),
(-1, 1, -1),
(-1, 1, 1)], dtype=np.int32)
def cube_indices():
return np.asarray([(4, 0, 3),
(4, 3, 7),
(0, 1, 2),
(0, 2, 3),
(1, 5, 6),
(1, 6, 2),
(5, 4, 7),
(5, 7, 6),
(7, 3, 2),
(7, 2, 6),
(0, 5, 1),
(0, 4, 5)], dtype=np.int32)
class Config(object):
def __init__(self):
# Ray-tracing configuration.
self.fov = (0.5*np.deg2rad(45), 2*np.pi) # Field of view
self.res = (32, 1024) # Resolution
self.fps = 10.0 # 10Hz
self.win = 0.1 # Aggregation Window, set to 1 rev
# Scene generation configuration.
self.xlim = [-60.0, 60.0]
self.ylim = [-60.0, 60.0]
self.zlim = [0.0, 3.0] # TODO(yycho0108): Validate +z=up
self.lim = np.asarray(
[self.xlim, self.ylim, self.zlim]).astype(np.float32) # 3x2
self.min_num_objects = 1
self.max_num_objects = 64
self.max_gen_iterations = 128
self.dim_lim = np.asarray([
[0.5, 3.0],
[1.5, 4.5],
[1.0, 2.0]], dtype=np.float32)
self.vmax = 18.0 # m/s
self.wmax = 2.0 # rad/s
self.ray_z = 2.0
self.max_ray_distance = 100.0
# Compute configuration.
self.use_gpu = True
self.finalize() # Compute cache for derived parameters.
        self.convert()  # Move derived numpy arrays onto the compute device as torch tensors.
def finalize(self):
v_fov, h_fov = self.fov
v_res, h_res = self.res
# Create uniformly spaced bins.
self.v_ang = np.linspace(-v_fov/2, v_fov/2, v_res)
self.h_ang = np.linspace(-h_fov/2, h_fov/2, h_res)
# Bins -> Grid
self.grid = np.stack(np.meshgrid(
self.v_ang, self.h_ang, indexing='ij'), axis=-1)
self.v_grid, self.h_grid = [self.grid[..., i] for i in range(2)]
# Grid -> Rays
v_cos, v_sin = np.cos(self.v_grid), np.sin(self.v_grid)
h_cos, h_sin = np.cos(self.h_grid), np.sin(self.h_grid)
self.rays = np.stack(
[h_cos * v_cos, h_sin * v_cos, v_sin], axis=-1).astype(np.float32)
# Build Approximate timestamp offsets (mostly experimental)
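        # Each horizontal bin is captured slightly later within one spin period
        # (1/fps), emulating a rotating sensor.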
stamps = (1.0 / self.fps) * (self.h_grid / (2*np.pi))
stamps -= stamps.min() # start from offset=0, somewhat arbitrarily.
self.stamps = stamps.astype(np.float32)
self.use_gpu = self.use_gpu and tr.cuda.is_available()
def convert(self):
device = tr.device('cuda:0' if self.use_gpu else 'cpu')
self.device = device
pos_lim = np.float32([self.xlim, self.ylim, [-np.pi, np.pi]])
vel_lim = np.float32([[0.0, self.vmax], [0.0, self.wmax]])
d = dict(
dim_lim=tr.from_numpy(self.dim_lim).to(device),
pos_lim=tr.from_numpy(pos_lim).to(device),
vel_lim=tr.from_numpy(vel_lim).to(device),
rays=tr.from_numpy(self.rays).to(device),
stamps=tr.from_numpy(self.stamps).to(device),
cube_signs=tr.from_numpy(cube_signs()).to(device),
cube_indices=tr.from_numpy(cube_indices()).long().to(device),
)
self.values = namedtuple('Values', sorted(d))(**d)
def get_bounding_box(dim, pose):
""" NOTE(yycho0108): 2D top-down bbox! """
ccw_signs = tr.tensor([(1, 1), (1, -1), (-1, -1), (-1, 1)]) # (4,2)
corners = 0.5 * dim[..., None, :2] * ccw_signs # (...,4,2)
c, s = tr.cos(pose[..., -1]), tr.sin(pose[..., -1])
rmat = tr.stack([c, -s, s, c], dim=-1).reshape(pose.shape[:-1] + (2, 2))
corners = tr.einsum('...ac,...bc->...ba', rmat, corners)
corners += pose[..., None, :2]
return corners
def bbox_intersects(lhs, rhs):
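    # Separating Axis Theorem: two convex polygons overlap iff no edge normal
    # of either polygon separates their projections.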
# assert(lhs.shape == rhs.shape)
for polygon in [lhs, rhs]:
# for each polygon, look at each edge of the polygon, and determine if it separates
# the two shapes
for i1 in range(len(polygon)):
# grab 2 vertices to create an edge
i2 = (i1 + 1) % len(polygon)
p1 = polygon[i1]
p2 = polygon[i2]
# find the line perpendicular to this edge
nx, ny = p2[1] - p1[1], p1[0] - p2[0]
minA, maxA = None, None
# for each vertex in the first shape, project it onto the line perpendicular to the edge
# and keep track of the min and max of these values
for j in range(len(lhs)):
projected = nx * lhs[j][0] + ny * lhs[j][1]
if (minA is None) or (projected < minA):
minA = projected
if (maxA is None) or (projected > maxA):
maxA = projected
# for each vertex in the second shape, project it onto the line perpendicular to the edge
# and keep track of the min and max of these values
minB, maxB = None, None
for j in range(len(rhs)):
projected = nx * rhs[j][0] + ny * rhs[j][1]
if (minB is None) or (projected < minB):
minB = projected
if (maxB is None) or (projected > maxB):
maxB = projected
            # if there is no overlap between the projections, the edge we are looking at separates the two
# polygons, and we know there is no overlap
if (maxA < minB) or (maxB < minA):
return False
return True
def create_config() -> Config:
return Config()
def create_vehicle(num=(), config: Config = Config()):
# Create distributions.
dim_dist = tr.distributions.Uniform(
config.values.dim_lim[:, 0],
config.values.dim_lim[:, 1]
)
pos_dist = tr.distributions.Uniform(
config.values.pos_lim[:, 0],
config.values.pos_lim[:, 1]
)
vel_dist = tr.distributions.Uniform(
config.values.vel_lim[:, 0],
config.values.vel_lim[:, 1]
)
# Generate vehicle from configured distributions.
dim = dim_dist.rsample(num)
pos = pos_dist.rsample(num)
vel = vel_dist.rsample(num)
v = vel[..., 0]
w = vel[..., 1]
h = pos[..., -1]
c, s = tr.cos(h), tr.sin(h)
vx, vy = v*c, v*s
vel = tr.stack([vx, vy, w], dim=-1)
return (dim, pos), vel
def create_scene(config: Config):
# Determine number of objects to generate.
num_objects = np.random.randint(
config.min_num_objects, config.max_num_objects+1)
(dim, pos), vel = create_vehicle((num_objects,), config)
# Also create ground
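    # The ground is modeled as a single thin, oversized box so rays that miss
    # every vehicle still terminate on geometry.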
gdim = config.lim[..., 1] - config.lim[..., 0]
gdim[0] += 2.0 * config.max_ray_distance
gdim[1] += 2.0 * config.max_ray_distance
gdim[2] = 0.01
gpos = (0.0, 0.0, 0.0)
gvel = (0.0, 0.0, 0.0)
# Append ground.
dim = tr.cat((dim, tr.tensor(gdim).to(config.device).view(1, 3)), 0)
pos = tr.cat((pos, tr.tensor(gpos).to(config.device).view(1, 3)), 0)
vel = tr.cat((vel, tr.tensor(gvel).to(config.device).view(1, 3)), 0)
return [dim, pos], vel
# NOTE(ycho-or): see below for no-collision scene generation.
# objects = []
# bboxes = []
# for i in range(num_objects):
# for _ in range(config.max_gen_iterations):
# # Generate random vehicle.
# vehicle, velocity = create_vehicle((), config)
# dim, pose = vehicle
# bbox = get_bounding_box(dim, pose)
# # Check collision with existing objects.
# for prev_bbox in bboxes:
# if bbox_intersects(prev_bbox, bbox):
# continue
# # Append object to result.
# objects.append((vehicle, velocity))
# bboxes.append(bbox)
# break
    # return objects
def random_rotation_matrix(size: tr.Size):
# Unit vector.
theta = tr.acos(2.0 * tr.rand(size) - 1.0)
phi = (2 * np.pi) * tr.rand(size)
ct, st = tr.cos(theta), tr.sin(theta)
cp, sp = tr.cos(phi), tr.sin(phi)
    rvec = tr.stack([cp*ct, cp*st, sp], dim=-1)
# Angle mag.
angle = (2 * np.pi) * tr.rand(size)
rvec *= angle[..., None]
rmat = kornia.angle_axis_to_rotation_matrix(rvec.view(-1, 3))
return rmat.view(size + (3, 3))
def create_rays(pose: tr.Tensor, velocity: tr.Tensor, stamp: tr.Tensor, config: Config):
# Currently pose/velocity are both assumed 2D.
# NOTE(yycho0108): Currently ray transform is coincident to vehicle transform.
# Consider applying offsets here instead ?
ray_pose = pose[None, None, :] + \
velocity[None, None, :] * \
config.values.stamps[..., None]
# Convert 2D (x,y) -> 3D (x,y,z) Ray origin
ray_z = tr.full_like(ray_pose[..., 2:], config.ray_z)
ray_origin = tr.cat((ray_pose[..., :2], ray_z), -1)
# ray_origin = np.insert(ray_pose[..., :2], 2, config.ray_z, axis=-1)
# Rotate vector according to pose
#rvec = np.zeros_like(ray_pose)
#rvec[..., :2] = 0
#r = R.from_rotvec(rvec.reshape(-1, 3))
#ray_direction = r.apply(config.rays.reshape(-1, 3))
rvec = tr.zeros_like(ray_pose)
rvec[..., 2] = ray_pose[..., 2]
rmat = kornia.angle_axis_to_rotation_matrix(rvec.view(-1, 3)).view(
ray_pose.shape[:-1] + (3, 3)).float()
ray_dirs = tr.einsum('...ab,...b->...a', rmat, config.values.rays)
ray_dirs = ray_dirs.view(config.rays.shape)
# Format output and return.
rays = (ray_origin, ray_dirs)
return (rays, config.values.stamps + stamp)
def get_axes(h):
shape = h.shape + (3, 3)
axes = np.zeros(shape)
c, s = np.cos(h), np.sin(h)
axes[..., 0, 0] = c
axes[..., 0, 1] = -s
axes[..., 1, 0] = s
axes[..., 1, 1] = c
axes[..., 2, 2] = 1
return axes
def get_vertices(dim: tr.Tensor, poses: tr.Tensor, config: Config):
    # Create canonical bounding box from dimensions.
bbox = 0.5 * dim[:, None] * config.values.cube_signs[None, :]
# bbox -> O83
# Extract transform from pose.
poses = poses.view((-1,) + poses.shape[-2:]) # -> (R, O, 3)
rvec = tr.tensor([0, 0, 1]).to(config.device).view(1, 1, 3) * poses
rmat = kornia.angle_axis_to_rotation_matrix(
rvec.view(-1, 3)).view(poses.shape[:-1] + (3, 3))
# rmat -> (RO33)
# Apply transform.
bbox = tr.einsum('abde,bce->abcd', rmat.float(), bbox) # RO83
bbox[..., :2] += poses[:, :, None, :2]
bbox[..., 2] += 0.5*dim[None, :, None, 2] # lift bbox up (s.t. zmin=0)
# Extract triangles from cube.
vertices = bbox[:, :, config.values.cube_indices] # (R,O,12,3,3)
return vertices
def get_triangles(stamps, scene, config: Config):
# Extract scene.
(dim, pos0), vel = scene
# Apply motion to initial pose, based on velocity.
pos = pos0.view(1, -1, 3) + vel.view(1, -1, 3) * stamps.view(-1, 1, 1)
pos = pos.view(stamps.shape + pos0.shape) # => (R,O,3)
vertices = get_vertices(dim, pos, config)
# Format result and return.
return vertices.reshape(-1, 3, 3)
def raytrace(rays, stamps, scene, config):
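    # Intersect each ray (at its own timestamp) against all scene triangles;
    # returns a hit mask and the corresponding hit distances.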
ray_origin, ray_vector = rays
triangles = get_triangles(stamps, scene, config)
triangles = triangles.reshape(ray_origin.shape[:-1] + (-1, 3, 3))
hits, dists = ray_triangle_intersection(
ray_origin.float(),
ray_vector.float(),
triangles.float(), broadcast_triangle=False,
max_distance=config.max_ray_distance
)
return hits, dists
def apply_pose(cloud, stamp, stamps, pose, velocity, config: Config):
# Extract transforms.
dt = stamps - stamp
pose_at_stamp = pose[None, None, :] + \
dt[..., None] * velocity[None, None, :]
# Rotation about z axis
rvec = tr.zeros_like(pose_at_stamp)
rvec[..., 2] = pose_at_stamp[..., 2]
rmat = kornia.angle_axis_to_rotation_matrix(
rvec.view(-1, 3)).view(rvec.shape[:-1]+(3, 3)).float()
# Apply transforms.
cloud = tr.einsum('...ab,...b->...a', rmat, cloud)
cloud[..., :2] += pose[..., :2]
cloud[..., 2] += config.ray_z
return cloud
def main():
# Generate scene from config.
config = create_config()
scene = create_scene(config)
(dim, pose), velocity = create_vehicle((), config)
# Initialize Viewer.
data_queue, event_queue, command_queue = ProxyViewer.create()
command_queue.put(AddGridCommand(
name='grid', size=(400, 400, 1), spacing=(10, 10, 10)))
command_queue.put(AddPointsCommand(name='cloud'))
command_queue.put(AddLinesCommand(name='obstacles'))
command_queue.put(AddPointsCommand(name='self'))
while True:
if not event_queue.empty():
key = event_queue.get_nowait()
            if key == ord('Q'):
break
rays, stamps = create_rays(pose, velocity, 0.0, config)
hits, dists = raytrace(rays, stamps, scene, config)
range_image = tr.where(hits, dists, tr.full_like(dists, float('inf')))
range_image = tr.min(range_image, dim=-1).values
cloud = config.values.rays * range_image[..., None]
cloud = apply_pose(cloud, 0.0, stamps, pose, velocity, config)
cloud = cloud.cpu().numpy().reshape(-1, 3)
obstacle_vertices = get_vertices(*scene[0], config).reshape(-1, 3, 3)
obstacle_vertices = obstacle_vertices.cpu().numpy()
lines = obstacle_vertices[:, [
(0, 1), (1, 2), (2, 0)], :].reshape(-1, 2, 3)
vpos = pose[..., :2].cpu().numpy()
vpos = np.asarray([[vpos[0], vpos[1], config.ray_z]])
# Send data for visualization.
data_queue.put(dict(
cloud=dict(pos=cloud.reshape(-1, 3)),
obstacles=dict(pos=lines),
self=dict(pos=vpos, color=(1, 0, 0, 1), size=10.0)
))
# Apply velocity
pose += velocity * config.win
scene[0][1] += scene[1] * config.win
# time.sleep(0.001)
if __name__ == '__main__':
main()
|
{"hexsha": "b18097ac7d73674bf13f62bb9e6b10a6fa1f2a54", "size": 15905, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "yycho0108/PyTorchRayTracer", "max_stars_repo_head_hexsha": "51ca7f952b8774b13526e1b2ac8df2536c83e822", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "yycho0108/PyTorchRayTracer", "max_issues_repo_head_hexsha": "51ca7f952b8774b13526e1b2ac8df2536c83e822", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "yycho0108/PyTorchRayTracer", "max_forks_repo_head_hexsha": "51ca7f952b8774b13526e1b2ac8df2536c83e822", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2780172414, "max_line_length": 103, "alphanum_fraction": 0.5676202452, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4541}
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import numpy as np
from scipy.io.wavfile import read, write
from scipy import signal
import math
import torch
import os
def cosine_decay(init_val, final_val, step, decay_steps):
    # Cosine annealing from init_val down to final_val over decay_steps steps.
    alpha = final_val / init_val
    decay = 0.5 * (1 + math.cos(math.pi * step / decay_steps))
    decayed = (1 - alpha) * decay + alpha
    return init_val * decayed
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.IntTensor(max_len))
mask = ids < lengths.unsqueeze(1)
return mask
def preemphasize(wav, k=0.97):
return signal.lfilter([1, -k], [1], wav)
def de_emphasize(wav, k=0.97):
return signal.lfilter([1], [1, -k], wav)
def load_wav_to_torch(path, max_value=32768):
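    # NOTE: despite the name, this expects a pre-extracted numpy array (.npy)
    # on disk, not a raw .wav file; max_value is currently unused.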
wav = np.load(path)
wav = preemphasize(wav)
return torch.FloatTensor(wav.astype(np.float32))
def dc_notch_filter(wav):
# code from speex
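    # Second-order IIR notch at DC (0 Hz) to strip any constant offset from the waveform.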
notch_radius = 0.982
den = notch_radius ** 2 + 0.7 * (1 - notch_radius) ** 2
b = np.array([1, -2, 1]) * notch_radius
a = np.array([1, -2 * notch_radius, den])
return signal.lfilter(b, a, wav)
def save_wav(wav, path, sr=22050):
wav = dc_notch_filter(wav)
f1 = 0.8 * 32768 / max(0.01, np.max(np.abs(wav)))
f2 = np.sign(wav) * np.power(np.abs(wav), 0.95)
wav = f1 * f2
write(path, sr, wav.astype(np.int16))
def load_metadata(dirname, filename='train.txt', split="|"):
with open(os.path.join(dirname, filename)) as f:
def split_line(line):
parts = line.strip().split(split)
wav_path = os.path.join(dirname, 'audio', parts[0])
text = parts[-1]
return wav_path, text
return [split_line(line) for line in f.readlines()]
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
|
{"hexsha": "2f59fee0e4166b5be21f9244a7f13851218ea072", "size": 3638, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/utils.py", "max_stars_repo_name": "leijue222/tacotron2", "max_stars_repo_head_hexsha": "5950728a91e7a9355f42f658e00db2a2aef94247", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 93, "max_stars_repo_stars_event_min_datetime": "2018-08-24T07:49:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:21:53.000Z", "max_issues_repo_path": "common/utils.py", "max_issues_repo_name": "leijue222/tacotron2", "max_issues_repo_head_hexsha": "5950728a91e7a9355f42f658e00db2a2aef94247", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2019-10-16T10:38:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-22T11:44:19.000Z", "max_forks_repo_path": "common/utils.py", "max_forks_repo_name": "leijue222/tacotron2", "max_forks_repo_head_hexsha": "5950728a91e7a9355f42f658e00db2a2aef94247", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 37, "max_forks_repo_forks_event_min_datetime": "2019-10-16T11:44:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T09:38:35.000Z", "avg_line_length": 37.5051546392, "max_line_length": 82, "alphanum_fraction": 0.6643760308, "include": true, "reason": "import numpy,from scipy", "num_tokens": 889}
|
import numpy as np
from dateutil.parser import parse
from statistics import median
class GraphDatas:
def __init__(self):
self.names = list()
self.values = list()
self.files = list()
self.legends = list()
        # Filters
self.xf = None
self.yf = None
self.namesTMP = list()
self.valuesTMP = list()
def isOnlyNumbers(self, dataList):
"""
        Method to check if a list contains only numeric values (convertible to float).
:param dataList: a list
:return: a boolean
Example(s):
>>> graph = GraphDatas()
>>> liste1 = ["a","b","c","d"]
>>> liste2 = ["a","b","3","4"]
>>> liste3 = ["1","2","3","4"]
>>> graph.isOnlyNumbers(liste1)
False
>>> graph.isOnlyNumbers(liste2)
False
>>> graph.isOnlyNumbers(liste3)
True
"""
for elem in dataList:
try:
float(elem)
except ValueError:
return False
return True
def isOnlyDate(self, dataList):
"""
Method to check if a list is composed only of dates
:param dataList: a list
:return: a boolean
Example(s):
>>> graph = GraphDatas()
>>> liste1 = ["a","b","c","d"]
>>> liste2 = ["2020-08-12","2020-08-12","2020-08-12","2020-08-12"]
>>> liste3 = ["1","2","3","2020-08-12"]
>>> graph.isOnlyDate(liste1)
False
>>> graph.isOnlyDate(liste2)
True
>>> graph.isOnlyDate(liste3)
True
"""
cpt = 0
for elem in dataList:
try:
parse(elem)
                cpt += 1
except ValueError:
pass
return cpt == len(dataList)
def convert_list_of_dates(self, dataList):
"""
        Method to convert a list composed only of dates into a list of datetime objects.
:param dataList:
Example(s):
>>> graph = GraphDatas()
>>> liste1 = ["a","b","c","d"]
>>> newList = graph.convert_list_of_dates(liste1)
>>> newList
['a', 'b', 'c', 'd']
>>> liste2 = ["2020-08-12","2020-08-12","2020-08-12","2020-08-12"]
>>> newList = graph.convert_list_of_dates(liste2)
>>> newList
[datetime.datetime(2020, 8, 12, 0, 0), datetime.datetime(2020, 8, 12, 0, 0), datetime.datetime(2020, 8, 12, 0, 0), datetime.datetime(2020, 8, 12, 0, 0)]
"""
if self.isOnlyDate(dataList):
res = list()
for elem in dataList:
res += [parse(elem)]
return res
return dataList
def convert_list_of_num(self, dataList):
"""
        Method to convert a list composed only of numeric strings into a list of floats.
Example(s):
>>> graph = GraphDatas()
>>> liste2 = ["a","b",3,4]
>>> newList = graph.convert_list_of_num(liste2)
>>> newList
['a', 'b', 3, 4]
>>> liste3 = ["1","2","3","4"]
>>> newList = graph.convert_list_of_num(liste3)
>>> newList
[1.0, 2.0, 3.0, 4.0]
"""
if self.isOnlyNumbers(dataList):
res = list()
for elem in dataList:
res += [float(elem)]
return res
return dataList
def convertList(self, dataList):
"""
        Method to convert the data into the correct types.
:param dataList: a list
:return: a list
Example(s):
>>> graph = GraphDatas()
>>> liste1 = ["a","b","c","d"]
>>> newList = graph.convertList(liste1)
>>> newList
['a', 'b', 'c', 'd']
>>> liste2 = ["2020-08-12","2020-08-12","2020-08-12","2020-08-12"]
>>> newList = graph.convertList(liste2)
>>> newList
[datetime.datetime(2020, 8, 12, 0, 0), datetime.datetime(2020, 8, 12, 0, 0), datetime.datetime(2020, 8, 12, 0, 0), datetime.datetime(2020, 8, 12, 0, 0)]
>>> liste3 = ["1","2","3","4"]
>>> newList = graph.convertList(liste3)
>>> newList
[1.0, 2.0, 3.0, 4.0]
"""
        if self.isOnlyNumbers(dataList):  # Must be checked first: a list of numbers would otherwise be detected as dates!
return self.convert_list_of_num(dataList)
elif self.isOnlyDate(dataList):
return self.convert_list_of_dates(dataList)
else:
return dataList
def addNames(self, namesList):
"""
        Method to add a list of data to the names list.
:param namesList : a list of datas.
Example(s):
>>> graph = GraphDatas()
>>> graph.names
[]
>>> graph.addNames(['A','B','C','D'])
>>> graph.names
[array(['A', 'B', 'C', 'D'], dtype='<U1')]
>>> graph.addNames(12)
Traceback (most recent call last):
...
AssertionError: namesList has to be a list
"""
assert (type(namesList) == list), "namesList has to be a list"
self.names += [np.asarray(self.convertList(namesList))]
def addValues(self, valuesList):
"""
        Method to add data to the values list.
:param valuesList : a list of datas.
Example(s):
>>> graph = GraphDatas()
>>> graph.values
[]
>>> graph.addValues(["1","2","3","4"])
>>> graph.values
[array([1., 2., 3., 4.])]
>>> graph.addValues(12)
Traceback (most recent call last):
...
AssertionError: valuesList has to be a list
"""
assert (type(valuesList) == list), "valuesList has to be a list"
self.values += [np.asarray(self.convertList(valuesList))]
def addLegends(self, legend):
"""
        Method to add legend entries to the legends list.
:param legend: a string
Example(s):
>>> graph = GraphDatas()
>>> graph.legends
[]
>>> graph.addLegends(["test"])
>>> graph.legends
['test']
>>> graph.addLegends(1)
Traceback (most recent call last):
...
AssertionError: a legend has to be a list
"""
assert(type(legend) == list), "a legend has to be a list"
for elem in legend:
self.legends.append(elem)
def getNames(self):
"""
Method to return a list : names
:return: a list
Example(s):
>>> g = GraphDatas()
>>> g.getNames()
[]
>>> g.addNames(['11','22'])
>>> g.getNames()
[array([11., 22.])]
"""
if self.names == [] and self.values != []:
v = list()
maxinvalues = max([len(x) for x in self.values])
for i in range(0,len(self.values)):
v.append([i for i in range(0, maxinvalues)])
self.names = v
return v
else:
return self.names
def getValues(self):
"""
Method to return a list : values
:return: a list
Example(s):
>>> g = GraphDatas()
>>> g.getValues()
[]
>>> g.addValues(['1','2'])
>>> g.getValues()
[array([1., 2.])]
"""
return self.values
def getFiles(self):
"""
Method to return a list : files names
:return: a list
Example(s):
>>> g = GraphDatas()
>>> g.getFiles()
[]
>>> g.files.append('f1')
>>> g.files.append('f2')
>>> g.getFiles()
['f1', 'f2']
"""
return self.files
def getLegends(self):
"""
Method to return a list : legends
:return: a list
"""
return self.legends
def processingDatas(self, expression):
"""
        Method to process data. Use (x-1) in the expression if you want to change values by using the previous one.
:param expression: a string
Example(s):
>>> graph = GraphDatas()
>>> graph.values
[]
>>> graph.addValues(["1","2","3","4"])
>>> graph.values
[array([1., 2., 3., 4.])]
>>> graph.processingDatas("x+2")
[array([3., 4., 5., 6.])]
>>> graph.processingDatas("x*100")
[array([300., 400., 500., 600.])]
>>> graph.processingDatas("x+(2/20)")
[array([300.1, 400.1, 500.1, 600.1])]
>>> graph2 = GraphDatas()
>>> graph2.addValues(["1","2","3","4"])
>>> graph2.processingDatas("2*(x**2)")
[array([ 2., 8., 18., 32.])]
>>> graph3 = GraphDatas()
>>> graph3.addValues(["10","100","1000","10000"])
>>> graph3.processingDatas("x = x - (x-1)")
[array([ 10., 90., 900., 9000.])]
>>> graph3.processingDatas("x = x + (x-1) + 1")
[array([ 10., 101., 991., 9901.])]
"""
assert(type(expression) == str), "expression has to be a string"
for i in range(len(self.values)):
if "(x-1)" in expression:
self.previousValueExpression(expression,i)
else:
self.normalExpression(expression, i)
return self.values
def normalExpression(self,expression, i):
"""
        Method used in processingDatas. normalExpression is here to simplify the code. Tested in processingDatas.
"""
value = "self.values[" + str(i) + "]"
exp = expression.replace("x", value)
code = "{} = {}".format(value, exp)
exec(code)
def previousValueExpression(self,expression, i):
"""
        Method used in processingDatas. previousValueExpression is here to simplify the code. Tested in processingDatas.
        Use (x-1) in the expression if you want to change values by using the previous one.
"""
value_cpy = [x for x in self.values[i]]
for j in range(1,len(self.values[i])):
tmp1 = "self.values[" + str(i) + "]["+str(j)+"]"
tmp2 = "value_cpy["+str(j-1) + "]"
code = expression.replace("(x-1)", tmp2)
code = code.replace("x", tmp1)
exec(code)
def setXf(self, xfilter):
"""
Method to set X filter
:param xfilter: a String
Example(s):
>>> g = GraphDatas()
>>> g.setXf("x < 10")
>>> g.xf
'x < 10'
"""
self.xf = xfilter
self.filtersCall()
def setYf(self, yfilter):
"""
Method to set Y filter
:param yfilter: a String
Example(s):
>>> g = GraphDatas()
>>> g.setYf("y != 12")
>>> g.yf
'y != 12'
"""
self.yf = yfilter
self.filtersCall()
def filtersCall(self):
"""
Method to call each type of filter (x and y)
"""
if self.xf is not None:
self.filterValueX(self.xf)
        # Use two independent checks: an elif here would silently skip the y
        # filter whenever an x filter is set.
        if self.yf is not None:
            self.filterValueY(self.yf)
def evalExpression(self, elem, exp):
"""
Method to evaluate an expression
:param elem: an element from a list
:param exp: and expression
:return: a boolean
Example(s):
>>> graph = GraphDatas()
>>> graph.evalExpression(3,"x >= 2 and x < 5 and x != 4")
True
>>> graph.evalExpression(3,"x >= 20 and x < 5 and x != 4")
False
>>> graph.evalExpression(3,"y >= 0 and y < 50 and y != 4")
True
"""
if type(elem) == np.str_:
tmp = exp.replace("x", "'"+elem+"'")
tmp = tmp.replace("y", "'"+elem+"'")
else:
tmp = exp.replace("x", str(elem))
tmp = tmp.replace("y", str(elem))
return eval(tmp)
def initTmpList(self):
"""
Method to init a list
Example(s):
>>> g = GraphDatas()
>>> g.addNames(['1','2','3','4'])
>>> g.addValues(['5','6','7','8'])
>>> g.addValues(['9','10','11','12'])
>>> g.names
[array([1., 2., 3., 4.])]
>>> g.values
[array([5., 6., 7., 8.]), array([ 9., 10., 11., 12.])]
>>> g.initTmpList()
>>> g.namesTMP
[[]]
>>> g.valuesTMP
[[], []]
"""
for names in self.names:
self.namesTMP.append([])
for values in self.values:
self.valuesTMP.append([])
def addNamesValuesIndex(self, index):
"""
Method to append names and values for elements at index given.
Example(s):
>>> g = GraphDatas()
>>> g.addNames(['1','2','3','4'])
>>> g.addValues(['5','6','7','8'])
>>> g.addValues(['9','10','11','12'])
>>> g.initTmpList()
>>> g.addNamesValuesIndex(2)
>>> g.namesTMP
[['3.0']]
>>> g.valuesTMP
[['7.0'], ['11.0']]
"""
for i in range(len(self.names)):
self.namesTMP[i].append(str(self.names[i][index]))
for j in range(len(self.values)):
self.valuesTMP[j].append(str(self.values[j][index]))
def replaceNamesAndValues(self):
"""
Method to clear names and values list, then add names and values from temporary list (filter)
"""
self.names = list()
self.values = list()
for e in self.namesTMP:
self.addNames(e)
for e2 in self.valuesTMP:
self.addValues(e2)
def filterValueX(self, expression):
"""
Method to apply a filter on x values.
:param expression: a String
Example(s):
>>> g = GraphDatas()
>>> g.addNames(['1','2','3','4'])
>>> g.addValues(['5','6','7','8'])
>>> g.addValues(['9','10','11','12'])
>>> g.values
[array([5., 6., 7., 8.]), array([ 9., 10., 11., 12.])]
>>> g.filterValueX("x != 3")
>>> g.values
[array([5., 6., 8.]), array([ 9., 10., 12.])]
>>> g.names
[array([1., 2., 4.])]
"""
try:
n = self.names[0]
self.initTmpList()
for i in range(len(n)):
if self.evalExpression(n[i], expression):
self.addNamesValuesIndex(i)
self.replaceNamesAndValues()
except IndexError:
pass
def filterValueY(self, expression):
"""
Method to apply a filter on y values.
:param expression: a String
Example(s):
>>> g = GraphDatas()
>>> g.addNames(['1','2','3','4'])
>>> g.addValues(['5','6','7','8'])
>>> g.addValues(['9','10','11','12'])
>>> g.values
[array([5., 6., 7., 8.]), array([ 9., 10., 11., 12.])]
>>> g.filterValueY("y < 7 or y > 10")
>>> g.values
[array([ 5., 6., nan, nan]), array([nan, nan, 11., 12.])]
"""
for sublist in range(len(self.values)):
for elem in range(len(self.values[sublist])):
if not self.evalExpression(self.values[sublist][elem], expression):
self.values[sublist][elem] = np.nan
def averageLst(self,lst):
"""
        Method to calculate the average of a list, rounded to 2 decimal places.
Example(s):
>>> g = GraphDatas()
>>> lst = g.averageLst([1,2,3,3])
>>> lst
2.25
"""
return round(sum(lst) / len(lst), 2)
def replaceValues(self, lst):
"""
Method to replace values
Example(s):
>>> g = GraphDatas()
>>> g.addValues(["1", "2", "3"])
>>> g.values
[array([1., 2., 3.])]
>>> g.replaceValues([["4", "5", "6"]])
>>> g.values
[array([4., 5., 6.])]
"""
for i in range(0, len(self.values)):
for j in range(0, len(self.values[i])):
tmp = lst[i][j]
if tmp == 'nan':
self.values[i][j] = np.nan
else:
self.values[i][j] = tmp
    def movingValues(self, kind, n):
        """
        Method to calculate moving values (average, min, max, median).
        This function is tested in 4 different functions :
        - movingAverage
        - movingMinimum
        - movingMaximum
        - movingMedian
        :param kind: a String (average, min, max, median)
        :param n: an int, the number of values taken into account
        """
        # Slide a window over the last n values of each series; positions before
        # the window is full emit 'nan'.
        lst = list()
        for sublist in range(0, len(self.values)):
            sublist_tmp = list()
            length = len(self.values[sublist])
            if length > 2:
                for i in range(0, length):
                    if (i + 1 - n) >= 0:
                        tmp = self.values[sublist][(i - n + 1):i + 1]
                        if kind == "average":
                            sublist_tmp.append(self.averageLst(tmp))
                        elif kind == "min":
                            sublist_tmp.append(min(tmp))
                        elif kind == "max":
                            sublist_tmp.append(max(tmp))
                        elif kind == "median":
                            sublist_tmp.append(median(tmp))
                    else:
                        sublist_tmp.append('nan')
                lst.append(sublist_tmp)
        self.replaceValues(lst)
def movingAverage(self, n):
"""
Method to calculate moving average.
:param n: an int, the number of values taken into account
Example(s):
>>> g = GraphDatas()
>>> g.addNames(['1','2','3','4','5','6','7','8','9','10'])
>>> g.addValues(['0.3','0.40','0.60','0.90','0.50','0.20','-0.10','-0.30','0.0','0.10'])
>>> g.movingAverage(3)
>>> g.values #doctest: +NORMALIZE_WHITESPACE
[array([ nan, nan, 0.43, 0.63, 0.67, 0.53, 0.2 , -0.07, -0.13, -0.07])]
>>> g2 = GraphDatas()
>>> g2.addNames(['1','2','3','4','5','6','7','8','9','10'])
>>> g2.addValues(['0.3','0.40','0.60','0.90','0.50','0.20','-0.10','-0.30','0.0','0.10'])
>>> g2.movingAverage(6)
>>> g2.values #doctest: +NORMALIZE_WHITESPACE
[array([ nan, nan, nan, nan, nan, 0.48, 0.42, 0.3 , 0.2 , 0.07])]
"""
self.movingValues("average", n)
def movingMinimum(self, n):
"""
Method to calculate moving minimum.
:param n: an int, the number of values taken into account
Example(s):
>>> g = GraphDatas()
>>> g.addNames(['1','2','3','4','5','6','7','8','9','10'])
>>> g.addValues(['0.3','0.40','0.60','0.90','0.50','0.20','-0.10','-0.30','0.0','0.10'])
>>> g.movingMinimum(3)
>>> g.values #doctest: +NORMALIZE_WHITESPACE
[array([ nan, nan, 0.3, 0.4, 0.5, 0.2, -0.1, -0.3, -0.3, -0.3])]
>>> g2 = GraphDatas()
>>> g2.addNames(['1','2','3','4','5','6','7','8','9','10'])
>>> g2.addValues(['0.3','0.40','0.60','0.90','0.50','0.20','-0.10','-0.30','0.0','0.10'])
>>> g2.movingMinimum(6)
>>> g2.values #doctest: +NORMALIZE_WHITESPACE
[array([ nan, nan, nan, nan, nan, 0.2, -0.1, -0.3, -0.3, -0.3])]
"""
self.movingValues("min", n)
def movingMaximum(self, n):
"""
Method to calculate moving maximum.
:param n: an int, the number of values taken into account
Example(s):
>>> g = GraphDatas()
>>> g.addNames(['1','2','3','4','5','6','7','8','9','10'])
>>> g.addValues(['0.3','0.40','0.60','0.90','0.50','0.20','-0.10','-0.30','0.0','0.10'])
>>> g.movingMaximum(3)
>>> g.values #doctest: +NORMALIZE_WHITESPACE
[array([nan, nan, 0.6, 0.9, 0.9, 0.9, 0.5, 0.2, 0. , 0.1])]
>>> g2 = GraphDatas()
>>> g2.addNames(['1','2','3','4','5','6','7','8','9','10'])
>>> g2.addValues(['0.3','0.40','0.60','0.90','0.50','0.20','-0.10','-0.30','0.0','0.10'])
>>> g2.movingMaximum(6)
>>> g2.values #doctest: +NORMALIZE_WHITESPACE
[array([nan, nan, nan, nan, nan, 0.9, 0.9, 0.9, 0.9, 0.5])]
"""
self.movingValues("max", n)
def movingMedian(self, n):
"""
Method to calculate moving median.
:param n: an int, the number of values taken into account
Example(s):
>>> g = GraphDatas()
>>> g.addNames(['1','2','3','4','5','6','7','8','9','10'])
>>> g.addValues(['0.3','0.40','0.60','0.90','0.50','0.20','-0.10','-0.30','0.0','0.10'])
>>> g.movingMedian(3)
>>> g.values #doctest: +NORMALIZE_WHITESPACE
[array([ nan, nan, 0.4, 0.6, 0.6, 0.5, 0.2, -0.1, -0.1, 0. ])]
>>> g2 = GraphDatas()
>>> g2.addNames(['1','2','3','4','5','6','7','8','9','10'])
>>> g2.addValues(['0.3','0.40','0.60','0.90','0.50','0.20','-0.10','-0.30','0.0','0.10'])
>>> g2.movingMedian(6)
>>> g2.values #doctest: +NORMALIZE_WHITESPACE
[array([ nan, nan, nan, nan, nan, 0.45, 0.45, 0.35, 0.1 , 0.05])]
"""
self.movingValues("mediane", n)
|
{"hexsha": "d3ae9d343bfbd634f35203e19e1f8367ef588b8d", "size": 21060, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/GraphDatas.py", "max_stars_repo_name": "AntoineMeresse/Terminal-chart", "max_stars_repo_head_hexsha": "eff66c32d78c394849176c7777bf7c203dbac5b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/GraphDatas.py", "max_issues_repo_name": "AntoineMeresse/Terminal-chart", "max_issues_repo_head_hexsha": "eff66c32d78c394849176c7777bf7c203dbac5b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GraphDatas.py", "max_forks_repo_name": "AntoineMeresse/Terminal-chart", "max_forks_repo_head_hexsha": "eff66c32d78c394849176c7777bf7c203dbac5b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8723404255, "max_line_length": 160, "alphanum_fraction": 0.4700854701, "include": true, "reason": "import numpy", "num_tokens": 5824}
|
import numpy as np
a = np.ones([2, 3, 2], np.float32)
a = [[[0.1, 0.2],
[-0.3, 0.4],
[0.5, 0.6]],
[[0.7, 0.8],
[0.9, 1.0],
[1.1, 1.2]]]
b = np.ones([2], np.float32)
b = [0.2, 0.3]
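# b has shape (2,), so it broadcasts across the trailing axis of a;
# the product keeps shape (2, 3, 2).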
print(np.multiply(a, b))
|
{"hexsha": "d4cf693112d5fa2680cd7e92cedc1cc43c1d8fdc", "size": 236, "ext": "py", "lang": "Python", "max_stars_repo_path": "blaze/blaze/test/utest_data/operator/op/elementwise/elementwise_mul.py", "max_stars_repo_name": "Ru-Xiang/x-deeplearning", "max_stars_repo_head_hexsha": "04cc0497150920c64b06bb8c314ef89977a3427a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4071, "max_stars_repo_stars_event_min_datetime": "2018-12-13T04:17:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:29:35.000Z", "max_issues_repo_path": "blaze/blaze/test/utest_data/operator/op/elementwise/elementwise_mul.py", "max_issues_repo_name": "laozhuang727/x-deeplearning", "max_issues_repo_head_hexsha": "781545783a4e2bbbda48fc64318fb2c6d8bbb3cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 359, "max_issues_repo_issues_event_min_datetime": "2018-12-21T01:14:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T07:18:02.000Z", "max_forks_repo_path": "blaze/blaze/test/utest_data/operator/op/elementwise/elementwise_mul.py", "max_forks_repo_name": "laozhuang727/x-deeplearning", "max_forks_repo_head_hexsha": "781545783a4e2bbbda48fc64318fb2c6d8bbb3cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1054, "max_forks_repo_forks_event_min_datetime": "2018-12-20T09:57:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T07:16:53.000Z", "avg_line_length": 15.7333333333, "max_line_length": 34, "alphanum_fraction": 0.4152542373, "include": true, "reason": "import numpy", "num_tokens": 120}
|
# pylint: disable-msg=E1101,W0612
from __future__ import division
from datetime import datetime, timedelta, time
import nose
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, Timedelta, TimedeltaIndex, isnull, notnull,
bdate_range, date_range, timedelta_range, Int64Index)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long, PY3_2
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
assert_index_equal,
ensure_clean)
from pandas.tseries.offsets import Day, Second, Hour
import pandas.util.testing as tm
from numpy.random import rand, randn
from pandas import _np_version_under1p8
iNaT = tslib.iNaT
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_construction(self):
expected = np.timedelta64(10,'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10,unit='d').value, expected)
self.assertEqual(Timedelta(10.0,unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10,'s').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10,seconds=10).value, expected)
self.assertEqual(Timedelta(days=10,milliseconds=10*1000).value, expected)
self.assertEqual(Timedelta(days=10,microseconds=10*1000*1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days':'D', 'seconds':'s', 'microseconds':'us',
'milliseconds':'ms', 'minutes':'m', 'hours':'h', 'weeks':'W'}
npdtypes = [np.int64, np.int32, np.int16,
np.float64, np.float32, np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1, npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(**{pykwarg:npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'), Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'), Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'), timedelta(days=10,hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10,hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(days=10,hours=1,minutes=1,seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -timedelta(days=10,hours=1,minutes=1,seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -timedelta(days=10,hours=1,minutes=1,seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -timedelta(days=10,hours=1,minutes=1,seconds=1,microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -timedelta(days=10,hours=1,minutes=1,seconds=31,microseconds=3))
        # currently invalid: a '-' on the hh:mm:ss part is not allowed (only on the days part)
self.assertRaises(ValueError, lambda : Timedelta('-10 days -1 h 1.5m 1s 3us'))
# roundtripping both for string and value
for v in ['1s',
'-1s',
'1us',
'-1us',
'1 day',
'-1 day',
'-23:59:59.999999',
'-1 days +23:59:59.999999',
'-1ns',
'1ns',
'-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value),td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)),td)
self.assertEqual(Timedelta(td._repr_base(format='all')),td)
# floats
expected = np.timedelta64(10,'s').astype('m8[ns]').view('i8') + np.timedelta64(500,'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5,unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value,iNaT)
self.assertEqual(Timedelta('nat').value,iNaT)
self.assertEqual(Timedelta('NAT').value,iNaT)
self.assertTrue(isnull(Timestamp('nat')))
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),Timedelta('0 days, 00:00:02'))
# invalid
tm.assertRaisesRegexp(ValueError,
"cannot construct a TimeDelta",
lambda : Timedelta())
tm.assertRaisesRegexp(ValueError,
"cannot create timedelta string convert",
lambda : Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a TimeDelta from the passed arguments, allowed keywords are ",
lambda : Timedelta(day=10))
def test_repr(self):
self.assertEqual(repr(Timedelta(10,unit='d')),"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10,unit='s')),"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10,unit='ms')),"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10,unit='ms')),"Timedelta('-1 days +23:59:59.990000')")
def test_identity(self):
td = Timedelta(10,unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_conversion(self):
for td in [ Timedelta(10,unit='d'), Timedelta('1 days, 10:11:12.012345') ]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta)
and not isinstance(pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
        # this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_ops(self):
td = Timedelta(10,unit='d')
self.assertEqual(-td,Timedelta(-10,unit='d'))
self.assertEqual(+td,Timedelta(10,unit='d'))
self.assertEqual(td - td, Timedelta(0,unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20,unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20,unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5,unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is pd.NaT)
# invert
self.assertEqual(-td,Timedelta('-10d'))
self.assertEqual(td * -1,Timedelta('-10d'))
self.assertEqual(-1 * td,Timedelta('-10d'))
self.assertEqual(abs(-td),Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda : Timedelta(11,unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda : td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda : td + 2)
self.assertRaises(TypeError, lambda : td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1,'D')
self.assertEqual(result, td.value/float(86400*1e9))
result = td / np.timedelta64(1,'s')
self.assertEqual(result, td.value/float(1e9))
result = td / np.timedelta64(1,'ns')
self.assertEqual(result, td.value)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other, np.array([1]))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td, np.array([1]))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_compare_timedelta_series(self):
        # regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_fields(self):
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days,1)
self.assertEqual(rng.seconds,10*3600+11*60+12)
self.assertEqual(rng.microseconds,0)
self.assertEqual(rng.nanoseconds,0)
self.assertRaises(AttributeError, lambda : rng.hours)
self.assertRaises(AttributeError, lambda : rng.minutes)
self.assertRaises(AttributeError, lambda : rng.milliseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td),Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td,Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value,49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value,-49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days,-1)
self.assertEqual(rng.seconds,10*3600+11*60+12)
self.assertEqual(rng.microseconds,100*1000+123)
self.assertEqual(rng.nanoseconds,456)
self.assertRaises(AttributeError, lambda : rng.hours)
self.assertRaises(AttributeError, lambda : rng.minutes)
self.assertRaises(AttributeError, lambda : rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days,-1)
self.assertEqual(tup.hours,23)
self.assertEqual(tup.minutes,59)
self.assertEqual(tup.seconds,59)
self.assertEqual(tup.milliseconds,999)
self.assertEqual(tup.microseconds,999)
self.assertEqual(tup.nanoseconds,0)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days,-2)
self.assertEqual(tup.hours,23)
self.assertEqual(tup.minutes,59)
self.assertEqual(tup.seconds,59)
self.assertEqual(tup.milliseconds,999)
self.assertEqual(tup.microseconds,999)
self.assertEqual(tup.nanoseconds,0)
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5),unit='D')
result = timedelta_range('0 days',periods=5,freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11),unit='D')
result = timedelta_range('0 days','10 days',freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5),unit='D') + Second(2) + Day()
result = timedelta_range('1 days, 00:00:02','5 days, 00:00:02',freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta([1,3,5,7,9],unit='D') + Second(2)
result = timedelta_range('1 days, 00:00:02',periods=5,freq='2D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50),unit='T')*30
result = timedelta_range('0 days',freq='30T',periods=50)
tm.assert_index_equal(result, expected)
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))
self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))
self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10,'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))
self.assertEqual(ct('100'), np.timedelta64(100,'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000,'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000,'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000,'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000,'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1,'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10,'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100,'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000,'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000,'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000,'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000,'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1,'s')))
self.assertEqual(ct('06:00:01'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms')))
self.assertEqual(ct('- 1days, 00:00:01'), conv(-d1+np.timedelta64(1,'s')))
self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
self.assertEqual(to_timedelta('nat',box=False).astype('int64'), tslib.iNaT)
self.assertEqual(to_timedelta('nan',box=False).astype('int64'), tslib.iNaT)
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003',box=False), conv(d1+np.timedelta64(6*3600+5*60+1,'s')+np.timedelta64(30,'us')))
self.assertEqual(to_timedelta('15.5us',box=False), conv(np.timedelta64(15500,'ns')))
# empty string
result = to_timedelta('',box=False)
self.assertEqual(result.astype('int64'), tslib.iNaT)
result = to_timedelta(['', ''])
self.assertTrue(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1,'s')]))
expected = np.array([np.timedelta64(1,'s')])
tm.assert_almost_equal(result,expected)
# ints
result = np.timedelta64(0,'ns')
expected = to_timedelta(0,box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d','1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ])
expected = to_timedelta([0,10],unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
# arrays of various dtypes
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='s')
expected = TimedeltaIndex([ np.timedelta64(1,'s') ]*5)
tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='m')
expected = TimedeltaIndex([ np.timedelta64(1,'m') ]*5)
tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='int64')
result = to_timedelta(arr,unit='h')
expected = TimedeltaIndex([ np.timedelta64(1,'h') ]*5)
tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([ np.timedelta64(1,'s') ]*5)
tm.assert_index_equal(result, expected)
arr = np.array([1]*5,dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([ np.timedelta64(1,'D') ]*5)
tm.assert_index_equal(result, expected)
        # Test with lists as input when box=False
expected = np.array(np.arange(3)*1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array([0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5),unit=unit)
expected = TimedeltaIndex([ np.timedelta64(i,transform(unit)) for i in np.arange(5).tolist() ])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2,unit=unit)
expected = Timedelta(np.timedelta64(2,transform(unit)).astype('timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y','M','W','D','y','w','d']:
testit(unit,lambda x: x.upper())
for unit in ['days','day','Day','Days']:
testit(unit,lambda x: 'D')
for unit in ['h','m','s','ms','us','ns','H','S','MS','US','NS']:
testit(unit,lambda x: x.lower())
# offsets
# m
testit('T',lambda x: 'm')
# ms
testit('L',lambda x: 'ms')
# these will error
self.assertRaises(ValueError, lambda : to_timedelta([1,2],unit='foo'))
self.assertRaises(ValueError, lambda : to_timedelta(1,unit='foo'))
# time not supported ATM
self.assertRaises(ValueError, lambda :to_timedelta(time(second=1)))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1,'s')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600,'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:08')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew','kurt','sem','var','prod']:
self.assertRaises(TypeError, lambda : getattr(td,op)())
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10,unit='s'),
timedelta(seconds=10),
np.timedelta64(10,'s'),
np.timedelta64(10000000000,'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1,seconds=10),
np.timedelta64(1,'D')+np.timedelta64(10,'s'),
pd.offsets.Day()+pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'), timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
actual = pd.to_timedelta(pd.NaT)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
self.assertEqual(result, expected)
result = Timedelta(days=1,hours=1,minutes=1,weeks=1,seconds=1,milliseconds=1,microseconds=1,nanoseconds=1)
expected = Timedelta(694861001001001)
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) + 5*Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
self.assertEqual(result, expected)
self.assertRaises(TypeError, lambda: Timedelta(nanoseconds='abc'))
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = s1 + pd.NaT # NaT is datetime, not timedelta
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
a = pd.to_timedelta(list_of_strings)
b = Series(list_of_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v,v_p)
class TestTimedeltaIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range('1 days','10 days')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_pickle(self):
rng = timedelta_range('1 days', periods=10)
rng_p = self.round_trip_pickle(rng)
tm.assert_index_equal(rng,rng_p)
def test_hash_error(self):
index = timedelta_range('1 days', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_append_join_nondatetimeindex(self):
rng = timedelta_range('1 days', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
def test_append_numpy_bug_1681(self):
td = timedelta_range('1 days','10 days',freq='2D')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': td}, index=td)
str(c)
result = a.append(c)
self.assertTrue((result['B'] == td).all())
def test_astype(self):
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2, freq='s')
self.assert_numpy_array_equal(rng.days, np.array([1,1],dtype='int64'))
self.assert_numpy_array_equal(rng.seconds, np.array([10*3600+11*60+12,10*3600+11*60+13],dtype='int64'))
self.assert_numpy_array_equal(rng.microseconds, np.array([100*1000+123,100*1000+123],dtype='int64'))
self.assert_numpy_array_equal(rng.nanoseconds, np.array([456,456],dtype='int64'))
self.assertRaises(AttributeError, lambda : rng.hours)
self.assertRaises(AttributeError, lambda : rng.minutes)
self.assertRaises(AttributeError, lambda : rng.milliseconds)
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days,Series([1,np.nan],index=[0,1]))
tm.assert_series_equal(s.dt.seconds,Series([10*3600+11*60+12,np.nan],index=[0,1]))
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
self.assertFalse(result.iloc[0].isnull().all())
self.assertTrue(result.iloc[1].isnull().all())
def test_constructor(self):
expected = TimedeltaIndex(['1 days','1 days 00:00:05',
'2 days','2 days 00:00:02','0 days 00:00:03'])
result = TimedeltaIndex(['1 days','1 days, 00:00:05',
np.timedelta64(2,'D'),
timedelta(days=2,seconds=2),
pd.offsets.Second(3)])
tm.assert_index_equal(result,expected)
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02'])
tm.assert_index_equal(TimedeltaIndex(range(3), unit='s'), expected)
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:05', '0 days 00:00:09'])
tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit='s'), expected)
expected = TimedeltaIndex(['0 days 00:00:00.400', '0 days 00:00:00.450', '0 days 00:00:01.200'])
tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'), expected)
def test_constructor_coverage(self):
rng = timedelta_range('1 days', periods=10.5)
exp = timedelta_range('1 days', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
periods='foo', freq='D')
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
end='10 days')
self.assertRaises(ValueError, TimedeltaIndex, '1 days')
# generator expression
gen = (timedelta(i) for i in range(10))
result = TimedeltaIndex(gen)
expected = TimedeltaIndex([timedelta(i) for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['1 days', '2 days', '3 days'])
result = TimedeltaIndex(strings)
expected = to_timedelta([1,2,3],unit='d')
self.assertTrue(result.equals(expected))
from_ints = TimedeltaIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming freq
self.assertRaises(ValueError, TimedeltaIndex,
['1 days', '2 days', '4 days'],
freq='D')
self.assertRaises(ValueError, TimedeltaIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = TimedeltaIndex(start='1 days', periods=1, freq='D',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range('20130101',periods=4)) - \
Series(date_range('20121201',periods=4))
td[2] += timedelta(minutes=5,seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1,'D')
expected = Series([31,31,(31*86400+5*60+3)/86400.0,np.nan])
assert_series_equal(result,expected)
result = td.astype('timedelta64[D]')
expected = Series([31,31,31,np.nan])
assert_series_equal(result,expected)
result = td / np.timedelta64(1,'s')
expected = Series([31*86400,31*86400,31*86400+5*60+3,np.nan])
assert_series_equal(result,expected)
result = td.astype('timedelta64[s]')
assert_series_equal(result,expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1,'D')
expected = Index([31,31,(31*86400+5*60+3)/86400.0,np.nan])
assert_index_equal(result,expected)
result = td.astype('timedelta64[D]')
expected = Index([31,31,31,np.nan])
assert_index_equal(result,expected)
result = td / np.timedelta64(1,'s')
expected = Index([31*86400,31*86400,31*86400+5*60+3,np.nan])
assert_index_equal(result,expected)
result = td.astype('timedelta64[s]')
assert_index_equal(result,expected)
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True]+[False]*7)
self.assert_numpy_array_equal(result, exp)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
if PY3_2:
raise nose.SkipTest('nat comparisons on 3.2 broken')
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2,'D'),
np.timedelta64(2,'D'),
np.timedelta64('nat'), np.timedelta64('nat'),
np.timedelta64(1,'D') + np.timedelta64(2,'s'),
np.timedelta64(5,'D') + np.timedelta64(3,'s')])
if _np_version_under1p8:
            # cannot test array because np.datetime64('nat') returns today's date
cases = [(tdidx1, tdidx2)]
else:
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
self.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = timedelta_range('1 day', periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
tm.assert_isinstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d','1d','2d'])
self.assertTrue(idx.equals(list(idx)))
non_td = Index(list('abc'))
self.assertFalse(idx.equals(list(non_td)))
def test_union(self):
i1 = timedelta_range('1day',periods=5)
i2 = timedelta_range('3day',periods=5)
result = i1.union(i2)
expected = timedelta_range('1day',periods=7)
self.assert_numpy_array_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(['3d','1d','2d'])
ordered = TimedeltaIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = timedelta_range('1 day', periods=4, freq='3H')
rng_b = timedelta_range('1 day', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = TimedeltaIndex(['1 day 15:19:49.695000'])
right = TimedeltaIndex(['2 day 13:04:21.322000',
'1 day 15:27:24.873000',
'1 day 15:31:05.350000'])
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
left = timedelta_range("1 day","30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
self.assertEqual(len(result), 0)
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00',periods=3,freq='h')
tm.assert_index_equal(result,expected)
def test_get_duplicates(self):
idx = TimedeltaIndex(['1 day','2 day','2 day','3 day','3day', '4day'])
result = idx.get_duplicates()
ex = TimedeltaIndex(['2 day','3day'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = TimedeltaIndex(['1 day 00:00:05','1 day 00:00:01','1 day 00:00:02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = TimedeltaIndex(['4d','1d','2d'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = TimedeltaIndex(['4day','1day','2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day','1day','5day','2day'],name='idx')
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
self.assertNotIsInstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day','1day 00:00:01','1day 00:00:02','1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01','1day 00:00:02','1day 00:00:03','1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01','1day 00:00:01','1day 00:00:02','1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01','1day 00:00:02','1day 00:00:03','1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
        # preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D', name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D', name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(['1 day','3 day','4 day', '5 day'],freq=None,name='idx')
cases ={0: expected_0, -5: expected_0,
-1: expected_4, 4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on the numpy version
result = idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
        # preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D', name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D', name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d','2 d','3 d',
'7 d','8 d','9 d','10d'], freq=None, name='idx')
cases ={(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
result = idx.delete(slice(n[0], n[-1] + 1))
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_take(self):
tds = ['1day 02:00:00','1 day 04:00:00','1 day 10:00:00']
idx = TimedeltaIndex(start='1d',end='2d',freq='H',name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2,4,10]]
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
tm.assert_isinstance(taken, TimedeltaIndex)
self.assertIsNone(taken.freq)
self.assertEqual(taken.name, expected.name)
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='td')
str(df)
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
tm.assert_index_equal(cols, joined)
def test_slice_keeps_name(self):
# GH4226
dr = pd.timedelta_range('1d','5d', freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = timedelta_range('1 day', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def test_factorize(self):
idx1 = TimedeltaIndex(['1 day','1 day','2 day',
'2 day','3 day','3 day'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = TimedeltaIndex(['1 day','2 day','3 day'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = timedelta_range('1 day', periods=4, freq='s')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestSlicing(tm.TestCase):
def test_partial_slice(self):
rng = timedelta_range('1 day 10:11:12', freq='h',periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['5 day':'6 day']
expected = s.iloc[86:134]
assert_series_equal(result, expected)
result = s['5 day':]
expected = s.iloc[86:]
assert_series_equal(result, expected)
result = s[:'6 day']
expected = s.iloc[:134]
assert_series_equal(result, expected)
result = s['6 days, 23:11:12']
self.assertEqual(result, s.irow(133))
self.assertRaises(KeyError, s.__getitem__, '50 days')
def test_partial_slice_high_reso(self):
# higher reso
rng = timedelta_range('1 day 10:11:12', freq='us',periods=2000)
s = Series(np.arange(len(rng)), index=rng)
result = s['1 day 10:11:12':]
expected = s.iloc[0:]
assert_series_equal(result, expected)
result = s['1 day 10:11:12.001':]
expected = s.iloc[1000:]
assert_series_equal(result, expected)
result = s['1 days, 10:11:12.001001']
self.assertEqual(result, s.irow(1001))
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
timedelta_range('0', periods=20, freq='H'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Timedelta(hours=7)::-1], SLC[7::-1])
assert_slices_equivalent(SLC['7 hours'::-1], SLC[7::-1])
assert_slices_equivalent(SLC[:Timedelta(hours=7):-1], SLC[:6:-1])
assert_slices_equivalent(SLC[:'7 hours':-1], SLC[:6:-1])
assert_slices_equivalent(SLC['15 hours':'7 hours':-1], SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):Timedelta(hours=7):-1], SLC[15:6:-1])
assert_slices_equivalent(SLC['15 hours':Timedelta(hours=7):-1], SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):'7 hours':-1], SLC[15:6:-1])
assert_slices_equivalent(SLC['7 hours':'15 hours':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
timedelta_range('0', periods=20, freq='H'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
{"hexsha": "bc51e01ca9bdffdd24fc615243df12aab021cd6f", "size": 56923, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandas/tseries/tests/test_timedeltas.py", "max_stars_repo_name": "jnmclarty/pandas", "max_stars_repo_head_hexsha": "b7c32718ae987a46b69e11496c8d042af86be514", "max_stars_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-08-24T18:37:20.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-24T18:37:35.000Z", "max_issues_repo_path": "pandas/tseries/tests/test_timedeltas.py", "max_issues_repo_name": "jnmclarty/pandas", "max_issues_repo_head_hexsha": "b7c32718ae987a46b69e11496c8d042af86be514", "max_issues_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pandas/tseries/tests/test_timedeltas.py", "max_forks_repo_name": "jnmclarty/pandas", "max_forks_repo_head_hexsha": "b7c32718ae987a46b69e11496c8d042af86be514", "max_forks_repo_licenses": ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2282685512, "max_line_length": 141, "alphanum_fraction": 0.6051332502, "include": true, "reason": "import numpy,from numpy", "num_tokens": 14590}
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
def get_path_list(root_path):
    # Obtains the list of sub-directory names from the root path
    # Returns: a list containing the names of the sub-directories in the root directory
train_root_path_content = []
for i in os.listdir(root_path):
train_root_path_content.append(i)
return train_root_path_content
#--------------------------------------------------------------------------------------------
def get_class_names(root_path, train_names):
    # Obtains a list of training image paths and a list of image class ids
    # Returns:
    # - A list containing all image paths in the train directories
    # - A list containing all image class ids
train_images_path = []
train_images_id = []
for i, folder_name in enumerate(train_names):
image_folder = root_path + '/' + folder_name
for image_files in os.listdir(image_folder):
image_path = image_folder + '/' + image_files
train_images_path.append(image_path)
train_images_id.append(i)
return train_images_path, train_images_id
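# Illustrative example of the resulting structure (folder names are
# hypothetical, not from the repository): for root 'dataset/train' with
# sub-folders ['alice', 'bob'], this yields paths such as
# 'dataset/train/alice/img1.jpg' paired with class ids [0, 0, ..., 1, 1, ...]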
#--------------------------------------------------------------------------------------------
def get_train_images_data(image_path_list):
# Loads a list of train images from given path list
# Returns:
# - A list containing all loaded training images
loaded_images = []
for images in image_path_list:
# read_image = cv2.imread(images, 0)
read_image = cv2.imread(images)
loaded_images.append(read_image)
return loaded_images
#--------------------------------------------------------------------------------------------
def detect_faces_and_filter(image_list, image_classes_list=None):
    # Detects faces in a given image list and discards any image whose
    # detected face count is anything other than exactly one
# Returns
# - A list containing all filtered and cropped face images in grayscale
# - A list containing all filtered faces location saved in rectangle
# - A list containing all filtered image classes id
filtered_and_cropped_face = []
filtered_and_located_face = []
filtered_id = []
# Haar Cascade to detect face(s)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# detection process
for i, images in enumerate(image_list):
gray_image = cv2.cvtColor(images, cv2.COLOR_BGR2GRAY)
detected_faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.2, minNeighbors=5)
# only process a single face in the picture
if len(detected_faces) != 1:
continue
# face = x, y, w, h
for x, y, w, h in detected_faces:
# cut
face_rect = gray_image[y:y+h, x:x+w]
filtered_and_cropped_face.append(face_rect)
filtered_and_located_face.append([x, y, w, h])
#filter_id.append(i)
            if image_classes_list is not None:
filtered_id.append(image_classes_list[i])
return filtered_and_cropped_face, filtered_and_located_face, filtered_id
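# Illustrative usage sketch (file names are hypothetical, not from the repo):
#   imgs = [cv2.imread('faces/a.jpg'), cv2.imread('faces/b.jpg')]
#   crops, rects, ids = detect_faces_and_filter(imgs, [0, 1])
# Images with zero or multiple detected faces are dropped, so the three
# returned lists stay aligned: len(crops) == len(rects) == len(ids) <= len(imgs)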
#--------------------------------------------------------------------------------------------
def train(train_face_grays, image_classes_list):
# Creates and trains a classifier object
# Returns
# - A classifier object after being trained with images of cropped faces
face_detector = cv2.face.LBPHFaceRecognizer_create()
face_detector.train(train_face_grays, np.array(image_classes_list))
return face_detector
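# Side note (an added remark): cv2.face.LBPHFaceRecognizer_create comes from
# the opencv-contrib modules; plain OpenCV builds without contrib do not
# expose the cv2.face namespace.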
#--------------------------------------------------------------------------------------------
def get_test_images_data(test_root_path, image_path_list):
# Loads a list of test images from a given path list
# Returns
# - List containing all loaded test images
test_image_list = []
for images in image_path_list:
test_image_read = cv2.imread(test_root_path + '/' + images)
test_image_list.append(test_image_read)
return test_image_list
#--------------------------------------------------------------------------------------------
def predict(classifier, test_faces_gray):
# Predicts the test image
# Returns
# - List containing all prediction results from given test faces
prediction_results = []
for images in test_faces_gray:
result, confidence = classifier.predict(images)
prediction_results.append(result)
return prediction_results
#--------------------------------------------------------------------------------------------
def draw_prediction_results(predict_results, test_image_list, test_faces_rects, train_names):
# Draws prediction results on the given test images
# Returns
# - List containing all test images after being highlighted with prediction results
draw_results = []
    for i, images in enumerate(test_image_list):
        prediction = predict_results[i]
        # Each test image is assumed (as elsewhere in this script) to have
        # exactly one filtered face rectangle, stored at the same index, so
        # draw the rectangle and predicted name for this image only. The
        # original looped over all rectangles and drew the first one on every
        # image, and its putText call had the thickness inside the color tuple.
        x, y, w, h = test_faces_rects[i]
        cv2.rectangle(images, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = train_names[prediction]
        # cv2.putText(image, text, origin, fontFace, fontScale, color (BGR), thickness)
        cv2.putText(images, text, (x + 5, y - 10), 0, 0.5, (0, 255, 0), 2)
        draw_results.append(images)
return draw_results
#--------------------------------------------------------------------------------------------
def combine_results(predicted_test_image_list):
# Combines all predicted test image result into a single image
# Returns
# - Array (ndarray) containing image data after being combined
combined_images = predicted_test_image_list[0]
for image in predicted_test_image_list[1:]:
combined_images = np.hstack((combined_images, image))
return combined_images
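# Caveat worth noting (an added remark, not in the original comments):
# np.hstack requires every image to share the same height and channel count;
# test photos of differing sizes would need resizing before this step.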
#--------------------------------------------------------------------------------------------
def show_result(image):
# Shows the prediction image
cv2.imshow("Final Results", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
# Modify value of train_root_path with the location of your training data root directory
train_root_path = "dataset/train"
train_names = get_path_list(train_root_path)
image_path_list, image_classes_list = get_class_names(train_root_path, train_names)
train_image_list = get_train_images_data(image_path_list)
train_face_grays, _, filtered_classes_list = detect_faces_and_filter(train_image_list, image_classes_list)
classifier = train(train_face_grays, filtered_classes_list)
# Modify value of test_image_path with the location of your testing data root directory
test_root_path = "dataset/test"
test_names = get_path_list(test_root_path)
test_image_list = get_test_images_data(test_root_path, test_names)
test_faces_gray, test_faces_rects, _ = detect_faces_and_filter(test_image_list)
predict_results = predict(classifier, test_faces_gray)
predicted_test_image_list = draw_prediction_results(predict_results, test_image_list, test_faces_rects, train_names)
final_image_result = combine_results(predicted_test_image_list)
show_result(final_image_result)
|
{"hexsha": "d103b13830f6bac39ee8b3f4a26f8b73f268eb6b", "size": 7649, "ext": "py", "lang": "Python", "max_stars_repo_path": "Main.py", "max_stars_repo_name": "AimAndIgnite/face-recognition", "max_stars_repo_head_hexsha": "29a2e98955f73b28c19ef9b76aeeb911c7bbd345", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Main.py", "max_issues_repo_name": "AimAndIgnite/face-recognition", "max_issues_repo_head_hexsha": "29a2e98955f73b28c19ef9b76aeeb911c7bbd345", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Main.py", "max_forks_repo_name": "AimAndIgnite/face-recognition", "max_forks_repo_head_hexsha": "29a2e98955f73b28c19ef9b76aeeb911c7bbd345", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4708994709, "max_line_length": 120, "alphanum_fraction": 0.6124983658, "include": true, "reason": "import numpy", "num_tokens": 1524}
|
###################################################################################################
# Repository: https://github.com/lgervasoni/urbansprawl
# MIT License
###################################################################################################
import numpy as np
import pandas as pd
import networkx as nx
import math
from shapely.geometry import LineString
from scipy.spatial.distance import cdist
def WeightedKernelDensityEstimation(X, Weights, bandwidth, Y, max_mb_per_chunk = 1000):
"""
Computes a Weighted Kernel Density Estimation
Parameters
----------
X : array
input points
Weights : array
array of weights associated to points
bandwidth : float
bandwidth for kernel density estimation
Y : array
points where density estimations will be performed
Returns
----------
pd.Series
        estimated densities normalized to sum to 1 (each value lies in [0, 1])
"""
def get_megabytes_pairwise_distances_allocation(X, Y):
# Calculate MB needed to allocate pairwise distances
return len(X) * len(Y) * 8 * 1e-6
    # During this procedure, pairwise euclidean distances are computed between the input points X and the evaluation points Y
    # For this reason, Y is divided into chunks to avoid large memory allocations: at most `max_mb_per_chunk` megabytes of pairwise distances are allocated per chunk
Y_split = np.array_split( Y, math.ceil( get_megabytes_pairwise_distances_allocation(X,Y) / max_mb_per_chunk ) )
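    # For scale (an illustrative calculation, not from the original source):
    # 200,000 input points against 50,000 evaluation points would need
    # 200000 * 50000 * 8 bytes ~= 80 GB for the full distance matrix,
    # which is why the evaluation points are processed chunk by chunk.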
"""
### Step by step
# Weighed KDE: Sum{ Weight_i * K( (X-Xi) / h) }
W_norm = np.array( Weights / np.sum(Weights) )
cdist_values = cdist( Y, X, 'euclidean') / bandwidth
Ks = np.exp( -.5 * ( cdist_values ) ** 2 )
PDF = np.sum( Ks * W_norm, axis=1)
"""
"""
### Complete version. Memory consuming
PDF = np.sum( np.exp( -.5 * ( cdist( Y, X, 'euclidean') / bandwidth ) ** 2 ) * ( np.array( Weights / np.sum(Weights) ) ), axis=1)
"""
### Divide Y in chunks to avoid big memory allocations
PDF = np.concatenate( [ np.sum( np.exp( -.5 * ( cdist( Y_i, X, 'euclidean') / bandwidth ) ** 2 ) * ( np.array( Weights / np.sum(Weights) ) ), axis=1) for Y_i in Y_split ] )
# Rescale
return pd.Series( PDF / PDF.sum() )
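# Illustrative usage sketch (the shapes and values below are assumptions,
# not taken from the original module):
#   X = np.random.rand(100, 2)    # 100 input points in 2D
#   W = np.random.rand(100)       # one weight per point
#   Y = np.random.rand(10, 2)     # 10 evaluation points
#   densities = WeightedKernelDensityEstimation(X, W, bandwidth=0.1, Y=Y)
#   # densities is a pd.Series of length 10 whose values sum to 1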
def cut_in_two(line):
"""
Cuts input line into two lines of equal length
Parameters
----------
line : shapely.LineString
input line
Returns
----------
list (LineString, LineString, Point)
two lines and the middle point cutting input line
"""
from shapely.geometry import Point, LineString
# Get final distance value
distance = line.length/2
# Cuts a line in two at a distance from its starting point
if distance <= 0.0 or distance >= line.length:
return [LineString(line)]
coords = list(line.coords)
for i, p in enumerate(coords):
pd = line.project(Point(p))
if pd == distance:
            return [LineString(coords[:i+1]), LineString(coords[i:]), Point(p)]
if pd > distance:
cp = line.interpolate(distance)
return [ LineString(coords[:i] + [(cp.x, cp.y)]), LineString([(cp.x, cp.y)] + coords[i:]), cp]
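# Worked example (sketch): for a straight LineString([(0, 0), (2, 0)]),
# cut_in_two(line) returns [LineString([(0, 0), (1, 0)]), LineString([(1, 0), (2, 0)]),
# Point(1, 0)], i.e. two halves of length 1 and the midpoint.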
class NodeCounter:
"""
    Negative node counter used to create new node osmids. Starts at -1 and auto-decrements.
"""
def __init__(self):
self._num = 0
def get_num(self):
self._num -= 1
return self._num
def verify_divide_edge(G, u, v, key, data, node_creation_counter, max_edge_length):
"""
Verify if edge(u,v)[key] length is higher than a certain threshold
In this case, divide edge(u,v) in two edges of equal length
Assign negative values to the edges new osm id
Call recursively to continue dividing each of the lines if necessary
Parameters
----------
G : networkx multidigraph
input graph
u : node
origin node
v : node
destination node
key : int
(u,v) arc identifier
data : dict
arc data
node_creation_counter : NodeCounter
node identifier creation
max_edge_length : float
maximum tolerated edge length
Returns
----------
"""
# Input: Two communicated nodes (u, v)
if ( data["length"] <= max_edge_length ): # Already satisfy condition?
return
# Get geometry connecting (u,v)
if ( data.get("geometry",None) ): # Geometry exists
geometry = data["geometry"]
else: # Real geometry is a straight line between the two nodes
P_U = G.node[u]["x"], G.node[u]["y"]
P_V = G.node[v]["x"], G.node[v]["y"]
geometry = LineString( (P_U, P_V) )
# Get geometries for edge(u,middle), edge(middle,v) and node(middle)
line1, line2, middle_point = cut_in_two(geometry)
# Copy edge(u,v) data to conserve attributes. Modify its length
data_e1 = data.copy()
data_e2 = data.copy()
# Associate correct length
data_e1["length"] = line1.length
data_e2["length"] = line2.length
# Assign geometries
data_e1["geometry"] = line1
data_e2["geometry"] = line2
# Create new node: Middle distance of edge
x,y = list(middle_point.coords)[0]
# Set a new unique osmid: Negative (as in OSM2PGSQL, created objects contain negative osmid)
node_osmid = node_creation_counter.get_num()
node_data = {'osmid':node_osmid, 'x':x, 'y':y}
# Add middle node with its corresponding data
G.add_node(node_osmid)
nx.set_node_attributes(G, {node_osmid : node_data } )
# Add edges (u,middle) and (middle,v)
G.add_edge(u, node_osmid)
nx.set_edge_attributes(G, { (u, node_osmid, 0): data_e1 } )
G.add_edge(node_osmid, v)
nx.set_edge_attributes(G, { (node_osmid, v, 0): data_e2 } )
# Remove edge (u,v)
G.remove_edge(u,v,key=key)
# Recursively verify created edges and divide if necessary. Use last added key to identify the edge
last_key = len( G[u][node_osmid] ) -1
verify_divide_edge(G, u, node_osmid, last_key, data_e1, node_creation_counter, max_edge_length)
last_key = len( G[node_osmid][v] ) -1
verify_divide_edge(G, node_osmid, v, last_key, data_e2, node_creation_counter, max_edge_length)
def divide_long_edges_graph(G, max_edge_length):
"""
Divide all edges with a higher length than input threshold by means of dividing the arcs and creating new nodes
Parameters
----------
G : networkx multidigraph
input graph
max_edge_length : float
maximum tolerated edge length
Returns
----------
"""
# Negative osm_id indicate created nodes
node_creation_counter = NodeCounter()
for u, v, key, data in list( G.edges(data=True, keys=True) ):
if ( data["length"] > max_edge_length ):
# Divide the edge (u,v) recursively
verify_divide_edge(G, u, v, key, data, node_creation_counter, max_edge_length)
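# Usage sketch (G is assumed to be an OSMnx-style networkx MultiDiGraph with a
# "length" attribute, in meters, on every edge):
#   divide_long_edges_graph(G, max_edge_length=100)
# splits every edge longer than 100 meters in place, inserting intermediate
# nodes with negative osmids and consistent geometry/length attributes.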
|
{"hexsha": "ed5af093981696667d13fc8d69429526e50081f9", "size": 6495, "ext": "py", "lang": "Python", "max_stars_repo_path": "urbansprawl/sprawl/utils.py", "max_stars_repo_name": "welegent2010/urban-sprawl", "max_stars_repo_head_hexsha": "b26bdf7889fdba1382259be7c14e7e0d8f535cd9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 55, "max_stars_repo_stars_event_min_datetime": "2018-01-12T10:45:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T16:07:42.000Z", "max_issues_repo_path": "urbansprawl/sprawl/utils.py", "max_issues_repo_name": "welegent2010/urban-sprawl", "max_issues_repo_head_hexsha": "b26bdf7889fdba1382259be7c14e7e0d8f535cd9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2018-06-08T21:12:53.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-26T10:29:15.000Z", "max_forks_repo_path": "urbansprawl/sprawl/utils.py", "max_forks_repo_name": "welegent2010/urban-sprawl", "max_forks_repo_head_hexsha": "b26bdf7889fdba1382259be7c14e7e0d8f535cd9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2018-06-11T21:35:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:39:06.000Z", "avg_line_length": 32.475, "max_line_length": 177, "alphanum_fraction": 0.6663587375, "include": true, "reason": "import numpy,from scipy,import networkx", "num_tokens": 1740}
|
from src.agent.agent import Agent
from src.config.config import Config
from config.key import CONFIG_KEY
import numpy as np
from src.util.sampler import Sampler
import easy_tf_log
from src.core import Basic
from src.util.noiseAdder import noise_adder
class TargetAgent(Agent):
key_list = Config.load_json(file_path=CONFIG_KEY + '/ddpgAgentKey.json')
def __init__(self, config, real_env, cyber_env, model, sampler=Sampler()):
super(TargetAgent, self).__init__(config=config,
env=real_env,
model=model,
sampler=sampler)
self.real_env = real_env
self.cyber_env = cyber_env
self.env = None
self._env_status = self.config.config_dict['REAL_ENVIRONMENT_STATUS']
self._real_env_sample_count = 0
self._cyber_env_sample_count = 0
self.SamplerTraingCount=0
@property
def env_sample_count(self):
if self.env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
return self._real_env_sample_count
elif self._env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
return self._cyber_env_sample_count
@env_sample_count.setter
def env_sample_count(self, new_value):
if self.status == self.status_key['TEST']:
return
if self.env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
self._real_env_sample_count = new_value
elif self._env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
self._cyber_env_sample_count = new_value
@property
def status(self):
return self._status
@status.setter
def status(self, new_value):
if new_value != Basic.status_key['TRAIN'] and new_value != Basic.status_key['TEST']:
raise KeyError('New Status: %d did not existed' % new_value)
if new_value == Basic.status_key['TEST'] and self.env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
            self.sampler.env_status = self.sampler.config.config_dict['TEST_ENVIRONMENT_STATUS']
if self._status == new_value:
return
self._status = new_value
self.model.status = new_value
@property
def env_status(self):
return self._env_status
@env_status.setter
def env_status(self, new_sta):
self._env_status = new_sta
if self._env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
self.env = self.real_env
self.model.env_status = self.model.config.config_dict['REAL_ENVIRONMENT_STATUS']
if self.status == self.status_key['TEST']:
self.sampler.env_status = self.sampler.config.config_dict['TEST_ENVIRONMENT_STATUS']
else:
self.sampler.env_status = self.sampler.config.config_dict['REAL_ENVIRONMENT_STATUS']
elif self._env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
self.env = self.cyber_env
self.model.env_status = self.model.config.config_dict['CYBER_ENVIRONMENT_STATUS']
self.sampler.env_status = self.sampler.config.config_dict['CYBER_ENVIRONMENT_STATUS']
else:
raise ValueError('Wrong Agent Environment Env Status: %d' % new_sta)
@property
def current_env_status(self):
if self._env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
return 'REAL_ENVIRONMENT_STATUS'
elif self._env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
return 'CYBER_ENVIRONMENT_STATUS'
def predict(self, state, *args, **kwargs):
state = np.reshape(state, [-1])
count = self._real_env_sample_count
eps = 1.0 - (self.config.config_dict['EPS'] - self.config.config_dict['EPS_GREEDY_FINAL_VALUE']) * \
(count / self.config.config_dict['EPS_ZERO_FLAG'])
if eps < 0:
eps = 0.0
rand_eps = np.random.rand(1)
if self.config.config_dict['EPS_GREEDY_FLAG'] == 1 and rand_eps < eps and self.status == self.status_key['TRAIN']:
res = self.env.action_space.sample()
else:
res = np.array(self.model.predict(state))
if self.config.config_dict['NOISE_FLAG'] > 0 and self.status == self.status_key['TRAIN']:
res, noise = noise_adder(action=res, agent=self)
for i in range(len(noise)):
easy_tf_log.tflog(key=self.name + '_ACTION_NOISE_DIM_' + str(i), value=noise[i])
return np.reshape(res, [-1])
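    # Illustration of the epsilon decay above (the config values here are
    # hypothetical): with EPS=0.9, EPS_GREEDY_FINAL_VALUE=0.1 and
    # EPS_ZERO_FLAG=10000, eps = 1.0 - 0.8 * (count / 10000), i.e. eps falls
    # linearly from 1.0 at count=0 to 0.2 at count=10000 and is clipped at 0
    # once it would go negative.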
def sample(self, env, sample_count, store_flag=False, agent_print_log_flag=False, resetNoise_Flag=False):
if self.status == self.status_key['TEST']:
self.sampler.reset(env=env, agent=self)
if self.model.config.config_dict['NOISE_FLAG']==2:
resetNoise_Flag = True
else:
resetNoise_Flag = False
return super().sample(env, sample_count, store_flag, agent_print_log_flag, resetNoise_Flag)
def train(self, sampler_train_flag=0):
if self.model.memory_length >= self.model.config.config_dict['BATCH_SIZE']:
res_dict = self.model.update()
else:
res_dict = None
# TODO add the train process of sampler
# if sampler_train_flag>0 and self._env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
# self.SamplerTraingCount +=1.0
# ####do the training
# ###get the ne_data from memory
# self.sampler.count_new_real_samples =sampler_train_flag
# new_idx = np.arange(self.model.real_data_memory.observations0.length-self.sampler.count_new_real_samples, self.model.real_data_memory.observations0.length)
# new_data_states = self.model.real_data_memory.observations0.get_batch(new_idx)
# ###get all data from memory
# all_idx = new_idx #np.arange(self.model.real_data_memory.observations0.length)
# all_data_states = self.model.real_data_memory.observations0.get_batch(all_idx)
# all_data_actions = self.model.real_data_memory.actions.get_batch(all_idx)
# all_data_nstates = self.model.real_data_memory.observations1.get_batch(all_idx)
# ####predcit the states
# state_est_input = new_data_states
# state_est_label = self.SamplerTraingCount*np.ones([new_data_states.shape[0],1])
# dyn_error_est_input = all_data_states
# prd_nstates = self.cyber_env.model.predict(sess=self.cyber_env.sess,
# state_input=all_data_states,
# action_input=all_data_actions)
# ####get the error for each sample
# dyn_error_est_label = np.sum((all_data_nstates-prd_nstates)**2,1)
# ####normalize the error into range [0,1]
# # dyn_error_est_label = (dyn_error_est_label-np.min(dyn_error_est_label))/(np.max(dyn_error_est_label)-np.min(dyn_error_est_label))
# # print("dyn_error_est_label=", dyn_error_est_label)
# dyn_error_est_label = dyn_error_est_label.reshape([-1,1])
# print("state_est_input.shape=", state_est_input.shape)
# print("dyn_error_est_input.shape=", dyn_error_est_input.shape)
# self.sampler.train(state_est_input, state_est_label, dyn_error_est_input, dyn_error_est_label)
return res_dict
def store_one_sample(self, state, next_state, action, reward, done):
self.model.store_one_sample(state=state,
next_state=next_state,
action=action,
reward=reward,
done=done)
def init(self):
self.model.init()
self.model.reset()
super().init()
def print_log_queue(self, status):
self.status = status
reward_list = []
while self.log_queue.qsize() > 0:
reward_list.append(self.log_queue.get()[self.name + '_SAMPLE_REWARD'])
if len(reward_list) > 0:
reward_list = np.array(reward_list)
sum = np.sum(reward_list).item()
mean = np.mean(reward_list).item()
            std = np.std(reward_list).item()
env_status = None
if self.env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
env_status = 'REAL_ENV'
elif self.env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
env_status = 'CYBER_ENV'
print("%s %s Reward: Sum: %f Average %f Std %f" % (self.name, env_status, sum, mean, std))
if self.status == self.status_key['TRAIN']:
self.log_file_content.append({'INDEX': self.log_print_count,
'REWARD_SUM': sum,
'REWARD_MEAN': mean,
'REWARD_STD': std,
'REAL_SAMPLE_COUNT': self._real_env_sample_count,
'CYBER_SAMPLE_COUNT': self._cyber_env_sample_count,
'ENV': env_status})
elif self.status == self.status_key['TEST']:
self.log_file_content.append({'INDEX': self.log_print_count,
'REWARD_SUM': sum,
'REWARD_MEAN': mean,
'REWARD_STD': std,
'REAL_SAMPLE_COUNT': self._real_env_sample_count,
'CYBER_SAMPLE_COUNT': self._cyber_env_sample_count,
'ENV': env_status})
self.log_print_count += 1
# TODO HOW TO ELEGANT CHANGE THIS
if self.model and hasattr(self.model, 'print_log_queue') and callable(self.model.print_log_queue):
self.model.print_log_queue(status=status)
def reset(self):
super().reset()
self.model.reset()
if __name__ == '__main__':
from config import CONFIG
from src.model.ddpgModel.ddpgModel import DDPGModel
conf = Config(standard_key_list=TargetAgent.key_list)
conf.load_config(path=CONFIG + '/ddpgAgentTestConfig.json')
ddog_con = Config(standard_key_list=DDPGModel.key_list)
ddog_con.load_config(path=CONFIG + '/targetModelTestConfig.json')
ddpg = DDPGModel(config=ddog_con)
a = TargetAgent(config=conf,
real_env=2,
cyber_env=1,
model=ddpg)
|
{"hexsha": "c150b88a37c341fec57ffdb46e36dca4a2120b15", "size": 10836, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/dong/targetAgent.py", "max_stars_repo_name": "Lukeeeeee/DataCenterJobSchedulingSolution", "max_stars_repo_head_hexsha": "9c62c0039b2dd9e0a1ca5474dc46c8be98a972b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demo/dong/targetAgent.py", "max_issues_repo_name": "Lukeeeeee/DataCenterJobSchedulingSolution", "max_issues_repo_head_hexsha": "9c62c0039b2dd9e0a1ca5474dc46c8be98a972b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/dong/targetAgent.py", "max_forks_repo_name": "Lukeeeeee/DataCenterJobSchedulingSolution", "max_forks_repo_head_hexsha": "9c62c0039b2dd9e0a1ca5474dc46c8be98a972b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.7356828194, "max_line_length": 169, "alphanum_fraction": 0.6088039867, "include": true, "reason": "import numpy", "num_tokens": 2294}
|
#include <iostream>
#include <typeinfo>
#include <map>
using std::cout;
using std::endl;
#include "viennagrid/forwards.hpp"
#include "viennagrid/storage/view.hpp"
#include "viennagrid/storage/container_collection.hpp"
#include "viennagrid/storage/inserter.hpp"
#include "viennagrid/storage/id_generator.hpp"
#include "viennagrid/storage/hidden_key_map.hpp"
#include "viennagrid/storage/range.hpp"
#include "viennagrid/topology/vertex.hpp"
#include "viennagrid/topology/line.hpp"
#include "viennagrid/topology/simplex.hpp"
#include "viennagrid/element/element_key.hpp"
#include "viennagrid/element/element_orientation.hpp"
#include "viennagrid/config/element_config.hpp"
#include "viennagrid/config/mesh_config.hpp"
#include "viennagrid/point.hpp"
#include "viennagrid/mesh/mesh.hpp"
#include "viennagrid/mesh/element_creation.hpp"
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/io.hpp>
namespace ublas = boost::numeric::ublas;
typedef ublas::vector<double> vector_type;
class my_mesh_config
{
private:
//typedef viennagrid::storage::pointer_handle_tag handle_tag;
//typedef viennagrid::storage::iterator_handle_tag handle_tag;
typedef viennagrid::storage::id_handle_tag handle_tag;
public:
typedef viennagrid::config::result_of::full_mesh_config< viennagrid::tetrahedron_tag, vector_type, handle_tag >::type type;
};
int main()
{
//
// typedefing and setting up the geometric mesh
//
typedef viennagrid::mesh<my_mesh_config> mesh_type;
mesh_type mesh;
//
// typedefs for the element types
//
typedef viennagrid::result_of::element<mesh_type, viennagrid::vertex_tag>::type vertex_type;
typedef viennagrid::result_of::handle<mesh_type, viennagrid::vertex_tag>::type vertex_handle_type;
typedef viennagrid::result_of::element<mesh_type, viennagrid::line_tag>::type line_type;
typedef viennagrid::result_of::element<mesh_type, viennagrid::triangle_tag>::type triangle_type;
typedef viennagrid::result_of::element<mesh_type, viennagrid::tetrahedron_tag>::type tetrahedron_type;
typedef viennagrid::result_of::handle<mesh_type, viennagrid::tetrahedron_tag>::type tetrahedron_handle_type;
// typedef viennagrid::result_of::handle<mesh_type, viennagrid::polygon_tag>::type tetrahedron_handle_type;
//
// Adding a tetrahedron
//
    // creates four vertices within the mesh, vh is short for vertex handle
// make_element is responsible for resizing all meta-info container which are associated with vertex_type
vertex_handle_type vh0 = viennagrid::make_vertex( mesh );
vertex_handle_type vh1 = viennagrid::make_vertex( mesh );
vertex_handle_type vh2 = viennagrid::make_vertex( mesh );
vertex_handle_type vh3 = viennagrid::make_vertex( mesh );
// create geometric information for the vertices
vector_type p0(3);
p0[0] = 0.0; p0[1] = 0.0; p0[2] = 0.0;
vector_type p1(3);
p1[0] = 1.0; p1[1] = 0.0; p1[2] = 0.0;
vector_type p2(3);
p2[0] = 0.0; p2[1] = 1.0; p2[2] = 0.0;
vector_type p3(3);
p3[0] = 0.0; p3[1] = 0.0; p3[2] = 1.0;
// set the geometric information for the vertices
// is equivalent to viennagrid::look_up<vector_type>(mesh, vhX)
viennagrid::point(mesh, vh0) = p0;
viennagrid::point(mesh, vh1) = p1;
viennagrid::point(mesh, vh2) = p2;
viennagrid::point(mesh, vh3) = p3;
    // creates a handle buffer for the vertex handles of the tetrahedron
std::vector<vertex_handle_type> handles(4);
handles[0] = vh0; handles[1] = vh1; handles[2] = vh2; handles[3] = vh3;
// creates the tetrahedron within the mesh, all boundary cell generation is done here implicit
tetrahedron_handle_type tet = viennagrid::make_element<tetrahedron_type>( mesh, handles.begin(), handles.end() );
std::cout << tet << std::endl;
    // set a double value to a tetrahedron
// viennagrid::look_up<double>(mesh, tet) = 1.0;
//
// display the mesh content
//
cout << "All vertices of the mesh" << endl;
std::copy( viennagrid::elements<viennagrid::vertex_tag>(mesh).begin(), viennagrid::elements<viennagrid::vertex_tag>(mesh).end(), std::ostream_iterator<vertex_type>(cout, "\n") );
cout << endl;
cout << "All lines of the mesh" << endl;
std::copy( viennagrid::elements<viennagrid::line_tag>(mesh).begin(), viennagrid::elements<viennagrid::line_tag>(mesh).end(), std::ostream_iterator<line_type>(cout, "\n") );
cout << endl;
cout << "All triangles of the mesh" << endl;
std::copy( viennagrid::elements<viennagrid::triangle_tag>(mesh).begin(), viennagrid::elements<viennagrid::triangle_tag>(mesh).end(), std::ostream_iterator<triangle_type>(cout, "\n") );
cout << endl;
cout << "All tetraherons of the mesh" << endl;
std::copy( viennagrid::elements<viennagrid::tetrahedron_tag>(mesh).begin(), viennagrid::elements<viennagrid::tetrahedron_tag>(mesh).end(), std::ostream_iterator<tetrahedron_type>(cout, "\n") );
cout << endl;
//
// doing some boundary cell iteration
//
const tetrahedron_type & test_tet = *viennagrid::elements<viennagrid::tetrahedron_tag>(mesh).begin();
const triangle_type & test_tri = *viennagrid::elements<viennagrid::triangle_tag>(mesh).begin();
typedef viennagrid::result_of::const_element_range<tetrahedron_type, viennagrid::triangle_tag>::type tetrahedron_triangle_range;
typedef viennagrid::result_of::const_iterator<tetrahedron_triangle_range>::type tetrahedron_triangle_iterator;
cout << "All triangles of the first tetdrahedron in the mesh" << endl;
tetrahedron_triangle_range tri_range = viennagrid::elements<viennagrid::triangle_tag>(test_tet);
for (tetrahedron_triangle_iterator it = tri_range.begin(); it != tri_range.end(); ++it)
cout << *it << endl;
cout << endl;
cout << "Once more with std::copy" << endl;
std::copy( tri_range.begin(), tri_range.end(), std::ostream_iterator<triangle_type>(cout, "\n") );
cout << endl;
typedef viennagrid::result_of::const_element_range<triangle_type, viennagrid::line_tag>::type triangle_line_range;
typedef viennagrid::result_of::const_iterator<triangle_line_range>::type triangle_line_iterator;
cout << "All lines of the first triangle in the mesh" << endl;
triangle_line_range lin_range = viennagrid::elements<viennagrid::line_tag>(test_tri);
for (triangle_line_iterator it = lin_range.begin(); it != lin_range.end(); ++it)
cout << *it << endl;
cout << endl;
cout << "Once more with std::copy" << endl;
std::copy( lin_range.begin(), lin_range.end(), std::ostream_iterator<line_type>(cout, "\n") );
cout << endl;
//
// geometric iteration
//
// iterating over all vertices and piping out the point information
// typedef viennagrid::result_of::const_element_range<tetrahedron_type, viennagrid::vertex_tag>::type tetrahedron_vertex_range;
// typedef viennagrid::result_of::const_iterator<tetrahedron_vertex_range>::type tetrahedron_vertex_iterator;
//
// cout << "All vertices of the first tetdrahedron in the mesh USING ncells<dim>()" << endl;
// tetrahedron_vertex_range vtx_range = viennagrid::elements<viennagrid::vertex_tag>(test_tet);
// for (tetrahedron_vertex_iterator it = vtx_range.begin(); it != vtx_range.end(); ++it)
// cout << *it << " geometric information: " << viennagrid::look_up<vector_type>( mesh, *it ) << endl;
// cout << endl;
//
//
// typedef viennagrid::result_of::const_element_range<mesh_type, viennagrid::vertex_tag>::type mesh_vertex_range_2;
// typedef viennagrid::result_of::const_iterator<mesh_vertex_range_2>::type mesh_vertex_iterator_2;
//
// cout << "All vertices of the first tetdrahedron in the mesh USING elements<tag>()" << endl;
// mesh_vertex_range_2 mesh_vtx_range_2 = viennagrid::elements<viennagrid::vertex_tag>(mesh);
// for (mesh_vertex_iterator_2 it = mesh_vtx_range_2.begin(); it != mesh_vtx_range_2.end(); ++it)
// cout << *it << " geometric information: " << viennagrid::look_up<vector_type>( mesh, *it ) << endl;
// cout << endl;
//
//
// typedef viennagrid::result_of::const_element_range<mesh_type, vertex_type>::type mesh_vertex_range_3;
// typedef viennagrid::result_of::const_iterator<mesh_vertex_range_3>::type mesh_vertex_iterator_3;
//
// cout << "All vertices of the first tetdrahedron in the mesh USING elements<type>()" << endl;
// mesh_vertex_range_3 mesh_vtx_range_3 = viennagrid::elements<vertex_type>(mesh);
// for (mesh_vertex_iterator_3 it = mesh_vtx_range_3.begin(); it != mesh_vtx_range_3.end(); ++it)
// cout << *it << " geometric information: " << viennagrid::look_up<vector_type>( mesh, *it ) << endl;
// cout << endl;
//
//
// // iterating over all tetrahedrons and piping out the double meta-information
// typedef viennagrid::result_of::const_element_range<mesh_type, viennagrid::tetrahedron_tag>::type tetrahedron_range;
// typedef viennagrid::result_of::const_iterator<tetrahedron_range>::type tetrahedron_iterator;
//
// cout << "All tetdrahedrons in the mesh" << endl;
// tetrahedron_range tet_range = viennagrid::elements<viennagrid::tetrahedron_tag>(mesh);
// for (tetrahedron_iterator it = tet_range.begin(); it != tet_range.end(); ++it)
// {
// cout << *it << endl;
// cout << " geometric information: " << viennagrid::look_up<double>( mesh, *it ) << endl;
// }
// cout << endl;
const mesh_type & test = mesh;
typedef viennagrid::result_of::const_element_range<mesh_type, viennagrid::vertex_tag>::type const_vertex_range;
typedef viennagrid::result_of::iterator<const_vertex_range>::type const_vertex_iterator;
const_vertex_range r = viennagrid::elements<viennagrid::vertex_tag>(test);
for (const_vertex_iterator i = r.begin(); i != r.end(); ++i)
{
cout << *i << endl;
cout << viennagrid::point(test, *i) << endl;
}
return 0;
}
|
{"hexsha": "4f8397322e170427d439dac4de8dcc7f34e3087f", "size": 10045, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/tutorial/old/mesh_test_2.cpp", "max_stars_repo_name": "viennagrid/viennagrid-dev", "max_stars_repo_head_hexsha": "6e47c8d098a0b691d6b9988f2444cd11d440f4c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2015-09-13T03:50:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-27T14:24:49.000Z", "max_issues_repo_path": "examples/tutorial/old/mesh_test_2.cpp", "max_issues_repo_name": "viennagrid/viennagrid-dev", "max_issues_repo_head_hexsha": "6e47c8d098a0b691d6b9988f2444cd11d440f4c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/tutorial/old/mesh_test_2.cpp", "max_forks_repo_name": "viennagrid/viennagrid-dev", "max_forks_repo_head_hexsha": "6e47c8d098a0b691d6b9988f2444cd11d440f4c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2015-07-03T07:14:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T00:51:58.000Z", "avg_line_length": 38.3396946565, "max_line_length": 197, "alphanum_fraction": 0.7036336486, "num_tokens": 2749}
|
# file for homotopy functions
# TODO: add mechanism for selecting which homotopy function to use
# TODO: better idea: move the homotopy function to NLSolver, because it is
# physics agnostic
import PDESolver.evalHomotopy
"""
This function calls the appropriate homotopy function for the Euler module.
"""
function evalHomotopy(mesh::AbstractMesh, sbp::AbstractOperator,
eqn::EulerData{Tsol, Tres},
opts::Dict, res::Abstract3DArray, t = 0.0) where {Tsol, Tres}
fill!(res, 0)
if opts["homotopy_function"] == "ViscousBO"
sensor = ShockSensorBO{Tsol, Tres}(mesh, sbp, opts)
capture = eqn.shock_capturing
res_orig = eqn.res
eqn.res = res
applyShockCapturing(mesh, sbp, eqn, opts, sensor, capture)
eqn.res = res_orig
elseif opts["homotopy_function"] == "FirstOrderDissipation"
calcHomotopyDiss(mesh, sbp, eqn, opts, res)
else
error("unrecognized homotop_function: $(opts["homotopy_function"])")
end
return nothing
end
"""
Calculate a first order accurate dissipation to use as a homotopy function
Inputs:
mesh: a DG mesh
sbp: an SBP operator
eqn: an EulerData object
opts: options dictionary
Inputs/Outputs:
res: 3D array to store the homotopy function in
Note eqn.res is *not* modified by this function.
Aliasing restrictions: none
"""
function calcHomotopyDiss(mesh::AbstractDGMesh{Tmsh}, sbp,
eqn::EulerData{Tsol, Tres}, opts,
res::Abstract3DArray{Tres}) where {Tsol, Tres, Tmsh}
# println("\nentered calcHomotopyDiss")
# some checks for when parallelism is enabled
@assert opts["parallel_data"] == PARALLEL_DATA_ELEMENT
for i=1:mesh.npeers
@assert eqn.shared_data[i].recv_waited
end
fill!(res, 0.0)
#----------------------------------------------------------------------------
# volume dissipation
t1 = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerElement)
t2 = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerElement)
fill!(res, 0.0)
nrm2 = zeros(Tmsh, mesh.dim)
# compute the D operator in each direction
D = zeros(mesh.numNodesPerElement, mesh.numNodesPerElement, mesh.dim)
for d=1:mesh.dim
D[:, :, d] = inv(diagm(sbp.w))*sbp.Q[:, :, d]
end
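  # Sketch of what the element loop below computes (notation informal): for each
  # element and each parametric direction d it adds
  #     res_el += Q_d^T * diag(lambda_max) * D_d * q_el
  # where D_d = H^-1 Q_d is the SBP derivative operator built above and
  # lambda_max is the pointwise maximum wave speed; the global sign flip at the
  # end of the function turns this into a dissipative contribution.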
for el=1:mesh.numEl
q_el = sview(eqn.q, :, :, el)
res_el = sview(res, :, :, el)
for d1=1:mesh.dim
fill!(t1, 0.0)
fill!(t2, 0.0)
differentiateElement!(sbp, d1, q_el, t1)
for j=1:mesh.numNodesPerElement
q_j = sview(eqn.q, :, j, el)
for k=1:mesh.dim
nrm2[k] = mesh.dxidx[d1, k, j, el]
end
lambda_max = getLambdaMax(eqn.params, q_j, nrm2)
for k=1:mesh.numDofPerNode
t2[k, j] = lambda_max*t1[k, j]
end
end # end loop j
weakDifferentiateElement!(sbp, d1, t2, res_el, SummationByParts.Add(), true)
end # end loop dim
end # end loop el
#----------------------------------------------------------------------------
# interface dissipation
# interpolate to face rather than using eqn.q_face, in case it hasn't been
# updated since eqn.q was updated
q_faceL = zeros(Tsol, mesh.numDofPerNode, mesh.numNodesPerFace)
q_faceR = zeros(q_faceL)
# nrm2 = eqn.params.nrm2
flux = zeros(Tres, mesh.numDofPerNode, mesh.numNodesPerFace)
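  # The face term assembled below is a Rusanov/local Lax-Friedrichs style
  # dissipation: at every face node the jump between the two interpolated
  # states is penalized by half the maximum wave speed,
  #     flux = 0.5 * lambda_max * (qL - qR),
  # and then integrated back to both neighbouring elements.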
for i=1:mesh.numInterfaces
iface_i = mesh.interfaces[i]
qL = sview(eqn.q, :, :, iface_i.elementL)
qR = sview(eqn.q, :, :, iface_i.elementR)
resL = sview(res, :, :, iface_i.elementL)
resR = sview(res, :, :, iface_i.elementR)
# fill!(q_faceL, 0.0)
# fill!(q_faceR, 0.0)
interiorFaceInterpolate!(mesh.sbpface, iface_i, qL, qR, q_faceL, q_faceR)
# calculate numerical flux at each face node
for j=1:mesh.numNodesPerFace
qL_j = sview(q_faceL, :, j)
qR_j = sview(q_faceR, :, j)
# get the face normal
nrm2 = sview(mesh.nrm_face, :, j, i)
lambda_max = getLambdaMaxSimple(eqn.params, qL_j, qR_j, nrm2)
for k=1:mesh.numDofPerNode
flux[k, j] = 0.5*lambda_max*(qL_j[k] - qR_j[k])
end
end # end loop j
# integrate over the face
interiorFaceIntegrate!(mesh.sbpface, iface_i, flux, resL, resR)
end # end loop i
# the boundary term makes the predictor-corrector algorithm converge slower
#----------------------------------------------------------------------------
# boundary dissipation
# use q_faceL, nrm2, flux from interface dissipation
if opts["homotopy_addBoundaryIntegrals"]
qg = zeros(Tsol, mesh.numDofPerNode) # boundary state
for i=1:mesh.numBoundaryFaces
bndry_i = mesh.bndryfaces[i]
qL = sview(eqn.q, :, :, bndry_i.element)
resL = sview(res, :, :, bndry_i.element)
fill!(q_faceL, 0.0)
boundaryFaceInterpolate!(mesh.sbpface, bndry_i.face, qL, q_faceL)
# q_faceL = sview(eqn.q_bndry, :, :, i)
for j=1:mesh.numNodesPerFace
q_j = sview(q_faceL, :, j)
# dxidx_j = sview(mesh.dxidx_bndry, :, :, j, i)
# calculate boundary state
coords = sview(mesh.coords_bndry, :, j, i)
calcFreeStream(eqn.params, coords, qg)
# calcInvChannelIC(eqn.params, coords, qg)
# calculate face normal
nrm2 = sview(mesh.nrm_bndry, :, j, i)
# calculate lambda_max
lambda_max = getLambdaMaxSimple(eqn.params, q_j, qg, nrm2)
# calculate dissipation
for k=1:mesh.numDofPerNode
flux[k, j] = 0.5*lambda_max*(q_j[k] - qg[k])
end
end # end loop j
# integrate over the face
boundaryFaceIntegrate!(mesh.sbpface, bndry_i.face, flux, resL)
end # end loop i
end # end if addBoundaryIntegrals
#----------------------------------------------------------------------------
# shared face integrals
# use q_faceL, q_faceR, flux from above
workarr = zeros(q_faceR)
for peer=1:mesh.npeers
# get data for this peer
bndries_local = mesh.bndries_local[peer]
interfaces_peer = mesh.shared_interfaces[peer]
qR_peer = eqn.shared_data[peer].q_recv
nrm_peer = mesh.nrm_sharedface[peer]
start_elnum = mesh.shared_element_offsets[peer]
for i=1:length(bndries_local)
bndry_i = bndries_local[i]
iface_i = interfaces_peer[i]
qL_i = sview(eqn.q, :, :, bndry_i.element)
qR_i = sview(qR_peer, :, :, iface_i.elementR - start_elnum + 1)
resL = sview(res, :, :, bndry_i.element)
# interpolate to face
interiorFaceInterpolate!(mesh.sbpface, iface_i, qL_i, qR_i, q_faceL, q_faceR)
# compute flux at every face node
for j=1:mesh.numNodesPerFace
qL_j = sview(q_faceL, :, j)
qR_j = sview(q_faceR, :, j)
nrm2 = sview(nrm_peer, :, j, i)
# get max wave speed
lambda_max = getLambdaMaxSimple(eqn.params, qL_j, qR_j, nrm2)
# calculate flux
for k=1:mesh.numDofPerNode
flux[k, j] = 0.5*lambda_max*(qL_j[k] - qR_j[k])
end
end # end loop j
# integrate over the face
boundaryFaceIntegrate!(mesh.sbpface, bndry_i.face, flux, resL)
end # end loop i
end # end loop peer
# negate for consistency with the physics module
for i=1:length(res)
res[i] = -res[i]
end
# println("homotopy residual norm = ", norm(vec(res)))
return nothing
end
|
{"hexsha": "2dcbae277de512f3bc72604a7b8c47e90e46401c", "size": 7328, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/solver/euler/homotopy.jl", "max_stars_repo_name": "OptimalDesignLab/PDESolver.jl", "max_stars_repo_head_hexsha": "328ef45f764ab99a9d5cc3c5e4c0a4c56b263279", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2016-10-30T17:12:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-02T10:29:45.000Z", "max_issues_repo_path": "src/solver/euler/homotopy.jl", "max_issues_repo_name": "tangwang-USTC/PDESolver.jl", "max_issues_repo_head_hexsha": "328ef45f764ab99a9d5cc3c5e4c0a4c56b263279", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 163, "max_issues_repo_issues_event_min_datetime": "2015-07-14T19:15:21.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-08T21:24:41.000Z", "max_forks_repo_path": "src/solver/euler/homotopy.jl", "max_forks_repo_name": "tangwang-USTC/PDESolver.jl", "max_forks_repo_head_hexsha": "328ef45f764ab99a9d5cc3c5e4c0a4c56b263279", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2015-05-20T15:36:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T17:57:33.000Z", "avg_line_length": 29.1952191235, "max_line_length": 83, "alphanum_fraction": 0.6173580786, "num_tokens": 2268}
|
function chooseBestModels_HN(pathExperimentsBinary,pathExperimentsTime,fSetNames,nameOutcomes,metric,maxOrder)
% -------------------------------------------------------------------------
% function chooseBestModels_HN(pathExperimentsBinary,pathExperimentsTime,fSetNames,nameOutcomes,metric,maxOrder)
% -------------------------------------------------------------------------
% DESCRIPTION:
% This function requires user input to choose the set types and model orders
% providing the best parsimonious models for all outcomes analyzed in the
% HN study. See ref.[1] for more details.
% -------------------------------------------------------------------------
% REFERENCE:
% [1] Vallieres, M. et al. (2015). FDG-PET/CT radiomics models for the
% early prediction of different tumour outcomes in head and neck cancer.
% The Journal of Nuclear Medicine, aa(bb), xxx-yyy.
% doi:
% -------------------------------------------------------------------------
% INPUTS:
% 1. pathExperimentsBinary: Full path to the directory containing the
%                           experiments for binary outcomes.
%    --> Ex: '/myProject/WORKSPACE/CV-BASED_RESULTS_BINARY'
% 2. pathExperimentsTime: Full path to the directory containing the
%                         experiments for time-to-event outcomes.
%    --> Ex: '/myProject/WORKSPACE/CV-BASED_RESULTS_TIME'
% 3. fSetNames: Cell of strings specifying the names of the feature set
%               types analyzed.
%    --> Ex: {'PET','CT','SEPARATE','FUSED'}
% 4. nameOutcomes: Cell of strings specifying the outcome names to analyze.
%    --> Ex: {'Failure','Locoregional','Distant','Death'}
% 5. metric: String specifying the metric used to display the prediction
%            results of the different model orders.
%    --> Ex: 'AUC632'
% 6. maxOrder: Integer specifying the maximal multivariable model order.
%    --> Ex: 10
% -------------------------------------------------------------------------
% OUTPUTS: Final prediction models saved in a folder named 'FINAL_MODELS/nameOutcome/fSetName'
% in the given experiment folder.
% -------------------------------------------------------------------------
% AUTHOR(S): Martin Vallieres <mart.vallieres@gmail.com>
% -------------------------------------------------------------------------
% HISTORY:
% - Creation: March 2016
%--------------------------------------------------------------------------
% STATEMENT:
% This file is part of <https://github.com/mvallieres/radiomics/>,
% a package providing MATLAB programming tools for radiomics analysis.
% --> Copyright (C) 2015 Martin Vallieres
%
% This package is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This package is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this package. If not, see <http://www.gnu.org/licenses/>.
% -------------------------------------------------------------------------
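% EXAMPLE CALL (sketch; the paths and argument values are illustrative only):
% chooseBestModels_HN('/myProject/WORKSPACE/CV-BASED_RESULTS_BINARY', ...
%                     '/myProject/WORKSPACE/CV-BASED_RESULTS_TIME', ...
%                     {'PET','CT','SEPARATE','FUSED'}, ...
%                     {'Failure','Locoregional','Distant','Death'}, ...
%                     'AUC632',10)
% -------------------------------------------------------------------------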
startpath = pwd;
cd(pathExperimentsBinary)
nOutcomes = length(nameOutcomes);
nType = length(fSetNames);
fprintf('\n')
cd(pathExperimentsTime), pathExperimentTime = pathExperimentsTime;
cd(pathExperimentTime), mkdir('FINAL_MODELS'), cd('FINAL_MODELS'), pathFinalModelsTime = pwd;
cd(pathExperimentsBinary), pathExperimentBinary = pathExperimentsBinary;
cd(pathExperimentBinary), mkdir('FINAL_MODELS'), cd('FINAL_MODELS'), pathFinalModelsBinary = pwd;
for o = 1:nOutcomes
cd(pathFinalModelsTime), mkdir(nameOutcomes{o}), cd(fullfile(pathFinalModelsTime,nameOutcomes{o})), pathOutcomeTime = pwd;
cd(pathFinalModelsBinary), mkdir(nameOutcomes{o}), cd(fullfile(pathFinalModelsBinary,nameOutcomes{o})), pathOutcomeBinary = pwd;
plotPredictionResults_HN(fullfile(pathExperimentBinary,'RESULTS'),nameOutcomes{o},fSetNames,metric,maxOrder)
fprintf('\n======== DISPLAYING PREDICTION RESULTS FOR "%s" OUTCOME, "%s" METRIC ========\n',nameOutcomes{o},metric)
for f = 1:nType
cd(pathOutcomeTime), mkdir(fSetNames{f}), cd(fullfile(pathOutcomeTime,fSetNames{f})), pathFinalTime = pwd;
cd(pathOutcomeBinary), mkdir(fSetNames{f}), cd(fullfile(pathOutcomeBinary,fSetNames{f})), pathFinalBinary = pwd;
while 1
order = input(['Which model order of the ',fSetNames{f},' feature set provides the best parsimonious model? \n' ...
'--> Type a number between 1 to ',num2str(maxOrder),' and press ENTER \n' ...
'ANSWER: ']);
fprintf('\n')
if isnumeric(order) && order <= maxOrder && order >= 1
break
end
end
cd(fullfile(pathExperimentBinary,'RESULTS'))
results = load(['RESULTS_',fSetNames{f},'_',nameOutcomes{o}]); results = struct2cell(results); results = results{1};
finalModel = results.(['Order',num2str(order)]);
finalModel.Order = order;
finalModel.outcome = nameOutcomes{o};
cd(pathFinalBinary), save('finalModel','finalModel')
cd(pathFinalTime), save('finalModel','finalModel')
end
close all
end
cd(startpath)
end
|
{"author": "mvallieres", "repo": "radiomics", "sha": "d3a61737730e1b2b46d04c9e22a3fcc390912f1a", "save_path": "github-repos/MATLAB/mvallieres-radiomics", "path": "github-repos/MATLAB/mvallieres-radiomics/radiomics-d3a61737730e1b2b46d04c9e22a3fcc390912f1a/STUDIES/HN_study/Functions/MULTIVARIABLE_MODELING/chooseBestModels_HN.m"}
|
(* Author: Tobias Nipkow, Alex Krauss, Christian Urban *)
section "Regular sets"
theory Regular_Set
imports Main
begin
type_synonym 'a lang = "'a list set"
definition conc :: "'a lang \<Rightarrow> 'a lang \<Rightarrow> 'a lang" (infixr "@@" 75) where
"A @@ B = {xs@ys | xs ys. xs:A & ys:B}"
text \<open>checks the code preprocessor for set comprehensions\<close>
export_code conc checking SML
overloading lang_pow == "compow :: nat \<Rightarrow> 'a lang \<Rightarrow> 'a lang"
begin
primrec lang_pow :: "nat \<Rightarrow> 'a lang \<Rightarrow> 'a lang" where
"lang_pow 0 A = {[]}" |
"lang_pow (Suc n) A = A @@ (lang_pow n A)"
end
text \<open>for code generation\<close>
definition lang_pow :: "nat \<Rightarrow> 'a lang \<Rightarrow> 'a lang" where
lang_pow_code_def [code_abbrev]: "lang_pow = compow"
hide_const (open) lang_pow
definition star :: "'a lang \<Rightarrow> 'a lang" where
"star A = (\<Union>n. A ^^ n)"
subsection\<open>@{term "(@@)"}\<close>
lemma concI[simp,intro]: "u : A \<Longrightarrow> v : B \<Longrightarrow> u@v : A @@ B"
by (auto simp add: conc_def)
lemma concE[elim]:
assumes "w \<in> A @@ B"
obtains u v where "u \<in> A" "v \<in> B" "w = u@v"
using assms by (auto simp: conc_def)
lemma conc_mono: "A \<subseteq> C \<Longrightarrow> B \<subseteq> D \<Longrightarrow> A @@ B \<subseteq> C @@ D"
by (auto simp: conc_def)
lemma conc_empty[simp]: shows "{} @@ A = {}" and "A @@ {} = {}"
by auto
lemma conc_epsilon[simp]: shows "{[]} @@ A = A" and "A @@ {[]} = A"
by (simp_all add:conc_def)
lemma conc_assoc: "(A @@ B) @@ C = A @@ (B @@ C)"
by (auto elim!: concE) (simp only: append_assoc[symmetric] concI)
lemma conc_Un_distrib:
shows "A @@ (B \<union> C) = A @@ B \<union> A @@ C"
and "(A \<union> B) @@ C = A @@ C \<union> B @@ C"
by auto
lemma conc_UNION_distrib:
shows "A @@ \<Union>(M ` I) = \<Union>((%i. A @@ M i) ` I)"
and "\<Union>(M ` I) @@ A = \<Union>((%i. M i @@ A) ` I)"
by auto
lemma conc_subset_lists: "A \<subseteq> lists S \<Longrightarrow> B \<subseteq> lists S \<Longrightarrow> A @@ B \<subseteq> lists S"
by(fastforce simp: conc_def in_lists_conv_set)
lemma Nil_in_conc[simp]: "[] \<in> A @@ B \<longleftrightarrow> [] \<in> A \<and> [] \<in> B"
by (metis append_is_Nil_conv concE concI)
lemma concI_if_Nil1: "[] \<in> A \<Longrightarrow> xs : B \<Longrightarrow> xs \<in> A @@ B"
by (metis append_Nil concI)
lemma conc_Diff_if_Nil1: "[] \<in> A \<Longrightarrow> A @@ B = (A - {[]}) @@ B \<union> B"
by (fastforce elim: concI_if_Nil1)
lemma concI_if_Nil2: "[] \<in> B \<Longrightarrow> xs : A \<Longrightarrow> xs \<in> A @@ B"
by (metis append_Nil2 concI)
lemma conc_Diff_if_Nil2: "[] \<in> B \<Longrightarrow> A @@ B = A @@ (B - {[]}) \<union> A"
by (fastforce elim: concI_if_Nil2)
lemma singleton_in_conc:
"[x] : A @@ B \<longleftrightarrow> [x] : A \<and> [] : B \<or> [] : A \<and> [x] : B"
by (fastforce simp: Cons_eq_append_conv append_eq_Cons_conv
conc_Diff_if_Nil1 conc_Diff_if_Nil2)
subsection\<open>@{term "A ^^ n"}\<close>
lemma lang_pow_add: "A ^^ (n + m) = A ^^ n @@ A ^^ m"
by (induct n) (auto simp: conc_assoc)
lemma lang_pow_empty: "{} ^^ n = (if n = 0 then {[]} else {})"
by (induct n) auto
lemma lang_pow_empty_Suc[simp]: "({}::'a lang) ^^ Suc n = {}"
by (simp add: lang_pow_empty)
lemma conc_pow_comm:
  shows "A @@ (A ^^ n) = (A ^^ n) @@ A"
by (induct n) (simp_all add: conc_assoc[symmetric])
lemma length_lang_pow_ub:
"\<forall>w \<in> A. length w \<le> k \<Longrightarrow> w : A^^n \<Longrightarrow> length w \<le> k*n"
by(induct n arbitrary: w) (fastforce simp: conc_def)+
lemma length_lang_pow_lb:
"\<forall>w \<in> A. length w \<ge> k \<Longrightarrow> w : A^^n \<Longrightarrow> length w \<ge> k*n"
by(induct n arbitrary: w) (fastforce simp: conc_def)+
lemma lang_pow_subset_lists: "A \<subseteq> lists S \<Longrightarrow> A ^^ n \<subseteq> lists S"
by(induct n)(auto simp: conc_subset_lists)
subsection\<open>@{const star}\<close>
lemma star_subset_lists: "A \<subseteq> lists S \<Longrightarrow> star A \<subseteq> lists S"
unfolding star_def by(blast dest: lang_pow_subset_lists)
lemma star_if_lang_pow[simp]: "w : A ^^ n \<Longrightarrow> w : star A"
by (auto simp: star_def)
lemma Nil_in_star[iff]: "[] : star A"
proof (rule star_if_lang_pow)
show "[] : A ^^ 0" by simp
qed
lemma star_if_lang[simp]: assumes "w : A" shows "w : star A"
proof (rule star_if_lang_pow)
show "w : A ^^ 1" using \<open>w : A\<close> by simp
qed
lemma append_in_starI[simp]:
assumes "u : star A" and "v : star A" shows "u@v : star A"
proof -
from \<open>u : star A\<close> obtain m where "u : A ^^ m" by (auto simp: star_def)
moreover
from \<open>v : star A\<close> obtain n where "v : A ^^ n" by (auto simp: star_def)
ultimately have "u@v : A ^^ (m+n)" by (simp add: lang_pow_add)
thus ?thesis by simp
qed
lemma conc_star_star: "star A @@ star A = star A"
by (auto simp: conc_def)
lemma conc_star_comm:
shows "A @@ star A = star A @@ A"
unfolding star_def conc_pow_comm conc_UNION_distrib
by simp
lemma star_induct[consumes 1, case_names Nil append, induct set: star]:
assumes "w : star A"
and "P []"
and step: "!!u v. u : A \<Longrightarrow> v : star A \<Longrightarrow> P v \<Longrightarrow> P (u@v)"
shows "P w"
proof -
{ fix n have "w : A ^^ n \<Longrightarrow> P w"
by (induct n arbitrary: w) (auto intro: \<open>P []\<close> step star_if_lang_pow) }
with \<open>w : star A\<close> show "P w" by (auto simp: star_def)
qed
lemma star_empty[simp]: "star {} = {[]}"
by (auto elim: star_induct)
lemma star_epsilon[simp]: "star {[]} = {[]}"
by (auto elim: star_induct)
lemma star_idemp[simp]: "star (star A) = star A"
by (auto elim: star_induct)
lemma star_unfold_left: "star A = A @@ star A \<union> {[]}" (is "?L = ?R")
proof
show "?L \<subseteq> ?R" by (rule, erule star_induct) auto
qed auto
lemma concat_in_star: "set ws \<subseteq> A \<Longrightarrow> concat ws : star A"
by (induct ws) simp_all
lemma in_star_iff_concat:
"w \<in> star A = (\<exists>ws. set ws \<subseteq> A \<and> w = concat ws)"
(is "_ = (\<exists>ws. ?R w ws)")
proof
assume "w : star A" thus "\<exists>ws. ?R w ws"
proof induct
case Nil have "?R [] []" by simp
thus ?case ..
next
case (append u v)
then obtain ws where "set ws \<subseteq> A \<and> v = concat ws" by blast
with append have "?R (u@v) (u#ws)" by auto
thus ?case ..
qed
next
assume "\<exists>us. ?R w us" thus "w : star A"
by (auto simp: concat_in_star)
qed
lemma star_conv_concat: "star A = {concat ws|ws. set ws \<subseteq> A}"
by (fastforce simp: in_star_iff_concat)
lemma star_insert_eps[simp]: "star (insert [] A) = star(A)"
proof-
{ fix us
have "set us \<subseteq> insert [] A \<Longrightarrow> \<exists>vs. concat us = concat vs \<and> set vs \<subseteq> A"
(is "?P \<Longrightarrow> \<exists>vs. ?Q vs")
proof
let ?vs = "filter (%u. u \<noteq> []) us"
show "?P \<Longrightarrow> ?Q ?vs" by (induct us) auto
qed
} thus ?thesis by (auto simp: star_conv_concat)
qed
lemma star_unfold_left_Nil: "star A = (A - {[]}) @@ (star A) \<union> {[]}"
by (metis insert_Diff_single star_insert_eps star_unfold_left)
lemma star_Diff_Nil_fold: "(A - {[]}) @@ star A = star A - {[]}"
proof -
have "[] \<notin> (A - {[]}) @@ star A" by simp
thus ?thesis using star_unfold_left_Nil by blast
qed
lemma star_decom:
assumes a: "x \<in> star A" "x \<noteq> []"
shows "\<exists>a b. x = a @ b \<and> a \<noteq> [] \<and> a \<in> A \<and> b \<in> star A"
using a by (induct rule: star_induct) (blast)+
subsection \<open>Left-Quotients of languages\<close>
definition Deriv :: "'a \<Rightarrow> 'a lang \<Rightarrow> 'a lang"
where "Deriv x A = { xs. x#xs \<in> A }"
definition Derivs :: "'a list \<Rightarrow> 'a lang \<Rightarrow> 'a lang"
where "Derivs xs A = { ys. xs @ ys \<in> A }"
abbreviation
Derivss :: "'a list \<Rightarrow> 'a lang set \<Rightarrow> 'a lang"
where
"Derivss s As \<equiv> \<Union> (Derivs s ` As)"
lemma Deriv_empty[simp]: "Deriv a {} = {}"
and Deriv_epsilon[simp]: "Deriv a {[]} = {}"
and Deriv_char[simp]: "Deriv a {[b]} = (if a = b then {[]} else {})"
and Deriv_union[simp]: "Deriv a (A \<union> B) = Deriv a A \<union> Deriv a B"
and Deriv_inter[simp]: "Deriv a (A \<inter> B) = Deriv a A \<inter> Deriv a B"
and Deriv_compl[simp]: "Deriv a (-A) = - Deriv a A"
and Deriv_Union[simp]: "Deriv a (Union M) = Union(Deriv a ` M)"
and Deriv_UN[simp]: "Deriv a (UN x:I. S x) = (UN x:I. Deriv a (S x))"
by (auto simp: Deriv_def)
lemma Der_conc [simp]:
shows "Deriv c (A @@ B) = (Deriv c A) @@ B \<union> (if [] \<in> A then Deriv c B else {})"
unfolding Deriv_def conc_def
by (auto simp add: Cons_eq_append_conv)
lemma Deriv_star [simp]:
shows "Deriv c (star A) = (Deriv c A) @@ star A"
proof -
have "Deriv c (star A) = Deriv c ({[]} \<union> A @@ star A)"
by (metis star_unfold_left sup.commute)
also have "... = Deriv c (A @@ star A)"
unfolding Deriv_union by (simp)
also have "... = (Deriv c A) @@ (star A) \<union> (if [] \<in> A then Deriv c (star A) else {})"
by simp
also have "... = (Deriv c A) @@ star A"
unfolding conc_def Deriv_def
using star_decom by (force simp add: Cons_eq_append_conv)
finally show "Deriv c (star A) = (Deriv c A) @@ star A" .
qed
lemma Deriv_diff[simp]:
shows "Deriv c (A - B) = Deriv c A - Deriv c B"
by(auto simp add: Deriv_def)
lemma Deriv_lists[simp]: "c : S \<Longrightarrow> Deriv c (lists S) = lists S"
by(auto simp add: Deriv_def)
lemma Derivs_simps [simp]:
shows "Derivs [] A = A"
and "Derivs (c # s) A = Derivs s (Deriv c A)"
and "Derivs (s1 @ s2) A = Derivs s2 (Derivs s1 A)"
unfolding Derivs_def Deriv_def by auto
lemma in_fold_Deriv: "v \<in> fold Deriv w L \<longleftrightarrow> w @ v \<in> L"
by (induct w arbitrary: L) (simp_all add: Deriv_def)
lemma Derivs_alt_def [code]: "Derivs w L = fold Deriv w L"
by (induct w arbitrary: L) simp_all
lemma Deriv_code [code]:
"Deriv x A = tl ` Set.filter (\<lambda>xs. case xs of x' # _ \<Rightarrow> x = x' | _ \<Rightarrow> False) A"
by (auto simp: Deriv_def Set.filter_def image_iff tl_def split: list.splits)
subsection \<open>Shuffle product\<close>
definition Shuffle (infixr "\<parallel>" 80) where
"Shuffle A B = \<Union>{shuffles xs ys | xs ys. xs \<in> A \<and> ys \<in> B}"
lemma Deriv_Shuffle[simp]:
"Deriv a (A \<parallel> B) = Deriv a A \<parallel> B \<union> A \<parallel> Deriv a B"
unfolding Shuffle_def Deriv_def by (fastforce simp: Cons_in_shuffles_iff neq_Nil_conv)
lemma shuffle_subset_lists:
assumes "A \<subseteq> lists S" "B \<subseteq> lists S"
shows "A \<parallel> B \<subseteq> lists S"
unfolding Shuffle_def proof safe
fix x and zs xs ys :: "'a list"
assume zs: "zs \<in> shuffles xs ys" "x \<in> set zs" and "xs \<in> A" "ys \<in> B"
with assms have "xs \<in> lists S" "ys \<in> lists S" by auto
with zs show "x \<in> S" by (induct xs ys arbitrary: zs rule: shuffles.induct) auto
qed
lemma Nil_in_Shuffle[simp]: "[] \<in> A \<parallel> B \<longleftrightarrow> [] \<in> A \<and> [] \<in> B"
unfolding Shuffle_def by force
lemma shuffle_UNION_distrib:
shows "A \<parallel> \<Union>(M ` I) = \<Union>((%i. A \<parallel> M i) ` I)"
and "\<Union>(M ` I) \<parallel> A = \<Union>((%i. M i \<parallel> A) ` I)"
unfolding Shuffle_def by fast+
lemma Shuffle_empty[simp]:
"A \<parallel> {} = {}"
"{} \<parallel> B = {}"
unfolding Shuffle_def by auto
subsection \<open>Arden's Lemma\<close>
lemma arden_helper:
assumes eq: "X = A @@ X \<union> B"
shows "X = (A ^^ Suc n) @@ X \<union> (\<Union>m\<le>n. (A ^^ m) @@ B)"
proof (induct n)
case 0
show "X = (A ^^ Suc 0) @@ X \<union> (\<Union>m\<le>0. (A ^^ m) @@ B)"
using eq by simp
next
case (Suc n)
have ih: "X = (A ^^ Suc n) @@ X \<union> (\<Union>m\<le>n. (A ^^ m) @@ B)" by fact
also have "\<dots> = (A ^^ Suc n) @@ (A @@ X \<union> B) \<union> (\<Union>m\<le>n. (A ^^ m) @@ B)" using eq by simp
also have "\<dots> = (A ^^ Suc (Suc n)) @@ X \<union> ((A ^^ Suc n) @@ B) \<union> (\<Union>m\<le>n. (A ^^ m) @@ B)"
by (simp add: conc_Un_distrib conc_assoc[symmetric] conc_pow_comm)
also have "\<dots> = (A ^^ Suc (Suc n)) @@ X \<union> (\<Union>m\<le>Suc n. (A ^^ m) @@ B)"
by (auto simp add: atMost_Suc)
finally show "X = (A ^^ Suc (Suc n)) @@ X \<union> (\<Union>m\<le>Suc n. (A ^^ m) @@ B)" .
qed
lemma Arden:
assumes "[] \<notin> A"
shows "X = A @@ X \<union> B \<longleftrightarrow> X = star A @@ B"
proof
assume eq: "X = A @@ X \<union> B"
{ fix w assume "w : X"
let ?n = "size w"
from \<open>[] \<notin> A\<close> have "\<forall>u \<in> A. length u \<ge> 1"
by (metis Suc_eq_plus1 add_leD2 le_0_eq length_0_conv not_less_eq_eq)
hence "\<forall>u \<in> A^^(?n+1). length u \<ge> ?n+1"
by (metis length_lang_pow_lb nat_mult_1)
hence "\<forall>u \<in> A^^(?n+1)@@X. length u \<ge> ?n+1"
by(auto simp only: conc_def length_append)
hence "w \<notin> A^^(?n+1)@@X" by auto
hence "w : star A @@ B" using \<open>w : X\<close> using arden_helper[OF eq, where n="?n"]
by (auto simp add: star_def conc_UNION_distrib)
} moreover
{ fix w assume "w : star A @@ B"
hence "\<exists>n. w \<in> A^^n @@ B" by(auto simp: conc_def star_def)
hence "w : X" using arden_helper[OF eq] by blast
} ultimately show "X = star A @@ B" by blast
next
assume eq: "X = star A @@ B"
have "star A = A @@ star A \<union> {[]}"
by (rule star_unfold_left)
then have "star A @@ B = (A @@ star A \<union> {[]}) @@ B"
by metis
also have "\<dots> = (A @@ star A) @@ B \<union> B"
unfolding conc_Un_distrib by simp
also have "\<dots> = A @@ (star A @@ B) \<union> B"
by (simp only: conc_assoc)
finally show "X = A @@ X \<union> B"
using eq by blast
qed
lemma reversed_arden_helper:
assumes eq: "X = X @@ A \<union> B"
shows "X = X @@ (A ^^ Suc n) \<union> (\<Union>m\<le>n. B @@ (A ^^ m))"
proof (induct n)
case 0
show "X = X @@ (A ^^ Suc 0) \<union> (\<Union>m\<le>0. B @@ (A ^^ m))"
using eq by simp
next
case (Suc n)
have ih: "X = X @@ (A ^^ Suc n) \<union> (\<Union>m\<le>n. B @@ (A ^^ m))" by fact
also have "\<dots> = (X @@ A \<union> B) @@ (A ^^ Suc n) \<union> (\<Union>m\<le>n. B @@ (A ^^ m))" using eq by simp
also have "\<dots> = X @@ (A ^^ Suc (Suc n)) \<union> (B @@ (A ^^ Suc n)) \<union> (\<Union>m\<le>n. B @@ (A ^^ m))"
by (simp add: conc_Un_distrib conc_assoc)
also have "\<dots> = X @@ (A ^^ Suc (Suc n)) \<union> (\<Union>m\<le>Suc n. B @@ (A ^^ m))"
by (auto simp add: atMost_Suc)
finally show "X = X @@ (A ^^ Suc (Suc n)) \<union> (\<Union>m\<le>Suc n. B @@ (A ^^ m))" .
qed
theorem reversed_Arden:
assumes nemp: "[] \<notin> A"
shows "X = X @@ A \<union> B \<longleftrightarrow> X = B @@ star A"
proof
assume eq: "X = X @@ A \<union> B"
{ fix w assume "w : X"
let ?n = "size w"
from \<open>[] \<notin> A\<close> have "\<forall>u \<in> A. length u \<ge> 1"
by (metis Suc_eq_plus1 add_leD2 le_0_eq length_0_conv not_less_eq_eq)
hence "\<forall>u \<in> A^^(?n+1). length u \<ge> ?n+1"
by (metis length_lang_pow_lb nat_mult_1)
hence "\<forall>u \<in> X @@ A^^(?n+1). length u \<ge> ?n+1"
by(auto simp only: conc_def length_append)
hence "w \<notin> X @@ A^^(?n+1)" by auto
hence "w : B @@ star A" using \<open>w : X\<close> using reversed_arden_helper[OF eq, where n="?n"]
by (auto simp add: star_def conc_UNION_distrib)
} moreover
{ fix w assume "w : B @@ star A"
hence "\<exists>n. w \<in> B @@ A^^n" by (auto simp: conc_def star_def)
hence "w : X" using reversed_arden_helper[OF eq] by blast
} ultimately show "X = B @@ star A" by blast
next
assume eq: "X = B @@ star A"
have "star A = {[]} \<union> star A @@ A"
unfolding conc_star_comm[symmetric]
by(metis Un_commute star_unfold_left)
then have "B @@ star A = B @@ ({[]} \<union> star A @@ A)"
by metis
also have "\<dots> = B \<union> B @@ (star A @@ A)"
unfolding conc_Un_distrib by simp
also have "\<dots> = B \<union> (B @@ star A) @@ A"
by (simp only: conc_assoc)
finally show "X = X @@ A \<union> B"
using eq by blast
qed
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Evaluation/Regular-Sets/Regular_Set.thy"}
|
# A script that takes a model as input, predicts the
# validation set of Semeval2017A and saves the results
# in y_preds.txt
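# Minimal usage sketch (the checkpoint path below is hypothetical):
#   python predict-validation.py checkpoints/best_model.pt
# The single command-line argument is the saved model loaded further down via
# torch.load(sys.argv[1]).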
import os
import warnings
import argparse
import errno
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import f1_score, accuracy_score, recall_score
import torch
from torch.utils.data import DataLoader
import numpy as np
import sys
from config import EMB_PATH
from dataloading import SentenceDataset
from models.BaselineDNN import BaselineDNN
from models.LSTMNet import LSTMNet
from models.LSTMpool import LSTMpool
from models.NN_Attention import NN_Attention
from models.LSTM_Attention import LSTM_Attention
from models.BiLSTM_Attention import BiLSTM_Attention
from training import train_dataset, eval_dataset
from utils.load_datasets import load_MR, load_Semeval2017A
from utils.load_embeddings import load_word_vectors
from nltk.tokenize import TweetTokenizer
from nltk.tokenize import word_tokenize
import matplotlib.pyplot as plt
EMBEDDINGS = os.path.join(EMB_PATH, 'glove.twitter.27B.50d.txt')
EMB_DIM = 50
EMB_TRAINABLE = False
DATASET = "Semeval2017A"
# If your computer has a CUDA compatible gpu use it, otherwise use the cpu
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load word embeddings
print("loading word embeddings...")
word2idx, idx2word, embeddings = load_word_vectors(EMBEDDINGS, EMB_DIM)
# Load the raw data
if DATASET == "Semeval2017A":
_, _, X_test, y_test = load_Semeval2017A()
else:
raise ValueError("Invalid dataset")
# Convert data labels from strings to integers
# Create a new label encoder
le = LabelEncoder()
# Encode test set labels
y_test = le.fit_transform(y_test)
# Compute number of classes made by the encoder
n_classes = le.classes_.size
# Define our PyTorch-based Dataset
test_set = SentenceDataset(X_test, y_test, word2idx)
# Define our PyTorch-based DataLoader
# Batch size is 1 because we want to
# write results in a file in the same
# order.
test_loader = DataLoader(test_set)
# Load user model.
model = torch.load(sys.argv[1])
# Define criterion for evaluation.
loss_function = torch.nn.CrossEntropyLoss()
model.eval()
# Obtain the model's device ID
device = next(model.parameters()).device
f = open("y_preds.txt", 'w')
# IMPORTANT: in evaluation mode, we don't want to keep the gradients
# so we do everything under torch.no_grad()
with torch.no_grad():
for index, batch in enumerate(test_loader, 1):
# Get the inputs (batch)
inputs, labels, lengths = batch
# Step 1 - move the batch tensors to the right device
inputs = inputs.to(device)
labels = labels.to(device)
# Step 2 - forward pass: y' = model(x)
y_preds = model(inputs, lengths) # EX9
# Step 3 - make predictions (class = argmax of posteriors)
y_preds_arg = torch.argmax(y_preds)
# Step 4 - write the predictions
f.write(str(y_preds_arg.item()) + '\n')
f.close()
print("Predictions saved succesfully")
|
{"hexsha": "61546c736edf212bfb167d64c488d2c238306e44", "size": 3047, "ext": "py", "lang": "Python", "max_stars_repo_path": "Lab3/Lab/predict-validation.py", "max_stars_repo_name": "PanosAntoniadis/slp-ntua", "max_stars_repo_head_hexsha": "f144cd82fddbdfab27dd1ed025fb0d4c9a83a15f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-03-17T17:34:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-26T07:59:32.000Z", "max_issues_repo_path": "Lab3/Lab/predict-validation.py", "max_issues_repo_name": "PanosAntoniadis/slp-ntua", "max_issues_repo_head_hexsha": "f144cd82fddbdfab27dd1ed025fb0d4c9a83a15f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Lab3/Lab/predict-validation.py", "max_forks_repo_name": "PanosAntoniadis/slp-ntua", "max_forks_repo_head_hexsha": "f144cd82fddbdfab27dd1ed025fb0d4c9a83a15f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-01-11T13:06:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-07T16:05:28.000Z", "avg_line_length": 31.7395833333, "max_line_length": 74, "alphanum_fraction": 0.7610764687, "include": true, "reason": "import numpy", "num_tokens": 747}
|
#include <bluetoe/options.hpp>
#include <string>
#define BOOST_TEST_MODULE
#include <boost/test/included/unit_test.hpp>
#include <type_traits>
template < typename >
struct template_a {};
template < typename >
struct template_b {};
BOOST_AUTO_TEST_CASE( select_type )
{
BOOST_CHECK( ( std::is_same< typename bluetoe::details::select_type< true, int, bool >::type, int >::value ) );
BOOST_CHECK( ( std::is_same< typename bluetoe::details::select_type< false, int, bool >::type, bool >::value ) );
}
BOOST_AUTO_TEST_CASE( select_template_t1 )
{
BOOST_CHECK( (
std::is_same<
bluetoe::details::select_template_t1< true, template_a, template_b >::template type< int >,
template_a< int > >::value
) );
BOOST_CHECK( (
std::is_same<
bluetoe::details::select_template_t1< false, template_a, template_b >::template type< int >,
template_b< int > >::value
) );
}
BOOST_AUTO_TEST_CASE( or_type )
{
BOOST_CHECK( ( std::is_same< bluetoe::details::or_type< int, char, long >::type, char >::value ) );
BOOST_CHECK( ( std::is_same< bluetoe::details::or_type< int, int, long >::type, long >::value ) );
BOOST_CHECK( ( std::is_same< bluetoe::details::or_type< int, int, int >::type, int >::value ) );
}
BOOST_AUTO_TEST_CASE( not_type )
{
BOOST_CHECK( !( bluetoe::details::not_type< std::true_type >::type::value ) );
BOOST_CHECK( ( bluetoe::details::not_type< std::false_type >::type::value ) );
}
namespace {
struct meta1 {};
struct meta2 {};
struct meta3 {};
struct meta4 {};
struct meta1a : meta1 {};
struct meta12 : meta1, meta2 {};
struct type1 {
typedef meta1 meta_type;
static std::string name() {
return "type1";
}
};
struct type1a {
typedef meta1a meta_type;
static std::string name() {
return "type1a";
}
};
struct type12 {
typedef meta12 meta_type;
static std::string name() {
return "type12";
}
};
struct type11 {
typedef meta1 meta_type;
static std::string name() {
return "type11";
}
};
struct type2 {
typedef meta2 meta_type;
static std::string name() {
return "type2";
}
};
struct type3 {
typedef meta3 meta_type;
static std::string name() {
return "type3";
}
};
struct type4 {};
}
BOOST_AUTO_TEST_CASE( extract_meta_type )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::extract_meta_type< type1 >::meta_type,
meta1
>::value ) );
BOOST_CHECK( ( bluetoe::details::extract_meta_type< type1 >::has_meta_type::value ) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::extract_meta_type< type4 >::meta_type,
bluetoe::details::no_such_type
>::value ) );
BOOST_CHECK( ( !bluetoe::details::extract_meta_type< type4 >::has_meta_type::value ) );
}
BOOST_AUTO_TEST_CASE( find_meta_type_in_empty_list )
{
BOOST_CHECK( ( std::is_same< typename bluetoe::details::find_by_meta_type< meta1 >::type, bluetoe::details::no_such_type >::value ) );
}
BOOST_AUTO_TEST_CASE( find_meta_type_first_and_only_element )
{
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type1 >::type, type1 >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type1a >::type, type1a >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type12 >::type, type12 >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type2 >::type, bluetoe::details::no_such_type >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type4 >::type, bluetoe::details::no_such_type >::value ) );
}
BOOST_AUTO_TEST_CASE( find_meta_type_in_larger_list )
{
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type1, type2 >::type, type1 >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type2, type1, type3 >::type, type1 >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type2, type3, type4, type1 >::type, type1 >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type2, type3, type4, type1, type11 >::type, type1 >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type2, type3, type4, type1 >::type, type1 >::value ) );
BOOST_CHECK( (
std::is_same< typename bluetoe::details::find_by_meta_type< meta1, type2, type3, type4 >::type, bluetoe::details::no_such_type >::value ) );
}
BOOST_AUTO_TEST_CASE( find_by_not_meta_type_in_empty_list )
{
BOOST_CHECK( ( std::is_same< typename bluetoe::details::find_by_not_meta_type< meta1 >::type, bluetoe::details::no_such_type >::value ) );
}
BOOST_AUTO_TEST_CASE( find_by_not_meta_type_in_one_element )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_by_not_meta_type< meta1, type1 >::type,
bluetoe::details::no_such_type
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_by_not_meta_type< meta1, type2 >::type,
type2
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_by_not_meta_type< meta1, type12 >::type,
bluetoe::details::no_such_type
>::value
) );
}
BOOST_AUTO_TEST_CASE( find_by_not_meta_type_in_list )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_by_not_meta_type< meta1, type1, type2, type12 >::type,
type2
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_by_not_meta_type< meta1, type1, type12 >::type,
bluetoe::details::no_such_type
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_by_not_meta_type< meta1, type2, type3 >::type,
type2
>::value
) );
}
BOOST_AUTO_TEST_CASE( count_by_meta_type_in_empty_list )
{
BOOST_CHECK_EQUAL( ( bluetoe::details::count_by_meta_type< meta1 >::count ), 0 );
}
BOOST_AUTO_TEST_CASE( count_by_meta_type )
{
BOOST_CHECK_EQUAL( ( bluetoe::details::count_by_meta_type< meta1, type1 >::count ), 1 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_by_meta_type< meta1, type2 >::count ), 0 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_by_meta_type< meta1, type1, type2 >::count ), 1 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_by_meta_type< meta1, type2, type1 >::count ), 1 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_by_meta_type< meta1, type4, type1, type3, type1 >::count ), 2 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_by_meta_type< meta1, type4, type3, type2 >::count ), 0 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_by_meta_type< meta1, type1, type3, type2, type12 >::count ), 2 );
}
BOOST_AUTO_TEST_CASE( option_is_not_set_in_an_empty_list )
{
BOOST_CHECK( !bluetoe::details::has_option< int >::value );
}
BOOST_AUTO_TEST_CASE( option_is_set )
{
BOOST_CHECK( !( bluetoe::details::has_option< int, char >::value ) );
BOOST_CHECK( !( bluetoe::details::has_option< int, char, float >::value ) );
BOOST_CHECK( !( bluetoe::details::has_option< int, char, float, bool >::value ) );
BOOST_CHECK( ( bluetoe::details::has_option< int, char, int >::value ) );
BOOST_CHECK( ( bluetoe::details::has_option< int, int, char >::value ) );
BOOST_CHECK( ( bluetoe::details::has_option< int, char, int, char >::value ) );
}
BOOST_AUTO_TEST_CASE( find_all_by_meta_type_empty_list )
{
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< int >::type, std::tuple<> >::value ) );
}
BOOST_AUTO_TEST_CASE( find_all_by_meta_type )
{
// no resulting element
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type2, type3 >::type,
std::tuple<> >::value ) );
// one resulting element
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type2, type3, type1 >::type,
std::tuple< type1 > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type2, type1, type3 >::type,
std::tuple< type1 > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type1, type3, type2 >::type,
std::tuple< type1 > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type1 >::type,
std::tuple< type1 > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type1a, type3, type2 >::type,
std::tuple< type1a > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type12 >::type,
std::tuple< type12 > >::value ) );
// more than one result elements
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type11, type2, type3, type12, type1 >::type,
std::tuple< type11, type12, type1 > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type2, type1a, type11, type3 >::type,
std::tuple< type1a, type11 > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::find_all_by_meta_type< meta1, type1, type3, type11, type2 >::type,
std::tuple< type1, type11 > >::value ) );
}
namespace {
typedef std::vector< std::string > string_list;
struct register_calls
{
explicit register_calls( string_list& l ) : list( l )
{
}
template< typename O >
void each()
{
list.push_back( O::name() );
}
string_list& list;
};
struct count_calls
{
explicit count_calls( int& c ) : counts( c )
{
}
template< typename O >
void each()
{
++counts;
}
int& counts;
};
}
BOOST_AUTO_TEST_CASE( for_each_empty )
{
string_list list;
bluetoe::details::for_<>::each( register_calls( list ) );
BOOST_CHECK( list.empty() );
}
BOOST_AUTO_TEST_CASE( for_each_one_element )
{
string_list list;
bluetoe::details::for_< type1 >::each( register_calls( list ) );
const string_list expected_result = { "type1" };
BOOST_CHECK_EQUAL_COLLECTIONS( list.begin(), list.end(), expected_result.begin(), expected_result.end() );
}
BOOST_AUTO_TEST_CASE( for_each_many_elements )
{
string_list list;
bluetoe::details::for_< type1, type1, type2, type3 >::each( register_calls( list ) );
const string_list expected_result = { "type1", "type1", "type2", "type3" };
BOOST_CHECK_EQUAL_COLLECTIONS( list.begin(), list.end(), expected_result.begin(), expected_result.end() );
}
BOOST_AUTO_TEST_CASE( for_each_feed_by_an_tuple )
{
string_list list;
bluetoe::details::for_< std::tuple< type1, type1, type2, type3 > >::each( register_calls( list ) );
const string_list expected_result = { "type1", "type1", "type2", "type3" };
BOOST_CHECK_EQUAL_COLLECTIONS( list.begin(), list.end(), expected_result.begin(), expected_result.end() );
}
BOOST_AUTO_TEST_CASE( for_each_over_tuple_of_empty_tuple )
{
int count = 0;
bluetoe::details::for_<
std::tuple<
std::tuple<>
> >::each( count_calls( count ) );
BOOST_CHECK_EQUAL( count, 1 );
}
BOOST_AUTO_TEST_CASE( for_each_over_tuple_of_tuples )
{
int count = 0;
bluetoe::details::for_<
std::tuple<
std::tuple< int, char >,
std::tuple< bool, double >
> >::each( count_calls( count ) );
BOOST_CHECK_EQUAL( count, 2 );
}
BOOST_AUTO_TEST_CASE( group_by_meta_type_empty )
{
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::group_by_meta_types< std::tuple<> >::type,
std::tuple<> >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::group_by_meta_types< std::tuple<>, meta1 >::type,
std::tuple< std::tuple< meta1 > > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::group_by_meta_types< std::tuple<>, meta1, meta2 >::type,
std::tuple< std::tuple< meta1 >, std::tuple< meta2 > > >::value ) );
}
BOOST_AUTO_TEST_CASE( group_by_meta_type )
{
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::group_by_meta_types< std::tuple< type1, type2, type3 > >::type,
std::tuple<> >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::group_by_meta_types< std::tuple< type1, type2, type3, type12 >, meta1 >::type,
std::tuple< std::tuple< meta1, type1, type12 > > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::group_by_meta_types< std::tuple< type1, type2, type3, type12 >, meta1, meta2 >::type,
std::tuple< std::tuple< meta1, type1, type12 >, std::tuple< meta2, type2, type12 > > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::group_by_meta_types< std::tuple< type1, type2, type3, type12 >, meta1, meta2, meta4 >::type,
std::tuple< std::tuple< meta1, type1, type12 >, std::tuple< meta2, type2, type12 >, std::tuple< meta4 > > >::value ) );
}
BOOST_AUTO_TEST_CASE( group_by_meta_types_without_empty_groups )
{
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::group_by_meta_types_without_empty_groups< std::tuple< type1, type2, type3 > >::type,
std::tuple<> >::value ) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::group_by_meta_types_without_empty_groups<
std::tuple< type1, type2, type3, type12 >,
meta1, meta2, meta4
>::type,
std::tuple< std::tuple< meta1, type1, type12 >, std::tuple< meta2, type2, type12 > >
>::value ) );
}
BOOST_AUTO_TEST_CASE( remove_if_equal )
{
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple<>, int >::type,
std::tuple<> >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< int >, int >::type,
std::tuple<> >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< float >, int >::type,
std::tuple< float > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< float, double, int, int, char >, int >::type,
std::tuple< float, double, char > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< int, float, double, char, int >, int >::type,
std::tuple< float, double, char > >::value ) );
}
namespace {
template < class T >
struct templ {};
template < class T >
struct other_templ {};
}
BOOST_AUTO_TEST_CASE( remove_if_with_one_wildcard )
{
typedef templ< bluetoe::details::wildcard > with_wildcard;
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple<>, with_wildcard >::type,
std::tuple<> >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< int >, with_wildcard >::type,
std::tuple< int > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< int, other_templ< int > >, with_wildcard >::type,
std::tuple< int, other_templ< int > > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< templ< char >, int, templ< int > >, with_wildcard >::type,
std::tuple< int > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< std::tuple< int > >, std::tuple< bluetoe::details::wildcard > >::type,
std::tuple<> >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal< std::tuple< int, std::tuple< int, int >, std::tuple< int > >, std::tuple< bluetoe::details::wildcard > >::type,
std::tuple< int, std::tuple< int, int > > >::value ) );
BOOST_CHECK( ( std::is_same<
typename bluetoe::details::remove_if_equal<
std::tuple< std::tuple< meta1, type1, type12 >, std::tuple< meta2, type2, type12 >, std::tuple< meta4 > >,
std::tuple< bluetoe::details::wildcard >
>::type,
std::tuple< std::tuple< meta1, type1, type12 >, std::tuple< meta2, type2, type12 > > >::value ) );
}
namespace {
template < typename T >
struct is_int : std::false_type {};
template <>
struct is_int< int > : std::true_type {};
}
BOOST_AUTO_TEST_CASE( count_if )
{
BOOST_CHECK_EQUAL( ( bluetoe::details::count_if< std::tuple<>, is_int >::value ), 0 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_if< std::tuple< int >, is_int >::value ), 1 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_if< std::tuple< char >, is_int >::value ), 0 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_if< std::tuple< int, char, int >, is_int >::value ), 2 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_if< std::tuple< char, bool, int, float >, is_int >::value ), 1 );
BOOST_CHECK_EQUAL( ( bluetoe::details::count_if< std::tuple< char, int >, is_int >::value ), 1 );
}
namespace {
template < class T >
struct by_value : T {};
}
BOOST_AUTO_TEST_CASE( sum_by )
{
BOOST_CHECK_EQUAL( ( bluetoe::details::sum_by< std::tuple<>, by_value >::value ), 0 );
BOOST_CHECK_EQUAL( (
bluetoe::details::sum_by<
std::tuple<
std::integral_constant< int, 4 >
>, by_value >::value ),
4 );
BOOST_CHECK_EQUAL( (
bluetoe::details::sum_by<
std::tuple<
std::integral_constant< int, 4 >,
std::integral_constant< int, 1 >
>, by_value >::value ),
5 );
}
BOOST_AUTO_TEST_CASE( find_if )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_if< std::tuple< float, char, int >, is_int >::type,
int >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_if< std::tuple< int, float, char >, is_int >::type,
int >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_if< std::tuple< float, int, char >, is_int >::type,
int >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_if< std::tuple< int >, is_int >::type,
int >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_if< std::tuple<>, is_int >::type,
bluetoe::details::no_such_type >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::find_if< std::tuple< float, bool >, is_int >::type,
bluetoe::details::no_such_type >::value
) );
}
BOOST_AUTO_TEST_CASE( last_from_pack )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::last_from_pack< int >::type,
int >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::last_from_pack< float, int >::type,
int >::value
) );
}
namespace {
std::string ctor_order;
template < char C >
struct tag
{
tag()
{
ctor_order += C;
}
};
template < typename List >
static void test_derive_from( const std::string& expected )
{
ctor_order = "";
bluetoe::details::derive_from< List > instance;
static_cast< void >( instance );
BOOST_CHECK_EQUAL( ctor_order, expected );
}
}
BOOST_AUTO_TEST_CASE( derive_from )
{
test_derive_from< std::tuple<> >( "" );
test_derive_from< std::tuple< tag<'A'> > >( "A" );
test_derive_from< std::tuple< tag<'A'>, tag<'B'> > >( "AB" );
test_derive_from< std::tuple< tag<'A'>, tag<'B'>, tag<'C'> > >( "ABC" );
}
namespace {
template <
typename List,
typename E >
struct add_only_tags;
template <
char C,
typename ... Es >
struct add_only_tags< std::tuple< Es... >, tag< C > >
{
typedef std::tuple< tag< C >, Es... > type;
};
template < typename ... Ms, typename T >
struct add_only_tags< std::tuple< Ms... >, T >
{
typedef std::tuple< Ms... > type;
};
}
BOOST_AUTO_TEST_CASE( fold )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::fold< std::tuple<>, add_only_tags >::type,
std::tuple<> >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::fold< std::tuple< int, tag<'A'>, float, tag<'B'> >, add_only_tags >::type,
std::tuple< tag<'A'>, tag<'B'> > >::value
) );
}
BOOST_AUTO_TEST_CASE( transform_list )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::transform_list< std::tuple<>, bluetoe::details::extract_meta_type >::type,
std::tuple<> >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::transform_list<
std::tuple< type1 >,
bluetoe::details::extract_meta_type
>::type,
std::tuple< meta1 > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::transform_list<
std::tuple< type1, type2 >,
bluetoe::details::extract_meta_type
>::type,
std::tuple< meta1, meta2 > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::transform_list<
std::tuple< type1, type2, type3 >,
bluetoe::details::extract_meta_type
>::type,
std::tuple< meta1, meta2, meta3 > >::value
) );
}
BOOST_AUTO_TEST_CASE( index_of )
{
BOOST_CHECK_EQUAL( 0, int( bluetoe::details::index_of< int, int >::value ) );
BOOST_CHECK_EQUAL( 0, int( bluetoe::details::index_of< int, std::tuple< int > >::value ) );
BOOST_CHECK_EQUAL( 0, int( bluetoe::details::index_of< int, int, float >::value ) );
BOOST_CHECK_EQUAL( 0, int( bluetoe::details::index_of< int, std::tuple< int, float > >::value ) );
BOOST_CHECK_EQUAL( 1, int( bluetoe::details::index_of< float, int, float >::value ) );
BOOST_CHECK_EQUAL( 1, int( bluetoe::details::index_of< float, std::tuple< int, float > >::value ) );
BOOST_CHECK_EQUAL( 2, int( bluetoe::details::index_of< bool, int, float, bool >::value ) );
BOOST_CHECK_EQUAL( 1, int( bluetoe::details::index_of< float, std::tuple< int, float, bool > >::value ) );
// not in list
BOOST_CHECK_EQUAL( 3, int( bluetoe::details::index_of< double, std::tuple< int, float, bool > >::value ) );
// not in empty list
BOOST_CHECK_EQUAL( 0, int( bluetoe::details::index_of< double, std::tuple<> >::value ) );
BOOST_CHECK_EQUAL( 0, int( bluetoe::details::index_of< double >::value ) );
}
template < class A, class B >
struct sort_createria
{
using type = typename bluetoe::details::select_type< A::value < B::value, std::true_type, std::false_type >::type;
};
template < int I >
using int_ = std::integral_constant< int, I >;
BOOST_AUTO_TEST_CASE( stable_sort_order )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria >::type,
std::tuple<> >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, std::tuple<> >::type,
std::tuple<> >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, int_< 1 > >::type,
std::tuple< int_< 1 > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, int_< 1 >, int_< 2 > >::type,
std::tuple< int_< 1 >, int_< 2 > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, int_< 2 >, int_< 1 > >::type,
std::tuple< int_< 1 >, int_< 2 > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, int_< 1 >, int_< 2 >, int_< 3 > >::type,
std::tuple< int_< 1 >, int_< 2 >, int_< 3 > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, int_< 2 >, int_< 1 >, int_< 3 > >::type,
std::tuple< int_< 1 >, int_< 2 >, int_< 3 > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, int_< 3 >, int_< 2 >, int_< 1 > >::type,
std::tuple< int_< 1 >, int_< 2 >, int_< 3 > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, int_< 1 >, int_< 3 >, int_< 2 > >::type,
std::tuple< int_< 1 >, int_< 2 >, int_< 3 > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< sort_createria, int_< 1 >, int_< 3 >, int_< 5 >, int_< 2 >, int_< 1 > >::type,
std::tuple< int_< 1 >, int_< 1 >, int_< 2 >, int_< 3 >, int_< 5 > > >::value
) );
}
template < char A, char B >
struct item {
static const char key = A;
};
template < class first, class second >
struct order_item
{
using type = typename bluetoe::details::select_type<
first::key < second::key,
std::true_type,
std::false_type >::type;
};
BOOST_AUTO_TEST_CASE( stable_sort_stability )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< order_item, item< 'A', 'A' > >::type,
std::tuple< item< 'A', 'A' > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< order_item, item< 'A', 'A' >, item< 'A', 'B' > >::type,
std::tuple< item< 'A', 'A' >, item< 'A', 'B' > > >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::stable_sort< order_item, item< 'B', 'A' >, item< 'A', 'B' >, item< 'B', 'B' >, item< 'A', 'C' > >::type,
std::tuple< item< 'A', 'B' >, item< 'A', 'C' >, item< 'B', 'A' >, item< 'B', 'B' > > >::value
) );
}
BOOST_AUTO_TEST_CASE( last_type )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::last_type< std::tuple<> >::type,
bluetoe::details::no_such_type >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::last_type< std::tuple<>, char >::type,
char >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::last_type< std::tuple< char > >::type,
char >::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::last_type< std::tuple< char, double > >::type,
double >::value
) );
}
BOOST_AUTO_TEST_CASE( map_find )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_find< std::tuple<>, int >::type,
bluetoe::details::no_such_type
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_find< std::tuple<>, int, float >::type,
float
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_find<
std::tuple<
bluetoe::details::pair< int, char >
>, int >::type,
char
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_find<
std::tuple<
bluetoe::details::pair< int, char >, bluetoe::details::pair< float, bool >
>, float >::type,
bool
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_find<
std::tuple<
bluetoe::details::pair< int, char >, bluetoe::details::pair< float, bool >
>, double >::type,
bluetoe::details::no_such_type
>::value
) );
}
BOOST_AUTO_TEST_CASE( map_erase )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_erase< std::tuple<>, int >::type,
std::tuple<>
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_erase< std::tuple< bluetoe::details::pair< int, char > >, int >::type,
std::tuple<>
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_erase< std::tuple< bluetoe::details::pair< int, char > >, bool >::type,
std::tuple< bluetoe::details::pair< int, char > >
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_erase<
std::tuple<
bluetoe::details::pair< int, char >,
bluetoe::details::pair< bool, char >,
bluetoe::details::pair< float, char >
>, bool >::type,
std::tuple< bluetoe::details::pair< int, char >, bluetoe::details::pair< float, char > >
>::value
) );
}
BOOST_AUTO_TEST_CASE( map_insert )
{
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_insert< std::tuple<>, int, char >::type,
std::tuple< bluetoe::details::pair< int, char > >
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_insert< std::tuple< bluetoe::details::pair< int, char > >, int, char >::type,
std::tuple< bluetoe::details::pair< int, char > >
>::value
) );
BOOST_CHECK( (
std::is_same<
typename bluetoe::details::map_insert< std::tuple< bluetoe::details::pair< int, bool > >, int, char >::type,
std::tuple< bluetoe::details::pair< int, char > >
>::value
) );
}
|
{"hexsha": "4476dceb95af3c34b72edeae2e1a1baa70f073ae", "size": 31167, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/options_tests.cpp", "max_stars_repo_name": "obruns/bluetoe", "max_stars_repo_head_hexsha": "de13682bce39335878262212f3615254e0af1702", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-02-01T09:38:00.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-01T09:38:00.000Z", "max_issues_repo_path": "tests/options_tests.cpp", "max_issues_repo_name": "obruns/bluetoe", "max_issues_repo_head_hexsha": "de13682bce39335878262212f3615254e0af1702", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/options_tests.cpp", "max_forks_repo_name": "obruns/bluetoe", "max_forks_repo_head_hexsha": "de13682bce39335878262212f3615254e0af1702", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.073778664, "max_line_length": 163, "alphanum_fraction": 0.5871915808, "num_tokens": 8629}
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2010 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
(* Instantiation of the Ring tactic for the naturals of Arith *)
Require Import Bool.
Require Export LegacyRing.
Require Export Arith.
Require Import Eqdep_dec.
Local Open Scope nat_scope.
Fixpoint nateq (n m:nat) {struct m} : bool :=
match n, m with
| O, O => true
| S n', S m' => nateq n' m'
| _, _ => false
end.
Lemma nateq_prop : forall n m:nat, Is_true (nateq n m) -> n = m.
Proof.
simple induction n; simple induction m; intros; try contradiction.
trivial.
unfold Is_true in H1.
rewrite (H n1 H1).
trivial.
Qed.
Hint Resolve nateq_prop: arithring.
Definition NatTheory : Semi_Ring_Theory plus mult 1 0 nateq.
split; intros; auto with arith arithring.
(* apply (fun n m p:nat => plus_reg_l m p n) with (n := n).
trivial.*)
Defined.
Add Legacy Semi Ring nat plus mult 1 0 nateq NatTheory [ 0 S ].
Goal forall n:nat, S n = 1 + n.
intro; reflexivity.
Save S_to_plus_one.
(* Replace all occurrences of (S exp) by (plus (S O) exp), except when
exp is already O, and only for those occurrences that can be reached by going
down plus and mult operations *)
Ltac rewrite_S_to_plus_term t :=
match constr:(t) with
| 1 => constr:(1)
| (S ?X1) =>
let t1 := rewrite_S_to_plus_term X1 in
constr:(1 + t1)
| (?X1 + ?X2) =>
let t1 := rewrite_S_to_plus_term X1
with t2 := rewrite_S_to_plus_term X2 in
constr:(t1 + t2)
| (?X1 * ?X2) =>
let t1 := rewrite_S_to_plus_term X1
with t2 := rewrite_S_to_plus_term X2 in
constr:(t1 * t2)
| _ => constr:(t)
end.
(* Apply S_to_plus on both sides of an equality *)
Ltac rewrite_S_to_plus :=
match goal with
| |- (?X1 = ?X2) =>
try
let t1 :=
(**) (**)
rewrite_S_to_plus_term X1
with t2 := rewrite_S_to_plus_term X2 in
change (t1 = t2)
| |- (?X1 = ?X2) =>
try
let t1 :=
(**) (**)
rewrite_S_to_plus_term X1
with t2 := rewrite_S_to_plus_term X2 in
change (t1 = t2)
end.
Ltac ring_nat := rewrite_S_to_plus; ring.
|
{"author": "coq-contribs", "repo": "legacy-ring", "sha": "6aa7bb505bd40523bb9a5cf6151f13abd21eae65", "save_path": "github-repos/coq/coq-contribs-legacy-ring", "path": "github-repos/coq/coq-contribs-legacy-ring/legacy-ring-6aa7bb505bd40523bb9a5cf6151f13abd21eae65/LegacyArithRing.v"}
|
import os
import networkx as nx
import numpy as np
from six import iteritems
from opensfm import types
import opensfm.dataset
def normalized(x):
return x / np.linalg.norm(x)
def camera_pose(position, lookat, up):
'''
Pose from position and look at direction
>>> position = [1.0, 2.0, 3.0]
>>> lookat = [0., 10.0, 2.0]
>>> up = [0.0, 0.0, 1.0]
>>> pose = camera_pose(position, lookat, up)
>>> np.allclose(pose.get_origin(), position)
True
>>> d = normalized(pose.transform(lookat))
>>> np.allclose(d, [0, 0, 1])
True
'''
ez = normalized(np.array(lookat) - np.array(position))
ex = normalized(np.cross(ez, up))
ey = normalized(np.cross(ez, ex))
pose = types.Pose()
pose.set_rotation_matrix([ex, ey, ez])
pose.set_origin(position)
return pose
class CubeDataset:
'''
Dataset of cameras looking at point in a cube
>>> d = CubeDataset(3, 10, 0.1, 0.3)
>>> len(d.cameras)
3
>>> len(d.shots)
3
>>> len(d.points)
10
'''
def __init__(self, num_cameras, num_points, noise, outlier_fraction):
self.cameras = {}
for i in range(num_cameras):
camera = types.PerspectiveCamera()
camera.id = 'camera' + str(i)
camera.focal = 0.9
camera.k1 = -0.1
camera.k2 = 0.01
camera.height = 600
camera.width = 800
self.cameras[camera.id] = camera
self.shots = {}
for i in range(num_cameras):
alpha = float(i) / (num_cameras - 1)
position = [alpha, -5.0, 0.5]
lookat = [1.0 - alpha, alpha, alpha]
up = [alpha * 0.2, alpha * 0.2, 1.0]
shot = types.Shot()
shot.id = 'shot' + str(i)
shot.camera = self.cameras['camera' + str(i)]
shot.pose = camera_pose(position, lookat, up)
self.shots[shot.id] = shot
points = np.random.rand(num_points, 3)
self.points = {'point' + str(i): p for i, p in enumerate(points)}
g = nx.Graph()
for shot_id, shot in iteritems(self.shots):
for point_id, point in iteritems(self.points):
feature = shot.project(point)
feature += np.random.rand(*feature.shape)*noise
g.add_node(shot_id, bipartite=0)
g.add_node(point_id, bipartite=1)
g.add_edge(shot_id, point_id, feature=feature,
feature_id=point_id, feature_color=(0, 0, 0))
self.tracks = g
def create_berlin_test_folder(tmpdir):
path = str(tmpdir.mkdir('berlin'))
os.symlink(os.path.abspath('data/berlin/images'),
os.path.join(path, 'images'))
os.symlink(os.path.abspath('data/berlin/masks'),
os.path.join(path, 'masks'))
os.symlink(os.path.abspath('data/berlin/gcp_list.txt'),
os.path.join(path, 'gcp_list.txt'))
return opensfm.dataset.DataSet(path)
|
{"hexsha": "40a5fe77fef07a7efa70eaf873671a285f0d182a", "size": 2995, "ext": "py", "lang": "Python", "max_stars_repo_path": "opensfm/test/data_generation.py", "max_stars_repo_name": "Oktosha/OpenSfM", "max_stars_repo_head_hexsha": "17d34c16cdb62b383af1c671ee3237098d183ec2", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 2111, "max_stars_repo_stars_event_min_datetime": "2019-01-29T07:01:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T06:48:14.000Z", "max_issues_repo_path": "opensfm/test/data_generation.py", "max_issues_repo_name": "Oktosha/OpenSfM", "max_issues_repo_head_hexsha": "17d34c16cdb62b383af1c671ee3237098d183ec2", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 131, "max_issues_repo_issues_event_min_datetime": "2019-02-18T10:56:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T12:07:00.000Z", "max_forks_repo_path": "opensfm/test/data_generation.py", "max_forks_repo_name": "Oktosha/OpenSfM", "max_forks_repo_head_hexsha": "17d34c16cdb62b383af1c671ee3237098d183ec2", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 421, "max_forks_repo_forks_event_min_datetime": "2019-02-12T07:59:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T05:22:01.000Z", "avg_line_length": 30.2525252525, "max_line_length": 73, "alphanum_fraction": 0.5616026711, "include": true, "reason": "import numpy,import networkx", "num_tokens": 823}
|
from abc import ABCMeta, abstractmethod
from IPython import embed
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import CrossEntropyLoss
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from typing import Optional
class Attacker(metaclass=ABCMeta):
@abstractmethod
    def attack(self, model, inputs, targets, normalize_input=None):
raise NotImplementedError
class PGD(Attacker):
def __init__(self, random_start=True, step_size=0.1,
epsilon=0.3, num_steps=40,
norm='linf', est_grad=None):
super(PGD, self).__init__()
self.criterion = CrossEntropyLoss(reduction='none')
self.epsilon = epsilon
self.norm = norm
self.num_steps = num_steps
self.random_start = random_start
self.step_size = step_size
self.est_grad = est_grad
def _compute_loss(self, inputs, targets, normalize_input=None):
if normalize_input is not None:
logits = self.model(normalize_input(inputs))
else:
logits = self.model(inputs)
loss = self.criterion(logits, targets)
return loss
def _perturb_linf(self, inputs, targets, normalize_input=None):
x = inputs.clone().detach()
if self.random_start:
x += torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
x = torch.clamp(x, 0, 1)
for i in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
losses = self._compute_loss(x, targets, normalize_input)
loss = losses.mean()
if self.est_grad is None:
grad = torch.autograd.grad(loss, x)[0]
else:
f = lambda _x, _y: self._compute_loss(_x, _y, normalize_input)
grad = calc_est_grad(f, x, targets, *self.est_grad)
x = x + self.step_size * torch.sign(grad)
x = torch.min(torch.max(x, inputs - self.epsilon), inputs + self.epsilon).clamp(0, 1)
return x.detach()
def _perturb_l2(self, inputs, targets, normalize_input=None):
batch_size = inputs.shape[0]
x = inputs.clone().detach()
if self.random_start:
x = x + (torch.rand_like(x) - 0.5).renorm(p=2, dim=0, maxnorm=self.epsilon)
x = torch.clamp(x, 0, 1)
for i in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
losses = self._compute_loss(x, targets, normalize_input)
loss = losses.mean()
if self.est_grad is None:
grad = torch.autograd.grad(loss, x)[0]
else:
f = lambda _x, _y: self._compute_loss(_x, _y, normalize_input)
grad = calc_est_grad(f, x, targets, *self.est_grad)
grad_norms = grad.view(batch_size, -1).norm(p=2, dim=1)
grad.div_(grad_norms.view(-1, 1, 1, 1))
x = x + self.step_size*grad
# project on the l2 ball
x = inputs + torch.renorm(x - inputs, p=2, dim=0, maxnorm=self.epsilon)
x = torch.clamp(x, 0, 1)
return x.detach()
def attack(self, model, inputs, targets, normalize_input=None):
self.model = model
if self.norm == 'l2':
return self._perturb_l2(inputs, targets, normalize_input)
elif self.norm == 'linf':
return self._perturb_linf(inputs, targets, normalize_input)
else:
            raise ValueError(f"Unsupported norm: {self.norm!r}")
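# A minimal usage sketch (hypothetical `model`, `images`, `labels` tensors; not
# part of the original file):
#   attacker = PGD(step_size=2/255, epsilon=8/255, num_steps=10, norm='linf')
#   x_adv = attacker.attack(model, images, labels)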
# Taken from https://github.com/MadryLab/robustness
def calc_est_grad(func, x, y, rad, num_samples):
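    # Zeroth-order gradient estimate: average loss * direction over antithetic
    # pairs of random unit directions (a finite-difference / NES-style estimator).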
B, *_ = x.shape
Q = num_samples//2
N = len(x.shape) - 1
with torch.no_grad():
# Q * B * C * H * W
extender = [1]*N
queries = x.repeat(Q, *extender)
noise = torch.randn_like(queries)
norm = noise.view(B*Q, -1).norm(dim=-1).view(B*Q, *extender)
noise = noise / norm
noise = torch.cat([-noise, noise])
queries = torch.cat([queries, queries])
y_shape = [1] * (len(y.shape) - 1)
l = func(queries + rad * noise, y.repeat(2*Q, *y_shape)).view(-1, *extender)
grad = (l.view(2*Q, B, *extender) * noise.view(2*Q, B, *noise.shape[1:])).mean(dim=0)
return grad
# Taken from https://github.com/Hadisalman/smoothing-adversarial
class NormalizeLayer(torch.nn.Module):
"""Standardize the channels of a batch of images by subtracting the dataset mean
and dividing by the dataset standard deviation.
In order to certify radii in original coordinates rather than standardized coordinates, we
add the Gaussian noise _before_ standardizing, which is why we have standardization be the first
layer of the classifier rather than as a part of preprocessing as is typical.
"""
def __init__(self, means, sds):
"""
:param means: the channel means
:param sds: the channel standard deviations
"""
super(NormalizeLayer, self).__init__()
self.means = torch.tensor(means).cuda()
self.sds = torch.tensor(sds).cuda()
def forward(self, input):
(batch_size, num_channels, height, width) = input.shape
means = self.means.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
sds = self.sds.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
return (input - means)/sds
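# Example (assumed CIFAR-10-style statistics; a CUDA device is required, since
# the buffers above are created with .cuda()):
#   normalize = NormalizeLayer([0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616])
#   x_std = normalize(images)  # images: (B, 3, H, W) tensor on the GPU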
|
{"hexsha": "cee82be06fdb15b38a5cb93654120c3960af1ad2", "size": 5453, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonClient/robustness_challenge/robustness/tools/attacks.py", "max_stars_repo_name": "Hadisalman/AirSim", "max_stars_repo_head_hexsha": "ed0a1aba69ef4aa8941aac9bdecc9c4e7a21bbf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-21T19:30:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T14:16:14.000Z", "max_issues_repo_path": "PythonClient/robustness_challenge/robustness/tools/attacks.py", "max_issues_repo_name": "Hadisalman/AirSim-robustness", "max_issues_repo_head_hexsha": "ed0a1aba69ef4aa8941aac9bdecc9c4e7a21bbf6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonClient/robustness_challenge/robustness/tools/attacks.py", "max_forks_repo_name": "Hadisalman/AirSim-robustness", "max_forks_repo_head_hexsha": "ed0a1aba69ef4aa8941aac9bdecc9c4e7a21bbf6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0955882353, "max_line_length": 102, "alphanum_fraction": 0.6066385476, "include": true, "reason": "import numpy", "num_tokens": 1376}
|
import numpy as np
def SY_StdNthDer(y, n=2):
'''
SY_StdNthDer Standard deviation of the nth derivative of the time series.
Based on an idea by Vladimir Vassilevsky, a DSP and Mixed Signal Design
    Consultant in a Matlab forum, who stated that "You can measure the standard
deviation of the nth derivative, if you like".
cf. http://www.mathworks.de/matlabcentral/newsreader/view_thread/136539
    The derivative is estimated simply by taking successive increments
of the time series; the process is repeated to obtain higher order
derivatives.
    Note that this idea is popular in the heart-rate variability literature,
    cf. "Do Existing Measures ...", Brennan et al. (2001), IEEE Trans Biomed Eng 48(11)
(and function MD_hrv_classic)
:param y: time series to analyze
:param n: the order of derivative to analyze
:return: the standard deviation of the nth derivative of the time series
'''
yd = np.diff(y, n) # approximate way to calculate derivative
    if yd.size == 0:
        raise ValueError("Time series too short to compute differences")
out = np.std(yd, ddof=1)
return out
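# A quick sanity check (assumed values, not from the original source):
#   SY_StdNthDer(np.arange(10, dtype=float), 1)  # first differences are all 1
#   -> 0.0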
|
{"hexsha": "6ff6572cbfd910a1808c1d4d2e7a061fedea54c9", "size": 1164, "ext": "py", "lang": "Python", "max_stars_repo_path": "Operations/SY_StdNthDer.py", "max_stars_repo_name": "ClarkLabUVA/hctsa-py", "max_stars_repo_head_hexsha": "4382a7e852d21cdfefdac1a4a09ea6e11abd9be1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-14T00:16:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T05:49:12.000Z", "max_issues_repo_path": "Operations/SY_StdNthDer.py", "max_issues_repo_name": "fairscape/hctsa-py", "max_issues_repo_head_hexsha": "4382a7e852d21cdfefdac1a4a09ea6e11abd9be1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Operations/SY_StdNthDer.py", "max_forks_repo_name": "fairscape/hctsa-py", "max_forks_repo_head_hexsha": "4382a7e852d21cdfefdac1a4a09ea6e11abd9be1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-08-14T00:22:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-18T05:31:14.000Z", "avg_line_length": 33.2571428571, "max_line_length": 89, "alphanum_fraction": 0.7121993127, "include": true, "reason": "import numpy", "num_tokens": 286}
|
"""Implementation of MPI based remote operators."""
from bempp.api.assembly.blocked_operator import BlockedOperator as _BlockedOperator
from bempp.api.assembly.discrete_boundary_operator import _DiscreteOperatorBase
from mpi4py import MPI
import numpy as _np
MPI_SIZE = MPI.COMM_WORLD.Get_size()
MPI_RANK = MPI.COMM_WORLD.Get_rank()
COMM = MPI.COMM_WORLD
_REMOTE_MANAGER = None
class Message:
"""Messages for remote workers."""
def __init__(self, status, operator_tag=None, is_complex=False, nelements=None):
self.status = status
self.is_complex = is_complex
self.operator_tag = operator_tag
self.nelements = nelements
class RemoteManager:
"""Manage remote worker execution."""
def __init__(self):
self._op_data = {}
self._op_location = {}
self._tags = {}
self._tags_counter = 0
self._rank_counter = 1
def register(self, op):
"""Register operator for remote execution."""
tag = self._tags_counter
rank = self._rank_counter
self._op_data[tag] = op
self._op_location[tag] = rank
self._tags[op] = tag
self._tags_counter += 1
self._rank_counter = (1 + self._rank_counter) % MPI_SIZE
if self._rank_counter == 0:
self._rank_counter = 1
def execute_worker(self):
"""Only execute on workers."""
while True:
msg, tag, data = self.receive_data(0)
if msg == "SHUTDOWN":
break
elif msg == "ASSEMBLE":
self._op_data[tag].weak_form()
elif msg == "SYNCHRONIZE":
self.send_data("SYNCHRONIZED", 0, tag)
elif msg == "GET_DTYPE":
self.send_data(
_np.dtype(self._op_data[tag].weak_form().dtype).name, 0, tag
)
elif msg == "DATA":
result = self._op_data[tag].weak_form() @ data
self.send_data("DATA", 0, tag, result)
def send_data(self, msg, dest, operator_tag=None, data=None):
"""Send data to destination rank."""
COMM.send(
Message(
msg,
operator_tag=operator_tag,
is_complex=_np.iscomplexobj(data),
nelements=len(data) if data is not None else None,
),
dest=dest,
)
if msg == "DATA":
COMM.Send(data, dest=dest)
def send_error(self, msg, operator_tag):
"""Send error message to master."""
COMM.send(Message(msg, operator_tag=operator_tag), dest=0)
def receive_data(self, source):
"""Receive data."""
message = COMM.recv(source=source)
if message.status == "SHUTDOWN":
return message.status, None, None
elif message.status == "DATA":
if message.is_complex:
dtype = _np.complex128
else:
dtype = _np.float64
data = _np.empty(message.nelements, dtype=dtype)
COMM.Recv(data, source=source)
return message.status, message.operator_tag, data
else:
return message.status, message.operator_tag, None
def submit_computation(self, op, x):
"""Submit computation to operator."""
tag = self._tags[op]
rank = self._op_location[tag]
self.send_data("DATA", rank, tag, x)
def receive_result(self, op):
"""Receive result from a specific operator."""
tag = self._tags[op]
rank = self._op_location[tag]
_, _, data = self.receive_data(rank)
return data
def assemble(self, op):
"""Assemble a given remote operator."""
tag = self._tags[op]
rank = self._op_location[tag]
self.send_data("ASSEMBLE", rank, tag)
def barrier(self):
"""Barrier operation for workers."""
for rank in range(1, MPI_SIZE):
self.send_data("SYNCHRONIZE", rank, 0)
msg, _, _ = self.receive_data(rank)
if msg != "SYNCHRONIZED":
                raise Exception(
                    f"Error: expected message 'SYNCHRONIZED' from rank {rank},"
                    f" received {msg}"
                )
def get_operator_dtype(self, tag):
"""Get dtype of remote operator."""
rank = self._op_location[tag]
self.send_data("GET_DTYPE", rank, tag)
msg, _, _ = self.receive_data(rank)
return _np.dtype(msg)
def shutdown(self):
"""Shutdown all workers."""
        if MPI_SIZE == 1:
            # no worker ranks to shut down
            return
for worker in range(1, MPI_SIZE):
self.send_data("SHUTDOWN", worker)
def _group_by_rank(self, tags):
"""
        Group tags by rank for parallel computation.
        Given a list of tags (with possible repetitions), return a list of
        lists of positions into the tags list, arranged so that no sublist
        contains two tags assigned to the same rank. This allows the
        computations to be distributed effectively across the ranks.
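        Example (hypothetical tag/rank values): tags [5, 7, 5] with ranks
        {5: 1, 7: 2} yields [[0, 1], [2]].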
"""
output = []
tags_with_indices = {index: tag for index, tag in enumerate(tags)}
while len(tags_with_indices) > 0:
current_indices = []
current_ranks = []
for index, tag in tags_with_indices.items():
rank = self._op_location[tag]
if rank not in current_ranks:
current_indices.append(index)
current_ranks.append(rank)
for index in current_indices:
del tags_with_indices[index]
output.append(current_indices)
return output
def assemble_parallel(self, ops):
"""Assemble a list of operators in parallel."""
tags = [self._tags[op] for op in ops]
grouped_indices = self._group_by_rank(tags)
for index_list in grouped_indices:
for index in index_list:
self.assemble(ops[index])
self.barrier()
def compute_parallel(self, tasks):
"""
Compute in parallel.
Parameters
----------
tasks : list
A list [(op1, data1), (op2, data2), ...]
of operators and associated vectors.
Returns a list [result1, result2, ...]
of the results of the computation.
Operators are allowed to appear repeatedly in the list
and are scheduled correctly.
"""
tags = [self._tags[op] for op, _ in tasks]
results = len(tasks) * [None]
grouped_indices = self._group_by_rank(tags)
for index_list in grouped_indices:
for index in index_list:
op, data = tasks[index]
self.submit_computation(op, data)
for index in index_list:
op, data = tasks[index]
results[index] = self.receive_result(op)
return results
@property
def tags(self):
"""Return tags."""
return self._tags
def get_remote_manager():
"""Initialize remote manager."""
global _REMOTE_MANAGER
if _REMOTE_MANAGER is None:
_REMOTE_MANAGER = RemoteManager()
return _REMOTE_MANAGER
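# A minimal driver sketch (hypothetical operators `op1`, `op2` and vectors `x1`,
# `x2`; not part of the original file). Registration must happen on every rank so
# that the workers hold the operator data too:
#   manager = get_remote_manager()
#   manager.register(op1)
#   manager.register(op2)
#   if MPI_RANK == 0:
#       manager.assemble_parallel([op1, op2])
#       y1, y2 = manager.compute_parallel([(op1, x1), (op2, x2)])
#       manager.shutdown()
#   else:
#       manager.execute_worker()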
class RemoteBlockedOperator(_BlockedOperator):
"""Define a remote blocked operator."""
def __init__(self, m, n):
"""Initialize an m x n remote blocked operator."""
super().__init__(m, n)
def _assemble(self):
"""Assemble the operator."""
if not self._fill_complete():
raise ValueError("Each row and column must have at least one operator")
return RemoteBlockedDiscreteOperator(self._operators)
class RemoteBlockedDiscreteOperator(_DiscreteOperatorBase):
"""Implementation of a discrete blocked boundary operator."""
def __init__(self, ops):
"""
Construct an operator from a two dimensional Numpy array of operators.
        ops is a list of lists containing discrete boundary operators or None.
A None entry is equivalent to a zero discrete boundary operator.
"""
# pylint: disable=too-many-branches
self._manager = get_remote_manager()
if not isinstance(ops, _np.ndarray):
ops = _np.array(ops)
rows = ops.shape[0]
cols = ops.shape[1]
self._ndims = (rows, cols)
        self._operators = _np.empty((rows, cols), dtype=object)
        self._tags = _np.empty((rows, cols), dtype=int)
self._rows = -_np.ones(rows, dtype=int)
self._cols = -_np.ones(cols, dtype=int)
for i in range(rows):
for j in range(cols):
if ops[i, j] is None:
continue
if self._rows[i] != -1:
if self._get_shape(ops[i, j])[0] != self._rows[i]:
                        raise ValueError(
                            "Block row {0} has incompatible operator sizes.".format(i)
                        )
else:
self._rows[i] = self._get_shape(ops[i, j])[0]
if self._cols[j] != -1:
if self._get_shape(ops[i, j])[1] != self._cols[j]:
                        raise ValueError(
                            "Block column {0} has incompatible operator sizes.".format(j)
                        )
else:
self._cols[j] = self._get_shape(ops[i, j])[1]
self._operators[i, j] = ops[i, j]
if not self._fill_complete():
raise ValueError("Each row and column must contain at least one operator.")
for i in range(rows):
for j in range(cols):
if self._operators[i, j] is None:
self._tags[i, j] = -1
else:
self._tags[i, j] = self._manager.tags[self._operators[i, j]]
shape = (_np.sum(self._rows), _np.sum(self._cols))
is_complex = False
for i in range(rows):
for j in range(cols):
if self._tags[i, j] == -1:
continue
self._manager.assemble(self._operators[i, j])
self._manager.barrier()
for i in range(rows):
for j in range(cols):
if self._tags[i, j] == -1:
continue
dtype = self._manager.get_operator_dtype(self._tags[i, j])
if dtype in ["complex128", "complex64"]:
is_complex = True
if is_complex:
dtype = _np.complex128
else:
dtype = _np.float64
super().__init__(dtype, shape)
def __getitem__(self, key):
"""Return the object at position (i, j)."""
return self._operators[key]
def _get_shape(self, op):
"""Get shape of boundary operator."""
return (op.dual_to_range.global_dof_count, op.domain.global_dof_count)
def _fill_complete(self):
if (-1 in self._rows) or (-1 in self._cols):
return False
return True
def _matvec(self, x):
from bempp.api.utils.data_types import combined_type
if not self._fill_complete():
raise ValueError("Not all rows or columns contain operators.")
ndims = len(x.shape)
x = x.ravel()
row_dim = 0
res = _np.zeros(self.shape[0], dtype=combined_type(self.dtype, x.dtype))
compute_tasks = []
# Submit the computations
for i in range(self._ndims[0]):
col_dim = 0
for j in range(self._ndims[1]):
if self._tags[i, j] != -1:
# If self._tags[i, j] == -1 the operator is None
local_x = x[col_dim : col_dim + self._cols[j]]
compute_tasks.append((self._operators[i, j], local_x))
col_dim += self._cols[j]
row_dim += self._rows[i]
# Execute computations
compute_results = self._manager.compute_parallel(compute_tasks)
# Get results back
row_dim = 0
task_count = 0
for i in range(self._ndims[0]):
col_dim = 0
for j in range(self._ndims[1]):
if self._tags[i, j] == -1:
continue
res[row_dim : row_dim + self._rows[i]] += compute_results[task_count]
task_count += 1
row_dim += self._rows[i]
if ndims == 2:
res = res.reshape(-1, 1)
return res
def _get_row_dimensions(self):
return self._rows
def _get_column_dimensions(self):
return self._cols
@property
def A(self):
"""Return as dense Numpy array."""
raise NotImplementedError()
def _transpose(self):
"""Implement the transpose."""
raise NotImplementedError()
def _adjoint(self):
"""Implement the adjoint."""
raise NotImplementedError()
row_dimensions = property(_get_row_dimensions)
column_dimensions = property(_get_column_dimensions)
|
{"hexsha": "9d63b1daf79eec46e9770671e81fd72a0aea8688", "size": 13117, "ext": "py", "lang": "Python", "max_stars_repo_path": "bempp/api/utils/remote_operator.py", "max_stars_repo_name": "pescap/bempp-cl", "max_stars_repo_head_hexsha": "3a68666e8db0e873d418b734289067483f68f12e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bempp/api/utils/remote_operator.py", "max_issues_repo_name": "pescap/bempp-cl", "max_issues_repo_head_hexsha": "3a68666e8db0e873d418b734289067483f68f12e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bempp/api/utils/remote_operator.py", "max_forks_repo_name": "pescap/bempp-cl", "max_forks_repo_head_hexsha": "3a68666e8db0e873d418b734289067483f68f12e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5046511628, "max_line_length": 87, "alphanum_fraction": 0.555538614, "include": true, "reason": "import numpy", "num_tokens": 2864}
|
import numpy as np
with open("./input.txt", "r") as f:
connections = [line.strip().split("-") for line in f.readlines()]
class Node():
def __init__(self, name):
self.big = name[0].isupper()
self.visited = False
self.name = name
self.connections = set()
def __hash__(self):
return hash(self.name)
def __str__(self):
return self.name
__repr__ = __str__
def number_of_paths_to_end(self, level=0):
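        # Depth-first search: small caves are marked visited on the way down and
        # unmarked when backtracking, so each distinct path to "end" is counted once.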
print(" ".join([""]*2*level), "Now visiting: ", self, level)
if self.name == "end":
return 1
else:
if not self.big:
self.visited = True
count = sum(child.number_of_paths_to_end(level+1)
for child in self.connections if not child.visited)
self.visited = False
return count
nodes = {"start": Node("start"), "end": Node("end")}
for left, right in connections:
if left not in nodes:
nodes[left] = Node(left)
if right not in nodes:
nodes[right] = Node(right)
nodes[left].connections.add(nodes[right])
nodes[right].connections.add(nodes[left])
# prune the lookup dict: keep only small caves that are not dead ends (plus
# start/end); the underlying connection graph is unchanged
nodes = {name: node for (name, node) in nodes.items() if (len(
node.connections) > 1 or node.name in {"start", "end"}) and not node.big}
print(nodes["start"].number_of_paths_to_end())
|
{"hexsha": "8b4a00f179de56bb85b8bf7a795ad6eeb9b7d2b4", "size": 1478, "ext": "py", "lang": "Python", "max_stars_repo_path": "Day_12_1/main.py", "max_stars_repo_name": "SV-97/AdventOfCode2021", "max_stars_repo_head_hexsha": "f86040af489b965eece7d27258b4ea1078fd88fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Day_12_1/main.py", "max_issues_repo_name": "SV-97/AdventOfCode2021", "max_issues_repo_head_hexsha": "f86040af489b965eece7d27258b4ea1078fd88fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Day_12_1/main.py", "max_forks_repo_name": "SV-97/AdventOfCode2021", "max_forks_repo_head_hexsha": "f86040af489b965eece7d27258b4ea1078fd88fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-16T15:14:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T15:14:40.000Z", "avg_line_length": 28.4230769231, "max_line_length": 77, "alphanum_fraction": 0.5825439783, "include": true, "reason": "import numpy", "num_tokens": 364}
|
# -*- coding: utf-8 -*-
""" utils/plot_tools """
import copy
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from lcksvd.constants import PlotFilter
from lcksvd.core.exceptions.plot_tools import ColourMapInvalid, ColourListInvalid
class LearnedRepresentationPlotter:
"""
Hold method to plot the representation matrix in three ways:
- plot_basic_figure : blue markers
- plot_colored_basic_figure : colored markers
    - plot_filtered_colored_image: filtered markers. See the method definition for options
Usage:
plotter = LearnedRepresentationPlotter(predictions=predictions, gamma=gamma,
label_index=Label.INDEX, custom_colours=COLOURS)
plotter.plot_basic_figure(show_legend=False, show_grid=False, file_saving_name='', marker=',')
plotter.plot_colored_basic_figure(show_legend=False, show_grid=False, file_saving_name='', marker=',')
plotter.plot_filtered_colored_image(show_legend=False, show_grid=False, file_saving_name='', filter_by=PlotFilter.UNIQUE, marker='.')
# Functor style
# plot_basic_figure
    LearnedRepresentationPlotter(predictions=predictions, gamma=gamma, label_index=Label.INDEX, custom_colours=COLOURS)(simple='')
    # plot_colored_basic_figure
    LearnedRepresentationPlotter(predictions=predictions, gamma=gamma, label_index=Label.INDEX, custom_colours=COLOURS)()
    # plot_filtered_colored_image
    LearnedRepresentationPlotter(predictions=predictions, gamma=gamma, label_index=Label.INDEX, custom_colours=COLOURS)(filter_by=PlotFilter.SHARED)
"""
clusters = None
def __init__(self, *args, **kwargs):
"""
Verifies and initializes the instance
Kwargs:
fontsize (int): font size
figsize (tuple): size of the figure
dpi (int): image resolution (default 200)
predictions (np.ndarray): first object returned by lcksvd.classification
gamma (np.ndarray): second object returned by lcksvd.classification
label_index (dict): dictionary with keys and labels. e.g.:
{0: 'label1', 1: 'label2', ...}
            custom_colours (list or tuple): list containing the colour names for labels. See https://matplotlib.org/3.2.1/tutorials/colors/colors.html
colormap (str): matplotlib colormap name. e.g. Set1 or Dark2. see: https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
"""
self.fontsize = kwargs.get('fontsize', 12)
self.figsize = kwargs.get('figsize', (10.4, 4.8))
self.dpi = kwargs.get('dpi', 200)
self.predictions = kwargs.get('predictions', None)
self.gamma = kwargs.get('gamma', None)
        self.label_index = kwargs.get('label_index', {})
custom_colours = kwargs.get('custom_colours', None)
colormap = kwargs.get('colormap', None)
if custom_colours:
assert isinstance(custom_colours, (list, tuple))
if len(custom_colours) < len(self.label_index):
raise ColourListInvalid
self.colours = custom_colours
elif colormap:
assert isinstance(colormap, str)
            # cm.get_cmap raises ValueError on its own for unknown names
            cmap = cm.get_cmap(colormap)
            if len(cmap.colors) < len(self.label_index):
                raise ColourMapInvalid
            self.colours = cmap.colors[:len(self.label_index)]
else:
self.colours = tuple([np.random.rand(3) for _ in range(len(self.label_index))])
assert isinstance(self.fontsize, int)
assert isinstance(self.figsize, tuple)
assert isinstance(self.dpi, int)
assert self.predictions is not None
assert isinstance(self.predictions, np.ndarray)
assert self.gamma is not None
assert isinstance(self.gamma, np.ndarray)
self.fig = plt.figure(figsize=self.figsize, dpi=self.dpi)
self.ax = plt.subplot(111)
self.sorted_indexes = np.argsort(self.predictions)
self.gamma = self.gamma[:, self.sorted_indexes]
self.cluster_lengths = []
# Getting length of clusters
for i in range(len(self.label_index)):
self.cluster_lengths.append(len(np.nonzero(self.predictions == i)[0]))
def __call__(self, **kwargs):
""" Functor """
if 'simple' in kwargs:
kwargs.pop('simple')
return self.plot_basic_figure(**kwargs)
if 'filter_by' in kwargs:
return self.plot_filtered_colored_image(**kwargs)
return self.plot_colored_basic_figure(**kwargs)
def __plot_vertical_lines(self):
""" Plots vertical lines that separates signals from different classes """
jump = 0
num_clusters = len(self.cluster_lengths)
for index, length in enumerate(self.cluster_lengths):
print("cluster length {}".format(length))
if index != num_clusters - 1:
plt.axvline(length + jump, c='black')
jump += length
def __plot_clusters(self, marker='.', markersize=12):
"""
Plots the cluster using the provided marker
Args:
marker (str): plot marker. See https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
markersize (float): marker size (only works when marker != ',')
"""
assert isinstance(marker, str)
        assert isinstance(markersize, (int, float))  # the default (12) is an int
for index, item in enumerate(zip(self.clusters, self.colours)):
np_cluster = np.array(tuple(item[0]))
if np_cluster.any():
plt.plot(
np_cluster[:, 0].tolist(), np_cluster[:, 1].tolist(),
marker, markersize=markersize, color=item[1],
label=self.label_index[index]
)
def __plot_and_save(self, show_legend=False, show_grid=False, file_saving_name=''):
"""
* Adds axes names, legend (optional), grid (optional) to the plot
* Displays the image plotted
* Saves the image as PNG (optional)
Args:
show_legend (bool): display or not the legend
show_grid (bool): display or not the grid
file_saving_name (str): filename without extension
"""
assert isinstance(show_legend, bool)
assert isinstance(show_grid, bool)
assert isinstance(file_saving_name, str)
plt.xlabel('Sparse representation from test signals', fontsize=self.fontsize)
plt.ylabel('Atoms', fontsize=self.fontsize)
if show_legend:
self.ax.legend(loc='upper center', bbox_to_anchor=(0., 1.02, 1., .102),
ncol=4, mode="expand", fancybox=True, shadow=True)
if show_grid:
plt.grid(True)
plt.show()
if file_saving_name:
self.fig.savefig('{}.png'.format(file_saving_name), bbox_inches='tight')
def __create_clusters(self):
"""
Creates the clusters of learned representations
        It assumes that the same number of signals per class was provided
"""
self.clusters = []
jump = 0
for length in self.cluster_lengths:
x_s, y_s = np.nonzero(self.gamma[:, jump:jump+length].astype(bool).T)
x_s = x_s + jump
self.clusters.append(np.array(tuple(zip(x_s, y_s))))
jump += length
def plot_basic_figure(
self, show_legend=False, show_grid=False, file_saving_name='', marker=',',
markersize=12
):
"""
Plots an image using only blue pixels
Args:
show_legend (bool): display or not the legend
show_grid (bool): display or not the grid
file_saving_name (str): filename without extension
marker (str): plot marker. See https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
markersize (float): marker size (only works when marker != ',')
"""
assert isinstance(marker, str)
        assert isinstance(markersize, (int, float))  # the default (12) is an int
self.__plot_vertical_lines()
nonzeros = np.nonzero(self.gamma.astype(bool).T)
plt.plot(*nonzeros, 'b{}'.format(marker), markersize=markersize)
self.__plot_and_save(show_legend, show_grid, file_saving_name)
def plot_colored_basic_figure(
self, show_legend=False, show_grid=False, file_saving_name='', marker=',',
markersize=12
):
"""
        Plots an image using class-colored markers
Args:
show_legend (bool): display or not the legend
show_grid (bool): display or not the grid
file_saving_name (str): filename without extension
marker (str): plot marker. See https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
markersize (float): marker size (only works when marker != ',')
"""
self.__plot_vertical_lines()
self.__create_clusters()
self.__plot_clusters(marker, markersize)
self.__plot_and_save(show_legend, show_grid, file_saving_name)
def __filter_clusters(self, filter_by=PlotFilter.UNIQUE):
"""
        Filters the clusters and returns shared atoms or class-unique atoms based on the
        filter_by parameter.
Args:
filter_by (str): plots only class unique atoms [u] or class shared atoms [s]. See constants.PlotFilter definition
"""
assert isinstance(filter_by, str)
assert PlotFilter.is_valid_option(filter_by)
cleaned_clusters = []
for index, cluster in enumerate(self.clusters):
y_s = set(cluster[:, 1])
for index_2, cluster_ in enumerate(self.clusters):
if index != index_2:
if filter_by == PlotFilter.SHARED:
y_s.intersection_update(set(cluster_[:, 1]))
else:
y_s.difference_update(set(cluster_[:, 1]))
cleaned_clusters.append(copy.deepcopy(cluster))
cleaned_index = np.isin(cleaned_clusters[index][:, 1], tuple(y_s))
cleaned_clusters[index] = cleaned_clusters[index][cleaned_index, :]
self.clusters = cleaned_clusters
def plot_filtered_colored_image(
self, show_legend=False, show_grid=False, file_saving_name='',
filter_by=PlotFilter.UNIQUE, marker='.', markersize=12
):
"""
Filters the class-colored clusters and plots them
Args:
show_legend (bool): display or not the legend
show_grid (bool): display or not the grid
file_saving_name (str): filename without extension
filter_by (str): plots only class unique atoms [u] or class shared atoms [s]. See constants.PlotFilter definition
marker (str): plot marker. See https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
markersize (float): marker size (only works when marker != ',')
"""
self.__plot_vertical_lines()
self.__create_clusters()
self.__filter_clusters(filter_by)
self.__plot_clusters(marker, markersize)
self.__plot_and_save(show_legend, show_grid, file_saving_name)
class AtomsPlotter:
"""
Plots dictionary atoms
Usage:
AtomsPlotter(dictionary=D, img_width=128, img_height=96, n_rows=5, n_cols=5)()
"""
def __init__(self, **kwargs):
"""
Initialises the instance
Kwargs:
dictionary (np.ndarray): Learned dictionary
img_width (int): Image width
img_height (int): Image height
            n_rows (int): number of rows
            n_cols (int): number of columns
"""
self.dictionary = kwargs.get('dictionary', None)
self.img_width = kwargs.get('img_width')
self.img_height = kwargs.get('img_height')
self.n_rows = kwargs.get('n_rows', 5)
self.n_cols = kwargs.get('n_cols', 8)
assert isinstance(self.dictionary, np.ndarray)
assert isinstance(self.img_width, int)
assert isinstance(self.img_height, int)
assert isinstance(self.n_rows, int)
assert isinstance(self.n_cols, int)
assert self.n_rows * self.n_cols <= self.dictionary.shape[1], \
"n_rows * n_cols cannot be bigger than the number of dictionary atoms"
def __call__(self, file_saving_name=''):
""" Functor """
self._plot(file_saving_name)
def _plot(self, file_saving_name=''):
"""
* Plots a determined number of dictionary atoms in grayscale
* If a filename is provided then the figure is saved.
Args:
            file_saving_name (str): name (without extension) used to save the figure
"""
assert isinstance(file_saving_name, str)
plt.close('all')
fig, axes = plt.subplots(self.n_rows, self.n_cols, figsize=(20, 10),
subplot_kw={'xticks': [], 'yticks': []})
axes_list = axes.flat if self.n_rows*self.n_cols > 1 else [axes]
for index, ax in enumerate(axes_list):
ax.imshow(self.dictionary[:, index].reshape(self.img_height, self.img_width), cmap=plt.cm.gray)
plt.tight_layout()
plt.show()
if file_saving_name:
fig.savefig('{}.png'.format(file_saving_name), bbox_inches='tight')
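# Editor's sketch (not part of the original module): a minimal AtomsPlotter
# run on a random dictionary whose 40 columns are flattened 8x8 atoms. The
# array shape and grid size below are illustrative assumptions only.
if __name__ == '__main__':
    demo_dictionary = np.random.rand(8 * 8, 40)  # 40 atoms, 64 pixels each
    AtomsPlotter(dictionary=demo_dictionary, img_width=8, img_height=8,
                 n_rows=5, n_cols=8)()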
|
{"hexsha": "574739bf30ce7b399fc07cb854b2009fb641f4bc", "size": 13819, "ext": "py", "lang": "Python", "max_stars_repo_path": "lcksvd/utils/plot_tools.py", "max_stars_repo_name": "QNZhang/LC-KSVD", "max_stars_repo_head_hexsha": "08a7810bf7913422859f14fadcb51c8fbe5f5ef3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lcksvd/utils/plot_tools.py", "max_issues_repo_name": "QNZhang/LC-KSVD", "max_issues_repo_head_hexsha": "08a7810bf7913422859f14fadcb51c8fbe5f5ef3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lcksvd/utils/plot_tools.py", "max_forks_repo_name": "QNZhang/LC-KSVD", "max_forks_repo_head_hexsha": "08a7810bf7913422859f14fadcb51c8fbe5f5ef3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-04T19:51:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-04T19:51:22.000Z", "avg_line_length": 41.0059347181, "max_line_length": 155, "alphanum_fraction": 0.6131413272, "include": true, "reason": "import numpy", "num_tokens": 2969}
|
"""
# KNN
k nearest neighbours
Pros
- Relatively simple
Cons
- Computationally intensive
- Hard to represent relationships between features
"""
from scipy.spatial import distance
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# set up
iris = load_iris()
# f(X) = y
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5)
def accuracy_of(classifier):
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print(accuracy_score(y_test, predictions))
class KNN:
def __init__(self):
self._X_train = []
self._y_train = []
def fit(self, X_train, y_train):
self._X_train = X_train
self._y_train = y_train
def predict(self, X_test):
        return [self.closest(x) for x in X_test]
    def closest(self, x):
        # 1-NN: return the label of the single nearest training point
        indexed_distances = [(index, distance.euclidean(x, t))
                             for index, t in enumerate(self._X_train)]
        indexed_distances.sort(key=lambda pair: pair[1])
        return self._y_train[indexed_distances[0][0]]
accuracy_of(KNN())
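# Editor's sketch (not in the original file): a majority-vote extension for
# k > 1 neighbours built on the KNN class above. The class name and its "k"
# parameter are illustrative assumptions.
from collections import Counter
from scipy.spatial import distance
class MajorityVoteKNN(KNN):
    def __init__(self, k=3):
        super().__init__()
        self.k = k
    def closest(self, x):
        # sort every training point by distance, then vote among the k nearest
        dists = sorted((distance.euclidean(x, t), index)
                       for index, t in enumerate(self._X_train))
        top_labels = [self._y_train[index] for _, index in dists[:self.k]]
        return Counter(top_labels).most_common(1)[0][0]
accuracy_of(MajorityVoteKNN())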
|
{"hexsha": "65958dc1f01020460be3c7962f4e67331c01ad29", "size": 1189, "ext": "py", "lang": "Python", "max_stars_repo_path": "google-machine-learning-recipes/implement-knn-classifier.py", "max_stars_repo_name": "NicholasTD07/demos", "max_stars_repo_head_hexsha": "1eb346bdbb3db764d977a202a662b8e3b4b8ef51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "google-machine-learning-recipes/implement-knn-classifier.py", "max_issues_repo_name": "NicholasTD07/demos", "max_issues_repo_head_hexsha": "1eb346bdbb3db764d977a202a662b8e3b4b8ef51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "google-machine-learning-recipes/implement-knn-classifier.py", "max_forks_repo_name": "NicholasTD07/demos", "max_forks_repo_head_hexsha": "1eb346bdbb3db764d977a202a662b8e3b4b8ef51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2321428571, "max_line_length": 108, "alphanum_fraction": 0.6955424727, "include": true, "reason": "from scipy", "num_tokens": 296}
|
# -*- coding: utf-8 -*-
"""Demo182_RareCategories_HighCardinality.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oGG64IaKf-Oavjh6VAY0q8H9R2CLNq3B
## Rare Labels
- Values present for a small percentage
- Usually present less than 5%
- Concept of cardinality
## Rare label consequences
- May add information in low cardinality
- May add noise in high cardinality
### Engineering Rare Labels
- Replacing by most frequent label
- Grouping all rare labels together
Categorical variables can have:
- One predominant category
- A small number of categories
- High cardinality
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from google.colab import drive
drive.mount('/content/gdrive')
data = pd.read_csv("gdrive/My Drive/Colab Notebooks/FeatureEngineering/train.csv")
data.head()
data.columns
data = data[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp',
'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']]
# get number of categories in variables
categoricals = []
for col in data.columns:
if data[col].dtypes =='O':
print('{} categories : {} '.format(col, len(data[col].unique())))
categoricals.append(col)
# Get variables with more than n categories
n = 8
cats = []
for col in data.columns:
if data[col].dtypes =='O':
if len(data[col].unique())>n:
print('{} categories : {} '.format(col, len(data[col].unique())))
cats.append(col)
for col in cats:
if data[col].dtypes =='O': # if the variable is categorical
        print(100*data.groupby(col)[col].count()/len(data))
print()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data[cats], data['Survived'],
test_size=0.2)
X_train.shape, X_test.shape
def label_encoder(X_train, X_test, columns, na_flag = False):
import random
for col in columns:
mapper = {k:i for i, k in enumerate(X_train[col].unique(), 0)}
if na_flag:
mapper[np.nan] = np.nan
X_train.loc[:, col] = X_train.loc[:, col].map(mapper)
X_test.loc[:, col] = X_test.loc[:, col].map(mapper)
X_test[col] = X_test[col].fillna(random.choice(list(mapper.values())))
label_encoder(X_train, X_test, cats)
X_train.isnull().sum()
X_test.isnull().sum()
sns.set()
for i in cats:
plt.figure()
sns.distplot(X_train[i], kde=False)
def new_label_imputation(Xtrain, Xtest, threshold, cats):
X_train, X_test = Xtrain.copy(), Xtest.copy()
for col in cats:
rows = len(X_train)
temp_df = pd.Series(100*X_train[col].value_counts() / rows)
nonrares = temp_df[temp_df>=threshold].index # non-rare labels
X_train[col] = np.where(Xtrain[col].isin(nonrares), Xtrain[col], 'rare')
X_test[col] = np.where(Xtest[col].isin(nonrares), Xtest[col], 'rare')
return X_train, X_test
X_train_rare, X_test_rare = new_label_imputation(X_train, X_test, 1, cats)
label_encoder(X_train_rare, X_test_rare, cats)
sns.set()
for i in cats:
fig, ax = plt.subplots(1,2, figsize=(10,5))
sns.distplot(X_train[i], kde=False, ax=ax[0])
sns.distplot(X_train_rare[i], kde=False, ax=ax[1])
def frequent_imputation(Xtrain, Xtest, threshold, cats):
X_train, X_test = Xtrain.copy(), Xtest.copy()
for col in cats:
rows = len(X_train)
temp_df = pd.Series(100*X_train[col].value_counts() / rows)
nonrares = temp_df[temp_df>=threshold].index # non-rare labels
frequent_cat = X_train.groupby(col)[col].count().sort_values().tail(1).index.values[0]
X_train[col] = np.where(Xtrain[col].isin(nonrares), Xtrain[col], frequent_cat)
X_test[col] = np.where(Xtest[col].isin(nonrares), Xtest[col], frequent_cat)
return X_train, X_test
X_train_freq, X_test_freq = frequent_imputation(X_train, X_test, 1, cats)
sns.set()
for i in cats:
fig, ax = plt.subplots(1,2, figsize=(10,5))
sns.distplot(X_train[i], kde=False, ax=ax[0])
sns.distplot(X_train_freq[i], kde=False, ax=ax[1])
def classifier(X_train, y_train, X_test, y_test, cols, model):
from sklearn.metrics import accuracy_score
model.fit(X_train[cols],y_train)
y_pred = model.predict(X_test[cols])
y_pred = np.round(y_pred).flatten()
print(accuracy_score(y_test, y_pred))
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
classifier(X_train_rare, y_train, X_test_rare, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
from sklearn.linear_model import RidgeClassifier
model = RidgeClassifier()
classifier(X_train_rare, y_train, X_test_rare, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
from sklearn.linear_model import RidgeClassifierCV
model = RidgeClassifierCV()
classifier(X_train_rare, y_train, X_test_rare, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
from sklearn.svm import SVC
model = SVC()
classifier(X_train_rare, y_train, X_test_rare, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
classifier(X_train, y_train, X_test, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
from sklearn.svm import LinearSVC
model = LinearSVC()
classifier(X_train, y_train, X_test, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
classifier(X_train, y_train, X_test, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
classifier(X_train, y_train, X_test, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
from sklearn.linear_model import SGDClassifier
model = SGDClassifier()
classifier(X_train, y_train, X_test, y_test, cats, model)
classifier(X_train_freq, y_train, X_test_freq, y_test, cats, model)
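# Editor's sketch (not part of the original notebook): the two rare-label
# strategies above applied to a tiny toy column. The toy data and the 5%
# threshold are illustrative assumptions.
toy = pd.DataFrame({'city': ['NY'] * 70 + ['LA'] * 24
                    + ['SF', 'DC', 'BO', 'SE', 'AU', 'CH']})
toy_grouped, _ = new_label_imputation(toy, toy, 5, ['city'])
toy_frequent, _ = frequent_imputation(toy, toy, 5, ['city'])
print(toy_grouped['city'].value_counts())   # the six 1% labels collapse into 'rare'
print(toy_frequent['city'].value_counts())  # the six 1% labels map to 'NY'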
|
{"hexsha": "bd6349c8b34430aab3f90a84b04c7d951885e79a", "size": 6211, "ext": "py", "lang": "Python", "max_stars_repo_path": "FeatureEngineeringPy_DataScience/demo182_rarecategories_highcardinality.py", "max_stars_repo_name": "mahnooranjum/Programming_DataScience", "max_stars_repo_head_hexsha": "f7a4215d4615b3f8460c3a1944a585628cf6930d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FeatureEngineeringPy_DataScience/demo182_rarecategories_highcardinality.py", "max_issues_repo_name": "mahnooranjum/Programming_DataScience", "max_issues_repo_head_hexsha": "f7a4215d4615b3f8460c3a1944a585628cf6930d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FeatureEngineeringPy_DataScience/demo182_rarecategories_highcardinality.py", "max_forks_repo_name": "mahnooranjum/Programming_DataScience", "max_forks_repo_head_hexsha": "f7a4215d4615b3f8460c3a1944a585628cf6930d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8512820513, "max_line_length": 92, "alphanum_fraction": 0.7193688617, "include": true, "reason": "import numpy", "num_tokens": 1665}
|
"""
Tests for simple_sbml
"""
from SBMLKinetics.common import constants as cn
from SBMLKinetics.common import simple_sbml
from SBMLKinetics.common.simple_sbml import SimpleSBML
from SBMLKinetics.common.reaction import Reaction
from SBMLKinetics.common import util
from tests.common import helpers
import copy
import numpy as np
import os
import libsbml
import unittest
import tellurium as te
import zipfile
IGNORE_TEST = False
IS_PLOT = False
NO_NAME = "dummy"
#############################
# Tests
#############################
class TestSimpleSBML(unittest.TestCase):
def setUp(self):
self.simple = helpers.getSimple()
def testConstructor(self):
if IGNORE_TEST:
return
def test(a_list, a_type):
self.assertGreater(len(a_list), 0)
self.assertTrue(isinstance(a_list[0], a_type))
#
test(self.simple.reactions, Reaction)
test(self.simple.species, libsbml.Species)
test(self.simple.parameters, libsbml.Parameter)
self.assertTrue(isinstance(self.simple.model,
libsbml.Model))
simple = helpers.getSimple_BIOMD56()
self.assertGreater(len(simple.function_definitions), 0)
def testGet(self):
if IGNORE_TEST:
return
def test(func, a_list):
this_id = a_list[0].getId()
an_object = func(this_id)
self.assertEqual(an_object, a_list[0])
#
test(self.simple.getReaction, self.simple.reactions)
test(self.simple.getSpecies, self.simple.species)
test(self.simple.getParameter, self.simple.parameters)
def testConstructWithRoadrunner(self):
if IGNORE_TEST:
return
model = te.loadSBMLModel(helpers.TEST_PATH)
simple = helpers.getSimple()
self.assertGreater(len(simple.reactions), 0)
class TestFunctions(unittest.TestCase):
def testReadURL(self):
pass
def _testIterator(self, itr):
for item in itr:
self.assertTrue(isinstance(item.model,
SimpleSBML))
COUNT = 5
itr = simple_sbml.modelIterator(final=COUNT)
item_number = -1
for item in itr:
self.assertTrue(isinstance(item.filename, str))
item_number = item.number
self.assertEqual(item_number, COUNT - 1)
def testModelIterator1(self):
if IGNORE_TEST:
return
self._testIterator(simple_sbml.modelIterator(final=1))
def testGetZipfilePath(self):
if IGNORE_TEST:
return
ffiles, zipper = simple_sbml.getZipfilePaths()
for ffile in ffiles:
try:
fid = zipper.open(ffile)
fid.close()
      except Exception:
        self.fail("could not open {} from zip archive".format(ffile))
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "2b7f36816beb1119aa43d99bef44ca8cc00f2f35", "size": 2579, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_simple_sbml.py", "max_stars_repo_name": "ModelEngineering/Kinetics-Validator", "max_stars_repo_head_hexsha": "9350da492fd9c1482b50332f386632e6db0e7ed2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_simple_sbml.py", "max_issues_repo_name": "ModelEngineering/Kinetics-Validator", "max_issues_repo_head_hexsha": "9350da492fd9c1482b50332f386632e6db0e7ed2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_simple_sbml.py", "max_forks_repo_name": "ModelEngineering/Kinetics-Validator", "max_forks_repo_head_hexsha": "9350da492fd9c1482b50332f386632e6db0e7ed2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5619047619, "max_line_length": 59, "alphanum_fraction": 0.6901899961, "include": true, "reason": "import numpy", "num_tokens": 606}
|
#
# Copyright (c) Sinergise, 2019 -- 2021.
#
# This file belongs to subproject "field-delineation" of project NIVA (www.niva4cap.eu).
# All rights reserved.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
#
from typing import Iterable, Dict, List
import os
import numpy as np
import pandas as pd
from dataclasses import dataclass
from fd.utils import BaseConfig, prepare_filesystem
@dataclass
class ComputeNormalizationConfig(BaseConfig):
npz_files_folder: str
metadata_file: str
def stats_per_npz_ts(npz_file: str, config: ComputeNormalizationConfig) -> Dict[str, np.array]:
filesystem = prepare_filesystem(config)
data = np.load(filesystem.openbin(os.path.join(config.npz_files_folder, npz_file), 'rb'), allow_pickle=True)
features = data['X']
return {'mean': np.mean(features, axis=(1, 2)),
'median': np.median(features, axis=(1, 2)),
'perc_1': np.percentile(features, q=1, axis=(1, 2)),
'perc_5': np.percentile(features, q=5, axis=(1, 2)),
'perc_95': np.percentile(features, q=95, axis=(1, 2)),
'perc_99': np.percentile(features, q=99, axis=(1, 2)),
'std': np.std(features, axis=(1, 2)),
'minimum': np.min(features, axis=(1, 2)),
'maximum': np.max(features, axis=(1, 2)),
'timestamp': data['timestamps'],
'patchlet': data['eopatches']
}
def concat_npz_results(stat: str, results: List[Dict[str, np.array]]) -> np.array:
return np.concatenate([x[stat] for x in results])
def create_per_band_norm_dataframe(concatenated_stats: Dict[str, np.array], stats_keys: Iterable[str],
identifier_keys: Iterable[str]) -> pd.DataFrame:
norm_df_dict = {}
n_bands = concatenated_stats[stats_keys[0]].shape[-1]
for stat in stats_keys:
for band in range(0, n_bands):
norm_df_dict[f'{stat}_b{band}'] = concatenated_stats[stat][..., band]
for identifier in identifier_keys:
norm_df_dict[f'{identifier}'] = concatenated_stats[identifier]
return pd.DataFrame(norm_df_dict)
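# Editor's sketch (not in the original module): one way the three helpers
# above might be chained. The stat/identifier keys mirror stats_per_npz_ts;
# the function name and file list are illustrative assumptions.
def build_norm_dataframe(npz_files: List[str],
                         config: ComputeNormalizationConfig) -> pd.DataFrame:
    stats_keys = ['mean', 'median', 'perc_1', 'perc_5', 'perc_95', 'perc_99',
                  'std', 'minimum', 'maximum']
    identifier_keys = ['timestamp', 'patchlet']
    results = [stats_per_npz_ts(npz_file, config) for npz_file in npz_files]
    concatenated = {key: concat_npz_results(key, results)
                    for key in stats_keys + identifier_keys}
    return create_per_band_norm_dataframe(concatenated, stats_keys,
                                          identifier_keys)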
|
{"hexsha": "1aed84ceab6d8609f094b45af88e6c26e337a3bf", "size": 2188, "ext": "py", "lang": "Python", "max_stars_repo_path": "fd/compute_normalization.py", "max_stars_repo_name": "SFrav/field-delineation", "max_stars_repo_head_hexsha": "205530069d716d8bb69a1fc9fd8d523178f470a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2021-01-20T16:32:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:42:12.000Z", "max_issues_repo_path": "fd/compute_normalization.py", "max_issues_repo_name": "florentdemelezi/field-delineation", "max_issues_repo_head_hexsha": "205530069d716d8bb69a1fc9fd8d523178f470a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-02-13T17:41:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T14:45:11.000Z", "max_forks_repo_path": "fd/compute_normalization.py", "max_forks_repo_name": "florentdemelezi/field-delineation", "max_forks_repo_head_hexsha": "205530069d716d8bb69a1fc9fd8d523178f470a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2021-02-19T16:15:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T09:21:37.000Z", "avg_line_length": 36.4666666667, "max_line_length": 112, "alphanum_fraction": 0.657678245, "include": true, "reason": "import numpy", "num_tokens": 561}
|
import csv
import logging
import os
from tempfile import NamedTemporaryFile
from pprint import pformat
from typing import Mapping
from dateutil import parser as DatetimeParser
import attr
import pydicom
from hashlib import md5
import numpy as np
from crud.abc import Serializable
from .report import RadiologyReport
from ..utils.dicom import DicomLevel, DicomFormatError
from ..utils import dicom_simplify, pack_data, unpack_data, ExceptionHandlingIterator
from ..utils.gateways import orthanc_id, Montage
from io import BytesIO
@attr.s(cmp=False, hash=False)
class Dixel(Serializable):
"""
"Dixels" are DICOMish-elements (following pixels, voxels, and texels). They
may include metadata, tags, a file, and a pixel array. All Dixels have an id,
typically following the Orthanc format, and a DicomLevel (study, series, or instance).
DIANA endpoints handle and store dixel instances. Some functions may
take a dixel identifier and return the dixel instance.
"""
meta = attr.ib(factory=dict) #: Metadata dict
tags = attr.ib(factory=dict) #: Dicom tag dict
level = attr.ib(default=DicomLevel.STUDIES,
converter=DicomLevel) #: Study, series, instance
# Making this init=False removes it from the serializer
# Use a "from" constructor or add "file" manually after creation
file = attr.ib(default=None, repr=False, init=False) #: Stores binary file representation
pixels = attr.ib(default=None, repr=False, init=False) #: Stores pixel array representation
report = attr.ib(default=None, repr=False, init=False) #: Stores study report as RadiologyReport
#: Stores information about sub-dixels (series for study, instances for series)
children = attr.ib(init=False, factory=list, repr=False)
#: Stores reference to parent dixel (study for series, series for instances)
parent = attr.ib(default=None, repr=False)
def instances(self):
if self.level == DicomLevel.INSTANCES:
yield self
elif self.level == DicomLevel.SERIES:
for inst in self.children:
yield inst
elif self.level == DicomLevel.STUDIES:
for ser in self.children:
for inst in ser.children:
yield inst
else:
raise TypeError
def simplify_tags(self):
self.tags = dicom_simplify(self.tags)
# Copy all simplified datetimes into meta
for tag, value in self.tags.items():
if tag.endswith("DateTime"):
self.meta[tag] = value
    def to_csv(self, filepath, mode='a+'):
        write_headers = not os.path.exists(filepath)
with open(filepath, mode, newline="") as csvFile:
writer = csv.writer(csvFile)
if write_headers:
writer.writerow(list(self.meta.keys()) + list(self.tags.keys()))
row = []
for k, v in self.meta.items():
if k == "ReportText":
row.append(self.report)
else:
row.append(v)
for k, v in self.tags.items():
row.append(v)
writer.writerow(row)
@staticmethod
def from_pydicom(ds: pydicom.Dataset, fn: str=None, file=None):
"""Generate a Dixel from a pydicom dataset"""
meta = {
'FileName': fn,
'TransferSyntaxUID': ds.file_meta.TransferSyntaxUID,
'TransferSyntax': str(ds.file_meta.TransferSyntaxUID),
'MediaStorage': str(ds.file_meta.MediaStorageSOPClassUID),
}
def dictify_ds(ds):
output = dict()
_ds = ExceptionHandlingIterator(ds)
for elem in _ds:
                if elem.keyword == "PixelData":
                    continue  # pixel data is handled separately
elif not elem.value or not elem.keyword:
continue
elif elem.VR == "PN":
output[elem.keyword] = str(elem.value)
# print(elem.value)
elif elem.VM != 1 and elem.VR == 'SQ':
# elif elem.keyword == "AdmittingDiagnosesCodeSequence":
# print(f"Diagnosis Code: VM {elem.VM} VR {elem.VR}")
output[elem.keyword] = [dictify_ds(item) for item in elem]
elif elem.VM != 1:
# print(f"VM ne 1: VM {elem.VM} VR {elem.VR}")
output[elem.keyword] = [item for item in elem]
elif elem.VR != 'SQ':
output[elem.keyword] = elem.value
else:
output[elem.keyword] = [dictify_ds(item) for item in elem]
# print(output)
return output
tags = dictify_ds(ds)
# MONOCHROME, RGB etc.
if (0x0028, 0x0004) in ds:
tags['PhotometricInterpretation'] = ds[0x0028, 0x0004].value
# logging.debug(pformat(tags))
d = Dixel(meta=meta,
tags=tags,
level=DicomLevel.INSTANCES)
d.simplify_tags()
# TODO: If the creation times are going to be "now", use the file creation time instead?
if not d.tags.get("PatientID") and d.tags.get("PatientName"):
logging.warning("Imputing missing PatientID from PatientName")
new_id = md5(d.tags.get("PatientName").encode('utf8')).hexdigest()
d.tags["PatientID"] = new_id
if hasattr(ds, "PixelData"):
# Don't need file, can recreate it
logging.warning("Creating file with new PatientID tag, OID will be valid")
ds_edit = ds
ds_edit.PatientID = new_id
with NamedTemporaryFile() as f:
ds_edit.save_as(filename=f.name, write_like_original=True)
file = f.read()
elif not hasattr(ds, "PixelData") and file:
# Read pixels out of file and _then_ recreate it
logging.warning("Loading pixels and creating file with new PatientID tag, OID will be valid")
ds_edit = pydicom.read_file(BytesIO(file), stop_before_pixels=False)
ds_edit.PatientID = new_id
with NamedTemporaryFile() as f:
ds_edit.save_as(filename=f.name, write_like_original=True)
file = f.read()
else:
logging.warning("No file to update, OID will be invalid")
if not d.tags.get('PatientID') or \
not d.tags.get('StudyInstanceUID') or \
not d.tags.get('SeriesInstanceUID') or \
not d.tags.get('SOPInstanceUID'):
raise DicomFormatError("File is missing required tags")
if file:
d.file = file
if hasattr(ds, "PixelData"):
d.pixels = ds.pixel_array
return d
@staticmethod
def from_montage_csv(data: Mapping):
"""Generate a dixel from a line in a Montage csv download"""
tags = {
"AccessionNumber": data["Accession Number"],
"PatientID": data["Patient MRN"],
'StudyDescription': data['Exam Description'],
'ReferringPhysicianName': data['Ordered By'],
'PatientSex': data['Patient Sex'],
"StudyDate": data['Exam Completed Date'],
'Organization': data['Organization'],
}
meta = {
'PatientName': "{}^{}".format(
data["Patient Last Name"].upper(),
data["Patient First Name"].upper()),
'PatientAge': data['Patient Age'],
"OrderCode": data["Exam Code"],
"PatientStatus": data["Patient Status"],
"ReportText": data["Report Text"],
}
d = Dixel(meta=meta,
tags=tags,
level=DicomLevel.STUDIES)
d.report = RadiologyReport(meta['ReportText'])
return d
@staticmethod
def from_montage_json(data: Mapping):
"""
Generate a dixel from a Montage JSON result (as returned by
the Montage Endpoint.
Metadata includes Montage-mapped CPT codes; to dereference them
to real CPT codes and body parts, call Montage().get_meta(dixel)
"""
# logging.debug(pformat(data['exam_type']))
# TODO: Check event flags for various event types to get ordering, study, and reading
try:
referring_physician = data['events'][0].get('provider')
if referring_physician:
referring_physician = referring_physician.get('name')
study_datetime = None
if len(data['events']) > 1:
# Last event is usually read I think, take event _before_ last one
study_event = data['events'][-2]
if study_event.get('date'):
study_datetime = DatetimeParser.parse(study_event['date'])
else:
# Otherwise just take whatever is last
study_event = data['events'][-1]
if study_event.get('date'):
study_datetime = DatetimeParser.parse(study_event['date'])
montage_cpts = []
for resource in data["exam_type"]["cpts"]:
code = resource.split("/")[-2]
montage_cpts.append(code)
tags = {
"AccessionNumber": data["accession_number"],
"PatientID": data["patient_mrn"],
'StudyDescription': data['exam_type']['description'],
'ReferringPhysicianName': referring_physician,
'PatientSex': data['patient_sex'],
'Organization': data['organization']['label'],
"Modality": data['exam_type']['modality']['label']
}
meta = {
'BodyParts': None, # Placeholder for meta
'CPTCodes': None, # Placeholder for meta
'PatientName': "{}^{}".format(
data["patient_last_name"].upper(),
data["patient_first_name"].upper()),
'PatientAge': data['patient_age'],
"OrderCode": data["exam_type"]["code"],
"PatientStatus": data["patient_status"],
"ReportText": Montage.clean_text(data['text']),
"ReadingPhysiciansName": data['events'][-1]['provider']['name'],
'StudyDateTime': study_datetime,
"MontageCPTCodes": montage_cpts
}
except KeyError:
meta = {
"BodyParts": None,
"CPTCodes": None,
"MontageCPTCodes": data['meta']['MontageCPTCodes'],
"OrderCode": data['meta']['OrderCode'],
"PatientAge": data['meta']['PatientAge'],
"PatientName": data['meta']['PatientName'],
"PatientStatus": data['meta']['PatientStatus'],
"ReadingPhysiciansName": data['meta']['ReadingPhysiciansName'],
"ReportText": data['meta']['ReportText'],
"StudyDateTime": data['meta']['StudyDateTime']
}
tags = {
"AccessionNumber": data['tags']['AccessionNumber'],
"Modality": data['tags']['Modality'],
"Organization": data['tags']['Organization'],
"PatientID": data['tags']['PatientID'],
"PatientSex": data['tags']['PatientSex'],
"ReferringPhysicianName": data['tags']['ReferringPhysicianName'],
"StudyDescription": data['tags']['StudyDescription']
}
d = Dixel(meta=meta,
tags=tags,
level=DicomLevel.STUDIES)
d.report = RadiologyReport(meta['ReportText'])
return d
@staticmethod
def from_orthanc(meta: Mapping=None, tags: Mapping=None,
level: DicomLevel=DicomLevel.STUDIES, file=None):
"""Generate a dixel from an Orthanc json tag dictionary"""
d = Dixel(meta=meta,
tags=tags,
level=level)
d.simplify_tags()
if file:
d.file = file
return d
def parent_oid(self, level=DicomLevel.STUDIES):
if level == DicomLevel.STUDIES:
return orthanc_id(self.tags.get('PatientID'),
self.tags.get('StudyInstanceUID'))
elif level == DicomLevel.SERIES:
return orthanc_id(self.tags.get('PatientID'),
self.tags.get('StudyInstanceUID'),
self.tags.get('SeriesInstanceUID'))
raise ValueError("Unknown parent level requested {}".format(level))
def oid(self):
"""Compute Orthanc ID"""
if not self.meta.get('ID'):
if self.level == DicomLevel.STUDIES:
self.meta['ID'] = orthanc_id(self.tags.get('PatientID'),
self.tags.get('StudyInstanceUID'))
elif self.level == DicomLevel.SERIES:
self.meta['ID'] = orthanc_id(self.tags.get('PatientID'),
self.tags.get('StudyInstanceUID'),
self.tags.get('SeriesInstanceUID'))
elif self.level == DicomLevel.INSTANCES:
self.meta['ID'] = orthanc_id(self.tags.get('PatientID'),
self.tags.get('StudyInstanceUID'),
self.tags.get('SeriesInstanceUID'),
self.tags.get('SOPInstanceUID'))
else:
raise ValueError("Unknown DicomLevel for oid")
return self.meta.get('ID')
def sid(self):
"""Serializer id alias to tags['AccessionNumber']"""
return self.tags.get('AccessionNumber')
    def __eq__(self, other):
        # __cmp__ is ignored by Python 3; compare by serializer id instead,
        # calling sid() rather than comparing the bound methods themselves
        logging.debug(self.sid())
        logging.debug(other.sid())
        return self.sid() == other.sid()
    # keep identity hashing (attrs is configured with hash=False)
    __hash__ = object.__hash__
@property
def acc_num(self):
return self.tags.get("AccessionNumber")
@property
def fn(self):
"""Filename alias for meta['Filename']"""
if not self.meta.get("FileName"):
self.meta["FileName"] = self.tags["AccessionNumber"]
return self.meta.get('FileName')
@property
def image_base_fn(self):
"""Filename for image instance"""
return "{acc}-{ser:04}-{ins:04}".format(acc=self.tags['AccessionNumber'] or self.tags['StudyInstanceUID'],
ser=self.tags["SeriesNumber"],
ins=self.tags["InstanceNumber"])
def get_pixels(self):
if self.pixels is None:
raise TypeError
if self.meta.get('PhotometricInterpretation') == "RGB":
pixels = self.pixels.reshape([self.pixels.shape[1], self.pixels.shape[2], 3])
else:
pixels = self.pixels
if self.tags.get("RescaleSlope") and \
self.tags.get("RescaleIntercept"):
if pixels.dtype == "uint16":
pixels = np.int16(pixels)
pixels *= int(self.tags.get("RescaleSlope"))
pixels += int(self.tags.get("RescaleIntercept"))
else:
logging.debug("No rescale slope/intercept in DICOM header")
return pixels
@property
def pixel_spacing(self):
if not self.tags.get("PixelSpacing"):
raise ValueError("No pixel spacing info available")
# Return spacing values as floats (in mm)
return [float(x) for x in self.tags.get("PixelSpacing")]
@property
def image_orientation_patient(self):
if not self.tags.get("ImageOrientationPatient"):
raise ValueError("No patient orientation info available")
return [float(x) for x in self.tags.get("ImageOrientationPatient")]
def query(self):
return {
"PatientName": "",
"PatientID": "",
"PatientBirthDate": "",
"PatientSex": "",
"AccessionNumber": self.tags.get("AccessionNumber"),
"StudyDescription": "",
"StudyInstanceUID": "",
"StudyDate": "",
"StudyTime": "",
}
def pack_fields(self, fkey, fields=None):
rv = pack_data({**self.tags, **self.meta}, fkey, fields)
return rv
def unpack_fields(self, data, fkey, prefix=None):
unpacked = unpack_data(data, fkey)
for k, v in unpacked.items():
if prefix:
k = "{}-{}".format(prefix, k)
self.meta[k] = v
Dixel.register()
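# Editor's sketch (not part of the module): building a Dixel from a DICOM
# file on disk via from_pydicom. The path is a placeholder assumption.
def _dixel_from_file_demo(path="example.dcm"):
    with open(path, "rb") as f:
        blob = f.read()
    ds = pydicom.dcmread(BytesIO(blob), stop_before_pixels=False)
    dixel = Dixel.from_pydicom(ds, fn=path, file=blob)
    print(dixel.oid(), dixel.acc_num)
    return dixel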
|
{"hexsha": "3bb8b4eeb8ad9c3694864ed56795e0c9ad94a98c", "size": 16817, "ext": "py", "lang": "Python", "max_stars_repo_path": "package/diana/dixel/dixel.py", "max_stars_repo_name": "thomasyi17/diana2", "max_stars_repo_head_hexsha": "2167053dfe15b782d96cb1e695047433f302d4dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2019-02-12T23:26:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T08:53:58.000Z", "max_issues_repo_path": "package/diana/dixel/dixel.py", "max_issues_repo_name": "thomasyi17/diana2", "max_issues_repo_head_hexsha": "2167053dfe15b782d96cb1e695047433f302d4dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-01-23T21:13:12.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-28T15:45:51.000Z", "max_forks_repo_path": "package/diana/dixel/dixel.py", "max_forks_repo_name": "thomasyi17/diana2", "max_forks_repo_head_hexsha": "2167053dfe15b782d96cb1e695047433f302d4dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-01-23T20:22:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T03:27:04.000Z", "avg_line_length": 38.5711009174, "max_line_length": 114, "alphanum_fraction": 0.5516441696, "include": true, "reason": "import numpy", "num_tokens": 3547}
|
#!/usr/bin/env python3
import numpy as np
from scipy.special import jn
import scipy.constants as sc
## Shared Helper Functions
def beta_from_gamma(gamma):
return(np.sqrt(1-np.power(gamma,-2)))
def k_beta_(gamma_0, k_p):
return(k_p/np.sqrt(2*gamma_0))
def r_beta_(a_beta, gamma_0, k_beta):
return(a_beta/gamma_0/k_beta)
def alpha_0(a_beta, gamma_0, theta):
return(1 - np.cos(theta)*beta_from_gamma(gamma_0)*(1 - np.power(a_beta/gamma_0/2,2)))
def alpha_x(k, r_beta, theta, phi):
return(k*r_beta*np.sin(theta)*np.cos(phi))
def alpha_z(k, k_beta, r_beta, beta_z, theta):
return(np.cos(theta)*beta_z*k*k_beta*r_beta*r_beta/8)
### Esarey approximate calculation
def calculate_psd_esarey(k, N_beta, a_beta, gamma_0, k_p, theta, phi):
if theta != 0:
raise NotImplementedError("Off axis radiation not yet implemented.")
if phi != 0:
raise NotImplementedError("Off axis radiation not yet implemented.")
k_beta = k_beta_(gamma_0, k_p)
r_beta = r_beta_(a_beta, gamma_0, k_beta)
k_min = np.min(np.asarray(k))
k_max = np.max(np.asarray(k))
n_min = 1
n_max = np.ceil( np.max(np.asarray( alpha_0(a_beta, gamma_0, theta)
*k_max/k_beta)))
n_range = np.arange(n_min, n_max+1, 2)
n = np.outer(np.ones(k.shape), n_range)
k = np.outer(k, np.ones(n_range.shape))
k_n = n*k_beta/alpha_0(a_beta, gamma_0, theta)
alpha = np.power(a_beta,2)*np.power(8*k_beta*np.power(gamma_0,2),-1)
Fn = n*alpha*k*np.power(jn((n-1)/2,alpha*k) + jn((n+1)/2, alpha*k),2)
Rn = np.power(np.sinc(n*N_beta* (k/k_n -1)),2)
pref = np.power(2*sc.e*gamma_0*N_beta,2)*k/(sc.c*k_n*(1 + 0.5*a_beta**2))
return(np.sum(pref * Rn * Fn, -1))
### Full Calculation including crossterms
def Ixn(k, n, N_beta, a_beta, gamma_0, k_p, theta, phi):
beta_z = beta_from_gamma(gamma_0)
k_beta = k_beta_(gamma_0, k_p)
r_beta = r_beta_(a_beta, gamma_0, k_beta)
sinc_term = np.sinc(N_beta*( alpha_0(a_beta, gamma_0, theta)*k/k_beta - n))
if theta == 0:
if n/2 == int(n/2):
sum_term = 0 #even harmonics don't contribute on axis
else:
sum_term = jn((n-1)/2, alpha_z(k, k_beta, r_beta, beta_z, theta)) + jn((n+1)/2, alpha_z(k, k_beta, r_beta, beta_z, theta))
else:
raise NotImplementedError("Off axis radiation not yet implemented.")
return(sinc_term * sum_term)
def calculate_psd_full(k, N_beta, a_beta, gamma_0, k_p, theta, phi, sinc_overlap_periods=5, mtol=0.1):
if theta != 0:
raise NotImplementedError("Off axis radiation not yet implemented.")
if phi != 0:
raise NotImplementedError("Off axis radiation not yet implemented.")
k_min = np.min(np.asarray(k))
k_max = np.max(np.asarray(k))
k_beta = k_beta_(gamma_0, k_p)
r_beta = r_beta_(a_beta, gamma_0, k_beta)
n_min = np.floor(np.min(np.asarray(alpha_0(a_beta, gamma_0, theta)))*k_min/k_beta) - np.ceil(sinc_overlap_periods/N_beta)
n_max = np.ceil(np.max(np.asarray(alpha_0(a_beta, gamma_0, theta)))*k_max/k_beta) + np.ceil(sinc_overlap_periods/N_beta)
n_range = np.arange(n_min,n_max+1,1)
I_xn = np.empty(n_range.shape + k.shape)
for i,n in enumerate(n_range):
I_xn[i] = Ixn(k, n, N_beta, a_beta, gamma_0, k_p, theta, phi)
I_x = np.pi*N_beta * r_beta* np.sum(I_xn,0)
return( 0.25*np.power(sc.e * k / np.pi,2)/sc.c * np.power(I_x,2))
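# Editor's sketch (not in the original module): evaluating both on-axis
# estimates over a wavenumber range. Every parameter value below is an
# illustrative assumption, not a recommended physics setting.
if __name__ == "__main__":
    k = np.linspace(1e6, 1e8, 2000)  # wavenumbers [1/m]
    common = dict(N_beta=10, a_beta=0.5, gamma_0=100.0, k_p=1e5,
                  theta=0, phi=0)
    psd_esarey = calculate_psd_esarey(k, **common)
    psd_full = calculate_psd_full(k, **common)
    print(psd_esarey.shape, psd_full.shape)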
|
{"hexsha": "4fa8d19ed593878d8680bf3472393719966c81b0", "size": 3497, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyutils/radiation.py", "max_stars_repo_name": "Telemin/pyutils", "max_stars_repo_head_hexsha": "923185eb06bc37af4640d12f7cc52df68331e20f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-05-30T01:37:17.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-30T01:37:17.000Z", "max_issues_repo_path": "src/pyutils/radiation.py", "max_issues_repo_name": "Telemin/pyutils", "max_issues_repo_head_hexsha": "923185eb06bc37af4640d12f7cc52df68331e20f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pyutils/radiation.py", "max_forks_repo_name": "Telemin/pyutils", "max_forks_repo_head_hexsha": "923185eb06bc37af4640d12f7cc52df68331e20f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.625, "max_line_length": 134, "alphanum_fraction": 0.643408636, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1106}
|
[STATEMENT]
lemma f_last_message_hold_length[simp]: "length (xs \<longmapsto>\<^sub>f k) = length xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (xs \<longmapsto> k) = length xs
[PROOF STEP]
apply (case_tac "k = 0", simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. k \<noteq> 0 \<Longrightarrow> length (xs \<longmapsto> k) = length xs
[PROOF STEP]
apply (simp add: f_last_message_hold_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < k \<Longrightarrow> length (concat (map last_message_hold (list_slice2 xs k))) = length xs
[PROOF STEP]
apply (induct xs rule: append_constant_length_induct[of k])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>ys. \<lbrakk>k = 0 \<or> length ys < k; 0 < k\<rbrakk> \<Longrightarrow> length (concat (map last_message_hold (list_slice2 ys k))) = length ys
2. \<And>xs ys. \<lbrakk>length xs = k; 0 < k \<Longrightarrow> length (concat (map last_message_hold (list_slice2 ys k))) = length ys; 0 < k\<rbrakk> \<Longrightarrow> length (concat (map last_message_hold (list_slice2 (xs @ ys) k))) = length (xs @ ys)
[PROOF STEP]
apply (simp add: list_slice2_le)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>xs ys. \<lbrakk>length xs = k; 0 < k \<Longrightarrow> length (concat (map last_message_hold (list_slice2 ys k))) = length ys; 0 < k\<rbrakk> \<Longrightarrow> length (concat (map last_message_hold (list_slice2 (xs @ ys) k))) = length (xs @ ys)
[PROOF STEP]
apply (simp add: list_slice2_append_mod list_slice2_mod_0 list_slice_div_eq_1)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 629, "file": "AutoFocus-Stream_AF_Stream", "length": 6}
|
from fides import Optimizer, BFGS, SR1, DFP, HybridUpdate, SubSpaceDim, \
StepBackStrategy
import numpy as np
import logging
import pytest
import fides
import time
def rosen(x):
f = 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
return f
def rosengrad(x):
f = rosen(x)
g = np.array([-400 * (x[1] - x[0] ** 2) * x[0] - 2 * (1 - x[0]),
200 * (x[1] - x[0] ** 2)])
return f, g
def rosenboth(x):
f, g = rosengrad(x)
h = np.array([[1200 * x[0] ** 2 - 400 * x[1] + 2, -400 * x[0]],
[-400 * x[0], 200]])
return f, g, h
def rosenrandomfail(x):
f, g, h = rosenboth(x)
p = 1/4 # elementwise probability for nan
if np.random.choice(a=[True, False], p=[p, 1-p]):
f = np.nan
g[np.random.choice(a=[True, False], size=g.shape, p=[p, 1-p])] = np.nan
h[np.random.choice(a=[True, False], size=h.shape, p=[p, 1-p])] = np.nan
return f, g, h
def rosenwrongf(x):
f, g, h = rosenboth(x)
return np.ones((1, 1)) * f, g, h
def rosentransg(x):
f, g, h = rosenboth(x)
return f, np.expand_dims(g, 1).T, h
def rosenexpandg(x):
f, g, h = rosenboth(x)
return f, np.expand_dims(g, 1), h
def rosenshortg(x):
f, g, h = rosenboth(x)
return f, g[0], h
def rosenshorth(x):
f, g, h = rosenboth(x)
return f, g, h[0, 0] * np.ones((1, 1))
def rosennonsquarh(x):
f, g, h = rosenboth(x)
return f, g, h[0, :]
def finite_bounds_include_optimum():
lb = np.array([-2, -1.5])
ub = np.array([1.5, 2])
x0 = np.zeros(lb.shape)
return lb, ub, x0
def finite_bounds_exlude_optimum():
lb = np.array([-2, -1.5])
ub = np.array([0.99, 0.99])
x0 = (lb + ub) / 2
return lb, ub, x0
def unbounded_and_init():
lb = np.array([-np.inf, -np.inf])
ub = np.array([np.inf, np.inf])
x0 = np.zeros(lb.shape)
return lb, ub, x0
@pytest.mark.parametrize("stepback", [StepBackStrategy.REFLECT,
StepBackStrategy.SINGLE_REFLECT,
StepBackStrategy.TRUNCATE,
StepBackStrategy.MIXED])
@pytest.mark.parametrize("refine", [True, False])
@pytest.mark.parametrize("sgradient", [True, False])
@pytest.mark.parametrize("subspace_dim", [SubSpaceDim.STEIHAUG,
SubSpaceDim.FULL,
SubSpaceDim.TWO])
@pytest.mark.parametrize("bounds_and_init", [unbounded_and_init(),
finite_bounds_include_optimum(),
finite_bounds_exlude_optimum()])
@pytest.mark.parametrize("fun, happ", [
(rosenboth, None),
(rosengrad, SR1()),
(rosengrad, BFGS()),
(rosengrad, DFP()),
(rosenboth, HybridUpdate(BFGS())),
(rosenboth, HybridUpdate(SR1())),
(rosenboth, HybridUpdate(init_with_hess=True)),
])
def test_minimize_hess_approx(bounds_and_init, fun, happ, subspace_dim,
stepback, refine, sgradient):
lb, ub, x0 = bounds_and_init
opt = Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
hessian_update=happ if happ is not None else None,
options={fides.Options.FATOL: 0,
fides.Options.SUBSPACE_DIM: subspace_dim,
fides.Options.STEPBACK_STRAT: stepback,
fides.Options.MAXITER: 1e3,
fides.Options.REFINE_STEPBACK: refine,
fides.Options.SCALED_GRADIENT: sgradient}
)
opt.minimize(x0)
assert opt.fval >= opt.fval_min
if opt.fval == opt.fval_min:
assert np.isclose(opt.grad, opt.grad_min).all()
assert np.isclose(opt.x, opt.x_min).all()
if np.all(ub > 1):
assert np.isclose(opt.x, [1, 1]).all()
assert np.isclose(opt.grad, np.zeros(opt.x.shape), atol=1e-6).all()
@pytest.mark.parametrize("stepback", [StepBackStrategy.REFLECT,
StepBackStrategy.TRUNCATE])
@pytest.mark.parametrize("subspace_dim", [SubSpaceDim.FULL,
SubSpaceDim.TWO])
def test_multistart(subspace_dim, stepback):
lb, ub, x0 = finite_bounds_exlude_optimum()
fun = rosenboth
opt = Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
options={fides.Options.FATOL: 0,
fides.Options.SUBSPACE_DIM: subspace_dim,
fides.Options.STEPBACK_STRAT: stepback,
fides.Options.REFINE_STEPBACK: False,
fides.Options.MAXITER: 1e3}
)
for _ in range(int(1e2)):
x0 = np.random.random(x0.shape) * (ub-lb) + lb
opt.minimize(x0)
assert opt.fval >= opt.fval_min
if opt.fval == opt.fval_min:
assert np.isclose(opt.grad, opt.grad_min).all()
assert np.isclose(opt.x, opt.x_min).all()
if np.all(ub > 1):
assert np.isclose(opt.x, [1, 1]).all()
assert np.isclose(opt.grad, np.zeros(opt.x.shape), atol=1e-6).all()
def test_multistart_randomfail():
lb, ub, x0 = finite_bounds_exlude_optimum()
fun = rosenrandomfail
opt = Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
options={fides.Options.FATOL: 0,
fides.Options.MAXITER: 1e3}
)
for _ in range(int(1e2)):
with pytest.raises(RuntimeError):
x0 = np.random.random(x0.shape) * (ub - lb) + lb
opt.minimize(x0)
@pytest.mark.parametrize("fun", [rosennonsquarh, rosenwrongf, rosenshorth,
rosentransg, rosenshortg, rosenexpandg])
def test_wrong_dim(fun):
lb, ub, x0 = finite_bounds_exlude_optimum()
opt = Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
options={fides.Options.FATOL: 0,
fides.Options.MAXITER: 1e3}
)
with pytest.raises(ValueError):
x0 = np.random.random(x0.shape) * (ub - lb) + lb
opt.minimize(x0)
def test_hess_and_hessian_update():
lb, ub, x0 = finite_bounds_exlude_optimum()
fun = rosenboth
opt = Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
options={fides.Options.FATOL: 0},
hessian_update=DFP()
)
with pytest.raises(ValueError):
opt.minimize(x0)
def test_no_grad():
lb, ub, x0 = finite_bounds_exlude_optimum()
fun = rosen
opt = Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
options={fides.Options.FATOL: 0},
hessian_update=DFP()
)
with pytest.raises(ValueError):
opt.minimize(x0)
def test_wrong_x():
lb, ub, x0 = finite_bounds_exlude_optimum()
fun = rosen
opt = Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
options={fides.Options.FATOL: 0},
hessian_update=DFP()
)
with pytest.raises(ValueError):
opt.minimize(np.expand_dims(x0, 1))
def test_maxiter_maxtime():
lb, ub, x0 = finite_bounds_exlude_optimum()
fun = rosengrad
opt = Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
options={fides.Options.FATOL: 0},
hessian_update=DFP()
)
tstart = time.time()
opt.minimize(x0)
t_elapsed = time.time() - tstart
maxiter = opt.iteration - 1
maxtime = t_elapsed/10
opt.options[fides.Options.MAXITER] = maxiter
opt.minimize(x0)
assert opt.exitflag == fides.ExitFlag.MAXITER
del opt.options[fides.Options.MAXITER]
opt.options[fides.Options.MAXTIME] = maxtime
opt.minimize(x0)
assert opt.exitflag == fides.ExitFlag.MAXTIME
del opt.options[fides.Options.MAXTIME]
def test_wrong_options():
lb, ub, x0 = finite_bounds_exlude_optimum()
fun = rosenboth
with pytest.raises(ValueError):
Optimizer(
fun, ub=ub, lb=lb, verbose=logging.INFO,
options={'option_doesnt_exist': 1}
)
|
{"hexsha": "54471c6f222c9af3d76690a0655e18be177f9d0d", "size": 7910, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_minimize.py", "max_stars_repo_name": "dweindl/fides", "max_stars_repo_head_hexsha": "3124c664d301c12499eb052047be7833577c4e74", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_minimize.py", "max_issues_repo_name": "dweindl/fides", "max_issues_repo_head_hexsha": "3124c664d301c12499eb052047be7833577c4e74", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_minimize.py", "max_forks_repo_name": "dweindl/fides", "max_forks_repo_head_hexsha": "3124c664d301c12499eb052047be7833577c4e74", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9965870307, "max_line_length": 79, "alphanum_fraction": 0.5785082174, "include": true, "reason": "import numpy", "num_tokens": 2288}
|
function CssiSpaceWeatherData(arg0::JString)
return CssiSpaceWeatherData((JString,), arg0)
end
function CssiSpaceWeatherData(arg0::JString, arg1::DataProvidersManager, arg2::TimeScale)
return CssiSpaceWeatherData((JString, DataProvidersManager, TimeScale), arg0, arg1, arg2)
end
function equals(obj::Object, arg0::Object)
return jcall(obj, "equals", jboolean, (Object,), arg0)
end
function get24_hours_kp(obj::CssiSpaceWeatherData, arg0::AbsoluteDate)
return jcall(obj, "get24HoursKp", jdouble, (AbsoluteDate,), arg0)
end
function get_ap(obj::CssiSpaceWeatherData, arg0::AbsoluteDate)
return jcall(obj, "getAp", Vector{jdouble}, (AbsoluteDate,), arg0)
end
function get_average_flux(obj::CssiSpaceWeatherData, arg0::AbsoluteDate)
return jcall(obj, "getAverageFlux", jdouble, (AbsoluteDate,), arg0)
end
function get_class(obj::Object)
return jcall(obj, "getClass", Class, ())
end
function get_daily_flux(obj::CssiSpaceWeatherData, arg0::AbsoluteDate)
return jcall(obj, "getDailyFlux", jdouble, (AbsoluteDate,), arg0)
end
function get_instant_flux(obj::CssiSpaceWeatherData, arg0::AbsoluteDate)
return jcall(obj, "getInstantFlux", jdouble, (AbsoluteDate,), arg0)
end
function get_max_date(obj::CssiSpaceWeatherData)
return jcall(obj, "getMaxDate", AbsoluteDate, ())
end
function get_mean_flux(obj::CssiSpaceWeatherData, arg0::AbsoluteDate)
return jcall(obj, "getMeanFlux", jdouble, (AbsoluteDate,), arg0)
end
function get_min_date(obj::CssiSpaceWeatherData)
return jcall(obj, "getMinDate", AbsoluteDate, ())
end
function get_supported_names(obj::CssiSpaceWeatherData)
return jcall(obj, "getSupportedNames", JString, ())
end
function get_three_hourly_kp(obj::CssiSpaceWeatherData, arg0::AbsoluteDate)
return jcall(obj, "getThreeHourlyKP", jdouble, (AbsoluteDate,), arg0)
end
function hash_code(obj::Object)
return jcall(obj, "hashCode", jint, ())
end
function notify(obj::Object)
return jcall(obj, "notify", void, ())
end
function notify_all(obj::Object)
return jcall(obj, "notifyAll", void, ())
end
function to_string(obj::Object)
return jcall(obj, "toString", JString, ())
end
function wait(obj::Object)
return jcall(obj, "wait", void, ())
end
function wait(obj::Object, arg0::jlong)
return jcall(obj, "wait", void, (jlong,), arg0)
end
function wait(obj::Object, arg0::jlong, arg1::jint)
return jcall(obj, "wait", void, (jlong, jint), arg0, arg1)
end
|
{"hexsha": "db86dc8567f4c6b04ba5381d65b5f849850e6caf", "size": 2454, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "gen/OrekitWrapper/ModelsWrapper/EarthWrapper/AtmosphereWrapper/DataWrapper/cssi_space_weather_data.jl", "max_stars_repo_name": "JuliaAstrodynamics/Orekit.jl", "max_stars_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-07T12:26:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T16:02:35.000Z", "max_issues_repo_path": "gen/OrekitWrapper/ModelsWrapper/EarthWrapper/AtmosphereWrapper/DataWrapper/cssi_space_weather_data.jl", "max_issues_repo_name": "JuliaSpace/Orekit.jl", "max_issues_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-05T10:16:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-30T05:17:19.000Z", "max_forks_repo_path": "gen/OrekitWrapper/ModelsWrapper/EarthWrapper/AtmosphereWrapper/DataWrapper/cssi_space_weather_data.jl", "max_forks_repo_name": "JuliaSpace/Orekit.jl", "max_forks_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8705882353, "max_line_length": 93, "alphanum_fraction": 0.7432762836, "num_tokens": 701}
|
import os
import time
import pickle
import collections
import numpy as np
import scipy.ndimage as ndimage
import sgolay2
from .focus_stacker import FocusStacker
class ImageStackCollector:
DEFAULT_IMAGES_PER_STEP = 20
DEFAULT_SETTLING_TIME = 2.0
DEFAULT_FOCUS_STACKER_PARAM = {
'laplacian_kernel_size' : 5,
'gaussian_blur_kernel_size' : 5,
}
DEFAULT_MEDIAN_FILTER_SIZE = 21
DEFAULT_SGOLAY_WINDOW_SIZE = 51
DEFAULT_SGOLAY_POLY_ORDER = 3
def __init__(self, min_val=-0.05, max_val=0.05, num=10):
self.images_per_step = self.DEFAULT_IMAGES_PER_STEP
self.settling_time = self.DEFAULT_SETTLING_TIME
self.focus_stacker_param = self.DEFAULT_FOCUS_STACKER_PARAM
self.median_filter_size = self.DEFAULT_MEDIAN_FILTER_SIZE
self.sgolay_window_size = self.DEFAULT_SGOLAY_WINDOW_SIZE
self.sgolay_poly_order = self.DEFAULT_SGOLAY_POLY_ORDER
self.set_range(min_val, max_val, num)
self.step_to_image_list = collections.OrderedDict()
self.step_to_image_median = collections.OrderedDict()
self.t_step = 0.0
self.index = self.num
self.focus_image = None
self.depth_image = None
def set_range(self, min_val, max_val, num):
self.steps = np.linspace(min_val, max_val, num)
@property
def ready(self):
if (self.focus_image is not None) and (self.depth_image is not None):
return True
else:
return False
@property
def min_val(self):
return self.steps.min()
@property
def max_val(self):
return self.steps.max()
@property
def num(self):
return self.steps.size
@property
def running(self):
return self.index < self.num
@property
def is_first(self):
return self.index == -1
@property
def step_complete(self):
if self.running and self.index >= 0:
val = self.steps[self.index]
return len(self.step_to_image_list[val]) >= self.images_per_step
else:
return True
@property
def settled(self):
return (time.time() - self.t_step) > self.settling_time
def start(self):
self.clear()
self.index = -1
def stop(self):
self.index = self.num
def clear(self):
self.step_to_image_list = collections.OrderedDict()
self.step_to_image_median = collections.OrderedDict()
self.focus_image = None
self.depth_image = None
def next_step(self):
if self.index > -1:
val = self.steps[self.index]
image_array = np.array(self.step_to_image_list[val])
image_median = np.median(image_array, axis=0).astype(np.uint8)
self.step_to_image_median[val] = image_median
self.index += 1
self.t_step = time.time()
if self.index < self.num:
val = self.steps[self.index]
self.step_to_image_list[val] = []
return val
else:
return None
def add_image(self, image):
val = self.steps[self.index]
self.step_to_image_list[val].append(image)
def calc_focus_and_depth_images(self):
# Get list of images and depths and compute focus and depth map images
image_list = [image for (depth,image) in self.step_to_image_median.items()]
depth_list = [depth for (depth,image) in self.step_to_image_median.items()]
fs = FocusStacker(**self.focus_stacker_param)
focus_image, depth_image = fs.focus_stack(image_list, depth_list)
# Clean up depth map image
depth_image = ndimage.median_filter(depth_image, self.median_filter_size)
        sg2 = sgolay2.SGolayFilter2(window_size=self.sgolay_window_size, poly_order=self.sgolay_poly_order)
depth_image = sg2(depth_image)
self.focus_image = focus_image
self.depth_image = depth_image
def save(self, filename='focus_stack.pkl'):
filepath = os.path.join(os.environ['HOME'], filename)
data = {
'raw_images' : self.step_to_image_list,
'median_images' : self.step_to_image_median,
}
with open(filepath,'wb') as f:
pickle.dump(data,f)
|
{"hexsha": "cf351e547abe9a556b40e173b71d1e1230e0f99a", "size": 4275, "ext": "py", "lang": "Python", "max_stars_repo_path": "flasercutter/image_stack_collector.py", "max_stars_repo_name": "willdickson/flasercutter", "max_stars_repo_head_hexsha": "a248599fa9c726798836407c39d69aa7c916025c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-20T22:29:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-20T22:29:04.000Z", "max_issues_repo_path": "flasercutter/image_stack_collector.py", "max_issues_repo_name": "willdickson/flasercutter_software", "max_issues_repo_head_hexsha": "4bae3e67cb08a64bf56c57a9e6d5700b17b56953", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flasercutter/image_stack_collector.py", "max_forks_repo_name": "willdickson/flasercutter_software", "max_forks_repo_head_hexsha": "4bae3e67cb08a64bf56c57a9e6d5700b17b56953", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4827586207, "max_line_length": 83, "alphanum_fraction": 0.6362573099, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1009}
|
\section{Rice Krispy Treats}
\label{riceKrispyTreats}
\setcounter{secnumdepth}{0}
Time: 45 minutes (5 minutes cooking, 40 minutes cooling)
Serves: 8
\begin{multicols}{2}
\subsection*{Ingredients}
\begin{itemize}
\item 2 ounces unsalted butter
\item 10 ounces marshmallows
\item 6 ounces rice krispies
\item OPTIONAL: 5 ounces peanut butter, and use just shy of 2 ounces butter instead
\item butter or cooking spray for greasing
\end{itemize}
\subsection*{Hardware}
\begin{itemize}
\item Large non-stick stock pot
\item Silicone spatula
\item 9x9 Baking dish
\end{itemize}
\clearpage
\subsection*{Instructions}
\begin{enumerate}
\item Bring 2 ounces butter to a slight bubble over medium-low heat in a stock pot (or about 1.4 ounces if adding peanut butter).
\item While the butter is heating, lightly grease a 9x9 baking dish with butter or cooking spray.
\item Add 10 ounces marshmallows and stir until the marshmallows are completely melted and smooth, about 4 minutes.
\item OPTIONAL, if adding peanut butter, Add 5 ounces peanut butter to the marshmallow mixture and stir to combine.
\item Add 6 ounces Rice Krispies and fold into the marshmallow mixture.
\item Once all Rice Krispies are evenly coated, dump the mixture into a 9x9 baking dish and press down with a spatula or wax paper to make sure all contents are pressed together.
\item Chill for at least 30 minutes in the fridge prior to serving.
\end{enumerate}
\subsection*{Notes}
\begin{itemize}
\item Based on the Kellogg's recipe, as seen here: \url{https://www.ricekrispies.com/en_US/recipes/the-original-treats-recipe.html}.
\begin{itemize}
\item Main differences are increased butter, optional peanut butter, and changed all ingredients to weight instead of volume.
\end{itemize}
\item This can be modified pretty easily: add M\&M's, put melted chocolate on top before cooling, replace the cereal with Fruity Pebbles, etc.
\item This can be spread into a 9x13 baking dish for thinner treats that also cool faster, but I like them thick.
\end{itemize}
\end{multicols}
\clearpage
|
{"hexsha": "abdc43a5d8c6d2728095c494024af9f79081a9d2", "size": 2133, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/desserts/riceKrispyTreats.tex", "max_stars_repo_name": "calebwatt15/caleb-watt-cookbook", "max_stars_repo_head_hexsha": "abddcdb60e9422d63d945e7a9ec019c0288e34d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-03-10T06:39:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T18:06:54.000Z", "max_issues_repo_path": "chapters/desserts/riceKrispyTreats.tex", "max_issues_repo_name": "calebwatt15/caleb-watt-cookbook", "max_issues_repo_head_hexsha": "abddcdb60e9422d63d945e7a9ec019c0288e34d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/desserts/riceKrispyTreats.tex", "max_forks_repo_name": "calebwatt15/caleb-watt-cookbook", "max_forks_repo_head_hexsha": "abddcdb60e9422d63d945e7a9ec019c0288e34d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.3695652174, "max_line_length": 182, "alphanum_fraction": 0.7543366151, "num_tokens": 568}
|
# encoding: utf-8
import cairo
import pango
import pangocairo
from font_draw import fontnames, trim
from random import randint
from PIL import Image
from utils import add_padding
import numpy as np
import codecs
import os
import re
allmatch = 0
# fontnames = ['serif']
def draw_line(line, outpath, spacing='normal', gtpath = None):
global allmatch
if spacing == 'normal':
font_space = 0
elif spacing == "condensed":
font_space = -5000
size = "40"
w = 3000
h = 200
nof = 0
# spacing = 'condensed' # this doesn't seem to work anyway
# for spacing in ['normal', 'condensed']:
for fontname in fontnames:
surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
context = cairo.Context(surf)
#draw a background rectangle:
context.rectangle(0,0,w,h)
context.set_source_rgb(1, 1, 1)
context.fill()
# font_map = pangocairo.cairo_font_map_get_default()
pangocairo_context = pangocairo.CairoContext(context)
pangocairo_context.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
layout = pangocairo_context.create_layout()
# font_params = [fontname,'normal', 'normal', spacing, size]
font_params = [fontname, size]
font = pango.FontDescription(" ".join(font_params))
attr = pango.AttrLetterSpacing(font_space, 0, -1)
# attr = pango.AttrLetterSpacing(0, 0, -1)
attrlist = pango.AttrList()
attrlist.change(attr)
# font.set_stretch(pango.STRETCH_CONDENSED)
# font.set_stretch(pango.STRETCH_ULTRA_CONDENSED)
# print font.get_stretch()
layout.set_font_description(font)
layout.set_attributes(attrlist)
layout.set_text(line)
context.set_source_rgb(0, 0, 0)
pangocairo_context.update_layout(layout)
pangocairo_context.show_layout(layout)
# fname = "/tmp/%s%d.png" % (fontname, randint(0,20000000))
# fname = "/tmp/%s.png" % ('-'.join(font_params))
fname = outpath + fontname + '-'+ spacing + '.png'
# if os.path.exists(fname) and os.path.exists(outpath + fontname + '.gt.txt'):
# allmatch += 1
# if os.path.exists(fname) and not os.path.exists(gtpath + fontname + '.gt.txt'):
# print fname, gtpath + fontname + '.gt.txt'
# codecs.open(gtpath + fontname + '.gt.txt', 'w', 'utf-8').write(line)
# else:
# nof += 1
# continue
with open(fname, "wb") as image_file:
surf.write_to_png(image_file)
im = Image.open(fname)
im = im.convert('L')
a = np.asarray(im)
a = trim(a)/255
a = add_padding(a, padding=2)
# os.remove(fname)
# Image.fromarray(a*255).show()
Image.fromarray(a*255).save(fname)
codecs.open(gtpath + fontname + '.gt.txt', 'w', 'utf-8').write(line)
print allmatch
def draw_line2(line, font, size):
# size = "40"
if line.strip().endswith(u'་'):
line = line.strip(u'་')
line = line + u'་'
line = line.strip(u' ')
w = 8000
h = 250
spacing = 'normal' # this doesn't seem to work anyway
# for spacing in ['normal', 'condensed']:
surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
context = cairo.Context(surf)
#draw a background rectangle:
context.rectangle(0,0,w,h)
context.set_source_rgb(1, 1, 1)
context.fill()
#get font families:
# font_map = pangocairo.cairo_font_map_get_default()
# context.translate(0,0)
pangocairo_context = pangocairo.CairoContext(context)
pangocairo_context.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
layout = pangocairo_context.create_layout()
#fontname = sys.argv[1] if len(sys.argv) >= 2 else "Sans"
# font = pango.FontDescription(fontname + " 200")
font_params = [font,'normal', 'normal', spacing, str(size)]
font = pango.FontDescription(" ".join(font_params))
# font.set_stretch(pango.STRETCH_CONDENSED)
# else:
# font = pango.FontDescription(fontname + " bold 200")
layout.set_font_description(font)
layout.set_text(line)
context.set_source_rgb(0, 0, 0)
pangocairo_context.update_layout(layout)
pangocairo_context.show_layout(layout)
# fname = "/tmp/%s%d.png" % (fontname, randint(0,20000000))
fname = "/tmp/%s.png" % ('-'.join(font_params))
# fname = outpath + fontname + '.png'
# codecs.open(outpath + fontname + '.gt.txt', 'w', 'utf-8').write(line)
with open(fname, "wb") as image_file:
surf.write_to_png(image_file)
im = Image.open(fname)
# im = im.convert('L')
im = im.convert('L')
a = np.asarray(im, 'f')
os.remove(fname)
return a
# a = trim(a)/255
# a = add_padding(a, padding=2)
# Image.fromarray(a*255).save(fname)
def draw_lines_from_file(fl, outdir='generated-line-imgs', \
outdir_gt='generated-line-imgs-gt', spacing="normal", limit=None):
fl = codecs.open(fl, 'r', 'utf-8')
if not os.path.exists(outdir):
os.mkdir(outdir)
if not os.path.exists(outdir_gt):
os.mkdir(outdir_gt)
for i, line in enumerate(fl):
if limit and i > limit:
break
line = line.strip(u' ་')
if not line:
continue
print line
# line = next(fl).strip()
outfilepath = os.path.join(outdir, '%06d-' % i)
gtoutfilepath = os.path.join(outdir_gt, '%06d-' % i)
draw_line(' ' + line, outfilepath, gtpath=gtoutfilepath, spacing=spacing)
if __name__ == '__main__':
import os
# fl = codecs.open('/media/zr/zr-mechanical/Dropbox/sera-khandro-sample.txt', 'r', 'utf-8')
# fl = codecs.open('/media/zr/zr-mechanical/eKangyur-FINAL-sources-20141023/eKangyur/W4CZ5369/sources/allkangyurlines-nolinenum.txt', 'r', 'utf-8')
# fl = codecs.open('/media/zr/mech2/DownloadsOverflow/chintamanishatpadi_D.txt', 'r', 'utf-8')
# outdir = '/media/zr/mech2/sera-khandro-lines'
# fl = codecs.open('/tmp/testlines', 'r', 'utf-8')
fl = '/media/zr/zr-mechanical/Dropbox/sera-khandro-sample.txt'
draw_lines_from_file(fl, '/media/zr/mech2/seratest', '/media/zr/mech2/seratest-gt', spacing='condensed', limit=20)
import sys; sys.exit()
outdir = '/tmp/lines'
# outdir = '/media/zr/mech2/kangyur-lines'
# gtoutdir = '/media/zr/mech2/kangyur-lines-gt'
# outdir = '/media/zr/mech2/skt-lines'
# gtoutdir = '/media/zr/mech2/skt-lines-gt'
gtoutdir = '/tmp/lines-gt'
if not os.path.exists(outdir):
os.mkdir(outdir)
if not os.path.exists(gtoutdir):
os.mkdir(gtoutdir)
# for i in range(10):
for i, line in enumerate(fl):
if i > 30000:
break
line = line.strip()
if not line:
continue
print line
# line = next(fl).strip()
outfilepath = os.path.join(outdir, '%06d-' % i)
gtoutfilepath = os.path.join(gtoutdir, '%06d-' % i)
draw_line(' ' + line, outfilepath, gtpath=gtoutfilepath)
|
{"hexsha": "72b1dd5cd7304c5cb29129207391f17140c3e77a", "size": 7150, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_generation/line_draw.py", "max_stars_repo_name": "BuddhistDigitalResourceCenter/namsel", "max_stars_repo_head_hexsha": "f6f112b7e1710db07d47cbaed1dee33a33bfd814", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2017-02-18T04:03:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-06T17:44:21.000Z", "max_issues_repo_path": "data_generation/line_draw.py", "max_issues_repo_name": "buda-base/namsel", "max_issues_repo_head_hexsha": "f6f112b7e1710db07d47cbaed1dee33a33bfd814", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2017-02-19T13:47:48.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-04T07:42:08.000Z", "max_forks_repo_path": "data_generation/line_draw.py", "max_forks_repo_name": "buda-base/namsel", "max_forks_repo_head_hexsha": "f6f112b7e1710db07d47cbaed1dee33a33bfd814", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2017-02-19T11:15:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-08T09:37:21.000Z", "avg_line_length": 36.4795918367, "max_line_length": 151, "alphanum_fraction": 0.6076923077, "include": true, "reason": "import numpy", "num_tokens": 2026}
|
@testset "SOM training helper functions" begin
@testset "gridRectangular" begin
grid = GigaSOM.gridRectangular(5, 5)
@test size(grid) == (25, 2)
end
@testset "Kernels" begin
@test isapprox(
gaussianKernel(Vector{Float64}(1:7), 2.0),
[0.1760326, 0.1209853, 0.0647587, 0.0269954, 0.0087641, 0.0022159, 0.0004363],
atol = 1e-4,
)
@test isapprox(
bubbleKernel(Vector{Float64}(1:6), 5.0),
[0.979796, 0.916515, 0.8, 0.6, 0, 0],
atol = 0.001,
)
@test isapprox(
thresholdKernel(Vector{Float64}(1:10), 5.0),
[1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
atol = 1e-3,
)
@test isapprox(
thresholdKernel(Vector{Float64}(1:10), 10.1),
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
atol = 1e-3,
)
end
@testset "Radii-generating functions" begin
radii1 = expRadius().(5.0, 0.5, Vector(1:10), 10)
radii2 = expRadius(-10.0).(10.0, 0.1, Vector(1:20), 20)
radii3 = linearRadius.(50.0, 0.001, Vector(1:30), 30)
@test isapprox(radii1[1], 5.0)
@test isapprox(radii1[10], 0.5)
@test isapprox(radii2[1], 10.0)
@test isapprox(radii2[20], 0.1)
@test isapprox(radii3[1], 50.0)
@test isapprox(radii3[30], 0.001)
@test all(isapprox.(radii1[1:9] ./ radii1[2:10], radii1[1] / radii1[2]))
#note: radius2 is adjusted and thus not really exponential
@test all(isapprox.(radii3[1:29] .- radii3[2:30], radii3[1] - radii3[2]))
end
@testset "distMatrix" begin
g = GigaSOM.gridRectangular(2, 2)
dm = GigaSOM.distMatrix(Euclidean())(g)
@test size(dm) == (4, 4)
@test all([dm[i, i] == 0 for i = 1:4])
end
end
|
{"hexsha": "57b3d58eabb5849afc15c1c354a1ac0c70e2db87", "size": 1868, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/testTrainutils.jl", "max_stars_repo_name": "oHunewald/GigaSOM.jl", "max_stars_repo_head_hexsha": "dd00899e514bea125306a1926452222eca007a10", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2019-03-31T12:19:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T12:33:57.000Z", "max_issues_repo_path": "test/testTrainutils.jl", "max_issues_repo_name": "oHunewald/GigaSOM.jl", "max_issues_repo_head_hexsha": "dd00899e514bea125306a1926452222eca007a10", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 162, "max_issues_repo_issues_event_min_datetime": "2019-03-31T09:30:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T19:48:46.000Z", "max_forks_repo_path": "test/testTrainutils.jl", "max_forks_repo_name": "oHunewald/GigaSOM.jl", "max_forks_repo_head_hexsha": "dd00899e514bea125306a1926452222eca007a10", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-04-02T08:38:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T12:48:09.000Z", "avg_line_length": 35.2452830189, "max_line_length": 90, "alphanum_fraction": 0.5235546039, "num_tokens": 754}
|
\documentclass[a4paper]{article}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{hyperref}
\usepackage[colorinlistoftodos]{todonotes}
\usepackage{url}
\newcommand{\vwi}{{\bf w}_i}
\newcommand{\vw}{{\bf w}}
\newcommand{\vx}{{\bf x}}
\newcommand{\vy}{{\bf y}}
\newcommand{\vxi}{{\bf x}_i}
\newcommand{\yi}{y_i}
\newcommand{\vxj}{{\bf x}_j}
\newcommand{\vxn}{{\bf x}_n}
\newcommand{\yj}{y_j}
\newcommand{\ai}{\alpha_i}
\newcommand{\aj}{\alpha_j}
\newcommand{\X}{{\bf X}}
\newcommand{\Y}{{\bf Y}}
\newcommand{\vz}{{\bf z}}
\newcommand{\msigma}{{\bf \Sigma}}
\newcommand{\vmu}{{\bf \mu}}
\newcommand{\vmuk}{{\bf \mu}_k}
\newcommand{\msigmak}{{\bf \Sigma}_k}
\newcommand{\vmuj}{{\bf \mu}_j}
\newcommand{\msigmaj}{{\bf \Sigma}_j}
\newcommand{\pij}{\pi_j}
\newcommand{\pik}{\pi_k}
\newcommand{\D}{\mathcal{D}}
\newcommand{\el}{\mathcal{L}}
\newcommand{\N}{\mathcal{N}}
\newcommand{\vxij}{{\bf x}_{ij}}
\newcommand{\vt}{{\bf t}}
\newcommand{\yh}{\hat{y}}
\newcommand{\code}[1]{{\footnotesize \tt #1}}
\newcommand{\alphai}{\alpha_i}
\title{Large-Scale Image Pattern Recognition\\
Parallel Machine Learning Implementation}
\author{(According to first name's initial's order in English vocabulary )\\
Haitang Hu, Huizhan Lv, Jian Jin, Tianyi Chen}
\date{\today}
\begin{document}
\maketitle
\begin{abstract}
Image recognition is computationally demanding because of its intrinsically complex feature space. When the number of pictures is large, common machine learning algorithms show clear limitations in pattern recognition in terms of computation efficiency and resource usage. Techniques developed specifically for large-scale data processing can therefore be expected to perform markedly better. In this project we implement a parallel SVM with PCA preprocessing for large-scale image pattern recognition and verify that this scheme achieves the favourable performance we expect on such tasks.
\end{abstract}
\section{Background and Motivation}
Pattern recognition on images is marked by a complex feature space. Consider a $32\times 32$ color picture: in the RGB scheme each picture has 3072 features. For a dataset of 60,000 pictures, the classification task is therefore a substantial workload.\\
\\
However, the ability to classify images is of considerable significance. For instance, when we search for images with the keyword ``car'' in a search engine, it should clearly distinguish between distinct types of objects, and we do not expect a picture of a boat in the results.\\
\\
We aim to develop an algorithm that classifies a large-scale image dataset efficiently, that is, one that produces results quickly with a high level of accuracy.
\section{Algorithm}
\subsection{Preprocessing}
We use PCA for preprocessing. In PCA, the goal is to project the data $\vx$ with dimensionality $D$ onto a space having dimensionality $M <D$ while maximizing the variance of the projected data.\\
\\
Suppose $S$ is the data covariance matrix defined by\begin{equation}
S=Cov(\vx)
\end{equation}
Let $\{z_i\}$ represent $M$ linear combinations of our original $D$ predictors:
\begin{equation}
z_m=\vw_m^T\vx=\sum_{j=1}^D w_{jm} x_j,~~~~m=1,2,...,M
\end{equation}
We now maximize the projected variance \begin{equation}
Var(z_1)=\vw^T_1S\vw_1
\end{equation}
The constraint is $\vw^T_1\vw_1=1$. We therefore perform an unconstrained maximization of
\begin{equation}
\vw^T_1S\vw_1-\lambda_1(\vw_1^T \vw_1-1),
\end{equation}
where $\lambda_1$ is a Lagrange multiplier. Setting the derivative with respect to $\vw_1$ to zero gives \begin{equation}
S\vw_1 = \lambda_1\vw_1
\end{equation}
So\begin{equation}
\vw_1^TS\vw_1 = \lambda_1
\end{equation}
which reveals that the variance will be a maximum when we set $\vw_1$ equal to the eigenvector having the largest eigenvalue $\lambda_1$. This eigenvector $\vw_1$ is the first principal component.\\
\\
The second principal component $\vw_2$ should also maximize the projected variance, be of unit length, and be orthogonal to $\vw_1$. One can show that $\vw_2$ is the eigenvector of $S$ with the second largest eigenvalue. Similarly, the remaining directions are given by the eigenvectors with decreasing eigenvalues.\\
\\
Since the task has too many features, we use PCA as preprocessing, reducing the 3072 features to 100. We project the data onto the first 100 eigenvectors, thus obtaining the data for classification. To facilitate the computation of the eigenvectors, we use the SVD \begin{equation}
X=U\Sigma V^T
\end{equation}
where $X$ is the (centered) data matrix. The eigenvectors of the covariance matrix of $X$ are then the columns of the matrix $V$.
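As an illustration, this preprocessing step can be sketched in a few lines of NumPy (a minimal sketch only; the function and variable names are ours and not part of the actual Spark pipeline):
\begin{verbatim}
import numpy as np

def pca_via_svd(X, n_components=100):
    # Center the data; the covariance structure is then captured by the SVD.
    X_centered = X - X.mean(axis=0)
    # Rows of Vt are the eigenvectors of the covariance matrix of X.
    U, s, Vt = np.linalg.svd(X_centered, full_matrices=False)
    components = Vt[:n_components]        # first 100 principal directions
    Z = X_centered.dot(components.T)      # projected data, shape (n, 100)
    return Z, components
\end{verbatim}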
\subsection{Processing}
We use a parallel SVM with a Gaussian kernel for classification, running on four separate machines. Standard SVMs are binary classifiers using a linear decision boundary with discriminant function \begin{equation}
g(\vx)=\vw^T\vx^t+w_0
\end{equation}
of which the decision function is \begin{equation}
y_{new}=\text{sign}(\vw^T\vx_i+w_0)
\end{equation}
In our task there is no guarantee that the dataset is linearly separable, so we introduce kernel functions, which allow non-linear decision boundaries.\\
\\
Kernels are based on a transformation of the original features. Define the basis functions \begin{equation}
\vz =\phi(\vx)~\text{where}~z_j=\phi_j(\vx),~j = 1,...,k
\end{equation}
mapping from the $d$-dimensional $\vx$ space to the $k$-dimensional $\vz$ space where we write the discriminant as\begin{eqnarray}
&g(\vz)=\vw^T\vz\\
&g(\vx)=\vw^T\phi(\vx)=\sum_{j=1}^k w_j\phi_j(\vx)
\end{eqnarray}
The dual of the standard SVM problem is now
\begin{equation}
L_d=\sum_i\lambda_i-\frac{1}{2}\sum\limits_i\sum\limits_j \lambda_i\lambda_j y_i y_j \phi(x_i)^T \phi(x_j)
\end{equation}
subject to \begin{equation}
\sum_i \lambda_iy_i=0,~~~~0\leq \lambda_i\leq C,~\forall i
\end{equation}
The idea in kernel machines is to replace the inner product of basis functions, $\phi(x_i)^T\phi(x_j)$, by a kernel function, $K(x_i,x_j)$, thus the kernel function also shows up in the discriminant
\begin{equation}
g(\vx)=\vw^T\phi(\vx)=[\sum_{i=1}^k \lambda_i y_i \phi(\vx_i)^T]\phi(\vx)=\sum_{i=1}^k \lambda_i y_i K(\vx_i,\vx)
\end{equation}
Using the kernel function, we do not need to map the data to the new space at all. There are multiple types of kernels; we use the Gaussian kernel \begin{equation}
K(x,x')=exp(-\frac{1}{2}(x-x')^T\Sigma^{-1}(x-x'))
\end{equation}
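As an illustration, the Gaussian kernel matrix can be computed as follows (a sketch for the isotropic special case $\Sigma=\sigma^2 I$; the names \code{X1}, \code{X2} and \code{sigma} are illustrative):
\begin{verbatim}
import numpy as np

def gaussian_kernel(X1, X2, sigma=1.0):
    # Pairwise squared Euclidean distances between the rows of X1 and X2.
    sq_dists = (np.sum(X1**2, axis=1)[:, None]
                + np.sum(X2**2, axis=1)[None, :]
                - 2.0 * X1.dot(X2.T))
    return np.exp(-0.5 * sq_dists / sigma**2)
\end{verbatim}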
Besides the kernel, we make another extension from binary classification to $K$ classes by applying a 1-of-$K$ encoding scheme, in which $y$ is a vector of length $K$ containing a single 1 for the correct class and 0 elsewhere. For example, if we have $K=5$ classes, then an input that belongs to class $2$ would be given the target vector \begin{equation}
y=(0,1,0,0,0)^T
\end{equation}
\subsection{K-means Clustering Algorithm}
In contrast to SVM, K-means is a clustering algorithm that can be used when we do not know the exact labels in advance. K-means clustering aims to partition $n$ observations into $k$ clusters in which each observation belongs to the cluster with the nearest mean, which serves as a prototype of the cluster.
MLlib on Spark provides a Python implementation of K-means, so we can use it directly by importing that package.
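The call pattern is roughly the following (a hedged sketch, assuming \code{data\_rdd} is an existing RDD of feature vectors; the number of iterations is illustrative):
\begin{verbatim}
from pyspark.mllib.clustering import KMeans

# data_rdd: RDD with one feature vector per image
model = KMeans.train(data_rdd, k=10, maxIterations=20)
labels = data_rdd.map(lambda point: model.predict(point))
\end{verbatim}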
\subsection{Percolation Clustering Algorithm}
Besides SVM and k-means, we also implement another clustering algorithm, which can be used when we neither know the number of labels (as SVM requires) nor have an expected number of clusters (as k-means requires). This algorithm is derived from the percolation algorithm of Newman \cite{Percolation}. The original algorithm is defined on a lattice plane; we modified it to make it suitable for our continuous space. \\
In our derived algorithm, each data point $i$ takes as its neighbors all other data points that lie within the circle of radius $R$ centred at $i$.\\
The radius $R$ is our parameter. Once every data point has determined its neighbors, a directed graph is formed, and the derived percolation clustering algorithm is run on this graph to obtain the clusters.
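A serial sketch of the idea is given below (illustrative only; the union-find bookkeeping stands in for the actual percolation implementation, and \code{R} is the radius parameter):
\begin{verbatim}
import numpy as np

def radius_clusters(points, R):
    # points: (n, d) array; two points are neighbors if their distance <= R.
    n = len(points)
    parent = list(range(n))
    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i
    for i in range(n):
        dists = np.linalg.norm(points - points[i], axis=1)
        for j in np.nonzero(dists <= R)[0]:
            parent[find(i)] = find(j)
    return [find(i) for i in range(n)]   # cluster label = root index
\end{verbatim}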
\section{Parallelization of Algorithm}
\subsection{Parallelization of PCA}
In Spark's Python API, we can easily use the \code{map()} function to parallelize the computations of normalizing the data and of the covariance matrix. The key to parallelizing PCA is how to compute the SVD of the covariance matrix in parallel, since the SVD function in the numpy package cannot be applied to an RDD object. To parallelize the SVD, we tried to implement it with the one-sided Jacobi SVD algorithm, and also with an SVD routine from the Freeman lab's thunder Python package.\\
The one-sided Jacobi algorithm can be used for singular value decomposition \cite{Jacobi}. We implemented it in Python and it works well on ordinary array matrices. However, when our one-sided Jacobi algorithm was run on an RDD, it could not load the dimensions of the separate RDD partitions correctly. The code of the one-sided Jacobi algorithm is on our GitHub. \\
Another way is to use the SVD routine on Spark based on the thunder Python package from the Freeman lab. Freeman's SVD routine does not work directly; we made some adjustments to the source code, and the modified code works well on Spark.
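A simplified sketch of the map/reduce pattern for the covariance computation is shown below (hypothetical variable names; the SVD itself is delegated to the modified thunder routine):
\begin{verbatim}
import numpy as np

# data_rdd: RDD of d-dimensional numpy vectors, one per image
n = data_rdd.count()
mean = data_rdd.reduce(lambda a, b: a + b) / float(n)

# Each worker forms the outer product of its centered vector;
# the reduce step sums them to obtain the covariance matrix.
cov = data_rdd.map(lambda x: np.outer(x - mean, x - mean)) \
              .reduce(lambda a, b: a + b) / float(n - 1)
\end{verbatim}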
\subsection{Parallelization of SVM}
The mechanism for parallelizing SVM is illustrated in the paper \cite{}. However, due to limited time, we did not manage to implement it successfully.
\subsection{Parallelization of K-means Clustering}
Apache Spark's MLlib provides the k-means algorithm in Python, and we can use it directly on an RDD. The mechanism of parallelization is that the whole dataset is divided into several RDD partitions, k-means is run on these partitions, and the partial results are then combined to form the final result.
\subsection{Parallelization of Percolation Clustering}
The parallelization of percolation clustering is very similar to that of the co-clustering algorithm and k-means: we can run percolation clustering on the different RDD partitions separately and combine the results to form the final clusters.
\section{Framework and Implementaion}
\subsection{Dataset}
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.
\begin{itemize}
\item Feature Size\\
Obviously, we have an RGB picture with $32 \times 32 \times 3 = 3072$ features for each sample.
\item Feature Selection\\
As described, we will first compress each image from RGB mode to intensity mode, where each pixel has a single value standing for its intensity (a small sketch of this step is given after this list).\\
Then, we will apply the PCA algorithm to this intensity picture ($1024$ features) to shrink its size.\\
Later we will see the pictures plotted from the eigenvectors.
\item Labels\\
The 10 labels stand for different object classes.\\
This is a general supervised multi-class classification task. Here, we employ an SVM with a Gaussian kernel to deal with it.
\end{itemize}
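The RGB-to-intensity compression mentioned in the Feature Selection item can be sketched as follows (assuming the CIFAR-10 row layout of 1024 red, 1024 green and 1024 blue values; the simple averaging is illustrative):
\begin{verbatim}
import numpy as np

def rgb_rows_to_intensity(X_rgb):
    # X_rgb: (n, 3072) array with channels stored as [R | G | B]
    r = X_rgb[:, :1024]
    g = X_rgb[:, 1024:2048]
    b = X_rgb[:, 2048:]
    return (r + g + b) / 3.0   # (n, 1024) intensity features
\end{verbatim}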
\subsection{Framework: Apache Spark}
Apache Spark is a fast and general engine for large-scale data processing, which can run in standalone cluster mode, on EC2, or on Hadoop YARN or Apache Mesos. It can also read from HDFS, HBase, Cassandra, and any Hadoop data source.\\
\\
Spark introduces an abstraction called resilient distributed datasets (RDDs). Spark can outperform Hadoop by several times, which is the reason we choose it as the framework for our task. It supports both Python and Java; we use Python for this task.
\subsection{Implementations}
Since we know from the original dataset that the number of labels is ten, training with SVM is the best choice compared with the k-means and percolation algorithms in our clustering library.
\section{Results and Evaluation}
\section{Conclusion}
Image recognition is computationally demanding because of its intrinsically complex feature space, and common machine learning algorithms show clear limitations on large collections of pictures in terms of computation efficiency and resource usage. In this project we implemented a parallel SVM with PCA preprocessing for large-scale image pattern recognition and verified that this scheme achieves the favourable performance we expected on such tasks.
\begin{thebibliography}{}
\bibitem{Spark} Spark API, \emph{Spark Programming Guide}, \textbf{2014}
\url{http://spark.apache.org/docs/latest/programming-guide.html}.
\bibitem{CIFAR} Alex Krizhevsky, \emph{Learning Multiple Layers of Features from Tiny Images}, Master's Thesis, \textbf{2009}.
\bibitem{Percolation} M. E. J. Newman and R. M. Ziff, \emph{Fast Monte Carlo algorithm for site or bond percolation
}, \textbf{2001}.
\bibitem{Jacobi} B. B. Zhou and R. P. Brent, \emph{On Parallel Implementation of the One-sided Jacobi Algorithm for Singular Value Decompositions
}, \textbf{1995}.
\end{thebibliography}
\end{document}
|
{"hexsha": "6584b30ae7c14617d2fbad6dff78cb7eaa99e0b1", "size": 13123, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "main.tex", "max_stars_repo_name": "motian12ps/Bigdata_Project", "max_stars_repo_head_hexsha": "43d10cbb230e7988197cd9d08ee3fb9eaec567d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-04-08T20:09:23.000Z", "max_stars_repo_stars_event_max_datetime": "2015-04-08T20:09:23.000Z", "max_issues_repo_path": "main.tex", "max_issues_repo_name": "motian12ps/Bigdata_Project", "max_issues_repo_head_hexsha": "43d10cbb230e7988197cd9d08ee3fb9eaec567d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.tex", "max_forks_repo_name": "motian12ps/Bigdata_Project", "max_forks_repo_head_hexsha": "43d10cbb230e7988197cd9d08ee3fb9eaec567d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 61.0372093023, "max_line_length": 609, "alphanum_fraction": 0.7727653738, "num_tokens": 3458}
|
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# Wenyang Zhou <576825820@qq.com>
# Dun Liang <randonlang@gmail.com>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
import jittor.distributions as jd
class TestOneHot(unittest.TestCase):
def test_presum(self):
a = jt.array([[1,2,3,4]])
b = jd.simple_presum(a)
assert (b.data == [[0,1,3,6,10]]).all()
def test_one_hot(self):
a = jd.OneHotCategorical(jt.array([0.25, 0.25, 0.25, 0.25]))
x = a.sample().numpy()
for i in range(1000):
x += a.sample().numpy()
assert (x > 200).all()
y = a.sample([2,3])
y.sync()
assert y.shape == [2,3,4]
probs,probs2 = np.random.uniform(0,1,(10)), np.random.uniform(0,1,(10))
probs,probs2 = probs / probs.sum(),probs2 / probs2.sum()
import torch
jc, jc2 = jd.OneHotCategorical(jt.array(probs).reshape(1,-1)),jd.OneHotCategorical(jt.array(probs2).reshape(1,-1))
tc, tc2 = torch.distributions.OneHotCategorical(torch.tensor(probs)),torch.distributions.OneHotCategorical(torch.tensor(probs2))
assert np.allclose(jc.entropy().data,tc.entropy().numpy())
x = np.zeros((4,10))
for _ in range(4):
nx = np.random.randint(0,9)
x[_,nx] = 1
assert np.allclose(jc.log_prob(jt.array(x)),tc.log_prob(torch.tensor(x)))
assert np.allclose(jd.kl_divergence(jc,jc2),torch.distributions.kl_divergence(tc,tc2))
def test_cate(self):
a = jd.Categorical(jt.array([0.25, 0.25, 0.25, 0.25]))
x =np.array([0,0,0,0])
for i in range(1000):
x[a.sample().item()]+=1
assert (x > 200).all()
y = a.sample([2,3])
y.sync()
assert y.shape == [2,3]
def test_normal(self):
import torch
for _ in range(4):
mu = np.random.uniform(-1,1)
sigma = np.random.uniform(0,2)
jn = jd.Normal(mu,sigma)
tn = torch.distributions.Normal(mu,sigma)
assert np.allclose(jn.entropy().data,tn.entropy().numpy())
x = np.random.uniform(-1,1)
assert np.allclose(jn.log_prob(x),tn.log_prob(torch.tensor(x)))
mu2 = np.random.uniform(-1,1)
sigma2 = np.random.uniform(0,2)
jn2 = jd.Normal(mu2,sigma2)
tn2 = torch.distributions.Normal(mu2,sigma2)
assert np.allclose(jd.kl_divergence(jn,jn2).data,torch.distributions.kl_divergence(tn,tn2).numpy())
def test_categorical(self):
import torch
for _ in range(4):
probs,probs2 = np.random.uniform(0,1,(10)), np.random.uniform(0,1,(10))
probs,probs2 = probs / probs.sum(),probs2 / probs2.sum()
jc, jc2 = jd.Categorical(jt.array(probs).reshape(1,-1)),jd.Categorical(jt.array(probs2).reshape(1,-1))
tc, tc2 = torch.distributions.Categorical(torch.tensor(probs)),torch.distributions.Categorical(torch.tensor(probs2))
assert np.allclose(jc.entropy().data, tc.entropy().numpy()), (jc.entropy().data, tc.entropy().numpy())
x = np.random.randint(0,10,(4))
assert np.allclose(jc.log_prob(x), tc.log_prob(torch.tensor(x)))
assert np.allclose(jd.kl_divergence(jc,jc2),torch.distributions.kl_divergence(tc,tc2))
def test_uniform(self):
import torch
for _ in range(4):
low, low2 = np.random.randint(-1,2), np.random.randint(-1,2)
leng, leng2 = np.random.uniform(0,2), np.random.uniform(0,2)
high, high2 = low + leng, low2 + leng2
ju, ju2 = jd.Uniform(low,high),jd.Uniform(low2,high2)
tu, tu2 = torch.distributions.Uniform(low,high),torch.distributions.Uniform(low2,high2)
assert np.allclose(ju.entropy().data,tu.entropy().numpy())
x = np.random.uniform(low,high)
assert np.allclose(ju.log_prob(x),tu.log_prob(torch.tensor(x)))
assert np.allclose(jd.kl_divergence(ju,ju2),torch.distributions.kl_divergence(tu,tu2))
def test_geometric(self):
import torch
for _ in range(4):
prob, prob2 = np.random.uniform(0,1), np.random.uniform(0,1)
jg, jg2 = jd.Geometric(prob),jd.Geometric(prob2)
tg, tg2 = torch.distributions.Geometric(prob),torch.distributions.Geometric(prob2)
assert np.allclose(jg.entropy().data,tg.entropy().numpy())
x = np.random.randint(1,10)
assert np.allclose(jg.log_prob(x),tg.log_prob(torch.tensor(x)))
# print(jd.kl_divergence(jg,jg2),torch.distributions.kl_divergence(tg,tg2))
assert np.allclose(jd.kl_divergence(jg,jg2),torch.distributions.kl_divergence(tg,tg2))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "54b75265e45751473305bb2b3eb2376e6994a7c0", "size": 5099, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/jittor/test/test_distributions.py", "max_stars_repo_name": "maxint/jittor", "max_stars_repo_head_hexsha": "0b6a2e32f64acaeefcea63df1ea35fd0d729c604", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/jittor/test/test_distributions.py", "max_issues_repo_name": "maxint/jittor", "max_issues_repo_head_hexsha": "0b6a2e32f64acaeefcea63df1ea35fd0d729c604", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/jittor/test/test_distributions.py", "max_forks_repo_name": "maxint/jittor", "max_forks_repo_head_hexsha": "0b6a2e32f64acaeefcea63df1ea35fd0d729c604", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.212962963, "max_line_length": 136, "alphanum_fraction": 0.5887428908, "include": true, "reason": "import numpy", "num_tokens": 1425}
|
#!/usr/bin/env python
"""
Created on Mon Mar 7 11:48:11 2016
Author: Oren Freifeld
Email: freifeld@csail.mit.edu
"""
import numpy as np
from of.utils import ipshell
from scipy import sparse
from cpab.cpaNd import Tessellation as TessellationNd
class Tessellation(TessellationNd):
dim_domain = 2
_LargeNumber = 10**6
def __init__(self,nCx,nCy,nC,XMINS,XMAXS,tess):
nCx=int(nCx)
nCy=int(nCy)
nC=int(nC)
self.nCx=nCx
self.nCy=nCy
self.nC=nC
if len(XMINS)!=self.dim_domain:
raise ValueError(XMINS)
if len(XMAXS)!=self.dim_domain:
raise ValueError(XMAXS)
self.XMINS=XMINS
self.XMAXS=XMAXS
self.type=tess
cells_multiidx,cells_verts_homo_coo=self._create_cells_homo_coo()
self.cells_multiidx = cells_multiidx
self.cells_verts_homo_coo = cells_verts_homo_coo
if self.type=='I':
# THIS IS SPECIFIC FOR TRI TESS IN 2D ONLY
# Recall that the first 4 triangles have one shared point:
# The center of the first rectangle. And this point is the first
# in each of these 4 triangles.
# The next four triangles share the center of the second rectangtle,
# And so on.
self.box_centers=self.cells_verts_homo_coo[::4][:,0].copy()
elif self.type=='II':
self.box_centers=self.cells_verts_homo_coo.mean(axis=1)
else:
raise ValueError(tess)
_xmins=self.cells_verts_homo_coo[:,:,0].min(axis=1)
_ymins=self.cells_verts_homo_coo[:,:,1].min(axis=1)
_xmaxs=self.cells_verts_homo_coo[:,:,0].max(axis=1)
_ymaxs=self.cells_verts_homo_coo[:,:,1].max(axis=1)
self._xmins = np.asarray(zip(_xmins,_ymins))
self._xmaxs = np.asarray(zip(_xmaxs,_ymaxs))
self._xmins_LargeNumber = np.asarray(self._xmins).copy()
self._xmaxs_LargeNumber = np.asarray(self._xmaxs).copy()
self._xmins_LargeNumber[self._xmins_LargeNumber<=self.XMINS]=-self._LargeNumber
self._xmaxs_LargeNumber[self._xmaxs_LargeNumber>=self.XMAXS]=+self._LargeNumber
self.permuted_indices = np.random.permutation(self.nC)
def _create_cells_homo_coo(self):
xmin,ymin = self.XMINS
xmax,ymax = self.XMAXS
tess=self.type
nCx=self.nCx
nCy=self.nCy
nC=self.nC
if tess not in ['II','I']:
raise ValueError
Vx = np.linspace(xmin,xmax,nCx+1)
Vy = np.linspace(ymin,ymax,nCy+1)
cells_x = []
cells_x_verts = []
if tess == 'II':
for i in range(nCy):
for j in range(nCx):
cells_x.append((j,i))
ul = [Vx[j],Vy[i],1]
ur = [Vx[j+1],Vy[i],1]
ll = [Vx[j],Vy[i+1],1]
lr = [Vx[j+1],Vy[i+1],1]
ul = tuple(ul)
ur = tuple(ur)
ll = tuple(ll)
lr = tuple(lr)
cells_x_verts.append((ul,ur,lr,ll))
elif tess == 'I':
for i in range(nCy):
for j in range(nCx):
ul = [Vx[j],Vy[i],1]
ur = [Vx[j+1],Vy[i],1]
ll = [Vx[j],Vy[i+1],1]
lr = [Vx[j+1],Vy[i+1],1]
ul = tuple(ul)
ur = tuple(ur)
ll = tuple(ll)
lr = tuple(lr)
center = [(Vx[j]+Vx[j+1])/2,(Vy[i]+Vy[i+1])/2,1]
center = tuple(center)
cells_x_verts.append((center,ul,ur)) # order matters!
cells_x_verts.append((center,ur,lr)) # order matters!
cells_x_verts.append((center,lr,ll)) # order matters!
cells_x_verts.append((center,ll,ul)) # order matters!
cells_x.append((j,i,0))
cells_x.append((j,i,1))
cells_x.append((j,i,2))
cells_x.append((j,i,3))
else:
raise NotImplementedError(tess)
if len(cells_x_verts) != nC:
raise ValueError( len(cells_x_verts) , nC)
if len(cells_x) != nC:
raise ValueError( len(cells_x) , nC)
cells_multiidx,cells_verts = cells_x,cells_x_verts
cells_verts =np.asarray(cells_verts)
return cells_multiidx,cells_verts
# def create_verts_and_H(self,dim_range,
## nCx,nCy,nC, cells_multiidx,
## cells_verts,dim_domain,dim_range,
# valid_outside
# ):
# """
# H encodes the n'bors info.
# """
# if self.type == 'I':
# return self.create_verts_and_H_type_I(dim_range,valid_outside)
# elif self.type=='II':
# return self.create_verts_and_H_type_II(dim_range)
# else:
# raise NotImplementedError
def create_verts_and_H_type_I(self,dim_range,valid_outside):
"""
This assummes 2D
H encodes the n'bors info.
"""
if self.type != 'I':
raise ValueError(self.type)
dim_domain=self.dim_domain
nC = self.nC
cells_multiidx=self.cells_multiidx
cells_verts=self.cells_verts_homo_coo
nCx=self.nCx
nCy=self.nCy
if dim_domain !=2:
raise ValueError(dim_domain)
if dim_range not in (1,2):
raise NotImplementedError(dim_range)
nbrs = np.zeros((nC,nC),dtype=np.bool)
if valid_outside:
left=np.zeros((nC,nC),np.bool)
right=np.zeros((nC,nC),np.bool)
top=np.zeros((nC,nC),np.bool)
bottom=np.zeros((nC,nC),np.bool)
print 'Encoding continuity constraints.'
print 'If nC is large, this may take some time.'
print 'For a given configuration, however, this is done only once;'
print 'the results computed here will be saved and reused the next time'
print 'you use the same configuration.'
# TODO: Cython
#
for i in range(nC):
if nC > 200 and i % 200==0:
print i,'/',nC
for j in range(nC):
# shorter names
mi = cells_multiidx[i]
mj = cells_multiidx[j]
vi = cells_verts[i]
vj = cells_verts[j]
vi=self.make_it_hashable(vi)
vj=self.make_it_hashable(vj)
shared_verts = set(vi).intersection(vj)
if len(mi)!=3:
raise ValueError
if len(mj)!=3:
raise ValueError
if mi == mj:
# same cell, nothing to do here
continue
elif mi[:-1]==mj[:-1]:
# Same rect boxs, different triangles
s = set([mi[-1],mj[-1]])
if s in [set([0,1]),set([1,2]),set([2,3]),set([3,0])]:
nbrs[i,j]=1
else:
# different rect boxes
if valid_outside:
# try to deal with the extension
if mi[0]==mj[0]==0: # leftmost col
if mi[2]==mj[2]==3: # left triangle
if np.abs(mi[1]-mj[1])==1: # adjacent rows
nbrs[i,j]=1
left[i,j]=True
continue
if mi[0]==mj[0]==nCx-1: # rightmost col
if mi[2]==mj[2]==1: # right triangle
if np.abs(mi[1]-mj[1])==1: # adjacent rows
nbrs[i,j]=1
right[i,j]=True
continue
if mi[1]==mj[1]==0: # uppermost row
if mi[2]==mj[2]==0: # upper triangle
if np.abs(mi[0]-mj[0])==1: # adjacent cols
nbrs[i,j]=1
top[i,j]=True
continue
if mi[1]==mj[1]==nCy-1: # lowermost row
if mi[2]==mj[2]==2: # lower triangle
if np.abs(mi[0]-mj[0])==1: # adjacent cols
nbrs[i,j]=1
bottom[i,j]=True
continue
if set([mi[2],mj[2]]) not in [set([0,2]),set([1,3])]:
continue
pair = (mi[0]-mj[0]),(mi[1]-mj[1])
# Recall the order of triangles is
# 0
# 3 1
# 2
# vertical nbr's
if pair == (0,1) and (mi[2],mj[2])==(0,2):
nbrs[i,j]=1
elif pair == (0,-1) and (mi[2],mj[2])==(2,0):
nbrs[i,j]=1
# horizontal nbr's
elif pair == (1,0) and (mi[2],mj[2])==(3,1):
nbrs[i,j]=1
elif pair == (-1,0) and (mi[2],mj[2])==(1,3):
nbrs[i,j]=1
print 'Creating H of size (nC**2,nC)=({},{})'.format(nC**2,nC)
try:
H = np.zeros((nC**2,nC))
except MemoryError:
msg='Got MemoryError when trying to call np.zeros((nC**2,nC))'
ipshell(msg)
raise MemoryError('np.zeros((nC**2,nC))','nC={}'.format(nC))
# H = sparse.lil_matrix((nC**2,nC))
for i in range(nC):
for j in range(nC):
k = i*nC + j
if i < j:
continue
nbr = nbrs[i,j]
if nbr:
H[k,i]=-1
H[k,j]=+1
# ipshell('save H')
# 1/0
verts1 = []
verts2 = []
k = 0
print 'Extracting the vertices'
for count,h in enumerate(H):
if H.shape[0]>1000:
if count % 100000 == 0:
print count,'/',H.shape[0]
# ipshell('..')
if h.any():
# if h.nnz:
# ipshell('STOP')
# # Make h dense and flat
# h=np.asarray(h.todense()).ravel()
i = (h==1).nonzero()[0][0]
j = (h==-1).nonzero()[0][0]
mi = cells_multiidx[i]
mj = cells_multiidx[j]
vi = cells_verts[i]
vj = cells_verts[j]
vi=self.make_it_hashable(vi)
vj=self.make_it_hashable(vj)
shared_verts = set(vi).intersection(vj)
if len(shared_verts) ==0:
continue
if len(shared_verts) ==1:
# single vertex
if any([left[i,j],right[i,j],top[i,j],bottom[i,j]]):
# shared_vert is a set that contains a single tuple.
v_aux = list(shared_verts)[0] # v_aux is a tuple
v_aux = list(v_aux) # Now v_aux is a list (i.e. mutable)
if left[i,j] or right[i,j]:
v_aux[0]-=10 # Create a new vertex with the same y
elif top[i,j] or bottom[i,j]:
v_aux[1]-=10 # Create a new vertex with the same x
else:
raise ValueError("WTF?")
v_aux = tuple(v_aux)
shared_verts.add(v_aux) # add it to the set
# ipshell('hello')
# print shared_verts
else:
# We can skip it since the continuity at this vertex
# will be imposed via the edges.
continue
if len(shared_verts) != 2:
ipshell('oops')
raise ValueError(len(shared_verts),shared_verts)
try:
v1,v2 = np.asarray(list(shared_verts))
except:
ipshell('oops2')
raise
k+=2
verts1.append(v1)
verts2.append(v2)
# if a != (1,1):
# continue
# print a, ' is a nbr of ',b
# nEdges=nbrs.sum().astype(np.int)/2
# if k != nEdges*2:
# raise ValueError(k,nEdges)
nEdges = k/2
# Every edge connects 2 vertices.
# At every vertex, all components of the velocity must agree.
nConstraints = nEdges*2*dim_range
verts1 = np.asarray(verts1)
verts2 = np.asarray(verts2)
H = np.asarray([h for h in H if h.any()])
# H = np.asarray([h for h in H if h.nnz])
print 'H is ready'
return verts1,verts2,H,nEdges,nConstraints
def create_verts_and_H_type_II(self,
# nC, cells_multiidx, cells_verts,dim_domain,
dim_range):
"""
This assummes 2D
H encodes the n'bors info.
"""
if self.type != 'II':
raise ValueError(self.type)
dim_domain=self.dim_domain
nC = self.nC
cells_multiidx=self.cells_multiidx
cells_verts=self.cells_verts_homo_coo
# nCx=self.nCx
# nCy=self.nCy
if dim_domain !=2:
raise ValueError(dim_domain)
if dim_range not in (1,dim_domain):
raise NotImplementedError(dim_range)
nbrs = np.zeros((nC,nC))
for i in range(nC):
for j in range(nC):
# shorter names
mi = cells_multiidx[i]
mj = cells_multiidx[j]
if mi == mj:
continue
else:
pair = (np.abs(mi[0]-mj[0]),
np.abs(mi[1]-mj[1]))
if set(pair) == set([0,1]):
nbrs[i,j]=1
nEdges=nbrs.sum().astype(np.int)/2
H = np.zeros((nC**2,nC))
# H = sparse.lil_matrix((nC**2,nC))
for i in range(nC):
for j in range(nC):
k = i*nC + j
if i < j:
continue
nbr = nbrs[i,j]
if nbr:
H[k,i]=-1
H[k,j]=+1
# ipshell('hi')
# 1/0
verts1 = []
verts2 = []
k = 0
for h in H:
# ipshell('..')
if h.any():
# if h.nnz:
# Very annoying: I think there is a bug in the sparse matrix object.
# Even after 'todense' it is impossible to flatten it properly.
# h = np.asarray(h.todense().tolist()[0]) # Workaround.
k+=2
i = (h==1).nonzero()[0][0]
j = (h==-1).nonzero()[0][0]
# if set([i,j])==set([6,9]):
# ipshell('debug')
# 1/0
# a = mi
# b = mj
vi = cells_verts[i]
vj = cells_verts[j]
vi = self.make_it_hashable(vi)
vj = self.make_it_hashable(vj)
edge = set(vi).intersection(vj)
if len(edge) != 2:
ipshell('oops')
raise ValueError(len(edge),edge)
try:
v1,v2 = np.asarray(list(edge))
except:
ipshell('oops2')
raise
verts1.append(v1)
verts2.append(v2)
# if a != (1,1):
# continue
# print a, ' is a nbr of ',b
if k != nEdges*2:
raise ValueError(k,nEdges)
# Every edge connects 2 vertices.
# At every vertex, all components of the velocity must agree.
#nConstraints = nEdges*2*dim_domain
nConstraints = nEdges*2*dim_range
verts1 = np.asarray(verts1)
verts2 = np.asarray(verts2)
H = np.asarray([h for h in H if h.any()])
# H = np.asarray([h for h in H if h.nnz])
# ipshell('hi')
# 1/0
return verts1,verts2,H,nEdges,nConstraints
# return verts1,verts2,H,nConstraints
def create_constraint_mat_bdry(self,
zero_v_across_bdry,
verbose=False):
dim_domain=self.dim_domain
nC = self.nC
cells_multiidx=self.cells_multiidx
cells_verts=self.cells_verts_homo_coo
nCx=self.nCx
nCy=self.nCy
if dim_domain != 2:
raise ValueError(self.dim_domain)
if len(zero_v_across_bdry)!=2:
raise ValueError(zero_v_across_bdry)
zero_vx_across_bdry,zero_vy_across_bdry = zero_v_across_bdry
xmin,ymin = self.XMINS
xmax,ymax = self.XMAXS
nHomoCoo = dim_domain+1
length_Avee = dim_domain*nHomoCoo
nCols = nC*length_Avee
L = []
for i,cell in enumerate(cells_verts):
for j,v in enumerate(cell):
# s stands for start
# e stands for end
s = i*length_Avee
e = s+nHomoCoo
row = np.zeros(nCols)
row[s:e] = v
if zero_vx_across_bdry and v[0] in (xmin,xmax):
if verbose:
print 'vx', ' cell',i , 'vert ', j
L.append(row)
if zero_vy_across_bdry and v[1] in (ymin,ymax):
if verbose:
print 'vy', ' cell',i , 'vert ', j
L.append(np.roll(row,nHomoCoo))
L = np.asarray(L)
return L
|
{"hexsha": "50968be9197946111a8fd310702daa4521cab82a", "size": 20410, "ext": "py", "lang": "Python", "max_stars_repo_path": "cpab/cpa2d/Tessellation.py", "max_stars_repo_name": "freifeld/cpabDiffeo", "max_stars_repo_head_hexsha": "22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2016-03-16T21:35:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-11T04:16:21.000Z", "max_issues_repo_path": "cpab/cpa2d/Tessellation.py", "max_issues_repo_name": "freifeld/cpabDiffeo", "max_issues_repo_head_hexsha": "22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cpab/cpa2d/Tessellation.py", "max_forks_repo_name": "freifeld/cpabDiffeo", "max_forks_repo_head_hexsha": "22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-08-12T23:02:09.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-14T18:20:36.000Z", "avg_line_length": 35.1290877797, "max_line_length": 96, "alphanum_fraction": 0.4008819206, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4607}
|
import sys
import argparse
import numpy as np
import pandas as pd
import LSTM.LSTM as lstm
def info():
"""
System information
"""
print('Python version: ', sys.version)
print('Numpy version: ', np.__version__)
print('Pandas version: ', pd.__version__)
def main():
parser = argparse.ArgumentParser(description = 'Web Traffic Forecasting',
argument_default = argparse.SUPPRESS)
options = parser.add_subparsers()
# print information
op = options.add_parser('info', description = 'Print system information')
op.set_defaults(func = info)
# Required args
parser.add_argument("-d1", "--train_path", default = '../data/train_1.csv',
help = "Path to the training series")
parser.add_argument("-d2", "--test_path", default = '../data/train_2.csv',
help = "Path to the testing series")
# Optional args
parser.add_argument("-s", "--savepath", type = str, default = 'lstm_prediction',
help = "Path to save LSTM prediction, [Default: lstm_prediction]")
parser.add_argument("-n", "--pred_days", type = int, default = 60,
help = "Number of days to forecasts, [Default: 60]")
parser.add_argument("-b", "--batch_size", type = int, default = 5000,
help = "Batch size, [Default: 5000]")
parser.add_argument("-e", "--epochs", type = int, default = 2,
help = "Number of epochs, [Default: 20]")
parser.add_argument("-ne", "--neuron", type = int, default = 5,
help = "Number of epochs, [Default: 5]")
parser.set_defaults(func = lstm.main)
args = parser.parse_args()
if hasattr(args, 'func'):
args = vars(args)
func = args.pop('func')
func(**args)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
{"hexsha": "a0a4c866f6f9d27d25ca0e25140e7b922cc04c2d", "size": 1972, "ext": "py", "lang": "Python", "max_stars_repo_path": "LSTM/__main__.py", "max_stars_repo_name": "melanieihuei/Web-Traffic-Forecasting", "max_stars_repo_head_hexsha": "d40c3e9c3900eccde271a0e82401f4f5283ee426", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-05-05T16:45:14.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-18T23:22:50.000Z", "max_issues_repo_path": "LSTM/__main__.py", "max_issues_repo_name": "dsp-uga/Edamame", "max_issues_repo_head_hexsha": "d40c3e9c3900eccde271a0e82401f4f5283ee426", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LSTM/__main__.py", "max_forks_repo_name": "dsp-uga/Edamame", "max_forks_repo_head_hexsha": "d40c3e9c3900eccde271a0e82401f4f5283ee426", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-04-29T20:48:38.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-28T20:41:46.000Z", "avg_line_length": 37.2075471698, "max_line_length": 90, "alphanum_fraction": 0.5679513185, "include": true, "reason": "import numpy", "num_tokens": 442}
|
include("../../MaximinOPF/src/MaximinOPF.jl")
using PowerModels
using JuMP
using SCS
using Ipopt
using ProxSDP
PowerModels.silence()
function SolveFP(pm_data,pm_form,pm_optimizer; x_vals=Dict{Int64,Float64}() )
pm = MaximinOPF.PF_FeasModel(pm_data, pm_form, x_vals)
JuMP.set_optimizer(pm.model,pm_optimizer)
JuMP.optimize!(pm.model)
status=JuMP.termination_status(pm.model)
    if status != JuMP.MOI.OPTIMAL
println("FLAGGING: Solve status=",status)
end
return pm
end
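# Example use of SolveFP (a sketch only; `pm_data` and the solver objects are constructed
# further below, and `x_vals` can optionally map branch indices to fixed attack values):
#   pm = SolveFP(pm_data, SOCWRConicPowerModel, conic_solver)
#   println(JuMP.objective_value(pm.model))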
testcase = Dict(
"file" => "data/case9.m",
"PMOption" => SparseSDPWRMPowerModel,
"name" => "case9K3ACR",
"attack_budget" => 0,
"inactive_indices" => [1,4,7],
"protected_indices" => []
)
pm_data = PowerModels.parse_file(testcase["file"])
pm_data["attacker_budget"] = testcase["attack_budget"] ###Adding another key and entry
pm_data["inactive_branches"] = testcase["inactive_indices"] ###Adding another key and entry
pm_data["protected_branches"] = testcase["protected_indices"] ###Adding another key and entry
sdp_solver=with_optimizer(ProxSDP.Optimizer, log_verbose=false, tol_primal=1e-6, tol_dual=1e-6 )
#sdp_solver=with_optimizer(Mosek.Optimizer,MSK_IPAR_LOG=0)
#conic_solver=with_optimizer(Mosek.Optimizer,MSK_IPAR_LOG=0)
conic_solver=with_optimizer(SCS.Optimizer,verbose=0)
ip_solver=with_optimizer(Ipopt.Optimizer,print_level=0)
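# The commented-out Mosek optimizers above are a commercial alternative and need a separate
# Mosek installation and license; the ProxSDP/SCS/Ipopt choices used here are open-source.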
# nonconvex AC forms
nonconvex_ac=[ACPPowerModel, ACRPowerModel, ACTPowerModel]
for pm_form in nonconvex_ac
println("Formulating and solving the form ",pm_form)
pm = MaximinOPF.PF_FeasModel(pm_data, pm_form)
JuMP.set_optimizer(pm.model,ip_solver)
JuMP.optimize!(pm.model)
println("\tOptimal value using powerform ", pm_form, " is: ",JuMP.objective_value(pm.model), "with status ",JuMP.termination_status(pm.model))
end
# linear approximations
linear_approx=[DCPPowerModel, DCMPPowerModel, NFAPowerModel]
for pm_form in linear_approx
println("Formulating and solving the form ",pm_form)
pm = MaximinOPF.PF_FeasModel(pm_data, pm_form)
JuMP.set_optimizer(pm.model,conic_solver)
JuMP.optimize!(pm.model)
println("\tOptimal value using powerform ", pm_form, " is: ",JuMP.objective_value(pm.model), "with status ",JuMP.termination_status(pm.model))
end
# quadratic approximations
quadratic_approx=[DCPLLPowerModel, LPACCPowerModel]
for pm_form in quadratic_approx
println("Formulating and solving the form ",pm_form)
pm = MaximinOPF.PF_FeasModel(pm_data, pm_form)
JuMP.set_optimizer(pm.model,ip_solver)
JuMP.optimize!(pm.model)
println("\tOptimal value using powerform ", pm_form, " is: ",JuMP.objective_value(pm.model), "with status ",JuMP.termination_status(pm.model))
end
# quadratic relaxations
quadratic_relax=[SOCWRPowerModel, SOCBFPowerModel, QCRMPowerModel, QCLSPowerModel]
for pm_form in quadratic_relax
println("Formulating and solving the form ",pm_form)
pm = MaximinOPF.PF_FeasModel(pm_data, pm_form)
JuMP.set_optimizer(pm.model,ip_solver)
JuMP.optimize!(pm.model)
println("\tOptimal value using powerform ", pm_form, " is: ",JuMP.objective_value(pm.model), "with status ",JuMP.termination_status(pm.model))
end
quad_conic_relax=[SOCWRConicPowerModel, SOCBFConicPowerModel]
for pm_form in quad_conic_relax
println("Formulating and solving the form ",pm_form)
pm = MaximinOPF.PF_FeasModel(pm_data, pm_form)
JuMP.set_optimizer(pm.model,conic_solver)
JuMP.optimize!(pm.model)
println("\tOptimal value using powerform ", pm_form, " is: ",JuMP.objective_value(pm.model), "with status ",JuMP.termination_status(pm.model))
end
# sdp relaxations
sdp_relax=[SDPWRMPowerModel, SparseSDPWRMPowerModel]
for pm_form in sdp_relax
println("Formulating and solving the form ",pm_form)
pm = MaximinOPF.PF_FeasModel(pm_data, pm_form)
JuMP.set_optimizer(pm.model,sdp_solver)
JuMP.optimize!(pm.model)
println("\tOptimal value using powerform ", pm_form, " is: ",JuMP.objective_value(pm.model), "with status ",JuMP.termination_status(pm.model))
end
|
{"hexsha": "dd54b75e215a21a2ea6b29418909ca38f013c09c", "size": 3988, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/SolveFeas.jl", "max_stars_repo_name": "Argonne-National-Laboratory/MaximinOPF.jl", "max_stars_repo_head_hexsha": "c24f001336d11d98ffcaa5a08a04ba53ed7f7dce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-07T21:12:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T06:31:07.000Z", "max_issues_repo_path": "examples/SolveFeas.jl", "max_issues_repo_name": "Argonne-National-Laboratory/MaximinOPF.jl", "max_issues_repo_head_hexsha": "c24f001336d11d98ffcaa5a08a04ba53ed7f7dce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-04-16T02:51:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-11T22:03:37.000Z", "max_forks_repo_path": "examples/SolveFeas.jl", "max_forks_repo_name": "Argonne-National-Laboratory/MaximinOPF.jl", "max_forks_repo_head_hexsha": "c24f001336d11d98ffcaa5a08a04ba53ed7f7dce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-25T16:47:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-25T16:47:55.000Z", "avg_line_length": 41.1134020619, "max_line_length": 146, "alphanum_fraction": 0.7652958877, "num_tokens": 1161}
|
"""
After using reinforcement learning to train a network, e.g. with policy_gradient.py, to play a game well, we then want to
learn to estimate whether that network would win, lose or draw from a given position.
Alpha Go used a database of real positions to get its predictions from; we don't have that for tic-tac-toe, so instead
we generate some random game positions and train off of the results we get playing from those.
"""
import os
import random
import numpy as np
import tensorflow as tf
from common.network_helpers import create_network, load_network, save_network, \
get_deterministic_network_move
from games.tic_tac_toe import TicTacToeGameSpec
HIDDEN_NODES_VALUE = (100, 100, 100)
HIDDEN_NODES_REINFORCEMENT = (100, 100, 100)
BATCH_SIZE = 100 # every how many games to do a parameter update?
LEARN_RATE = 1e-4
REINFORCEMENT_NETWORK_PATH = 'current_network.p'
VALUE_NETWORK_PATH = 'value_netowrk.p'
TRAIN_SAMPLES = 10000
TEST_SAMPLES = 10000
# to play a different game change this to another spec, e.g TicTacToeXGameSpec or ConnectXGameSpec
game_spec = TicTacToeGameSpec()
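# For example (constructor arguments here are illustrative; check the game spec classes for
# their actual signatures):
# game_spec = TicTacToeXGameSpec(5, 4)  # e.g. a 5x5 board needing 4 in a row to win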
NUMBER_RANDOM_RANGE = (1, int(game_spec.board_squares() * 0.8))  # bounds must be ints for random.randint
# it would be good to have real board positions, but failing that just generate random ones
def generate_random_board_position():
while True:
board_state = game_spec.new_board()
number_moves = random.randint(*NUMBER_RANDOM_RANGE)
side = 1
        for _ in range(number_moves):
            board_state = game_spec.apply_move(board_state, random.choice(list(game_spec.available_moves(board_state))),
                                               side)
            if game_spec.has_winner(board_state) != 0:
                # the position already has a winner, so discard it and start again
                break
            side = -side
        else:
            # no winner was reached while making the random moves, so this position is usable
            return board_state
reinforcement_input_layer, reinforcement_output_layer, reinforcement_variables = create_network(
game_spec.board_squares(),
HIDDEN_NODES_REINFORCEMENT,
game_spec.outputs())
value_input_layer, value_output_layer, value_variables = create_network(game_spec.board_squares(), HIDDEN_NODES_VALUE,
output_nodes=1, output_softmax=False)
target_placeholder = tf.placeholder("float", (None, 1))
error = tf.reduce_sum(tf.square(target_placeholder - value_output_layer))
train_step = tf.train.RMSPropOptimizer(LEARN_RATE).minimize(error)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
load_network(session, reinforcement_variables, REINFORCEMENT_NETWORK_PATH)
if os.path.isfile(VALUE_NETWORK_PATH):
print("loading previous version of value network")
load_network(session, value_variables, VALUE_NETWORK_PATH)
def make_move(board_state, side):
move = get_deterministic_network_move(session, reinforcement_input_layer, reinforcement_output_layer,
board_state, side)
return game_spec.flat_move_to_tuple(np.argmax(move))
board_states_training = {}
board_states_test = []
episode_number = 0
while len(board_states_training) < TRAIN_SAMPLES + TEST_SAMPLES:
board_state = generate_random_board_position()
board_state_flat = tuple(np.ravel(board_state))
# only accept the board_state if not already in the dict
if board_state_flat not in board_states_training:
result = game_spec.play_game(make_move, make_move, board_state=board_state)
board_states_training[board_state_flat] = float(result)
# take a random selection from training into a test set
for _ in range(TEST_SAMPLES):
        sample = random.choice(list(board_states_training.keys()))
board_states_test.append((sample, board_states_training[sample]))
del board_states_training[sample]
    board_states_training = list(board_states_training.items())
test_error = session.run(error, feed_dict={value_input_layer: [x[0] for x in board_states_test],
target_placeholder: [[x[1]] for x in board_states_test]})
while True:
np.random.shuffle(board_states_training)
train_error = 0
for start_index in range(0, len(board_states_training) - BATCH_SIZE + 1, BATCH_SIZE):
mini_batch = board_states_training[start_index:start_index + BATCH_SIZE]
batch_error, _ = session.run([error, train_step],
feed_dict={value_input_layer: [x[0] for x in mini_batch],
target_placeholder: [[x[1]] for x in mini_batch]})
train_error += batch_error
new_test_error = session.run(error, feed_dict={value_input_layer: [x[0] for x in board_states_test],
target_placeholder: [[x[1]] for x in board_states_test]})
print("episode: %s train_error: %s test_error: %s" % (episode_number, train_error, test_error))
if new_test_error > test_error:
print("train error went up, stopping training")
break
test_error = new_test_error
episode_number += 1
save_network(session, value_variables, VALUE_NETWORK_PATH)
|
{"hexsha": "f682f0699e2a20c6ab6ff8b11869e1d76d9759dd", "size": 5304, "ext": "py", "lang": "Python", "max_stars_repo_path": "value_network.py", "max_stars_repo_name": "alphagamatoe/AlphaToe", "max_stars_repo_head_hexsha": "a7cd0969aa46dfd151a22ed8b9aec1a894747b17", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "value_network.py", "max_issues_repo_name": "alphagamatoe/AlphaToe", "max_issues_repo_head_hexsha": "a7cd0969aa46dfd151a22ed8b9aec1a894747b17", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "value_network.py", "max_forks_repo_name": "alphagamatoe/AlphaToe", "max_forks_repo_head_hexsha": "a7cd0969aa46dfd151a22ed8b9aec1a894747b17", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1162790698, "max_line_length": 120, "alphanum_fraction": 0.6881598793, "include": true, "reason": "import numpy", "num_tokens": 1104}
|
import os
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
init_time = str(time.asctime()).replace(' ', '-')
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('iters', '100000', 'Number of loop iterations')
tf.app.flags.DEFINE_integer('batch', '32', 'Batch size')
tf.app.flags.DEFINE_integer('z_dim', '81', 'Dimension of z')
tf.app.flags.DEFINE_integer('k_steps', '7', 'Number of iterations to train the discriminator for every loop iteration')
tf.app.flags.DEFINE_float('lr', '1e-3', 'Learning rate')
tf.app.flags.DEFINE_string('filename', 'log_'+init_time+'.txt', 'the log file for the experiment records')
filename = os.path.join('./', FLAGS.filename)
z_dim = FLAGS.z_dim
if not os.path.exists('out/'):
os.makedirs('out/')
def Log(string, log_file_path=filename):
with open(log_file_path, 'a+') as f:
f.write(string + '\n')
f.flush()
print(string)
def plot(samples):
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
return fig
def sample_z(m, n):
return np.random.uniform(-1., 1., size=[m, n])
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
def D(X):
theta_D = tf.get_collection('D_var')[0]
(D_W1, D_W2, D_b1, D_b2) = (theta_D[0], theta_D[1], theta_D[2], theta_D[3])
D_h1 = tf.nn.relu(tf.matmul(X, D_W1) + D_b1)
D_logit = tf.matmul(D_h1, D_W2) + D_b2
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
def G(Z):
theta_G = tf.get_collection('G_var')[0]
(G_W1, G_W2, G_b1, G_b2) = (theta_G[0], theta_G[1], theta_G[2], theta_G[3])
G_h1 = tf.nn.relu(tf.matmul(Z, G_W1) + G_b1)
G_logit = tf.matmul(G_h1, G_W2) + G_b2
G_prob = tf.nn.sigmoid(G_logit)
return G_prob
def main(args):
############################################################################################################################
# 1. Loading Data #
############################################################################################################################
Log('Loading MNIST dataset...')
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
############################################################################################################################
# 2. Hyper-param Setting #
############################################################################################################################
Log('Setting Hyper-param...')
lr = FLAGS.lr
iters = FLAGS.iters
k_steps = FLAGS.k_steps
batch_size = FLAGS.batch
Log('Learning rate: {}'.format(lr))
Log('Num of iterations: {}'.format(iters))
Log('Batch size: {}'.format(batch_size))
Log('Dimension of z: {}'.format(z_dim))
# Log(str(sys.argv))
############################################################################################################################
# 3. Model Setup #
############################################################################################################################
# 3.1 Input #
X = tf.placeholder(shape=[None, 784], dtype=tf.float32 )
Z = tf.placeholder(shape=[None, z_dim], dtype=tf.float32 )
D_W1 = tf.Variable(xavier_init([784, 128]), name='D_W1')
D_b1 = tf.Variable(tf.zeros(shape=[128]), name='D_b1')
D_W2 = tf.Variable(xavier_init([128, 1]), name='D_W2')
D_b2 = tf.Variable(tf.zeros(shape=[1]), name='D_b2')
theta_D = [D_W1, D_W2, D_b1, D_b2]
tf.add_to_collection('D_var', theta_D)
G_W1 = tf.Variable(xavier_init([z_dim, 128]), name='G_W1')
G_b1 = tf.Variable(tf.zeros(shape=[128]), name='G_b1')
G_W2 = tf.Variable(xavier_init([128, 784]), name='G_W2')
G_b2 = tf.Variable(tf.zeros(shape=[784]), name='G_b2')
theta_G = [G_W1, G_W2, G_b1, G_b2]
tf.add_to_collection('G_var', theta_G)
# 3.2 Loss function #
G_sample = G(Z)
D_real, D_logit_real = D(X)
D_fake, D_logit_fake = D(G_sample)
# D_loss = 0.5 * (tf.reduce_mean((1 - D_real)**2) + tf.reduce_mean(D_fake**2))
# G_loss = 0.5 * tf.reduce_mean((1 - D_fake)**2)
D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
G_loss = -tf.reduce_mean(tf.log(D_fake))
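    # Note: the two commented-out lines above are the least-squares GAN (LSGAN) objective;
    # the active lines implement the original minimax discriminator loss together with the
    # non-saturating generator loss -E[log D(G(z))].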
# 3.3 Optimizer #
opt_adm = tf.train.AdamOptimizer() # """Here you need to use Adam optimizer with no learning rate specified"""
opt_sgd = tf.train.GradientDescentOptimizer(learning_rate=lr) # """Here you need to use gradient descent optimizer with a learning rate specified"""
# opt_adm = tf.train.AdamOptimizer(learning_rate=lr)
# opt_sgd = tf.train.AdamOptimizer(learning_rate=lr)
# 3.4 Training step #
train_G_step = opt_adm.minimize(G_loss, var_list=tf.get_collection('G_var')[0])
train_D_step = opt_sgd.minimize(D_loss, var_list=tf.get_collection('D_var')[0])
############################################################################################################################
# 4. Training #
############################################################################################################################
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
# 4.1 Initialization #
sess.run(tf.global_variables_initializer())
i = 0
        # 4.2 Model training                                              #
for it in range(iters):
# 4.2.1 Do sampling
# X_batch, _ = mnist.train.next_batch(batch_size)
# 4.2.2 Train the discriminator #
for k in range(k_steps):
X_batch, _ = mnist.train.next_batch(batch_size)
z_batch = sample_z(batch_size, z_dim)
_, D_loss_curr = sess.run([train_D_step, D_loss], feed_dict={X: X_batch, Z: z_batch})
# 4.2.3 Train the generator #
X_batch, _ = mnist.train.next_batch(batch_size)
z_batch = sample_z(batch_size, z_dim)
_, G_loss_curr = sess.run([train_G_step, G_loss], feed_dict={X: X_batch, Z: z_batch})
if it % 1000 == 0:
Log('Iter: {}; D_loss: {:.4}; G_loss: {:.4}'.format(it, D_loss_curr, G_loss_curr))
samples = sess.run(G_sample, feed_dict={Z: sample_z(16, z_dim)})
fig = plot(samples)
plt.savefig('out/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')
i += 1
plt.close(fig)
############################################################################################################################
# 5. Testing #
############################################################################################################################
"""You should generate some fake pictures"""
# if not os.path.exists('out/'):
# os.makedirs('out/')
# print('Iter: {}; D_loss: {:.4}; G_loss: {:.4}'.format(iters, D_loss_curr, G_loss_curr))
# with tf.Session() as sess:
# samples = sess.run(G_sample, feed_dict={Z: sample_z(16, z_dim)})
# fig = plot(samples)
# plt.savefig('out/out.png')
# plt.close(fig)
if __name__ == '__main__':
tf.app.run()
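# Example invocation (illustrative values; any flag that is omitted uses the defaults defined above):
#   python gan.py --iters 100000 --batch 32 --z_dim 81 --k_steps 7 --lr 1e-3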
|
{"hexsha": "2a3dc4ebedf6d0b3886dad9cbb839daa81504851", "size": 8492, "ext": "py", "lang": "Python", "max_stars_repo_path": "gan.py", "max_stars_repo_name": "huiEric/gan_lg", "max_stars_repo_head_hexsha": "897379208e76cbfe32bd48a978c21be4d053cf5d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gan.py", "max_issues_repo_name": "huiEric/gan_lg", "max_issues_repo_head_hexsha": "897379208e76cbfe32bd48a978c21be4d053cf5d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gan.py", "max_forks_repo_name": "huiEric/gan_lg", "max_forks_repo_head_hexsha": "897379208e76cbfe32bd48a978c21be4d053cf5d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4243902439, "max_line_length": 154, "alphanum_fraction": 0.4730334432, "include": true, "reason": "import numpy", "num_tokens": 1939}
|
#importing the libraries
from keras.preprocessing.image import img_to_array
from keras.models import load_model
from imutils import build_montages
from imutils import paths
import numpy as np
import argparse
import random
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True,
help="path to out input directory of images")
ap.add_argument("-m", "--model", required=True,
help="path to pre-trained model")
args = vars(ap.parse_args())
# load the pre-trained network
print("[INFO] loading pre-trained network...")
model = load_model(args["model"])
# grab all image paths in the input directory and randomly sample them
imagePaths = list(paths.list_images(args["images"]))
random.shuffle(imagePaths)
imagePaths = imagePaths[:16]
# initialize our list of results
results = []
for p in imagePaths:
# load our original input image
orig = cv2.imread(p)
# pre-process our image by converting it from BGR to RGB channel
    # ordering (since our Keras model was trained on RGB ordering),
# resize it to 64x64 pixels, and then scale the pixel intensities
# to the range [0, 1]
image = cv2.cvtColor(orig, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (64, 64))
image = image.astype("float") / 255.0
# order channel dimensions (channels-first or channels-last)
# depending on our Keras backend, then add a batch dimension to
# the image
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
# make predictions on the input image
pred = model.predict(image)
pred = pred.argmax(axis=1)[0]
# an index of zero is the 'cataract' label while an index of
# one is the 'non cataract' label
label = "cataract" if pred == 0 else "non cataract"
color = (255, 0, 255) if pred == 0 else (255, 255, 0)
# resize our original input (so we can better visualize it) and
# then draw the label on the image
orig = cv2.resize(orig, (128, 128))
cv2.putText(orig, label, (3, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,color, 2)
# add the output image to our list of results
results.append(orig)
# create a montage using 128x128 "tiles" with 4 rows and 4 columns
montage = build_montages(results, (128, 128), (4, 4))[0]
# show the output montage
cv2.imshow("Results", montage)
cv2.waitKey(0)
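# Example invocation (paths are illustrative only):
#   python load.py --images dataset/test_images --model cataract_detector.model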
|
{"hexsha": "608f3563440e59c277e424aabd16134dd41b2d85", "size": 2369, "ext": "py", "lang": "Python", "max_stars_repo_path": "load.py", "max_stars_repo_name": "Chashmyar/ChashmyarTeam", "max_stars_repo_head_hexsha": "dc77bbc1d0da528a41ae92500db56fa62850197e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-04-01T18:31:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-02T06:20:49.000Z", "max_issues_repo_path": "load.py", "max_issues_repo_name": "Chashmyar/ChashmyarTeam", "max_issues_repo_head_hexsha": "dc77bbc1d0da528a41ae92500db56fa62850197e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "load.py", "max_forks_repo_name": "Chashmyar/ChashmyarTeam", "max_forks_repo_head_hexsha": "dc77bbc1d0da528a41ae92500db56fa62850197e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-04-01T18:18:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-27T22:01:39.000Z", "avg_line_length": 33.3661971831, "max_line_length": 77, "alphanum_fraction": 0.7095821022, "include": true, "reason": "import numpy", "num_tokens": 625}
|
import unittest, gmsh, subprocess, json, os, time, glob
import numpy as np
from neuronmi.mesh.mesh_utils import *
from neuronmi.mesh.shapes import *
class TestEmiMesh(unittest.TestCase):
@classmethod
def tearDownClass(cls):
trash = ('h5', 'json', 'pvd', 'vtu', 'msh', 'geo_unrolled')
        # map() is lazy in Python 3, so iterate explicitly to actually delete the files
        for leftover in sum((glob.glob('*.%s' % ext) for ext in trash), []):
            os.remove(leftover)
    # Sanity checks for mesh
def test_emi_mesh(self):
gmsh.initialize()
root = 'test_2neuron'
msh_file = '%s.msh' % root
# This gives course enough mesh that the solver runs fast
box = Box(np.array([-60, -60, -100]), np.array([60, 60, 100]))
neurons = [BallStickNeuron({'soma_x': 0, 'soma_y': 0, 'soma_z': 0,
'soma_rad': 20, 'dend_len': 50, 'axon_len': 50,
'dend_rad': 15, 'axon_rad': 10}),
TaperedNeuron({'soma_x': 30, 'soma_y': -30, 'soma_z': 0,
'soma_rad': 20, 'dend_len': 20, 'axon_len': 20, 'axonh_len': 15, 'dendh_len': 15,
'dend_rad': 10, 'axon_rad': 8, 'axonh_rad': 10, 'dendh_rad': 15})]
probe = MicrowireProbe({'tip_x': 30, 'radius': 5, 'length': 800})
# Coarse enough for tests
size_params = {'DistMax': 20, 'DistMin': 10, 'LcMax': 40,
'neuron_LcMin': 6, 'probe_LcMin': 6}
model = gmsh.model
factory = model.occ
model.add('Neuron')
gmsh.option.setNumber('Mesh.PreserveNumberingMsh2', 1)
gmsh.option.setNumber('Mesh.MshFileVersion', 2.2)
# Add components to model
model, mapping = build_EMI_geometry(model, box, neurons, probe=probe)
with open('%s.json' % root, 'w') as out:
mapping.dump(out)
# Dump the mapping as json
mesh_config_EMI_model(model, mapping, size_params)
factory.synchronize()
# # This is a way to store the geometry as geo file
gmsh.write('%s.geo_unrolled' % root)
# # 3d model
model.mesh.generate(3)
# # We have some mesh
vtx_order, vtices, _ = model.mesh.getNodes()
self.assertTrue(len(vtx_order))
gmsh.write(msh_file)
model.remove()
gmsh.finalize()
def test_sandbox(self):
# NOTE: load_mesh, msh_to_h5 or things to do with HDF5File cause
# gmsh to fail on later tests. So these tests are isolated into
# their own process
        # subprocess.call returns the exit code, so a successful run is a return value of 0
        self.assertEqual(0, subprocess.call(['python ./sandbox/two_neurons.py'], shell=True))
|
{"hexsha": "de3514381b020ca6751951a556c537e8843f151d", "size": 2678, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/mesh/test_emi_mesh.py", "max_stars_repo_name": "MiroK/nEuronMI", "max_stars_repo_head_hexsha": "227b26598fa2cde5aabec68db898f308fb44aa31", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-16T07:35:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-16T07:35:53.000Z", "max_issues_repo_path": "test/mesh/test_emi_mesh.py", "max_issues_repo_name": "MiroK/nEuronMI", "max_issues_repo_head_hexsha": "227b26598fa2cde5aabec68db898f308fb44aa31", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2019-11-08T16:59:53.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-05T10:44:49.000Z", "max_forks_repo_path": "test/mesh/test_emi_mesh.py", "max_forks_repo_name": "MiroK/nEuronMI", "max_forks_repo_head_hexsha": "227b26598fa2cde5aabec68db898f308fb44aa31", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-03T05:15:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-03T05:15:46.000Z", "avg_line_length": 36.6849315068, "max_line_length": 115, "alphanum_fraction": 0.5537714712, "include": true, "reason": "import numpy", "num_tokens": 748}
|
import Base: filter, map, reduce
"""
map(f, d::DTable) -> DTable
Applies `f` to each row of `d`.
The applied function needs to return a `Tables.Row` compatible object (e.g. `NamedTuple`).
# Examples
```julia
julia> d = DTable((a = [1, 2, 3], b = [1, 1, 1]), 2);
julia> m = map(x -> (r = x.a + x.b,), d)
DTable with 2 partitions
Tabletype: NamedTuple
julia> fetch(m)
(r = [2, 3, 4],)
julia> m = map(x -> (r1 = x.a + x.b, r2 = x.a - x.b), d)
DTable with 2 partitions
Tabletype: NamedTuple
julia> fetch(m)
(r1 = [2, 3, 4], r2 = [0, 1, 2])
```
"""
function map(f, d::DTable)
chunk_wrap = (_chunk, _f) -> begin
if isnonempty(_chunk)
m = TableOperations.map(_f, _chunk)
Tables.materializer(_chunk)(m)
else
_chunk
end
end
chunks = map(c -> Dagger.spawn(chunk_wrap, c, f), d.chunks)
DTable(chunks, d.tabletype)
end
"""
map(f, gd::GDTable) -> GDTable
Applies `f` to each row of `gd`.
The applied function needs to return a `Tables.Row` compatible object (e.g. `NamedTuple`).
# Examples
```julia
julia> g = Dagger.groupby(DTable((a=repeat('a':'c', inner=2),b=1:6), 2), :a)
GDTable with 3 partitions and 3 keys
Tabletype: NamedTuple
Grouped by: [:a]
julia> m = map(r -> (a = r.a, b = r.b, c = r.a + r.b), g)
GDTable with 3 partitions and 3 keys
Tabletype: NamedTuple
Grouped by: [:a]
julia> fetch(m)
(a = ['a', 'a', 'c', 'c', 'b', 'b'], b = [1, 2, 5, 6, 3, 4], c = ['b', 'c', 'h', 'i', 'e', 'f'])
```
"""
function map(f, gd::GDTable)
d = map(f, gd.dtable)
GDTable(d, gd.cols, gd.index)
end
"""
reduce(f, d::DTable; cols=nothing, [init]) -> NamedTuple
Reduces `d` using function `f` applied on all columns of the DTable.
By providing the kwarg `cols` as a `Vector{Symbol}` object it's possible
to restrict the reduction to the specified columns.
The reduced values are provided in a NamedTuple under names of reduced columns.
For the `init` kwarg please refer to `Base.reduce` documentation,
as it follows the same principles.
# Examples
```julia
julia> d = DTable((a = [1, 2, 3], b = [1, 1, 1]), 2);
julia> r1 = reduce(+, d)
EagerThunk (running)
julia> fetch(r1)
(a = 6, b = 3)
julia> r2 = reduce(*, d, cols=[:a])
EagerThunk (running)
julia> fetch(r2)
(a = 6,)
```
"""
function reduce(f, d::DTable; cols=nothing::Union{Nothing, Vector{Symbol}}, init=Base._InitialValue())
if length(d.chunks) > 0
columns = cols === nothing ? Tables.columnnames(d) : cols
else
return Dagger.@spawn NamedTuple()
end
col_in_chunk_reduce = (_f, _c, _init, _chunk) -> reduce(_f, Tables.getcolumn(_chunk, _c); init=_init)
chunk_reduce = (_f, _chunk, _cols, _init) -> begin
# TODO: potential speedup enabled by commented code below by reducing the columns in parallel
v = [col_in_chunk_reduce(_f, c, deepcopy(_init), _chunk) for c in _cols]
(; zip(_cols, v)...)
        # TODO: uncomment and define a good threshold for parallelization when this gets resolved
# https://github.com/JuliaParallel/Dagger.jl/issues/267
# This piece of code (else option) below is causing the issue above
# when reduce is repeatedly executed or @btime is used.
# if length(_cols) <= 1
# v = [col_in_chunk_reduce(_f, c, _init, _chunk) for c in _cols]
# else
# values = [Dagger.spawn(col_in_chunk_reduce, _f, c, _init, _chunk) for c in _cols]
# v = fetch.(values)
# end
# (; zip(_cols, v)...)
end
chunk_reduce_spawner = (_d, _f, _columns, _init) -> [Dagger.@spawn chunk_reduce(_f, c, _columns, _init) for c in _d.chunks]
chunk_reduce_results = Dagger.@spawn chunk_reduce_spawner(d, f, columns, init)
construct_single_column = (_col, _chunk_results) -> getindex.(fetch.(_chunk_results), _col)
result_columns = [Dagger.@spawn construct_single_column(c, chunk_reduce_results) for c in columns]
reduce_result_column = (_f, _c, _init) -> reduce(_f, _c; init=_init)
reduce_chunks = [Dagger.@spawn reduce_result_column(f, c, deepcopy(init)) for c in result_columns]
construct_result = (_cols, _vals) -> (; zip(_cols, fetch.(_vals))...)
Dagger.@spawn construct_result(columns, reduce_chunks)
end
"""
reduce(f, gd::GDTable; cols=nothing, prefix="result_", [init]) -> EagerThunk -> NamedTuple
Reduces `gd` using function `f` applied on all columns of the DTable.
Returns results per group in columns with names prefixed with the `prefix` kwarg.
For more information on kwargs see `reduce(f, d::DTable)`
# Examples
```julia
julia> g = Dagger.groupby(DTable((a=repeat('a':'d', inner=2),b=1:8), 2), :a)
GDTable with 4 partitions and 4 keys
Tabletype: NamedTuple
Grouped by: [:a]
julia> fetch(reduce(*, g))
(a = ['a', 'c', 'd', 'b'], result_a = ["aa", "cc", "dd", "bb"], result_b = [2, 30, 56, 12])
```
"""
function reduce(
f,
gd::GDTable;
cols=nothing::Union{Nothing, Vector{Symbol}},
prefix::String="result_",
init=Base._InitialValue())
construct_result = (_keys, _results) -> begin
_results = fetch.(_results)
result_cols = keys(first(_results))
k = [col => getindex.(_keys, i) for (i, col) in enumerate(grouped_cols(gd))]
r = [Symbol(prefix * string(r)) => collect(getindex.(_results, r)) for r in result_cols]
(;k...,r...)
end
spawner = (_f, _cols, _init, _gd) -> Vector{EagerThunk}([reduce(_f, d[2]; cols=_cols, init=deepcopy(_init)) for d in _gd])
v = Dagger.@spawn spawner(f, cols, init, gd)
Dagger.@spawn construct_result(keys(gd), v)
end
"""
filter(f, d::DTable) -> DTable
Filter `d` using `f`.
Returns a filtered `DTable` that can be processed further.
# Examples
```julia
julia> d = DTable((a = [1, 2, 3], b = [1, 1, 1]), 2);
julia> f = filter(x -> x.a < 3, d)
DTable with 2 partitions
Tabletype: NamedTuple
julia> fetch(f)
(a = [1, 2], b = [1, 1])
julia> f = filter(x -> (x.a < 3) .& (x.b > 0), d)
DTable with 2 partitions
Tabletype: NamedTuple
julia> fetch(f)
(a = [1, 2], b = [1, 1])
```
"""
function filter(f, d::DTable)
chunk_wrap = (_chunk, _f) -> begin
m = TableOperations.filter(_f, _chunk)
Tables.materializer(_chunk)(m)
end
DTable(map(c -> Dagger.spawn(chunk_wrap, c, f), d.chunks), d.tabletype)
end
"""
filter(f, gd::GDTable) -> GDTable
Filter `gd` using `f`, returning a filtered `GDTable`.
Calling `trim!` on a filtered `GDTable` will clean up the empty keys and partitions.
# Examples
```julia
julia> g = Dagger.groupby(DTable((a=repeat('a':'d', inner=2),b=1:8), 2), :a)
GDTable with 4 partitions and 4 keys
Tabletype: NamedTuple
Grouped by: [:a]
julia> f = filter(x -> x.a ∈ ['a', 'b'], g)
GDTable with 4 partitions and 4 keys
Tabletype: NamedTuple
Grouped by: [:a]
julia> fetch(f)
(a = ['a', 'a', 'b', 'b'], b = [1, 2, 3, 4])
julia> trim!(f)
GDTable with 2 partitions and 2 keys
Tabletype: NamedTuple
Grouped by: [:a]
```
"""
function filter(f, gd::GDTable)
d = filter(f, gd.dtable)
GDTable(d, gd.cols, gd.index)
end
|
{"hexsha": "a68c6e6e6bc3cc1643adbfee3f67d5120b31dff7", "size": 7035, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/table/operations.jl", "max_stars_repo_name": "mattwigway/Dagger.jl", "max_stars_repo_head_hexsha": "63b9553d44dc44472a7d61d7ddbb116024e1c9f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/table/operations.jl", "max_issues_repo_name": "mattwigway/Dagger.jl", "max_issues_repo_head_hexsha": "63b9553d44dc44472a7d61d7ddbb116024e1c9f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/table/operations.jl", "max_forks_repo_name": "mattwigway/Dagger.jl", "max_forks_repo_head_hexsha": "63b9553d44dc44472a7d61d7ddbb116024e1c9f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3125, "max_line_length": 127, "alphanum_fraction": 0.6288557214, "num_tokens": 2282}
|
from unittest import TestCase
import numpy as np
from numpy.testing import assert_almost_equal
from skfem.assembly import CellBasis
from skfem.element import ElementTriP1
from skfem.mesh import MeshTri
from skfem.utils import projection, enforce, condense, solve
from skfem.models import laplace, mass, unit_load
class InitializeScalarField(TestCase):
def runTest(self):
mesh = MeshTri().refined(5)
basis = CellBasis(mesh, ElementTriP1())
def fun(X):
x, y = X
return x ** 2 + y ** 2
x = projection(fun, basis)
y = fun(mesh.p)
normest = np.linalg.norm(x - y)
self.assertTrue(normest < 0.011,
msg="|x-y| = {}".format(normest))
class TestEnforce(TestCase):
mesh = MeshTri()
def runTest(self):
m = self.mesh
e = ElementTriP1()
basis = CellBasis(m, e)
A = laplace.assemble(basis)
M = mass.assemble(basis)
D = m.boundary_nodes()
assert_almost_equal(enforce(A, D=D).toarray(), np.eye(A.shape[0]))
assert_almost_equal(enforce(M, D=D, diag=0.).toarray(),
np.zeros(M.shape))
enforce(A, D=D, overwrite=True)
assert_almost_equal(A.toarray(), np.eye(A.shape[0]))
def test_simple_cg_solver():
m = MeshTri().refined(3)
basis = CellBasis(m, ElementTriP1())
A0 = laplace.coo_data(basis)
A1 = laplace.assemble(basis)
f = unit_load.assemble(basis)
D = m.boundary_nodes()
x1 = solve(*condense(A1, f, D=D))
f[D] = 0
x0 = A0.solve(f, D=D)
assert_almost_equal(x0, x1)
|
{"hexsha": "db64d15b42e190908fbf5fb08c8495bd647b8cbb", "size": 1637, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_utils.py", "max_stars_repo_name": "gatling-nrl/scikit-fem", "max_stars_repo_head_hexsha": "04730d80d612470b7e802eed4c21dd96b89cef61", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_utils.py", "max_issues_repo_name": "gatling-nrl/scikit-fem", "max_issues_repo_head_hexsha": "04730d80d612470b7e802eed4c21dd96b89cef61", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-01-07T00:56:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T20:06:34.000Z", "max_forks_repo_path": "tests/test_utils.py", "max_forks_repo_name": "gatling-nrl/scikit-fem", "max_forks_repo_head_hexsha": "04730d80d612470b7e802eed4c21dd96b89cef61", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1216216216, "max_line_length": 74, "alphanum_fraction": 0.6035430666, "include": true, "reason": "import numpy,from numpy", "num_tokens": 445}
|
(* (c) Copyright Microsoft Corporation and Inria. You may distribute *)
(* under the terms of either the CeCILL-B License or the CeCILL *)
(* version 2 License, as specified in the README file. *)
(* *)
(***********************************************************************)
Require Import ssreflect ssrfun ssrbool eqtype ssrnat seq div choice.
Require Import fintype finfun finset groups morphisms automorphism.
(***********************************************************************)
(* This file contains the definitions of: *)
(* *)
(* coset_of H == right cosets by the group H (see below) *)
(* coset_groupType H == the groupType induced by 'N(H) / H *)
(* coset H == the canonical projection induced by H *)
(* A / B == the quotient of A by B, *)
(* made to coincide w/ (A :&: 'N(B)) / B *)
(* quotm (nHG: H <| G) (nGf : f@* G = G) (nHf : f@*H = H) *)
(* == the quotient morphism induced by f and H *)
(***********************************************************************)
(* Lemmas for these notions, plus the three isomorphism theorems, and *)
(* counting lemmas for morphisms. *)
(***********************************************************************)
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
Import GroupScope.
(********************************************************************)
(* Cosets are right cosets of elements in the normaliser *)
(********************************************************************)
Section Cosets.
Variables (gT : finGroupType) (Q A : {set gT}).
(* We let cosets coerce to GroupSet.sort, so they inherit the group *)
(* subset base group structure. Later we will define a proper group *)
(* structure on cosets, which will then hide the inherited structure *)
(* once coset_of unifies with FinGroup.sort; the coercion to *)
(* GroupSet.sort will no longer be used. *)
(* Note that for Hx Hy : coset_of H, Hx * Hy : {set gT} can mean *)
(* either set_of_coset (mulg Hx Hy) *)
(* OR mulg (set_of_coset Hx) (set_of_coset Hy) *)
(* However, since the two terms are actually convertible, we can *)
(* live with this ambiguity. *)
(* We take great care that neither the type coset_of H, its *)
(* finGroupType structure, nor the coset H morphism depend on the *)
(* actual group structure of H. Otherwise, rewriting would be *)
(* extremely awkward because all our equalities are stated at the *)
(* set level. *)
(* The trick we use is to interpret coset_of A, when A is any set, *)
(* as the type of cosets of the group <A> generated by A, in the *)
(* group <A, N(A)> generated by A and its normaliser. This coincides *)
(* with the type of bilateral cosets of A when A is a group. We *)
(* restrict the domain of coset_of A to 'N(A), so that we get almost *)
(* all the same conversion equalities as if we had forced A to be a *)
(* group in the first place -- the only exception is that *)
(* 1 : coset_of A : set _ = <<A>> rather than A, *)
(* is covered by the genGid lemma. *)
Notation H := <<A>>.
Definition coset_range := [pred B \in rcosets H 'N(A)].
Record coset_of : Type :=
Coset { set_of_coset :> GroupSet.sort gT; _ : coset_range set_of_coset }.
Canonical Structure coset_subType :=
Eval hnf in [subType for set_of_coset by coset_of_rect].
Definition coset_eqMixin := Eval hnf in [eqMixin of coset_of by <:].
Canonical Structure coset_eqType := Eval hnf in EqType coset_eqMixin.
Definition coset_choiceMixin := [choiceMixin of coset_of by <:].
Canonical Structure coset_choiceType :=
Eval hnf in ChoiceType coset_choiceMixin.
Definition coset_countMixin := [countMixin of coset_of by <:].
Canonical Structure coset_countType := Eval hnf in CountType coset_countMixin.
Canonical Structure coset_subCountType :=
Eval hnf in [subCountType of coset_of].
Definition coset_finMixin := [finMixin of coset_of by <:].
Canonical Structure coset_finType := Eval hnf in FinType coset_finMixin.
Canonical Structure coset_subFinType := Eval hnf in [subFinType of coset_of].
(* We build a new (canonical) structure of groupType for cosets. *)
(* When A is a group, this is the largest possible quotient 'N(A) / A *)
Lemma coset_one_proof : coset_range H.
Proof. by apply/rcosetsP; exists (1 : gT); rewrite (group1, mulg1). Qed.
Definition coset_one := Coset coset_one_proof.
Let nNH := subsetP (norm_gen A).
Lemma coset_range_mul : forall B C : coset_of, coset_range (B * C).
Proof.
case=> B /=; case/rcosetsP=> x Nx ->{B} [C] /=; case/rcosetsP=> y Ny ->{C}.
by apply/rcosetsP; exists (x * y); rewrite !(groupM, rcoset_mul, nNH).
Qed.
Definition coset_mul B C := Coset (coset_range_mul B C).
Lemma coset_range_inv : forall B : coset_of, coset_range B^-1.
Proof.
case=> B /=; case/rcosetsP=> x Nx ->{B}.
rewrite norm_rlcoset ?nNH // invg_lcoset.
by apply/rcosetsP; exists x^-1; rewrite ?groupV.
Qed.
Definition coset_inv B := Coset (coset_range_inv B).
Lemma coset_mulP : associative coset_mul.
Proof. by move=> B C D; apply: val_inj; rewrite /= mulgA. Qed.
Lemma coset_oneP : left_id coset_one coset_mul.
Proof.
case=> B coB; apply: val_inj => /=; case/rcosetsP: coB => x Hx ->{B}.
by rewrite mulgA mulGid.
Qed.
Lemma coset_invP : left_inverse coset_one coset_inv coset_mul.
Proof.
case=> B coB; apply: val_inj => /=; case/rcosetsP: coB => x Hx ->{B}.
rewrite invg_rcoset -mulgA (mulgA H) mulGid.
by rewrite norm_rlcoset ?nNH // -lcosetM mulVg mul1g.
Qed.
Definition coset_of_groupMixin :=
FinGroup.Mixin coset_mulP coset_oneP coset_invP.
Canonical Structure coset_baseGroupType :=
Eval hnf in BaseFinGroupType coset_of_groupMixin.
Canonical Structure coset_groupType := FinGroupType coset_invP.
(* Projection of the initial group type over the cosets groupType *)
Definition coset x : coset_of := insubd (1 : coset_of) (H :* x).
(* This is a primitive lemma -- we'll need to restate it for *)
(* the case where A is a group. *)
Lemma val_coset_prim : forall x, x \in 'N(A) -> coset x :=: H :* x.
Proof.
by move=> x Nx; rewrite val_insubd /= mem_rcosets -{1}(mul1g x) mem_mulg.
Qed.
Lemma coset_morphM : {in 'N(A) &, {morph coset : x y / x * y}}.
Proof.
move=> x y Nx Ny; apply: val_inj.
by rewrite /= !val_coset_prim ?groupM //= rcoset_mul ?nNH.
Qed.
Canonical Structure coset_morphism := Morphism coset_morphM.
Lemma ker_coset_prim : 'ker coset = 'N_H(A).
Proof.
apply/setP=> z; rewrite !in_setI andbC 2!inE -val_eqE /=.
case Nz: (z \in 'N(A)); rewrite ?andbF ?val_coset_prim // !andbT.
by apply/eqP/idP=> [<-| Az]; rewrite (rcoset_refl, rcoset_id).
Qed.
Implicit Type xbar : coset_of.
Lemma coset_mem : forall y xbar, y \in xbar -> coset y = xbar.
Proof.
move=> y [/= Hx NHx] /= Hxy; apply: val_inj=> /=.
case/rcosetsP: NHx (NHx) Hxy => x Nx -> NHx Hxy.
by rewrite val_insubd /= (rcoset_transl Hxy) NHx.
Qed.
(* coset is an inverse to repr *)
Lemma mem_repr_coset : forall xbar, repr xbar \in xbar.
Proof. case=> xbar /=; case/rcosetsP=> x _ ->; exact: mem_repr_rcoset. Qed.
Lemma repr_coset1 : repr (1 : coset_of) = 1.
Proof. exact: repr_group. Qed.
Lemma coset_reprK : cancel (fun xbar => repr xbar) coset.
Proof. move=> xbar; exact: coset_mem (mem_repr_coset xbar). Qed.
(* cosetP is slightly stronger than using repr because we only *)
(* guarantee repr xbar \in 'N(A) when A is a group. *)
Lemma cosetP : forall xbar, {x | x \in 'N(A) & xbar = coset x}.
Proof.
move=> xbar; pose x := repr 'N_xbar(A).
have [xbar_x Nx]: x \in xbar /\ x \in 'N(A).
apply/setIP; rewrite {}/x; case: xbar => Hy /=.
by case/rcosetsP=> y Ny ->; apply: (@mem_repr _ y); rewrite inE rcoset_refl.
by exists x; last rewrite (coset_mem xbar_x).
Qed.
Lemma coset_id : forall x, x \in A -> coset x = 1.
Proof. move=> x Ax; apply: coset_mem; exact: mem_gen. Qed.
Lemma coset_imT : coset @* 'N(A) = setT.
Proof.
by apply/setP=> xbar; case: (cosetP xbar) => x Nx ->; rewrite inE mem_morphim.
Qed.
Lemma coset_im : forall C : {set coset_of}, C \subset coset @* 'N(A).
Proof. by move=> C; rewrite coset_imT subsetT. Qed.
Definition quotient : {set coset_of} := coset @* Q.
Lemma quotientE : quotient = coset @* Q. Proof. by []. Qed.
End Cosets.
Prenex Implicits coset_of coset.
Arguments Scope quotient [_ group_scope group_scope].
Bind Scope group_scope with coset_of.
Notation "A / B" := (quotient A B) : group_scope.
Section CosetOfGroupTheory.
Variables (gT : finGroupType) (H : {group gT}).
Implicit Types A B : {set gT}.
Implicit Types G K : {group gT}.
Implicit Types xbar yb : coset_of H.
Implicit Types C D : {set coset_of H}.
Implicit Types L M : {group coset_of H}.
Canonical Structure quotient_group G A : {group coset_of A} :=
Eval hnf in [group of G / A].
Infix "/" := quotient_group : subgroup_scope.
Lemma val_coset : forall x, x \in 'N(H) -> coset H x :=: H :* x.
Proof. by move=> x Nx; rewrite val_coset_prim // genGid. Qed.
Lemma coset_default : forall x, (x \in 'N(H)) = false -> coset H x = 1.
Proof.
move=> x Nx; apply: val_inj.
by rewrite val_insubd /= mem_rcosets /= genGid mulSGid ?normG ?Nx.
Qed.
Lemma coset_norm : forall xbar, xbar \subset 'N(H).
Proof.
case=> Hx /=; case/rcosetsP=> x Nx ->.
by rewrite genGid mul_subG ?sub1set ?normG.
Qed.
Lemma ker_coset : 'ker (coset H) = H.
Proof. by rewrite ker_coset_prim genGid (setIidPl _) ?normG. Qed.
Lemma coset_idr : forall x, x \in 'N(H) -> coset H x = 1 -> x \in H.
Proof. by move=> x Nx Hx1; rewrite -ker_coset mem_morphpre //= Hx1 set11. Qed.
Lemma repr_coset_norm : forall xbar, repr xbar \in 'N(H).
Proof. move=> xbar; exact: subsetP (coset_norm _) _ (mem_repr_coset _). Qed.
Lemma imset_coset : forall G, coset H @: G = G / H.
Proof.
move=> G; apply/eqP; rewrite eqEsubset andbC imsetS ?subsetIr //=.
apply/subsetP=> xbar; case/imsetP=> x Gx -> {xbar}.
by case Nx: (x \in 'N(H)); rewrite ?(coset_default Nx) ?mem_morphim ?group1.
Qed.
Lemma val_quotient : forall A, val @: (A / H) = rcosets H 'N_A(H).
Proof.
move=> A; apply/setP=> B; apply/imsetP/rcosetsP=> [[xbar Axbar]|[x ANx]] ->{B}.
case/morphimP: Axbar => x Nx Ax ->{xbar}.
by exists x; [rewrite inE Ax | rewrite /= val_coset].
case/setIP: ANx => Ax Nx.
by exists (coset H x); [apply/morphimP; exists x | rewrite /= val_coset].
Qed.
Lemma card_quotient_subnorm : forall A, #|A / H| = #|'N_A(H) : H|.
Proof. by move=> A; rewrite -(card_imset _ val_inj) val_quotient. Qed.
Lemma card_quotient : forall A, A \subset 'N(H) -> #|A / H| = #|A : H|.
Proof. by move=> A nHA; rewrite card_quotient_subnorm (setIidPl nHA). Qed.
(* Specializing all the morphisms lemmas that have different assumptions *)
(* (e.g., because 'ker (coset H) = H), or conclusions (e.g., because we use *)
(* A / H rather than coset H @* A). We may want to reevaluate later, and *)
(* eliminate variants that aren't used . *)
(* Variant of morph1; no specialization for other morph lemmas. *)
Lemma coset1 : coset H 1 :=: H.
Proof. by rewrite morph1 /= genGid. Qed.
(* Variant of kerE. *)
Lemma cosetpre1 : coset H @*^-1 1 = H.
Proof. by rewrite -kerE ker_coset. Qed.
(* Variant of morphimEdom; mophimE[sub] covered by imset_coset. *)
(* morph[im|pre]Iim are also covered by quotientT. *)
Lemma quotientT : 'N(H) / H = setT.
Proof. exact: coset_imT. Qed.
(* Variant of morphimIdom. *)
Lemma quotientInorm : forall A, 'N_A(H) / H = A / H.
Proof. by move=> A; rewrite /quotient setIC morphimIdom. Qed.
Lemma mem_quotient : forall x G, x \in G -> coset H x \in G / H.
Proof. by move=> x G Gx; rewrite -imset_coset mem_imset. Qed.
Lemma quotientS : forall A B, A \subset B -> A / H \subset B / H.
Proof. exact: morphimS. Qed.
Lemma quotient0 : set0 / H = set0.
Proof. exact: morphim0. Qed.
Lemma quotient_set1 : forall x, x \in 'N(H) -> [set x] / H = [set coset H x].
Proof. exact: morphim_set1. Qed.
Lemma quotient1 : 1 / H = 1.
Proof. exact: morphim1. Qed.
Lemma quotientV : forall A, A^-1 / H = (A / H)^-1.
Proof. exact: morphimV. Qed.
Lemma quotientMl : forall A B,
A \subset 'N(H) -> A * B / H = (A / H) * (B / H).
Proof. exact: morphimMl. Qed.
Lemma quotientMr : forall A B,
B \subset 'N(H) -> A * B / H = (A / H) * (B / H).
Proof. exact: morphimMr. Qed.
Lemma cosetpreM : forall C D,
coset H @*^-1 (C * D) = coset H @*^-1 C * coset H @*^-1 D.
Proof. by move=> C D; rewrite morphpreMl ?coset_im. Qed.
Lemma quotientJ : forall A x, x \in 'N(H) -> A :^ x / H = (A / H) :^ coset H x.
Proof. exact: morphimJ. Qed.
Lemma quotientU : forall A B, (A :|: B) / H = A / H :|: B / H.
Proof. exact: morphimU. Qed.
Lemma quotientI : forall A B, (A :&: B) / H \subset A / H :&: B / H.
Proof. exact: morphimI. Qed.
Lemma coset_kerl : forall x y, x \in H -> coset H (x * y) = coset H y.
Proof.
move=> x y Hx; case Ny: (y \in 'N(H)); first by rewrite mkerl ?ker_coset.
by rewrite !coset_default ?groupMl // (subsetP (normG H)).
Qed.
Lemma coset_kerr : forall x y, y \in H -> coset H (x * y) = coset H x.
Proof.
move=> x y Hy; case Nx: (x \in 'N(H)); first by rewrite mkerr ?ker_coset.
by rewrite !coset_default ?groupMr // (subsetP (normG H)).
Qed.
Lemma rcoset_kercosetP : forall x y,
x \in 'N(H) -> y \in 'N(H) -> reflect (coset H x = coset H y) (x \in H :* y).
Proof. rewrite -{6}ker_coset; exact: rcoset_kerP. Qed.
Lemma kercoset_rcoset : forall x y,
x \in 'N(H) -> y \in 'N(H) ->
coset H x = coset H y -> exists2 z, z \in H & x = z * y.
Proof. move=> x y Gx Gy eqfxy; rewrite -ker_coset; exact: ker_rcoset. Qed.
Lemma quotientGI : forall G A, H \subset G -> (G :&: A) / H = G / H :&: A / H.
Proof. rewrite -{1}ker_coset; exact: morphimGI. Qed.
Lemma quotientIG : forall A G, H \subset G -> (A :&: G) / H = A / H :&: G / H.
Proof. rewrite -{1}ker_coset. exact: morphimIG. Qed.
Lemma quotientD : forall A B, A / H :\: B / H \subset (A :\: B) / H.
Proof. exact: morphimD. Qed.
Lemma quotientDG : forall A G, H \subset G -> (A :\: G) / H = A / H :\: G / H.
Proof. rewrite -{1}ker_coset; exact: morphimDG. Qed.
Lemma quotientK : forall A, A \subset 'N(H) -> coset H @*^-1 (A / H) = H * A.
Proof. rewrite -{8}ker_coset; exact: morphimK. Qed.
Lemma quotientGK : forall G, H <| G -> coset H @*^-1 (G / H) = G.
Proof. move=> G; case/andP; rewrite -{1}ker_coset; exact: morphimGK. Qed.
Lemma cosetpre_set1 : forall x,
x \in 'N(H) -> coset H @*^-1 [set coset H x] = H :* x.
Proof. by rewrite -{9}ker_coset; exact: morphpre_set1. Qed.
Lemma cosetpre_set1_coset : forall xbar, coset H @*^-1 [set xbar] = xbar.
Proof.
move=> xbar; case: (cosetP xbar) => x Nx ->.
by rewrite cosetpre_set1 ?val_coset.
Qed.
Lemma cosetpreK : forall C, coset H @*^-1 C / H = C.
Proof. by move=> C; rewrite /quotient morphpreK ?coset_im. Qed.
(* Variant of morhphim_ker *)
Lemma trivg_quotient : H / H = 1.
Proof. by rewrite -{3}ker_coset /quotient morphim_ker. Qed.
Lemma sub_cosetpre : forall M, H \subset coset H @*^-1 M.
Proof. rewrite -{3}ker_coset; exact: ker_sub_pre. Qed.
Lemma normal_cosetpre : forall M, H <| coset H @*^-1 M.
Proof. rewrite -{3}ker_coset; exact: ker_normal_pre. Qed.
Lemma cosetpreSK : forall C D,
(coset H @*^-1 C \subset coset H @*^-1 D) = (C \subset D).
Proof. by move=> C D; rewrite morphpreSK ?coset_im. Qed.
Lemma sub_quotient_pre : forall A C,
A \subset 'N(H) -> (A / H \subset C) = (A \subset coset H @*^-1 C).
Proof. by move=> A C; exact: sub_morphim_pre. Qed.
Lemma sub_cosetpre_quo : forall C G,
H <| G -> (coset H @*^-1 C \subset G) = (C \subset G / H).
Proof. by move=> C G nHG; rewrite -cosetpreSK quotientGK. Qed.
(* Variant of ker_trivg_morphim. *)
Lemma quotient_sub1 : forall A,
A \subset 'N(H) -> (A / H \subset [1]) = (A \subset H).
Proof. by move=> A nHA /=; rewrite -{10}ker_coset ker_trivg_morphim nHA. Qed.
Lemma quotientSK : forall A B,
A \subset 'N(H) -> (A / H \subset B / H) = (A \subset H * B).
Proof. by move=> A B nHA; rewrite morphimSK ?ker_coset. Qed.
Lemma quotientSGK : forall A G,
A \subset 'N(H) -> H \subset G -> (A / H \subset G / H) = (A \subset G).
Proof. rewrite -{2}ker_coset; exact: morphimSGK. Qed.
Lemma quotient_injG :
{in [pred G : {group gT} | H <| G] &, injective (fun G => G / H)}.
Proof. rewrite /normal -{1}ker_coset; exact: morphim_injG. Qed.
Lemma quotient_inj : forall G1 G2,
H <| G1 -> H <| G2 -> G1 / H = G2 / H -> G1 :=: G2.
Proof. rewrite /normal -{1 3}ker_coset; exact: morphim_inj. Qed.
Lemma quotient_gen : forall A, A \subset 'N(H) -> <<A>> / H = <<A / H>>.
Proof. exact: morphim_gen. Qed.
Lemma cosetpre_gen : forall C,
1 \in C -> coset H @*^-1 <<C>> = <<coset H @*^-1 C>>.
Proof. by move=> C C1; rewrite morphpre_gen ?coset_im. Qed.
Lemma quotientR : forall A B,
A \subset 'N(H) -> B \subset 'N(H) -> [~: A, B] / H = [~: A / H, B / H].
Proof. exact: morphimR. Qed.
Lemma quotient_norm : forall A, 'N(A) / H \subset 'N(A / H).
Proof. exact: morphim_norm. Qed.
Lemma quotient_norms : forall A B, A \subset 'N(B) -> A / H \subset 'N(B / H).
Proof. exact: morphim_norms. Qed.
Lemma quotient_subnorm : forall A B, 'N_A(B) / H \subset 'N_(A / H)(B / H).
Proof. exact: morphim_subnorm. Qed.
Lemma quotient_normal : forall A B, A <| B -> A / H <| B / H.
Proof. exact: morphim_normal. Qed.
Lemma quotient_cent1 : forall x, 'C[x] / H \subset 'C[coset H x].
Proof.
move=> x; case Nx: (x \in 'N(H)); first exact: morphim_cent1.
by rewrite coset_default // cent11T subsetT.
Qed.
Lemma quotient_cent1s : forall A x,
A \subset 'C[x] -> A / H \subset 'C[coset H x].
Proof.
move=> A x sAC; exact: subset_trans (quotientS sAC) (quotient_cent1 x).
Qed.
Lemma quotient_subcent1 : forall A x,
'C_A[x] / H \subset 'C_(A / H)[coset H x].
Proof.
move=> A x; exact: subset_trans (quotientI _ _) (setIS _ (quotient_cent1 x)).
Qed.
Lemma quotient_cent : forall A, 'C(A) / H \subset 'C(A / H).
Proof. exact: morphim_cent. Qed.
Lemma quotient_cents : forall A B,
A \subset 'C(B) -> A / H \subset 'C(B / H).
Proof. exact: morphim_cents. Qed.
Lemma quotient_abelian : forall A, abelian A -> abelian (A / H).
Proof. exact: morphim_abelian. Qed.
Lemma quotient_subcent : forall A B, 'C_A(B) / H \subset 'C_(A / H)(B / H).
Proof. exact: morphim_subcent. Qed.
Lemma cosetpre_normal : forall C D,
(coset H @*^-1 C <| coset H @*^-1 D) = (C <| D).
Proof. by move=> C D; rewrite morphpre_normal ?coset_im. Qed.
Lemma quotient_normG : forall G, H <| G -> 'N(G) / H = 'N(G / H).
Proof.
move=> G; case/andP=> sHG nHG.
by rewrite [_ / _]morphim_normG ?ker_coset // coset_imT setTI.
Qed.
Lemma quotient_subnormG : forall A G,
H <| G -> 'N_A(G) / H = 'N_(A / H)(G / H).
Proof.
by move=> A G; case/andP=> sHG nHG; rewrite -morphim_subnormG ?ker_coset.
Qed.
Lemma cosetpre_cent1 : forall x,
'C_('N(H))[x] \subset coset H @*^-1 'C[coset H x].
Proof.
move=> x; case Nx: (x \in 'N(H)); first by rewrite morphpre_cent1.
by rewrite coset_default // cent11T morphpreT subsetIl.
Qed.
Lemma cosetpre_cent1s : forall C x,
coset H @*^-1 C \subset 'C[x] -> C \subset 'C[coset H x].
Proof.
move=> C x sC; rewrite -cosetpreSK; apply: subset_trans (cosetpre_cent1 x).
by rewrite subsetI subsetIl.
Qed.
Lemma cosetpre_subcent1 : forall C x,
'C_(coset H @*^-1 C)[x] \subset coset H @*^-1 'C_C[coset H x].
Proof.
move=> C x; rewrite -morphpreIdom -setIA setICA morphpreI setIS //.
exact: cosetpre_cent1.
Qed.
Lemma cosetpre_cent : forall A, 'C_('N(H))(A) \subset coset H @*^-1 'C(A / H).
Proof. exact: morphpre_cent. Qed.
Lemma cosetpre_cents : forall A C,
coset H @*^-1 C \subset 'C(A) -> C \subset 'C(A / H).
Proof. by move=> A C; apply: morphpre_cents; rewrite ?coset_im. Qed.
Lemma cosetpre_subcent : forall C A,
'C_(coset H @*^-1 C)(A) \subset coset H @*^-1 'C_C(A / H).
Proof. exact: morphpre_subcent. Qed.
Section InverseImage.
Variables (G : {group gT}) (Kbar : {group coset_of H}).
Hypothesis nHG : H <| G.
CoInductive inv_quotient_spec (P : pred {group gT}) : Prop :=
InvQuotientSpec K of Kbar :=: K / H & H \subset K & P K.
Lemma inv_quotientS :
Kbar \subset G / H -> inv_quotient_spec (fun K => K \subset G).
Proof.
case/andP: nHG => sHG nHG' sKbarG.
have sKdH: Kbar \subset 'N(H) / H by rewrite (subset_trans sKbarG) ?morphimS.
exists (coset H @*^-1 Kbar)%G; first by rewrite cosetpreK.
by rewrite -{1}ker_coset morphpreS ?sub1G.
by rewrite sub_cosetpre_quo.
Qed.
Lemma inv_quotientN : Kbar <| G / H -> inv_quotient_spec (fun K => K <| G).
Proof.
move=> nKbar; case/inv_quotientS: (normal_sub nKbar) => K defKbar sHK sKG.
exists K => //; rewrite defKbar -cosetpre_normal !quotientGK // in nKbar.
exact: normalS nHG.
Qed.
End InverseImage.
Lemma quotient_mulg : forall A, A * H / H = A / H.
Proof.
move=> A; rewrite [_ /_]morphimMr ?normG //= -!quotientE.
by rewrite trivg_quotient mulg1.
Qed.
Lemma quotient_mulgr : forall A, H * A / H = A / H.
Proof.
move=> A; rewrite [_ /_]morphimMl ?normG //= -!quotientE.
by rewrite trivg_quotient mul1g.
Qed.
Lemma quotient_mulgen : forall G, G \subset 'N(H) -> G <*> H / H = G / H.
Proof.
move=> G nHG; rewrite -genM_mulgen quotientE morphim_gen -?quotientE.
by rewrite quotient_mulg genGid.
by rewrite -(mulSGid nHG) mulgS ?normG.
Qed.
Section Injective.
Variables (G : {group gT}).
Hypotheses (nHG : G \subset 'N(H)) (trGH : G :&: H = 1).
Lemma quotient_isom : isom G (G / H) (restrm nHG (coset H)).
Proof.
by apply/isomP; rewrite ker_restrm ker_coset morphim_restrm setIid trGH.
Qed.
Lemma quotient_isog : isog G (G / H).
Proof. exact: isom_isog quotient_isom. Qed.
End Injective.
End CosetOfGroupTheory.
Notation "A / H" := (quotient_group A H) : subgroup_scope.
Section Quotient1.
Variables (gT : finGroupType) (A : {set gT}).
Lemma coset1_injm : 'injm (@coset gT 1).
Proof. by rewrite ker_coset /=. Qed.
Lemma quotient1_isom : isom A (A / 1) (coset 1).
Proof. by apply: sub_isom coset1_injm; rewrite ?norms1. Qed.
Lemma quotient1_isog : isog A (A / 1).
Proof. apply: isom_isog quotient1_isom; exact: norms1. Qed.
End Quotient1.
Section QuotientMorphism.
Variable (gT : finGroupType) (G H : {group gT}) (f : {morphism G >-> gT}).
Implicit Types A : {set gT}.
Implicit Types B : {set (coset_groupType H)}.
Hypotheses (nHG : H <| G) (nGf : f @* G = G) (nHf : f @* H = H).
Notation fH := (coset H \o f).
Lemma quotm_restr_proof : G \subset 'dom fH.
Proof. by rewrite -sub_morphim_pre // nGf; case/andP: nHG. Qed.
Notation fH_G := (restrm quotm_restr_proof fH).
Lemma quotm_fact_proof1 : G \subset 'N(H).
Proof. by case/andP: nHG. Qed.
Lemma quotm_fact_proof2 : 'ker (coset H) \subset 'ker fH_G.
Proof.
case/andP: nHG => sHG _; rewrite ker_restrm ker_comp ker_coset subsetI.
by rewrite -sub_morphim_pre sHG ?nHf /=.
Qed.
Definition quotm := factm quotm_fact_proof1 quotm_fact_proof2.
Canonical Structure quotm_morphism := Eval hnf in [morphism of quotm].
Lemma morphim_quotm : forall A, quotm @* (A / H) = f @* A / H.
Proof.
case/andP: nHG => sHG nHG' A.
by rewrite morphim_factm morphim_restrm morphim_comp morphimIdom.
Qed.
Lemma cosetpre_quotm : forall A,
quotm @*^-1 (A / H) = f @*^-1 A / H.
Proof.
case/andP: nHG => sHG nHG' A; rewrite morphpre_factm morphpre_restrm.
rewrite morphpre_comp morphpreIdom quotientE -(morphimIdom _ A) /= -quotientE.
rewrite morphimK ?subsetIl // ker_coset morphpreMl ?nGf // -{3}nHf morphimK //.
rewrite -morphpreIim setIA -(morphpreIim _ A) !nGf (setIidPl nHG').
rewrite [_ * H]normC; last by apply: subset_trans nHG'; rewrite subsetIl.
by rewrite -mulgA quotient_mulgr -morphpreMl (mul1g, sub1G).
Qed.
Lemma ker_quotm : 'ker quotm = 'ker f / H.
Proof. by rewrite -cosetpre_quotm /quotient morphim1. Qed.
Lemma injm_quotm : 'injm f -> 'injm quotm.
Proof. by move/trivgP=> /= kf1; rewrite ker_quotm kf1 quotientE morphim1. Qed.
End QuotientMorphism.
Section FirstIsomorphism.
Variables aT rT : finGroupType.
Lemma first_isom : forall (G : {group aT}) (f : {morphism G >-> rT}),
{g : {morphism G / 'ker f >-> rT} | 'injm g &
forall A : {set aT}, g @* (A / 'ker f) = f @* A}.
Proof.
move=> G f; have nkG := ker_norm f.
have skk: 'ker (coset ('ker f)) \subset 'ker f by rewrite ker_coset.
exists (factm_morphism nkG skk) => /=; last exact: morphim_factm.
by rewrite ker_factm -quotientE trivg_quotient.
Qed.
Variables (G H : {group aT}) (f : {morphism G >-> rT}).
Hypothesis sHG : H \subset G.
Lemma first_isog : (G / 'ker f) \isog (f @* G).
Proof.
by case: (first_isom f) => g injg im_g; apply/isogP; exists g; rewrite ?im_g.
Qed.
Lemma first_isom_loc : {g : {morphism H / 'ker_H f >-> rT} |
'injm g & forall A : {set aT}, A \subset H -> g @* (A / 'ker_H f) = f @* A}.
Proof.
case: (first_isom (restrm_morphism sHG f)).
rewrite ker_restrm => g injg im_g; exists g => // A sAH.
by rewrite im_g morphim_restrm (setIidPr sAH).
Qed.
Lemma first_isog_loc : (H / 'ker_H f) \isog (f @* H).
Proof.
by case: first_isom_loc => g injg im_g; apply/isogP; exists g; rewrite ?im_g.
Qed.
End FirstIsomorphism.
Section SecondIsomorphism.
Variables (gT : finGroupType) (H K : {group gT}).
Hypothesis nKH : H \subset 'N(K).
Lemma second_isom : {f : {morphism H / (K :&: H) >-> coset_of K} |
'injm f & forall A : {set gT}, A \subset H -> f @* (A / (K :&: H)) = A / K}.
Proof.
have ->: K :&: H = 'ker_H (coset K) by rewrite ker_coset setIC.
exact: first_isom_loc.
Qed.
Lemma second_isog : H / (K :&: H) \isog H / K.
Proof. rewrite setIC -{1 3}(ker_coset K); exact: first_isog_loc. Qed.
Lemma weak_second_isog : H / (K :&: H) \isog H * K / K.
Proof. rewrite quotient_mulg; exact: second_isog. Qed.
End SecondIsomorphism.
Section ThirdIsomorphism.
Variables (gT : finGroupType) (G H K : {group gT}).
Hypothesis sHK : H \subset K.
Hypothesis snHG : H <| G.
Hypothesis snKG : K <| G.
Theorem third_isom : {f : {morphism (G / H) / (K / H) >-> coset_of K} | 'injm f
& forall A : {set gT}, A \subset G -> f @* (A / H / (K / H)) = A / K}.
Proof.
case/andP: snKG => sKG nKG; case/andP: snHG => sHG nHG.
have sHker: 'ker (coset H) \subset 'ker (restrm nKG (coset K)).
by rewrite ker_restrm !ker_coset subsetI sHG.
have:= first_isom_loc (factm_morphism nHG sHker) (subxx _) => /=.
rewrite ker_factm_loc ker_restrm ker_coset !(setIidPr sKG) /= -!quotientE.
case=> f injf im_f; exists f => // A sAG; rewrite im_f ?morphimS //.
by rewrite morphim_factm morphim_restrm (setIidPr sAG).
Qed.
Theorem third_isog : (G / H / (K / H)) \isog (G / K).
Proof.
by case: third_isom => f inj_f im_f; apply/isogP; exists f; rewrite ?im_f.
Qed.
End ThirdIsomorphism.
Lemma char_from_quotient : forall (gT : finGroupType) (G H K : {group gT}),
H <| K -> H \char G -> K / H \char G / H -> K \char G.
Proof.
move=> gT G H K; case/andP=> sHK nHK chHG; case/charP=> sKG chKG.
have nHG := char_normal chHG; case: (andP nHG) => sHG nHG'.
rewrite -(ker_coset H) in sHK; rewrite morphimSGK ?ker_coset // in sKG.
apply/charP; split=> // f injf Gf; apply/morphim_fixP => //.
have{chHG} Hf: f @* H = H by case/charP: chHG => _; apply.
rewrite -(morphimSGK _ sHK) -?quotientE; last first.
by apply: subset_trans nHG'; rewrite -{3}Gf morphimS.
rewrite -(morphim_quotm nHG Gf Hf) {}chKG // ?injm_quotm //.
by rewrite morphim_quotm Gf.
Qed.
(* Counting lemmas for morphisms. *)
Section CardMorphism.
Variables (aT rT : finGroupType) (D : {group aT}) (f : {morphism D >-> rT}).
Implicit Types G H : {group aT}.
Implicit Types L M : {group rT}.
Lemma card_morphim : forall G, #|f @* G| = #|D :&: G : 'ker f|.
Proof.
move=> G; rewrite -morphimIdom -indexgI -card_quotient; last first.
by rewrite normsI ?normG ?subIset ?ker_norm.
by apply: esym (isog_card _); rewrite first_isog_loc ?subsetIl.
Qed.
Lemma dvdn_morphim : forall G, #|f @* G| %| #|G|.
Proof.
move=> G; rewrite card_morphim (dvdn_trans (dvdn_indexg _ _)) //.
by rewrite cardSg ?subsetIr.
Qed.
Lemma index_morphim_ker : forall G H,
H \subset G -> G \subset D ->
(#|f @* G : f @* H| * #|'ker_G f : H|)%N = #|G : H|.
Proof.
move=> G H sHG sGD; apply/eqP.
rewrite -(eqn_pmul2l (cardG_gt0 (f @* H))) mulnA LaGrange ?morphimS //.
rewrite !card_morphim (setIidPr sGD) (setIidPr (subset_trans sHG sGD)).
rewrite -(eqn_pmul2l (cardG_gt0 ('ker_H f))) /=.
by rewrite -{1}(setIidPr sHG) setIAC mulnCA mulnC mulnA !LaGrangeI LaGrange.
Qed.
Lemma index_morphim : forall G H,
G :&: H \subset D -> #|f @* G : f @* H| %| #|G : H|.
Proof.
move=> G H dGH; rewrite -(indexgI G) -(setIidPr dGH) setIA.
apply: dvdn_trans (indexSg (subsetIl _ H) (subsetIr D G)).
rewrite -index_morphim_ker ?subsetIl ?subsetIr ?dvdn_mulr //= morphimIdom.
by rewrite indexgS ?morphimS ?subsetIr.
Qed.
Lemma index_injm : forall G H,
'injm f -> G \subset D -> #|f @* G : f @* H| = #|G : H|.
Proof.
move=> G H injf dG; rewrite -{2}(setIidPr dG) -(indexgI _ H) /=.
rewrite -index_morphim_ker ?subsetIl ?subsetIr //= setIAC morphimIdom setIC.
rewrite injmI ?subsetIr // indexgI /= morphimIdom setIC ker_injm //.
by rewrite -(indexgI (1 :&: _)) /= -setIA !(setIidPl (sub1G _)) indexgg muln1.
Qed.
Lemma card_morphpre : forall L,
L \subset f @* D -> #|f @*^-1 L| = (#|'ker f| * #|L|)%N.
Proof.
move=> L; move/morphpreK=> defL; rewrite -{2}defL card_morphim morphpreIdom.
by rewrite LaGrange // morphpreS ?sub1G.
Qed.
Lemma index_morphpre : forall L M,
L \subset f @* D -> #|f @*^-1 L : f @*^-1 M| = #|L : M|.
Proof.
move=> L M dL; rewrite -!divgI -morphpreI card_morphpre //.
have: L :&: M \subset f @* D by rewrite subIset ?dL.
by move/card_morphpre->; rewrite divn_pmul2l ?cardG_gt0.
Qed.
End CardMorphism.
Section CardCosetpre.
Variables (gT : finGroupType) (G H K : {group gT}) (L M : {group coset_of H}).
Lemma dvdn_quotient : #|G / H| %| #|G|.
Proof. exact: dvdn_morphim. Qed.
Lemma index_quotient_ker :
K \subset G -> G \subset 'N(H) ->
(#|G / H : K / H| * #|G :&: H : K|)%N = #|G : K|.
Proof. rewrite -{5}(ker_coset H); exact: index_morphim_ker. Qed.
Lemma index_quotient : G :&: K \subset 'N(H) -> #|G / H : K / H| %| #|G : K|.
Proof. exact: index_morphim. Qed.
Lemma index_quotient_eq :
G :&: H \subset K -> K \subset G -> G \subset 'N(H) ->
#|G / H : K / H| = #|G : K|.
Proof.
move=> sGH_K sKG sGN; rewrite -index_quotient_ker {sKG sGN}//.
by rewrite -(indexgI _ K) (setIidPl sGH_K) indexgg muln1.
Qed.
Lemma card_cosetpre : #|coset H @*^-1 L| = (#|H| * #|L|)%N.
Proof. by rewrite card_morphpre ?ker_coset ?coset_im. Qed.
Lemma index_cosetpre : #|coset H @*^-1 L : coset H @*^-1 M| = #|L : M|.
Proof. by rewrite index_morphpre ?coset_im. Qed.
End CardCosetpre.
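(* Informal summary (added for orientation; not part of the formal text):
   in the notation of this file, the isomorphism theorems proved above read
     first:   G / 'ker f          \isog  f @* G
     second:  H / (K :&: H)       \isog  H * K / K      (H \subset 'N(K))
     third:   (G / H) / (K / H)   \isog  G / K          (H \subset K, H <| G, K <| G)
   and the counting corollaries give #|coset H @*^-1 L| = (#|H| * #|L|)%N and
   #|G / H : K / H| = #|G : K| under the stated side conditions. *)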
|
{"author": "Wassasin", "repo": "ssreflect", "sha": "45cf056aa48bec1e7e2bbb77cb4458d4bddf43e4", "save_path": "github-repos/coq/Wassasin-ssreflect", "path": "github-repos/coq/Wassasin-ssreflect/ssreflect-45cf056aa48bec1e7e2bbb77cb4458d4bddf43e4/theories/normal.v"}
|
# explicit imports for names used below (copy, rd, np); the wildcard imports
# may also re-export them, but relying on that is fragile
import copy
import random as rd
import numpy as np
from src.Utils.Fitness import *
from src.Utils.Population import *
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial import distance
from src.Utils.Graphs import *
from time import time
from src.Utils.HyperParameters import *
class MOSOSARM:
def __init__(self,nbItem,populationSize,nbIteration,nbObjectifs,objectiveNames,data,
hyperParameters = HyperParameters(['ruthlessRatio']),nbParasitismModification=5):
self.population = Population('horizontal_binary', populationSize, nbItem)
self.nbItem = nbItem
self.nbIteration = nbIteration
self.nbObjectifs = nbObjectifs
self.fitness = Fitness('horizontal_binary', objectiveNames, populationSize,nbItem )
self.bestInd = copy.deepcopy(self.population.population[rd.randint(0,populationSize-1)])
self.bestIndScore = np.zeros(nbObjectifs,dtype=float)
self.nbParasitismModification = nbParasitismModification
self.distance = np.array([[0 for i in range(populationSize)] for j in range(populationSize)])
self.executionTime = 0
self.fitness.ComputeScorePopulation(self.population.population, data)
self.UpdateBest()
def UpdateBest(self):
indexs = np.arange(self.population.populationSize)
paretoFront = np.ones(self.population.populationSize)
for i in range(self.population.populationSize):
for j in range(self.population.populationSize):
domination = self.fitness.Domination(self.fitness.scores[i],self.fitness.scores[j])
if domination == 1:
paretoFront[i] = 0
break
candidate = indexs[paretoFront == 1]
index = rd.choice(candidate)
self.bestInd = copy.deepcopy(self.population.population[index])
self.bestIndScore = copy.deepcopy(self.fitness.scores[index])
def Mutualism(self,data):
for i in range(self.population.populationSize):
j = rd.randint(0,self.population.populationSize-1)
while j == i:
j = rd.randint(0, self.population.populationSize - 1)
mutualVector = (self.population.population[i]+self.population.population[j])/2
BF1 = rd.randint(1,2)
            if BF1 == 1:
                BF2 = 2
            else:
                BF2 = 1
iNew = copy.deepcopy(self.population.population[i]) + rd.random() * (self.bestInd - mutualVector*BF1)
iNewScore = self.fitness.ComputeScoreIndividual(iNew,data)
jNew = copy.deepcopy(self.population.population[j]) + rd.random() * (self.bestInd - mutualVector*BF2)
jNewScore = self.fitness.ComputeScoreIndividual(jNew, data)
dominationI = self.fitness.Domination(self.fitness.scores[i],iNewScore)
if dominationI == 1:
self.population.population[i] = copy.deepcopy(iNew)
dominationJ = self.fitness.Domination(self.fitness.scores[j],jNewScore)
if dominationJ == 1:
self.population.population[j] = copy.deepcopy(jNew)
def Commensalism(self,data):
for i in range(self.population.populationSize):
j = rd.randint(0,self.population.populationSize-1)
while j == i:
j = rd.randint(0, self.population.populationSize - 1)
iNew = self.population.population[i]+((rd.random()*2)-1)*(self.bestInd-self.population.population[j])
iNewScore = self.fitness.ComputeScoreIndividual(iNew, data)
dominationI = self.fitness.Domination(self.fitness.scores[i],iNewScore)
if dominationI == 1:
self.population.population[i] = copy.deepcopy(iNew)
def Parasitism(self,data):
for i in range(self.population.populationSize):
j = rd.randint(0,self.population.populationSize-1)
while j == i:
j = rd.randint(0, self.population.populationSize - 1)
iNew = copy.deepcopy(self.population.population[i])
nbChange = rd.randint(1,self.nbParasitismModification)
for k in range(nbChange):
index = rd.randint(0,self.nbItem*2-1)
iNew[index] = -1*iNew[index]
iNewScore = self.fitness.ComputeScoreIndividual(iNew, data)
dominationI = self.fitness.Domination(self.fitness.scores[j], iNewScore)
if dominationI == 1:
self.population.population[j] = copy.deepcopy(iNew)
def Run(self,data,i):
t1 = time()
self.Mutualism(data)
self.Commensalism(data)
self.Parasitism(data)
self.population.CheckIfNull()
self.fitness.ComputeScorePopulation(self.population.population, data)
self.executionTime = time() - t1
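# --- Illustration (added; not part of the original algorithm) ----------------
# Minimal, self-contained sketch of the Pareto-front filter that UpdateBest
# relies on.  It assumes maximisation on every objective; `dominates` below is
# a hypothetical stand-in for Fitness.Domination, with a return value of 1
# meaning "the second score dominates the first".
if __name__ == '__main__':
    import numpy as _np
    def dominates(a, b):
        # b dominates a iff b >= a on every objective and b > a on at least one
        return int(_np.all(b >= a) and _np.any(b > a))
    scores = _np.array([[0.9, 0.1], [0.5, 0.5], [0.4, 0.4]])
    front = [i for i in range(len(scores))
             if not any(dominates(scores[i], scores[j])
                        for j in range(len(scores)) if j != i)]
    assert front == [0, 1]  # [0.4, 0.4] is dominated by [0.5, 0.5]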
|
{"hexsha": "9236ef0362195b69eb5d519dfe8c2fcdecca4da6", "size": 4780, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Algorithm/MOSOSARM.py", "max_stars_repo_name": "TheophileBERTELOOT/MOEA-ARM", "max_stars_repo_head_hexsha": "ef1736049814c998ddbdf49fefa1e31e4908fb71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Algorithm/MOSOSARM.py", "max_issues_repo_name": "TheophileBERTELOOT/MOEA-ARM", "max_issues_repo_head_hexsha": "ef1736049814c998ddbdf49fefa1e31e4908fb71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Algorithm/MOSOSARM.py", "max_forks_repo_name": "TheophileBERTELOOT/MOEA-ARM", "max_forks_repo_head_hexsha": "ef1736049814c998ddbdf49fefa1e31e4908fb71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.862745098, "max_line_length": 113, "alphanum_fraction": 0.6426778243, "include": true, "reason": "from scipy", "num_tokens": 1049}
|
"""
This module contains the class OffloadingCommon, which is the base class of all algorithms (benchmarks, cco and decor).
OffloadingCommon defines several points in a computation offloading problem.
[--
In order to avoid Multiple Inheritance, CcoAlgorithm only inherit from Racos. Similar methods and properties are
copied from OffloadingCommon, which are marked by annotations.
--]
Author:
Hailiang Zhao, Cheng Zhang
"""
from cross_edge_offloading.utils.tool_function import ToolFunction
import numpy as np
import random
class OffloadingCommon(object):
"""
This class contains several points in a computation offloading problem, including:
(1) the objective function of the cross-edge computation offloading problem;
(2) the solution of the problem (edge_selection, harvested_energys).
"""
def __init__(self, parameter):
"""
Initialize key parameters in offloading problems of one time slot.
:param parameter: the instance of class Parameter
"""
self.__parameter = parameter
# =============================== state information ===============================
self.__battery_energy_levels = np.repeat(parameter.get_perturbation_para() / 2, parameter.get_user_num())
self.__virtual_energy_levels = self.__battery_energy_levels - \
np.repeat(parameter.get_perturbation_para(), parameter.get_user_num())
# =============================== independent variables ===============================
# edge_selections is a list with every element (edge_selection) being a numpy array,
# which is the feasible solution (independent var) of the problem $\mathcal{P}_2^{es}$
# 'self.edge_selections' stores the final optimal solution
self.__edge_selections = []
self.__harvested_energys = []
def obtain_time_consumption(self, division, edge_selection, channel_power_gains):
"""
Calculate the time consumption on transmission and edge execution for one mobile device.
:param division: the number of chosen edge sites (not zero)
        :param edge_selection: the edge selection decision of one mobile device
        :param channel_power_gains: the channel power gains of one mobile device to every connectable server
:return: the time consumption on transmission and edge execution
"""
parameter = self.get_parameter()
transmit_times = ToolFunction.obtain_transmit_times(division, edge_selection, parameter, channel_power_gains)
edge_exe_times = ToolFunction.obtain_edge_exe_times(division, parameter)
edge_times = transmit_times + edge_exe_times
time_consumption = max(edge_times) + parameter.get_local_exe_time() + parameter.get_coordinate_cost() * division
return time_consumption
def obtain_overall_costs(self, edge_selections):
"""
Calculate the overall costs, which is the sum of cost of each mobile device.
:param edge_selections: the edge selection decisions for all mobile devices
:return: overall costs
"""
parameter = self.get_parameter()
overall_costs = 0
for i in range(parameter.get_user_num()):
if parameter.get_task_requests()[i] == 1:
division = int(sum(edge_selections[i]))
if division:
# cost = self.obtain_time_consumption(
# division, edge_selections[i], parameter.get_connectable_gains[i])
transmit_times = ToolFunction.obtain_transmit_times(division, edge_selections[i], parameter,
parameter.get_connectable_gains()[i])
edge_exe_times = ToolFunction.obtain_edge_exe_times(division, parameter)
edge_times = transmit_times + edge_exe_times
cost = max(edge_times) + parameter.get_local_exe_time() + parameter.get_coordinate_cost() * division
else:
cost = parameter.get_drop_penalty()
else:
cost = 0
overall_costs += cost
return overall_costs
def obtain_edge_selections(self):
"""
Obtain the feasible solution with random policy.
        :return: edge_selections, one row per mobile device (all-zero if the device has no task request)
"""
parameter = self.get_parameter()
# first initialize with zero
edge_selections = []
for i in range(parameter.get_user_num()):
edge_selection = np.repeat(0, len(parameter.get_connectable_servers()[i]))
edge_selections.append(edge_selection)
        # for every edge site, generate a random integer in [0, max_assign], and distribute connections to
        # connectable mobile devices
for j in range(parameter.get_server_num()):
assign_num = random.randint(0, parameter.get_max_assign())
connectable_user_num = len(parameter.get_connectable_users()[j])
if assign_num >= connectable_user_num:
# every mobile device in it can be chosen
for i in range(connectable_user_num):
user_index = parameter.get_connectable_users()[j][i]
edge_index = list.index(parameter.get_connectable_servers()[user_index], j)
edge_selections[user_index][edge_index] = 1
else:
# randomly choose assign_num users to distribute j's computation capacity
user_indices = random.sample(parameter.get_connectable_users()[j], assign_num)
for i in range(len(user_indices)):
user_index = user_indices[i]
edge_index = list.index(parameter.get_connectable_servers()[user_index], j)
edge_selections[user_index][edge_index] = 1
        # set those mobile devices that have no task request to [0, 0, ..., 0];
        # we cannot delete them from the list because every row is the index of the corresponding mobile device
for i in range(parameter.get_user_num()):
if parameter.get_task_requests()[i] == 0:
edge_selections[i] = np.zeros(len(edge_selections[i]))
else:
division = int(sum(edge_selections[i]))
if division:
times = self.obtain_time_consumption(division, edge_selections[i],
parameter.get_connectable_gains()[i])
energys = ToolFunction.obtain_transmit_energy(division, edge_selections[i], parameter,
parameter.get_connectable_gains()[i])
# satisfy the constraint
if times >= parameter.get_ddl() or energys > self.__battery_energy_levels[i]:
edge_selections[i] = np.zeros(len(edge_selections[i]))
return edge_selections
def obtain_harvested_energys(self):
"""
        Randomly choose an energy value in $[0, E_i^H]$ for every mobile device.
        :return: the list of harvested energys, one per mobile device
"""
parameter = self.get_parameter()
return list(map(random.uniform, [0] * parameter.get_user_num(), parameter.get_harvestable_energys()))
def update_energy_levels(self):
"""
        Update the battery & virtual energy levels according to the evolution expression \eqref{10}.
:return: no return
"""
parameter = self.get_parameter()
for i in range(parameter.get_user_num()):
division = int(sum(self.__edge_selections[i]))
if division:
self.__battery_energy_levels[i] = self.__battery_energy_levels[i] + \
self.__harvested_energys[i] - ToolFunction.obtain_transmit_energy(
division, self.__edge_selections[i], parameter, parameter.get_connectable_gains()[i]) - \
parameter.get_local_exe_energy()
else:
# check whether need to minus local_exe_energys
# if self.__battery_energy_levels[i] < parameter.get_local_exe_energy():
# self.__battery_energy_levels[i] = self.__battery_energy_levels[i] + self.__harvested_energys[i]
# else:
# self.__battery_energy_levels[i] = self.__battery_energy_levels[i] + \
# self.__harvested_energys[i] - parameter.get_local_exe_energy()
self.__battery_energy_levels[i] = self.__battery_energy_levels[i] + self.__harvested_energys[i]
self.__virtual_energy_levels[i] = self.__battery_energy_levels[i] - parameter.get_perturbation_para()
def get_parameter(self):
return self.__parameter
def get_battery_energy_levels(self):
return self.__battery_energy_levels
def get_virtual_energy_levels(self):
return self.__virtual_energy_levels
def get_harvested_energys(self):
return self.__harvested_energys
def set_harvested_energys(self, harvested_energys):
self.__harvested_energys = harvested_energys
def get_edge_selections(self):
return self.__edge_selections
def set_edge_selections(self, edge_selections):
self.__edge_selections = edge_selections
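# --- Illustration (added) ----------------------------------------------------
# One-line numeric check of the battery-evolution rule implemented in
# update_energy_levels(): B_{t+1} = B_t + e_t - E_tx - E_local when the device
# offloads, and B_{t+1} = B_t + e_t otherwise.  All values below are made up.
if __name__ == '__main__':
    battery, harvested, e_tx, e_local = 10.0, 2.0, 3.0, 1.5
    battery_after_offload = battery + harvested - e_tx - e_local
    battery_after_idle = battery + harvested
    assert battery_after_offload == 7.5 and battery_after_idle == 12.0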
|
{"hexsha": "853111e25a0baed875486a8889e29aa5f6689edb", "size": 9406, "ext": "py", "lang": "Python", "max_stars_repo_path": "cross_edge_offloading/offloading_common.py", "max_stars_repo_name": "cnetboy/Cross-edge-Computation-Offloading", "max_stars_repo_head_hexsha": "64b7d9fe6b94e6e6f5fe934d97304549d21868b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2019-05-17T01:08:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T02:41:32.000Z", "max_issues_repo_path": "cross_edge_offloading/offloading_common.py", "max_issues_repo_name": "AlerfaRomeoo/Cross-edge-Computation-Offloading", "max_issues_repo_head_hexsha": "64b7d9fe6b94e6e6f5fe934d97304549d21868b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cross_edge_offloading/offloading_common.py", "max_forks_repo_name": "AlerfaRomeoo/Cross-edge-Computation-Offloading", "max_forks_repo_head_hexsha": "64b7d9fe6b94e6e6f5fe934d97304549d21868b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2019-09-11T09:58:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T07:06:50.000Z", "avg_line_length": 49.5052631579, "max_line_length": 120, "alphanum_fraction": 0.6408675314, "include": true, "reason": "import numpy", "num_tokens": 1863}
|
[STATEMENT]
lemma (in is_functor) cf_is_functor_if_ge_Limit:
assumes "\<Z> \<beta>" and "\<alpha> \<in>\<^sub>\<circ> \<beta>"
shows "\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<beta>\<^esub> \<BB>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<beta>\<^esub> \<BB>
[PROOF STEP]
by (rule is_functorI)
(
auto simp:
cat_cs_simps
assms
vfsequence_axioms
cf_is_semifunctor_if_ge_Limit
HomDom.cat_category_if_ge_Limit
HomCod.cat_category_if_ge_Limit
intro: cat_cs_intros
)
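(* Informal reading (added): a functor between \<alpha>-categories remains a
   functor when the universe parameter is enlarged to any \<beta> with
   \<alpha> \<in>\<^sub>\<circ> \<beta>; the proof re-checks the functor axioms
   against the category structure inherited at the larger limit. *)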
|
{"llama_tokens": 238, "file": "CZH_Elementary_Categories_czh_ecategories_CZH_ECAT_Functor", "length": 1}
|
from io import StringIO
from numbers import Integral
import numpy as np
import pandas
import pickle
import sklearn
from sklearn import tree
from sklearn.tree import export_text
from sklearn.tree import _tree
from sklearn.tree import DecisionTreeClassifier
# _compute_depth is a private sklearn helper used by the truncation branch
# below; its module location may vary across scikit-learn versions
from sklearn.tree._export import _compute_depth
filename = 'final_rf_model.sav'
rf = pickle.load(open(filename, 'rb'))
i_tree = 0
global_id = 0
def export_p4(decision_tree):
tree_ = decision_tree.tree_
class_names = decision_tree.classes_
right_child_fmt = "{} {} <= {}\n"
left_child_fmt = "{} {} > {}\n"
truncation_fmt = "{} {}\n"
feature_names_ = ["{}".format(i) for i in tree_.feature]
export_text.report = ""
max_depth=10
spacing=3
decimals=2
show_weights=False
if isinstance(decision_tree, DecisionTreeClassifier):
value_fmt = "{}{} weights: {}\n"
if not show_weights:
value_fmt = "{}{}{}\n"
else:
value_fmt = "{}{} value: {}\n"
def _add_leaf(value, class_name, indent, prevfeature, result, depth, previous_id):
global global_id
global i_tree
current_id = global_id
val = ''
is_classification = isinstance(decision_tree,
DecisionTreeClassifier)
if show_weights or not is_classification:
val = ["{1:.{0}f}, ".format(decimals, v) for v in value]
val = '['+''.join(val)[:-2]+']'
if is_classification:
val += ' class: ' + str(class_name)
export_text.report += value_fmt.format(indent, '', val)
print("table_add MyIngress.level_", i_tree,"_", depth, " ", "MyIngress.SetClass",i_tree," ", previous_id," ",prevfeature," ", result," ", "=>"," ", current_id, " ", int(float(class_name)), sep="")
def print_tree_recurse(node, depth, prevfeature, result, previous_id):
indent = ("|" + (" " * spacing)) * depth
indent = indent[:-spacing] + "-" * spacing
global global_id
global i_tree
global_id = global_id + 1
current_id = global_id
value = None
if tree_.n_outputs == 1:
value = tree_.value[node][0]
else:
value = tree_.value[node].T[0]
class_name = np.argmax(value)
if (tree_.n_classes[0] != 1 and
tree_.n_outputs == 1):
class_name = class_names[class_name]
if depth <= max_depth+1:
info_fmt = ""
info_fmt_left = info_fmt
info_fmt_right = info_fmt
if tree_.feature[node] != _tree.TREE_UNDEFINED:
name = feature_names_[node]
threshold = tree_.threshold[node]
threshold = "{1:.{0}f}".format(decimals, threshold)
export_text.report += right_child_fmt.format(indent,
name,
threshold)
export_text.report += info_fmt_left
if int(name) == 9 or int(name) == 11:
#print("**********", name, threshold, float(threshold), 1000000.0*float(threshold))
print("table_add MyIngress.level_", i_tree,"_", depth, " MyIngress.CheckFeature ", previous_id, " ", prevfeature, " ", result, " ", "=>", " ", current_id, " ", name," ", int(1000000.0*float(threshold)), sep='')
else:
print("table_add MyIngress.level_", i_tree,"_", depth, " MyIngress.CheckFeature ", previous_id, " ", prevfeature, " ", result, " ", "=>", " ", current_id, " ", name," ", int(float(threshold)), sep='')
print_tree_recurse(tree_.children_left[node], depth+1, name, 1, current_id)
export_text.report += left_child_fmt.format(indent,
name,
threshold)
export_text.report += info_fmt_right
# print("level", depth, "checkfeature", prevfeature, result, "=>", name, threshold)
print_tree_recurse(tree_.children_right[node], depth+1, name, 0, current_id)
else: # leaf
_add_leaf(value, class_name, indent, prevfeature, result, depth, previous_id)
else:
subtree_depth = _compute_depth(tree_, node)
if subtree_depth == 1:
_add_leaf(value, class_name, indent, prevfeature, result, depth, previous_id)
else:
trunc_report = 'truncated branch of depth %d' % subtree_depth
export_text.report += truncation_fmt.format(indent,
trunc_report)
print_tree_recurse(0, 1, 0, 1, global_id)
for tree_in_forest in rf.estimators_:
    # r = export_text(tree_in_forest)
    # print(r)
    # i_tree is already a module-level name, so no 'global' statement is needed here
    i_tree = i_tree + 1
    export_p4(tree_in_forest)
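# --- Illustration (added) ----------------------------------------------------
# Each internal node visited above emits one control-plane rule of the form
#   table_add MyIngress.level_<tree>_<depth> MyIngress.CheckFeature \
#       <parent_id> <parent_feature> <branch> => <node_id> <feature> <threshold>
# and each leaf emits a MyIngress.SetClass<tree> rule carrying the predicted
# class.  The node ids (global_id) thread the parent/child relation through
# the match keys, so the P4 pipeline can walk the tree one table per level.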
|
{"hexsha": "571871937c82b50aa4bb650badcd33f0a99053f8", "size": 4918, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/rftop4_v2.py", "max_stars_repo_name": "ksingh25/switchtree", "max_stars_repo_head_hexsha": "8a15a3a11100bb9444cea0a51c0ef5c141ec0b36", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-18T13:08:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T13:08:42.000Z", "max_issues_repo_path": "scripts/rftop4_v2.py", "max_issues_repo_name": "ksingh25/switchtree", "max_issues_repo_head_hexsha": "8a15a3a11100bb9444cea0a51c0ef5c141ec0b36", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-22T23:30:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T22:13:21.000Z", "max_forks_repo_path": "scripts/rftop4_v2.py", "max_forks_repo_name": "ksingh25/switchtree", "max_forks_repo_head_hexsha": "8a15a3a11100bb9444cea0a51c0ef5c141ec0b36", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-09T20:50:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T20:50:26.000Z", "avg_line_length": 38.421875, "max_line_length": 229, "alphanum_fraction": 0.5551037007, "include": true, "reason": "import numpy", "num_tokens": 1126}
|
import numpy
from src.stats.bulk_stats import BulkStats
class GlobalStats(object):
def __init__(self):
super(GlobalStats, self).__init__()
self.bulks = []
def __del__(self):
# print 'GlobalStats.__del__'
del self.bulks
def get_new_bulk_stats(self, **info):
self.bulks.append(BulkStats(**info))
return self.bulks[-1]
def get_avg_total_time_vs_lambda(self, buffer_latency):
return self.get_avg_time_vs_lambda(BulkStats.get_avg_total_time.__name__, buffer_latency)
def get_avg_inner_time_vs_lambda(self, buffer_latency):
return self.get_avg_time_vs_lambda(BulkStats.get_avg_inner_time.__name__, buffer_latency)
def get_avg_time_vs_lambda(self, avg_time_func_name, buffer_latency):
points = []
for bulk in self.bulks:
if 'buffer_latency' not in bulk.info or 'gen_lambda' not in bulk.info:
raise Exception('No data to operate with')
if bulk.info['buffer_latency'] == buffer_latency:
avg_time = getattr(bulk, avg_time_func_name)()
gen_lambda = bulk.info['gen_lambda']
points.append((gen_lambda, avg_time))
return points
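# --- Illustration (added; hypothetical usage sketch) --------------------------
# Collect one BulkStats per simulation run, then pull the (lambda, avg_time)
# curve for a fixed buffer latency:
#   stats = GlobalStats()
#   bulk = stats.get_new_bulk_stats(gen_lambda=0.5, buffer_latency=10)
#   ...feed bulk with per-request samples...
#   points = stats.get_avg_total_time_vs_lambda(buffer_latency=10)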
|
{"hexsha": "924a0cf65fd13c1a4055d9c5428cbae446e24873", "size": 1225, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/stats/global_stats.py", "max_stars_repo_name": "igorcoding/os-simulation", "max_stars_repo_head_hexsha": "1e76fdda75c138025950876a2e7b68e99a55c54a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/stats/global_stats.py", "max_issues_repo_name": "igorcoding/os-simulation", "max_issues_repo_head_hexsha": "1e76fdda75c138025950876a2e7b68e99a55c54a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/stats/global_stats.py", "max_forks_repo_name": "igorcoding/os-simulation", "max_forks_repo_head_hexsha": "1e76fdda75c138025950876a2e7b68e99a55c54a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2368421053, "max_line_length": 97, "alphanum_fraction": 0.6702040816, "include": true, "reason": "import numpy", "num_tokens": 274}
|
#include <cstdlib>
#include <iostream>
#include <functional>
#include <iomanip>
#include <list>
#include <map>
#include <string>
#include <boost/fusion/adapted/struct.hpp>
#include <boost/spirit/home/x3.hpp>
#include <boost/spirit/home/x3/support/ast/variant.hpp>
#include <boost/foreach.hpp>
namespace client {
namespace x3 = boost::spirit::x3;
typedef unsigned int uint;
namespace ast {
struct nil {};
struct binary_op;
struct conditional_op;
struct expression;
struct binary_operator {
std::string name;
std::function<uint(uint, uint)> op;
uint
operator()(uint lhs, uint rhs) const {
return op(lhs, rhs);
}
};
struct operand : x3::variant<
nil,
uint,
std::string,
x3::forward_ast<binary_op>,
x3::forward_ast<conditional_op>,
x3::forward_ast<expression>> {
using base_type::base_type;
using base_type::operator=;
};
struct binary_op {
binary_operator op;
operand lhs;
operand rhs;
};
struct conditional_op {
operand lhs;
operand rhs_true;
operand rhs_false;
};
struct operation {
binary_operator op;
operand rhs;
};
struct expression {
operand lhs;
std::list<operation> rhs;
};
} // namespace ast
} // namespace client
BOOST_FUSION_ADAPT_STRUCT(client::ast::expression, lhs, rhs)
BOOST_FUSION_ADAPT_STRUCT(client::ast::operation, op, rhs)
BOOST_FUSION_ADAPT_STRUCT(client::ast::conditional_op, lhs, rhs_true, rhs_false)
BOOST_FUSION_ADAPT_STRUCT(client::ast::binary_op, op, lhs, rhs)
namespace client {
namespace parser {
struct error_handler {
template <typename It, typename Ctx>
x3::error_handler_result
on_error(It f, It l, x3::expectation_failure<It> const& e, Ctx const&)
const {
std::cout << std::string(f, l) << "\n"
<< std::setw(1 + std::distance(f, e.where())) << "^"
<< "-- expected: " << e.which() << "\n";
return x3::error_handler_result::fail;
}
};
#define add_operation(NAME, OP) this->add(NAME, {NAME, OP});
struct multiplicative_op_ : x3::symbols<ast::binary_operator> {
multiplicative_op_() {
add_operation("%", [](uint a, uint b) { return std::fmod(a, b); });
}
} multiplicative_op;
struct logical_op_ : x3::symbols<ast::binary_operator> {
logical_op_() {
add_operation("&&", std::logical_and<uint>{});
add_operation("||", std::logical_or<uint>{});
}
} logical_op;
struct relational_op_ : x3::symbols<ast::binary_operator> {
relational_op_() {
add_operation("<", std::less<uint>{});
add_operation("<=", std::less_equal<uint>{});
add_operation(">", std::greater<uint>{});
add_operation(">=", std::greater_equal<uint>{});
}
} relational_op;
struct equality_op_ : x3::symbols<ast::binary_operator> {
equality_op_() {
add_operation("==", std::equal_to<uint>{});
add_operation("!=", std::not_equal_to<uint>{});
}
} equality_op;
auto make_conditional_op = [](auto& ctx) {
using boost::fusion::at_c;
x3::_val(ctx) = ast::conditional_op{
x3::_val(ctx), at_c<0>(x3::_attr(ctx)), at_c<1>(x3::_attr(ctx))};
};
// clang-format off
struct expression_class : error_handler {};
struct logical_class : error_handler {};
struct equality_class : error_handler {};
struct relational_class : error_handler {};
struct multiplicative_class : error_handler {};
struct primary_class : error_handler {};
struct conditional_class : error_handler {};
struct variable_class : error_handler {};
// Rule declarations
auto const expression = x3::rule<expression_class, ast::operand> {"expression"};
auto const conditional = x3::rule<conditional_class, ast::operand> {"conditional"};
auto const primary = x3::rule<primary_class, ast::operand> {"primary"};
auto const logical = x3::rule<logical_class, ast::expression>{"logical"};
auto const equality = x3::rule<equality_class, ast::expression>{"equality"};
auto const relational = x3::rule<relational_class, ast::expression>{"relational"};
auto const multiplicative = x3::rule<multiplicative_class, ast::expression>{"multiplicative"};
auto const variable = x3::rule<variable_class, std::string> {"variable"};
// Rule defintions
auto const expression_def = conditional;
auto const conditional_def =
logical[([](auto& ctx) { _val(ctx) = _attr(ctx); })]
>> -('?' > expression > ':' > expression)[make_conditional_op];
auto const logical_def =
equality
>> *((logical_op > logical)
| (logical_op > equality));
auto const equality_def =
relational
>> *(equality_op > relational);
auto const relational_def =
multiplicative
>> *(relational_op > multiplicative);
auto const multiplicative_def =
primary
>> *(multiplicative_op > primary);
auto const primary_def =
x3::uint_
| ('(' > expression > ')')
| variable;
auto const variable_def = x3::lexeme[x3::alpha >> *x3::alnum];
// clang-format on
BOOST_SPIRIT_DEFINE(
expression,
logical,
equality,
relational,
multiplicative,
primary,
conditional,
variable);
} // namespace parser
} // namespace client
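// --- Illustration (added) ----------------------------------------------------
// Precedence, loosest to tightest, as encoded by the rule chain above:
//   conditional (?:) -> logical (&& and ||, sharing one level) -> equality
//   (== !=) -> relational (< <= > >=) -> multiplicative (%) -> primary.
// Example: "n % 10 == 1 && n % 100 != 11" parses as
//   ((n % 10) == 1) && ((n % 100) != 11),
// which is what the CLDR-style plural rules exercised in main() below expect.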
namespace client {
namespace ast {
struct printer {
typedef void result_type;
result_type
operator()(operand const& ast) const {
boost::apply_visitor(*this, ast.get());
}
result_type
operator()(nil) const {}
result_type
operator()(expression const& ast) const {
if (ast.rhs.size() > 0) {
std::cout << '(';
}
boost::apply_visitor(*this, ast.lhs);
BOOST_FOREACH (operation const& op, ast.rhs) { (*this)(op); }
if (ast.rhs.size() > 0) {
std::cout << ')';
}
}
result_type
operator()(operation const& ast) const {
std::cout << ' ' << ast.op.name << ' ';
boost::apply_visitor(*this, ast.rhs);
}
result_type
operator()(binary_op const& ast) const {
            std::cout << '(';
            boost::apply_visitor(*this, ast.lhs);
            std::cout << ' ' << ast.op.name << ' ';  // print the operator between its operands
            boost::apply_visitor(*this, ast.rhs);
            std::cout << ')';
}
result_type
operator()(conditional_op const& ast) const {
std::cout << '(';
boost::apply_visitor(*this, ast.lhs);
std::cout << " ? ";
boost::apply_visitor(*this, ast.rhs_true);
std::cout << " : ";
boost::apply_visitor(*this, ast.rhs_false);
std::cout << ')';
}
result_type
operator()(uint const& ast) const {
std::cout << ast;
}
result_type
operator()(std::string const& ast) const {
std::cout << ast;
}
};
struct evaluator {
typedef uint result_type;
evaluator(const result_type variable) : variable(variable) {}
result_type variable;
result_type
operator()(operand const& ast) const {
return boost::apply_visitor(*this, ast.get());
}
result_type
operator()(nil) const {
BOOST_ASSERT(0);
return 0;
}
result_type
operator()(expression const& ast) const {
result_type state = boost::apply_visitor(*this, ast.lhs);
BOOST_FOREACH (operation const& op, ast.rhs) {
state = (*this)(op, state);
}
return state;
}
result_type
operator()(operation const& ast, uint lhs) const {
result_type rhs = boost::apply_visitor(*this, ast.rhs);
return ast.op(lhs, rhs);
}
result_type
operator()(binary_op const& ast) const {
result_type lhs = boost::apply_visitor(*this, ast.lhs);
result_type rhs = boost::apply_visitor(*this, ast.rhs);
return ast.op(lhs, rhs);
}
result_type
operator()(conditional_op const& ast) const {
bool lhs = boost::apply_visitor(*this, ast.lhs);
if (lhs) {
return boost::apply_visitor(*this, ast.rhs_true);
}
return boost::apply_visitor(*this, ast.rhs_false);
}
result_type
operator()(uint const& ast) const {
return ast;
}
        result_type
        operator()(std::string const& ast) const {
            // single-variable evaluator: every identifier resolves to the one bound value
            return variable;
        }
};
} // namespace ast
} // namespace client
int
main() {
typedef unsigned int uint;
namespace x3 = boost::spirit::x3;
std::map<std::string, std::function<uint(uint)>> test_expressions{
std::make_pair<std::string, std::function<uint(uint)>>(
"0", [](uint n) { return 0; }),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 0) ? 0 : ((n == 1) ? 1 : 2)",
[](uint n) { return (n == 0) ? 0 : ((n == 1) ? 1 : 2); }),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 0) ? 0 : ((n == 1) ? 1 : (((n % 100 == 2 || n % 100 == "
"22 || n % 100 == 42 || n % 100 == 62 || n % 100 == 82) || n % "
"1000 == 0 && (n % 100000 >= 1000 && n % 100000 <= 20000 || n "
"% 100000 == 40000 || n % 100000 == 60000 || n % 100000 == "
"80000) || n != 0 && n % 1000000 == 100000) ? 2 : ((n % 100 == "
"3 || n % 100 == 23 || n % 100 == 43 || n % 100 == 63 || n % "
"100 == 83) ? 3 : ((n != 1 && (n % 100 == 1 || n % 100 == 21 "
"|| n % 100 == 41 || n % 100 == 61 || n % 100 == 81)) ? 4 : "
"5))))",
[](uint n) {
return (n == 0)
? 0
: ((n == 1)
? 1
: (((n % 100 == 2 || n % 100 == 22 ||
n % 100 == 42 || n % 100 == 62 ||
n % 100 == 82) ||
n % 1000 == 0 &&
(n % 100000 >= 1000 &&
n % 100000 <= 20000 ||
n % 100000 == 40000 ||
n % 100000 == 60000 ||
n % 100000 == 80000) ||
n != 0 && n % 1000000 == 100000)
? 2
: ((n % 100 == 3 || n % 100 == 23 ||
n % 100 == 43 || n % 100 == 63 ||
n % 100 == 83)
? 3
: ((n != 1 && (n % 100 == 1 ||
n % 100 == 21 ||
n % 100 == 41 ||
n % 100 == 61 ||
n % 100 == 81))
? 4
: 5))));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 0) ? 0 : ((n == 1) ? 1 : ((n == 2) ? 2 : ((n % 100 >= 3 "
"&& n % 100 <= 10) ? 3 : ((n % 100 >= 11 && n % 100 <= 99) ? 4 "
": 5))))",
[](uint n) {
return (n == 0)
? 0
: ((n == 1)
? 1
: ((n == 2) ? 2
: ((n % 100 >= 3 && n % 100 <= 10)
? 3
: ((n % 100 >= 11 &&
n % 100 <= 99)
? 4
: 5))));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 0) ? 0 : ((n == 1) ? 1 : ((n == 2) ? 2 : ((n == 3) ? 3 "
": ((n == 6) ? 4 : 5))))",
[](uint n) {
return (n == 0)
? 0
: ((n == 1)
? 1
: ((n == 2)
? 2
: ((n == 3) ? 3
: ((n == 6) ? 4 : 5))));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 0 || n == 1) ? 0 : ((n >= 2 && n <= 10) ? 1 : 2)",
[](uint n) {
return (n == 0 || n == 1) ? 0 : ((n >= 2 && n <= 10) ? 1 : 2);
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"n != 1", [](uint n) { return n != 1; }),
std::make_pair<std::string, std::function<uint(uint)>>(
"n > 1", [](uint n) { return n > 1; }),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n % 100 == 1) ? 0 : ((n % 100 == 2) ? 1 : ((n % 100 == 3 || "
"n % 100 == 4) ? 2 : 3))",
[](uint n) {
return (n % 100 == 1)
? 0
: ((n % 100 == 2)
? 1
: ((n % 100 == 3 || n % 100 == 4) ? 2 : 3));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n % 10 == 0 || n % 100 >= 11 && n % 100 <= 19) ? 0 : ((n % "
"10 == 1 && n % 100 != 11) ? 1 : 2)",
[](uint n) {
return (n % 10 == 0 || n % 100 >= 11 && n % 100 <= 19)
? 0
: ((n % 10 == 1 && n % 100 != 11) ? 1 : 2);
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n % 10 == 1) ? 0 : ((n % 10 == 2) ? 1 : ((n % 100 == 0 || n "
"% 100 == 20 || n % 100 == 40 || n % 100 == 60 || n % 100 == "
"80) ? 2 : 3))",
[](uint n) {
return (n % 10 == 1)
? 0
: ((n % 10 == 2) ? 1
: ((n % 100 == 0 || n % 100 == 20 ||
n % 100 == 40 ||
n % 100 == 60 || n % 100 == 80)
? 2
: 3));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"n % 10 != 1 || n % 100 == 11",
[](uint n) { return n % 10 != 1 || n % 100 == 11; }),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n % 10 == 1 && n % 100 != 11) ? 0 : ((n % 10 >= 2 && n % 10 "
"<= 4 && (n % 100 < 12 || n % 100 > 14)) ? 1 : 2)",
[](uint n) {
return (n % 10 == 1 && n % 100 != 11)
? 0
: ((n % 10 >= 2 && n % 10 <= 4 &&
(n % 100 < 12 || n % 100 > 14))
? 1
: 2);
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n % 10 == 1 && (n % 100 < 11 || n % 100 > 19)) ? 0 : ((n % "
"10 >= 2 && n % 10 <= 9 && (n % 100 < 11 || n % 100 > 19)) ? 1 "
": 2)",
[](uint n) {
return (n % 10 == 1 && (n % 100 < 11 || n % 100 > 19))
? 0
: ((n % 10 >= 2 && n % 10 <= 9 &&
(n % 100 < 11 || n % 100 > 19))
? 1
: 2);
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n % 10 == 1 && n % 100 != 11 && n % 100 != 71 && n % 100 != "
"91) ? 0 : ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 && "
"n % 100 != 92) ? 1 : ((((n % 10 == 3 || n % 10 == 4) || n % "
"10 == 9) && (n % 100 < 10 || n % 100 > 19) && (n % 100 < 70 "
"|| n % 100 > 79) && (n % 100 < 90 || n % 100 > 99)) ? 2 : ((n "
"!= 0 && n % 1000000 == 0) ? 3 : 4)))",
[](uint n) {
return (n % 10 == 1 && n % 100 != 11 && n % 100 != 71 &&
n % 100 != 91)
? 0
: ((n % 10 == 2 && n % 100 != 12 && n % 100 != 72 &&
n % 100 != 92)
? 1
: ((((n % 10 == 3 || n % 10 == 4) ||
n % 10 == 9) &&
(n % 100 < 10 || n % 100 > 19) &&
(n % 100 < 70 || n % 100 > 79) &&
(n % 100 < 90 || n % 100 > 99))
? 2
: ((n != 0 && n % 1000000 == 0) ? 3
: 4)));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 1) ? 0 : ((n == 0 || n % 100 >= 2 && n % 100 <= 10) ? 1 "
": ((n % 100 >= 11 && n % 100 <= 19) ? 2 : 3))",
[](uint n) {
return (n == 1)
? 0
: ((n == 0 || n % 100 >= 2 && n % 100 <= 10)
? 1
: ((n % 100 >= 11 && n % 100 <= 19) ? 2 : 3));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 1) ? 0 : ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 1 "
": 2)",
[](uint n) {
return (n == 1)
? 0
: ((n == 0 || n % 100 >= 2 && n % 100 <= 19) ? 1
: 2);
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 1) ? 0 : ((n % 10 >= 2 && n % 10 <= 4 && (n % 100 < 12 "
"|| n % 100 > 14)) ? 1 : 2)",
[](uint n) {
return (n == 1) ? 0
: ((n % 10 >= 2 && n % 10 <= 4 &&
(n % 100 < 12 || n % 100 > 14))
? 1
: 2);
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 1) ? 0 : ((n == 2) ? 1 : 2)",
[](uint n) { return (n == 1) ? 0 : ((n == 2) ? 1 : 2); }),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 1) ? 0 : ((n == 2) ? 1 : ((n > 10 && n % 10 == 0) ? 2 : "
"3))",
[](uint n) {
return (n == 1)
? 0
: ((n == 2) ? 1 : ((n > 10 && n % 10 == 0) ? 2 : 3));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 1) ? 0 : ((n == 2) ? 1 : ((n >= 3 && n <= 6) ? 2 : ((n "
">= 7 && n <= 10) ? 3 : 4)))",
[](uint n) {
return (n == 1)
? 0
: ((n == 2) ? 1
: ((n >= 3 && n <= 6)
? 2
: ((n >= 7 && n <= 10) ? 3 : 4)));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 1) ? 0 : ((n >= 2 && n <= 4) ? 1 : 2)",
[](uint n) { return (n == 1) ? 0 : ((n >= 2 && n <= 4) ? 1 : 2); }),
std::make_pair<std::string, std::function<uint(uint)>>(
"(n == 1 || n == 11) ? 0 : ((n == 2 || n == 12) ? 1 : ((n >= 3 "
"&& n <= 10 || n >= 13 && n <= 19) ? 2 : 3))",
[](uint n) {
return (n == 1 || n == 11)
? 0
: ((n == 2 || n == 12)
? 1
: ((n >= 3 && n <= 10 || n >= 13 && n <= 19)
? 2
: 3));
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"n != 1 && n != 2 && n != 3 && (n % 10 == 4 || n % 10 == 6 || "
"n % 10 == 9)",
[](uint n) {
return n != 1 && n != 2 && n != 3 &&
(n % 10 == 4 || n % 10 == 6 || n % 10 == 9);
}),
std::make_pair<std::string, std::function<uint(uint)>>(
"n >= 2 && (n < 11 || n > 99)",
[](uint n) { return n >= 2 && (n < 11 || n > 99); }),
};
for (const std::pair<std::string, std::function<uint(uint)>>& key_value :
test_expressions) {
for (uint idx = 0; idx <= 1000; ++idx) {
const std::string& str{key_value.first};
client::ast::operand program;
client::ast::evaluator eval(idx);
client::ast::printer print;
std::string::const_iterator iter = str.begin();
std::string::const_iterator end = str.end();
bool r = phrase_parse(
iter, end, client::parser::expression, x3::space, program);
if (r && iter == end) {
const uint result{eval(program)};
const uint truth{key_value.second(idx)};
if (result != truth) {
std::cout << "-------------------------" << std::endl;
std::cout << "Program: ";
print(program);
std::cout << std::endl;
std::cout << "Expression: " << std::quoted(str)
<< std::endl;
std::cout << "n: " << idx << std::endl;
std::cout << "Result: " << result << std::endl;
std::cout << "Truth: " << truth << std::endl;
std::cout << "-------------------------" << std::endl;
std::cout << "FAIL: Result did not match truth!"
<< std::endl;
return EXIT_FAILURE;
}
} else {
std::string rest(iter, end);
std::cout << "Parsing failed" << std::endl;
std::cout << "stopped at: \" " << rest << "\"" << std::endl;
std::cout << "-------------------------" << std::endl;
return EXIT_FAILURE;
}
}
}
std::cout << "///////////////////////////////////////////////////\n\n";
std::cout << "Expression parser...\n\n";
std::cout << "///////////////////////////////////////////////////\n\n";
std::string str;
while (true) {
std::cout << "Type an expression...or [q or Q] to quit\n\n";
std::getline(std::cin, str);
if (str.empty() || str[0] == 'q' || str[0] == 'Q')
break;
std::cout << "Enter variable value: ";
std::string variable_str;
std::getline(std::cin, variable_str);
        uint variable = std::stoul(variable_str);
client::ast::operand program;
client::ast::evaluator eval(variable);
client::ast::printer print;
std::string::const_iterator iter = str.begin();
std::string::const_iterator const end = str.end();
bool r = phrase_parse(
iter, end, client::parser::expression, x3::space, program);
if (r && iter == end) {
std::cout << "-------------------------" << std::endl;
std::cout << "Program: ";
print(program);
std::cout << std::endl;
std::cout << "Expression: " << std::quoted(str) << std::endl;
std::cout << "Result: " << eval(program) << std::endl;
std::cout << "-------------------------" << std::endl;
} else {
std::string rest(iter, end);
std::cout << "-------------------------" << std::endl;
std::cout << "Parsing failed" << std::endl;
std::cout << "stopped at: \" " << rest << "\"" << std::endl;
std::cout << "-------------------------" << std::endl;
}
}
std::cout << "Bye... :-) \n\n";
return EXIT_SUCCESS;
}
|
{"hexsha": "3009d2ee0e98de35b483f178cdc583e8532ecf08", "size": 25591, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/plurals-parser.cpp", "max_stars_repo_name": "limitz404/plurals-parser-boost", "max_stars_repo_head_hexsha": "c90f226c5b54647e13cc07d83bd9895e8783737b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-11-27T09:58:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-27T09:58:37.000Z", "max_issues_repo_path": "src/plurals-parser.cpp", "max_issues_repo_name": "limitz404/plurals-parser-boost", "max_issues_repo_head_hexsha": "c90f226c5b54647e13cc07d83bd9895e8783737b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plurals-parser.cpp", "max_forks_repo_name": "limitz404/plurals-parser-boost", "max_forks_repo_head_hexsha": "c90f226c5b54647e13cc07d83bd9895e8783737b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7375776398, "max_line_length": 98, "alphanum_fraction": 0.3656363565, "num_tokens": 6512}
|
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import os
import pandas as pd
import numpy as np
class ToTensor(object):
"""Transform the numpy array to a tensor."""
def __init__(self, dtype=torch.float):
self.dtype = dtype
def __call__(self, input):
"""
:param input: numpy array.
:return: PyTorch's tensor.
"""
# Transform data on the cpu.
return torch.tensor(input, device=torch.device("cpu"),
dtype=self.dtype)
class AddChannel(object):
"""Add channel dimension to the input time series."""
def __call__(self, input):
"""
        Add a channel dimension (dim 0) to the input time series.
        :param input: the input tensor of shape (width,)
        :return: tensor of shape (1, width)
"""
# We receive only a single array of values as input, so have to add the
# channel as the zero-th dimension.
return torch.unsqueeze(input, dim=0)
class RemyDataset(Dataset):
def __init__(
self,
train=True,
data_path=None):
"""
        :param train: if True, load the training split, otherwise the test split.
        :param data_path: the path to the Remy dataset (defaults to ../../remy_data).
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
if data_path is None:
data_path = os.path.join(dir_path, os.pardir, os.pardir,
"remy_data")
else:
data_path = os.path.join(dir_path, data_path)
if train is True:
suffix = "_train.csv"
else:
suffix = "_test.csv"
csv_path = data_path + '/' + 'remy_data' + suffix
self.data_all = pd.read_csv(csv_path, header=None)
        # np.int / np.float aliases were removed in NumPy 1.24; use fixed dtypes
        self.labels = np.asarray(self.data_all.iloc[:, 0], dtype=np.int64)
self.length = len(self.labels)
self.num_classes = len(np.unique(self.labels))
self.labels = self.__transform_labels(labels=self.labels,
num_classes=self.num_classes)
        self.data = np.asarray(self.data_all.iloc[:, 1:], dtype=np.float64)
# randomize the data
randomized_indices = np.random.choice(
self.length, self.length, replace=False)
self.data = self.data[randomized_indices, ...]
self.labels = self.labels[randomized_indices]
self.width = len(self.data[0, :])
self.dtype = torch.float
self.data = torch.tensor(self.data, device=torch.device("cpu"),
dtype=self.dtype)
# add the dimension for the channel
self.data = torch.unsqueeze(self.data, dim=1)
@staticmethod
def __transform_labels(labels, num_classes):
"""
Start class numbering from 0, and provide them in range from 0 to
self.num_classes - 1.
Example:
y_train = np.array([-1, 2, 3, 3, -1, 2])
nb_classes = 3
((y_train - y_train.min()) / (y_train.max() - y_train.min()) * (nb_classes - 1)).astype(int)
Out[45]: array([0, 1, 2, 2, 0, 1])
        >>> labels = __transform_labels(labels=np.array([-1, 2, 3, 3, -1, 2]),
        ...                             num_classes=3)
        >>> np.testing.assert_array_equal(x=labels,
        ...                               y=np.array([0, 1, 2, 2, 0, 1]))
:param labels: labels.
:param num_classes: number of classes.
:return: transformed labels.
"""
# The nll (negative log likelihood) loss requires target labels to be of
# type Long:
# https://discuss.pytorch.org/t/expected-object-of-type-variable-torch-longtensor-but-found-type/11833/3?u=adam_dziedzic
return ((labels - labels.min()) / (labels.max() - labels.min()) * (
num_classes - 1)).astype(np.int64)
@property
def width(self):
return self.__width
@width.setter
def width(self, val):
self.__width = val
@property
def num_classes(self):
return self.__num_classes
@num_classes.setter
def num_classes(self, val):
self.__num_classes = val
def __getitem__(self, index):
label = self.labels[index]
# Take the row index and all values starting from the second column.
# input = np.asarray(self.data.iloc[index][1:])
input = self.data[index]
# Transform time-series input to tensor.
# if self.transformations is not None:
# input = self.transformations(input)
# Return the time-series and the label.
return input, label
def __len__(self):
# self.data.index - The index(row labels) of the DataFrame.
# length = len(self.data.index)
length = len(self.data)
assert length == len(self.labels)
return length
def set_length(self, length):
"""
        :param length: the new length of the dataset (keep only the first
        `length` samples).
"""
assert len(self.data) == len(self.labels)
self.data = self.data[:length]
self.labels = self.labels[:length]
def set_range(self, start, stop):
"""
:param start: the start row
:param stop: the last row (exclusive) of the dataset
:return: the dataset with the specified range.
"""
assert len(self.data) == len(self.labels)
self.data = self.data[start:stop]
self.labels = self.labels[start:stop]
if __name__ == "__main__":
    train_dataset = RemyDataset(train=True)  # __init__ takes no 'transformations' argument
print('first data item: ', train_dataset[0])
print("length of the train dataset: ", len(train_dataset))
|
{"hexsha": "e609ff2d405ad4fbbe2b1885ac793ac21544493e", "size": 5935, "ext": "py", "lang": "Python", "max_stars_repo_path": "cnns/nnlib/datasets/remy/dataset.py", "max_stars_repo_name": "adam-dziedzic/time-series-ml", "max_stars_repo_head_hexsha": "81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-25T13:19:46.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-25T13:19:46.000Z", "max_issues_repo_path": "cnns/nnlib/datasets/remy/dataset.py", "max_issues_repo_name": "adam-dziedzic/time-series-ml", "max_issues_repo_head_hexsha": "81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cnns/nnlib/datasets/remy/dataset.py", "max_forks_repo_name": "adam-dziedzic/time-series-ml", "max_forks_repo_head_hexsha": "81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3063583815, "max_line_length": 128, "alphanum_fraction": 0.5838247683, "include": true, "reason": "import numpy", "num_tokens": 1334}
|
#include <windows.h>
#include <atlbase.h>
#include <boost/test/unit_test.hpp>
#include "../DispSvr.h"
#include "../Exports/Inc/VideoMixer.h"
#include "../Exports/Inc/VideoPresenter.h"
using namespace std;
using namespace boost::unit_test;
using namespace DispSvr;
static HWND g_hwndDevice = 0;
struct CoInit
{
CoInit() { CoInitialize(NULL); }
~CoInit() { CoUninitialize(); }
};
void TestCoCreateInstance()
{
HRESULT hr = S_OK;
CoInit co;
CComPtr<IDisplayServer> pWizard;
hr = pWizard.CoCreateInstance(CLSID_DisplayServer);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr) && pWizard, "CoCreateInstance CLSID_DisplayServer failed hr=0x" << hex << hr);
CComPtr<IDisplayObject> pCompositeDisplayObject;
hr = pCompositeDisplayObject.CoCreateInstance(CLSID_CompositeDisplayObject);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr) && pCompositeDisplayObject, "CoCreateInstance CLSID_CompositeDisplayObject failed hr=0x" << hex << hr);
CComPtr<IDisplayObject> pVideoRootDisplayObject;
hr = pVideoRootDisplayObject.CoCreateInstance(CLSID_VideoRootDisplayObject);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr) && pVideoRootDisplayObject, "CoCreateInstance CLSID_VideoRootDisplayObject failed hr=0x" << hex << hr);
CComPtr<IDisplayVideoMixer> pDisplayVideoMixer;
hr = pDisplayVideoMixer.CoCreateInstance(CLSID_DisplayVideoMixer);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr) && pDisplayVideoMixer, "CoCreateInstance CLSID_DisplayVideoMixer failed hr=0x" << hex << hr);
CComPtr<IDisplayRenderEngine> pRenderEngine;
hr = pRenderEngine.CoCreateInstance(CLSID_DisplayRenderEngine);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr) && pRenderEngine, "CoCreateInstance CLSID_DisplayRenderEngine failed hr=0x" << hex << hr);
}
void TestSingleton()
{
CoInit co;
HRESULT hr = S_OK;
CComPtr<IDisplayServer> pWizard[2];
hr = pWizard[0].CoCreateInstance(CLSID_DisplayServer);
hr = pWizard[1].CoCreateInstance(CLSID_DisplayServer);
BOOST_CHECK_EQUAL(pWizard[0], pWizard[1]);
}
HRESULT CreateDispSvr(DWORD dwDispSvrFlags, CComPtr<IDisplayServer> &pDispSvr)
{
HRESULT hr = pDispSvr.CoCreateInstance(CLSID_DisplayServer);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr) && pDispSvr, "CoCreateInstance CLSID_DisplayServer failed hr=0x" << hex << hr);
hr = pDispSvr->Initialize(dwDispSvrFlags, g_hwndDevice, NULL);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr), "pDispSvr->Initialize failed hr=0x" << hex << hr);
return hr;
}
void TestDispSvrVideoMixer()
{
CoInit co;
CComPtr<IDisplayServer> pDispSvr;
HRESULT hr = CreateDispSvr(0, pDispSvr);
if (FAILED(hr))
return;
CComPtr<IDisplayRenderEngine> pRenderEngine;
hr = pDispSvr->GetRenderEngine(&pRenderEngine);
    if (FAILED(hr))
        return;
    // hr is guaranteed to be successful past the early return above.
{
CComQIPtr<IDispSvrVideoMixer> pMixer = pRenderEngine;
if (pMixer)
{
// format unknown *SHOULD NOT* be supported.
PlaneCaps caps;
hr = pMixer->QueryPlaneCaps(PLANE_MAINVIDEO, PLANE_FORMAT_UNKNOWN, &caps);
BOOST_CHECK_MESSAGE(FAILED(hr), "IDispSvrVideoMixer::QueryPlaneCaps(PLANE_MAINVIDEO, PLANE_FORMAT_UNKNOWN, caps) failed. hr=0x" << hex << hr);
// format YUY2 must be supported by main video.
hr = pMixer->QueryPlaneCaps(PLANE_MAINVIDEO, PLANE_FORMAT_YUY2, &caps);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr), "IDispSvrVideoMixer::QueryPlaneCaps(PLANE_MAINVIDEO, PLANE_FORMAT_YUY2, caps) failed. hr=0x" << hex << hr);
// format YUY2 must be supported by sub video.
hr = pMixer->QueryPlaneCaps(PLANE_SUBVIDEO, PLANE_FORMAT_YUY2, &caps);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr), "IDispSvrVideoMixer::QueryPlaneCaps(PLANE_SUBVIDEO, PLANE_FORMAT_YUY2, caps) failed. hr=0x" << hex << hr);
// format ARGB must be supported by background, graphics, interactive.
hr = pMixer->QueryPlaneCaps(PLANE_BACKGROUND, PLANE_FORMAT_ARGB, &caps);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr), "IDispSvrVideoMixer::QueryPlaneCaps(PLANE_BACKGROUND, PLANE_FORMAT_ARGB, caps) failed. hr=0x" << hex << hr);
hr = pMixer->QueryPlaneCaps(PLANE_GRAPHICS, PLANE_FORMAT_ARGB, &caps);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr), "IDispSvrVideoMixer::QueryPlaneCaps(PLANE_GRAPHICS, PLANE_FORMAT_ARGB, caps) failed. hr=0x" << hex << hr);
hr = pMixer->QueryPlaneCaps(PLANE_INTERACTIVE, PLANE_FORMAT_ARGB, &caps);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr), "IDispSvrVideoMixer::QueryPlaneCaps(PLANE_INTERACTIVE, PLANE_FORMAT_ARGB, caps) failed. hr=0x" << hex << hr);
// test case for dshow VideoSourceDO.
CComPtr<IDispSvrVideoMixerPlane> pPlane;
PlaneInit planeInit = {0};
planeInit.PlaneID = PLANE_MAINVIDEO;
planeInit.dwFlags = PLANE_INIT_EXTERNAL_SURFACE;
planeInit.Format = PLANE_FORMAT_YUY2; // YUY2 must be supported by main video.
hr = pMixer->CreatePlane(&planeInit, __uuidof(IDispSvrVideoMixerPlane), (void **)&pPlane);
BOOST_CHECK_MESSAGE(SUCCEEDED(hr), "IDispSvrVideoMixer::CreatePlane(PLANE_MAINVIDEO, PLANE_FORMAT_UNKNOWN) failed. hr=0x" << hex << hr);
}
}
pDispSvr->Terminate();
}
test_suite* init_unit_test_suite(int argc, char* argv [])
{
test_suite *pSuite = NULL;
g_hwndDevice = CreateWindow(_T("STATIC"), _T("DispSvr"), 0, 0, 0, 16, 16, 0, 0, 0, 0);
pSuite = BOOST_TEST_SUITE("DispSvr unit test");
pSuite->add(BOOST_TEST_CASE(&TestSingleton));
pSuite->add(BOOST_TEST_CASE(&TestCoCreateInstance));
pSuite->add(BOOST_TEST_CASE(&TestDispSvrVideoMixer));
return pSuite;
}
|
{"hexsha": "6369114b2fa92f3bc0a0b51edc178721d922e81f", "size": 5258, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Video/Render/Core/DispSvr/Main/test/test.cpp", "max_stars_repo_name": "goodspeed24e/2011Corel", "max_stars_repo_head_hexsha": "4efb585a589ea5587a877f4184493b758fa6f9b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-07-24T07:59:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-24T07:59:07.000Z", "max_issues_repo_path": "Video/Render/Core/DispSvr/Main/test/test.cpp", "max_issues_repo_name": "goodspeed24e/2011Corel", "max_issues_repo_head_hexsha": "4efb585a589ea5587a877f4184493b758fa6f9b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Video/Render/Core/DispSvr/Main/test/test.cpp", "max_forks_repo_name": "goodspeed24e/2011Corel", "max_forks_repo_head_hexsha": "4efb585a589ea5587a877f4184493b758fa6f9b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2015-03-17T12:11:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T01:15:52.000Z", "avg_line_length": 38.6617647059, "max_line_length": 147, "alphanum_fraction": 0.7662609357, "num_tokens": 1549}
|
from __future__ import print_function
import numpy as np
import reader
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
def _fit_linear_function(x, y):
X = np.array(x).reshape((-1, 1))
Y = np.array(y)
print('x: ', X)
print('y: ', Y)
model = LinearRegression()
model.fit(X, Y)
alpha = model.intercept_
beta = model.coef_[0]
#A = np.vstack([X, np.ones(len(X))]).T
#beta, alpha = np.linalg.lstsq(A, Y, rcond=None)[0]
return alpha, beta
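# A small helper (sketch): evaluate the fitted linear cost model
# t(s) = alpha + beta * s, where alpha acts as a fixed startup latency and
# beta as a per-byte transfer cost. _predict_time is a new name introduced
# here for illustration.
def _predict_time(alpha, beta, sizes):
    return alpha + beta * np.array(sizes)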
def model_bcast_log():
fn='logs/nccl-bcast-n16IB.log'
#fn='logs/nccl-bcast-n64.log'
#fn='~/LinZ/kfac_pytorch/packages/nccl-tests/nccl-allreduce-32-100GbIB-RTX2080ti.log'
sizes, comms, errors = reader.read_times_from_nccl_log(fn, start=1024, end=1024*1024*512, original=True)
print('sizes: ', sizes)
print('comms: ', comms)
print('errors: ', errors)
alpha, beta = _fit_linear_function(np.array(sizes), comms)
print('alpha: ', alpha, ', beta: ', beta)
py = alpha + beta * np.array(sizes)
fig, ax = plt.subplots(figsize=(8.,4))
    ax.plot(sizes, comms, marker='o', label='measured')
    ax.plot(sizes, py, marker='^', label='fit')
    ax.legend()
    plt.show()
def model_inverse_compute_log():
fn = 'logs/inverse-resnet50.log'
sizes, times = reader.read_tensorsize_vs_time(fn)
print('sizes: ', sizes)
print('times: ', times)
alpha, beta = _fit_linear_function(np.array(sizes), times)
print('alpha: ', alpha, ', beta: ', beta)
py = alpha + beta * np.array(sizes)
fig, ax = plt.subplots(figsize=(8.,4))
    ax.scatter(sizes, times, marker='o', label='measured')
    ax.scatter(sizes, py, marker='^', label='fit')
    ax.legend()
    plt.show()
if __name__ == '__main__':
model_bcast_log()
#model_inverse_compute_log()
|
{"hexsha": "ab91dff51bc4b87945048ee38db5d9e8ec3a69f7", "size": 1786, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/comm_models.py", "max_stars_repo_name": "lzhangbv/kfac_pytorch", "max_stars_repo_head_hexsha": "159e7ef9541bb960d79c438622780cdcc71b3210", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/comm_models.py", "max_issues_repo_name": "lzhangbv/kfac_pytorch", "max_issues_repo_head_hexsha": "159e7ef9541bb960d79c438622780cdcc71b3210", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/comm_models.py", "max_forks_repo_name": "lzhangbv/kfac_pytorch", "max_forks_repo_head_hexsha": "159e7ef9541bb960d79c438622780cdcc71b3210", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3333333333, "max_line_length": 108, "alphanum_fraction": 0.6478163494, "include": true, "reason": "import numpy", "num_tokens": 532}
|
"""
- Blockchain for Federated Learning -
Blockchain script
"""
import hashlib
import json
import time
from flask import Flask,jsonify,request
from uuid import uuid4
from urllib.parse import urlparse
import requests
import random
from threading import Thread, Event
import pickle
import codecs
import data.federated_data_extractor as dataext
import numpy as np
from federatedlearner import *
def compute_global_model(base,updates,lrate):
    '''
    Function to compute the global model based on the client
    updates received per round: every client's model is scaled by
    lrate/number_of_clients and accumulated onto a copy of the base
    weights, after which the aggregate is evaluated on the test set.
    '''
    upd = dict()
    # Start from a copy of the base model's weights and biases.
    for x in ['w1','w2','wo','b1','b2','bo']:
        upd[x] = np.array(base[x], copy=True)
    number_of_clients = len(updates)
    # Accumulate each client's scaled contribution.
    for client in updates.keys():
        model = updates[client].update
        for x in ['w1','w2','wo','b1','b2','bo']:
            upd[x] += (lrate/number_of_clients)*(model[x]+base[x])
    upd["size"] = 0
reset()
dataset = dataext.load_data("data/mnist.d")
worker = NNWorker(None,
None,
dataset['test_images'],
dataset['test_labels'],
0,
"validation")
worker.build(upd)
accuracy = worker.evaluate()
worker.close()
return accuracy,upd
def find_len(text,strk):
    '''
    Function to find the marker string strk in text and return its starting
    position together with the marker's length, which is used below to slice
    the serialized metadata right after each marker.
    '''
    return text.find(strk),len(strk)
class Update:
    def __init__(self,client,baseindex,update,datasize,computing_time,timestamp=None):
        '''
        Function to initialize the update string parameters
        '''
        # Default arguments are evaluated once at import time, so the
        # timestamp is taken at call time instead.
        self.timestamp = timestamp if timestamp is not None else time.time()
self.baseindex = baseindex
self.update = update
self.client = client
self.datasize = datasize
self.computing_time = computing_time
@staticmethod
def from_string(metadata):
'''
Function to get the update string values
'''
i,l = find_len(metadata,"'timestamp':")
i2,l2 = find_len(metadata,"'baseindex':")
i3,l3 = find_len(metadata,"'update': ")
i4,l4 = find_len(metadata,"'client':")
i5,l5 = find_len(metadata,"'datasize':")
i6,l6 = find_len(metadata,"'computing_time':")
baseindex = int(metadata[i2+l2:i3].replace(",",'').replace(" ",""))
update = dict(pickle.loads(codecs.decode(metadata[i3+l3:i4-1].encode(), "base64")))
timestamp = float(metadata[i+l:i2].replace(",",'').replace(" ",""))
client = metadata[i4+l4:i5].replace(",",'').replace(" ","")
datasize = int(metadata[i5+l5:i6].replace(",",'').replace(" ",""))
computing_time = float(metadata[i6+l6:].replace(",",'').replace(" ",""))
return Update(client,baseindex,update,datasize,computing_time,timestamp)
def __str__(self):
'''
Function to return the update string values in the required format
'''
return "'timestamp': {timestamp},\
'baseindex': {baseindex},\
'update': {update},\
'client': {client},\
'datasize': {datasize},\
'computing_time': {computing_time}".format(
timestamp = self.timestamp,
baseindex = self.baseindex,
update = codecs.encode(pickle.dumps(sorted(self.update.items())), "base64").decode(),
client = self.client,
datasize = self.datasize,
computing_time = self.computing_time
)
class Block:
    def __init__(self,miner,index,basemodel,accuracy,updates,timestamp=None):
'''
Function to initialize the update string parameters per created block
'''
self.index = index
self.miner = miner
        # Take the timestamp at call time rather than at import time.
        self.timestamp = timestamp if timestamp is not None else time.time()
self.basemodel = basemodel
self.accuracy = accuracy
self.updates = updates
@staticmethod
def from_string(metadata):
'''
Function to get the update string values per block
'''
i,l = find_len(metadata,"'timestamp':")
i2,l2 = find_len(metadata,"'basemodel': ")
i3,l3 = find_len(metadata,"'index':")
i4,l4 = find_len(metadata,"'miner':")
i5,l5 = find_len(metadata,"'accuracy':")
i6,l6 = find_len(metadata,"'updates':")
i9,l9 = find_len(metadata,"'updates_size':")
index = int(metadata[i3+l3:i4].replace(",",'').replace(" ",""))
miner = metadata[i4+l4:i].replace(",",'').replace(" ","")
timestamp = float(metadata[i+l:i2].replace(",",'').replace(" ",""))
basemodel = dict(pickle.loads(codecs.decode(metadata[i2+l2:i5-1].encode(), "base64")))
accuracy = float(metadata[i5+l5:i6].replace(",",'').replace(" ",""))
su = metadata[i6+l6:i9]
su = su[:su.rfind("]")+1]
updates = dict()
for x in json.loads(su):
isep,lsep = find_len(x,"@|!|@")
updates[x[:isep]] = Update.from_string(x[isep+lsep:])
updates_size = int(metadata[i9+l9:].replace(",",'').replace(" ",""))
return Block(miner,index,basemodel,accuracy,updates,timestamp)
def __str__(self):
'''
Function to return the update string values in the required format per block
'''
return "'index': {index},\
'miner': {miner},\
'timestamp': {timestamp},\
'basemodel': {basemodel},\
'accuracy': {accuracy},\
'updates': {updates},\
'updates_size': {updates_size}".format(
index = self.index,
miner = self.miner,
basemodel = codecs.encode(pickle.dumps(sorted(self.basemodel.items())), "base64").decode(),
accuracy = self.accuracy,
timestamp = self.timestamp,
updates = str([str(x[0])+"@|!|@"+str(x[1]) for x in sorted(self.updates.items())]),
updates_size = str(len(self.updates))
)
class Blockchain(object):
def __init__(self,miner_id,base_model=None,gen=False,update_limit=10,time_limit=1800):
super(Blockchain,self).__init__()
self.miner_id = miner_id
self.curblock = None
self.hashchain = []
self.current_updates = dict()
self.update_limit = update_limit
self.time_limit = time_limit
if gen:
genesis,hgenesis = self.make_block(base_model=base_model,previous_hash=1)
self.store_block(genesis,hgenesis)
self.nodes = set()
def register_node(self,address):
if address[:4] != "http":
address = "http://"+address
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
print("Registered node",address)
def make_block(self,previous_hash=None,base_model=None):
accuracy = 0
basemodel = None
time_limit = self.time_limit
update_limit = self.update_limit
if len(self.hashchain)>0:
update_limit = self.last_block['update_limit']
time_limit = self.last_block['time_limit']
        if previous_hash is None:
previous_hash = self.hash(str(sorted(self.last_block.items())))
        if base_model is not None:
accuracy = base_model['accuracy']
basemodel = base_model['model']
elif len(self.current_updates)>0:
base = self.curblock.basemodel
accuracy,basemodel = compute_global_model(base,self.current_updates,1)
index = len(self.hashchain)+1
block = Block(
miner = self.miner_id,
index = index,
basemodel = basemodel,
accuracy = accuracy,
updates = self.current_updates
)
hashblock = {
'index':index,
'hash': self.hash(str(block)),
'proof': random.randint(0,100000000),
'previous_hash': previous_hash,
'miner': self.miner_id,
'accuracy': str(accuracy),
'timestamp': time.time(),
'time_limit': time_limit,
'update_limit': update_limit,
'model_hash': self.hash(codecs.encode(pickle.dumps(sorted(block.basemodel.items())), "base64").decode())
}
return block,hashblock
def store_block(self,block,hashblock):
if self.curblock:
with open("blocks/federated_model"+str(self.curblock.index)+".block","wb") as f:
pickle.dump(self.curblock,f)
self.curblock = block
self.hashchain.append(hashblock)
self.current_updates = dict()
return hashblock
def new_update(self,client,baseindex,update,datasize,computing_time):
self.current_updates[client] = Update(
client = client,
baseindex = baseindex,
update = update,
datasize = datasize,
computing_time = computing_time
)
return self.last_block['index']+1
@staticmethod
def hash(text):
return hashlib.sha256(text.encode()).hexdigest()
@property
def last_block(self):
return self.hashchain[-1]
def proof_of_work(self,stop_event):
block,hblock = self.make_block()
stopped = False
        while not self.valid_proof(str(sorted(hblock.items()))):
if stop_event.is_set():
stopped = True
break
hblock['proof'] += 1
if hblock['proof']%1000==0:
print("mining",hblock['proof'])
        if not stopped:
self.store_block(block,hblock)
if stopped:
print("Stopped")
else:
print("Done")
return hblock,stopped
@staticmethod
    def valid_proof(block_data):
        # A proof is valid when the SHA-256 hash of the serialized block data
        # starts with five leading zeros (the fixed proof-of-work difficulty).
        guess_hash = hashlib.sha256(block_data.encode()).hexdigest()
        k = "00000"
        return guess_hash[:len(k)] == k
def valid_chain(self,hchain):
last_block = hchain[0]
        current_index = 1
        while current_index<len(hchain):
            hblock = hchain[current_index]
            if hblock['previous_hash'] != self.hash(str(sorted(last_block.items()))):
                print("previous hash mismatch",current_index)
                return False
            if not self.valid_proof(str(sorted(hblock.items()))):
                print("invalid proof",current_index)
                return False
            last_block = hblock
            current_index += 1
return True
def resolve_conflicts(self,stop_event):
neighbours = self.nodes
new_chain = None
bnode = None
max_length = len(self.hashchain)
for node in neighbours:
response = requests.get('http://{node}/chain'.format(node=node))
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length>max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
bnode = node
if new_chain:
stop_event.set()
self.hashchain = new_chain
hblock = self.hashchain[-1]
resp = requests.post('http://{node}/block'.format(node=bnode),
json={'hblock': hblock})
self.current_updates = dict()
if resp.status_code == 200:
if resp.json()['valid']:
self.curblock = Block.from_string(resp.json()['block'])
return True
return False
|
{"hexsha": "69f3602ed7b2e601ec6352873d3da173a491e9aa", "size": 11546, "ext": "py", "lang": "Python", "max_stars_repo_path": "Blockchain+Federated-Learning/blockchain.py", "max_stars_repo_name": "raja21068/BLOCKCHAIN-FEDERATED-LEARNING-MINST-DATASET", "max_stars_repo_head_hexsha": "20e46a5b37677b2cffa780a19eeefb1b702f1d86", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-26T05:40:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-26T05:40:19.000Z", "max_issues_repo_path": "src/blockchain.py", "max_issues_repo_name": "raja21068/BLOCKCHAIN-FEDERATED-LEARNING-MINST-DATASET", "max_issues_repo_head_hexsha": "20e46a5b37677b2cffa780a19eeefb1b702f1d86", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/blockchain.py", "max_forks_repo_name": "raja21068/BLOCKCHAIN-FEDERATED-LEARNING-MINST-DATASET", "max_forks_repo_head_hexsha": "20e46a5b37677b2cffa780a19eeefb1b702f1d86", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-24T18:01:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-24T18:01:16.000Z", "avg_line_length": 34.9878787879, "max_line_length": 116, "alphanum_fraction": 0.573358739, "include": true, "reason": "import numpy", "num_tokens": 2608}
|
# Model project - Externalities and Pigou taxes
Our model project consists of a microeconomic model describing, from a social economic point of view, the inefficiencies caused by pollution from production. We introduce a demand and a supply function, but the suppliers' production is associated with a negative externality cost.
We apply model analysis methods to find the market and social equilibrium outputs and prices, and we include graphs to illustrate these equilibria. As a method to resolve this social inefficiency created by the market forces, we introduce a Pigou tax and calculate its optimal size.
**1: Setup**
```python
#Importing the relevant packages
import sympy as sm
from sympy import *
import numpy as np
import scipy as sp
from scipy import optimize
import matplotlib.pyplot as plt
import ipywidgets as widgets
from ipywidgets import interact, fixed
sm.init_printing(use_unicode=True) #This code enables pretty printing for the mathematical symbols we will use
```
```python
#Defining the relevant variables and parameters from our model with the sympy symbols function
xd = sm.symbols('x_d') #The amount of goods demanded by consumers
xs = sm.symbols('x_s') #The amount of goods supplied by suppliers
A = sm.symbols('A') #The price consumers are willing to pay if they can only get an infinitely small amount of goods (assumed declining willingness to pay)
B = sm.symbols('B') #The price the suppliers are willing to sell for if they can only sell an infinitely small amount of goods
p = sm.symbols('p') #The price of goods
alpha = sm.symbols('alpha') #A measure of the consumers' sensitivity to changes in prices
beta = sm.symbols('beta') #A measure of the suppliers' sensitivity to changes in prices
delta = sm.symbols('\delta') #An arbitrarily chosen multiplier that creates the negative externality through production
x = sm.symbols('x') #The quantity of the goods traded
xc = sm.symbols('xc') #Used for plotting x
deltax = sm.symbols('deltax') #Used for plotting delta
```
```python
#Checking whether the variables and parameters are correctly defined
xd, xs, A, B, p, alpha, beta, delta, x
```
**2: The Model**
To set up our model, we first introduce the demand and supply functions of the consumers and suppliers of the economy respectively, together with the negative externality function. It is a simple economy setup with only one representative consumer, one representative supplier and a single type of good. The agents seek to trade based on the following equations. The producers and the consumers do not care about the negative externality, so it does not affect their trading behaviour. The equations are as follows:
Demand: $x_{d}=\frac{A-p}{\alpha}$
Supply: $x_{s}=\frac{B+p}{\beta}$
Negative externality: $C_E(x)=(\delta x)^2$
Firstly, we define the demand, supply and negative externality cost functions as follows:
```python
demand = (A-p)/alpha
supply = (B+p)/beta
externality = (delta*x)**2
demand, supply, externality #Prints the three definitions
```
Now, from the demand and supply functions we can calculate the market price for the good by setting the two equal to each other and solving for $p$. This yields:
```python
#Setting demand and supply equal to each other and solving for p
Marketprice = sm.solve(sm.Eq(demand,supply),p) #We use the sympy Eq function to set the two equal to each other
#and use the sympy solve function to solve for p in this equation.
Marketprice
```
From this result we see that the price - intuitively - depends positively on the consumers' initial valuation of the goods and negatively on the producers' initial valuation. This result can be inserted in either the demand or the supply function to obtain the amount of traded goods in equilibrium, where supply equals demand.
```python
#Finding the equilibrium output by inserting the marketprice into the demand function
Marketoutput = demand.subs(p, Marketprice[0])
#We use the .subs method to insert the marketprice result from before instead of p in the demand function.
#As the marketprice expression is defined as a list-type, we include the index [0] to refer to the expression herein.
sm.simplify(Marketoutput) #This function simplifies the marketoutput expression.
```
```python
#We can check whether we get the same result by inserting the market price equilibrium into the supply function
CheckMarketoutput = supply.subs(p, Marketprice[0]) #Same calculation procedure as in code cell above.
sm.simplify(CheckMarketoutput)
```
Luckily, we find that the two results are identical, which shows that the found price should be correct.
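We can make this check explicit with a one-line confirmation that the two expressions agree:
```python
sm.simplify(Marketoutput - CheckMarketoutput) #The difference should simplify to zero
```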
From the marketoutput expression we once again see that more goods are traded if consumers are willing to pay high prices for initial goods (through A) and suppliers are willing to supply them cheaply (through B). We also see that the output depends negatively on the price sensitivity of both agents.
Unfortunately, the production from the suppliers also creates a negative externality due to pollution or a similar side effect. This is assumed to have a convex negative impact on society, as can be seen from the graphical depiction below, where we as an example have set $\delta=1$.
```python
#We impose an externality cost of production due to emission
delta = 1
xc = np.linspace(0,2)
ExternalityCost = (delta*xc)**2
plt.plot(xc,ExternalityCost)
plt.xlabel("Quantity")
plt.ylabel("Costs from externality")
plt.title("The convex cost function of the externality")
plt.show()
```
In order to find the socially optimal quantity produced and the associated price, we start by calculating the marginal cost of the externality below. From this the convex nature of the externality is once again evident. We get:
```python
#Finding the marginal externality cost by differentiating w.r.t. x
MarginalExternalityCost = sm.diff(externality, x) #Using the sympy function "diff" to differentiate externality wrt.x
MarginalExternalityCost #Printing the result
```
We now also need to find the inverse supply function, which shall be added to the marginal externality cost to give us the social marginal cost of production.
```python
#Private marginal cost (the inverse supply function)
PrivateMarginalCost = sm.solve(sm.Eq(supply,x),p) #We set the supply expression equal to x and solve for p.
PrivateMarginalCost
```
```python
#Social marginal cost is the sum of the private marginal cost and the marginal externality cost
SocialMarginalCost = PrivateMarginalCost[0] + MarginalExternalityCost
SocialMarginalCost
```
Seen above is the social marginal cost function, which takes the negative effects of the externality into account and adds it to the supply function. As $\delta>0$, the social marginal cost will be larger than the private cost from the suppliers. The social marginal cost curve will thus have a steeper slope than the supply curve.
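The wedge between the two cost curves is exactly the marginal externality cost derived above, which we can confirm symbolically:
```python
#The difference between the social and the private marginal cost should equal
#the marginal externality cost 2*delta**2*x found earlier
sm.simplify(SocialMarginalCost - PrivateMarginalCost[0])
```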
To now finally find the socially optimal amount of traded goods and the associated price, we start by finding the inverse demand function:
```python
#Inverse demand curve
InverseDemand = sm.solve(sm.Eq(demand,x),p)
InverseDemand
```
And we now set this inverse demand function equal to the social marginal cost and solve for $x$ to find the optimal amount of traded goods:
```python
#Finding the social optimal output by setting the demand function equal to the social marginal cost
SocialOptimal = sm.solve(sm.Eq(InverseDemand[0], SocialMarginalCost), x)
SocialOptimal
```
Now to finally find the optimal price, we insert this expression into the demand function:
```python
SocialOptimalPrice = sm.solve(sm.Eq(demand,SocialOptimal[0]),p)
SocialOptimalPrice
```
Which is the optimal price when considering the externality.
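As a quick sanity check - a sketch assuming the symbolic results above are still in scope - the social optimal price should collapse to the ordinary market price when the externality disappears. Since the Python name `delta` was rebound to the number 1 in the plotting cell above, we re-create the sympy symbol before substituting:
```python
delta_sym = sm.symbols('\delta') #Same name as the original symbol, so .subs matches it
sm.simplify(SocialOptimalPrice[0].subs(delta_sym, 0) - Marketprice[0]) #Expect 0
```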
**3: Graphing the economy**
To give a graphical overview of the economy, we plot a graph below where it is possible to change the value of the parameter $\delta$, giving an insight into how the social cost and thereby the optimum depend on this parameter.
```python
def PlotGraph(A, alpha, beta, B, delta):
#This function is able to plot the graphs of the demand, supply and SMC-functions with different parameter values.
x = np.linspace(0,200) #Here we choose over which span the x quantity runs in the graph.
d = A-alpha*x #Defining the demand function
s = beta*x-B #Defining the supply function
smc = x*(beta+2*delta**2)-B #Defining the social marginal cost function
plt.xlabel("Quantity") #Labelling x-axis
plt.ylabel("Price") #Labelling y-axis
plt.grid() #Putting a grid in the background of the graph
plt.title("Supply, demand and social marginal cost") #Adding title to graph
plt.plot(x, d, label="D") #Plotting and labelling demand function
plt.plot(x, s, label="S") #Plotting and labelling supply function
plt.plot(x, smc, label="SMC") #Plotting and labelling SMC function
plt.legend(loc="upper right") #Choosing to put label names in upper right corner.
```
```python
widgets.interact(PlotGraph,A=widgets.fixed(800), alpha=widgets.fixed(4),
delta=widgets.FloatSlider(description="$\delta$", min=0.0, max=2 , step=0.05, value=1),
B=widgets.fixed(0), beta=widgets.fixed(2))
#These lines of code use the graphing function "PlotGraph" and adds a Floatslider, so the user can adjust
#the value of the delta parameter.
```
From this graph we clearly see that when $\delta$ increases, the socially optimal price also increases and the quantity traded is thereby reduced. When $\delta$ on the other hand reaches zero, the SMC and supply curves are identical - so in the absence of externalities the social optimum coincides with the market optimum.
In this example, when externalities are present this is however not the case, as neither the consumers nor the producers care about the externality. To take this into account and reach the social optimum, we will now look at the effects of introducing a Pigou tax.
**4: Pigou taxes**
A Pigou tax is a tax that aims at correcting inefficient market outcomes such as the current example. The tax aims at increasing the price level of the specific good with the externality and thus, hopefully, changing trading behaviour so the externality is reduced optimally. The optimal size of the Pigouvian tax is the difference between what the consumers are willing to pay and what the suppliers are willing to sell their goods for at the socially optimal traded quantity.
We have already found the price level for the consumers in the social optimum, and now only need to find the price at which the suppliers are willing to sell at the social optimum. These two prices are:
```python
#Inserting the social optimal output into the supply function
SocialOptimalSupply = sm.solve(sm.Eq(SocialOptimal[0], supply), p)
SocialOptimalSupply,SocialOptimalPrice
```
And now we simply subtract the two from each other to get:
```python
#The optimal pigou tax is the difference between the demand and supply
PigouTax = SocialOptimalPrice[0] - SocialOptimalSupply[0]
sm.simplify(PigouTax)
```
Which is then the optimal size of the Pigouvian tax, that can bring the two agents of the economy to trade the desired level of goods from a social point of view. We will quickly have a graphical look at how the size of this tax is affected by the size of $\delta$ as this is not necessarily clear from the expression above:
```python
#First we choose the parameter values for the graphing example below
A = 800
B = 0
alpha = 4
beta = 2
deltax = np.linspace(0,8) #Choosing the span in which delta should be plotted on the graph
Pigoutax = (2*deltax**2*(A+B))/(2*deltax**2+alpha+beta) #Defining the function for the Pigouvian tax
plt.plot(deltax,Pigoutax) #Plotting the graph and adding labels and titles.
plt.xlabel("Delta")
plt.ylabel("Pigou")
plt.title("Pigoutax")
plt.show()
```
As before we have arbitrarily set the parameter values - specifically $A=800, B=0, \alpha=4$ and $\beta=2$. We see that the optimal size of the Pigouvian tax increases with $\delta$ and converges towards the size of A, which is the highest possible price a consumer is willing to pay for an infinitely small amount of goods.
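We can check this convergence claim symbolically, a small sketch reusing the symbolic `PigouTax` expression from above (the $\delta$ symbol is re-created because the Python name now holds a number):
```python
delta_sym = sm.symbols('\delta') #Re-create the symbol; the name delta was rebound above
sm.limit(PigouTax, delta_sym, sm.oo) #Expect A + B, which equals A when B = 0
```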
**5: Solving the model numerically**
We will now solve the model numerically, where we assign arbitrary values to the parameters of the model and calculate the equilibrium outcomes with and without externalities. We recall that the equilibria are given as:
Private market equilibrium price: $p^{opt}=\frac{A \beta-B \alpha}{\alpha+\beta}$
Private market equilibrium output: $x^{opt}=\frac{A+B}{\alpha+\beta}$
Social market equilibrium price: $p^{soc}=\frac{2A\delta^2+A\beta-B\alpha}{2\delta^2+\alpha+\beta}$
Social market equilibrium output: $x^{soc}=\frac{A+B}{2\delta^2+\alpha+\beta}$
We will continue to use the parameter values that we used in the graphs above, that is: $A=800$, $B=0$, $\alpha=4$, $\beta=2$, $\delta=1$
```python
#We find the market equilibria with and without the externality included given the chosen parameter values
MarketEquilibriumPrice_num = optimize.fsolve(lambda p: (A*beta-B*alpha)/(alpha+beta)-p,0)
MarketEquilibriumOutput_num = optimize.fsolve(lambda x: (A+B)/(alpha+beta)-x,0)
SocialEquilibriumPrice_num = optimize.fsolve(lambda p: (2*A*delta**2+A*beta-B*alpha)/(2*delta**2+alpha+beta)-p,0)
SocialEquilibriumOutput_num = optimize.fsolve(lambda x: (A+B)/(2*delta**2+alpha+beta)-x,0)
print(f'The equilibrium price in the economy without externality costs is: {MarketEquilibriumPrice_num}')
print(f'The equilibrium output in the economy without externality costs is: {MarketEquilibriumOutput_num}')
print(f'The equilibrium price in the economy with externality costs is: {SocialEquilibriumPrice_num}')
print(f'The equilibrium output in the economy with externality costs is: {SocialEquilibriumOutput_num}')
```
The equilibrium price in the economy without externality costs is: [266.66666667]
The equilibrium output in the economy without externality costs is: [133.33333333]
The equilibrium price in the economy with externality costs is: [400.]
The equilibrium output in the economy with externality costs is: [100.]
Thus, we have now numerically solved the equilibria with and without the negative externality cost. We know from the graph, that when we included this externality, the prices would increase and the output decrease. We now see that including the externality cost raises the equilibrium price from 267 to 400, while the output falls from 133 to 100.
As a method to correct this market inefficiency, we introduced a Pigouvian tax to reach this social market equilibrium. Given the chosen parameter values, we can find this optimal size:
```python
PigouTax_num = optimize.fsolve(lambda t: (2*delta**2*(A+B))/(2*delta**2+alpha+beta)-t,0)
print(f'The optimal size of the Pigouvian tax in the economy is: {PigouTax_num}')
```
The optimal size of the Pigouvian tax in the economy is: [200.]
This optimal size of the tax means that there is a difference of 200 between the price that the buyer pays and the price that the seller receives for one unit of the good. Thus, when the buyer pays 400 for one unit of the good, the seller only receives 200. The remaining 200 goes to the government.
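We can make this split explicit with the numbers just computed, a small sketch (`SellerPrice_num` is a new name introduced here for illustration):
```python
SellerPrice_num = SocialEquilibriumPrice_num - PigouTax_num #Buyer price minus the tax
print(f'The price received by the seller after the tax is: {SellerPrice_num}')
```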
**6: Conclusion**
This model project has shown us how the presence of externalities in a market can cause differences between the market optimum and the social optimum. The larger the externality cost, the larger the difference between these two optima will be. This is a very relevant insight, as many parts of the real world economy face similar issues, where the behavior and incentives of many agents contradict what society desires as a whole.
In the second part of the project, we introduced an instrument to fix potential market inefficiencies and lead the agents in the market towards the social optimum of goods traded. Of course this is a simple microeconomic setup, and in the real world it is impossible to identify the optimal size of the Pigouvian tax, as the exact desires and incentives from the agents of the economy are unknown. However, this tax is an effective way to reduce market inefficiencies, though it is impossible to get rid of all of them.
In the third and final part of the project, we solved the model numerically, where we assigned arbitrary values to the parameters of the model. Unsurprisingly, we found the output equilibrium to decrease and the price equilibrium to increase when we included the negative externality in the model - similar to what we found from the graphical inspection. It is important to note that we have chosen the parameter values arbitrarily, and changes to any of the parameters will affect the equilibria.
|
{"hexsha": "1ffc16f806f5661b5d2def54860dfbb75de641b9", "size": 88480, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "1312/modelproject/Model-project.ipynb", "max_stars_repo_name": "NumEconCopenhagen/projects-2019-1312", "max_stars_repo_head_hexsha": "46356ec3ad338642152f6667a0761697d1c4cbdf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "1312/modelproject/Model-project.ipynb", "max_issues_repo_name": "NumEconCopenhagen/projects-2019-1312", "max_issues_repo_head_hexsha": "46356ec3ad338642152f6667a0761697d1c4cbdf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-04-09T11:31:24.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-13T17:59:39.000Z", "max_forks_repo_path": "1312/modelproject/Model-project.ipynb", "max_forks_repo_name": "NumEconCopenhagen/projects-2019-1312", "max_forks_repo_head_hexsha": "46356ec3ad338642152f6667a0761697d1c4cbdf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-03-25T10:44:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-20T13:06:19.000Z", "avg_line_length": 102.4074074074, "max_line_length": 18732, "alphanum_fraction": 0.8410149186, "converted": true, "num_tokens": 3988}
|
#include <boost/hana/fwd/fold_right.hpp>
|
{"hexsha": "19136574f8f2621e64a43c3bea95e372e695958d", "size": 41, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_hana_fwd_fold_right.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_hana_fwd_fold_right.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_hana_fwd_fold_right.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 20.5, "max_line_length": 40, "alphanum_fraction": 0.7804878049, "num_tokens": 12}
|